| lang (stringclasses, 2 values) | license (stringclasses, 13 values) | stderr (stringlengths 0-343) | commit (stringlengths 40-40) | returncode (int64, 0-128) | repos (stringlengths 6-87.7k) | new_contents (stringlengths 0-6.23M) | new_file (stringlengths 3-311) | old_contents (stringlengths 0-6.23M) | message (stringlengths 6-9.1k) | old_file (stringlengths 3-311) | subject (stringlengths 0-4k) | git_diff (stringlengths 0-6.31M) |
---|---|---|---|---|---|---|---|---|---|---|---|---|
Java | apache-2.0 | 0e42a758334f62a6444cb353b5a14dcdc73e5c42 | 0 | alibaba/nacos,alibaba/nacos,alibaba/nacos,alibaba/nacos | /*
* Copyright 1999-2018 Alibaba Group Holding Ltd.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.alibaba.nacos.config.server.manager;
import com.alibaba.nacos.api.exception.NacosException;
import com.alibaba.nacos.common.task.AbstractDelayTask;
import com.alibaba.nacos.common.task.engine.NacosDelayTaskExecuteEngine;
import com.alibaba.nacos.config.server.constant.Constants;
import com.alibaba.nacos.config.server.monitor.MetricsMonitor;
import com.alibaba.nacos.config.server.utils.LogUtil;
import org.slf4j.Logger;
import javax.management.ObjectName;
import java.lang.management.ManagementFactory;
import java.util.Date;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.Condition;
/**
* TaskManager aims to process tasks that need to be done.
* It processes each task on a single thread to ensure the task is processed successfully.
*
* @author huali
*/
public final class TaskManager extends NacosDelayTaskExecuteEngine implements TaskManagerMBean {
private static final Logger LOGGER = LogUtil.DEFAULT_LOG;
private String name;
Condition notEmpty = this.lock.newCondition();
public TaskManager(String name) {
super(name, LOGGER, 100L);
this.name = name;
}
/**
* Close task manager.
*/
public void close() {
try {
super.shutdown();
} catch (NacosException ignored) {
}
}
/**
* Await for lock.
*
* @throws InterruptedException InterruptedException.
*/
public void await() throws InterruptedException {
this.lock.lock();
try {
while (!this.isEmpty()) {
this.notEmpty.await();
}
} finally {
this.lock.unlock();
}
}
/**
* Await for lock by timeout.
*
* @param timeout timeout value.
* @param unit time unit.
* @return success or not.
* @throws InterruptedException InterruptedException.
*/
public boolean await(long timeout, TimeUnit unit) throws InterruptedException {
this.lock.lock();
boolean isawait = false;
try {
while (!this.isEmpty()) {
isawait = this.notEmpty.await(timeout, unit);
}
return isawait;
} finally {
this.lock.unlock();
}
}
@Override
public void addTask(Object key, AbstractDelayTask newTask) {
super.addTask(key, newTask);
MetricsMonitor.getDumpTaskMonitor().set(tasks.size());
}
@Override
public AbstractDelayTask removeTask(Object key) {
AbstractDelayTask result = super.removeTask(key);
MetricsMonitor.getDumpTaskMonitor().set(tasks.size());
return result;
}
@Override
protected void processTasks() {
super.processTasks();
MetricsMonitor.getDumpTaskMonitor().set(tasks.size());
if (tasks.isEmpty()) {
this.lock.lock();
try {
this.notEmpty.signalAll();
} finally {
this.lock.unlock();
}
}
}
@Override
public String getTaskInfos() {
StringBuilder sb = new StringBuilder();
for (Object taskType : getAllProcessorKey()) {
sb.append(taskType).append(":");
AbstractDelayTask task = this.tasks.get(taskType);
if (task != null) {
sb.append(new Date(task.getLastProcessTime()).toString());
} else {
sb.append("finished");
}
sb.append(Constants.NACOS_LINE_SEPARATOR);
}
return sb.toString();
}
/**
* Init and register the mbean object.
*/
public void init() {
try {
ObjectName oName = new ObjectName(this.name + ":type=" + TaskManager.class.getSimpleName());
ManagementFactory.getPlatformMBeanServer().registerMBean(this, oName);
} catch (Exception e) {
LOGGER.error("registerMBean_fail", e);
}
}
}
| config/src/main/java/com/alibaba/nacos/config/server/manager/TaskManager.java | /*
* Copyright 1999-2018 Alibaba Group Holding Ltd.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.alibaba.nacos.config.server.manager;
import com.alibaba.nacos.api.exception.NacosException;
import com.alibaba.nacos.common.task.AbstractDelayTask;
import com.alibaba.nacos.common.task.engine.NacosDelayTaskExecuteEngine;
import com.alibaba.nacos.config.server.constant.Constants;
import com.alibaba.nacos.config.server.monitor.MetricsMonitor;
import com.alibaba.nacos.config.server.utils.LogUtil;
import org.slf4j.Logger;
import javax.management.ObjectName;
import java.lang.management.ManagementFactory;
import java.util.Date;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.Condition;
/**
* TaskManager aims to process tasks that need to be done.
* It processes each task on a single thread to ensure the task is processed successfully.
*
* @author huali
*/
public final class TaskManager extends NacosDelayTaskExecuteEngine implements TaskManagerMBean {
private static final Logger LOGGER = LogUtil.DEFAULT_LOG;
private String name;
Condition notEmpty = this.lock.newCondition();
public TaskManager(String name) {
super(name, LOGGER, 100L);
this.name = name;
}
/**
* Close task manager.
*/
public void close() {
try {
super.shutdown();
} catch (NacosException ignored) {
}
}
/**
* Await for lock.
*
* @throws InterruptedException InterruptedException.
*/
public void await() throws InterruptedException {
this.lock.lock();
try {
while (!this.isEmpty()) {
this.notEmpty.await();
}
} finally {
this.lock.unlock();
}
}
/**
* Await for lock by timeout.
*
* @param timeout timeout value.
* @param unit time unit.
* @return success or not.
* @throws InterruptedException InterruptedException.
*/
public boolean await(long timeout, TimeUnit unit) throws InterruptedException {
this.lock.lock();
boolean isawait = false;
try {
while (!this.isEmpty()) {
isawait = this.notEmpty.await(timeout, unit);
}
return isawait;
} finally {
this.lock.unlock();
}
}
@Override
public void addTask(Object key, AbstractDelayTask newTask) {
super.addTask(key, newTask);
MetricsMonitor.getDumpTaskMonitor().set(tasks.size());
}
@Override
public AbstractDelayTask removeTask(Object key) {
AbstractDelayTask result = super.removeTask(key);
MetricsMonitor.getDumpTaskMonitor().set(tasks.size());
return result;
}
@Override
protected void processTasks() {
super.processTasks();
MetricsMonitor.getDumpTaskMonitor().set(tasks.size());
if (tasks.isEmpty()) {
this.lock.lock();
try {
this.notEmpty.signalAll();
} finally {
this.lock.unlock();
}
}
}
@Override
public String getTaskInfos() {
StringBuilder sb = new StringBuilder();
for (Object taskType : getAllProcessorKey()) {
sb.append(taskType).append(":");
AbstractDelayTask task = this.tasks.get(taskType);
if (task != null) {
sb.append(new Date(task.getLastProcessTime()).toString());
} else {
sb.append("finished");
}
sb.append(Constants.NACOS_LINE_SEPARATOR);
}
return sb.toString();
}
/**
* Init and register the mbean object.
*/
public void init() {
try {
ObjectName oName = new ObjectName(this.name + ":type=" + TaskManager.class.getSimpleName());
ManagementFactory.getPlatformMBeanServer().registerMBean(this, oName);
} catch (Exception e) {
LOGGER.error("registerMBean_fail", "error registering mbean", e);
}
}
}
| log error (#4799)
| config/src/main/java/com/alibaba/nacos/config/server/manager/TaskManager.java | log error (#4799) | <ide><path>onfig/src/main/java/com/alibaba/nacos/config/server/manager/TaskManager.java
<ide> ObjectName oName = new ObjectName(this.name + ":type=" + TaskManager.class.getSimpleName());
<ide> ManagementFactory.getPlatformMBeanServer().registerMBean(this, oName);
<ide> } catch (Exception e) {
<del> LOGGER.error("registerMBean_fail", "error registering mbean", e);
<add> LOGGER.error("registerMBean_fail", e);
<ide> }
<ide> }
<ide> } |
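An aside on the one-line fix above: the deleted call passed an extra string to SLF4J, which treats it as a formatting argument. A minimal, self-contained sketch of the two call shapes involved (the class name and messages are illustrative; the `Logger.error` overloads are the real SLF4J API):

```java
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class LoggerCallShapes {
    private static final Logger LOGGER = LoggerFactory.getLogger(LoggerCallShapes.class);

    public static void main(String[] args) {
        Exception e = new IllegalStateException("register failed");
        // error(String, Throwable): logs the message and prints the
        // throwable's stack trace. This is the form the commit keeps.
        LOGGER.error("registerMBean_fail", e);
        // error(String, Object, Object): the middle string is a formatting
        // argument; the message has no {} placeholder, so it is silently
        // dropped. This is the form the commit deletes.
        LOGGER.error("registerMBean_fail", "extra detail", e);
    }
}
```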
|
Java | apache-2.0 | 45f4408c08dd1e9faf9453a7fa8b0fbfc2c7f090 | 0 | openengsb-domcon/openengsb-connector-gcalendar | /**
* Licensed to the Austrian Association for Software Tool Integration (AASTI)
* under one or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information regarding copyright
* ownership. The AASTI licenses this file to you under the Apache License,
* Version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.openengsb.connector.gcalendar.internal;
import java.io.IOException;
import java.net.MalformedURLException;
import java.net.URL;
import java.util.ArrayList;
import java.util.Date;
import java.util.List;
import org.openengsb.connector.gcalendar.internal.misc.AppointmentConverter;
import org.openengsb.core.api.AliveState;
import org.openengsb.core.api.DomainMethodExecutionException;
import org.openengsb.core.api.edb.EDBEventType;
import org.openengsb.core.api.edb.EDBException;
import org.openengsb.core.api.ekb.EngineeringKnowledgeBaseService;
import org.openengsb.core.common.AbstractOpenEngSBConnectorService;
import org.openengsb.domain.appointment.AppointmentDomain;
import org.openengsb.domain.appointment.AppointmentDomainEvents;
import org.openengsb.domain.appointment.models.Appointment;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.gdata.client.calendar.CalendarQuery;
import com.google.gdata.client.calendar.CalendarService;
import com.google.gdata.data.DateTime;
import com.google.gdata.data.calendar.CalendarEventEntry;
import com.google.gdata.data.calendar.CalendarEventFeed;
import com.google.gdata.util.AuthenticationException;
import com.google.gdata.util.ServiceException;
public class GcalendarServiceImpl extends AbstractOpenEngSBConnectorService implements AppointmentDomain {
private static final Logger LOGGER = LoggerFactory.getLogger(GcalendarServiceImpl.class);
private AppointmentDomainEvents appointmentEvents;
private EngineeringKnowledgeBaseService ekbService;
private AliveState state = AliveState.DISCONNECTED;
private String googleUser;
private String googlePassword;
private CalendarService service;
public GcalendarServiceImpl(String id) {
super(id);
}
@Override
public String createAppointment(Appointment appointment) {
String id = null;
try {
login();
URL postUrl =
new URL("https://www.google.com/calendar/feeds/default/private/full");
CalendarEventEntry myEntry = AppointmentConverter.convertAppointmentToCalendarEventEntry(appointment);
// Send the request and receive the response:
CalendarEventEntry insertedEntry = service.insert(postUrl, myEntry);
id = insertedEntry.getEditLink().getHref();
LOGGER.info("Successfully created appointment {}", id);
appointment.setId(id);
sendEvent(EDBEventType.INSERT, appointment);
} catch (MalformedURLException e) {
// should never be thrown since the URL is static
throw new DomainMethodExecutionException("invalid URL", e);
} catch (IOException e) {
throw new DomainMethodExecutionException("unable to connect to the google server", e);
} catch (ServiceException e) {
throw new DomainMethodExecutionException("unable to insert the appointment", e);
} finally {
this.state = AliveState.DISCONNECTED;
}
return id;
}
@Override
public void updateAppointment(Appointment appointment) {
login();
CalendarEventEntry entry = getAppointmentEntry(appointment);
AppointmentConverter.extendCalendarEventEntryWithAppointment(entry, appointment);
try {
URL editUrl = new URL(entry.getEditLink().getHref());
service.update(editUrl, entry);
sendEvent(EDBEventType.UPDATE, appointment);
} catch (MalformedURLException e) {
// should never be thrown since the url is provided by google
throw new DomainMethodExecutionException("invalid URL", e);
} catch (IOException e) {
throw new DomainMethodExecutionException("unable to connect to the google server", e);
} catch (ServiceException e) {
throw new DomainMethodExecutionException("unable to update the appointment", e);
} finally {
this.state = AliveState.DISCONNECTED;
}
}
@Override
public void deleteAppointment(String id) {
try {
login();
Appointment appointment = ekbService.createEmptyModelObject(Appointment.class);
appointment.setId(id);
CalendarEventEntry entry = getAppointmentEntry(appointment);
entry.delete();
sendEvent(EDBEventType.DELETE, appointment);
} catch (IOException e) {
throw new DomainMethodExecutionException("unable to connect to google", e);
} catch (ServiceException e) {
throw new DomainMethodExecutionException("unable to delete the appointment", e);
} finally {
this.state = AliveState.DISCONNECTED;
}
}
@Override
public Appointment loadAppointment(String id) {
Appointment appointment = ekbService.createEmptyModelObject(Appointment.class);
appointment.setId(id);
CalendarEventEntry entry = getAppointmentEntry(appointment);
return AppointmentConverter.convertCalendarEventEntryToAppointment(entry);
}
/**
* loads an appointment from the server
*/
private CalendarEventEntry getAppointmentEntry(Appointment appointment) {
try {
if (appointment.getId() != null) {
CalendarEventEntry entry =
(CalendarEventEntry) service.getEntry(new URL(appointment.getId()), CalendarEventEntry.class);
return entry;
} else {
LOGGER.error("given appointment has no id");
}
} catch (MalformedURLException e) {
throw new DomainMethodExecutionException("invalid id, id must be an url to contact", e);
} catch (IOException e) {
throw new DomainMethodExecutionException("unable to connect to the google server", e);
} catch (ServiceException e) {
throw new DomainMethodExecutionException("unable to retrieve the appointment", e);
}
return null;
}
/**
* searches for entries. Every parameter is only taken into account if it is not null
*/
private List<CalendarEventEntry> searchForEntries(Date start, Date end, String text) {
try {
URL feedUrl = new URL("https://www.google.com/calendar/feeds/default/private/full");
CalendarQuery myQuery = new CalendarQuery(feedUrl);
if (start != null) {
myQuery.setMinimumStartTime(new DateTime(start.getTime()));
}
if (end != null) {
myQuery.setMaximumStartTime(new DateTime(end.getTime()));
}
if (text != null) {
myQuery.setFullTextQuery(text);
}
CalendarEventFeed resultFeed = service.query(myQuery, CalendarEventFeed.class);
return resultFeed.getEntries();
} catch (MalformedURLException e) {
// should never be thrown since the URL is static
throw new DomainMethodExecutionException("invalid URL", e);
} catch (IOException e) {
throw new DomainMethodExecutionException("unable to connect to the google server", e);
} catch (ServiceException e) {
throw new DomainMethodExecutionException("unable to insert the appointment", e);
}
}
@Override
public ArrayList<Appointment> getAppointments(Date start, Date end) {
login();
ArrayList<Appointment> appointments = new ArrayList<Appointment>();
for (CalendarEventEntry entry : searchForEntries(start, end, null)) {
Appointment appointment = AppointmentConverter.convertCalendarEventEntryToAppointment(entry);
appointments.add(appointment);
}
this.state = AliveState.DISCONNECTED;
return appointments;
}
@Override
public AliveState getAliveState() {
return this.state;
}
private void login() {
try {
service = new CalendarService("OPENENGSB");
service.setUserCredentials(googleUser, googlePassword);
this.state = AliveState.ONLINE;
} catch (AuthenticationException e) {
throw new DomainMethodExecutionException(
"unable to authenticate at google server, maybe wrong username and/or password?", e);
}
}
/**
* Sends a CUD event. The type is defined by the enumeration EDBEventType. Also the savingName, committer and the
* role are defined here.
*/
private void sendEvent(EDBEventType type, Appointment appointment) {
try {
sendEDBEvent(type, appointment, appointment.getId(), appointmentEvents);
} catch (EDBException e) {
throw new DomainMethodExecutionException(e);
}
}
public String getGooglePassword() {
return googlePassword;
}
public void setGooglePassword(String googlePassword) {
this.googlePassword = googlePassword;
}
public String getGoogleUser() {
return googleUser;
}
public void setGoogleUser(String googleUser) {
this.googleUser = googleUser;
}
public void setAppointmentEvents(AppointmentDomainEvents appointmentEvents) {
this.appointmentEvents = appointmentEvents;
}
public void setEkbService(EngineeringKnowledgeBaseService ekbService) {
this.ekbService = ekbService;
AppointmentConverter.setEkbService(ekbService);
}
}
| src/main/java/org/openengsb/connector/gcalendar/internal/GcalendarServiceImpl.java | /**
* Licensed to the Austrian Association for Software Tool Integration (AASTI)
* under one or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information regarding copyright
* ownership. The AASTI licenses this file to you under the Apache License,
* Version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.openengsb.connector.gcalendar.internal;
import java.io.IOException;
import java.net.MalformedURLException;
import java.net.URL;
import java.util.ArrayList;
import java.util.Date;
import java.util.List;
import org.openengsb.connector.gcalendar.internal.misc.AppointmentConverter;
import org.openengsb.core.api.AliveState;
import org.openengsb.core.api.DomainMethodExecutionException;
import org.openengsb.core.api.edb.EDBEventType;
import org.openengsb.core.api.edb.EDBException;
import org.openengsb.core.api.ekb.EngineeringKnowledgeBaseService;
import org.openengsb.core.common.AbstractOpenEngSBConnectorService;
import org.openengsb.domain.appointment.AppointmentDomain;
import org.openengsb.domain.appointment.AppointmentDomainEvents;
import org.openengsb.domain.appointment.models.Appointment;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.gdata.client.calendar.CalendarQuery;
import com.google.gdata.client.calendar.CalendarService;
import com.google.gdata.data.DateTime;
import com.google.gdata.data.calendar.CalendarEventEntry;
import com.google.gdata.data.calendar.CalendarEventFeed;
import com.google.gdata.util.AuthenticationException;
import com.google.gdata.util.ServiceException;
public class GcalendarServiceImpl extends AbstractOpenEngSBConnectorService implements AppointmentDomain {
private static final Logger LOGGER = LoggerFactory.getLogger(GcalendarServiceImpl.class);
private AppointmentDomainEvents appointmentEvents;
private EngineeringKnowledgeBaseService ekbService;
private AliveState state = AliveState.DISCONNECTED;
private String googleUser;
private String googlePassword;
private CalendarService service;
public GcalendarServiceImpl(String id) {
super(id);
}
@Override
public String createAppointment(Appointment appointment) {
String id = null;
try {
login();
URL postUrl =
new URL("https://www.google.com/calendar/feeds/default/private/full");
CalendarEventEntry myEntry = AppointmentConverter.convertAppointmentToCalendarEventEntry(appointment);
// Send the request and receive the response:
CalendarEventEntry insertedEntry = service.insert(postUrl, myEntry);
id = insertedEntry.getEditLink().getHref();
LOGGER.info("Successfully created appointment {}", id);
appointment.setId(id);
sendEvent(EDBEventType.INSERT, appointment);
} catch (MalformedURLException e) {
// should never be thrown since the URL is static
throw new DomainMethodExecutionException("invalid URL", e);
} catch (IOException e) {
throw new DomainMethodExecutionException("unable to connect to the google server", e);
} catch (ServiceException e) {
throw new DomainMethodExecutionException("unable to insert the appointment", e);
} finally {
this.state = AliveState.DISCONNECTED;
}
return id;
}
@Override
public void updateAppointment(Appointment appointment) {
login();
CalendarEventEntry entry = getAppointmentEntry(appointment);
AppointmentConverter.extendCalendarEventEntryWithAppointment(entry, appointment);
try {
URL editUrl = new URL(entry.getEditLink().getHref());
service.update(editUrl, entry);
sendEvent(EDBEventType.UPDATE, appointment);
} catch (MalformedURLException e) {
// should never be thrown since the url is provided by google
throw new DomainMethodExecutionException("invalid URL", e);
} catch (IOException e) {
throw new DomainMethodExecutionException("unable to connect to the google server", e);
} catch (ServiceException e) {
throw new DomainMethodExecutionException("unable to update the appointment", e);
} finally {
this.state = AliveState.DISCONNECTED;
}
}
@Override
public void deleteAppointment(String id) {
try {
login();
Appointment appointment = ekbService.createEmptyModelObject(Appointment.class);
appointment.setId(id);
CalendarEventEntry entry = getAppointmentEntry(appointment);
entry.delete();
sendEvent(EDBEventType.DELETE, appointment);
} catch (IOException e) {
throw new DomainMethodExecutionException("unable to connect to google", e);
} catch (ServiceException e) {
throw new DomainMethodExecutionException("unable to delete the appointment", e);
} finally {
this.state = AliveState.DISCONNECTED;
}
}
@Override
public Appointment loadAppointment(String id) {
Appointment appointment = ekbService.createEmptyModelObject(Appointment.class);
appointment.setId(id);
CalendarEventEntry entry = getAppointmentEntry(appointment);
return AppointmentConverter.convertCalendarEventEntryToAppointment(entry);
}
/**
* loads an appointment from the server
*/
private CalendarEventEntry getAppointmentEntry(Appointment appointment) {
try {
if (appointment.getId() != null) {
CalendarEventEntry entry =
(CalendarEventEntry) service.getEntry(new URL(appointment.getId()), CalendarEventEntry.class);
return entry;
} else {
LOGGER.error("given appointment has no id");
}
} catch (MalformedURLException e) {
throw new DomainMethodExecutionException("invalid id, id must be an url to contact", e);
} catch (IOException e) {
throw new DomainMethodExecutionException("unable to connect to the google server", e);
} catch (ServiceException e) {
throw new DomainMethodExecutionException("unable to retrieve the appointment", e);
}
return null;
}
/**
* searches for entries. Every parameter is only taken into account if it is not null
*/
private List<CalendarEventEntry> searchForEntries(Date start, Date end, String text) {
try {
URL feedUrl = new URL("https://www.google.com/calendar/feeds/default/private/full");
CalendarQuery myQuery = new CalendarQuery(feedUrl);
if (start != null) {
myQuery.setMinimumStartTime(new DateTime(start.getTime()));
}
if (end != null) {
myQuery.setMaximumStartTime(new DateTime(end.getTime()));
}
if (text != null) {
myQuery.setFullTextQuery(text);
}
CalendarEventFeed resultFeed = service.query(myQuery, CalendarEventFeed.class);
return resultFeed.getEntries();
} catch (MalformedURLException e) {
// should never be thrown since the URL is static
throw new DomainMethodExecutionException("invalid URL", e);
} catch (IOException e) {
throw new DomainMethodExecutionException("unable to connect to the google server", e);
} catch (ServiceException e) {
throw new DomainMethodExecutionException("unable to insert the appointment", e);
}
}
@Override
public ArrayList<Appointment> getAppointments(Date start, Date end) {
login();
ArrayList<Appointment> appointments = new ArrayList<Appointment>();
for (CalendarEventEntry entry : searchForEntries(start, end, null)) {
Appointment appointment = AppointmentConverter.convertCalendarEventEntryToAppointment(entry);
appointments.add(appointment);
}
this.state = AliveState.DISCONNECTED;
return appointments;
}
@Override
public AliveState getAliveState() {
return this.state;
}
private void login() {
try {
service = new CalendarService("OPENENGSB");
service.setUserCredentials(googleUser, googlePassword);
this.state = AliveState.ONLINE;
} catch (AuthenticationException e) {
throw new DomainMethodExecutionException(
"unable to authenticate at google server, maybe wrong username and/or password?", e);
}
}
/**
* Sends a CUD event. The type is defined by the enumeration EDBEventType. Also the savingName, committer and the
* role are defined here.
*/
private void sendEvent(EDBEventType type, Appointment appointment) {
String oid = "gcalendar/" + googleUser + "/" + appointment.getId();
try {
sendEDBEvent(type, appointment, appointmentEvents, oid);
} catch (EDBException e) {
throw new DomainMethodExecutionException(e);
}
}
public String getGooglePassword() {
return googlePassword;
}
public void setGooglePassword(String googlePassword) {
this.googlePassword = googlePassword;
}
public String getGoogleUser() {
return googleUser;
}
public void setGoogleUser(String googleUser) {
this.googleUser = googleUser;
}
public void setAppointmentEvents(AppointmentDomainEvents appointmentEvents) {
this.appointmentEvents = appointmentEvents;
}
public void setEkbService(EngineeringKnowledgeBaseService ekbService) {
this.ekbService = ekbService;
AppointmentConverter.setEkbService(ekbService);
}
}
| [OPENENGSB-1754] updated Gcalendar implementation to the new design (no oid here any more)
| src/main/java/org/openengsb/connector/gcalendar/internal/GcalendarServiceImpl.java | [OPENENGSB-1754] updated Gcalendar implementation to the new design (no oid here any more) | <ide><path>rc/main/java/org/openengsb/connector/gcalendar/internal/GcalendarServiceImpl.java
<ide> * role are defined here.
<ide> */
<ide> private void sendEvent(EDBEventType type, Appointment appointment) {
<del> String oid = "gcalendar/" + googleUser + "/" + appointment.getId();
<del> try {
<del> sendEDBEvent(type, appointment, appointmentEvents, oid);
<add> try {
<add> sendEDBEvent(type, appointment, appointment.getId(), appointmentEvents);
<ide> } catch (EDBException e) {
<ide> throw new DomainMethodExecutionException(e);
<ide> } |
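Beyond the oid change shown in this diff, every remote method in GcalendarServiceImpl follows one shape: flip the connector to ONLINE, make the gdata call, wrap checked exceptions, and reset to DISCONNECTED in a finally block. A stripped-down, self-contained sketch of that pattern; the names here are stand-ins, not the OpenEngSB or gdata API:

```java
import java.io.IOException;

public class ConnectorStateSketch {
    enum AliveState { ONLINE, DISCONNECTED }

    private AliveState state = AliveState.DISCONNECTED;

    public String doRemoteWork() {
        state = AliveState.ONLINE;           // stands in for login()
        try {
            return callRemote();             // remote call that may fail
        } catch (IOException e) {
            // checked exceptions become a runtime exception, mirroring
            // DomainMethodExecutionException in the code above
            throw new RuntimeException("unable to connect to the server", e);
        } finally {
            state = AliveState.DISCONNECTED; // reset on every exit path
        }
    }

    private String callRemote() throws IOException {
        return "ok";
    }

    public static void main(String[] args) {
        System.out.println(new ConnectorStateSketch().doRemoteWork());
    }
}
```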
|
JavaScript | mit | 4ec37d17bb86952efc4803bc25cd784decb2c0fe | 0 | 0/paper.js,0/paper.js | /*
* Paper.js
*
* This file is part of Paper.js, a JavaScript Vector Graphics Library,
* based on Scriptographer.org and designed to be largely API compatible.
* http://paperjs.org/
* http://scriptographer.org/
*
* Distributed under the MIT license. See LICENSE file for details.
*
* Copyright (c) 2011, Juerg Lehni & Jonathan Puckey
* http://lehni.org/ & http://jonathanpuckey.com/
*
* All rights reserved.
*/
var DomElement = new function() {
function cumulateOffset(el, name, parent, test) {
var left = name + 'Left',
top = name + 'Top',
x = 0,
y = 0,
style;
// If we're asked to calculate positioned offset, stop at any parent
// element that has relative or absolute position.
while (el && el.style && (!test || !test.test(
style = DomElement.getComputedStyle(el, 'position')))) {
x += el[left] || 0;
y += el[top] || 0;
el = el[parent];
}
return {
offset: Point.create(x, y),
element: el,
style: style
};
}
function getScrollOffset(el, test) {
return cumulateOffset(el, 'scroll', 'parentNode', test).offset;
}
return {
getViewport: function(doc) {
return doc.defaultView || doc.parentWindow;
},
getViewportSize: function(el) {
var doc = el.ownerDocument,
view = this.getViewport(doc),
body = doc.getElementsByTagName(
doc.compatMode === 'CSS1Compat' ? 'html' : 'body')[0];
return Size.create(
view.innerWidth || body.clientWidth,
view.innerHeight || body.clientHeight
);
},
getComputedStyle: function(el, name) {
if (el.currentStyle)
return el.currentStyle[Base.camelize(name)];
var style = this.getViewport(el.ownerDocument)
.getComputedStyle(el, null);
return style ? style.getPropertyValue(Base.hyphenate(name)) : null;
},
getOffset: function(el, positioned, viewport) {
var res = cumulateOffset(el, 'offset', 'offsetParent',
positioned ? /^(relative|absolute|fixed)$/ : /^fixed$/);
// We need to handle fixed positioned elements separately if we're
// asked to calculate offsets within the page (= not within
// viewport), by adding their scroll offset to the result.
if (res.style == 'fixed' && !viewport)
return res.offset.add(getScrollOffset(res.element));
// Otherwise remove scrolling from the calculated offset if we asked
// for viewport coordinates
return viewport
? res.offset.subtract(getScrollOffset(el, /^fixed$/))
: res.offset;
},
getSize: function(el) {
return Size.create(el.offsetWidth, el.offsetHeight);
},
getBounds: function(el, positioned, viewport) {
return new Rectangle(this.getOffset(el, positioned, viewport),
this.getSize(el));
},
/**
* Checks if element is invisible (display: none, ...)
*/
isInvisible: function(el) {
return this.getSize(el).equals([0, 0]);
},
/**
* Checks if element is visible in current viewport
*/
isVisible: function(el) {
// See if the viewport bounds intersect with the windows rectangle
// which always starts at 0, 0
return !this.isInvisible(el)
&& new Rectangle([0, 0], this.getViewportSize(el))
.intersects(this.getBounds(el, false, true));
}
};
};
| src/browser/DomElement.js | /*
* Paper.js
*
* This file is part of Paper.js, a JavaScript Vector Graphics Library,
* based on Scriptographer.org and designed to be largely API compatible.
* http://paperjs.org/
* http://scriptographer.org/
*
* Distributed under the MIT license. See LICENSE file for details.
*
* Copyright (c) 2011, Juerg Lehni & Jonathan Puckey
* http://lehni.org/ & http://jonathanpuckey.com/
*
* All rights reserved.
*/
var DomElement = new function() {
function cumulateOffset(el, name, parent, test) {
var left = name + 'Left',
top = name + 'Top',
x = 0,
y = 0,
style;
// If we're asked to calculate positioned offset, stop at any parent
// element that has relative or absolute position.
while (el && el.style && (!test || !test.test(
style = DomElement.getComputedStyle(el, 'position')))) {
x += el[left] || 0;
y += el[top] || 0;
el = el[parent];
}
return {
offset: Point.create(x, y),
element: el,
style: style
};
}
function getScrollOffset(el, test) {
return cumulateOffset(el, 'scroll', 'parentNode', test).offset;
}
return {
getViewport: function(doc) {
return doc.defaultView || doc.parentWindow;
},
getViewportSize: function(el) {
var doc = el.ownerDocument,
view = DomElement.getViewport(doc),
body = doc.getElementsByTagName(
doc.compatMode === 'CSS1Compat' ? 'html' : 'body')[0];
return Size.create(
view.innerWidth || body.clientWidth,
view.innerHeight || body.clientHeight
);
},
getComputedStyle: function(el, name) {
if (el.currentStyle)
return el.currentStyle[Base.camelize(name)];
var style = DomElement.getViewport(el.ownerDocument)
.getComputedStyle(el, null);
return style ? style.getPropertyValue(Base.hyphenate(name)) : null;
},
getOffset: function(el, positioned, viewport) {
var res = cumulateOffset(el, 'offset', 'offsetParent',
positioned ? /^(relative|absolute|fixed)$/ : /^fixed$/);
// We need to handle fixed positioned elements separately if we're
// asked to calculate offsets within the page (= not within
// viewport), by adding their scroll offset to the result.
if (res.style == 'fixed' && !viewport)
return res.offset.add(getScrollOffset(res.element));
// Otherwise remove scrolling from the calculated offset if we asked
// for viewport coordinates
return viewport
? res.offset.subtract(getScrollOffset(el, /^fixed$/))
: res.offset;
},
getSize: function(el) {
return Size.create(el.offsetWidth, el.offsetHeight);
},
getBounds: function(el, positioned, viewport) {
return new Rectangle(DomElement.getOffset(el, positioned, viewport),
DomElement.getSize(el));
},
/**
* Checks if element is invisible (display: none, ...)
*/
isInvisible: function(el) {
return DomElement.getSize(el).equals([0, 0]);
},
/**
* Checks if element is visible in current viewport
*/
isVisible: function(el) {
// See if the viewport bounds intersect with the windows rectangle
// which always starts at 0, 0
return !DomElement.isInvisible(el)
&& new Rectangle([0, 0], DomElement.getViewportSize(el))
.intersects(DomElement.getBounds(el, false, true));
}
};
};
| Access other static DomElement methods through 'this'.
| src/browser/DomElement.js | Access other static DomElement methods through 'this'. | <ide><path>rc/browser/DomElement.js
<ide>
<ide> getViewportSize: function(el) {
<ide> var doc = el.ownerDocument,
<del> view = DomElement.getViewport(doc),
<add> view = this.getViewport(doc),
<ide> body = doc.getElementsByTagName(
<ide> doc.compatMode === 'CSS1Compat' ? 'html' : 'body')[0];
<ide> return Size.create(
<ide> getComputedStyle: function(el, name) {
<ide> if (el.currentStyle)
<ide> return el.currentStyle[Base.camelize(name)];
<del> var style = DomElement.getViewport(el.ownerDocument)
<add> var style = this.getViewport(el.ownerDocument)
<ide> .getComputedStyle(el, null);
<ide> return style ? style.getPropertyValue(Base.hyphenate(name)) : null;
<ide> },
<ide> },
<ide>
<ide> getBounds: function(el, positioned, viewport) {
<del> return new Rectangle(DomElement.getOffset(el, positioned, viewport),
<del> DomElement.getSize(el));
<add> return new Rectangle(this.getOffset(el, positioned, viewport),
<add> this.getSize(el));
<ide> },
<ide>
<ide> /**
<ide> * Checks if element is invisible (display: none, ...)
<ide> */
<ide> isInvisible: function(el) {
<del> return DomElement.getSize(el).equals([0, 0]);
<add> return this.getSize(el).equals([0, 0]);
<ide> },
<ide>
<ide> /**
<ide> isVisible: function(el) {
<ide> // See if the viewport bounds intersect with the windows rectangle
<ide> // which always starts at 0, 0
<del> return !DomElement.isInvisible(el)
<del> && new Rectangle([0, 0], DomElement.getViewportSize(el))
<del> .intersects(DomElement.getBounds(el, false, true));
<add> return !this.isInvisible(el)
<add> && new Rectangle([0, 0], this.getViewportSize(el))
<add> .intersects(this.getBounds(el, false, true));
<ide> }
<ide> };
<ide> }; |
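The paper.js commit above swaps hard-coded `DomElement.` receivers for `this` inside the object's own methods. A Java analogue of the same idea, with made-up names, shows what late binding through the receiver buys; this is an illustration of the design choice, not paper.js code:

```java
public class DispatchSketch {
    static class Element {
        int size() { return 0; }

        // Calling through 'this' dispatches dynamically, so a subclass
        // override of size() is honored, much as this.getSize(el) is in
        // the patched DomElement.
        boolean isInvisible() { return this.size() == 0; }
    }

    static class SizedElement extends Element {
        @Override
        int size() { return 42; }
    }

    public static void main(String[] args) {
        System.out.println(new Element().isInvisible());      // true
        System.out.println(new SizedElement().isInvisible()); // false
    }
}
```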
|
JavaScript | mit | 8216275db0f93ca1c11d940da5ad095a9962607d | 0 | emotionLoop/visualCaptcha-node,emotionLoop/visualCaptcha-node | ( function( window, visualCaptcha ) {
visualCaptcha( 'sample-captcha', {
imgPath: '/img/',
captcha: {
url: window.location.protocol.origin,
numberOfImages: 5
}
} );
}( window, visualCaptcha ) ); | public/js/main.js | ( function( visualCaptcha ) {
visualCaptcha( 'sample-captcha', {
imgPath: '/img/',
captcha: {
numberOfImages: 5
}
} );
}( visualCaptcha ) ); | fixed visualCaptcha endpoint url for demo
| public/js/main.js | fixed visualCaptcha endpoint url for demo | <ide><path>ublic/js/main.js
<del>( function( visualCaptcha ) {
<add>( function( window, visualCaptcha ) {
<ide> visualCaptcha( 'sample-captcha', {
<ide> imgPath: '/img/',
<ide> captcha: {
<add> url: window.location.protocol.origin,
<ide> numberOfImages: 5
<ide> }
<ide> } );
<del>}( visualCaptcha ) );
<add>}( window, visualCaptcha ) ); |
|
Java | apache-2.0 | ac3677d1afc91d55c8fa2a67c3712845d606e86a | 0 | HubSpot/Baragon,HubSpot/Baragon,HubSpot/Baragon | package com.hubspot.baragon.service.resources;
import javax.ws.rs.Consumes;
import javax.ws.rs.GET;
import javax.ws.rs.POST;
import javax.ws.rs.Path;
import javax.ws.rs.PathParam;
import javax.ws.rs.Produces;
import javax.ws.rs.QueryParam;
import javax.ws.rs.core.MediaType;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.base.Optional;
import com.google.inject.Inject;
import com.hubspot.baragon.auth.NoAuth;
import com.hubspot.baragon.data.BaragonLoadBalancerDatastore;
import com.hubspot.baragon.models.AgentCheckInResponse;
import com.hubspot.baragon.models.BaragonAgentMetadata;
import com.hubspot.baragon.models.BaragonGroup;
import com.hubspot.baragon.models.TrafficSourceState;
import com.hubspot.baragon.service.gcloud.GoogleCloudManager;
import com.hubspot.baragon.service.managers.ElbManager;
@Path("/checkin")
@Consumes({MediaType.APPLICATION_JSON})
@Produces(MediaType.APPLICATION_JSON)
public class AgentCheckinResource {
private static final Logger LOG = LoggerFactory.getLogger(AgentCheckinResource.class);
private final ElbManager elbManager;
private final GoogleCloudManager googleCloudManager;
private final BaragonLoadBalancerDatastore loadBalancerDatastore;
@Inject
public AgentCheckinResource(ElbManager elbManager,
GoogleCloudManager googleCloudManager,
BaragonLoadBalancerDatastore loadBalancerDatastore) {
this.elbManager = elbManager;
this.googleCloudManager = googleCloudManager;
this.loadBalancerDatastore = loadBalancerDatastore;
}
@POST
@Path("/{clusterName}/startup")
public AgentCheckInResponse addAgent(@PathParam("clusterName") String clusterName,
@QueryParam("status") boolean status,
BaragonAgentMetadata agent) {
LOG.info(String.format("Notified of startup for agent %s", agent.getAgentId()));
AgentCheckInResponse response;
try {
if (elbManager.isElbConfigured()) {
response = elbManager.attemptAddAgent(agent, loadBalancerDatastore.getLoadBalancerGroup(clusterName), clusterName, status);
} else if (googleCloudManager.isConfigured()) {
response = googleCloudManager.checkHealthOfAgentOnStartup(agent);
} else {
response = new AgentCheckInResponse(TrafficSourceState.DONE, Optional.absent(), 0L);
}
} catch (Exception e) {
LOG.error("Could not register agent startup", e);
response = new AgentCheckInResponse(TrafficSourceState.ERROR, Optional.of(e.getMessage()), 0L);
}
return response;
}
@POST
@Path("/{clusterName}/shutdown")
public AgentCheckInResponse removeAgent(@PathParam("clusterName") String clusterName,
@QueryParam("status") boolean status,
BaragonAgentMetadata agent) {
LOG.info(String.format("Notified of shutdown for agent %s", agent.getAgentId()));
AgentCheckInResponse response;
try {
if (elbManager.isElbConfigured()) {
response = elbManager.attemptRemoveAgent(agent, loadBalancerDatastore.getLoadBalancerGroup(clusterName), clusterName, status);
} else if (googleCloudManager.isConfigured()) {
response = googleCloudManager.checkHealthOfAgentOnShutdown(agent);
} else {
response = new AgentCheckInResponse(TrafficSourceState.DONE, Optional.absent(), 0L);
}
} catch (Exception e) {
LOG.error("Could not register agent shutdown", e);
response = new AgentCheckInResponse(TrafficSourceState.ERROR, Optional.of(e.getMessage()), 0L);
}
return response;
}
@GET
@NoAuth
@Produces(MediaType.TEXT_PLAIN)
@Path("/{clusterName}/can-shutdown")
public String canShutdownAgent(@PathParam("clusterName") String clusterName, @QueryParam("agentId") String agentId) {
Optional<BaragonAgentMetadata> maybeAgent = loadBalancerDatastore.getAgent(clusterName, agentId);
Optional<BaragonGroup> maybeGroup = loadBalancerDatastore.getLoadBalancerGroup(clusterName);
if (maybeAgent.isPresent()) {
if (elbManager.isElbEnabledAgent(maybeAgent.get(), maybeGroup, clusterName)) {
if (elbManager.isActiveAndHealthy(maybeGroup, maybeAgent.get())) {
return "0";
}
}
}
return "1";
}
}
| BaragonService/src/main/java/com/hubspot/baragon/service/resources/AgentCheckinResource.java | package com.hubspot.baragon.service.resources;
import javax.ws.rs.Consumes;
import javax.ws.rs.GET;
import javax.ws.rs.POST;
import javax.ws.rs.Path;
import javax.ws.rs.PathParam;
import javax.ws.rs.Produces;
import javax.ws.rs.QueryParam;
import javax.ws.rs.core.MediaType;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.base.Optional;
import com.google.inject.Inject;
import com.hubspot.baragon.auth.NoAuth;
import com.hubspot.baragon.data.BaragonLoadBalancerDatastore;
import com.hubspot.baragon.models.AgentCheckInResponse;
import com.hubspot.baragon.models.BaragonAgentMetadata;
import com.hubspot.baragon.models.BaragonGroup;
import com.hubspot.baragon.models.TrafficSourceState;
import com.hubspot.baragon.service.gcloud.GoogleCloudManager;
import com.hubspot.baragon.service.managers.ElbManager;
@Path("/checkin")
@Consumes({MediaType.APPLICATION_JSON})
@Produces(MediaType.APPLICATION_JSON)
public class AgentCheckinResource {
private static final Logger LOG = LoggerFactory.getLogger(AgentCheckinResource.class);
private final ElbManager elbManager;
private final GoogleCloudManager googleCloudManager;
private final BaragonLoadBalancerDatastore loadBalancerDatastore;
@Inject
public AgentCheckinResource(ElbManager elbManager,
GoogleCloudManager googleCloudManager,
BaragonLoadBalancerDatastore loadBalancerDatastore) {
this.elbManager = elbManager;
this.googleCloudManager = googleCloudManager;
this.loadBalancerDatastore = loadBalancerDatastore;
}
@POST
@Path("/{clusterName}/startup")
public AgentCheckInResponse addAgent(@PathParam("clusterName") String clusterName,
@QueryParam("status") boolean status,
BaragonAgentMetadata agent) {
LOG.info(String.format("Notified of startup for agent %s", agent.getAgentId()));
AgentCheckInResponse response;
try {
if (elbManager.isElbConfigured()) {
response = elbManager.attemptAddAgent(agent, loadBalancerDatastore.getLoadBalancerGroup(clusterName), clusterName, status);
} else if (googleCloudManager.isConfigured()) {
response = googleCloudManager.checkHealthOfAgentOnStartup(agent);
} else {
response = new AgentCheckInResponse(TrafficSourceState.DONE, Optional.absent(), 0L);
}
} catch (Exception e) {
LOG.error("Could not register agent startup", e);
response = new AgentCheckInResponse(TrafficSourceState.ERROR, Optional.of(e.getMessage()), 0L);
}
return response;
}
@POST
@Path("/{clusterName}/shutdown")
public AgentCheckInResponse removeAgent(@PathParam("clusterName") String clusterName,
@QueryParam("status") boolean status,
BaragonAgentMetadata agent) {
LOG.info(String.format("Notified of shutdown for agent %s", agent.getAgentId()));
AgentCheckInResponse response;
try {
if (elbManager.isElbConfigured()) {
response = elbManager.attemptRemoveAgent(agent, loadBalancerDatastore.getLoadBalancerGroup(clusterName), clusterName, status);
} else {
response = new AgentCheckInResponse(TrafficSourceState.DONE, Optional.absent(), 0L);
}
} catch (Exception e) {
LOG.error("Could not register agent shutdown", e);
response = new AgentCheckInResponse(TrafficSourceState.ERROR, Optional.of(e.getMessage()), 0L);
}
return response;
}
@GET
@NoAuth
@Produces(MediaType.TEXT_PLAIN)
@Path("/{clusterName}/can-shutdown")
public String canShutdownAgent(@PathParam("clusterName") String clusterName, @QueryParam("agentId") String agentId) {
Optional<BaragonAgentMetadata> maybeAgent = loadBalancerDatastore.getAgent(clusterName, agentId);
Optional<BaragonGroup> maybeGroup = loadBalancerDatastore.getLoadBalancerGroup(clusterName);
if (maybeAgent.isPresent()) {
if (elbManager.isElbEnabledAgent(maybeAgent.get(), maybeGroup, clusterName)) {
if (elbManager.isActiveAndHealthy(maybeGroup, maybeAgent.get())) {
return "0";
}
}
}
return "1";
}
}
| call the check on shutdown
| BaragonService/src/main/java/com/hubspot/baragon/service/resources/AgentCheckinResource.java | call the check on shutdown | <ide><path>aragonService/src/main/java/com/hubspot/baragon/service/resources/AgentCheckinResource.java
<ide> try {
<ide> if (elbManager.isElbConfigured()) {
<ide> response = elbManager.attemptRemoveAgent(agent, loadBalancerDatastore.getLoadBalancerGroup(clusterName), clusterName, status);
<add> } else if (googleCloudManager.isConfigured()) {
<add> response = googleCloudManager.checkHealthOfAgentOnShutdown(agent);
<ide> } else {
<ide> response = new AgentCheckInResponse(TrafficSourceState.DONE, Optional.absent(), 0L);
<ide> } |
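This commit brings the shutdown path in line with startup by adding the Google Cloud branch that shutdown had been missing. One way to keep the two fall-throughs from drifting apart again is to route both through a single helper; a minimal sketch with stand-in types, not Baragon's actual classes:

```java
public class CheckInSketch {
    enum TrafficSourceState { DONE, PENDING }

    interface TrafficSource {
        boolean isConfigured();
        TrafficSourceState check(String agentId);
    }

    // One shared fall-through for startup and shutdown, so a newly added
    // traffic source cannot be forgotten on one of the two paths.
    static TrafficSourceState checkIn(String agentId, TrafficSource... sources) {
        for (TrafficSource source : sources) {
            if (source.isConfigured()) {
                return source.check(agentId);
            }
        }
        return TrafficSourceState.DONE; // no traffic source involved
    }

    public static void main(String[] args) {
        TrafficSource gcloud = new TrafficSource() {
            public boolean isConfigured() { return true; }
            public TrafficSourceState check(String agentId) {
                return TrafficSourceState.PENDING;
            }
        };
        System.out.println(checkIn("agent-1", gcloud)); // prints PENDING
    }
}
```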
|
JavaScript | mit | aa7ae677cfda25e59df47864ce04f243c3e5f88e | 0 | gryffon/ringteki,gryffon/ringteki,jeremylarner/ringteki,gryffon/ringteki,jeremylarner/ringteki,jeremylarner/ringteki | const DrawCard = require('../../drawcard.js');
const EventRegistrar = require('../../eventregistrar.js');
class VoidFist extends DrawCard {
setupCardAbilities(ability) {
this.cardsPlayedThisConflict = {};
this.eventRegistrar = new EventRegistrar(this.game, this);
this.eventRegistrar.register(['onConflictFinished', 'onCardPlayed']);
this.action({
title: 'Bow and send a character home',
condition: context => this.cardsPlayedThisConflict[context.player.uuid] >= 2,
target: {
cardType: 'character',
cardCondition: (card, context) =>
card.isParticipating() && this.game.currentConflict.getCharacters(context.player).some(myCard =>
myCard.hasTrait('monk') && myCard.militarySkill >= card.militarySkill
),
gameAction: [ability.actions.bow(), ability.actions.sendHome()]
},
effect: 'bow {0} and send them home'
});
}
onConflictFinished() {
this.cardsPlayedThisConflict = {};
}
onCardPlayed(event) {
if(this.game.isDuringConflict()) {
if(this.cardsPlayedThisConflict[event.player.uuid]) {
this.cardsPlayedThisConflict[event.player.uuid] += 1;
} else {
this.cardsPlayedThisConflict[event.player.uuid] = 1;
}
}
}
}
VoidFist.id = 'void-fist';
module.exports = VoidFist;
| server/game/cards/04.5-AaN/VoidFist.js | const DrawCard = require('../../drawcard.js');
const EventRegistrar = require('../../eventregistrar.js');
class VoidFist extends DrawCard {
setupCardAbilities(ability) {
this.cardsPlayedThisConflict = {};
this.eventRegistrar = new EventRegistrar(this.game, this);
this.eventRegistrar.register(['onConflictFinished', 'onCardPlayed']);
this.action({
title: 'Bow and send a character home',
condition: context => this.cardsPlayedThisConflict[context.player.uuid] >= 2,
target: {
cardType: 'character',
cardCondition: (card, context) =>
card.isParticipating() && this.game.currentConflict.getCharacters(context.player).some(myCard =>
myCard.hasTrait('monk') && myCard.militarySkill > card.militarySkill
),
gameAction: [ability.actions.bow(), ability.actions.sendHome()]
},
effect: 'bow {0} and send them home'
});
}
onConflictFinished() {
this.cardsPlayedThisConflict = {};
}
onCardPlayed(event) {
if(this.game.isDuringConflict()) {
if(this.cardsPlayedThisConflict[event.player.uuid]) {
this.cardsPlayedThisConflict[event.player.uuid] += 1;
} else {
this.cardsPlayedThisConflict[event.player.uuid] = 1;
}
}
}
}
VoidFist.id = 'void-fist';
module.exports = VoidFist;
| Void Fist fix (#2379)
| server/game/cards/04.5-AaN/VoidFist.js | Void Fist fix (#2379) | <ide><path>erver/game/cards/04.5-AaN/VoidFist.js
<ide> cardType: 'character',
<ide> cardCondition: (card, context) =>
<ide> card.isParticipating() && this.game.currentConflict.getCharacters(context.player).some(myCard =>
<del> myCard.hasTrait('monk') && myCard.militarySkill > card.militarySkill
<add> myCard.hasTrait('monk') && myCard.militarySkill >= card.militarySkill
<ide> ),
<ide> gameAction: [ability.actions.bow(), ability.actions.sendHome()]
<ide> }, |
|
Java | mit | 953a34142f738218101e2cdcb9a03aca36a89809 | 0 | Programming-Systems-Lab/phosphor,Programming-Systems-Lab/phosphor,Programming-Systems-Lab/phosphor | package edu.columbia.cs.psl.phosphor.instrumenter;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.Map.Entry;
import java.util.Stack;
import org.objectweb.asm.Label;
import org.objectweb.asm.MethodVisitor;
import org.objectweb.asm.Opcodes;
import org.objectweb.asm.Type;
import org.objectweb.asm.tree.AbstractInsnNode;
import org.objectweb.asm.tree.FrameNode;
import org.objectweb.asm.tree.IincInsnNode;
import org.objectweb.asm.tree.InsnNode;
import org.objectweb.asm.tree.LabelNode;
import org.objectweb.asm.tree.LdcInsnNode;
import org.objectweb.asm.tree.LineNumberNode;
import org.objectweb.asm.tree.LocalVariableNode;
import org.objectweb.asm.tree.MethodNode;
import org.objectweb.asm.tree.TypeInsnNode;
import org.objectweb.asm.tree.VarInsnNode;
import org.objectweb.asm.tree.analysis.Analyzer;
import org.objectweb.asm.tree.analysis.AnalyzerException;
import org.objectweb.asm.tree.analysis.BasicValue;
import org.objectweb.asm.tree.analysis.Frame;
import org.objectweb.asm.util.Printer;
import edu.columbia.cs.psl.phosphor.Configuration;
import edu.columbia.cs.psl.phosphor.TaintUtils;
import edu.columbia.cs.psl.phosphor.instrumenter.analyzer.BasicArrayInterpreter;
import edu.columbia.cs.psl.phosphor.instrumenter.analyzer.NeverNullArgAnalyzerAdapter;
public class PrimitiveArrayAnalyzer extends MethodVisitor {
final class PrimitiveArrayAnalyzerMN extends MethodNode {
private final String className;
private final MethodVisitor cmv;
boolean[] endsWithGOTO;
int curLabel = 0;
HashMap<Integer, Boolean> lvsThatAreArrays = new HashMap<Integer, Boolean>();
ArrayList<FrameNode> inFrames = new ArrayList<FrameNode>();
ArrayList<FrameNode> outFrames = new ArrayList<FrameNode>();
public PrimitiveArrayAnalyzerMN(int access, String name, String desc, String signature, String[] exceptions, String className, MethodVisitor cmv) {
super(Opcodes.ASM5,access, name, desc, signature, exceptions);
this.className = className;
this.cmv = cmv;
}
@Override
protected LabelNode getLabelNode(Label l) {
if(!Configuration.READ_AND_SAVE_BCI)
return super.getLabelNode(l);
if (!(l.info instanceof LabelNode)) {
l.info = new LabelNode(l);
}
return (LabelNode) l.info;
}
@Override
public void visitCode() {
if (DEBUG)
System.out.println("Visiting: " + className + "." + name + desc);
Label firstLabel = new Label();
super.visitCode();
visitLabel(firstLabel);
}
// @Override
// public void visitVarInsn(int opcode, int var) {
// if(opcode == Opcodes.ASTORE)
// {
// boolean isPrimArray = TaintAdapter.isPrimitiveStackType(analyzer.stack.get(analyzer.stack.size() - 1));
// if(lvsThatAreArrays.containsKey(var))
// {
// if(lvsThatAreArrays.get(var) != isPrimArray)
// {
// throw new IllegalStateException("This analysis is currently too lazy to handle when you have 1 var slot take different kinds of arrays");
// }
// }
// lvsThatAreArrays.put(var, isPrimArray);
// }
// super.visitVarInsn(opcode, var);
// }
private void visitFrameTypes(final int n, final Object[] types,
final List<Object> result) {
for (int i = 0; i < n; ++i) {
Object type = types[i];
result.add(type);
if (type == Opcodes.LONG || type == Opcodes.DOUBLE) {
result.add(Opcodes.TOP);
}
}
}
FrameNode generateFrameNode(int type, int nLocal, Object[] local, int nStack, Object[] stack)
{
FrameNode ret = new FrameNode(type, nLocal, local, nStack, stack);
ret.local = new ArrayList<Object>();
ret.stack= new ArrayList<Object>();
visitFrameTypes(nLocal, local, ret.local);
visitFrameTypes(nStack, stack, ret.stack);
return ret;
}
@Override
public void visitFrame(int type, int nLocal, Object[] local, int nStack, Object[] stack) {
if (DEBUG)
System.out.println("Visitframe curlabel " + (curLabel - 1));
super.visitFrame(type, nLocal, local, nStack, stack);
if (DEBUG)
System.out.println("label " + (curLabel - 1) + " reset to " + Arrays.toString(stack));
if (inFrames.size() == curLabel - 1)
inFrames.add(generateFrameNode(type, nLocal, local, nStack, stack));
else
inFrames.set(curLabel - 1, generateFrameNode(type, nLocal, local, nStack, stack));
// System.out.println(name+" " +Arrays.toString(local));
// if (curLabel > 0) {
// System.out.println("And resetting outframe " + (curLabel - 2));
// if (outFrames.size() == curLabel - 1)
// outFrames.add(new FrameNode(type, nLocal, local, nStack, stack));
// if(outFrames.get(curLabel -1) == null)
// outFrames.set(curLabel - 1, new FrameNode(type, nLocal, local, nStack, stack));
// }
}
@Override
public void visitLabel(Label label) {
// if (curLabel >= 0)
if (DEBUG)
System.out.println("Visit label: " + curLabel + " analyzer: " + analyzer.stack + " inframes size " + inFrames.size() + " " + outFrames.size());
if (analyzer.locals == null || analyzer.stack == null)
inFrames.add(new FrameNode(0, 0, new Object[0], 0, new Object[0]));
else
inFrames.add(new FrameNode(0, analyzer.locals.size(), analyzer.locals.toArray(), analyzer.stack.size(), analyzer.stack.toArray()));
// if (outFrames.size() <= curLabel) {
// if(analyzer.stack == null)
outFrames.add(null);
if (curLabel > 0 && outFrames.get(curLabel - 1) == null && analyzer.stack != null)
outFrames.set(curLabel - 1, new FrameNode(0, analyzer.locals.size(), analyzer.locals.toArray(), analyzer.stack.size(), analyzer.stack.toArray()));
if (DEBUG)
System.out.println("Added outframe for " + (outFrames.size() - 1) + " : " + analyzer.stack);
// }
super.visitLabel(label);
curLabel++;
}
@Override
public void visitTableSwitchInsn(int min, int max, Label dflt, Label... labels) {
if (DEBUG)
System.out.println("Rewriting " + curLabel + " OUT to " + analyzer.stack);
outFrames.set(curLabel - 1, new FrameNode(0, analyzer.locals.size(), analyzer.locals.toArray(), analyzer.stack.size(), analyzer.stack.toArray()));
super.visitTableSwitchInsn(min, max, dflt, labels);
}
@Override
public void visitLookupSwitchInsn(Label dflt, int[] keys, Label[] labels) {
if (DEBUG)
System.out.println("Rewriting " + curLabel + " OUT to " + analyzer.stack);
outFrames.set(curLabel - 1, new FrameNode(0, analyzer.locals.size(), analyzer.locals.toArray(), analyzer.stack.size(), analyzer.stack.toArray()));
super.visitLookupSwitchInsn(dflt, keys, labels);
}
@Override
public void visitInsn(int opcode) {
if (opcode == Opcodes.ATHROW) {
if (DEBUG)
System.out.println("Rewriting " + curLabel + " OUT to " + analyzer.stack);
if (analyzer.locals != null && analyzer.stack != null)
outFrames.set(curLabel - 1, new FrameNode(0, analyzer.locals.size(), analyzer.locals.toArray(), analyzer.stack.size(), analyzer.stack.toArray()));
}
super.visitInsn(opcode);
}
public void visitJumpInsn(int opcode, Label label) {
// System.out.println(opcode);
// if (opcode == Opcodes.GOTO) {
super.visitJumpInsn(opcode, label);
int nToPop = 0;
switch (opcode) {
case Opcodes.IFEQ:
case Opcodes.IFNE:
case Opcodes.IFLT:
case Opcodes.IFGE:
case Opcodes.IFGT:
case Opcodes.IFLE:
case Opcodes.IFNULL:
case Opcodes.IFNONNULL:
//pop 1
nToPop = 1;
break;
case Opcodes.IF_ICMPEQ:
case Opcodes.IF_ICMPNE:
case Opcodes.IF_ICMPLT:
case Opcodes.IF_ICMPGE:
case Opcodes.IF_ICMPGT:
case Opcodes.IF_ICMPLE:
case Opcodes.IF_ACMPEQ:
case Opcodes.IF_ACMPNE:
//pop 2
nToPop = 2;
break;
case Opcodes.GOTO:
//pop none
break;
default:
throw new IllegalArgumentException();
}
//The analyzer won't have executed yet, so simulate it did :'(
List<Object> stack = new ArrayList<Object>(analyzer.stack);
// System.out.println("got to remove " + nToPop + " from " + analyzer.stack + " in " + className + "."+name );
while (nToPop > 0 && !stack.isEmpty()) {
stack.remove(stack.size() - 1);
nToPop--;
}
if (DEBUG)
System.out.println(name + " Rewriting " + curLabel + " OUT to " + stack);
outFrames.set(curLabel - 1, new FrameNode(0, analyzer.locals.size(), analyzer.locals.toArray(), stack.size(), stack.toArray()));
visitLabel(new Label());
// }
}
@Override
public void visitEnd() {
final HashMap<Integer, LinkedList<Integer>> neverAutoBoxByFrame = new HashMap<Integer, LinkedList<Integer>>();
final HashMap<Integer, LinkedList<Integer>> alwaysAutoBoxByFrame = new HashMap<Integer, LinkedList<Integer>>();
final HashMap<Integer, LinkedList<Integer>> outEdges = new HashMap<Integer, LinkedList<Integer>>();
final HashSet<Integer> insertACHECKCASTBEFORE = new HashSet<Integer>();
final HashSet<Integer> insertACONSTNULLBEFORE = new HashSet<Integer>();
Analyzer a = new Analyzer(new BasicArrayInterpreter()) {
protected int[] insnToLabel;
int getLabel(int insn) {
int label = -1;
for (int j = 0; j <= insn; j++) {
label = insnToLabel[j];
}
return label;
}
int getInsnAfterFrameFor(int insn) {
int r = 0;
for (int i = 0; i < insn; i++) {
if (instructions.get(i).getType() == AbstractInsnNode.FRAME)
r = i + 1;
}
return r;
}
int getLastInsnByLabel(int label) {
int r = 0;
for (int j = 0; j < insnToLabel.length; j++) {
if (insnToLabel[j] == label) {
if (instructions.get(j).getType() == AbstractInsnNode.FRAME)
continue;
r = j;
}
}
return r;
}
int getFirstInsnByLabel(int label) {
for (int j = 0; j < insnToLabel.length; j++) {
if (insnToLabel[j] == label) {
if (instructions.get(j).getType() == AbstractInsnNode.FRAME || instructions.get(j).getType() == AbstractInsnNode.LABEL
|| instructions.get(j).getType() == AbstractInsnNode.LINE)
continue;
return j;
}
}
return -1;
}
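//First record which label region every instruction belongs to, then run the standard
//analysis and compare recorded in/out frames at merge points to decide where to hint
//boxing (or insert a checkcast/null) so both incoming paths agree on primitive arrays.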
@Override
public Frame[] analyze(String owner, MethodNode m) throws AnalyzerException {
Iterator<AbstractInsnNode> insns = m.instructions.iterator();
insnToLabel = new int[m.instructions.size()];
// System.out.println("PAAA"+ name);
int label = -1;
boolean isFirst = true;
while (insns.hasNext()) {
AbstractInsnNode insn = insns.next();
int idx = m.instructions.indexOf(insn);
if (insn instanceof LabelNode) {
label++;
}
insnToLabel[idx] = (isFirst ? 1 : label);
isFirst = false;
// System.out.println(idx + "->"+label);
}
Frame[] ret = super.analyze(owner, m);
// if (DEBUG)
// for (int i = 0; i < inFrames.size(); i++) {
// System.out.println("IN: " + i + " " + inFrames.get(i).stack);
// }
// if (DEBUG)
// for (int i = 0; i < outFrames.size(); i++) {
// System.out.println("OUT: " + i + " " + (outFrames.get(i) == null ? "null" : outFrames.get(i).stack));
// }
for (Entry<Integer, LinkedList<Integer>> edge : edges.entrySet()) {
Integer successor = edge.getKey();
if (edge.getValue().size() > 1) {
int labelToSuccessor = getLabel(successor);
if (DEBUG)
System.out.println(name + " Must merge: " + edge.getValue() + " into " + successor + " AKA " + labelToSuccessor);
if (DEBUG)
System.out.println("Input to successor: " + inFrames.get(labelToSuccessor).stack);
for (Integer toMerge : edge.getValue()) {
int labelToMerge = getLabel(toMerge);
if (DEBUG)
System.out.println(toMerge + " AKA " + labelToMerge);
if (DEBUG)
System.out.println((outFrames.get(labelToMerge) == null ? "null" : outFrames.get(labelToMerge).stack));
if (!outFrames.get(labelToMerge).stack.isEmpty() && !inFrames.get(labelToSuccessor).stack.isEmpty()) {
Object output1Top = outFrames.get(labelToMerge).stack.get(outFrames.get(labelToMerge).stack.size() - 1);
Object inputTop = inFrames.get(labelToSuccessor).stack.get(inFrames.get(labelToSuccessor).stack.size() - 1);
if (output1Top == Opcodes.TOP)
output1Top = outFrames.get(labelToMerge).stack.get(outFrames.get(labelToMerge).stack.size() - 2);
if (inputTop == Opcodes.TOP)
inputTop = inFrames.get(labelToSuccessor).stack.get(inFrames.get(labelToSuccessor).stack.size() - 2);
// System.out.println(className+"."+name+ " IN"+inputTop +" OUT " + output1Top);
if (output1Top != null && output1Top != inputTop) {
Type inputTopType = TaintAdapter.getTypeForStackType(inputTop);
Type outputTopType = TaintAdapter.getTypeForStackType(output1Top);
if ((output1Top == Opcodes.NULL) && inputTopType.getSort() == Type.ARRAY && inputTopType.getElementType().getSort() != Type.OBJECT
&& inputTopType.getDimensions() == 1) {
insertACONSTNULLBEFORE.add(toMerge);
} else if ((inputTopType.getSort() == Type.OBJECT || (inputTopType.getSort() == Type.ARRAY && inputTopType.getElementType().getSort() == Type.OBJECT)) && outputTopType.getSort() == Type.ARRAY && outputTopType.getElementType().getSort() != Type.OBJECT
&& inputTopType.getDimensions() == 1) {
insertACHECKCASTBEFORE.add(toMerge);
}
}
}
if (!outFrames.get(labelToMerge).local.isEmpty() && !inFrames.get(labelToSuccessor).local.isEmpty()) {
for (int i = 0; i < Math.min(outFrames.get(labelToMerge).local.size(), inFrames.get(labelToSuccessor).local.size()); i++) {
Object out = outFrames.get(labelToMerge).local.get(i);
Object in = inFrames.get(labelToSuccessor).local.get(i);
// System.out.println(name +" " +out + " out, " + in + " In" + " i "+i);
if (out instanceof String && in instanceof String) {
Type tout = Type.getObjectType((String) out);
Type tin = Type.getObjectType((String) in);
if (tout.getSort() == Type.ARRAY && tout.getElementType().getSort() != Type.OBJECT && tout.getDimensions() == 1 && tin.getSort() == Type.OBJECT) {
int insnN = getLastInsnByLabel(labelToMerge);
// System.out.println(name+desc);
// System.out.println(outFrames.get(labelToMerge).local + " out, \n" + inFrames.get(labelToSuccessor).local + " In" + " i "+i);
// System.out.println("T1::"+tout + " to " + tin + " this may be unsupported but should be handled by the above! in label " + instructions.get(insnN));
// System.out.println("In insn is " + getFirstInsnByLabel(labelToSuccessor));
// System.out.println("insn after frame is " + insnN +", " + instructions.get(insnN) + "<"+Printer.OPCODES[instructions.get(insnN).getOpcode()]);
// System.out.println(inFrames.get(labelToSuccessor).local);
if (!alwaysAutoBoxByFrame.containsKey(insnN))
alwaysAutoBoxByFrame.put(insnN, new LinkedList<Integer>());
alwaysAutoBoxByFrame.get(insnN).add(i);
}
}
}
}
}
}
}
//TODO: if the output of a frame is an array but the input is an obj, hint to always box?
//or is that necessary, because we already assume that it's unboxed.
return ret;
}
HashMap<Integer, LinkedList<Integer>> edges = new HashMap<Integer, LinkedList<Integer>>();
LinkedList<Integer> varsStoredThisInsn = new LinkedList<Integer>();
HashSet<String> visited = new HashSet<String>();
int insnIdxOrderVisited = 0;
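//Called for every control-flow edge. Besides the forward/reverse edge maps, this builds
//the BasicBlock graph used by the implicit-flow analysis, tagging each block with the
//variables it writes and with which side of a conditional jump it sits on.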
@Override
protected void newControlFlowEdge(int insn, int successor) {
if(visited.contains(insn+"-"+successor))
return;
visited.add(insn+"-"+successor);
if (!edges.containsKey(successor))
edges.put(successor, new LinkedList<Integer>());
if (!edges.get(successor).contains(insn))
edges.get(successor).add(insn);
if (!outEdges.containsKey(insn))
outEdges.put(insn, new LinkedList<Integer>());
if (!outEdges.get(insn).contains(successor))
outEdges.get(insn).add(successor);
BasicBlock fromBlock;
if(!implicitAnalysisblocks.containsKey(insn))
{
//insn not added yet
fromBlock = new BasicBlock();
fromBlock.idx = insn;
fromBlock.idxOrder = insnIdxOrderVisited;
insnIdxOrderVisited++;
fromBlock.insn = instructions.get(insn);
implicitAnalysisblocks.put(insn,fromBlock);
}
else
fromBlock = implicitAnalysisblocks.get(insn);
AbstractInsnNode insnN = instructions.get(insn);
fromBlock.isJump = (insnN.getType()== AbstractInsnNode.JUMP_INSN && insnN.getOpcode() != Opcodes.GOTO)
|| insnN.getType() == AbstractInsnNode.LOOKUPSWITCH_INSN || insnN.getType() == AbstractInsnNode.TABLESWITCH_INSN;
if(fromBlock.isJump && insnN.getType() == AbstractInsnNode.JUMP_INSN)
{
switch(insnN.getOpcode())
{
case Opcodes.IF_ICMPEQ:
case Opcodes.IF_ICMPNE:
case Opcodes.IF_ICMPGE:
case Opcodes.IF_ICMPGT:
case Opcodes.IF_ICMPLT:
case Opcodes.IF_ICMPLE:
case Opcodes.IF_ACMPEQ:
case Opcodes.IF_ACMPNE:
fromBlock.is2ArgJump = true;
break;
}
}
BasicBlock succesorBlock;
if(implicitAnalysisblocks.containsKey(successor))
succesorBlock = implicitAnalysisblocks.get(successor);
else
{
succesorBlock = new BasicBlock();
succesorBlock.idx = successor;
succesorBlock.idxOrder = insnIdxOrderVisited;
insnIdxOrderVisited++;
succesorBlock.insn = instructions.get(successor);
implicitAnalysisblocks.put(successor, succesorBlock);
if(succesorBlock.insn.getType() == AbstractInsnNode.IINC_INSN)
{
succesorBlock.varsWritten.add(((IincInsnNode)succesorBlock.insn).var);
}
else if(succesorBlock.insn.getType() == AbstractInsnNode.VAR_INSN)
{
switch(succesorBlock.insn.getOpcode())
{
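//Note: FSTORE is absent from this list, so float stores are not recorded as writes; this looks like an oversight in the original code.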
case ISTORE:
case ASTORE:
case DSTORE:
case LSTORE:
succesorBlock.varsWritten.add(((VarInsnNode)succesorBlock.insn).var);
break;
}
}
}
fromBlock.successors.add(succesorBlock);
succesorBlock.predecessors.add(fromBlock);
if(fromBlock.isJump)
{
if(fromBlock.covered)
succesorBlock.onTrueSideOfJumpFrom.add(fromBlock);
else
{
succesorBlock.onFalseSideOfJumpFrom.add(fromBlock);
fromBlock.covered = true;
}
}
super.newControlFlowEdge(insn, successor);
}
};
try {
Frame[] frames = a.analyze(className, this);
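//A null frame marks an instruction the analyzer proved unreachable. Rather than deleting it,
//dead GOTOs are turned into ATHROWs and dead frames rewritten to a single-Throwable frame,
//which keeps offsets stable; a blunt but workable way to neutralize dead code.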
for(int i = 0 ; i < instructions.size(); i++)
{
if(frames[i] == null)
{
//TODO dead code elimination.
//This should be done more generically
//But, this worked for JDT's stupid bytecode, so...
AbstractInsnNode insn = instructions.get(i);
if (insn != null && !(insn instanceof LabelNode)) {
if(insn.getOpcode() == Opcodes.GOTO)
{
instructions.insertBefore(insn, new InsnNode(Opcodes.ATHROW));
instructions.remove(insn);
}
else if (insn instanceof FrameNode)
{
FrameNode fn = (FrameNode) insn;
fn.local = Collections.EMPTY_LIST;
fn.stack = Collections.singletonList("java/lang/Throwable");
}
}
}
}
// HashMap<Integer,BasicBlock> cfg = new HashMap<Integer, BasicBlock>();
// for(Integer i : outEdges.keySet())
// {
// BasicBlock b = new BasicBlock();
// b.idx = i;
// b.outEdges = outEdges.get(i);
// int endIdx = this.instructions.size();
// for(Integer jj : outEdges.get(i))
// if(i < endIdx)
// endIdx = jj;
// for(int j =i; j < endIdx; j++)
// {
// if(instructions.get(i) instanceof VarInsnNode)
// {
// VarInsnNode n = ((VarInsnNode) instructions.get(i));
// b.varsAccessed.add(n.var);
// }
// }
// cfg.put(i, b);
// }
// for(Integer i : cfg.keySet())
// {
// computeVarsAccessed(i,cfg);
// }
ArrayList<Integer> toAddNullBefore = new ArrayList<Integer>();
// toAddNullBefore.addAll(insertACONSTNULLBEFORE);
toAddNullBefore.addAll(insertACHECKCASTBEFORE);
toAddNullBefore.addAll(neverAutoBoxByFrame.keySet());
toAddNullBefore.addAll(alwaysAutoBoxByFrame.keySet());
Collections.sort(toAddNullBefore);
HashMap<LabelNode, LabelNode> problemLabels = new HashMap<LabelNode, LabelNode>();
HashMap<LabelNode, HashSet<Integer>> problemVars = new HashMap<LabelNode, HashSet<Integer>>();
int nNewNulls = 0;
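//Every insertion below shifts later instruction indices by one, so nNewNulls maps the original indices onto the mutated instruction list.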
for (Integer i : toAddNullBefore) {
AbstractInsnNode insertAfter = this.instructions.get(i + nNewNulls);
if (insertACONSTNULLBEFORE.contains(i)) {
// if (DEBUG)
// System.out.println("Adding Null before: " + i);
// if (insertAfter.getOpcode() == Opcodes.GOTO)
// insertAfter = insertAfter.getPrevious();
// this.instructions.insert(insertAfter, new InsnNode(Opcodes.ACONST_NULL));
// nNewNulls++;
} else if (insertACHECKCASTBEFORE.contains(i)) {
if (DEBUG)
System.out.println("Adding checkcast before: " + i + " (plus " + nNewNulls + ")");
if (insertAfter.getOpcode() == Opcodes.GOTO)
insertAfter = insertAfter.getPrevious();
this.instructions.insert(insertAfter, new TypeInsnNode(Opcodes.CHECKCAST, Type.getInternalName(Object.class)));
nNewNulls++;
} else if (neverAutoBoxByFrame.containsKey(i)) {
if (insertAfter.getOpcode() == Opcodes.GOTO)
insertAfter = insertAfter.getPrevious();
for (int j : neverAutoBoxByFrame.get(i)) {
// System.out.println("Adding nevefbox: before " + i + " (plus " + nNewNulls + ")");
this.instructions.insert(insertAfter, new VarInsnNode(TaintUtils.NEVER_AUTOBOX, j));
nNewNulls++;
}
} else if (alwaysAutoBoxByFrame.containsKey(i)) {
for (int j : alwaysAutoBoxByFrame.get(i)) {
// System.out.println("Adding checkcast always: before " + i + " (plus " + nNewNulls + ")");
// while(insertAfter.getType() == AbstractInsnNode.LABEL ||
// insertAfter.getType() == AbstractInsnNode.LINE||
// insertAfter.getType() == AbstractInsnNode.FRAME)
// insertAfter = insertAfter.getNext();
AbstractInsnNode query = insertAfter.getNext();
while(query.getNext() != null && (query.getType() == AbstractInsnNode.LABEL ||
query.getType() == AbstractInsnNode.LINE ||
query.getType() == AbstractInsnNode.FRAME || query.getOpcode() > 200))
query = query.getNext();
if(query.getOpcode() == Opcodes.ALOAD && query.getNext().getOpcode() == Opcodes.MONITOREXIT)
insertAfter = query.getNext();
if(query.getType() == AbstractInsnNode.JUMP_INSN)
insertAfter = query;
if(insertAfter.getType() == AbstractInsnNode.JUMP_INSN)
{
insertAfter = insertAfter.getPrevious();
// System.out.println(Printer.OPCODES[insertAfter.getNext().getOpcode()]);
// System.out.println("insertbefore : " + ((JumpInsnNode) insertAfter.getNext()).toString());
if(insertAfter.getNext().getOpcode() != Opcodes.GOTO)
{
this.instructions.insert(insertAfter, new VarInsnNode(TaintUtils.ALWAYS_BOX_JUMP, j));
}
else
{
// System.out.println("box immediately");
this.instructions.insert(insertAfter, new VarInsnNode(TaintUtils.ALWAYS_AUTOBOX, j));
}
}
else
{
this.instructions.insert(insertAfter, new VarInsnNode(TaintUtils.ALWAYS_AUTOBOX, j));
}
nNewNulls++;
}
}
}
// System.out.println(name+desc);
//fix LVs for android (sigh)
// for(LabelNode l : problemLabels.keySet())
// {
// System.out.println("Problem label: "+l);
// }
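//Split any local-variable debug range that spans one of the synthetic box instructions
//inserted above, so the LocalVariableTable stays consistent; repeat until stable.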
boolean hadChanges = true;
while (hadChanges) {
hadChanges = false;
HashSet<LocalVariableNode> newLVNodes = new HashSet<LocalVariableNode>();
if (this.localVariables != null) {
for (Object _lv : this.localVariables) {
LocalVariableNode lv = (LocalVariableNode) _lv;
AbstractInsnNode toCheck = lv.start;
LabelNode veryEnd = lv.end;
while (toCheck != null && toCheck != lv.end) {
if ((toCheck.getOpcode() == TaintUtils.ALWAYS_BOX_JUMP || toCheck.getOpcode() ==TaintUtils.ALWAYS_AUTOBOX) && ((VarInsnNode) toCheck).var == lv.index) {
// System.out.println("LV " + lv.name + " will be a prob around " + toCheck);
LabelNode beforeProblem = new LabelNode(new Label());
LabelNode afterProblem = new LabelNode(new Label());
this.instructions.insertBefore(toCheck, beforeProblem);
this.instructions.insert(toCheck.getNext(), afterProblem);
LocalVariableNode newLV = new LocalVariableNode(lv.name, lv.desc, lv.signature, afterProblem, veryEnd, lv.index);
lv.end = beforeProblem;
newLVNodes.add(newLV);
hadChanges = true;
break;
}
toCheck = toCheck.getNext();
}
}
this.localVariables.addAll(newLVNodes);
}
}
} catch (AnalyzerException e) {
e.printStackTrace();
}
if (Configuration.IMPLICIT_TRACKING || Configuration.IMPLICIT_LIGHT_TRACKING) {
boolean hasJumps = false;
for(BasicBlock b : implicitAnalysisblocks.values())
if(b.isJump)
{
hasJumps = true;
break;
}
if (implicitAnalysisblocks.size() > 1 && hasJumps) {
Stack<BasicBlock> stack = new Stack<PrimitiveArrayAnalyzer.BasicBlock>();
//Fix successors to only point to jumps or labels
/*
* public HashSet<BasicBlock> calculateSuccessorsCompact() {
if(compactSuccessorsCalculated)
return successorsCompact;
for (BasicBlock b : successors) {
compactSuccessorsCalculated = true;
if(b.isInteresting())
successorsCompact.add(b);
else
successorsCompact.addAll(b.calculateSuccessorsCompact());
}
return successorsCompact;
}
*/
boolean changed = true;
while (changed) {
changed = false;
for (BasicBlock b : implicitAnalysisblocks.values()) {
for (BasicBlock s : b.successors) {
if (s.isInteresting()){
changed |= b.successorsCompact.add(s);
}
else
{
changed |= b.successorsCompact.addAll(s.successorsCompact);
}
}
}
}
//Post dominator analysis
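//Iterate to a fixed point: every block starts by post-dominating itself, then repeatedly
//absorbs the intersection of its compact successors' post-dominator sets (note the variable
//below is named intersectionOfPredecessors but actually intersects successor sets).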
for(BasicBlock b : implicitAnalysisblocks.values())
b.postDominators.add(b);
changed = true;
while(changed)
{
changed = false;
for(BasicBlock b : implicitAnalysisblocks.values())
{
if(b.successorsCompact.size() > 0 && b.isInteresting())
{
HashSet<BasicBlock> intersectionOfPredecessors = new HashSet<PrimitiveArrayAnalyzer.BasicBlock>();
Iterator<BasicBlock> iter = b.successorsCompact.iterator();
BasicBlock successor = iter.next();
intersectionOfPredecessors.addAll(successor.postDominators);
while(iter.hasNext())
{
successor = iter.next();
intersectionOfPredecessors.retainAll(successor.postDominators);
}
changed |= b.postDominators.addAll(intersectionOfPredecessors);
}
}
}
//Add in markings for where jumps are resolved
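//A jump is treated as resolved at its earliest remaining post-dominator in visit order, i.e. the first point where both sides of the branch have rejoined.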
for(BasicBlock j : implicitAnalysisblocks.values())
{
if(j.isJump)
{
// System.out.println(j + " " +j.postDominators);
j.postDominators.remove(j);
BasicBlock min = null;
for(BasicBlock d : j.postDominators)
{
if(min == null || min.idxOrder > d.idxOrder)
min = d;
}
// System.out.println(j + " resolved at " + min);
if (min != null) {
min.resolvedBlocks.add(j);
min.resolvedHereBlocks.add(j);
}
}
}
//Propagate forward true-side/false-side to determine which vars are written
stack.add(implicitAnalysisblocks.get(0));
while (!stack.isEmpty()) {
BasicBlock b = stack.pop();
if (b.visited)
continue;
b.visited = true;
b.onFalseSideOfJumpFrom.removeAll(b.resolvedBlocks);
b.onTrueSideOfJumpFrom.removeAll(b.resolvedBlocks);
//Propagate markings to successors
for (BasicBlock s : b.successors) {
boolean _changed = false;
_changed |= s.onFalseSideOfJumpFrom.addAll(b.onFalseSideOfJumpFrom);
_changed |= s.onTrueSideOfJumpFrom.addAll(b.onTrueSideOfJumpFrom);
_changed |= s.resolvedBlocks.addAll(b.resolvedBlocks);
if(_changed)
s.visited = false;
s.onFalseSideOfJumpFrom.remove(s);
s.onTrueSideOfJumpFrom.remove(s);
if (!s.visited)
stack.add(s);
}
}
for(BasicBlock j : implicitAnalysisblocks.values())
{
// this.instructions.insertBefore(j.insn, new LdcInsnNode(j.idx + " " + j.onTrueSideOfJumpFrom + " " + j.onFalseSideOfJumpFrom));
// System.out.println(j.idx + " " + j.postDominators);
if(j.isJump)
{
stack = new Stack<PrimitiveArrayAnalyzer.BasicBlock>();
stack.addAll(j.successors);
while(!stack.isEmpty())
{
BasicBlock b = stack.pop();
if(b.visited)
continue;
b.visited = true;
if(b.onFalseSideOfJumpFrom.contains(j))
{
j.varsWrittenTrueSide.addAll(b.varsWritten);
stack.addAll(b.successors);
}
else if(b.onTrueSideOfJumpFrom.contains(j))
{
j.varsWrittenFalseSide.addAll(b.varsWritten);
stack.addAll(b.successors);
}
}
}
}
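//Assign each conditional jump an ID and mark BRANCH_START before it. Variables written on
//only one side of the branch get FORCE_CTRL_STORE first, presumably so their control-flow
//taint is tracked even on the path that does not write them; 2-arg jumps reserve a second ID.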
HashMap<BasicBlock, Integer> jumpIDs = new HashMap<PrimitiveArrayAnalyzer.BasicBlock, Integer>();
int jumpID = 0;
for (BasicBlock b : implicitAnalysisblocks.values()) {
if (b.isJump) {
jumpID++;
HashSet<Integer> common = new HashSet<Integer>();
common.addAll(b.varsWrittenFalseSide);
common.retainAll(b.varsWrittenTrueSide);
HashSet<Integer> diff =new HashSet<Integer>();
diff.addAll(b.varsWrittenTrueSide);
diff.addAll(b.varsWrittenFalseSide);
diff.removeAll(common);
for(int i : diff)
{
instructions.insertBefore(b.insn, new VarInsnNode(TaintUtils.FORCE_CTRL_STORE, i));
}
instructions.insertBefore(b.insn, new VarInsnNode(TaintUtils.BRANCH_START, jumpID));
jumpIDs.put(b, jumpID);
if(b.is2ArgJump)
jumpID++;
}
}
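//Emit BRANCH_END where each jump resolves (skipping frames, lines and labels) to close its
//control-flow scope; blocks with no successors additionally close every still-open branch.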
for (BasicBlock b : implicitAnalysisblocks.values()) {
// System.out.println(b.idx + " -> " + b.successorsCompact);
// System.out.println(b.successors);
// System.out.println(b.resolvedBlocks);
for (BasicBlock r : b.resolvedHereBlocks) {
// System.out.println("Resolved: " + jumpIDs.get(r) + " at " + b.idx);
// System.out.println("GOt" + jumpIDs);
AbstractInsnNode insn = b.insn;
while (insn.getType() == AbstractInsnNode.FRAME || insn.getType() == AbstractInsnNode.LINE || insn.getType() == AbstractInsnNode.LABEL)
insn = insn.getNext();
instructions.insertBefore(insn, new VarInsnNode(TaintUtils.BRANCH_END, jumpIDs.get(r)));
if(r.is2ArgJump)
instructions.insertBefore(insn, new VarInsnNode(TaintUtils.BRANCH_END, jumpIDs.get(r)+1));
}
if(b.successors.isEmpty())
{
instructions.insertBefore(b.insn, new InsnNode(TaintUtils.FORCE_CTRL_STORE));
// if (b.insn.getOpcode() != Opcodes.ATHROW) {
HashSet<BasicBlock> live = new HashSet<PrimitiveArrayAnalyzer.BasicBlock>(b.onFalseSideOfJumpFrom);
live.addAll(b.onTrueSideOfJumpFrom);
for (BasicBlock r : live) {
instructions.insertBefore(b.insn, new VarInsnNode(TaintUtils.BRANCH_END, jumpIDs.get(r)));
if (r.is2ArgJump)
instructions.insertBefore(b.insn, new VarInsnNode(TaintUtils.BRANCH_END, jumpIDs.get(r) + 1));
}
// }
}
// System.out.println(b.insn + " - " + b.domBlocks + "-" + b.antiDomBlocks);
}
nJumps = jumpID;
}
}
// System.out.println(name);
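//With ANNOTATE_LOOPS, any strongly connected component larger than one block is a loop;
//blocks inside it that can also branch out of the component are flagged as loop headers.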
if (Configuration.ANNOTATE_LOOPS) {
SCCAnalyzer scc = new SCCAnalyzer();
int max = 0;
for(Integer i : implicitAnalysisblocks.keySet())
{
if(i > max)
max = i;
}
BasicBlock[] flatGraph = new BasicBlock[max + 1];
for(int i = 0; i < flatGraph.length; i++)
flatGraph[i] = implicitAnalysisblocks.get(i);
List<List<BasicBlock>> sccs = scc.scc(flatGraph);
for (List<BasicBlock> c : sccs) {
if (c.size() == 1)
continue;
// System.out.println(c);
for (BasicBlock b : c) {
if (b.successors.size() > 1)
if (!c.containsAll(b.successors)) {
// loop header
this.instructions.insertBefore(b.insn, new InsnNode(TaintUtils.LOOP_HEADER));
}
}
}
}
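//Pad maxStack generously for the instrumentation added later (a crude upper bound,
//presumably tightened when frames are recomputed), then tag the instruction preceding
//each stack-map frame with FOLLOWED_BY_FRAME so later passes know a frame follows.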
this.maxStack += 100;
AbstractInsnNode insn = instructions.getFirst();
while(insn != null)
{
if(insn.getType() == AbstractInsnNode.FRAME)
{
//Insert a note before the instruction before this guy
AbstractInsnNode insertBefore = insn;
while (insertBefore != null && (insertBefore.getType() == AbstractInsnNode.FRAME || insertBefore.getType() == AbstractInsnNode.LINE
|| insertBefore.getType() == AbstractInsnNode.LABEL))
insertBefore = insertBefore.getPrevious();
if (insertBefore != null)
this.instructions.insertBefore(insertBefore, new InsnNode(TaintUtils.FOLLOWED_BY_FRAME));
}
insn = insn.getNext();
}
this.accept(cmv);
}
HashMap<Integer,BasicBlock> implicitAnalysisblocks = new HashMap<Integer,PrimitiveArrayAnalyzer.BasicBlock>();
void calculatePostDominators(BasicBlock b)
{
if(b.visited)
return;
b.visited = true;
b.onFalseSideOfJumpFrom.removeAll(b.resolvedBlocks);
b.onTrueSideOfJumpFrom.removeAll(b.resolvedBlocks);
//Propagate markings to successors
for(BasicBlock s : b.successors)
{
s.onFalseSideOfJumpFrom.addAll(b.onFalseSideOfJumpFrom);
s.onTrueSideOfJumpFrom.addAll(b.onTrueSideOfJumpFrom);
s.resolvedBlocks.addAll(b.resolvedBlocks);
if(!s.visited)
calculatePostDominators(s);
}
}
}
static class BasicBlock{
protected int idxOrder;
public HashSet<BasicBlock> postDominators= new HashSet<PrimitiveArrayAnalyzer.BasicBlock>() ;
int idx;
// LinkedList<Integer> outEdges = new LinkedList<Integer>();
HashSet<BasicBlock> successorsCompact = new HashSet<PrimitiveArrayAnalyzer.BasicBlock>();
HashSet<BasicBlock> successors = new HashSet<PrimitiveArrayAnalyzer.BasicBlock>();
HashSet<BasicBlock> predecessors = new HashSet<PrimitiveArrayAnalyzer.BasicBlock>();
AbstractInsnNode insn;
boolean covered;
boolean visited;
boolean isJump;
boolean is2ArgJump;
HashSet<BasicBlock> resolvedHereBlocks = new HashSet<PrimitiveArrayAnalyzer.BasicBlock>();
private boolean compactSuccessorsCalculated;
public boolean isInteresting()
{
return isJump || insn instanceof LabelNode;
}
HashSet<BasicBlock> resolvedBlocks = new HashSet<PrimitiveArrayAnalyzer.BasicBlock>();
HashSet<BasicBlock> onFalseSideOfJumpFrom = new HashSet<PrimitiveArrayAnalyzer.BasicBlock>();
HashSet<BasicBlock> onTrueSideOfJumpFrom = new HashSet<PrimitiveArrayAnalyzer.BasicBlock>();
HashSet<Integer> varsWritten = new HashSet<Integer>();
HashSet<Integer> varsWrittenTrueSide = new HashSet<Integer>();
HashSet<Integer> varsWrittenFalseSide = new HashSet<Integer>();
@Override
public String toString() {
// return insn.toString();
return ""+idx;
}
}
private static boolean isPrimitiveArrayType(BasicValue v) {
if (v == null || v.getType() == null)
return false;
return v.getType().getSort() == Type.ARRAY && v.getType().getElementType().getSort() != Type.OBJECT;
}
static final boolean DEBUG = false;
public HashSet<Type> wrapperTypesToPreAlloc = new HashSet<Type>();
public int nJumps;
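//Track every taint-container return type this method can produce (arithmetic results,
//primitive-array loads, wrapped method returns) so that, apparently, the corresponding
//wrapper objects can be preallocated up front rather than created at each operation.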
@Override
public void visitInsn(int opcode) {
super.visitInsn(opcode);
switch (opcode) {
case Opcodes.FADD:
case Opcodes.FREM:
case Opcodes.FSUB:
case Opcodes.FMUL:
case Opcodes.FDIV:
if(Configuration.PREALLOC_STACK_OPS)
wrapperTypesToPreAlloc.add(TaintUtils.getContainerReturnType("F"));
break;
case Opcodes.DADD:
case Opcodes.DSUB:
case Opcodes.DMUL:
case Opcodes.DDIV:
case Opcodes.DREM:
wrapperTypesToPreAlloc.add(TaintUtils.getContainerReturnType("D"));
break;
case Opcodes.LSHL:
case Opcodes.LUSHR:
case Opcodes.LSHR:
case Opcodes.LSUB:
case Opcodes.LMUL:
case Opcodes.LADD:
case Opcodes.LDIV:
case Opcodes.LREM:
case Opcodes.LAND:
case Opcodes.LOR:
case Opcodes.LXOR:
wrapperTypesToPreAlloc.add(TaintUtils.getContainerReturnType("J"));
break;
case Opcodes.LCMP:
case Opcodes.DCMPL:
case Opcodes.DCMPG:
case Opcodes.FCMPG:
case Opcodes.FCMPL:
case Opcodes.IADD:
case Opcodes.ISUB:
case Opcodes.IMUL:
case Opcodes.IDIV:
case Opcodes.IREM:
case Opcodes.ISHL:
case Opcodes.ISHR:
case Opcodes.IUSHR:
case Opcodes.IOR:
case Opcodes.IAND:
case Opcodes.IXOR:
wrapperTypesToPreAlloc.add(TaintUtils.getContainerReturnType("I"));
break;
case Opcodes.IALOAD:
wrapperTypesToPreAlloc.add(TaintUtils.getContainerReturnType("I"));
break;
case Opcodes.BALOAD:
wrapperTypesToPreAlloc.add(TaintUtils.getContainerReturnType("B"));
wrapperTypesToPreAlloc.add(TaintUtils.getContainerReturnType("Z"));
break;
case Opcodes.CALOAD:
wrapperTypesToPreAlloc.add(TaintUtils.getContainerReturnType("C"));
break;
case Opcodes.DALOAD:
wrapperTypesToPreAlloc.add(TaintUtils.getContainerReturnType("D"));
break;
case Opcodes.LALOAD:
wrapperTypesToPreAlloc.add(TaintUtils.getContainerReturnType("J"));
break;
case Opcodes.FALOAD:
wrapperTypesToPreAlloc.add(TaintUtils.getContainerReturnType("F"));
break;
case Opcodes.SALOAD:
wrapperTypesToPreAlloc.add(TaintUtils.getContainerReturnType("S"));
break;
}
}
@Override
public void visitMethodInsn(int opcode, String owner, String name, String desc, boolean itfc) {
super.visitMethodInsn(opcode, owner, name, desc,itfc);
Type returnType = Type.getReturnType(desc);
Type newReturnType = TaintUtils.getContainerReturnType(returnType);
if(newReturnType != returnType && !(returnType.getSort() == Type.ARRAY))
wrapperTypesToPreAlloc.add(newReturnType);
}
public PrimitiveArrayAnalyzer(final String className, int access, final String name, final String desc, String signature, String[] exceptions, final MethodVisitor cmv) {
super(Opcodes.ASM5);
this.mv = new PrimitiveArrayAnalyzerMN(access, name, desc, signature, exceptions, className, cmv);
}
public PrimitiveArrayAnalyzer(Type singleWrapperTypeToAdd) {
super(Opcodes.ASM5);
this.mv = new PrimitiveArrayAnalyzerMN(0, null,null,null,null,null, null);
if(singleWrapperTypeToAdd.getSort() == Type.OBJECT && singleWrapperTypeToAdd.getInternalName().startsWith("edu/columbia/cs/psl/phosphor/struct/Tainted"))
this.wrapperTypesToPreAlloc.add(singleWrapperTypeToAdd);
}
NeverNullArgAnalyzerAdapter analyzer;
public void setAnalyzer(NeverNullArgAnalyzerAdapter preAnalyzer) {
analyzer = preAnalyzer;
}
}
| Phosphor/src/edu/columbia/cs/psl/phosphor/instrumenter/PrimitiveArrayAnalyzer.java |
package edu.columbia.cs.psl.phosphor.instrumenter;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.Map.Entry;
import java.util.Stack;
import org.objectweb.asm.Label;
import org.objectweb.asm.MethodVisitor;
import org.objectweb.asm.Opcodes;
import org.objectweb.asm.Type;
import org.objectweb.asm.tree.AbstractInsnNode;
import org.objectweb.asm.tree.FrameNode;
import org.objectweb.asm.tree.IincInsnNode;
import org.objectweb.asm.tree.InsnNode;
import org.objectweb.asm.tree.LabelNode;
import org.objectweb.asm.tree.LdcInsnNode;
import org.objectweb.asm.tree.LocalVariableNode;
import org.objectweb.asm.tree.MethodNode;
import org.objectweb.asm.tree.TypeInsnNode;
import org.objectweb.asm.tree.VarInsnNode;
import org.objectweb.asm.tree.analysis.Analyzer;
import org.objectweb.asm.tree.analysis.AnalyzerException;
import org.objectweb.asm.tree.analysis.BasicValue;
import org.objectweb.asm.tree.analysis.Frame;
import org.objectweb.asm.util.Printer;
import edu.columbia.cs.psl.phosphor.Configuration;
import edu.columbia.cs.psl.phosphor.TaintUtils;
import edu.columbia.cs.psl.phosphor.instrumenter.analyzer.BasicArrayInterpreter;
import edu.columbia.cs.psl.phosphor.instrumenter.analyzer.NeverNullArgAnalyzerAdapter;
public class PrimitiveArrayAnalyzer extends MethodVisitor {
final class PrimitiveArrayAnalyzerMN extends MethodNode {
private final String className;
private final MethodVisitor cmv;
boolean[] endsWithGOTO;
int curLabel = 0;
HashMap<Integer, Boolean> lvsThatAreArrays = new HashMap<Integer, Boolean>();
ArrayList<FrameNode> inFrames = new ArrayList<FrameNode>();
ArrayList<FrameNode> outFrames = new ArrayList<FrameNode>();
public PrimitiveArrayAnalyzerMN(int access, String name, String desc, String signature, String[] exceptions, String className, MethodVisitor cmv) {
super(Opcodes.ASM5,access, name, desc, signature, exceptions);
this.className = className;
this.cmv = cmv;
}
@Override
protected LabelNode getLabelNode(Label l) {
if(!Configuration.READ_AND_SAVE_BCI)
return super.getLabelNode(l);
if (!(l.info instanceof LabelNode)) {
l.info = new LabelNode(l);
}
return (LabelNode) l.info;
}
@Override
public void visitCode() {
if (DEBUG)
System.out.println("Visiting: " + className + "." + name + desc);
Label firstLabel = new Label();
super.visitCode();
visitLabel(firstLabel);
}
// @Override
// public void visitVarInsn(int opcode, int var) {
// if(opcode == Opcodes.ASTORE)
// {
// boolean isPrimArray = TaintAdapter.isPrimitiveStackType(analyzer.stack.get(analyzer.stack.size() - 1));
// if(lvsThatAreArrays.containsKey(var))
// {
// if(lvsThatAreArrays.get(var) != isPrimArray)
// {
// throw new IllegalStateException("This analysis is currently too lazy to handle when you have 1 var slot take different kinds of arrays");
// }
// }
// lvsThatAreArrays.put(var, isPrimArray);
// }
// super.visitVarInsn(opcode, var);
// }
private void visitFrameTypes(final int n, final Object[] types,
final List<Object> result) {
for (int i = 0; i < n; ++i) {
Object type = types[i];
result.add(type);
if (type == Opcodes.LONG || type == Opcodes.DOUBLE) {
result.add(Opcodes.TOP);
}
}
}
FrameNode generateFrameNode(int type, int nLocal, Object[] local, int nStack, Object[] stack)
{
FrameNode ret = new FrameNode(type, nLocal, local, nStack, stack);
ret.local = new ArrayList<Object>();
ret.stack= new ArrayList<Object>();
visitFrameTypes(nLocal, local, ret.local);
visitFrameTypes(nStack, stack, ret.stack);
return ret;
}
@Override
public void visitFrame(int type, int nLocal, Object[] local, int nStack, Object[] stack) {
if (DEBUG)
System.out.println("Visitframe curlabel " + (curLabel - 1));
super.visitFrame(type, nLocal, local, nStack, stack);
if (DEBUG)
System.out.println("label " + (curLabel - 1) + " reset to " + Arrays.toString(stack));
if (inFrames.size() == curLabel - 1)
inFrames.add(generateFrameNode(type, nLocal, local, nStack, stack));
else
inFrames.set(curLabel - 1, generateFrameNode(type, nLocal, local, nStack, stack));
// System.out.println(name+" " +Arrays.toString(local));
// if (curLabel > 0) {
// System.out.println("And resetting outframe " + (curLabel - 2));
// if (outFrames.size() == curLabel - 1)
// outFrames.add(new FrameNode(type, nLocal, local, nStack, stack));
// if(outFrames.get(curLabel -1) == null)
// outFrames.set(curLabel - 1, new FrameNode(type, nLocal, local, nStack, stack));
// }
}
@Override
public void visitLabel(Label label) {
// if (curLabel >= 0)
if (DEBUG)
System.out.println("Visit label: " + curLabel + " analyzer: " + analyzer.stack + " inframes size " + inFrames.size() + " " + outFrames.size());
if (analyzer.locals == null || analyzer.stack == null)
inFrames.add(new FrameNode(0, 0, new Object[0], 0, new Object[0]));
else
inFrames.add(new FrameNode(0, analyzer.locals.size(), analyzer.locals.toArray(), analyzer.stack.size(), analyzer.stack.toArray()));
// if (outFrames.size() <= curLabel) {
// if(analyzer.stack == null)
outFrames.add(null);
if (curLabel > 0 && outFrames.get(curLabel - 1) == null && analyzer.stack != null)
outFrames.set(curLabel - 1, new FrameNode(0, analyzer.locals.size(), analyzer.locals.toArray(), analyzer.stack.size(), analyzer.stack.toArray()));
if (DEBUG)
System.out.println("Added outframe for " + (outFrames.size() - 1) + " : " + analyzer.stack);
// }
super.visitLabel(label);
curLabel++;
}
@Override
public void visitTableSwitchInsn(int min, int max, Label dflt, Label... labels) {
if (DEBUG)
System.out.println("Rewriting " + curLabel + " OUT to " + analyzer.stack);
outFrames.set(curLabel - 1, new FrameNode(0, analyzer.locals.size(), analyzer.locals.toArray(), analyzer.stack.size(), analyzer.stack.toArray()));
super.visitTableSwitchInsn(min, max, dflt, labels);
}
@Override
public void visitLookupSwitchInsn(Label dflt, int[] keys, Label[] labels) {
if (DEBUG)
System.out.println("Rewriting " + curLabel + " OUT to " + analyzer.stack);
outFrames.set(curLabel - 1, new FrameNode(0, analyzer.locals.size(), analyzer.locals.toArray(), analyzer.stack.size(), analyzer.stack.toArray()));
super.visitLookupSwitchInsn(dflt, keys, labels);
}
@Override
public void visitInsn(int opcode) {
if (opcode == Opcodes.ATHROW) {
if (DEBUG)
System.out.println("Rewriting " + curLabel + " OUT to " + analyzer.stack);
if (analyzer.locals != null && analyzer.stack != null)
outFrames.set(curLabel - 1, new FrameNode(0, analyzer.locals.size(), analyzer.locals.toArray(), analyzer.stack.size(), analyzer.stack.toArray()));
}
super.visitInsn(opcode);
}
public void visitJumpInsn(int opcode, Label label) {
// System.out.println(opcode);
// if (opcode == Opcodes.GOTO) {
super.visitJumpInsn(opcode, label);
int nToPop = 0;
switch (opcode) {
case Opcodes.IFEQ:
case Opcodes.IFNE:
case Opcodes.IFLT:
case Opcodes.IFGE:
case Opcodes.IFGT:
case Opcodes.IFLE:
case Opcodes.IFNULL:
case Opcodes.IFNONNULL:
//pop 1
nToPop = 1;
break;
case Opcodes.IF_ICMPEQ:
case Opcodes.IF_ICMPNE:
case Opcodes.IF_ICMPLT:
case Opcodes.IF_ICMPGE:
case Opcodes.IF_ICMPGT:
case Opcodes.IF_ICMPLE:
case Opcodes.IF_ACMPEQ:
case Opcodes.IF_ACMPNE:
//pop 2
nToPop = 2;
break;
case Opcodes.GOTO:
//pop none
break;
default:
throw new IllegalArgumentException();
}
//The analyzer won't have executed this jump yet, so simulate the stack pops it will perform
List<Object> stack = new ArrayList<Object>(analyzer.stack);
// System.out.println("got to remove " + nToPop + " from " + analyzer.stack + " in " + className + "."+name );
while (nToPop > 0 && !stack.isEmpty()) {
stack.remove(stack.size() - 1);
nToPop--;
}
if (DEBUG)
System.out.println(name + " Rewriting " + curLabel + " OUT to " + stack);
outFrames.set(curLabel - 1, new FrameNode(0, analyzer.locals.size(), analyzer.locals.toArray(), stack.size(), stack.toArray()));
visitLabel(new Label());
// }
}
@Override
public void visitEnd() {
final HashMap<Integer, LinkedList<Integer>> neverAutoBoxByFrame = new HashMap<Integer, LinkedList<Integer>>();
final HashMap<Integer, LinkedList<Integer>> alwaysAutoBoxByFrame = new HashMap<Integer, LinkedList<Integer>>();
final HashMap<Integer, LinkedList<Integer>> outEdges = new HashMap<Integer, LinkedList<Integer>>();
final HashSet<Integer> insertACHECKCASTBEFORE = new HashSet<Integer>();
final HashSet<Integer> insertACONSTNULLBEFORE = new HashSet<Integer>();
Analyzer a = new Analyzer(new BasicArrayInterpreter()) {
protected int[] insnToLabel;
int getLabel(int insn) {
int label = -1;
for (int j = 0; j <= insn; j++) {
label = insnToLabel[j];
}
return label;
}
int getInsnAfterFrameFor(int insn) {
int r = 0;
for (int i = 0; i < insn; i++) {
if (instructions.get(i).getType() == AbstractInsnNode.FRAME)
r = i + 1;
}
return r;
}
int getLastInsnByLabel(int label) {
int r = 0;
for (int j = 0; j < insnToLabel.length; j++) {
if (insnToLabel[j] == label) {
if (instructions.get(j).getType() == AbstractInsnNode.FRAME)
continue;
r = j;
}
}
return r;
}
int getFirstInsnByLabel(int label) {
for (int j = 0; j < insnToLabel.length; j++) {
if (insnToLabel[j] == label) {
if (instructions.get(j).getType() == AbstractInsnNode.FRAME || instructions.get(j).getType() == AbstractInsnNode.LABEL
|| instructions.get(j).getType() == AbstractInsnNode.LINE)
continue;
return j;
}
}
return -1;
}
@Override
public Frame[] analyze(String owner, MethodNode m) throws AnalyzerException {
Iterator<AbstractInsnNode> insns = m.instructions.iterator();
insnToLabel = new int[m.instructions.size()];
endsWithGOTO = new boolean[insnToLabel.length];
// System.out.println("PAAA"+ name);
int label = -1;
boolean isFirst = true;
while (insns.hasNext()) {
AbstractInsnNode insn = insns.next();
int idx = m.instructions.indexOf(insn);
if (insn instanceof LabelNode) {
label++;
}
if (insn.getOpcode() == Opcodes.GOTO) {
endsWithGOTO[idx] = true;
}
insnToLabel[idx] = (isFirst ? 1 : label);
isFirst = false;
// System.out.println(idx + "->"+label);
}
Frame[] ret = super.analyze(owner, m);
// if (DEBUG)
// for (int i = 0; i < inFrames.size(); i++) {
// System.out.println("IN: " + i + " " + inFrames.get(i).stack);
// }
// if (DEBUG)
// for (int i = 0; i < outFrames.size(); i++) {
// System.out.println("OUT: " + i + " " + (outFrames.get(i) == null ? "null" : outFrames.get(i).stack));
// }
for (Entry<Integer, LinkedList<Integer>> edge : edges.entrySet()) {
Integer successor = edge.getKey();
if (edge.getValue().size() > 1) {
int labelToSuccessor = getLabel(successor);
if (DEBUG)
System.out.println(name + " Must merge: " + edge.getValue() + " into " + successor + " AKA " + labelToSuccessor);
if (DEBUG)
System.out.println("Input to successor: " + inFrames.get(labelToSuccessor).stack);
for (Integer toMerge : edge.getValue()) {
int labelToMerge = getLabel(toMerge);
if (DEBUG)
System.out.println(toMerge + " AKA " + labelToMerge);
if (DEBUG)
System.out.println((outFrames.get(labelToMerge) == null ? "null" : outFrames.get(labelToMerge).stack));
if (!outFrames.get(labelToMerge).stack.isEmpty() && !inFrames.get(labelToSuccessor).stack.isEmpty()) {
Object output1Top = outFrames.get(labelToMerge).stack.get(outFrames.get(labelToMerge).stack.size() - 1);
Object inputTop = inFrames.get(labelToSuccessor).stack.get(inFrames.get(labelToSuccessor).stack.size() - 1);
if (output1Top == Opcodes.TOP)
output1Top = outFrames.get(labelToMerge).stack.get(outFrames.get(labelToMerge).stack.size() - 2);
if (inputTop == Opcodes.TOP)
inputTop = inFrames.get(labelToSuccessor).stack.get(inFrames.get(labelToSuccessor).stack.size() - 2);
// System.out.println(className+"."+name+ " IN"+inputTop +" OUT " + output1Top);
if (output1Top != null && output1Top != inputTop) {
Type inputTopType = TaintAdapter.getTypeForStackType(inputTop);
Type outputTopType = TaintAdapter.getTypeForStackType(output1Top);
if ((output1Top == Opcodes.NULL) && inputTopType.getSort() == Type.ARRAY && inputTopType.getElementType().getSort() != Type.OBJECT
&& inputTopType.getDimensions() == 1) {
insertACONSTNULLBEFORE.add(toMerge);
} else if ((inputTopType.getSort() == Type.OBJECT || (inputTopType.getSort() == Type.ARRAY && inputTopType.getElementType().getSort() == Type.OBJECT)) && outputTopType.getSort() == Type.ARRAY && outputTopType.getElementType().getSort() != Type.OBJECT
&& inputTopType.getDimensions() == 1) {
insertACHECKCASTBEFORE.add(toMerge);
}
}
}
if (!outFrames.get(labelToMerge).local.isEmpty() && !inFrames.get(labelToSuccessor).local.isEmpty()) {
for (int i = 0; i < Math.min(outFrames.get(labelToMerge).local.size(), inFrames.get(labelToSuccessor).local.size()); i++) {
Object out = outFrames.get(labelToMerge).local.get(i);
Object in = inFrames.get(labelToSuccessor).local.get(i);
// System.out.println(name +" " +out + " out, " + in + " In" + " i "+i);
if (out instanceof String && in instanceof String) {
Type tout = Type.getObjectType((String) out);
Type tin = Type.getObjectType((String) in);
if (tout.getSort() == Type.ARRAY && tout.getElementType().getSort() != Type.OBJECT && tout.getDimensions() == 1 && tin.getSort() == Type.OBJECT) {
int insnN = getLastInsnByLabel(labelToMerge);
// System.out.println(name+desc);
// System.out.println(outFrames.get(labelToMerge).local + " out, \n" + inFrames.get(labelToSuccessor).local + " In" + " i "+i);
// System.out.println("T1::"+tout + " to " + tin + " this may be unsupported but should be handled by the above! in label " + instructions.get(insnN));
// System.out.println("In insn is " + getFirstInsnByLabel(labelToSuccessor));
// System.out.println("insn after frame is " + insnN +", " + instructions.get(insnN) + "<"+Printer.OPCODES[instructions.get(insnN).getOpcode()]);
// System.out.println(inFrames.get(labelToSuccessor).local);
if (!alwaysAutoBoxByFrame.containsKey(insnN))
alwaysAutoBoxByFrame.put(insnN, new LinkedList<Integer>());
alwaysAutoBoxByFrame.get(insnN).add(i);
}
}
}
}
}
}
}
//TODO: if the output of a frame is an array but the input is an obj, hint to always box?
//or is that necessary, because we already assume that it's unboxed.
return ret;
}
HashMap<Integer, LinkedList<Integer>> edges = new HashMap<Integer, LinkedList<Integer>>();
LinkedList<Integer> varsStoredThisInsn = new LinkedList<Integer>();
HashSet<String> visited = new HashSet<String>();
int insnIdxOrderVisited = 0;
@Override
protected void newControlFlowEdge(int insn, int successor) {
if(visited.contains(insn+"-"+successor))
return;
visited.add(insn+"-"+successor);
if (!edges.containsKey(successor))
edges.put(successor, new LinkedList<Integer>());
if (!edges.get(successor).contains(insn))
edges.get(successor).add(insn);
if (!outEdges.containsKey(insn))
outEdges.put(insn, new LinkedList<Integer>());
if (!outEdges.get(insn).contains(successor))
outEdges.get(insn).add(successor);
BasicBlock fromBlock;
if(!implicitAnalysisblocks.containsKey(insn))
{
//insn not added yet
fromBlock = new BasicBlock();
fromBlock.idx = insn;
fromBlock.idxOrder = insnIdxOrderVisited;
insnIdxOrderVisited++;
fromBlock.insn = instructions.get(insn);
implicitAnalysisblocks.put(insn,fromBlock);
}
else
fromBlock = implicitAnalysisblocks.get(insn);
AbstractInsnNode insnN = instructions.get(insn);
fromBlock.isJump = (insnN.getType()== AbstractInsnNode.JUMP_INSN && insnN.getOpcode() != Opcodes.GOTO)
|| insnN.getType() == AbstractInsnNode.LOOKUPSWITCH_INSN || insnN.getType() == AbstractInsnNode.TABLESWITCH_INSN;
if(fromBlock.isJump && insnN.getType() == AbstractInsnNode.JUMP_INSN)
{
switch(insnN.getOpcode())
{
case Opcodes.IF_ICMPEQ:
case Opcodes.IF_ICMPNE:
case Opcodes.IF_ICMPGE:
case Opcodes.IF_ICMPGT:
case Opcodes.IF_ICMPLT:
case Opcodes.IF_ICMPLE:
case Opcodes.IF_ACMPEQ:
case Opcodes.IF_ACMPNE:
fromBlock.is2ArgJump = true;
break;
}
}
BasicBlock succesorBlock;
if(implicitAnalysisblocks.containsKey(successor))
succesorBlock = implicitAnalysisblocks.get(successor);
else
{
succesorBlock = new BasicBlock();
succesorBlock.idx = successor;
succesorBlock.idxOrder = insnIdxOrderVisited;
insnIdxOrderVisited++;
succesorBlock.insn = instructions.get(successor);
implicitAnalysisblocks.put(successor, succesorBlock);
if(succesorBlock.insn.getType() == AbstractInsnNode.IINC_INSN)
{
succesorBlock.varsWritten.add(((IincInsnNode)succesorBlock.insn).var);
}
else if(succesorBlock.insn.getType() == AbstractInsnNode.VAR_INSN)
{
switch(succesorBlock.insn.getOpcode())
{
case ISTORE:
case ASTORE:
case DSTORE:
case LSTORE:
succesorBlock.varsWritten.add(((VarInsnNode)succesorBlock.insn).var);
break;
}
}
}
fromBlock.successors.add(succesorBlock);
succesorBlock.predecessors.add(fromBlock);
if(fromBlock.isJump)
{
if(fromBlock.covered)
succesorBlock.onTrueSideOfJumpFrom.add(fromBlock);
else
{
succesorBlock.onFalseSideOfJumpFrom.add(fromBlock);
fromBlock.covered = true;
}
}
super.newControlFlowEdge(insn, successor);
}
};
try {
Frame[] frames = a.analyze(className, this);
// HashMap<Integer,BasicBlock> cfg = new HashMap<Integer, BasicBlock>();
// for(Integer i : outEdges.keySet())
// {
// BasicBlock b = new BasicBlock();
// b.idx = i;
// b.outEdges = outEdges.get(i);
// int endIdx = this.instructions.size();
// for(Integer jj : outEdges.get(i))
// if(i < endIdx)
// endIdx = jj;
// for(int j =i; j < endIdx; j++)
// {
// if(instructions.get(i) instanceof VarInsnNode)
// {
// VarInsnNode n = ((VarInsnNode) instructions.get(i));
// b.varsAccessed.add(n.var);
// }
// }
// cfg.put(i, b);
// }
// for(Integer i : cfg.keySet())
// {
// computeVarsAccessed(i,cfg);
// }
ArrayList<Integer> toAddNullBefore = new ArrayList<Integer>();
// toAddNullBefore.addAll(insertACONSTNULLBEFORE);
toAddNullBefore.addAll(insertACHECKCASTBEFORE);
toAddNullBefore.addAll(neverAutoBoxByFrame.keySet());
toAddNullBefore.addAll(alwaysAutoBoxByFrame.keySet());
Collections.sort(toAddNullBefore);
HashMap<LabelNode, LabelNode> problemLabels = new HashMap<LabelNode, LabelNode>();
HashMap<LabelNode, HashSet<Integer>> problemVars = new HashMap<LabelNode, HashSet<Integer>>();
int nNewNulls = 0;
for (Integer i : toAddNullBefore) {
AbstractInsnNode insertAfter = this.instructions.get(i + nNewNulls);
if (insertACONSTNULLBEFORE.contains(i)) {
// if (DEBUG)
// System.out.println("Adding Null before: " + i);
// if (insertAfter.getOpcode() == Opcodes.GOTO)
// insertAfter = insertAfter.getPrevious();
// this.instructions.insert(insertAfter, new InsnNode(Opcodes.ACONST_NULL));
// nNewNulls++;
} else if (insertACHECKCASTBEFORE.contains(i)) {
if (DEBUG)
System.out.println("Adding checkcast before: " + i + " (plus " + nNewNulls + ")");
if (insertAfter.getOpcode() == Opcodes.GOTO)
insertAfter = insertAfter.getPrevious();
this.instructions.insert(insertAfter, new TypeInsnNode(Opcodes.CHECKCAST, Type.getInternalName(Object.class)));
nNewNulls++;
} else if (neverAutoBoxByFrame.containsKey(i)) {
if (insertAfter.getOpcode() == Opcodes.GOTO)
insertAfter = insertAfter.getPrevious();
for (int j : neverAutoBoxByFrame.get(i)) {
// System.out.println("Adding nevefbox: before " + i + " (plus " + nNewNulls + ")");
this.instructions.insert(insertAfter, new VarInsnNode(TaintUtils.NEVER_AUTOBOX, j));
nNewNulls++;
}
} else if (alwaysAutoBoxByFrame.containsKey(i)) {
for (int j : alwaysAutoBoxByFrame.get(i)) {
// System.out.println("Adding checkcast always: before " + i + " (plus " + nNewNulls + ")");
// while(insertAfter.getType() == AbstractInsnNode.LABEL ||
// insertAfter.getType() == AbstractInsnNode.LINE||
// insertAfter.getType() == AbstractInsnNode.FRAME)
// insertAfter = insertAfter.getNext();
AbstractInsnNode query = insertAfter.getNext();
while(query.getNext() != null && (query.getType() == AbstractInsnNode.LABEL ||
query.getType() == AbstractInsnNode.LINE ||
query.getType() == AbstractInsnNode.FRAME || query.getOpcode() > 200))
query = query.getNext();
if(query.getOpcode() == Opcodes.ALOAD && query.getNext().getOpcode() == Opcodes.MONITOREXIT)
insertAfter = query.getNext();
if(query.getType() == AbstractInsnNode.JUMP_INSN)
insertAfter = query;
if(insertAfter.getType() == AbstractInsnNode.JUMP_INSN)
{
insertAfter = insertAfter.getPrevious();
// System.out.println(Printer.OPCODES[insertAfter.getNext().getOpcode()]);
// System.out.println("insertbefore : " + ((JumpInsnNode) insertAfter.getNext()).toString());
if(insertAfter.getNext().getOpcode() != Opcodes.GOTO)
{
this.instructions.insert(insertAfter, new VarInsnNode(TaintUtils.ALWAYS_BOX_JUMP, j));
}
else
{
// System.out.println("box immediately");
this.instructions.insert(insertAfter, new VarInsnNode(TaintUtils.ALWAYS_AUTOBOX, j));
}
}
else
{
this.instructions.insert(insertAfter, new VarInsnNode(TaintUtils.ALWAYS_AUTOBOX, j));
}
nNewNulls++;
}
}
}
// System.out.println(name+desc);
//fix LVs for android (sigh)
// for(LabelNode l : problemLabels.keySet())
// {
// System.out.println("Problem label: "+l);
// }
boolean hadChanges = true;
while (hadChanges) {
hadChanges = false;
HashSet<LocalVariableNode> newLVNodes = new HashSet<LocalVariableNode>();
if (this.localVariables != null) {
for (Object _lv : this.localVariables) {
LocalVariableNode lv = (LocalVariableNode) _lv;
AbstractInsnNode toCheck = lv.start;
LabelNode veryEnd = lv.end;
while (toCheck != null && toCheck != lv.end) {
if ((toCheck.getOpcode() == TaintUtils.ALWAYS_BOX_JUMP || toCheck.getOpcode() ==TaintUtils.ALWAYS_AUTOBOX) && ((VarInsnNode) toCheck).var == lv.index) {
// System.out.println("LV " + lv.name + " will be a prob around " + toCheck);
LabelNode beforeProblem = new LabelNode(new Label());
LabelNode afterProblem = new LabelNode(new Label());
this.instructions.insertBefore(toCheck, beforeProblem);
this.instructions.insert(toCheck.getNext(), afterProblem);
LocalVariableNode newLV = new LocalVariableNode(lv.name, lv.desc, lv.signature, afterProblem, veryEnd, lv.index);
lv.end = beforeProblem;
newLVNodes.add(newLV);
hadChanges = true;
break;
}
toCheck = toCheck.getNext();
}
}
this.localVariables.addAll(newLVNodes);
}
}
} catch (AnalyzerException e) {
e.printStackTrace();
}
if (Configuration.IMPLICIT_TRACKING || Configuration.IMPLICIT_LIGHT_TRACKING) {
boolean hasJumps = false;
for(BasicBlock b : implicitAnalysisblocks.values())
if(b.isJump)
{
hasJumps = true;
break;
}
if (implicitAnalysisblocks.size() > 1 && hasJumps) {
Stack<BasicBlock> stack = new Stack<PrimitiveArrayAnalyzer.BasicBlock>();
//Fix successors to only point to jumps or labels
/*
* public HashSet<BasicBlock> calculateSuccessorsCompact() {
if(compactSuccessorsCalculated)
return successorsCompact;
for (BasicBlock b : successors) {
compactSuccessorsCalculated = true;
if(b.isInteresting())
successorsCompact.add(b);
else
successorsCompact.addAll(b.calculateSuccessorsCompact());
}
return successorsCompact;
}
*/
boolean changed = true;
while (changed) {
changed = false;
for (BasicBlock b : implicitAnalysisblocks.values()) {
for (BasicBlock s : b.successors) {
if (s.isInteresting()){
changed |= b.successorsCompact.add(s);
}
else
{
changed |= b.successorsCompact.addAll(s.successorsCompact);
}
}
}
}
//Post dominator analysis
for(BasicBlock b : implicitAnalysisblocks.values())
b.postDominators.add(b);
changed = true;
while(changed)
{
changed = false;
for(BasicBlock b : implicitAnalysisblocks.values())
{
if(b.successorsCompact.size() > 0 && b.isInteresting())
{
HashSet<BasicBlock> intersectionOfPredecessors = new HashSet<PrimitiveArrayAnalyzer.BasicBlock>();
Iterator<BasicBlock> iter = b.successorsCompact.iterator();
BasicBlock successor = iter.next();
intersectionOfPredecessors.addAll(successor.postDominators);
while(iter.hasNext())
{
successor = iter.next();
intersectionOfPredecessors.retainAll(successor.postDominators);
}
changed |= b.postDominators.addAll(intersectionOfPredecessors);
}
}
}
//Add in markings for where jumps are resolved
for(BasicBlock j : implicitAnalysisblocks.values())
{
if(j.isJump)
{
// System.out.println(j + " " +j.postDominators);
j.postDominators.remove(j);
BasicBlock min = null;
for(BasicBlock d : j.postDominators)
{
if(min == null || min.idxOrder > d.idxOrder)
min = d;
}
// System.out.println(j + " resolved at " + min);
if (min != null) {
min.resolvedBlocks.add(j);
min.resolvedHereBlocks.add(j);
}
}
}
//Propagate forward true-side/false-side to determine which vars are written
stack.add(implicitAnalysisblocks.get(0));
while (!stack.isEmpty()) {
BasicBlock b = stack.pop();
if (b.visited)
continue;
b.visited = true;
b.onFalseSideOfJumpFrom.removeAll(b.resolvedBlocks);
b.onTrueSideOfJumpFrom.removeAll(b.resolvedBlocks);
//Propagate markings to successors
for (BasicBlock s : b.successors) {
boolean _changed = false;
_changed |= s.onFalseSideOfJumpFrom.addAll(b.onFalseSideOfJumpFrom);
_changed |= s.onTrueSideOfJumpFrom.addAll(b.onTrueSideOfJumpFrom);
_changed |= s.resolvedBlocks.addAll(b.resolvedBlocks);
if(_changed)
s.visited = false;
s.onFalseSideOfJumpFrom.remove(s);
s.onTrueSideOfJumpFrom.remove(s);
if (!s.visited)
stack.add(s);
}
}
for(BasicBlock j : implicitAnalysisblocks.values())
{
// this.instructions.insertBefore(j.insn, new LdcInsnNode(j.idx + " " + j.onTrueSideOfJumpFrom + " " + j.onFalseSideOfJumpFrom));
// System.out.println(j.idx + " " + j.postDominators);
if(j.isJump)
{
stack = new Stack<PrimitiveArrayAnalyzer.BasicBlock>();
stack.addAll(j.successors);
while(!stack.isEmpty())
{
BasicBlock b = stack.pop();
if(b.visited)
continue;
b.visited = true;
if(b.onFalseSideOfJumpFrom.contains(j))
{
j.varsWrittenTrueSide.addAll(b.varsWritten);
stack.addAll(b.successors);
}
else if(b.onTrueSideOfJumpFrom.contains(j))
{
j.varsWrittenFalseSide.addAll(b.varsWritten);
stack.addAll(b.successors);
}
}
}
}
HashMap<BasicBlock, Integer> jumpIDs = new HashMap<PrimitiveArrayAnalyzer.BasicBlock, Integer>();
int jumpID = 0;
for (BasicBlock b : implicitAnalysisblocks.values()) {
if (b.isJump) {
jumpID++;
HashSet<Integer> common = new HashSet<Integer>();
common.addAll(b.varsWrittenFalseSide);
common.retainAll(b.varsWrittenTrueSide);
HashSet<Integer> diff =new HashSet<Integer>();
diff.addAll(b.varsWrittenTrueSide);
diff.addAll(b.varsWrittenFalseSide);
diff.removeAll(common);
for(int i : diff)
{
instructions.insertBefore(b.insn, new VarInsnNode(TaintUtils.FORCE_CTRL_STORE, i));
}
instructions.insertBefore(b.insn, new VarInsnNode(TaintUtils.BRANCH_START, jumpID));
jumpIDs.put(b, jumpID);
if(b.is2ArgJump)
jumpID++;
}
}
for (BasicBlock b : implicitAnalysisblocks.values()) {
// System.out.println(b.idx + " -> " + b.successorsCompact);
// System.out.println(b.successors);
// System.out.println(b.resolvedBlocks);
for (BasicBlock r : b.resolvedHereBlocks) {
// System.out.println("Resolved: " + jumpIDs.get(r) + " at " + b.idx);
// System.out.println("GOt" + jumpIDs);
AbstractInsnNode insn = b.insn;
while (insn.getType() == AbstractInsnNode.FRAME || insn.getType() == AbstractInsnNode.LINE || insn.getType() == AbstractInsnNode.LABEL)
insn = insn.getNext();
instructions.insertBefore(insn, new VarInsnNode(TaintUtils.BRANCH_END, jumpIDs.get(r)));
if(r.is2ArgJump)
instructions.insertBefore(insn, new VarInsnNode(TaintUtils.BRANCH_END, jumpIDs.get(r)+1));
}
if(b.successors.isEmpty())
{
instructions.insertBefore(b.insn, new InsnNode(TaintUtils.FORCE_CTRL_STORE));
// if (b.insn.getOpcode() != Opcodes.ATHROW) {
HashSet<BasicBlock> live = new HashSet<PrimitiveArrayAnalyzer.BasicBlock>(b.onFalseSideOfJumpFrom);
live.addAll(b.onTrueSideOfJumpFrom);
for (BasicBlock r : live) {
instructions.insertBefore(b.insn, new VarInsnNode(TaintUtils.BRANCH_END, jumpIDs.get(r)));
if (r.is2ArgJump)
instructions.insertBefore(b.insn, new VarInsnNode(TaintUtils.BRANCH_END, jumpIDs.get(r) + 1));
}
// }
}
// System.out.println(b.insn + " - " + b.domBlocks + "-" + b.antiDomBlocks);
}
nJumps = jumpID;
}
}
// System.out.println(name);
if (Configuration.ANNOTATE_LOOPS) {
SCCAnalyzer scc = new SCCAnalyzer();
int max = 0;
for(Integer i : implicitAnalysisblocks.keySet())
{
if(i > max)
max = i;
}
BasicBlock[] flatGraph = new BasicBlock[max + 1];
for(int i = 0; i < flatGraph.length; i++)
flatGraph[i] = implicitAnalysisblocks.get(i);
List<List<BasicBlock>> sccs = scc.scc(flatGraph);
for (List<BasicBlock> c : sccs) {
if (c.size() == 1)
continue;
// System.out.println(c);
for (BasicBlock b : c) {
if (b.successors.size() > 1)
if (!c.containsAll(b.successors)) {
// loop header
this.instructions.insertBefore(b.insn, new InsnNode(TaintUtils.LOOP_HEADER));
}
}
}
}
this.maxStack += 100;
AbstractInsnNode insn = instructions.getFirst();
while(insn != null)
{
if(insn.getType() == AbstractInsnNode.FRAME)
{
//Insert a note before the instruction before this guy
AbstractInsnNode insertBefore = insn;
while (insertBefore != null && (insertBefore.getType() == AbstractInsnNode.FRAME || insertBefore.getType() == AbstractInsnNode.LINE
|| insertBefore.getType() == AbstractInsnNode.LABEL))
insertBefore = insertBefore.getPrevious();
if (insertBefore != null)
this.instructions.insertBefore(insertBefore, new InsnNode(TaintUtils.FOLLOWED_BY_FRAME));
}
insn = insn.getNext();
}
this.accept(cmv);
}
HashMap<Integer,BasicBlock> implicitAnalysisblocks = new HashMap<Integer,PrimitiveArrayAnalyzer.BasicBlock>();
void calculatePostDominators(BasicBlock b)
{
if(b.visited)
return;
b.visited = true;
b.onFalseSideOfJumpFrom.removeAll(b.resolvedBlocks);
b.onTrueSideOfJumpFrom.removeAll(b.resolvedBlocks);
//Propagate markings to successors
for(BasicBlock s : b.successors)
{
s.onFalseSideOfJumpFrom.addAll(b.onFalseSideOfJumpFrom);
s.onTrueSideOfJumpFrom.addAll(b.onTrueSideOfJumpFrom);
s.resolvedBlocks.addAll(b.resolvedBlocks);
if(!s.visited)
calculatePostDominators(s);
}
}
}
static class BasicBlock{
protected int idxOrder;
public HashSet<BasicBlock> postDominators= new HashSet<PrimitiveArrayAnalyzer.BasicBlock>() ;
int idx;
// LinkedList<Integer> outEdges = new LinkedList<Integer>();
HashSet<BasicBlock> successorsCompact = new HashSet<PrimitiveArrayAnalyzer.BasicBlock>();
HashSet<BasicBlock> successors = new HashSet<PrimitiveArrayAnalyzer.BasicBlock>();
HashSet<BasicBlock> predecessors = new HashSet<PrimitiveArrayAnalyzer.BasicBlock>();
AbstractInsnNode insn;
boolean covered;
boolean visited;
boolean isJump;
boolean is2ArgJump;
HashSet<BasicBlock> resolvedHereBlocks = new HashSet<PrimitiveArrayAnalyzer.BasicBlock>();
private boolean compactSuccessorsCalculated;
public boolean isInteresting()
{
return isJump || insn instanceof LabelNode;
}
HashSet<BasicBlock> resolvedBlocks = new HashSet<PrimitiveArrayAnalyzer.BasicBlock>();
HashSet<BasicBlock> onFalseSideOfJumpFrom = new HashSet<PrimitiveArrayAnalyzer.BasicBlock>();
HashSet<BasicBlock> onTrueSideOfJumpFrom = new HashSet<PrimitiveArrayAnalyzer.BasicBlock>();
HashSet<Integer> varsWritten = new HashSet<Integer>();
HashSet<Integer> varsWrittenTrueSide = new HashSet<Integer>();
HashSet<Integer> varsWrittenFalseSide = new HashSet<Integer>();
@Override
public String toString() {
// return insn.toString();
return ""+idx;
}
}
private static boolean isPrimitiveArrayType(BasicValue v) {
if (v == null || v.getType() == null)
return false;
return v.getType().getSort() == Type.ARRAY && v.getType().getElementType().getSort() != Type.OBJECT;
}
static final boolean DEBUG = false;
public HashSet<Type> wrapperTypesToPreAlloc = new HashSet<Type>();
public int nJumps;
@Override
public void visitInsn(int opcode) {
super.visitInsn(opcode);
switch (opcode) {
case Opcodes.FADD:
case Opcodes.FREM:
case Opcodes.FSUB:
case Opcodes.FMUL:
case Opcodes.FDIV:
if(Configuration.PREALLOC_STACK_OPS)
wrapperTypesToPreAlloc.add(TaintUtils.getContainerReturnType("F"));
break;
case Opcodes.DADD:
case Opcodes.DSUB:
case Opcodes.DMUL:
case Opcodes.DDIV:
case Opcodes.DREM:
wrapperTypesToPreAlloc.add(TaintUtils.getContainerReturnType("D"));
break;
case Opcodes.LSHL:
case Opcodes.LUSHR:
case Opcodes.LSHR:
case Opcodes.LSUB:
case Opcodes.LMUL:
case Opcodes.LADD:
case Opcodes.LDIV:
case Opcodes.LREM:
case Opcodes.LAND:
case Opcodes.LOR:
case Opcodes.LXOR:
wrapperTypesToPreAlloc.add(TaintUtils.getContainerReturnType("J"));
break;
case Opcodes.LCMP:
case Opcodes.DCMPL:
case Opcodes.DCMPG:
case Opcodes.FCMPG:
case Opcodes.FCMPL:
case Opcodes.IADD:
case Opcodes.ISUB:
case Opcodes.IMUL:
case Opcodes.IDIV:
case Opcodes.IREM:
case Opcodes.ISHL:
case Opcodes.ISHR:
case Opcodes.IUSHR:
case Opcodes.IOR:
case Opcodes.IAND:
case Opcodes.IXOR:
wrapperTypesToPreAlloc.add(TaintUtils.getContainerReturnType("I"));
break;
case Opcodes.IALOAD:
wrapperTypesToPreAlloc.add(TaintUtils.getContainerReturnType("I"));
break;
case Opcodes.BALOAD:
wrapperTypesToPreAlloc.add(TaintUtils.getContainerReturnType("B"));
wrapperTypesToPreAlloc.add(TaintUtils.getContainerReturnType("Z"));
break;
case Opcodes.CALOAD:
wrapperTypesToPreAlloc.add(TaintUtils.getContainerReturnType("C"));
break;
case Opcodes.DALOAD:
wrapperTypesToPreAlloc.add(TaintUtils.getContainerReturnType("D"));
break;
case Opcodes.LALOAD:
wrapperTypesToPreAlloc.add(TaintUtils.getContainerReturnType("J"));
break;
case Opcodes.FALOAD:
wrapperTypesToPreAlloc.add(TaintUtils.getContainerReturnType("F"));
break;
case Opcodes.SALOAD:
wrapperTypesToPreAlloc.add(TaintUtils.getContainerReturnType("S"));
break;
}
}
@Override
public void visitMethodInsn(int opcode, String owner, String name, String desc, boolean itfc) {
super.visitMethodInsn(opcode, owner, name, desc,itfc);
Type returnType = Type.getReturnType(desc);
Type newReturnType = TaintUtils.getContainerReturnType(returnType);
if(newReturnType != returnType && !(returnType.getSort() == Type.ARRAY))
wrapperTypesToPreAlloc.add(newReturnType);
}
public PrimitiveArrayAnalyzer(final String className, int access, final String name, final String desc, String signature, String[] exceptions, final MethodVisitor cmv) {
super(Opcodes.ASM5);
this.mv = new PrimitiveArrayAnalyzerMN(access, name, desc, signature, exceptions, className, cmv);
}
public PrimitiveArrayAnalyzer(Type singleWrapperTypeToAdd) {
super(Opcodes.ASM5);
		this.mv = new PrimitiveArrayAnalyzerMN(0, null, null, null, null, null, null);
if(singleWrapperTypeToAdd.getSort() == Type.OBJECT && singleWrapperTypeToAdd.getInternalName().startsWith("edu/columbia/cs/psl/phosphor/struct/Tainted"))
this.wrapperTypesToPreAlloc.add(singleWrapperTypeToAdd);
}
NeverNullArgAnalyzerAdapter analyzer;
public void setAnalyzer(NeverNullArgAnalyzerAdapter preAnalyzer) {
analyzer = preAnalyzer;
}
}
| A poor, but seemingly working fix for dead code
| Phosphor/src/edu/columbia/cs/psl/phosphor/instrumenter/PrimitiveArrayAnalyzer.java | A poor, but seemingly working fix for dead code | <ide><path>hosphor/src/edu/columbia/cs/psl/phosphor/instrumenter/PrimitiveArrayAnalyzer.java
<ide> import org.objectweb.asm.tree.InsnNode;
<ide> import org.objectweb.asm.tree.LabelNode;
<ide> import org.objectweb.asm.tree.LdcInsnNode;
<add>import org.objectweb.asm.tree.LineNumberNode;
<ide> import org.objectweb.asm.tree.LocalVariableNode;
<ide> import org.objectweb.asm.tree.MethodNode;
<ide> import org.objectweb.asm.tree.TypeInsnNode;
<ide> public Frame[] analyze(String owner, MethodNode m) throws AnalyzerException {
<ide> Iterator<AbstractInsnNode> insns = m.instructions.iterator();
<ide> insnToLabel = new int[m.instructions.size()];
<del> endsWithGOTO = new boolean[insnToLabel.length];
<del>
<ide> // System.out.println("PAAA"+ name);
<ide> int label = -1;
<ide> boolean isFirst = true;
<ide> label++;
<ide> }
<ide>
<del> if (insn.getOpcode() == Opcodes.GOTO) {
<del> endsWithGOTO[idx] = true;
<del> }
<ide> insnToLabel[idx] = (isFirst ? 1 : label);
<ide> isFirst = false;
<ide> // System.out.println(idx + "->"+label);
<ide> }
<ide> Frame[] ret = super.analyze(owner, m);
<add>
<ide> // if (DEBUG)
<ide> // for (int i = 0; i < inFrames.size(); i++) {
<ide> // System.out.println("IN: " + i + " " + inFrames.get(i).stack);
<ide> try {
<ide>
<ide> Frame[] frames = a.analyze(className, this);
<add> for(int i = 0 ; i < instructions.size(); i++)
<add> {
<add> if(frames[i] == null)
<add> {
<add> //TODO dead code elimination.
<add> //This should be done more generically
<add> //But, this worked for JDT's stupid bytecode, so...
<add> AbstractInsnNode insn = instructions.get(i);
<add> if (insn != null && !(insn instanceof LabelNode)) {
<add> if(insn.getOpcode() == Opcodes.GOTO)
<add> {
<add> instructions.insertBefore(insn, new InsnNode(Opcodes.ATHROW));
<add> instructions.remove(insn);
<add> }
<add> else if (insn instanceof FrameNode)
<add> {
<add> FrameNode fn = (FrameNode) insn;
<add> fn.local = Collections.EMPTY_LIST;
<add> fn.stack = Collections.singletonList("java/lang/Throwable");
<add> }
<add> }
<add> }
<add> }
<ide> // HashMap<Integer,BasicBlock> cfg = new HashMap<Integer, BasicBlock>();
<ide> // for(Integer i : outEdges.keySet())
<ide> // { |
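// --- Sketch of the dead-code fix in the diff above, assuming ASM's tree API. ---
// The class and method names are illustrative, not part of the commit; only the
// transformation mirrors the diff: a null analyzer frame marks an unreachable
// instruction, dead GOTOs become ATHROWs, and their stack-map frames are
// rewritten to the single-Throwable shape a throw leaves behind.
import java.util.Collections;
import org.objectweb.asm.Opcodes;
import org.objectweb.asm.tree.AbstractInsnNode;
import org.objectweb.asm.tree.FrameNode;
import org.objectweb.asm.tree.InsnNode;
import org.objectweb.asm.tree.LabelNode;
import org.objectweb.asm.tree.MethodNode;
import org.objectweb.asm.tree.analysis.Analyzer;
import org.objectweb.asm.tree.analysis.AnalyzerException;
import org.objectweb.asm.tree.analysis.BasicInterpreter;
import org.objectweb.asm.tree.analysis.BasicValue;
import org.objectweb.asm.tree.analysis.Frame;

final class DeadCodeNeutralizerSketch {
	static void stripDeadCode(String owner, MethodNode mn) throws AnalyzerException {
		Frame<BasicValue>[] frames =
				new Analyzer<BasicValue>(new BasicInterpreter()).analyze(owner, mn);
		for (int i = 0; i < mn.instructions.size(); i++) {
			AbstractInsnNode insn = mn.instructions.get(i);
			if (frames[i] != null || insn == null || insn instanceof LabelNode) {
				continue; //reachable, or a label that must stay as a jump target
			}
			if (insn.getOpcode() == Opcodes.GOTO) {
				//insert-then-remove keeps the instruction count stable, so the
				//frames[] indices stay aligned with the instruction list
				mn.instructions.insertBefore(insn, new InsnNode(Opcodes.ATHROW));
				mn.instructions.remove(insn);
			} else if (insn instanceof FrameNode) {
				FrameNode fn = (FrameNode) insn;
				fn.local = Collections.emptyList();
				fn.stack = Collections.singletonList("java/lang/Throwable");
			}
		}
	}
}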
|
JavaScript | bsd-2-clause | 8ee9636c072a19a8a76e6cdf8f590f580dac411f | 0 | salman-kamkoriwala/OsJsSample,dmikey/OS.js-v2,andersevenrud/OS.js-v2,gravityacademy/OS.js-v2,omet-ca/omet-desktop,andersevenrud/OS.js-v2,marcinlimanski/OS.js-v2,twkrol/OS.js-v2,dmikey/OS.js-v2,ellis/OS.js-v2,andersevenrud/OS.js-v2,arduino-org/OS.js-v2,omet-ca/omet-desktop,arduino-org/OS.js-v2,arduino-org/OS.js-v2,marcinlimanski/OS.js-v2,marcinlimanski/OS.js-v2,gravityacademy/OS.js-v2,arduino-org/Arduino-OS,twkrol/OS.js-v2,ellis/OS.js-v2,Rxswyers/OS.js,arduino-org/Arduino-OS,Rxswyers/OS.js,salman-kamkoriwala/OsJsSample,gravityacademy/OS.js-v2,Rxswyers/OS.js,marcinlimanski/OS.js-v2,arduino-org/Arduino-OS,dmikey/OS.js-v2,andersevenrud/OS.js-v2,arduino-org/Arduino-OS,gravityacademy/OS.js-v2,dmikey/OS.js-v2,arduino-org/OS.js-v2,twkrol/OS.js-v2,ellis/OS.js-v2,ellis/OS.js-v2,salman-kamkoriwala/OsJsSample,arduino-org/Arduino-OS,dmikey/OS.js-v2,Rxswyers/OS.js,gravityacademy/OS.js-v2,salman-kamkoriwala/OsJsSample,ellis/OS.js-v2,omet-ca/omet-desktop,salman-kamkoriwala/OsJsSample,marcinlimanski/OS.js-v2,omet-ca/omet-desktop,arduino-org/OS.js-v2,twkrol/OS.js-v2,Rxswyers/OS.js,omet-ca/omet-desktop,twkrol/OS.js-v2,andersevenrud/OS.js-v2 | /*!
* OS.js - JavaScript Operating System
*
* Copyright (c) 2011-2015, Anders Evenrud <[email protected]>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* @author Anders Evenrud <[email protected]>
* @licence Simplified BSD License
*/
(function(API, Utils, VFS) {
'use strict';
window.OSjs = window.OSjs || {};
OSjs.API = OSjs.API || {};
OSjs.GUI = OSjs.GUI || {};
OSjs.GUI.Elements = OSjs.GUI.Elements || {};
/////////////////////////////////////////////////////////////////////////////
// HELPERS
/////////////////////////////////////////////////////////////////////////////
/**
* Gets window id from upper parent element
*
* @param DOMElement el Child element (can be anything)
*
* @return int
*
* @api OSjs.GUI.Helpers.getWindowId()
*/
function getWindowId(el) {
while ( el.parentNode ) {
var attr = el.getAttribute('data-window-id');
if ( attr !== null ) {
return parseInt(attr, 10);
}
el = el.parentNode;
}
return null;
}
/**
* Gets "label" from a node
*
* @param DOMElement el The element
*
* @return String
*
* @api OSjs.GUI.Helpers.getLabel()
*/
function getLabel(el) {
var label = el.getAttribute('data-label');
return label || '';
}
/**
* Gets "label" from a node (Where it can be innerHTML and parameter)
*
* @param DOMElement el The element
*
* @return String
*
* @api OSjs.GUI.Helpers.getValueLabel()
*/
function getValueLabel(el, attr) {
var label = attr ? el.getAttribute('data-label') : null;
if ( el.childNodes.length && el.childNodes[0].nodeType === 3 && el.childNodes[0].nodeValue ) {
label = el.childNodes[0].nodeValue;
Utils.$empty(el);
}
return label || '';
}
/**
* Gets "value" from a node
*
* @param DOMElement el The element
*
* @return String
*
* @api OSjs.GUI.Helpers.getViewNodeValue()
*/
function getViewNodeValue(el) {
var value = el.getAttribute('data-value');
if ( typeof value === 'string' && value.match(/^\[|\{/) ) {
try {
value = JSON.parse(value);
} catch ( e ) {
value = null;
}
}
return value;
}
/**
* Internal for getting
*
* @param DOMElement el Element
* @param OSjs.Core.Window win (optional) Window Reference
*
* @return String
*
* @api OSjs.GUI.Helpers.getIcon()
*/
function getIcon(el, win) {
var image = el.getAttribute('data-icon');
if ( image ) {
if ( image.match(/^stock:\/\//) ) {
image = image.replace('stock://', '');
var size = '16x16';
try {
var spl = image.split('/');
var tmp = spl.shift();
var siz = tmp.match(/^\d+x\d+/);
if ( siz ) {
size = siz[0];
image = spl.join('/');
}
image = API.getIcon(image, size);
} catch ( e ) {}
} else if ( image.match(/^app:\/\//) ) {
image = API.getApplicationResource(win._app, image.replace('app://', ''));
}
}
return image;
}
/**
* Wrapper for getting custom dom element property value
*
* @param DOMElement el Element
* @param String param Parameter name
* @param String tagName (Optional) What tagname is in use? Automatic
*
* @api OSjs.GUI.Helpers.getProperty()
*
* @return Mixed
*/
function getProperty(el, param, tagName) {
tagName = tagName || el.tagName.toLowerCase();
var isDataView = tagName.match(/^gui\-(tree|icon|list|file)\-view$/);
if ( param === 'value' && !isDataView) {
if ( (['gui-text', 'gui-password', 'gui-textarea', 'gui-slider', 'gui-select', 'gui-select-list']).indexOf(tagName) >= 0 ) {
return el.querySelector('input, textarea, select').value;
}
if ( (['gui-checkbox', 'gui-radio', 'gui-switch']).indexOf(tagName) >= 0 ) {
return !!el.querySelector('input').checked;
//return el.querySelector('input').value === 'on';
}
return null;
}
if ( (param === 'value' || param === 'selected') && isDataView ) {
return OSjs.GUI.Elements[tagName].values(el);
}
return el.getAttribute('data-' + param);
}
/**
* Wrapper for setting custom dom element property value
*
* @param DOMElement el Element
* @param String param Parameter name
* @param Mixed value Parameter value
* @param String tagName (Optional) What tagname is in use? Automatic
*
* @api OSjs.GUI.Helpers.setProperty()
*
* @return void
*/
function setProperty(el, param, value, tagName) {
tagName = tagName || el.tagName.toLowerCase();
function _setInputProperty() {
var firstChild = el.querySelector('textarea, input, select, button');
if ( param === 'value' ) {
if ( tagName === 'gui-radio' || tagName === 'gui-checkbox' ) {
if ( value ) {
firstChild.setAttribute('checked', 'checked');
} else {
firstChild.removeAttribute('checked');
}
}
firstChild.value = value;
return;
} else if ( param === 'disabled' ) {
if ( value ) {
firstChild.setAttribute('disabled', 'disabled');
} else {
firstChild.removeAttribute('disabled');
}
return;
}
firstChild.setAttribute(param, value || '');
}
function _setElementProperty() {
if ( typeof value === 'boolean' ) {
value = value ? 'true' : 'false';
} else if ( typeof value === 'object' ) {
try {
value = JSON.stringify(value);
} catch ( e ) {}
}
el.setAttribute('data-' + param, value);
}
function _createInputLabel() {
if ( param === 'label' ) {
var firstChild = el.querySelector('textarea, input, select');
el.appendChild(firstChild);
Utils.$remove(el.querySelector('label'));
createInputLabel(el, tagName.replace(/^gui\-/, ''), firstChild, value);
}
}
// Generics for input elements
var firstChild = el.children[0];
var accept = ['gui-slider', 'gui-text', 'gui-password', 'gui-textarea', 'gui-checkbox', 'gui-radio', 'gui-select', 'gui-select-list', 'gui-button'];
if ( accept.indexOf(tagName) >= 0 ) {
_setInputProperty();
_createInputLabel();
}
// Other types of elements
accept = ['gui-image', 'gui-audio', 'gui-video'];
if ( (['src', 'controls', 'autoplay', 'alt']).indexOf(param) >= 0 && accept.indexOf(tagName) >= 0 ) {
firstChild[param] = value;
}
// Normal DOM attributes
if ( (['_id', '_class', '_style']).indexOf(param) >= 0 ) {
firstChild.setAttribute(param.replace(/^_/, ''), value);
return;
}
// Set the actual root element property value
if ( param !== 'value' ) {
_setElementProperty();
}
}
/**
* Creates a label for given input element
*
* @param DOMEelement el Element root
* @param String type Input element type
* @param DOMElement input The input element
* @param String label (Optional) Used when updating
*
* @return void
*
* @api OSjs.GUI.Helpers.createInputLabel()
*/
function createInputLabel(el, type, input, label) {
label = label || getLabel(el);
if ( label ) {
var lbl = document.createElement('label');
var span = document.createElement('span');
span.appendChild(document.createTextNode(label));
if ( type === 'checkbox' || type === 'radio' ) {
lbl.appendChild(input);
lbl.appendChild(span);
} else {
lbl.appendChild(span);
lbl.appendChild(input);
}
el.appendChild(lbl);
} else {
el.appendChild(input);
}
}
/**
* Create a new custom DOM element
*
* @param String tagName Tag Name
* @param Object params Dict with data-* properties
* @param Array ignoreParams (optional) list of arguments to ignore
*
* @return DOMElement
*/
function createElement(tagName, params, ignoreParams) {
ignoreParams = ignoreParams || [];
var el = document.createElement(tagName);
var classMap = {
textalign: function(v) {
Utils.$addClass(el, 'gui-align-' + v);
},
className: function(v) {
Utils.$addClass(el, v);
}
};
function getValue(k, value) {
if ( typeof value === 'boolean' ) {
value = value ? 'true' : 'false';
} else if ( typeof value === 'object' ) {
try {
value = JSON.stringify(value);
} catch ( e ) {}
}
return value;
}
if ( typeof params === 'object' ) {
Object.keys(params).forEach(function(k) {
if ( ignoreParams.indexOf(k) >= 0 ) {
return;
}
var value = params[k];
if ( classMap[k] ) {
classMap[k](value);
return;
}
var fvalue = getValue(k, value);
el.setAttribute('data-' + k, fvalue);
});
}
return el;
}
/**
* Sets the flexbox CSS style properties for given container
*
* @param DOMElement el The container
* @param int grow Grow factor
* @param int shrink Shrink factor
* @param String basis (Optional: basis, default=auto)
* @param DOMElement checkel (Optional: take defaults from this node)
*
* @api OSjs.GUI.Helpers.setFlexbox()
*/
function setFlexbox(el, grow, shrink, basis, checkel) {
checkel = checkel || el;
(function() {
if ( typeof basis === 'undefined' || basis === null ) {
basis = checkel.getAttribute('data-basis') || 'auto';
}
})();
(function() {
if ( typeof grow === 'undefined' || grow === null ) {
grow = checkel.getAttribute('data-grow') || 0;
}
})();
(function() {
if ( typeof shrink === 'undefined' || shrink === null ) {
shrink = checkel.getAttribute('data-shrink') || 0;
}
})();
var flex = [grow, shrink];
if ( basis.length ) {
flex.push(basis);
}
var style = flex.join(' ');
el.style['WebkitBoxFlex'] = style;
el.style['MozBoxFlex'] = style;
el.style['WebkitFlex'] = style;
el.style['MozFlex'] = style;
el.style['MSFlex'] = style;
el.style['OFlex'] = style;
el.style['flex'] = style;
var align = el.getAttribute('data-align');
Utils.$removeClass(el, 'gui-flex-align-start');
Utils.$removeClass(el, 'gui-flex-align-end');
if ( align ) {
Utils.$addClass(el, 'gui-flex-align-' + align);
}
}
/**
* Wrapper for creating a draggable container
*
* @param DOMElement el The container
* @param Function onDown On down action callback
* @param Function onMove On move action callback
* @param Function onUp On up action callback
*
* @api OSjs.GUI.Helpers.createDrag()
*/
function createDrag(el, onDown, onMove, onUp) {
onDown = onDown || function() {};
onMove = onMove || function() {};
onUp = onUp || function() {};
var startX, startY, currentX, currentY;
var dragging = false;
function _onMouseDown(ev, pos, touchDevice) {
ev.preventDefault();
startX = pos.x;
startY = pos.y;
onDown(ev, {x: startX, y: startY});
dragging = true;
Utils.$bind(window, 'mouseup', _onMouseUp, false);
Utils.$bind(window, 'mousemove', _onMouseMove, false);
}
function _onMouseMove(ev, pos, touchDevice) {
ev.preventDefault();
if ( dragging ) {
currentX = pos.x;
currentY = pos.y;
var diffX = currentX - startX;
var diffY = currentY - startY;
onMove(ev, {x: diffX, y: diffY}, {x: currentX, y: currentY});
}
}
function _onMouseUp(ev, pos, touchDevice) {
onUp(ev, {x: currentX, y: currentY});
dragging = false;
Utils.$unbind(window, 'mouseup', _onMouseUp, false);
Utils.$unbind(window, 'mousemove', _onMouseMove, false);
}
Utils.$bind(el, 'mousedown', _onMouseDown, false);
}
/////////////////////////////////////////////////////////////////////////////
// INTERNAL HELPERS
/////////////////////////////////////////////////////////////////////////////
/**
* Internal for parsing GUI elements
*/
function parseDynamic(scheme, node, win, args) {
args = args || {};
var translator = args._ || API._;
node.querySelectorAll('*[data-label]').forEach(function(el) {
var label = translator(el.getAttribute('data-label'));
el.setAttribute('data-label', label);
});
node.querySelectorAll('gui-label, gui-button, gui-list-view-column, gui-select-option, gui-select-list-option').forEach(function(el) {
if ( !el.children.length && !el.getAttribute('data-no-translate') ) {
var lbl = getValueLabel(el);
el.appendChild(document.createTextNode(translator(lbl)));
}
});
node.querySelectorAll('gui-button').forEach(function(el) {
var label = getValueLabel(el);
if ( label ) {
el.appendChild(document.createTextNode(API._(label)));
}
});
node.querySelectorAll('*[data-icon]').forEach(function(el) {
var image = getIcon(el, win);
el.setAttribute('data-icon', image);
});
}
/**
* Method for adding children (moving)
*/
function addChildren(frag, root) {
if ( frag ) {
var children = frag.children;
var i = 0;
while ( children.length && i < 10000 ) {
root.appendChild(children[0]);
i++;
}
}
}
/**
* Makes sure "include" fragments are rendered correctly
*/
function resolveFragments(scheme, node, el) {
function _resolve() {
var nodes = node.querySelectorAll('gui-fragment');
if ( nodes.length ) {
nodes.forEach(function(el) {
var id = el.getAttribute('data-fragment-id');
var frag = scheme.getFragment(id, 'application-fragment');
addChildren(frag, el.parentNode);
Utils.$remove(el);
});
return true;
}
return false;
}
var resolving = true;
while ( resolving ) {
resolving = _resolve();
}
}
/////////////////////////////////////////////////////////////////////////////
// UIELEMENT CLASS
/////////////////////////////////////////////////////////////////////////////
/**
* Base UIElement Class
*
* @api OSjs.GUI.UIElement()
* @class
*/
function UIElement(el, q) {
this.$element = el || null;
this.tagName = el ? el.tagName.toLowerCase() : null;
this.oldDisplay = null;
if ( !el ) {
console.error('UIElement() was constructed without a DOM element', q);
}
}
UIElement.prototype.blur = function() {
// TODO: For more elements
if ( this.$element ) {
var firstChild = this.$element.querySelector('input');
if ( firstChild ) {
firstChild.blur();
}
}
return this;
};
UIElement.prototype.focus = function() {
// TODO: For more elements
if ( this.$element ) {
var firstChild = this.$element.firstChild || this.$element; //this.$element.querySelector('input');
if ( firstChild ) {
firstChild.focus();
}
}
return this;
};
UIElement.prototype.show = function() {
if ( OSjs.GUI.Elements[this.tagName] && OSjs.GUI.Elements[this.tagName].show ) {
OSjs.GUI.Elements[this.tagName].show.apply(this, arguments);
} else {
if ( this.$element ) {
this.$element.style.display = this.oldDisplay || '';
}
}
return this;
};
UIElement.prototype.hide = function() {
if ( this.$element ) {
if ( !this.oldDisplay ) {
this.oldDisplay = this.$element.style.display;
}
this.$element.style.display = 'none';
}
return this;
};
UIElement.prototype.on = function(evName, callback, args) {
if ( OSjs.GUI.Elements[this.tagName] && OSjs.GUI.Elements[this.tagName].bind ) {
OSjs.GUI.Elements[this.tagName].bind(this.$element, evName, callback, args);
}
return this;
};
UIElement.prototype.set = function(param, value, arg) {
if ( this.$element ) {
if ( OSjs.GUI.Elements[this.tagName] && OSjs.GUI.Elements[this.tagName].set ) {
if ( OSjs.GUI.Elements[this.tagName].set(this.$element, param, value, arg) === true ) {
return this;
}
}
setProperty(this.$element, param, value, arg);
}
return this;
};
UIElement.prototype.get = function() {
if ( this.$element ) {
if ( OSjs.GUI.Elements[this.tagName] && OSjs.GUI.Elements[this.tagName].get ) {
var args = ([this.$element]).concat(Array.prototype.slice.call(arguments));
return OSjs.GUI.Elements[this.tagName].get.apply(this, args);
} else {
return getProperty(this.$element, arguments[0]);
}
}
return null;
};
UIElement.prototype.append = function(el) {
if ( el instanceof UIElement ) {
el = el.$element;
}
this.$element.appendChild(el);
};
UIElement.prototype.querySelector = function(q) {
return this.$element.querySelector(q);
};
UIElement.prototype.querySelectorAll = function(q) {
return this.$element.querySelectorAll(q);
};
UIElement.prototype._call = function(method, args) {
if ( OSjs.GUI.Elements[this.tagName] && OSjs.GUI.Elements[this.tagName].call ) {
var cargs = ([this.$element, method, args]);//.concat(args);
return OSjs.GUI.Elements[this.tagName].call.apply(this, cargs);
}
return null;//this;
};
/**
* Extended UIElement for ListView, TreeView, IconView, Select, SelectList
* @extends UIElement
* @api OSjs.GUI.UIElementDataView()
* @class
*/
function UIElementDataView() {
UIElement.apply(this, arguments);
}
UIElementDataView.prototype = Object.create(UIElement.prototype);
UIElementDataView.constructor = UIElement;
UIElementDataView.prototype.clear = function() {
return this._call('clear', []);
};
UIElementDataView.prototype.add = function(props) {
return this._call('add', [props]);
};
UIElementDataView.prototype.patch = function(props) {
return this._call('patch', [props]);
};
UIElementDataView.prototype.remove = function(id, key) {
return this._call('remove', [id, key]);
};
/////////////////////////////////////////////////////////////////////////////
// UISCHEME CLASS
/////////////////////////////////////////////////////////////////////////////
/**
* The class for loading and parsing UI Schemes
*
* @api OSjs.GUI.UIScheme
*
* @class
*/
function UIScheme(url) {
this.url = url;
this.scheme = null;
}
UIScheme.prototype.load = function(cb) {
var self = this;
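    // Expand self-closing tags (e.g. <gui-image/>) into explicit open/close
    // pairs, since the HTML parser does not treat self-closing custom
    // elements as closed when fed through innerHTML.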
function removeSelfClosingTags(html) {
var split = html.split('/>');
var newhtml = '';
      for (var i = 0; i < split.length - 1; i++) {
        var edsplit = split[i].split('<');
        newhtml += split[i] + '></' + edsplit[edsplit.length - 1].split(' ')[0] + '>';
      }
      return newhtml + split[split.length - 1];
}
function finished(html) {
var doc = document.createDocumentFragment();
var wrapper = document.createElement('div');
wrapper.innerHTML = Utils.cleanHTML(removeSelfClosingTags(html));
doc.appendChild(wrapper);
self.scheme = doc;
cb(false, doc);
}
if ( window.location.protocol.match(/^file/) ) {
var url = this.url;
if ( !url.match(/^\//) ) {
url = '/' + url;
}
finished(OSjs.API.getDefaultSchemes(url));
return;
}
Utils.ajax({
url: this.url,
onsuccess: function(html) {
finished(html);
},
onerror: function() {
cb('Failed to fetch scheme');
}
});
};
UIScheme.prototype.getFragment = function(id, type) {
var content = null;
if ( id ) {
if ( type ) {
content = this.scheme.querySelector(type + '[data-id="' + id + '"]');
} else {
content = this.scheme.querySelector('application-window[data-id="' + id + '"]') ||
this.scheme.querySelector('application-fragment[data-id="' + id + '"]');
}
}
return content;
};
UIScheme.prototype.parse = function(id, type, win, onparse, args) {
var self = this;
var content = this.getFragment(id, type);
if ( !content ) {
console.error('UIScheme::parse()', 'No fragment found', id, type);
return null;
}
type = type || content.tagName.toLowerCase();
onparse = onparse || function() {};
args = args || {};
if ( content ) {
var node = content.cloneNode(true);
// Resolve fragment includes before dynamic rendering
resolveFragments(this, node);
// Apply a default className to non-containers
node.querySelectorAll('*').forEach(function(el) {
var lcase = el.tagName.toLowerCase();
if ( lcase.match(/^gui\-/) && !lcase.match(/(\-container|\-(h|v)box|\-columns?|\-rows?|(status|tool)bar|(button|menu)\-bar|bar\-entry)$/) ) {
Utils.$addClass(el, 'gui-element');
}
});
// Go ahead and parse dynamic elements (like labels)
parseDynamic(this, node, win, args);
// Lastly render elements
onparse(node);
Object.keys(OSjs.GUI.Elements).forEach(function(key) {
node.querySelectorAll(key).forEach(function(pel) {
if ( pel._wasParsed ) {
return;
}
try {
OSjs.GUI.Elements[key].build(pel);
} catch ( e ) {
console.warn('UIScheme::parse()', id, type, win, 'exception');
console.warn(e, e.stack);
}
pel._wasParsed = true;
});
});
return node;
}
return null;
};
UIScheme.prototype.render = function(win, id, root, type, onparse, args) {
root = root || win._getRoot();
if ( root instanceof UIElement ) {
root = root.$element;
}
var content = this.parse(id, type, win, onparse, args);
addChildren(content, root);
};
UIScheme.prototype.create = function(win, tagName, params, parentNode, applyArgs) {
tagName = tagName || '';
params = params || {};
parentNode = parentNode || win.getRoot();
var el = createElement(tagName, params);
parentNode.appendChild(el);
OSjs.GUI.Elements[tagName].build(el, applyArgs, win);
return new UIElement(el);
};
UIScheme.prototype.find = function(win, id, root) {
root = root || win._getRoot();
var q = '[data-id="' + id + '"]';
return this.get(root.querySelector(q), q);
};
UIScheme.prototype.get = function(el, q) {
if ( el ) {
var tagName = el.tagName.toLowerCase();
if ( tagName.match(/^gui\-(list|tree|icon|file)\-view$/) || tagName.match(/^gui\-select/) ) {
return new UIElementDataView(el, q);
}
}
return new UIElement(el, q);
};
/////////////////////////////////////////////////////////////////////////////
// EXPORTS
/////////////////////////////////////////////////////////////////////////////
OSjs.GUI.Element = UIElement;
OSjs.GUI.ElementDataView = UIElementDataView;
OSjs.GUI.Scheme = UIScheme;
OSjs.GUI.Helpers = {
getProperty: getProperty,
getValueLabel: getValueLabel,
getViewNodeValue: getViewNodeValue,
getLabel: getLabel,
getIcon: getIcon,
getWindowId: getWindowId,
createInputLabel: createInputLabel,
createElement: createElement,
createDrag: createDrag,
setProperty: setProperty,
setFlexbox: setFlexbox
};
/**
* Shortcut for creating a new UIScheme class
*
* @param String url URL to scheme file
* @return UIScheme
* @api OSjs.GUI.createScheme()
*/
OSjs.GUI.createScheme = function(url) {
return new UIScheme(url);
};
})(OSjs.API, OSjs.Utils, OSjs.VFS);
| src/javascript/gui.js | /*!
* OS.js - JavaScript Operating System
*
* Copyright (c) 2011-2015, Anders Evenrud <[email protected]>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* @author Anders Evenrud <[email protected]>
* @licence Simplified BSD License
*/
(function(API, Utils, VFS) {
'use strict';
window.OSjs = window.OSjs || {};
OSjs.API = OSjs.API || {};
OSjs.GUI = OSjs.GUI || {};
OSjs.GUI.Elements = OSjs.GUI.Elements || {};
/////////////////////////////////////////////////////////////////////////////
// HELPERS
/////////////////////////////////////////////////////////////////////////////
/**
* Gets window id from upper parent element
*
* @param DOMElement el Child element (can be anything)
*
* @return int
*
* @api OSjs.GUI.Helpers.getWindowId()
*/
function getWindowId(el) {
while ( el.parentNode ) {
var attr = el.getAttribute('data-window-id');
if ( attr !== null ) {
return parseInt(attr, 10);
}
el = el.parentNode;
}
return null;
}
/**
* Gets "label" from a node
*
* @param DOMElement el The element
*
* @return String
*
* @api OSjs.GUI.Helpers.getLabel()
*/
function getLabel(el) {
var label = el.getAttribute('data-label');
return label || '';
}
/**
* Gets "label" from a node (Where it can be innerHTML and parameter)
*
* @param DOMElement el The element
*
* @return String
*
* @api OSjs.GUI.Helpers.getValueLabel()
*/
function getValueLabel(el, attr) {
var label = attr ? el.getAttribute('data-label') : null;
if ( el.childNodes.length && el.childNodes[0].nodeType === 3 && el.childNodes[0].nodeValue ) {
label = el.childNodes[0].nodeValue;
Utils.$empty(el);
}
return label || '';
}
/**
* Gets "value" from a node
*
* @param DOMElement el The element
*
* @return String
*
* @api OSjs.GUI.Helpers.getViewNodeValue()
*/
function getViewNodeValue(el) {
var value = el.getAttribute('data-value');
if ( typeof value === 'string' && value.match(/^\[|\{/) ) {
try {
value = JSON.parse(value);
} catch ( e ) {
value = null;
}
}
return value;
}
/**
* Internal for getting
*
* @param DOMElement el Element
* @param OSjs.Core.Window win (optional) Window Reference
*
* @return String
*
* @api OSjs.GUI.Helpers.getIcon()
*/
function getIcon(el, win) {
var image = el.getAttribute('data-icon');
if ( image ) {
if ( image.match(/^stock:\/\//) ) {
image = image.replace('stock://', '');
var size = '16x16';
try {
var spl = image.split('/');
var tmp = spl.shift();
var siz = tmp.match(/^\d+x\d+/);
if ( siz ) {
size = siz[0];
image = spl.join('/');
}
image = API.getIcon(image, size);
} catch ( e ) {}
} else if ( image.match(/^app:\/\//) ) {
image = API.getApplicationResource(win._app, image.replace('app://', ''));
}
}
return image;
}
/**
* Wrapper for getting custom dom element property value
*
* @param DOMElement el Element
* @param String param Parameter name
* @param String tagName (Optional) What tagname is in use? Automatic
*
* @api OSjs.GUI.Helpers.getProperty()
*
* @return Mixed
*/
function getProperty(el, param, tagName) {
tagName = tagName || el.tagName.toLowerCase();
var isDataView = tagName.match(/^gui\-(tree|icon|list|file)\-view$/);
if ( param === 'value' && !isDataView) {
if ( (['gui-text', 'gui-password', 'gui-textarea', 'gui-slider', 'gui-select', 'gui-select-list']).indexOf(tagName) >= 0 ) {
return el.querySelector('input, textarea, select').value;
}
if ( (['gui-checkbox', 'gui-radio', 'gui-switch']).indexOf(tagName) >= 0 ) {
return !!el.querySelector('input').checked;
//return el.querySelector('input').value === 'on';
}
return null;
}
if ( (param === 'value' || param === 'selected') && isDataView ) {
return OSjs.GUI.Elements[tagName].values(el);
}
return el.getAttribute('data-' + param);
}
/**
* Wrapper for setting custom dom element property value
*
* @param DOMElement el Element
* @param String param Parameter name
* @param Mixed value Parameter value
* @param String tagName (Optional) What tagname is in use? Automatic
*
* @api OSjs.GUI.Helpers.setProperty()
*
* @return void
*/
function setProperty(el, param, value, tagName) {
tagName = tagName || el.tagName.toLowerCase();
function _setInputProperty() {
var firstChild = el.querySelector('textarea, input, select, button');
if ( param === 'value' ) {
if ( tagName === 'gui-radio' || tagName === 'gui-checkbox' ) {
if ( value ) {
firstChild.setAttribute('checked', 'checked');
} else {
firstChild.removeAttribute('checked');
}
}
firstChild.value = value;
return;
} else if ( param === 'disabled' ) {
if ( value ) {
firstChild.setAttribute('disabled', 'disabled');
} else {
firstChild.removeAttribute('disabled');
}
return;
}
firstChild.setAttribute(param, value || '');
}
function _setElementProperty() {
if ( typeof value === 'boolean' ) {
value = value ? 'true' : 'false';
} else if ( typeof value === 'object' ) {
try {
value = JSON.stringify(value);
} catch ( e ) {}
}
el.setAttribute('data-' + param, value);
}
function _createInputLabel() {
if ( param === 'label' ) {
var firstChild = el.querySelector('textarea, input, select');
el.appendChild(firstChild);
Utils.$remove(el.querySelector('label'));
createInputLabel(el, tagName.replace(/^gui\-/, ''), firstChild, value);
}
}
// Generics for input elements
var firstChild = el.children[0];
var accept = ['gui-slider', 'gui-text', 'gui-password', 'gui-textarea', 'gui-checkbox', 'gui-radio', 'gui-select', 'gui-select-list', 'gui-button'];
if ( accept.indexOf(tagName) >= 0 ) {
_setInputProperty();
_createInputLabel();
}
// Other types of elements
accept = ['gui-image', 'gui-audio', 'gui-video'];
if ( (['src', 'controls', 'autoplay', 'alt']).indexOf(param) >= 0 && accept.indexOf(tagName) >= 0 ) {
firstChild[param] = value;
}
// Normal DOM attributes
if ( (['_id', '_class', '_style']).indexOf(param) >= 0 ) {
firstChild.setAttribute(param.replace(/^_/, ''), value);
return;
}
// Set the actual root element property value
if ( param !== 'value' ) {
_setElementProperty();
}
}
/**
* Creates a label for given input element
*
* @param DOMEelement el Element root
* @param String type Input element type
* @param DOMElement input The input element
* @param String label (Optional) Used when updating
*
* @return void
*
* @api OSjs.GUI.Helpers.createInputLabel()
*/
function createInputLabel(el, type, input, label) {
label = label || getLabel(el);
if ( label ) {
var lbl = document.createElement('label');
var span = document.createElement('span');
span.appendChild(document.createTextNode(label));
if ( type === 'checkbox' || type === 'radio' ) {
lbl.appendChild(input);
lbl.appendChild(span);
} else {
lbl.appendChild(span);
lbl.appendChild(input);
}
el.appendChild(lbl);
} else {
el.appendChild(input);
}
}
/**
* Create a new custom DOM element
*
* @param String tagName Tag Name
* @param Object params Dict with data-* properties
* @param Array ignoreParams (optional) list of arguments to ignore
*
* @return DOMElement
*/
function createElement(tagName, params, ignoreParams) {
ignoreParams = ignoreParams || [];
var el = document.createElement(tagName);
var classMap = {
textalign: function(v) {
Utils.$addClass(el, 'gui-align-' + v);
},
className: function(v) {
Utils.$addClass(el, v);
}
};
function getValue(k, value) {
if ( typeof value === 'boolean' ) {
value = value ? 'true' : 'false';
} else if ( typeof value === 'object' ) {
try {
value = JSON.stringify(value);
} catch ( e ) {}
}
return value;
}
if ( typeof params === 'object' ) {
Object.keys(params).forEach(function(k) {
if ( ignoreParams.indexOf(k) >= 0 ) {
return;
}
var value = params[k];
if ( classMap[k] ) {
classMap[k](value);
return;
}
var fvalue = getValue(k, value);
el.setAttribute('data-' + k, fvalue);
});
}
return el;
}
/**
* Sets the flexbox CSS style properties for given container
*
* @param DOMElement el The container
* @param int grow Grow factor
* @param int shrink Shrink factor
* @param String basis (Optional: basis, default=auto)
* @param DOMElement checkel (Optional: take defaults from this node)
*
* @api OSjs.GUI.Helpers.setFlexbox()
*/
function setFlexbox(el, grow, shrink, basis, checkel) {
checkel = checkel || el;
(function() {
if ( typeof basis === 'undefined' || basis === null ) {
basis = checkel.getAttribute('data-basis') || 'auto';
}
})();
(function() {
if ( typeof grow === 'undefined' || grow === null ) {
grow = checkel.getAttribute('data-grow') || 0;
}
})();
(function() {
if ( typeof shrink === 'undefined' || shrink === null ) {
shrink = checkel.getAttribute('data-shrink') || 0;
}
})();
var flex = [grow, shrink];
if ( basis.length ) {
flex.push(basis);
}
var style = flex.join(' ');
el.style['WebkitBoxFlex'] = style;
el.style['MozBoxFlex'] = style;
el.style['WebkitFlex'] = style;
el.style['MozFlex'] = style;
el.style['MSFlex'] = style;
el.style['OFlex'] = style;
el.style['flex'] = style;
var align = el.getAttribute('data-align');
Utils.$removeClass(el, 'gui-flex-align-start');
Utils.$removeClass(el, 'gui-flex-align-end');
if ( align ) {
Utils.$addClass(el, 'gui-flex-align-' + align);
}
}
/**
* Wrapper for creating a draggable container
*
* @param DOMElement el The container
* @param Function onDown On down action callback
* @param Function onMove On move action callback
* @param Function onUp On up action callback
*
* @api OSjs.GUI.Helpers.createDrag()
*/
function createDrag(el, onDown, onMove, onUp) {
onDown = onDown || function() {};
onMove = onMove || function() {};
onUp = onUp || function() {};
var startX, startY, currentX, currentY;
var dragging = false;
function _onMouseDown(ev, pos, touchDevice) {
ev.preventDefault();
startX = pos.x;
startY = pos.y;
onDown(ev, {x: startX, y: startY});
dragging = true;
Utils.$bind(window, 'mouseup', _onMouseUp, false);
Utils.$bind(window, 'mousemove', _onMouseMove, false);
}
function _onMouseMove(ev, pos, touchDevice) {
ev.preventDefault();
if ( dragging ) {
currentX = pos.x;
currentY = pos.y;
var diffX = currentX - startX;
var diffY = currentY - startY;
onMove(ev, {x: diffX, y: diffY}, {x: currentX, y: currentY});
}
}
function _onMouseUp(ev, pos, touchDevice) {
onUp(ev, {x: currentX, y: currentY});
dragging = false;
Utils.$unbind(window, 'mouseup', _onMouseUp, false);
Utils.$unbind(window, 'mousemove', _onMouseMove, false);
}
Utils.$bind(el, 'mousedown', _onMouseDown, false);
}
/////////////////////////////////////////////////////////////////////////////
// INTERNAL HELPERS
/////////////////////////////////////////////////////////////////////////////
/**
* Internal for parsing GUI elements
*/
function parseDynamic(scheme, node, win, args) {
args = args || {};
var translator = args._ || API._;
node.querySelectorAll('*[data-label]').forEach(function(el) {
var label = translator(el.getAttribute('data-label'));
el.setAttribute('data-label', label);
});
node.querySelectorAll('gui-label, gui-button, gui-list-view-column, gui-select-option, gui-select-list-option').forEach(function(el) {
if ( !el.children.length && !el.getAttribute('data-no-translate') ) {
var lbl = getValueLabel(el);
el.appendChild(document.createTextNode(translator(lbl)));
}
});
node.querySelectorAll('gui-button').forEach(function(el) {
var label = getValueLabel(el);
if ( label ) {
el.appendChild(document.createTextNode(API._(label)));
}
});
node.querySelectorAll('*[data-icon]').forEach(function(el) {
var image = getIcon(el, win);
el.setAttribute('data-icon', image);
});
}
/**
* Method for adding children (moving)
*/
function addChildren(frag, root) {
if ( frag ) {
var children = frag.children;
var i = 0;
while ( children.length && i < 10000 ) {
root.appendChild(children[0]);
i++;
}
}
}
/**
* Makes sure "include" fragments are rendered correctly
*/
function resolveFragments(scheme, node, el) {
function _resolve() {
var nodes = node.querySelectorAll('gui-fragment');
if ( nodes.length ) {
nodes.forEach(function(el) {
var id = el.getAttribute('data-fragment-id');
var frag = scheme.getFragment(id, 'application-fragment');
addChildren(frag, el.parentNode);
Utils.$remove(el);
});
return true;
}
return false;
}
var resolving = true;
while ( resolving ) {
resolving = _resolve();
}
}
/////////////////////////////////////////////////////////////////////////////
// UIELEMENT CLASS
/////////////////////////////////////////////////////////////////////////////
/**
* Base UIElement Class
*
* @api OSjs.GUI.UIElement()
* @class
*/
function UIElement(el, q) {
this.$element = el || null;
this.tagName = el ? el.tagName.toLowerCase() : null;
this.oldDisplay = null;
if ( !el ) {
console.error('UIElement() was constructed without a DOM element', q);
}
}
UIElement.prototype.blur = function() {
// TODO: For more elements
if ( this.$element ) {
var firstChild = this.$element.querySelector('input');
if ( firstChild ) {
firstChild.blur();
}
}
return this;
};
UIElement.prototype.focus = function() {
// TODO: For more elements
if ( this.$element ) {
var firstChild = this.$element.firstChild || this.$element; //this.$element.querySelector('input');
if ( firstChild ) {
firstChild.focus();
}
}
return this;
};
UIElement.prototype.show = function() {
if ( OSjs.GUI.Elements[this.tagName] && OSjs.GUI.Elements[this.tagName].show ) {
OSjs.GUI.Elements[this.tagName].show.apply(this, arguments);
} else {
if ( this.$element ) {
this.$element.style.display = this.oldDisplay || '';
}
}
return this;
};
UIElement.prototype.hide = function() {
if ( this.$element ) {
if ( !this.oldDisplay ) {
this.oldDisplay = this.$element.style.display;
}
this.$element.style.display = 'none';
}
return this;
};
UIElement.prototype.on = function(evName, callback, args) {
if ( OSjs.GUI.Elements[this.tagName] && OSjs.GUI.Elements[this.tagName].bind ) {
OSjs.GUI.Elements[this.tagName].bind(this.$element, evName, callback, args);
}
return this;
};
UIElement.prototype.set = function(param, value, arg) {
if ( this.$element ) {
if ( OSjs.GUI.Elements[this.tagName] && OSjs.GUI.Elements[this.tagName].set ) {
if ( OSjs.GUI.Elements[this.tagName].set(this.$element, param, value, arg) === true ) {
return this;
}
}
setProperty(this.$element, param, value, arg);
}
return this;
};
UIElement.prototype.get = function() {
if ( this.$element ) {
if ( OSjs.GUI.Elements[this.tagName] && OSjs.GUI.Elements[this.tagName].get ) {
var args = ([this.$element]).concat(Array.prototype.slice.call(arguments));
return OSjs.GUI.Elements[this.tagName].get.apply(this, args);
} else {
return getProperty(this.$element, arguments[0]);
}
}
return null;
};
UIElement.prototype.append = function(el) {
if ( el instanceof UIElement ) {
el = el.$element;
}
this.$element.appendChild(el);
};
UIElement.prototype.querySelector = function(q) {
return this.$element.querySelector(q);
};
UIElement.prototype.querySelectorAll = function(q) {
return this.$element.querySelectorAll(q);
};
UIElement.prototype._call = function(method, args) {
if ( OSjs.GUI.Elements[this.tagName] && OSjs.GUI.Elements[this.tagName].call ) {
var cargs = ([this.$element, method, args]);//.concat(args);
return OSjs.GUI.Elements[this.tagName].call.apply(this, cargs);
}
return null;//this;
};
/**
* Extended UIElement for ListView, TreeView, IconView, Select, SelectList
* @extends UIElement
* @api OSjs.GUI.UIElementDataView()
* @class
*/
function UIElementDataView() {
UIElement.apply(this, arguments);
}
UIElementDataView.prototype = Object.create(UIElement.prototype);
UIElementDataView.constructor = UIElement;
UIElementDataView.prototype.clear = function() {
return this._call('clear', []);
};
UIElementDataView.prototype.add = function(props) {
return this._call('add', [props]);
};
UIElementDataView.prototype.patch = function(props) {
return this._call('patch', [props]);
};
UIElementDataView.prototype.remove = function(id, key) {
return this._call('remove', [id, key]);
};
/////////////////////////////////////////////////////////////////////////////
// UISCHEME CLASS
/////////////////////////////////////////////////////////////////////////////
/**
* The class for loading and parsing UI Schemes
*
* @api OSjs.GUI.UIScheme
*
* @class
*/
function UIScheme(url) {
this.url = url;
this.scheme = null;
}
UIScheme.prototype.load = function(cb) {
var self = this;
function removeSelfClosingTags(html) {
var split = html.split('/>');
var newhtml = '';
      for (var i = 0; i < split.length - 1; i++) {
        var edsplit = split[i].split('<');
        newhtml += split[i] + '></' + edsplit[edsplit.length - 1].split(' ')[0] + '>';
      }
      return newhtml + split[split.length - 1];
}
function finished(html) {
var doc = document.createDocumentFragment();
var wrapper = document.createElement('div');
wrapper.innerHTML = Utils.cleanHTML(removeSelfClosingTags(html));
doc.appendChild(wrapper);
self.scheme = doc;
cb(false, doc);
}
if ( window.location.protocol.match(/^file/) ) {
finished(OSjs.API.getDefaultSchemes(this.url));
return;
}
Utils.ajax({
url: this.url,
onsuccess: function(html) {
finished(html);
},
onerror: function() {
cb('Failed to fetch scheme');
}
});
};
UIScheme.prototype.getFragment = function(id, type) {
var content = null;
if ( id ) {
if ( type ) {
content = this.scheme.querySelector(type + '[data-id="' + id + '"]');
} else {
content = this.scheme.querySelector('application-window[data-id="' + id + '"]') ||
this.scheme.querySelector('application-fragment[data-id="' + id + '"]');
}
}
return content;
};
UIScheme.prototype.parse = function(id, type, win, onparse, args) {
var self = this;
var content = this.getFragment(id, type);
if ( !content ) {
console.error('UIScheme::parse()', 'No fragment found', id, type);
return null;
}
type = type || content.tagName.toLowerCase();
onparse = onparse || function() {};
args = args || {};
if ( content ) {
var node = content.cloneNode(true);
// Resolve fragment includes before dynamic rendering
resolveFragments(this, node);
// Apply a default className to non-containers
node.querySelectorAll('*').forEach(function(el) {
var lcase = el.tagName.toLowerCase();
if ( lcase.match(/^gui\-/) && !lcase.match(/(\-container|\-(h|v)box|\-columns?|\-rows?|(status|tool)bar|(button|menu)\-bar|bar\-entry)$/) ) {
Utils.$addClass(el, 'gui-element');
}
});
// Go ahead and parse dynamic elements (like labels)
parseDynamic(this, node, win, args);
// Lastly render elements
onparse(node);
Object.keys(OSjs.GUI.Elements).forEach(function(key) {
node.querySelectorAll(key).forEach(function(pel) {
if ( pel._wasParsed ) {
return;
}
try {
OSjs.GUI.Elements[key].build(pel);
} catch ( e ) {
console.warn('UIScheme::parse()', id, type, win, 'exception');
console.warn(e, e.stack);
}
pel._wasParsed = true;
});
});
return node;
}
return null;
};
UIScheme.prototype.render = function(win, id, root, type, onparse, args) {
root = root || win._getRoot();
if ( root instanceof UIElement ) {
root = root.$element;
}
var content = this.parse(id, type, win, onparse, args);
addChildren(content, root);
};
UIScheme.prototype.create = function(win, tagName, params, parentNode, applyArgs) {
tagName = tagName || '';
params = params || {};
parentNode = parentNode || win.getRoot();
var el = createElement(tagName, params);
parentNode.appendChild(el);
OSjs.GUI.Elements[tagName].build(el, applyArgs, win);
return new UIElement(el);
};
UIScheme.prototype.find = function(win, id, root) {
root = root || win._getRoot();
var q = '[data-id="' + id + '"]';
return this.get(root.querySelector(q), q);
};
UIScheme.prototype.get = function(el, q) {
if ( el ) {
var tagName = el.tagName.toLowerCase();
if ( tagName.match(/^gui\-(list|tree|icon|file)\-view$/) || tagName.match(/^gui\-select/) ) {
return new UIElementDataView(el, q);
}
}
return new UIElement(el, q);
};
/////////////////////////////////////////////////////////////////////////////
// EXPORTS
/////////////////////////////////////////////////////////////////////////////
OSjs.GUI.Element = UIElement;
OSjs.GUI.ElementDataView = UIElementDataView;
OSjs.GUI.Scheme = UIScheme;
OSjs.GUI.Helpers = {
getProperty: getProperty,
getValueLabel: getValueLabel,
getViewNodeValue: getViewNodeValue,
getLabel: getLabel,
getIcon: getIcon,
getWindowId: getWindowId,
createInputLabel: createInputLabel,
createElement: createElement,
createDrag: createDrag,
setProperty: setProperty,
setFlexbox: setFlexbox
};
/**
* Shortcut for creating a new UIScheme class
*
* @param String url URL to scheme file
* @return UIScheme
* @api OSjs.GUI.createScheme()
*/
OSjs.GUI.createScheme = function(url) {
return new UIScheme(url);
};
})(OSjs.API, OSjs.Utils, OSjs.VFS);
| UI: Prepend slash in schemes storage
| src/javascript/gui.js | UI: Prepend slash in schemes storage | <ide><path>rc/javascript/gui.js
<ide> }
<ide>
<ide> if ( window.location.protocol.match(/^file/) ) {
<del> finished(OSjs.API.getDefaultSchemes(this.url));
<add> var url = this.url;
<add> if ( !url.match(/^\//) ) {
<add> url = '/' + url;
<add> }
<add> finished(OSjs.API.getDefaultSchemes(url));
<ide> return;
<ide> }
<ide> |
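// --- Sketch of the normalization the fix above performs, rendered in Java. ---
// The helper and class names are illustrative; the assumption (not shown in the
// diff) is that the preloaded scheme table is keyed by slash-rooted paths, so a
// relative URL must gain a leading "/" before the getDefaultSchemes lookup.
final class SchemeUrlSketch {
	static String normalize(String url) {
		return url.startsWith("/") ? url : "/" + url;
	}
	public static void main(String[] args) {
		System.out.println(normalize("dialogs.html"));  // -> /dialogs.html
		System.out.println(normalize("/dialogs.html")); // already rooted, unchanged
	}
}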
|
Java | apache-2.0 | 1b82b3c65b81c428ec88826aa07577d086320752 | 0 | blindpirate/gradle,blindpirate/gradle,gradle/gradle,robinverduijn/gradle,lsmaira/gradle,lsmaira/gradle,lsmaira/gradle,blindpirate/gradle,blindpirate/gradle,gstevey/gradle,gradle/gradle,robinverduijn/gradle,gradle/gradle,gstevey/gradle,blindpirate/gradle,gstevey/gradle,robinverduijn/gradle,gradle/gradle,gradle/gradle,gradle/gradle,lsmaira/gradle,lsmaira/gradle,robinverduijn/gradle,gradle/gradle,blindpirate/gradle,robinverduijn/gradle,robinverduijn/gradle,gradle/gradle,blindpirate/gradle,robinverduijn/gradle,lsmaira/gradle,gstevey/gradle,robinverduijn/gradle,robinverduijn/gradle,blindpirate/gradle,robinverduijn/gradle,lsmaira/gradle,gstevey/gradle,gstevey/gradle,gradle/gradle,blindpirate/gradle,robinverduijn/gradle,lsmaira/gradle,gstevey/gradle,gstevey/gradle,blindpirate/gradle,gradle/gradle,lsmaira/gradle,gstevey/gradle,lsmaira/gradle | /*
* Copyright 2013 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.gradle.model.internal.core;
public class ModelPath {
public static final String SEPARATOR = ".";
private final String path;
public ModelPath(String path) {
this.path = path;
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
ModelPath modelPath = (ModelPath) o;
return path.equals(modelPath.path);
}
@Override
public int hashCode() {
return path.hashCode();
}
@Override
public String toString() {
return path;
}
public static ModelPath path(String path) {
return new ModelPath(path);
}
public ModelPath child(String child) {
return path(path + SEPARATOR + child);
}
public ModelPath getParent() {
int lastIndex = path.lastIndexOf(SEPARATOR);
if (lastIndex == -1) {
return null;
} else {
return path(path.substring(0, lastIndex));
}
}
public String getName() {
int lastIndex = path.lastIndexOf(SEPARATOR);
if (lastIndex == -1) {
return path;
} else {
return path.substring(lastIndex + 1);
}
}
public boolean isDirectChild(ModelPath other) {
ModelPath otherParent = other.getParent();
return otherParent != null && otherParent.equals(this);
}
}
| subprojects/model-core/src/main/java/org/gradle/model/internal/core/ModelPath.java | /*
* Copyright 2013 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.gradle.model.internal.core;
import org.gradle.api.Incubating;
@Incubating
public class ModelPath {
public static final String SEPARATOR = ".";
private final String path;
public ModelPath(String path) {
this.path = path;
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
ModelPath modelPath = (ModelPath) o;
if (!path.equals(modelPath.path)) {
return false;
}
return true;
}
@Override
public int hashCode() {
return path.hashCode();
}
@Override
public String toString() {
return path;
}
public static ModelPath path(String path) {
return new ModelPath(path);
}
public ModelPath child(String child) {
return path(path + SEPARATOR + child);
}
public ModelPath getParent() {
int lastIndex = path.lastIndexOf(SEPARATOR);
if (lastIndex == -1) {
return null;
} else {
return path(path.substring(0, lastIndex));
}
}
public String getName() {
int lastIndex = path.lastIndexOf(SEPARATOR);
if (lastIndex == -1) {
return path;
} else {
return path.substring(lastIndex + 1);
}
}
public boolean isDirectChild(ModelPath other) {
ModelPath otherParent = other.getParent();
return otherParent == null ? false : otherParent.equals(this);
}
}
| minor simplification.
| subprojects/model-core/src/main/java/org/gradle/model/internal/core/ModelPath.java | minor simplification. | <ide><path>ubprojects/model-core/src/main/java/org/gradle/model/internal/core/ModelPath.java
<ide>
<ide> package org.gradle.model.internal.core;
<ide>
<del>import org.gradle.api.Incubating;
<del>
<del>@Incubating
<ide> public class ModelPath {
<ide>
<ide> public static final String SEPARATOR = ".";
<ide>
<ide> ModelPath modelPath = (ModelPath) o;
<ide>
<del> if (!path.equals(modelPath.path)) {
<del> return false;
<del> }
<del>
<del> return true;
<add> return path.equals(modelPath.path);
<ide> }
<ide>
<ide> @Override
<ide>
<ide> public boolean isDirectChild(ModelPath other) {
<ide> ModelPath otherParent = other.getParent();
<del> return otherParent == null ? false : otherParent.equals(this);
<add> return otherParent != null && otherParent.equals(this);
<ide> }
<ide> } |
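// --- Why the rewrite above is safe: both forms agree for every input. ---
// Throwaway check, not part of the commit; names are illustrative.
final class TernarySimplificationSketch {
	static boolean agree(Object parent, Object self) {
		boolean ternary = parent == null ? false : parent.equals(self);
		boolean shortCircuit = parent != null && parent.equals(self); // skips equals() on null
		return ternary == shortCircuit; // true for any parent/self pair
	}
}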
|
Java | apache-2.0 | 050dffc2996624287c03921b084a1139b0488614 | 0 | ox-it/ords-database-structure-api | /*
* Copyright 2015 University of Oxford
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.ac.ox.it.ords.api.database.structure.permissions;
import java.util.ArrayList;
import java.util.List;
public class DatabaseStructurePermissionSets {
public static List<String> getPermissionsForAnonymous(){
ArrayList<String> permissions = new ArrayList<String>();
permissions.add(DatabaseStructurePermissions.DATABASE_VIEW_PUBLIC);
return permissions;
}
public static List<String> getPermissionsForUser(){
List<String> permissions = getPermissionsForAnonymous();
return permissions;
}
public static List<String> getPermissionsForLocalUser(){
List<String> permissions = getPermissionsForAnonymous();
permissions.add(DatabaseStructurePermissions.DATABASE_CREATE);
return permissions;
}
public static List<String> getPermissionsForViewer(int id){
List<String> permissions = getPermissionsForUser();
permissions.add(DatabaseStructurePermissions.DATABASE_VIEW(id));
return permissions;
}
public static List<String> getPermissionsForContributor(int id){
List<String> permissions = getPermissionsForViewer(id);
permissions.add(DatabaseStructurePermissions.DATABASE_MODIFY(id));
return permissions;
}
public static List<String> getPermissionsForOwner(int id){
List<String> permissions = getPermissionsForContributor(id);
permissions.add(DatabaseStructurePermissions.DATABASE_ANY_ACTION(id));
return permissions;
}
public static List<String> getPermissionsForSysadmin(){
ArrayList<String> permissions = new ArrayList<String>();
permissions.add(DatabaseStructurePermissions.DATABASE_CREATE);
permissions.add(DatabaseStructurePermissions.DATABASE_CREATE_FULL);
permissions.add(DatabaseStructurePermissions.DATABASE_UPDATE_ALL);
permissions.add(DatabaseStructurePermissions.DATABASE_DELETE_ALL);
permissions.add(DatabaseStructurePermissions.DATABASE_VIEW_ALL);
return permissions;
}
}
| src/main/java/uk/ac/ox/it/ords/api/database/structure/permissions/DatabaseStructurePermissionSets.java | /*
* Copyright 2015 University of Oxford
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.ac.ox.it.ords.api.database.structure.permissions;
import java.util.ArrayList;
import java.util.List;
public class DatabaseStructurePermissionSets {
public static List<String> getPermissionsForAnonymous(){
ArrayList<String> permissions = new ArrayList<String>();
permissions.add(DatabaseStructurePermissions.DATABASE_VIEW_PUBLIC);
return permissions;
}
public static List<String> getPermissionsForUser(){
List<String> permissions = getPermissionsForAnonymous();
return permissions;
}
public static List<String> getPermissionsForLocalUser(){
List<String> permissions = getPermissionsForAnonymous();
permissions.add(DatabaseStructurePermissions.DATABASE_CREATE);
return permissions;
}
public static List<String> getPermissionsForViewer(int id){
List<String> permissions = getPermissionsForUser();
permissions.add(DatabaseStructurePermissions.DATABASE_VIEW(id));
return permissions;
}
public static List<String> getPermissionsForContributor(int id){
List<String> permissions = getPermissionsForViewer(id);
permissions.add(DatabaseStructurePermissions.DATABASE_MODIFY(id));
return permissions;
}
public static List<String> getPermissionsForOwner(int id){
List<String> permissions = getPermissionsForContributor(id);
permissions.add(DatabaseStructurePermissions.DATABASE_ANY_ACTION(id));
return permissions;
}
public static List<String> getPermissionsForSysadmin(){
ArrayList<String> permissions = new ArrayList<String>();
permissions.add(DatabaseStructurePermissions.DATABASE_CREATE_FULL);
permissions.add(DatabaseStructurePermissions.DATABASE_UPDATE_ALL);
permissions.add(DatabaseStructurePermissions.DATABASE_DELETE_ALL);
permissions.add(DatabaseStructurePermissions.DATABASE_VIEW_ALL);
return permissions;
}
}
| Ensure admin has create_project permissions | src/main/java/uk/ac/ox/it/ords/api/database/structure/permissions/DatabaseStructurePermissionSets.java | Ensure admin has create_project permissions | <ide><path>rc/main/java/uk/ac/ox/it/ords/api/database/structure/permissions/DatabaseStructurePermissionSets.java
<ide> }
<ide> public static List<String> getPermissionsForSysadmin(){
<ide> ArrayList<String> permissions = new ArrayList<String>();
<add> permissions.add(DatabaseStructurePermissions.DATABASE_CREATE);
<ide> permissions.add(DatabaseStructurePermissions.DATABASE_CREATE_FULL);
<ide> permissions.add(DatabaseStructurePermissions.DATABASE_UPDATE_ALL);
<ide> permissions.add(DatabaseStructurePermissions.DATABASE_DELETE_ALL); |
|
Java | apache-2.0 | 91073829c7cea11e15c0bcc4ea8b9328cada6f17 | 0 | scwang90/SmartRefreshLayout | package com.scwang.refreshlayout.fragment.using;
import android.app.Activity;
import android.content.Context;
import android.content.Intent;
import android.os.Bundle;
import android.support.annotation.Nullable;
import android.support.v4.app.Fragment;
import android.support.v7.widget.DefaultItemAnimator;
import android.support.v7.widget.DividerItemDecoration;
import android.support.v7.widget.LinearLayoutManager;
import android.support.v7.widget.RecyclerView;
import android.support.v7.widget.Toolbar;
import android.view.LayoutInflater;
import android.view.View;
import android.view.ViewGroup;
import android.widget.AdapterView;
import android.widget.ImageView;
import com.scwang.refreshlayout.R;
import com.scwang.refreshlayout.activity.FragmentActivity;
import com.scwang.refreshlayout.adapter.BaseRecyclerAdapter;
import com.scwang.refreshlayout.adapter.SmartViewHolder;
import com.youth.banner.Banner;
import com.youth.banner.loader.ImageLoader;
import java.util.Arrays;
import static android.R.layout.simple_list_item_2;
import static android.support.v7.widget.DividerItemDecoration.VERTICAL;
import static com.scwang.refreshlayout.R.mipmap.image_weibo_home_1;
import static com.scwang.refreshlayout.R.mipmap.image_weibo_home_2;
/**
 * Usage example - nested scrolling
* A simple {@link Fragment} subclass.
*/
public class NestedScrollUsingFragmentIntegral extends Fragment implements AdapterView.OnItemClickListener {
private enum Item {
NestedStandard("标准嵌套", NestedScrollUsingFragment.class),
NestedIntegral("整体嵌套", NestedScrollUsingFragmentIntegral.class),
;
public String name;
public Class<?> clazz;
Item(String name, Class<?> clazz) {
this.name = name;
this.clazz = clazz;
}
}
@Override
public View onCreateView(LayoutInflater inflater, ViewGroup container, Bundle savedInstanceState) {
return inflater.inflate(R.layout.fragment_using_nestedscroll_integral, container, false);
}
@Override
public void onViewCreated(final View root, @Nullable Bundle savedInstanceState) {
super.onViewCreated(root, savedInstanceState);
Toolbar toolbar = (Toolbar) root.findViewById(R.id.toolbar);
toolbar.setNavigationOnClickListener(new View.OnClickListener() {
@Override
public void onClick(View v) {
getActivity().finish();
}
});
Banner banner = (Banner) root.findViewById(R.id.banner);
banner.setImageLoader(new BannerImageLoader());
banner.setImages(Arrays.asList(image_weibo_home_1,image_weibo_home_2));
RecyclerView recyclerView = (RecyclerView) root.findViewById(R.id.recyclerView);
recyclerView.setItemAnimator(new DefaultItemAnimator());
recyclerView.setLayoutManager(new LinearLayoutManager(getContext()));
recyclerView.addItemDecoration(new DividerItemDecoration(getContext(), VERTICAL));
recyclerView.setAdapter(new BaseRecyclerAdapter<Item>(Arrays.asList(Item.values()), simple_list_item_2,NestedScrollUsingFragmentIntegral.this) {
@Override
protected void onBindViewHolder(SmartViewHolder holder, Item model, int position) {
holder.text(android.R.id.text1, model.name());
holder.text(android.R.id.text2, model.name);
holder.textColorId(android.R.id.text2, R.color.colorTextAssistant);
}
});
}
@Override
public void onItemClick(AdapterView<?> parent, View view, int position, long id) {
Item item = Item.values()[position];
if (Activity.class.isAssignableFrom(item.clazz)) {
startActivity(new Intent(getContext(), item.clazz));
} else if (Fragment.class.isAssignableFrom(item.clazz)) {
FragmentActivity.start(this, item.clazz);
}
}
private class BannerImageLoader extends ImageLoader {
@Override
public void displayImage(Context context, Object path, ImageView imageView) {
imageView.setImageResource((Integer)path);
}
}
}
| app/src/main/java/com/scwang/refreshlayout/fragment/using/NestedScrollUsingFragmentIntegral.java | package com.scwang.refreshlayout.fragment.using;
import android.app.Activity;
import android.content.Context;
import android.content.Intent;
import android.os.Bundle;
import android.support.annotation.Nullable;
import android.support.v4.app.Fragment;
import android.support.v7.widget.DefaultItemAnimator;
import android.support.v7.widget.DividerItemDecoration;
import android.support.v7.widget.LinearLayoutManager;
import android.support.v7.widget.RecyclerView;
import android.support.v7.widget.Toolbar;
import android.view.LayoutInflater;
import android.view.View;
import android.view.ViewGroup;
import android.widget.AdapterView;
import android.widget.ImageView;
import com.scwang.refreshlayout.R;
import com.scwang.refreshlayout.activity.FragmentActivity;
import com.scwang.refreshlayout.adapter.BaseRecyclerAdapter;
import com.scwang.refreshlayout.adapter.SmartViewHolder;
import com.youth.banner.Banner;
import com.youth.banner.loader.ImageLoader;
import java.util.Arrays;
import static android.R.layout.simple_list_item_2;
import static android.support.v7.widget.DividerItemDecoration.VERTICAL;
import static com.scwang.refreshlayout.R.mipmap.image_weibo_home_1;
import static com.scwang.refreshlayout.R.mipmap.image_weibo_home_2;
/**
 * Usage example - nested scrolling
* A simple {@link Fragment} subclass.
*/
public class NestedScrollUsingFragmentIntegral extends Fragment implements AdapterView.OnItemClickListener {
private enum Item {
NestedStandard("标准嵌套", NestedScrollUsingFragment.class),
NestedIntegral("整体嵌套", NestedScrollUsingFragmentIntegral.class),
;
public String name;
public Class<?> clazz;
Item(String name, Class<?> clazz) {
this.name = name;
this.clazz = clazz;
}
}
@Override
public View onCreateView(LayoutInflater inflater, ViewGroup container, Bundle savedInstanceState) {
return inflater.inflate(R.layout.fragment_using_nestedscroll, container, false);
}
@Override
public void onViewCreated(final View root, @Nullable Bundle savedInstanceState) {
super.onViewCreated(root, savedInstanceState);
Toolbar toolbar = (Toolbar) root.findViewById(R.id.toolbar);
toolbar.setNavigationOnClickListener(new View.OnClickListener() {
@Override
public void onClick(View v) {
getActivity().finish();
}
});
Banner banner = (Banner) root.findViewById(R.id.banner);
banner.setImageLoader(new BannerImageLoader());
banner.setImages(Arrays.asList(image_weibo_home_1,image_weibo_home_2));
RecyclerView recyclerView = (RecyclerView) root.findViewById(R.id.recyclerView);
recyclerView.setItemAnimator(new DefaultItemAnimator());
recyclerView.setLayoutManager(new LinearLayoutManager(getContext()));
recyclerView.addItemDecoration(new DividerItemDecoration(getContext(), VERTICAL));
recyclerView.setAdapter(new BaseRecyclerAdapter<Item>(Arrays.asList(Item.values()), simple_list_item_2,NestedScrollUsingFragmentIntegral.this) {
@Override
protected void onBindViewHolder(SmartViewHolder holder, Item model, int position) {
holder.text(android.R.id.text1, model.name());
holder.text(android.R.id.text2, model.name);
holder.textColorId(android.R.id.text2, R.color.colorTextAssistant);
}
});
}
@Override
public void onItemClick(AdapterView<?> parent, View view, int position, long id) {
Item item = Item.values()[position];
if (Activity.class.isAssignableFrom(item.clazz)) {
startActivity(new Intent(getContext(), item.clazz));
} else if (Fragment.class.isAssignableFrom(item.clazz)) {
FragmentActivity.start(this, item.clazz);
}
}
private class BannerImageLoader extends ImageLoader {
@Override
public void displayImage(Context context, Object path, ImageView imageView) {
imageView.setImageResource((Integer)path);
}
}
}
| 添加demo 纯滚动模式示例 和 空页面 和 嵌套滚动
| app/src/main/java/com/scwang/refreshlayout/fragment/using/NestedScrollUsingFragmentIntegral.java | 添加demo 纯滚动模式示例 和 空页面 和 嵌套滚动 | <ide><path>pp/src/main/java/com/scwang/refreshlayout/fragment/using/NestedScrollUsingFragmentIntegral.java
<ide>
<ide> @Override
<ide> public View onCreateView(LayoutInflater inflater, ViewGroup container, Bundle savedInstanceState) {
<del> return inflater.inflate(R.layout.fragment_using_nestedscroll, container, false);
<add> return inflater.inflate(R.layout.fragment_using_nestedscroll_integral, container, false);
<ide> }
<ide>
<ide> @Override |
|
Java | apache-2.0 | e1eda1dbb3df7d9ad40a2dded6f8e5fa75c2a10e | 0 | manstis/kie-wb-common,romartin/kie-wb-common,romartin/kie-wb-common,ederign/kie-wb-common,manstis/kie-wb-common,romartin/kie-wb-common,jhrcek/kie-wb-common,ederign/kie-wb-common,manstis/kie-wb-common,ederign/kie-wb-common,porcelli-forks/kie-wb-common,jhrcek/kie-wb-common,romartin/kie-wb-common,porcelli-forks/kie-wb-common,droolsjbpm/kie-wb-common,manstis/kie-wb-common,porcelli-forks/kie-wb-common,porcelli-forks/kie-wb-common,droolsjbpm/kie-wb-common,manstis/kie-wb-common,romartin/kie-wb-common,jhrcek/kie-wb-common,ederign/kie-wb-common,jhrcek/kie-wb-common | /*
* Copyright 2014 Red Hat, Inc. and/or its affiliates.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.kie.workbench.common.services.datamodeller.driver.impl;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Optional;
import java.util.regex.Pattern;
import org.jboss.forge.roaster.model.ValuePair;
import org.jboss.forge.roaster.model.source.AnnotationSource;
import org.kie.workbench.common.services.datamodeller.core.Annotation;
import org.kie.workbench.common.services.datamodeller.core.AnnotationDefinition;
import org.kie.workbench.common.services.datamodeller.core.AnnotationValuePairDefinition;
import org.kie.workbench.common.services.datamodeller.core.impl.AnnotationImpl;
import org.kie.workbench.common.services.datamodeller.driver.AnnotationDriver;
import org.kie.workbench.common.services.datamodeller.driver.ModelDriverException;
import org.kie.workbench.common.services.datamodeller.util.DriverUtils;
import org.kie.workbench.common.services.datamodeller.util.NamingUtils;
import org.kie.workbench.common.services.datamodeller.util.PortableStringUtils;
import org.kie.workbench.common.services.datamodeller.util.StringEscapeUtils;
public class DefaultJavaRoasterModelAnnotationDriver implements AnnotationDriver {
@Override
public Annotation buildAnnotation( AnnotationDefinition annotationDefinition, Object annotationToken ) throws ModelDriverException {
AnnotationSource javaAnnotationToken = ( AnnotationSource ) annotationToken;
AnnotationImpl annotation = new AnnotationImpl( annotationDefinition );
if ( annotationDefinition.isMarker() ) {
return annotation;
} else {
if ( javaAnnotationToken.getValues() != null ) {
List<ValuePair> values = javaAnnotationToken.getValues();
if ( values != null && values.size() > 0 ) {
for ( AnnotationValuePairDefinition valuePairDefinition : annotationDefinition.getValuePairs() ) {
Object annotationValue = buildAnnotationValue( javaAnnotationToken, valuePairDefinition );
if ( annotationValue != null ) {
annotation.setValue( valuePairDefinition.getName(), annotationValue );
}
}
}
}
}
return annotation;
}
private Object buildAnnotationValue( AnnotationSource javaAnnotationToken, AnnotationValuePairDefinition valuePairDefinition ) throws ModelDriverException {
Object result = null;
if ( javaAnnotationToken.getLiteralValue( valuePairDefinition.getName() ) != null ) {
//there's a value
if ( valuePairDefinition.isPrimitiveType() ) {
result = parsePrimitiveValue( javaAnnotationToken, valuePairDefinition );
} else if ( valuePairDefinition.isEnum() ) {
result = parseEnumValue( javaAnnotationToken, valuePairDefinition );
} else if ( valuePairDefinition.isString() ) {
result = parseStringValue( javaAnnotationToken, valuePairDefinition );
} else if ( valuePairDefinition.isClass() ) {
result = parseClassValue( javaAnnotationToken, valuePairDefinition );
} else if ( valuePairDefinition.isAnnotation() ) {
result = parseAnnotationValue( javaAnnotationToken, valuePairDefinition );
}
}
return result;
}
private Object parsePrimitiveValue( AnnotationSource javaAnnotationToken, AnnotationValuePairDefinition valuePairDefinition ) {
String value = parseLiteralValue( javaAnnotationToken.getLiteralValue( valuePairDefinition.getName() ) );
Object result;
if ( value == null ) {
return null;
}
if ( valuePairDefinition.isArray() ) {
result = parsePrimitiveArrayValue( value, valuePairDefinition.getClassName(), valuePairDefinition );
} else {
result = parsePrimitiveValue( value, valuePairDefinition.getClassName() );
}
return result;
}
private Object parseEnumValue( AnnotationSource javaAnnotationToken, AnnotationValuePairDefinition valuePairDefinition ) {
String value = parseLiteralValue( javaAnnotationToken.getLiteralValue( valuePairDefinition.getName() ) );
Object result;
if ( value == null ) {
return null;
}
if ( valuePairDefinition.isArray() ) {
result = parseEnumArrayValue( value, valuePairDefinition );
} else {
result = parseEnumValue( value, valuePairDefinition );
}
return result;
}
private Object parseStringValue( AnnotationSource javaAnnotationToken, AnnotationValuePairDefinition valuePairDefinition ) {
Object result = null;
if ( valuePairDefinition.isArray() ) {
String[] arrayValue = javaAnnotationToken.getStringArrayValue( valuePairDefinition.getName() );
if ( arrayValue != null ) {
result = Arrays.asList( arrayValue );
}
} else {
result = javaAnnotationToken.getStringValue( valuePairDefinition.getName() );
}
return result;
}
private Object parseClassValue( AnnotationSource javaAnnotationToken, AnnotationValuePairDefinition valuePairDefinition ) {
String value = null;
Object result;
List<ValuePair> values = javaAnnotationToken.getValues();
if ( values != null ) {
Optional<ValuePair> valuePair = values.stream().filter(
vp -> valuePairDefinition.getName().equals( vp.getName() ) ).findFirst( );
value = valuePair.map( vp -> vp.getLiteralValue() ).orElse( null );
}
if ( value == null ) {
return null;
}
if ( valuePairDefinition.isArray() ) {
result = parseClassArrayValue( value );
} else {
result = value;
}
return result;
}
private Object parseAnnotationValue( AnnotationSource javaAnnotationToken, AnnotationValuePairDefinition valuePairDefinition ) throws ModelDriverException {
String value = javaAnnotationToken.getLiteralValue( valuePairDefinition.getName() );
AnnotationDefinition annotationDefinition = valuePairDefinition.getAnnotationDefinition();
Object result = null;
if ( value == null ) return null;
if ( annotationDefinition == null ) {
return value;
}
if ( valuePairDefinition.isArray() ) {
AnnotationSource[] javaAnnotationTokenValueArray = javaAnnotationToken.getAnnotationArrayValue( valuePairDefinition.getName() );
List<Annotation> annotationList = new ArrayList<Annotation>();
Annotation annotation;
if ( javaAnnotationTokenValueArray != null ) {
for ( int i = 0; i < javaAnnotationTokenValueArray.length; i++ ) {
annotation = buildAnnotation( annotationDefinition, javaAnnotationTokenValueArray[ i ] );
if ( annotation != null ) {
annotationList.add( annotation );
}
}
}
result = annotationList.size() > 0 ? annotationList : null;
} else {
AnnotationSource javaAnnotationTokenValue = javaAnnotationToken.getAnnotationValue( valuePairDefinition.getName() );
if ( javaAnnotationTokenValue != null ) {
result = buildAnnotation( annotationDefinition, javaAnnotationTokenValue );
}
}
return result;
}
private Object parsePrimitiveValue( String value, String className ) {
if ( NamingUtils.isByteId( className ) ) {
return parseByteValue( value, className );
} else if ( NamingUtils.isCharId( className ) ) {
return parseCharValue( value, className );
} else {
return NamingUtils.parsePrimitiveValue( className, value );
}
}
private List<Object> parsePrimitiveArrayValue( String value, String className, AnnotationValuePairDefinition valuePairDefinition ) {
if ( value == null ) return null;
List<Object> values = new ArrayList<Object>( );
value = value.trim();
if ( !value.startsWith( "{" ) || !value.endsWith( "}" ) ) {
//mal formed array
return values;
} else if ( DriverUtils.isEmptyArray( value ) ) {
return values;
} else {
value = PortableStringUtils.removeLastChar( PortableStringUtils.removeFirstChar( value, '{' ), '}' );
String[] primitiveValues = value.split( "," );
Object primitiveValue;
for ( int i = 0; i < primitiveValues.length; i++ ) {
primitiveValue = parsePrimitiveValue( primitiveValues[i], className );
values.add( primitiveValue );
}
}
return values;
}
private Object parseByteValue( String value, String className ) {
//remove the word (byte) in case the value is something like (byte)222"
String regex = "(\\s)*\\((\\s)*byte(\\s)*\\)(\\s)*";
Pattern pattern = Pattern.compile( regex );
String[] splits = pattern.split( value );
Object result = null;
try {
if ( splits.length == 0 ) {
result = NamingUtils.parsePrimitiveValue( className, value );
} else if ( splits.length == 1 ) {
result = NamingUtils.parsePrimitiveValue( className, splits[ 0 ] );
} else if ( splits.length == 2 ) {
result = NamingUtils.parsePrimitiveValue( className, splits[ 1 ] );
} else {
result = NamingUtils.parsePrimitiveValue( className, value );
}
} catch ( NumberFormatException e ) {
result = value;
}
return result;
}
private Object parseCharValue( String value, String className ) {
String unquotedValue = StringEscapeUtils.unquoteSingle( value );
return NamingUtils.parsePrimitiveValue( className, unquotedValue );
}
private Object parseEnumValue( String value, AnnotationValuePairDefinition valuePairDefinition ) {
String[] enumValues = valuePairDefinition.enumValues();
String result = value;
if ( value != null && enumValues != null ) {
for ( int i = 0; i < enumValues.length; i++ ) {
if ( value.endsWith( enumValues[ i ] ) ) {
result = enumValues[ i ];
break;
}
}
}
return result;
}
private List<Object> parseEnumArrayValue( String value, AnnotationValuePairDefinition valuePairDefinition ) {
if ( value == null ) return null;
List<Object> values = new ArrayList<Object>( );
value = value.trim();
if ( !value.startsWith( "{" ) || !value.endsWith( "}" ) ) {
//mal formed array
return values;
} else if ( DriverUtils.isEmptyArray( value ) ) {
return values;
} else {
value = PortableStringUtils.removeLastChar( PortableStringUtils.removeFirstChar( value, '{' ), '}' );
String[] enumValues = value.split( "," );
Object enumValue;
for ( int i = 0; i < enumValues.length; i++ ) {
enumValue = parseEnumValue( enumValues[i], valuePairDefinition );
values.add( enumValue );
}
}
return values;
}
private List<Object> parseClassArrayValue( String value ) {
if ( value == null ) return null;
List<Object> values = new ArrayList<Object>( );
value = value.trim();
if ( !value.startsWith( "{" ) || !value.endsWith( "}" ) ) {
//mal formed array
return values;
} else if ( DriverUtils.isEmptyArray( value ) ) {
return values;
} else {
value = PortableStringUtils.removeLastChar( PortableStringUtils.removeFirstChar( value, '{' ), '}' );
String[] classValues = value.split( "," );
Object classValue;
for ( int i = 0; i < classValues.length; i++ ) {
classValue = parseClassValue( classValues[i] );
if ( classValue != null ) {
values.add( classValue );
}
}
}
return values;
}
private String parseClassValue( String classValue ) {
return classValue != null ? classValue.trim() : null;
}
private boolean isValidClassValue( String value ) {
String classValue = value != null ? value.trim() : value;
return classValue != null && classValue.length() > ".class".length() && classValue.endsWith( ".class" );
}
private String parseLiteralValue( String literalValue ) {
return literalValue; //literalValue != null ? StringEscapeUtils.unquote( StringEscapeUtils.unescapeJava( literalValue ) ) : literalValue;
}
}
| kie-wb-common-services/kie-wb-common-data-modeller-core/src/main/java/org/kie/workbench/common/services/datamodeller/driver/impl/DefaultJavaRoasterModelAnnotationDriver.java | /*
* Copyright 2014 Red Hat, Inc. and/or its affiliates.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.kie.workbench.common.services.datamodeller.driver.impl;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.regex.Pattern;
import org.jboss.forge.roaster.model.ValuePair;
import org.jboss.forge.roaster.model.source.AnnotationSource;
import org.kie.workbench.common.services.datamodeller.core.Annotation;
import org.kie.workbench.common.services.datamodeller.core.AnnotationDefinition;
import org.kie.workbench.common.services.datamodeller.core.AnnotationValuePairDefinition;
import org.kie.workbench.common.services.datamodeller.core.impl.AnnotationImpl;
import org.kie.workbench.common.services.datamodeller.driver.AnnotationDriver;
import org.kie.workbench.common.services.datamodeller.driver.ModelDriverException;
import org.kie.workbench.common.services.datamodeller.util.DriverUtils;
import org.kie.workbench.common.services.datamodeller.util.NamingUtils;
import org.kie.workbench.common.services.datamodeller.util.PortableStringUtils;
import org.kie.workbench.common.services.datamodeller.util.StringEscapeUtils;
public class DefaultJavaRoasterModelAnnotationDriver implements AnnotationDriver {
@Override
public Annotation buildAnnotation( AnnotationDefinition annotationDefinition, Object annotationToken ) throws ModelDriverException {
AnnotationSource javaAnnotationToken = ( AnnotationSource ) annotationToken;
AnnotationImpl annotation = new AnnotationImpl( annotationDefinition );
if ( annotationDefinition.isMarker() ) {
return annotation;
} else {
if ( javaAnnotationToken.getValues() != null ) {
List<ValuePair> values = javaAnnotationToken.getValues();
if ( values != null && values.size() > 0 ) {
for ( AnnotationValuePairDefinition valuePairDefinition : annotationDefinition.getValuePairs() ) {
Object annotationValue = buildAnnotationValue( javaAnnotationToken, valuePairDefinition );
if ( annotationValue != null ) {
annotation.setValue( valuePairDefinition.getName(), annotationValue );
}
}
}
}
}
return annotation;
}
private Object buildAnnotationValue( AnnotationSource javaAnnotationToken, AnnotationValuePairDefinition valuePairDefinition ) throws ModelDriverException {
Object result = null;
if ( javaAnnotationToken.getLiteralValue( valuePairDefinition.getName() ) != null ) {
//there's a value
if ( valuePairDefinition.isPrimitiveType() ) {
result = parsePrimitiveValue( javaAnnotationToken, valuePairDefinition );
} else if ( valuePairDefinition.isEnum() ) {
result = parseEnumValue( javaAnnotationToken, valuePairDefinition );
} else if ( valuePairDefinition.isString() ) {
result = parseStringValue( javaAnnotationToken, valuePairDefinition );
} else if ( valuePairDefinition.isClass() ) {
result = parseClassValue( javaAnnotationToken, valuePairDefinition );
} else if ( valuePairDefinition.isAnnotation() ) {
result = parseAnnotationValue( javaAnnotationToken, valuePairDefinition );
}
}
return result;
}
private Object parsePrimitiveValue( AnnotationSource javaAnnotationToken, AnnotationValuePairDefinition valuePairDefinition ) {
String value = parseLiteralValue( javaAnnotationToken.getLiteralValue( valuePairDefinition.getName() ) );
Object result;
if ( value == null ) {
return null;
}
if ( valuePairDefinition.isArray() ) {
result = parsePrimitiveArrayValue( value, valuePairDefinition.getClassName(), valuePairDefinition );
} else {
result = parsePrimitiveValue( value, valuePairDefinition.getClassName() );
}
return result;
}
private Object parseEnumValue( AnnotationSource javaAnnotationToken, AnnotationValuePairDefinition valuePairDefinition ) {
String value = parseLiteralValue( javaAnnotationToken.getLiteralValue( valuePairDefinition.getName() ) );
Object result;
if ( value == null ) {
return null;
}
if ( valuePairDefinition.isArray() ) {
result = parseEnumArrayValue( value, valuePairDefinition );
} else {
result = parseEnumValue( value, valuePairDefinition );
}
return result;
}
private Object parseStringValue( AnnotationSource javaAnnotationToken, AnnotationValuePairDefinition valuePairDefinition ) {
Object result = null;
if ( valuePairDefinition.isArray() ) {
String[] arrayValue = javaAnnotationToken.getStringArrayValue( valuePairDefinition.getName() );
if ( arrayValue != null ) {
result = Arrays.asList( arrayValue );
}
} else {
result = javaAnnotationToken.getStringValue( valuePairDefinition.getName() );
}
return result;
}
private Object parseClassValue( AnnotationSource javaAnnotationToken, AnnotationValuePairDefinition valuePairDefinition ) {
String value = parseLiteralValue( javaAnnotationToken.getLiteralValue( valuePairDefinition.getName() ) );
Object result;
if ( value == null ) {
return null;
}
if ( valuePairDefinition.isArray() ) {
result = parseClassArrayValue( value );
} else {
result = value;
}
return result;
}
private Object parseAnnotationValue( AnnotationSource javaAnnotationToken, AnnotationValuePairDefinition valuePairDefinition ) throws ModelDriverException {
String value = javaAnnotationToken.getLiteralValue( valuePairDefinition.getName() );
AnnotationDefinition annotationDefinition = valuePairDefinition.getAnnotationDefinition();
Object result = null;
if ( value == null ) return null;
if ( annotationDefinition == null ) {
return value;
}
if ( valuePairDefinition.isArray() ) {
AnnotationSource[] javaAnnotationTokenValueArray = javaAnnotationToken.getAnnotationArrayValue( valuePairDefinition.getName() );
List<Annotation> annotationList = new ArrayList<Annotation>();
Annotation annotation;
if ( javaAnnotationTokenValueArray != null ) {
for ( int i = 0; i < javaAnnotationTokenValueArray.length; i++ ) {
annotation = buildAnnotation( annotationDefinition, javaAnnotationTokenValueArray[ i ] );
if ( annotation != null ) {
annotationList.add( annotation );
}
}
}
result = annotationList.size() > 0 ? annotationList : null;
} else {
AnnotationSource javaAnnotationTokenValue = javaAnnotationToken.getAnnotationValue( valuePairDefinition.getName() );
if ( javaAnnotationTokenValue != null ) {
result = buildAnnotation( annotationDefinition, javaAnnotationTokenValue );
}
}
return result;
}
private Object parsePrimitiveValue( String value, String className ) {
if ( NamingUtils.isByteId( className ) ) {
return parseByteValue( value, className );
} else if ( NamingUtils.isCharId( className ) ) {
return parseCharValue( value, className );
} else {
return NamingUtils.parsePrimitiveValue( className, value );
}
}
private List<Object> parsePrimitiveArrayValue( String value, String className, AnnotationValuePairDefinition valuePairDefinition ) {
if ( value == null ) return null;
List<Object> values = new ArrayList<Object>( );
value = value.trim();
if ( !value.startsWith( "{" ) || !value.endsWith( "}" ) ) {
//mal formed array
return values;
} else if ( DriverUtils.isEmptyArray( value ) ) {
return values;
} else {
value = PortableStringUtils.removeLastChar( PortableStringUtils.removeFirstChar( value, '{' ), '}' );
String[] primitiveValues = value.split( "," );
Object primitiveValue;
for ( int i = 0; i < primitiveValues.length; i++ ) {
primitiveValue = parsePrimitiveValue( primitiveValues[i], className );
values.add( primitiveValue );
}
}
return values;
}
private Object parseByteValue( String value, String className ) {
//remove the word (byte) in case the value is something like (byte)222"
String regex = "(\\s)*\\((\\s)*byte(\\s)*\\)(\\s)*";
Pattern pattern = Pattern.compile( regex );
String[] splits = pattern.split( value );
Object result = null;
try {
if ( splits.length == 0 ) {
result = NamingUtils.parsePrimitiveValue( className, value );
} else if ( splits.length == 1 ) {
result = NamingUtils.parsePrimitiveValue( className, splits[ 0 ] );
} else if ( splits.length == 2 ) {
result = NamingUtils.parsePrimitiveValue( className, splits[ 1 ] );
} else {
result = NamingUtils.parsePrimitiveValue( className, value );
}
} catch ( NumberFormatException e ) {
result = value;
}
return result;
}
private Object parseCharValue( String value, String className ) {
String unquotedValue = StringEscapeUtils.unquoteSingle( value );
return NamingUtils.parsePrimitiveValue( className, unquotedValue );
}
private Object parseEnumValue( String value, AnnotationValuePairDefinition valuePairDefinition ) {
String[] enumValues = valuePairDefinition.enumValues();
String result = value;
if ( value != null && enumValues != null ) {
for ( int i = 0; i < enumValues.length; i++ ) {
if ( value.endsWith( enumValues[ i ] ) ) {
result = enumValues[ i ];
break;
}
}
}
return result;
}
private List<Object> parseEnumArrayValue( String value, AnnotationValuePairDefinition valuePairDefinition ) {
if ( value == null ) return null;
List<Object> values = new ArrayList<Object>( );
value = value.trim();
if ( !value.startsWith( "{" ) || !value.endsWith( "}" ) ) {
//mal formed array
return values;
} else if ( DriverUtils.isEmptyArray( value ) ) {
return values;
} else {
value = PortableStringUtils.removeLastChar( PortableStringUtils.removeFirstChar( value, '{' ), '}' );
String[] enumValues = value.split( "," );
Object enumValue;
for ( int i = 0; i < enumValues.length; i++ ) {
enumValue = parseEnumValue( enumValues[i], valuePairDefinition );
values.add( enumValue );
}
}
return values;
}
private List<Object> parseClassArrayValue( String value ) {
if ( value == null ) return null;
List<Object> values = new ArrayList<Object>( );
value = value.trim();
if ( !value.startsWith( "{" ) || !value.endsWith( "}" ) ) {
//mal formed array
return values;
} else if ( DriverUtils.isEmptyArray( value ) ) {
return values;
} else {
value = PortableStringUtils.removeLastChar( PortableStringUtils.removeFirstChar( value, '{' ), '}' );
String[] classValues = value.split( "," );
Object classValue;
for ( int i = 0; i < classValues.length; i++ ) {
classValue = parseClassValue( classValues[i] );
if ( classValue != null ) {
values.add( classValue );
}
}
}
return values;
}
private String parseClassValue( String classValue ) {
return classValue != null ? classValue.trim() : null;
}
private boolean isValidClassValue( String value ) {
String classValue = value != null ? value.trim() : value;
return classValue != null && classValue.length() > ".class".length() && classValue.endsWith( ".class" );
}
private String parseLiteralValue( String literalValue ) {
return literalValue; //literalValue != null ? StringEscapeUtils.unquote( StringEscapeUtils.unescapeJava( literalValue ) ) : literalValue;
}
}
| Roaster version update
| kie-wb-common-services/kie-wb-common-data-modeller-core/src/main/java/org/kie/workbench/common/services/datamodeller/driver/impl/DefaultJavaRoasterModelAnnotationDriver.java | Roaster version update | <ide><path>ie-wb-common-services/kie-wb-common-data-modeller-core/src/main/java/org/kie/workbench/common/services/datamodeller/driver/impl/DefaultJavaRoasterModelAnnotationDriver.java
<ide> import java.util.ArrayList;
<ide> import java.util.Arrays;
<ide> import java.util.List;
<add>import java.util.Optional;
<ide> import java.util.regex.Pattern;
<ide>
<ide> import org.jboss.forge.roaster.model.ValuePair;
<ide> }
<ide>
<ide> private Object parseClassValue( AnnotationSource javaAnnotationToken, AnnotationValuePairDefinition valuePairDefinition ) {
<del> String value = parseLiteralValue( javaAnnotationToken.getLiteralValue( valuePairDefinition.getName() ) );
<add> String value = null;
<ide> Object result;
<del>
<add> List<ValuePair> values = javaAnnotationToken.getValues();
<add> if ( values != null ) {
<add> Optional<ValuePair> valuePair = values.stream().filter(
<add> vp -> valuePairDefinition.getName().equals( vp.getName() ) ).findFirst( );
<add> value = valuePair.map( vp -> vp.getLiteralValue() ).orElse( null );
<add> }
<ide> if ( value == null ) {
<ide> return null;
<ide> } |
|
Java | apache-2.0 | 9ed0ff42cf19546c94f1223625cf84387f56f345 | 0 | eighthave/ChatSecureAndroid,eighthave/ChatSecureAndroid,Heart2009/ChatSecureAndroid,kden/ChatSecureAndroid,bonashen/ChatSecureAndroid,guardianproject/ChatSecureAndroid,10045125/ChatSecureAndroid,prembasumatary/ChatSecureAndroid,prembasumatary/ChatSecureAndroid,bonashen/ChatSecureAndroid,10045125/ChatSecureAndroid,bonashen/ChatSecureAndroid,joskarthic/chatsecure,n8fr8/ChatSecureAndroid,anvayarai/my-ChatSecure,joskarthic/chatsecure,n8fr8/ChatSecureAndroid,n8fr8/AwesomeApp,n8fr8/AwesomeApp,anvayarai/my-ChatSecure,n8fr8/AwesomeApp,guardianproject/ChatSecureAndroid,kden/ChatSecureAndroid,Heart2009/ChatSecureAndroid,joskarthic/chatsecure,anvayarai/my-ChatSecure,Heart2009/ChatSecureAndroid,guardianproject/ChatSecureAndroid,n8fr8/ChatSecureAndroid,10045125/ChatSecureAndroid,kden/ChatSecureAndroid,h2ri/ChatSecureAndroid,eighthave/ChatSecureAndroid,prembasumatary/ChatSecureAndroid,h2ri/ChatSecureAndroid,h2ri/ChatSecureAndroid | /*
* Copyright (C) 2008 Esmertec AG. Copyright (C) 2008 The Android Open Source
* Project
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package info.guardianproject.otr.app.im.app;
import info.guardianproject.emoji.EmojiManager;
import info.guardianproject.otr.app.im.R;
import info.guardianproject.otr.app.im.provider.Imps;
import info.guardianproject.otr.app.im.ui.AudioPlayerActivity;
import info.guardianproject.otr.app.im.ui.ImageViewActivity;
import info.guardianproject.otr.app.im.ui.RoundedAvatarDrawable;
import info.guardianproject.util.LogCleaner;
import java.io.File;
import java.io.IOException;
import java.text.DateFormat;
import java.text.SimpleDateFormat;
import java.util.Date;
import java.util.List;
import android.content.ContentResolver;
import android.content.Context;
import android.content.Intent;
import android.content.pm.PackageManager;
import android.content.pm.ResolveInfo;
import android.content.res.Resources;
import android.database.Cursor;
import android.graphics.Bitmap;
import android.graphics.BitmapFactory;
import android.graphics.Color;
import android.graphics.Typeface;
import android.graphics.drawable.Drawable;
import android.media.AudioManager;
import android.media.MediaPlayer;
import android.net.Uri;
import android.os.AsyncTask;
import android.provider.MediaStore;
import android.support.v4.util.LruCache;
import android.text.Spannable;
import android.text.SpannableString;
import android.text.style.ImageSpan;
import android.text.style.RelativeSizeSpan;
import android.text.style.StyleSpan;
import android.text.style.URLSpan;
import android.text.util.Linkify;
import android.util.AttributeSet;
import android.util.Log;
import android.view.View;
import android.widget.FrameLayout;
import android.widget.ImageView;
import android.widget.TextView;
import android.widget.Toast;
public class MessageView extends FrameLayout {
    private static int sCacheSize = 512; // max number of cached thumbnails (LruCache counts entries, not bytes)
private static LruCache<String,Bitmap> mBitmapCache = new LruCache<String,Bitmap>(sCacheSize);
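    // Editorial aside: the field above makes the cache entry-bounded. If a
    // byte budget (e.g. a real 1 MiB) were intended, LruCache needs a sizeOf
    // override; a hypothetical variant, not used anywhere in this class:
    private static class ByteBoundedBitmapCache extends LruCache<String, Bitmap> {
        ByteBoundedBitmapCache(int maxBytes) {
            super(maxBytes); // e.g. new ByteBoundedBitmapCache(1024 * 1024)
        }
        @Override
        protected int sizeOf(String key, Bitmap value) {
            // measure each entry in bytes so maxSize becomes a byte budget
            return value.getRowBytes() * value.getHeight();
        }
    }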
public enum DeliveryState {
NEUTRAL, DELIVERED, UNDELIVERED
}
public enum EncryptionState {
NONE, ENCRYPTED, ENCRYPTED_AND_VERIFIED
}
private CharSequence lastMessage = null;
private Context context;
public MessageView(Context context, AttributeSet attrs) {
super(context, attrs);
this.context = context;
}
private ViewHolder mHolder = null;
private final static DateFormat MESSAGE_DATETIME_FORMAT = SimpleDateFormat.getDateTimeInstance(DateFormat.SHORT, DateFormat.SHORT);
private final static DateFormat MESSAGE_TIME_FORMAT = SimpleDateFormat.getTimeInstance(DateFormat.SHORT);
private static final SimpleDateFormat FMT_SAME_DAY = new SimpleDateFormat("yyyyMMdd");
private final static Date DATE_NOW = new Date();
private final static char DELIVERED_SUCCESS = '\u2714';
private final static char DELIVERED_FAIL = '\u2718';
private final static String LOCK_CHAR = "Secure";
class ViewHolder
{
TextView mTextViewForMessages = (TextView) findViewById(R.id.message);
TextView mTextViewForTimestamp = (TextView) findViewById(R.id.messagets);
ImageView mAvatar = (ImageView) findViewById(R.id.avatar);
// View mStatusBlock = findViewById(R.id.status_block);
ImageView mMediaThumbnail = (ImageView) findViewById(R.id.media_thumbnail);
View mContainer = findViewById(R.id.message_container);
// save the media uri while the MediaScanner is creating the thumbnail
// if the holder was reused, the pair is broken
Uri mMediaUri = null;
public void setOnClickListenerMediaThumbnail( final String mimeType, final Uri mediaUri ) {
mMediaThumbnail.setOnClickListener( new OnClickListener() {
@Override
public void onClick(View v) {
onClickMediaIcon( mimeType, mediaUri );
}
});
}
public void resetOnClickListenerMediaThumbnail() {
mMediaThumbnail.setOnClickListener( null );
}
long mTimeDiff = -1;
}
@Override
protected void onFinishInflate() {
super.onFinishInflate();
mHolder = (ViewHolder)getTag();
if (mHolder == null)
{
mHolder = new ViewHolder();
setTag(mHolder);
}
}
public void setMessageBackground (Drawable d)
{
mHolder.mContainer.setBackgroundDrawable(d);
}
public URLSpan[] getMessageLinks() {
return mHolder.mTextViewForMessages.getUrls();
}
public String getLastMessage () {
return lastMessage.toString();
}
public void bindIncomingMessage(int id, String address, String nickname, final String mimeType, final String body, Date date, Markup smileyRes,
boolean scrolling, EncryptionState encryption, boolean showContact, int presenceStatus) {
mHolder = (ViewHolder)getTag();
mHolder.mTextViewForMessages.setVisibility(View.VISIBLE);
if (nickname == null)
nickname = address;
if (showContact && nickname != null)
{
String[] nickParts = nickname.split("/");
lastMessage = nickParts[nickParts.length-1] + ": " + formatMessage(body);
}
else
{
lastMessage = formatMessage(body);
showAvatar(address,true,presenceStatus);
mHolder.resetOnClickListenerMediaThumbnail();
if( mimeType != null ) {
mHolder.mTextViewForMessages.setVisibility(View.GONE);
mHolder.mMediaThumbnail.setVisibility(View.VISIBLE);
Uri mediaUri = Uri.parse( body ) ;
lastMessage = "";
showMediaThumbnail(mimeType, mediaUri, id, mHolder);
} else {
mHolder.mMediaThumbnail.setVisibility(View.GONE);
if (showContact)
{
String[] nickParts = nickname.split("/");
lastMessage = nickParts[nickParts.length-1] + ": " + formatMessage(body);
}
else
{
lastMessage = formatMessage(body);
}
}
}
if (lastMessage.length() > 0)
{
try {
SpannableString spannablecontent=new SpannableString(lastMessage);
EmojiManager.getInstance(getContext()).addEmoji(getContext(), spannablecontent);
mHolder.mTextViewForMessages.setText(spannablecontent);
} catch (IOException e) {
LogCleaner.error(ImApp.LOG_TAG, "error processing message", e);
}
}
else
{
mHolder.mTextViewForMessages.setText(lastMessage);
}
if (date != null)
{
CharSequence tsText = null;
if (isSameDay(date,DATE_NOW))
tsText = formatTimeStamp(date,MESSAGE_TIME_FORMAT, null, encryption);
else
tsText = formatTimeStamp(date,MESSAGE_DATETIME_FORMAT, null, encryption);
mHolder.mTextViewForTimestamp.setText(tsText);
mHolder.mTextViewForTimestamp.setVisibility(View.VISIBLE);
}
else
{
mHolder.mTextViewForTimestamp.setText("");
//mHolder.mTextViewForTimestamp.setVisibility(View.GONE);
}
Linkify.addLinks(mHolder.mTextViewForMessages, Linkify.ALL);
}
private void showMediaThumbnail (String mimeType, Uri mediaUri, int id, ViewHolder holder)
{
holder.setOnClickListenerMediaThumbnail(mimeType, mediaUri);
holder.mMediaThumbnail.setVisibility(View.VISIBLE);
holder.mTextViewForMessages.setText(lastMessage);
holder.mTextViewForMessages.setVisibility(View.GONE);
if( mimeType.startsWith("image/") ) {
setImageThumbnail( getContext().getContentResolver(), id, holder, mediaUri );
holder.mMediaThumbnail.setBackgroundColor(Color.TRANSPARENT);
// holder.mMediaThumbnail.setBackgroundColor(Color.WHITE);
}
else if (mimeType.startsWith("audio") || mimeType.startsWith("video"))
{
holder.mMediaThumbnail.setImageResource(R.drawable.media_audio_play);
holder.mMediaThumbnail.setBackgroundColor(Color.TRANSPARENT);
}
else
{
holder.mMediaThumbnail.setImageResource(R.drawable.ic_file); // generic file icon
}
holder.mContainer.setBackgroundColor(getResources().getColor(android.R.color.transparent));
}
private boolean isSameDay (Date date1, Date date2)
{
return FMT_SAME_DAY.format(date1).equals(FMT_SAME_DAY.format(date2));
}
protected String convertMediaUriToPath(Uri uri) {
String path = null;
String [] proj={MediaStore.Images.Media.DATA};
Cursor cursor = getContext().getContentResolver().query(uri, proj, null, null, null);
if (cursor != null && (!cursor.isClosed()))
{
            // moveToFirst() returns false on an empty cursor; the previous
            // isBeforeFirst() check passed even when no rows came back
            if (cursor.moveToFirst())
            {
                int column_index = cursor.getColumnIndexOrThrow(MediaStore.Images.Media.DATA);
                path = cursor.getString(column_index);
            }
}
cursor.close();
}
return path;
}
private MediaPlayer mMediaPlayer = null;
/**
* @param mimeType
* @param body
*/
protected void onClickMediaIcon(String mimeType, Uri mediaUri) {
if (IocVfs.isVfsScheme(mediaUri.getScheme())) {
if (mimeType.startsWith("image")) {
Intent intent = new Intent(context, ImageViewActivity.class);
intent.putExtra( ImageViewActivity.FILENAME, mediaUri.getPath());
context.startActivity(intent);
return;
}
if (mimeType.startsWith("audio")) {
Intent intent = new Intent(context, AudioPlayerActivity.class);
intent.putExtra( AudioPlayerActivity.FILENAME, mediaUri.getPath());
intent.putExtra( AudioPlayerActivity.MIMETYPE, mimeType);
context.startActivity(intent);
return;
}
return;
}
else
{
String body = convertMediaUriToPath(mediaUri);
if (body == null)
body = new File(mediaUri.getPath()).getAbsolutePath();
if (mimeType.startsWith("audio") || (body.endsWith("3gp")||body.endsWith("3gpp")||body.endsWith("amr")))
{
if (mMediaPlayer != null)
mMediaPlayer.release();
try
{
mMediaPlayer = new MediaPlayer();
mMediaPlayer.setAudioStreamType(AudioManager.STREAM_MUSIC);
mMediaPlayer.setDataSource(body);
mMediaPlayer.prepare();
mMediaPlayer.start();
return;
} catch (IOException e) {
Log.e(ImApp.LOG_TAG,"error playing audio: " + body,e);
}
}
Intent intent = new Intent(Intent.ACTION_VIEW);
intent.setFlags(Intent.FLAG_ACTIVITY_CLEAR_TASK|Intent.FLAG_ACTIVITY_NEW_TASK);
//set a general mime type not specific
if (mimeType != null)
{
intent.setDataAndType(Uri.parse( body ), mimeType);
}
else
{
intent.setData(Uri.parse( body ));
}
Context context = getContext().getApplicationContext();
if (isIntentAvailable(context,intent))
{
context.startActivity(intent);
}
else
{
Toast.makeText(getContext(), R.string.there_is_no_viewer_available_for_this_file_format, Toast.LENGTH_LONG).show();
}
}
}
public static boolean isIntentAvailable(Context context, Intent intent) {
final PackageManager packageManager = context.getPackageManager();
List<ResolveInfo> list =
packageManager.queryIntentActivities(intent,
PackageManager.MATCH_DEFAULT_ONLY);
return list.size() > 0;
}
/**
* @param contentResolver
* @param id
* @param aHolder
* @param mediaUri
*/
private void setImageThumbnail(final ContentResolver contentResolver, final int id, final ViewHolder aHolder, final Uri mediaUri) {
// pair this holder to the uri. if the holder is recycled, the pairing is broken
aHolder.mMediaUri = mediaUri;
// if a content uri - already scanned
setThumbnail(contentResolver, aHolder, mediaUri);
}
/**
* @param contentResolver
* @param aHolder
* @param uri
*/
private void setThumbnail(final ContentResolver contentResolver, final ViewHolder aHolder, final Uri uri) {
new AsyncTask<String, Void, Bitmap>() {
@Override
protected Bitmap doInBackground(String... params) {
Bitmap result = mBitmapCache.get(uri.toString());
if (result == null)
return getThumbnail( contentResolver, uri );
else
return result;
}
@Override
            protected void onPostExecute(Bitmap result) {
                if (uri != null && result != null)
                {
                    mBitmapCache.put(uri.toString(), result);
                }
                // confirm the holder is still paired to this uri
                if( uri == null || ! uri.equals( aHolder.mMediaUri ) ) {
                    return ;
                }
                // thumbnail extraction failed, use broken image icon
                if( result == null ) {
                    aHolder.mMediaThumbnail.setImageResource(R.drawable.ic_broken_image);
                    return ;
                }
                // set the thumbnail
                aHolder.mMediaThumbnail.setImageBitmap(result);
            }
}.execute();
}
public final static int THUMBNAIL_SIZE = 800;
public static Bitmap getThumbnail(ContentResolver cr, Uri uri) {
if (IocVfs.isVfsScheme(uri.getScheme())) {
return IocVfs.getThumbnailVfs(cr, uri);
}
return getThumbnailFile(cr, uri);
}
public static Bitmap getThumbnailFile(ContentResolver cr, Uri uri) {
java.io.File image = new java.io.File(uri.getPath());
if (!image.exists())
{
image = new info.guardianproject.iocipher.File(uri.getPath());
if (!image.exists())
return null;
}
BitmapFactory.Options options = new BitmapFactory.Options();
options.inJustDecodeBounds = true;
options.inInputShareable = true;
options.inPurgeable = true;
BitmapFactory.decodeFile(image.getPath(), options);
if ((options.outWidth == -1) || (options.outHeight == -1))
return null;
int originalSize = (options.outHeight > options.outWidth) ? options.outHeight
: options.outWidth;
BitmapFactory.Options opts = new BitmapFactory.Options();
opts.inSampleSize = originalSize / THUMBNAIL_SIZE;
Bitmap scaledBitmap = BitmapFactory.decodeFile(image.getPath(), opts);
return scaledBitmap;
}
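    // Editorial aside (hypothetical helper, not called above): BitmapFactory
    // rounds inSampleSize down to a power of two, so computing that power
    // explicitly documents what the decoder does with the plain ratio used
    // in getThumbnailFile().
    private static int sampleSizeFor(int originalSize, int targetSize) {
        int sample = 1;
        // keep doubling while the decoded edge would still be >= the target
        while (originalSize / (sample * 2) >= targetSize) {
            sample *= 2;
        }
        return sample;
    }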
private String formatMessage (String body)
{
return android.text.Html.fromHtml(body).toString();
}
public void bindOutgoingMessage(int id, String address, final String mimeType, final String body, Date date, Markup smileyRes, boolean scrolling,
DeliveryState delivery, EncryptionState encryption) {
mHolder = (ViewHolder)getTag();
mHolder.mTextViewForMessages.setVisibility(View.VISIBLE);
mHolder.resetOnClickListenerMediaThumbnail();
if( mimeType != null ) {
lastMessage = "";
Uri mediaUri = Uri.parse( body ) ;
showMediaThumbnail(mimeType, mediaUri, id, mHolder);
mHolder.mTextViewForMessages.setVisibility(View.GONE);
mHolder.mMediaThumbnail.setVisibility(View.VISIBLE);
} else {
mHolder.mMediaThumbnail.setVisibility(View.GONE);
lastMessage = body;//formatMessage(body);
try {
SpannableString spannablecontent=new SpannableString(lastMessage);
EmojiManager.getInstance(getContext()).addEmoji(getContext(), spannablecontent);
mHolder.mTextViewForMessages.setText(spannablecontent);
} catch (IOException e) {
                LogCleaner.error(ImApp.LOG_TAG, "error processing message", e);
}
}
/**
mHolder.mStatusBlock.setVisibility(VISIBLE);
// mHolder.mMessageContainer.setBackgroundResource(R.drawable.background_plaintext);
if (encryption == EncryptionState.NONE)
{
mHolder.mStatusBlock.setBackgroundResource(R.color.holo_red_dark);
}
else if (encryption == EncryptionState.ENCRYPTED)
{
mHolder.mStatusBlock.setBackgroundResource(R.color.holo_orange_light);
}
else if (encryption == EncryptionState.ENCRYPTED_AND_VERIFIED)
{
mHolder.mStatusBlock.setBackgroundResource(R.color.holo_green_dark);
}*/
if (date != null)
{
CharSequence tsText = null;
if (isSameDay(date,DATE_NOW))
tsText = formatTimeStamp(date,MESSAGE_TIME_FORMAT, delivery, encryption);
else
tsText = formatTimeStamp(date,MESSAGE_DATETIME_FORMAT, delivery, encryption);
mHolder.mTextViewForTimestamp.setText(tsText);
mHolder.mTextViewForTimestamp.setVisibility(View.VISIBLE);
}
else
{
mHolder.mTextViewForTimestamp.setText("");
}
Linkify.addLinks(mHolder.mTextViewForMessages, Linkify.ALL);
}
private static RoundedAvatarDrawable AVATAR_DEFAULT;
private void showAvatar (String address, boolean isLeft, int encryptionState)
{
mHolder.mAvatar.setVisibility(View.GONE);
if (address != null)
{
RoundedAvatarDrawable avatar = DatabaseUtils.getAvatarFromAddress(this.getContext().getContentResolver(),address, ImApp.DEFAULT_AVATAR_WIDTH,ImApp.DEFAULT_AVATAR_HEIGHT);
if (avatar != null)
{
if (isLeft)
{
mHolder.mAvatar.setVisibility(View.VISIBLE);
mHolder.mAvatar.setImageDrawable(avatar);
}
}
else
{
if (AVATAR_DEFAULT == null)
{
AVATAR_DEFAULT = new RoundedAvatarDrawable(BitmapFactory.decodeResource(getResources(),
R.drawable.avatar_unknown));
}
avatar = AVATAR_DEFAULT;
mHolder.mAvatar.setVisibility(View.VISIBLE);
mHolder.mAvatar.setImageDrawable(avatar);
}
setAvatarBorder(encryptionState, avatar);
}
}
public void bindPresenceMessage(String contact, int type, boolean isGroupChat, boolean scrolling) {
mHolder = (ViewHolder)getTag();
CharSequence message = formatPresenceUpdates(contact, type, isGroupChat, scrolling);
mHolder.mTextViewForMessages.setText(message);
// mHolder.mTextViewForMessages.setTextColor(getResources().getColor(R.color.chat_msg_presence));
}
public void bindErrorMessage(int errCode) {
mHolder = (ViewHolder)getTag();
mHolder.mTextViewForMessages.setText(R.string.msg_sent_failed);
mHolder.mTextViewForMessages.setTextColor(getResources().getColor(R.color.error));
}
private SpannableString formatTimeStamp(Date date, DateFormat format, MessageView.DeliveryState delivery, EncryptionState encryptionState) {
StringBuilder deliveryText = new StringBuilder();
deliveryText.append(format.format(date));
deliveryText.append(' ');
if (delivery != null)
{
if (delivery == DeliveryState.DELIVERED) {
deliveryText.append(DELIVERED_SUCCESS);
} else if (delivery == DeliveryState.UNDELIVERED) {
deliveryText.append(DELIVERED_FAIL);
}
}
SpannableString spanText = null;
if (encryptionState == EncryptionState.ENCRYPTED)
{
deliveryText.append('X');
spanText = new SpannableString(deliveryText.toString());
int len = spanText.length();
spanText.setSpan(new ImageSpan(getContext(), R.drawable.lock16), len-1,len,Spannable.SPAN_EXCLUSIVE_EXCLUSIVE);
}
else if (encryptionState == EncryptionState.ENCRYPTED_AND_VERIFIED)
{
deliveryText.append('X');
spanText = new SpannableString(deliveryText.toString());
int len = spanText.length();
spanText.setSpan(new ImageSpan(getContext(), R.drawable.lock16), len-1,len,Spannable.SPAN_EXCLUSIVE_EXCLUSIVE);
}
else
{
spanText = new SpannableString(deliveryText.toString());
}
// spanText.setSpan(new StyleSpan(Typeface.SANS_SERIF), 0, len, Spannable.SPAN_EXCLUSIVE_EXCLUSIVE);
// spanText.setSpan(new RelativeSizeSpan(0.8f), 0, len, Spannable.SPAN_EXCLUSIVE_EXCLUSIVE);
// spanText.setSpan(new ForegroundColorSpan(R.color.soft_grey),
// 0, len, Spannable.SPAN_EXCLUSIVE_EXCLUSIVE);
return spanText;
}
private CharSequence formatPresenceUpdates(String contact, int type, boolean isGroupChat,
boolean scrolling) {
String body;
Resources resources =getResources();
switch (type) {
case Imps.MessageType.PRESENCE_AVAILABLE:
body = resources.getString(isGroupChat ? R.string.contact_joined
: R.string.contact_online, contact);
break;
case Imps.MessageType.PRESENCE_AWAY:
body = resources.getString(R.string.contact_away, contact);
break;
case Imps.MessageType.PRESENCE_DND:
body = resources.getString(R.string.contact_busy, contact);
break;
case Imps.MessageType.PRESENCE_UNAVAILABLE:
body = resources.getString(isGroupChat ? R.string.contact_left
: R.string.contact_offline, contact);
break;
default:
return null;
}
if (scrolling) {
return body;
} else {
SpannableString spanText = new SpannableString(body);
int len = spanText.length();
spanText.setSpan(new StyleSpan(Typeface.ITALIC), 0, len,
Spannable.SPAN_EXCLUSIVE_EXCLUSIVE);
spanText.setSpan(new RelativeSizeSpan((float) 0.8), 0, len,
Spannable.SPAN_EXCLUSIVE_EXCLUSIVE);
return spanText;
}
}
public void setAvatarBorder(int status, RoundedAvatarDrawable avatar) {
switch (status) {
case Imps.Presence.AVAILABLE:
avatar.setBorderColor(getResources().getColor(R.color.holo_green_light));
avatar.setAlpha(255);
break;
case Imps.Presence.IDLE:
avatar.setBorderColor(getResources().getColor(R.color.holo_green_dark));
avatar.setAlpha(255);
break;
case Imps.Presence.AWAY:
avatar.setBorderColor(getResources().getColor(R.color.holo_orange_light));
avatar.setAlpha(255);
break;
case Imps.Presence.DO_NOT_DISTURB:
avatar.setBorderColor(getResources().getColor(R.color.holo_red_dark));
avatar.setAlpha(255);
break;
case Imps.Presence.OFFLINE:
avatar.setBorderColor(getResources().getColor(R.color.holo_grey_light));
avatar.setAlpha(100);
break;
default:
}
}
}
| src/info/guardianproject/otr/app/im/app/MessageView.java | /*
* Copyright (C) 2008 Esmertec AG. Copyright (C) 2008 The Android Open Source
* Project
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package info.guardianproject.otr.app.im.app;
import info.guardianproject.emoji.EmojiManager;
import info.guardianproject.otr.app.im.R;
import info.guardianproject.otr.app.im.provider.Imps;
import info.guardianproject.otr.app.im.ui.AudioPlayerActivity;
import info.guardianproject.otr.app.im.ui.ImageViewActivity;
import info.guardianproject.otr.app.im.ui.RoundedAvatarDrawable;
import info.guardianproject.util.LogCleaner;
import java.io.File;
import java.io.IOException;
import java.text.DateFormat;
import java.text.SimpleDateFormat;
import java.util.Date;
import java.util.List;
import android.content.ContentResolver;
import android.content.Context;
import android.content.Intent;
import android.content.pm.PackageManager;
import android.content.pm.ResolveInfo;
import android.content.res.Resources;
import android.database.Cursor;
import android.graphics.Bitmap;
import android.graphics.BitmapFactory;
import android.graphics.Color;
import android.graphics.Typeface;
import android.graphics.drawable.Drawable;
import android.media.AudioManager;
import android.media.MediaPlayer;
import android.net.Uri;
import android.os.AsyncTask;
import android.provider.MediaStore;
import android.support.v4.util.LruCache;
import android.text.Spannable;
import android.text.SpannableString;
import android.text.style.ImageSpan;
import android.text.style.RelativeSizeSpan;
import android.text.style.StyleSpan;
import android.text.style.URLSpan;
import android.text.util.Linkify;
import android.util.AttributeSet;
import android.util.Log;
import android.view.View;
import android.widget.FrameLayout;
import android.widget.ImageView;
import android.widget.TextView;
import android.widget.Toast;
public class MessageView extends FrameLayout {
    private static int sCacheSize = 512; // max cached bitmaps; LruCache counts entries here, not bytes (sizeOf is not overridden)
private static LruCache<String,Bitmap> mBitmapCache = new LruCache<String,Bitmap>(sCacheSize);
public enum DeliveryState {
NEUTRAL, DELIVERED, UNDELIVERED
}
public enum EncryptionState {
NONE, ENCRYPTED, ENCRYPTED_AND_VERIFIED
}
private CharSequence lastMessage = null;
private Context context;
public MessageView(Context context, AttributeSet attrs) {
super(context, attrs);
this.context = context;
}
private ViewHolder mHolder = null;
private final static DateFormat MESSAGE_DATETIME_FORMAT = SimpleDateFormat.getDateTimeInstance(DateFormat.SHORT, DateFormat.SHORT);
private final static DateFormat MESSAGE_TIME_FORMAT = SimpleDateFormat.getTimeInstance(DateFormat.SHORT);
private static final SimpleDateFormat FMT_SAME_DAY = new SimpleDateFormat("yyyyMMdd");
private final static Date DATE_NOW = new Date();
private final static char DELIVERED_SUCCESS = '\u2714';
private final static char DELIVERED_FAIL = '\u2718';
private final static String LOCK_CHAR = "Secure";
class ViewHolder
{
TextView mTextViewForMessages = (TextView) findViewById(R.id.message);
TextView mTextViewForTimestamp = (TextView) findViewById(R.id.messagets);
ImageView mAvatar = (ImageView) findViewById(R.id.avatar);
// View mStatusBlock = findViewById(R.id.status_block);
ImageView mMediaThumbnail = (ImageView) findViewById(R.id.media_thumbnail);
View mContainer = findViewById(R.id.message_container);
// save the media uri while the MediaScanner is creating the thumbnail
// if the holder was reused, the pair is broken
Uri mMediaUri = null;
public void setOnClickListenerMediaThumbnail( final String mimeType, final Uri mediaUri ) {
mMediaThumbnail.setOnClickListener( new OnClickListener() {
@Override
public void onClick(View v) {
onClickMediaIcon( mimeType, mediaUri );
}
});
}
public void resetOnClickListenerMediaThumbnail() {
mMediaThumbnail.setOnClickListener( null );
}
long mTimeDiff = -1;
}
@Override
protected void onFinishInflate() {
super.onFinishInflate();
mHolder = (ViewHolder)getTag();
if (mHolder == null)
{
mHolder = new ViewHolder();
setTag(mHolder);
}
}
public void setMessageBackground (Drawable d)
{
mHolder.mContainer.setBackgroundDrawable(d);
}
public URLSpan[] getMessageLinks() {
return mHolder.mTextViewForMessages.getUrls();
}
public String getLastMessage () {
return lastMessage.toString();
}
public void bindIncomingMessage(int id, String address, String nickname, final String mimeType, final String body, Date date, Markup smileyRes,
boolean scrolling, EncryptionState encryption, boolean showContact, int presenceStatus) {
mHolder = (ViewHolder)getTag();
mHolder.mTextViewForMessages.setVisibility(View.VISIBLE);
if (nickname == null)
nickname = address;
if (showContact && nickname != null)
{
String[] nickParts = nickname.split("/");
lastMessage = nickParts[nickParts.length-1] + ": " + formatMessage(body);
}
else
{
lastMessage = formatMessage(body);
showAvatar(address,true,presenceStatus);
mHolder.resetOnClickListenerMediaThumbnail();
if( mimeType != null ) {
mHolder.mTextViewForMessages.setVisibility(View.GONE);
mHolder.mMediaThumbnail.setVisibility(View.VISIBLE);
Uri mediaUri = Uri.parse( body ) ;
lastMessage = "";
showMediaThumbnail(mimeType, mediaUri, id, mHolder);
} else {
mHolder.mMediaThumbnail.setVisibility(View.GONE);
if (showContact)
{
String[] nickParts = nickname.split("/");
lastMessage = nickParts[nickParts.length-1] + ": " + formatMessage(body);
}
else
{
lastMessage = formatMessage(body);
}
}
}
if (lastMessage.length() > 0)
{
try {
SpannableString spannablecontent=new SpannableString(lastMessage);
EmojiManager.getInstance(getContext()).addEmoji(getContext(), spannablecontent);
mHolder.mTextViewForMessages.setText(spannablecontent);
} catch (IOException e) {
LogCleaner.error(ImApp.LOG_TAG, "error processing message", e);
}
}
else
{
mHolder.mTextViewForMessages.setText(lastMessage);
}
if (date != null)
{
CharSequence tsText = null;
if (isSameDay(date,DATE_NOW))
tsText = formatTimeStamp(date,MESSAGE_TIME_FORMAT, null, encryption);
else
tsText = formatTimeStamp(date,MESSAGE_DATETIME_FORMAT, null, encryption);
mHolder.mTextViewForTimestamp.setText(tsText);
mHolder.mTextViewForTimestamp.setVisibility(View.VISIBLE);
}
else
{
mHolder.mTextViewForTimestamp.setText("");
//mHolder.mTextViewForTimestamp.setVisibility(View.GONE);
}
Linkify.addLinks(mHolder.mTextViewForMessages, Linkify.ALL);
}
private void showMediaThumbnail (String mimeType, Uri mediaUri, int id, ViewHolder holder)
{
holder.setOnClickListenerMediaThumbnail(mimeType, mediaUri);
holder.mMediaThumbnail.setVisibility(View.VISIBLE);
holder.mTextViewForMessages.setText(lastMessage);
holder.mTextViewForMessages.setVisibility(View.GONE);
if( mimeType.startsWith("image/") ) {
setImageThumbnail( getContext().getContentResolver(), id, holder, mediaUri );
holder.mMediaThumbnail.setBackgroundColor(Color.TRANSPARENT);
// holder.mMediaThumbnail.setBackgroundColor(Color.WHITE);
}
else if (mimeType.startsWith("audio") || mimeType.startsWith("video"))
{
holder.mMediaThumbnail.setImageResource(R.drawable.media_audio_play);
holder.mMediaThumbnail.setBackgroundColor(Color.TRANSPARENT);
}
else
{
holder.mMediaThumbnail.setImageResource(R.drawable.ic_file); // generic file icon
}
holder.mContainer.setBackgroundColor(getResources().getColor(android.R.color.transparent));
}
private boolean isSameDay (Date date1, Date date2)
{
return FMT_SAME_DAY.format(date1).equals(FMT_SAME_DAY.format(date2));
}
protected String convertMediaUriToPath(Uri uri) {
String path = null;
String [] proj={MediaStore.Images.Media.DATA};
Cursor cursor = getContext().getContentResolver().query(uri, proj, null, null, null);
        if (cursor != null && (!cursor.isClosed()))
        {
            // moveToFirst() returns false when the cursor is empty, which guards against
            // a CursorIndexOutOfBoundsException on queries with no rows
            if (cursor.moveToFirst())
            {
                int column_index = cursor.getColumnIndexOrThrow(MediaStore.Images.Media.DATA);
                path = cursor.getString(column_index);
            }
            cursor.close();
        }
return path;
}
private MediaPlayer mMediaPlayer = null;
/**
* @param mimeType
* @param body
*/
protected void onClickMediaIcon(String mimeType, Uri mediaUri) {
if (IocVfs.isVfsScheme(mediaUri.getScheme())) {
if (mimeType.startsWith("image")) {
Intent intent = new Intent(context, ImageViewActivity.class);
intent.putExtra( ImageViewActivity.FILENAME, mediaUri.getPath());
context.startActivity(intent);
return;
}
if (mimeType.startsWith("audio")) {
Intent intent = new Intent(context, AudioPlayerActivity.class);
intent.putExtra( AudioPlayerActivity.FILENAME, mediaUri.getPath());
intent.putExtra( AudioPlayerActivity.MIMETYPE, mimeType);
context.startActivity(intent);
return;
}
return;
}
String body = convertMediaUriToPath(mediaUri);
if (body == null)
body = new File(mediaUri.getPath()).getAbsolutePath();
if (mimeType.startsWith("audio") || (body.endsWith("3gp")||body.endsWith("3gpp")||body.endsWith("amr")))
{
if (mMediaPlayer != null)
mMediaPlayer.release();
try
{
mMediaPlayer = new MediaPlayer();
mMediaPlayer.setAudioStreamType(AudioManager.STREAM_MUSIC);
mMediaPlayer.setDataSource(body);
mMediaPlayer.prepare();
mMediaPlayer.start();
return;
} catch (IOException e) {
Log.e(ImApp.LOG_TAG,"error playing audio: " + body,e);
}
}
Intent intent = new Intent(Intent.ACTION_VIEW);
intent.setFlags(Intent.FLAG_ACTIVITY_CLEAR_TASK|Intent.FLAG_ACTIVITY_NEW_TASK);
//set a general mime type not specific
if (mimeType != null)
{
intent.setDataAndType(Uri.parse( body ), mimeType);
}
else
{
intent.setData(Uri.parse( body ));
}
Context context = getContext().getApplicationContext();
if (isIntentAvailable(context,intent))
{
context.startActivity(intent);
}
else
{
Toast.makeText(getContext(), R.string.there_is_no_viewer_available_for_this_file_format, Toast.LENGTH_LONG).show();
}
}
public static boolean isIntentAvailable(Context context, Intent intent) {
final PackageManager packageManager = context.getPackageManager();
List<ResolveInfo> list =
packageManager.queryIntentActivities(intent,
PackageManager.MATCH_DEFAULT_ONLY);
return list.size() > 0;
}
/**
* @param contentResolver
* @param id
* @param aHolder
* @param mediaUri
*/
private void setImageThumbnail(final ContentResolver contentResolver, final int id, final ViewHolder aHolder, final Uri mediaUri) {
// pair this holder to the uri. if the holder is recycled, the pairing is broken
aHolder.mMediaUri = mediaUri;
// if a content uri - already scanned
setThumbnail(contentResolver, aHolder, mediaUri);
}
/**
* @param contentResolver
* @param aHolder
* @param uri
*/
private void setThumbnail(final ContentResolver contentResolver, final ViewHolder aHolder, final Uri uri) {
new AsyncTask<String, Void, Bitmap>() {
@Override
protected Bitmap doInBackground(String... params) {
Bitmap result = mBitmapCache.get(uri.toString());
if (result == null)
return getThumbnail( contentResolver, uri );
else
return result;
}
@Override
protected void onPostExecute(Bitmap result) {
                if (uri == null) {
                    return;
                }
                // confirm the holder is still paired to this uri
                if( ! uri.equals( aHolder.mMediaUri ) ) {
                    return;
                }
                // thumbnail extraction failed, use broken image icon
                // (checked before caching, so a failed extraction is retried on the next bind)
                if( result == null ) {
                    aHolder.mMediaThumbnail.setImageResource(R.drawable.ic_broken_image);
                    return;
                }
                mBitmapCache.put(uri.toString(), result);
                // set the thumbnail
                aHolder.mMediaThumbnail.setImageBitmap(result);
}
}.execute();
}
public final static int THUMBNAIL_SIZE = 800;
public static Bitmap getThumbnail(ContentResolver cr, Uri uri) {
if (IocVfs.isVfsScheme(uri.getScheme())) {
return IocVfs.getThumbnailVfs(cr, uri);
}
return getThumbnailFile(cr, uri);
}
public static Bitmap getThumbnailFile(ContentResolver cr, Uri uri) {
File image = new File(uri.getPath());
BitmapFactory.Options options = new BitmapFactory.Options();
options.inJustDecodeBounds = true;
options.inInputShareable = true;
options.inPurgeable = true;
BitmapFactory.decodeFile(image.getPath(), options);
if ((options.outWidth == -1) || (options.outHeight == -1))
return null;
int originalSize = (options.outHeight > options.outWidth) ? options.outHeight
: options.outWidth;
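        // integer downscale factor toward THUMBNAIL_SIZE; BitmapFactory treats
        // inSampleSize values <= 1 as 1, so smaller images are decoded at full size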
BitmapFactory.Options opts = new BitmapFactory.Options();
opts.inSampleSize = originalSize / THUMBNAIL_SIZE;
Bitmap scaledBitmap = BitmapFactory.decodeFile(image.getPath(), opts);
return scaledBitmap;
}
private String formatMessage (String body)
{
return android.text.Html.fromHtml(body).toString();
}
public void bindOutgoingMessage(int id, String address, final String mimeType, final String body, Date date, Markup smileyRes, boolean scrolling,
DeliveryState delivery, EncryptionState encryption) {
mHolder = (ViewHolder)getTag();
mHolder.mTextViewForMessages.setVisibility(View.VISIBLE);
mHolder.resetOnClickListenerMediaThumbnail();
if( mimeType != null ) {
lastMessage = "";
Uri mediaUri = Uri.parse( body ) ;
showMediaThumbnail(mimeType, mediaUri, id, mHolder);
mHolder.mTextViewForMessages.setVisibility(View.GONE);
mHolder.mMediaThumbnail.setVisibility(View.VISIBLE);
} else {
mHolder.mMediaThumbnail.setVisibility(View.GONE);
lastMessage = body;//formatMessage(body);
try {
SpannableString spannablecontent=new SpannableString(lastMessage);
EmojiManager.getInstance(getContext()).addEmoji(getContext(), spannablecontent);
mHolder.mTextViewForMessages.setText(spannablecontent);
} catch (IOException e) {
                LogCleaner.error(ImApp.LOG_TAG, "error processing message", e);
}
}
/**
mHolder.mStatusBlock.setVisibility(VISIBLE);
// mHolder.mMessageContainer.setBackgroundResource(R.drawable.background_plaintext);
if (encryption == EncryptionState.NONE)
{
mHolder.mStatusBlock.setBackgroundResource(R.color.holo_red_dark);
}
else if (encryption == EncryptionState.ENCRYPTED)
{
mHolder.mStatusBlock.setBackgroundResource(R.color.holo_orange_light);
}
else if (encryption == EncryptionState.ENCRYPTED_AND_VERIFIED)
{
mHolder.mStatusBlock.setBackgroundResource(R.color.holo_green_dark);
}*/
if (date != null)
{
CharSequence tsText = null;
if (isSameDay(date,DATE_NOW))
tsText = formatTimeStamp(date,MESSAGE_TIME_FORMAT, delivery, encryption);
else
tsText = formatTimeStamp(date,MESSAGE_DATETIME_FORMAT, delivery, encryption);
mHolder.mTextViewForTimestamp.setText(tsText);
mHolder.mTextViewForTimestamp.setVisibility(View.VISIBLE);
}
else
{
mHolder.mTextViewForTimestamp.setText("");
}
Linkify.addLinks(mHolder.mTextViewForMessages, Linkify.ALL);
}
private static RoundedAvatarDrawable AVATAR_DEFAULT;
private void showAvatar (String address, boolean isLeft, int encryptionState)
{
mHolder.mAvatar.setVisibility(View.GONE);
if (address != null)
{
RoundedAvatarDrawable avatar = DatabaseUtils.getAvatarFromAddress(this.getContext().getContentResolver(),address, ImApp.DEFAULT_AVATAR_WIDTH,ImApp.DEFAULT_AVATAR_HEIGHT);
if (avatar != null)
{
if (isLeft)
{
mHolder.mAvatar.setVisibility(View.VISIBLE);
mHolder.mAvatar.setImageDrawable(avatar);
}
}
else
{
if (AVATAR_DEFAULT == null)
{
AVATAR_DEFAULT = new RoundedAvatarDrawable(BitmapFactory.decodeResource(getResources(),
R.drawable.avatar_unknown));
}
avatar = AVATAR_DEFAULT;
mHolder.mAvatar.setVisibility(View.VISIBLE);
mHolder.mAvatar.setImageDrawable(avatar);
}
setAvatarBorder(encryptionState, avatar);
}
}
public void bindPresenceMessage(String contact, int type, boolean isGroupChat, boolean scrolling) {
mHolder = (ViewHolder)getTag();
CharSequence message = formatPresenceUpdates(contact, type, isGroupChat, scrolling);
mHolder.mTextViewForMessages.setText(message);
// mHolder.mTextViewForMessages.setTextColor(getResources().getColor(R.color.chat_msg_presence));
}
public void bindErrorMessage(int errCode) {
mHolder = (ViewHolder)getTag();
mHolder.mTextViewForMessages.setText(R.string.msg_sent_failed);
mHolder.mTextViewForMessages.setTextColor(getResources().getColor(R.color.error));
}
private SpannableString formatTimeStamp(Date date, DateFormat format, MessageView.DeliveryState delivery, EncryptionState encryptionState) {
StringBuilder deliveryText = new StringBuilder();
deliveryText.append(format.format(date));
deliveryText.append(' ');
if (delivery != null)
{
if (delivery == DeliveryState.DELIVERED) {
deliveryText.append(DELIVERED_SUCCESS);
} else if (delivery == DeliveryState.UNDELIVERED) {
deliveryText.append(DELIVERED_FAIL);
}
}
SpannableString spanText = null;
if (encryptionState == EncryptionState.ENCRYPTED)
{
deliveryText.append('X');
spanText = new SpannableString(deliveryText.toString());
int len = spanText.length();
spanText.setSpan(new ImageSpan(getContext(), R.drawable.lock16), len-1,len,Spannable.SPAN_EXCLUSIVE_EXCLUSIVE);
}
else if (encryptionState == EncryptionState.ENCRYPTED_AND_VERIFIED)
{
deliveryText.append('X');
spanText = new SpannableString(deliveryText.toString());
int len = spanText.length();
spanText.setSpan(new ImageSpan(getContext(), R.drawable.lock16), len-1,len,Spannable.SPAN_EXCLUSIVE_EXCLUSIVE);
}
else
{
spanText = new SpannableString(deliveryText.toString());
int len = spanText.length();
}
// spanText.setSpan(new StyleSpan(Typeface.SANS_SERIF), 0, len, Spannable.SPAN_EXCLUSIVE_EXCLUSIVE);
// spanText.setSpan(new RelativeSizeSpan(0.8f), 0, len, Spannable.SPAN_EXCLUSIVE_EXCLUSIVE);
// spanText.setSpan(new ForegroundColorSpan(R.color.soft_grey),
// 0, len, Spannable.SPAN_EXCLUSIVE_EXCLUSIVE);
return spanText;
}
private CharSequence formatPresenceUpdates(String contact, int type, boolean isGroupChat,
boolean scrolling) {
String body;
        Resources resources = getResources();
switch (type) {
case Imps.MessageType.PRESENCE_AVAILABLE:
body = resources.getString(isGroupChat ? R.string.contact_joined
: R.string.contact_online, contact);
break;
case Imps.MessageType.PRESENCE_AWAY:
body = resources.getString(R.string.contact_away, contact);
break;
case Imps.MessageType.PRESENCE_DND:
body = resources.getString(R.string.contact_busy, contact);
break;
case Imps.MessageType.PRESENCE_UNAVAILABLE:
body = resources.getString(isGroupChat ? R.string.contact_left
: R.string.contact_offline, contact);
break;
default:
return null;
}
if (scrolling) {
return body;
} else {
SpannableString spanText = new SpannableString(body);
int len = spanText.length();
spanText.setSpan(new StyleSpan(Typeface.ITALIC), 0, len,
Spannable.SPAN_EXCLUSIVE_EXCLUSIVE);
spanText.setSpan(new RelativeSizeSpan((float) 0.8), 0, len,
Spannable.SPAN_EXCLUSIVE_EXCLUSIVE);
return spanText;
}
}
public void setAvatarBorder(int status, RoundedAvatarDrawable avatar) {
switch (status) {
case Imps.Presence.AVAILABLE:
avatar.setBorderColor(getResources().getColor(R.color.holo_green_light));
avatar.setAlpha(255);
break;
case Imps.Presence.IDLE:
avatar.setBorderColor(getResources().getColor(R.color.holo_green_dark));
avatar.setAlpha(255);
break;
case Imps.Presence.AWAY:
avatar.setBorderColor(getResources().getColor(R.color.holo_orange_light));
avatar.setAlpha(255);
break;
case Imps.Presence.DO_NOT_DISTURB:
avatar.setBorderColor(getResources().getColor(R.color.holo_red_dark));
avatar.setAlpha(255);
break;
case Imps.Presence.OFFLINE:
avatar.setBorderColor(getResources().getColor(R.color.holo_grey_light));
avatar.setAlpha(100);
break;
default:
}
}
}
| make display work with new iocipher/vfs content
| src/info/guardianproject/otr/app/im/app/MessageView.java | make display work with new iocipher/vfs content | <ide><path>rc/info/guardianproject/otr/app/im/app/MessageView.java
<ide> }
<ide> return;
<ide> }
<del>
<del> String body = convertMediaUriToPath(mediaUri);
<del>
<del> if (body == null)
<del> body = new File(mediaUri.getPath()).getAbsolutePath();
<del>
<del> if (mimeType.startsWith("audio") || (body.endsWith("3gp")||body.endsWith("3gpp")||body.endsWith("amr")))
<del> {
<del>
<del> if (mMediaPlayer != null)
<del> mMediaPlayer.release();
<del>
<del> try
<add> else
<add> {
<add>
<add>
<add> String body = convertMediaUriToPath(mediaUri);
<add>
<add> if (body == null)
<add> body = new File(mediaUri.getPath()).getAbsolutePath();
<add>
<add> if (mimeType.startsWith("audio") || (body.endsWith("3gp")||body.endsWith("3gpp")||body.endsWith("amr")))
<ide> {
<del> mMediaPlayer = new MediaPlayer();
<del> mMediaPlayer.setAudioStreamType(AudioManager.STREAM_MUSIC);
<del> mMediaPlayer.setDataSource(body);
<del> mMediaPlayer.prepare();
<del> mMediaPlayer.start();
<del>
<del> return;
<del> } catch (IOException e) {
<del> Log.e(ImApp.LOG_TAG,"error playing audio: " + body,e);
<del> }
<del>
<del>
<del> }
<del>
<del> Intent intent = new Intent(Intent.ACTION_VIEW);
<del> intent.setFlags(Intent.FLAG_ACTIVITY_CLEAR_TASK|Intent.FLAG_ACTIVITY_NEW_TASK);
<del>
<del> //set a general mime type not specific
<del> if (mimeType != null)
<del> {
<del> intent.setDataAndType(Uri.parse( body ), mimeType);
<del> }
<del> else
<del> {
<del> intent.setData(Uri.parse( body ));
<del> }
<del>
<del> Context context = getContext().getApplicationContext();
<del>
<del> if (isIntentAvailable(context,intent))
<del> {
<del> context.startActivity(intent);
<del> }
<del> else
<del> {
<del> Toast.makeText(getContext(), R.string.there_is_no_viewer_available_for_this_file_format, Toast.LENGTH_LONG).show();
<del> }
<del>
<add>
<add> if (mMediaPlayer != null)
<add> mMediaPlayer.release();
<add>
<add> try
<add> {
<add> mMediaPlayer = new MediaPlayer();
<add> mMediaPlayer.setAudioStreamType(AudioManager.STREAM_MUSIC);
<add> mMediaPlayer.setDataSource(body);
<add> mMediaPlayer.prepare();
<add> mMediaPlayer.start();
<add>
<add> return;
<add> } catch (IOException e) {
<add> Log.e(ImApp.LOG_TAG,"error playing audio: " + body,e);
<add> }
<add>
<add>
<add> }
<add>
<add> Intent intent = new Intent(Intent.ACTION_VIEW);
<add> intent.setFlags(Intent.FLAG_ACTIVITY_CLEAR_TASK|Intent.FLAG_ACTIVITY_NEW_TASK);
<add>
<add> //set a general mime type not specific
<add> if (mimeType != null)
<add> {
<add> intent.setDataAndType(Uri.parse( body ), mimeType);
<add> }
<add> else
<add> {
<add> intent.setData(Uri.parse( body ));
<add> }
<add>
<add> Context context = getContext().getApplicationContext();
<add>
<add> if (isIntentAvailable(context,intent))
<add> {
<add> context.startActivity(intent);
<add> }
<add> else
<add> {
<add> Toast.makeText(getContext(), R.string.there_is_no_viewer_available_for_this_file_format, Toast.LENGTH_LONG).show();
<add> }
<add> }
<ide> }
<ide>
<ide> public static boolean isIntentAvailable(Context context, Intent intent) {
<ide>
<ide> public static Bitmap getThumbnailFile(ContentResolver cr, Uri uri) {
<ide>
<del> File image = new File(uri.getPath());
<del>
<add> java.io.File image = new java.io.File(uri.getPath());
<add>
<add> if (!image.exists())
<add> {
<add> image = new info.guardianproject.iocipher.File(uri.getPath());
<add> if (!image.exists())
<add> return null;
<add> }
<add>
<ide> BitmapFactory.Options options = new BitmapFactory.Options();
<ide> options.inJustDecodeBounds = true;
<ide> options.inInputShareable = true;
<ide> options.inPurgeable = true;
<ide>
<add>
<ide> BitmapFactory.decodeFile(image.getPath(), options);
<ide> if ((options.outWidth == -1) || (options.outHeight == -1))
<ide> return null; |
|
Java | apache-2.0 | 735efebfb6eb3dd4b22f9cdbfd04dd118092a6e4 | 0 | carloshwa/apps-android-wikipedia,SAGROUP2/apps-android-wikipedia,parvez3019/apps-android-wikipedia,wikimedia/apps-android-wikipedia,dbrant/apps-android-wikipedia,parvez3019/apps-android-wikipedia,anirudh24seven/apps-android-wikipedia,dbrant/apps-android-wikipedia,carloshwa/apps-android-wikipedia,wikimedia/apps-android-wikipedia,SAGROUP2/apps-android-wikipedia,reproio/apps-android-wikipedia,Duct-and-rice/KrswtkhrWiki4Android,Duct-and-rice/KrswtkhrWiki4Android,reproio/apps-android-wikipedia,parvez3019/apps-android-wikipedia,Wikinaut/wikipedia-app,Wikinaut/wikipedia-app,parvez3019/apps-android-wikipedia,dbrant/apps-android-wikipedia,anirudh24seven/apps-android-wikipedia,dbrant/apps-android-wikipedia,Wikinaut/wikipedia-app,reproio/apps-android-wikipedia,anirudh24seven/apps-android-wikipedia,reproio/apps-android-wikipedia,anirudh24seven/apps-android-wikipedia,Duct-and-rice/KrswtkhrWiki4Android,parvez3019/apps-android-wikipedia,carloshwa/apps-android-wikipedia,Duct-and-rice/KrswtkhrWiki4Android,Wikinaut/wikipedia-app,carloshwa/apps-android-wikipedia,carloshwa/apps-android-wikipedia,Duct-and-rice/KrswtkhrWiki4Android,Wikinaut/wikipedia-app,SAGROUP2/apps-android-wikipedia,wikimedia/apps-android-wikipedia,dbrant/apps-android-wikipedia,SAGROUP2/apps-android-wikipedia,SAGROUP2/apps-android-wikipedia,reproio/apps-android-wikipedia,wikimedia/apps-android-wikipedia,anirudh24seven/apps-android-wikipedia | package org.wikipedia.nearby;
import android.content.Context;
import android.graphics.Bitmap;
import android.graphics.Canvas;
import android.graphics.Color;
import android.graphics.Paint;
import android.graphics.Path;
import android.graphics.PorterDuff;
import android.graphics.PorterDuffXfermode;
import android.util.AttributeSet;
import android.widget.ImageView;
public class NearbyCompassView extends ImageView {
private static final String TAG = "NearbyCompassView";
private Paint paintTick;
private Paint paintArrow;
private int tickColor = Color.BLACK;
    // initialize the mask color to a ridiculous value, so that the first setMaskColor()
    // call always recreates the mask bitmap, for both the Dark and the Light theme
private int maskColor = Color.MAGENTA;
// make the mask bitmap static, so that it's re-used by all instances of this view
// (we're assuming that all instances will be the same size)
private static Bitmap MASK_BMP;
private static Paint MASK_BMP_PAINT;
private float displayDensity;
private static final float TICK_WIDTH = 0.6f;
private static final int TICK_LENGTH = 3;
private static final int TICK_OFFSET = 8;
private static final int NUM_TICKS = 60;
private static final int ARROW_WIDTH = 10;
private static final int ARROW_HEIGHT = 11;
private static final int ARROW_FRUSTUM = 4;
private Path arrowPath;
private float baseAngle = 0f;
private float azimuth = 0f;
public NearbyCompassView(Context context) {
super(context);
init();
}
public NearbyCompassView(Context context, AttributeSet attrs) {
super(context, attrs);
init();
}
public NearbyCompassView(Context context, AttributeSet attrs, int defStyle) {
super(context, attrs, defStyle);
init();
}
private void init() {
displayDensity = getResources().getDisplayMetrics().density;
paintTick = new Paint();
paintTick.setAntiAlias(true);
paintTick.setColor(tickColor);
paintTick.setStyle(Paint.Style.STROKE);
paintTick.setStrokeWidth(TICK_WIDTH * displayDensity);
paintArrow = new Paint();
paintArrow.setAntiAlias(true);
paintArrow.setColor(tickColor);
paintArrow.setStyle(Paint.Style.FILL);
}
@Override
public void setEnabled(boolean enabled) {
super.setEnabled(enabled);
if (enabled) {
paintTick.setColor(tickColor);
paintArrow.setColor(tickColor);
} else {
paintTick.setColor(Color.TRANSPARENT);
paintArrow.setColor(Color.TRANSPARENT);
}
}
/**
* Set the "base" angle offset from North (in degrees), moving counterclockwise.
* For example, an angle of 90 will make the arrow point due West.
* @param angle Angle offset.
*/
public void setAngle(float angle) {
this.baseAngle = angle;
invalidate();
}
/**
* Set the azimuth, which will be added to the base angle offset for our arrow.
* For example, if the base angle is 90, and the azimuth is 45, then the arrow
* will point Southwest.
* @param azimuth Azimuth to be added to the base angle.
*/
public void setAzimuth(float azimuth) {
// if it's an insignificant change, then don't worry about it
if (Math.abs(azimuth - this.azimuth) < 1.0f) {
return;
}
this.azimuth = azimuth;
invalidate();
}
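    /* Illustrative usage (a sketch only; the variable names are hypothetical):
     *
     *   compassView.setAngle(bearingToPlace);   // fixed bearing from North, counterclockwise
     *   compassView.setAzimuth(sensorAzimuth);  // live device heading from orientation sensors
     *
     * With setAngle(90) and setAzimuth(45) the arrow ends up pointing Southwest,
     * as described in the javadoc above.
     */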
public void setTickColor(int color) {
this.tickColor = color;
init();
invalidate();
}
public void setMaskColor(int color) {
if (this.maskColor != color && MASK_BMP != null) {
MASK_BMP.recycle();
MASK_BMP = null;
}
this.maskColor = color;
init();
invalidate();
}
@Override
protected void onDraw(Canvas canvas) {
//draw the original image...
super.onDraw(canvas);
float w = this.getWidth();
float h = this.getHeight();
float centerX = w / 2;
float centerY = h / 2;
//draw the circular mask bitmap
if (MASK_BMP == null) {
MASK_BMP = Bitmap.createBitmap(this.getWidth(), this.getHeight(), Bitmap.Config.ARGB_8888);
Canvas bmpCanvas = new Canvas(MASK_BMP);
bmpCanvas.drawColor(maskColor);
Paint maskPaint = new Paint();
maskPaint.setStyle(Paint.Style.FILL);
maskPaint.setColor(Color.TRANSPARENT);
maskPaint.setAntiAlias(true);
maskPaint.setXfermode(new PorterDuffXfermode(PorterDuff.Mode.SRC));
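            // drawing TRANSPARENT with SRC mode replaces the destination pixels outright,
            // punching a see-through circle out of the solid mask color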
bmpCanvas.drawCircle(centerX, centerY, bmpCanvas.getWidth() / 2 - (TICK_OFFSET + TICK_LENGTH * 2) * displayDensity, maskPaint);
MASK_BMP_PAINT = new Paint();
MASK_BMP_PAINT.setXfermode(new PorterDuffXfermode(PorterDuff.Mode.SRC_OVER));
}
canvas.drawBitmap(MASK_BMP, 0, 0, MASK_BMP_PAINT);
canvas.save();
//set the initial rotation based on our given angle and azimuth
canvas.rotate(azimuth + baseAngle, centerX, centerY);
//draw ticks
canvas.save();
final int totalDegrees = 360;
for (int i = 1; i < NUM_TICKS; i++) {
canvas.rotate(totalDegrees / NUM_TICKS, centerX, centerY);
canvas.drawLine(centerX, TICK_OFFSET * displayDensity, centerX, (TICK_OFFSET + TICK_LENGTH) * displayDensity, paintTick);
}
canvas.restore();
//draw arrow
if (arrowPath == null) {
arrowPath = new Path();
arrowPath.moveTo(centerX, 0);
arrowPath.lineTo(centerX + ARROW_WIDTH * displayDensity / 2, (ARROW_HEIGHT - ARROW_FRUSTUM) * displayDensity);
arrowPath.lineTo(centerX + ARROW_WIDTH * displayDensity / 2, ARROW_HEIGHT * displayDensity);
arrowPath.lineTo(centerX - ARROW_WIDTH * displayDensity / 2, ARROW_HEIGHT * displayDensity);
arrowPath.lineTo(centerX - ARROW_WIDTH * displayDensity / 2, (ARROW_HEIGHT - ARROW_FRUSTUM) * displayDensity);
arrowPath.close();
}
canvas.drawPath(arrowPath, paintArrow);
//draw bottom tick
canvas.drawLine(centerX, h - (TICK_OFFSET + TICK_LENGTH) * displayDensity, centerX, h - (TICK_OFFSET - TICK_LENGTH) * displayDensity, paintTick);
canvas.restore();
}
}
| wikipedia/src/main/java/org/wikipedia/nearby/NearbyCompassView.java | package org.wikipedia.nearby;
import android.content.Context;
import android.graphics.Bitmap;
import android.graphics.Canvas;
import android.graphics.Color;
import android.graphics.Paint;
import android.graphics.Path;
import android.graphics.PorterDuff;
import android.graphics.PorterDuffXfermode;
import android.util.AttributeSet;
import android.widget.ImageView;
public class NearbyCompassView extends ImageView {
private static final String TAG = "NearbyCompassView";
private Paint paintTick;
private Paint paintArrow;
private int tickColor = Color.BLACK;
private int maskColor = Color.WHITE;
// make the mask bitmap static, so that it's re-used by all instances of this view
// (we're assuming that all instances will be the same size)
private static Bitmap MASK_BMP;
private static Paint MASK_BMP_PAINT;
private float displayDensity;
private static final float TICK_WIDTH = 0.6f;
private static final int TICK_LENGTH = 3;
private static final int TICK_OFFSET = 8;
private static final int NUM_TICKS = 60;
private static final int ARROW_WIDTH = 10;
private static final int ARROW_HEIGHT = 11;
private static final int ARROW_FRUSTUM = 4;
private Path arrowPath;
private float baseAngle = 0f;
private float azimuth = 0f;
public NearbyCompassView(Context context) {
super(context);
init();
}
public NearbyCompassView(Context context, AttributeSet attrs) {
super(context, attrs);
init();
}
public NearbyCompassView(Context context, AttributeSet attrs, int defStyle) {
super(context, attrs, defStyle);
init();
}
private void init() {
displayDensity = getResources().getDisplayMetrics().density;
paintTick = new Paint();
paintTick.setAntiAlias(true);
paintTick.setColor(tickColor);
paintTick.setStyle(Paint.Style.STROKE);
paintTick.setStrokeWidth(TICK_WIDTH * displayDensity);
paintArrow = new Paint();
paintArrow.setAntiAlias(true);
paintArrow.setColor(tickColor);
paintArrow.setStyle(Paint.Style.FILL);
}
@Override
public void setEnabled(boolean enabled) {
super.setEnabled(enabled);
if (enabled) {
paintTick.setColor(tickColor);
paintArrow.setColor(tickColor);
} else {
paintTick.setColor(Color.TRANSPARENT);
paintArrow.setColor(Color.TRANSPARENT);
}
}
/**
* Set the "base" angle offset from North (in degrees), moving counterclockwise.
* For example, an angle of 90 will make the arrow point due West.
* @param angle Angle offset.
*/
public void setAngle(float angle) {
this.baseAngle = angle;
invalidate();
}
/**
* Set the azimuth, which will be added to the base angle offset for our arrow.
* For example, if the base angle is 90, and the azimuth is 45, then the arrow
* will point Southwest.
* @param azimuth Azimuth to be added to the base angle.
*/
public void setAzimuth(float azimuth) {
// if it's an insignificant change, then don't worry about it
if (Math.abs(azimuth - this.azimuth) < 1.0f) {
return;
}
this.azimuth = azimuth;
invalidate();
}
public void setTickColor(int color) {
this.tickColor = color;
init();
invalidate();
}
public void setMaskColor(int color) {
if (this.maskColor != color && MASK_BMP != null) {
MASK_BMP.recycle();
MASK_BMP = null;
}
this.maskColor = color;
init();
invalidate();
}
@Override
protected void onDraw(Canvas canvas) {
//draw the original image...
super.onDraw(canvas);
float w = this.getWidth();
float h = this.getHeight();
float centerX = w / 2;
float centerY = h / 2;
//draw the circular mask bitmap
if (MASK_BMP == null) {
MASK_BMP = Bitmap.createBitmap(this.getWidth(), this.getHeight(), Bitmap.Config.ARGB_8888);
Canvas bmpCanvas = new Canvas(MASK_BMP);
bmpCanvas.drawColor(maskColor);
Paint maskPaint = new Paint();
maskPaint.setStyle(Paint.Style.FILL);
maskPaint.setColor(Color.TRANSPARENT);
maskPaint.setAntiAlias(true);
maskPaint.setXfermode(new PorterDuffXfermode(PorterDuff.Mode.SRC));
bmpCanvas.drawCircle(centerX, centerY, bmpCanvas.getWidth() / 2 - (TICK_OFFSET + TICK_LENGTH * 2) * displayDensity, maskPaint);
MASK_BMP_PAINT = new Paint();
MASK_BMP_PAINT.setXfermode(new PorterDuffXfermode(PorterDuff.Mode.SRC_OVER));
}
canvas.drawBitmap(MASK_BMP, 0, 0, MASK_BMP_PAINT);
canvas.save();
//set the initial rotation based on our given angle and azimuth
canvas.rotate(azimuth + baseAngle, centerX, centerY);
//draw ticks
canvas.save();
final int totalDegrees = 360;
for (int i = 1; i < NUM_TICKS; i++) {
canvas.rotate(totalDegrees / NUM_TICKS, centerX, centerY);
canvas.drawLine(centerX, TICK_OFFSET * displayDensity, centerX, (TICK_OFFSET + TICK_LENGTH) * displayDensity, paintTick);
}
canvas.restore();
//draw arrow
if (arrowPath == null) {
arrowPath = new Path();
arrowPath.moveTo(centerX, 0);
arrowPath.lineTo(centerX + ARROW_WIDTH * displayDensity / 2, (ARROW_HEIGHT - ARROW_FRUSTUM) * displayDensity);
arrowPath.lineTo(centerX + ARROW_WIDTH * displayDensity / 2, ARROW_HEIGHT * displayDensity);
arrowPath.lineTo(centerX - ARROW_WIDTH * displayDensity / 2, ARROW_HEIGHT * displayDensity);
arrowPath.lineTo(centerX - ARROW_WIDTH * displayDensity / 2, (ARROW_HEIGHT - ARROW_FRUSTUM) * displayDensity);
arrowPath.close();
}
canvas.drawPath(arrowPath, paintArrow);
//draw bottom tick
canvas.drawLine(centerX, h - (TICK_OFFSET + TICK_LENGTH) * displayDensity, centerX, h - (TICK_OFFSET - TICK_LENGTH) * displayDensity, paintTick);
canvas.restore();
}
}
| Fix Nearby compass background when switching dark/light theme.
Bug: 73445
Change-Id: Id413559b282fb1bc2c0d15e30a2644bd8561816c
| wikipedia/src/main/java/org/wikipedia/nearby/NearbyCompassView.java | Fix Nearby compass background when switching dark/light theme. | <ide><path>ikipedia/src/main/java/org/wikipedia/nearby/NearbyCompassView.java
<ide> private Paint paintTick;
<ide> private Paint paintArrow;
<ide> private int tickColor = Color.BLACK;
<del> private int maskColor = Color.WHITE;
<add> // initialize the mask color to a ridiculous value, so that the mask bitmap is recreated for
<add> // the Dark and Light theme
<add> private int maskColor = Color.MAGENTA;
<ide>
<ide> // make the mask bitmap static, so that it's re-used by all instances of this view
<ide> // (we're assuming that all instances will be the same size) |
|
Java | mit | 189c3074b4a9a220d35a19572f908db5d6902f5d | 0 | kenCode-de/smartcoins-wallet,computationalcore/smartcoins-wallet,hvarona/smartcoins-wallet | package de.bitshares_munich.smartcoinswallet;
import android.app.Activity;
import android.app.Dialog;
import android.content.ClipData;
import android.content.ClipboardManager;
import android.content.Context;
import android.content.DialogInterface;
import android.content.Intent;
import android.os.Bundle;
import android.os.Handler;
import android.support.design.widget.TabLayout;
import android.support.v4.view.ViewPager;
import android.support.v7.app.AlertDialog;
import android.support.v7.widget.Toolbar;
import android.util.Log;
import android.view.MenuItem;
import android.view.View;
import android.view.animation.Animation;
import android.view.animation.AnimationUtils;
import android.widget.Button;
import android.widget.EditText;
import android.widget.ImageView;
import android.widget.TextView;
import android.widget.Toast;
import com.luminiasoft.bitshares.AccountOptions;
import com.luminiasoft.bitshares.AccountUpdateTransactionBuilder;
import com.luminiasoft.bitshares.Address;
import com.luminiasoft.bitshares.Asset;
import com.luminiasoft.bitshares.Authority;
import com.luminiasoft.bitshares.BrainKey;
import com.luminiasoft.bitshares.PublicKey;
import com.luminiasoft.bitshares.Transaction;
import com.luminiasoft.bitshares.UserAccount;
import com.luminiasoft.bitshares.errors.MalformedTransactionException;
import com.luminiasoft.bitshares.interfaces.WitnessResponseListener;
import com.luminiasoft.bitshares.models.BaseResponse;
import com.luminiasoft.bitshares.models.WitnessResponse;
import com.luminiasoft.bitshares.ws.TransactionBroadcastSequence;
import org.bitcoinj.core.DumpedPrivateKey;
import org.bitcoinj.core.ECKey;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.security.InvalidAlgorithmParameterException;
import java.security.InvalidKeyException;
import java.security.NoSuchAlgorithmException;
import java.util.ArrayList;
import java.util.HashMap;
import javax.crypto.BadPaddingException;
import javax.crypto.IllegalBlockSizeException;
import javax.crypto.NoSuchPaddingException;
import butterknife.Bind;
import butterknife.ButterKnife;
import butterknife.OnClick;
import de.bitshares_munich.adapters.ViewPagerAdapter;
import de.bitshares_munich.fragments.BalancesFragment;
import de.bitshares_munich.fragments.ContactsFragment;
import de.bitshares_munich.models.AccountDetails;
import de.bitshares_munich.utils.Application;
import de.bitshares_munich.utils.Crypt;
import de.bitshares_munich.utils.TinyDB;
public class TabActivity extends BaseActivity {
private String TAG = this.getClass().getName();
@Bind(R.id.toolbar)
Toolbar toolbar;
@Bind(R.id.tabs)
TabLayout tabLayout;
@Bind(R.id.viewpager)
ViewPager viewPager;
@Bind(R.id.tvBlockNumberHead_TabActivity)
TextView tvBlockNumberHead;
@Bind(R.id.tvAppVersion_TabActivity)
TextView tvAppVersion;
@Bind(R.id.ivSocketConnected_TabActivity)
ImageView ivSocketConnected;
TinyDB tinyDB;
ArrayList<AccountDetails> accountDetails;
private AccountDetails updatedAccount;
    private static final int UPDATE_KEY_MAX_RETRIES = 3;
private int updateKeyRetryCount = 0;
private int nodeIndex = 0;
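    // index into Application.urlsSocketConnection; advanced on each failed attempt
    // so retries are spread across different full nodes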
private WebsocketWorkerThread refreshKeyWorker;
private WitnessResponseListener mListener = new WitnessResponseListener() {
@Override
public void onSuccess(WitnessResponse response) {
runOnUiThread(new Runnable() {
@Override
public void run() {
Log.d(TAG,"onSuccess");
Toast.makeText(TabActivity.this, R.string.refresh_keys_success, Toast.LENGTH_LONG).show();
for(AccountDetails accountDetail : accountDetails){
if(accountDetail.account_id.equals(updatedAccount.account_id)){
accountDetail.wif_key = updatedAccount.wif_key;
accountDetail.brain_key = updatedAccount.brain_key;
accountDetail.isPostSecurityUpdate = true;
Log.d(TAG,"updating account with name: "+accountDetail.account_name+", id: "+accountDetail.account_id+", key: "+accountDetail.brain_key);
                            break;
                        }
}
tinyDB.putListObject(getString(R.string.pref_wallet_accounts), accountDetails);
displayBrainKeyBackup();
}
});
}
@Override
public void onError(BaseResponse.Error error) {
Log.d(TAG, "onError. Msg: "+error.message);
runOnUiThread(new Runnable() {
@Override
public void run() {
updatedAccount = null;
if(updateKeyRetryCount < UPDATE_KEY_MAX_RETRIES){
Log.d(TAG, "Retrying. count: "+ updateKeyRetryCount +", max: "+ UPDATE_KEY_MAX_RETRIES);
ArrayList<AccountDetails> arrayList = tinyDB.getListObject(getString(R.string.pref_wallet_accounts), AccountDetails.class);
for(AccountDetails accountDetails : arrayList){
nodeIndex = (nodeIndex + 1) % Application.urlsSocketConnection.length;
Log.d(TAG,"account id: '"+accountDetails.account_id+"', name: "+accountDetails.account_name+", wif: "+accountDetails.wif_key);
if(accountDetails.isSelected){
updateAccountAuthorities(accountDetails);
updateKeyRetryCount++;
break;
}
}
}else{
Toast.makeText(TabActivity.this, R.string.refresh_keys_fail, Toast.LENGTH_LONG).show();
}
}
});
}
};
@Override
protected void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
setContentView(R.layout.activity_tab);
ButterKnife.bind(this);
tinyDB = new TinyDB(getApplicationContext());
accountDetails = tinyDB.getListObject(getString(R.string.pref_wallet_accounts), AccountDetails.class);
setSupportActionBar(toolbar);
getSupportActionBar().setTitle("");
toolbar.setNavigationIcon(R.mipmap.btslogo);
setupViewPager(viewPager);
tabLayout.setupWithViewPager(viewPager);
setTitle(getResources().getString(R.string.app_name));
tvAppVersion.setText("v" + BuildConfig.VERSION_NAME + getString(R.string.beta));
updateBlockNumberHead();
Intent intent = getIntent();
Bundle res = intent.getExtras();
if (res != null) {
if (res.containsKey("ask_for_pin")) {
if (res.getBoolean("ask_for_pin")) {
showDialogPin();
}
}
}
}
/**
* Will display a dialog prompting the user to make a backup of the brain key.
*/
private void displayBrainKeyBackup() {
final Dialog dialog = new Dialog(this, R.style.stylishDialog);
dialog.setTitle(getString(R.string.backup_brainkey));
dialog.setContentView(R.layout.activity_copybrainkey);
final EditText etBrainKey = (EditText) dialog.findViewById(R.id.etBrainKey);
try {
String brainKey = getBrainKey();
if (brainKey.isEmpty()) {
Toast.makeText(getApplicationContext(),getResources().getString(R.string.unable_to_load_brainkey),Toast.LENGTH_LONG).show();
return;
} else {
etBrainKey.setText(brainKey);
}
} catch (Exception e) {
Log.e(TAG,"Exception in displayBrainKeyBackup. Msg: "+e.getMessage());
}
Button btnCancel = (Button) dialog.findViewById(R.id.btnCancel);
btnCancel.setOnClickListener(new View.OnClickListener() {
@Override
public void onClick(View v) {
dialog.cancel();
}
});
Button btnCopy = (Button) dialog.findViewById(R.id.btnCopy);
btnCopy.setOnClickListener(new View.OnClickListener() {
@Override
public void onClick(View v) {
Toast.makeText(TabActivity.this, R.string.copied_to_clipboard , Toast.LENGTH_SHORT).show();
ClipboardManager clipboard = (ClipboardManager) getSystemService(Context.CLIPBOARD_SERVICE);
ClipData clip = ClipData.newPlainText("label", etBrainKey.getText().toString());
clipboard.setPrimaryClip(clip);
dialog.cancel();
}
});
dialog.setCancelable(false);
dialog.show();
}
/**
     * Returns the active account's brain key.
     * @return the brain key of the currently selected account, or an empty string if none is selected
*/
private String getBrainKey() {
for (int i = 0; i < accountDetails.size(); i++) {
if (accountDetails.get(i).isSelected) {
return accountDetails.get(i).brain_key;
}
}
return "";
}
/**
* Method that will actually perform a call to the full node and update the key currently
* controlling the account passed as a parameter.
*
     * @param accountDetails The account whose key we want to update.
*/
private void updateAccountAuthorities(AccountDetails accountDetails) {
Log.d(TAG,"account to update. current brain key: "+accountDetails.brain_key);
updatedAccount = accountDetails;
try {
String currentWif = Crypt.getInstance().decrypt_string(updatedAccount.wif_key);
// Coming up with a new brainkey suggestion
BufferedReader reader = new BufferedReader(new InputStreamReader(getAssets().open(AccountActivity.BRAINKEY_FILE), "UTF-8"));
String dictionary = reader.readLine();
String suggestion = BrainKey.suggest(dictionary);
BrainKey brainKey = new BrainKey(suggestion, 0);
Log.d(TAG,"new brain key: "+suggestion);
// Keeping a reference of the account to be changed, with the updated values
Address address = new Address(ECKey.fromPublicOnly(brainKey.getPrivateKey().getPubKey()));
updatedAccount.wif_key = Crypt.getInstance().encrypt_string(brainKey.getWalletImportFormat());
updatedAccount.brain_key = suggestion;
updatedAccount.pub_key = address.toString();
// Building a transaction that will be used to update the account key
HashMap<PublicKey, Integer> authMap = new HashMap<>();
authMap.put(address.getPublicKey(), 1);
Authority authority = new Authority(1, authMap, null);
AccountOptions options = new AccountOptions(address.getPublicKey());
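            // a single-key authority: threshold 1 is met by the new key's weight of 1, so the
            // rotated key alone controls both owner and active; the same public key is handed
            // to AccountOptions (presumably becoming the account's new memo key)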
Transaction transaction = new AccountUpdateTransactionBuilder(DumpedPrivateKey.fromBase58(null, currentWif).getKey())
.setAccont(new UserAccount(accountDetails.account_id))
.setOwner(authority)
.setActive(authority)
.setOptions(options)
.build();
refreshKeyWorker = new WebsocketWorkerThread(new TransactionBroadcastSequence(transaction, new Asset("1.3.0"), mListener), nodeIndex);
refreshKeyWorker.start();
} catch (MalformedTransactionException e) {
Log.e(TAG, "MalformedTransactionException. Msg: "+e.getMessage());
} catch (NoSuchAlgorithmException e) {
Log.e(TAG, "NoSuchAlgorithmException. Msg: "+e.getMessage());
} catch (IOException e) {
Log.e(TAG, "IOException. Msg: "+e.getMessage());
} catch (NoSuchPaddingException e) {
Log.e(TAG, "NoSuchPaddingException. Msg: "+e.getMessage());
} catch (InvalidKeyException e) {
Log.e(TAG, "InvalidKeyException. Msg: "+e.getMessage());
} catch (InvalidAlgorithmParameterException e) {
Log.e(TAG, "InvalidAlgorithmParameterException. Msg: "+e.getMessage());
} catch (IllegalBlockSizeException e) {
Log.e(TAG, "IllegalBlockSizeException. Msg: "+e.getMessage());
} catch (BadPaddingException e) {
Log.e(TAG, "BadPaddingException. Msg: "+e.getMessage());
} catch (ClassNotFoundException e) {
Log.e(TAG, "ClassNotFoundException. Msg: "+e.getMessage());
}
}
private void setupViewPager(ViewPager viewPager) {
ViewPagerAdapter adapter = new ViewPagerAdapter(getSupportFragmentManager());
adapter.addFragment(new BalancesFragment(), getString(R.string.balances));
adapter.addFragment(new ContactsFragment(), getString(R.string.contacts));
viewPager.setAdapter(adapter);
}
private void updateBlockNumberHead() {
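        // once-a-second UI poll: show the head block number with a steady icon while the
        // websocket is connected, otherwise flash the disconnected icon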
final Handler handler = new Handler();
final Activity myActivity = this;
final Runnable updateTask = new Runnable() {
@Override
public void run() {
if (Application.isConnected()) {
ivSocketConnected.setImageResource(R.drawable.icon_connecting);
tvBlockNumberHead.setText(Application.blockHead);
ivSocketConnected.clearAnimation();
} else {
ivSocketConnected.setImageResource(R.drawable.icon_disconnecting);
Animation myFadeInAnimation = AnimationUtils.loadAnimation(myActivity.getApplicationContext(), R.anim.flash);
ivSocketConnected.startAnimation(myFadeInAnimation);
}
handler.postDelayed(this, 1000);
}
};
handler.postDelayed(updateTask, 1000);
}
@OnClick(R.id.OnClickSettings_TabActivity)
void OnClickSettings() {
Intent intent = new Intent(this, SettingActivity.class);
startActivity(intent);
}
// Block for pin
private void showDialogPin() {
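        // modal pin prompt: the dialog is not cancelable, and on a correct pin we go on
        // to check whether the account still needs the brainkey security update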
final ArrayList<AccountDetails> accountDetails = tinyDB.getListObject(getString(R.string.pref_wallet_accounts), AccountDetails.class);
final Dialog dialog = new Dialog(TabActivity.this);
dialog.setTitle(R.string.pin_verification);
dialog.setContentView(R.layout.activity_alert_pin_dialog);
Button btnDone = (Button) dialog.findViewById(R.id.btnDone);
final EditText etPin = (EditText) dialog.findViewById(R.id.etPin);
btnDone.setOnClickListener(new View.OnClickListener() {
@Override
public void onClick(View v) {
for (int i = 0; i < accountDetails.size(); i++) {
if (accountDetails.get(i).isSelected) {
if (etPin.getText().toString().equals(accountDetails.get(i).pinCode)) {
Log.d(TAG, "pin code matches");
dialog.cancel();
checkSecurityUpdate();
break;
}else{
Log.d(TAG, "pin code doesn't match");
}
}
}
}
});
dialog.setCancelable(false);
dialog.show();
}
private void checkSecurityUpdate(){
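        // accounts created before the brainkey security update lack the isPostSecurityUpdate
        // flag; if the active account is one of them, offer to rotate its keys now or later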
boolean isActiveAccountOld = false;
ArrayList<AccountDetails> arrayList = tinyDB.getListObject(getString(R.string.pref_wallet_accounts), AccountDetails.class);
AccountDetails activeAccount = null;
for(AccountDetails account : arrayList){
Log.d(TAG, "account: "+account.toString());
if(account.isSelected){
activeAccount = account;
try {
if(account.isPostSecurityUpdate){
Log.d(TAG, "Account creation is post security update: " + account.isPostSecurityUpdate);
}else{
Log.d(TAG, "Account creation is previous to the security update");
isActiveAccountOld = true;
}
}catch(NullPointerException e){
Log.e(TAG, "NullPointerException. Account creation is previous to the security update");
isActiveAccountOld = true;
}
break;
}
}
if(isActiveAccountOld){
final AccountDetails oldActiveAccount = activeAccount;
AlertDialog.Builder builder = new AlertDialog.Builder(this)
.setTitle(getResources().getString(R.string.security_update_title))
.setMessage(getResources().getString(R.string.security_update_summary))
.setPositiveButton(getResources().getString(R.string.dialog_proceed), new DialogInterface.OnClickListener() {
@Override
public void onClick(DialogInterface dialog, int which) {
updateAccountAuthorities(oldActiveAccount);
dialog.dismiss();
}
}).setNegativeButton(getResources().getString(R.string.dialog_later), new DialogInterface.OnClickListener() {
@Override
public void onClick(DialogInterface dialog, int which) {
dialog.dismiss();
}
});
builder.create().show();
}
}
@Override
public boolean onOptionsItemSelected(MenuItem item) {
if (item.getItemId() == android.R.id.home) {
viewPager.setCurrentItem(0);
return true;
}
return super.onOptionsItemSelected(item);
}
}
| app/src/main/java/de/bitshares_munich/smartcoinswallet/TabActivity.java | package de.bitshares_munich.smartcoinswallet;
import android.app.Activity;
import android.app.Dialog;
import android.content.ClipData;
import android.content.ClipboardManager;
import android.content.Context;
import android.content.DialogInterface;
import android.content.Intent;
import android.os.Bundle;
import android.os.Handler;
import android.support.design.widget.TabLayout;
import android.support.v4.view.ViewPager;
import android.support.v7.app.AlertDialog;
import android.support.v7.widget.Toolbar;
import android.util.Log;
import android.view.MenuItem;
import android.view.View;
import android.view.animation.Animation;
import android.view.animation.AnimationUtils;
import android.widget.Button;
import android.widget.EditText;
import android.widget.ImageView;
import android.widget.TextView;
import android.widget.Toast;
import com.luminiasoft.bitshares.AccountOptions;
import com.luminiasoft.bitshares.AccountUpdateTransactionBuilder;
import com.luminiasoft.bitshares.Address;
import com.luminiasoft.bitshares.Asset;
import com.luminiasoft.bitshares.Authority;
import com.luminiasoft.bitshares.BrainKey;
import com.luminiasoft.bitshares.PublicKey;
import com.luminiasoft.bitshares.Transaction;
import com.luminiasoft.bitshares.UserAccount;
import com.luminiasoft.bitshares.errors.MalformedTransactionException;
import com.luminiasoft.bitshares.interfaces.WitnessResponseListener;
import com.luminiasoft.bitshares.models.BaseResponse;
import com.luminiasoft.bitshares.models.WitnessResponse;
import com.luminiasoft.bitshares.ws.TransactionBroadcastSequence;
import org.bitcoinj.core.DumpedPrivateKey;
import org.bitcoinj.core.ECKey;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.security.InvalidAlgorithmParameterException;
import java.security.InvalidKeyException;
import java.security.NoSuchAlgorithmException;
import java.util.ArrayList;
import java.util.HashMap;
import javax.crypto.BadPaddingException;
import javax.crypto.IllegalBlockSizeException;
import javax.crypto.NoSuchPaddingException;
import butterknife.Bind;
import butterknife.ButterKnife;
import butterknife.OnClick;
import de.bitshares_munich.adapters.ViewPagerAdapter;
import de.bitshares_munich.fragments.BalancesFragment;
import de.bitshares_munich.fragments.ContactsFragment;
import de.bitshares_munich.models.AccountDetails;
import de.bitshares_munich.utils.Application;
import de.bitshares_munich.utils.Crypt;
import de.bitshares_munich.utils.TinyDB;
public class TabActivity extends BaseActivity {
private String TAG = this.getClass().getName();
@Bind(R.id.toolbar)
Toolbar toolbar;
@Bind(R.id.tabs)
TabLayout tabLayout;
@Bind(R.id.viewpager)
ViewPager viewPager;
@Bind(R.id.tvBlockNumberHead_TabActivity)
TextView tvBlockNumberHead;
@Bind(R.id.tvAppVersion_TabActivity)
TextView tvAppVersion;
@Bind(R.id.ivSocketConnected_TabActivity)
ImageView ivSocketConnected;
TinyDB tinyDB;
ArrayList<AccountDetails> accountDetails;
private AccountDetails updatedAccount;
    private static final int UPDATE_KEY_MAX_RETRIES = 3;
private int updateKeyRetryCount = 0;
private int nodeIndex = 0;
private WebsocketWorkerThread refreshKeyWorker;
private WitnessResponseListener mListener = new WitnessResponseListener() {
@Override
public void onSuccess(WitnessResponse response) {
runOnUiThread(new Runnable() {
@Override
public void run() {
Log.d(TAG,"onSuccess");
Toast.makeText(TabActivity.this, R.string.refresh_keys_success, Toast.LENGTH_LONG).show();
for(AccountDetails accountDetail : accountDetails){
if(accountDetail.account_id.equals(updatedAccount.account_id)){
accountDetail.wif_key = updatedAccount.wif_key;
accountDetail.brain_key = updatedAccount.brain_key;
Log.d(TAG,"updating account with name: "+accountDetail.account_name+", id: "+accountDetail.account_id+", key: "+accountDetail.brain_key);
                            break;
                        }
}
tinyDB.putListObject(getString(R.string.pref_wallet_accounts), accountDetails);
displayBrainKeyBackup();
}
});
}
@Override
public void onError(BaseResponse.Error error) {
Log.d(TAG, "onError. Msg: "+error.message);
runOnUiThread(new Runnable() {
@Override
public void run() {
updatedAccount = null;
if(updateKeyRetryCount < UPDATE_KEY_MAX_RETRIES){
Log.d(TAG, "Retrying. count: "+ updateKeyRetryCount +", max: "+ UPDATE_KEY_MAX_RETRIES);
ArrayList<AccountDetails> arrayList = tinyDB.getListObject(getString(R.string.pref_wallet_accounts), AccountDetails.class);
for(AccountDetails accountDetails : arrayList){
nodeIndex = (nodeIndex + 1) % Application.urlsSocketConnection.length;
Log.d(TAG,"account id: '"+accountDetails.account_id+"', name: "+accountDetails.account_name+", wif: "+accountDetails.wif_key);
if(accountDetails.isSelected){
updateAccountAuthorities(accountDetails);
updateKeyRetryCount++;
break;
}
}
}else{
Toast.makeText(TabActivity.this, R.string.refresh_keys_fail, Toast.LENGTH_LONG).show();
}
}
});
}
};
@Override
protected void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
setContentView(R.layout.activity_tab);
ButterKnife.bind(this);
tinyDB = new TinyDB(getApplicationContext());
accountDetails = tinyDB.getListObject(getString(R.string.pref_wallet_accounts), AccountDetails.class);
setSupportActionBar(toolbar);
getSupportActionBar().setTitle("");
toolbar.setNavigationIcon(R.mipmap.btslogo);
setupViewPager(viewPager);
tabLayout.setupWithViewPager(viewPager);
setTitle(getResources().getString(R.string.app_name));
tvAppVersion.setText("v" + BuildConfig.VERSION_NAME + getString(R.string.beta));
updateBlockNumberHead();
Intent intent = getIntent();
Bundle res = intent.getExtras();
if (res != null) {
if (res.containsKey("ask_for_pin")) {
if (res.getBoolean("ask_for_pin")) {
showDialogPin();
}
}
}
}
/**
* Will display a dialog prompting the user to make a backup of the brain key.
*/
private void displayBrainKeyBackup() {
final Dialog dialog = new Dialog(this, R.style.stylishDialog);
dialog.setTitle(getString(R.string.backup_brainkey));
dialog.setContentView(R.layout.activity_copybrainkey);
final EditText etBrainKey = (EditText) dialog.findViewById(R.id.etBrainKey);
try {
String brainKey = getBrainKey();
if (brainKey.isEmpty()) {
Toast.makeText(getApplicationContext(),getResources().getString(R.string.unable_to_load_brainkey),Toast.LENGTH_LONG).show();
return;
} else {
etBrainKey.setText(brainKey);
}
} catch (Exception e) {
Log.e(TAG,"Exception in displayBrainKeyBackup. Msg: "+e.getMessage());
}
Button btnCancel = (Button) dialog.findViewById(R.id.btnCancel);
btnCancel.setOnClickListener(new View.OnClickListener() {
@Override
public void onClick(View v) {
dialog.cancel();
}
});
Button btnCopy = (Button) dialog.findViewById(R.id.btnCopy);
btnCopy.setOnClickListener(new View.OnClickListener() {
@Override
public void onClick(View v) {
Toast.makeText(TabActivity.this, R.string.copied_to_clipboard , Toast.LENGTH_SHORT).show();
ClipboardManager clipboard = (ClipboardManager) getSystemService(Context.CLIPBOARD_SERVICE);
ClipData clip = ClipData.newPlainText("label", etBrainKey.getText().toString());
clipboard.setPrimaryClip(clip);
dialog.cancel();
}
});
dialog.setCancelable(false);
dialog.show();
}
/**
     * Returns the active account's brain key.
     *
     * @return The brain key of the currently selected account, or an empty string if no account is selected.
     */
private String getBrainKey() {
for (int i = 0; i < accountDetails.size(); i++) {
if (accountDetails.get(i).isSelected) {
return accountDetails.get(i).brain_key;
}
}
return "";
}
/**
     * Performs a call to the full node and updates the key currently
     * controlling the account passed as a parameter.
     *
     * @param accountDetails The account whose key we want to update.
*/
private void updateAccountAuthorities(AccountDetails accountDetails) {
Log.d(TAG,"account to update. current brain key: "+accountDetails.brain_key);
updatedAccount = accountDetails;
try {
String currentWif = Crypt.getInstance().decrypt_string(updatedAccount.wif_key);
// Coming up with a new brainkey suggestion
BufferedReader reader = new BufferedReader(new InputStreamReader(getAssets().open(AccountActivity.BRAINKEY_FILE), "UTF-8"));
String dictionary = reader.readLine();
String suggestion = BrainKey.suggest(dictionary);
BrainKey brainKey = new BrainKey(suggestion, 0);
Log.d(TAG,"new brain key: "+suggestion);
// Keeping a reference of the account to be changed, with the updated values
Address address = new Address(ECKey.fromPublicOnly(brainKey.getPrivateKey().getPubKey()));
updatedAccount.wif_key = Crypt.getInstance().encrypt_string(brainKey.getWalletImportFormat());
updatedAccount.brain_key = suggestion;
updatedAccount.pub_key = address.toString();
updatedAccount.isPostSecurityUpdate = true;
// Building a transaction that will be used to update the account key
HashMap<PublicKey, Integer> authMap = new HashMap<>();
authMap.put(address.getPublicKey(), 1);
Authority authority = new Authority(1, authMap, null);
AccountOptions options = new AccountOptions(address.getPublicKey());
Transaction transaction = new AccountUpdateTransactionBuilder(DumpedPrivateKey.fromBase58(null, currentWif).getKey())
.setAccont(new UserAccount(accountDetails.account_id))
.setOwner(authority)
.setActive(authority)
.setOptions(options)
.build();
refreshKeyWorker = new WebsocketWorkerThread(new TransactionBroadcastSequence(transaction, new Asset("1.3.0"), mListener), nodeIndex);
refreshKeyWorker.start();
} catch (MalformedTransactionException e) {
Log.e(TAG, "MalformedTransactionException. Msg: "+e.getMessage());
} catch (NoSuchAlgorithmException e) {
Log.e(TAG, "NoSuchAlgorithmException. Msg: "+e.getMessage());
} catch (IOException e) {
Log.e(TAG, "IOException. Msg: "+e.getMessage());
} catch (NoSuchPaddingException e) {
Log.e(TAG, "NoSuchPaddingException. Msg: "+e.getMessage());
} catch (InvalidKeyException e) {
Log.e(TAG, "InvalidKeyException. Msg: "+e.getMessage());
} catch (InvalidAlgorithmParameterException e) {
Log.e(TAG, "InvalidAlgorithmParameterException. Msg: "+e.getMessage());
} catch (IllegalBlockSizeException e) {
Log.e(TAG, "IllegalBlockSizeException. Msg: "+e.getMessage());
} catch (BadPaddingException e) {
Log.e(TAG, "BadPaddingException. Msg: "+e.getMessage());
} catch (ClassNotFoundException e) {
Log.e(TAG, "ClassNotFoundException. Msg: "+e.getMessage());
}
}
private void setupViewPager(ViewPager viewPager) {
ViewPagerAdapter adapter = new ViewPagerAdapter(getSupportFragmentManager());
adapter.addFragment(new BalancesFragment(), getString(R.string.balances));
adapter.addFragment(new ContactsFragment(), getString(R.string.contacts));
viewPager.setAdapter(adapter);
}
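    // Polls once per second: while the socket is connected, shows the current
    // block head number next to a steady icon; otherwise flashes the
    // disconnected icon.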
private void updateBlockNumberHead() {
final Handler handler = new Handler();
final Activity myActivity = this;
final Runnable updateTask = new Runnable() {
@Override
public void run() {
if (Application.isConnected()) {
ivSocketConnected.setImageResource(R.drawable.icon_connecting);
tvBlockNumberHead.setText(Application.blockHead);
ivSocketConnected.clearAnimation();
} else {
ivSocketConnected.setImageResource(R.drawable.icon_disconnecting);
Animation myFadeInAnimation = AnimationUtils.loadAnimation(myActivity.getApplicationContext(), R.anim.flash);
ivSocketConnected.startAnimation(myFadeInAnimation);
}
handler.postDelayed(this, 1000);
}
};
handler.postDelayed(updateTask, 1000);
}
@OnClick(R.id.OnClickSettings_TabActivity)
void OnClickSettings() {
Intent intent = new Intent(this, SettingActivity.class);
startActivity(intent);
}
// Block for pin
private void showDialogPin() {
final ArrayList<AccountDetails> accountDetails = tinyDB.getListObject(getString(R.string.pref_wallet_accounts), AccountDetails.class);
final Dialog dialog = new Dialog(TabActivity.this);
dialog.setTitle(R.string.pin_verification);
dialog.setContentView(R.layout.activity_alert_pin_dialog);
Button btnDone = (Button) dialog.findViewById(R.id.btnDone);
final EditText etPin = (EditText) dialog.findViewById(R.id.etPin);
btnDone.setOnClickListener(new View.OnClickListener() {
@Override
public void onClick(View v) {
for (int i = 0; i < accountDetails.size(); i++) {
if (accountDetails.get(i).isSelected) {
if (etPin.getText().toString().equals(accountDetails.get(i).pinCode)) {
Log.d(TAG, "pin code matches");
dialog.cancel();
checkSecurityUpdate();
break;
}else{
Log.d(TAG, "pin code doesn't match");
}
}
}
}
});
dialog.setCancelable(false);
dialog.show();
}
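    // Determines whether the currently selected account predates the security
    // update (isPostSecurityUpdate unset or false) and, if so, offers to rotate
    // its keys via updateAccountAuthorities().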
private void checkSecurityUpdate(){
boolean isActiveAccountOld = false;
ArrayList<AccountDetails> arrayList = tinyDB.getListObject(getString(R.string.pref_wallet_accounts), AccountDetails.class);
AccountDetails activeAccount = null;
for(AccountDetails account : arrayList){
Log.d(TAG, "account: "+account.toString());
if(account.isSelected){
activeAccount = account;
try {
if(account.isPostSecurityUpdate){
Log.d(TAG, "Account creation is post security update: " + account.isPostSecurityUpdate);
}else{
Log.d(TAG, "Account creation is previous to the security update");
isActiveAccountOld = true;
}
}catch(NullPointerException e){
Log.e(TAG, "NullPointerException. Account creation is previous to the security update");
isActiveAccountOld = true;
}
break;
}
}
if(isActiveAccountOld){
final AccountDetails oldActiveAccount = activeAccount;
AlertDialog.Builder builder = new AlertDialog.Builder(this)
.setTitle(getResources().getString(R.string.security_update_title))
.setMessage(getResources().getString(R.string.security_update_summary))
.setPositiveButton(getResources().getString(R.string.dialog_proceed), new DialogInterface.OnClickListener() {
@Override
public void onClick(DialogInterface dialog, int which) {
updateAccountAuthorities(oldActiveAccount);
dialog.dismiss();
}
}).setNegativeButton(getResources().getString(R.string.dialog_later), new DialogInterface.OnClickListener() {
@Override
public void onClick(DialogInterface dialog, int which) {
dialog.dismiss();
}
});
builder.create().show();
}
}
@Override
public boolean onOptionsItemSelected(MenuItem item) {
if (item.getItemId() == android.R.id.home) {
viewPager.setCurrentItem(0);
return true;
}
return super.onOptionsItemSelected(item);
}
}
| Fixing a bug in the old account detection procedure
 | app/src/main/java/de/bitshares_munich/smartcoinswallet/TabActivity.java | Fixing a bug in the old account detection procedure | <ide><path>app/src/main/java/de/bitshares_munich/smartcoinswallet/TabActivity.java
<ide> if(accountDetail.account_id.equals(updatedAccount.account_id)){
<ide> accountDetail.wif_key = updatedAccount.wif_key;
<ide> accountDetail.brain_key = updatedAccount.brain_key;
<add> accountDetail.isPostSecurityUpdate = true;
<ide> Log.d(TAG,"updating account with name: "+accountDetail.account_name+", id: "+accountDetail.account_id+", key: "+accountDetail.brain_key);
<ide> }
<ide> break;
<ide> updatedAccount.wif_key = Crypt.getInstance().encrypt_string(brainKey.getWalletImportFormat());
<ide> updatedAccount.brain_key = suggestion;
<ide> updatedAccount.pub_key = address.toString();
<del> updatedAccount.isPostSecurityUpdate = true;
<ide>
<ide> // Building a transaction that will be used to update the account key
<ide> HashMap<PublicKey, Integer> authMap = new HashMap<>(); |
|
JavaScript | apache-2.0 | 1c2f37a8bb4e2910683faf8d9baaf488bd26079a | 0 | uhm-coe/assist,uhm-coe/assist,uhm-coe/assist,uhm-coe/assist | $(function(Query) {
'use strict';
console.log("From search.js");
var query = new Query();
$('.search').on('submit', function(e) {
// stop the form from doing its default behavior
e.preventDefault();
// set the query, and go to the search page with our query URL
query
.set($('.search-box').val().trim())
.goToLocation('/search');
});
}(Query)); | assets/js/search.js | // Documentation: http://benhowdle.im/creating-a-dynamic-search-page-for-your-jekyll-blog.html
// TODO: Move to autocomplete approach: http://danreev.es/posts/jekyll-search-with-typeahead/
!(function() {
var parseQueryFromURL = function() {
var searchQuery = window.location.search;
if (!searchQuery) {
return null;
}
var regex = /[?&]([^=#]+)=([^&#]*)/g,
params = {},
match;
while (match = regex.exec(searchQuery)) {
params[match[1]] = match[2];
}
if (!params.hasOwnProperty("query")) {
return null;
}
return decodeURIComponent(params.query);
};
var scanPosts = function(posts, properties, query) {
var results = [];
posts.forEach(function(post) {
var textToScan = "",
regex = new RegExp(query, "ig");
properties.forEach(function(property) {
if (post.hasOwnProperty(property)) {
textToScan += post[property];
}
});
if (regex.test(textToScan)) {
results.push(post);
}
});
return results;
};
var outputResults = function(results, el) {
var frag = document.createDocumentFragment();
results.forEach(function(result) {
var div = document.createElement("div");
div.className = "search-result";
var title = document.createElement("h2");
var link = document.createElement("a");
var excerpt = document.createElement("p");
link.href = result.link;
link.innerHTML = result.title;
excerpt.innerHTML = result.content;
title.appendChild(link);
div.appendChild(title);
div.appendChild(excerpt);
frag.appendChild(div);
});
el.appendChild(frag);
};
var Search = function(options) {
options = options || {};
if (!options.selector) {
throw new Error("We need a selector to find");
}
this.el = document.querySelector(options.selector);
if (!this.el) {
throw new Error("We need a HTML element to output to");
}
this.posts = JEKYLL_POSTS;
if (!this.posts) {
return this.el.innerHTML = this.noResultsMessage;
}
var defaultMessage = "No results found";
this.noResultsMessage = options.noResultsMessage || defaultMessage;
var defaultProperties = ["title"];
this.properties = options.properties || defaultProperties;
this.query = parseQueryFromURL();
if (!this.query) {
return this.el.innerHTML = this.noResultsMessage;
}
this.results = scanPosts(this.posts, this.properties, this.query);
if (!this.results.length) {
return this.el.innerHTML = this.noResultsMessage;
}
outputResults(this.results, this.el);
};
window.jekyllSearch = Search;
})();
| Rewrite script to incoporate Query object.
| assets/js/search.js | Rewrite script to incoporate Query object. | <ide><path>ssets/js/search.js
<del>// Documentation: http://benhowdle.im/creating-a-dynamic-search-page-for-your-jekyll-blog.html
<add>$(function(Query) {
<add> 'use strict';
<add> console.log("From search.js");
<ide>
<del>// TODO: Move to autocomplete approach: http://danreev.es/posts/jekyll-search-with-typeahead/
<add> var query = new Query();
<ide>
<del>!(function() {
<add> $('.search').on('submit', function(e) {
<add> // stop the form from doing its default behavior
<add> e.preventDefault();
<ide>
<del> var parseQueryFromURL = function() {
<add> // set the query, and go to the search page with our query URL
<add> query
<add> .set($('.search-box').val().trim())
<add> .goToLocation('/search');
<add> });
<ide>
<del> var searchQuery = window.location.search;
<del> if (!searchQuery) {
<del> return null;
<del> }
<ide>
<del> var regex = /[?&]([^=#]+)=([^&#]*)/g,
<del> params = {},
<del> match;
<del> while (match = regex.exec(searchQuery)) {
<del> params[match[1]] = match[2];
<del> }
<del>
<del> if (!params.hasOwnProperty("query")) {
<del> return null;
<del> }
<del>
<del> return decodeURIComponent(params.query);
<del>
<del> };
<del>
<del> var scanPosts = function(posts, properties, query) {
<del>
<del> var results = [];
<del> posts.forEach(function(post) {
<del> var textToScan = "",
<del> regex = new RegExp(query, "ig");
<del>
<del> properties.forEach(function(property) {
<del> if (post.hasOwnProperty(property)) {
<del> textToScan += post[property];
<del> }
<del> });
<del>
<del> if (regex.test(textToScan)) {
<del> results.push(post);
<del> }
<del> });
<del>
<del> return results;
<del>
<del> };
<del>
<del> var outputResults = function(results, el) {
<del>
<del> var frag = document.createDocumentFragment();
<del> results.forEach(function(result) {
<del>
<del> var div = document.createElement("div");
<del> div.className = "search-result";
<del>
<del> var title = document.createElement("h2");
<del> var link = document.createElement("a");
<del> var excerpt = document.createElement("p");
<del>
<del> link.href = result.link;
<del> link.innerHTML = result.title;
<del> excerpt.innerHTML = result.content;
<del> title.appendChild(link);
<del>
<del> div.appendChild(title);
<del> div.appendChild(excerpt);
<del>
<del> frag.appendChild(div);
<del>
<del> });
<del>
<del> el.appendChild(frag);
<del>
<del> };
<del>
<del> var Search = function(options) {
<del>
<del> options = options || {};
<del>
<del> if (!options.selector) {
<del> throw new Error("We need a selector to find");
<del> }
<del>
<del> this.el = document.querySelector(options.selector);
<del> if (!this.el) {
<del> throw new Error("We need a HTML element to output to");
<del> }
<del>
<del> this.posts = JEKYLL_POSTS;
<del> if (!this.posts) {
<del> return this.el.innerHTML = this.noResultsMessage;
<del> }
<del>
<del> var defaultMessage = "No results found";
<del> this.noResultsMessage = options.noResultsMessage || defaultMessage;
<del>
<del> var defaultProperties = ["title"];
<del> this.properties = options.properties || defaultProperties;
<del>
<del> this.query = parseQueryFromURL();
<del> if (!this.query) {
<del> return this.el.innerHTML = this.noResultsMessage;
<del> }
<del>
<del> this.results = scanPosts(this.posts, this.properties, this.query);
<del> if (!this.results.length) {
<del> return this.el.innerHTML = this.noResultsMessage;
<del> }
<del>
<del> outputResults(this.results, this.el);
<del>
<del> };
<del>
<del> window.jekyllSearch = Search;
<del>
<del>})();
<add>}(Query)); |
|
Java | apache-2.0 | 1cab267baea35bf0acb51d4cfc7ca2d8bb9e425f | 0 | TDesjardins/GWT-OL3-Playground,TDesjardins/gwt-ol3,sebasbaumh/gwt-ol3,TDesjardins/GWT-OL3-Playground,sebasbaumh/gwt-ol3,TDesjardins/gwt-ol3,sebasbaumh/gwt-ol3,TDesjardins/GWT-OL3-Playground,TDesjardins/gwt-ol3 | /*******************************************************************************
* Copyright 2014, 2017 gwt-ol3
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*******************************************************************************/
package ol.format.filter;
import ol.Coordinate;
import ol.GwtOL3BaseTestCase;
import ol.OLFactory;
import ol.geom.Circle;
/**
* Test for filters.
*
* @author Tino Desjardins
*
*/
public class FilterTest extends GwtOL3BaseTestCase {
public void testAnd() {
injectUrlAndTest(() -> {
EqualTo equalTo = new EqualTo("attribute", "value");
EqualTo equalTo2 = new EqualTo("attribute2", 1);
And andFilter = new And(equalTo, equalTo2);
assertNotNull(andFilter);
});
}
public void testBbox() {
injectUrlAndTest(() -> {
Bbox bboxFilter = new Bbox("geometryName", OLFactory.createExtent(0, 0, 1, 1), "EPSG:3857");
assertNotNull(bboxFilter);
bboxFilter.setExtent(OLFactory.createExtent(1, 1, 2, 2));
bboxFilter.setGeometryName("geometryAttribute");
bboxFilter.setSrsName("EPSG:4326");
});
}
public void testEqualTo() {
injectUrlAndTest(() -> {
EqualTo equalTo = new EqualTo("attribute", "value");
assertNotNull(equalTo);
EqualTo equalToInteger = new EqualTo("attribute", 1);
assertNotNull(equalToInteger);
EqualTo equalToDouble = new EqualTo("attribute", 1.25);
assertNotNull(equalToDouble);
});
}
public void testGreaterThan() {
injectUrlAndTest(() -> {
GreaterThan greaterThan = new GreaterThan("attribute", 5);
assertNotNull(greaterThan);
});
}
public void testGreaterThanOrEqualTo() {
injectUrlAndTest(() -> {
GreaterThanOrEqualTo greaterThanOrEqualTo = new GreaterThanOrEqualTo("attribute", 5);
assertNotNull(greaterThanOrEqualTo);
});
}
public void testIntersects() {
injectUrlAndTest(() -> {
Intersects intersectsFilter = new Intersects("geometryName", new Circle(Coordinate.create(0, 0), 2), "EPSG:3857");
assertNotNull(intersectsFilter);
});
}
public void testIsBetween() {
injectUrlAndTest(() -> {
IsBetween isBetween = new IsBetween("attribute", 5, 10);
assertNotNull(isBetween);
});
}
public void testIsLike() {
injectUrlAndTest(() -> {
IsLike isLike = new IsLike("attribute", "value");
assertNotNull(isLike);
});
}
public void testIsNull() {
injectUrlAndTest(() -> {
IsNull isNull = new IsNull("attribute");
assertNotNull(isNull);
});
}
public void testLessThan() {
injectUrlAndTest(() -> {
LessThan lessThan = new LessThan("attribute", 5);
assertNotNull(lessThan);
});
}
public void testLessThanOrEqualTo() {
injectUrlAndTest(() -> {
LessThanOrEqualTo lessThanOrEqualTo = new LessThanOrEqualTo("attribute", 5);
assertNotNull(lessThanOrEqualTo);
});
}
public void testNotEqualTo() {
injectUrlAndTest(() -> {
NotEqualTo notEqualTo = new NotEqualTo("attribute", "value");
assertNotNull(notEqualTo);
NotEqualTo notEqualToInteger = new NotEqualTo("attribute", 1);
assertNotNull(notEqualToInteger);
NotEqualTo notEqualToDouble = new NotEqualTo("attribute", 1.25);
assertNotNull(notEqualToDouble);
});
}
public void testWithin() {
injectUrlAndTest(() -> {
Within within = new Within("geometryName", new Circle(Coordinate.create(5, 5), 5), "EPSG:3857");
assertNotNull(within);
});
}
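    // Illustrative note (not exercised by these tests): in OpenLayers, spatial
    // filters such as Intersects and Within are typically supplied as the
    // `filter` option of a WFS GetFeature request, so the server returns only
    // features whose geometry satisfies the predicate.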
}
| gwt-ol3-client/src/test/java/ol/format/filter/FilterTest.java | /*******************************************************************************
* Copyright 2014, 2017 gwt-ol3
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*******************************************************************************/
package ol.format.filter;
import ol.GwtOL3BaseTestCase;
import ol.OLFactory;
/**
* Test for filters.
*
* @author Tino Desjardins
*
*/
public class FilterTest extends GwtOL3BaseTestCase {
public void testAnd() {
injectUrlAndTest(() -> {
EqualTo equalTo = new EqualTo("attribute", "value");
EqualTo equalTo2 = new EqualTo("attribute2", 1);
And andFilter = new And(equalTo, equalTo2);
assertNotNull(andFilter);
});
}
public void testBbox() {
injectUrlAndTest(() -> {
Bbox bboxFilter = new Bbox("geometryName", OLFactory.createExtent(0, 0, 1, 1), "EPSG:3857");
assertNotNull(bboxFilter);
bboxFilter.setExtent(OLFactory.createExtent(1, 1, 2, 2));
bboxFilter.setGeometryName("geometryAttribute");
bboxFilter.setSrsName("EPSG:4326");
});
}
public void testEqualTo() {
injectUrlAndTest(() -> {
EqualTo equalTo = new EqualTo("attribute", "value");
assertNotNull(equalTo);
EqualTo equalToInteger = new EqualTo("attribute", 1);
assertNotNull(equalToInteger);
EqualTo equalToDouble = new EqualTo("attribute", 1.25);
assertNotNull(equalToDouble);
});
}
public void testGreaterThan() {
injectUrlAndTest(() -> {
GreaterThan greaterThan = new GreaterThan("attribute", 5);
assertNotNull(greaterThan);
});
}
public void testGreaterThanOrEqualTo() {
injectUrlAndTest(() -> {
GreaterThanOrEqualTo greaterThanOrEqualTo = new GreaterThanOrEqualTo("attribute", 5);
assertNotNull(greaterThanOrEqualTo);
});
}
public void testIsBetween() {
injectUrlAndTest(() -> {
IsBetween isBetween = new IsBetween("attribute", 5, 10);
assertNotNull(isBetween);
});
}
public void testIsLike() {
injectUrlAndTest(() -> {
IsLike isLike = new IsLike("attribute", "value");
assertNotNull(isLike);
});
}
public void testIsNull() {
injectUrlAndTest(() -> {
IsNull isNull = new IsNull("attribute");
assertNotNull(isNull);
});
}
public void testLessThan() {
injectUrlAndTest(() -> {
LessThan lessThan = new LessThan("attribute", 5);
assertNotNull(lessThan);
});
}
public void testLessThanOrEqualTo() {
injectUrlAndTest(() -> {
LessThanOrEqualTo lessThanOrEqualTo = new LessThanOrEqualTo("attribute", 5);
assertNotNull(lessThanOrEqualTo);
});
}
public void testNotEqualTo() {
injectUrlAndTest(() -> {
NotEqualTo notEqualTo = new NotEqualTo("attribute", "value");
assertNotNull(notEqualTo);
NotEqualTo notEqualToInteger = new NotEqualTo("attribute", 1);
assertNotNull(notEqualToInteger);
NotEqualTo notEqualToDouble = new NotEqualTo("attribute", 1.25);
assertNotNull(notEqualToDouble);
});
}
}
 | Add tests for spatial filters | gwt-ol3-client/src/test/java/ol/format/filter/FilterTest.java | Add tests for spatial filters | <ide><path>gwt-ol3-client/src/test/java/ol/format/filter/FilterTest.java
<ide> *******************************************************************************/
<ide> package ol.format.filter;
<ide>
<add>import ol.Coordinate;
<ide> import ol.GwtOL3BaseTestCase;
<ide> import ol.OLFactory;
<add>import ol.geom.Circle;
<ide>
<ide> /**
<ide> * Test for filters.
<ide>
<ide> public void testGreaterThanOrEqualTo() {
<ide> injectUrlAndTest(() -> {
<del>
<add>
<ide> GreaterThanOrEqualTo greaterThanOrEqualTo = new GreaterThanOrEqualTo("attribute", 5);
<ide> assertNotNull(greaterThanOrEqualTo);
<add>
<add> });
<add> }
<add>
<add> public void testIntersects() {
<add> injectUrlAndTest(() -> {
<add>
<add> Intersects intersectsFilter = new Intersects("geometryName", new Circle(Coordinate.create(0, 0), 2), "EPSG:3857");
<add> assertNotNull(intersectsFilter);
<ide>
<ide> });
<ide> }
<ide>
<ide> });
<ide> }
<del>
<add>
<add> public void testWithin() {
<add> injectUrlAndTest(() -> {
<add>
<add> Within within = new Within("geometryName", new Circle(Coordinate.create(5, 5), 5), "EPSG:3857");
<add> assertNotNull(within);
<add>
<add> });
<add> }
<add>
<ide> } |
|
Java | apache-2.0 | 0d7db3300d80feb060f108c8d2f409d9f52ae56b | 0 | filestack/filestack-java,filestack/filestack-java | package com.filestack.util;
import java.io.IOException;
import java.io.InputStream;
import java.util.Properties;
import okhttp3.MultipartBody;
import okhttp3.RequestBody;
/**
* Small helper functions that don't need their own class.
*/
public class Util {
/**
* Loads version string from properties file in resources folder.
*
* @return Version string
*/
public static String getVersion() {
ClassLoader loader = Thread.currentThread().getContextClassLoader();
InputStream inputStream = loader.getResourceAsStream("com/filestack/version.properties");
Properties prop = new Properties();
String version = "";
    try {
      prop.load(inputStream);
      version = prop.getProperty("version");
    } catch (IOException e) {
      // Fall back to a placeholder rather than returning null when the
      // properties file cannot be read
      version = "x.y.z";
    }
    return version;
}
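  // Illustrative usage (an assumption, not taken from this class): the version
  // string is typically embedded in diagnostics or an HTTP User-Agent header,
  // e.g. "filestack-java " + Util.getVersion().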
/**
* Creates {@link RequestBody Request Body} from String.
* For multipart form uploads.
*/
public static RequestBody createStringPart(String content) {
return RequestBody.create(MultipartBody.FORM, content);
}
}
| src/main/java/com/filestack/util/Util.java | package com.filestack.util;
import okhttp3.MultipartBody;
import okhttp3.RequestBody;
import java.io.IOException;
import java.io.InputStream;
import java.util.Properties;
/**
* Small helper functions that don't need their own class.
*/
public class Util {
/**
* Loads version string from properties file in resources folder.
*
* @return Version string
*/
public static String getVersion() {
ClassLoader loader = Thread.currentThread().getContextClassLoader();
InputStream inputStream = loader.getResourceAsStream("com/filestack/version.properties");
Properties prop = new Properties();
String version = "";
try {
prop.load(inputStream);
} catch (IOException e) {
version = "x.y.z";
}
version = prop.getProperty("version");
return version;
}
/**
* Creates {@link RequestBody Request Body} from String.
* For multipart form uploads.
*/
public static RequestBody createStringPart(String content) {
return RequestBody.create(MultipartBody.FORM, content);
}
}
| Fix linter warnings for Util
 | src/main/java/com/filestack/util/Util.java | Fix linter warnings for Util | <ide><path>src/main/java/com/filestack/util/Util.java
<ide> package com.filestack.util;
<del>
<del>import okhttp3.MultipartBody;
<del>import okhttp3.RequestBody;
<ide>
<ide> import java.io.IOException;
<ide> import java.io.InputStream;
<ide> import java.util.Properties;
<add>import okhttp3.MultipartBody;
<add>import okhttp3.RequestBody;
<ide>
<ide> /**
<ide> * Small helper functions that don't need their own class. |
|
Java | apache-2.0 | f95fe02d8084310e9f2f32bbd74d2dfe4f4984e8 | 0 | couchbase/couchbase-lite-android,gotmyjobs/couchbase-lite-android,couchbase/couchbase-lite-android,gotmyjobs/couchbase-lite-android | package com.couchbase.lite.replicator;
import com.couchbase.lite.CouchbaseLiteException;
import com.couchbase.lite.Database;
import com.couchbase.lite.Document;
import com.couchbase.lite.DocumentChange;
import com.couchbase.lite.Emitter;
import com.couchbase.lite.LiteTestCaseWithDB;
import com.couchbase.lite.LiveQuery;
import com.couchbase.lite.Manager;
import com.couchbase.lite.Mapper;
import com.couchbase.lite.Query;
import com.couchbase.lite.QueryEnumerator;
import com.couchbase.lite.QueryOptions;
import com.couchbase.lite.QueryRow;
import com.couchbase.lite.ReplicationFilter;
import com.couchbase.lite.Revision;
import com.couchbase.lite.SavedRevision;
import com.couchbase.lite.Status;
import com.couchbase.lite.TransactionalTask;
import com.couchbase.lite.UnsavedRevision;
import com.couchbase.lite.ValidationContext;
import com.couchbase.lite.Validator;
import com.couchbase.lite.View;
import com.couchbase.lite.auth.Authenticator;
import com.couchbase.lite.auth.AuthenticatorFactory;
import com.couchbase.lite.auth.BasicAuthenticator;
import com.couchbase.lite.auth.FacebookAuthorizer;
import com.couchbase.lite.internal.RevisionInternal;
import com.couchbase.lite.mockserver.MockBulkDocs;
import com.couchbase.lite.mockserver.MockChangesFeed;
import com.couchbase.lite.mockserver.MockChangesFeedNoResponse;
import com.couchbase.lite.mockserver.MockCheckpointGet;
import com.couchbase.lite.mockserver.MockCheckpointPut;
import com.couchbase.lite.mockserver.MockDispatcher;
import com.couchbase.lite.mockserver.MockDocumentBulkGet;
import com.couchbase.lite.mockserver.MockDocumentGet;
import com.couchbase.lite.mockserver.MockDocumentPut;
import com.couchbase.lite.mockserver.MockFacebookAuthPost;
import com.couchbase.lite.mockserver.MockHelper;
import com.couchbase.lite.mockserver.MockRevsDiff;
import com.couchbase.lite.mockserver.MockSessionGet;
import com.couchbase.lite.mockserver.SmartMockResponseImpl;
import com.couchbase.lite.mockserver.WrappedSmartMockResponse;
import com.couchbase.lite.support.Base64;
import com.couchbase.lite.support.CouchbaseLiteHttpClientFactory;
import com.couchbase.lite.support.HttpClientFactory;
import com.couchbase.lite.support.MultipartReader;
import com.couchbase.lite.support.MultipartReaderDelegate;
import com.couchbase.lite.support.RemoteRequestRetry;
import com.couchbase.lite.util.Log;
import com.couchbase.lite.util.Utils;
import com.couchbase.org.apache.http.entity.mime.MultipartEntity;
import com.squareup.okhttp.mockwebserver.MockResponse;
import com.squareup.okhttp.mockwebserver.MockWebServer;
import com.squareup.okhttp.mockwebserver.RecordedRequest;
import junit.framework.Assert;
import org.apache.http.HttpEntity;
import org.apache.http.HttpRequest;
import org.apache.http.HttpResponse;
import org.apache.http.client.CookieStore;
import org.apache.http.client.HttpClient;
import org.apache.http.client.HttpResponseException;
import org.apache.http.client.methods.HttpPost;
import org.apache.http.client.methods.HttpPut;
import org.apache.http.client.methods.HttpUriRequest;
import org.apache.http.cookie.Cookie;
import java.io.IOException;
import java.io.InputStream;
import java.net.MalformedURLException;
import java.net.URL;
import java.net.URLDecoder;
import java.text.SimpleDateFormat;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Calendar;
import java.util.Collections;
import java.util.Date;
import java.util.GregorianCalendar;
import java.util.HashMap;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Queue;
import java.util.Set;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
/**
* Tests for the new state machine based replicator
*/
public class ReplicationTest extends LiteTestCaseWithDB {
/**
* TestCase(CreateReplicators) in ReplicationAPITests.m
*/
public void testCreateReplicators() throws Exception {
URL fakeRemoteURL = new URL("http://fake.fake/fakedb");
        // Create a replication:
assertEquals(0, database.getAllReplications().size());
Replication r1 = database.createPushReplication(fakeRemoteURL);
assertNotNull(r1);
// Check the replication's properties:
assertEquals(database, r1.getLocalDatabase());
assertEquals(fakeRemoteURL, r1.getRemoteUrl());
assertFalse(r1.isPull());
assertFalse(r1.isContinuous());
assertFalse(r1.shouldCreateTarget());
assertNull(r1.getFilter());
assertNull(r1.getFilterParams());
assertNull(r1.getDocIds());
assertEquals(0, r1.getHeaders().size());
// Check that the replication hasn't started running:
assertFalse(r1.isRunning());
assertEquals(Replication.ReplicationStatus.REPLICATION_STOPPED, r1.getStatus());
assertEquals(0, r1.getChangesCount());
assertEquals(0, r1.getCompletedChangesCount());
assertNull(r1.getLastError());
// Create another replication:
Replication r2 = database.createPullReplication(fakeRemoteURL);
assertNotNull(r2);
assertTrue(r1 != r2);
// Check the replication's properties:
assertEquals(database, r2.getLocalDatabase());
assertEquals(fakeRemoteURL, r2.getRemoteUrl());
assertTrue(r2.isPull());
Replication r3 = database.createPullReplication(fakeRemoteURL);
assertNotNull(r3);
assertTrue(r3 != r2);
r3.setDocIds(Arrays.asList("doc1", "doc2"));
Replication repl = database.getManager().getReplicator(r3.getProperties());
assertEquals(r3.getDocIds(), repl.getDocIds());
}
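    // Note on the assertions above: Manager.getReplicator(properties) builds an
    // equivalent replicator from a replication's properties map, which is what
    // the final docIds equality check verifies.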
/**
* Continuous puller starts offline
* Wait for a while .. (til what?)
* Add remote document (simulate w/ mock webserver)
* Put replication online
* Make sure doc is pulled
*/
public void testGoOnlinePuller() throws Exception {
Log.d(Log.TAG, "testGoOnlinePuller");
// create mock server
MockWebServer server = new MockWebServer();
try {
MockDispatcher dispatcher = new MockDispatcher();
dispatcher.setServerType(MockDispatcher.ServerType.SYNC_GW);
server.setDispatcher(dispatcher);
server.play();
// mock documents to be pulled
MockDocumentGet.MockDocument mockDoc1 = new MockDocumentGet.MockDocument("doc1", "1-5e38", 1);
mockDoc1.setJsonMap(MockHelper.generateRandomJsonMap());
// checkpoint PUT or GET response (sticky)
MockCheckpointPut mockCheckpointPut = new MockCheckpointPut();
mockCheckpointPut.setSticky(true);
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHECKPOINT, mockCheckpointPut);
// _changes response 503 error (sticky)
WrappedSmartMockResponse wrapped2 = new WrappedSmartMockResponse(new MockResponse().setResponseCode(503));
wrapped2.setSticky(true);
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHANGES, wrapped2);
// doc1 response
MockDocumentGet mockDocumentGet = new MockDocumentGet(mockDoc1);
dispatcher.enqueueResponse(mockDoc1.getDocPathRegex(), mockDocumentGet.generateMockResponse());
// _revs_diff response -- everything missing
MockRevsDiff mockRevsDiff = new MockRevsDiff();
mockRevsDiff.setSticky(true);
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_REVS_DIFF, mockRevsDiff);
// _bulk_docs response -- everything stored
MockBulkDocs mockBulkDocs = new MockBulkDocs();
mockBulkDocs.setSticky(true);
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_BULK_DOCS, mockBulkDocs);
// create and start replication
Replication pullReplication = database.createPullReplication(server.getUrl("/db"));
pullReplication.setContinuous(true);
pullReplication.start();
Log.d(Log.TAG, "Started pullReplication: %s", pullReplication);
// wait until a _checkpoint request have been sent
dispatcher.takeRequestBlocking(MockHelper.PATH_REGEX_CHECKPOINT);
// wait until a _changes request has been sent
dispatcher.takeRequestBlocking(MockHelper.PATH_REGEX_CHANGES);
putReplicationOffline(pullReplication);
// clear out existing queued mock responses to make room for new ones
dispatcher.clearQueuedResponse(MockHelper.PATH_REGEX_CHANGES);
// real _changes response with doc1
MockChangesFeed mockChangesFeed = new MockChangesFeed();
mockChangesFeed.add(new MockChangesFeed.MockChangedDoc(mockDoc1));
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHANGES, mockChangesFeed.generateMockResponse());
// long poll changes feed no response
MockChangesFeedNoResponse mockChangesFeedNoResponse = new MockChangesFeedNoResponse();
mockChangesFeedNoResponse.setSticky(true);
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHANGES, mockChangesFeedNoResponse);
putReplicationOnline(pullReplication);
Log.d(Log.TAG, "Waiting for PUT checkpoint request with seq: %d", mockDoc1.getDocSeq());
waitForPutCheckpointRequestWithSeq(dispatcher, mockDoc1.getDocSeq());
Log.d(Log.TAG, "Got PUT checkpoint request with seq: %d", mockDoc1.getDocSeq());
stopReplication(pullReplication);
} finally {
server.shutdown();
}
}
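    /**
     * Minimal sketch (an assumption: the real putReplicationOffline() helper
     * lives in the shared test base class) of what "putting a replication
     * offline" boils down to: toggle the replicator and block until the
     * OFFLINE state transition is observed.
     */
    private void goOfflineSketch(final Replication repl) throws InterruptedException {
        final CountDownLatch offline = new CountDownLatch(1);
        repl.addChangeListener(new Replication.ChangeListener() {
            @Override
            public void changed(Replication.ChangeEvent event) {
                // count down once the replicator reports the OFFLINE state
                if (event.getTransition() != null
                        && event.getTransition().getDestination() == ReplicationState.OFFLINE) {
                    offline.countDown();
                }
            }
        });
        repl.goOffline();
        assertTrue(offline.await(30, TimeUnit.SECONDS));
    }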
/**
* Start continuous replication with a closed db.
* <p/>
* Expected behavior:
* - Receive replication finished callback
* - Replication lastError will contain an exception
*/
public void testStartReplicationClosedDb() throws Exception {
Database db = this.manager.getDatabase("closed");
final CountDownLatch countDownLatch = new CountDownLatch(1);
final Replication replication = db.createPullReplication(new URL("http://fake.com/foo"));
replication.setContinuous(true);
replication.addChangeListener(new Replication.ChangeListener() {
@Override
public void changed(Replication.ChangeEvent event) {
Log.d(TAG, "changed event: %s", event);
if (replication.isRunning() == false) {
countDownLatch.countDown();
}
}
});
db.close();
replication.start();
boolean success = countDownLatch.await(60, TimeUnit.SECONDS);
assertTrue(success);
assertTrue(replication.getLastError() != null);
}
/**
* Start a replication and stop it immediately
*/
public void failingTestStartReplicationStartStop() throws Exception {
final CountDownLatch countDownLatch = new CountDownLatch(1);
final List<ReplicationStateTransition> transitions = new ArrayList<ReplicationStateTransition>();
final Replication replication = database.createPullReplication(new URL("http://fake.com/foo"));
replication.setContinuous(true);
replication.addChangeListener(new ReplicationFinishedObserver(countDownLatch));
replication.start();
replication.start(); // this should be ignored
replication.stop();
replication.stop(); // this should be ignored
boolean success = countDownLatch.await(60, TimeUnit.SECONDS);
assertTrue(success);
assertTrue(replication.getLastError() == null);
assertEquals(3, transitions.size());
assertEquals(ReplicationState.INITIAL, transitions.get(0).getSource());
assertEquals(ReplicationState.RUNNING, transitions.get(0).getDestination());
assertEquals(ReplicationState.RUNNING, transitions.get(1).getSource());
assertEquals(ReplicationState.STOPPING, transitions.get(1).getDestination());
assertEquals(ReplicationState.STOPPING, transitions.get(2).getSource());
assertEquals(ReplicationState.STOPPED, transitions.get(2).getDestination());
}
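    // Note on the test above: the `transitions` list is never populated because
    // no listener records state transitions into it, so the size and ordering
    // asserts cannot pass as written; a working variant would add a
    // ChangeListener that appends event.getTransition() to `transitions`
    // before calling start()/stop().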
/**
* Pull replication test:
* <p/>
* - Single one-shot pull replication
* - Against simulated sync gateway
* - Remote docs do not have attachments
*/
public void testMockSinglePullSyncGw() throws Exception {
boolean shutdownMockWebserver = true;
boolean addAttachments = false;
mockSinglePull(shutdownMockWebserver, MockDispatcher.ServerType.SYNC_GW, addAttachments);
}
/**
* Pull replication test:
* <p/>
* - Single one-shot pull replication
* - Against simulated couchdb
* - Remote docs do not have attachments
*/
public void testMockSinglePullCouchDb() throws Exception {
boolean shutdownMockWebserver = true;
boolean addAttachments = false;
mockSinglePull(shutdownMockWebserver, MockDispatcher.ServerType.COUCHDB, addAttachments);
}
/**
* Pull replication test:
* <p/>
* - Single one-shot pull replication
* - Against simulated couchdb
* - Remote docs have attachments
*/
public void testMockSinglePullCouchDbAttachments() throws Exception {
boolean shutdownMockWebserver = true;
boolean addAttachments = true;
mockSinglePull(shutdownMockWebserver, MockDispatcher.ServerType.COUCHDB, addAttachments);
}
/**
* Pull replication test:
* <p/>
* - Single one-shot pull replication
* - Against simulated sync gateway
* - Remote docs have attachments
* <p/>
* TODO: sporadic assertion failure when checking rev field of PUT checkpoint requests
*/
public void testMockSinglePullSyncGwAttachments() throws Exception {
boolean shutdownMockWebserver = true;
boolean addAttachments = true;
mockSinglePull(shutdownMockWebserver, MockDispatcher.ServerType.SYNC_GW, addAttachments);
}
public void testMockMultiplePullSyncGw() throws Exception {
boolean shutdownMockWebserver = true;
mockMultiplePull(shutdownMockWebserver, MockDispatcher.ServerType.SYNC_GW);
}
public void testMockMultiplePullCouchDb() throws Exception {
boolean shutdownMockWebserver = true;
mockMultiplePull(shutdownMockWebserver, MockDispatcher.ServerType.COUCHDB);
}
public void testMockContinuousPullCouchDb() throws Exception {
        // TODO: (IMPORTANT, FORESTDB) lastSequence for checkpoint does not match and causes deadlock
        // if(!isSQLiteDB())
        //     fail("FORESTDB causes deadlock because of lastSequence mismatch for checkpoint");
boolean shutdownMockWebserver = true;
mockContinuousPull(shutdownMockWebserver, MockDispatcher.ServerType.COUCHDB);
}
/**
* Do a pull replication
*
* @param shutdownMockWebserver - should this test shutdown the mockwebserver
* when done? if another test wants to pick up
* where this left off, you should pass false.
* @param serverType - should the mock return the Sync Gateway server type in
* the "Server" HTTP Header? this changes the behavior of the
     *                   replicator to use bulk_get and POST requests for _changes feeds.
* @param addAttachments - should the mock sync gateway return docs with attachments?
* @return a map that contains the mockwebserver (key="server") and the mock dispatcher
* (key="dispatcher")
*/
public Map<String, Object> mockSinglePull(boolean shutdownMockWebserver, MockDispatcher.ServerType serverType, boolean addAttachments) throws Exception {
// create mockwebserver and custom dispatcher
MockDispatcher dispatcher = new MockDispatcher();
MockWebServer server = MockHelper.getMockWebServer(dispatcher);
try {
dispatcher.setServerType(serverType);
// mock documents to be pulled
MockDocumentGet.MockDocument mockDoc1 = new MockDocumentGet.MockDocument("doc1", "1-5e38", 1);
mockDoc1.setJsonMap(MockHelper.generateRandomJsonMap());
mockDoc1.setAttachmentName("attachment.png");
MockDocumentGet.MockDocument mockDoc2 = new MockDocumentGet.MockDocument("doc2", "1-563b", 2);
mockDoc2.setJsonMap(MockHelper.generateRandomJsonMap());
mockDoc2.setAttachmentName("attachment2.png");
// checkpoint GET response w/ 404
MockResponse fakeCheckpointResponse = new MockResponse();
MockHelper.set404NotFoundJson(fakeCheckpointResponse);
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHECKPOINT, fakeCheckpointResponse);
// _changes response
MockChangesFeed mockChangesFeed = new MockChangesFeed();
mockChangesFeed.add(new MockChangesFeed.MockChangedDoc(mockDoc1));
mockChangesFeed.add(new MockChangesFeed.MockChangedDoc(mockDoc2));
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHANGES, mockChangesFeed.generateMockResponse());
// doc1 response
MockDocumentGet mockDocumentGet = new MockDocumentGet(mockDoc1);
if (addAttachments) {
mockDocumentGet.addAttachmentFilename(mockDoc1.getAttachmentName());
}
dispatcher.enqueueResponse(mockDoc1.getDocPathRegex(), mockDocumentGet.generateMockResponse());
// doc2 response
mockDocumentGet = new MockDocumentGet(mockDoc2);
if (addAttachments) {
mockDocumentGet.addAttachmentFilename(mockDoc2.getAttachmentName());
}
dispatcher.enqueueResponse(mockDoc2.getDocPathRegex(), mockDocumentGet.generateMockResponse());
// _bulk_get response
MockDocumentBulkGet mockBulkGet = new MockDocumentBulkGet();
mockBulkGet.addDocument(mockDoc1);
mockBulkGet.addDocument(mockDoc2);
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_BULK_GET, mockBulkGet);
// respond to all PUT Checkpoint requests
MockCheckpointPut mockCheckpointPut = new MockCheckpointPut();
mockCheckpointPut.setSticky(true);
mockCheckpointPut.setDelayMs(500);
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHECKPOINT, mockCheckpointPut);
// start mock server
server.play();
// run pull replication
Replication pullReplication = database.createPullReplication(server.getUrl("/db"));
Map<String, Object> headers = new HashMap<String, Object>();
headers.put("foo", "bar");
pullReplication.setHeaders(headers);
String checkpointId = pullReplication.remoteCheckpointDocID();
runReplication(pullReplication);
Log.d(TAG, "pullReplication finished");
database.addChangeListener(new Database.ChangeListener() {
@Override
public void changed(Database.ChangeEvent event) {
List<DocumentChange> changes = event.getChanges();
for (DocumentChange documentChange : changes) {
Log.d(TAG, "doc change callback: %s", documentChange.getDocumentId());
}
}
});
// assert that we now have both docs in local db
assertNotNull(database);
Document doc1 = database.getDocument(mockDoc1.getDocId());
assertNotNull(doc1);
assertNotNull(doc1.getCurrentRevisionId());
assertTrue(doc1.getCurrentRevisionId().equals(mockDoc1.getDocRev()));
assertNotNull(doc1.getProperties());
assertEquals(mockDoc1.getJsonMap(), doc1.getUserProperties());
Document doc2 = database.getDocument(mockDoc2.getDocId());
assertNotNull(doc2);
assertNotNull(doc2.getCurrentRevisionId());
assertNotNull(doc2.getProperties());
assertTrue(doc2.getCurrentRevisionId().equals(mockDoc2.getDocRev()));
assertEquals(mockDoc2.getJsonMap(), doc2.getUserProperties());
// assert that docs have attachments (if applicable)
if (addAttachments) {
attachmentAsserts(mockDoc1.getAttachmentName(), doc1);
attachmentAsserts(mockDoc2.getAttachmentName(), doc2);
}
// make assertions about outgoing requests from replicator -> mock
RecordedRequest getCheckpointRequest = dispatcher.takeRequest(MockHelper.PATH_REGEX_CHECKPOINT);
assertNotNull(getCheckpointRequest);
assertEquals("bar", getCheckpointRequest.getHeader("foo"));
assertTrue(getCheckpointRequest.getMethod().equals("GET"));
assertTrue(getCheckpointRequest.getPath().matches(MockHelper.PATH_REGEX_CHECKPOINT));
RecordedRequest getChangesFeedRequest = dispatcher.takeRequest(MockHelper.PATH_REGEX_CHANGES);
if (serverType == MockDispatcher.ServerType.SYNC_GW) {
assertTrue(getChangesFeedRequest.getMethod().equals("POST"));
} else {
assertTrue(getChangesFeedRequest.getMethod().equals("GET"));
}
assertTrue(getChangesFeedRequest.getPath().matches(MockHelper.PATH_REGEX_CHANGES));
// wait until the mock webserver receives a PUT checkpoint request with doc #2's sequence
Log.d(TAG, "waiting for PUT checkpoint %s", mockDoc2.getDocSeq());
List<RecordedRequest> checkpointRequests = waitForPutCheckpointRequestWithSequence(dispatcher, mockDoc2.getDocSeq());
validateCheckpointRequestsRevisions(checkpointRequests);
Log.d(TAG, "got PUT checkpoint %s", mockDoc2.getDocSeq());
// assert our local sequence matches what is expected
String lastSequence = database.lastSequenceWithCheckpointId(checkpointId);
assertEquals(Integer.toString(mockDoc2.getDocSeq()), lastSequence);
// assert completed count makes sense
assertEquals(pullReplication.getChangesCount(), pullReplication.getCompletedChangesCount());
// allow for either a single _bulk_get request or individual doc requests.
// if the server is sync gateway, it is allowable for replicator to use _bulk_get
RecordedRequest request = dispatcher.takeRequest(MockHelper.PATH_REGEX_BULK_GET);
if (request != null) {
String body = MockHelper.getUtf8Body(request);
assertTrue(body.contains(mockDoc1.getDocId()));
assertTrue(body.contains(mockDoc2.getDocId()));
} else {
RecordedRequest doc1Request = dispatcher.takeRequest(mockDoc1.getDocPathRegex());
assertTrue(doc1Request.getMethod().equals("GET"));
assertTrue(doc1Request.getPath().matches(mockDoc1.getDocPathRegex()));
RecordedRequest doc2Request = dispatcher.takeRequest(mockDoc2.getDocPathRegex());
assertTrue(doc2Request.getMethod().equals("GET"));
assertTrue(doc2Request.getPath().matches(mockDoc2.getDocPathRegex()));
}
} finally {
// Shut down the server. Instances cannot be reused.
if (shutdownMockWebserver) {
server.shutdown();
}
}
Map<String, Object> returnVal = new HashMap<String, Object>();
returnVal.put("server", server);
returnVal.put("dispatcher", dispatcher);
return returnVal;
}
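    // Usage note: callers that pass shutdownMockWebserver=false (see
    // mockMultiplePull below) pull the live "server" and "dispatcher" entries
    // back out of the returned map so the next phase of the scenario can run
    // against the same MockWebServer instance.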
/**
* Simulate the following:
* <p/>
* - Add a few docs and do a pull replication
* - One doc on sync gateway is now updated
* - Do a second pull replication
* - Assert we get the updated doc and save it locally
*/
public Map<String, Object> mockMultiplePull(boolean shutdownMockWebserver, MockDispatcher.ServerType serverType) throws Exception {
String doc1Id = "doc1";
// create mockwebserver and custom dispatcher
boolean addAttachments = false;
// do a pull replication
Map<String, Object> serverAndDispatcher = mockSinglePull(false, serverType, addAttachments);
MockWebServer server = (MockWebServer) serverAndDispatcher.get("server");
MockDispatcher dispatcher = (MockDispatcher) serverAndDispatcher.get("dispatcher");
try {
// clear out any possible residue left from previous test, eg, mock responses queued up as
// any recorded requests that have been logged.
dispatcher.reset();
String doc1Rev = "2-2e38";
int doc1Seq = 3;
String checkpointRev = "0-1";
String checkpointLastSequence = "2";
// checkpoint GET response w/ seq = 2
MockCheckpointGet mockCheckpointGet = new MockCheckpointGet();
mockCheckpointGet.setOk("true");
mockCheckpointGet.setRev(checkpointRev);
mockCheckpointGet.setLastSequence(checkpointLastSequence);
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHECKPOINT, mockCheckpointGet);
// _changes response
MockChangesFeed mockChangesFeed = new MockChangesFeed();
MockChangesFeed.MockChangedDoc mockChangedDoc1 = new MockChangesFeed.MockChangedDoc()
.setSeq(doc1Seq)
.setDocId(doc1Id)
.setChangedRevIds(Arrays.asList(doc1Rev));
mockChangesFeed.add(mockChangedDoc1);
MockResponse fakeChangesResponse = mockChangesFeed.generateMockResponse();
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHANGES, fakeChangesResponse);
// doc1 response
Map<String, Object> doc1JsonMap = MockHelper.generateRandomJsonMap();
MockDocumentGet mockDocumentGet = new MockDocumentGet()
.setDocId(doc1Id)
.setRev(doc1Rev)
.setJsonMap(doc1JsonMap);
String doc1PathRegex = "/db/doc1.*";
dispatcher.enqueueResponse(doc1PathRegex, mockDocumentGet.generateMockResponse());
// checkpoint PUT response
MockCheckpointPut mockCheckpointPut = new MockCheckpointPut();
mockCheckpointGet.setSticky(true);
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHECKPOINT, mockCheckpointPut);
// run pull replication
Replication pullReplication = database.createPullReplication(server.getUrl("/db"));
runReplication(pullReplication);
// assert that we now have both docs in local db
assertNotNull(database);
Document doc1 = database.getDocument(doc1Id);
assertNotNull(doc1);
assertNotNull(doc1.getCurrentRevisionId());
assertTrue(doc1.getCurrentRevisionId().startsWith("2-"));
assertEquals(doc1JsonMap, doc1.getUserProperties());
// make assertions about outgoing requests from replicator -> mock
RecordedRequest getCheckpointRequest = dispatcher.takeRequest(MockHelper.PATH_REGEX_CHECKPOINT);
assertNotNull(getCheckpointRequest);
assertTrue(getCheckpointRequest.getMethod().equals("GET"));
assertTrue(getCheckpointRequest.getPath().matches(MockHelper.PATH_REGEX_CHECKPOINT));
RecordedRequest getChangesFeedRequest = dispatcher.takeRequest(MockHelper.PATH_REGEX_CHANGES);
if (serverType == MockDispatcher.ServerType.SYNC_GW) {
assertTrue(getChangesFeedRequest.getMethod().equals("POST"));
} else {
assertTrue(getChangesFeedRequest.getMethod().equals("GET"));
}
assertTrue(getChangesFeedRequest.getPath().matches(MockHelper.PATH_REGEX_CHANGES));
if (serverType == MockDispatcher.ServerType.SYNC_GW) {
Map<String, Object> jsonMap = Manager.getObjectMapper().readValue(getChangesFeedRequest.getUtf8Body(), Map.class);
assertTrue(jsonMap.containsKey("since"));
Integer since = (Integer) jsonMap.get("since");
assertEquals(2, since.intValue());
}
RecordedRequest doc1Request = dispatcher.takeRequest(doc1PathRegex);
assertTrue(doc1Request.getMethod().equals("GET"));
assertTrue(doc1Request.getPath().matches("/db/doc1\\?rev=2-2e38.*"));
// wait until the mock webserver receives a PUT checkpoint request with doc #2's sequence
int expectedLastSequence = doc1Seq;
List<RecordedRequest> checkpointRequests = waitForPutCheckpointRequestWithSequence(dispatcher, expectedLastSequence);
assertEquals(1, checkpointRequests.size());
// assert our local sequence matches what is expected
String lastSequence = database.lastSequenceWithCheckpointId(pullReplication.remoteCheckpointDocID());
assertEquals(Integer.toString(expectedLastSequence), lastSequence);
// assert completed count makes sense
assertEquals(pullReplication.getChangesCount(), pullReplication.getCompletedChangesCount());
} finally {
if (shutdownMockWebserver) {
server.shutdown();
}
}
Map<String, Object> returnVal = new HashMap<String, Object>();
returnVal.put("server", server);
returnVal.put("dispatcher", dispatcher);
return returnVal;
}
public Map<String, Object> mockContinuousPull(boolean shutdownMockWebserver, MockDispatcher.ServerType serverType) throws Exception {
assertTrue(serverType == MockDispatcher.ServerType.COUCHDB);
final int numMockRemoteDocs = 20; // must be multiple of 10!
final AtomicInteger numDocsPulledLocally = new AtomicInteger(0);
MockDispatcher dispatcher = new MockDispatcher();
dispatcher.setServerType(serverType);
int numDocsPerChangesResponse = numMockRemoteDocs / 10;
MockWebServer server = MockHelper.getPreloadedPullTargetMockCouchDB(dispatcher, numMockRemoteDocs, numDocsPerChangesResponse);
try {
server.play();
final CountDownLatch receivedAllDocs = new CountDownLatch(1);
// run pull replication
Replication pullReplication = database.createPullReplication(server.getUrl("/db"));
pullReplication.setContinuous(true);
final CountDownLatch replicationDoneSignal = new CountDownLatch(1);
pullReplication.addChangeListener(new ReplicationFinishedObserver(replicationDoneSignal));
final CountDownLatch replicationIdleSignal = new CountDownLatch(1);
ReplicationIdleObserver idleObserver = new ReplicationIdleObserver(replicationIdleSignal);
pullReplication.addChangeListener(idleObserver);
database.addChangeListener(new Database.ChangeListener() {
@Override
public void changed(Database.ChangeEvent event) {
List<DocumentChange> changes = event.getChanges();
for (DocumentChange change : changes) {
numDocsPulledLocally.addAndGet(1);
}
if (numDocsPulledLocally.get() == numMockRemoteDocs) {
receivedAllDocs.countDown();
}
}
});
pullReplication.start();
// wait until we received all mock docs or timeout occurs
boolean success = receivedAllDocs.await(60, TimeUnit.SECONDS);
assertTrue(success);
// make sure all docs in local db
Map<String, Object> allDocs = database.getAllDocs(new QueryOptions());
Integer totalRows = (Integer) allDocs.get("total_rows");
List rows = (List) allDocs.get("rows");
assertEquals(numMockRemoteDocs, totalRows.intValue());
assertEquals(numMockRemoteDocs, rows.size());
// wait until idle
success = replicationIdleSignal.await(30, TimeUnit.SECONDS);
assertTrue(success);
// cleanup / shutdown
pullReplication.stop();
success = replicationDoneSignal.await(30, TimeUnit.SECONDS);
assertTrue(success);
long lastSeq = database.getLastSequenceNumber();
Log.e(TAG, "lastSequence = %d", lastSeq);
// wait until the mock webserver receives a PUT checkpoint request with last do's sequence,
// this avoids ugly and confusing exceptions in the logs.
List<RecordedRequest> checkpointRequests = waitForPutCheckpointRequestWithSequence(dispatcher, numMockRemoteDocs - 1);
validateCheckpointRequestsRevisions(checkpointRequests);
} finally {
if (shutdownMockWebserver) {
server.shutdown();
}
}
Map<String, Object> returnVal = new HashMap<String, Object>();
returnVal.put("server", server);
returnVal.put("dispatcher", dispatcher);
return returnVal;
}
/**
* https://github.com/couchbase/couchbase-lite-java-core/issues/257
* <p/>
* - Create local document with attachment
* - Start continuous pull replication
* - MockServer returns _changes with new rev of document
* - MockServer returns doc multipart response: https://gist.github.com/tleyden/bf36f688d0b5086372fd
* - Delete doc cache (not sure if needed)
* - Fetch doc fresh from database
* - Verify that it still has attachments
*/
public void testAttachmentsDeletedOnPull() throws Exception {
String doc1Id = "doc1";
int doc1Rev2Generation = 2;
String doc1Rev2Digest = "b000";
String doc1Rev2 = String.format("%d-%s", doc1Rev2Generation, doc1Rev2Digest);
int doc1Seq1 = 1;
String doc1AttachName = "attachment.png";
String contentType = "image/png";
// create mockwebserver and custom dispatcher
MockDispatcher dispatcher = new MockDispatcher();
MockWebServer server = MockHelper.getMockWebServer(dispatcher);
try {
dispatcher.setServerType(MockDispatcher.ServerType.SYNC_GW);
server.play();
// add some documents - verify it has an attachment
Document doc1 = createDocumentForPushReplication(doc1Id, doc1AttachName, contentType);
String doc1Rev1 = doc1.getCurrentRevisionId();
doc1 = database.getDocument(doc1.getId());
assertTrue(doc1.getCurrentRevision().getAttachments().size() > 0);
// checkpoint GET response w/ 404
MockResponse fakeCheckpointResponse = new MockResponse();
MockHelper.set404NotFoundJson(fakeCheckpointResponse);
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHECKPOINT, fakeCheckpointResponse);
// checkpoint PUT response
MockCheckpointPut mockCheckpointPut = new MockCheckpointPut();
mockCheckpointPut.setSticky(true);
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHECKPOINT, mockCheckpointPut);
// add response to 1st _changes request
final MockDocumentGet.MockDocument mockDocument1 = new MockDocumentGet.MockDocument(
doc1Id, doc1Rev2, doc1Seq1);
Map<String, Object> newProperties = new HashMap<String, Object>(doc1.getProperties());
newProperties.put("_rev", doc1Rev2);
mockDocument1.setJsonMap(newProperties);
mockDocument1.setAttachmentName(doc1AttachName);
MockChangesFeed mockChangesFeed = new MockChangesFeed();
mockChangesFeed.add(new MockChangesFeed.MockChangedDoc(mockDocument1));
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHANGES, mockChangesFeed.generateMockResponse());
// add sticky _changes response to feed=longpoll that just blocks for 60 seconds to emulate
// server that doesn't have any new changes
MockChangesFeedNoResponse mockChangesFeedNoResponse = new MockChangesFeedNoResponse();
mockChangesFeedNoResponse.setDelayMs(60 * 1000);
mockChangesFeedNoResponse.setSticky(true);
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHANGES, mockChangesFeedNoResponse);
// add response to doc get
MockDocumentGet mockDocumentGet = new MockDocumentGet(mockDocument1);
mockDocumentGet.addAttachmentFilename(mockDocument1.getAttachmentName());
mockDocumentGet.setIncludeAttachmentPart(false);
Map<String, Object> revHistory = new HashMap<String, Object>();
revHistory.put("start", doc1Rev2Generation);
List ids = Arrays.asList(
RevisionInternal.digestFromRevID(doc1Rev2),
RevisionInternal.digestFromRevID(doc1Rev1)
);
revHistory.put("ids", ids);
mockDocumentGet.setRevHistoryMap(revHistory);
dispatcher.enqueueResponse(mockDocument1.getDocPathRegex(), mockDocumentGet.generateMockResponse());
// create and start pull replication
Replication pullReplication = database.createPullReplication(server.getUrl("/db"));
pullReplication.setContinuous(true);
pullReplication.start();
// wait for the next PUT checkpoint request/response
waitForPutCheckpointRequestWithSeq(dispatcher, 1);
stopReplication(pullReplication);
// make sure doc has attachments
Document doc1Fetched = database.getDocument(doc1.getId());
assertTrue(doc1Fetched.getCurrentRevision().getAttachments().size() > 0);
} finally {
server.shutdown();
}
}
/**
* This is essentially a regression test for a deadlock
* that was happening when the LiveQuery#onDatabaseChanged()
* was calling waitForUpdateThread(), but that thread was
* waiting on connection to be released by the thread calling
* waitForUpdateThread(). When the deadlock bug was present,
* this test would trigger the deadlock and never finish.
* <p/>
* TODO: sporadic assertion failure when checking rev field of PUT checkpoint requests
*/
public void testPullerWithLiveQuery() throws Throwable {
View view = database.getView("testPullerWithLiveQueryView");
view.setMapReduce(new Mapper() {
@Override
public void map(Map<String, Object> document, Emitter emitter) {
if (document.get("_id") != null) {
emitter.emit(document.get("_id"), null);
}
}
}, null, "1");
final CountDownLatch countDownLatch = new CountDownLatch(1);
LiveQuery allDocsLiveQuery = view.createQuery().toLiveQuery();
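// the live query re-runs whenever the database changes; once the pull below
// has stored both mock documents, the row count reaches 2 and the latch fires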
allDocsLiveQuery.addChangeListener(new LiveQuery.ChangeListener() {
@Override
public void changed(LiveQuery.ChangeEvent event) {
if (event.getError() != null) {
throw new RuntimeException(event.getError());
}
if (event.getRows().getCount() == 2) {
countDownLatch.countDown();
}
}
});
// kick off live query
allDocsLiveQuery.start();
// do pull replication against mock
mockSinglePull(true, MockDispatcher.ServerType.SYNC_GW, true);
// make sure we were called back with both docs
boolean success = countDownLatch.await(30, TimeUnit.SECONDS);
assertTrue(success);
// clean up
allDocsLiveQuery.stop();
}
/**
* Make sure that if a continuous push gets an error
* pushing a doc, it will keep retrying it rather than giving up right away.
*
* @throws Exception
*/
public void testContinuousPushRetryBehavior() throws Exception {
RemoteRequestRetry.RETRY_DELAY_MS = 5; // speed up test execution (inner loop retry delay)
ReplicationInternal.RETRY_DELAY_SECONDS = 1; // speed up test execution (outer loop retry delay)
ReplicationInternal.MAX_RETRIES = 3; // speed up test execution (outer loop retry count)
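// with these settings each outer attempt makes (RemoteRequestRetry.MAX_RETRIES + 1)
// tries against _bulk_docs, followed by ReplicationInternal.MAX_RETRIES outer
// retries; the loops below count every one of those requests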
// create mockwebserver and custom dispatcher
MockDispatcher dispatcher = new MockDispatcher();
MockWebServer server = MockHelper.getMockWebServer(dispatcher);
dispatcher.setServerType(MockDispatcher.ServerType.SYNC_GW);
try {
// checkpoint GET response w/ 404 + respond to all PUT Checkpoint requests
MockCheckpointPut mockCheckpointPut = new MockCheckpointPut();
mockCheckpointPut.setSticky(true);
mockCheckpointPut.setDelayMs(500);
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHECKPOINT, mockCheckpointPut);
// _revs_diff response -- everything missing
MockRevsDiff mockRevsDiff = new MockRevsDiff();
mockRevsDiff.setSticky(true);
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_REVS_DIFF, mockRevsDiff);
// _bulk_docs response -- 503 errors
MockResponse mockResponse = new MockResponse().setResponseCode(503);
WrappedSmartMockResponse mockBulkDocs = new WrappedSmartMockResponse(mockResponse, false);
mockBulkDocs.setSticky(true);
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_BULK_DOCS, mockBulkDocs);
server.play();
// create replication
Replication replication = database.createPushReplication(server.getUrl("/db"));
replication.setContinuous(true);
CountDownLatch replicationIdle = new CountDownLatch(1);
ReplicationIdleObserver idleObserver = new ReplicationIdleObserver(replicationIdle);
replication.addChangeListener(idleObserver);
replication.start();
// wait until idle
boolean success = replicationIdle.await(30, TimeUnit.SECONDS);
assertTrue(success);
replication.removeChangeListener(idleObserver);
// create a doc in local db
Document doc1 = createDocumentForPushReplication("doc1", null, null);
// we expect to see at least numAttempts POSTs to _bulk_docs
// (numAttempts is the number of tries within a single outer attempt)
int numAttempts = RemoteRequestRetry.MAX_RETRIES + 1; // total tries per attempt = 4 (1 initial + MAX_RETRIES)
for (int i = 0; i < numAttempts; i++) {
RecordedRequest request = dispatcher.takeRequestBlocking(MockHelper.PATH_REGEX_BULK_DOCS);
assertNotNull(request);
dispatcher.takeRecordedResponseBlocking(request);
}
// As of 12/16/2014, CBL Core Java makes RemoteRequestRetry.MAX_RETRIES + 1 tries per attempt (see above).
// Without the fix for #299, the following code would hang.
// outer retry loop
for (int j = 0; j < ReplicationInternal.MAX_RETRIES; j++) {
// inner retry loop
for (int i = 0; i < numAttempts; i++) {
RecordedRequest request = dispatcher.takeRequestBlocking(MockHelper.PATH_REGEX_BULK_DOCS);
assertNotNull(request);
dispatcher.takeRecordedResponseBlocking(request);
}
}
// the replication has given up retrying; stop it
stopReplication(replication);
} finally {
server.shutdown();
}
}
public void testMockSinglePush() throws Exception {
boolean shutdownMockWebserver = true;
mockSinglePush(shutdownMockWebserver, MockDispatcher.ServerType.SYNC_GW);
}
/**
* Do a push replication
* <p/>
* - Create docs in local db
* - One with no attachment
* - One with small attachment
* - One with large attachment
*/
public Map<String, Object> mockSinglePush(boolean shutdownMockWebserver, MockDispatcher.ServerType serverType) throws Exception {
String doc1Id = "doc1";
String doc2Id = "doc2";
String doc3Id = "doc3";
String doc4Id = "doc4";
String doc2PathRegex = String.format("/db/%s.*", doc2Id);
String doc3PathRegex = String.format("/db/%s.*", doc3Id);
String doc2AttachName = "attachment.png";
String doc3AttachName = "attachment2.png";
String contentType = "image/png";
// create mockwebserver and custom dispatcher
MockDispatcher dispatcher = new MockDispatcher();
MockWebServer server = MockHelper.getMockWebServer(dispatcher);
dispatcher.setServerType(serverType);
try {
server.play();
// add some documents
Document doc1 = createDocumentForPushReplication(doc1Id, null, null);
Document doc2 = createDocumentForPushReplication(doc2Id, doc2AttachName, contentType);
Document doc3 = createDocumentForPushReplication(doc3Id, doc3AttachName, contentType);
Document doc4 = createDocumentForPushReplication(doc4Id, null, null);
doc4.delete();
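// doc4 is deleted locally, so the push should send it as a tombstone
// (_deleted: true); this is asserted against the _bulk_docs body below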
// checkpoint GET response w/ 404 + respond to all PUT Checkpoint requests
MockCheckpointPut mockCheckpointPut = new MockCheckpointPut();
mockCheckpointPut.setSticky(true);
mockCheckpointPut.setDelayMs(50);
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHECKPOINT, mockCheckpointPut);
// _revs_diff response -- everything missing
MockRevsDiff mockRevsDiff = new MockRevsDiff();
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_REVS_DIFF, mockRevsDiff);
// _bulk_docs response -- everything stored
MockBulkDocs mockBulkDocs = new MockBulkDocs();
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_BULK_DOCS, mockBulkDocs);
// doc PUT responses for docs with attachments
MockDocumentPut mockDoc2Put = new MockDocumentPut()
.setDocId(doc2Id)
.setRev(doc2.getCurrentRevisionId());
dispatcher.enqueueResponse(doc2PathRegex, mockDoc2Put.generateMockResponse());
MockDocumentPut mockDoc3Put = new MockDocumentPut()
.setDocId(doc3Id)
.setRev(doc3.getCurrentRevisionId());
dispatcher.enqueueResponse(doc3PathRegex, mockDoc3Put.generateMockResponse());
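// docs with attachments are expected to be pushed as individual multipart
// PUTs rather than through _bulk_docs; the multipart assertions below verify this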
// run replication
Replication replication = database.createPushReplication(server.getUrl("/db"));
replication.setContinuous(false);
if (serverType != MockDispatcher.ServerType.SYNC_GW) {
replication.setCreateTarget(true);
Assert.assertTrue(replication.shouldCreateTarget());
}
runReplication(replication);
// make assertions about outgoing requests from replicator -> mock
RecordedRequest getCheckpointRequest = dispatcher.takeRequest(MockHelper.PATH_REGEX_CHECKPOINT);
assertEquals("GET", getCheckpointRequest.getMethod());
assertTrue(getCheckpointRequest.getPath().matches(MockHelper.PATH_REGEX_CHECKPOINT));
RecordedRequest revsDiffRequest = dispatcher.takeRequest(MockHelper.PATH_REGEX_REVS_DIFF);
assertTrue(MockHelper.getUtf8Body(revsDiffRequest).contains(doc1Id));
RecordedRequest bulkDocsRequest = dispatcher.takeRequest(MockHelper.PATH_REGEX_BULK_DOCS);
assertTrue(MockHelper.getUtf8Body(bulkDocsRequest).contains(doc1Id));
Map<String, Object> bulkDocsJson = Manager.getObjectMapper().readValue(MockHelper.getUtf8Body(bulkDocsRequest), Map.class);
Map<String, Object> doc4Map = MockBulkDocs.findDocById(bulkDocsJson, doc4Id);
assertTrue(((Boolean) doc4Map.get("_deleted")).booleanValue());
Log.d(TAG, MockHelper.getUtf8Body(bulkDocsRequest));
assertFalse(MockHelper.getUtf8Body(bulkDocsRequest).contains(doc2Id));
RecordedRequest doc2putRequest = dispatcher.takeRequest(doc2PathRegex);
CustomMultipartReaderDelegate delegate2 = new CustomMultipartReaderDelegate();
MultipartReader reader2 = new MultipartReader(doc2putRequest.getHeader("Content-Type"), delegate2);
reader2.appendData(doc2putRequest.getBody());
String body2 = new String(delegate2.data, "UTF-8");
assertTrue(body2.contains(doc2Id));
assertFalse(body2.contains(doc3Id));
RecordedRequest doc3putRequest = dispatcher.takeRequest(doc3PathRegex);
CustomMultipartReaderDelegate delegate3 = new CustomMultipartReaderDelegate();
MultipartReader reader3 = new MultipartReader(doc3putRequest.getHeader("Content-Type"), delegate3);
reader3.appendData(doc3putRequest.getBody());
String body3 = new String(delegate3.data, "UTF-8");
assertTrue(body3.contains(doc3Id));
assertFalse(body3.contains(doc2Id));
// wait until the mock webserver receives a PUT checkpoint request
int expectedLastSequence = 5;
Log.d(TAG, "waiting for put checkpoint with lastSequence: %d", expectedLastSequence);
List<RecordedRequest> checkpointRequests = waitForPutCheckpointRequestWithSequence(dispatcher, expectedLastSequence);
Log.d(TAG, "done waiting for put checkpoint with lastSequence: %d", expectedLastSequence);
validateCheckpointRequestsRevisions(checkpointRequests);
// assert our local sequence matches what is expected
String lastSequence = database.lastSequenceWithCheckpointId(replication.remoteCheckpointDocID());
assertEquals(Integer.toString(expectedLastSequence), lastSequence);
// assert completed count makes sense
assertEquals(replication.getChangesCount(), replication.getCompletedChangesCount());
} finally {
// Shut down the server. Instances cannot be reused.
if (shutdownMockWebserver) {
server.shutdown();
}
}
Map<String, Object> returnVal = new HashMap<String, Object>();
returnVal.put("server", server);
returnVal.put("dispatcher", dispatcher);
return returnVal;
}
/**
* https://github.com/couchbase/couchbase-lite-java-core/issues/55
*/
public void testContinuousPushReplicationGoesIdle() throws Exception {
// make sure we are starting empty
assertEquals(0, database.getLastSequenceNumber());
// add docs
Map<String, Object> properties1 = new HashMap<String, Object>();
properties1.put("doc1", "testContinuousPushReplicationGoesIdle");
final Document doc1 = createDocWithProperties(properties1);
// create mockwebserver and custom dispatcher
MockDispatcher dispatcher = new MockDispatcher();
MockWebServer server = MockHelper.getMockWebServer(dispatcher);
dispatcher.setServerType(MockDispatcher.ServerType.SYNC_GW);
try {
server.play();
// checkpoint GET response w/ 404. also receives checkpoint PUT's
MockCheckpointPut mockCheckpointPut = new MockCheckpointPut();
mockCheckpointPut.setSticky(true);
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHECKPOINT, mockCheckpointPut);
// _revs_diff response -- everything missing
MockRevsDiff mockRevsDiff = new MockRevsDiff();
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_REVS_DIFF, mockRevsDiff);
// _bulk_docs response -- everything stored
MockBulkDocs mockBulkDocs = new MockBulkDocs();
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_BULK_DOCS, mockBulkDocs);
// replication to do the initial sync-up - it has to be a continuous replication
// so the checkpoint id matches that of the next continuous replication below
Replication firstPusher = database.createPushReplication(server.getUrl("/db"));
firstPusher.setContinuous(true);
final String checkpointId = firstPusher.remoteCheckpointDocID(); // save the checkpoint id for later usage
// start the continuous replication
CountDownLatch replicationIdleSignal = new CountDownLatch(1);
ReplicationIdleObserver replicationIdleObserver = new ReplicationIdleObserver(replicationIdleSignal);
firstPusher.addChangeListener(replicationIdleObserver);
firstPusher.start();
// wait until we get an IDLE event
boolean successful = replicationIdleSignal.await(30, TimeUnit.SECONDS);
assertTrue(successful);
stopReplication(firstPusher);
// wait until replication does PUT checkpoint with lastSequence=1
int expectedLastSequence = 1;
waitForPutCheckpointRequestWithSeq(dispatcher, expectedLastSequence);
// the last sequence should be "1" at this point. we will use this later
final String lastSequence = database.lastSequenceWithCheckpointId(checkpointId);
assertEquals("1", lastSequence);
// start a second continuous replication
Replication secondPusher = database.createPushReplication(server.getUrl("/db"));
secondPusher.setContinuous(true);
final String secondPusherCheckpointId = secondPusher.remoteCheckpointDocID();
assertEquals(checkpointId, secondPusherCheckpointId);
// remove current handler for the GET/PUT checkpoint request, and
// install a new handler that returns the lastSequence from previous replication
dispatcher.clearQueuedResponse(MockHelper.PATH_REGEX_CHECKPOINT);
MockCheckpointGet mockCheckpointGet = new MockCheckpointGet();
mockCheckpointGet.setLastSequence(lastSequence);
mockCheckpointGet.setRev("0-2");
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHECKPOINT, mockCheckpointGet);
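// with the checkpoint now reporting lastSequence=1, the second pusher has
// nothing new to push and should transition straight to IDLE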
// start second replication
replicationIdleSignal = new CountDownLatch(1);
replicationIdleObserver = new ReplicationIdleObserver(replicationIdleSignal);
secondPusher.addChangeListener(replicationIdleObserver);
secondPusher.start();
// wait until we get an IDLE event
successful = replicationIdleSignal.await(30, TimeUnit.SECONDS);
assertTrue(successful);
stopReplication(secondPusher);
} finally {
server.shutdown();
}
}
/**
* https://github.com/couchbase/couchbase-lite-java-core/issues/241
* <p/>
* - Set the "retry time" to a short number
* - Setup mock server to return 404 for all _changes requests
* - Start continuous replication
* - Sleep for 5X retry time
* - Assert that we've received at least two requests to _changes feed
* - Stop replication + cleanup
*/
public void testContinuousReplication404Changes() throws Exception {
int previous = PullerInternal.CHANGE_TRACKER_RESTART_DELAY_MS;
PullerInternal.CHANGE_TRACKER_RESTART_DELAY_MS = 5;
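// shorten the change tracker restart delay so the puller retries the
// _changes feed quickly after each 404 (see the javadoc above)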
try {
// create mockwebserver and custom dispatcher
MockDispatcher dispatcher = new MockDispatcher();
MockWebServer server = MockHelper.getMockWebServer(dispatcher);
dispatcher.setServerType(MockDispatcher.ServerType.SYNC_GW);
try {
server.play();
// mock checkpoint GET response w/ 404
MockResponse fakeCheckpointResponse = new MockResponse();
MockHelper.set404NotFoundJson(fakeCheckpointResponse);
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHECKPOINT, fakeCheckpointResponse);
// mock _changes response
for (int i = 0; i < 100; i++) {
MockResponse mockChangesFeed = new MockResponse();
MockHelper.set404NotFoundJson(mockChangesFeed);
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHANGES, mockChangesFeed);
}
// create new replication
Replication pull = database.createPullReplication(server.getUrl("/db"));
pull.setContinuous(true);
// add done listener to replication
CountDownLatch replicationDoneSignal = new CountDownLatch(1);
ReplicationFinishedObserver replicationFinishedObserver = new ReplicationFinishedObserver(replicationDoneSignal);
pull.addChangeListener(replicationFinishedObserver);
// start the replication
pull.start();
// wait until we get a few requests
Log.d(TAG, "Waiting for a _changes request");
dispatcher.takeRequestBlocking(MockHelper.PATH_REGEX_CHANGES);
Log.d(TAG, "Got first _changes request, waiting for another _changes request");
dispatcher.takeRequestBlocking(MockHelper.PATH_REGEX_CHANGES);
Log.d(TAG, "Got second _changes request, waiting for another _changes request");
dispatcher.takeRequestBlocking(MockHelper.PATH_REGEX_CHANGES);
Log.d(TAG, "Got third _changes request, stopping replicator");
// the replication should still be running
assertEquals(1, replicationDoneSignal.getCount());
// cleanup
stopReplication(pull);
} finally {
server.shutdown();
}
} finally {
PullerInternal.CHANGE_TRACKER_RESTART_DELAY_MS = previous;
}
}
/**
* Regression test for issue couchbase/couchbase-lite-android#174
*/
public void testAllLeafRevisionsArePushed() throws Exception {
final CustomizableMockHttpClient mockHttpClient = new CustomizableMockHttpClient();
mockHttpClient.addResponderRevDiffsAllMissing();
mockHttpClient.setResponseDelayMilliseconds(250);
mockHttpClient.addResponderFakeLocalDocumentUpdate404();
HttpClientFactory mockHttpClientFactory = new HttpClientFactory() {
@Override
public HttpClient getHttpClient() {
return mockHttpClient;
}
@Override
public void addCookies(List<Cookie> cookies) {
}
@Override
public void deleteCookie(String name) {
}
@Override
public CookieStore getCookieStore() {
return null;
}
};
manager.setDefaultHttpClientFactory(mockHttpClientFactory);
Document doc = database.createDocument();
SavedRevision rev1a = doc.createRevision().save();
SavedRevision rev2a = createRevisionWithRandomProps(rev1a, false);
SavedRevision rev3a = createRevisionWithRandomProps(rev2a, false);
// delete the branch we've been using, then create a new one to replace it
SavedRevision rev4a = rev3a.deleteDocument();
SavedRevision rev2b = createRevisionWithRandomProps(rev1a, true);
assertEquals(rev2b.getId(), doc.getCurrentRevisionId());
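// the document now has two leaf revisions: the deleted branch ending in
// rev4a and the live branch ending in rev2b; both must be reported to the
// server via _revs_diff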
// sync with remote DB -- should push both leaf revisions
Replication push = database.createPushReplication(getReplicationURL());
runReplication(push);
assertNull(push.getLastError());
// find the _revs_diff captured request and decode into json
boolean foundRevsDiff = false;
List<HttpRequest> captured = mockHttpClient.getCapturedRequests();
for (HttpRequest httpRequest : captured) {
if (httpRequest instanceof HttpPost) {
HttpPost httpPost = (HttpPost) httpRequest;
if (httpPost.getURI().toString().endsWith("_revs_diff")) {
foundRevsDiff = true;
Map<String, Object> jsonMap = CustomizableMockHttpClient.getJsonMapFromRequest(httpPost);
// assert that it contains the expected revisions
List<String> revisionIds = (List) jsonMap.get(doc.getId());
assertEquals(2, revisionIds.size());
assertTrue(revisionIds.contains(rev4a.getId()));
assertTrue(revisionIds.contains(rev2b.getId()));
}
}
}
assertTrue(foundRevsDiff);
}
/**
* Verify that when a conflict is resolved on (mock) Sync Gateway
* and a pull replication is done, the conflict is resolved locally.
* <p/>
* - Create local docs in conflict
* - Simulate sync gw responses that resolve the conflict
* - Do pull replication
* - Assert conflict is resolved locally
* <p/>
* https://github.com/couchbase/couchbase-lite-java-core/issues/77
*/
public void failingTestRemoteConflictResolution() throws Exception {
// Create a document with two conflicting edits.
Document doc = database.createDocument();
SavedRevision rev1 = doc.createRevision().save();
SavedRevision rev2a = createRevisionWithRandomProps(rev1, false);
SavedRevision rev2b = createRevisionWithRandomProps(rev1, true);
// make sure we can query the db to get the conflict
Query allDocsQuery = database.createAllDocumentsQuery();
allDocsQuery.setAllDocsMode(Query.AllDocsMode.ONLY_CONFLICTS);
QueryEnumerator rows = allDocsQuery.run();
boolean foundDoc = false;
assertEquals(1, rows.getCount());
for (Iterator<QueryRow> it = rows; it.hasNext(); ) {
QueryRow row = it.next();
if (row.getDocument().getId().equals(doc.getId())) {
foundDoc = true;
}
}
assertTrue(foundDoc);
// make sure doc in conflict
assertTrue(doc.getConflictingRevisions().size() > 1);
// create mockwebserver and custom dispatcher
MockDispatcher dispatcher = new MockDispatcher();
MockWebServer server = MockHelper.getMockWebServer(dispatcher);
dispatcher.setServerType(MockDispatcher.ServerType.COUCHDB);
try {
// checkpoint GET response w/ 404
MockResponse fakeCheckpointResponse = new MockResponse();
MockHelper.set404NotFoundJson(fakeCheckpointResponse);
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHECKPOINT, fakeCheckpointResponse);
int rev3PromotedGeneration = 3;
String rev3PromotedDigest = "d46b";
String rev3Promoted = String.format("%d-%s", rev3PromotedGeneration, rev3PromotedDigest);
int rev3DeletedGeneration = 3;
String rev3DeletedDigest = "e768";
String rev3Deleted = String.format("%d-%s", rev3DeletedGeneration, rev3DeletedDigest);
int seq = 4;
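// the mock server resolves the conflict by extending one branch with a
// "promoted" rev3 and closing the other branch with a deleted rev3
// (a tombstone); both revs are reported in a single _changes entry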
// _changes response
MockChangesFeed mockChangesFeed = new MockChangesFeed();
MockChangesFeed.MockChangedDoc mockChangedDoc = new MockChangesFeed.MockChangedDoc();
mockChangedDoc.setDocId(doc.getId());
mockChangedDoc.setSeq(seq);
mockChangedDoc.setChangedRevIds(Arrays.asList(rev3Promoted, rev3Deleted));
mockChangesFeed.add(mockChangedDoc);
MockResponse response = mockChangesFeed.generateMockResponse();
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHANGES, response);
// docRev3Promoted response
MockDocumentGet.MockDocument docRev3Promoted = new MockDocumentGet.MockDocument(doc.getId(), rev3Promoted, seq);
docRev3Promoted.setJsonMap(MockHelper.generateRandomJsonMap());
MockDocumentGet mockDocRev3PromotedGet = new MockDocumentGet(docRev3Promoted);
Map<String, Object> rev3PromotedRevHistory = new HashMap<String, Object>();
rev3PromotedRevHistory.put("start", rev3PromotedGeneration);
List<String> ids = Arrays.asList(
rev3PromotedDigest,
RevisionInternal.digestFromRevID(rev2a.getId()),
RevisionInternal.digestFromRevID(rev2b.getId())
);
rev3PromotedRevHistory.put("ids", ids);
mockDocRev3PromotedGet.setRevHistoryMap(rev3PromotedRevHistory);
dispatcher.enqueueResponse(docRev3Promoted.getDocPathRegex(), mockDocRev3PromotedGet.generateMockResponse());
// docRev3Deleted response
MockDocumentGet.MockDocument docRev3Deleted = new MockDocumentGet.MockDocument(doc.getId(), rev3Deleted, seq);
Map<String, Object> jsonMap = MockHelper.generateRandomJsonMap();
jsonMap.put("_deleted", true);
docRev3Deleted.setJsonMap(jsonMap);
MockDocumentGet mockDocRev3DeletedGet = new MockDocumentGet(docRev3Deleted);
Map<String, Object> rev3DeletedRevHistory = new HashMap<String, Object>();
rev3DeletedRevHistory.put("start", rev3DeletedGeneration);
ids = Arrays.asList(
rev3DeletedDigest,
RevisionInternal.digestFromRevID(rev2b.getId()),
RevisionInternal.digestFromRevID(rev1.getId())
);
rev3DeletedRevHistory.put("ids", ids);
mockDocRev3DeletedGet.setRevHistoryMap(rev3DeletedRevHistory);
dispatcher.enqueueResponse(docRev3Deleted.getDocPathRegex(), mockDocRev3DeletedGet.generateMockResponse());
// start mock server
server.play();
// run pull replication
Replication pullReplication = database.createPullReplication(server.getUrl("/db"));
runReplication(pullReplication);
assertNull(pullReplication.getLastError());
// assertions about outgoing requests
RecordedRequest changesRequest = dispatcher.takeRequest(MockHelper.PATH_REGEX_CHANGES);
assertNotNull(changesRequest);
RecordedRequest docRev3DeletedRequest = dispatcher.takeRequest(docRev3Deleted.getDocPathRegex());
assertNotNull(docRev3DeletedRequest);
RecordedRequest docRev3PromotedRequest = dispatcher.takeRequest(docRev3Promoted.getDocPathRegex());
assertNotNull(docRev3PromotedRequest);
// Make sure the conflict was resolved locally.
assertEquals(1, doc.getConflictingRevisions().size());
} finally {
server.shutdown();
}
}
/**
* https://github.com/couchbase/couchbase-lite-java-core/issues/95
*/
public void testPushReplicationCanMissDocs() throws Exception {
assertEquals(0, database.getLastSequenceNumber());
Map<String, Object> properties1 = new HashMap<String, Object>();
properties1.put("doc1", "testPushReplicationCanMissDocs");
final Document doc1 = createDocWithProperties(properties1);
Map<String, Object> properties2 = new HashMap<String, Object>();
properties1.put("doc2", "testPushReplicationCanMissDocs");
final Document doc2 = createDocWithProperties(properties2);
UnsavedRevision doc2UnsavedRev = doc2.createRevision();
InputStream attachmentStream = getAsset("attachment.png");
doc2UnsavedRev.setAttachment("attachment.png", "image/png", attachmentStream);
SavedRevision doc2Rev = doc2UnsavedRev.save();
assertNotNull(doc2Rev);
final CustomizableMockHttpClient mockHttpClient = new CustomizableMockHttpClient();
mockHttpClient.addResponderFakeLocalDocumentUpdate404();
mockHttpClient.setResponder("_bulk_docs", new CustomizableMockHttpClient.Responder() {
@Override
public HttpResponse execute(HttpUriRequest httpUriRequest) throws IOException {
String json = "{\"error\":\"not_found\",\"reason\":\"missing\"}";
return CustomizableMockHttpClient.generateHttpResponseObject(404, "NOT FOUND", json);
}
});
mockHttpClient.setResponder(doc2.getId(), new CustomizableMockHttpClient.Responder() {
@Override
public HttpResponse execute(HttpUriRequest httpUriRequest) throws IOException {
Map<String, Object> responseObject = new HashMap<String, Object>();
responseObject.put("id", doc2.getId());
responseObject.put("ok", true);
responseObject.put("rev", doc2.getCurrentRevisionId());
return CustomizableMockHttpClient.generateHttpResponseObject(responseObject);
}
});
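// _bulk_docs always fails, so doc1 can never be pushed; doc2 carries an
// attachment and goes through its own PUT, which succeeds. The checkpoint
// must therefore not advance past doc1's sequence.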
// create a replication observer to wait until replication finishes
CountDownLatch replicationDoneSignal = new CountDownLatch(1);
ReplicationFinishedObserver replicationFinishedObserver = new ReplicationFinishedObserver(replicationDoneSignal);
// create replication and add observer
manager.setDefaultHttpClientFactory(mockFactoryFactory(mockHttpClient));
Replication pusher = database.createPushReplication(getReplicationURL());
pusher.addChangeListener(replicationFinishedObserver);
// save the checkpoint id for later usage
String checkpointId = pusher.remoteCheckpointDocID();
// kick off the replication
pusher.start();
// wait for it to finish
boolean success = replicationDoneSignal.await(60, TimeUnit.SECONDS);
assertTrue(success);
Log.d(TAG, "replicationDoneSignal finished");
// we expect an error to have been recorded, because doc1 (the doc without
// the attachment, pushed via _bulk_docs) failed
assertNotNull(pusher.getLastError());
// workaround for the fact that the replicationDoneSignal.wait() call will unblock before all
// the statements in Replication.stopped() have even had a chance to execute.
// (specifically the ones that come after the call to notifyChangeListeners())
Thread.sleep(500);
String localLastSequence = database.lastSequenceWithCheckpointId(checkpointId);
Log.d(TAG, "database.lastSequenceWithCheckpointId(): " + localLastSequence);
Log.d(TAG, "doc2.getCurrentRevision().getSequence(): " + doc2.getCurrentRevision().getSequence());
String msg = "Since doc1 failed, the database should _not_ have had its lastSequence bumped" +
" to doc2's sequence number. If it did, it's bug: github.com/couchbase/couchbase-lite-java-core/issues/95";
assertFalse(msg, Long.toString(doc2.getCurrentRevision().getSequence()).equals(localLastSequence));
assertNull(localLastSequence);
assertTrue(doc2.getCurrentRevision().getSequence() > 0);
}
/**
* https://github.com/couchbase/couchbase-lite-android/issues/66
*/
public void testPushUpdatedDocWithoutReSendingAttachments() throws Exception {
assertEquals(0, database.getLastSequenceNumber());
Map<String, Object> properties1 = new HashMap<String, Object>();
properties1.put("dynamic", 1);
final Document doc = createDocWithProperties(properties1);
SavedRevision doc1Rev = doc.getCurrentRevision();
// Add attachment to document
UnsavedRevision doc2UnsavedRev = doc.createRevision();
InputStream attachmentStream = getAsset("attachment.png");
doc2UnsavedRev.setAttachment("attachment.png", "image/png", attachmentStream);
SavedRevision doc2Rev = doc2UnsavedRev.save();
assertNotNull(doc2Rev);
final CustomizableMockHttpClient mockHttpClient = new CustomizableMockHttpClient();
mockHttpClient.addResponderFakeLocalDocumentUpdate404();
// http://url/db/foo (foo==docid)
mockHttpClient.setResponder(doc.getId(), new CustomizableMockHttpClient.Responder() {
@Override
public HttpResponse execute(HttpUriRequest httpUriRequest) throws IOException {
Map<String, Object> responseObject = new HashMap<String, Object>();
responseObject.put("id", doc.getId());
responseObject.put("ok", true);
responseObject.put("rev", doc.getCurrentRevisionId());
return CustomizableMockHttpClient.generateHttpResponseObject(responseObject);
}
});
// create replication and add observer
manager.setDefaultHttpClientFactory(mockFactoryFactory(mockHttpClient));
Replication pusher = database.createPushReplication(getReplicationURL());
runReplication(pusher);
List<HttpRequest> captured = mockHttpClient.getCapturedRequests();
for (HttpRequest httpRequest : captured) {
// the initial push legitimately includes the attachment as multipart, so the no-multipart check is disabled for this first pass
if (httpRequest instanceof HttpPut) {
HttpPut httpPut = (HttpPut) httpRequest;
HttpEntity entity = httpPut.getEntity();
//assertFalse("PUT request with updated doc properties contains attachment", entity instanceof MultipartEntity);
}
}
mockHttpClient.clearCapturedRequests();
Document oldDoc = database.getDocument(doc.getId());
UnsavedRevision aUnsavedRev = oldDoc.createRevision();
Map<String, Object> prop = new HashMap<String, Object>();
prop.putAll(oldDoc.getProperties());
prop.put("dynamic", (Integer) oldDoc.getProperty("dynamic") + 1);
aUnsavedRev.setProperties(prop);
final SavedRevision savedRev = aUnsavedRev.save();
mockHttpClient.setResponder(doc.getId(), new CustomizableMockHttpClient.Responder() {
@Override
public HttpResponse execute(HttpUriRequest httpUriRequest) throws IOException {
Map<String, Object> responseObject = new HashMap<String, Object>();
responseObject.put("id", doc.getId());
responseObject.put("ok", true);
responseObject.put("rev", savedRev.getId());
return CustomizableMockHttpClient.generateHttpResponseObject(responseObject);
}
});
final String json = String.format("{\"%s\":{\"missing\":[\"%s\"],\"possible_ancestors\":[\"%s\",\"%s\"]}}", doc.getId(), savedRev.getId(), doc1Rev.getId(), doc2Rev.getId());
mockHttpClient.setResponder("_revs_diff", new CustomizableMockHttpClient.Responder() {
@Override
public HttpResponse execute(HttpUriRequest httpUriRequest) throws IOException {
return mockHttpClient.generateHttpResponseObject(json);
}
});
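// because _revs_diff reports known possible_ancestors, the pusher should be
// able to send the updated revision with attachment stubs instead of
// re-uploading the attachment, so the PUT below must not be multipart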
pusher = database.createPushReplication(getReplicationURL());
runReplication(pusher);
captured = mockHttpClient.getCapturedRequests();
for (HttpRequest httpRequest : captured) {
// verify that there are no PUT requests with attachments
if (httpRequest instanceof HttpPut) {
HttpPut httpPut = (HttpPut) httpRequest;
HttpEntity entity = httpPut.getEntity();
assertFalse("PUT request with updated doc properties contains attachment", entity instanceof MultipartEntity);
}
}
}
/**
* https://github.com/couchbase/couchbase-lite-java-core/issues/188
*/
public void testServerDoesNotSupportMultipart() throws Exception {
assertEquals(0, database.getLastSequenceNumber());
Map<String, Object> properties1 = new HashMap<String, Object>();
properties1.put("dynamic", 1);
final Document doc = createDocWithProperties(properties1);
SavedRevision doc1Rev = doc.getCurrentRevision();
// Add attachment to document
UnsavedRevision doc2UnsavedRev = doc.createRevision();
InputStream attachmentStream = getAsset("attachment.png");
doc2UnsavedRev.setAttachment("attachment.png", "image/png", attachmentStream);
SavedRevision doc2Rev = doc2UnsavedRev.save();
assertNotNull(doc2Rev);
final CustomizableMockHttpClient mockHttpClient = new CustomizableMockHttpClient();
mockHttpClient.addResponderFakeLocalDocumentUpdate404();
Queue<CustomizableMockHttpClient.Responder> responders = new LinkedList<CustomizableMockHttpClient.Responder>();
//first http://url/db/foo (foo==docid)
//Reject multipart PUT with response code 415
responders.add(new CustomizableMockHttpClient.Responder() {
@Override
public HttpResponse execute(HttpUriRequest httpUriRequest) throws IOException {
String json = "{\"error\":\"Unsupported Media Type\",\"reason\":\"missing\"}";
return CustomizableMockHttpClient.generateHttpResponseObject(415, "Unsupported Media Type", json);
}
});
// second http://url/db/foo (foo==docid)
// second call should be plain json, return good response
responders.add(new CustomizableMockHttpClient.Responder() {
@Override
public HttpResponse execute(HttpUriRequest httpUriRequest) throws IOException {
Map<String, Object> responseObject = new HashMap<String, Object>();
responseObject.put("id", doc.getId());
responseObject.put("ok", true);
responseObject.put("rev", doc.getCurrentRevisionId());
return CustomizableMockHttpClient.generateHttpResponseObject(responseObject);
}
});
ResponderChain responderChain = new ResponderChain(responders);
mockHttpClient.setResponder(doc.getId(), responderChain);
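// the responder chain serves its queued responses in order: the first PUT
// (multipart) is rejected with 415, and the retry (plain JSON) succeeds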
// create replication and add observer
manager.setDefaultHttpClientFactory(mockFactoryFactory(mockHttpClient));
Replication pusher = database.createPushReplication(getReplicationURL());
runReplication(pusher);
List<HttpRequest> captured = mockHttpClient.getCapturedRequests();
int entityIndex = 0;
for (HttpRequest httpRequest : captured) {
// verify that there are no PUT requests with attachments
if (httpRequest instanceof HttpPut) {
HttpPut httpPut = (HttpPut) httpRequest;
HttpEntity entity = httpPut.getEntity();
if (entityIndex++ == 0) {
assertTrue("PUT request with attachment is not multipart", entity instanceof MultipartEntity);
} else {
assertFalse("PUT request with attachment is multipart", entity instanceof MultipartEntity);
}
}
}
}
public void testServerIsSyncGatewayVersion() throws Exception {
Replication pusher = database.createPushReplication(getReplicationURL());
assertFalse(pusher.serverIsSyncGatewayVersion("0.01"));
pusher.setServerType("Couchbase Sync Gateway/0.93");
assertTrue(pusher.serverIsSyncGatewayVersion("0.92"));
assertFalse(pusher.serverIsSyncGatewayVersion("0.94"));
}
/**
* https://github.com/couchbase/couchbase-lite-android/issues/243
*/
public void testDifferentCheckpointsFilteredReplication() throws Exception {
Replication pullerNoFilter = database.createPullReplication(getReplicationURL());
String noFilterCheckpointDocId = pullerNoFilter.remoteCheckpointDocID();
Replication pullerWithFilter1 = database.createPullReplication(getReplicationURL());
pullerWithFilter1.setFilter("foo/bar");
Map<String, Object> filterParams = new HashMap<String, Object>();
filterParams.put("a", "aval");
filterParams.put("b", "bval");
List<String> docIds = Arrays.asList("doc3", "doc1", "doc2");
pullerWithFilter1.setDocIds(docIds);
assertEquals(docIds, pullerWithFilter1.getDocIds());
pullerWithFilter1.setFilterParams(filterParams);
String withFilterCheckpointDocId = pullerWithFilter1.remoteCheckpointDocID();
assertFalse(withFilterCheckpointDocId.equals(noFilterCheckpointDocId));
Replication pullerWithFilter2 = database.createPullReplication(getReplicationURL());
pullerWithFilter2.setFilter("foo/bar");
filterParams = new HashMap<String, Object>();
filterParams.put("b", "bval");
filterParams.put("a", "aval");
pullerWithFilter2.setDocIds(Arrays.asList("doc2", "doc3", "doc1"));
pullerWithFilter2.setFilterParams(filterParams);
String withFilterCheckpointDocId2 = pullerWithFilter2.remoteCheckpointDocID();
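// the same filter, params, and docIds (supplied in a different insertion
// order) must yield the same checkpoint doc ID: the ID is order-independent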
assertTrue(withFilterCheckpointDocId.equals(withFilterCheckpointDocId2));
}
public void testSetReplicationCookie() throws Exception {
URL replicationUrl = getReplicationURL();
Replication puller = database.createPullReplication(replicationUrl);
String cookieName = "foo";
String cookieVal = "bar";
boolean isSecure = false;
boolean httpOnly = false;
// expiration date - 1 day from now
Calendar cal = Calendar.getInstance();
cal.setTime(new Date());
int numDaysToAdd = 1;
cal.add(Calendar.DATE, numDaysToAdd);
Date expirationDate = cal.getTime();
// set the cookie
puller.setCookie(cookieName, cookieVal, "", expirationDate, isSecure, httpOnly);
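// passing an empty path defaults the cookie's domain and path to the
// replication URL's host and path, as asserted below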
// make sure it made it into cookie store and has expected params
CookieStore cookieStore = puller.getClientFactory().getCookieStore();
List<Cookie> cookies = cookieStore.getCookies();
assertEquals(1, cookies.size());
Cookie cookie = cookies.get(0);
assertEquals(cookieName, cookie.getName());
assertEquals(cookieVal, cookie.getValue());
assertEquals(replicationUrl.getHost(), cookie.getDomain());
assertEquals(replicationUrl.getPath(), cookie.getPath());
assertEquals(expirationDate, cookie.getExpiryDate());
assertEquals(isSecure, cookie.isSecure());
// add a second cookie
String cookieName2 = "foo2";
puller.setCookie(cookieName2, cookieVal, "", expirationDate, isSecure, false);
assertEquals(2, cookieStore.getCookies().size());
// delete cookie
puller.deleteCookie(cookieName2);
// should only have the original cookie left
assertEquals(1, cookieStore.getCookies().size());
assertEquals(cookieName, cookieStore.getCookies().get(0).getName());
}
/**
* https://github.com/couchbase/couchbase-lite-android/issues/376
* <p/>
* This test aims to demonstrate that when the changes feed returns purged documents the
* replicator is able to fetch all other documents but unable to finish the replication
* (STOPPED OR IDLE STATE)
*/
public void testChangesFeedWithPurgedDoc() throws Exception {
//generate documents ids
String doc1Id = "doc1-" + System.currentTimeMillis();
String doc2Id = "doc2-" + System.currentTimeMillis();
String doc3Id = "doc3-" + System.currentTimeMillis();
//generate mock documents
final MockDocumentGet.MockDocument mockDocument1 = new MockDocumentGet.MockDocument(
doc1Id, "1-a000", 1);
mockDocument1.setJsonMap(MockHelper.generateRandomJsonMap());
final MockDocumentGet.MockDocument mockDocument2 = new MockDocumentGet.MockDocument(
doc2Id, "1-b000", 2);
mockDocument2.setJsonMap(MockHelper.generateRandomJsonMap());
final MockDocumentGet.MockDocument mockDocument3 = new MockDocumentGet.MockDocument(
doc3Id, "1-c000", 3);
mockDocument3.setJsonMap(MockHelper.generateRandomJsonMap());
// create mockwebserver and custom dispatcher
MockDispatcher dispatcher = new MockDispatcher();
MockWebServer server = MockHelper.getMockWebServer(dispatcher);
dispatcher.setServerType(MockDispatcher.ServerType.COUCHDB);
try {
//add response to _local request
// checkpoint GET response w/ 404
MockResponse fakeCheckpointResponse = new MockResponse();
MockHelper.set404NotFoundJson(fakeCheckpointResponse);
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHECKPOINT, fakeCheckpointResponse);
//add response to _changes request
// _changes response
MockChangesFeed mockChangesFeed = new MockChangesFeed();
mockChangesFeed.add(new MockChangesFeed.MockChangedDoc(mockDocument1));
mockChangesFeed.add(new MockChangesFeed.MockChangedDoc(mockDocument2));
mockChangesFeed.add(new MockChangesFeed.MockChangedDoc(mockDocument3));
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHANGES, mockChangesFeed.generateMockResponse());
// doc1 response
MockDocumentGet mockDocumentGet1 = new MockDocumentGet(mockDocument1);
dispatcher.enqueueResponse(mockDocument1.getDocPathRegex(), mockDocumentGet1.generateMockResponse());
// doc2 missing response
MockResponse missingDocumentMockResponse = new MockResponse();
MockHelper.set404NotFoundJson(missingDocumentMockResponse);
dispatcher.enqueueResponse(mockDocument2.getDocPathRegex(), missingDocumentMockResponse);
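// doc2 has been purged on the server, so its GET returns 404; the puller
// should skip it, still pull doc3, and checkpoint at doc3's sequence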
// doc3 response
MockDocumentGet mockDocumentGet3 = new MockDocumentGet(mockDocument3);
dispatcher.enqueueResponse(mockDocument3.getDocPathRegex(), mockDocumentGet3.generateMockResponse());
// checkpoint PUT response
MockCheckpointPut mockCheckpointPut = new MockCheckpointPut();
mockCheckpointPut.setSticky(true);
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHECKPOINT, mockCheckpointPut);
// start mock server
server.play();
//create url for replication
URL baseUrl = server.getUrl("/db");
//create replication
Replication pullReplication = database.createPullReplication(baseUrl);
pullReplication.setContinuous(false);
//add change listener to notify when the replication is finished
CountDownLatch replicationFinishedContCountDownLatch = new CountDownLatch(1);
ReplicationFinishedObserver replicationFinishedObserver =
new ReplicationFinishedObserver(replicationFinishedContCountDownLatch);
pullReplication.addChangeListener(replicationFinishedObserver);
//start replication
pullReplication.start();
boolean success = replicationFinishedContCountDownLatch.await(100, TimeUnit.SECONDS);
assertTrue(success);
if (pullReplication.getLastError() != null) {
Log.d(TAG, "Replication had error: " + pullReplication.getLastError());
}
//assert document 1 was correctly pulled
Document doc1 = database.getDocument(doc1Id);
assertNotNull(doc1);
assertNotNull(doc1.getCurrentRevision());
//assert it was impossible to pull doc2
Document doc2 = database.getDocument(doc2Id);
assertNotNull(doc2);
assertNull(doc2.getCurrentRevision());
//assert it was possible to pull doc3
Document doc3 = database.getDocument(doc3Id);
assertNotNull(doc3);
assertNotNull(doc3.getCurrentRevision());
// wait until the replicator PUT's checkpoint with mockDocument3's sequence
waitForPutCheckpointRequestWithSeq(dispatcher, mockDocument3.getDocSeq());
//last saved seq must be equal to last pulled document seq
String doc3Seq = Integer.toString(mockDocument3.getDocSeq());
String lastSequence = database.lastSequenceWithCheckpointId(pullReplication.remoteCheckpointDocID());
assertEquals(doc3Seq, lastSequence);
} finally {
//stop mock server
server.shutdown();
}
}
/**
* Reproduces https://github.com/couchbase/couchbase-lite-android/issues/167
*/
public void testPushPurgedDoc() throws Throwable {
int numBulkDocRequests = 0;
HttpPost lastBulkDocsRequest = null;
Map<String, Object> properties = new HashMap<String, Object>();
properties.put("testName", "testPurgeDocument");
Document doc = createDocumentWithProperties(database, properties);
assertNotNull(doc);
final CustomizableMockHttpClient mockHttpClient = new CustomizableMockHttpClient();
mockHttpClient.addResponderRevDiffsAllMissing();
mockHttpClient.setResponseDelayMilliseconds(250);
mockHttpClient.addResponderFakeLocalDocumentUpdate404();
HttpClientFactory mockHttpClientFactory = new HttpClientFactory() {
@Override
public HttpClient getHttpClient() {
return mockHttpClient;
}
@Override
public void addCookies(List<Cookie> cookies) {
}
@Override
public void deleteCookie(String name) {
}
@Override
public CookieStore getCookieStore() {
return null;
}
};
URL remote = getReplicationURL();
manager.setDefaultHttpClientFactory(mockHttpClientFactory);
Replication pusher = database.createPushReplication(remote);
pusher.setContinuous(true);
final CountDownLatch replicationCaughtUpSignal = new CountDownLatch(1);
pusher.addChangeListener(new Replication.ChangeListener() {
@Override
public void changed(Replication.ChangeEvent event) {
final int changesCount = event.getSource().getChangesCount();
final int completedChangesCount = event.getSource().getCompletedChangesCount();
String msg = String.format("changes: %d completed changes: %d", changesCount, completedChangesCount);
Log.d(TAG, msg);
if (changesCount == completedChangesCount && changesCount != 0) {
replicationCaughtUpSignal.countDown();
}
}
});
pusher.start();
// wait until that doc is pushed
boolean didNotTimeOut = replicationCaughtUpSignal.await(60, TimeUnit.SECONDS);
assertTrue(didNotTimeOut);
// at this point, we should have captured exactly 1 bulk docs request
numBulkDocRequests = 0;
for (HttpRequest capturedRequest : mockHttpClient.getCapturedRequests()) {
if (capturedRequest instanceof HttpPost && ((HttpPost) capturedRequest).getURI().toString().endsWith("_bulk_docs")) {
lastBulkDocsRequest = (HttpPost) capturedRequest;
numBulkDocRequests += 1;
}
}
assertEquals(1, numBulkDocRequests);
// that bulk docs request should have the "start" key under its _revisions
Map<String, Object> jsonMap = mockHttpClient.getJsonMapFromRequest((HttpPost) lastBulkDocsRequest);
List docs = (List) jsonMap.get("docs");
Map<String, Object> onlyDoc = (Map) docs.get(0);
Map<String, Object> revisions = (Map) onlyDoc.get("_revisions");
assertTrue(revisions.containsKey("start"));
// now add a new revision, which will trigger the pusher to try to push it
properties = new HashMap<String, Object>();
properties.put("testName2", "update doc");
UnsavedRevision unsavedRevision = doc.createRevision();
unsavedRevision.setUserProperties(properties);
unsavedRevision.save();
// but then immediately purge it
doc.purge();
// wait for a while to give the replicator a chance to push it
// (it should not actually push anything)
Thread.sleep(5 * 1000);
// we should not have gotten any more _bulk_docs requests, because
// the replicator should not have pushed anything else.
// (in the case of the bug, it was trying to push the purged revision)
numBulkDocRequests = 0;
for (HttpRequest capturedRequest : mockHttpClient.getCapturedRequests()) {
if (capturedRequest instanceof HttpPost && ((HttpPost) capturedRequest).getURI().toString().endsWith("_bulk_docs")) {
numBulkDocRequests += 1;
}
}
assertEquals(1, numBulkDocRequests);
stopReplication(pusher);
}
/**
* Regression test for https://github.com/couchbase/couchbase-lite-java-core/issues/72
*/
public void testPusherBatching() throws Throwable {
int previous = ReplicationInternal.INBOX_CAPACITY;
ReplicationInternal.INBOX_CAPACITY = 5;
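// with the inbox capacity lowered to 5 and 15 local docs created below, the
// pusher should batch them into _bulk_docs posts of at most 5 docs each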
try {
// create a bunch local documents
int numDocsToSend = ReplicationInternal.INBOX_CAPACITY * 3;
for (int i = 0; i < numDocsToSend; i++) {
Map<String, Object> properties = new HashMap<String, Object>();
properties.put("testPusherBatching", i);
createDocumentWithProperties(database, properties);
}
// kick off a one time push replication to a mock
final CustomizableMockHttpClient mockHttpClient = new CustomizableMockHttpClient();
mockHttpClient.addResponderFakeLocalDocumentUpdate404();
HttpClientFactory mockHttpClientFactory = mockFactoryFactory(mockHttpClient);
URL remote = getReplicationURL();
manager.setDefaultHttpClientFactory(mockHttpClientFactory);
Replication pusher = database.createPushReplication(remote);
runReplication(pusher);
assertNull(pusher.getLastError());
int numDocsSent = 0;
// verify that only INBOX_SIZE documents are included in any given bulk post request
List<HttpRequest> capturedRequests = mockHttpClient.getCapturedRequests();
for (HttpRequest capturedRequest : capturedRequests) {
if (capturedRequest instanceof HttpPost) {
HttpPost capturedPostRequest = (HttpPost) capturedRequest;
if (capturedPostRequest.getURI().getPath().endsWith("_bulk_docs")) {
ArrayList docs = CustomizableMockHttpClient.extractDocsFromBulkDocsPost(capturedRequest);
String msg = "# of bulk docs pushed should be <= INBOX_CAPACITY";
assertTrue(msg, docs.size() <= ReplicationInternal.INBOX_CAPACITY);
numDocsSent += docs.size();
}
}
}
assertEquals(numDocsToSend, numDocsSent);
} finally {
ReplicationInternal.INBOX_CAPACITY = previous;
}
}
public void failingTestPullerGzipped() throws Throwable {
// TODO: rewrite w/ MockWebserver
/*String docIdTimestamp = Long.toString(System.currentTimeMillis());
final String doc1Id = String.format("doc1-%s", docIdTimestamp);
String attachmentName = "attachment.png";
addDocWithId(doc1Id, attachmentName, true);
doPullReplication();
Log.d(TAG, "Fetching doc1 via id: " + doc1Id);
Document doc1 = database.getDocument(doc1Id);
assertNotNull(doc1);
assertTrue(doc1.getCurrentRevisionId().startsWith("1-"));
assertEquals(1, doc1.getProperties().get("foo"));
Attachment attachment = doc1.getCurrentRevision().getAttachment(attachmentName);
assertTrue(attachment.getLength() > 0);
assertTrue(attachment.getGZipped());
InputStream is = attachment.getContent();
byte[] receivedBytes = TextUtils.read(is);
is.close();
InputStream attachmentStream = getAsset(attachmentName);
byte[] actualBytes = TextUtils.read(attachmentStream);
Assert.assertEquals(actualBytes.length, receivedBytes.length);
Assert.assertEquals(actualBytes, receivedBytes);*/
}
/**
* Verify that validation blocks are called correctly for docs
* pulled from the sync gateway.
* <p/>
* - Add doc to (mock) sync gateway
* - Add validation function that will reject that doc
* - Do a pull replication
* - Assert that the doc does _not_ make it into the db
*/
public void testValidationBlockCalled() throws Throwable {
final MockDocumentGet.MockDocument mockDocument = new MockDocumentGet.MockDocument("doc1", "1-3e28", 1);
mockDocument.setJsonMap(MockHelper.generateRandomJsonMap());
// create mockwebserver and custom dispatcher
MockDispatcher dispatcher = new MockDispatcher();
MockWebServer server = MockHelper.getMockWebServer(dispatcher);
dispatcher.setServerType(MockDispatcher.ServerType.SYNC_GW);
try {
// checkpoint GET response w/ 404
MockResponse fakeCheckpointResponse = new MockResponse();
MockHelper.set404NotFoundJson(fakeCheckpointResponse);
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHECKPOINT, fakeCheckpointResponse);
// _changes response
MockChangesFeed mockChangesFeed = new MockChangesFeed();
mockChangesFeed.add(new MockChangesFeed.MockChangedDoc(mockDocument));
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHANGES, mockChangesFeed.generateMockResponse());
// doc response
MockDocumentGet mockDocumentGet = new MockDocumentGet(mockDocument);
dispatcher.enqueueResponse(mockDocument.getDocPathRegex(), mockDocumentGet.generateMockResponse());
// checkpoint PUT response
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHECKPOINT, new MockCheckpointPut());
// start mock server
server.play();
// Add Validation block
database.setValidation("testValidationBlockCalled", new Validator() {
@Override
public void validate(Revision newRevision, ValidationContext context) {
if (newRevision.getDocument().getId().equals(mockDocument.getDocId())) {
context.reject("Reject");
}
}
});
// run pull replication
Replication pullReplication = database.createPullReplication(server.getUrl("/db"));
runReplication(pullReplication);
waitForPutCheckpointRequestWithSeq(dispatcher, mockDocument.getDocSeq());
// assert doc is not in local db
Document doc = database.getDocument(mockDocument.getDocId());
assertNull(doc.getCurrentRevision()); // doc should have been rejected by validation, and therefore not present
} finally {
server.shutdown();
}
}
/**
* Attempting to reproduce couchtalk issue:
* <p/>
* https://github.com/couchbase/couchbase-lite-android/issues/312
* <p/>
* - Start continuous puller against mock SG w/ 50 docs
* - After every 10 docs received, restart replication
* - Make sure all 50 docs are received and stored in local db
*
* @throws Exception
*/
public void testMockPullerRestart() throws Exception {
final int numMockRemoteDocs = 20; // must be multiple of 10!
final AtomicInteger numDocsPulledLocally = new AtomicInteger(0);
MockDispatcher dispatcher = new MockDispatcher();
dispatcher.setServerType(MockDispatcher.ServerType.COUCHDB);
int numDocsPerChangesResponse = numMockRemoteDocs / 10;
MockWebServer server = MockHelper.getPreloadedPullTargetMockCouchDB(dispatcher, numMockRemoteDocs, numDocsPerChangesResponse);
try {
server.play();
final CountDownLatch receivedAllDocs = new CountDownLatch(1);
// run pull replication
Replication pullReplication = database.createPullReplication(server.getUrl("/db"));
pullReplication.setContinuous(true);
// the replication goes idle twice (once initially, once after restart);
// the second latch counts both IDLE transitions, hence count = 2
final CountDownLatch replicationIdleFirstTime = new CountDownLatch(1);
final CountDownLatch replicationIdleSecondTime = new CountDownLatch(2);
pullReplication.addChangeListener(new Replication.ChangeListener() {
@Override
public void changed(Replication.ChangeEvent event) {
if (event.getTransition() != null && event.getTransition().getDestination() == ReplicationState.IDLE) {
replicationIdleFirstTime.countDown();
replicationIdleSecondTime.countDown();
}
}
});
database.addChangeListener(new Database.ChangeListener() {
@Override
public void changed(Database.ChangeEvent event) {
List<DocumentChange> changes = event.getChanges();
for (DocumentChange change : changes) {
numDocsPulledLocally.addAndGet(1);
}
if (numDocsPulledLocally.get() == numMockRemoteDocs) {
receivedAllDocs.countDown();
}
}
});
pullReplication.start();
// wait until we received all mock docs or timeout occurs
boolean success = receivedAllDocs.await(60, TimeUnit.SECONDS);
assertTrue(success);
// wait until replication goes idle
success = replicationIdleFirstTime.await(60, TimeUnit.SECONDS);
assertTrue(success);
pullReplication.restart();
// wait until replication goes idle again
success = replicationIdleSecondTime.await(60, TimeUnit.SECONDS);
assertTrue(success);
stopReplication(pullReplication);
} finally {
// cleanup / shutdown
server.shutdown();
}
}
public void testRunReplicationWithError() throws Exception {
HttpClientFactory mockHttpClientFactory = new HttpClientFactory() {
@Override
public HttpClient getHttpClient() {
CustomizableMockHttpClient mockHttpClient = new CustomizableMockHttpClient();
int statusCode = 406;
mockHttpClient.addResponderFailAllRequests(statusCode);
return mockHttpClient;
}
@Override
public void addCookies(List<Cookie> cookies) {
}
@Override
public void deleteCookie(String name) {
}
@Override
public CookieStore getCookieStore() {
return null;
}
};
manager.setDefaultHttpClientFactory(mockHttpClientFactory);
Replication r1 = database.createPushReplication(getReplicationURL());
final CountDownLatch changeEventError = new CountDownLatch(1);
r1.addChangeListener(new Replication.ChangeListener() {
@Override
public void changed(Replication.ChangeEvent event) {
Log.d(TAG, "change event: %s", event);
if (event.getError() != null) {
changeEventError.countDown();
}
}
});
Assert.assertFalse(r1.isContinuous());
runReplication(r1);
// It should have failed with a 406:
Assert.assertEquals(0, r1.getCompletedChangesCount());
Assert.assertEquals(0, r1.getChangesCount());
Assert.assertNotNull(r1.getLastError());
boolean success = changeEventError.await(5, TimeUnit.SECONDS);
Assert.assertTrue(success);
}
public void testBuildRelativeURLString() throws Exception {
String dbUrlString = "http://10.0.0.3:4984/todos/";
Replication replication = database.createPullReplication(new URL(dbUrlString));
String relativeUrlString = replication.buildRelativeURLString("foo");
String expected = "http://10.0.0.3:4984/todos/foo";
Assert.assertEquals(expected, relativeUrlString);
}
public void testBuildRelativeURLStringWithLeadingSlash() throws Exception {
String dbUrlString = "http://10.0.0.3:4984/todos/";
Replication replication = database.createPullReplication(new URL(dbUrlString));
String relativeUrlString = replication.buildRelativeURLString("/foo");
String expected = "http://10.0.0.3:4984/todos/foo";
Assert.assertEquals(expected, relativeUrlString);
}
public void testChannels() throws Exception {
URL remote = getReplicationURL();
Replication replicator = database.createPullReplication(remote);
List<String> channels = new ArrayList<String>();
channels.add("chan1");
channels.add("chan2");
replicator.setChannels(channels);
Assert.assertEquals(channels, replicator.getChannels());
replicator.setChannels(null);
Assert.assertTrue(replicator.getChannels().isEmpty());
}
public void testChannelsMore() throws MalformedURLException, CouchbaseLiteException {
Database db = startDatabase();
URL fakeRemoteURL = new URL("http://couchbase.com/no_such_db");
Replication r1 = db.createPullReplication(fakeRemoteURL);
assertTrue(r1.getChannels().isEmpty());
r1.setFilter("foo/bar");
assertTrue(r1.getChannels().isEmpty());
Map<String, Object> filterParams = new HashMap<String, Object>();
filterParams.put("a", "b");
r1.setFilterParams(filterParams);
assertTrue(r1.getChannels().isEmpty());
r1.setChannels(null);
assertEquals("foo/bar", r1.getFilter());
assertEquals(filterParams, r1.getFilterParams());
List<String> channels = new ArrayList<String>();
channels.add("NBC");
channels.add("MTV");
r1.setChannels(channels);
assertEquals(channels, r1.getChannels());
assertEquals("sync_gateway/bychannel", r1.getFilter());
filterParams = new HashMap<String, Object>();
filterParams.put("channels", "NBC,MTV");
assertEquals(filterParams, r1.getFilterParams());
r1.setChannels(null);
assertEquals(null, r1.getFilter());
assertEquals(null, r1.getFilterParams());
}
/**
* https://github.com/couchbase/couchbase-lite-android/issues/247
*/
public void testPushReplicationRecoverableError() throws Exception {
boolean expectReplicatorError = false;
runPushReplicationWithTransientError("HTTP/1.1 503 Service Unavailable", expectReplicatorError);
}
/**
* https://github.com/couchbase/couchbase-lite-android/issues/247
*/
public void testPushReplicationNonRecoverableError() throws Exception {
boolean expectReplicatorError = true;
runPushReplicationWithTransientError("HTTP/1.1 404 Not Found", expectReplicatorError);
}
/**
* https://github.com/couchbase/couchbase-lite-android/issues/247
*/
public void runPushReplicationWithTransientError(String status, boolean expectReplicatorError) throws Exception {
String doc1Id = "doc1";
// create mockwebserver and custom dispatcher
MockDispatcher dispatcher = new MockDispatcher();
MockWebServer server = MockHelper.getMockWebServer(dispatcher);
dispatcher.setServerType(MockDispatcher.ServerType.SYNC_GW);
try {
server.play();
// add some documents
Document doc1 = createDocumentForPushReplication(doc1Id, null, null);
// checkpoint GET response w/ 404 + respond to all PUT Checkpoint requests
MockCheckpointPut mockCheckpointPut = new MockCheckpointPut();
mockCheckpointPut.setSticky(true);
mockCheckpointPut.setDelayMs(50);
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHECKPOINT, mockCheckpointPut);
// _revs_diff response -- everything missing
MockRevsDiff mockRevsDiff = new MockRevsDiff();
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_REVS_DIFF, mockRevsDiff);
// 1st _bulk_docs response -- transient error
MockResponse response = new MockResponse().setStatus(status);
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_BULK_DOCS, response);
// 2nd _bulk_docs response -- everything stored
MockBulkDocs mockBulkDocs = new MockBulkDocs();
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_BULK_DOCS, mockBulkDocs);
// run replication
Replication pusher = database.createPushReplication(server.getUrl("/db"));
pusher.setContinuous(false);
runReplication(pusher);
if (expectReplicatorError) {
assertNotNull(pusher.getLastError());
} else {
assertNull(pusher.getLastError());
int expectedLastSequence = 1;
Log.d(TAG, "waiting for put checkpoint with lastSequence: %d", expectedLastSequence);
List<RecordedRequest> checkpointRequests = waitForPutCheckpointRequestWithSequence(dispatcher, expectedLastSequence);
Log.d(TAG, "done waiting for put checkpoint with lastSequence: %d", expectedLastSequence);
validateCheckpointRequestsRevisions(checkpointRequests);
// assert our local sequence matches what is expected
String lastSequence = database.lastSequenceWithCheckpointId(pusher.remoteCheckpointDocID());
assertEquals(Integer.toString(expectedLastSequence), lastSequence);
// assert completed count makes sense
assertEquals(pusher.getChangesCount(), pusher.getCompletedChangesCount());
}
} finally {
// Shut down the server. Instances cannot be reused.
server.shutdown();
}
}
/**
* Verify that running a one-shot push replication will complete when run against a
* mock server that throws io exceptions on every request.
*/
public void testOneShotReplicationErrorNotification() throws Throwable {
int previous = RemoteRequestRetry.RETRY_DELAY_MS;
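// speed up test execution (shorten the delay between remote request retries)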
RemoteRequestRetry.RETRY_DELAY_MS = 5;
try {
final CustomizableMockHttpClient mockHttpClient = new CustomizableMockHttpClient();
mockHttpClient.addResponderThrowExceptionAllRequests();
URL remote = getReplicationURL();
manager.setDefaultHttpClientFactory(mockFactoryFactory(mockHttpClient));
Replication pusher = database.createPushReplication(remote);
runReplication(pusher);
assertNotNull(pusher.getLastError());
} finally {
RemoteRequestRetry.RETRY_DELAY_MS = previous;
}
}
/**
* Verify that running a continuous push replication will emit a change while
* in an error state when run against a mock server that returns 500 Internal Server
* errors on every request.
*/
public void testContinuousReplicationErrorNotification() throws Throwable {
int previous = RemoteRequestRetry.RETRY_DELAY_MS;
RemoteRequestRetry.RETRY_DELAY_MS = 5;
try {
final CustomizableMockHttpClient mockHttpClient = new CustomizableMockHttpClient();
mockHttpClient.addResponderThrowExceptionAllRequests();
URL remote = getReplicationURL();
manager.setDefaultHttpClientFactory(mockFactoryFactory(mockHttpClient));
Replication pusher = database.createPushReplication(remote);
pusher.setContinuous(true);
// add replication observer
final CountDownLatch countDownLatch = new CountDownLatch(1);
pusher.addChangeListener(new Replication.ChangeListener() {
@Override
public void changed(Replication.ChangeEvent event) {
if (event.getError() != null) {
countDownLatch.countDown();
}
}
});
// start replication
pusher.start();
boolean success = countDownLatch.await(30, TimeUnit.SECONDS);
assertTrue(success);
stopReplication(pusher);
} finally {
RemoteRequestRetry.RETRY_DELAY_MS = previous;
}
}
/**
* Test for the goOffline() method.
*/
public void testGoOffline() throws Exception {
final int numMockDocsToServe = 2;
// create mockwebserver and custom dispatcher
MockDispatcher dispatcher = new MockDispatcher();
MockWebServer server = MockHelper.getMockWebServer(dispatcher);
dispatcher.setServerType(MockDispatcher.ServerType.COUCHDB);
try {
server.play();
// mock documents to be pulled
MockDocumentGet.MockDocument mockDoc1 = new MockDocumentGet.MockDocument("doc1", "1-5e38", 1);
mockDoc1.setJsonMap(MockHelper.generateRandomJsonMap());
mockDoc1.setAttachmentName("attachment.png");
MockDocumentGet.MockDocument mockDoc2 = new MockDocumentGet.MockDocument("doc2", "1-563b", 2);
mockDoc2.setJsonMap(MockHelper.generateRandomJsonMap());
mockDoc2.setAttachmentName("attachment2.png");
// fake checkpoint PUT and GET response w/ 404
MockCheckpointPut fakeCheckpointResponse = new MockCheckpointPut();
fakeCheckpointResponse.setSticky(true);
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHECKPOINT, fakeCheckpointResponse);
// _changes response with docs
MockChangesFeed mockChangesFeed = new MockChangesFeed();
mockChangesFeed.add(new MockChangesFeed.MockChangedDoc(mockDoc1));
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHANGES, mockChangesFeed.generateMockResponse());
// next _changes response will block (e.g., longpoll response with no changes to return)
MockChangesFeed mockChangesFeedEmpty = new MockChangesFeed();
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHANGES, mockChangesFeedEmpty.generateMockResponse());
// doc1 response
MockDocumentGet mockDocumentGet = new MockDocumentGet(mockDoc1);
dispatcher.enqueueResponse(mockDoc1.getDocPathRegex(), mockDocumentGet.generateMockResponse());
// doc2 response
mockDocumentGet = new MockDocumentGet(mockDoc2);
dispatcher.enqueueResponse(mockDoc2.getDocPathRegex(), mockDocumentGet.generateMockResponse());
// create replication
Replication pullReplication = database.createPullReplication(server.getUrl("/db"));
pullReplication.setContinuous(true);
// add a change listener
final CountDownLatch idleCountdownLatch = new CountDownLatch(1);
final CountDownLatch receivedAllDocs = new CountDownLatch(1);
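// idleCountdownLatch fires on a transition into the IDLE state; receivedAllDocs fires
// once the completed-change count reaches numMockDocsToServe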
pullReplication.addChangeListener(new Replication.ChangeListener() {
@Override
public void changed(Replication.ChangeEvent event) {
Log.e(Log.TAG_SYNC, "event.getCompletedChangeCount() = " + event.getCompletedChangeCount());
if (event.getTransition() != null && event.getTransition().getDestination() == ReplicationState.IDLE) {
idleCountdownLatch.countDown();
}
if (event.getCompletedChangeCount() == numMockDocsToServe) {
receivedAllDocs.countDown();
}
}
});
// start replication
pullReplication.start();
// wait until it goes into idle state
boolean success = idleCountdownLatch.await(60, TimeUnit.SECONDS);
assertTrue(success);
// WORKAROUND: With CBL Java on Jenkins, the replicator enters the IDLE state before processing doc1. (NOT 100% REPRODUCIBLE)
// NOTE: 03/20/2014 This is also observable on a standard Android emulator with ARM. (NOT 100% REPRODUCIBLE)
// TODO: Need to fix: https://github.com/couchbase/couchbase-lite-java-core/issues/446
// NOTE: Build.BRAND.equalsIgnoreCase("generic") is only for Android, not for regular Java,
// so until the IDLE state issue is solved, always wait 5 seconds.
try {
Thread.sleep(5 * 1000);
} catch (Exception e) {
}
// put the replication offline
putReplicationOffline(pullReplication);
// at this point, we shouldn't have received all of the docs yet.
assertTrue(receivedAllDocs.getCount() > 0);
// return some more docs on _changes feed
MockChangesFeed mockChangesFeed2 = new MockChangesFeed();
mockChangesFeed2.add(new MockChangesFeed.MockChangedDoc(mockDoc2));
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHANGES, mockChangesFeed2.generateMockResponse());
// put the replication online (should see the new docs)
putReplicationOnline(pullReplication);
// wait until we receive all the docs
success = receivedAllDocs.await(60, TimeUnit.SECONDS);
assertTrue(success);
// wait until we try to PUT a checkpoint request with doc2's sequence
waitForPutCheckpointRequestWithSeq(dispatcher, mockDoc2.getDocSeq());
// make sure all docs in local db
Map<String, Object> allDocs = database.getAllDocs(new QueryOptions());
Integer totalRows = (Integer) allDocs.get("total_rows");
List rows = (List) allDocs.get("rows");
assertEquals(numMockDocsToServe, totalRows.intValue());
assertEquals(numMockDocsToServe, rows.size());
// cleanup
stopReplication(pullReplication);
} finally {
server.shutdown();
}
}
private void putReplicationOffline(Replication replication) throws InterruptedException {
Log.d(Log.TAG, "putReplicationOffline: %s", replication);
// register the listener before calling goOffline() so the transition cannot be missed
final CountDownLatch wentOffline = new CountDownLatch(1);
Replication.ChangeListener changeListener = new ReplicationOfflineObserver(wentOffline);
replication.addChangeListener(changeListener);
replication.goOffline();
boolean succeeded = wentOffline.await(30, TimeUnit.SECONDS);
assertTrue(succeeded);
replication.removeChangeListener(changeListener);
Log.d(Log.TAG, "/putReplicationOffline: %s", replication);
}
private void putReplicationOnline(Replication replication) throws InterruptedException {
Log.d(Log.TAG, "putReplicationOnline: %s", replication);
// register the listener before calling goOnline() so the transition cannot be missed
final CountDownLatch wentOnline = new CountDownLatch(1);
Replication.ChangeListener changeListener = new ReplicationActiveObserver(wentOnline);
replication.addChangeListener(changeListener);
replication.goOnline();
boolean succeeded = wentOnline.await(30, TimeUnit.SECONDS);
assertTrue(succeeded);
replication.removeChangeListener(changeListener);
Log.d(Log.TAG, "/putReplicationOnline: %s", replication);
}
/**
* https://github.com/couchbase/couchbase-lite-java-core/issues/253
*/
public void testReplicationOnlineExtraneousChangeTrackers() throws Exception {
// create mockwebserver and custom dispatcher
MockDispatcher dispatcher = new MockDispatcher();
MockWebServer server = MockHelper.getMockWebServer(dispatcher);
dispatcher.setServerType(MockDispatcher.ServerType.COUCHDB);
try {
// add sticky checkpoint GET response w/ 404
MockCheckpointGet fakeCheckpointResponse = new MockCheckpointGet();
fakeCheckpointResponse.set404(true);
fakeCheckpointResponse.setSticky(true);
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHECKPOINT, fakeCheckpointResponse);
// add sticky _changes response to feed=longpoll that just blocks for 60 seconds to emulate
// server that doesn't have any new changes
MockChangesFeedNoResponse mockChangesFeedNoResponse = new MockChangesFeedNoResponse();
mockChangesFeedNoResponse.setDelayMs(60 * 1000);
mockChangesFeedNoResponse.setSticky(true);
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHANGES_LONGPOLL, mockChangesFeedNoResponse);
// add _changes response to feed=normal that returns empty _changes feed immediately
MockChangesFeed mockChangesFeed = new MockChangesFeed();
MockResponse mockResponse = mockChangesFeed.generateMockResponse();
for (int i = 0; i < 500; i++) { // TODO: use setSticky instead of workaround to add a ton of mock responses
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHANGES_NORMAL, new WrappedSmartMockResponse(mockResponse));
}
// start mock server
server.play();
//create url for replication
URL baseUrl = server.getUrl("/db");
//create replication
final Replication pullReplication = database.createPullReplication(baseUrl);
pullReplication.setContinuous(true);
pullReplication.start();
// wait until we get a request to the _changes feed
RecordedRequest changesReq = dispatcher.takeRequestBlocking(MockHelper.PATH_REGEX_CHANGES_LONGPOLL);
assertNotNull(changesReq);
putReplicationOffline(pullReplication);
// at this point, since we called takeRequestBlocking earlier, our recorded _changes request queue should be empty
assertNull(dispatcher.takeRequest(MockHelper.PATH_REGEX_CHANGES_LONGPOLL));
// put replication online 10 times
for (int i = 0; i < 10; i++) {
pullReplication.goOnline();
}
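// the redundant goOnline() calls above are expected to be coalesced, so the replicator
// should spawn only one new change tracker (asserted below)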
// sleep for a while to give things a chance to start
Log.d(TAG, "sleeping for 2 seconds");
Thread.sleep(2 * 1000);
Log.d(TAG, "done sleeping");
// how many _changes feed requests has the replicator made since going online?
int numChangesRequests = 0;
while ((changesReq = dispatcher.takeRequest(MockHelper.PATH_REGEX_CHANGES_LONGPOLL)) != null) {
Log.d(TAG, "changesReq: %s", changesReq);
numChangesRequests += 1;
}
// assert that there was only one _changes feed request
assertEquals(1, numChangesRequests);
// shutdown
stopReplication(pullReplication);
} finally {
server.shutdown();
}
}
/**
* Test goOffline() method in the context of a continuous pusher.
* <p/>
* - 1. Add a local document
* - 2. Kick off continuous push replication
* - 3. Wait for document to be pushed
* - 4. Call goOffline()
* - 5. Add a 2nd local document
* - 6. Call goOnline()
* - 7. Wait for 2nd document to be pushed
*
* @throws Exception
*/
public void testGoOfflinePusher() throws Exception {
int previous = RemoteRequestRetry.RETRY_DELAY_MS;
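// speed up test execution (retry delay)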
RemoteRequestRetry.RETRY_DELAY_MS = 5;
try {
// 1. Add a local document
Map<String, Object> properties = new HashMap<String, Object>();
properties.put("testGoOfflinePusher", "1");
Document doc1 = createDocumentWithProperties(database, properties);
// create mock server
MockWebServer server = new MockWebServer();
try {
MockDispatcher dispatcher = new MockDispatcher();
server.setDispatcher(dispatcher);
server.play();
// checkpoint PUT response (sticky)
MockCheckpointPut mockCheckpointPut = new MockCheckpointPut();
mockCheckpointPut.setSticky(true);
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHECKPOINT, mockCheckpointPut);
// _revs_diff response -- everything missing
MockRevsDiff mockRevsDiff = new MockRevsDiff();
mockRevsDiff.setSticky(true);
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_REVS_DIFF, mockRevsDiff);
// _bulk_docs response -- everything stored
MockBulkDocs mockBulkDocs = new MockBulkDocs();
mockBulkDocs.setSticky(true);
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_BULK_DOCS, mockBulkDocs);
// 2. Kick off continuous push replication
Replication replicator = database.createPushReplication(server.getUrl("/db"));
replicator.setContinuous(true);
CountDownLatch replicationIdleSignal = new CountDownLatch(1);
ReplicationIdleObserver replicationIdleObserver = new ReplicationIdleObserver(replicationIdleSignal);
replicator.addChangeListener(replicationIdleObserver);
replicator.start();
// 3. Wait for document to be pushed
// wait until replication goes idle
boolean successful = replicationIdleSignal.await(30, TimeUnit.SECONDS);
assertTrue(successful);
// wait until mock server gets the checkpoint PUT request
boolean foundCheckpointPut = false;
String expectedLastSequence = "1";
while (!foundCheckpointPut) {
RecordedRequest request = dispatcher.takeRequestBlocking(MockHelper.PATH_REGEX_CHECKPOINT);
if (request.getMethod().equals("PUT")) {
foundCheckpointPut = true;
Assert.assertTrue(request.getUtf8Body().contains(expectedLastSequence));
// wait until mock server responds to the checkpoint PUT request
dispatcher.takeRecordedResponseBlocking(request);
}
}
// make some assertions about the outgoing _bulk_docs requests for first doc
RecordedRequest bulkDocsRequest1 = dispatcher.takeRequest(MockHelper.PATH_REGEX_BULK_DOCS);
assertNotNull(bulkDocsRequest1);
assertBulkDocJsonContainsDoc(bulkDocsRequest1, doc1);
// 4. Call goOffline()
putReplicationOffline(replicator);
// 5. Add a 2nd local document
properties = new HashMap<String, Object>();
properties.put("testGoOfflinePusher", "2");
Document doc2 = createDocumentWithProperties(database, properties);
// make sure the push replicator does not send any requests while offline.
try {
Thread.sleep(1000 * 3);
} catch (Exception ex) {
}
// make sure no _bulk_docs request is received while offline.
RecordedRequest bulkDocsRequest = dispatcher.takeRequest(MockHelper.PATH_REGEX_BULK_DOCS);
assertNull(bulkDocsRequest);
// 6. Call goOnline()
putReplicationOnline(replicator);
// wait until mock server gets the 2nd checkpoint PUT request
foundCheckpointPut = false;
expectedLastSequence = "2";
while (!foundCheckpointPut) {
RecordedRequest request = dispatcher.takeRequestBlocking(MockHelper.PATH_REGEX_CHECKPOINT);
if (request.getMethod().equals("PUT")) {
foundCheckpointPut = true;
Assert.assertTrue(request.getUtf8Body().contains(expectedLastSequence));
// wait until mock server responds to the checkpoint PUT request
dispatcher.takeRecordedResponseBlocking(request);
}
}
// make some assertions about the outgoing _bulk_docs requests for second doc
RecordedRequest bulkDocsRequest2 = dispatcher.takeRequest(MockHelper.PATH_REGEX_BULK_DOCS);
assertNotNull(bulkDocsRequest2);
assertBulkDocJsonContainsDoc(bulkDocsRequest2, doc2);
// cleanup
stopReplication(replicator);
} finally {
server.shutdown();
}
} finally {
RemoteRequestRetry.RETRY_DELAY_MS = previous;
}
}
/**
* Verify that when a replication runs into an auth error, it stops
* and the lastError() method returns that error.
*/
public void testReplicatorErrorStatus() throws Exception {
// create mockwebserver and custom dispatcher
MockDispatcher dispatcher = new MockDispatcher();
MockWebServer server = MockHelper.getMockWebServer(dispatcher);
dispatcher.setServerType(MockDispatcher.ServerType.SYNC_GW);
try {
// fake _session response
MockSessionGet mockSessionGet = new MockSessionGet();
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_SESSION, mockSessionGet.generateMockResponse());
// fake _facebook response
MockFacebookAuthPost mockFacebookAuthPost = new MockFacebookAuthPost();
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_FACEBOOK_AUTH, mockFacebookAuthPost.generateMockResponse());
// start mock server
server.play();
// register bogus fb token
Authenticator facebookAuthenticator = AuthenticatorFactory.createFacebookAuthenticator("fake_access_token");
// run pull replication
Replication pullReplication = database.createPullReplication(server.getUrl("/db"));
pullReplication.setAuthenticator(facebookAuthenticator);
pullReplication.setContinuous(false);
runReplication(pullReplication);
// run replicator and make sure it has an error
assertNotNull(pullReplication.getLastError());
assertTrue(pullReplication.getLastError() instanceof HttpResponseException);
assertEquals(401 /* unauthorized */, ((HttpResponseException) pullReplication.getLastError()).getStatusCode());
// assert that the replicator sent the requests we expected it to send
RecordedRequest sessionRequest = dispatcher.takeRequest(MockHelper.PATH_REGEX_SESSION);
assertNotNull(sessionRequest);
RecordedRequest facebookRequest = dispatcher.takeRequest(MockHelper.PATH_REGEX_FACEBOOK_AUTH);
assertNotNull(facebookRequest);
dispatcher.verifyAllRecordedRequestsTaken();
} finally {
server.shutdown();
}
}
public void testGetReplicatorWithCustomHeader() throws Throwable {
// create mockwebserver and custom dispatcher
MockDispatcher dispatcher = new MockDispatcher();
MockWebServer server = MockHelper.getMockWebServer(dispatcher);
dispatcher.setServerType(MockDispatcher.ServerType.SYNC_GW);
try {
// checkpoint PUT or GET response (sticky)
MockCheckpointPut mockCheckpointPut = new MockCheckpointPut();
mockCheckpointPut.setSticky(true);
mockCheckpointPut.setDelayMs(500);
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHECKPOINT, mockCheckpointPut);
server.play();
Map<String, Object> properties = new HashMap<String, Object>();
properties.put("source", DEFAULT_TEST_DB);
// target with custom headers (cookie)
Map<String, Object> headers = new HashMap<String, Object>();
String cookieVal = "SyncGatewaySession=c38687c2696688a";
headers.put("Cookie", cookieVal);
Map<String, Object> targetProperties = new HashMap<String, Object>();
targetProperties.put("url", server.getUrl("/db").toExternalForm());
targetProperties.put("headers", headers);
properties.put("target", targetProperties);
Replication replicator = manager.getReplicator(properties);
assertNotNull(replicator);
assertEquals(server.getUrl("/db").toExternalForm(), replicator.getRemoteUrl().toExternalForm());
assertFalse(replicator.isPull());
assertFalse(replicator.isContinuous());
assertFalse(replicator.isRunning());
assertTrue(replicator.getHeaders().containsKey("Cookie"));
assertEquals(cookieVal, replicator.getHeaders().get("Cookie"));
// add replication observer
CountDownLatch replicationDoneSignal = new CountDownLatch(1);
ReplicationFinishedObserver replicationFinishedObserver = new ReplicationFinishedObserver(replicationDoneSignal);
replicator.addChangeListener(replicationFinishedObserver);
// start the replicator
Log.d(TAG, "Starting replicator " + replicator);
replicator.start();
final CountDownLatch replicationStarted = new CountDownLatch(1);
replicator.addChangeListener(new ReplicationActiveObserver(replicationStarted));
boolean success = replicationStarted.await(30, TimeUnit.SECONDS);
assertTrue(success);
// now let's look up the existing replicator and stop it
Log.d(TAG, "Looking up replicator");
properties.put("cancel", true);
Replication activeReplicator = manager.getReplicator(properties);
Log.d(TAG, "Found replicator " + activeReplicator + " and calling stop()");
activeReplicator.stop();
Log.d(TAG, "called stop(), waiting for it to finish");
// wait for replication to finish
boolean didNotTimeOut = replicationDoneSignal.await(180, TimeUnit.SECONDS);
Log.d(TAG, "replicationDoneSignal.await done, didNotTimeOut: " + didNotTimeOut);
assertTrue(didNotTimeOut);
assertFalse(activeReplicator.isRunning());
} finally {
server.shutdown();
}
}
public void testGetReplicator() throws Throwable {
// create mockwebserver and custom dispatcher
MockDispatcher dispatcher = new MockDispatcher();
MockWebServer server = MockHelper.getMockWebServer(dispatcher);
dispatcher.setServerType(MockDispatcher.ServerType.SYNC_GW);
try {
// checkpoint PUT or GET response (sticky)
MockCheckpointPut mockCheckpointPut = new MockCheckpointPut();
mockCheckpointPut.setSticky(true);
mockCheckpointPut.setDelayMs(500);
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHECKPOINT, mockCheckpointPut);
server.play();
Map<String, Object> properties = new HashMap<String, Object>();
properties.put("source", DEFAULT_TEST_DB);
properties.put("target", server.getUrl("/db").toExternalForm());
Replication replicator = manager.getReplicator(properties);
assertNotNull(replicator);
assertEquals(server.getUrl("/db").toExternalForm(), replicator.getRemoteUrl().toExternalForm());
assertFalse(replicator.isPull());
assertFalse(replicator.isContinuous());
assertFalse(replicator.isRunning());
// add replication observer
CountDownLatch replicationDoneSignal = new CountDownLatch(1);
ReplicationFinishedObserver replicationFinishedObserver = new ReplicationFinishedObserver(replicationDoneSignal);
replicator.addChangeListener(replicationFinishedObserver);
// start the replicator
Log.d(TAG, "Starting replicator " + replicator);
replicator.start();
final CountDownLatch replicationStarted = new CountDownLatch(1);
replicator.addChangeListener(new ReplicationActiveObserver(replicationStarted));
boolean success = replicationStarted.await(30, TimeUnit.SECONDS);
assertTrue(success);
// now let's look up the existing replicator and stop it
Log.d(TAG, "Looking up replicator");
properties.put("cancel", true);
Replication activeReplicator = manager.getReplicator(properties);
Log.d(TAG, "Found replicator " + activeReplicator + " and calling stop()");
activeReplicator.stop();
Log.d(TAG, "called stop(), waiting for it to finish");
// wait for replication to finish
boolean didNotTimeOut = replicationDoneSignal.await(180, TimeUnit.SECONDS);
Log.d(TAG, "replicationDoneSignal.await done, didNotTimeOut: " + didNotTimeOut);
assertTrue(didNotTimeOut);
assertFalse(activeReplicator.isRunning());
} finally {
server.shutdown();
}
}
public void testGetReplicatorWithAuth() throws Throwable {
Map<String, Object> authProperties = getReplicationAuthParsedJson();
Map<String, Object> targetProperties = new HashMap<String, Object>();
targetProperties.put("url", getReplicationURL().toExternalForm());
targetProperties.put("auth", authProperties);
Map<String, Object> properties = new HashMap<String, Object>();
properties.put("source", DEFAULT_TEST_DB);
properties.put("target", targetProperties);
Replication replicator = manager.getReplicator(properties);
assertNotNull(replicator);
assertNotNull(replicator.getAuthenticator());
assertTrue(replicator.getAuthenticator() instanceof FacebookAuthorizer);
}
/**
* When the server returns a 409 error to a PUT checkpoint response, make
* sure it does the right thing:
* - Pull latest remote checkpoint
* - Try to push checkpoint again (this time passing the latest rev)
*
* @throws Exception
*/
public void testPutCheckpoint409Recovery() throws Exception {
// create mockwebserver and custom dispatcher
MockDispatcher dispatcher = new MockDispatcher();
MockWebServer server = MockHelper.getMockWebServer(dispatcher);
dispatcher.setServerType(MockDispatcher.ServerType.SYNC_GW);
try {
// mock documents to be pulled
MockDocumentGet.MockDocument mockDoc1 = new MockDocumentGet.MockDocument("doc1", "1-5e38", 1);
mockDoc1.setJsonMap(MockHelper.generateRandomJsonMap());
// checkpoint GET response w/ 404
MockResponse fakeCheckpointResponse = new MockResponse();
MockHelper.set404NotFoundJson(fakeCheckpointResponse);
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHECKPOINT, fakeCheckpointResponse);
// _changes response
MockChangesFeed mockChangesFeed = new MockChangesFeed();
mockChangesFeed.add(new MockChangesFeed.MockChangedDoc(mockDoc1));
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHANGES, mockChangesFeed.generateMockResponse());
// doc1 response
MockDocumentGet mockDocumentGet = new MockDocumentGet(mockDoc1);
dispatcher.enqueueResponse(mockDoc1.getDocPathRegex(), mockDocumentGet.generateMockResponse());
// respond with 409 error to mock checkpoint PUT
MockResponse checkpointResponse409 = new MockResponse();
checkpointResponse409.setStatus("HTTP/1.1 409 CONFLICT");
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHECKPOINT, checkpointResponse409);
// the replicator should then try to do a checkpoint GET, and in this case
// it should return a value with a rev id
MockCheckpointGet mockCheckpointGet = new MockCheckpointGet();
mockCheckpointGet.setOk("true");
mockCheckpointGet.setRev("0-1");
mockCheckpointGet.setLastSequence("0");
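// (rev "0-1" is the latest checkpoint revision the retried PUT should reference)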
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHECKPOINT, mockCheckpointGet);
// the replicator should then try a checkpoint PUT again
// and we should respond with a 201
MockCheckpointPut mockCheckpointPut = new MockCheckpointPut();
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHECKPOINT, mockCheckpointPut);
// start mock server
server.play();
// run pull replication
Replication pullReplication = database.createPullReplication(server.getUrl("/db"));
// I had to set this to continuous, because in a one-shot replication it tries to
// save the checkpoint asynchronously as the replicator is shutting down, which
// breaks the retry logic in the case where a 409 conflict is returned by the server.
pullReplication.setContinuous(true);
pullReplication.start();
// we should have gotten two requests to PATH_REGEX_CHECKPOINT:
// PUT -> 409 Conflict
// PUT -> 201 Created
for (int i = 1; i <= 2; i++) {
Log.v(TAG, "waiting for PUT checkpoint: %d", i);
waitForPutCheckpointRequestWithSeq(dispatcher, mockDoc1.getDocSeq());
Log.d(TAG, "got PUT checkpoint: %d", i);
}
stopReplication(pullReplication);
} finally {
server.shutdown();
}
}
/**
* Verify that a validation-based reject does not revert the entire batch that the
* document is in when only one of the documents fails validation.
* <p/>
* https://github.com/couchbase/couchbase-lite-java-core/issues/242
*
* @throws Exception
*/
public void testVerifyPullerInsertsDocsWithValidation() throws Exception {
// create mockwebserver and custom dispatcher
MockDispatcher dispatcher = new MockDispatcher();
MockWebServer server = MockHelper.getPreloadedPullTargetMockCouchDB(dispatcher, 2, 2);
try {
server.play();
// Setup validation to reject document with id: doc1
database.setValidation("validateOnlyDoc1", new Validator() {
@Override
public void validate(Revision newRevision, ValidationContext context) {
if ("doc1".equals(newRevision.getDocument().getId())) {
context.reject();
}
}
});
// run pull replication
Replication pullReplication = database.createPullReplication(server.getUrl("/db"));
runReplication(pullReplication);
assertNotNull(database);
// doc1 should not be in the store because of validation
assertNull(database.getExistingDocument("doc1"));
// doc0 should be in the store, but it won't be because of the bug.
assertNotNull(database.getExistingDocument("doc0"));
} finally {
server.shutdown();
}
}
/**
* Make sure calling puller.setChannels() causes the change tracker to send the correct
* request to the sync gateway.
* <p/>
* https://github.com/couchbase/couchbase-lite-java-core/issues/292
*/
public void testChannelsFilter() throws Exception {
// create mockwebserver and custom dispatcher
MockDispatcher dispatcher = new MockDispatcher();
MockWebServer server = MockHelper.getMockWebServer(dispatcher);
dispatcher.setServerType(MockDispatcher.ServerType.SYNC_GW);
try {
// checkpoint PUT or GET response (sticky)
MockCheckpointPut mockCheckpointPut = new MockCheckpointPut();
mockCheckpointPut.setSticky(true);
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHECKPOINT, mockCheckpointPut);
// _changes response
MockChangesFeed mockChangesFeed = new MockChangesFeed();
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHANGES, mockChangesFeed.generateMockResponse());
// start mock server
server.play();
// run pull replication
Replication pullReplication = database.createPullReplication(server.getUrl("/db"));
pullReplication.setChannels(Arrays.asList("foo", "bar"));
runReplication(pullReplication);
// make assertions about outgoing requests from replicator -> mock
RecordedRequest getChangesFeedRequest = dispatcher.takeRequest(MockHelper.PATH_REGEX_CHANGES);
assertEquals("POST", getChangesFeedRequest.getMethod());
String body = getChangesFeedRequest.getUtf8Body();
Map<String, Object> jsonMap = Manager.getObjectMapper().readValue(body, Map.class);
assertTrue(jsonMap.containsKey("filter"));
String filter = (String) jsonMap.get("filter");
assertEquals("sync_gateway/bychannel", filter);
assertTrue(jsonMap.containsKey("channels"));
String channels = (String) jsonMap.get("channels");
assertTrue(channels.contains("foo"));
assertTrue(channels.contains("bar"));
} finally {
server.shutdown();
}
}
/**
* - Start continuous pull
* - Mockwebserver responds that there are no changes
* - Assert that puller goes into IDLE state
* <p/>
* https://github.com/couchbase/couchbase-lite-android/issues/445
*/
public void testContinuousPullEntersIdleState() throws Exception {
// create mockwebserver and custom dispatcher
MockDispatcher dispatcher = new MockDispatcher();
MockWebServer server = MockHelper.getMockWebServer(dispatcher);
dispatcher.setServerType(MockDispatcher.ServerType.SYNC_GW);
try {
// checkpoint GET response w/ 404
MockResponse fakeCheckpointResponse = new MockResponse();
MockHelper.set404NotFoundJson(fakeCheckpointResponse);
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHECKPOINT, fakeCheckpointResponse);
// add non-sticky changes response that returns no changes
MockChangesFeed mockChangesFeed = new MockChangesFeed();
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHANGES, mockChangesFeed.generateMockResponse());
// add sticky _changes response that just blocks for 60 seconds to emulate
// server that doesn't have any new changes
MockChangesFeedNoResponse mockChangesFeedNoResponse = new MockChangesFeedNoResponse();
mockChangesFeedNoResponse.setDelayMs(60 * 1000);
mockChangesFeedNoResponse.setSticky(true);
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHANGES, mockChangesFeedNoResponse);
server.play();
// create pull replication
Replication pullReplication = database.createPullReplication(server.getUrl("/db"));
pullReplication.setContinuous(true);
final CountDownLatch enteredIdleState = new CountDownLatch(1);
pullReplication.addChangeListener(new Replication.ChangeListener() {
@Override
public void changed(Replication.ChangeEvent event) {
if (event.getSource().getStatus() == Replication.ReplicationStatus.REPLICATION_IDLE) {
enteredIdleState.countDown();
}
}
});
// start pull replication
pullReplication.start();
boolean success = enteredIdleState.await(30, TimeUnit.SECONDS);
assertTrue(success);
Log.d(TAG, "Got IDLE event, stopping replication");
stopReplication(pullReplication);
} finally {
server.shutdown();
}
}
/**
* Spotted in https://github.com/couchbase/couchbase-lite-java-core/issues/313
* but there is another related ticket linked from issue 313
*/
public void failingTestMockPullBulkDocsSyncGw() throws Exception {
mockPullBulkDocs(MockDispatcher.ServerType.SYNC_GW);
}
public void mockPullBulkDocs(MockDispatcher.ServerType serverType) throws Exception {
// set INBOX_CAPACITY to a smaller value so that processing times don't skew the test
int defaultCapacity = ReplicationInternal.INBOX_CAPACITY;
ReplicationInternal.INBOX_CAPACITY = 10;
int defaultDelay = ReplicationInternal.PROCESSOR_DELAY;
ReplicationInternal.PROCESSOR_DELAY = ReplicationInternal.PROCESSOR_DELAY * 10;
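// the longer processor delay lets the inbox fill up before the batcher drains it,
// so each _bulk_get batch should approach INBOX_CAPACITY docs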
// serve 25 mock docs
int numMockDocsToServe = (ReplicationInternal.INBOX_CAPACITY * 2) + (ReplicationInternal.INBOX_CAPACITY / 2);
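// with INBOX_CAPACITY = 10 this is 25 docs: two full batches plus one half batch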
// create mockwebserver and custom dispatcher
MockDispatcher dispatcher = new MockDispatcher();
MockWebServer server = MockHelper.getMockWebServer(dispatcher);
dispatcher.setServerType(serverType);
try {
// mock documents to be pulled
List<MockDocumentGet.MockDocument> mockDocs = MockHelper.getMockDocuments(numMockDocsToServe);
// respond to all GET (responds with 404) and PUT Checkpoint requests
MockCheckpointPut mockCheckpointPut = new MockCheckpointPut();
mockCheckpointPut.setSticky(true);
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHECKPOINT, mockCheckpointPut);
// _changes response
MockChangesFeed mockChangesFeed = new MockChangesFeed();
for (MockDocumentGet.MockDocument mockDocument : mockDocs) {
mockChangesFeed.add(new MockChangesFeed.MockChangedDoc(mockDocument));
}
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHANGES, mockChangesFeed.generateMockResponse());
// individual doc responses (expecting it to call _bulk_get, but just in case)
for (MockDocumentGet.MockDocument mockDocument : mockDocs) {
MockDocumentGet mockDocumentGet = new MockDocumentGet(mockDocument);
dispatcher.enqueueResponse(mockDocument.getDocPathRegex(), mockDocumentGet.generateMockResponse());
}
// _bulk_get response
MockDocumentBulkGet mockBulkGet = new MockDocumentBulkGet();
for (MockDocumentGet.MockDocument mockDocument : mockDocs) {
mockBulkGet.addDocument(mockDocument);
}
mockBulkGet.setSticky(true);
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_BULK_GET, mockBulkGet);
// start mock server
server.play();
// run pull replication
Replication pullReplication = database.createPullReplication(server.getUrl("/db"));
runReplication(pullReplication, 3 * 60);
assertNull(pullReplication.getLastError());
// wait until it pushes checkpoint of last doc
MockDocumentGet.MockDocument lastDoc = mockDocs.get(mockDocs.size() - 1);
waitForPutCheckpointRequestWithSequence(dispatcher, lastDoc.getDocSeq());
// dump out the outgoing _bulk_get requests
BlockingQueue<RecordedRequest> bulkGetRequests = dispatcher.getRequestQueueSnapshot(MockHelper.PATH_REGEX_BULK_GET);
Iterator<RecordedRequest> iterator = bulkGetRequests.iterator();
boolean first = true;
while (iterator.hasNext()) {
RecordedRequest request = iterator.next();
byte[] body = MockHelper.getUncompressedBody(request);
Map<String, Object> jsonMap = MockHelper.getJsonMapFromRequest(body);
List docs = (List) jsonMap.get("docs");
Log.w(TAG, "bulk get request: %s had %d docs", request, docs.size());
// except for the first and last ones, docs.size() should be (nearly) equal to INBOX_CAPACITY.
if (iterator.hasNext() && !first) {
// the _bulk_get requests, except for the last one, should have the max number of docs;
// relax this a bit: each must have at least half the max number of docs
assertTrue(docs.size() >= (ReplicationInternal.INBOX_CAPACITY / 2));
if (docs.size() != ReplicationInternal.INBOX_CAPACITY) {
Log.w(TAG, "docs.size() %d != ReplicationInternal.INBOX_CAPACITY %d", docs.size(), ReplicationInternal.INBOX_CAPACITY);
}
}
first = false;
}
} finally {
ReplicationInternal.INBOX_CAPACITY = defaultCapacity;
ReplicationInternal.PROCESSOR_DELAY = defaultDelay;
server.shutdown();
}
}
/**
* Make sure that after trying /db/_session, it falls back to /_session.
* <p/>
* Currently there is a bug where it tries /db/_session, and then
* tries /db_session.
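* The expected behavior, asserted below, is GET /db/_session (404) followed by GET /_session (200).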
* <p/>
* https://github.com/couchbase/couchbase-lite-java-core/issues/208
*/
public void testCheckSessionAtPath() throws Exception {
// create mockwebserver and custom dispatcher
MockDispatcher dispatcher = new MockDispatcher();
MockWebServer server = MockHelper.getMockWebServer(dispatcher);
dispatcher.setServerType(MockDispatcher.ServerType.COUCHDB);
try {
// session GET response w/ 404 to /db/_session
MockResponse fakeSessionResponse = new MockResponse();
MockHelper.set404NotFoundJson(fakeSessionResponse);
WrappedSmartMockResponse wrappedSmartMockResponse = new WrappedSmartMockResponse(fakeSessionResponse);
wrappedSmartMockResponse.setSticky(true);
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_SESSION, wrappedSmartMockResponse);
// session GET response w/ 200 OK to /_session
MockResponse fakeSessionResponse2 = new MockResponse();
Map<String, Object> responseJson = new HashMap<String, Object>();
Map<String, Object> userCtx = new HashMap<String, Object>();
userCtx.put("name", "foo");
responseJson.put("userCtx", userCtx);
fakeSessionResponse2.setBody(Manager.getObjectMapper().writeValueAsBytes(responseJson));
MockHelper.set200OKJson(fakeSessionResponse2);
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_SESSION_COUCHDB, fakeSessionResponse2);
// respond to all GET/PUT Checkpoint requests
MockCheckpointPut mockCheckpointPut = new MockCheckpointPut();
mockCheckpointPut.setSticky(true);
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHECKPOINT, mockCheckpointPut);
// start mock server
server.play();
// run pull replication
Replication pullReplication = database.createPullReplication(server.getUrl("/db"));
pullReplication.setAuthenticator(new FacebookAuthorizer("[email protected]"));
CountDownLatch replicationDoneSignal = new CountDownLatch(1);
ReplicationFinishedObserver replicationFinishedObserver = new ReplicationFinishedObserver(replicationDoneSignal);
pullReplication.addChangeListener(replicationFinishedObserver);
pullReplication.start();
// it should first try /db/_session
dispatcher.takeRequestBlocking(MockHelper.PATH_REGEX_SESSION);
// and then it should fall back to /_session
dispatcher.takeRequestBlocking(MockHelper.PATH_REGEX_SESSION_COUCHDB);
boolean success = replicationDoneSignal.await(30, TimeUnit.SECONDS);
Assert.assertTrue(success);
} finally {
server.shutdown();
}
}
/**
* - Start one shot replication
* - Changes feed request returns error
* - Change tracker stops
* - Replication stops -- make sure ChangeListener gets error
* <p/>
* https://github.com/couchbase/couchbase-lite-java-core/issues/334
*/
public void testChangeTrackerError() throws Exception {
// create mockwebserver and custom dispatcher
MockDispatcher dispatcher = new MockDispatcher();
MockWebServer server = MockHelper.getMockWebServer(dispatcher);
dispatcher.setServerType(MockDispatcher.ServerType.SYNC_GW);
try {
// checkpoint GET response w/ 404 + respond to all PUT Checkpoint requests
MockCheckpointPut mockCheckpointPut = new MockCheckpointPut();
mockCheckpointPut.setSticky(true);
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHECKPOINT, mockCheckpointPut);
// 404 response to _changes feed (sticky)
MockResponse mockChangesFeed = new MockResponse();
MockHelper.set404NotFoundJson(mockChangesFeed);
WrappedSmartMockResponse wrapped = new WrappedSmartMockResponse(mockChangesFeed);
wrapped.setSticky(true);
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHANGES, wrapped);
// start mock server
server.play();
// run pull replication
Replication pullReplication = database.createPullReplication(server.getUrl("/db"));
final CountDownLatch changeEventError = new CountDownLatch(1);
pullReplication.addChangeListener(new Replication.ChangeListener() {
@Override
public void changed(Replication.ChangeEvent event) {
if (event.getError() != null) {
changeEventError.countDown();
}
}
});
runReplication(pullReplication);
Assert.assertNotNull(pullReplication.getLastError());
boolean success = changeEventError.await(5, TimeUnit.SECONDS);
Assert.assertTrue(success);
} finally {
server.shutdown();
}
}
/**
* https://github.com/couchbase/couchbase-lite-java-core/issues/358
*
* related: https://github.com/couchbase/couchbase-lite-java-core/issues/55
* related: testContinuousPushReplicationGoesIdle()
* <p/>
* test steps:
* - start replicator
* - make sure replicator becomes idle state
* - add N docs
* - when callback state == idle
* - assert that mock has received N docs
*/
public void testContinuousPushReplicationGoesIdleTwice() throws Exception {
// /_local/*
// /_revs_diff
// /_bulk_docs
// /_local/*
final int EXPECTED_REQUEST_COUNT = 4;
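// one request per endpoint listed above: the initial checkpoint GET, _revs_diff,
// _bulk_docs, and the final checkpoint PUT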
// make sure we are starting empty
assertEquals(0, database.getLastSequenceNumber());
// 1. Setup MockWebServer
// create mockwebserver and custom dispatcher
MockDispatcher dispatcher = new MockDispatcher();
final MockWebServer server = MockHelper.getMockWebServer(dispatcher);
dispatcher.setServerType(MockDispatcher.ServerType.SYNC_GW);
try {
// checkpoint GET response w/ 404. also receives checkpoint PUT's
MockCheckpointPut mockCheckpointPut = new MockCheckpointPut();
mockCheckpointPut.setSticky(true);
mockCheckpointPut.setDelayMs(500);
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHECKPOINT, mockCheckpointPut);
// _revs_diff response -- everything missing
MockRevsDiff mockRevsDiff = new MockRevsDiff();
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_REVS_DIFF, mockRevsDiff);
// _bulk_docs response -- everything stored
MockBulkDocs mockBulkDocs = new MockBulkDocs();
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_BULK_DOCS, mockBulkDocs);
server.play();
// 2. Create replication
Replication replication = database.createPushReplication(server.getUrl("/db"));
replication.setContinuous(true);
CountDownLatch replicationIdle = new CountDownLatch(1);
ReplicationIdleObserver idleObserver = new ReplicationIdleObserver(replicationIdle);
replication.addChangeListener(idleObserver);
replication.start();
// 3. Wait until idle (make sure replicator becomes IDLE state)
boolean success = replicationIdle.await(30, TimeUnit.SECONDS);
assertTrue(success);
replication.removeChangeListener(idleObserver);
// 4. make sure /_local was called by the replicator after start and before idle
RecordedRequest request1 = dispatcher.takeRequestBlocking(MockHelper.PATH_REGEX_CHECKPOINT);
assertNotNull(request1);
dispatcher.takeRecordedResponseBlocking(request1);
assertEquals(1, server.getRequestCount());
// 5. Add replication change listener for transition to IDLE
class ReplicationTransitionToIdleObserver implements Replication.ChangeListener {
private CountDownLatch doneSignal;
private CountDownLatch checkSignal;
public ReplicationTransitionToIdleObserver(CountDownLatch doneSignal, CountDownLatch checkSignal) {
this.doneSignal = doneSignal;
this.checkSignal = checkSignal;
}
public void changed(Replication.ChangeEvent event) {
Log.w(Log.TAG_SYNC, "[ChangeListener.changed()] event => " + event.toString());
if (event.getTransition() != null) {
if (event.getTransition().getSource() != event.getTransition().getDestination() &&
event.getTransition().getDestination() == ReplicationState.IDLE) {
Log.w(Log.TAG_SYNC, "[ChangeListener.changed()] Transition to IDLE");
Log.w(Log.TAG_SYNC, "[ChangeListener.changed()] Request Count => " + server.getRequestCount());
this.doneSignal.countDown();
// When the replicator enters the IDLE state, check whether all requests have completed.
// assertEquals inside this inner class does not work (presumably because a failure
// would not propagate to the test thread), so signal via the latch instead.
// Note: sometimes server.getRequestCount() returns the expected number - 1;
// possibly a timing issue.
if (EXPECTED_REQUEST_COUNT == server.getRequestCount() ||
EXPECTED_REQUEST_COUNT - 1 == server.getRequestCount()) {
this.checkSignal.countDown();
}
}
}
}
}
CountDownLatch checkStateToIdle = new CountDownLatch(1);
CountDownLatch checkRequestCount = new CountDownLatch(1);
ReplicationTransitionToIdleObserver replicationTransitionToIdleObserver =
new ReplicationTransitionToIdleObserver(checkStateToIdle, checkRequestCount);
replication.addChangeListener(replicationTransitionToIdleObserver);
Log.w(Log.TAG_SYNC, "Added listener for transition to IDLE");
// 6. Add doc(s)
for (int i = 1; i <= 1; i++) {
Map<String, Object> properties1 = new HashMap<String, Object>();
properties1.put("doc" + String.valueOf(i), "testContinuousPushReplicationGoesIdleTooSoon " + String.valueOf(i));
final Document doc = createDocWithProperties(properties1);
}
// 7. Wait until idle (make sure replicator becomes IDLE state from other state)
// NOTE: 12/17/2014 - current code fails here because, after adding the listener, the state never changes from IDLE.
// Fully implementing the state machine for Replication should address this failure.
success = checkStateToIdle.await(20, TimeUnit.SECONDS); // check if state becomes IDLE from other state
assertTrue(success);
success = checkRequestCount.await(20, TimeUnit.SECONDS); // check if request count is 4 when state becomes IDLE
assertTrue(success);
// 8. Make sure the expected requests were made
// _bulk_docs
RecordedRequest request3 = dispatcher.takeRequestBlocking(MockHelper.PATH_REGEX_BULK_DOCS);
assertNotNull(request3);
dispatcher.takeRecordedResponseBlocking(request3);
// double check total request
Log.w(Log.TAG_SYNC, "Total Requested Count before stop replicator => " + server.getRequestCount());
assertTrue(EXPECTED_REQUEST_COUNT == server.getRequestCount() ||
EXPECTED_REQUEST_COUNT - 1 == server.getRequestCount());
// 9. Stop replicator
replication.removeChangeListener(replicationTransitionToIdleObserver);
stopReplication(replication);
} finally {
server.shutdown();
}
}
/**
* https://github.com/couchbase/couchbase-lite-java-core/issues/358
* <p/>
* related: testContinuousPushReplicationGoesIdleTooSoon()
* testContinuousPushReplicationGoesIdle()
* <p/>
* test steps:
* - add N docs
* - start replicator
* - when callback state == idle
* - assert that mock has received N docs
*/
public void failingTestContinuousPushReplicationGoesIdleTooSoon() throws Exception {
// smaller batch size so there are multiple requests to _bulk_docs
int previous = ReplicationInternal.INBOX_CAPACITY;
ReplicationInternal.INBOX_CAPACITY = 5;
int numDocs = ReplicationInternal.INBOX_CAPACITY * 5;
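// with INBOX_CAPACITY = 5, the 25 docs should be spread across roughly five _bulk_docs batches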
// make sure we are starting empty
assertEquals(0, database.getLastSequenceNumber());
// Add doc(s)
// NOTE: more documents cause more HTTP calls; there could be more than 4 of them...
for (int i = 1; i <= numDocs; i++) {
Map<String, Object> properties = new HashMap<String, Object>();
properties.put("doc" + String.valueOf(i), "testContinuousPushReplicationGoesIdleTooSoon " + String.valueOf(i));
final Document doc = createDocWithProperties(properties);
}
// Setup MockWebServer
// create mockwebserver and custom dispatcher
MockDispatcher dispatcher = new MockDispatcher();
final MockWebServer server = MockHelper.getMockWebServer(dispatcher);
dispatcher.setServerType(MockDispatcher.ServerType.SYNC_GW);
try {
// checkpoint GET response w/ 404. also receives checkpoint PUT's
MockCheckpointPut mockCheckpointPut = new MockCheckpointPut();
mockCheckpointPut.setSticky(true);
mockCheckpointPut.setDelayMs(500);
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHECKPOINT, mockCheckpointPut);
// _revs_diff response -- everything missing
MockRevsDiff mockRevsDiff = new MockRevsDiff();
mockRevsDiff.setSticky(true);
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_REVS_DIFF, mockRevsDiff);
// _bulk_docs response -- everything stored
MockBulkDocs mockBulkDocs = new MockBulkDocs();
mockBulkDocs.setSticky(true);
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_BULK_DOCS, mockBulkDocs);
server.play();
// Create replicator
Replication replication = database.createPushReplication(server.getUrl("/db"));
replication.setContinuous(true);
// special change listener for this test case.
class ReplicationTransitionToIdleObserver implements Replication.ChangeListener {
private CountDownLatch enterIdleStateSignal;
public ReplicationTransitionToIdleObserver(CountDownLatch enterIdleStateSignal) {
this.enterIdleStateSignal = enterIdleStateSignal;
}
public void changed(Replication.ChangeEvent event) {
Log.w(Log.TAG_SYNC, "[ChangeListener.changed()] event => " + event.toString());
if (event.getTransition() != null) {
if (event.getTransition().getSource() != event.getTransition().getDestination() &&
event.getTransition().getDestination() == ReplicationState.IDLE) {
Log.w(Log.TAG_SYNC, "[ChangeListener.changed()] Transition to IDLE");
Log.w(Log.TAG_SYNC, "[ChangeListener.changed()] Request Count => " + server.getRequestCount());
this.enterIdleStateSignal.countDown();
}
}
}
}
CountDownLatch enterIdleStateSignal = new CountDownLatch(1);
ReplicationTransitionToIdleObserver replicationTransitionToIdleObserver = new ReplicationTransitionToIdleObserver(enterIdleStateSignal);
replication.addChangeListener(replicationTransitionToIdleObserver);
replication.start();
// Wait until idle (make sure replicator becomes IDLE state from other state)
boolean success = enterIdleStateSignal.await(20, TimeUnit.SECONDS);
assertTrue(success);
// Once the replicator is idle, get a snapshot of all the requests it has made to the _bulk_docs endpoint
int numDocsPushed = 0;
BlockingQueue<RecordedRequest> requests = dispatcher.getRequestQueueSnapshot(MockHelper.PATH_REGEX_BULK_DOCS);
for (RecordedRequest request : requests) {
Log.i(Log.TAG_SYNC, "request: %s", request);
byte[] body = MockHelper.getUncompressedBody(request);
Map<String, Object> jsonMap = MockHelper.getJsonMapFromRequest(body);
List docs = (List) jsonMap.get("docs");
numDocsPushed += docs.size();
}
// WORKAROUND: the CBL Java unit test on Jenkins rarely fails the following assertion.
// TODO: Need to fix: https://github.com/couchbase/couchbase-lite-java-core/issues/446
// A threading issue seems to exist: the replicator becomes IDLE even while tasks remain in the batcher.
if (System.getProperty("java.vm.name").equalsIgnoreCase("Dalvik")) {
// Assert that all docs have already been pushed by the time it goes IDLE
assertEquals(numDocs, numDocsPushed);
}
// Stop replicator and MockWebServer
stopReplication(replication);
// wait until checkpoint is pushed, since it can happen _after_ replication is finished.
// if this isn't done, there can be IOExceptions when calling server.shutdown()
waitForPutCheckpointRequestWithSeq(dispatcher, (int) database.getLastSequenceNumber());
} finally {
server.shutdown();
ReplicationInternal.INBOX_CAPACITY = previous;
}
}
/**
* https://github.com/couchbase/couchbase-lite-java-core/issues/352
* <p/>
* When retrying a replication, make sure to get session & checkpoint.
*/
public void testCheckSessionAndCheckpointWhenRetryingReplication() throws Exception {
int prev_RETRY_DELAY_MS = RemoteRequestRetry.RETRY_DELAY_MS;
int prev_RETRY_DELAY_SECONDS = ReplicationInternal.RETRY_DELAY_SECONDS;
int prev_MAX_RETRIES = ReplicationInternal.MAX_RETRIES;
try {
RemoteRequestRetry.RETRY_DELAY_MS = 5; // speed up test execution (inner loop retry delay)
ReplicationInternal.RETRY_DELAY_SECONDS = 1; // speed up test execution (outer loop retry delay)
ReplicationInternal.MAX_RETRIES = 3; // speed up test execution (outer loop retry count)
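// note: RemoteRequestRetry retries each individual request (the "inner loop"),
// while ReplicationInternal re-runs the replication as a whole (the "outer loop")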
String fakeEmail = "[email protected]";
// create mockwebserver and custom dispatcher
MockDispatcher dispatcher = new MockDispatcher();
MockWebServer server = MockHelper.getMockWebServer(dispatcher);
dispatcher.setServerType(MockDispatcher.ServerType.SYNC_GW);
try {
// set up request
{
// response for /db/_session
MockSessionGet mockSessionGet = new MockSessionGet();
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_SESSION, mockSessionGet.generateMockResponse());
// response for /db/_facebook
MockFacebookAuthPost mockFacebookAuthPost = new MockFacebookAuthPost();
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_FACEBOOK_AUTH, mockFacebookAuthPost.generateMockResponseForSuccess(fakeEmail));
// response for /db/_local/.*
MockCheckpointPut mockCheckpointPut = new MockCheckpointPut();
mockCheckpointPut.setSticky(true);
mockCheckpointPut.setDelayMs(500);
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHECKPOINT, mockCheckpointPut);
// response for /db/_revs_diff
MockRevsDiff mockRevsDiff = new MockRevsDiff();
mockRevsDiff.setSticky(true);
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_REVS_DIFF, mockRevsDiff);
// response for /db/_bulk_docs -- 503 errors
MockResponse mockResponse = new MockResponse().setResponseCode(503);
WrappedSmartMockResponse mockBulkDocs = new WrappedSmartMockResponse(mockResponse, false);
mockBulkDocs.setSticky(true);
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_BULK_DOCS, mockBulkDocs);
}
server.play();
// register bogus fb token
Authenticator facebookAuthenticator = AuthenticatorFactory.createFacebookAuthenticator("fake_access_token");
// create replication
Replication replication = database.createPushReplication(server.getUrl("/db"));
replication.setAuthenticator(facebookAuthenticator);
replication.setContinuous(true);
CountDownLatch replicationIdle = new CountDownLatch(1);
ReplicationIdleObserver idleObserver = new ReplicationIdleObserver(replicationIdle);
replication.addChangeListener(idleObserver);
replication.start();
// wait until idle
boolean success = replicationIdle.await(30, TimeUnit.SECONDS);
assertTrue(success);
replication.removeChangeListener(idleObserver);
// create a doc in local db
Document doc1 = createDocumentForPushReplication("doc1", null, null);
// initial request
{
// check /db/_session
RecordedRequest sessionRequest = dispatcher.takeRequestBlocking(MockHelper.PATH_REGEX_SESSION);
assertNotNull(sessionRequest);
dispatcher.takeRecordedResponseBlocking(sessionRequest);
// check /db/_facebook
RecordedRequest facebookSessionRequest = dispatcher.takeRequestBlocking(MockHelper.PATH_REGEX_FACEBOOK_AUTH);
assertNotNull(facebookSessionRequest);
dispatcher.takeRecordedResponseBlocking(facebookSessionRequest);
// check /db/_local/.*
RecordedRequest checkPointRequest = dispatcher.takeRequestBlocking(MockHelper.PATH_REGEX_CHECKPOINT);
assertNotNull(checkPointRequest);
dispatcher.takeRecordedResponseBlocking(checkPointRequest);
// check /db/_revs_diff
RecordedRequest revsDiffRequest = dispatcher.takeRequestBlocking(MockHelper.PATH_REGEX_REVS_DIFF);
assertNotNull(revsDiffRequest);
dispatcher.takeRecordedResponseBlocking(revsDiffRequest);
// we should expect to see at least numAttempts POSTs to _bulk_docs
// (numAttempts is the number of tries within one outer-loop attempt)
int numAttempts = RemoteRequestRetry.MAX_RETRIES + 1; // total number of attempts = 4 (1 initial + MAX_RETRIES)
for (int i = 0; i < numAttempts; i++) {
RecordedRequest request = dispatcher.takeRequestBlocking(MockHelper.PATH_REGEX_BULK_DOCS);
assertNotNull(request);
dispatcher.takeRecordedResponseBlocking(request);
}
}
// Testing the following requires fixing #299 (improve retry behavior)
// Retry requests
// outer retry loop
for (int j = 0; j < ReplicationInternal.MAX_RETRIES; j++) {
// MockSessionGet does not support isSticky
MockSessionGet mockSessionGet = new MockSessionGet();
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_SESSION, mockSessionGet.generateMockResponse());
// MockFacebookAuthPost does not support isSticky
MockFacebookAuthPost mockFacebookAuthPost = new MockFacebookAuthPost();
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_FACEBOOK_AUTH, mockFacebookAuthPost.generateMockResponseForSuccess(fakeEmail));
// *** Retry must include session & checkpoint ***
// check /db/_session
RecordedRequest sessionRequest = dispatcher.takeRequestBlocking(MockHelper.PATH_REGEX_SESSION);
assertNotNull(sessionRequest);
dispatcher.takeRecordedResponseBlocking(sessionRequest);
// check /db/_facebook
RecordedRequest facebookSessionRequest = dispatcher.takeRequestBlocking(MockHelper.PATH_REGEX_FACEBOOK_AUTH);
assertNotNull(facebookSessionRequest);
dispatcher.takeRecordedResponseBlocking(facebookSessionRequest);
// check /db/_local/.*
RecordedRequest checkPointRequest = dispatcher.takeRequestBlocking(MockHelper.PATH_REGEX_CHECKPOINT);
assertNotNull(checkPointRequest);
dispatcher.takeRecordedResponseBlocking(checkPointRequest);
// check /db/_revs_diff
RecordedRequest revsDiffRequest = dispatcher.takeRequestBlocking(MockHelper.PATH_REGEX_REVS_DIFF);
assertNotNull(revsDiffRequest);
dispatcher.takeRecordedResponseBlocking(revsDiffRequest);
                // we should expect to see numAttempts POSTs to _bulk_docs in this attempt
                // numAttempts is the number of tries within a single outer attempt:
                int numAttempts = RemoteRequestRetry.MAX_RETRIES + 1; // 1 initial try + MAX_RETRIES retries
for (int i = 0; i < numAttempts; i++) {
RecordedRequest request = dispatcher.takeRequestBlocking(MockHelper.PATH_REGEX_BULK_DOCS);
assertNotNull(request);
dispatcher.takeRecordedResponseBlocking(request);
}
}
stopReplication(replication);
} finally {
server.shutdown();
}
} finally {
RemoteRequestRetry.RETRY_DELAY_MS = prev_RETRY_DELAY_MS;
ReplicationInternal.RETRY_DELAY_SECONDS = prev_RETRY_DELAY_SECONDS;
ReplicationInternal.MAX_RETRIES = prev_MAX_RETRIES;
}
}
/**
* https://github.com/couchbase/couchbase-lite-java-core/issues/352
* <p/>
* Makes the replicator stop, even if it is continuous, when it receives a permanent-type error
*/
public void failingTestStopReplicatorWhenRetryingReplicationWithPermanentError() throws Exception {
RemoteRequestRetry.RETRY_DELAY_MS = 5; // speed up test execution (inner loop retry delay)
ReplicationInternal.RETRY_DELAY_SECONDS = 1; // speed up test execution (outer loop retry delay)
ReplicationInternal.MAX_RETRIES = 3; // speed up test execution (outer loop retry count)
// create mockwebserver and custom dispatcher
MockDispatcher dispatcher = new MockDispatcher();
MockWebServer server = MockHelper.getMockWebServer(dispatcher);
dispatcher.setServerType(MockDispatcher.ServerType.SYNC_GW);
// set up request
{
// response for /db/_local/.*
MockCheckpointPut mockCheckpointPut = new MockCheckpointPut();
mockCheckpointPut.setSticky(true);
mockCheckpointPut.setDelayMs(500);
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHECKPOINT, mockCheckpointPut);
// response for /db/_revs_diff
MockRevsDiff mockRevsDiff = new MockRevsDiff();
mockRevsDiff.setSticky(true);
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_REVS_DIFF, mockRevsDiff);
// response for /db/_bulk_docs -- 400 Bad Request (not transient error)
MockResponse mockResponse = new MockResponse().setResponseCode(400);
WrappedSmartMockResponse mockBulkDocs = new WrappedSmartMockResponse(mockResponse, false);
mockBulkDocs.setSticky(true);
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_BULK_DOCS, mockBulkDocs);
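            // NOTE: 400 is treated as a permanent (non-transient) error here, so the
            // replicator is expected to give up after a single _bulk_docs attempt
            // instead of entering a retry loop (asserted below).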
}
server.play();
// create replication
Replication replication = database.createPushReplication(server.getUrl("/db"));
replication.setContinuous(true);
// add replication observer for IDLE state
CountDownLatch replicationIdle = new CountDownLatch(1);
ReplicationIdleObserver idleObserver = new ReplicationIdleObserver(replicationIdle);
replication.addChangeListener(idleObserver);
// add replication observer for finished
CountDownLatch replicationDoneSignal = new CountDownLatch(1);
ReplicationFinishedObserver replicationFinishedObserver = new ReplicationFinishedObserver(replicationDoneSignal);
replication.addChangeListener(replicationFinishedObserver);
replication.start();
// wait until idle
boolean success = replicationIdle.await(30, TimeUnit.SECONDS);
assertTrue(success);
replication.removeChangeListener(idleObserver);
// create a doc in local db
Document doc1 = createDocumentForPushReplication("doc1", null, null);
// initial request
{
// check /db/_local/.*
RecordedRequest checkPointRequest = dispatcher.takeRequestBlocking(MockHelper.PATH_REGEX_CHECKPOINT);
assertNotNull(checkPointRequest);
dispatcher.takeRecordedResponseBlocking(checkPointRequest);
// check /db/_revs_diff
RecordedRequest revsDiffRequest = dispatcher.takeRequestBlocking(MockHelper.PATH_REGEX_REVS_DIFF);
assertNotNull(revsDiffRequest);
dispatcher.takeRecordedResponseBlocking(revsDiffRequest);
// we should observe only one POST to _bulk_docs request because error is not transient error
RecordedRequest request = dispatcher.takeRequestBlocking(MockHelper.PATH_REGEX_BULK_DOCS);
assertNotNull(request);
dispatcher.takeRecordedResponseBlocking(request);
}
        // Without the fix for CBL Java Core #352, the following code hangs.
// wait for replication to finish
boolean didNotTimeOut = replicationDoneSignal.await(180, TimeUnit.SECONDS);
Log.d(TAG, "replicationDoneSignal.await done, didNotTimeOut: " + didNotTimeOut);
assertFalse(replication.isRunning());
server.shutdown();
}
/**
* https://github.com/couchbase/couchbase-lite-java-core/issues/356
*/
public void testReplicationRestartPreservesValues() throws Exception {
// make sure we are starting empty
assertEquals(0, database.getLastSequenceNumber());
// add docs
Map<String, Object> properties1 = new HashMap<String, Object>();
properties1.put("doc1", "testContinuousPushReplicationGoesIdle");
final Document doc1 = createDocWithProperties(properties1);
// create mockwebserver and custom dispatcher
MockDispatcher dispatcher = new MockDispatcher();
MockWebServer server = MockHelper.getMockWebServer(dispatcher);
dispatcher.setServerType(MockDispatcher.ServerType.SYNC_GW);
try {
server.play();
            // checkpoint GET response w/ 404; also receives checkpoint PUTs
MockCheckpointPut mockCheckpointPut = new MockCheckpointPut();
mockCheckpointPut.setSticky(true);
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHECKPOINT, mockCheckpointPut);
// _revs_diff response -- everything missing
MockRevsDiff mockRevsDiff = new MockRevsDiff();
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_REVS_DIFF, mockRevsDiff);
// _bulk_docs response -- everything stored
MockBulkDocs mockBulkDocs = new MockBulkDocs();
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_BULK_DOCS, mockBulkDocs);
            // create continuous replication
Replication pusher = database.createPushReplication(server.getUrl("/db"));
pusher.setContinuous(true);
// add filter properties to the replicator
String filterName = "app/clientIdAndTablesSchemeDocIdFilter";
pusher.setFilter(filterName);
Map<String, Object> filterParams = new HashMap<String, Object>();
String filterParam = "tablesSchemeDocId";
String filterVal = "foo";
filterParams.put(filterParam, filterVal);
pusher.setFilterParams(filterParams);
// doc ids
pusher.setDocIds(Arrays.asList(doc1.getId()));
// custom authenticator
BasicAuthenticator authenticator = new BasicAuthenticator("foo", "bar");
pusher.setAuthenticator(authenticator);
// custom request headers
Map<String, Object> requestHeaders = new HashMap<String, Object>();
requestHeaders.put("foo", "bar");
pusher.setHeaders(requestHeaders);
// create target
pusher.setCreateTarget(true);
// start the continuous replication
CountDownLatch replicationIdleSignal = new CountDownLatch(1);
ReplicationIdleObserver replicationIdleObserver = new ReplicationIdleObserver(replicationIdleSignal);
pusher.addChangeListener(replicationIdleObserver);
pusher.start();
// wait until we get an IDLE event
boolean successful = replicationIdleSignal.await(30, TimeUnit.SECONDS);
assertTrue(successful);
// restart the replication
CountDownLatch replicationIdleSignal2 = new CountDownLatch(1);
ReplicationIdleObserver replicationIdleObserver2 = new ReplicationIdleObserver(replicationIdleSignal2);
pusher.addChangeListener(replicationIdleObserver2);
pusher.restart();
// wait until we get another IDLE event
successful = replicationIdleSignal2.await(30, TimeUnit.SECONDS);
assertTrue(successful);
// verify the restarted replication still has the values we set up earlier
assertEquals(filterName, pusher.getFilter());
            assertEquals(1, pusher.getFilterParams().size());
assertEquals(filterVal, pusher.getFilterParams().get(filterParam));
assertTrue(pusher.isContinuous());
assertEquals(Arrays.asList(doc1.getId()), pusher.getDocIds());
assertEquals(authenticator, pusher.getAuthenticator());
assertEquals(requestHeaders, pusher.getHeaders());
assertTrue(pusher.shouldCreateTarget());
} finally {
server.shutdown();
}
}
/**
* The observed problem:
* <p/>
* - 1. Start continuous pull
* - 2. Wait until it goes IDLE (this works fine)
* - 3. Add a new document directly to the Sync Gateway
* - 4. The continuous pull goes from IDLE -> RUNNING
* - 5. Wait until it goes IDLE again (this doesn't work, it never goes back to IDLE)
* <p/>
* The test case below simulates the above scenario using a mock sync gateway.
* <p/>
* https://github.com/couchbase/couchbase-lite-java-core/issues/383
*/
public void testContinuousPullReplicationGoesIdleTwice() throws Exception {
Log.e(TAG, "TEST START");
// create mockwebserver and custom dispatcher
MockDispatcher dispatcher = new MockDispatcher();
MockWebServer server = MockHelper.getMockWebServer(dispatcher);
dispatcher.setServerType(MockDispatcher.ServerType.SYNC_GW);
try {
// checkpoint PUT or GET response (sticky)
MockCheckpointPut mockCheckpointPut = new MockCheckpointPut();
mockCheckpointPut.setSticky(true);
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHECKPOINT, mockCheckpointPut);
// add non-sticky changes response that returns no changes
// this will cause the pull replicator to go into the IDLE state
MockChangesFeed mockChangesFeed = new MockChangesFeed();
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHANGES, mockChangesFeed.generateMockResponse());
            // add a _changes response that blocks to emulate a server that
            // doesn't have any new changes. while the puller is blocked on this request
            // to the _changes feed, the test will add a new changes listener that waits
            // until it goes into the RUNNING state
MockChangesFeedNoResponse mockChangesFeedNoResponse = new MockChangesFeedNoResponse();
            // A 5 sec delay here seems unnecessary; dropping it shortens the test by 5 sec.
//mockChangesFeedNoResponse.setDelayMs(5 * 1000);
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHANGES, mockChangesFeedNoResponse);
            // 3.
            // after the above changes feed response returns, the next time
            // the puller gets the _changes feed, return a response that there is 1 new doc.
            // this will cause the puller to go from IDLE -> RUNNING
MockDocumentGet.MockDocument mockDoc1 = new MockDocumentGet.MockDocument("doc1", "1-5e38", 1);
mockDoc1.setJsonMap(MockHelper.generateRandomJsonMap());
mockChangesFeed = new MockChangesFeed();
mockChangesFeed.add(new MockChangesFeed.MockChangedDoc(mockDoc1));
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHANGES, mockChangesFeed.generateMockResponse());
// at this point, the mock _changes feed is done simulating new docs on the sync gateway
// since we've done enough to reproduce the problem. so at this point, just make the changes
// feed block for a long time.
MockChangesFeedNoResponse mockChangesFeedNoResponse2 = new MockChangesFeedNoResponse();
mockChangesFeedNoResponse2.setDelayMs(6000 * 1000); // block for > 1hr
mockChangesFeedNoResponse2.setSticky(true); // continue this behavior indefinitely
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHANGES, mockChangesFeedNoResponse2);
// doc1 response
MockDocumentGet mockDocumentGet = new MockDocumentGet(mockDoc1);
dispatcher.enqueueResponse(mockDoc1.getDocPathRegex(), mockDocumentGet.generateMockResponse());
// _revs_diff response -- everything missing
MockRevsDiff mockRevsDiff = new MockRevsDiff();
mockRevsDiff.setSticky(true);
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_REVS_DIFF, mockRevsDiff);
Log.e(TAG, "SERVER START");
server.play();
// create pull replication
final Replication pullReplication = database.createPullReplication(server.getUrl("/db"));
pullReplication.setContinuous(true);
final CountDownLatch enteredIdleState1 = new CountDownLatch(1);
pullReplication.addChangeListener(new Replication.ChangeListener() {
@Override
public void changed(Replication.ChangeEvent event) {
if (event.getSource().getStatus() == Replication.ReplicationStatus.REPLICATION_IDLE) {
Log.e(TAG, "Replication is IDLE 1");
enteredIdleState1.countDown();
pullReplication.removeChangeListener(this);
}
}
});
Log.e(TAG, "REPLICATOR START");
// 1. start pull replication
pullReplication.start();
            // 2. wait until it's IDLE
boolean success = enteredIdleState1.await(30, TimeUnit.SECONDS);
assertTrue(success);
// 3. see server side preparation
            // change listener to see if it's RUNNING
// we can't add this earlier, because the countdown latch would get
// triggered too early (the other approach would be to set the countdown
// latch to a higher number)
final CountDownLatch enteredRunningState = new CountDownLatch(1);
final CountDownLatch enteredIdleState2 = new CountDownLatch(1);
pullReplication.addChangeListener(new Replication.ChangeListener() {
@Override
public void changed(Replication.ChangeEvent event) {
if (event.getSource().getStatus() == Replication.ReplicationStatus.REPLICATION_ACTIVE) {
if (enteredRunningState.getCount() > 0) {
Log.e(TAG, "Replication is RUNNING");
enteredRunningState.countDown();
}
}
                    // second IDLE handler
                    // the IDLE event is handled in this same listener because it can fire
                    // before a separately registered IDLE handler would be attached
else if (event.getSource().getStatus() == Replication.ReplicationStatus.REPLICATION_IDLE) {
if (enteredRunningState.getCount() <= 0 && enteredIdleState2.getCount() > 0) {
Log.e(TAG, "Replication is IDLE 2");
enteredIdleState2.countDown();
}
}
}
});
            // 4. wait until it's RUNNING
Log.e(TAG, "WAIT for RUNNING");
success = enteredRunningState.await(30, TimeUnit.SECONDS);
assertTrue(success);
            // 5. wait until it's IDLE again. before the fix, it would never go IDLE again,
            // so this would time out and the test would fail.
Log.e(TAG, "WAIT for IDLE");
success = enteredIdleState2.await(30, TimeUnit.SECONDS);
assertTrue(success);
Log.e(TAG, "STOP REPLICATOR");
// clean up
stopReplication(pullReplication);
Log.e(TAG, "STOP MOCK SERVER");
} finally {
server.shutdown();
}
Log.e(TAG, "TEST DONE");
}
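    // A minimal sketch (not used by these tests) of the listener pattern the tests above
    // keep writing inline: count down a latch whenever the replication reports a given status.
    private static class StatusLatchObserver implements Replication.ChangeListener {
        private final Replication.ReplicationStatus status;
        private final CountDownLatch latch;
        StatusLatchObserver(Replication.ReplicationStatus status, CountDownLatch latch) {
            this.status = status;
            this.latch = latch;
        }
        @Override
        public void changed(Replication.ChangeEvent event) {
            if (event.getSource().getStatus() == status)
                latch.countDown();
        }
    }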
/**
* Test case that makes sure STOPPED notification is sent only once with continuous pull replication
* https://github.com/couchbase/couchbase-lite-android/issues/442
*/
public void testContinuousPullReplicationSendStoppedOnce() throws Exception {
Log.d(TAG, "TEST START");
// create mockwebserver and custom dispatcher
MockDispatcher dispatcher = new MockDispatcher();
MockWebServer server = MockHelper.getMockWebServer(dispatcher);
dispatcher.setServerType(MockDispatcher.ServerType.SYNC_GW);
try {
// checkpoint PUT or GET response (sticky)
MockCheckpointPut mockCheckpointPut = new MockCheckpointPut();
mockCheckpointPut.setSticky(true);
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHECKPOINT, mockCheckpointPut);
// add non-sticky changes response that returns no changes
// this will cause the pull replicator to go into the IDLE state
MockChangesFeed mockChangesFeed = new MockChangesFeed();
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHANGES, mockChangesFeed.generateMockResponse());
server.play();
// create pull replication
Replication pullReplication = database.createPullReplication(server.getUrl("/db"));
pullReplication.setContinuous(true);
final CountDownLatch enteredIdleState = new CountDownLatch(1);
final CountDownLatch enteredStoppedState = new CountDownLatch(2);
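            // NOTE: the STOPPED latch intentionally starts at 2: a single STOPPED
            // notification leaves the count at 1, while a buggy second notification would
            // drive it to 0 and make the await below succeed (see the assertions there).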
pullReplication.addChangeListener(new Replication.ChangeListener() {
@Override
public void changed(Replication.ChangeEvent event) {
if (event.getSource().getStatus() == Replication.ReplicationStatus.REPLICATION_IDLE) {
Log.d(TAG, "Replication is IDLE");
enteredIdleState.countDown();
} else if (event.getSource().getStatus() == Replication.ReplicationStatus.REPLICATION_STOPPED) {
Log.d(TAG, "Replication is STOPPED");
enteredStoppedState.countDown();
}
}
});
// 1. start pull replication
pullReplication.start();
            // 2. wait until it's IDLE
boolean success = enteredIdleState.await(30, TimeUnit.SECONDS);
assertTrue(success);
// 3. stop pull replication
stopReplication(pullReplication);
            // 4. wait until it's STOPPED
Log.d(TAG, "WAIT for STOPPED");
            //success = enteredStoppedState.await(Replication.DEFAULT_MAX_TIMEOUT_FOR_SHUTDOWN + 30, TimeUnit.SECONDS); // replicator maximum shutdown timeout 60 sec + additional 30 sec for other stuff
            // NOTE: 90 sec (the shutdown timeout plus margin) is too long for a unit test;
            // shortened to 30 sec, then to 15 sec.
            success = enteredStoppedState.await(15, TimeUnit.SECONDS);
// if STOPPED notification was sent twice, enteredStoppedState becomes 0.
assertEquals(1, enteredStoppedState.getCount());
assertFalse(success);
} finally {
Log.d(TAG, "STOP MOCK SERVER");
server.shutdown();
}
Log.d(TAG, "TEST DONE");
}
/**
* Test case that makes sure STOPPED notification is sent only once with one time pull replication
* https://github.com/couchbase/couchbase-lite-android/issues/442
*/
public void testOneTimePullReplicationSendStoppedOnce() throws Exception {
Log.d(TAG, "TEST START");
// create mockwebserver and custom dispatcher
MockDispatcher dispatcher = new MockDispatcher();
MockWebServer server = MockHelper.getMockWebServer(dispatcher);
dispatcher.setServerType(MockDispatcher.ServerType.SYNC_GW);
try {
// checkpoint PUT or GET response (sticky)
MockCheckpointPut mockCheckpointPut = new MockCheckpointPut();
mockCheckpointPut.setSticky(true);
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHECKPOINT, mockCheckpointPut);
// add non-sticky changes response that returns no changes
// this will cause the pull replicator to go into the IDLE state
MockChangesFeed mockChangesFeed = new MockChangesFeed();
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHANGES, mockChangesFeed.generateMockResponse());
server.play();
// create pull replication
Replication pullReplication = database.createPullReplication(server.getUrl("/db"));
pullReplication.setContinuous(false);
// handle STOPPED notification
final CountDownLatch enteredStoppedState = new CountDownLatch(2);
pullReplication.addChangeListener(new Replication.ChangeListener() {
@Override
public void changed(Replication.ChangeEvent event) {
if (event.getSource().getStatus() == Replication.ReplicationStatus.REPLICATION_STOPPED &&
event.getTransition().getDestination() == ReplicationState.STOPPED) {
Log.d(TAG, "Replication is STOPPED");
enteredStoppedState.countDown();
}
}
});
// 1. start pull replication
pullReplication.start();
            // 2. wait until it's STOPPED
Log.d(TAG, "WAIT for STOPPED");
boolean success = enteredStoppedState.await(15, TimeUnit.SECONDS);
// if STOPPED notification was sent twice, enteredStoppedState becomes 0.
assertEquals(1, enteredStoppedState.getCount());
assertFalse(success);
} finally {
Log.d(TAG, "STOP MOCK SERVER");
server.shutdown();
}
Log.d(TAG, "TEST DONE");
}
/**
     * Issue: Pull Replicator does not send IDLE state after checkpoint
* https://github.com/couchbase/couchbase-lite-java-core/issues/389
* <p/>
* 1. Wait till pull replicator becomes IDLE state
* 2. Update change event handler for handling ACTIVE and IDLE
* 3. Create document into local db
* 4. Based on local doc information, prepare mock change response for 1st /_changes request
* 5. Prepare next mock change response for 2nd /_changes request (blocking for while)
* 6. wait for Replication IDLE -> ACTIVE -> IDLE
*/
public void testPullReplicatonSendIdleStateAfterCheckPoint() throws Exception {
Log.d(TAG, "TEST START");
// create mockwebserver and custom dispatcher
MockDispatcher dispatcher = new MockDispatcher();
MockWebServer server = MockHelper.getMockWebServer(dispatcher);
dispatcher.setServerType(MockDispatcher.ServerType.SYNC_GW);
try {
// checkpoint PUT or GET response (sticky) (for both push and pull)
MockCheckpointPut mockCheckpointPut = new MockCheckpointPut();
mockCheckpointPut.setSticky(true);
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHECKPOINT, mockCheckpointPut);
// add non-sticky changes response that returns no changes (for pull)
// this will cause the pull replicator to go into the IDLE state
MockChangesFeed mockChangesFeedEmpty = new MockChangesFeed();
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHANGES, mockChangesFeedEmpty.generateMockResponse());
// start mock server
server.play();
// create pull replication
Replication pullReplication = database.createPullReplication(server.getUrl("/db"));
pullReplication.setContinuous(true);
// handler to wait for IDLE
final CountDownLatch pullInitialIdleState = new CountDownLatch(1);
pullReplication.addChangeListener(new Replication.ChangeListener() {
@Override
public void changed(Replication.ChangeEvent event) {
if (event.getSource().getStatus() == Replication.ReplicationStatus.REPLICATION_IDLE) {
pullInitialIdleState.countDown();
}
}
});
            // start pull replication
pullReplication.start();
// 1. Wait till replicator becomes IDLE
boolean success = pullInitialIdleState.await(30, TimeUnit.SECONDS);
assertTrue(success);
// clear out existing queued mock responses to make room for new ones
dispatcher.clearQueuedResponse(MockHelper.PATH_REGEX_CHANGES);
// 2. Update change event handler for handling ACTIVE and IDLE
final CountDownLatch activeSignal = new CountDownLatch(1);
final CountDownLatch idleSignal = new CountDownLatch(1);
pullReplication.addChangeListener(new Replication.ChangeListener() {
@Override
public void changed(Replication.ChangeEvent event) {
Log.e(TAG, "[changed] PULL -> " + event);
if (event.getSource().getStatus() == Replication.ReplicationStatus.REPLICATION_IDLE) {
// make sure pull replicator becomes IDLE after ACTIVE state.
// so ignore any IDLE state before ACTIVE.
if (activeSignal.getCount() == 0) {
idleSignal.countDown();
}
} else if (event.getSource().getStatus() == Replication.ReplicationStatus.REPLICATION_ACTIVE) {
activeSignal.countDown();
}
}
});
// 3. Create document into local db
Document doc = database.createDocument();
Map<String, Object> props = new HashMap<String, Object>();
props.put("key", "1");
doc.putProperties(props);
// 4. Based on local doc information, prepare mock change response for 1st /_changes request
String docId = doc.getId();
String revId = doc.getCurrentRevisionId();
int lastSeq = (int) database.getLastSequenceNumber();
MockDocumentGet.MockDocument mockDocument1 = new MockDocumentGet.MockDocument(docId, revId, lastSeq + 1);
mockDocument1.setJsonMap(MockHelper.generateRandomJsonMap());
MockChangesFeed mockChangesFeed = new MockChangesFeed();
mockChangesFeed.add(new MockChangesFeed.MockChangedDoc(mockDocument1));
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHANGES, mockChangesFeed.generateMockResponse());
// 5. Prepare next mock change response for 2nd /_changes request (blocking for while)
MockChangesFeedNoResponse mockChangesFeedNoResponse2 = new MockChangesFeedNoResponse();
mockChangesFeedNoResponse2.setDelayMs(60 * 1000);
mockChangesFeedNoResponse2.setSticky(true);
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHANGES, mockChangesFeedNoResponse2);
// 6. wait for Replication IDLE -> ACTIVE -> IDLE
success = activeSignal.await(30, TimeUnit.SECONDS);
assertTrue(success);
success = idleSignal.await(30, TimeUnit.SECONDS);
assertTrue(success);
// stop pull replication
stopReplication(pullReplication);
} finally {
server.shutdown();
}
Log.d(TAG, "TEST DONE");
}
/**
* Sync (pull replication) fails on document with a lot of revisions and attachments
* https://github.com/couchbase/couchbase-lite-java-core/issues/415
*/
public void testPullReplicatonWithManyAttachmentRevisions() throws Exception {
Log.d(TAG, "TEST START: testPullReplicatonWithManyAttachmentRevisions()");
String docID = "11111";
String key = "key";
String value = "one-one-one-one";
String attachmentName = "attachment.png";
// create initial document (Revision 1-xxxx)
Map<String, Object> props1 = new HashMap<String, Object>();
props1.put("_id", docID);
props1.put(key, value);
RevisionInternal rev = new RevisionInternal(props1);
Status status = new Status();
RevisionInternal savedRev = database.putRevision(rev, null, false, status);
String rev1ID = savedRev.getRevID();
// add attachment to doc (Revision 2-xxxx)
Document doc = database.getDocument(docID);
UnsavedRevision newRev = doc.createRevision();
InputStream attachmentStream = getAsset(attachmentName);
newRev.setAttachment(attachmentName, "image/png", attachmentStream);
SavedRevision saved = newRev.save(true);
String rev2ID = doc.getCurrentRevisionId();
Log.w(TAG, "saved => " + saved);
Log.w(TAG, "revID => " + doc.getCurrentRevisionId());
        // Create two more revision generations (3-xxxx and 4-xxxx): each gets one winning
        // revision plus 49 forced conflicts
int j = 3;
for (; j < 5; j++) {
// Create a conflict, won by the new revision:
Map<String, Object> props = new HashMap<String, Object>();
props.put("_id", docID);
props.put("_rev", j + "-0000");
props.put(key, value);
RevisionInternal leaf = new RevisionInternal(props);
database.forceInsert(leaf, new ArrayList<String>(), null);
Log.w(TAG, "revID => " + doc.getCurrentRevisionId());
for (int i = 0; i < 49; i++) {
// Create a conflict, won by the new revision:
Map<String, Object> props_conflict = new HashMap<String, Object>();
props_conflict.put("_id", docID);
String revStr = String.format("%d-%04d", j, i);
props_conflict.put("_rev", revStr);
props_conflict.put(key, value);
// attachment
byte[] attach1 = "This is the body of attach1".getBytes();
String base64 = Base64.encodeBytes(attach1);
Map<String, Object> attachment = new HashMap<String, Object>();
attachment.put("content_type", "text/plain");
attachment.put("data", base64);
Map<String, Object> attachmentDict = new HashMap<String, Object>();
attachmentDict.put("test_attachment", attachment);
props_conflict.put("_attachments", attachmentDict);
// end of attachment
RevisionInternal leaf_conflict = new RevisionInternal(props_conflict);
List<String> revHistory = new ArrayList<String>();
revHistory.add(leaf_conflict.getRevID());
for (int k = j - 1; k > 2; k--) {
revHistory.add(String.format("%d-0000", k));
}
revHistory.add(rev2ID);
revHistory.add(rev1ID);
database.forceInsert(leaf_conflict, revHistory, null);
Log.w(TAG, "revID => " + doc.getCurrentRevisionId());
}
}
String docId = doc.getId();
String revId = j + "-00";
int lastSeq = (int) database.getLastSequenceNumber();
// create mockwebserver and custom dispatcher
MockDispatcher dispatcher = new MockDispatcher();
MockWebServer server = MockHelper.getMockWebServer(dispatcher);
dispatcher.setServerType(MockDispatcher.ServerType.SYNC_GW);
try {
// checkpoint PUT or GET response (sticky) (for both push and pull)
MockCheckpointPut mockCheckpointPut = new MockCheckpointPut();
mockCheckpointPut.setSticky(true);
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHECKPOINT, mockCheckpointPut);
MockChangesFeed mockChangesFeedEmpty = new MockChangesFeed();
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHANGES, mockChangesFeedEmpty.generateMockResponse());
// start mock server
server.play();
// create pull replication
Replication pullReplication = database.createPullReplication(server.getUrl("/db"));
pullReplication.setContinuous(true);
final CountDownLatch idleSignal1 = new CountDownLatch(1);
final CountDownLatch idleSignal2 = new CountDownLatch(2);
pullReplication.addChangeListener(new Replication.ChangeListener() {
@Override
public void changed(Replication.ChangeEvent event) {
Log.e(TAG, event.toString());
if (event.getError() != null) {
Assert.fail("Should not have any error....");
}
if (event.getSource().getStatus() == Replication.ReplicationStatus.REPLICATION_IDLE) {
idleSignal1.countDown();
idleSignal2.countDown();
}
}
});
// start pull replication
pullReplication.start();
boolean success = idleSignal1.await(30, TimeUnit.SECONDS);
assertTrue(success);
//
MockDocumentGet.MockDocument mockDocument1 = new MockDocumentGet.MockDocument(docId, revId, lastSeq + 1);
mockDocument1.setJsonMap(MockHelper.generateRandomJsonMap());
MockChangesFeed mockChangesFeed = new MockChangesFeed();
mockChangesFeed.add(new MockChangesFeed.MockChangedDoc(mockDocument1));
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHANGES, mockChangesFeed.generateMockResponse());
// doc response
MockDocumentGet mockDocumentGet = new MockDocumentGet(mockDocument1);
dispatcher.enqueueResponse(mockDocument1.getDocPathRegex(), mockDocumentGet.generateMockResponse());
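            // the puller lists ancestor revisions it already has in the atts_since query
            // parameter; with a revision history this deep, the list has to be capped,
            // which is what PullerInternal.MAX_NUMBER_OF_ATTS_SINCE enforces (asserted below).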
// check /db/docid?...
RecordedRequest request = dispatcher.takeRequestBlocking(mockDocument1.getDocPathRegex(), 30 * 1000);
Log.e(TAG, request.toString());
Map<String, String> queries = query2map(request.getPath());
String atts_since = URLDecoder.decode(queries.get("atts_since"), "UTF-8");
List<String> json = (List<String>) str2json(atts_since);
Log.e(TAG, json.toString());
assertNotNull(json);
            // the atts_since parameter should be limited to PullerInternal.MAX_NUMBER_OF_ATTS_SINCE entries
            assertEquals(PullerInternal.MAX_NUMBER_OF_ATTS_SINCE, json.size());
boolean success2 = idleSignal2.await(30, TimeUnit.SECONDS);
assertTrue(success2);
// stop pull replication
stopReplication(pullReplication);
} finally {
server.shutdown();
}
Log.d(TAG, "TEST END: testPullReplicatonWithManyAttachmentRevisions()");
}
public static Object str2json(String value) {
Object result = null;
try {
result = Manager.getObjectMapper().readValue(value, Object.class);
} catch (Exception e) {
Log.w("Unable to parse JSON Query", e);
}
return result;
}
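    // NOTE: values returned by query2map() are not URL-decoded; callers decode individual
    // parameters themselves (see the atts_since handling above).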
public static Map<String, String> query2map(String queryString) {
Map<String, String> queries = new HashMap<String, String>();
for (String component : queryString.split("&")) {
int location = component.indexOf('=');
if (location > 0) {
String key = component.substring(0, location);
String value = component.substring(location + 1);
queries.put(key, value);
}
}
return queries;
}
class CustomMultipartReaderDelegate implements MultipartReaderDelegate {
public Map<String, String> headers = null;
public byte[] data = null;
public boolean gzipped = false;
public boolean bJson = false;
@Override
public void startedPart(Map<String, String> headers) {
gzipped = headers.get("Content-Encoding") != null && headers.get("Content-Encoding").contains("gzip");
bJson = headers.get("Content-Type") != null && headers.get("Content-Type").contains("application/json");
}
@Override
public void appendToPart(byte[] data) {
if (gzipped && bJson) {
this.data = Utils.decompressByGzip(data);
} else if (bJson) {
this.data = data;
}
}
@Override
public void appendToPart(final byte[] data, int off, int len) {
            byte[] b = Arrays.copyOfRange(data, off, off + len);
appendToPart(b);
}
@Override
public void finishedPart() {
}
}
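    // Illustrative sketch only: roughly what a gzip helper like Utils.decompressByGzip
    // presumably does under the hood, using plain JDK classes. This method is not called
    // by the tests; it is here to clarify the gzipped-part handling in the delegate above.
    private static byte[] gunzipSketch(byte[] compressed) {
        try {
            java.util.zip.GZIPInputStream in =
                    new java.util.zip.GZIPInputStream(new java.io.ByteArrayInputStream(compressed));
            java.io.ByteArrayOutputStream out = new java.io.ByteArrayOutputStream();
            byte[] buffer = new byte[1024];
            int n;
            while ((n = in.read(buffer)) != -1)
                out.write(buffer, 0, n);
            in.close();
            return out.toByteArray();
        } catch (java.io.IOException e) {
            return null; // decompression failed
        }
    }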
/**
* Push Replication, never receive REPLICATION_ACTIVE status
* https://github.com/couchbase/couchbase-lite-android/issues/451
*/
public void testPushReplActiveState() throws Exception {
Log.d(TAG, "TEST START: testPushReplActiveState()");
// make sure we are starting empty
assertEquals(0, database.getLastSequenceNumber());
// create mockwebserver and custom dispatcher
MockDispatcher dispatcher = new MockDispatcher();
MockWebServer server = MockHelper.getMockWebServer(dispatcher);
dispatcher.setServerType(MockDispatcher.ServerType.SYNC_GW);
try {
server.play();
            // checkpoint GET response w/ 404; also receives checkpoint PUTs
MockCheckpointPut mockCheckpointPut = new MockCheckpointPut();
mockCheckpointPut.setSticky(true);
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHECKPOINT, mockCheckpointPut);
// _revs_diff response -- everything missing
MockRevsDiff mockRevsDiff = new MockRevsDiff();
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_REVS_DIFF, mockRevsDiff);
// _bulk_docs response -- everything stored
MockBulkDocs mockBulkDocs = new MockBulkDocs();
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_BULK_DOCS, mockBulkDocs);
//
            Replication pushReplication = database.createPushReplication(server.getUrl("/db"));
            pushReplication.setContinuous(true);
            final String checkpointId = pushReplication.remoteCheckpointDocID(); // save the checkpoint id for later usage
            // Event handler for IDLE
            CountDownLatch idleSignal = new CountDownLatch(1);
            ReplicationIdleObserver idleObserver = new ReplicationIdleObserver(idleSignal);
            pushReplication.addChangeListener(idleObserver);
            // start the continuous replication
            pushReplication.start();
            // wait until we get an IDLE event
            boolean successful = idleSignal.await(30, TimeUnit.SECONDS);
            assertTrue(successful);
            pushReplication.removeChangeListener(idleObserver);
            // Event handler for ACTIVE
            CountDownLatch activeSignal = new CountDownLatch(1);
            ReplicationActiveObserver activeObserver = new ReplicationActiveObserver(activeSignal);
            pushReplication.addChangeListener(activeObserver);
            // Event handler for IDLE2
            CountDownLatch idleSignal2 = new CountDownLatch(1);
            ReplicationIdleObserver idleObserver2 = new ReplicationIdleObserver(idleSignal2);
            pushReplication.addChangeListener(idleObserver2);
            // add docs
            Map<String, Object> properties1 = new HashMap<String, Object>();
            properties1.put("doc1", "testPushReplActiveState");
            final Document doc1 = createDocWithProperties(properties1);
            // wait until we get an ACTIVE event
            successful = activeSignal.await(30, TimeUnit.SECONDS);
            assertTrue(successful);
            pushReplication.removeChangeListener(activeObserver);
            // check _bulk_docs
            RecordedRequest request = dispatcher.takeRequestBlocking(MockHelper.PATH_REGEX_BULK_DOCS);
            assertNotNull(request);
            assertTrue(MockHelper.getUtf8Body(request).contains("testPushReplActiveState"));
            // wait until we get an IDLE event
            successful = idleSignal2.await(30, TimeUnit.SECONDS);
            assertTrue(successful);
            pushReplication.removeChangeListener(idleObserver2);
            // stop push replication
            stopReplication(pushReplication);
} finally {
server.shutdown();
}
Log.d(TAG, "TEST END: testPushReplActiveState()");
}
/**
* Error after close DB client
* https://github.com/couchbase/couchbase-lite-java/issues/52
*/
public void testStop() throws Exception {
Log.d(Log.TAG, "START testStop()");
boolean success = false;
// create mock server
MockDispatcher dispatcher = new MockDispatcher();
dispatcher.setServerType(MockDispatcher.ServerType.SYNC_GW);
MockWebServer server = new MockWebServer();
server.setDispatcher(dispatcher);
try {
server.play();
// checkpoint PUT or GET response (sticky) (for both push and pull)
MockCheckpointPut mockCheckpointPut = new MockCheckpointPut();
mockCheckpointPut.setSticky(true);
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHECKPOINT, mockCheckpointPut);
// create pull replication & start it
Replication pull = database.createPullReplication(server.getUrl("/db"));
pull.setContinuous(true);
final CountDownLatch pullIdleState = new CountDownLatch(1);
ReplicationIdleObserver pullIdleObserver = new ReplicationIdleObserver(pullIdleState);
pull.addChangeListener(pullIdleObserver);
pull.start();
// create push replication & start it
            Replication push = database.createPushReplication(server.getUrl("/db"));
push.setContinuous(true);
final CountDownLatch pushIdleState = new CountDownLatch(1);
ReplicationIdleObserver pushIdleObserver = new ReplicationIdleObserver(pushIdleState);
push.addChangeListener(pushIdleObserver);
push.start();
// wait till both push and pull replicators become idle.
success = pullIdleState.await(30, TimeUnit.SECONDS);
assertTrue(success);
pull.removeChangeListener(pullIdleObserver);
success = pushIdleState.await(30, TimeUnit.SECONDS);
assertTrue(success);
push.removeChangeListener(pushIdleObserver);
// stop both pull and push replicators
stopReplication(pull);
stopReplication(push);
            boolean observedCBLRequestWorker = false;
            // First, give threads 5 sec to finish shutting down.
            try {
                Thread.sleep(5 * 1000);
            } catch (Exception e) {
            }
            // all threads which are associated with replicators should be terminated.
            Set<Thread> threadSet = Thread.getAllStackTraces().keySet();
            for (Thread t : threadSet) {
                if (t.isAlive() && t.getName().indexOf("CBLRequestWorker") != -1) {
                    observedCBLRequestWorker = true;
                    break;
                }
            }
            // second attempt: if a CBLRequestWorker thread is still observed, it is an error
if (observedCBLRequestWorker) {
                // give threads another 10 sec to finish shutting down.
try {
Thread.sleep(10 * 1000);
} catch (Exception e) {
}
// all threads which are associated with replicators should be terminated.
Set<Thread> threadSet2 = Thread.getAllStackTraces().keySet();
for (Thread t : threadSet2) {
if (t.isAlive()) {
assertEquals(-1, t.getName().indexOf("CBLRequestWorker"));
}
}
}
} finally {
// shutdown mock server
server.shutdown();
}
Log.d(Log.TAG, "END testStop()");
}
/**
* http://developer.couchbase.com/mobile/develop/references/couchbase-lite/couchbase-lite/replication/replication/index.html#mapstring-string-filterparams--get-set-
* <p/>
* Params passed in filtered push throw a null exception in the filter function
* https://github.com/couchbase/couchbase-lite-java-core/issues/533
*/
public void testSetFilterParams() throws CouchbaseLiteException, IOException, InterruptedException {
// make sure we are starting empty
assertEquals(0, database.getLastSequenceNumber());
// create mockwebserver and custom dispatcher
MockDispatcher dispatcher = new MockDispatcher();
MockWebServer server = MockHelper.getMockWebServer(dispatcher);
dispatcher.setServerType(MockDispatcher.ServerType.SYNC_GW);
try {
server.play();
            // checkpoint GET response w/ 404; also receives checkpoint PUTs
MockCheckpointPut mockCheckpointPut = new MockCheckpointPut();
mockCheckpointPut.setSticky(true);
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHECKPOINT, mockCheckpointPut);
// _revs_diff response -- everything missing
MockRevsDiff mockRevsDiff = new MockRevsDiff();
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_REVS_DIFF, mockRevsDiff);
// _bulk_docs response -- everything stored
MockBulkDocs mockBulkDocs = new MockBulkDocs();
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_BULK_DOCS, mockBulkDocs);
// create 10 documents and delete 5
            for (int i = 0; i < 10; i++) {
                Document doc = createDocument(i, i % 2 == 0);
                if (i % 2 == 0) {
                    try {
                        doc.delete();
                    } catch (CouchbaseLiteException e) {
                        e.printStackTrace();
                    }
                }
            }
final CountDownLatch latch = new CountDownLatch(10);
final CountDownLatch check = new CountDownLatch(10);
database.setFilter("unDeleted", new ReplicationFilter() {
@Override
public boolean filter(SavedRevision savedRevision, Map<String, Object> params) {
if (params == null || !"hello".equals(params.get("name"))) {
check.countDown();
}
latch.countDown();
return !savedRevision.isDeletion();
}
});
Replication pushReplication = database.createPushReplication(server.getUrl("/db"));
pushReplication.setContinuous(false);
pushReplication.setFilter("unDeleted");
pushReplication.setFilterParams(Collections.<String, Object>singletonMap("name", "hello"));
pushReplication.start();
boolean success = latch.await(30, TimeUnit.SECONDS);
assertTrue(success);
assertEquals(10, check.getCount());
} finally {
server.shutdown();
}
}
private Document createDocument(int number, boolean flag) {
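        // NOTE: the 'Z' in this pattern is a quoted literal and the formatter uses the
        // device's default time zone, so created_at only looks like a UTC timestamp.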
SimpleDateFormat dateFormatter = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss.SSS'Z'");
Calendar calendar = GregorianCalendar.getInstance();
String currentTimeString = dateFormatter.format(calendar.getTime());
Map<String, Object> properties = new HashMap<String, Object>();
properties.put("type", "test_doc");
properties.put("created_at", currentTimeString);
        if (flag) {
properties.put("name", "Waldo");
}
Document document = database.getDocument(String.valueOf(number));
try {
document.putProperties(properties);
} catch (CouchbaseLiteException e) {
e.printStackTrace();
}
return document;
}
/**
* https://github.com/couchbase/couchbase-lite-java-core/issues/575
*/
public void testRestartWithStoppedReplicator() throws Exception {
MockDispatcher dispatcher = new MockDispatcher();
dispatcher.setServerType(MockDispatcher.ServerType.COUCHDB);
MockWebServer server = MockHelper.getPreloadedPullTargetMockCouchDB(dispatcher, 0, 0);
try {
server.play();
// run pull replication
Replication pullReplication = database.createPullReplication(server.getUrl("/db"));
pullReplication.setContinuous(true);
            // the replication should go idle twice, hence the second latch counts down from 2
final CountDownLatch replicationIdleFirstTime = new CountDownLatch(1);
final CountDownLatch replicationIdleSecondTime = new CountDownLatch(2);
final CountDownLatch replicationStoppedFirstTime = new CountDownLatch(1);
pullReplication.addChangeListener(new Replication.ChangeListener() {
@Override
public void changed(Replication.ChangeEvent event) {
if (event.getTransition() != null && event.getTransition().getDestination() == ReplicationState.IDLE) {
Log.e(Log.TAG, "IDLE");
replicationIdleFirstTime.countDown();
replicationIdleSecondTime.countDown();
} else if (event.getTransition() != null && event.getTransition().getDestination() == ReplicationState.STOPPED) {
Log.e(Log.TAG, "STOPPED");
replicationStoppedFirstTime.countDown();
}
}
});
pullReplication.start();
// wait until replication goes idle
boolean success = replicationIdleFirstTime.await(60, TimeUnit.SECONDS);
assertTrue(success);
pullReplication.stop();
            // wait until replication stops
success = replicationStoppedFirstTime.await(60, TimeUnit.SECONDS);
assertTrue(success);
pullReplication.restart();
// wait until replication goes idle again
success = replicationIdleSecondTime.await(60, TimeUnit.SECONDS);
assertTrue(success);
stopReplication(pullReplication);
} finally {
// cleanup / shutdown
server.shutdown();
}
}
/**
* https://github.com/couchbase/couchbase-lite-java-core/issues/696
* in Unit-Tests/Replication_Tests.m
* - (void)test18_PendingDocumentIDs
*/
public void test18_PendingDocumentIDs() throws Exception {
// create mockwebserver and custom dispatcher
MockDispatcher dispatcher = new MockDispatcher();
MockWebServer server = MockHelper.getMockWebServer(dispatcher);
dispatcher.setServerType(MockDispatcher.ServerType.SYNC_GW);
server.setDispatcher(dispatcher);
try {
server.play();
// checkpoint GET response w/ 404 + respond to all PUT Checkpoint requests
MockCheckpointPut mockCheckpointPut = new MockCheckpointPut();
mockCheckpointPut.setSticky(true);
mockCheckpointPut.setDelayMs(50);
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHECKPOINT, mockCheckpointPut);
// _revs_diff response -- everything missing
MockRevsDiff mockRevsDiff = new MockRevsDiff();
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_REVS_DIFF, mockRevsDiff);
// _bulk_docs response -- everything stored
MockBulkDocs mockBulkDocs = new MockBulkDocs();
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_BULK_DOCS, mockBulkDocs);
Replication repl = database.createPushReplication(server.getUrl("/db"));
assertNotNull(repl.getPendingDocumentIDs());
assertEquals(0, repl.getPendingDocumentIDs().size());
assertTrue(database.runInTransaction(
new TransactionalTask() {
@Override
public boolean run() {
for (int i = 1; i <= 10; i++) {
Document doc = database.getDocument(String.format("doc-%d", i));
Map<String, Object> props = new HashMap<String, Object>();
props.put("index", i);
props.put("bar", false);
try {
doc.putProperties(props);
} catch (CouchbaseLiteException e) {
fail(e.getMessage());
}
}
return true;
}
}
));
assertEquals(10, repl.getPendingDocumentIDs().size());
assertTrue(repl.isDocumentPending(database.getDocument("doc-1")));
runReplication(repl);
assertNotNull(repl.getPendingDocumentIDs());
assertEquals(0, repl.getPendingDocumentIDs().size());
assertFalse(repl.isDocumentPending(database.getDocument("doc-1")));
assertTrue(database.runInTransaction(
new TransactionalTask() {
@Override
public boolean run() {
for (int i = 11; i <= 20; i++) {
Document doc = database.getDocument(String.format("doc-%d", i));
Map<String, Object> props = new HashMap<String, Object>();
props.put("index", i);
props.put("bar", false);
try {
doc.putProperties(props);
} catch (CouchbaseLiteException e) {
fail(e.getMessage());
}
}
return true;
}
}
));
repl = database.createPushReplication(server.getUrl("/db"));
assertNotNull(repl.getPendingDocumentIDs());
assertEquals(10, repl.getPendingDocumentIDs().size());
assertTrue(repl.isDocumentPending(database.getDocument("doc-11")));
assertFalse(repl.isDocumentPending(database.getDocument("doc-1")));
// pull replication
repl = database.createPullReplication(server.getUrl("/db"));
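            // pending document IDs are a push-only concept, so pull replications report null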
assertNull(repl.getPendingDocumentIDs());
runReplication(repl);
assertNull(repl.getPendingDocumentIDs());
} finally {
// cleanup / shutdown
server.shutdown();
}
}
/**
* https://github.com/couchbase/couchbase-lite-java-core/issues/328
* <p/>
* Without bug fix, we observe extra PUT /{db}/_local/xxx for each _bulk_docs request
* <p/>
* 1. Create 200 docs
* 2. Start push replicator
* 3. GET /{db}/_local/xxx
     * 4. POST /{db}/_revs_diff x 2
     * 5. POST /{db}/_bulk_docs x 2
* 6. PUT /{db}/_local/xxx
*/
public void testExcessiveCheckpointingDuringPushReplication() throws Exception {
final int NUM_DOCS = 199;
List<Document> docs = new ArrayList<Document>();
// 1. Add more than 100 docs, as chunk size is 100
for (int i = 0; i < NUM_DOCS; i++) {
Map<String, Object> properties = new HashMap<String, Object>();
properties.put("testExcessiveCheckpointingDuringPushReplication", String.valueOf(i));
Document doc = createDocumentWithProperties(database, properties);
docs.add(doc);
}
// create mock server
MockDispatcher dispatcher = new MockDispatcher();
MockWebServer server = new MockWebServer();
server.setDispatcher(dispatcher);
try {
server.play();
// checkpoint GET response -> error
// _revs_diff response -- everything missing
MockRevsDiff mockRevsDiff = new MockRevsDiff();
mockRevsDiff.setSticky(true);
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_REVS_DIFF, mockRevsDiff);
// _bulk_docs response -- everything stored
MockBulkDocs mockBulkDocs = new MockBulkDocs();
mockBulkDocs.setSticky(true);
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_BULK_DOCS, mockBulkDocs);
// checkpoint PUT response (sticky)
MockCheckpointPut mockCheckpointPut = new MockCheckpointPut();
mockCheckpointPut.setSticky(true);
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHECKPOINT, mockCheckpointPut);
// 2. Kick off continuous push replication
Replication replicator = database.createPushReplication(server.getUrl("/db"));
replicator.setContinuous(true);
CountDownLatch replicationIdleSignal = new CountDownLatch(1);
ReplicationIdleObserver replicationIdleObserver = new ReplicationIdleObserver(replicationIdleSignal);
replicator.addChangeListener(replicationIdleObserver);
replicator.start();
// 3. Wait for document to be pushed
            // NOTE: (Not 100% reproducible) With CBL Java on Jenkins (a very slow environment),
            // the replicator can become IDLE between batches, after the first 100 docs are pushed.
            // TODO: Need to investigate
// wait until replication goes idle
boolean successful = replicationIdleSignal.await(60, TimeUnit.SECONDS);
assertTrue(successful);
// wait until mock server gets the checkpoint PUT request
boolean foundCheckpointPut = false;
String expectedLastSequence = String.valueOf(NUM_DOCS);
while (!foundCheckpointPut) {
RecordedRequest request = dispatcher.takeRequestBlocking(MockHelper.PATH_REGEX_CHECKPOINT);
if (request.getMethod().equals("PUT")) {
foundCheckpointPut = true;
String body = request.getUtf8Body();
Log.e("testExcessiveCheckpointingDuringPushReplication", "body => " + body);
                    // TODO: this is not valid if the device cannot handle all replication data at once
if (System.getProperty("java.vm.name").equalsIgnoreCase("Dalvik")) {
assertTrue(body.indexOf(expectedLastSequence) != -1);
}
// wait until mock server responds to the checkpoint PUT request
dispatcher.takeRecordedResponseBlocking(request);
}
}
// make some assertions about the outgoing _bulk_docs requests
RecordedRequest bulkDocsRequest1 = dispatcher.takeRequest(MockHelper.PATH_REGEX_BULK_DOCS);
assertNotNull(bulkDocsRequest1);
if (System.getProperty("java.vm.name").equalsIgnoreCase("Dalvik")) {
RecordedRequest bulkDocsRequest2 = dispatcher.takeRequest(MockHelper.PATH_REGEX_BULK_DOCS);
assertNotNull(bulkDocsRequest2);
                // TODO: this is not valid if the device cannot handle all replication data at once
                // order may not be guaranteed
assertTrue(isBulkDocJsonContainsDoc(bulkDocsRequest1, docs.get(0)) || isBulkDocJsonContainsDoc(bulkDocsRequest2, docs.get(0)));
assertTrue(isBulkDocJsonContainsDoc(bulkDocsRequest1, docs.get(100)) || isBulkDocJsonContainsDoc(bulkDocsRequest2, docs.get(100)));
}
// check if Android CBL client sent only one PUT /{db}/_local/xxxx request
            // the previous check already consumed this request, so the queue size should be 0.
BlockingQueue<RecordedRequest> queue = dispatcher.getRequestQueueSnapshot(MockHelper.PATH_REGEX_CHECKPOINT);
assertEquals(0, queue.size());
// cleanup
stopReplication(replicator);
} finally {
server.shutdown();
}
}
    // NOTE: This test should be run manually. It relies on delays, timeouts, and waits,
    // which could break the test on Jenkins because it runs on a VM with an ARM emulator.
    // To run the test, remove "manual" from the test method name.
//
// https://github.com/couchbase/couchbase-lite-java-core/issues/736
// https://github.com/couchbase/couchbase-lite-net/issues/356
public void manualTestBulkGetTimeout() throws Exception {
int def1 = CouchbaseLiteHttpClientFactory.DEFAULT_CONNECTION_TIMEOUT_SECONDS;
int def2 = CouchbaseLiteHttpClientFactory.DEFAULT_SO_TIMEOUT_SECONDS;
int def3 = ReplicationInternal.MAX_RETRIES;
int def4 = ReplicationInternal.RETRY_DELAY_SECONDS;
try {
// TIMEOUT 1 SEC
CouchbaseLiteHttpClientFactory.DEFAULT_CONNECTION_TIMEOUT_SECONDS = 1;
CouchbaseLiteHttpClientFactory.DEFAULT_SO_TIMEOUT_SECONDS = 1;
ReplicationInternal.MAX_RETRIES = 2;
ReplicationInternal.RETRY_DELAY_SECONDS = 0;
            // serve 2 mock docs
int numMockDocsToServe = 2;
// create mockwebserver and custom dispatcher
MockDispatcher dispatcher = new MockDispatcher();
MockWebServer server = MockHelper.getMockWebServer(dispatcher);
dispatcher.setServerType(MockDispatcher.ServerType.SYNC_GW);
try {
// mock documents to be pulled
List<MockDocumentGet.MockDocument> mockDocs = MockHelper.getMockDocuments(numMockDocsToServe);
// respond to all GET (responds with 404) and PUT Checkpoint requests
MockCheckpointPut mockCheckpointPut = new MockCheckpointPut();
mockCheckpointPut.setSticky(true);
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHECKPOINT, mockCheckpointPut);
// _changes response
MockChangesFeed mockChangesFeed = new MockChangesFeed();
for (MockDocumentGet.MockDocument mockDocument : mockDocs) {
mockChangesFeed.add(new MockChangesFeed.MockChangedDoc(mockDocument));
}
SmartMockResponseImpl smartMockResponse = new SmartMockResponseImpl(mockChangesFeed.generateMockResponse());
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHANGES, smartMockResponse);
// _bulk_get response
MockDocumentBulkGet mockBulkGet = new MockDocumentBulkGet();
for (MockDocumentGet.MockDocument mockDocument : mockDocs) {
mockBulkGet.addDocument(mockDocument);
}
                // _bulk_get delays 4 sec, which is longer than the 1 sec custom timeout,
                // so each request times out.
                mockBulkGet.setDelayMs(4 * 1000);
                // make sticky so retried requests receive the same response
mockBulkGet.setSticky(true);
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_BULK_GET, mockBulkGet);
// start mock server
server.play();
// run pull replication
Replication pullReplication = database.createPullReplication(server.getUrl("/db"));
runReplication(pullReplication, 3 * 60);
assertNotNull(pullReplication.getLastError());
assertTrue(pullReplication.getLastError() instanceof java.net.SocketTimeoutException);
// dump out the outgoing requests for bulk docs
BlockingQueue<RecordedRequest> bulkGetRequests = dispatcher.getRequestQueueSnapshot(MockHelper.PATH_REGEX_BULK_GET);
// +1 for initial request
assertEquals(ReplicationInternal.MAX_RETRIES + 1, bulkGetRequests.size());
} finally {
server.shutdown();
}
} finally {
CouchbaseLiteHttpClientFactory.DEFAULT_CONNECTION_TIMEOUT_SECONDS = def1;
CouchbaseLiteHttpClientFactory.DEFAULT_SO_TIMEOUT_SECONDS = def2;
ReplicationInternal.MAX_RETRIES = def3;
ReplicationInternal.RETRY_DELAY_SECONDS = def4;
}
}
// ReplicatorInternal.m: test_UseRemoteUUID
public void testUseRemoteUUID() throws Exception {
URL remoteURL1 = new URL("http://alice.local:55555/db");
Replication r1 = database.createPullReplication(remoteURL1);
r1.setRemoteUUID("cafebabe");
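        // setRemoteUUID makes the checkpoint doc ID derive from this stable identifier
        // instead of the URL, so two URLs with the same remoteUUID share a checkpoint
        // (asserted below).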
String check1 = r1.replicationInternal.remoteCheckpointDocID();
// Different URL, but same remoteUUID:
URL remoteURL2 = new URL("http://alice17.local:44444/db");
Replication r2 = database.createPullReplication(remoteURL2);
r2.setRemoteUUID("cafebabe");
String check2 = r2.replicationInternal.remoteCheckpointDocID();
assertEquals(check1, check2);
// Same UUID but different filter settings:
Replication r3 = database.createPullReplication(remoteURL2);
r3.setRemoteUUID("cafebabe");
r3.setFilter("Melitta");
String check3 = r3.replicationInternal.remoteCheckpointDocID();
assertNotSame(check2, check3);
}
/**
* This test is almost identical with
* TestCase(CBL_Pusher_DocIDs) in CBLReplicator_Tests.m
*/
public void testPushReplicationSetDocumentIDs() throws Exception {
// Create documents:
createDocumentForPushReplication("doc1", null, null);
createDocumentForPushReplication("doc2", null, null);
createDocumentForPushReplication("doc3", null, null);
createDocumentForPushReplication("doc4", null, null);
MockWebServer server = null;
try {
// Create mock server and play:
MockDispatcher dispatcher = new MockDispatcher();
server = MockHelper.getMockWebServer(dispatcher);
dispatcher.setServerType(MockDispatcher.ServerType.SYNC_GW);
server.play();
// Checkpoint GET response w/ 404 + respond to all PUT Checkpoint requests:
MockCheckpointPut mockCheckpointPut = new MockCheckpointPut();
mockCheckpointPut.setSticky(true);
mockCheckpointPut.setDelayMs(50);
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHECKPOINT, mockCheckpointPut);
// _revs_diff response -- everything missing:
MockRevsDiff mockRevsDiff = new MockRevsDiff();
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_REVS_DIFF, mockRevsDiff);
// _bulk_docs response -- everything stored
MockBulkDocs mockBulkDocs = new MockBulkDocs();
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_BULK_DOCS, mockBulkDocs);
// Create push replication:
Replication replication = database.createPushReplication(server.getUrl("/db"));
replication.setDocIds(Arrays.asList(new String[] {"doc2", "doc3"}));
// check pending document IDs:
Set<String> pendingDocIDs = replication.getPendingDocumentIDs();
assertEquals(2, pendingDocIDs.size());
assertFalse(pendingDocIDs.contains("doc1"));
assertTrue(pendingDocIDs.contains("doc2"));
assertTrue(pendingDocIDs.contains("doc3"));
assertFalse(pendingDocIDs.contains("doc4"));
// Run replication:
runReplication(replication);
// Check result:
RecordedRequest bulkDocsRequest = dispatcher.takeRequest(MockHelper.PATH_REGEX_BULK_DOCS);
assertNotNull(bulkDocsRequest);
assertFalse(MockHelper.getUtf8Body(bulkDocsRequest).contains("doc1"));
assertTrue(MockHelper.getUtf8Body(bulkDocsRequest).contains("doc2"));
assertTrue(MockHelper.getUtf8Body(bulkDocsRequest).contains("doc3"));
assertFalse(MockHelper.getUtf8Body(bulkDocsRequest).contains("doc4"));
} finally {
if (server != null)
server.shutdown();
}
}
public void testPullReplicationSetDocumentIDs() throws Exception {
MockWebServer server = null;
try {
// Create mock server and play:
MockDispatcher dispatcher = new MockDispatcher();
server = MockHelper.getMockWebServer(dispatcher);
dispatcher.setServerType(MockDispatcher.ServerType.SYNC_GW);
server.play();
// checkpoint PUT or GET response (sticky):
MockCheckpointPut mockCheckpointPut = new MockCheckpointPut();
mockCheckpointPut.setSticky(true);
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHECKPOINT, mockCheckpointPut);
// _changes response:
MockChangesFeed mockChangesFeed = new MockChangesFeed();
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHANGES, mockChangesFeed.generateMockResponse());
// Run pull replication:
Replication replication = database.createPullReplication(server.getUrl("/db"));
replication.setDocIds(Arrays.asList(new String[] {"doc2", "doc3"}));
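            // with doc IDs set, the puller requests the server's built-in _doc_ids filter
            // and sends the IDs in a POST body (verified below).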
runReplication(replication);
// Check changes feed request:
RecordedRequest getChangesFeedRequest = dispatcher.takeRequest(MockHelper.PATH_REGEX_CHANGES);
assertTrue(getChangesFeedRequest.getMethod().equals("POST"));
String body = getChangesFeedRequest.getUtf8Body();
Map<String, Object> jsonMap = Manager.getObjectMapper().readValue(body, Map.class);
assertTrue(jsonMap.containsKey("filter"));
String filter = (String) jsonMap.get("filter");
assertEquals("_doc_ids", filter);
List<String> docIDs = (List<String>) jsonMap.get("doc_ids");
assertNotNull(docIDs);
assertEquals(2, docIDs.size());
assertTrue(docIDs.contains("doc2"));
assertTrue(docIDs.contains("doc3"));
} finally {
if (server != null)
server.shutdown();
}
}
public void testPullWithGzippedChangesFeed() throws Exception {
MockWebServer server = null;
try {
// Create mock server and play:
MockDispatcher dispatcher = new MockDispatcher();
server = MockHelper.getMockWebServer(dispatcher);
dispatcher.setServerType(MockDispatcher.ServerType.SYNC_GW);
server.play();
// Mock documents to be pulled:
MockDocumentGet.MockDocument mockDoc1 =
new MockDocumentGet.MockDocument("doc1", "1-5e38", 1);
mockDoc1.setJsonMap(MockHelper.generateRandomJsonMap());
MockDocumentGet.MockDocument mockDoc2 =
new MockDocumentGet.MockDocument("doc2", "1-563b", 2);
mockDoc2.setJsonMap(MockHelper.generateRandomJsonMap());
// checkpoint GET response w/ 404:
MockResponse fakeCheckpointResponse = new MockResponse();
MockHelper.set404NotFoundJson(fakeCheckpointResponse);
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHECKPOINT, fakeCheckpointResponse);
// _changes response:
MockChangesFeed mockChangesFeed = new MockChangesFeed();
mockChangesFeed.add(new MockChangesFeed.MockChangedDoc(mockDoc1));
mockChangesFeed.add(new MockChangesFeed.MockChangedDoc(mockDoc2));
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHANGES,
mockChangesFeed.generateMockResponse(/*gzip*/true));
// doc1 response:
MockDocumentGet mockDocumentGet = new MockDocumentGet(mockDoc1);
dispatcher.enqueueResponse(mockDoc1.getDocPathRegex(),
mockDocumentGet.generateMockResponse());
// doc2 response:
mockDocumentGet = new MockDocumentGet(mockDoc2);
dispatcher.enqueueResponse(mockDoc2.getDocPathRegex(),
mockDocumentGet.generateMockResponse());
// _bulk_get response:
MockDocumentBulkGet mockBulkGet = new MockDocumentBulkGet();
mockBulkGet.addDocument(mockDoc1);
mockBulkGet.addDocument(mockDoc2);
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_BULK_GET, mockBulkGet);
// Respond to all PUT Checkpoint requests
MockCheckpointPut mockCheckpointPut = new MockCheckpointPut();
mockCheckpointPut.setSticky(true);
mockCheckpointPut.setDelayMs(500);
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHECKPOINT, mockCheckpointPut);
// Setup database change listener:
final List<String> changeDocIDs = new ArrayList<String>();
database.addChangeListener(new Database.ChangeListener() {
@Override
public void changed(Database.ChangeEvent event) {
for (DocumentChange change : event.getChanges()) {
changeDocIDs.add(change.getDocumentId());
}
}
});
// Run pull replication:
Replication replication = database.createPullReplication(server.getUrl("/db"));
runReplication(replication);
// Check result:
assertEquals(2, changeDocIDs.size());
String[] docIDs = changeDocIDs.toArray(new String[changeDocIDs.size()]);
Arrays.sort(docIDs);
assertTrue(Arrays.equals(new String[]{"doc1", "doc2"}, docIDs));
// Check changes feed request:
RecordedRequest changesFeedRequest =
dispatcher.takeRequest(MockHelper.PATH_REGEX_CHANGES);
String acceptEncoding = changesFeedRequest.getHeader("Accept-Encoding");
assertNotNull(acceptEncoding);
assertTrue(acceptEncoding.contains("gzip"));
} finally {
if (server != null)
server.shutdown();
}
}
}
| src/androidTest/java/com/couchbase/lite/replicator/ReplicationTest.java | package com.couchbase.lite.replicator;
import com.couchbase.lite.CouchbaseLiteException;
import com.couchbase.lite.Database;
import com.couchbase.lite.Document;
import com.couchbase.lite.DocumentChange;
import com.couchbase.lite.Emitter;
import com.couchbase.lite.LiteTestCaseWithDB;
import com.couchbase.lite.LiveQuery;
import com.couchbase.lite.Manager;
import com.couchbase.lite.Mapper;
import com.couchbase.lite.Query;
import com.couchbase.lite.QueryEnumerator;
import com.couchbase.lite.QueryOptions;
import com.couchbase.lite.QueryRow;
import com.couchbase.lite.ReplicationFilter;
import com.couchbase.lite.Revision;
import com.couchbase.lite.SavedRevision;
import com.couchbase.lite.Status;
import com.couchbase.lite.TransactionalTask;
import com.couchbase.lite.UnsavedRevision;
import com.couchbase.lite.ValidationContext;
import com.couchbase.lite.Validator;
import com.couchbase.lite.View;
import com.couchbase.lite.auth.Authenticator;
import com.couchbase.lite.auth.AuthenticatorFactory;
import com.couchbase.lite.auth.BasicAuthenticator;
import com.couchbase.lite.auth.FacebookAuthorizer;
import com.couchbase.lite.internal.RevisionInternal;
import com.couchbase.lite.mockserver.MockBulkDocs;
import com.couchbase.lite.mockserver.MockChangesFeed;
import com.couchbase.lite.mockserver.MockChangesFeedNoResponse;
import com.couchbase.lite.mockserver.MockCheckpointGet;
import com.couchbase.lite.mockserver.MockCheckpointPut;
import com.couchbase.lite.mockserver.MockDispatcher;
import com.couchbase.lite.mockserver.MockDocumentBulkGet;
import com.couchbase.lite.mockserver.MockDocumentGet;
import com.couchbase.lite.mockserver.MockDocumentPut;
import com.couchbase.lite.mockserver.MockFacebookAuthPost;
import com.couchbase.lite.mockserver.MockHelper;
import com.couchbase.lite.mockserver.MockRevsDiff;
import com.couchbase.lite.mockserver.MockSessionGet;
import com.couchbase.lite.mockserver.SmartMockResponseImpl;
import com.couchbase.lite.mockserver.WrappedSmartMockResponse;
import com.couchbase.lite.support.Base64;
import com.couchbase.lite.support.CouchbaseLiteHttpClientFactory;
import com.couchbase.lite.support.HttpClientFactory;
import com.couchbase.lite.support.MultipartReader;
import com.couchbase.lite.support.MultipartReaderDelegate;
import com.couchbase.lite.support.RemoteRequestRetry;
import com.couchbase.lite.util.Log;
import com.couchbase.lite.util.Utils;
import com.couchbase.org.apache.http.entity.mime.MultipartEntity;
import com.squareup.okhttp.mockwebserver.MockResponse;
import com.squareup.okhttp.mockwebserver.MockWebServer;
import com.squareup.okhttp.mockwebserver.RecordedRequest;
import junit.framework.Assert;
import org.apache.http.HttpEntity;
import org.apache.http.HttpRequest;
import org.apache.http.HttpResponse;
import org.apache.http.client.CookieStore;
import org.apache.http.client.HttpClient;
import org.apache.http.client.HttpResponseException;
import org.apache.http.client.methods.HttpPost;
import org.apache.http.client.methods.HttpPut;
import org.apache.http.client.methods.HttpUriRequest;
import org.apache.http.cookie.Cookie;
import java.io.IOException;
import java.io.InputStream;
import java.net.MalformedURLException;
import java.net.URL;
import java.net.URLDecoder;
import java.text.SimpleDateFormat;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Calendar;
import java.util.Collections;
import java.util.Date;
import java.util.GregorianCalendar;
import java.util.HashMap;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Queue;
import java.util.Set;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
/**
* Tests for the new state machine based replicator
*/
public class ReplicationTest extends LiteTestCaseWithDB {
/**
* Continuous puller starts offline
* Wait for a while .. (til what?)
* Add remote document (simulate w/ mock webserver)
* Put replication online
* Make sure doc is pulled
*/
public void testGoOnlinePuller() throws Exception {
Log.d(Log.TAG, "testGoOnlinePuller");
// create mock server
MockWebServer server = new MockWebServer();
try {
MockDispatcher dispatcher = new MockDispatcher();
dispatcher.setServerType(MockDispatcher.ServerType.SYNC_GW);
server.setDispatcher(dispatcher);
server.play();
// mock documents to be pulled
MockDocumentGet.MockDocument mockDoc1 = new MockDocumentGet.MockDocument("doc1", "1-5e38", 1);
mockDoc1.setJsonMap(MockHelper.generateRandomJsonMap());
// checkpoint PUT or GET response (sticky)
MockCheckpointPut mockCheckpointPut = new MockCheckpointPut();
mockCheckpointPut.setSticky(true);
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHECKPOINT, mockCheckpointPut);
// _changes response 503 error (sticky)
WrappedSmartMockResponse wrapped2 = new WrappedSmartMockResponse(new MockResponse().setResponseCode(503));
wrapped2.setSticky(true);
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHANGES, wrapped2);
// doc1 response
MockDocumentGet mockDocumentGet = new MockDocumentGet(mockDoc1);
dispatcher.enqueueResponse(mockDoc1.getDocPathRegex(), mockDocumentGet.generateMockResponse());
// _revs_diff response -- everything missing
MockRevsDiff mockRevsDiff = new MockRevsDiff();
mockRevsDiff.setSticky(true);
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_REVS_DIFF, mockRevsDiff);
// _bulk_docs response -- everything stored
MockBulkDocs mockBulkDocs = new MockBulkDocs();
mockBulkDocs.setSticky(true);
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_BULK_DOCS, mockBulkDocs);
// create and start replication
Replication pullReplication = database.createPullReplication(server.getUrl("/db"));
pullReplication.setContinuous(true);
pullReplication.start();
Log.d(Log.TAG, "Started pullReplication: %s", pullReplication);
// wait until a _checkpoint request has been sent
dispatcher.takeRequestBlocking(MockHelper.PATH_REGEX_CHECKPOINT);
// wait until a _changes request has been sent
dispatcher.takeRequestBlocking(MockHelper.PATH_REGEX_CHANGES);
putReplicationOffline(pullReplication);
// clear out existing queued mock responses to make room for new ones
dispatcher.clearQueuedResponse(MockHelper.PATH_REGEX_CHANGES);
// real _changes response with doc1
MockChangesFeed mockChangesFeed = new MockChangesFeed();
mockChangesFeed.add(new MockChangesFeed.MockChangedDoc(mockDoc1));
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHANGES, mockChangesFeed.generateMockResponse());
// long poll changes feed no response
MockChangesFeedNoResponse mockChangesFeedNoResponse = new MockChangesFeedNoResponse();
mockChangesFeedNoResponse.setSticky(true);
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHANGES, mockChangesFeedNoResponse);
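// pattern: the one real _changes response above delivers doc1, then the
// sticky no-response keeps every subsequent longpoll request hanging,
// leaving the continuous puller idle once doc1 has been pulled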
putReplicationOnline(pullReplication);
Log.d(Log.TAG, "Waiting for PUT checkpoint request with seq: %d", mockDoc1.getDocSeq());
waitForPutCheckpointRequestWithSeq(dispatcher, mockDoc1.getDocSeq());
Log.d(Log.TAG, "Got PUT checkpoint request with seq: %d", mockDoc1.getDocSeq());
stopReplication(pullReplication);
} finally {
server.shutdown();
}
}
/**
* Start continuous replication with a closed db.
* <p/>
* Expected behavior:
* - Receive replication finished callback
* - Replication lastError will contain an exception
*/
public void testStartReplicationClosedDb() throws Exception {
Database db = this.manager.getDatabase("closed");
final CountDownLatch countDownLatch = new CountDownLatch(1);
final Replication replication = db.createPullReplication(new URL("http://fake.com/foo"));
replication.setContinuous(true);
replication.addChangeListener(new Replication.ChangeListener() {
@Override
public void changed(Replication.ChangeEvent event) {
Log.d(TAG, "changed event: %s", event);
if (!replication.isRunning()) {
countDownLatch.countDown();
}
}
});
db.close();
replication.start();
boolean success = countDownLatch.await(60, TimeUnit.SECONDS);
assertTrue(success);
assertNotNull(replication.getLastError());
}
/**
* Start a replication and stop it immediately
*/
public void failingTestStartReplicationStartStop() throws Exception {
final CountDownLatch countDownLatch = new CountDownLatch(1);
final List<ReplicationStateTransition> transitions = new ArrayList<ReplicationStateTransition>();
final Replication replication = database.createPullReplication(new URL("http://fake.com/foo"));
replication.setContinuous(true);
replication.addChangeListener(new ReplicationFinishedObserver(countDownLatch));
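// NOTE: nothing populates `transitions` here; a Replication.ChangeListener
// that records each ReplicationStateTransition would be needed for the
// assertions below to pass, which is presumably why this test carries the
// "failing" prefix.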
replication.start();
replication.start(); // this should be ignored
replication.stop();
replication.stop(); // this should be ignored
boolean success = countDownLatch.await(60, TimeUnit.SECONDS);
assertTrue(success);
assertNull(replication.getLastError());
assertEquals(3, transitions.size());
assertEquals(ReplicationState.INITIAL, transitions.get(0).getSource());
assertEquals(ReplicationState.RUNNING, transitions.get(0).getDestination());
assertEquals(ReplicationState.RUNNING, transitions.get(1).getSource());
assertEquals(ReplicationState.STOPPING, transitions.get(1).getDestination());
assertEquals(ReplicationState.STOPPING, transitions.get(2).getSource());
assertEquals(ReplicationState.STOPPED, transitions.get(2).getDestination());
}
/**
* Pull replication test:
* <p/>
* - Single one-shot pull replication
* - Against simulated sync gateway
* - Remote docs do not have attachments
*/
public void testMockSinglePullSyncGw() throws Exception {
boolean shutdownMockWebserver = true;
boolean addAttachments = false;
mockSinglePull(shutdownMockWebserver, MockDispatcher.ServerType.SYNC_GW, addAttachments);
}
/**
* Pull replication test:
* <p/>
* - Single one-shot pull replication
* - Against simulated couchdb
* - Remote docs do not have attachments
*/
public void testMockSinglePullCouchDb() throws Exception {
boolean shutdownMockWebserver = true;
boolean addAttachments = false;
mockSinglePull(shutdownMockWebserver, MockDispatcher.ServerType.COUCHDB, addAttachments);
}
/**
* Pull replication test:
* <p/>
* - Single one-shot pull replication
* - Against simulated couchdb
* - Remote docs have attachments
*/
public void testMockSinglePullCouchDbAttachments() throws Exception {
boolean shutdownMockWebserver = true;
boolean addAttachments = true;
mockSinglePull(shutdownMockWebserver, MockDispatcher.ServerType.COUCHDB, addAttachments);
}
/**
* Pull replication test:
* <p/>
* - Single one-shot pull replication
* - Against simulated sync gateway
* - Remote docs have attachments
* <p/>
* TODO: sporadic assertion failure when checking rev field of PUT checkpoint requests
*/
public void testMockSinglePullSyncGwAttachments() throws Exception {
boolean shutdownMockWebserver = true;
boolean addAttachments = true;
mockSinglePull(shutdownMockWebserver, MockDispatcher.ServerType.SYNC_GW, addAttachments);
}
public void testMockMultiplePullSyncGw() throws Exception {
boolean shutdownMockWebserver = true;
mockMultiplePull(shutdownMockWebserver, MockDispatcher.ServerType.SYNC_GW);
}
public void testMockMultiplePullCouchDb() throws Exception {
boolean shutdownMockWebserver = true;
mockMultiplePull(shutdownMockWebserver, MockDispatcher.ServerType.COUCHDB);
}
public void testMockContinuousPullCouchDb() throws Exception {
// TODO: (IMPORTANT, FORESTDB) lastSequence for checkpoint does not match and causes deadlock
// if(!isSQLiteDB())
//     fail("ForestDB causes deadlock because of lastSequence mismatch for checkpoint");
boolean shutdownMockWebserver = true;
mockContinuousPull(shutdownMockWebserver, MockDispatcher.ServerType.COUCHDB);
}
/**
* Do a pull replication
*
* @param shutdownMockWebserver - should this test shutdown the mockwebserver
* when done? if another test wants to pick up
* where this left off, you should pass false.
* @param serverType - should the mock return the Sync Gateway server type in
* the "Server" HTTP Header? this changes the behavior of the
* replicator to use bulk_get and POST requests for _changes feeds.
* @param addAttachments - should the mock sync gateway return docs with attachments?
* @return a map that contains the mockwebserver (key="server") and the mock dispatcher
* (key="dispatcher")
*/
public Map<String, Object> mockSinglePull(boolean shutdownMockWebserver, MockDispatcher.ServerType serverType, boolean addAttachments) throws Exception {
// create mockwebserver and custom dispatcher
MockDispatcher dispatcher = new MockDispatcher();
MockWebServer server = MockHelper.getMockWebServer(dispatcher);
try {
dispatcher.setServerType(serverType);
// mock documents to be pulled
MockDocumentGet.MockDocument mockDoc1 = new MockDocumentGet.MockDocument("doc1", "1-5e38", 1);
mockDoc1.setJsonMap(MockHelper.generateRandomJsonMap());
mockDoc1.setAttachmentName("attachment.png");
MockDocumentGet.MockDocument mockDoc2 = new MockDocumentGet.MockDocument("doc2", "1-563b", 2);
mockDoc2.setJsonMap(MockHelper.generateRandomJsonMap());
mockDoc2.setAttachmentName("attachment2.png");
// checkpoint GET response w/ 404
MockResponse fakeCheckpointResponse = new MockResponse();
MockHelper.set404NotFoundJson(fakeCheckpointResponse);
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHECKPOINT, fakeCheckpointResponse);
// _changes response
MockChangesFeed mockChangesFeed = new MockChangesFeed();
mockChangesFeed.add(new MockChangesFeed.MockChangedDoc(mockDoc1));
mockChangesFeed.add(new MockChangesFeed.MockChangedDoc(mockDoc2));
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHANGES, mockChangesFeed.generateMockResponse());
// doc1 response
MockDocumentGet mockDocumentGet = new MockDocumentGet(mockDoc1);
if (addAttachments) {
mockDocumentGet.addAttachmentFilename(mockDoc1.getAttachmentName());
}
dispatcher.enqueueResponse(mockDoc1.getDocPathRegex(), mockDocumentGet.generateMockResponse());
// doc2 response
mockDocumentGet = new MockDocumentGet(mockDoc2);
if (addAttachments) {
mockDocumentGet.addAttachmentFilename(mockDoc2.getAttachmentName());
}
dispatcher.enqueueResponse(mockDoc2.getDocPathRegex(), mockDocumentGet.generateMockResponse());
// _bulk_get response
MockDocumentBulkGet mockBulkGet = new MockDocumentBulkGet();
mockBulkGet.addDocument(mockDoc1);
mockBulkGet.addDocument(mockDoc2);
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_BULK_GET, mockBulkGet);
// respond to all PUT Checkpoint requests
MockCheckpointPut mockCheckpointPut = new MockCheckpointPut();
mockCheckpointPut.setSticky(true);
mockCheckpointPut.setDelayMs(500);
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHECKPOINT, mockCheckpointPut);
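// (a sticky response is re-served for every matching request, so a single
// enqueue covers all checkpoint PUTs made during this test)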
// start mock server
server.play();
// run pull replication
Replication pullReplication = database.createPullReplication(server.getUrl("/db"));
Map<String, Object> headers = new HashMap<String, Object>();
headers.put("foo", "bar");
pullReplication.setHeaders(headers);
String checkpointId = pullReplication.remoteCheckpointDocID();
runReplication(pullReplication);
Log.d(TAG, "pullReplication finished");
database.addChangeListener(new Database.ChangeListener() {
@Override
public void changed(Database.ChangeEvent event) {
List<DocumentChange> changes = event.getChanges();
for (DocumentChange documentChange : changes) {
Log.d(TAG, "doc change callback: %s", documentChange.getDocumentId());
}
}
});
// assert that we now have both docs in local db
assertNotNull(database);
Document doc1 = database.getDocument(mockDoc1.getDocId());
assertNotNull(doc1);
assertNotNull(doc1.getCurrentRevisionId());
assertTrue(doc1.getCurrentRevisionId().equals(mockDoc1.getDocRev()));
assertNotNull(doc1.getProperties());
assertEquals(mockDoc1.getJsonMap(), doc1.getUserProperties());
Document doc2 = database.getDocument(mockDoc2.getDocId());
assertNotNull(doc2);
assertNotNull(doc2.getCurrentRevisionId());
assertNotNull(doc2.getProperties());
assertTrue(doc2.getCurrentRevisionId().equals(mockDoc2.getDocRev()));
assertEquals(mockDoc2.getJsonMap(), doc2.getUserProperties());
// assert that docs have attachments (if applicable)
if (addAttachments) {
attachmentAsserts(mockDoc1.getAttachmentName(), doc1);
attachmentAsserts(mockDoc2.getAttachmentName(), doc2);
}
// make assertions about outgoing requests from replicator -> mock
RecordedRequest getCheckpointRequest = dispatcher.takeRequest(MockHelper.PATH_REGEX_CHECKPOINT);
assertNotNull(getCheckpointRequest);
assertEquals("bar", getCheckpointRequest.getHeader("foo"));
assertTrue(getCheckpointRequest.getMethod().equals("GET"));
assertTrue(getCheckpointRequest.getPath().matches(MockHelper.PATH_REGEX_CHECKPOINT));
RecordedRequest getChangesFeedRequest = dispatcher.takeRequest(MockHelper.PATH_REGEX_CHANGES);
if (serverType == MockDispatcher.ServerType.SYNC_GW) {
assertTrue(getChangesFeedRequest.getMethod().equals("POST"));
} else {
assertTrue(getChangesFeedRequest.getMethod().equals("GET"));
}
assertTrue(getChangesFeedRequest.getPath().matches(MockHelper.PATH_REGEX_CHANGES));
// wait until the mock webserver receives a PUT checkpoint request with doc #2's sequence
Log.d(TAG, "waiting for PUT checkpoint %s", mockDoc2.getDocSeq());
List<RecordedRequest> checkpointRequests = waitForPutCheckpointRequestWithSequence(dispatcher, mockDoc2.getDocSeq());
validateCheckpointRequestsRevisions(checkpointRequests);
Log.d(TAG, "got PUT checkpoint %s", mockDoc2.getDocSeq());
// assert our local sequence matches what is expected
String lastSequence = database.lastSequenceWithCheckpointId(checkpointId);
assertEquals(Integer.toString(mockDoc2.getDocSeq()), lastSequence);
// assert completed count makes sense
assertEquals(pullReplication.getChangesCount(), pullReplication.getCompletedChangesCount());
// allow for either a single _bulk_get request or individual doc requests.
// if the server is sync gateway, it is allowable for replicator to use _bulk_get
RecordedRequest request = dispatcher.takeRequest(MockHelper.PATH_REGEX_BULK_GET);
if (request != null) {
String body = MockHelper.getUtf8Body(request);
assertTrue(body.contains(mockDoc1.getDocId()));
assertTrue(body.contains(mockDoc2.getDocId()));
} else {
RecordedRequest doc1Request = dispatcher.takeRequest(mockDoc1.getDocPathRegex());
assertTrue(doc1Request.getMethod().equals("GET"));
assertTrue(doc1Request.getPath().matches(mockDoc1.getDocPathRegex()));
RecordedRequest doc2Request = dispatcher.takeRequest(mockDoc2.getDocPathRegex());
assertTrue(doc2Request.getMethod().equals("GET"));
assertTrue(doc2Request.getPath().matches(mockDoc2.getDocPathRegex()));
}
} finally {
// Shut down the server. Instances cannot be reused.
if (shutdownMockWebserver) {
server.shutdown();
}
}
Map<String, Object> returnVal = new HashMap<String, Object>();
returnVal.put("server", server);
returnVal.put("dispatcher", dispatcher);
return returnVal;
}
/**
* Simulate the following:
* <p/>
* - Add a few docs and do a pull replication
* - One doc on sync gateway is now updated
* - Do a second pull replication
* - Assert we get the updated doc and save it locally
*/
public Map<String, Object> mockMultiplePull(boolean shutdownMockWebserver, MockDispatcher.ServerType serverType) throws Exception {
String doc1Id = "doc1";
// create mockwebserver and custom dispatcher
boolean addAttachments = false;
// do a pull replication
Map<String, Object> serverAndDispatcher = mockSinglePull(false, serverType, addAttachments);
MockWebServer server = (MockWebServer) serverAndDispatcher.get("server");
MockDispatcher dispatcher = (MockDispatcher) serverAndDispatcher.get("dispatcher");
try {
// clear out any possible residue left over from the previous test, e.g. mock
// responses still queued up, as well as any recorded requests that have been logged.
dispatcher.reset();
String doc1Rev = "2-2e38";
int doc1Seq = 3;
String checkpointRev = "0-1";
String checkpointLastSequence = "2";
// checkpoint GET response w/ seq = 2
MockCheckpointGet mockCheckpointGet = new MockCheckpointGet();
mockCheckpointGet.setOk("true");
mockCheckpointGet.setRev(checkpointRev);
mockCheckpointGet.setLastSequence(checkpointLastSequence);
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHECKPOINT, mockCheckpointGet);
// _changes response
MockChangesFeed mockChangesFeed = new MockChangesFeed();
MockChangesFeed.MockChangedDoc mockChangedDoc1 = new MockChangesFeed.MockChangedDoc()
.setSeq(doc1Seq)
.setDocId(doc1Id)
.setChangedRevIds(Arrays.asList(doc1Rev));
mockChangesFeed.add(mockChangedDoc1);
MockResponse fakeChangesResponse = mockChangesFeed.generateMockResponse();
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHANGES, fakeChangesResponse);
// doc1 response
Map<String, Object> doc1JsonMap = MockHelper.generateRandomJsonMap();
MockDocumentGet mockDocumentGet = new MockDocumentGet()
.setDocId(doc1Id)
.setRev(doc1Rev)
.setJsonMap(doc1JsonMap);
String doc1PathRegex = "/db/doc1.*";
dispatcher.enqueueResponse(doc1PathRegex, mockDocumentGet.generateMockResponse());
// checkpoint PUT response
MockCheckpointPut mockCheckpointPut = new MockCheckpointPut();
mockCheckpointPut.setSticky(true);
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHECKPOINT, mockCheckpointPut);
// run pull replication
Replication pullReplication = database.createPullReplication(server.getUrl("/db"));
runReplication(pullReplication);
// assert that we now have both docs in local db
assertNotNull(database);
Document doc1 = database.getDocument(doc1Id);
assertNotNull(doc1);
assertNotNull(doc1.getCurrentRevisionId());
assertTrue(doc1.getCurrentRevisionId().startsWith("2-"));
assertEquals(doc1JsonMap, doc1.getUserProperties());
// make assertions about outgoing requests from replicator -> mock
RecordedRequest getCheckpointRequest = dispatcher.takeRequest(MockHelper.PATH_REGEX_CHECKPOINT);
assertNotNull(getCheckpointRequest);
assertTrue(getCheckpointRequest.getMethod().equals("GET"));
assertTrue(getCheckpointRequest.getPath().matches(MockHelper.PATH_REGEX_CHECKPOINT));
RecordedRequest getChangesFeedRequest = dispatcher.takeRequest(MockHelper.PATH_REGEX_CHANGES);
if (serverType == MockDispatcher.ServerType.SYNC_GW) {
assertTrue(getChangesFeedRequest.getMethod().equals("POST"));
} else {
assertTrue(getChangesFeedRequest.getMethod().equals("GET"));
}
assertTrue(getChangesFeedRequest.getPath().matches(MockHelper.PATH_REGEX_CHANGES));
if (serverType == MockDispatcher.ServerType.SYNC_GW) {
Map<String, Object> jsonMap = Manager.getObjectMapper().readValue(getChangesFeedRequest.getUtf8Body(), Map.class);
assertTrue(jsonMap.containsKey("since"));
Integer since = (Integer) jsonMap.get("since");
assertEquals(2, since.intValue());
}
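// (the "since" value is simply the lastSequence ("2") returned by the mock
// checkpoint GET above, i.e. the replicator resumed from the stored checkpoint)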
RecordedRequest doc1Request = dispatcher.takeRequest(doc1PathRegex);
assertTrue(doc1Request.getMethod().equals("GET"));
assertTrue(doc1Request.getPath().matches("/db/doc1\\?rev=2-2e38.*"));
// wait until the mock webserver receives a PUT checkpoint request with the updated doc's sequence
int expectedLastSequence = doc1Seq;
List<RecordedRequest> checkpointRequests = waitForPutCheckpointRequestWithSequence(dispatcher, expectedLastSequence);
assertEquals(1, checkpointRequests.size());
// assert our local sequence matches what is expected
String lastSequence = database.lastSequenceWithCheckpointId(pullReplication.remoteCheckpointDocID());
assertEquals(Integer.toString(expectedLastSequence), lastSequence);
// assert completed count makes sense
assertEquals(pullReplication.getChangesCount(), pullReplication.getCompletedChangesCount());
} finally {
if (shutdownMockWebserver) {
server.shutdown();
}
}
Map<String, Object> returnVal = new HashMap<String, Object>();
returnVal.put("server", server);
returnVal.put("dispatcher", dispatcher);
return returnVal;
}
public Map<String, Object> mockContinuousPull(boolean shutdownMockWebserver, MockDispatcher.ServerType serverType) throws Exception {
assertTrue(serverType == MockDispatcher.ServerType.COUCHDB);
final int numMockRemoteDocs = 20; // must be multiple of 10!
final AtomicInteger numDocsPulledLocally = new AtomicInteger(0);
MockDispatcher dispatcher = new MockDispatcher();
dispatcher.setServerType(serverType);
int numDocsPerChangesResponse = numMockRemoteDocs / 10;
MockWebServer server = MockHelper.getPreloadedPullTargetMockCouchDB(dispatcher, numMockRemoteDocs, numDocsPerChangesResponse);
try {
server.play();
final CountDownLatch receivedAllDocs = new CountDownLatch(1);
// run pull replication
Replication pullReplication = database.createPullReplication(server.getUrl("/db"));
pullReplication.setContinuous(true);
final CountDownLatch replicationDoneSignal = new CountDownLatch(1);
pullReplication.addChangeListener(new ReplicationFinishedObserver(replicationDoneSignal));
final CountDownLatch replicationIdleSignal = new CountDownLatch(1);
ReplicationIdleObserver idleObserver = new ReplicationIdleObserver(replicationIdleSignal);
pullReplication.addChangeListener(idleObserver);
database.addChangeListener(new Database.ChangeListener() {
@Override
public void changed(Database.ChangeEvent event) {
List<DocumentChange> changes = event.getChanges();
for (DocumentChange change : changes) {
numDocsPulledLocally.addAndGet(1);
}
if (numDocsPulledLocally.get() == numMockRemoteDocs) {
receivedAllDocs.countDown();
}
}
});
pullReplication.start();
// wait until we received all mock docs or timeout occurs
boolean success = receivedAllDocs.await(60, TimeUnit.SECONDS);
assertTrue(success);
// make sure all docs in local db
Map<String, Object> allDocs = database.getAllDocs(new QueryOptions());
Integer totalRows = (Integer) allDocs.get("total_rows");
List rows = (List) allDocs.get("rows");
assertEquals(numMockRemoteDocs, totalRows.intValue());
assertEquals(numMockRemoteDocs, rows.size());
// wait until idle
success = replicationIdleSignal.await(30, TimeUnit.SECONDS);
assertTrue(success);
// cleanup / shutdown
pullReplication.stop();
success = replicationDoneSignal.await(30, TimeUnit.SECONDS);
assertTrue(success);
long lastSeq = database.getLastSequenceNumber();
Log.e(TAG, "lastSequence = %d", lastSeq);
// wait until the mock webserver receives a PUT checkpoint request with the last doc's sequence;
// this avoids ugly and confusing exceptions in the logs.
List<RecordedRequest> checkpointRequests = waitForPutCheckpointRequestWithSequence(dispatcher, numMockRemoteDocs - 1);
validateCheckpointRequestsRevisions(checkpointRequests);
} finally {
if (shutdownMockWebserver) {
server.shutdown();
}
}
Map<String, Object> returnVal = new HashMap<String, Object>();
returnVal.put("server", server);
returnVal.put("dispatcher", dispatcher);
return returnVal;
}
/**
* https://github.com/couchbase/couchbase-lite-java-core/issues/257
* <p/>
* - Create local document with attachment
* - Start continuous pull replication
* - MockServer returns _changes with new rev of document
* - MockServer returns doc multipart response: https://gist.github.com/tleyden/bf36f688d0b5086372fd
* - Delete doc cache (not sure if needed)
* - Fetch doc fresh from database
* - Verify that it still has attachments
*/
public void testAttachmentsDeletedOnPull() throws Exception {
String doc1Id = "doc1";
int doc1Rev2Generation = 2;
String doc1Rev2Digest = "b000";
String doc1Rev2 = String.format("%d-%s", doc1Rev2Generation, doc1Rev2Digest);
int doc1Seq1 = 1;
String doc1AttachName = "attachment.png";
String contentType = "image/png";
// create mockwebserver and custom dispatcher
MockDispatcher dispatcher = new MockDispatcher();
MockWebServer server = MockHelper.getMockWebServer(dispatcher);
try {
dispatcher.setServerType(MockDispatcher.ServerType.SYNC_GW);
server.play();
// add some documents - verify it has an attachment
Document doc1 = createDocumentForPushReplication(doc1Id, doc1AttachName, contentType);
String doc1Rev1 = doc1.getCurrentRevisionId();
doc1 = database.getDocument(doc1.getId());
assertTrue(doc1.getCurrentRevision().getAttachments().size() > 0);
// checkpoint GET response w/ 404
MockResponse fakeCheckpointResponse = new MockResponse();
MockHelper.set404NotFoundJson(fakeCheckpointResponse);
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHECKPOINT, fakeCheckpointResponse);
// checkpoint PUT response
MockCheckpointPut mockCheckpointPut = new MockCheckpointPut();
mockCheckpointPut.setSticky(true);
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHECKPOINT, mockCheckpointPut);
// add response to 1st _changes request
final MockDocumentGet.MockDocument mockDocument1 = new MockDocumentGet.MockDocument(
doc1Id, doc1Rev2, doc1Seq1);
Map<String, Object> newProperties = new HashMap<String, Object>(doc1.getProperties());
newProperties.put("_rev", doc1Rev2);
mockDocument1.setJsonMap(newProperties);
mockDocument1.setAttachmentName(doc1AttachName);
MockChangesFeed mockChangesFeed = new MockChangesFeed();
mockChangesFeed.add(new MockChangesFeed.MockChangedDoc(mockDocument1));
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHANGES, mockChangesFeed.generateMockResponse());
// add a sticky _changes response for feed=longpoll that just blocks for 60 seconds
// to emulate a server that doesn't have any new changes
MockChangesFeedNoResponse mockChangesFeedNoResponse = new MockChangesFeedNoResponse();
mockChangesFeedNoResponse.setDelayMs(60 * 1000);
mockChangesFeedNoResponse.setSticky(true);
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHANGES, mockChangesFeedNoResponse);
// add response to doc get
MockDocumentGet mockDocumentGet = new MockDocumentGet(mockDocument1);
mockDocumentGet.addAttachmentFilename(mockDocument1.getAttachmentName());
mockDocumentGet.setIncludeAttachmentPart(false);
Map<String, Object> revHistory = new HashMap<String, Object>();
revHistory.put("start", doc1Rev2Generation);
List ids = Arrays.asList(
RevisionInternal.digestFromRevID(doc1Rev2),
RevisionInternal.digestFromRevID(doc1Rev1)
);
revHistory.put("ids", ids);
mockDocumentGet.setRevHistoryMap(revHistory);
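// the "_revisions" map (start generation + digest list) lets the puller
// graft the remote rev 2 onto the rev 1 it already has locally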
dispatcher.enqueueResponse(mockDocument1.getDocPathRegex(), mockDocumentGet.generateMockResponse());
// create and start pull replication
Replication pullReplication = database.createPullReplication(server.getUrl("/db"));
pullReplication.setContinuous(true);
pullReplication.start();
// wait for the next PUT checkpoint request/response
waitForPutCheckpointRequestWithSeq(dispatcher, 1);
stopReplication(pullReplication);
// make sure doc has attachments
Document doc1Fetched = database.getDocument(doc1.getId());
assertTrue(doc1Fetched.getCurrentRevision().getAttachments().size() > 0);
} finally {
server.shutdown();
}
}
/**
* This is essentially a regression test for a deadlock
* that was happening when the LiveQuery#onDatabaseChanged()
* was calling waitForUpdateThread(), but that thread was
* waiting on connection to be released by the thread calling
* waitForUpdateThread(). When the deadlock bug was present,
* this test would trigger the deadlock and never finish.
* <p/>
* TODO: sporadic assertion failure when checking rev field of PUT checkpoint requests
*/
public void testPullerWithLiveQuery() throws Throwable {
View view = database.getView("testPullerWithLiveQueryView");
view.setMapReduce(new Mapper() {
@Override
public void map(Map<String, Object> document, Emitter emitter) {
if (document.get("_id") != null) {
emitter.emit(document.get("_id"), null);
}
}
}, null, "1");
final CountDownLatch countDownLatch = new CountDownLatch(1);
LiveQuery allDocsLiveQuery = view.createQuery().toLiveQuery();
allDocsLiveQuery.addChangeListener(new LiveQuery.ChangeListener() {
@Override
public void changed(LiveQuery.ChangeEvent event) {
if (event.getError() != null) {
throw new RuntimeException(event.getError());
}
if (event.getRows().getCount() == 2) {
countDownLatch.countDown();
}
}
});
// kick off live query
allDocsLiveQuery.start();
// do pull replication against mock
mockSinglePull(true, MockDispatcher.ServerType.SYNC_GW, true);
// make sure we were called back with both docs
boolean success = countDownLatch.await(30, TimeUnit.SECONDS);
assertTrue(success);
// clean up
allDocsLiveQuery.stop();
}
/**
* Make sure that if a continuous push gets an error
* pushing a doc, it will keep retrying it rather than giving up right away.
*
* @throws Exception
*/
public void testContinuousPushRetryBehavior() throws Exception {
RemoteRequestRetry.RETRY_DELAY_MS = 5; // speed up test execution (inner loop retry delay)
ReplicationInternal.RETRY_DELAY_SECONDS = 1; // speed up test execution (outer loop retry delay)
ReplicationInternal.MAX_RETRIES = 3; // speed up test execution (outer loop retry count)
// create mockwebserver and custom dispatcher
MockDispatcher dispatcher = new MockDispatcher();
MockWebServer server = MockHelper.getMockWebServer(dispatcher);
dispatcher.setServerType(MockDispatcher.ServerType.SYNC_GW);
try {
// checkpoint GET response w/ 404 + respond to all PUT Checkpoint requests
MockCheckpointPut mockCheckpointPut = new MockCheckpointPut();
mockCheckpointPut.setSticky(true);
mockCheckpointPut.setDelayMs(500);
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHECKPOINT, mockCheckpointPut);
// _revs_diff response -- everything missing
MockRevsDiff mockRevsDiff = new MockRevsDiff();
mockRevsDiff.setSticky(true);
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_REVS_DIFF, mockRevsDiff);
// _bulk_docs response -- 503 errors
MockResponse mockResponse = new MockResponse().setResponseCode(503);
WrappedSmartMockResponse mockBulkDocs = new WrappedSmartMockResponse(mockResponse, false);
mockBulkDocs.setSticky(true);
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_BULK_DOCS, mockBulkDocs);
server.play();
// create replication
Replication replication = database.createPushReplication(server.getUrl("/db"));
replication.setContinuous(true);
CountDownLatch replicationIdle = new CountDownLatch(1);
ReplicationIdleObserver idleObserver = new ReplicationIdleObserver(replicationIdle);
replication.addChangeListener(idleObserver);
replication.start();
// wait until idle
boolean success = replicationIdle.await(30, TimeUnit.SECONDS);
assertTrue(success);
replication.removeChangeListener(idleObserver);
// create a doc in local db
Document doc1 = createDocumentForPushReplication("doc1", null, null);
// we should expect to see at least numAttempts POSTs to _bulk_docs
// 1st attempt
// numAttempts is the number of tries within a single outer attempt.
int numAttempts = RemoteRequestRetry.MAX_RETRIES + 1; // total number of tries = 4 (1 initial + MAX_RETRIES)
for (int i = 0; i < numAttempts; i++) {
RecordedRequest request = dispatcher.takeRequestBlocking(MockHelper.PATH_REGEX_BULK_DOCS);
assertNotNull(request);
dispatcher.takeRecordedResponseBlocking(request);
}
// As of 12/16/2014, CBL core Java tries RemoteRequestRetry.MAX_RETRIES + 1 times; see above.
// Without the fix for #299, the following code would hang.
// outer retry loop
for (int j = 0; j < ReplicationInternal.MAX_RETRIES; j++) {
// inner retry loop
for (int i = 0; i < numAttempts; i++) {
RecordedRequest request = dispatcher.takeRequestBlocking(MockHelper.PATH_REGEX_BULK_DOCS);
assertNotNull(request);
dispatcher.takeRecordedResponseBlocking(request);
}
}
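// in total the replicator should have issued
// numAttempts * (ReplicationInternal.MAX_RETRIES + 1) = 4 * 4 = 16
// POSTs to _bulk_docs before giving up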
// the replication has given up retrying; stop it
stopReplication(replication);
} finally {
server.shutdown();
}
}
public void testMockSinglePush() throws Exception {
boolean shutdownMockWebserver = true;
mockSinglePush(shutdownMockWebserver, MockDispatcher.ServerType.SYNC_GW);
}
/**
* Do a push replication
* <p/>
* - Create docs in local db
* - One with no attachment
* - One with small attachment
* - One with large attachment
*/
public Map<String, Object> mockSinglePush(boolean shutdownMockWebserver, MockDispatcher.ServerType serverType) throws Exception {
String doc1Id = "doc1";
String doc2Id = "doc2";
String doc3Id = "doc3";
String doc4Id = "doc4";
String doc2PathRegex = String.format("/db/%s.*", doc2Id);
String doc3PathRegex = String.format("/db/%s.*", doc3Id);
String doc2AttachName = "attachment.png";
String doc3AttachName = "attachment2.png";
String contentType = "image/png";
// create mockwebserver and custom dispatcher
MockDispatcher dispatcher = new MockDispatcher();
MockWebServer server = MockHelper.getMockWebServer(dispatcher);
dispatcher.setServerType(serverType);
try {
server.play();
// add some documents
Document doc1 = createDocumentForPushReplication(doc1Id, null, null);
Document doc2 = createDocumentForPushReplication(doc2Id, doc2AttachName, contentType);
Document doc3 = createDocumentForPushReplication(doc3Id, doc3AttachName, contentType);
Document doc4 = createDocumentForPushReplication(doc4Id, null, null);
doc4.delete();
// checkpoint GET response w/ 404 + respond to all PUT Checkpoint requests
MockCheckpointPut mockCheckpointPut = new MockCheckpointPut();
mockCheckpointPut.setSticky(true);
mockCheckpointPut.setDelayMs(50);
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHECKPOINT, mockCheckpointPut);
// _revs_diff response -- everything missing
MockRevsDiff mockRevsDiff = new MockRevsDiff();
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_REVS_DIFF, mockRevsDiff);
// _bulk_docs response -- everything stored
MockBulkDocs mockBulkDocs = new MockBulkDocs();
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_BULK_DOCS, mockBulkDocs);
// doc PUT responses for docs with attachments
MockDocumentPut mockDoc2Put = new MockDocumentPut()
.setDocId(doc2Id)
.setRev(doc2.getCurrentRevisionId());
dispatcher.enqueueResponse(doc2PathRegex, mockDoc2Put.generateMockResponse());
MockDocumentPut mockDoc3Put = new MockDocumentPut()
.setDocId(doc3Id)
.setRev(doc3.getCurrentRevisionId());
dispatcher.enqueueResponse(doc3PathRegex, mockDoc3Put.generateMockResponse());
// run replication
Replication replication = database.createPushReplication(server.getUrl("/db"));
replication.setContinuous(false);
if (serverType != MockDispatcher.ServerType.SYNC_GW) {
replication.setCreateTarget(true);
Assert.assertTrue(replication.shouldCreateTarget());
}
runReplication(replication);
// make assertions about outgoing requests from replicator -> mock
RecordedRequest getCheckpointRequest = dispatcher.takeRequest(MockHelper.PATH_REGEX_CHECKPOINT);
assertTrue(getCheckpointRequest.getMethod().equals("GET"));
assertTrue(getCheckpointRequest.getPath().matches(MockHelper.PATH_REGEX_CHECKPOINT));
RecordedRequest revsDiffRequest = dispatcher.takeRequest(MockHelper.PATH_REGEX_REVS_DIFF);
assertTrue(MockHelper.getUtf8Body(revsDiffRequest).contains(doc1Id));
RecordedRequest bulkDocsRequest = dispatcher.takeRequest(MockHelper.PATH_REGEX_BULK_DOCS);
assertTrue(MockHelper.getUtf8Body(bulkDocsRequest).contains(doc1Id));
Map<String, Object> bulkDocsJson = Manager.getObjectMapper().readValue(MockHelper.getUtf8Body(bulkDocsRequest), Map.class);
Map<String, Object> doc4Map = MockBulkDocs.findDocById(bulkDocsJson, doc4Id);
assertTrue(((Boolean) doc4Map.get("_deleted")).booleanValue());
String str = MockHelper.getUtf8Body(bulkDocsRequest);
Log.e(TAG, str);
assertFalse(MockHelper.getUtf8Body(bulkDocsRequest).contains(doc2Id));
RecordedRequest doc2putRequest = dispatcher.takeRequest(doc2PathRegex);
CustomMultipartReaderDelegate delegate2 = new CustomMultipartReaderDelegate();
MultipartReader reader2 = new MultipartReader(doc2putRequest.getHeader("Content-Type"), delegate2);
reader2.appendData(doc2putRequest.getBody());
String body2 = new String(delegate2.data, "UTF-8");
assertTrue(body2.contains(doc2Id));
assertFalse(body2.contains(doc3Id));
RecordedRequest doc3putRequest = dispatcher.takeRequest(doc3PathRegex);
CustomMultipartReaderDelegate delegate3 = new CustomMultipartReaderDelegate();
MultipartReader reader3 = new MultipartReader(doc3putRequest.getHeader("Content-Type"), delegate3);
reader3.appendData(doc3putRequest.getBody());
String body3 = new String(delegate3.data, "UTF-8");
assertTrue(body3.contains(doc3Id));
assertFalse(body3.contains(doc2Id));
// wait until the mock webserver receives a PUT checkpoint request
int expectedLastSequence = 5;
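// four document creations plus doc4's deletion bump the local sequence to 5,
// which is what the final PUT checkpoint should record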
Log.d(TAG, "waiting for put checkpoint with lastSequence: %d", expectedLastSequence);
List<RecordedRequest> checkpointRequests = waitForPutCheckpointRequestWithSequence(dispatcher, expectedLastSequence);
Log.d(TAG, "done waiting for put checkpoint with lastSequence: %d", expectedLastSequence);
validateCheckpointRequestsRevisions(checkpointRequests);
// assert our local sequence matches what is expected
String lastSequence = database.lastSequenceWithCheckpointId(replication.remoteCheckpointDocID());
assertEquals(Integer.toString(expectedLastSequence), lastSequence);
// assert completed count makes sense
assertEquals(replication.getChangesCount(), replication.getCompletedChangesCount());
} finally {
// Shut down the server. Instances cannot be reused.
if (shutdownMockWebserver) {
server.shutdown();
}
}
Map<String, Object> returnVal = new HashMap<String, Object>();
returnVal.put("server", server);
returnVal.put("dispatcher", dispatcher);
return returnVal;
}
/**
* https://github.com/couchbase/couchbase-lite-java-core/issues/55
*/
public void testContinuousPushReplicationGoesIdle() throws Exception {
// make sure we are starting empty
assertEquals(0, database.getLastSequenceNumber());
// add docs
Map<String, Object> properties1 = new HashMap<String, Object>();
properties1.put("doc1", "testContinuousPushReplicationGoesIdle");
final Document doc1 = createDocWithProperties(properties1);
// create mockwebserver and custom dispatcher
MockDispatcher dispatcher = new MockDispatcher();
MockWebServer server = MockHelper.getMockWebServer(dispatcher);
dispatcher.setServerType(MockDispatcher.ServerType.SYNC_GW);
try {
server.play();
// checkpoint GET response w/ 404. also receives checkpoint PUT's
MockCheckpointPut mockCheckpointPut = new MockCheckpointPut();
mockCheckpointPut.setSticky(true);
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHECKPOINT, mockCheckpointPut);
// _revs_diff response -- everything missing
MockRevsDiff mockRevsDiff = new MockRevsDiff();
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_REVS_DIFF, mockRevsDiff);
// _bulk_docs response -- everything stored
MockBulkDocs mockBulkDocs = new MockBulkDocs();
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_BULK_DOCS, mockBulkDocs);
// replication to do the initial sync up - it has to be a continuous replication so the
// checkpoint id matches that of the next continuous replication we're going to do later.
Replication firstPusher = database.createPushReplication(server.getUrl("/db"));
firstPusher.setContinuous(true);
final String checkpointId = firstPusher.remoteCheckpointDocID(); // save the checkpoint id for later usage
// start the continuous replication
CountDownLatch replicationIdleSignal = new CountDownLatch(1);
ReplicationIdleObserver replicationIdleObserver = new ReplicationIdleObserver(replicationIdleSignal);
firstPusher.addChangeListener(replicationIdleObserver);
firstPusher.start();
// wait until we get an IDLE event
boolean successful = replicationIdleSignal.await(30, TimeUnit.SECONDS);
assertTrue(successful);
stopReplication(firstPusher);
// wait until replication does PUT checkpoint with lastSequence=1
int expectedLastSequence = 1;
waitForPutCheckpointRequestWithSeq(dispatcher, expectedLastSequence);
// the last sequence should be "1" at this point. we will use this later
final String lastSequence = database.lastSequenceWithCheckpointId(checkpointId);
assertEquals("1", lastSequence);
// start a second continuous replication
Replication secondPusher = database.createPushReplication(server.getUrl("/db"));
secondPusher.setContinuous(true);
final String secondPusherCheckpointId = secondPusher.remoteCheckpointDocID();
assertEquals(checkpointId, secondPusherCheckpointId);
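// both pushers target the same remote db with the same settings, so they
// must resolve to the same remote checkpoint document ID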
// remove current handler for the GET/PUT checkpoint request, and
// install a new handler that returns the lastSequence from previous replication
dispatcher.clearQueuedResponse(MockHelper.PATH_REGEX_CHECKPOINT);
MockCheckpointGet mockCheckpointGet = new MockCheckpointGet();
mockCheckpointGet.setLastSequence(lastSequence);
mockCheckpointGet.setRev("0-2");
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHECKPOINT, mockCheckpointGet);
// start second replication
replicationIdleSignal = new CountDownLatch(1);
replicationIdleObserver = new ReplicationIdleObserver(replicationIdleSignal);
secondPusher.addChangeListener(replicationIdleObserver);
secondPusher.start();
// wait until we get an IDLE event
successful = replicationIdleSignal.await(30, TimeUnit.SECONDS);
assertTrue(successful);
stopReplication(secondPusher);
} finally {
server.shutdown();
}
}
/**
* https://github.com/couchbase/couchbase-lite-java-core/issues/241
* <p/>
* - Set the "retry time" to a short number
* - Setup mock server to return 404 for all _changes requests
* - Start continuous replication
* - Sleep for 5X retry time
* - Assert that we've received at least two requests to _changes feed
* - Stop replication + cleanup
*/
public void testContinuousReplication404Changes() throws Exception {
int previous = PullerInternal.CHANGE_TRACKER_RESTART_DELAY_MS;
PullerInternal.CHANGE_TRACKER_RESTART_DELAY_MS = 5;
try {
// create mockwebserver and custom dispatcher
MockDispatcher dispatcher = new MockDispatcher();
MockWebServer server = MockHelper.getMockWebServer(dispatcher);
dispatcher.setServerType(MockDispatcher.ServerType.SYNC_GW);
try {
server.play();
// mock checkpoint GET response w/ 404
MockResponse fakeCheckpointResponse = new MockResponse();
MockHelper.set404NotFoundJson(fakeCheckpointResponse);
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHECKPOINT, fakeCheckpointResponse);
// mock _changes response
for (int i = 0; i < 100; i++) {
MockResponse mockChangesFeed = new MockResponse();
MockHelper.set404NotFoundJson(mockChangesFeed);
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHANGES, mockChangesFeed);
}
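// queue plenty of 404s: the change tracker restarts every
// CHANGE_TRACKER_RESTART_DELAY_MS (5 ms here) and consumes one queued
// response per restart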
// create new replication
Replication pull = database.createPullReplication(server.getUrl("/db"));
pull.setContinuous(true);
// add done listener to replication
CountDownLatch replicationDoneSignal = new CountDownLatch(1);
ReplicationFinishedObserver replicationFinishedObserver = new ReplicationFinishedObserver(replicationDoneSignal);
pull.addChangeListener(replicationFinishedObserver);
// start the replication
pull.start();
// wait until we get a few requests
Log.d(TAG, "Waiting for a _changes request");
RecordedRequest changesReq = dispatcher.takeRequestBlocking(MockHelper.PATH_REGEX_CHANGES);
Log.d(TAG, "Got first _changes request, waiting for another _changes request");
changesReq = dispatcher.takeRequestBlocking(MockHelper.PATH_REGEX_CHANGES);
Log.d(TAG, "Got second _changes request, waiting for another _changes request");
changesReq = dispatcher.takeRequestBlocking(MockHelper.PATH_REGEX_CHANGES);
Log.d(TAG, "Got third _changes request, stopping replicator");
// the replication should still be running
assertEquals(1, replicationDoneSignal.getCount());
// cleanup
stopReplication(pull);
} finally {
server.shutdown();
}
} finally {
PullerInternal.CHANGE_TRACKER_RESTART_DELAY_MS = previous;
}
}
/**
* Regression test for issue couchbase/couchbase-lite-android#174
*/
public void testAllLeafRevisionsArePushed() throws Exception {
final CustomizableMockHttpClient mockHttpClient = new CustomizableMockHttpClient();
mockHttpClient.addResponderRevDiffsAllMissing();
mockHttpClient.setResponseDelayMilliseconds(250);
mockHttpClient.addResponderFakeLocalDocumentUpdate404();
HttpClientFactory mockHttpClientFactory = new HttpClientFactory() {
@Override
public HttpClient getHttpClient() {
return mockHttpClient;
}
@Override
public void addCookies(List<Cookie> cookies) {
}
@Override
public void deleteCookie(String name) {
}
@Override
public CookieStore getCookieStore() {
return null;
}
};
manager.setDefaultHttpClientFactory(mockHttpClientFactory);
Document doc = database.createDocument();
SavedRevision rev1a = doc.createRevision().save();
SavedRevision rev2a = createRevisionWithRandomProps(rev1a, false);
SavedRevision rev3a = createRevisionWithRandomProps(rev2a, false);
// delete the branch we've been using, then create a new one to replace it
SavedRevision rev4a = rev3a.deleteDocument();
SavedRevision rev2b = createRevisionWithRandomProps(rev1a, true);
assertEquals(rev2b.getId(), doc.getCurrentRevisionId());
// sync with remote DB -- should push both leaf revisions
Replication push = database.createPushReplication(getReplicationURL());
runReplication(push);
assertNull(push.getLastError());
// find the _revs_diff captured request and decode into json
boolean foundRevsDiff = false;
List<HttpRequest> captured = mockHttpClient.getCapturedRequests();
for (HttpRequest httpRequest : captured) {
if (httpRequest instanceof HttpPost) {
HttpPost httpPost = (HttpPost) httpRequest;
if (httpPost.getURI().toString().endsWith("_revs_diff")) {
foundRevsDiff = true;
Map<String, Object> jsonMap = CustomizableMockHttpClient.getJsonMapFromRequest(httpPost);
// assert that it contains the expected revisions
List<String> revisionIds = (List) jsonMap.get(doc.getId());
assertEquals(2, revisionIds.size());
assertTrue(revisionIds.contains(rev4a.getId()));
assertTrue(revisionIds.contains(rev2b.getId()));
}
}
}
assertTrue(foundRevsDiff);
}
/**
* Verify that when a conflict is resolved on (mock) Sync Gateway
* and a pull replication is done, the conflict is resolved locally.
* <p/>
* - Create local docs in conflict
* - Simulate sync gw responses that resolve the conflict
* - Do pull replication
* - Assert conflict is resolved locally
* <p/>
* https://github.com/couchbase/couchbase-lite-java-core/issues/77
*/
public void failingTestRemoteConflictResolution() throws Exception {
// Create a document with two conflicting edits.
Document doc = database.createDocument();
SavedRevision rev1 = doc.createRevision().save();
SavedRevision rev2a = createRevisionWithRandomProps(rev1, false);
SavedRevision rev2b = createRevisionWithRandomProps(rev1, true);
// make sure we can query the db to get the conflict
Query allDocsQuery = database.createAllDocumentsQuery();
allDocsQuery.setAllDocsMode(Query.AllDocsMode.ONLY_CONFLICTS);
QueryEnumerator rows = allDocsQuery.run();
boolean foundDoc = false;
assertEquals(1, rows.getCount());
for (Iterator<QueryRow> it = rows; it.hasNext(); ) {
QueryRow row = it.next();
if (row.getDocument().getId().equals(doc.getId())) {
foundDoc = true;
}
}
assertTrue(foundDoc);
// make sure doc in conflict
assertTrue(doc.getConflictingRevisions().size() > 1);
// create mockwebserver and custom dispatcher
MockDispatcher dispatcher = new MockDispatcher();
MockWebServer server = MockHelper.getMockWebServer(dispatcher);
dispatcher.setServerType(MockDispatcher.ServerType.COUCHDB);
try {
// checkpoint GET response w/ 404
MockResponse fakeCheckpointResponse = new MockResponse();
MockHelper.set404NotFoundJson(fakeCheckpointResponse);
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHECKPOINT, fakeCheckpointResponse);
int rev3PromotedGeneration = 3;
String rev3PromotedDigest = "d46b";
String rev3Promoted = String.format("%d-%s", rev3PromotedGeneration, rev3PromotedDigest);
int rev3DeletedGeneration = 3;
String rev3DeletedDigest = "e768";
String rev3Deleted = String.format("%d-%s", rev3DeletedGeneration, rev3DeletedDigest);
int seq = 4;
// _changes response
MockChangesFeed mockChangesFeed = new MockChangesFeed();
MockChangesFeed.MockChangedDoc mockChangedDoc = new MockChangesFeed.MockChangedDoc();
mockChangedDoc.setDocId(doc.getId());
mockChangedDoc.setSeq(seq);
mockChangedDoc.setChangedRevIds(Arrays.asList(rev3Promoted, rev3Deleted));
mockChangesFeed.add(mockChangedDoc);
MockResponse response = mockChangesFeed.generateMockResponse();
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHANGES, response);
// docRev3Promoted response
MockDocumentGet.MockDocument docRev3Promoted = new MockDocumentGet.MockDocument(doc.getId(), rev3Promoted, seq);
docRev3Promoted.setJsonMap(MockHelper.generateRandomJsonMap());
MockDocumentGet mockDocRev3PromotedGet = new MockDocumentGet(docRev3Promoted);
Map<String, Object> rev3PromotedRevHistory = new HashMap<String, Object>();
rev3PromotedRevHistory.put("start", rev3PromotedGeneration);
List ids = Arrays.asList(
rev3PromotedDigest,
RevisionInternal.digestFromRevID(rev2a.getId()),
RevisionInternal.digestFromRevID(rev2b.getId())
);
rev3PromotedRevHistory.put("ids", ids);
mockDocRev3PromotedGet.setRevHistoryMap(rev3PromotedRevHistory);
dispatcher.enqueueResponse(docRev3Promoted.getDocPathRegex(), mockDocRev3PromotedGet.generateMockResponse());
// docRev3Deleted response
MockDocumentGet.MockDocument docRev3Deleted = new MockDocumentGet.MockDocument(doc.getId(), rev3Deleted, seq);
Map<String, Object> jsonMap = MockHelper.generateRandomJsonMap();
jsonMap.put("_deleted", true);
docRev3Deleted.setJsonMap(jsonMap);
MockDocumentGet mockDocRev3DeletedGet = new MockDocumentGet(docRev3Deleted);
Map<String, Object> rev3DeletedRevHistory = new HashMap<String, Object>();
rev3DeletedRevHistory.put("start", rev3DeletedGeneration);
ids = Arrays.asList(
rev3DeletedDigest,
RevisionInternal.digestFromRevID(rev2b.getId()),
RevisionInternal.digestFromRevID(rev1.getId())
);
rev3DeletedRevHistory.put("ids", ids);
mockDocRev3DeletedGet.setRevHistoryMap(rev3DeletedRevHistory);
dispatcher.enqueueResponse(docRev3Deleted.getDocPathRegex(), mockDocRev3DeletedGet.generateMockResponse());
// start mock server
server.play();
// run pull replication
Replication pullReplication = database.createPullReplication(server.getUrl("/db"));
runReplication(pullReplication);
assertNull(pullReplication.getLastError());
// assertions about outgoing requests
RecordedRequest changesRequest = dispatcher.takeRequest(MockHelper.PATH_REGEX_CHANGES);
assertNotNull(changesRequest);
RecordedRequest docRev3DeletedRequest = dispatcher.takeRequest(docRev3Deleted.getDocPathRegex());
assertNotNull(docRev3DeletedRequest);
RecordedRequest docRev3PromotedRequest = dispatcher.takeRequest(docRev3Promoted.getDocPathRegex());
assertNotNull(docRev3PromotedRequest);
// Make sure the conflict was resolved locally.
assertEquals(1, doc.getConflictingRevisions().size());
} finally {
server.shutdown();
}
}
/**
* https://github.com/couchbase/couchbase-lite-java-core/issues/95
*/
public void testPushReplicationCanMissDocs() throws Exception {
assertEquals(0, database.getLastSequenceNumber());
Map<String, Object> properties1 = new HashMap<String, Object>();
properties1.put("doc1", "testPushReplicationCanMissDocs");
final Document doc1 = createDocWithProperties(properties1);
Map<String, Object> properties2 = new HashMap<String, Object>();
properties1.put("doc2", "testPushReplicationCanMissDocs");
final Document doc2 = createDocWithProperties(properties2);
UnsavedRevision doc2UnsavedRev = doc2.createRevision();
InputStream attachmentStream = getAsset("attachment.png");
doc2UnsavedRev.setAttachment("attachment.png", "image/png", attachmentStream);
SavedRevision doc2Rev = doc2UnsavedRev.save();
assertNotNull(doc2Rev);
final CustomizableMockHttpClient mockHttpClient = new CustomizableMockHttpClient();
mockHttpClient.addResponderFakeLocalDocumentUpdate404();
mockHttpClient.setResponder("_bulk_docs", new CustomizableMockHttpClient.Responder() {
@Override
public HttpResponse execute(HttpUriRequest httpUriRequest) throws IOException {
String json = "{\"error\":\"not_found\",\"reason\":\"missing\"}";
return CustomizableMockHttpClient.generateHttpResponseObject(404, "NOT FOUND", json);
}
});
mockHttpClient.setResponder(doc2.getId(), new CustomizableMockHttpClient.Responder() {
@Override
public HttpResponse execute(HttpUriRequest httpUriRequest) throws IOException {
Map<String, Object> responseObject = new HashMap<String, Object>();
responseObject.put("id", doc2.getId());
responseObject.put("ok", true);
responseObject.put("rev", doc2.getCurrentRevisionId());
return CustomizableMockHttpClient.generateHttpResponseObject(responseObject);
}
});
// create a replication observer to wait until replication finishes
CountDownLatch replicationDoneSignal = new CountDownLatch(1);
ReplicationFinishedObserver replicationFinishedObserver = new ReplicationFinishedObserver(replicationDoneSignal);
// create replication and add observer
manager.setDefaultHttpClientFactory(mockFactoryFactory(mockHttpClient));
Replication pusher = database.createPushReplication(getReplicationURL());
pusher.addChangeListener(replicationFinishedObserver);
// save the checkpoint id for later usage
String checkpointId = pusher.remoteCheckpointDocID();
// kick off the replication
pusher.start();
// wait for it to finish
boolean success = replicationDoneSignal.await(60, TimeUnit.SECONDS);
assertTrue(success);
Log.d(TAG, "replicationDoneSignal finished");
// we would expect it to have recorded an error because one of the docs (the one without the attachment)
// will have failed.
assertNotNull(pusher.getLastError());
// workaround for the fact that the replicationDoneSignal.wait() call will unblock before all
// the statements in Replication.stopped() have even had a chance to execute.
// (specifically the ones that come after the call to notifyChangeListeners())
Thread.sleep(500);
String localLastSequence = database.lastSequenceWithCheckpointId(checkpointId);
Log.d(TAG, "database.lastSequenceWithCheckpointId(): " + localLastSequence);
Log.d(TAG, "doc2.getCurrentRevision().getSequence(): " + doc2.getCurrentRevision().getSequence());
String msg = "Since doc1 failed, the database should _not_ have had its lastSequence bumped" +
" to doc2's sequence number. If it did, it's bug: github.com/couchbase/couchbase-lite-java-core/issues/95";
assertFalse(msg, Long.toString(doc2.getCurrentRevision().getSequence()).equals(localLastSequence));
assertNull(localLastSequence);
assertTrue(doc2.getCurrentRevision().getSequence() > 0);
}
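/**
 * Illustrative helper sketch (an assumption for readability, not an existing
 * CustomizableMockHttpClient API): several tests in this file build one-off
 * Responder instances that return a fixed status and JSON body, e.g. the
 * _bulk_docs 404 responder above. A factory like this would cut that boilerplate:
 */
private static CustomizableMockHttpClient.Responder fixedResponder(
        final int statusCode, final String statusText, final String json) {
    return new CustomizableMockHttpClient.Responder() {
        @Override
        public HttpResponse execute(HttpUriRequest httpUriRequest) throws IOException {
            // always answer with the same canned response, regardless of the request
            return CustomizableMockHttpClient.generateHttpResponseObject(statusCode, statusText, json);
        }
    };
}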
/**
* https://github.com/couchbase/couchbase-lite-android/issues/66
*/
public void testPushUpdatedDocWithoutReSendingAttachments() throws Exception {
assertEquals(0, database.getLastSequenceNumber());
Map<String, Object> properties1 = new HashMap<String, Object>();
properties1.put("dynamic", 1);
final Document doc = createDocWithProperties(properties1);
SavedRevision doc1Rev = doc.getCurrentRevision();
// Add attachment to document
UnsavedRevision doc2UnsavedRev = doc.createRevision();
InputStream attachmentStream = getAsset("attachment.png");
doc2UnsavedRev.setAttachment("attachment.png", "image/png", attachmentStream);
SavedRevision doc2Rev = doc2UnsavedRev.save();
assertNotNull(doc2Rev);
final CustomizableMockHttpClient mockHttpClient = new CustomizableMockHttpClient();
mockHttpClient.addResponderFakeLocalDocumentUpdate404();
// http://url/db/foo (foo==docid)
mockHttpClient.setResponder(doc.getId(), new CustomizableMockHttpClient.Responder() {
@Override
public HttpResponse execute(HttpUriRequest httpUriRequest) throws IOException {
Map<String, Object> responseObject = new HashMap<String, Object>();
responseObject.put("id", doc.getId());
responseObject.put("ok", true);
responseObject.put("rev", doc.getCurrentRevisionId());
return CustomizableMockHttpClient.generateHttpResponseObject(responseObject);
}
});
// create replication and add observer
manager.setDefaultHttpClientFactory(mockFactoryFactory(mockHttpClient));
Replication pusher = database.createPushReplication(getReplicationURL());
runReplication(pusher);
List<HttpRequest> captured = mockHttpClient.getCapturedRequests();
for (HttpRequest httpRequest : captured) {
// the initial push legitimately carries the attachment, so the multipart
// assertion is intentionally left disabled here; the active check after
// the update below is the one this test cares about
if (httpRequest instanceof HttpPut) {
HttpPut httpPut = (HttpPut) httpRequest;
HttpEntity entity = httpPut.getEntity();
//assertFalse("PUT request with updated doc properties contains attachment", entity instanceof MultipartEntity);
}
}
mockHttpClient.clearCapturedRequests();
Document oldDoc = database.getDocument(doc.getId());
UnsavedRevision aUnsavedRev = oldDoc.createRevision();
Map<String, Object> prop = new HashMap<String, Object>();
prop.putAll(oldDoc.getProperties());
prop.put("dynamic", (Integer) oldDoc.getProperty("dynamic") + 1);
aUnsavedRev.setProperties(prop);
final SavedRevision savedRev = aUnsavedRev.save();
mockHttpClient.setResponder(doc.getId(), new CustomizableMockHttpClient.Responder() {
@Override
public HttpResponse execute(HttpUriRequest httpUriRequest) throws IOException {
Map<String, Object> responseObject = new HashMap<String, Object>();
responseObject.put("id", doc.getId());
responseObject.put("ok", true);
responseObject.put("rev", savedRev.getId());
return CustomizableMockHttpClient.generateHttpResponseObject(responseObject);
}
});
final String json = String.format("{\"%s\":{\"missing\":[\"%s\"],\"possible_ancestors\":[\"%s\",\"%s\"]}}", doc.getId(), savedRev.getId(), doc1Rev.getId(), doc2Rev.getId());
mockHttpClient.setResponder("_revs_diff", new CustomizableMockHttpClient.Responder() {
@Override
public HttpResponse execute(HttpUriRequest httpUriRequest) throws IOException {
return mockHttpClient.generateHttpResponseObject(json);
}
});
pusher = database.createPushReplication(getReplicationURL());
runReplication(pusher);
captured = mockHttpClient.getCapturedRequests();
for (HttpRequest httpRequest : captured) {
// verify that there are no PUT requests with attachments
if (httpRequest instanceof HttpPut) {
HttpPut httpPut = (HttpPut) httpRequest;
HttpEntity entity = httpPut.getEntity();
assertFalse("PUT request with updated doc properties contains attachment", entity instanceof MultipartEntity);
}
}
}
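/*
 * For reference, the _revs_diff response stubbed in the test above has this
 * shape (taken from the JSON string the test builds; the field meanings are
 * the standard CouchDB/Sync Gateway _revs_diff contract):
 *
 *   { "<docId>": { "missing": ["<revId to push>"],
 *                  "possible_ancestors": ["<revId>", "<revId>"] } }
 *
 * Advertising possible_ancestors is what lets the pusher send the updated
 * revision without re-sending the unchanged attachment.
 */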
/**
* https://github.com/couchbase/couchbase-lite-java-core/issues/188
*/
public void testServerDoesNotSupportMultipart() throws Exception {
assertEquals(0, database.getLastSequenceNumber());
Map<String, Object> properties1 = new HashMap<String, Object>();
properties1.put("dynamic", 1);
final Document doc = createDocWithProperties(properties1);
SavedRevision doc1Rev = doc.getCurrentRevision();
// Add attachment to document
UnsavedRevision doc2UnsavedRev = doc.createRevision();
InputStream attachmentStream = getAsset("attachment.png");
doc2UnsavedRev.setAttachment("attachment.png", "image/png", attachmentStream);
SavedRevision doc2Rev = doc2UnsavedRev.save();
assertNotNull(doc2Rev);
final CustomizableMockHttpClient mockHttpClient = new CustomizableMockHttpClient();
mockHttpClient.addResponderFakeLocalDocumentUpdate404();
Queue<CustomizableMockHttpClient.Responder> responders = new LinkedList<CustomizableMockHttpClient.Responder>();
//first http://url/db/foo (foo==docid)
//Reject multipart PUT with response code 415
responders.add(new CustomizableMockHttpClient.Responder() {
@Override
public HttpResponse execute(HttpUriRequest httpUriRequest) throws IOException {
String json = "{\"error\":\"Unsupported Media Type\",\"reason\":\"missing\"}";
return CustomizableMockHttpClient.generateHttpResponseObject(415, "Unsupported Media Type", json);
}
});
// second http://url/db/foo (foo==docid)
// second call should be plain json, return good response
responders.add(new CustomizableMockHttpClient.Responder() {
@Override
public HttpResponse execute(HttpUriRequest httpUriRequest) throws IOException {
Map<String, Object> responseObject = new HashMap<String, Object>();
responseObject.put("id", doc.getId());
responseObject.put("ok", true);
responseObject.put("rev", doc.getCurrentRevisionId());
return CustomizableMockHttpClient.generateHttpResponseObject(responseObject);
}
});
ResponderChain responderChain = new ResponderChain(responders);
mockHttpClient.setResponder(doc.getId(), responderChain);
// create replication and add observer
manager.setDefaultHttpClientFactory(mockFactoryFactory(mockHttpClient));
Replication pusher = database.createPushReplication(getReplicationURL());
runReplication(pusher);
List<HttpRequest> captured = mockHttpClient.getCapturedRequests();
int entityIndex = 0;
for (HttpRequest httpRequest : captured) {
// verify that there are no PUT requests with attachments
if (httpRequest instanceof HttpPut) {
HttpPut httpPut = (HttpPut) httpRequest;
HttpEntity entity = httpPut.getEntity();
if (entityIndex++ == 0) {
assertTrue("PUT request with attachment is not multipart", entity instanceof MultipartEntity);
} else {
assertFalse("PUT request with attachment is multipart", entity instanceof MultipartEntity);
}
}
}
}
public void testServerIsSyncGatewayVersion() throws Exception {
Replication pusher = database.createPushReplication(getReplicationURL());
assertFalse(pusher.serverIsSyncGatewayVersion("0.01"));
pusher.setServerType("Couchbase Sync Gateway/0.93");
assertTrue(pusher.serverIsSyncGatewayVersion("0.92"));
assertFalse(pusher.serverIsSyncGatewayVersion("0.94"));
}
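/**
 * Minimal stand-alone sketch of the version check exercised above (inferred
 * from the assertions, not copied from the library source): the check should
 * be false when no server type is known, and otherwise true only when the
 * "Couchbase Sync Gateway/<version>" suffix is at or above the given minimum.
 */
private static boolean serverIsSyncGatewayVersionSketch(String serverType, String minVersion) {
    String prefix = "Couchbase Sync Gateway/";
    if (serverType == null || !serverType.startsWith(prefix)) {
        return false;  // unknown server, or not Sync Gateway at all
    }
    String version = serverType.substring(prefix.length());
    // plain string comparison suffices for dotted versions of equal width (e.g. "0.92" vs "0.93")
    return version.compareTo(minVersion) >= 0;
}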
/**
* https://github.com/couchbase/couchbase-lite-android/issues/243
*/
public void testDifferentCheckpointsFilteredReplication() throws Exception {
Replication pullerNoFilter = database.createPullReplication(getReplicationURL());
String noFilterCheckpointDocId = pullerNoFilter.remoteCheckpointDocID();
Replication pullerWithFilter1 = database.createPullReplication(getReplicationURL());
pullerWithFilter1.setFilter("foo/bar");
Map<String, Object> filterParams = new HashMap<String, Object>();
filterParams.put("a", "aval");
filterParams.put("b", "bval");
List<String> docIds = Arrays.asList("doc3", "doc1", "doc2");
pullerWithFilter1.setDocIds(docIds);
assertEquals(docIds, pullerWithFilter1.getDocIds());
pullerWithFilter1.setFilterParams(filterParams);
String withFilterCheckpointDocId = pullerWithFilter1.remoteCheckpointDocID();
assertFalse(withFilterCheckpointDocId.equals(noFilterCheckpointDocId));
Replication pullerWithFilter2 = database.createPullReplication(getReplicationURL());
pullerWithFilter2.setFilter("foo/bar");
filterParams = new HashMap<String, Object>();
filterParams.put("b", "bval");
filterParams.put("a", "aval");
pullerWithFilter2.setDocIds(Arrays.asList("doc2", "doc3", "doc1"));
pullerWithFilter2.setFilterParams(filterParams);
String withFilterCheckpointDocId2 = pullerWithFilter2.remoteCheckpointDocID();
assertTrue(withFilterCheckpointDocId.equals(withFilterCheckpointDocId2));
}
public void testSetReplicationCookie() throws Exception {
URL replicationUrl = getReplicationURL();
Replication puller = database.createPullReplication(replicationUrl);
String cookieName = "foo";
String cookieVal = "bar";
boolean isSecure = false;
boolean httpOnly = false;
// expiration date - 1 day from now
Calendar cal = Calendar.getInstance();
cal.setTime(new Date());
int numDaysToAdd = 1;
cal.add(Calendar.DATE, numDaysToAdd);
Date expirationDate = cal.getTime();
// set the cookie
puller.setCookie(cookieName, cookieVal, "", expirationDate, isSecure, httpOnly);
// make sure it made it into cookie store and has expected params
CookieStore cookieStore = puller.getClientFactory().getCookieStore();
List<Cookie> cookies = cookieStore.getCookies();
assertEquals(1, cookies.size());
Cookie cookie = cookies.get(0);
assertEquals(cookieName, cookie.getName());
assertEquals(cookieVal, cookie.getValue());
assertEquals(replicationUrl.getHost(), cookie.getDomain());
assertEquals(replicationUrl.getPath(), cookie.getPath());
assertEquals(expirationDate, cookie.getExpiryDate());
assertEquals(isSecure, cookie.isSecure());
// add a second cookie
String cookieName2 = "foo2";
puller.setCookie(cookieName2, cookieVal, "", expirationDate, isSecure, false);
assertEquals(2, cookieStore.getCookies().size());
// delete cookie
puller.deleteCookie(cookieName2);
// should only have the original cookie left
assertEquals(1, cookieStore.getCookies().size());
assertEquals(cookieName, cookieStore.getCookies().get(0).getName());
}
/**
* https://github.com/couchbase/couchbase-lite-android/issues/376
* <p/>
* This test aims to demonstrate that when the changes feed returns purged documents,
* the replicator is able to fetch all other documents but is unable to finish the
* replication (it ends in the STOPPED or IDLE state)
*/
public void testChangesFeedWithPurgedDoc() throws Exception {
//generate documents ids
String doc1Id = "doc1-" + System.currentTimeMillis();
String doc2Id = "doc2-" + System.currentTimeMillis();
String doc3Id = "doc3-" + System.currentTimeMillis();
//generate mock documents
final MockDocumentGet.MockDocument mockDocument1 = new MockDocumentGet.MockDocument(
doc1Id, "1-a000", 1);
mockDocument1.setJsonMap(MockHelper.generateRandomJsonMap());
final MockDocumentGet.MockDocument mockDocument2 = new MockDocumentGet.MockDocument(
doc2Id, "1-b000", 2);
mockDocument2.setJsonMap(MockHelper.generateRandomJsonMap());
final MockDocumentGet.MockDocument mockDocument3 = new MockDocumentGet.MockDocument(
doc3Id, "1-c000", 3);
mockDocument3.setJsonMap(MockHelper.generateRandomJsonMap());
// create mockwebserver and custom dispatcher
MockDispatcher dispatcher = new MockDispatcher();
MockWebServer server = MockHelper.getMockWebServer(dispatcher);
dispatcher.setServerType(MockDispatcher.ServerType.COUCHDB);
try {
//add response to _local request
// checkpoint GET response w/ 404
MockResponse fakeCheckpointResponse = new MockResponse();
MockHelper.set404NotFoundJson(fakeCheckpointResponse);
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHECKPOINT, fakeCheckpointResponse);
//add response to _changes request
// _changes response
MockChangesFeed mockChangesFeed = new MockChangesFeed();
mockChangesFeed.add(new MockChangesFeed.MockChangedDoc(mockDocument1));
mockChangesFeed.add(new MockChangesFeed.MockChangedDoc(mockDocument2));
mockChangesFeed.add(new MockChangesFeed.MockChangedDoc(mockDocument3));
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHANGES, mockChangesFeed.generateMockResponse());
// doc1 response
MockDocumentGet mockDocumentGet1 = new MockDocumentGet(mockDocument1);
dispatcher.enqueueResponse(mockDocument1.getDocPathRegex(), mockDocumentGet1.generateMockResponse());
// doc2 missing response
MockResponse missingDocumentMockResponse = new MockResponse();
MockHelper.set404NotFoundJson(missingDocumentMockResponse);
dispatcher.enqueueResponse(mockDocument2.getDocPathRegex(), missingDocumentMockResponse);
// doc3 response
MockDocumentGet mockDocumentGet3 = new MockDocumentGet(mockDocument3);
dispatcher.enqueueResponse(mockDocument3.getDocPathRegex(), mockDocumentGet3.generateMockResponse());
// checkpoint PUT response
MockCheckpointPut mockCheckpointPut = new MockCheckpointPut();
mockCheckpointPut.setSticky(true);
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHECKPOINT, mockCheckpointPut);
// start mock server
server.play();
//create url for replication
URL baseUrl = server.getUrl("/db");
//create replication
Replication pullReplication = database.createPullReplication(baseUrl);
pullReplication.setContinuous(false);
//add change listener to notify when the replication is finished
CountDownLatch replicationFinishedContCountDownLatch = new CountDownLatch(1);
ReplicationFinishedObserver replicationFinishedObserver =
new ReplicationFinishedObserver(replicationFinishedContCountDownLatch);
pullReplication.addChangeListener(replicationFinishedObserver);
//start replication
pullReplication.start();
boolean success = replicationFinishedContCountDownLatch.await(100, TimeUnit.SECONDS);
assertTrue(success);
if (pullReplication.getLastError() != null) {
Log.d(TAG, "Replication had error: " + ((HttpResponseException) pullReplication.getLastError()).getStatusCode());
}
//assert document 1 was correctly pulled
Document doc1 = database.getDocument(doc1Id);
assertNotNull(doc1);
assertNotNull(doc1.getCurrentRevision());
//assert it was impossible to pull doc2
Document doc2 = database.getDocument(doc2Id);
assertNotNull(doc2);
assertNull(doc2.getCurrentRevision());
//assert it was possible to pull doc3
Document doc3 = database.getDocument(doc3Id);
assertNotNull(doc3);
assertNotNull(doc3.getCurrentRevision());
// wait until the replicator PUT's checkpoint with mockDocument3's sequence
waitForPutCheckpointRequestWithSeq(dispatcher, mockDocument3.getDocSeq());
//last saved seq must be equal to last pulled document seq
String doc3Seq = Integer.toString(mockDocument3.getDocSeq());
String lastSequence = database.lastSequenceWithCheckpointId(pullReplication.remoteCheckpointDocID());
assertEquals(doc3Seq, lastSequence);
} finally {
//stop mock server
server.shutdown();
}
}
/**
* Reproduces https://github.com/couchbase/couchbase-lite-android/issues/167
*/
public void testPushPurgedDoc() throws Throwable {
int numBulkDocRequests = 0;
HttpPost lastBulkDocsRequest = null;
Map<String, Object> properties = new HashMap<String, Object>();
properties.put("testName", "testPurgeDocument");
Document doc = createDocumentWithProperties(database, properties);
assertNotNull(doc);
final CustomizableMockHttpClient mockHttpClient = new CustomizableMockHttpClient();
mockHttpClient.addResponderRevDiffsAllMissing();
mockHttpClient.setResponseDelayMilliseconds(250);
mockHttpClient.addResponderFakeLocalDocumentUpdate404();
HttpClientFactory mockHttpClientFactory = new HttpClientFactory() {
@Override
public HttpClient getHttpClient() {
return mockHttpClient;
}
@Override
public void addCookies(List<Cookie> cookies) {
}
@Override
public void deleteCookie(String name) {
}
@Override
public CookieStore getCookieStore() {
return null;
}
};
URL remote = getReplicationURL();
manager.setDefaultHttpClientFactory(mockHttpClientFactory);
Replication pusher = database.createPushReplication(remote);
pusher.setContinuous(true);
final CountDownLatch replicationCaughtUpSignal = new CountDownLatch(1);
pusher.addChangeListener(new Replication.ChangeListener() {
@Override
public void changed(Replication.ChangeEvent event) {
final int changesCount = event.getSource().getChangesCount();
final int completedChangesCount = event.getSource().getCompletedChangesCount();
String msg = String.format("changes: %d completed changes: %d", changesCount, completedChangesCount);
Log.d(TAG, msg);
if (changesCount == completedChangesCount && changesCount != 0) {
replicationCaughtUpSignal.countDown();
}
}
});
pusher.start();
// wait until that doc is pushed
boolean didNotTimeOut = replicationCaughtUpSignal.await(60, TimeUnit.SECONDS);
assertTrue(didNotTimeOut);
// at this point, we should have captured exactly 1 bulk docs request
numBulkDocRequests = 0;
for (HttpRequest capturedRequest : mockHttpClient.getCapturedRequests()) {
if (capturedRequest instanceof HttpPost && ((HttpPost) capturedRequest).getURI().toString().endsWith("_bulk_docs")) {
lastBulkDocsRequest = (HttpPost) capturedRequest;
numBulkDocRequests += 1;
}
}
assertEquals(1, numBulkDocRequests);
// that bulk docs request should have the "start" key under its _revisions
Map<String, Object> jsonMap = mockHttpClient.getJsonMapFromRequest((HttpPost) lastBulkDocsRequest);
List docs = (List) jsonMap.get("docs");
Map<String, Object> onlyDoc = (Map) docs.get(0);
Map<String, Object> revisions = (Map) onlyDoc.get("_revisions");
assertTrue(revisions.containsKey("start"));
// now add a new revision, which will trigger the pusher to try to push it
properties = new HashMap<String, Object>();
properties.put("testName2", "update doc");
UnsavedRevision unsavedRevision = doc.createRevision();
unsavedRevision.setUserProperties(properties);
unsavedRevision.save();
// but then immediately purge it
doc.purge();
// wait for a while to give the replicator a chance to push it
// (it should not actually push anything)
Thread.sleep(5 * 1000);
// we should not have gotten any more _bulk_docs requests, because
// the replicator should not have pushed anything else.
// (in the case of the bug, it was trying to push the purged revision)
numBulkDocRequests = 0;
for (HttpRequest capturedRequest : mockHttpClient.getCapturedRequests()) {
if (capturedRequest instanceof HttpPost && ((HttpPost) capturedRequest).getURI().toString().endsWith("_bulk_docs")) {
numBulkDocRequests += 1;
}
}
assertEquals(1, numBulkDocRequests);
stopReplication(pusher);
}
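/**
 * Small counting helper sketch (illustrative; the test above inlines this
 * logic twice): count the captured POSTs that target _bulk_docs.
 */
private static int countBulkDocsRequests(CustomizableMockHttpClient mockHttpClient) {
    int count = 0;
    for (HttpRequest capturedRequest : mockHttpClient.getCapturedRequests()) {
        // only POSTs whose URI ends in _bulk_docs are bulk-doc pushes
        if (capturedRequest instanceof HttpPost
                && ((HttpPost) capturedRequest).getURI().toString().endsWith("_bulk_docs")) {
            count += 1;
        }
    }
    return count;
}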
/**
* Regression test for https://github.com/couchbase/couchbase-lite-java-core/issues/72
*/
public void testPusherBatching() throws Throwable {
int previous = ReplicationInternal.INBOX_CAPACITY;
ReplicationInternal.INBOX_CAPACITY = 5;
try {
// create a bunch of local documents
int numDocsToSend = ReplicationInternal.INBOX_CAPACITY * 3;
for (int i = 0; i < numDocsToSend; i++) {
Map<String, Object> properties = new HashMap<String, Object>();
properties.put("testPusherBatching", i);
createDocumentWithProperties(database, properties);
}
// kick off a one time push replication to a mock
final CustomizableMockHttpClient mockHttpClient = new CustomizableMockHttpClient();
mockHttpClient.addResponderFakeLocalDocumentUpdate404();
HttpClientFactory mockHttpClientFactory = mockFactoryFactory(mockHttpClient);
URL remote = getReplicationURL();
manager.setDefaultHttpClientFactory(mockHttpClientFactory);
Replication pusher = database.createPushReplication(remote);
runReplication(pusher);
assertNull(pusher.getLastError());
int numDocsSent = 0;
// verify that no more than INBOX_CAPACITY documents are included in any given _bulk_docs request
List<HttpRequest> capturedRequests = mockHttpClient.getCapturedRequests();
for (HttpRequest capturedRequest : capturedRequests) {
if (capturedRequest instanceof HttpPost) {
HttpPost capturedPostRequest = (HttpPost) capturedRequest;
if (capturedPostRequest.getURI().getPath().endsWith("_bulk_docs")) {
ArrayList docs = CustomizableMockHttpClient.extractDocsFromBulkDocsPost(capturedRequest);
String msg = "# of bulk docs pushed should be <= INBOX_CAPACITY";
assertTrue(msg, docs.size() <= ReplicationInternal.INBOX_CAPACITY);
numDocsSent += docs.size();
}
}
}
assertEquals(numDocsToSend, numDocsSent);
} finally {
ReplicationInternal.INBOX_CAPACITY = previous;
}
}
public void failingTestPullerGzipped() throws Throwable {
// TODO: rewrite w/ MockWebserver
/*String docIdTimestamp = Long.toString(System.currentTimeMillis());
final String doc1Id = String.format("doc1-%s", docIdTimestamp);
String attachmentName = "attachment.png";
addDocWithId(doc1Id, attachmentName, true);
doPullReplication();
Log.d(TAG, "Fetching doc1 via id: " + doc1Id);
Document doc1 = database.getDocument(doc1Id);
assertNotNull(doc1);
assertTrue(doc1.getCurrentRevisionId().startsWith("1-"));
assertEquals(1, doc1.getProperties().get("foo"));
Attachment attachment = doc1.getCurrentRevision().getAttachment(attachmentName);
assertTrue(attachment.getLength() > 0);
assertTrue(attachment.getGZipped());
InputStream is = attachment.getContent();
byte[] receivedBytes = TextUtils.read(is);
is.close();
InputStream attachmentStream = getAsset(attachmentName);
byte[] actualBytes = TextUtils.read(attachmentStream);
Assert.assertEquals(actualBytes.length, receivedBytes.length);
Assert.assertEquals(actualBytes, receivedBytes);*/
}
/**
* Verify that validation blocks are called correctly for docs
* pulled from the sync gateway.
* <p/>
* - Add doc to (mock) sync gateway
* - Add validation function that will reject that doc
* - Do a pull replication
* - Assert that the doc does _not_ make it into the db
*/
public void testValidationBlockCalled() throws Throwable {
final MockDocumentGet.MockDocument mockDocument = new MockDocumentGet.MockDocument("doc1", "1-3e28", 1);
mockDocument.setJsonMap(MockHelper.generateRandomJsonMap());
// create mockwebserver and custom dispatcher
MockDispatcher dispatcher = new MockDispatcher();
MockWebServer server = MockHelper.getMockWebServer(dispatcher);
dispatcher.setServerType(MockDispatcher.ServerType.SYNC_GW);
try {
// checkpoint GET response w/ 404
MockResponse fakeCheckpointResponse = new MockResponse();
MockHelper.set404NotFoundJson(fakeCheckpointResponse);
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHECKPOINT, fakeCheckpointResponse);
// _changes response
MockChangesFeed mockChangesFeed = new MockChangesFeed();
mockChangesFeed.add(new MockChangesFeed.MockChangedDoc(mockDocument));
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHANGES, mockChangesFeed.generateMockResponse());
// doc response
MockDocumentGet mockDocumentGet = new MockDocumentGet(mockDocument);
dispatcher.enqueueResponse(mockDocument.getDocPathRegex(), mockDocumentGet.generateMockResponse());
// checkpoint PUT response
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHECKPOINT, new MockCheckpointPut());
// start mock server
server.play();
// Add Validation block
database.setValidation("testValidationBlockCalled", new Validator() {
@Override
public void validate(Revision newRevision, ValidationContext context) {
if (newRevision.getDocument().getId().equals(mockDocument.getDocId())) {
context.reject("Reject");
}
}
});
// run pull replication
Replication pullReplication = database.createPullReplication(server.getUrl("/db"));
runReplication(pullReplication);
waitForPutCheckpointRequestWithSeq(dispatcher, mockDocument.getDocSeq());
// assert doc is not in local db
Document doc = database.getDocument(mockDocument.getDocId());
assertNull(doc.getCurrentRevision()); // doc should have been rejected by validation, and therefore not present
} finally {
server.shutdown();
}
}
/**
* Attempting to reproduce couchtalk issue:
* <p/>
* https://github.com/couchbase/couchbase-lite-android/issues/312
* <p/>
* - Start continuous puller against mock SG w/ 50 docs
* - After every 10 docs received, restart replication
* - Make sure all 50 docs are received and stored in local db
*
* @throws Exception
*/
public void testMockPullerRestart() throws Exception {
final int numMockRemoteDocs = 20; // must be multiple of 10!
final AtomicInteger numDocsPulledLocally = new AtomicInteger(0);
MockDispatcher dispatcher = new MockDispatcher();
dispatcher.setServerType(MockDispatcher.ServerType.COUCHDB);
int numDocsPerChangesResponse = numMockRemoteDocs / 10;
MockWebServer server = MockHelper.getPreloadedPullTargetMockCouchDB(dispatcher, numMockRemoteDocs, numDocsPerChangesResponse);
try {
server.play();
final CountDownLatch receivedAllDocs = new CountDownLatch(1);
// run pull replication
Replication pullReplication = database.createPullReplication(server.getUrl("/db"));
pullReplication.setContinuous(true);
// the replication should go idle twice in total: the first latch fires on the first idle, the second needs both
final CountDownLatch replicationIdleFirstTime = new CountDownLatch(1);
final CountDownLatch replicationIdleSecondTime = new CountDownLatch(2);
pullReplication.addChangeListener(new Replication.ChangeListener() {
@Override
public void changed(Replication.ChangeEvent event) {
if (event.getTransition() != null && event.getTransition().getDestination() == ReplicationState.IDLE) {
replicationIdleFirstTime.countDown();
replicationIdleSecondTime.countDown();
}
}
});
database.addChangeListener(new Database.ChangeListener() {
@Override
public void changed(Database.ChangeEvent event) {
List<DocumentChange> changes = event.getChanges();
for (DocumentChange change : changes) {
numDocsPulledLocally.addAndGet(1);
}
if (numDocsPulledLocally.get() == numMockRemoteDocs) {
receivedAllDocs.countDown();
}
}
});
pullReplication.start();
// wait until we received all mock docs or timeout occurs
boolean success = receivedAllDocs.await(60, TimeUnit.SECONDS);
assertTrue(success);
// wait until replication goes idle
success = replicationIdleFirstTime.await(60, TimeUnit.SECONDS);
assertTrue(success);
pullReplication.restart();
// wait until replication goes idle again
success = replicationIdleSecondTime.await(60, TimeUnit.SECONDS);
assertTrue(success);
stopReplication(pullReplication);
} finally {
// cleanup / shutdown
server.shutdown();
}
}
public void testRunReplicationWithError() throws Exception {
HttpClientFactory mockHttpClientFactory = new HttpClientFactory() {
@Override
public HttpClient getHttpClient() {
CustomizableMockHttpClient mockHttpClient = new CustomizableMockHttpClient();
int statusCode = 406;
mockHttpClient.addResponderFailAllRequests(statusCode);
return mockHttpClient;
}
@Override
public void addCookies(List<Cookie> cookies) {
}
@Override
public void deleteCookie(String name) {
}
@Override
public CookieStore getCookieStore() {
return null;
}
};
manager.setDefaultHttpClientFactory(mockHttpClientFactory);
Replication r1 = database.createPushReplication(getReplicationURL());
final CountDownLatch changeEventError = new CountDownLatch(1);
r1.addChangeListener(new Replication.ChangeListener() {
@Override
public void changed(Replication.ChangeEvent event) {
Log.d(TAG, "change event: %s", event);
if (event.getError() != null) {
changeEventError.countDown();
}
}
});
Assert.assertFalse(r1.isContinuous());
runReplication(r1);
// It should have failed with a 406 (the status code the mock responder returns for all requests):
Assert.assertEquals(0, r1.getCompletedChangesCount());
Assert.assertEquals(0, r1.getChangesCount());
Assert.assertNotNull(r1.getLastError());
boolean success = changeEventError.await(5, TimeUnit.SECONDS);
Assert.assertTrue(success);
}
public void testBuildRelativeURLString() throws Exception {
String dbUrlString = "http://10.0.0.3:4984/todos/";
Replication replication = database.createPullReplication(new URL(dbUrlString));
String relativeUrlString = replication.buildRelativeURLString("foo");
String expected = "http://10.0.0.3:4984/todos/foo";
Assert.assertEquals(expected, relativeUrlString);
}
public void testBuildRelativeURLStringWithLeadingSlash() throws Exception {
String dbUrlString = "http://10.0.0.3:4984/todos/";
Replication replication = database.createPullReplication(new URL(dbUrlString));
String relativeUrlString = replication.buildRelativeURLString("/foo");
String expected = "http://10.0.0.3:4984/todos/foo";
Assert.assertEquals(expected, relativeUrlString);
}
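/**
 * Illustrative sketch only (not the library implementation): building a
 * relative URL against a base that ends in a slash, stripping a leading
 * slash from the path component so both test cases above produce the
 * same result.
 */
private static String buildRelativeURLStringSketch(String base, String relativePath) {
    // strip one leading slash so "foo" and "/foo" are treated the same
    String path = relativePath.startsWith("/") ? relativePath.substring(1) : relativePath;
    return base.endsWith("/") ? base + path : base + "/" + path;
}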
public void testChannels() throws Exception {
URL remote = getReplicationURL();
Replication replicator = database.createPullReplication(remote);
List<String> channels = new ArrayList<String>();
channels.add("chan1");
channels.add("chan2");
replicator.setChannels(channels);
Assert.assertEquals(channels, replicator.getChannels());
replicator.setChannels(null);
Assert.assertTrue(replicator.getChannels().isEmpty());
}
public void testChannelsMore() throws MalformedURLException, CouchbaseLiteException {
Database db = startDatabase();
URL fakeRemoteURL = new URL("http://couchbase.com/no_such_db");
Replication r1 = db.createPullReplication(fakeRemoteURL);
assertTrue(r1.getChannels().isEmpty());
r1.setFilter("foo/bar");
assertTrue(r1.getChannels().isEmpty());
Map<String, Object> filterParams = new HashMap<String, Object>();
filterParams.put("a", "b");
r1.setFilterParams(filterParams);
assertTrue(r1.getChannels().isEmpty());
r1.setChannels(null);
assertEquals("foo/bar", r1.getFilter());
assertEquals(filterParams, r1.getFilterParams());
List<String> channels = new ArrayList<String>();
channels.add("NBC");
channels.add("MTV");
r1.setChannels(channels);
assertEquals(channels, r1.getChannels());
assertEquals("sync_gateway/bychannel", r1.getFilter());
filterParams = new HashMap<String, Object>();
filterParams.put("channels", "NBC,MTV");
assertEquals(filterParams, r1.getFilterParams());
r1.setChannels(null);
assertNull(r1.getFilter());
assertNull(r1.getFilterParams());
}
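/**
 * Sketch of the channel-to-filter mapping exercised above (an inference from
 * the assertions, not the library source): setting channels is equivalent to
 * using the built-in "sync_gateway/bychannel" filter with a comma-separated
 * "channels" parameter.
 */
private static Map<String, Object> channelsToFilterParamsSketch(List<String> channels) {
    StringBuilder joined = new StringBuilder();
    for (String channel : channels) {
        if (joined.length() > 0) {
            joined.append(",");  // "NBC,MTV" for the list used in the test above
        }
        joined.append(channel);
    }
    Map<String, Object> filterParams = new HashMap<String, Object>();
    filterParams.put("channels", joined.toString());
    return filterParams;
}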
/**
* https://github.com/couchbase/couchbase-lite-android/issues/247
*/
public void testPushReplicationRecoverableError() throws Exception {
boolean expectReplicatorError = false;
runPushReplicationWithTransientError("HTTP/1.1 503 Service Unavailable", expectReplicatorError);
}
/**
* https://github.com/couchbase/couchbase-lite-android/issues/247
*/
public void testPushReplicationNonRecoverableError() throws Exception {
boolean expectReplicatorError = true;
runPushReplicationWithTransientError("HTTP/1.1 404 Not Found", expectReplicatorError);
}
/**
* https://github.com/couchbase/couchbase-lite-android/issues/247
*/
public void runPushReplicationWithTransientError(String status, boolean expectReplicatorError) throws Exception {
String doc1Id = "doc1";
// create mockwebserver and custom dispatcher
MockDispatcher dispatcher = new MockDispatcher();
MockWebServer server = MockHelper.getMockWebServer(dispatcher);
dispatcher.setServerType(MockDispatcher.ServerType.SYNC_GW);
try {
server.play();
// add some documents
Document doc1 = createDocumentForPushReplication(doc1Id, null, null);
// checkpoint GET response w/ 404 + respond to all PUT Checkpoint requests
MockCheckpointPut mockCheckpointPut = new MockCheckpointPut();
mockCheckpointPut.setSticky(true);
mockCheckpointPut.setDelayMs(50);
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHECKPOINT, mockCheckpointPut);
// _revs_diff response -- everything missing
MockRevsDiff mockRevsDiff = new MockRevsDiff();
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_REVS_DIFF, mockRevsDiff);
// 1st _bulk_docs response -- transient error
MockResponse response = new MockResponse().setStatus(status);
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_BULK_DOCS, response);
// 2nd _bulk_docs response -- everything stored
MockBulkDocs mockBulkDocs = new MockBulkDocs();
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_BULK_DOCS, mockBulkDocs);
// run replication
Replication pusher = database.createPushReplication(server.getUrl("/db"));
pusher.setContinuous(false);
runReplication(pusher);
if (expectReplicatorError) {
assertNotNull(pusher.getLastError());
} else {
assertNull(pusher.getLastError());
int expectedLastSequence = 1;
Log.d(TAG, "waiting for put checkpoint with lastSequence: %d", expectedLastSequence);
List<RecordedRequest> checkpointRequests = waitForPutCheckpointRequestWithSequence(dispatcher, expectedLastSequence);
Log.d(TAG, "done waiting for put checkpoint with lastSequence: %d", expectedLastSequence);
validateCheckpointRequestsRevisions(checkpointRequests);
// assert our local sequence matches what is expected
String lastSequence = database.lastSequenceWithCheckpointId(pusher.remoteCheckpointDocID());
assertEquals(Integer.toString(expectedLastSequence), lastSequence);
// assert completed count makes sense
assertEquals(pusher.getChangesCount(), pusher.getCompletedChangesCount());
}
} finally {
// Shut down the server. Instances cannot be reused.
server.shutdown();
}
}
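/**
 * Sketch of the status classification the two tests above rely on (an
 * inference from their expectations, not the replicator's actual code):
 * 5xx responses such as 503 are transient and retried until they succeed,
 * while 4xx responses such as 404 are permanent and surface via getLastError().
 */
private static boolean isTransientStatusSketch(int statusCode) {
    return statusCode >= 500 && statusCode < 600;  // e.g. 503 Service Unavailable
}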
/**
* Verify that running a one-shot push replication will complete when run against a
* mock server that throws IO exceptions on every request.
*/
public void testOneShotReplicationErrorNotification() throws Throwable {
int previous = RemoteRequestRetry.RETRY_DELAY_MS;
RemoteRequestRetry.RETRY_DELAY_MS = 5;
try {
final CustomizableMockHttpClient mockHttpClient = new CustomizableMockHttpClient();
mockHttpClient.addResponderThrowExceptionAllRequests();
URL remote = getReplicationURL();
manager.setDefaultHttpClientFactory(mockFactoryFactory(mockHttpClient));
Replication pusher = database.createPushReplication(remote);
runReplication(pusher);
assertNotNull(pusher.getLastError());
} finally {
RemoteRequestRetry.RETRY_DELAY_MS = previous;
}
}
/**
* Verify that running a continuous push replication will emit a change event while
* in an error state when run against a mock server that throws IO exceptions on
* every request.
*/
public void testContinuousReplicationErrorNotification() throws Throwable {
int previous = RemoteRequestRetry.RETRY_DELAY_MS;
RemoteRequestRetry.RETRY_DELAY_MS = 5;
try {
final CustomizableMockHttpClient mockHttpClient = new CustomizableMockHttpClient();
mockHttpClient.addResponderThrowExceptionAllRequests();
URL remote = getReplicationURL();
manager.setDefaultHttpClientFactory(mockFactoryFactory(mockHttpClient));
Replication pusher = database.createPushReplication(remote);
pusher.setContinuous(true);
// add replication observer
final CountDownLatch countDownLatch = new CountDownLatch(1);
pusher.addChangeListener(new Replication.ChangeListener() {
@Override
public void changed(Replication.ChangeEvent event) {
if (event.getError() != null) {
countDownLatch.countDown();
}
}
});
// start replication
pusher.start();
boolean success = countDownLatch.await(30, TimeUnit.SECONDS);
assertTrue(success);
stopReplication(pusher);
} finally {
RemoteRequestRetry.RETRY_DELAY_MS = previous;
}
}
/**
* Test for the goOffline() method.
*/
public void testGoOffline() throws Exception {
final int numMockDocsToServe = 2;
// create mockwebserver and custom dispatcher
MockDispatcher dispatcher = new MockDispatcher();
MockWebServer server = MockHelper.getMockWebServer(dispatcher);
dispatcher.setServerType(MockDispatcher.ServerType.COUCHDB);
try {
server.play();
// mock documents to be pulled
MockDocumentGet.MockDocument mockDoc1 = new MockDocumentGet.MockDocument("doc1", "1-5e38", 1);
mockDoc1.setJsonMap(MockHelper.generateRandomJsonMap());
mockDoc1.setAttachmentName("attachment.png");
MockDocumentGet.MockDocument mockDoc2 = new MockDocumentGet.MockDocument("doc2", "1-563b", 2);
mockDoc2.setJsonMap(MockHelper.generateRandomJsonMap());
mockDoc2.setAttachmentName("attachment2.png");
// fake checkpoint PUT and GET response w/ 404
MockCheckpointPut fakeCheckpointResponse = new MockCheckpointPut();
fakeCheckpointResponse.setSticky(true);
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHECKPOINT, fakeCheckpointResponse);
// _changes response with docs
MockChangesFeed mockChangesFeed = new MockChangesFeed();
mockChangesFeed.add(new MockChangesFeed.MockChangedDoc(mockDoc1));
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHANGES, mockChangesFeed.generateMockResponse());
// next _changes response will block (e.g., longpoll response with no changes to return)
MockChangesFeed mockChangesFeedEmpty = new MockChangesFeed();
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHANGES, mockChangesFeedEmpty.generateMockResponse());
// doc1 response
MockDocumentGet mockDocumentGet = new MockDocumentGet(mockDoc1);
dispatcher.enqueueResponse(mockDoc1.getDocPathRegex(), mockDocumentGet.generateMockResponse());
// doc2 response
mockDocumentGet = new MockDocumentGet(mockDoc2);
dispatcher.enqueueResponse(mockDoc2.getDocPathRegex(), mockDocumentGet.generateMockResponse());
// create replication
Replication pullReplication = database.createPullReplication(server.getUrl("/db"));
pullReplication.setContinuous(true);
// add a change listener
final CountDownLatch idleCountdownLatch = new CountDownLatch(1);
final CountDownLatch receivedAllDocs = new CountDownLatch(1);
pullReplication.addChangeListener(new Replication.ChangeListener() {
@Override
public void changed(Replication.ChangeEvent event) {
Log.e(Log.TAG_SYNC, "event.getCompletedChangeCount() = " + event.getCompletedChangeCount());
if (event.getTransition() != null && event.getTransition().getDestination() == ReplicationState.IDLE) {
idleCountdownLatch.countDown();
}
if (event.getCompletedChangeCount() == numMockDocsToServe) {
receivedAllDocs.countDown();
}
}
});
// start replication
pullReplication.start();
// wait until it goes into idle state
boolean success = idleCountdownLatch.await(60, TimeUnit.SECONDS);
assertTrue(success);
// WORKAROUND: With CBL Java on Jenkins, the replicator enters the IDLE state before processing doc1. (NOT 100% REPRODUCIBLE)
// NOTE: 03/20/2014 This is also observable on the standard Android emulator with ARM. (NOT 100% REPRODUCIBLE)
// TODO: Need to fix: https://github.com/couchbase/couchbase-lite-java-core/issues/446
// NOTE: Build.BRAND.equalsIgnoreCase("generic") is only for Android, not for regular Java.
// So, until the IDLE state issue is solved, always wait 5 seconds.
try {
Thread.sleep(5 * 1000);
} catch (Exception e) {
}
// put the replication offline
putReplicationOffline(pullReplication);
// at this point, we shouldn't have received all of the docs yet.
assertTrue(receivedAllDocs.getCount() > 0);
// return some more docs on _changes feed
MockChangesFeed mockChangesFeed2 = new MockChangesFeed();
mockChangesFeed2.add(new MockChangesFeed.MockChangedDoc(mockDoc2));
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHANGES, mockChangesFeed2.generateMockResponse());
// put the replication online (should see the new docs)
putReplicationOnline(pullReplication);
// wait until we receive all the docs
success = receivedAllDocs.await(60, TimeUnit.SECONDS);
assertTrue(success);
// wait until we try to PUT a checkpoint request with doc2's sequence
waitForPutCheckpointRequestWithSeq(dispatcher, mockDoc2.getDocSeq());
// make sure all docs in local db
Map<String, Object> allDocs = database.getAllDocs(new QueryOptions());
Integer totalRows = (Integer) allDocs.get("total_rows");
List rows = (List) allDocs.get("rows");
assertEquals(numMockDocsToServe, totalRows.intValue());
assertEquals(numMockDocsToServe, rows.size());
// cleanup
stopReplication(pullReplication);
} finally {
server.shutdown();
}
}
private void putReplicationOffline(Replication replication) throws InterruptedException {
Log.d(Log.TAG, "putReplicationOffline: %s", replication);
// wait until the replicator actually reports the OFFLINE transition
final CountDownLatch wentOffline = new CountDownLatch(1);
Replication.ChangeListener changeListener = new ReplicationOfflineObserver(wentOffline);
replication.addChangeListener(changeListener);
replication.goOffline();
boolean succeeded = wentOffline.await(30, TimeUnit.SECONDS);
assertTrue(succeeded);
replication.removeChangeListener(changeListener);
Log.d(Log.TAG, "/putReplicationOffline: %s", replication);
}
private void putReplicationOnline(Replication replication) throws InterruptedException {
Log.d(Log.TAG, "putReplicationOnline: %s", replication);
// wait until the replicator actually reports the ACTIVE transition after going online
final CountDownLatch wentOnline = new CountDownLatch(1);
Replication.ChangeListener changeListener = new ReplicationActiveObserver(wentOnline);
replication.addChangeListener(changeListener);
replication.goOnline();
boolean succeeded = wentOnline.await(30, TimeUnit.SECONDS);
assertTrue(succeeded);
replication.removeChangeListener(changeListener);
Log.d(Log.TAG, "/putReplicationOnline: %s", replication);
}
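/*
 * Usage sketch for the two helpers above: bracket a simulated network outage
 * in a test, queueing extra mock responses while offline.
 *
 *   putReplicationOffline(pullReplication);
 *   // ... enqueue more _changes / document responses on the dispatcher ...
 *   putReplicationOnline(pullReplication);   // replicator resumes and drains them
 */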
/**
* https://github.com/couchbase/couchbase-lite-java-core/issues/253
*/
public void testReplicationOnlineExtraneousChangeTrackers() throws Exception {
// create mockwebserver and custom dispatcher
MockDispatcher dispatcher = new MockDispatcher();
MockWebServer server = MockHelper.getMockWebServer(dispatcher);
dispatcher.setServerType(MockDispatcher.ServerType.COUCHDB);
try {
// add sticky checkpoint GET response w/ 404
MockCheckpointGet fakeCheckpointResponse = new MockCheckpointGet();
fakeCheckpointResponse.set404(true);
fakeCheckpointResponse.setSticky(true);
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHECKPOINT, fakeCheckpointResponse);
// add sticky _changes response to feed=longpoll that just blocks for 60 seconds to emulate
// server that doesn't have any new changes
MockChangesFeedNoResponse mockChangesFeedNoResponse = new MockChangesFeedNoResponse();
mockChangesFeedNoResponse.setDelayMs(60 * 1000);
mockChangesFeedNoResponse.setSticky(true);
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHANGES_LONGPOLL, mockChangesFeedNoResponse);
// add _changes response to feed=normal that returns empty _changes feed immediately
MockChangesFeed mockChangesFeed = new MockChangesFeed();
MockResponse mockResponse = mockChangesFeed.generateMockResponse();
for (int i = 0; i < 500; i++) { // TODO: use setSticky instead of workaround to add a ton of mock responses
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHANGES_NORMAL, new WrappedSmartMockResponse(mockResponse));
}
// start mock server
server.play();
//create url for replication
URL baseUrl = server.getUrl("/db");
//create replication
final Replication pullReplication = database.createPullReplication(baseUrl);
pullReplication.setContinuous(true);
pullReplication.start();
// wait until we get a request to the _changes feed
RecordedRequest changesReq = dispatcher.takeRequestBlocking(MockHelper.PATH_REGEX_CHANGES_LONGPOLL);
assertNotNull(changesReq);
putReplicationOffline(pullReplication);
// at this point since we called takeRequest earlier, our recorded _changes request queue should be empty
assertNull(dispatcher.takeRequest(MockHelper.PATH_REGEX_CHANGES_LONGPOLL));
// put replication online 10 times
for (int i = 0; i < 10; i++) {
pullReplication.goOnline();
}
// sleep for a while to give things a chance to start
Log.d(TAG, "sleeping for 2 seconds");
Thread.sleep(2 * 1000);
Log.d(TAG, "done sleeping");
// how many _changes feed requests has the replicator made since going online?
int numChangesRequests = 0;
while ((changesReq = dispatcher.takeRequest(MockHelper.PATH_REGEX_CHANGES_LONGPOLL)) != null) {
Log.d(TAG, "changesReq: %s", changesReq);
numChangesRequests += 1;
}
// assert that there was only one _changes feed request
assertEquals(1, numChangesRequests);
// shutdown
stopReplication(pullReplication);
} finally {
server.shutdown();
}
}
/**
* Test goOffline() method in the context of a continuous pusher.
* <p/>
* - 1. Add a local document
* - 2. Kick off continuous push replication
* - 3. Wait for document to be pushed
* - 4. Call goOffline()
* - 5. Add a 2nd local document
* - 6. Call goOnline()
* - 7. Wait for 2nd document to be pushed
*
* @throws Exception
*/
public void testGoOfflinePusher() throws Exception {
int previous = RemoteRequestRetry.RETRY_DELAY_MS;
RemoteRequestRetry.RETRY_DELAY_MS = 5;
try {
// 1. Add a local document
Map<String, Object> properties = new HashMap<String, Object>();
properties.put("testGoOfflinePusher", "1");
Document doc1 = createDocumentWithProperties(database, properties);
// create mock server
MockWebServer server = new MockWebServer();
try {
MockDispatcher dispatcher = new MockDispatcher();
server.setDispatcher(dispatcher);
server.play();
// checkpoint PUT response (sticky)
MockCheckpointPut mockCheckpointPut = new MockCheckpointPut();
mockCheckpointPut.setSticky(true);
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHECKPOINT, mockCheckpointPut);
// _revs_diff response -- everything missing
MockRevsDiff mockRevsDiff = new MockRevsDiff();
mockRevsDiff.setSticky(true);
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_REVS_DIFF, mockRevsDiff);
// _bulk_docs response -- everything stored
MockBulkDocs mockBulkDocs = new MockBulkDocs();
mockBulkDocs.setSticky(true);
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_BULK_DOCS, mockBulkDocs);
// 2. Kick off continuous push replication
Replication replicator = database.createPushReplication(server.getUrl("/db"));
replicator.setContinuous(true);
CountDownLatch replicationIdleSignal = new CountDownLatch(1);
ReplicationIdleObserver replicationIdleObserver = new ReplicationIdleObserver(replicationIdleSignal);
replicator.addChangeListener(replicationIdleObserver);
replicator.start();
// 3. Wait for document to be pushed
// wait until replication goes idle
boolean successful = replicationIdleSignal.await(30, TimeUnit.SECONDS);
assertTrue(successful);
// wait until mock server gets the checkpoint PUT request
boolean foundCheckpointPut = false;
String expectedLastSequence = "1";
while (!foundCheckpointPut) {
RecordedRequest request = dispatcher.takeRequestBlocking(MockHelper.PATH_REGEX_CHECKPOINT);
if (request.getMethod().equals("PUT")) {
foundCheckpointPut = true;
Assert.assertTrue(request.getUtf8Body().contains(expectedLastSequence));
// wait until mock server responds to the checkpoint PUT request
dispatcher.takeRecordedResponseBlocking(request);
}
}
// make some assertions about the outgoing _bulk_docs requests for first doc
RecordedRequest bulkDocsRequest1 = dispatcher.takeRequest(MockHelper.PATH_REGEX_BULK_DOCS);
assertNotNull(bulkDocsRequest1);
assertBulkDocJsonContainsDoc(bulkDocsRequest1, doc1);
// 4. Call goOffline()
putReplicationOffline(replicator);
// 5. Add a 2nd local document
properties = new HashMap<String, Object>();
properties.put("testGoOfflinePusher", "2");
Document doc2 = createDocumentWithProperties(database, properties);
// make sure the push replicator does not send any request while offline.
try {
Thread.sleep(1000 * 3);
} catch (Exception ex) {
}
// make sure no _bulk_docs request is received while offline.
RecordedRequest bulkDocsRequest = dispatcher.takeRequest(MockHelper.PATH_REGEX_BULK_DOCS);
assertNull(bulkDocsRequest);
// 6. Call goOnline()
putReplicationOnline(replicator);
// wait until mock server gets the 2nd checkpoint PUT request
foundCheckpointPut = false;
expectedLastSequence = "2";
while (!foundCheckpointPut) {
RecordedRequest request = dispatcher.takeRequestBlocking(MockHelper.PATH_REGEX_CHECKPOINT);
if (request.getMethod().equals("PUT")) {
foundCheckpointPut = true;
Assert.assertTrue(request.getUtf8Body().contains(expectedLastSequence));
// wait until mock server responds to the checkpoint PUT request
dispatcher.takeRecordedResponseBlocking(request);
}
}
// make some assertions about the outgoing _bulk_docs requests for second doc
RecordedRequest bulkDocsRequest2 = dispatcher.takeRequest(MockHelper.PATH_REGEX_BULK_DOCS);
assertNotNull(bulkDocsRequest2);
assertBulkDocJsonContainsDoc(bulkDocsRequest2, doc2);
// cleanup
stopReplication(replicator);
} finally {
server.shutdown();
}
} finally {
RemoteRequestRetry.RETRY_DELAY_MS = previous;
}
}
/**
* Verify that when a replication runs into an auth error, it stops
* and the lastError() method returns that error.
*/
public void testReplicatorErrorStatus() throws Exception {
// create mockwebserver and custom dispatcher
MockDispatcher dispatcher = new MockDispatcher();
MockWebServer server = MockHelper.getMockWebServer(dispatcher);
dispatcher.setServerType(MockDispatcher.ServerType.SYNC_GW);
try {
// fake _session response
MockSessionGet mockSessionGet = new MockSessionGet();
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_SESSION, mockSessionGet.generateMockResponse());
// fake _facebook response
MockFacebookAuthPost mockFacebookAuthPost = new MockFacebookAuthPost();
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_FACEBOOK_AUTH, mockFacebookAuthPost.generateMockResponse());
// start mock server
server.play();
// register a bogus Facebook token
Authenticator facebookAuthenticator = AuthenticatorFactory.createFacebookAuthenticator("fake_access_token");
// run pull replication
Replication pullReplication = database.createPullReplication(server.getUrl("/db"));
pullReplication.setAuthenticator(facebookAuthenticator);
pullReplication.setContinuous(false);
runReplication(pullReplication);
// run replicator and make sure it has an error
assertNotNull(pullReplication.getLastError());
assertTrue(pullReplication.getLastError() instanceof HttpResponseException);
assertEquals(401 /* unauthorized */, ((HttpResponseException) pullReplication.getLastError()).getStatusCode());
// assert that the replicator sent the requests we expected it to send
RecordedRequest sessionRequest = dispatcher.takeRequest(MockHelper.PATH_REGEX_SESSION);
assertNotNull(sessionRequest);
RecordedRequest facebookRequest = dispatcher.takeRequest(MockHelper.PATH_REGEX_FACEBOOK_AUTH);
assertNotNull(facebookRequest);
dispatcher.verifyAllRecordedRequestsTaken();
} finally {
server.shutdown();
}
}
public void testGetReplicatorWithCustomHeader() throws Throwable {
// create mockwebserver and custom dispatcher
MockDispatcher dispatcher = new MockDispatcher();
MockWebServer server = MockHelper.getMockWebServer(dispatcher);
dispatcher.setServerType(MockDispatcher.ServerType.SYNC_GW);
try {
// checkpoint PUT or GET response (sticky)
MockCheckpointPut mockCheckpointPut = new MockCheckpointPut();
mockCheckpointPut.setSticky(true);
mockCheckpointPut.setDelayMs(500);
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHECKPOINT, mockCheckpointPut);
server.play();
Map<String, Object> properties = new HashMap<String, Object>();
properties.put("source", DEFAULT_TEST_DB);
// target with custom headers (cookie)
Map<String, Object> headers = new HashMap<String, Object>();
String cookieVal = "SyncGatewaySession=c38687c2696688a";
headers.put("Cookie", cookieVal);
Map<String, Object> targetProperties = new HashMap<String, Object>();
targetProperties.put("url", server.getUrl("/db").toExternalForm());
targetProperties.put("headers", headers);
properties.put("target", targetProperties);
Replication replicator = manager.getReplicator(properties);
assertNotNull(replicator);
assertEquals(server.getUrl("/db").toExternalForm(), replicator.getRemoteUrl().toExternalForm());
assertTrue(!replicator.isPull());
assertFalse(replicator.isContinuous());
assertFalse(replicator.isRunning());
assertTrue(replicator.getHeaders().containsKey("Cookie"));
assertEquals(replicator.getHeaders().get("Cookie"), coolieVal);
// add replication observer
CountDownLatch replicationDoneSignal = new CountDownLatch(1);
ReplicationFinishedObserver replicationFinishedObserver = new ReplicationFinishedObserver(replicationDoneSignal);
replicator.addChangeListener(replicationFinishedObserver);
// start the replicator
Log.d(TAG, "Starting replicator " + replicator);
replicator.start();
final CountDownLatch replicationStarted = new CountDownLatch(1);
replicator.addChangeListener(new ReplicationActiveObserver(replicationStarted));
boolean success = replicationStarted.await(30, TimeUnit.SECONDS);
assertTrue(success);
// now let's look up the existing replicator and stop it
Log.d(TAG, "Looking up replicator");
properties.put("cancel", true);
Replication activeReplicator = manager.getReplicator(properties);
Log.d(TAG, "Found replicator " + activeReplicator + " and calling stop()");
activeReplicator.stop();
Log.d(TAG, "called stop(), waiting for it to finish");
// wait for replication to finish
boolean didNotTimeOut = replicationDoneSignal.await(180, TimeUnit.SECONDS);
Log.d(TAG, "replicationDoneSignal.await done, didNotTimeOut: " + didNotTimeOut);
assertTrue(didNotTimeOut);
assertFalse(activeReplicator.isRunning());
} finally {
server.shutdown();
}
}
public void testGetReplicator() throws Throwable {
// create mockwebserver and custom dispatcher
MockDispatcher dispatcher = new MockDispatcher();
MockWebServer server = MockHelper.getMockWebServer(dispatcher);
dispatcher.setServerType(MockDispatcher.ServerType.SYNC_GW);
try {
// checkpoint PUT or GET response (sticky)
MockCheckpointPut mockCheckpointPut = new MockCheckpointPut();
mockCheckpointPut.setSticky(true);
mockCheckpointPut.setDelayMs(500);
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHECKPOINT, mockCheckpointPut);
server.play();
Map<String, Object> properties = new HashMap<String, Object>();
properties.put("source", DEFAULT_TEST_DB);
properties.put("target", server.getUrl("/db").toExternalForm());
Replication replicator = manager.getReplicator(properties);
assertNotNull(replicator);
assertEquals(server.getUrl("/db").toExternalForm(), replicator.getRemoteUrl().toExternalForm());
assertTrue(!replicator.isPull());
assertFalse(replicator.isContinuous());
assertFalse(replicator.isRunning());
// add replication observer
CountDownLatch replicationDoneSignal = new CountDownLatch(1);
ReplicationFinishedObserver replicationFinishedObserver = new ReplicationFinishedObserver(replicationDoneSignal);
replicator.addChangeListener(replicationFinishedObserver);
// start the replicator
Log.d(TAG, "Starting replicator " + replicator);
replicator.start();
final CountDownLatch replicationStarted = new CountDownLatch(1);
replicator.addChangeListener(new ReplicationActiveObserver(replicationStarted));
boolean success = replicationStarted.await(30, TimeUnit.SECONDS);
assertTrue(success);
// now let's look up the existing replicator and stop it
Log.d(TAG, "Looking up replicator");
properties.put("cancel", true);
Replication activeReplicator = manager.getReplicator(properties);
Log.d(TAG, "Found replicator " + activeReplicator + " and calling stop()");
activeReplicator.stop();
Log.d(TAG, "called stop(), waiting for it to finish");
// wait for replication to finish
boolean didNotTimeOut = replicationDoneSignal.await(180, TimeUnit.SECONDS);
Log.d(TAG, "replicationDoneSignal.await done, didNotTimeOut: " + didNotTimeOut);
assertTrue(didNotTimeOut);
assertFalse(activeReplicator.isRunning());
} finally {
server.shutdown();
}
}
public void testGetReplicatorWithAuth() throws Throwable {
Map<String, Object> authProperties = getReplicationAuthParsedJson();
Map<String, Object> targetProperties = new HashMap<String, Object>();
targetProperties.put("url", getReplicationURL().toExternalForm());
targetProperties.put("auth", authProperties);
Map<String, Object> properties = new HashMap<String, Object>();
properties.put("source", DEFAULT_TEST_DB);
properties.put("target", targetProperties);
Replication replicator = manager.getReplicator(properties);
assertNotNull(replicator);
assertNotNull(replicator.getAuthenticator());
assertTrue(replicator.getAuthenticator() instanceof FacebookAuthorizer);
}
/**
* When the server returns a 409 error to a PUT checkpoint response, make
* sure it does the right thing:
* - Pull latest remote checkpoint
* - Try to push checkpoint again (this time passing latest rev)
*
* @throws Exception
*/
public void testPutCheckpoint409Recovery() throws Exception {
// create mockwebserver and custom dispatcher
MockDispatcher dispatcher = new MockDispatcher();
MockWebServer server = MockHelper.getMockWebServer(dispatcher);
dispatcher.setServerType(MockDispatcher.ServerType.SYNC_GW);
try {
// mock documents to be pulled
MockDocumentGet.MockDocument mockDoc1 = new MockDocumentGet.MockDocument("doc1", "1-5e38", 1);
mockDoc1.setJsonMap(MockHelper.generateRandomJsonMap());
// checkpoint GET response w/ 404
MockResponse fakeCheckpointResponse = new MockResponse();
MockHelper.set404NotFoundJson(fakeCheckpointResponse);
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHECKPOINT, fakeCheckpointResponse);
// _changes response
MockChangesFeed mockChangesFeed = new MockChangesFeed();
mockChangesFeed.add(new MockChangesFeed.MockChangedDoc(mockDoc1));
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHANGES, mockChangesFeed.generateMockResponse());
// doc1 response
MockDocumentGet mockDocumentGet = new MockDocumentGet(mockDoc1);
dispatcher.enqueueResponse(mockDoc1.getDocPathRegex(), mockDocumentGet.generateMockResponse());
// respond with 409 error to mock checkpoint PUT
MockResponse checkpointResponse409 = new MockResponse();
checkpointResponse409.setStatus("HTTP/1.1 409 CONFLICT");
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHECKPOINT, checkpointResponse409);
// the replicator should then try to do a checkpoint GET, and in this case
// it should return a value with a rev id
MockCheckpointGet mockCheckpointGet = new MockCheckpointGet();
mockCheckpointGet.setOk("true");
mockCheckpointGet.setRev("0-1");
mockCheckpointGet.setLastSequence("0");
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHECKPOINT, mockCheckpointGet);
// the replicator should then try a checkpoint PUT again
// and we should respond with a 201
MockCheckpointPut mockCheckpointPut = new MockCheckpointPut();
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHECKPOINT, mockCheckpointPut);
// start mock server
server.play();
// run pull replication
Replication pullReplication = database.createPullReplication(server.getUrl("/db"));
// I had to set this to continuous, because in a one-shot replication it tries to
// save the checkpoint asynchronously as the replicator is shutting down, which
// breaks the retry logic in the case a 409 conflict is returned by server.
pullReplication.setContinuous(true);
pullReplication.start();
// we should have gotten two requests to PATH_REGEX_CHECKPOINT:
// PUT -> 409 Conflict
// PUT -> 201 Created
for (int i = 1; i <= 2; i++) {
Log.v(TAG, "waiting for PUT checkpoint: %d", i);
waitForPutCheckpointRequestWithSeq(dispatcher, mockDoc1.getDocSeq());
Log.d(TAG, "got PUT checkpoint: %d", i);
}
stopReplication(pullReplication);
} finally {
server.shutdown();
}
}
/**
* Verify that a validation-based reject does not revert the entire batch that the
* rejected document is in, even if one of the documents fails the validation.
* <p/>
* https://github.com/couchbase/couchbase-lite-java-core/issues/242
*
* @throws Exception
*/
public void testVerifyPullerInsertsDocsWithValidation() throws Exception {
// create mockwebserver and custom dispatcher
MockDispatcher dispatcher = new MockDispatcher();
MockWebServer server = MockHelper.getPreloadedPullTargetMockCouchDB(dispatcher, 2, 2);
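// presumably the helper preloads the mock CouchDB with 2 docs (doc0, doc1); the exact
// meaning of the (2, 2) arguments is assumed from the assertions on doc0/doc1 below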
try {
server.play();
// Setup validation to reject document with id: doc1
database.setValidation("validateOnlyDoc1", new Validator() {
@Override
public void validate(Revision newRevision, ValidationContext context) {
if ("doc1".equals(newRevision.getDocument().getId())) {
context.reject();
}
}
});
// run pull replication
Replication pullReplication = database.createPullReplication(server.getUrl("/db"));
runReplication(pullReplication);
assertNotNull(database);
// doc1 should not be in the store because of validation
assertNull(database.getExistingDocument("doc1"));
// doc0 should be in the store; before the fix for issue #242 it was missing because the whole batch was reverted.
assertNotNull(database.getExistingDocument("doc0"));
} finally {
server.shutdown();
}
}
/**
* Make sure calling puller.setChannels() causes the change tracker to send the correct
* request to the sync gateway.
* <p/>
* https://github.com/couchbase/couchbase-lite-java-core/issues/292
*/
public void testChannelsFilter() throws Exception {
// create mockwebserver and custom dispatcher
MockDispatcher dispatcher = new MockDispatcher();
MockWebServer server = MockHelper.getMockWebServer(dispatcher);
dispatcher.setServerType(MockDispatcher.ServerType.SYNC_GW);
try {
// checkpoint PUT or GET response (sticky)
MockCheckpointPut mockCheckpointPut = new MockCheckpointPut();
mockCheckpointPut.setSticky(true);
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHECKPOINT, mockCheckpointPut);
// _changes response
MockChangesFeed mockChangesFeed = new MockChangesFeed();
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHANGES, mockChangesFeed.generateMockResponse());
// start mock server
server.play();
// run pull replication
Replication pullReplication = database.createPullReplication(server.getUrl("/db"));
pullReplication.setChannels(Arrays.asList("foo", "bar"));
runReplication(pullReplication);
// make assertions about outgoing requests from replicator -> mock
RecordedRequest getChangesFeedRequest = dispatcher.takeRequest(MockHelper.PATH_REGEX_CHANGES);
assertTrue(getChangesFeedRequest.getMethod().equals("POST"));
String body = getChangesFeedRequest.getUtf8Body();
Map<String, Object> jsonMap = Manager.getObjectMapper().readValue(body, Map.class);
assertTrue(jsonMap.containsKey("filter"));
String filter = (String) jsonMap.get("filter");
assertEquals("sync_gateway/bychannel", filter);
assertTrue(jsonMap.containsKey("channels"));
String channels = (String) jsonMap.get("channels");
assertTrue(channels.contains("foo"));
assertTrue(channels.contains("bar"));
} finally {
server.shutdown();
}
}
/**
* - Start continuous pull
* - Mockwebserver responds that there are no changes
* - Assert that puller goes into IDLE state
* <p/>
* https://github.com/couchbase/couchbase-lite-android/issues/445
*/
public void testContinuousPullEntersIdleState() throws Exception {
// create mockwebserver and custom dispatcher
MockDispatcher dispatcher = new MockDispatcher();
MockWebServer server = MockHelper.getMockWebServer(dispatcher);
dispatcher.setServerType(MockDispatcher.ServerType.SYNC_GW);
try {
// checkpoint GET response w/ 404
MockResponse fakeCheckpointResponse = new MockResponse();
MockHelper.set404NotFoundJson(fakeCheckpointResponse);
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHECKPOINT, fakeCheckpointResponse);
// add non-sticky changes response that returns no changes
MockChangesFeed mockChangesFeed = new MockChangesFeed();
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHANGES, mockChangesFeed.generateMockResponse());
// add sticky _changes response that just blocks for 60 seconds to emulate
// server that doesn't have any new changes
MockChangesFeedNoResponse mockChangesFeedNoResponse = new MockChangesFeedNoResponse();
mockChangesFeedNoResponse.setDelayMs(60 * 1000);
mockChangesFeedNoResponse.setSticky(true);
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHANGES, mockChangesFeedNoResponse);
server.play();
// create pull replication
Replication pullReplication = database.createPullReplication(server.getUrl("/db"));
pullReplication.setContinuous(true);
final CountDownLatch enteredIdleState = new CountDownLatch(1);
pullReplication.addChangeListener(new Replication.ChangeListener() {
@Override
public void changed(Replication.ChangeEvent event) {
if (event.getSource().getStatus() == Replication.ReplicationStatus.REPLICATION_IDLE) {
enteredIdleState.countDown();
}
}
});
// start pull replication
pullReplication.start();
boolean success = enteredIdleState.await(30, TimeUnit.SECONDS);
assertTrue(success);
Log.d(TAG, "Got IDLE event, stopping replication");
stopReplication(pullReplication);
} finally {
server.shutdown();
}
}
/**
* Spotted in https://github.com/couchbase/couchbase-lite-java-core/issues/313
* But there is another ticket that is linked off 313
*/
public void failingTestMockPullBulkDocsSyncGw() throws Exception {
mockPullBulkDocs(MockDispatcher.ServerType.SYNC_GW);
}
public void mockPullBulkDocs(MockDispatcher.ServerType serverType) throws Exception {
// set INBOX_CAPACITY to a smaller value so that processing times don't skew the test
int defaultCapacity = ReplicationInternal.INBOX_CAPACITY;
ReplicationInternal.INBOX_CAPACITY = 10;
int defaultDelay = ReplicationInternal.PROCESSOR_DELAY;
ReplicationInternal.PROCESSOR_DELAY = ReplicationInternal.PROCESSOR_DELAY * 10;
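// assumption: a larger PROCESSOR_DELAY lets the inbox fill to INBOX_CAPACITY before
// each flush, so most _bulk_get batches arrive full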
// serve 25 mock docs
int numMockDocsToServe = (ReplicationInternal.INBOX_CAPACITY * 2) + (ReplicationInternal.INBOX_CAPACITY / 2);
// create mockwebserver and custom dispatcher
MockDispatcher dispatcher = new MockDispatcher();
MockWebServer server = MockHelper.getMockWebServer(dispatcher);
dispatcher.setServerType(serverType);
try {
// mock documents to be pulled
List<MockDocumentGet.MockDocument> mockDocs = MockHelper.getMockDocuments(numMockDocsToServe);
// respond to all GET (responds with 404) and PUT Checkpoint requests
MockCheckpointPut mockCheckpointPut = new MockCheckpointPut();
mockCheckpointPut.setSticky(true);
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHECKPOINT, mockCheckpointPut);
// _changes response
MockChangesFeed mockChangesFeed = new MockChangesFeed();
for (MockDocumentGet.MockDocument mockDocument : mockDocs) {
mockChangesFeed.add(new MockChangesFeed.MockChangedDoc(mockDocument));
}
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHANGES, mockChangesFeed.generateMockResponse());
// individual doc responses (expecting it to use _bulk_get instead, but enqueue these just in case)
for (MockDocumentGet.MockDocument mockDocument : mockDocs) {
MockDocumentGet mockDocumentGet = new MockDocumentGet(mockDocument);
dispatcher.enqueueResponse(mockDocument.getDocPathRegex(), mockDocumentGet.generateMockResponse());
}
// _bulk_get response
MockDocumentBulkGet mockBulkGet = new MockDocumentBulkGet();
for (MockDocumentGet.MockDocument mockDocument : mockDocs) {
mockBulkGet.addDocument(mockDocument);
}
mockBulkGet.setSticky(true);
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_BULK_GET, mockBulkGet);
// start mock server
server.play();
// run pull replication
Replication pullReplication = database.createPullReplication(server.getUrl("/db"));
runReplication(pullReplication, 3 * 60);
assertTrue(pullReplication.getLastError() == null);
// wait until it pushes checkpoint of last doc
MockDocumentGet.MockDocument lastDoc = mockDocs.get(mockDocs.size() - 1);
waitForPutCheckpointRequestWithSequence(dispatcher, lastDoc.getDocSeq());
// dump out the outgoing requests for bulk docs
BlockingQueue<RecordedRequest> bulkGetRequests = dispatcher.getRequestQueueSnapshot(MockHelper.PATH_REGEX_BULK_GET);
Iterator<RecordedRequest> iterator = bulkGetRequests.iterator();
boolean first = true;
while (iterator.hasNext()) {
RecordedRequest request = iterator.next();
byte[] body = MockHelper.getUncompressedBody(request);
Map<String, Object> jsonMap = MockHelper.getJsonMapFromRequest(body);
List docs = (List) jsonMap.get("docs");
Log.w(TAG, "bulk get request: %s had %d docs", request, docs.size());
// except for the first and last requests, docs.size() should be (nearly) equal to INBOX_CAPACITY.
if (iterator.hasNext() && !first) {
// the bulk docs requests except for the last one should have max number of docs
// relax this a bit, so that it at least has to have greater than or equal to half max number of docs
assertTrue(docs.size() >= (ReplicationInternal.INBOX_CAPACITY / 2));
if (docs.size() != ReplicationInternal.INBOX_CAPACITY) {
Log.w(TAG, "docs.size() %d != ReplicationInternal.INBOX_CAPACITY %d", docs.size(), ReplicationInternal.INBOX_CAPACITY);
}
}
first = false;
}
} finally {
ReplicationInternal.INBOX_CAPACITY = defaultCapacity;
ReplicationInternal.PROCESSOR_DELAY = defaultDelay;
server.shutdown();
}
}
/**
* Make sure that after trying /db/_session, it falls back to /_session.
* <p/>
* Currently there is a bug where it tries /db/_session, and then
* tries /db_session.
* <p/>
* https://github.com/couchbase/couchbase-lite-java-core/issues/208
*/
public void testCheckSessionAtPath() throws Exception {
// create mockwebserver and custom dispatcher
MockDispatcher dispatcher = new MockDispatcher();
MockWebServer server = MockHelper.getMockWebServer(dispatcher);
dispatcher.setServerType(MockDispatcher.ServerType.COUCHDB);
try {
// session GET response w/ 404 to /db/_session
MockResponse fakeSessionResponse = new MockResponse();
MockHelper.set404NotFoundJson(fakeSessionResponse);
WrappedSmartMockResponse wrappedSmartMockResponse = new WrappedSmartMockResponse(fakeSessionResponse);
wrappedSmartMockResponse.setSticky(true);
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_SESSION, wrappedSmartMockResponse);
// session GET response w/ 200 OK to /_session
MockResponse fakeSessionResponse2 = new MockResponse();
Map<String, Object> responseJson = new HashMap<String, Object>();
Map<String, Object> userCtx = new HashMap<String, Object>();
userCtx.put("name", "foo");
responseJson.put("userCtx", userCtx);
fakeSessionResponse2.setBody(Manager.getObjectMapper().writeValueAsBytes(responseJson));
MockHelper.set200OKJson(fakeSessionResponse2);
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_SESSION_COUCHDB, fakeSessionResponse2);
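// presumably a 200 OK /_session response whose userCtx carries a non-null name is
// treated as an authenticated session by the replicator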
// respond to all GET/PUT Checkpoint requests
MockCheckpointPut mockCheckpointPut = new MockCheckpointPut();
mockCheckpointPut.setSticky(true);
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHECKPOINT, mockCheckpointPut);
// start mock server
server.play();
// run pull replication
Replication pullReplication = database.createPullReplication(server.getUrl("/db"));
pullReplication.setAuthenticator(new FacebookAuthorizer("[email protected]"));
CountDownLatch replicationDoneSignal = new CountDownLatch(1);
ReplicationFinishedObserver replicationFinishedObserver = new ReplicationFinishedObserver(replicationDoneSignal);
pullReplication.addChangeListener(replicationFinishedObserver);
pullReplication.start();
// it should first try /db/_session
dispatcher.takeRequestBlocking(MockHelper.PATH_REGEX_SESSION);
// and then it should fallback to /_session
dispatcher.takeRequestBlocking(MockHelper.PATH_REGEX_SESSION_COUCHDB);
boolean success = replicationDoneSignal.await(30, TimeUnit.SECONDS);
Assert.assertTrue(success);
} finally {
server.shutdown();
}
}
/**
* - Start one shot replication
* - Changes feed request returns error
* - Change tracker stops
* - Replication stops -- make sure ChangeListener gets error
* <p/>
* https://github.com/couchbase/couchbase-lite-java-core/issues/334
*/
public void testChangeTrackerError() throws Exception {
// create mockwebserver and custom dispatcher
MockDispatcher dispatcher = new MockDispatcher();
MockWebServer server = MockHelper.getMockWebServer(dispatcher);
dispatcher.setServerType(MockDispatcher.ServerType.SYNC_GW);
try {
// checkpoint GET response w/ 404 + respond to all PUT Checkpoint requests
MockCheckpointPut mockCheckpointPut = new MockCheckpointPut();
mockCheckpointPut.setSticky(true);
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHECKPOINT, mockCheckpointPut);
// 404 response to _changes feed (sticky)
MockResponse mockChangesFeed = new MockResponse();
MockHelper.set404NotFoundJson(mockChangesFeed);
WrappedSmartMockResponse wrapped = new WrappedSmartMockResponse(mockChangesFeed);
wrapped.setSticky(true);
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHANGES, wrapped);
// start mock server
server.play();
// run pull replication
Replication pullReplication = database.createPullReplication(server.getUrl("/db"));
final CountDownLatch changeEventError = new CountDownLatch(1);
pullReplication.addChangeListener(new Replication.ChangeListener() {
@Override
public void changed(Replication.ChangeEvent event) {
if (event.getError() != null) {
changeEventError.countDown();
}
}
});
runReplication(pullReplication);
Assert.assertTrue(pullReplication.getLastError() != null);
boolean success = changeEventError.await(5, TimeUnit.SECONDS);
Assert.assertTrue(success);
} finally {
server.shutdown();
}
}
/**
* https://github.com/couchbase/couchbase-lite-java-core/issues/358
*
* related: https://github.com/couchbase/couchbase-lite-java-core/issues/55
* related: testContinuousPushReplicationGoesIdle()
* <p/>
* test steps:
* - start replicator
* - make sure replicator becomes idle state
* - add N docs
* - when callback state == idle
* - assert that mock has received N docs
*/
public void testContinuousPushReplicationGoesIdleTwice() throws Exception {
// /_local/*
// /_revs_diff
// /_bulk_docs
// /_local/*
final int EXPECTED_REQUEST_COUNT = 4;
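// i.e. one request for each of the four endpoints listed above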
// make sure we are starting empty
assertEquals(0, database.getLastSequenceNumber());
// 1. Setup MockWebServer
// create mockwebserver and custom dispatcher
MockDispatcher dispatcher = new MockDispatcher();
final MockWebServer server = MockHelper.getMockWebServer(dispatcher);
dispatcher.setServerType(MockDispatcher.ServerType.SYNC_GW);
try {
// checkpoint GET response w/ 404. also receives checkpoint PUT's
MockCheckpointPut mockCheckpointPut = new MockCheckpointPut();
mockCheckpointPut.setSticky(true);
mockCheckpointPut.setDelayMs(500);
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHECKPOINT, mockCheckpointPut);
// _revs_diff response -- everything missing
MockRevsDiff mockRevsDiff = new MockRevsDiff();
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_REVS_DIFF, mockRevsDiff);
// _bulk_docs response -- everything stored
MockBulkDocs mockBulkDocs = new MockBulkDocs();
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_BULK_DOCS, mockBulkDocs);
server.play();
// 2. Create replication
Replication replication = database.createPushReplication(server.getUrl("/db"));
replication.setContinuous(true);
CountDownLatch replicationIdle = new CountDownLatch(1);
ReplicationIdleObserver idleObserver = new ReplicationIdleObserver(replicationIdle);
replication.addChangeListener(idleObserver);
replication.start();
// 3. Wait until idle (make sure replicator becomes IDLE state)
boolean success = replicationIdle.await(30, TimeUnit.SECONDS);
assertTrue(success);
replication.removeChangeListener(idleObserver);
// 4. make sure /_local was called by the replicator after start and before going idle
RecordedRequest request1 = dispatcher.takeRequestBlocking(MockHelper.PATH_REGEX_CHECKPOINT);
assertNotNull(request1);
dispatcher.takeRecordedResponseBlocking(request1);
assertEquals(1, server.getRequestCount());
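// presumably only the initial checkpoint GET has reached the server at this point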
// 5. Add replication change listener for transition to IDLE
class ReplicationTransitionToIdleObserver implements Replication.ChangeListener {
private CountDownLatch doneSignal;
private CountDownLatch checkSignal;
public ReplicationTransitionToIdleObserver(CountDownLatch doneSignal, CountDownLatch checkSignal) {
this.doneSignal = doneSignal;
this.checkSignal = checkSignal;
}
public void changed(Replication.ChangeEvent event) {
Log.w(Log.TAG_SYNC, "[ChangeListener.changed()] event => " + event.toString());
if (event.getTransition() != null) {
if (event.getTransition().getSource() != event.getTransition().getDestination() &&
event.getTransition().getDestination() == ReplicationState.IDLE) {
Log.w(Log.TAG_SYNC, "[ChangeListener.changed()] Transition to IDLE");
Log.w(Log.TAG_SYNC, "[ChangeListener.changed()] Request Count => " + server.getRequestCount());
this.doneSignal.countDown();
// When the replicator enters the IDLE state, check whether all requests have completed.
// assertEquals in an inner class does not work here.
// Note: sometimes server.getRequestCount() returns the expected number - 1.
// Possibly a timing issue.
if (EXPECTED_REQUEST_COUNT == server.getRequestCount() ||
EXPECTED_REQUEST_COUNT - 1 == server.getRequestCount()) {
this.checkSignal.countDown();
}
}
}
}
}
CountDownLatch checkStateToIdle = new CountDownLatch(1);
CountDownLatch checkRequestCount = new CountDownLatch(1);
ReplicationTransitionToIdleObserver replicationTransitionToIdleObserver =
new ReplicationTransitionToIdleObserver(checkStateToIdle, checkRequestCount);
replication.addChangeListener(replicationTransitionToIdleObserver);
Log.w(Log.TAG_SYNC, "Added listener for transition to IDLE");
// 6. Add doc(s)
for (int i = 1; i <= 1; i++) {
Map<String, Object> properties1 = new HashMap<String, Object>();
properties1.put("doc" + String.valueOf(i), "testContinuousPushReplicationGoesIdleTooSoon " + String.valueOf(i));
final Document doc = createDocWithProperties(properties1);
}
// 7. Wait until idle (make sure replicator becomes IDLE state from other state)
// NOTE: 12/17/2014 - the current code fails here because, after adding the listener, the state never changes from IDLE.
// Fully implementing the state machine for Replication addresses this failure.
success = checkStateToIdle.await(20, TimeUnit.SECONDS); // check if state becomes IDLE from other state
assertTrue(success);
success = checkRequestCount.await(20, TimeUnit.SECONDS); // check if request count is 4 when state becomes IDLE
assertTrue(success);
// 8. Make sure some of requests are called
// _bulk_docs
RecordedRequest request3 = dispatcher.takeRequestBlocking(MockHelper.PATH_REGEX_BULK_DOCS);
assertNotNull(request3);
dispatcher.takeRecordedResponseBlocking(request3);
// double check total request
Log.w(Log.TAG_SYNC, "Total Requested Count before stop replicator => " + server.getRequestCount());
assertTrue(EXPECTED_REQUEST_COUNT == server.getRequestCount() ||
EXPECTED_REQUEST_COUNT - 1 == server.getRequestCount());
// 9. Stop replicator
replication.removeChangeListener(replicationTransitionToIdleObserver);
stopReplication(replication);
} finally {
server.shutdown();
}
}
/**
* https://github.com/couchbase/couchbase-lite-java-core/issues/358
* <p/>
* related: testContinuousPushReplicationGoesIdleTooSoon()
* testContinuousPushReplicationGoesIdle()
* <p/>
* test steps:
* - add N docs
* - start replicator
* - when callback state == idle
* - assert that mock has received N docs
*/
public void failingTestContinuousPushReplicationGoesIdleTooSoon() throws Exception {
// smaller batch size so there are multiple requests to _bulk_docs
int previous = ReplicationInternal.INBOX_CAPACITY;
ReplicationInternal.INBOX_CAPACITY = 5;
int numDocs = ReplicationInternal.INBOX_CAPACITY * 5;
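// 25 docs at a capacity of 5 should yield roughly 5 _bulk_docs batches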
// make sure we are starting empty
assertEquals(0, database.getLastSequenceNumber());
// Add doc(s)
// NOTE: more documents cause more HTTP calls; there could be more than 4 of them...
for (int i = 1; i <= numDocs; i++) {
Map<String, Object> properties = new HashMap<String, Object>();
properties.put("doc" + String.valueOf(i), "testContinuousPushReplicationGoesIdleTooSoon " + String.valueOf(i));
final Document doc = createDocWithProperties(properties);
}
// Setup MockWebServer
// create mockwebserver and custom dispatcher
MockDispatcher dispatcher = new MockDispatcher();
final MockWebServer server = MockHelper.getMockWebServer(dispatcher);
dispatcher.setServerType(MockDispatcher.ServerType.SYNC_GW);
try {
// checkpoint GET response w/ 404. also receives checkpoint PUT's
MockCheckpointPut mockCheckpointPut = new MockCheckpointPut();
mockCheckpointPut.setSticky(true);
mockCheckpointPut.setDelayMs(500);
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHECKPOINT, mockCheckpointPut);
// _revs_diff response -- everything missing
MockRevsDiff mockRevsDiff = new MockRevsDiff();
mockRevsDiff.setSticky(true);
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_REVS_DIFF, mockRevsDiff);
// _bulk_docs response -- everything stored
MockBulkDocs mockBulkDocs = new MockBulkDocs();
mockBulkDocs.setSticky(true);
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_BULK_DOCS, mockBulkDocs);
server.play();
// Create replicator
Replication replication = database.createPushReplication(server.getUrl("/db"));
replication.setContinuous(true);
// special change listener for this test case.
class ReplicationTransitionToIdleObserver implements Replication.ChangeListener {
private CountDownLatch enterIdleStateSignal;
public ReplicationTransitionToIdleObserver(CountDownLatch enterIdleStateSignal) {
this.enterIdleStateSignal = enterIdleStateSignal;
}
public void changed(Replication.ChangeEvent event) {
Log.w(Log.TAG_SYNC, "[ChangeListener.changed()] event => " + event.toString());
if (event.getTransition() != null) {
if (event.getTransition().getSource() != event.getTransition().getDestination() &&
event.getTransition().getDestination() == ReplicationState.IDLE) {
Log.w(Log.TAG_SYNC, "[ChangeListener.changed()] Transition to IDLE");
Log.w(Log.TAG_SYNC, "[ChangeListener.changed()] Request Count => " + server.getRequestCount());
this.enterIdleStateSignal.countDown();
}
}
}
}
CountDownLatch enterIdleStateSignal = new CountDownLatch(1);
ReplicationTransitionToIdleObserver replicationTransitionToIdleObserver = new ReplicationTransitionToIdleObserver(enterIdleStateSignal);
replication.addChangeListener(replicationTransitionToIdleObserver);
replication.start();
// Wait until idle (make sure replicator becomes IDLE state from other state)
boolean success = enterIdleStateSignal.await(20, TimeUnit.SECONDS);
assertTrue(success);
// Once the replicator is idle, get a snapshot of all the requests it has made to the _bulk_docs endpoint
int numDocsPushed = 0;
BlockingQueue<RecordedRequest> requests = dispatcher.getRequestQueueSnapshot(MockHelper.PATH_REGEX_BULK_DOCS);
for (RecordedRequest request : requests) {
Log.i(Log.TAG_SYNC, "request: %s", request);
byte[] body = MockHelper.getUncompressedBody(request);
Map<String, Object> jsonMap = MockHelper.getJsonMapFromRequest(body);
List docs = (List) jsonMap.get("docs");
numDocsPushed += docs.size();
}
// WORKAROUND: the CBL Java unit test occasionally fails on the following assertion on Jenkins.
// TODO: Need to fix: https://github.com/couchbase/couchbase-lite-java-core/issues/446
// It seems a threading issue exists: the replicator becomes IDLE even while tasks remain in the batcher.
if (System.getProperty("java.vm.name").equalsIgnoreCase("Dalvik")) {
// Assert that all docs have already been pushed by the time it goes IDLE
assertEquals(numDocs, numDocsPushed);
}
// Stop replicator and MockWebServer
stopReplication(replication);
// wait until checkpoint is pushed, since it can happen _after_ replication is finished.
// if this isn't done, there can be IOExceptions when calling server.shutdown()
waitForPutCheckpointRequestWithSeq(dispatcher, (int) database.getLastSequenceNumber());
} finally {
server.shutdown();
ReplicationInternal.INBOX_CAPACITY = previous;
}
}
/**
* https://github.com/couchbase/couchbase-lite-java-core/issues/352
* <p/>
* When retrying a replication, make sure to get session & checkpoint.
*/
public void testCheckSessionAndCheckpointWhenRetryingReplication() throws Exception {
int prev_RETRY_DELAY_MS = RemoteRequestRetry.RETRY_DELAY_MS;
int prev_RETRY_DELAY_SECONDS = ReplicationInternal.RETRY_DELAY_SECONDS;
int prev_MAX_RETRIES = ReplicationInternal.MAX_RETRIES;
try {
RemoteRequestRetry.RETRY_DELAY_MS = 5; // speed up test execution (inner loop retry delay)
ReplicationInternal.RETRY_DELAY_SECONDS = 1; // speed up test execution (outer loop retry delay)
ReplicationInternal.MAX_RETRIES = 3; // speed up test execution (outer loop retry count)
String fakeEmail = "[email protected]";
// create mockwebserver and custom dispatcher
MockDispatcher dispatcher = new MockDispatcher();
MockWebServer server = MockHelper.getMockWebServer(dispatcher);
dispatcher.setServerType(MockDispatcher.ServerType.SYNC_GW);
try {
// set up request
{
// response for /db/_session
MockSessionGet mockSessionGet = new MockSessionGet();
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_SESSION, mockSessionGet.generateMockResponse());
// response for /db/_facebook
MockFacebookAuthPost mockFacebookAuthPost = new MockFacebookAuthPost();
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_FACEBOOK_AUTH, mockFacebookAuthPost.generateMockResponseForSuccess(fakeEmail));
// response for /db/_local/.*
MockCheckpointPut mockCheckpointPut = new MockCheckpointPut();
mockCheckpointPut.setSticky(true);
mockCheckpointPut.setDelayMs(500);
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHECKPOINT, mockCheckpointPut);
// response for /db/_revs_diff
MockRevsDiff mockRevsDiff = new MockRevsDiff();
mockRevsDiff.setSticky(true);
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_REVS_DIFF, mockRevsDiff);
// response for /db/_bulk_docs -- 503 errors
MockResponse mockResponse = new MockResponse().setResponseCode(503);
WrappedSmartMockResponse mockBulkDocs = new WrappedSmartMockResponse(mockResponse, false);
mockBulkDocs.setSticky(true);
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_BULK_DOCS, mockBulkDocs);
}
server.play();
// register bogus fb token
Authenticator facebookAuthenticator = AuthenticatorFactory.createFacebookAuthenticator("fake_access_token");
// create replication
Replication replication = database.createPushReplication(server.getUrl("/db"));
replication.setAuthenticator(facebookAuthenticator);
replication.setContinuous(true);
CountDownLatch replicationIdle = new CountDownLatch(1);
ReplicationIdleObserver idleObserver = new ReplicationIdleObserver(replicationIdle);
replication.addChangeListener(idleObserver);
replication.start();
// wait until idle
boolean success = replicationIdle.await(30, TimeUnit.SECONDS);
assertTrue(success);
replication.removeChangeListener(idleObserver);
// create a doc in local db
Document doc1 = createDocumentForPushReplication("doc1", null, null);
// initial request
{
// check /db/_session
RecordedRequest sessionRequest = dispatcher.takeRequestBlocking(MockHelper.PATH_REGEX_SESSION);
assertNotNull(sessionRequest);
dispatcher.takeRecordedResponseBlocking(sessionRequest);
// check /db/_facebook
RecordedRequest facebookSessionRequest = dispatcher.takeRequestBlocking(MockHelper.PATH_REGEX_FACEBOOK_AUTH);
assertNotNull(facebookSessionRequest);
dispatcher.takeRecordedResponseBlocking(facebookSessionRequest);
// check /db/_local/.*
RecordedRequest checkPointRequest = dispatcher.takeRequestBlocking(MockHelper.PATH_REGEX_CHECKPOINT);
assertNotNull(checkPointRequest);
dispatcher.takeRecordedResponseBlocking(checkPointRequest);
// check /db/_revs_diff
RecordedRequest revsDiffRequest = dispatcher.takeRequestBlocking(MockHelper.PATH_REGEX_REVS_DIFF);
assertNotNull(revsDiffRequest);
dispatcher.takeRecordedResponseBlocking(revsDiffRequest);
// we should expect to see at least numAttempts POST requests to _bulk_docs
// 1st outer attempt
// numAttempts is the number of tries within one outer attempt.
int numAttempts = RemoteRequestRetry.MAX_RETRIES + 1; // total number of attempts = 4 (1 initial + MAX_RETRIES)
for (int i = 0; i < numAttempts; i++) {
RecordedRequest request = dispatcher.takeRequestBlocking(MockHelper.PATH_REGEX_BULK_DOCS);
assertNotNull(request);
dispatcher.takeRecordedResponseBlocking(request);
}
}
// Testing the following requires fixing #299 (improve retry behavior)
// Retry requests
// outer retry loop
for (int j = 0; j < ReplicationInternal.MAX_RETRIES; j++) {
// MockSessionGet does not support isSticky
MockSessionGet mockSessionGet = new MockSessionGet();
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_SESSION, mockSessionGet.generateMockResponse());
// MockFacebookAuthPost does not support isSticky
MockFacebookAuthPost mockFacebookAuthPost = new MockFacebookAuthPost();
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_FACEBOOK_AUTH, mockFacebookAuthPost.generateMockResponseForSuccess(fakeEmail));
// *** Retry must include session & checkpoint ***
// check /db/_session
RecordedRequest sessionRequest = dispatcher.takeRequestBlocking(MockHelper.PATH_REGEX_SESSION);
assertNotNull(sessionRequest);
dispatcher.takeRecordedResponseBlocking(sessionRequest);
// check /db/_facebook
RecordedRequest facebookSessionRequest = dispatcher.takeRequestBlocking(MockHelper.PATH_REGEX_FACEBOOK_AUTH);
assertNotNull(facebookSessionRequest);
dispatcher.takeRecordedResponseBlocking(facebookSessionRequest);
// check /db/_local/.*
RecordedRequest checkPointRequest = dispatcher.takeRequestBlocking(MockHelper.PATH_REGEX_CHECKPOINT);
assertNotNull(checkPointRequest);
dispatcher.takeRecordedResponseBlocking(checkPointRequest);
// check /db/_revs_diff
RecordedRequest revsDiffRequest = dispatcher.takeRequestBlocking(MockHelper.PATH_REGEX_REVS_DIFF);
assertNotNull(revsDiffRequest);
dispatcher.takeRecordedResponseBlocking(revsDiffRequest);
// we should expect to see at least numAttempts POST requests to _bulk_docs
// one outer retry attempt
// numAttempts is the number of tries within one outer attempt.
int numAttempts = RemoteRequestRetry.MAX_RETRIES + 1; // total number of attempts = 4 (1 initial + MAX_RETRIES)
for (int i = 0; i < numAttempts; i++) {
RecordedRequest request = dispatcher.takeRequestBlocking(MockHelper.PATH_REGEX_BULK_DOCS);
assertNotNull(request);
dispatcher.takeRecordedResponseBlocking(request);
}
}
stopReplication(replication);
} finally {
server.shutdown();
}
} finally {
RemoteRequestRetry.RETRY_DELAY_MS = prev_RETRY_DELAY_MS;
ReplicationInternal.RETRY_DELAY_SECONDS = prev_RETRY_DELAY_SECONDS;
ReplicationInternal.MAX_RETRIES = prev_MAX_RETRIES;
}
}
/**
* https://github.com/couchbase/couchbase-lite-java-core/issues/352
* <p/>
* Makes the replicator stop, even if it is continuous, when it receives a permanent-type error
*/
public void failingTestStopReplicatorWhenRetryingReplicationWithPermanentError() throws Exception {
RemoteRequestRetry.RETRY_DELAY_MS = 5; // speed up test execution (inner loop retry delay)
ReplicationInternal.RETRY_DELAY_SECONDS = 1; // speed up test execution (outer loop retry delay)
ReplicationInternal.MAX_RETRIES = 3; // speed up test execution (outer loop retry count)
// create mockwebserver and custom dispatcher
MockDispatcher dispatcher = new MockDispatcher();
MockWebServer server = MockHelper.getMockWebServer(dispatcher);
dispatcher.setServerType(MockDispatcher.ServerType.SYNC_GW);
// set up request
{
// response for /db/_local/.*
MockCheckpointPut mockCheckpointPut = new MockCheckpointPut();
mockCheckpointPut.setSticky(true);
mockCheckpointPut.setDelayMs(500);
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHECKPOINT, mockCheckpointPut);
// response for /db/_revs_diff
MockRevsDiff mockRevsDiff = new MockRevsDiff();
mockRevsDiff.setSticky(true);
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_REVS_DIFF, mockRevsDiff);
// response for /db/_bulk_docs -- 400 Bad Request (not transient error)
MockResponse mockResponse = new MockResponse().setResponseCode(400);
WrappedSmartMockResponse mockBulkDocs = new WrappedSmartMockResponse(mockResponse, false);
mockBulkDocs.setSticky(true);
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_BULK_DOCS, mockBulkDocs);
}
server.play();
// create replication
Replication replication = database.createPushReplication(server.getUrl("/db"));
replication.setContinuous(true);
// add replication observer for IDLE state
CountDownLatch replicationIdle = new CountDownLatch(1);
ReplicationIdleObserver idleObserver = new ReplicationIdleObserver(replicationIdle);
replication.addChangeListener(idleObserver);
// add replication observer for finished
CountDownLatch replicationDoneSignal = new CountDownLatch(1);
ReplicationFinishedObserver replicationFinishedObserver = new ReplicationFinishedObserver(replicationDoneSignal);
replication.addChangeListener(replicationFinishedObserver);
replication.start();
// wait until idle
boolean success = replicationIdle.await(30, TimeUnit.SECONDS);
assertTrue(success);
replication.removeChangeListener(idleObserver);
// create a doc in local db
Document doc1 = createDocumentForPushReplication("doc1", null, null);
// initial request
{
// check /db/_local/.*
RecordedRequest checkPointRequest = dispatcher.takeRequestBlocking(MockHelper.PATH_REGEX_CHECKPOINT);
assertNotNull(checkPointRequest);
dispatcher.takeRecordedResponseBlocking(checkPointRequest);
// check /db/_revs_diff
RecordedRequest revsDiffRequest = dispatcher.takeRequestBlocking(MockHelper.PATH_REGEX_REVS_DIFF);
assertNotNull(revsDiffRequest);
dispatcher.takeRecordedResponseBlocking(revsDiffRequest);
// we should observe only one POST to _bulk_docs because the error is not transient
RecordedRequest request = dispatcher.takeRequestBlocking(MockHelper.PATH_REGEX_BULK_DOCS);
assertNotNull(request);
dispatcher.takeRecordedResponseBlocking(request);
}
// Without the fix for CBL Java Core #352, the following code hangs.
// wait for replication to finish
boolean didNotTimeOut = replicationDoneSignal.await(180, TimeUnit.SECONDS);
Log.d(TAG, "replicationDoneSignal.await done, didNotTimeOut: " + didNotTimeOut);
assertFalse(replication.isRunning());
server.shutdown();
}
/**
* https://github.com/couchbase/couchbase-lite-java-core/issues/356
*/
public void testReplicationRestartPreservesValues() throws Exception {
// make sure we are starting empty
assertEquals(0, database.getLastSequenceNumber());
// add docs
Map<String, Object> properties1 = new HashMap<String, Object>();
properties1.put("doc1", "testContinuousPushReplicationGoesIdle");
final Document doc1 = createDocWithProperties(properties1);
// create mockwebserver and custom dispatcher
MockDispatcher dispatcher = new MockDispatcher();
MockWebServer server = MockHelper.getMockWebServer(dispatcher);
dispatcher.setServerType(MockDispatcher.ServerType.SYNC_GW);
try {
server.play();
// checkpoint GET response w/ 404. also receives checkpoint PUT's
MockCheckpointPut mockCheckpointPut = new MockCheckpointPut();
mockCheckpointPut.setSticky(true);
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHECKPOINT, mockCheckpointPut);
// _revs_diff response -- everything missing
MockRevsDiff mockRevsDiff = new MockRevsDiff();
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_REVS_DIFF, mockRevsDiff);
// _bulk_docs response -- everything stored
MockBulkDocs mockBulkDocs = new MockBulkDocs();
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_BULK_DOCS, mockBulkDocs);
// create continuous replication
Replication pusher = database.createPushReplication(server.getUrl("/db"));
pusher.setContinuous(true);
// add filter properties to the replicator
String filterName = "app/clientIdAndTablesSchemeDocIdFilter";
pusher.setFilter(filterName);
Map<String, Object> filterParams = new HashMap<String, Object>();
String filterParam = "tablesSchemeDocId";
String filterVal = "foo";
filterParams.put(filterParam, filterVal);
pusher.setFilterParams(filterParams);
// doc ids
pusher.setDocIds(Arrays.asList(doc1.getId()));
// custom authenticator
BasicAuthenticator authenticator = new BasicAuthenticator("foo", "bar");
pusher.setAuthenticator(authenticator);
// custom request headers
Map<String, Object> requestHeaders = new HashMap<String, Object>();
requestHeaders.put("foo", "bar");
pusher.setHeaders(requestHeaders);
// create target
pusher.setCreateTarget(true);
// start the continuous replication
CountDownLatch replicationIdleSignal = new CountDownLatch(1);
ReplicationIdleObserver replicationIdleObserver = new ReplicationIdleObserver(replicationIdleSignal);
pusher.addChangeListener(replicationIdleObserver);
pusher.start();
// wait until we get an IDLE event
boolean successful = replicationIdleSignal.await(30, TimeUnit.SECONDS);
assertTrue(successful);
// restart the replication
CountDownLatch replicationIdleSignal2 = new CountDownLatch(1);
ReplicationIdleObserver replicationIdleObserver2 = new ReplicationIdleObserver(replicationIdleSignal2);
pusher.addChangeListener(replicationIdleObserver2);
pusher.restart();
// wait until we get another IDLE event
successful = replicationIdleSignal2.await(30, TimeUnit.SECONDS);
assertTrue(successful);
// verify the restarted replication still has the values we set up earlier
assertEquals(filterName, pusher.getFilter());
assertTrue(pusher.getFilterParams().size() == 1);
assertEquals(filterVal, pusher.getFilterParams().get(filterParam));
assertTrue(pusher.isContinuous());
assertEquals(Arrays.asList(doc1.getId()), pusher.getDocIds());
assertEquals(authenticator, pusher.getAuthenticator());
assertEquals(requestHeaders, pusher.getHeaders());
assertTrue(pusher.shouldCreateTarget());
} finally {
server.shutdown();
}
}
/**
* The observed problem:
* <p/>
* - 1. Start continuous pull
* - 2. Wait until it goes IDLE (this works fine)
* - 3. Add a new document directly to the Sync Gateway
* - 4. The continuous pull goes from IDLE -> RUNNING
* - 5. Wait until it goes IDLE again (this doesn't work, it never goes back to IDLE)
* <p/>
* The test case below simulates the above scenario using a mock sync gateway.
* <p/>
* https://github.com/couchbase/couchbase-lite-java-core/issues/383
*/
public void testContinuousPullReplicationGoesIdleTwice() throws Exception {
Log.e(TAG, "TEST START");
// create mockwebserver and custom dispatcher
MockDispatcher dispatcher = new MockDispatcher();
MockWebServer server = MockHelper.getMockWebServer(dispatcher);
dispatcher.setServerType(MockDispatcher.ServerType.SYNC_GW);
try {
// checkpoint PUT or GET response (sticky)
MockCheckpointPut mockCheckpointPut = new MockCheckpointPut();
mockCheckpointPut.setSticky(true);
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHECKPOINT, mockCheckpointPut);
// add non-sticky changes response that returns no changes
// this will cause the pull replicator to go into the IDLE state
MockChangesFeed mockChangesFeed = new MockChangesFeed();
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHANGES, mockChangesFeed.generateMockResponse());
// add a _changes response that just blocks for a few seconds to emulate a
// server that doesn't have any new changes. While the puller is blocked on this request
// to the _changes feed, the test will add a new change listener that waits until it goes
// into the RUNNING state
MockChangesFeedNoResponse mockChangesFeedNoResponse = new MockChangesFeedNoResponse();
// It seems the 5 sec delay might not be necessary; removing it reduces the test duration by 5 sec
//mockChangesFeedNoResponse.setDelayMs(5 * 1000);
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHANGES, mockChangesFeedNoResponse);
// 3.
// after the above changes feed response returns, the next time
// the puller gets the _changes feed, return a response that there is 1 new doc.
// this will cause the puller to go from IDLE -> RUNNING
MockDocumentGet.MockDocument mockDoc1 = new MockDocumentGet.MockDocument("doc1", "1-5e38", 1);
mockDoc1.setJsonMap(MockHelper.generateRandomJsonMap());
mockChangesFeed = new MockChangesFeed();
mockChangesFeed.add(new MockChangesFeed.MockChangedDoc(mockDoc1));
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHANGES, mockChangesFeed.generateMockResponse());
// at this point, the mock _changes feed is done simulating new docs on the sync gateway
// since we've done enough to reproduce the problem. so at this point, just make the changes
// feed block for a long time.
MockChangesFeedNoResponse mockChangesFeedNoResponse2 = new MockChangesFeedNoResponse();
mockChangesFeedNoResponse2.setDelayMs(6000 * 1000); // block for > 1hr
mockChangesFeedNoResponse2.setSticky(true); // continue this behavior indefinitely
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHANGES, mockChangesFeedNoResponse2);
// doc1 response
MockDocumentGet mockDocumentGet = new MockDocumentGet(mockDoc1);
dispatcher.enqueueResponse(mockDoc1.getDocPathRegex(), mockDocumentGet.generateMockResponse());
// _revs_diff response -- everything missing
MockRevsDiff mockRevsDiff = new MockRevsDiff();
mockRevsDiff.setSticky(true);
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_REVS_DIFF, mockRevsDiff);
Log.e(TAG, "SERVER START");
server.play();
// create pull replication
final Replication pullReplication = database.createPullReplication(server.getUrl("/db"));
pullReplication.setContinuous(true);
final CountDownLatch enteredIdleState1 = new CountDownLatch(1);
pullReplication.addChangeListener(new Replication.ChangeListener() {
@Override
public void changed(Replication.ChangeEvent event) {
if (event.getSource().getStatus() == Replication.ReplicationStatus.REPLICATION_IDLE) {
Log.e(TAG, "Replication is IDLE 1");
enteredIdleState1.countDown();
pullReplication.removeChangeListener(this);
}
}
});
Log.e(TAG, "REPLICATOR START");
// 1. start pull replication
pullReplication.start();
// 2. wait until it's IDLE
boolean success = enteredIdleState1.await(30, TimeUnit.SECONDS);
assertTrue(success);
// 3. the server-side change (the new doc) was already queued up in the mock responses above
// change listener to see if it's RUNNING
// we can't add this earlier, because the countdown latch would get
// triggered too early (the other approach would be to set the countdown
// latch to a higher number)
final CountDownLatch enteredRunningState = new CountDownLatch(1);
final CountDownLatch enteredIdleState2 = new CountDownLatch(1);
pullReplication.addChangeListener(new Replication.ChangeListener() {
@Override
public void changed(Replication.ChangeEvent event) {
if (event.getSource().getStatus() == Replication.ReplicationStatus.REPLICATION_ACTIVE) {
if (enteredRunningState.getCount() > 0) {
Log.e(TAG, "Replication is RUNNING");
enteredRunningState.countDown();
}
}
// second IDLE change listener
// handle the IDLE event here; it seems the IDLE event can fire before the IDLE event handler is set
else if (event.getSource().getStatus() == Replication.ReplicationStatus.REPLICATION_IDLE) {
if (enteredRunningState.getCount() <= 0 && enteredIdleState2.getCount() > 0) {
Log.e(TAG, "Replication is IDLE 2");
enteredIdleState2.countDown();
}
}
}
});
// 4. wait until it's RUNNING
Log.e(TAG, "WAIT for RUNNING");
success = enteredRunningState.await(30, TimeUnit.SECONDS);
assertTrue(success);
// 5. wait until it's IDLE again. Before the fix, it would never go IDLE again, and so
// this would timeout and the test would fail.
Log.e(TAG, "WAIT for IDLE");
success = enteredIdleState2.await(30, TimeUnit.SECONDS);
assertTrue(success);
Log.e(TAG, "STOP REPLICATOR");
// clean up
stopReplication(pullReplication);
Log.e(TAG, "STOP MOCK SERVER");
} finally {
server.shutdown();
}
Log.e(TAG, "TEST DONE");
}
/**
* Test case that makes sure STOPPED notification is sent only once with continuous pull replication
* https://github.com/couchbase/couchbase-lite-android/issues/442
*/
public void testContinuousPullReplicationSendStoppedOnce() throws Exception {
Log.d(TAG, "TEST START");
// create mockwebserver and custom dispatcher
MockDispatcher dispatcher = new MockDispatcher();
MockWebServer server = MockHelper.getMockWebServer(dispatcher);
dispatcher.setServerType(MockDispatcher.ServerType.SYNC_GW);
try {
// checkpoint PUT or GET response (sticky)
MockCheckpointPut mockCheckpointPut = new MockCheckpointPut();
mockCheckpointPut.setSticky(true);
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHECKPOINT, mockCheckpointPut);
// add non-sticky changes response that returns no changes
// this will cause the pull replicator to go into the IDLE state
MockChangesFeed mockChangesFeed = new MockChangesFeed();
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHANGES, mockChangesFeed.generateMockResponse());
server.play();
// create pull replication
Replication pullReplication = database.createPullReplication(server.getUrl("/db"));
pullReplication.setContinuous(true);
final CountDownLatch enteredIdleState = new CountDownLatch(1);
final CountDownLatch enteredStoppedState = new CountDownLatch(2);
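// the latch starts at 2: exactly one STOPPED notification leaves the count at 1,
// a duplicate would drive it to 0 (asserted below)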
pullReplication.addChangeListener(new Replication.ChangeListener() {
@Override
public void changed(Replication.ChangeEvent event) {
if (event.getSource().getStatus() == Replication.ReplicationStatus.REPLICATION_IDLE) {
Log.d(TAG, "Replication is IDLE");
enteredIdleState.countDown();
} else if (event.getSource().getStatus() == Replication.ReplicationStatus.REPLICATION_STOPPED) {
Log.d(TAG, "Replication is STOPPED");
enteredStoppedState.countDown();
}
}
});
// 1. start pull replication
pullReplication.start();
// 2. wait until it's IDLE
boolean success = enteredIdleState.await(30, TimeUnit.SECONDS);
assertTrue(success);
// 3. stop pull replication
stopReplication(pullReplication);
// 4. wait until it's STOPPED
Log.d(TAG, "WAIT for STOPPED");
//success = enteredStoppedState.await(Replication.DEFAULT_MAX_TIMEOUT_FOR_SHUTDOWN + 30, TimeUnit.SECONDS); // replicator maximum shutdown timeout 60 sec + additional 30 sec for other stuff
// NOTE: 90 sec is too long for a unit test; changed to 30 sec.
// NOTE2: 30 sec is still too long for a unit test; changed to 15 sec.
success = enteredStoppedState.await(15, TimeUnit.SECONDS);
// if STOPPED notification was sent twice, enteredStoppedState becomes 0.
assertEquals(1, enteredStoppedState.getCount());
assertFalse(success);
} finally {
Log.d(TAG, "STOP MOCK SERVER");
server.shutdown();
}
Log.d(TAG, "TEST DONE");
}
/**
* Test case that makes sure STOPPED notification is sent only once with one time pull replication
* https://github.com/couchbase/couchbase-lite-android/issues/442
*/
public void testOneTimePullReplicationSendStoppedOnce() throws Exception {
Log.d(TAG, "TEST START");
// create mockwebserver and custom dispatcher
MockDispatcher dispatcher = new MockDispatcher();
MockWebServer server = MockHelper.getMockWebServer(dispatcher);
dispatcher.setServerType(MockDispatcher.ServerType.SYNC_GW);
try {
// checkpoint PUT or GET response (sticky)
MockCheckpointPut mockCheckpointPut = new MockCheckpointPut();
mockCheckpointPut.setSticky(true);
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHECKPOINT, mockCheckpointPut);
// add non-sticky changes response that returns no changes
// this will cause the pull replicator to go into the IDLE state
MockChangesFeed mockChangesFeed = new MockChangesFeed();
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHANGES, mockChangesFeed.generateMockResponse());
server.play();
// create pull replication
Replication pullReplication = database.createPullReplication(server.getUrl("/db"));
pullReplication.setContinuous(false);
// handle STOPPED notification
final CountDownLatch enteredStoppedState = new CountDownLatch(2);
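// as above, the latch starts at 2 so a duplicate STOPPED notification would drive the count to 0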
pullReplication.addChangeListener(new Replication.ChangeListener() {
@Override
public void changed(Replication.ChangeEvent event) {
if (event.getSource().getStatus() == Replication.ReplicationStatus.REPLICATION_STOPPED &&
event.getTransition().getDestination() == ReplicationState.STOPPED) {
Log.d(TAG, "Replication is STOPPED");
enteredStoppedState.countDown();
}
}
});
// 1. start pull replication
pullReplication.start();
// 2. wait until it's STOPPED
Log.d(TAG, "WAIT for STOPPED");
boolean success = enteredStoppedState.await(15, TimeUnit.SECONDS);
// if STOPPED notification was sent twice, enteredStoppedState becomes 0.
assertEquals(1, enteredStoppedState.getCount());
assertFalse(success);
} finally {
Log.d(TAG, "STOP MOCK SERVER");
server.shutdown();
}
Log.d(TAG, "TEST DONE");
}
/**
* Issue: Pull Replicator does not send IDLE state after checkpoint
* https://github.com/couchbase/couchbase-lite-java-core/issues/389
* <p/>
* 1. Wait till pull replicator becomes IDLE state
* 2. Update change event handler for handling ACTIVE and IDLE
* 3. Create document into local db
* 4. Based on local doc information, prepare mock change response for 1st /_changes request
* 5. Prepare next mock change response for 2nd /_changes request (blocking for while)
* 6. wait for Replication IDLE -> ACTIVE -> IDLE
*/
public void testPullReplicationSendIdleStateAfterCheckPoint() throws Exception {
Log.d(TAG, "TEST START");
// create mockwebserver and custom dispatcher
MockDispatcher dispatcher = new MockDispatcher();
MockWebServer server = MockHelper.getMockWebServer(dispatcher);
dispatcher.setServerType(MockDispatcher.ServerType.SYNC_GW);
try {
// checkpoint PUT or GET response (sticky) (for both push and pull)
MockCheckpointPut mockCheckpointPut = new MockCheckpointPut();
mockCheckpointPut.setSticky(true);
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHECKPOINT, mockCheckpointPut);
// add non-sticky changes response that returns no changes (for pull)
// this will cause the pull replicator to go into the IDLE state
MockChangesFeed mockChangesFeedEmpty = new MockChangesFeed();
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHANGES, mockChangesFeedEmpty.generateMockResponse());
// start mock server
server.play();
// create pull replication
Replication pullReplication = database.createPullReplication(server.getUrl("/db"));
pullReplication.setContinuous(true);
// handler to wait for IDLE
final CountDownLatch pullInitialIdleState = new CountDownLatch(1);
pullReplication.addChangeListener(new Replication.ChangeListener() {
@Override
public void changed(Replication.ChangeEvent event) {
if (event.getSource().getStatus() == Replication.ReplicationStatus.REPLICATION_IDLE) {
pullInitialIdleState.countDown();
}
}
});
// start pull replication
pullReplication.start();
// 1. Wait till replicator becomes IDLE
boolean success = pullInitialIdleState.await(30, TimeUnit.SECONDS);
assertTrue(success);
// clear out existing queued mock responses to make room for new ones
dispatcher.clearQueuedResponse(MockHelper.PATH_REGEX_CHANGES);
// 2. Update change event handler for handling ACTIVE and IDLE
final CountDownLatch activeSignal = new CountDownLatch(1);
final CountDownLatch idleSignal = new CountDownLatch(1);
pullReplication.addChangeListener(new Replication.ChangeListener() {
@Override
public void changed(Replication.ChangeEvent event) {
Log.e(TAG, "[changed] PULL -> " + event);
if (event.getSource().getStatus() == Replication.ReplicationStatus.REPLICATION_IDLE) {
// make sure pull replicator becomes IDLE after ACTIVE state.
// so ignore any IDLE state before ACTIVE.
if (activeSignal.getCount() == 0) {
idleSignal.countDown();
}
} else if (event.getSource().getStatus() == Replication.ReplicationStatus.REPLICATION_ACTIVE) {
activeSignal.countDown();
}
}
});
// 3. Create document into local db
Document doc = database.createDocument();
Map<String, Object> props = new HashMap<String, Object>();
props.put("key", "1");
doc.putProperties(props);
// 4. Based on local doc information, prepare mock change response for 1st /_changes request
String docId = doc.getId();
String revId = doc.getCurrentRevisionId();
int lastSeq = (int) database.getLastSequenceNumber();
MockDocumentGet.MockDocument mockDocument1 = new MockDocumentGet.MockDocument(docId, revId, lastSeq + 1);
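// advertise the local doc as a remote change one sequence ahead, so the puller has something new to fetch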
mockDocument1.setJsonMap(MockHelper.generateRandomJsonMap());
MockChangesFeed mockChangesFeed = new MockChangesFeed();
mockChangesFeed.add(new MockChangesFeed.MockChangedDoc(mockDocument1));
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHANGES, mockChangesFeed.generateMockResponse());
// 5. Prepare next mock change response for 2nd /_changes request (blocking for while)
MockChangesFeedNoResponse mockChangesFeedNoResponse2 = new MockChangesFeedNoResponse();
mockChangesFeedNoResponse2.setDelayMs(60 * 1000);
mockChangesFeedNoResponse2.setSticky(true);
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHANGES, mockChangesFeedNoResponse2);
// 6. wait for Replication IDLE -> ACTIVE -> IDLE
success = activeSignal.await(30, TimeUnit.SECONDS);
assertTrue(success);
success = idleSignal.await(30, TimeUnit.SECONDS);
assertTrue(success);
// stop pull replication
stopReplication(pullReplication);
} finally {
server.shutdown();
}
Log.d(TAG, "TEST DONE");
}
/**
* Sync (pull replication) fails on document with a lot of revisions and attachments
* https://github.com/couchbase/couchbase-lite-java-core/issues/415
*/
public void testPullReplicationWithManyAttachmentRevisions() throws Exception {
Log.d(TAG, "TEST START: testPullReplicationWithManyAttachmentRevisions()");
String docID = "11111";
String key = "key";
String value = "one-one-one-one";
String attachmentName = "attachment.png";
// create initial document (Revision 1-xxxx)
Map<String, Object> props1 = new HashMap<String, Object>();
props1.put("_id", docID);
props1.put(key, value);
RevisionInternal rev = new RevisionInternal(props1);
Status status = new Status();
RevisionInternal savedRev = database.putRevision(rev, null, false, status);
String rev1ID = savedRev.getRevID();
// add attachment to doc (Revision 2-xxxx)
Document doc = database.getDocument(docID);
UnsavedRevision newRev = doc.createRevision();
InputStream attachmentStream = getAsset(attachmentName);
newRev.setAttachment(attachmentName, "image/png", attachmentStream);
SavedRevision saved = newRev.save(true);
String rev2ID = doc.getCurrentRevisionId();
Log.w(TAG, "saved => " + saved);
Log.w(TAG, "revID => " + doc.getCurrentRevisionId());
// Create two more revision generations (3 and 4), each with 50 conflicting revisions
int j = 3;
for (; j < 5; j++) {
// Create a conflict, won by the new revision:
Map<String, Object> props = new HashMap<String, Object>();
props.put("_id", docID);
props.put("_rev", j + "-0000");
props.put(key, value);
RevisionInternal leaf = new RevisionInternal(props);
database.forceInsert(leaf, new ArrayList<String>(), null);
Log.w(TAG, "revID => " + doc.getCurrentRevisionId());
for (int i = 0; i < 49; i++) {
// Create a conflict, won by the new revision:
Map<String, Object> props_conflict = new HashMap<String, Object>();
props_conflict.put("_id", docID);
String revStr = String.format("%d-%04d", j, i);
props_conflict.put("_rev", revStr);
props_conflict.put(key, value);
// attachment
byte[] attach1 = "This is the body of attach1".getBytes();
String base64 = Base64.encodeBytes(attach1);
Map<String, Object> attachment = new HashMap<String, Object>();
attachment.put("content_type", "text/plain");
attachment.put("data", base64);
Map<String, Object> attachmentDict = new HashMap<String, Object>();
attachmentDict.put("test_attachment", attachment);
props_conflict.put("_attachments", attachmentDict);
// end of attachment
RevisionInternal leaf_conflict = new RevisionInternal(props_conflict);
List<String> revHistory = new ArrayList<String>();
revHistory.add(leaf_conflict.getRevID());
for (int k = j - 1; k > 2; k--) {
revHistory.add(String.format("%d-0000", k));
}
revHistory.add(rev2ID);
revHistory.add(rev1ID);
database.forceInsert(leaf_conflict, revHistory, null);
Log.w(TAG, "revID => " + doc.getCurrentRevisionId());
}
}
String docId = doc.getId();
String revId = j + "-00";
int lastSeq = (int) database.getLastSequenceNumber();
// create mockwebserver and custom dispatcher
MockDispatcher dispatcher = new MockDispatcher();
MockWebServer server = MockHelper.getMockWebServer(dispatcher);
dispatcher.setServerType(MockDispatcher.ServerType.SYNC_GW);
try {
// checkpoint PUT or GET response (sticky) (for both push and pull)
MockCheckpointPut mockCheckpointPut = new MockCheckpointPut();
mockCheckpointPut.setSticky(true);
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHECKPOINT, mockCheckpointPut);
MockChangesFeed mockChangesFeedEmpty = new MockChangesFeed();
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHANGES, mockChangesFeedEmpty.generateMockResponse());
// start mock server
server.play();
// create pull replication
Replication pullReplication = database.createPullReplication(server.getUrl("/db"));
pullReplication.setContinuous(true);
final CountDownLatch idleSignal1 = new CountDownLatch(1);
final CountDownLatch idleSignal2 = new CountDownLatch(2);
pullReplication.addChangeListener(new Replication.ChangeListener() {
@Override
public void changed(Replication.ChangeEvent event) {
Log.e(TAG, event.toString());
if (event.getError() != null) {
Assert.fail("Should not have any error....");
}
if (event.getSource().getStatus() == Replication.ReplicationStatus.REPLICATION_IDLE) {
idleSignal1.countDown();
idleSignal2.countDown();
}
}
});
// start pull replication
pullReplication.start();
boolean success = idleSignal1.await(30, TimeUnit.SECONDS);
assertTrue(success);
// _changes response containing the doc with many revisions
MockDocumentGet.MockDocument mockDocument1 = new MockDocumentGet.MockDocument(docId, revId, lastSeq + 1);
mockDocument1.setJsonMap(MockHelper.generateRandomJsonMap());
MockChangesFeed mockChangesFeed = new MockChangesFeed();
mockChangesFeed.add(new MockChangesFeed.MockChangedDoc(mockDocument1));
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHANGES, mockChangesFeed.generateMockResponse());
// doc response
MockDocumentGet mockDocumentGet = new MockDocumentGet(mockDocument1);
dispatcher.enqueueResponse(mockDocument1.getDocPathRegex(), mockDocumentGet.generateMockResponse());
// check /db/docid?...
RecordedRequest request = dispatcher.takeRequestBlocking(mockDocument1.getDocPathRegex(), 30 * 1000);
Log.e(TAG, request.toString());
Map<String, String> queries = query2map(request.getPath());
String atts_since = URLDecoder.decode(queries.get("atts_since"), "UTF-8");
List<String> json = (List<String>) str2json(atts_since);
assertNotNull(json);
Log.e(TAG, json.toString());
// atts_since parameter should be limited to PullerInternal.MAX_NUMBER_OF_ATTS_SINCE
assertEquals(PullerInternal.MAX_NUMBER_OF_ATTS_SINCE, json.size());
boolean success2 = idleSignal2.await(30, TimeUnit.SECONDS);
assertTrue(success2);
// stop pull replication
stopReplication(pullReplication);
} finally {
server.shutdown();
}
Log.d(TAG, "TEST END: testPullReplicationWithManyAttachmentRevisions()");
}
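/**
 * Parse a JSON string into a plain Java object tree (Map/List/String/Number).
 * Returns null if the string cannot be parsed.
 */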
public static Object str2json(String value) {
Object result = null;
try {
result = Manager.getObjectMapper().readValue(value, Object.class);
} catch (Exception e) {
Log.w("Unable to parse JSON Query", e);
}
return result;
}
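/**
 * Split a URL query string on '&' into a key -> value map.
 * Values are left URL-encoded; callers decode them as needed.
 * Example (hypothetical input): query2map("revs=true&atts_since=%5B%5D")
 * yields {"revs": "true", "atts_since": "%5B%5D"}.
 */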
public static Map<String, String> query2map(String queryString) {
Map<String, String> queries = new HashMap<String, String>();
for (String component : queryString.split("&")) {
int location = component.indexOf('=');
if (location > 0) {
String key = component.substring(0, location);
String value = component.substring(location + 1);
queries.put(key, value);
}
}
return queries;
}
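/**
 * Minimal MultipartReaderDelegate that captures the body of a JSON part,
 * gunzipping it first when the part headers indicate Content-Encoding: gzip.
 */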
class CustomMultipartReaderDelegate implements MultipartReaderDelegate {
public Map<String, String> headers = null;
public byte[] data = null;
public boolean gzipped = false;
public boolean bJson = false;
@Override
public void startedPart(Map<String, String> headers) {
gzipped = headers.get("Content-Encoding") != null && headers.get("Content-Encoding").contains("gzip");
bJson = headers.get("Content-Type") != null && headers.get("Content-Type").contains("application/json");
}
@Override
public void appendToPart(byte[] data) {
if (gzipped && bJson) {
this.data = Utils.decompressByGzip(data);
} else if (bJson) {
this.data = data;
}
}
@Override
public void appendToPart(final byte[] data, int off, int len) {
byte[] b = Arrays.copyOfRange(data, off, off + len);
appendToPart(b);
}
@Override
public void finishedPart() {
}
}
/**
* Push Replication, never receive REPLICATION_ACTIVE status
* https://github.com/couchbase/couchbase-lite-android/issues/451
*/
public void testPushReplActiveState() throws Exception {
Log.d(TAG, "TEST START: testPushReplActiveState()");
// make sure we are starting empty
assertEquals(0, database.getLastSequenceNumber());
// create mockwebserver and custom dispatcher
MockDispatcher dispatcher = new MockDispatcher();
MockWebServer server = MockHelper.getMockWebServer(dispatcher);
dispatcher.setServerType(MockDispatcher.ServerType.SYNC_GW);
try {
server.play();
// checkpoint GET response w/ 404. also receives checkpoint PUT's
MockCheckpointPut mockCheckpointPut = new MockCheckpointPut();
mockCheckpointPut.setSticky(true);
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHECKPOINT, mockCheckpointPut);
// _revs_diff response -- everything missing
MockRevsDiff mockRevsDiff = new MockRevsDiff();
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_REVS_DIFF, mockRevsDiff);
// _bulk_docs response -- everything stored
MockBulkDocs mockBulkDocs = new MockBulkDocs();
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_BULK_DOCS, mockBulkDocs);
// create continuous push replication
Replication pushReplication = database.createPushReplication(server.getUrl("/db"));
pushReplication.setContinuous(true);
final String checkpointId = pushReplication.remoteCheckpointDocID(); // save the checkpoint id for later usage
// Event handler for IDLE
CountDownLatch idleSignal = new CountDownLatch(1);
ReplicationIdleObserver idleObserver = new ReplicationIdleObserver(idleSignal);
pushReplication.addChangeListener(idleObserver);
// start the continuous replication
pushReplication.start();
// wait until we get an IDLE event
boolean successful = idleSignal.await(30, TimeUnit.SECONDS);
assertTrue(successful);
pushReplication.removeChangeListener(idleObserver);
// Event handler for ACTIVE
CountDownLatch activeSignal = new CountDownLatch(1);
ReplicationActiveObserver activeObserver = new ReplicationActiveObserver(activeSignal);
pushReplication.addChangeListener(activeObserver);
// Event handler for IDLE2
CountDownLatch idleSignal2 = new CountDownLatch(1);
ReplicationIdleObserver idleObserver2 = new ReplicationIdleObserver(idleSignal2);
pushReplication.addChangeListener(idleObserver2);
// add docs
Map<String, Object> properties1 = new HashMap<String, Object>();
properties1.put("doc1", "testPushReplActiveState");
final Document doc1 = createDocWithProperties(properties1);
// wait until we get an ACTIVE event
successful = activeSignal.await(30, TimeUnit.SECONDS);
assertTrue(successful);
pushReplication.removeChangeListener(activeObserver);
// check _bulk_docs
RecordedRequest request = dispatcher.takeRequestBlocking(MockHelper.PATH_REGEX_BULK_DOCS);
assertNotNull(request);
assertTrue(MockHelper.getUtf8Body(request).contains("testPushReplActiveState"));
// wait until we get an IDLE event
successful = idleSignal2.await(30, TimeUnit.SECONDS);
assertTrue(successful);
pushReplication.removeChangeListener(idleObserver2);
// stop push replication
stopReplication(pushReplication);
} finally {
server.shutdown();
}
Log.d(TAG, "TEST END: testPushReplActiveState()");
}
/**
* Error after close DB client
* https://github.com/couchbase/couchbase-lite-java/issues/52
*/
public void testStop() throws Exception {
Log.d(Log.TAG, "START testStop()");
boolean success = false;
// create mock server
MockDispatcher dispatcher = new MockDispatcher();
dispatcher.setServerType(MockDispatcher.ServerType.SYNC_GW);
MockWebServer server = new MockWebServer();
server.setDispatcher(dispatcher);
try {
server.play();
// checkpoint PUT or GET response (sticky) (for both push and pull)
MockCheckpointPut mockCheckpointPut = new MockCheckpointPut();
mockCheckpointPut.setSticky(true);
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHECKPOINT, mockCheckpointPut);
// create pull replication & start it
Replication pull = database.createPullReplication(server.getUrl("/db"));
pull.setContinuous(true);
final CountDownLatch pullIdleState = new CountDownLatch(1);
ReplicationIdleObserver pullIdleObserver = new ReplicationIdleObserver(pullIdleState);
pull.addChangeListener(pullIdleObserver);
pull.start();
// create push replication & start it
Replication push = database.createPushReplication(server.getUrl("/db"));
push.setContinuous(true);
final CountDownLatch pushIdleState = new CountDownLatch(1);
ReplicationIdleObserver pushIdleObserver = new ReplicationIdleObserver(pushIdleState);
push.addChangeListener(pushIdleObserver);
push.start();
// wait till both push and pull replicators become idle.
success = pullIdleState.await(30, TimeUnit.SECONDS);
assertTrue(success);
pull.removeChangeListener(pullIdleObserver);
success = pushIdleState.await(30, TimeUnit.SECONDS);
assertTrue(success);
push.removeChangeListener(pushIdleObserver);
// stop both pull and push replicators
stopReplication(pull);
stopReplication(push);
boolean observedCBLRequestWorker = false;
// First give 5 sec to clean up thread status.
try {
Thread.sleep(5 * 1000);
} catch (Exception e) {
}
// all threads associated with the replicators should be terminated.
Set<Thread> threadSet = Thread.getAllStackTraces().keySet();
for (Thread t : threadSet) {
if (t.isAlive() && t.getName().indexOf("CBLRequestWorker") != -1) {
observedCBLRequestWorker = true;
break;
}
}
// second attempt: if we still observe a CBLRequestWorker thread, fail the test
if (observedCBLRequestWorker) {
// give 10 sec to clean thread status.
try {
Thread.sleep(10 * 1000);
} catch (Exception e) {
}
// all threads which are associated with replicators should be terminated.
Set<Thread> threadSet2 = Thread.getAllStackTraces().keySet();
for (Thread t : threadSet2) {
if (t.isAlive()) {
assertEquals(-1, t.getName().indexOf("CBLRequestWorker"));
}
}
}
} finally {
// shutdown mock server
server.shutdown();
}
Log.d(Log.TAG, "END testStop()");
}
/**
* http://developer.couchbase.com/mobile/develop/references/couchbase-lite/couchbase-lite/replication/replication/index.html#mapstring-string-filterparams--get-set-
* <p/>
* Params passed in filtered push throw a null exception in the filter function
* https://github.com/couchbase/couchbase-lite-java-core/issues/533
*/
public void testSetFilterParams() throws CouchbaseLiteException, IOException, InterruptedException {
// make sure we are starting empty
assertEquals(0, database.getLastSequenceNumber());
// create mockwebserver and custom dispatcher
MockDispatcher dispatcher = new MockDispatcher();
MockWebServer server = MockHelper.getMockWebServer(dispatcher);
dispatcher.setServerType(MockDispatcher.ServerType.SYNC_GW);
try {
server.play();
// checkpoint GET response w/ 404. also receives checkpoint PUT's
MockCheckpointPut mockCheckpointPut = new MockCheckpointPut();
mockCheckpointPut.setSticky(true);
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHECKPOINT, mockCheckpointPut);
// _revs_diff response -- everything missing
MockRevsDiff mockRevsDiff = new MockRevsDiff();
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_REVS_DIFF, mockRevsDiff);
// _bulk_docs response -- everything stored
MockBulkDocs mockBulkDocs = new MockBulkDocs();
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_BULK_DOCS, mockBulkDocs);
// create 10 documents and delete 5
for (int i = 0; i < 10; i++) {
Document doc = createDocument(i, i % 2 == 0);
if (i % 2 == 0) {
try {
doc.delete();
} catch (CouchbaseLiteException e) {
e.printStackTrace();
}
}
}
final CountDownLatch latch = new CountDownLatch(10);
final CountDownLatch check = new CountDownLatch(10);
database.setFilter("unDeleted", new ReplicationFilter() {
@Override
public boolean filter(SavedRevision savedRevision, Map<String, Object> params) {
if (params == null || !"hello".equals(params.get("name"))) {
check.countDown();
}
latch.countDown();
return !savedRevision.isDeletion();
}
});
Replication pushReplication = database.createPushReplication(server.getUrl("/db"));
pushReplication.setContinuous(false);
pushReplication.setFilter("unDeleted");
pushReplication.setFilterParams(Collections.<String, Object>singletonMap("name", "hello"));
pushReplication.start();
boolean success = latch.await(30, TimeUnit.SECONDS);
assertTrue(success);
assertEquals(10, check.getCount());
} finally {
server.shutdown();
}
}
private Document createDocument(int number, boolean flag) {
SimpleDateFormat dateFormatter = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss.SSS'Z'");
Calendar calendar = GregorianCalendar.getInstance();
String currentTimeString = dateFormatter.format(calendar.getTime());
Map<String, Object> properties = new HashMap<String, Object>();
properties.put("type", "test_doc");
properties.put("created_at", currentTimeString);
if (flag) {
properties.put("name", "Waldo");
}
Document document = database.getDocument(String.valueOf(number));
try {
document.putProperties(properties);
} catch (CouchbaseLiteException e) {
e.printStackTrace();
}
return document;
}
/**
* https://github.com/couchbase/couchbase-lite-java-core/issues/575
*/
public void testRestartWithStoppedReplicator() throws Exception {
MockDispatcher dispatcher = new MockDispatcher();
dispatcher.setServerType(MockDispatcher.ServerType.COUCHDB);
MockWebServer server = MockHelper.getPreloadedPullTargetMockCouchDB(dispatcher, 0, 0);
try {
server.play();
// run pull replication
Replication pullReplication = database.createPullReplication(server.getUrl("/db"));
pullReplication.setContinuous(true);
// it should go idle twice, hence countdown latch = 2
final CountDownLatch replicationIdleFirstTime = new CountDownLatch(1);
final CountDownLatch replicationIdleSecondTime = new CountDownLatch(2);
final CountDownLatch replicationStoppedFirstTime = new CountDownLatch(1);
pullReplication.addChangeListener(new Replication.ChangeListener() {
@Override
public void changed(Replication.ChangeEvent event) {
if (event.getTransition() != null && event.getTransition().getDestination() == ReplicationState.IDLE) {
Log.e(Log.TAG, "IDLE");
replicationIdleFirstTime.countDown();
replicationIdleSecondTime.countDown();
} else if (event.getTransition() != null && event.getTransition().getDestination() == ReplicationState.STOPPED) {
Log.e(Log.TAG, "STOPPED");
replicationStoppedFirstTime.countDown();
}
}
});
pullReplication.start();
// wait until replication goes idle
boolean success = replicationIdleFirstTime.await(60, TimeUnit.SECONDS);
assertTrue(success);
pullReplication.stop();
// wait until replication stops
success = replicationStoppedFirstTime.await(60, TimeUnit.SECONDS);
assertTrue(success);
pullReplication.restart();
// wait until replication goes idle again
success = replicationIdleSecondTime.await(60, TimeUnit.SECONDS);
assertTrue(success);
stopReplication(pullReplication);
} finally {
// cleanup / shutdown
server.shutdown();
}
}
/**
* https://github.com/couchbase/couchbase-lite-java-core/issues/696
* in Unit-Tests/Replication_Tests.m
* - (void)test18_PendingDocumentIDs
*/
public void test18_PendingDocumentIDs() throws Exception {
// create mockwebserver and custom dispatcher
MockDispatcher dispatcher = new MockDispatcher();
MockWebServer server = MockHelper.getMockWebServer(dispatcher);
dispatcher.setServerType(MockDispatcher.ServerType.SYNC_GW);
server.setDispatcher(dispatcher);
try {
server.play();
// checkpoint GET response w/ 404 + respond to all PUT Checkpoint requests
MockCheckpointPut mockCheckpointPut = new MockCheckpointPut();
mockCheckpointPut.setSticky(true);
mockCheckpointPut.setDelayMs(50);
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHECKPOINT, mockCheckpointPut);
// _revs_diff response -- everything missing
MockRevsDiff mockRevsDiff = new MockRevsDiff();
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_REVS_DIFF, mockRevsDiff);
// _bulk_docs response -- everything stored
MockBulkDocs mockBulkDocs = new MockBulkDocs();
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_BULK_DOCS, mockBulkDocs);
Replication repl = database.createPushReplication(server.getUrl("/db"));
assertNotNull(repl.getPendingDocumentIDs());
assertEquals(0, repl.getPendingDocumentIDs().size());
assertTrue(database.runInTransaction(
new TransactionalTask() {
@Override
public boolean run() {
for (int i = 1; i <= 10; i++) {
Document doc = database.getDocument(String.format("doc-%d", i));
Map<String, Object> props = new HashMap<String, Object>();
props.put("index", i);
props.put("bar", false);
try {
doc.putProperties(props);
} catch (CouchbaseLiteException e) {
fail(e.getMessage());
}
}
return true;
}
}
));
assertEquals(10, repl.getPendingDocumentIDs().size());
assertTrue(repl.isDocumentPending(database.getDocument("doc-1")));
runReplication(repl);
assertNotNull(repl.getPendingDocumentIDs());
assertEquals(0, repl.getPendingDocumentIDs().size());
assertFalse(repl.isDocumentPending(database.getDocument("doc-1")));
assertTrue(database.runInTransaction(
new TransactionalTask() {
@Override
public boolean run() {
for (int i = 11; i <= 20; i++) {
Document doc = database.getDocument(String.format("doc-%d", i));
Map<String, Object> props = new HashMap<String, Object>();
props.put("index", i);
props.put("bar", false);
try {
doc.putProperties(props);
} catch (CouchbaseLiteException e) {
fail(e.getMessage());
}
}
return true;
}
}
));
repl = database.createPushReplication(server.getUrl("/db"));
assertNotNull(repl.getPendingDocumentIDs());
assertEquals(10, repl.getPendingDocumentIDs().size());
assertTrue(repl.isDocumentPending(database.getDocument("doc-11")));
assertFalse(repl.isDocumentPending(database.getDocument("doc-1")));
// pull replication
repl = database.createPullReplication(server.getUrl("/db"));
assertNull(repl.getPendingDocumentIDs());
runReplication(repl);
assertNull(repl.getPendingDocumentIDs());
} finally {
// cleanup / shutdown
server.shutdown();
}
}
/**
* https://github.com/couchbase/couchbase-lite-java-core/issues/328
* <p/>
* Without bug fix, we observe extra PUT /{db}/_local/xxx for each _bulk_docs request
* <p/>
* 1. Create 200 docs
* 2. Start push replicator
* 3. GET /{db}/_local/xxx
* 4. PUSH /{db}/_revs_diff x 2
* 5. PUSH /{db}/_bulk_docs x 2
* 6. PUT /{db}/_local/xxx
*/
public void testExcessiveCheckpointingDuringPushReplication() throws Exception {
final int NUM_DOCS = 199;
List<Document> docs = new ArrayList<Document>();
// 1. Add more than 100 docs, as chunk size is 100
for (int i = 0; i < NUM_DOCS; i++) {
Map<String, Object> properties = new HashMap<String, Object>();
properties.put("testExcessiveCheckpointingDuringPushReplication", String.valueOf(i));
Document doc = createDocumentWithProperties(database, properties);
docs.add(doc);
}
// create mock server
MockDispatcher dispatcher = new MockDispatcher();
MockWebServer server = new MockWebServer();
server.setDispatcher(dispatcher);
try {
server.play();
// checkpoint GET response -> error
// _revs_diff response -- everything missing
MockRevsDiff mockRevsDiff = new MockRevsDiff();
mockRevsDiff.setSticky(true);
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_REVS_DIFF, mockRevsDiff);
// _bulk_docs response -- everything stored
MockBulkDocs mockBulkDocs = new MockBulkDocs();
mockBulkDocs.setSticky(true);
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_BULK_DOCS, mockBulkDocs);
// checkpoint PUT response (sticky)
MockCheckpointPut mockCheckpointPut = new MockCheckpointPut();
mockCheckpointPut.setSticky(true);
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHECKPOINT, mockCheckpointPut);
// 2. Kick off continuous push replication
Replication replicator = database.createPushReplication(server.getUrl("/db"));
replicator.setContinuous(true);
CountDownLatch replicationIdleSignal = new CountDownLatch(1);
ReplicationIdleObserver replicationIdleObserver = new ReplicationIdleObserver(replicationIdleSignal);
replicator.addChangeListener(replicationIdleObserver);
replicator.start();
// 3. Wait for document to be pushed
// NOTE: (Not 100% reproducible) With CBL Java on Jenkins (a very slow environment),
// the replicator becomes IDLE between batches in this case, after 100 docs have been pushed.
// TODO: Need to investigate
// wait until replication goes idle
boolean successful = replicationIdleSignal.await(60, TimeUnit.SECONDS);
assertTrue(successful);
// wait until mock server gets the checkpoint PUT request
boolean foundCheckpointPut = false;
String expectedLastSequence = String.valueOf(NUM_DOCS);
while (!foundCheckpointPut) {
RecordedRequest request = dispatcher.takeRequestBlocking(MockHelper.PATH_REGEX_CHECKPOINT);
if (request.getMethod().equals("PUT")) {
foundCheckpointPut = true;
String body = request.getUtf8Body();
Log.e("testExcessiveCheckpointingDuringPushReplication", "body => " + body);
// TODO: this is not valid if the device cannot handle all replication data at once
if (System.getProperty("java.vm.name").equalsIgnoreCase("Dalvik")) {
assertTrue(body.indexOf(expectedLastSequence) != -1);
}
// wait until mock server responds to the checkpoint PUT request
dispatcher.takeRecordedResponseBlocking(request);
}
}
// make some assertions about the outgoing _bulk_docs requests
RecordedRequest bulkDocsRequest1 = dispatcher.takeRequest(MockHelper.PATH_REGEX_BULK_DOCS);
assertNotNull(bulkDocsRequest1);
if (System.getProperty("java.vm.name").equalsIgnoreCase("Dalvik")) {
RecordedRequest bulkDocsRequest2 = dispatcher.takeRequest(MockHelper.PATH_REGEX_BULK_DOCS);
assertNotNull(bulkDocsRequest2);
// TODO: this is not valid if the device cannot handle all replication data at once
// order may not be guaranteed
assertTrue(isBulkDocJsonContainsDoc(bulkDocsRequest1, docs.get(0)) || isBulkDocJsonContainsDoc(bulkDocsRequest2, docs.get(0)));
assertTrue(isBulkDocJsonContainsDoc(bulkDocsRequest1, docs.get(100)) || isBulkDocJsonContainsDoc(bulkDocsRequest2, docs.get(100)));
}
// check that the Android CBL client sent only one PUT /{db}/_local/xxxx request
// the previous check already consumed that request, so the queue size should be 0.
BlockingQueue<RecordedRequest> queue = dispatcher.getRequestQueueSnapshot(MockHelper.PATH_REGEX_CHECKPOINT);
assertEquals(0, queue.size());
// cleanup
stopReplication(replicator);
} finally {
server.shutdown();
}
}
// NOTE: This test should be run manually. It relies on delays, timeouts, and waits,
// which could break the test on Jenkins because it runs on a VM with an ARM emulator.
// To run the test, remove "manual" from the test method name.
//
// https://github.com/couchbase/couchbase-lite-java-core/issues/736
// https://github.com/couchbase/couchbase-lite-net/issues/356
public void manualTestBulkGetTimeout() throws Exception {
int def1 = CouchbaseLiteHttpClientFactory.DEFAULT_CONNECTION_TIMEOUT_SECONDS;
int def2 = CouchbaseLiteHttpClientFactory.DEFAULT_SO_TIMEOUT_SECONDS;
int def3 = ReplicationInternal.MAX_RETRIES;
int def4 = ReplicationInternal.RETRY_DELAY_SECONDS;
try {
// TIMEOUT 1 SEC
CouchbaseLiteHttpClientFactory.DEFAULT_CONNECTION_TIMEOUT_SECONDS = 1;
CouchbaseLiteHttpClientFactory.DEFAULT_SO_TIMEOUT_SECONDS = 1;
ReplicationInternal.MAX_RETRIES = 2;
ReplicationInternal.RETRY_DELAY_SECONDS = 0;
// serve 2 mock docs
int numMockDocsToServe = 2;
// create mockwebserver and custom dispatcher
MockDispatcher dispatcher = new MockDispatcher();
MockWebServer server = MockHelper.getMockWebServer(dispatcher);
dispatcher.setServerType(MockDispatcher.ServerType.SYNC_GW);
try {
// mock documents to be pulled
List<MockDocumentGet.MockDocument> mockDocs = MockHelper.getMockDocuments(numMockDocsToServe);
// respond to all GET (responds with 404) and PUT Checkpoint requests
MockCheckpointPut mockCheckpointPut = new MockCheckpointPut();
mockCheckpointPut.setSticky(true);
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHECKPOINT, mockCheckpointPut);
// _changes response
MockChangesFeed mockChangesFeed = new MockChangesFeed();
for (MockDocumentGet.MockDocument mockDocument : mockDocs) {
mockChangesFeed.add(new MockChangesFeed.MockChangedDoc(mockDocument));
}
SmartMockResponseImpl smartMockResponse = new SmartMockResponseImpl(mockChangesFeed.generateMockResponse());
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHANGES, smartMockResponse);
// _bulk_get response
MockDocumentBulkGet mockBulkGet = new MockDocumentBulkGet();
for (MockDocumentGet.MockDocument mockDocument : mockDocs) {
mockBulkGet.addDocument(mockDocument);
}
// _bulk_get delays 4 sec, which is longer than the custom 1-sec timeout,
// so this causes a timeout.
mockBulkGet.setDelayMs(4 * 1000);
// make it sticky so retried requests receive the same response
mockBulkGet.setSticky(true);
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_BULK_GET, mockBulkGet);
// start mock server
server.play();
// run pull replication
Replication pullReplication = database.createPullReplication(server.getUrl("/db"));
runReplication(pullReplication, 3 * 60);
assertNotNull(pullReplication.getLastError());
assertTrue(pullReplication.getLastError() instanceof java.net.SocketTimeoutException);
// dump out the outgoing requests for bulk docs
BlockingQueue<RecordedRequest> bulkGetRequests = dispatcher.getRequestQueueSnapshot(MockHelper.PATH_REGEX_BULK_GET);
// +1 for initial request
assertEquals(ReplicationInternal.MAX_RETRIES + 1, bulkGetRequests.size());
} finally {
server.shutdown();
}
} finally {
CouchbaseLiteHttpClientFactory.DEFAULT_CONNECTION_TIMEOUT_SECONDS = def1;
CouchbaseLiteHttpClientFactory.DEFAULT_SO_TIMEOUT_SECONDS = def2;
ReplicationInternal.MAX_RETRIES = def3;
ReplicationInternal.RETRY_DELAY_SECONDS = def4;
}
}
// ReplicatorInternal.m: test_UseRemoteUUID
public void testUseRemoteUUID() throws Exception {
URL remoteURL1 = new URL("http://alice.local:55555/db");
Replication r1 = database.createPullReplication(remoteURL1);
r1.setRemoteUUID("cafebabe");
String check1 = r1.replicationInternal.remoteCheckpointDocID();
// Different URL, but same remoteUUID:
URL remoteURL2 = new URL("http://alice17.local:44444/db");
Replication r2 = database.createPullReplication(remoteURL2);
r2.setRemoteUUID("cafebabe");
String check2 = r2.replicationInternal.remoteCheckpointDocID();
assertEquals(check1, check2);
// Same UUID but different filter settings:
Replication r3 = database.createPullReplication(remoteURL2);
r3.setRemoteUUID("cafebabe");
r3.setFilter("Melitta");
String check3 = r3.replicationInternal.remoteCheckpointDocID();
assertFalse(check2.equals(check3));
}
public void testPushReplicationSetDocumentIDs() throws Exception {
// Create documents:
createDocumentForPushReplication("doc1", null, null);
createDocumentForPushReplication("doc2", null, null);
createDocumentForPushReplication("doc3", null, null);
createDocumentForPushReplication("doc4", null, null);
MockWebServer server = null;
try {
// Create mock server and play:
MockDispatcher dispatcher = new MockDispatcher();
server = MockHelper.getMockWebServer(dispatcher);
dispatcher.setServerType(MockDispatcher.ServerType.SYNC_GW);
server.play();
// Checkpoint GET response w/ 404 + respond to all PUT Checkpoint requests:
MockCheckpointPut mockCheckpointPut = new MockCheckpointPut();
mockCheckpointPut.setSticky(true);
mockCheckpointPut.setDelayMs(50);
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHECKPOINT, mockCheckpointPut);
// _revs_diff response -- everything missing:
MockRevsDiff mockRevsDiff = new MockRevsDiff();
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_REVS_DIFF, mockRevsDiff);
// _bulk_docs response -- everything stored
MockBulkDocs mockBulkDocs = new MockBulkDocs();
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_BULK_DOCS, mockBulkDocs);
// Create push replication:
Replication replication = database.createPushReplication(server.getUrl("/db"));
replication.setDocIds(Arrays.asList(new String[] {"doc2", "doc3"}));
// check pending document IDs:
Set<String> pendingDocIDs = replication.getPendingDocumentIDs();
assertEquals(2, pendingDocIDs.size());
assertFalse(pendingDocIDs.contains("doc1"));
assertTrue(pendingDocIDs.contains("doc2"));
assertTrue(pendingDocIDs.contains("doc3"));
assertFalse(pendingDocIDs.contains("doc4"));
// Run replication:
runReplication(replication);
// Check result:
RecordedRequest bulkDocsRequest = dispatcher.takeRequest(MockHelper.PATH_REGEX_BULK_DOCS);
assertNotNull(bulkDocsRequest);
assertFalse(MockHelper.getUtf8Body(bulkDocsRequest).contains("doc1"));
assertTrue(MockHelper.getUtf8Body(bulkDocsRequest).contains("doc2"));
assertTrue(MockHelper.getUtf8Body(bulkDocsRequest).contains("doc3"));
assertFalse(MockHelper.getUtf8Body(bulkDocsRequest).contains("doc4"));
} finally {
if (server != null)
server.shutdown();
}
}
public void testPullReplicationSetDocumentIDs() throws Exception {
MockWebServer server = null;
try {
// Create mock server and play:
MockDispatcher dispatcher = new MockDispatcher();
server = MockHelper.getMockWebServer(dispatcher);
dispatcher.setServerType(MockDispatcher.ServerType.SYNC_GW);
server.play();
// checkpoint PUT or GET response (sticky):
MockCheckpointPut mockCheckpointPut = new MockCheckpointPut();
mockCheckpointPut.setSticky(true);
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHECKPOINT, mockCheckpointPut);
// _changes response:
MockChangesFeed mockChangesFeed = new MockChangesFeed();
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHANGES, mockChangesFeed.generateMockResponse());
// Run pull replication:
Replication replication = database.createPullReplication(server.getUrl("/db"));
replication.setDocIds(Arrays.asList(new String[] {"doc2", "doc3"}));
runReplication(replication);
// Check changes feed request:
RecordedRequest getChangesFeedRequest = dispatcher.takeRequest(MockHelper.PATH_REGEX_CHANGES);
assertEquals("POST", getChangesFeedRequest.getMethod());
String body = getChangesFeedRequest.getUtf8Body();
Map<String, Object> jsonMap = Manager.getObjectMapper().readValue(body, Map.class);
assertTrue(jsonMap.containsKey("filter"));
String filter = (String) jsonMap.get("filter");
assertEquals("_doc_ids", filter);
List<String> docIDs = (List<String>) jsonMap.get("doc_ids");
assertNotNull(docIDs);
assertEquals(2, docIDs.size());
assertTrue(docIDs.contains("doc2"));
assertTrue(docIDs.contains("doc3"));
} finally {
if (server != null)
server.shutdown();
}
}
public void testPullWithGzippedChangesFeed() throws Exception {
MockWebServer server = null;
try {
// Create mock server and play:
MockDispatcher dispatcher = new MockDispatcher();
server = MockHelper.getMockWebServer(dispatcher);
dispatcher.setServerType(MockDispatcher.ServerType.SYNC_GW);
server.play();
// Mock documents to be pulled:
MockDocumentGet.MockDocument mockDoc1 =
new MockDocumentGet.MockDocument("doc1", "1-5e38", 1);
mockDoc1.setJsonMap(MockHelper.generateRandomJsonMap());
MockDocumentGet.MockDocument mockDoc2 =
new MockDocumentGet.MockDocument("doc2", "1-563b", 2);
mockDoc2.setJsonMap(MockHelper.generateRandomJsonMap());
// checkpoint GET response w/ 404:
MockResponse fakeCheckpointResponse = new MockResponse();
MockHelper.set404NotFoundJson(fakeCheckpointResponse);
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHECKPOINT, fakeCheckpointResponse);
// _changes response:
MockChangesFeed mockChangesFeed = new MockChangesFeed();
mockChangesFeed.add(new MockChangesFeed.MockChangedDoc(mockDoc1));
mockChangesFeed.add(new MockChangesFeed.MockChangedDoc(mockDoc2));
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHANGES,
mockChangesFeed.generateMockResponse(/*gzip*/true));
// doc1 response:
MockDocumentGet mockDocumentGet = new MockDocumentGet(mockDoc1);
dispatcher.enqueueResponse(mockDoc1.getDocPathRegex(),
mockDocumentGet.generateMockResponse());
// doc2 response:
mockDocumentGet = new MockDocumentGet(mockDoc2);
dispatcher.enqueueResponse(mockDoc2.getDocPathRegex(),
mockDocumentGet.generateMockResponse());
// _bulk_get response:
MockDocumentBulkGet mockBulkGet = new MockDocumentBulkGet();
mockBulkGet.addDocument(mockDoc1);
mockBulkGet.addDocument(mockDoc2);
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_BULK_GET, mockBulkGet);
// Respond to all PUT Checkpoint requests
MockCheckpointPut mockCheckpointPut = new MockCheckpointPut();
mockCheckpointPut.setSticky(true);
mockCheckpointPut.setDelayMs(500);
dispatcher.enqueueResponse(MockHelper.PATH_REGEX_CHECKPOINT, mockCheckpointPut);
// Setup database change listener:
final List<String> changeDocIDs = new ArrayList<String>();
database.addChangeListener(new Database.ChangeListener() {
@Override
public void changed(Database.ChangeEvent event) {
for (DocumentChange change : event.getChanges()) {
changeDocIDs.add(change.getDocumentId());
}
}
});
// Run pull replication:
Replication replication = database.createPullReplication(server.getUrl("/db"));
runReplication(replication);
// Check result:
assertEquals(2, changeDocIDs.size());
String[] docIDs = changeDocIDs.toArray(new String[changeDocIDs.size()]);
Arrays.sort(docIDs);
assertTrue(Arrays.equals(new String[]{"doc1", "doc2"}, docIDs));
// Check changes feed request:
RecordedRequest changesFeedRequest =
dispatcher.takeRequest(MockHelper.PATH_REGEX_CHANGES);
String acceptEncoding = changesFeedRequest.getHeader("Accept-Encoding");
assertNotNull(acceptEncoding);
assertTrue(acceptEncoding.contains("gzip"));
} finally {
if (server != null)
server.shutdown();
}
}
}
| Fixed Java Core 894 - Replication.docIds API discrepancy
- ported unit test from couchbase/couchbase-lite-ios@551ce20
| src/androidTest/java/com/couchbase/lite/replicator/ReplicationTest.java | Fixed Java Core 894 - Replication.docIds API discrepancy | <ide><path>rc/androidTest/java/com/couchbase/lite/replicator/ReplicationTest.java
<ide> public class ReplicationTest extends LiteTestCaseWithDB {
<ide>
<ide> /**
<add> * TestCase(CreateReplicators) in ReplicationAPITests.m
<add> */
<add> public void testCreateReplicators() throws Exception {
<add> URL fakeRemoteURL = new URL("http://fake.fake/fakedb");
<add>
<add> // Create a replicaton:
<add> assertEquals(0, database.getAllReplications().size());
<add> Replication r1 = database.createPushReplication(fakeRemoteURL);
<add> assertNotNull(r1);
<add>
<add> // Check the replication's properties:
<add> assertEquals(database, r1.getLocalDatabase());
<add> assertEquals(fakeRemoteURL, r1.getRemoteUrl());
<add> assertFalse(r1.isPull());
<add> assertFalse(r1.isContinuous());
<add> assertFalse(r1.shouldCreateTarget());
<add> assertNull(r1.getFilter());
<add> assertNull(r1.getFilterParams());
<add> assertNull(r1.getDocIds());
<add> assertEquals(0, r1.getHeaders().size());
<add>
<add> // Check that the replication hasn't started running:
<add> assertFalse(r1.isRunning());
<add> assertEquals(Replication.ReplicationStatus.REPLICATION_STOPPED, r1.getStatus());
<add> assertEquals(0, r1.getChangesCount());
<add> assertEquals(0, r1.getCompletedChangesCount());
<add> assertNull(r1.getLastError());
<add>
<add> // Create another replication:
<add> Replication r2 = database.createPullReplication(fakeRemoteURL);
<add> assertNotNull(r2);
<add> assertTrue(r1 != r2);
<add>
<add> // Check the replication's properties:
<add> assertEquals(database, r2.getLocalDatabase());
<add> assertEquals(fakeRemoteURL, r2.getRemoteUrl());
<add> assertTrue(r2.isPull());
<add>
<add>
<add> Replication r3 = database.createPullReplication(fakeRemoteURL);
<add> assertNotNull(r3);
<add> assertTrue(r3 != r2);
<add> r3.setDocIds(Arrays.asList("doc1", "doc2"));
<add>
<add> Replication repl = database.getManager().getReplicator(r3.getProperties());
<add> assertEquals(r3.getDocIds(), repl.getDocIds());
<add> }
<add>
<add> /**
<ide> * Continuous puller starts offline
<ide> * Wait for a while .. (til what?)
<ide> * Add remote document (simulate w/ mock webserver)
<ide> assertNotSame(check2, check3);
<ide> }
<ide>
<add> /**
<add> * This test is almost identical with
<add> * TestCase(CBL_Pusher_DocIDs) in CBLReplicator_Tests.m
<add> */
<ide> public void testPushReplicationSetDocumentIDs() throws Exception {
<ide> // Create documents:
<ide> createDocumentForPushReplication("doc1", null, null); |
|
JavaScript | agpl-3.0 | 08948e6734e12896b4739ab9033f366dca6f9b0d | 0 | databrary/databrary,databrary/databrary,databrary/databrary,databrary/databrary | 'use strict';
app.factory('modelService', [
'$q', '$cacheFactory', '$play', 'routerService', 'constantService', 'Segment',
function ($q, $cacheFactory, $play, router, constants, Segment) {
///////////////////////////////// Model: common base class and utils
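// Extract the JSON payload from an angular $http response.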
function resData(res) {
return res.data;
}
function Model(init) {
this.init(init);
}
/* map of fields to true (static: missingness is significant) or false (update when present) */
Model.prototype.fields = {
id: true,
permission: false,
};
Model.prototype.init = function (init) {
var fields = this.fields;
for (var f in fields) {
if (f in init)
this[f] = init[f];
else if (fields[f])
delete this[f];
}
};
Model.prototype.update = function (init) {
if (typeof init !== 'object')
return this;
if (this.hasOwnProperty('id') && init.id !== this.id)
throw new Error("update id mismatch");
this.init(init);
return this;
};
Model.prototype.clear = function (/*f...*/) {
for (var i = 0; i < arguments.length; i ++)
if (arguments[i] in this)
delete this[arguments[i]];
};
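/* true when obj carries a real (non-placeholder) value for field opt */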
function hasField(obj, opt) {
return obj && opt in obj && (!obj[opt] || typeof obj[opt] !== 'object' || !obj[opt]._PLACEHOLDER);
}
/* determine whether the given object satisfies all the given dependency options already.
* returns the missing options, or null if nothing is missing. */
function checkOptions(obj, options) {
var opts = {};
var need = obj ? null : opts;
if (Array.isArray(options)) {
for (var i = 0; i < options.length; i ++)
if (!hasField(obj, options[i])) {
opts[options[i]] = '';
need = opts;
}
}
else if (!obj)
return options || opts;
else if (options)
_.each(options, function(v, o){
if (v || !hasField(obj, o)) {
opts[o] = v;
need = opts;
}
});
return need;
}
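/* give a model constructor the shared Model prototype, a class name,
 * and an (optionally size-bounded) $cacheFactory instance cache with
 * clear/poke helpers */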
function modelCache(obj, name, size) {
obj.prototype = Object.create(Model.prototype);
obj.prototype.constructor = obj;
obj.prototype.class = name;
var opts = {};
if (size)
opts.number = size;
obj.cache = $cacheFactory(name, opts);
obj.clear = function (/*id...*/) {
if (arguments.length)
for (var i = 0; i < arguments.length; i ++)
obj.cache.remove(arguments[i]);
else
obj.cache.removeAll();
};
obj.poke = function (x) {
return obj.cache.put(x.id, x);
};
}
/* delegate the given (missing) fields on instances of obj to the sub-object sub,
* but allow assignments to work directly as usual. */
function delegate(obj, sub /*, field... */) {
function descr(f) {
return {
get: function () {
var s = this[sub];
return s && s.hasOwnProperty(f) ? s[f] : undefined;
},
set: function (v) {
Object.defineProperty(this, f, {
configurable: true,
enumerable: true,
writable: true,
value: v
});
}
};
}
for (var i = 2; i < arguments.length; i ++) {
var f = arguments[i];
Object.defineProperty(obj.prototype, f, descr(f));
}
}
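// e.g., delegate(Slot, 'container', 'id', 'name') makes slot.id read through to
// slot.container.id until slot.id is assigned directly on the instance.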
///////////////////////////////// Party
function Party(init) {
Model.call(this, init);
}
modelCache(Party, 'party', 256);
Party.prototype.fields = {
id: true,
permission: false,
name: true,
sortname: true,
prename: true,
orcid: true,
affiliation: true,
email: true,
institution: true,
url: true,
authorization: false,
};
Party.prototype.init = function (init) {
Model.prototype.init.call(this, init);
if ('access' in init)
this.access = volumeMakeSubArray(init.access);
if ('volumes' in init)
this.volumes = volumeMakeArray(init.volumes);
if ('parents' in init)
this.parents = partyMakeSubArray(init.parents);
if ('children' in init)
this.children = partyMakeSubArray(init.children);
if ('comments' in init)
this.comments = commentMakeArray(null, init.comments);
};
function partyPeek(id) {
return id === Login.user.id && Login.user || Party.cache.get(id);
}
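// get-or-create: update the cached Party if present, else cache a new one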
function partyMake(init) {
var p = partyPeek(init.id);
return p ? p.update(init) : Party.poke(new Party(init));
}
function partyMakeSubArray(l) {
for (var i = 0; i < l.length; i ++)
l[i].party = partyMake(l[i].party);
return l;
}
function partyMakeArray(l) {
if (l) for (var i = 0; i < l.length; i ++)
l[i] = partyMake(l[i]);
return l;
}
function partyGet(id, p, options) {
if ((options = checkOptions(p, options)))
return router.http(id == Login.user.id ? // may both be undefined (id may be string)
router.controllers.getProfile :
router.controllers.getParty,
id, options)
.then(function (res) {
return p ? p.update(res.data) : Party.poke(new Party(res.data));
});
else
return $q.successful(p);
}
Party.get = function (id, options) {
return partyGet(id, partyPeek(id), options);
};
Party.prototype.get = function (options) {
return partyGet(this.id, this, options);
};
Party.prototype.save = function (data) {
var p = this;
return router.http(router.controllers.postParty, this.id, data)
.then(function (res) {
return p.update(res.data);
});
};
Party.search = function (data) {
return router.http(router.controllers.getParties, data)
.then(function (res) {
return partyMakeArray(res.data);
});
};
Party.prototype.route = function () {
return router.party([this.id]);
};
Party.prototype.editRoute = function (page) {
var params = {};
if (page)
params.page = page;
return router.partyEdit([this.id], params);
};
Party.prototype.avatarRoute = function (size, nonce) {
var params = {};
if (nonce)
params.nonce = nonce;
return router.partyAvatar([this.id, size || 56], params);
};
Party.prototype.authorizeSearch = function (apply, param) {
param.authorize = this.id;
return Party.search(param);
};
Party.prototype.authorizeApply = function (target, data) {
var p = this;
return router.http(router.controllers.postAuthorizeApply, this.id, target, data)
.then(function (res) {
p.clear('parents');
return p;
});
};
Party.prototype.authorizeNotFound = function (data) {
return router.http(router.controllers.postAuthorizeNotFound, this.id, data);
};
Party.prototype.authorizeSave = function (target, data) {
var p = this;
return router.http(router.controllers.postAuthorize, this.id, target, data)
.then(function (res) {
p.clear('children');
return res.data;
});
};
Party.prototype.authorizeRemove = function (target) {
var p = this;
return router.http(router.controllers.deleteAuthorize, this.id, target)
.then(function (res) {
p.clear('children');
return p;
});
};
///////////////////////////////// Login
function Login(init) {
Party.call(this, init);
}
Login.prototype = Object.create(Party.prototype);
Login.prototype.constructor = Login;
Login.prototype.fields = angular.extend({
csverf: false,
superuser: false,
}, Login.prototype.fields);
Login.user = new Login({id:constants.party.NOBODY});
function loginPoke(l) {
return (Login.user = Party.poke(new Login(l)));
}
loginPoke($play.user);
router.http.csverf = $play.user.csverf;
function loginRes(res) {
var l = res.data;
if (Login.user.id === l.id && Login.user.superuser === l.superuser)
return Login.user.update(l);
$cacheFactory.removeAll();
router.http.csverf = l.csverf;
return loginPoke(l);
}
Login.isLoggedIn = function () {
return Login.user.id !== constants.party.NOBODY;
};
Login.checkAuthorization = function (level) {
return Login.user.authorization >= level;
};
Model.prototype.checkPermission = function (level) {
return this.permission >= level || Login.user.superuser;
};
/* a little hacky, but to get people SUPER on themselves: */
Login.prototype.checkPermission = function (/*level*/) {
return this.id !== constants.party.NOBODY;
};
Login.isAuthorized = function () {
return Login.isLoggedIn() && Login.checkAuthorization(constants.permission.PUBLIC);
};
Login.prototype.route = function () {
return router.profile();
};
_.each({
get: 'getUser',
login: 'postLogin',
logout: 'postLogout',
// superuserOn: 'superuserOn',
// superuserOff: 'superuserOff'
}, function(api, f){
Login[f] = function (data) {
return router.http(router.controllers[api], data).then(loginRes);
};
});
Login.prototype.saveAccount = function (data) {
var p = this;
return router.http(router.controllers.postUser, data)
.then(function (res) {
return p.update(res.data);
});
};
Login.register = function (data) {
return router.http(router.controllers.postRegister, data);
};
Login.issuePassword = function (data) {
return router.http(router.controllers.postPasswordReset, data);
};
Login.getToken = function (token, auth) {
return router.http(router.controllers.getLoginToken, token, auth)
.then(resData);
};
Login.passwordToken = function (party, data) {
return router.http(router.controllers.postPasswordToken, party, data)
.then(loginRes);
};
///////////////////////////////// Volume
function Volume(init) {
this.containers = {_PLACEHOLDER:true};
this.records = {_PLACEHOLDER:true};
this.assets = {}; // cache only
Model.call(this, init);
}
modelCache(Volume, 'volume', 8);
Volume.prototype.fields = {
id: true,
permission: false,
name: true,
alias: true,
body: true,
doi: true,
creation: true,
owners: true,
citation: false,
links: false,
funding: false,
tags: false,
// consumers: false,
// producers: false,
};
Volume.prototype.init = function (init) {
Model.prototype.init.call(this, init);
if ('access' in init) {
this.access = partyMakeSubArray(init.access);
volumeAccessPreset(this);
}
if ('records' in init) {
var rl = init.records;
for (var ri = 0; ri < rl.length; ri ++)
recordMake(this, rl[ri]);
delete this.records._PLACEHOLDER;
}
if ('containers' in init) {
var cl = init.containers;
for (var ci = 0; ci < cl.length; ci ++)
containerMake(this, cl[ci]);
delete this.containers._PLACEHOLDER;
}
if ('top' in init)
this.top = containerMake(this, init.top);
if ('excerpts' in init)
this.excerpts = assetMakeArray(this, init.excerpts);
if ('comments' in init)
this.comments = commentMakeArray(this, init.comments);
};
function volumeMake(init) {
var v = Volume.cache.get(init.id);
return v ? v.update(init) : Volume.poke(new Volume(init));
}
function volumeMakeArray(l) {
for (var i = 0; i < l.length; i ++)
l[i] = volumeMake(l[i]);
return l;
}
function volumeMakeSubArray(l) {
for (var i = 0; i < l.length; i ++)
l[i].volume = volumeMake(l[i].volume);
return l;
}
function volumeGet(id, v, options) {
if ((options = checkOptions(v, options)))
return router.http(router.controllers.getVolume,
id, options).then(function (res) {
return v ? v.update(res.data) : Volume.poke(new Volume(res.data));
});
else
return $q.successful(v);
}
Volume.get = function (id, options) {
return volumeGet(id, Volume.cache.get(id), options);
};
Volume.prototype.get = function (options) {
return volumeGet(this.id, this, options);
};
Volume.prototype.save = function (data) {
var v = this;
return router.http(router.controllers.postVolume, this.id, data)
.then(function (res) {
return v.update(res.data);
});
};
Volume.prototype.saveLinks = function (data) {
var v = this;
return router.http(router.controllers.postVolumeLinks, this.id, data)
.then(function (res) {
v.clear('links');
return v.update(res.data);
});
};
Volume.create = function (data, owner) {
if (owner !== undefined)
data.owner = owner;
return router.http(router.controllers.createVolume, data)
.then(function (res) {
if ((owner = (owner === undefined ? Login.user : partyPeek(owner))))
owner.clear('access', 'volumes');
return volumeMake(res.data);
});
};
Volume.search = function (data) {
return router.http(router.controllers.getVolumes, data)
.then(function (res) {
return res.data.map(volumeMake);
});
};
Object.defineProperty(Volume.prototype, 'type', {
get: function () {
if ('citation' in this)
return this.citation ? 'study' : 'dataset';
}
});
Object.defineProperty(Volume.prototype, 'displayName', {
get: function () {
return this.alias !== undefined ? this.alias : this.name;
}
});
Volume.prototype.route = function () {
return router.volume([this.id]);
};
Volume.prototype.editRoute = function (page) {
var params = {};
if (page)
params.page = page;
return router.volumeEdit([this.id], params);
};
Volume.prototype.thumbRoute = function (size) {
return router.volumeThumb([this.id, size]);
};
Volume.prototype.zipRoute = function () {
return router.volumeZip([this.id]);
};
Volume.prototype.csvRoute = function () {
return router.volumeCSV([this.id]);
};
Volume.prototype.accessSearch = function (name) {
return Party.search({volume:this.id,query:name});
};
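/* collapse access entries that match a known preset (constants.accessPreset)
 * into volume.accessPreset, leaving only the remaining entries in volume.access */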
function volumeAccessPreset(volume) {
if (!volume.access)
return;
var p = [];
var al = volume.access.filter(function (a) {
var pi = constants.accessPreset.parties.indexOf(a.party.id);
if (pi >= 0)
p[pi] = a.children;
else
return true;
});
var pi = constants.accessPreset.findIndex(function (preset) {
return preset.every(function (s, i) {
return preset[i] === (p[i] || 0);
});
});
if (pi >= 0) {
volume.access = al;
volume.accessPreset = pi;
}
}
Volume.prototype.accessSave = function (target, data) {
var v = this;
return router.http(router.controllers.postVolumeAccess, this.id, target, data)
.then(function (res) {
// could update v.access with res.data
v.clear('access', 'accessPreset');
return v;
});
};
Volume.prototype.accessRemove = function (target) {
return this.accessSave(target, {"delete":true});
};
Volume.prototype.fundingSave = function (funder, data) {
var v = this;
return router.http(router.controllers.postVolumeFunding, this.id, funder, data)
.then(function (res) {
// res.data could replace/add v.funding[X]
v.clear('funding');
return v;
});
};
Volume.prototype.fundingRemove = function (funder) {
var v = this;
return router.http(router.controllers.deleteVolumeFunder, this.id, funder)
.then(function (res) {
// could just remove v.funding[X]
v.clear('funding');
return v.update(res.data);
});
};
///////////////////////////////// Container/Slot
// This does not handle cross-volume inclusions
function Slot(context, init) {
this.container =
context instanceof Container ? context :
containerPrepare(context, init.container);
if (init)
Model.call(this, init);
}
Slot.prototype = Object.create(Model.prototype);
Slot.prototype.constructor = Slot;
Slot.prototype.class = 'slot';
Slot.prototype.fields = {
release: true,
tags: false,
releases: false,
};
Slot.prototype.clear = function (/*f...*/) {
Model.prototype.clear.apply(this, arguments);
Model.prototype.clear.apply(this.container, arguments);
};
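/* populate a slot's sub-objects (assets, comments, records, excerpts) from an init blob */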
function slotInit(slot, init) {
if ('assets' in init) {
var al = init.assets;
slot.assets = {};
for (var ai = 0; ai < al.length; ai ++) {
var a = assetMake(slot.container, al[ai]);
slot.assets[a.id] = a;
}
}
if ('comments' in init)
slot.comments = commentMakeArray(slot.container, init.comments);
if ('records' in init) {
var rl = init.records;
for (var ri = 0; ri < rl.length; ri ++)
rl[ri].record = rl[ri].record ? recordMake(slot.volume, rl[ri].record) : slot.volume.records[rl[ri].id];
slot.records = rl;
}
if ('excerpts' in init)
slot.excerpts = assetMakeArray(slot.container, init.excerpts);
}
Slot.prototype.init = function (init) {
Model.prototype.init.call(this, init);
this.segment = new Segment(init.segment);
if ('container' in init)
this.container.update(init.container);
if ('volume' in init)
this.volume.update(init.volume);
slotInit(this, init);
};
delegate(Slot, 'container',
'id', 'volume', 'top', 'date', 'name');
delegate(Slot, 'volume',
'permission');
Object.defineProperty(Slot.prototype, 'displayName', {
get: function () {
return constants.message(this.container.top ? 'materials' : 'session') + (this.name ? ': ' + this.name : '');
}
});
Slot.prototype.asSlot = function () {
return this.segment.full ? this.container : angular.extend(new Slot(this.container), this);
};
function Container(volume, init) {
this.volume = volume;
volume.containers[init.id] = this;
Slot.call(this, this, init);
}
Container.prototype = Object.create(Slot.prototype);
Container.prototype.constructor = Container;
Container.prototype.fields = angular.extend({
id: false,
_PLACEHOLDER: true,
name: true,
top: true,
date: true,
}, Container.prototype.fields);
Container.prototype.init = function (init) {
Model.prototype.init.call(this, init);
if ('volume' in init)
this.volume.update(init.volume);
if ('container' in init)
this.update(init.container);
slotInit(this, init);
};
Object.defineProperty(Container.prototype, 'segment', {
get: function () {
return Segment.full;
}
});
Container.prototype.remove = function () {
var c = this;
return router.http(router.controllers.deleteContainer, this.id)
.then(function () {
delete c.volume.containers[c.id];
return true;
}, function (res) {
if (res.status == 409) {
c.update(res.data);
return false;
}
return $q.reject(res);
});
};
function containerMake(volume, init) {
var c = volume.containers[init.id];
if (c) {
if (!init._PLACEHOLDER)
c.update(init);
return c;
} else
return new Container(volume, init);
}
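/* accept a container id or an init object; a bare id becomes a placeholder init
 * resolved against the volume's container cache */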
function containerPrepare(volume, init) {
if (typeof init == 'number')
init = {id:init,_PLACEHOLDER:true};
return containerMake(volume || volumeMake(init.volume), init);
}
Volume.prototype.getSlot = function (container, segment, options) {
return containerPrepare(this, parseInt(container, 10)).getSlot(segment, options);
};
Container.prototype.getSlot = function (segment, options) {
var c = this;
if (Segment.isFull(segment))
if ((options = checkOptions(this, options)) || this._PLACEHOLDER)
return router.http(router.controllers.getSlot,
this.id, Segment.format(segment), options)
.then(function (res) {
return c.update(res.data);
});
else return $q.successful(this);
else return router.http(router.controllers.getSlot,
this.id, Segment.format(segment), checkOptions(null, options))
.then(function (res) {
return new Slot(c, res.data);
});
};
Slot.prototype.save = function (data) {
var s = this;
if (data.release === 'undefined')
data.release = '';
return router.http(router.controllers.postContainer, this.container.id, this.segment.format(), data)
.then(function (res) {
if ('release' in data) {
s.clear('releases');
s.container.clear('releases');
}
return s.update(res.data);
});
};
Volume.prototype.createContainer = function (data) {
var v = this;
return router.http(router.controllers.createContainer, this.id, data)
.then(function (res) {
return new Container(v, res.data);
});
};
Slot.prototype.addRecord = function (r, seg) {
if (!seg)
seg = this.segment;
var s = this;
return router.http(router.controllers.postRecordSlot, this.container.id, seg.format(), r.id)
.then(function (res) {
if (res.data.measures) {
r.update(res.data);
return;
}
var d = res.data;
d.record = r;
if ('records' in s)
s.records.push(d);
if (s.container !== s && 'records' in s.container)
s.container.records.push(d);
return d;
});
};
Slot.prototype.newRecord = function (c) {
var s = this;
if (c && typeof c === 'object')
c = c.id;
return router.http(router.controllers.createRecord, this.volume.id, {category:c})
.then(function (res) {
var r = new Record(s.volume, res.data);
return s.addRecord(r);
});
};
Slot.prototype.removeRecord = Slot.prototype.moveRecord = function (r, src, dst) {
if (arguments.length < 3) {
dst = null;
if (src == null)
src = this.segment;
}
var s = this;
return router.http(router.controllers.postRecordSlot, this.container.id, Segment.format(dst), r.id, {src: Segment.data(src)})
.then(function (res) {
if (!res.data)
return;
if (res.data.measures) {
r.update(res.data);
return null;
}
var d = new Segment(res.data.segment);
if (s.records) {
var ss = Segment.make(src);
for (var ri = 0; ri < s.records.length; ri ++) {
if (s.records[ri].id === r.id && ss.contains(s.records[ri].segment)) {
if (d.empty)
s.records.splice(ri, 1);
else
s.records[ri].segment = d;
break;
}
}
}
return d;
});
};
Slot.prototype.route = function (params) {
return router.slot([this.volume.id, this.container.id, this.segment.format()], params);
};
Slot.prototype.editRoute = function (params) {
return router.slotEdit([this.volume.id, this.container.id, this.segment.format()], params);
};
Slot.prototype.zipRoute = function () {
return router.slotZip([this.volume.id, this.container.id]);
};
///////////////////////////////// Record
function Record(volume, init) {
this.volume = volume;
volume.records[init.id] = this;
Model.call(this, init);
}
Record.prototype = Object.create(Model.prototype);
Record.prototype.constructor = Record;
Record.prototype.class = 'record';
Record.prototype.fields = {
id: true,
category: true,
measures: true,
// slots: false,
};
Record.prototype.init = function (init) {
Model.prototype.init.call(this, init);
if ('volume' in init)
this.volume.update(init.volume);
};
delegate(Record, 'volume',
'permission');
function recordMake(volume, init) {
var r = volume.records[init.id];
return r ? r.update(init) : new Record(volume, init);
}
Volume.prototype.getRecord = function (record) {
if (record instanceof Record)
return $q.successful(record);
if (record in this.records)
return $q.successful(this.records[record]);
var v = this;
return router.http(router.controllers.getRecord, record)
.then(function (res) {
return new Record(v, res.data);
});
};
Volume.prototype.createRecord = function (c) {
var v = this;
return router.http(router.controllers.createRecord, this.id, {category: c})
.then(function (res) {
return new Record(v, res.data);
});
};
Record.prototype.remove = function () {
var r = this;
return router.http(router.controllers.deleteRecord, this.id)
.then(function () {
delete r.volume.records[r.id];
return true;
}, function (res) {
if (res.status == 409) {
r.update(res.data);
return false;
}
return $q.reject(res);
});
};
Record.prototype.measureSet = function (metric, value) {
var r = this;
return router.http(router.controllers.postRecordMeasure, this.id, metric, {datum:value})
.then(function (res) {
return r.update(res.data);
});
};
Object.defineProperty(Record.prototype, 'displayName', {
get: function () {
var cat = constants.category[this.category];
var idents = cat && cat.ident || [constants.metricName.ID.id];
var ident = [];
for (var i = 0; i < idents.length; i ++)
if (idents[i] in this.measures)
ident.push(this.measures[idents[i]]);
ident = ident.length && ident.join(', ');
cat = cat && cat.name;
if (cat && ident)
return cat + ' ' + ident;
return cat || ident || '[' + this.id + ']';
}
});
///////////////////////////////// AssetSlot
// This usually maps to an AssetSegment
function AssetSlot(context, init) {
this.asset =
context instanceof Asset ? context :
            assetMake(context, init.asset);
Model.call(this, init);
}
AssetSlot.prototype = Object.create(Slot.prototype);
AssetSlot.prototype.constructor = AssetSlot;
AssetSlot.prototype.class = 'asset-slot';
AssetSlot.prototype.fields = angular.extend({
permission: true,
excerpt: true,
context: true
}, AssetSlot.prototype.fields);
AssetSlot.prototype.init = function (init) {
Model.prototype.init.call(this, init);
this.asset.update(init.asset);
this.segment = new Segment(init.segment);
if ('format' in init)
this.format = constants.format[init.format];
};
delegate(AssetSlot, 'asset',
'id', 'container', 'format', 'duration', 'classification', 'name', 'pending');
Object.defineProperty(AssetSlot.prototype, 'release', {
get: function () {
return Math.max(this.excerpt != null ? this.excerpt : 0, this.classification != null ? this.classification : (this.container.release || 0));
}
});
Object.defineProperty(AssetSlot.prototype, 'displayName', {
get: function () {
return this.name || this.format.name;
}
});
AssetSlot.prototype.route = function () {
return router.slotAsset([this.volume.id, this.container.id, this.segment.format(), this.id]);
};
AssetSlot.prototype.slotRoute = function () {
var params = {};
params.asset = this.id;
params.select = this.segment.format();
return this.container.route(params);
};
AssetSlot.prototype.inContext = function () {
return 'context' in this ?
angular.extend(Object.create(AssetSlot.prototype), this, {segment:Segment.make(this.context)}) :
this.asset;
};
Object.defineProperty(AssetSlot.prototype, 'icon', {
get: function () {
return '/web/images/filetype/16px/' + this.format.extension + '.svg';
}
});
AssetSlot.prototype.inSegment = function (segment) {
segment = this.segment.intersect(segment);
if (segment.equals(this.segment))
return this;
return new AssetSlot(this.asset, {permission:this.permission, segment:segment});
};
AssetSlot.prototype.setExcerpt = function (release) {
var a = this;
return router.http(release != null ? router.controllers.postExcerpt : router.controllers.deleteExcerpt, this.container.id, this.segment.format(), this.id, {release:release})
.then(function (res) {
a.clear('excerpts');
a.volume.clear('excerpts');
return a.update(res.data);
});
};
AssetSlot.prototype.thumbRoute = function (size) {
return router.assetThumb([this.container.id, this.segment.format(), this.id, size]);
};
AssetSlot.prototype.downloadRoute = function (inline) {
return router.assetDownload([this.container.id, this.segment.format(), this.id, inline]);
};
///////////////////////////////// Asset
// This usually maps to a SlotAsset, but may be an unlinked Asset
function Asset(context, init) {
if (init.container || context instanceof Container)
Slot.call(this, context, init);
else {
this.volume = context;
Model.call(this, init);
}
this.asset = this;
this.volume.assets[init.id] = this;
}
Asset.prototype = Object.create(AssetSlot.prototype);
Asset.prototype.constructor = Asset;
Asset.prototype.class = 'asset';
Asset.prototype.fields = angular.extend({
id: true,
classification: true,
name: true,
duration: true,
pending: true,
creation: false,
size: false,
}, Asset.prototype.fields);
Asset.prototype.init = function (init) {
if (!this.container && 'container' in init)
this.container = containerPrepare(this.volume, init.container);
Slot.prototype.init.call(this, init);
if ('format' in init)
this.format = constants.format[init.format];
if ('revisions' in init)
this.revisions = assetMakeArray(this.volume, init.revisions);
};
function assetMake(context, init) {
var v = context.volume || context;
if (typeof init === 'number')
return v.assets[init];
if ('id' in init) {
var a = v.assets[init.id];
return a ? a.update(init) : new Asset(context, init);
} else
return new AssetSlot(context, init);
}
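    /* assetMake resolves a cached Asset for a bare numeric id, updates or
     * creates a volume-cached Asset when init carries an id, and otherwise
     * wraps init in a transient AssetSlot. */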
function assetMakeArray(context, l) {
if (l) for (var i = 0; i < l.length; i ++)
l[i] = assetMake(context, l[i]);
return l;
}
Volume.prototype.getAsset = function (asset, container, segment, options) {
var v = this;
options = checkOptions(null, options);
return (container === undefined ?
router.http(router.controllers.getAsset, v.id, asset, options) :
router.http(router.controllers.getAssetSegment, v.id, container, Segment.format(segment), asset, options))
.then(function (res) {
return assetMake(v, res.data);
});
};
Asset.prototype.get = function (options) {
var a = this;
if ((options = checkOptions(a, options)))
return router.http(router.controllers.getAsset, a.id, options)
.then(function (res) {
return a.update(res.data);
});
else
return $q.successful(a);
};
Asset.prototype.save = function (data) {
var a = this;
if (data.classification === 'undefined')
data.classification = '';
return router.http(router.controllers.postAsset, this.id, data)
.then(function (res) {
if ('excerpt' in data) {
a.clear('excerpts');
a.volume.clear('excerpts');
}
return a.update(res.data);
});
};
Asset.prototype.link = function (slot, data) {
if (!data)
data = {};
data.container = slot.container.id;
data.position = slot.segment.l;
return this.save(data);
};
Slot.prototype.createAsset = function (data) {
var s = this;
if (!data)
data = {};
data.container = this.container.id;
if (!('position' in data) && isFinite(this.segment.l))
data.position = this.segment.l;
return router.http(router.controllers.createAsset, this.volume.id, data)
.then(function (res) {
s.clear('assets');
return assetMake(s.container, res.data);
});
};
Asset.prototype.replace = function (data) {
var a = this;
return router.http(router.controllers.postAsset, this.id, data)
.then(function (res) {
if (a.container)
a.container.clear('assets');
return assetMake(a.container || a.volume, res.data);
});
};
Asset.prototype.remove = function () {
var a = this;
return router.http(router.controllers.deleteAsset, this.id)
.then(function (res) {
if (a.container)
a.container.clear('assets');
return a.update(res.data);
});
};
///////////////////////////////// Comment
function Comment(context, init) {
Slot.call(this, context, init);
}
Comment.prototype = Object.create(Slot.prototype);
Comment.prototype.constructor = Comment;
Comment.prototype.class = 'comment';
Comment.prototype.fields = angular.extend({
id: true,
time: true,
text: true,
parents: true
}, Comment.prototype.fields);
Comment.prototype.init = function (init) {
Slot.prototype.init.call(this, init);
if ('who' in init)
this.who = partyMake(init.who);
};
function commentMakeArray(context, l) {
if (l) for (var i = 0; i < l.length; i ++)
l[i] = new Comment(context, l[i]);
return l;
}
Slot.prototype.postComment = function (data, segment, reply) {
if (segment === undefined)
segment = this.segment;
if (arguments.length < 3 && this instanceof Comment)
reply = this.id;
var s = this;
if (reply != null)
data.parent = reply;
return router.http(router.controllers.postComment, this.container.id, segment.format(), data)
.then(function (res) {
s.volume.clear('comments');
s.clear('comments');
return new Comment(s.container, res.data);
});
};
///////////////////////////////// Tag
// no point in a model, really
var Tag = {};
Tag.search = function (query) {
return router.http(router.controllers.getTags, query)
.then(function(res) {
return res.data;
});
};
Tag.top = function () {
return router.http(router.controllers.getTopTags)
.then(function(res) {
return res.data;
});
};
Slot.prototype.setTag = function (tag, vote, keyword, segment) {
if (segment === undefined)
segment = this.segment;
var s = this;
return router.http(router.controllers[vote ? (keyword ? "postKeyword" : "postTag") : (keyword ? "deleteKeyword" : "deleteTag")], this.container.id, segment.format(), tag)
.then(function (res) {
var tag = res.data;
s.volume.clear('tags');
if ('tags' in s)
s.tags[tag.id] = tag;
return tag;
});
};
/////////////////////////////////
return {
Party: Party,
Login: Login,
Volume: Volume,
Container: Container,
Slot: Slot,
Record: Record,
Asset: Asset,
AssetSlot: AssetSlot,
Comment: Comment,
Tag: Tag,
funder: function (query, all) {
return router.http(router.controllers.getFunders, {query:query,all:all})
.then(resData);
},
cite: function (url) {
return router.http(router.controllers.getCitation, {url:url})
.then(resData);
},
analytic: function () {
return router.http(router.controllers.get, {}, {cache:false});
},
activity: function () {
return router.http(router.controllers.getActivity)
.then(function (res) {
for (var i = 0; i < res.data.length; i ++) {
if ('volume' in res.data[i])
res.data[i].volume = volumeMake(res.data[i].volume);
if ('party' in res.data[i])
res.data[i].party = partyMake(res.data[i].party);
}
return res.data;
});
}
};
}
]);
| web/service/model.js | 'use strict';
app.factory('modelService', [
'$q', '$cacheFactory', '$play', 'routerService', 'constantService', 'Segment',
function ($q, $cacheFactory, $play, router, constants, Segment) {
///////////////////////////////// Model: common base class and utils
function resData(res) {
return res.data;
}
function Model(init) {
this.init(init);
}
    /* map of fields to true (static, missingness is significant) or false (update when present) */
Model.prototype.fields = {
id: true,
permission: false,
};
Model.prototype.init = function (init) {
var fields = this.fields;
for (var f in fields) {
if (f in init)
this[f] = init[f];
else if (fields[f])
delete this[f];
}
};
Model.prototype.update = function (init) {
if (typeof init !== 'object')
return this;
if (this.hasOwnProperty('id') && init.id !== this.id)
throw new Error("update id mismatch");
this.init(init);
return this;
};
Model.prototype.clear = function (/*f...*/) {
for (var i = 0; i < arguments.length; i ++)
if (arguments[i] in this)
delete this[arguments[i]];
};
function hasField(obj, opt) {
return obj && opt in obj && (!obj[opt] || typeof obj[opt] !== 'object' || !obj[opt]._PLACEHOLDER);
}
/* determine whether the given object satisfies all the given dependency options already.
* returns the missing options, or null if nothing is missing. */
function checkOptions(obj, options) {
var opts = {};
var need = obj ? null : opts;
if (Array.isArray(options)) {
for (var i = 0; i < options.length; i ++)
if (!hasField(obj, options[i])) {
opts[options[i]] = '';
need = opts;
}
}
else if (!obj)
return options || opts;
else if (options)
_.each(options, function(v, o){
if (v || !hasField(obj, o)) {
opts[o] = v;
need = opts;
}
});
return need;
}
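    /* Illustrative example: checkOptions(volume, ['access','top']) yields
     * {access:'', top:''} when neither field is loaded, {top:''} when only
     * access is cached, and null once both are present, letting callers skip
     * the request entirely. */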
function modelCache(obj, name, size) {
obj.prototype = Object.create(Model.prototype);
obj.prototype.constructor = obj;
obj.prototype.class = name;
var opts = {};
if (size)
opts.number = size;
obj.cache = $cacheFactory(name, opts);
obj.clear = function (/*id...*/) {
if (arguments.length)
for (var i = 0; i < arguments.length; i ++)
obj.cache.remove(arguments[i]);
else
obj.cache.removeAll();
};
obj.poke = function (x) {
return obj.cache.put(x.id, x);
};
}
/* delegate the given (missing) fields on instances of obj to the sub-object sub,
* but allow assignments to work directly as usual. */
function delegate(obj, sub /*, field... */) {
function descr(f) {
return {
get: function () {
var s = this[sub];
return s && s.hasOwnProperty(f) ? s[f] : undefined;
},
set: function (v) {
Object.defineProperty(this, f, {
configurable: true,
enumerable: true,
writable: true,
value: v
});
}
};
}
for (var i = 2; i < arguments.length; i ++) {
var f = arguments[i];
Object.defineProperty(obj.prototype, f, descr(f));
}
}
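    /* e.g. delegate(Slot, 'container', 'id', 'name') makes slot.id and
     * slot.name read through to slot.container until a value is assigned
     * directly on the slot itself. */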
///////////////////////////////// Party
function Party(init) {
Model.call(this, init);
}
modelCache(Party, 'party', 256);
Party.prototype.fields = {
id: true,
permission: false,
name: true,
sortname: true,
prename: true,
orcid: true,
affiliation: true,
email: true,
institution: true,
url: true,
authorization: false,
};
Party.prototype.init = function (init) {
Model.prototype.init.call(this, init);
if ('access' in init)
this.access = volumeMakeSubArray(init.access);
if ('volumes' in init)
this.volumes = volumeMakeArray(init.volumes);
if ('parents' in init)
this.parents = partyMakeSubArray(init.parents);
if ('children' in init)
this.children = partyMakeSubArray(init.children);
if ('comments' in init)
this.comments = commentMakeArray(null, init.comments);
};
function partyPeek(id) {
return id === Login.user.id && Login.user || Party.cache.get(id);
}
function partyMake(init) {
var p = partyPeek(init.id);
return p ? p.update(init) : Party.poke(new Party(init));
}
function partyMakeSubArray(l) {
for (var i = 0; i < l.length; i ++)
l[i].party = partyMake(l[i].party);
return l;
}
function partyMakeArray(l) {
if (l) for (var i = 0; i < l.length; i ++)
l[i] = partyMake(l[i]);
return l;
}
function partyGet(id, p, options) {
if ((options = checkOptions(p, options)))
return router.http(id == Login.user.id ? // may both be undefined (id may be string)
router.controllers.getProfile :
router.controllers.getParty,
id, options)
.then(function (res) {
return p ? p.update(res.data) : Party.poke(new Party(res.data));
});
else
return $q.successful(p);
}
Party.get = function (id, options) {
return partyGet(id, partyPeek(id), options);
};
Party.prototype.get = function (options) {
return partyGet(this.id, this, options);
};
Party.prototype.save = function (data) {
var p = this;
return router.http(router.controllers.postParty, this.id, data)
.then(function (res) {
return p.update(res.data);
});
};
Party.search = function (data) {
return router.http(router.controllers.getParties, data)
.then(function (res) {
return partyMakeArray(res.data);
});
};
Party.prototype.route = function () {
return router.party([this.id]);
};
Object.defineProperty(Party.prototype, 'lastName', {
get: function () {
return this.name.substr(this.name.lastIndexOf(' ')+1);
}
});
Party.prototype.editRoute = function (page) {
var params = {};
if (page)
params.page = page;
return router.partyEdit([this.id], params);
};
Party.prototype.avatarRoute = function (size, nonce) {
var params = {};
if (nonce)
params.nonce = nonce;
return router.partyAvatar([this.id, size || 56], params);
};
Party.prototype.authorizeSearch = function (apply, param) {
param.authorize = this.id;
return Party.search(param);
};
Party.prototype.authorizeApply = function (target, data) {
var p = this;
return router.http(router.controllers.postAuthorizeApply, this.id, target, data)
.then(function (res) {
p.clear('parents');
return p;
});
};
Party.prototype.authorizeNotFound = function (data) {
return router.http(router.controllers.postAuthorizeNotFound, this.id, data);
};
Party.prototype.authorizeSave = function (target, data) {
var p = this;
return router.http(router.controllers.postAuthorize, this.id, target, data)
.then(function (res) {
p.clear('children');
return res.data;
});
};
Party.prototype.authorizeRemove = function (target) {
var p = this;
return router.http(router.controllers.deleteAuthorize, this.id, target)
.then(function (res) {
p.clear('children');
return p;
});
};
///////////////////////////////// Login
function Login(init) {
Party.call(this, init);
}
Login.prototype = Object.create(Party.prototype);
Login.prototype.constructor = Login;
Login.prototype.fields = angular.extend({
csverf: false,
superuser: false,
}, Login.prototype.fields);
Login.user = new Login({id:constants.party.NOBODY});
function loginPoke(l) {
return (Login.user = Party.poke(new Login(l)));
}
loginPoke($play.user);
router.http.csverf = $play.user.csverf;
function loginRes(res) {
var l = res.data;
if (Login.user.id === l.id && Login.user.superuser === l.superuser)
return Login.user.update(l);
$cacheFactory.removeAll();
router.http.csverf = l.csverf;
return loginPoke(l);
}
Login.isLoggedIn = function () {
return Login.user.id !== constants.party.NOBODY;
};
Login.checkAuthorization = function (level) {
return Login.user.authorization >= level;
};
Model.prototype.checkPermission = function (level) {
return this.permission >= level || Login.user.superuser;
};
/* a little hacky, but to get people SUPER on themselves: */
Login.prototype.checkPermission = function (/*level*/) {
return this.id !== constants.party.NOBODY;
};
Login.isAuthorized = function () {
return Login.isLoggedIn() && Login.checkAuthorization(constants.permission.PUBLIC);
};
Login.prototype.route = function () {
return router.profile();
};
_.each({
get: 'getUser',
login: 'postLogin',
logout: 'postLogout',
// superuserOn: 'superuserOn',
// superuserOff: 'superuserOff'
}, function(api, f){
Login[f] = function (data) {
return router.http(router.controllers[api], data).then(loginRes);
};
});
Login.prototype.saveAccount = function (data) {
var p = this;
return router.http(router.controllers.postUser, data)
.then(function (res) {
return p.update(res.data);
});
};
Login.register = function (data) {
return router.http(router.controllers.postRegister, data);
};
Login.issuePassword = function (data) {
return router.http(router.controllers.postPasswordReset, data);
};
Login.getToken = function (token, auth) {
return router.http(router.controllers.getLoginToken, token, auth)
.then(resData);
};
Login.passwordToken = function (party, data) {
return router.http(router.controllers.postPasswordToken, party, data)
.then(loginRes);
};
///////////////////////////////// Volume
function Volume(init) {
this.containers = {_PLACEHOLDER:true};
this.records = {_PLACEHOLDER:true};
this.assets = {}; // cache only
Model.call(this, init);
}
modelCache(Volume, 'volume', 8);
Volume.prototype.fields = {
id: true,
permission: false,
name: true,
alias: true,
body: true,
doi: true,
creation: true,
owners: true,
citation: false,
links: false,
funding: false,
tags: false,
// consumers: false,
// producers: false,
};
Volume.prototype.init = function (init) {
Model.prototype.init.call(this, init);
if ('access' in init) {
this.access = partyMakeSubArray(init.access);
volumeAccessPreset(this);
}
if ('records' in init) {
var rl = init.records;
for (var ri = 0; ri < rl.length; ri ++)
recordMake(this, rl[ri]);
delete this.records._PLACEHOLDER;
}
if ('containers' in init) {
var cl = init.containers;
for (var ci = 0; ci < cl.length; ci ++)
containerMake(this, cl[ci]);
delete this.containers._PLACEHOLDER;
}
if ('top' in init)
this.top = containerMake(this, init.top);
if ('excerpts' in init)
this.excerpts = assetMakeArray(this, init.excerpts);
if ('comments' in init)
this.comments = commentMakeArray(this, init.comments);
};
function volumeMake(init) {
var v = Volume.cache.get(init.id);
return v ? v.update(init) : Volume.poke(new Volume(init));
}
function volumeMakeArray(l) {
for (var i = 0; i < l.length; i ++)
l[i] = volumeMake(l[i]);
return l;
}
function volumeMakeSubArray(l) {
for (var i = 0; i < l.length; i ++)
l[i].volume = volumeMake(l[i].volume);
return l;
}
function volumeGet(id, v, options) {
if ((options = checkOptions(v, options)))
return router.http(router.controllers.getVolume,
id, options).then(function (res) {
return v ? v.update(res.data) : Volume.poke(new Volume(res.data));
});
else
return $q.successful(v);
}
Volume.get = function (id, options) {
return volumeGet(id, Volume.cache.get(id), options);
};
Volume.prototype.get = function (options) {
return volumeGet(this.id, this, options);
};
Volume.prototype.save = function (data) {
var v = this;
return router.http(router.controllers.postVolume, this.id, data)
.then(function (res) {
return v.update(res.data);
});
};
Volume.prototype.saveLinks = function (data) {
var v = this;
return router.http(router.controllers.postVolumeLinks, this.id, data)
.then(function (res) {
v.clear('links');
return v.update(res.data);
});
};
Volume.create = function (data, owner) {
if (owner !== undefined)
data.owner = owner;
return router.http(router.controllers.createVolume, data)
.then(function (res) {
if ((owner = (owner === undefined ? Login.user : partyPeek(owner))))
owner.clear('access', 'volumes');
return volumeMake(res.data);
});
};
Volume.search = function (data) {
return router.http(router.controllers.getVolumes, data)
.then(function (res) {
return res.data.map(volumeMake);
});
};
Object.defineProperty(Volume.prototype, 'type', {
get: function () {
if ('citation' in this)
return this.citation ? 'study' : 'dataset';
}
});
Object.defineProperty(Volume.prototype, 'displayName', {
get: function () {
return this.alias !== undefined ? this.alias : this.name;
}
});
Volume.prototype.route = function () {
return router.volume([this.id]);
};
Volume.prototype.editRoute = function (page) {
var params = {};
if (page)
params.page = page;
return router.volumeEdit([this.id], params);
};
Volume.prototype.thumbRoute = function (size) {
return router.volumeThumb([this.id, size]);
};
Volume.prototype.zipRoute = function () {
return router.volumeZip([this.id]);
};
Volume.prototype.csvRoute = function () {
return router.volumeCSV([this.id]);
};
Volume.prototype.accessSearch = function (name) {
return Party.search({volume:this.id,query:name});
};
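    /* Fold access entries for the well-known preset parties into a single
     * accessPreset index when their levels exactly match one of
     * constants.accessPreset's patterns, hiding those entries from the
     * visible access list. */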
function volumeAccessPreset(volume) {
if (!volume.access)
return;
var p = [];
var al = volume.access.filter(function (a) {
var pi = constants.accessPreset.parties.indexOf(a.party.id);
if (pi >= 0)
p[pi] = a.children;
else
return true;
});
var pi = constants.accessPreset.findIndex(function (preset) {
return preset.every(function (s, i) {
return preset[i] === (p[i] || 0);
});
});
if (pi >= 0) {
volume.access = al;
volume.accessPreset = pi;
}
}
Volume.prototype.accessSave = function (target, data) {
var v = this;
return router.http(router.controllers.postVolumeAccess, this.id, target, data)
.then(function (res) {
// could update v.access with res.data
v.clear('access', 'accessPreset');
return v;
});
};
Volume.prototype.accessRemove = function (target) {
return this.accessSave(target, {"delete":true});
};
Volume.prototype.fundingSave = function (funder, data) {
var v = this;
return router.http(router.controllers.postVolumeFunding, this.id, funder, data)
.then(function (res) {
// res.data could replace/add v.funding[X]
v.clear('funding');
return v;
});
};
Volume.prototype.fundingRemove = function (funder) {
var v = this;
return router.http(router.controllers.deleteVolumeFunder, this.id, funder)
.then(function (res) {
// could just remove v.funding[X]
v.clear('funding');
return v.update(res.data);
});
};
///////////////////////////////// Container/Slot
// This does not handle cross-volume inclusions
function Slot(context, init) {
this.container =
context instanceof Container ? context :
containerPrepare(context, init.container);
if (init)
Model.call(this, init);
}
Slot.prototype = Object.create(Model.prototype);
Slot.prototype.constructor = Slot;
Slot.prototype.class = 'slot';
Slot.prototype.fields = {
release: true,
tags: false,
releases: false,
};
Slot.prototype.clear = function (/*f...*/) {
Model.prototype.clear.apply(this, arguments);
Model.prototype.clear.apply(this.container, arguments);
};
function slotInit(slot, init) {
if ('assets' in init) {
var al = init.assets;
slot.assets = {};
for (var ai = 0; ai < al.length; ai ++) {
var a = assetMake(slot.container, al[ai]);
slot.assets[a.id] = a;
}
}
if ('comments' in init)
slot.comments = commentMakeArray(slot.container, init.comments);
if ('records' in init) {
var rl = init.records;
for (var ri = 0; ri < rl.length; ri ++)
rl[ri].record = rl[ri].record ? recordMake(slot.volume, rl[ri].record) : slot.volume.records[rl[ri].id];
slot.records = rl;
}
if ('excerpts' in init)
slot.excerpts = assetMakeArray(slot.container, init.excerpts);
}
Slot.prototype.init = function (init) {
Model.prototype.init.call(this, init);
this.segment = new Segment(init.segment);
if ('container' in init)
this.container.update(init.container);
if ('volume' in init)
this.volume.update(init.volume);
slotInit(this, init);
};
delegate(Slot, 'container',
'id', 'volume', 'top', 'date', 'name');
delegate(Slot, 'volume',
'permission');
Object.defineProperty(Slot.prototype, 'displayName', {
get: function () {
return constants.message(this.container.top ? 'materials' : 'session') + (this.name ? ': ' + this.name : '');
}
});
Slot.prototype.asSlot = function () {
return this.segment.full ? this.container : angular.extend(new Slot(this.container), this);
};
function Container(volume, init) {
this.volume = volume;
volume.containers[init.id] = this;
Slot.call(this, this, init);
}
Container.prototype = Object.create(Slot.prototype);
Container.prototype.constructor = Container;
Container.prototype.fields = angular.extend({
id: false,
_PLACEHOLDER: true,
name: true,
top: true,
date: true,
}, Container.prototype.fields);
Container.prototype.init = function (init) {
Model.prototype.init.call(this, init);
if ('volume' in init)
this.volume.update(init.volume);
if ('container' in init)
this.update(init.container);
slotInit(this, init);
};
Object.defineProperty(Container.prototype, 'segment', {
get: function () {
return Segment.full;
}
});
Container.prototype.remove = function () {
var c = this;
return router.http(router.controllers.deleteContainer, this.id)
.then(function () {
delete c.volume.containers[c.id];
return true;
}, function (res) {
if (res.status == 409) {
c.update(res.data);
return false;
}
return $q.reject(res);
});
};
function containerMake(volume, init) {
var c = volume.containers[init.id];
if (c) {
if (!init._PLACEHOLDER)
c.update(init);
return c;
} else
return new Container(volume, init);
}
function containerPrepare(volume, init) {
if (typeof init == 'number')
init = {id:init,_PLACEHOLDER:true};
return containerMake(volume || volumeMake(init.volume), init);
}
Volume.prototype.getSlot = function (container, segment, options) {
return containerPrepare(this, parseInt(container, 10)).getSlot(segment, options);
};
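    /* Sketch of the contract below (inferred from the code): for a full
     * segment, refresh (or reuse) this container itself; for a proper
     * sub-segment, always fetch and wrap the response in a new Slot. */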
Container.prototype.getSlot = function (segment, options) {
var c = this;
if (Segment.isFull(segment))
if ((options = checkOptions(this, options)) || this._PLACEHOLDER)
return router.http(router.controllers.getSlot,
this.id, Segment.format(segment), options)
.then(function (res) {
return c.update(res.data);
});
else return $q.successful(this);
else return router.http(router.controllers.getSlot,
this.id, Segment.format(segment), checkOptions(null, options))
.then(function (res) {
return new Slot(c, res.data);
});
};
Slot.prototype.save = function (data) {
var s = this;
if (data.release === 'undefined')
data.release = '';
return router.http(router.controllers.postContainer, this.container.id, this.segment.format(), data)
.then(function (res) {
if ('release' in data) {
s.clear('releases');
s.container.clear('releases');
}
return s.update(res.data);
});
};
Volume.prototype.createContainer = function (data) {
var v = this;
return router.http(router.controllers.createContainer, this.id, data)
.then(function (res) {
return new Container(v, res.data);
});
};
Slot.prototype.addRecord = function (r, seg) {
if (!seg)
seg = this.segment;
var s = this;
return router.http(router.controllers.postRecordSlot, this.container.id, seg.format(), r.id)
.then(function (res) {
if (res.data.measures) {
r.update(res.data);
return;
}
var d = res.data;
d.record = r;
if ('records' in s)
s.records.push(d);
if (s.container !== s && 'records' in s.container)
s.container.records.push(d);
return d;
});
};
Slot.prototype.newRecord = function (c) {
var s = this;
if (c && typeof c === 'object')
c = c.id;
return router.http(router.controllers.createRecord, this.volume.id, {category:c})
.then(function (res) {
var r = new Record(s.volume, res.data);
return s.addRecord(r);
});
};
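    /* Move record r from segment src to dst on this slot; called with fewer
     * than three arguments (the removeRecord alias) it deletes the link
     * instead. Resolves to the record's resulting segment when a link
     * changed. */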
Slot.prototype.removeRecord = Slot.prototype.moveRecord = function (r, src, dst) {
if (arguments.length < 3) {
dst = null;
if (src == null)
src = this.segment;
}
var s = this;
return router.http(router.controllers.postRecordSlot, this.container.id, Segment.format(dst), r.id, {src: Segment.data(src)})
.then(function (res) {
if (!res.data)
return;
if (res.data.measures) {
r.update(res.data);
return null;
}
var d = new Segment(res.data.segment);
if (s.records) {
var ss = Segment.make(src);
for (var ri = 0; ri < s.records.length; ri ++) {
if (s.records[ri].id === r.id && ss.contains(s.records[ri].segment)) {
if (d.empty)
s.records.splice(ri, 1);
else
s.records[ri].segment = d;
break;
}
}
}
return d;
});
};
Slot.prototype.route = function (params) {
return router.slot([this.volume.id, this.container.id, this.segment.format()], params);
};
Slot.prototype.editRoute = function (params) {
return router.slotEdit([this.volume.id, this.container.id, this.segment.format()], params);
};
Slot.prototype.zipRoute = function () {
return router.slotZip([this.volume.id, this.container.id]);
};
///////////////////////////////// Record
function Record(volume, init) {
this.volume = volume;
volume.records[init.id] = this;
Model.call(this, init);
}
Record.prototype = Object.create(Model.prototype);
Record.prototype.constructor = Record;
Record.prototype.class = 'record';
Record.prototype.fields = {
id: true,
category: true,
measures: true,
// slots: false,
};
Record.prototype.init = function (init) {
Model.prototype.init.call(this, init);
if ('volume' in init)
this.volume.update(init.volume);
};
delegate(Record, 'volume',
'permission');
function recordMake(volume, init) {
var r = volume.records[init.id];
return r ? r.update(init) : new Record(volume, init);
}
Volume.prototype.getRecord = function (record) {
if (record instanceof Record)
return $q.successful(record);
if (record in this.records)
return $q.successful(this.records[record]);
var v = this;
return router.http(router.controllers.getRecord, record)
.then(function (res) {
return new Record(v, res.data);
});
};
Volume.prototype.createRecord = function (c) {
var v = this;
return router.http(router.controllers.createRecord, this.id, {category: c})
.then(function (res) {
return new Record(v, res.data);
});
};
Record.prototype.remove = function () {
var r = this;
return router.http(router.controllers.deleteRecord, this.id)
.then(function () {
delete r.volume.records[r.id];
return true;
}, function (res) {
if (res.status == 409) {
r.update(res.data);
return false;
}
return $q.reject(res);
});
};
Record.prototype.measureSet = function (metric, value) {
var r = this;
return router.http(router.controllers.postRecordMeasure, this.id, metric, {datum:value})
.then(function (res) {
return r.update(res.data);
});
};
Object.defineProperty(Record.prototype, 'displayName', {
get: function () {
var cat = constants.category[this.category];
var idents = cat && cat.ident || [constants.metricName.ID.id];
var ident = [];
for (var i = 0; i < idents.length; i ++)
if (idents[i] in this.measures)
ident.push(this.measures[idents[i]]);
ident = ident.length && ident.join(', ');
cat = cat && cat.name;
if (cat && ident)
return cat + ' ' + ident;
return cat || ident || '[' + this.id + ']';
}
});
///////////////////////////////// AssetSlot
// This usually maps to an AssetSegment
function AssetSlot(context, init) {
this.asset =
context instanceof Asset ? context :
            assetMake(context, init.asset);
Model.call(this, init);
}
AssetSlot.prototype = Object.create(Slot.prototype);
AssetSlot.prototype.constructor = AssetSlot;
AssetSlot.prototype.class = 'asset-slot';
AssetSlot.prototype.fields = angular.extend({
permission: true,
excerpt: true,
context: true
}, AssetSlot.prototype.fields);
AssetSlot.prototype.init = function (init) {
Model.prototype.init.call(this, init);
this.asset.update(init.asset);
this.segment = new Segment(init.segment);
if ('format' in init)
this.format = constants.format[init.format];
};
delegate(AssetSlot, 'asset',
'id', 'container', 'format', 'duration', 'classification', 'name', 'pending');
Object.defineProperty(AssetSlot.prototype, 'release', {
get: function () {
return Math.max(this.excerpt != null ? this.excerpt : 0, this.classification != null ? this.classification : (this.container.release || 0));
}
});
Object.defineProperty(AssetSlot.prototype, 'displayName', {
get: function () {
return this.name || this.format.name;
}
});
AssetSlot.prototype.route = function () {
return router.slotAsset([this.volume.id, this.container.id, this.segment.format(), this.id]);
};
AssetSlot.prototype.slotRoute = function () {
var params = {};
params.asset = this.id;
params.select = this.segment.format();
return this.container.route(params);
};
AssetSlot.prototype.inContext = function () {
return 'context' in this ?
angular.extend(Object.create(AssetSlot.prototype), this, {segment:Segment.make(this.context)}) :
this.asset;
};
Object.defineProperty(AssetSlot.prototype, 'icon', {
get: function () {
return '/web/images/filetype/16px/' + this.format.extension + '.svg';
}
});
AssetSlot.prototype.inSegment = function (segment) {
segment = this.segment.intersect(segment);
if (segment.equals(this.segment))
return this;
return new AssetSlot(this.asset, {permission:this.permission, segment:segment});
};
AssetSlot.prototype.setExcerpt = function (release) {
var a = this;
return router.http(release != null ? router.controllers.postExcerpt : router.controllers.deleteExcerpt, this.container.id, this.segment.format(), this.id, {release:release})
.then(function (res) {
a.clear('excerpts');
a.volume.clear('excerpts');
return a.update(res.data);
});
};
AssetSlot.prototype.thumbRoute = function (size) {
return router.assetThumb([this.container.id, this.segment.format(), this.id, size]);
};
AssetSlot.prototype.downloadRoute = function (inline) {
return router.assetDownload([this.container.id, this.segment.format(), this.id, inline]);
};
///////////////////////////////// Asset
// This usually maps to a SlotAsset, but may be an unlinked Asset
function Asset(context, init) {
if (init.container || context instanceof Container)
Slot.call(this, context, init);
else {
this.volume = context;
Model.call(this, init);
}
this.asset = this;
this.volume.assets[init.id] = this;
}
Asset.prototype = Object.create(AssetSlot.prototype);
Asset.prototype.constructor = Asset;
Asset.prototype.class = 'asset';
Asset.prototype.fields = angular.extend({
id: true,
classification: true,
name: true,
duration: true,
pending: true,
creation: false,
size: false,
}, Asset.prototype.fields);
Asset.prototype.init = function (init) {
if (!this.container && 'container' in init)
this.container = containerPrepare(this.volume, init.container);
Slot.prototype.init.call(this, init);
if ('format' in init)
this.format = constants.format[init.format];
if ('revisions' in init)
this.revisions = assetMakeArray(this.volume, init.revisions);
};
function assetMake(context, init) {
var v = context.volume || context;
if (typeof init === 'number')
return v.assets[init];
if ('id' in init) {
var a = v.assets[init.id];
return a ? a.update(init) : new Asset(context, init);
} else
return new AssetSlot(context, init);
}
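    /* assetMake resolves a cached Asset for a bare numeric id, updates or
     * creates a volume-cached Asset when init carries an id, and otherwise
     * wraps init in a transient AssetSlot. */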
function assetMakeArray(context, l) {
if (l) for (var i = 0; i < l.length; i ++)
l[i] = assetMake(context, l[i]);
return l;
}
Volume.prototype.getAsset = function (asset, container, segment, options) {
var v = this;
options = checkOptions(null, options);
return (container === undefined ?
router.http(router.controllers.getAsset, v.id, asset, options) :
router.http(router.controllers.getAssetSegment, v.id, container, Segment.format(segment), asset, options))
.then(function (res) {
return assetMake(v, res.data);
});
};
Asset.prototype.get = function (options) {
var a = this;
if ((options = checkOptions(a, options)))
return router.http(router.controllers.getAsset, a.id, options)
.then(function (res) {
return a.update(res.data);
});
else
return $q.successful(a);
};
Asset.prototype.save = function (data) {
var a = this;
if (data.classification === 'undefined')
data.classification = '';
return router.http(router.controllers.postAsset, this.id, data)
.then(function (res) {
if ('excerpt' in data) {
a.clear('excerpts');
a.volume.clear('excerpts');
}
return a.update(res.data);
});
};
Asset.prototype.link = function (slot, data) {
if (!data)
data = {};
data.container = slot.container.id;
data.position = slot.segment.l;
return this.save(data);
};
Slot.prototype.createAsset = function (data) {
var s = this;
if (!data)
data = {};
data.container = this.container.id;
if (!('position' in data) && isFinite(this.segment.l))
data.position = this.segment.l;
return router.http(router.controllers.createAsset, this.volume.id, data)
.then(function (res) {
s.clear('assets');
return assetMake(s.container, res.data);
});
};
Asset.prototype.replace = function (data) {
var a = this;
return router.http(router.controllers.postAsset, this.id, data)
.then(function (res) {
if (a.container)
a.container.clear('assets');
return assetMake(a.container || a.volume, res.data);
});
};
Asset.prototype.remove = function () {
var a = this;
return router.http(router.controllers.deleteAsset, this.id)
.then(function (res) {
if (a.container)
a.container.clear('assets');
return a.update(res.data);
});
};
///////////////////////////////// Comment
function Comment(context, init) {
Slot.call(this, context, init);
}
Comment.prototype = Object.create(Slot.prototype);
Comment.prototype.constructor = Comment;
Comment.prototype.class = 'comment';
Comment.prototype.fields = angular.extend({
id: true,
time: true,
text: true,
parents: true
}, Comment.prototype.fields);
Comment.prototype.init = function (init) {
Slot.prototype.init.call(this, init);
if ('who' in init)
this.who = partyMake(init.who);
};
function commentMakeArray(context, l) {
if (l) for (var i = 0; i < l.length; i ++)
l[i] = new Comment(context, l[i]);
return l;
}
Slot.prototype.postComment = function (data, segment, reply) {
if (segment === undefined)
segment = this.segment;
if (arguments.length < 3 && this instanceof Comment)
reply = this.id;
var s = this;
if (reply != null)
data.parent = reply;
return router.http(router.controllers.postComment, this.container.id, segment.format(), data)
.then(function (res) {
s.volume.clear('comments');
s.clear('comments');
return new Comment(s.container, res.data);
});
};
///////////////////////////////// Tag
// no point in a model, really
var Tag = {};
Tag.search = function (query) {
return router.http(router.controllers.getTags, query)
.then(function(res) {
return res.data;
});
};
Tag.top = function () {
return router.http(router.controllers.getTopTags)
.then(function(res) {
return res.data;
});
};
Slot.prototype.setTag = function (tag, vote, keyword, segment) {
if (segment === undefined)
segment = this.segment;
var s = this;
return router.http(router.controllers[vote ? (keyword ? "postKeyword" : "postTag") : (keyword ? "deleteKeyword" : "deleteTag")], this.container.id, segment.format(), tag)
.then(function (res) {
var tag = res.data;
s.volume.clear('tags');
if ('tags' in s)
s.tags[tag.id] = tag;
return tag;
});
};
/////////////////////////////////
return {
Party: Party,
Login: Login,
Volume: Volume,
Container: Container,
Slot: Slot,
Record: Record,
Asset: Asset,
AssetSlot: AssetSlot,
Comment: Comment,
Tag: Tag,
funder: function (query, all) {
return router.http(router.controllers.getFunders, {query:query,all:all})
.then(resData);
},
cite: function (url) {
return router.http(router.controllers.getCitation, {url:url})
.then(resData);
},
analytic: function () {
return router.http(router.controllers.get, {}, {cache:false});
},
activity: function () {
return router.http(router.controllers.getActivity)
.then(function (res) {
for (var i = 0; i < res.data.length; i ++) {
if ('volume' in res.data[i])
res.data[i].volume = volumeMake(res.data[i].volume);
if ('party' in res.data[i])
res.data[i].party = partyMake(res.data[i].party);
}
return res.data;
});
}
};
}
]);
| Remove now-deprecated Party.lastName method
| web/service/model.js | Remove now-deprecated Party.lastName method | <ide><path>eb/service/model.js
<ide> Party.prototype.route = function () {
<ide> return router.party([this.id]);
<ide> };
<del>
<del> Object.defineProperty(Party.prototype, 'lastName', {
<del> get: function () {
<del> return this.name.substr(this.name.lastIndexOf(' ')+1);
<del> }
<del> });
<ide>
<ide> Party.prototype.editRoute = function (page) {
<ide> var params = {}; |
|
Java | apache-2.0 | f9a89b85ed10c275f0da4757e37aa1123e34f6c0 | 0 | oriontribunal/CoffeeMud,bozimmerman/CoffeeMud,MaxRau/CoffeeMud,sfunk1x/CoffeeMud,oriontribunal/CoffeeMud,oriontribunal/CoffeeMud,Tycheo/coffeemud,bozimmerman/CoffeeMud,Tycheo/coffeemud,sfunk1x/CoffeeMud,sfunk1x/CoffeeMud,bozimmerman/CoffeeMud,MaxRau/CoffeeMud,Tycheo/coffeemud,sfunk1x/CoffeeMud,MaxRau/CoffeeMud,bozimmerman/CoffeeMud,MaxRau/CoffeeMud,Tycheo/coffeemud,oriontribunal/CoffeeMud | package com.planet_ink.coffee_mud.Areas;
import com.planet_ink.coffee_mud.core.interfaces.*;
import com.planet_ink.coffee_mud.core.*;
import com.planet_ink.coffee_mud.core.collections.*;
import com.planet_ink.coffee_mud.Abilities.interfaces.*;
import com.planet_ink.coffee_mud.Areas.interfaces.*;
import com.planet_ink.coffee_mud.Behaviors.interfaces.*;
import com.planet_ink.coffee_mud.CharClasses.interfaces.*;
import com.planet_ink.coffee_mud.Commands.interfaces.*;
import com.planet_ink.coffee_mud.Common.interfaces.*;
import com.planet_ink.coffee_mud.Exits.interfaces.*;
import com.planet_ink.coffee_mud.Items.Basic.StdItem;
import com.planet_ink.coffee_mud.Items.interfaces.*;
import com.planet_ink.coffee_mud.Locales.interfaces.*;
import com.planet_ink.coffee_mud.Libraries.interfaces.*;
import com.planet_ink.coffee_mud.MOBS.interfaces.*;
import com.planet_ink.coffee_mud.Races.interfaces.*;
import java.util.*;
/*
Copyright 2000-2011 Bo Zimmerman
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
public class StdThinInstance extends StdThinArea
{
public String ID(){ return "StdThinInstance";}
private long flags=Area.FLAG_THIN|Area.FLAG_INSTANCE_PARENT;
public long flags(){return flags;}
private class ThinInstanceChild
{
public List<MOB> mobs;
public StdThinInstance A;
public ThinInstanceChild(StdThinInstance A, List<MOB> mobs)
{
this.A=A;
this.mobs=mobs;
}
}
private SVector<ThinInstanceChild> instanceChildren = new SVector<ThinInstanceChild>();
private volatile int instanceCounter=0;
private long childCheckDown=CMProps.getMillisPerMudHour()/CMProps.getTickMillis();
protected String getStrippedRoomID(String roomID)
{
int x=roomID.indexOf('#');
if(x<0) return null;
return roomID.substring(x);
}
protected String convertToMyArea(String roomID)
{
String strippedID=getStrippedRoomID(roomID);
if(strippedID==null) return null;
return Name()+strippedID;
}
protected Area getParentArea()
{
int x=Name().indexOf('_');
if(x<0) return null;
if(!CMath.isNumber(Name().substring(0,x))) return null;
Area parentA = CMLib.map().getArea(Name().substring(x+1));
if((parentA==null)
||(!CMath.bset(parentA.flags(),Area.FLAG_INSTANCE_PARENT))
||(CMath.bset(parentA.flags(),Area.FLAG_INSTANCE_CHILD)))
return null;
return parentA;
}
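    // Lazily materializes a child-instance room: on first access the parent
    // area's room is deep-copied into this area and its exits are remapped
    // onto (thin) rooms of this same instance.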
public Room getRoom(String roomID)
{
if(!CMath.bset(flags(),Area.FLAG_INSTANCE_CHILD))
return super.getRoom(roomID);
if(!isRoom(roomID)) return null;
Room R=super.getRoom(roomID);
if(((R==null)||(R.amDestroyed()))&&(roomID!=null))
{
Area parentA=getParentArea();
if(parentA==null) return null;
if(roomID.toUpperCase().startsWith(Name().toUpperCase()+"#"))
                roomID=Name()+roomID.substring(Name().length()); // for case-sensitive situations
R=parentA.getRoom(parentA.Name()+getStrippedRoomID(roomID));
if(R==null) return null;
Room origRoom=R;
R=(Room)R.copyOf();
R.clearSky();
if(R instanceof GridLocale)
((GridLocale)R).clearGrid(null);
for(int d=Directions.NUM_DIRECTIONS()-1;d>=0;d--)
R.rawDoors()[d]=null;
R.setRoomID(roomID);
R.setArea(this);
addProperRoom(R);
synchronized(("SYNC"+roomID).intern())
{
for(int d=Directions.NUM_DIRECTIONS()-1;d>=0;d--)
{
Room dirR=origRoom.rawDoors()[d];
if(dirR!=null)
{
String myRID=dirR.roomID();
if((myRID!=null)&&(myRID.length()>0)&&(dirR.getArea()==parentA))
{
String localDirRID=convertToMyArea(myRID);
Room localDirR=getProperRoom(localDirRID);
if(localDirR!=null)
R.rawDoors()[d]=localDirR;
else
if(localDirRID==null)
Log.errOut("StdThinInstance","Error in linked room ID "+origRoom.roomID()+", dir="+d);
else
{
R.rawDoors()[d]=CMClass.getLocale("ThinRoom");
R.rawDoors()[d].setRoomID(localDirRID);
R.rawDoors()[d].setArea(this);
}
}
else
R.rawDoors()[d]=dirR;
}
}
}
for(Enumeration<MOB> e=R.inhabitants();e.hasMoreElements();)
e.nextElement().bringToLife(R,true);
R.startItemRejuv();
fillInAreaRoom(R);
R.setExpirationDate(System.currentTimeMillis()+WorldMap.ROOM_EXPIRATION_MILLIS);
}
return R;
}
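    // Once per mud-hour, expires child instances whose owning players have
    // all left: stray owners are sent back to their start rooms, every room
    // gets an expiration message, and the child area is destroyed.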
public boolean tick(Tickable ticking, int tickID)
{
if(!super.tick(ticking, tickID))
return false;
if(CMath.bset(flags(),Area.FLAG_INSTANCE_CHILD))
return true;
if((--childCheckDown)<=0)
{
childCheckDown=CMProps.getMillisPerMudHour()/CMProps.getTickMillis();
synchronized(instanceChildren)
{
for(int i=instanceChildren.size()-1;i>=0;i--)
{
StdThinInstance childA=instanceChildren.elementAt(i).A;
if(childA.getAreaState() > Area.STATE_ACTIVE)
{
List<MOB> V=instanceChildren.elementAt(i).mobs;
boolean anyInside=false;
for(int v=0;v<V.size();v++)
{
MOB M=(MOB)V.get(v);
if(CMLib.flags().isInTheGame(M,true)
&&(M.location()!=null)
&&(M.location().getArea()==childA))
{
anyInside=true;
break;
}
}
if(!anyInside)
{
instanceChildren.remove(i);
for(int v=0;v<V.size();v++)
{
MOB M=(MOB)V.get(v);
if((M.location()!=null)
&&(M.location().getArea()==this))
M.setLocation(M.getStartRoom());
}
MOB mob=CMClass.sampleMOB();
for(Enumeration<Room> e=childA.getProperMap();e.hasMoreElements();)
{
Room R=e.nextElement();
R.executeMsg(mob,CMClass.getMsg(mob,R,null,CMMsg.MSG_EXPIRE,null));
}
CMLib.map().delArea(childA);
childA.destroy();
}
}
}
}
}
return true;
}
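    // Entering a room of this parent instance redirects the mob into the
    // child instance owned by its group, cloning a fresh thin child area
    // (with remapped room ids) on first entry.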
public boolean okMessage(final Environmental myHost, final CMMsg msg)
{
if(!super.okMessage(myHost, msg))
return false;
if(CMath.bset(flags(),Area.FLAG_INSTANCE_CHILD))
return true;
setAreaState(Area.STATE_PASSIVE);
if((msg.sourceMinor()==CMMsg.TYP_ENTER)
&&(msg.target() instanceof Room)
&&(CMath.bset(flags(),Area.FLAG_INSTANCE_PARENT))
&&(isRoom((Room)msg.target()))
&&(!CMSecurity.isAllowed(msg.source(),(Room)msg.target(),"CMDAREAS"))
&&(((msg.source().getStartRoom()==null)||(msg.source().getStartRoom().getArea()!=this))))
{
synchronized(instanceChildren)
{
int myDex=-1;
for(int i=0;i<instanceChildren.size();i++) {
List<MOB> V=instanceChildren.elementAt(i).mobs;
if(V.contains(msg.source())){ myDex=i; break;}
}
Set<MOB> grp = msg.source().getGroupMembers(new HashSet<MOB>());
for(int i=0;i<instanceChildren.size();i++) {
if(i!=myDex)
{
List<MOB> V=instanceChildren.elementAt(i).mobs;
for(int v=V.size()-1;v>=0;v--)
{
MOB M=(MOB)V.get(v);
if(grp.contains(M))
{
if(myDex<0)
{
myDex=i;
break;
}
else
if((CMLib.flags().isInTheGame(M,true))
&&(M.location().getArea()!=instanceChildren.elementAt(i).A))
{
V.remove(M);
instanceChildren.get(myDex).mobs.add(M);
}
}
}
}
}
StdThinInstance redirectA = null;
if(myDex<0)
{
StdThinInstance newA=(StdThinInstance)this.copyOf();
newA.properRooms=new STreeMap<String, Room>(new Area.RoomIDComparator());
newA.properRoomIDSet = null;
newA.metroRoomIDSet = null;
newA.blurbFlags=new STreeMap<String,String>();
newA.setName((++instanceCounter)+"_"+Name());
newA.flags |= Area.FLAG_INSTANCE_CHILD;
for(Enumeration<String> e=getProperRoomnumbers().getRoomIDs();e.hasMoreElements();)
newA.addProperRoomnumber(newA.convertToMyArea(e.nextElement()));
redirectA=newA;
CMLib.map().addArea(newA);
newA.setAreaState(Area.STATE_ACTIVE); // starts ticking
instanceChildren.add(new ThinInstanceChild(redirectA,new SVector<MOB>(msg.source())));
}
else
redirectA=instanceChildren.get(myDex).A;
Room R=redirectA.getRoom(redirectA.convertToMyArea(CMLib.map().getExtendedRoomID((Room)msg.target())));
if(R!=null) msg.setTarget(R);
}
}
return true;
}
}
| com/planet_ink/coffee_mud/Areas/StdThinInstance.java | package com.planet_ink.coffee_mud.Areas;
import com.planet_ink.coffee_mud.core.interfaces.*;
import com.planet_ink.coffee_mud.core.*;
import com.planet_ink.coffee_mud.core.collections.*;
import com.planet_ink.coffee_mud.Abilities.interfaces.*;
import com.planet_ink.coffee_mud.Areas.interfaces.*;
import com.planet_ink.coffee_mud.Behaviors.interfaces.*;
import com.planet_ink.coffee_mud.CharClasses.interfaces.*;
import com.planet_ink.coffee_mud.Commands.interfaces.*;
import com.planet_ink.coffee_mud.Common.interfaces.*;
import com.planet_ink.coffee_mud.Exits.interfaces.*;
import com.planet_ink.coffee_mud.Items.Basic.StdItem;
import com.planet_ink.coffee_mud.Items.interfaces.*;
import com.planet_ink.coffee_mud.Locales.interfaces.*;
import com.planet_ink.coffee_mud.Libraries.interfaces.*;
import com.planet_ink.coffee_mud.MOBS.interfaces.*;
import com.planet_ink.coffee_mud.Races.interfaces.*;
import java.util.*;
/*
Copyright 2000-2011 Bo Zimmerman
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
@SuppressWarnings("unchecked")
public class StdThinInstance extends StdThinArea
{
public String ID(){ return "StdThinInstance";}
private long flags=Area.FLAG_THIN|Area.FLAG_INSTANCE_PARENT;
public long flags(){return flags;}
private class ThinInstanceChild
{
public List<MOB> mobs;
public StdThinInstance A;
public ThinInstanceChild(StdThinInstance A, List<MOB> mobs)
{
this.A=A;
this.mobs=mobs;
}
}
private SVector<ThinInstanceChild> instanceChildren = new SVector<ThinInstanceChild>();
private volatile int instanceCounter=0;
private long childCheckDown=CMProps.getMillisPerMudHour()/CMProps.getTickMillis();
protected String getStrippedRoomID(String roomID)
{
int x=roomID.indexOf('#');
if(x<0) return null;
return roomID.substring(x);
}
protected String convertToMyArea(String roomID)
{
String strippedID=getStrippedRoomID(roomID);
if(strippedID==null) return null;
return Name()+strippedID;
}
protected Area getParentArea()
{
int x=Name().indexOf('_');
if(x<0) return null;
if(!CMath.isNumber(Name().substring(0,x))) return null;
Area parentA = CMLib.map().getArea(Name().substring(x+1));
if((parentA==null)
||(!CMath.bset(parentA.flags(),Area.FLAG_INSTANCE_PARENT))
||(CMath.bset(parentA.flags(),Area.FLAG_INSTANCE_CHILD)))
return null;
return parentA;
}
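    // Lazily materializes a child-instance room: on first access the parent
    // area's room is deep-copied into this area and its exits are remapped
    // onto (thin) rooms of this same instance.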
public Room getRoom(String roomID)
{
if(!CMath.bset(flags(),Area.FLAG_INSTANCE_CHILD))
return super.getRoom(roomID);
if(!isRoom(roomID)) return null;
Room R=super.getRoom(roomID);
if(((R==null)||(R.amDestroyed()))&&(roomID!=null))
{
Area parentA=getParentArea();
if(parentA==null) return null;
if(roomID.toUpperCase().startsWith(Name().toUpperCase()+"#"))
                roomID=Name()+roomID.substring(Name().length()); // for case-sensitive situations
R=parentA.getRoom(parentA.Name()+getStrippedRoomID(roomID));
if(R==null) return null;
Room origRoom=R;
R=(Room)R.copyOf();
R.clearSky();
if(R instanceof GridLocale)
((GridLocale)R).clearGrid(null);
for(int d=Directions.NUM_DIRECTIONS()-1;d>=0;d--)
R.rawDoors()[d]=null;
R.setRoomID(roomID);
R.setArea(this);
addProperRoom(R);
synchronized(("SYNC"+roomID).intern())
{
for(int d=Directions.NUM_DIRECTIONS()-1;d>=0;d--)
{
Room dirR=origRoom.rawDoors()[d];
if(dirR!=null)
{
String myRID=dirR.roomID();
if((myRID!=null)&&(myRID.length()>0)&&(dirR.getArea()==parentA))
{
String localDirRID=convertToMyArea(myRID);
Room localDirR=getProperRoom(localDirRID);
if(localDirR!=null)
R.rawDoors()[d]=localDirR;
else
if(localDirRID==null)
Log.errOut("StdThinInstance","Error in linked room ID "+origRoom.roomID()+", dir="+d);
else
{
R.rawDoors()[d]=CMClass.getLocale("ThinRoom");
R.rawDoors()[d].setRoomID(localDirRID);
R.rawDoors()[d].setArea(this);
}
}
else
R.rawDoors()[d]=dirR;
}
}
}
for(Enumeration<MOB> e=R.inhabitants();e.hasMoreElements();)
e.nextElement().bringToLife(R,true);
R.startItemRejuv();
fillInAreaRoom(R);
R.setExpirationDate(System.currentTimeMillis()+WorldMap.ROOM_EXPIRATION_MILLIS);
}
return R;
}
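    // Once per mud-hour, expires child instances whose owning players have
    // all left: stray owners are sent back to their start rooms, every room
    // gets an expiration message, and the child area is destroyed.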
public boolean tick(Tickable ticking, int tickID)
{
if(!super.tick(ticking, tickID))
return false;
if(CMath.bset(flags(),Area.FLAG_INSTANCE_CHILD))
return true;
if((--childCheckDown)<=0)
{
childCheckDown=CMProps.getMillisPerMudHour()/CMProps.getTickMillis();
synchronized(instanceChildren)
{
for(int i=instanceChildren.size()-1;i>=0;i--)
{
StdThinInstance childA=instanceChildren.elementAt(i).A;
if(childA.getAreaState() > Area.STATE_ACTIVE)
{
List<MOB> V=instanceChildren.elementAt(i).mobs;
boolean anyInside=false;
for(int v=0;v<V.size();v++)
{
MOB M=(MOB)V.get(v);
if(CMLib.flags().isInTheGame(M,true)
&&(M.location()!=null)
&&(M.location().getArea()==childA))
{
anyInside=true;
break;
}
}
if(!anyInside)
{
instanceChildren.remove(i);
for(int v=0;v<V.size();v++)
{
MOB M=(MOB)V.get(v);
if((M.location()!=null)
&&(M.location().getArea()==this))
M.setLocation(M.getStartRoom());
}
MOB mob=CMClass.sampleMOB();
for(Enumeration e=childA.getProperMap();e.hasMoreElements();)
{
Room R=(Room)e.nextElement();
R.executeMsg(mob,CMClass.getMsg(mob,R,null,CMMsg.MSG_EXPIRE,null));
}
CMLib.map().delArea(childA);
childA.destroy();
}
}
}
}
}
return true;
}
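    // Entering a room of this parent instance redirects the mob into the
    // child instance owned by its group, cloning a fresh thin child area
    // (with remapped room ids) on first entry.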
public boolean okMessage(final Environmental myHost, final CMMsg msg)
{
if(!super.okMessage(myHost, msg))
return false;
if(CMath.bset(flags(),Area.FLAG_INSTANCE_CHILD))
return true;
setAreaState(Area.STATE_PASSIVE);
if((msg.sourceMinor()==CMMsg.TYP_ENTER)
&&(msg.target() instanceof Room)
&&(CMath.bset(flags(),Area.FLAG_INSTANCE_PARENT))
&&(isRoom((Room)msg.target()))
&&(!CMSecurity.isAllowed(msg.source(),(Room)msg.target(),"CMDAREAS"))
&&(((msg.source().getStartRoom()==null)||(msg.source().getStartRoom().getArea()!=this))))
{
synchronized(instanceChildren)
{
int myDex=-1;
for(int i=0;i<instanceChildren.size();i++) {
List<MOB> V=instanceChildren.elementAt(i).mobs;
if(V.contains(msg.source())){ myDex=i; break;}
}
Set<MOB> grp = msg.source().getGroupMembers(new HashSet<MOB>());
for(int i=0;i<instanceChildren.size();i++) {
if(i!=myDex)
{
List<MOB> V=instanceChildren.elementAt(i).mobs;
for(int v=V.size()-1;v>=0;v--)
{
MOB M=(MOB)V.get(v);
if(grp.contains(M))
{
if(myDex<0)
{
myDex=i;
break;
}
else
if((CMLib.flags().isInTheGame(M,true))
&&(M.location().getArea()!=instanceChildren.elementAt(i).A))
{
V.remove(M);
instanceChildren.get(myDex).mobs.add(M);
}
}
}
}
}
StdThinInstance redirectA = null;
if(myDex<0)
{
StdThinInstance newA=(StdThinInstance)this.copyOf();
newA.properRooms=new STreeMap<String, Room>(new Area.RoomIDComparator());
newA.properRoomIDSet = null;
newA.metroRoomIDSet = null;
newA.blurbFlags=new STreeMap<String,String>();
newA.setName((++instanceCounter)+"_"+Name());
newA.flags |= Area.FLAG_INSTANCE_CHILD;
for(Enumeration e=getProperRoomnumbers().getRoomIDs();e.hasMoreElements();)
newA.addProperRoomnumber(newA.convertToMyArea((String)e.nextElement()));
redirectA=newA;
CMLib.map().addArea(newA);
newA.setAreaState(Area.STATE_ACTIVE); // starts ticking
instanceChildren.add(new ThinInstanceChild(redirectA,new SVector<MOB>(msg.source())));
}
else
redirectA=instanceChildren.get(myDex).A;
Room R=redirectA.getRoom(redirectA.convertToMyArea(CMLib.map().getExtendedRoomID((Room)msg.target())));
if(R!=null) msg.setTarget(R);
}
}
return true;
}
}
|
git-svn-id: svn://192.168.1.10/public/CoffeeMud@8965 0d6f1817-ed0e-0410-87c9-987e46238f29
| com/planet_ink/coffee_mud/Areas/StdThinInstance.java | <ide><path>com/planet_ink/coffee_mud/Areas/StdThinInstance.java
<ide> See the License for the specific language governing permissions and
<ide> limitations under the License.
<ide> */
<del>@SuppressWarnings("unchecked")
<ide> public class StdThinInstance extends StdThinArea
<ide> {
<ide> public String ID(){ return "StdThinInstance";}
<add>
<ide> private long flags=Area.FLAG_THIN|Area.FLAG_INSTANCE_PARENT;
<ide> public long flags(){return flags;}
<add>
<ide> private class ThinInstanceChild
<ide> {
<ide> public List<MOB> mobs;
<ide> M.setLocation(M.getStartRoom());
<ide> }
<ide> MOB mob=CMClass.sampleMOB();
<del> for(Enumeration e=childA.getProperMap();e.hasMoreElements();)
<add> for(Enumeration<Room> e=childA.getProperMap();e.hasMoreElements();)
<ide> {
<del> Room R=(Room)e.nextElement();
<add> Room R=e.nextElement();
<ide> R.executeMsg(mob,CMClass.getMsg(mob,R,null,CMMsg.MSG_EXPIRE,null));
<ide> }
<ide> CMLib.map().delArea(childA);
<ide> newA.blurbFlags=new STreeMap<String,String>();
<ide> newA.setName((++instanceCounter)+"_"+Name());
<ide> newA.flags |= Area.FLAG_INSTANCE_CHILD;
<del> for(Enumeration e=getProperRoomnumbers().getRoomIDs();e.hasMoreElements();)
<del> newA.addProperRoomnumber(newA.convertToMyArea((String)e.nextElement()));
<add> for(Enumeration<String> e=getProperRoomnumbers().getRoomIDs();e.hasMoreElements();)
<add> newA.addProperRoomnumber(newA.convertToMyArea(e.nextElement()));
<ide> redirectA=newA;
<ide> CMLib.map().addArea(newA);
<ide> newA.setAreaState(Area.STATE_ACTIVE); // starts ticking |
||
Java | mit | 711658226141344f133d6881cb28328543c6a3ca | 0 | computelab/config | package org.computelab.config;
import static com.google.common.base.Preconditions.checkArgument;
import static com.google.common.base.Preconditions.checkNotNull;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.nio.charset.Charset;
import java.nio.charset.StandardCharsets;
import java.util.Properties;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* A <code>Config</code> that reads the following sources in order:
* <ol>
* <li> System properties, which are usually passed as -D parameters on command-line.
* <li> Environment variables.
* <li> The properties file in the app's home. The app's home is the hidden folder
* "~/.[app]" in the user's home. By default, the file is "app.properties".
* Thus the complete file path is "~/.[app]/app.properties". This file is optional.
* <li> The properties file as a source code resource. By default, the file is "app.properties".
* This file is optional.
* </ol>
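 * <p>
 * A minimal usage sketch (the app name "myapp" is illustrative):
 * <pre>{@code
 * Config config = DefaultConfig.create("myapp");
 * }</pre>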
*/
public final class DefaultConfig {
/**
* Default name of the config file. The config file here refers to the properties
* file in the app's home and in the source code. These files are optional.
*/
public static final String DEFAULT_CONFIG_FILE = "app.properties";
/**
* Creates a default config with the specified app name. The app name will be
     * used to locate the app's home directory. The app's home is the hidden folder
* "~/.[app]" in the user's home. For example, if the app name is "foo", the app's
* home will be "~/.foo" in the user's home directory. This directory will be
* searched for the default config file "app.properties".
*
* @param appName the name of the app that is used to locate the app's home
* @return the created default config instance
*/
public static Config create(final String appName) {
return create(appName, DEFAULT_CONFIG_FILE);
}
/**
* Creates a default config with the specified app name and the specified config
* file name.
* <p>
     * The app name will be used to locate the app's home directory. The
* app's home is the hidden folder "~/.[app]" in the user's home. For example,
* if the app name is "foo", the app's home will be "~/.foo" in the user's home.
* This directory will be searched for the specified properties file.
* <p>
* Instead of the default "app.properties" file, you can specify your own
* config file here.
*
* @param appName the name of the app that is used to locate the app's home
* @param configFile the name of the config file
* @return the created default config instance
*/
public static Config create(final String appName, final String configFile) {
checkNotNull(appName, "App name must not be null.");
checkArgument(!appName.isEmpty(), "App name must not be empty.");
checkNotNull(configFile, "Config file name must not be null.");
checkArgument(!configFile.isEmpty(), "Config file name must not be empty.");
return new Builder(appName, configFile).build();
}
static class Builder {
private static final Charset DEFAULT_CHARSET = StandardCharsets.UTF_8;
private final Logger logger = LoggerFactory.getLogger(DefaultConfig.class);
private final String appName;
private final String configFile;
Builder(final String appName, final String configFile) {
this.appName = appName;
this.configFile = configFile;
}
Config build() {
final ConfigBuilder builder = new ConfigBuilder();
builder.addSystemPropertyConfig();
builder.addSystemEnvConfig();
final Properties homeProps = getFromHome();
if (homeProps != null) {
builder.addPropertiesConfig(homeProps);
}
final Properties srcProps = getFromSource();
if (srcProps != null) {
builder.addPropertiesConfig(srcProps);
}
return builder.build();
}
private Properties getFromHome() {
final String userHome = System.getProperty("user.home");
final String appHome = "." + appName;
final String configFilePath = String.join(File.separator, userHome, appHome, configFile);
try (final FileInputStream inputStream = new FileInputStream(configFilePath)) {
return getProperties(inputStream);
} catch (final FileNotFoundException ex) {
logger.warn("Missing config file " + configFilePath + ". Skipping it...");
return null;
} catch (final IOException ex) {
logger.warn("Error reading config file " + configFile + ". Skipping it...", ex);
return null;
}
}
private Properties getFromSource() {
try (final InputStream inputStream = getClass().getResourceAsStream("/" + configFile)) {
if (inputStream == null) {
logger.warn("Missing resource " + configFile + ". Skipping it...");
return null;
}
return getProperties(inputStream);
} catch (final IOException ex) {
logger.warn("Error reading resource " + configFile + ". Skipping it...", ex);
return null;
}
}
private Properties getProperties(final InputStream inputStream) throws IOException {
try (final InputStreamReader reader = new InputStreamReader(inputStream, DEFAULT_CHARSET)) {
final Properties props = new Properties();
props.load(reader);
return props;
}
}
}
}
| src/main/java/org/computelab/config/DefaultConfig.java | package org.computelab.config;
import static com.google.common.base.Preconditions.checkArgument;
import static com.google.common.base.Preconditions.checkNotNull;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.nio.charset.Charset;
import java.nio.charset.StandardCharsets;
import java.util.Properties;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* A <code>Config</code> that reads the following sources in order:
* <ol>
* <li> System properties, which are usually passed as -D parameters on command-line.
* <li> Environment variables.
* <li> The properties file in the app's home. The app's home is the hidden folder
* "~/.[app]" in the user's home. This file is optional.
* <li> The properties file as a source code resource. Optional.
* </ol>
*/
public final class DefaultConfig {
/**
* Default name of the config file. The config file here refers to the properties
* file in the app's home and in the source code. These files are optional.
*/
public static final String DEFAULT_CONFIG_FILE = "app.properties";
/**
* Creates a default config with the specified app name. The app name will be
     * used to locate the app's home directory. The app's home is the hidden folder
* "~/.[app]" in the user's home. For example, if the app name is "foo", the app's
* home will be "~/.foo" in the user's home directory. This directory will be
* searched for the default config file "app.properties".
*
* @param appName the name of the app that is used to locate the app's home
* @return the created default config instance
*/
public static Config create(final String appName) {
return create(appName, DEFAULT_CONFIG_FILE);
}
/**
* Creates a default config with the specified app name and the specified config
     * file name. The app name will be used to locate the app's home directory. The
* app's home is the hidden folder "~/.[app]" in the user's home. For example,
* if the app name is "foo", the app's home will be "~/.foo" in the user's home
* directory. This directory will be searched for the specified properties file.
*
* @param appName the name of the app that is used to locate the app's home
* @param configFile the name of the config file
* @return the created default config instance
*/
public static Config create(final String appName, final String configFile) {
checkNotNull(appName, "App name must not be null.");
checkArgument(!appName.isEmpty(), "App name must not be empty.");
checkNotNull(configFile, "Config file name must not be null.");
checkArgument(!configFile.isEmpty(), "Config file name must not be empty.");
return new Builder(appName, configFile).build();
}
static class Builder {
private static final Charset DEFAULT_CHARSET = StandardCharsets.UTF_8;
private final Logger logger = LoggerFactory.getLogger(DefaultConfig.class);
private final String appName;
private final String configFile;
Builder(final String appName, final String configFile) {
this.appName = appName;
this.configFile = configFile;
}
Config build() {
final ConfigBuilder builder = new ConfigBuilder();
builder.addSystemPropertyConfig();
builder.addSystemEnvConfig();
final Properties homeProps = getFromHome();
if (homeProps != null) {
builder.addPropertiesConfig(homeProps);
}
final Properties srcProps = getFromSource();
if (srcProps != null) {
builder.addPropertiesConfig(srcProps);
}
return builder.build();
}
private Properties getFromHome() {
final String userHome = System.getProperty("user.home");
final String appHome = "." + appName;
final String configFilePath = String.join(File.separator, userHome, appHome, configFile);
try (final FileInputStream inputStream = new FileInputStream(configFilePath)) {
return getProperties(inputStream);
} catch (final FileNotFoundException ex) {
logger.warn("Missing config file " + configFilePath + ". Skipping it...");
return null;
} catch (final IOException ex) {
logger.warn("Error reading config file " + configFile + ". Skipping it...", ex);
return null;
}
}
private Properties getFromSource() {
try (final InputStream inputStream = getClass().getResourceAsStream("/" + configFile)) {
if (inputStream == null) {
logger.warn("Missing resource " + configFile + ". Skipping it...");
return null;
}
return getProperties(inputStream);
} catch (final IOException ex) {
logger.warn("Error reading resource " + configFile + ". Skipping it...", ex);
return null;
}
}
private Properties getProperties(final InputStream inputStream) throws IOException {
try (final InputStreamReader reader = new InputStreamReader(inputStream, DEFAULT_CHARSET)) {
final Properties props = new Properties();
props.load(reader);
return props;
}
}
}
}
| Update documentation
| src/main/java/org/computelab/config/DefaultConfig.java | Update documentation | <ide><path>src/main/java/org/computelab/config/DefaultConfig.java
<ide> * <li> System properties, which are usually passed as -D parameters on command-line.
<ide> * <li> Environment variables.
<ide> * <li> The properties file in the app's home. The app's home is the hidden folder
<del> * "~/.[app]" in the user's home. This file is optional.
<del> * <li> The properties file as a source code resource. Optional.
<add> * "~/.[app]" in the user's home. By default, the file is "app.properties".
<add> * Thus the complete file path is "~/.[app]/app.properties". This file is optional.
<add> * <li> The properties file as a source code resource. By default, the file is "app.properties".
<add> * This file is optional.
<ide> * </ol>
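<add> * <p>
<add> * A minimal usage sketch (the app name "myapp" is illustrative):
<add> * <pre>{@code
<add> * Config config = DefaultConfig.create("myapp");
<add> * }</pre>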
<ide> */
<ide> public final class DefaultConfig {
<ide>
<ide> /**
<ide> * Creates a default config with the specified app name and the specified config
<del>     * file name. The app name will be used to locate the app's home directory. The
<add> * file name.
<add> * <p>
<add>     * The app name will be used to locate the app's home directory. The
<ide> * app's home is the hidden folder "~/.[app]" in the user's home. For example,
<del> * if the app name is "foo", the app's home will be "~/.foo" in the user's home
<del> * directory. This directory will be searched for the specified properties file.
<add> * if the app name is "foo", the app's home will be "~/.foo" in the user's home.
<add> * This directory will be searched for the specified properties file.
<add> * <p>
<add> * Instead of the default "app.properties" file, you can specify your own
<add> * config file here.
<ide> *
<ide> * @param appName the name of the app that is used to locate the app's home
<ide> * @param configFile the name of the config file |
|
Java | mit | 16a86bff026497a93e441750b92b6b801ecd35c9 | 0 | platypii/BASElineFlightComputer,platypii/BASElineFlightComputer | package com.platypii.baseline.util;
import android.support.annotation.Nullable;
import java.text.DecimalFormat;
import java.text.DecimalFormatSymbols;
import java.util.Locale;
public class Numbers {
public static boolean isReal(double value) {
return !Double.isNaN(value) && !Double.isInfinite(value);
}
/**
* Fast integer power x^y
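     * Uses exponentiation by squaring, so only O(log y) multiplications are needed (assumes y >= 0).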
*/
public static int pow(int x, int y) {
// base cases
if (x == 1 || y == 0) return 1;
else if (y == 1) return x;
else if (y == 2) return x * x;
else if (y == 3) return x * x * x;
// divide and conquer
final int sqrt = pow(x, y / 2);
if (y % 2 == 0) return sqrt * sqrt;
else return x * sqrt * sqrt;
}
/**
* Parse a string into a double, but use NaN instead of exceptions
*/
public static double parseDouble(@Nullable String str) {
if (str == null || str.isEmpty()) {
return Double.NaN;
} else {
try {
return Double.parseDouble(str);
} catch (NumberFormatException e) {
Exceptions.report(e);
return Double.NaN;
}
}
}
public static float parseFloat(@Nullable String str) {
if (str == null || str.isEmpty()) {
return Float.NaN;
} else {
try {
return Float.parseFloat(str);
} catch (NumberFormatException e) {
Exceptions.report(e);
return Float.NaN;
}
}
}
public static int parseInt(@Nullable String str, int defaultValue) {
if (str == null || str.isEmpty()) {
return defaultValue;
} else {
try {
return Integer.parseInt(str);
} catch (NumberFormatException e) {
Exceptions.report(e);
return defaultValue;
}
}
}
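    // Pin the symbols to Locale.US so the decimal separator is always '.',
    // even in locales that would otherwise render 0.5 as "0,5".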
private static final DecimalFormatSymbols formatSymbols = DecimalFormatSymbols.getInstance(Locale.US);
public static final DecimalFormat format2 = new DecimalFormat("#.##", formatSymbols);
public static final DecimalFormat format3 = new DecimalFormat("#.###", formatSymbols);
public static final DecimalFormat format6 = new DecimalFormat("#.######", formatSymbols);
}
| common/src/main/java/com/platypii/baseline/util/Numbers.java | package com.platypii.baseline.util;
import android.support.annotation.Nullable;
import java.text.DecimalFormat;
public class Numbers {
public static boolean isReal(double value) {
return !Double.isNaN(value) && !Double.isInfinite(value);
}
/**
* Fast integer power x^y
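     * Uses exponentiation by squaring, so only O(log y) multiplications are needed (assumes y >= 0).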
*/
public static int pow(int x, int y) {
// base cases
if (x == 1 || y == 0) return 1;
else if (y == 1) return x;
else if (y == 2) return x * x;
else if (y == 3) return x * x * x;
// divide and conquer
final int sqrt = pow(x, y / 2);
if (y % 2 == 0) return sqrt * sqrt;
else return x * sqrt * sqrt;
}
/**
* Parse a string into a double, but use NaN instead of exceptions
*/
public static double parseDouble(@Nullable String str) {
if (str == null || str.isEmpty()) {
return Double.NaN;
} else {
try {
return Double.parseDouble(str);
} catch (NumberFormatException e) {
Exceptions.report(e);
return Double.NaN;
}
}
}
public static float parseFloat(@Nullable String str) {
if (str == null || str.isEmpty()) {
return Float.NaN;
} else {
try {
return Float.parseFloat(str);
} catch (NumberFormatException e) {
Exceptions.report(e);
return Float.NaN;
}
}
}
public static int parseInt(@Nullable String str, int defaultValue) {
if (str == null || str.isEmpty()) {
return defaultValue;
} else {
try {
return Integer.parseInt(str);
} catch (NumberFormatException e) {
Exceptions.report(e);
return defaultValue;
}
}
}
public static final DecimalFormat format2 = new DecimalFormat("#.##");
public static final DecimalFormat format3 = new DecimalFormat("#.###");
public static final DecimalFormat format6 = new DecimalFormat("#.######");
}
| Fix broken number formatting in 0,00 locales
| common/src/main/java/com/platypii/baseline/util/Numbers.java | Fix broken number formatting in 0,00 locales | <ide><path>common/src/main/java/com/platypii/baseline/util/Numbers.java
<ide>
<ide> import android.support.annotation.Nullable;
<ide> import java.text.DecimalFormat;
<add>import java.text.DecimalFormatSymbols;
<add>import java.util.Locale;
<ide>
<ide> public class Numbers {
<ide>
<ide> }
<ide> }
<ide>
<del> public static final DecimalFormat format2 = new DecimalFormat("#.##");
<del> public static final DecimalFormat format3 = new DecimalFormat("#.###");
<del> public static final DecimalFormat format6 = new DecimalFormat("#.######");
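<add>    // Pin the symbols to Locale.US so the decimal separator is always '.',
<add>    // even in locales that would otherwise render 0.5 as "0,5".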
<add> private static final DecimalFormatSymbols formatSymbols = DecimalFormatSymbols.getInstance(Locale.US);
<add> public static final DecimalFormat format2 = new DecimalFormat("#.##", formatSymbols);
<add> public static final DecimalFormat format3 = new DecimalFormat("#.###", formatSymbols);
<add> public static final DecimalFormat format6 = new DecimalFormat("#.######", formatSymbols);
<ide>
<ide> } |
|
JavaScript | mit | 3e32bd2623a916ff99f94a66e5d584cc335273d8 | 0 | yusufsafak/cerebral,fopsdev/cerebral,yusufsafak/cerebral,christianalfoni/cerebral,cerebral/cerebral,cerebral/cerebral,garth/cerebral,fopsdev/cerebral,christianalfoni/cerebral,garth/cerebral | /* eslint-env mocha */
'use strict'
import assert from 'assert'
import {state, signal} from '../tags'
import {Container, connect} from '../viewFactories/react'
import { WebSocket, Server } from 'mock-socket'
import {Devtools} from './'
import Controller from '../Controller'
import React from 'react'
import TestUtils from 'react-addons-test-utils'
const version = VERSION // eslint-disable-line
import {FunctionTreeExecutionError} from 'function-tree/lib/errors'
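// Route the devtools socket through mock-socket's WebSocket so these tests can
// talk to an in-process mock Server instead of a real debugger connection.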
Devtools.prototype.createSocket = function () {
this.ws = new WebSocket(`ws://${this.remoteDebugger}`)
}
describe('Devtools', () => {
it('should throw when remoteDebugger is not set', () => {
assert.throws(() => {
new Devtools() // eslint-disable-line no-new
}, (err) => {
if (err instanceof Error) {
return err.message === 'Devtools: You have to pass in the "remoteDebugger" option'
}
})
})
it('should init correctly and work when debugger is open when app loads', (done) => {
const mockServer = new Server('ws://localhost:8585')
let messages = []
mockServer.on('connection', (server) => {
server.on('message', (event) => {
const message = JSON.parse(event)
messages.push(message.type)
switch (message.type) {
case 'pong':
server.send(JSON.stringify({type: 'ping'}))
break
case 'ping':
server.send(JSON.stringify({type: 'pong'}))
break
}
})
})
const controller = new Controller({
devtools: new Devtools({
remoteDebugger: 'localhost:8585',
reconnect: true
})
})
assert.equal(controller.devtools.isConnected, false)
setTimeout(() => {
assert.deepEqual(messages, ['ping', 'init', 'components'])
assert.equal(controller.devtools.isConnected, true)
assert.equal(controller.devtools.reconnectInterval, 5000)
assert.equal(controller.devtools.doReconnect, true)
assert.deepEqual(controller.devtools.debuggerComponentsMap, {})
assert.equal(controller.devtools.debuggerComponentDetailsId, 1)
assert.equal(controller.devtools.storeMutations, true)
assert.equal(controller.devtools.preventExternalMutations, true)
assert.equal(controller.devtools.preventPropsReplacement, false)
assert.equal(controller.devtools.bigComponentsWarning, 10)
assert.deepEqual(controller.devtools.controller, controller)
assert.deepEqual(controller.devtools.originalRunTreeFunction, controller.run)
assert.equal(controller.devtools.isResettingDebugger, false)
assert.equal(controller.devtools.initialModelString, JSON.stringify(controller.model.get()))
mockServer.stop(done)
}, 70)
})
/* it.only('should work when Debugger is opened after app load', (done) => {
let messages = []
const devtools = new Devtools({
remoteDebugger: 'localhost:8585',
reconnectInterval: 800
})
setTimeout(() => {
const mockServer = new Server('ws://localhost:8585')
mockServer.on('connection', (server) => {
server.on('message', (event) => {
const message = JSON.parse(event)
messages.push(message.type)
switch (message.type) {
case 'pong':
server.send(JSON.stringify({type: 'ping'}))
break
case 'ping':
server.send(JSON.stringify({type: 'pong'}))
break
}
})
})
}, 10)
setTimeout(() => {
assert.deepEqual(messages, ['pong', 'init'])
assert.equal(devtools.isConnected, true)
mockServer.stop(done);
}, 1500);
}) */
it('should warn and try to reconnect to Debugger', (done) => {
let warnCount = 0
const originWarn = console.warn
console.warn = function (...args) {
warnCount++
assert.equal(args[0], 'Debugger application is not running on selected port... will reconnect automatically behind the scenes')
originWarn.apply(this, args)
}
const controller = new Controller({
devtools: new Devtools({
remoteDebugger: 'localhost:8585',
reconnectInterval: 500
})
})
assert.equal(controller.devtools.isConnected, false)
let mockServer
let messages = []
setTimeout(() => {
mockServer = new Server('ws://localhost:8585')
mockServer.on('connection', (server) => {
server.on('message', (event) => {
const message = JSON.parse(event)
messages.push(message.type)
switch (message.type) {
case 'pong':
server.send(JSON.stringify({type: 'ping'}))
break
case 'ping':
server.send(JSON.stringify({type: 'pong'}))
break
}
})
})
}, 400)
setTimeout(() => {
assert.deepEqual(messages, ['ping', 'init', 'components'])
assert.equal(warnCount, 1)
assert.equal(controller.devtools.isConnected, true)
console.warn = originWarn
mockServer.stop(done)
}, 1050)
})
it('should set component details and watch executions', (done) => {
const mockServer = new Server('ws://localhost:8585')
let messages = {}
let messageTypes = []
mockServer.on('connection', (server) => {
server.on('message', (event) => {
const message = JSON.parse(event)
switch (message.type) {
case 'pong':
server.send(JSON.stringify({type: 'ping'}))
break
case 'ping':
server.send(JSON.stringify({type: 'pong'}))
break
case 'init':
break
case 'execution':
messageTypes.push(message.type)
if (Array.isArray(messages[message.type])) {
messages[message.type].push(message)
} else {
messages[message.type] = [message]
}
break
default:
messageTypes.push(message.type)
messages[message.type] = message
break
}
})
})
function actionA ({path, state}) {
assert.ok(true)
state.set('foo', 'foo')
return path.success()
}
function actionB () {
assert.ok(true)
return { bar: 'baz' }
}
const controller = Controller({
devtools: new Devtools({
remoteDebugger: 'localhost:8585'
}),
state: {
foo: 'bar',
bar: 'foo'
},
signals: {
test: [
actionA, {
success: [
actionB
]
}
]
}
})
const TestComponent = connect({
foo: state`foo`,
bar: state`bar`,
test: signal`test`
}, (props) => {
return (
<div>{props.foo}</div>
)
})
TestComponent.displayName = 'TestComponent'
const tree = TestUtils.renderIntoDocument((
<Container controller={controller}>
<TestComponent />
</Container>
))
assert.equal(TestUtils.findRenderedDOMComponentWithTag(tree, 'div').innerHTML, 'bar')
setTimeout(() => {
assert.deepEqual(messageTypes, ['components'])
assert.equal(controller.devtools.isConnected, true)
assert.deepEqual(controller.devtools.debuggerComponentsMap.foo, [{ name: 'TestComponent', renderCount: 0, id: 1 }])
assert.deepEqual(controller.devtools.debuggerComponentsMap.bar, [{ name: 'TestComponent', renderCount: 0, id: 1 }])
assert.equal(controller.devtools.debuggerComponentsMap.test, undefined)
assert.equal(messages.components.source, 'c')
assert.deepEqual(messages.components.data.map.foo, [{ name: 'TestComponent', renderCount: 0, id: 1 }])
assert.deepEqual(messages.components.data.map.bar, [{ name: 'TestComponent', renderCount: 0, id: 1 }])
assert.deepEqual(messages.components.data.render, { components: [] })
controller.getSignal('test')({
foo: 'bar'
})
assert.deepEqual(controller.devtools.debuggerComponentsMap.foo, [{ name: 'TestComponent', renderCount: 1, id: 1 }])
assert.deepEqual(controller.devtools.debuggerComponentsMap.bar, [{ name: 'TestComponent', renderCount: 1, id: 1 }])
assert.equal(controller.devtools.debuggerComponentsMap.test, undefined)
assert.deepEqual(messageTypes, ['components', 'executionStart', 'execution', 'execution', 'executionPathStart', 'execution', 'executionFunctionEnd', 'executionEnd'])
assert.ok(messages.executionStart.data.execution)
assert.equal(messages.executionStart.source, 'c')
assert.equal(messages.execution.length, 3)
assert.ok(messages.execution[0].data.execution)
assert.equal(messages.execution[0].source, 'c')
assert.equal(messages.execution[0].version, version)
assert.deepEqual(messages.execution[0].data.execution.payload, { foo: 'bar' })
assert.ok(messages.execution[1].data.execution)
assert.equal(messages.execution[1].source, 'c')
assert.equal(messages.execution[1].version, version)
assert.deepEqual(messages.execution[1].data.execution.payload, { foo: 'bar' })
assert.equal(messages.execution[1].data.execution.data.method, 'set')
assert.deepEqual(messages.execution[1].data.execution.data.args, [ [ 'foo' ], 'foo' ])
assert.equal(messages.execution[1].data.execution.data.type, 'mutation')
assert.equal(messages.execution[1].data.execution.data.color, '#333')
assert.ok(messages.executionPathStart.data.execution)
assert.equal(messages.executionPathStart.source, 'c')
assert.equal(messages.executionPathStart.version, version)
assert.equal(messages.executionPathStart.data.execution.path, 'success')
assert.ok(messages.execution[2].data.execution)
assert.equal(messages.execution[2].source, 'c')
assert.equal(messages.execution[2].version, version)
assert.deepEqual(messages.execution[2].data.execution.payload, { foo: 'bar' })
assert.ok(messages.executionFunctionEnd.data.execution)
assert.equal(messages.executionFunctionEnd.source, 'c')
assert.equal(messages.executionFunctionEnd.version, version)
assert.deepEqual(messages.executionFunctionEnd.data.execution.output, { bar: 'baz' })
assert.ok(messages.executionEnd.data.execution)
assert.equal(messages.executionEnd.version, version)
assert.equal(messages.executionEnd.source, 'c')
mockServer.stop(done)
}, 70)
})
it('should watch signal execution error', (done) => {
const mockServer = new Server('ws://localhost:8585')
let messages = {}
let messageTypes = []
mockServer.on('connection', (server) => {
server.on('message', (event) => {
const message = JSON.parse(event)
switch (message.type) {
case 'pong':
server.send(JSON.stringify({type: 'ping'}))
break
case 'ping':
server.send(JSON.stringify({type: 'pong'}))
break
case 'init':
break
case 'execution':
messageTypes.push(message.type)
if (Array.isArray(messages[message.type])) {
messages[message.type].push(message)
} else {
messages[message.type] = [message]
}
break
default:
messageTypes.push(message.type)
messages[message.type] = message
break
}
})
})
function actionA () {
return {
foo: 'bar'
}
}
let errorCount = 0
const controller = Controller({
devtools: new Devtools({
remoteDebugger: 'localhost:8585'
}),
state: {
foo: 'bar',
bar: 'foo'
},
signals: {
test: [
actionA, {
success: []
}
]
},
catch: new Map([
[FunctionTreeExecutionError, [
({props}) => {
errorCount++
assert.ok(props.error.message.match(/needs to be a path of either success/))
}
]]
])
})
const TestComponent = connect({
foo: state`foo`,
bar: state`bar`,
test: signal`test`
}, (props) => {
return (
<div>{props.foo}</div>
)
})
TestComponent.displayName = 'TestComponent'
const tree = TestUtils.renderIntoDocument((
<Container controller={controller}>
<TestComponent />
</Container>
))
assert.equal(TestUtils.findRenderedDOMComponentWithTag(tree, 'div').innerHTML, 'bar')
setTimeout(() => {
controller.getSignal('test')()
assert.equal(errorCount, 1)
assert.deepEqual(messageTypes, ['components', 'components', 'executionStart', 'execution', 'executionFunctionError', 'executionStart', 'execution', 'executionEnd'])
mockServer.stop(done)
}, 70)
})
it('should reset the state', (done) => {
const mockServer = new Server('ws://localhost:8585')
mockServer.on('connection', (server) => {
server.on('message', (event) => {
const message = JSON.parse(event)
switch (message.type) {
case 'pong':
server.send(JSON.stringify({type: 'ping'}))
break
case 'ping':
server.send(JSON.stringify({type: 'pong'}))
break
default:
break
}
})
setTimeout(() => {
server.send(JSON.stringify({type: 'reset'}))
}, 150)
})
function actionA ({path, state}) {
state.set('foo', 'foo')
return path.success()
}
function actionB () {
return { bar: 'baz' }
}
const controller = Controller({
devtools: new Devtools({
remoteDebugger: 'localhost:8585'
}),
state: {
foo: 'bar',
bar: 'foo'
},
signals: {
test: [
actionA, {
success: [
actionB
]
}
]
}
})
const TestComponent = connect({
foo: state`foo`,
bar: state`bar`,
test: signal`test`
}, (props) => {
return (
<div>{props.foo}</div>
)
})
TestComponent.displayName = 'TestComponent'
const tree = TestUtils.renderIntoDocument((
<Container controller={controller}>
<TestComponent />
</Container>
))
assert.equal(TestUtils.findRenderedDOMComponentWithTag(tree, 'div').innerHTML, 'bar')
setTimeout(() => {
assert.deepEqual(JSON.parse(controller.devtools.initialModelString), {
foo: 'bar',
bar: 'foo'
})
assert.equal(controller.devtools.isConnected, true)
assert.deepEqual(controller.devtools.debuggerComponentsMap.foo, [{ name: 'TestComponent', renderCount: 0, id: 1 }])
assert.deepEqual(controller.devtools.debuggerComponentsMap.bar, [{ name: 'TestComponent', renderCount: 0, id: 1 }])
assert.equal(controller.devtools.debuggerComponentsMap.test, undefined)
controller.getSignal('test')({
foo: 'bar'
})
assert.deepEqual(controller.model.state, {
foo: 'foo',
bar: 'foo'
})
assert.deepEqual(JSON.parse(controller.devtools.initialModelString), {
foo: 'bar',
bar: 'foo'
})
assert.deepEqual(controller.devtools.debuggerComponentsMap.foo, [{ name: 'TestComponent', renderCount: 1, id: 1 }])
assert.deepEqual(controller.devtools.debuggerComponentsMap.bar, [{ name: 'TestComponent', renderCount: 1, id: 1 }])
assert.equal(controller.devtools.debuggerComponentsMap.test, undefined)
}, 70)
setTimeout(() => {
assert.deepEqual(controller.model.state, JSON.parse(controller.devtools.initialModelString))
assert.deepEqual(controller.devtools.backlog, [])
assert.deepEqual(controller.devtools.mutations, [])
assert.equal(controller.devtools.debuggerComponentsMap.test, undefined)
mockServer.stop(done)
}, 300)
})
  it('should warn when remember message is sent if storeMutations option is false', (done) => {
let warnCount = 0
const originWarn = console.warn
console.warn = function (...args) {
warnCount++
assert.equal(args[0], 'Cerebral Devtools - You tried to time travel, but you have turned of storing of mutations')
originWarn.apply(this, args)
}
const mockServer = new Server('ws://localhost:8585')
mockServer.on('connection', (server) => {
server.on('message', (event) => {
const message = JSON.parse(event)
switch (message.type) {
case 'pong':
server.send(JSON.stringify({type: 'ping'}))
break
case 'ping':
server.send(JSON.stringify({type: 'pong'}))
break
}
})
setTimeout(() => {
server.send(JSON.stringify({type: 'remember', data: 0}))
}, 70)
})
const controller = new Controller({
devtools: new Devtools({
remoteDebugger: 'localhost:8585',
reconnect: true,
storeMutations: false
})
})
setTimeout(() => {
assert.equal(warnCount, 1)
assert.equal(controller.devtools.storeMutations, false)
console.warn = originWarn
mockServer.stop(done)
}, 100)
})
it('should travel back in time', (done) => {
const mockServer = new Server('ws://localhost:8585')
mockServer.on('connection', (server) => {
server.on('message', (event) => {
const message = JSON.parse(event)
switch (message.type) {
case 'pong':
server.send(JSON.stringify({type: 'ping'}))
break
case 'ping':
server.send(JSON.stringify({type: 'pong'}))
break
}
})
setTimeout(() => {
server.send(JSON.stringify({type: 'remember', data: 1}))
}, 200)
setTimeout(() => {
server.send(JSON.stringify({type: 'remember', data: 0}))
}, 400)
setTimeout(() => {
server.send(JSON.stringify({type: 'remember', data: 1}))
}, 600)
})
function actionA ({state}) {
state.set('foo', 'foo')
}
function actionB ({state}) {
state.set('bar', 'bar')
}
const controller = Controller({
devtools: new Devtools({
remoteDebugger: 'localhost:8585'
}),
state: {
foo: 'bar',
bar: 'foo'
},
signals: {
testA: [
actionA
],
testB: [
actionB
]
}
})
let rememberCount = 0
controller.on('remember', (datetime) => {
rememberCount++
})
const TestComponent = connect({
foo: state`foo`,
bar: state`bar`
}, (props) => {
return (
<div>{props.foo}</div>
)
})
TestComponent.displayName = 'TestComponent'
const tree = TestUtils.renderIntoDocument((
<Container controller={controller}>
<TestComponent />
</Container>
))
assert.equal(TestUtils.findRenderedDOMComponentWithTag(tree, 'div').innerHTML, 'bar')
setTimeout(() => {
assert.deepEqual(JSON.parse(controller.devtools.initialModelString), {
foo: 'bar',
bar: 'foo'
})
assert.equal(controller.devtools.isConnected, true)
controller.getSignal('testA')()
assert.deepEqual(controller.model.state, {
foo: 'foo',
bar: 'foo'
})
controller.getSignal('testB')()
assert.deepEqual(controller.model.state, {
foo: 'foo',
bar: 'bar'
})
assert.deepEqual(JSON.parse(controller.devtools.initialModelString), {
foo: 'bar',
bar: 'foo'
})
assert.deepEqual(controller.devtools.debuggerComponentsMap.foo, [{ name: 'TestComponent', renderCount: 2, id: 1 }])
assert.deepEqual(controller.devtools.debuggerComponentsMap.bar, [{ name: 'TestComponent', renderCount: 2, id: 1 }])
assert.equal(controller.devtools.debuggerComponentsMap.test, undefined)
assert.equal(controller.devtools.mutations.length, 2)
assert.equal(rememberCount, 0)
}, 70)
setTimeout(() => {
assert.deepEqual(controller.model.state, {
foo: 'foo',
bar: 'foo'
})
assert.equal(controller.devtools.mutations.length, 2)
assert.equal(rememberCount, 1)
}, 300)
setTimeout(() => {
assert.deepEqual(controller.model.state, {
foo: 'foo',
bar: 'bar'
})
assert.equal(controller.devtools.mutations.length, 2)
assert.equal(rememberCount, 2)
}, 500)
setTimeout(() => {
assert.deepEqual(controller.model.state, {
foo: 'foo',
bar: 'foo'
})
assert.equal(controller.devtools.mutations.length, 2)
assert.equal(rememberCount, 3)
mockServer.stop(done)
}, 800)
})
it('should warn when the signal fired while debugger is remembering state', (done) => {
let warnCount = 0
const originWarn = console.warn
console.warn = function (...args) {
warnCount++
assert.equal(args[0], 'The signal "testB" fired while debugger is remembering state, it was ignored')
originWarn.apply(this, args)
}
const mockServer = new Server('ws://localhost:8585')
mockServer.on('connection', (server) => {
server.on('message', (event) => {
const message = JSON.parse(event)
switch (message.type) {
case 'pong':
server.send(JSON.stringify({type: 'ping'}))
break
case 'ping':
server.send(JSON.stringify({type: 'pong'}))
break
}
})
setTimeout(() => {
server.send(JSON.stringify({type: 'remember', data: 1}))
}, 150)
})
function actionA ({state}) {
state.set('foo', 'foo')
}
function actionB ({state}) {
state.set('bar', 'bar')
}
const controller = Controller({
devtools: new Devtools({
remoteDebugger: 'localhost:8585'
}),
state: {
foo: 'bar',
bar: 'foo'
},
signals: {
testA: [
actionA
],
testB: [
actionB
]
}
})
const TestComponent = connect({
foo: state`foo`,
bar: state`bar`
}, (props) => {
return (
<div>{props.foo}</div>
)
})
TestComponent.displayName = 'TestComponent'
const tree = TestUtils.renderIntoDocument((
<Container controller={controller}>
<TestComponent />
</Container>
))
assert.equal(TestUtils.findRenderedDOMComponentWithTag(tree, 'div').innerHTML, 'bar')
setTimeout(() => {
assert.deepEqual(JSON.parse(controller.devtools.initialModelString), {
foo: 'bar',
bar: 'foo'
})
assert.equal(controller.devtools.isConnected, true)
controller.getSignal('testA')()
assert.deepEqual(controller.model.state, {
foo: 'foo',
bar: 'foo'
})
controller.getSignal('testB')()
assert.deepEqual(controller.model.state, {
foo: 'foo',
bar: 'bar'
})
assert.deepEqual(JSON.parse(controller.devtools.initialModelString), {
foo: 'bar',
bar: 'foo'
})
assert.deepEqual(controller.devtools.debuggerComponentsMap.foo, [{ name: 'TestComponent', renderCount: 2, id: 1 }])
assert.deepEqual(controller.devtools.debuggerComponentsMap.bar, [{ name: 'TestComponent', renderCount: 2, id: 1 }])
assert.equal(controller.devtools.debuggerComponentsMap.test, undefined)
assert.equal(controller.devtools.mutations.length, 2)
}, 70)
setTimeout(() => {
assert.deepEqual(controller.model.state, {
foo: 'foo',
bar: 'foo'
})
assert.equal(controller.devtools.mutations.length, 2)
controller.getSignal('testB')()
assert.deepEqual(controller.model.state, {
foo: 'foo',
bar: 'foo'
})
assert.equal(warnCount, 1)
console.warn = originWarn
mockServer.stop(done)
}, 300)
})
it('should change model state when debugger model state changed', (done) => {
const mockServer = new Server('ws://localhost:8585')
mockServer.on('connection', (server) => {
server.on('message', (event) => {
const message = JSON.parse(event)
switch (message.type) {
case 'pong':
server.send(JSON.stringify({type: 'ping'}))
break
case 'ping':
server.send(JSON.stringify({type: 'pong'}))
break
}
})
setTimeout(() => {
server.send(JSON.stringify({type: 'changeModel', data: {path: [ 'foo' ], value: 'baz'}}))
}, 70)
})
const controller = Controller({
devtools: new Devtools({
remoteDebugger: 'localhost:8585'
}),
state: {
foo: 'bar',
bar: 'foo'
}
})
const TestComponent = connect({
foo: state`foo`,
bar: state`bar`
}, (props) => {
return (
<div>{props.foo}</div>
)
})
TestComponent.displayName = 'TestComponent'
const tree = TestUtils.renderIntoDocument((
<Container controller={controller}>
<TestComponent />
</Container>
))
assert.equal(TestUtils.findRenderedDOMComponentWithTag(tree, 'div').innerHTML, 'bar')
setTimeout(() => {
assert.deepEqual(controller.model.state, {
foo: 'baz',
bar: 'foo'
})
mockServer.stop(done)
}, 100)
})
})
| packages/cerebral/src/devtools/index.test.js | /* eslint-env mocha */
'use strict'
import assert from 'assert'
import {state, signal} from '../tags'
import {Container, connect} from '../viewFactories/react'
import { WebSocket, Server } from 'mock-socket'
import {Devtools} from './'
import Controller from '../Controller'
import React from 'react'
import TestUtils from 'react-addons-test-utils'
const version = VERSION // eslint-disable-line
import {FunctionTreeExecutionError} from 'function-tree/lib/errors'
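// Route the devtools socket through mock-socket's WebSocket so these tests can
// talk to an in-process mock Server instead of a real debugger connection.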
Devtools.prototype.createSocket = function () {
this.ws = new WebSocket(`ws://${this.remoteDebugger}`)
}
describe('Devtools', () => {
it('should throw when remoteDebugger is not set', () => {
assert.throws(() => {
new Devtools() // eslint-disable-line no-new
}, (err) => {
if (err instanceof Error) {
return err.message === 'Devtools: You have to pass in the "remoteDebugger" option'
}
})
})
it('should init correctly and work when debugger is open when app loads', (done) => {
const mockServer = new Server('ws://localhost:8585')
let messages = []
mockServer.on('connection', (server) => {
server.on('message', (event) => {
const message = JSON.parse(event)
messages.push(message.type)
switch (message.type) {
case 'pong':
server.send(JSON.stringify({type: 'ping'}))
break
case 'ping':
server.send(JSON.stringify({type: 'pong'}))
break
}
})
})
const controller = new Controller({
devtools: new Devtools({
remoteDebugger: 'localhost:8585',
reconnect: true
})
})
assert.equal(controller.devtools.isConnected, false)
setTimeout(() => {
assert.deepEqual(messages, ['ping', 'init', 'bulk', 'components'])
assert.equal(controller.devtools.isConnected, true)
assert.equal(controller.devtools.reconnectInterval, 5000)
assert.equal(controller.devtools.doReconnect, true)
assert.deepEqual(controller.devtools.debuggerComponentsMap, {})
assert.equal(controller.devtools.debuggerComponentDetailsId, 1)
assert.equal(controller.devtools.storeMutations, true)
assert.equal(controller.devtools.preventExternalMutations, true)
assert.equal(controller.devtools.preventPropsReplacement, false)
assert.equal(controller.devtools.bigComponentsWarning, 10)
assert.deepEqual(controller.devtools.controller, controller)
assert.deepEqual(controller.devtools.originalRunTreeFunction, controller.run)
assert.equal(controller.devtools.isResettingDebugger, false)
assert.equal(controller.devtools.initialModelString, JSON.stringify(controller.model.get()))
mockServer.stop(done)
}, 70)
})
/* it.only('should work when Debugger is opened after app load', (done) => {
let messages = []
const devtools = new Devtools({
remoteDebugger: 'localhost:8585',
reconnectInterval: 800
})
setTimeout(() => {
const mockServer = new Server('ws://localhost:8585')
mockServer.on('connection', (server) => {
server.on('message', (event) => {
const message = JSON.parse(event)
messages.push(message.type)
switch (message.type) {
case 'pong':
server.send(JSON.stringify({type: 'ping'}))
break
case 'ping':
server.send(JSON.stringify({type: 'pong'}))
break
}
})
})
}, 10)
setTimeout(() => {
assert.deepEqual(messages, ['pong', 'init'])
assert.equal(devtools.isConnected, true)
mockServer.stop(done);
}, 1500);
}) */
it('should warn and try to reconnect to Debugger', (done) => {
let warnCount = 0
const originWarn = console.warn
console.warn = function (...args) {
warnCount++
assert.equal(args[0], 'Debugger application is not running on selected port... will reconnect automatically behind the scenes')
originWarn.apply(this, args)
}
const controller = new Controller({
devtools: new Devtools({
remoteDebugger: 'localhost:8585',
reconnectInterval: 500
})
})
assert.equal(controller.devtools.isConnected, false)
let mockServer
let messages = []
setTimeout(() => {
mockServer = new Server('ws://localhost:8585')
mockServer.on('connection', (server) => {
server.on('message', (event) => {
const message = JSON.parse(event)
messages.push(message.type)
switch (message.type) {
case 'pong':
server.send(JSON.stringify({type: 'ping'}))
break
case 'ping':
server.send(JSON.stringify({type: 'pong'}))
break
}
})
})
}, 400)
setTimeout(() => {
assert.deepEqual(messages, ['ping', 'init', 'bulk', 'components'])
assert.equal(warnCount, 1)
assert.equal(controller.devtools.isConnected, true)
console.warn = originWarn
mockServer.stop(done)
}, 1050)
})
it('should set component details and watch executions', (done) => {
const mockServer = new Server('ws://localhost:8585')
let messages = {}
let messageTypes = []
mockServer.on('connection', (server) => {
server.on('message', (event) => {
const message = JSON.parse(event)
switch (message.type) {
case 'pong':
server.send(JSON.stringify({type: 'ping'}))
break
case 'ping':
server.send(JSON.stringify({type: 'pong'}))
break
case 'init':
break
case 'execution':
messageTypes.push(message.type)
if (Array.isArray(messages[message.type])) {
messages[message.type].push(message)
} else {
messages[message.type] = [message]
}
break
default:
messageTypes.push(message.type)
messages[message.type] = message
break
}
})
})
function actionA ({path, state}) {
assert.ok(true)
state.set('foo', 'foo')
return path.success()
}
function actionB () {
assert.ok(true)
return { bar: 'baz' }
}
const controller = Controller({
devtools: new Devtools({
remoteDebugger: 'localhost:8585'
}),
state: {
foo: 'bar',
bar: 'foo'
},
signals: {
test: [
actionA, {
success: [
actionB
]
}
]
}
})
const TestComponent = connect({
foo: state`foo`,
bar: state`bar`,
test: signal`test`
}, (props) => {
return (
<div>{props.foo}</div>
)
})
TestComponent.displayName = 'TestComponent'
const tree = TestUtils.renderIntoDocument((
<Container controller={controller}>
<TestComponent />
</Container>
))
assert.equal(TestUtils.findRenderedDOMComponentWithTag(tree, 'div').innerHTML, 'bar')
setTimeout(() => {
assert.deepEqual(messageTypes, ['bulk', 'components'])
assert.equal(controller.devtools.isConnected, true)
assert.deepEqual(controller.devtools.debuggerComponentsMap.foo, [{ name: 'TestComponent', renderCount: 0, id: 1 }])
assert.deepEqual(controller.devtools.debuggerComponentsMap.bar, [{ name: 'TestComponent', renderCount: 0, id: 1 }])
assert.equal(controller.devtools.debuggerComponentsMap.test, undefined)
assert.equal(messages.components.source, 'c')
assert.deepEqual(messages.components.data.map.foo, [{ name: 'TestComponent', renderCount: 0, id: 1 }])
assert.deepEqual(messages.components.data.map.bar, [{ name: 'TestComponent', renderCount: 0, id: 1 }])
assert.deepEqual(messages.components.data.render, { components: [] })
assert.equal(messages.bulk.source, 'c')
assert.equal(messages.bulk.version, version)
assert.deepEqual(messages.bulk.data.messages, [])
controller.getSignal('test')({
foo: 'bar'
})
assert.deepEqual(controller.devtools.debuggerComponentsMap.foo, [{ name: 'TestComponent', renderCount: 1, id: 1 }])
assert.deepEqual(controller.devtools.debuggerComponentsMap.bar, [{ name: 'TestComponent', renderCount: 1, id: 1 }])
assert.equal(controller.devtools.debuggerComponentsMap.test, undefined)
assert.deepEqual(messageTypes, ['bulk', 'components', 'executionStart', 'execution', 'execution', 'executionPathStart', 'execution', 'executionFunctionEnd', 'executionEnd'])
assert.ok(messages.executionStart.data.execution)
assert.equal(messages.executionStart.source, 'c')
assert.equal(messages.execution.length, 3)
assert.ok(messages.execution[0].data.execution)
assert.equal(messages.execution[0].source, 'c')
assert.equal(messages.execution[0].version, version)
assert.deepEqual(messages.execution[0].data.execution.payload, { foo: 'bar' })
assert.ok(messages.execution[1].data.execution)
assert.equal(messages.execution[1].source, 'c')
assert.equal(messages.execution[1].version, version)
assert.deepEqual(messages.execution[1].data.execution.payload, { foo: 'bar' })
assert.equal(messages.execution[1].data.execution.data.method, 'set')
assert.deepEqual(messages.execution[1].data.execution.data.args, [ [ 'foo' ], 'foo' ])
assert.equal(messages.execution[1].data.execution.data.type, 'mutation')
assert.equal(messages.execution[1].data.execution.data.color, '#333')
assert.ok(messages.executionPathStart.data.execution)
assert.equal(messages.executionPathStart.source, 'c')
assert.equal(messages.executionPathStart.version, version)
assert.equal(messages.executionPathStart.data.execution.path, 'success')
assert.ok(messages.execution[2].data.execution)
assert.equal(messages.execution[2].source, 'c')
assert.equal(messages.execution[2].version, version)
assert.deepEqual(messages.execution[2].data.execution.payload, { foo: 'bar' })
assert.ok(messages.executionFunctionEnd.data.execution)
assert.equal(messages.executionFunctionEnd.source, 'c')
assert.equal(messages.executionFunctionEnd.version, version)
assert.deepEqual(messages.executionFunctionEnd.data.execution.output, { bar: 'baz' })
assert.ok(messages.executionEnd.data.execution)
assert.equal(messages.executionEnd.version, version)
assert.equal(messages.executionEnd.source, 'c')
mockServer.stop(done)
}, 70)
})
it('should watch signal execution error', (done) => {
const mockServer = new Server('ws://localhost:8585')
let messages = {}
let messageTypes = []
mockServer.on('connection', (server) => {
server.on('message', (event) => {
const message = JSON.parse(event)
switch (message.type) {
case 'pong':
server.send(JSON.stringify({type: 'ping'}))
break
case 'ping':
server.send(JSON.stringify({type: 'pong'}))
break
case 'init':
break
case 'execution':
messageTypes.push(message.type)
if (Array.isArray(messages[message.type])) {
messages[message.type].push(message)
} else {
messages[message.type] = [message]
}
break
default:
messageTypes.push(message.type)
messages[message.type] = message
break
}
})
})
function actionA () {
return {
foo: 'bar'
}
}
let errorCount = 0
const controller = Controller({
devtools: new Devtools({
remoteDebugger: 'localhost:8585'
}),
state: {
foo: 'bar',
bar: 'foo'
},
signals: {
test: [
actionA, {
success: []
}
]
},
catch: new Map([
[FunctionTreeExecutionError, [
({props}) => {
errorCount++
assert.ok(props.error.message.match(/needs to be a path of either success/))
}
]]
])
})
const TestComponent = connect({
foo: state`foo`,
bar: state`bar`,
test: signal`test`
}, (props) => {
return (
<div>{props.foo}</div>
)
})
TestComponent.displayName = 'TestComponent'
const tree = TestUtils.renderIntoDocument((
<Container controller={controller}>
<TestComponent />
</Container>
))
assert.equal(TestUtils.findRenderedDOMComponentWithTag(tree, 'div').innerHTML, 'bar')
setTimeout(() => {
controller.getSignal('test')()
assert.equal(errorCount, 1)
assert.deepEqual(messageTypes, ['bulk', 'components', 'components', 'executionStart', 'execution', 'executionFunctionError', 'executionStart', 'execution', 'executionEnd'])
mockServer.stop(done)
}, 70)
})
it('should reset the state', (done) => {
const mockServer = new Server('ws://localhost:8585')
mockServer.on('connection', (server) => {
server.on('message', (event) => {
const message = JSON.parse(event)
switch (message.type) {
case 'pong':
server.send(JSON.stringify({type: 'ping'}))
break
case 'ping':
server.send(JSON.stringify({type: 'pong'}))
break
default:
break
}
})
setTimeout(() => {
server.send(JSON.stringify({type: 'reset'}))
}, 150)
})
function actionA ({path, state}) {
state.set('foo', 'foo')
return path.success()
}
function actionB () {
return { bar: 'baz' }
}
const controller = Controller({
devtools: new Devtools({
remoteDebugger: 'localhost:8585'
}),
state: {
foo: 'bar',
bar: 'foo'
},
signals: {
test: [
actionA, {
success: [
actionB
]
}
]
}
})
const TestComponent = connect({
foo: state`foo`,
bar: state`bar`,
test: signal`test`
}, (props) => {
return (
<div>{props.foo}</div>
)
})
TestComponent.displayName = 'TestComponent'
const tree = TestUtils.renderIntoDocument((
<Container controller={controller}>
<TestComponent />
</Container>
))
assert.equal(TestUtils.findRenderedDOMComponentWithTag(tree, 'div').innerHTML, 'bar')
setTimeout(() => {
assert.deepEqual(JSON.parse(controller.devtools.initialModelString), {
foo: 'bar',
bar: 'foo'
})
assert.equal(controller.devtools.isConnected, true)
assert.deepEqual(controller.devtools.debuggerComponentsMap.foo, [{ name: 'TestComponent', renderCount: 0, id: 1 }])
assert.deepEqual(controller.devtools.debuggerComponentsMap.bar, [{ name: 'TestComponent', renderCount: 0, id: 1 }])
assert.equal(controller.devtools.debuggerComponentsMap.test, undefined)
controller.getSignal('test')({
foo: 'bar'
})
assert.deepEqual(controller.model.state, {
foo: 'foo',
bar: 'foo'
})
assert.deepEqual(JSON.parse(controller.devtools.initialModelString), {
foo: 'bar',
bar: 'foo'
})
assert.deepEqual(controller.devtools.debuggerComponentsMap.foo, [{ name: 'TestComponent', renderCount: 1, id: 1 }])
assert.deepEqual(controller.devtools.debuggerComponentsMap.bar, [{ name: 'TestComponent', renderCount: 1, id: 1 }])
assert.equal(controller.devtools.debuggerComponentsMap.test, undefined)
}, 70)
setTimeout(() => {
assert.deepEqual(controller.model.state, JSON.parse(controller.devtools.initialModelString))
assert.deepEqual(controller.devtools.backlog, [])
assert.deepEqual(controller.devtools.mutations, [])
assert.equal(controller.devtools.debuggerComponentsMap.test, undefined)
mockServer.stop(done)
}, 300)
})
  it('should warn when remember message is sent if storeMutations option is false', (done) => {
let warnCount = 0
const originWarn = console.warn
console.warn = function (...args) {
warnCount++
assert.equal(args[0], 'Cerebral Devtools - You tried to time travel, but you have turned of storing of mutations')
originWarn.apply(this, args)
}
const mockServer = new Server('ws://localhost:8585')
mockServer.on('connection', (server) => {
server.on('message', (event) => {
const message = JSON.parse(event)
switch (message.type) {
case 'pong':
server.send(JSON.stringify({type: 'ping'}))
break
case 'ping':
server.send(JSON.stringify({type: 'pong'}))
break
}
})
setTimeout(() => {
server.send(JSON.stringify({type: 'remember', data: 0}))
}, 70)
})
const controller = new Controller({
devtools: new Devtools({
remoteDebugger: 'localhost:8585',
reconnect: true,
storeMutations: false
})
})
setTimeout(() => {
assert.equal(warnCount, 1)
assert.equal(controller.devtools.storeMutations, false)
console.warn = originWarn
mockServer.stop(done)
}, 100)
})
it('should travel back in time', (done) => {
const mockServer = new Server('ws://localhost:8585')
mockServer.on('connection', (server) => {
server.on('message', (event) => {
const message = JSON.parse(event)
switch (message.type) {
case 'pong':
server.send(JSON.stringify({type: 'ping'}))
break
case 'ping':
server.send(JSON.stringify({type: 'pong'}))
break
}
})
setTimeout(() => {
server.send(JSON.stringify({type: 'remember', data: 1}))
}, 200)
setTimeout(() => {
server.send(JSON.stringify({type: 'remember', data: 0}))
}, 400)
setTimeout(() => {
server.send(JSON.stringify({type: 'remember', data: 1}))
}, 600)
})
function actionA ({state}) {
state.set('foo', 'foo')
}
function actionB ({state}) {
state.set('bar', 'bar')
}
const controller = Controller({
devtools: new Devtools({
remoteDebugger: 'localhost:8585'
}),
state: {
foo: 'bar',
bar: 'foo'
},
signals: {
testA: [
actionA
],
testB: [
actionB
]
}
})
let rememberCount = 0
controller.on('remember', (datetime) => {
rememberCount++
})
const TestComponent = connect({
foo: state`foo`,
bar: state`bar`
}, (props) => {
return (
<div>{props.foo}</div>
)
})
TestComponent.displayName = 'TestComponent'
const tree = TestUtils.renderIntoDocument((
<Container controller={controller}>
<TestComponent />
</Container>
))
assert.equal(TestUtils.findRenderedDOMComponentWithTag(tree, 'div').innerHTML, 'bar')
setTimeout(() => {
assert.deepEqual(JSON.parse(controller.devtools.initialModelString), {
foo: 'bar',
bar: 'foo'
})
assert.equal(controller.devtools.isConnected, true)
controller.getSignal('testA')()
assert.deepEqual(controller.model.state, {
foo: 'foo',
bar: 'foo'
})
controller.getSignal('testB')()
assert.deepEqual(controller.model.state, {
foo: 'foo',
bar: 'bar'
})
assert.deepEqual(JSON.parse(controller.devtools.initialModelString), {
foo: 'bar',
bar: 'foo'
})
assert.deepEqual(controller.devtools.debuggerComponentsMap.foo, [{ name: 'TestComponent', renderCount: 2, id: 1 }])
assert.deepEqual(controller.devtools.debuggerComponentsMap.bar, [{ name: 'TestComponent', renderCount: 2, id: 1 }])
assert.equal(controller.devtools.debuggerComponentsMap.test, undefined)
assert.equal(controller.devtools.mutations.length, 2)
assert.equal(rememberCount, 0)
}, 70)
setTimeout(() => {
assert.deepEqual(controller.model.state, {
foo: 'foo',
bar: 'foo'
})
assert.equal(controller.devtools.mutations.length, 2)
assert.equal(rememberCount, 1)
}, 300)
setTimeout(() => {
assert.deepEqual(controller.model.state, {
foo: 'foo',
bar: 'bar'
})
assert.equal(controller.devtools.mutations.length, 2)
assert.equal(rememberCount, 2)
}, 500)
setTimeout(() => {
assert.deepEqual(controller.model.state, {
foo: 'foo',
bar: 'foo'
})
assert.equal(controller.devtools.mutations.length, 2)
assert.equal(rememberCount, 3)
mockServer.stop(done)
}, 800)
})
it('should warn when the signal fired while debugger is remembering state', (done) => {
let warnCount = 0
const originWarn = console.warn
console.warn = function (...args) {
warnCount++
assert.equal(args[0], 'The signal "testB" fired while debugger is remembering state, it was ignored')
originWarn.apply(this, args)
}
const mockServer = new Server('ws://localhost:8585')
mockServer.on('connection', (server) => {
server.on('message', (event) => {
const message = JSON.parse(event)
switch (message.type) {
case 'pong':
server.send(JSON.stringify({type: 'ping'}))
break
case 'ping':
server.send(JSON.stringify({type: 'pong'}))
break
}
})
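      // Put the client into remembering state so a signal fired afterwards is ignored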
setTimeout(() => {
server.send(JSON.stringify({type: 'remember', data: 1}))
}, 150)
})
function actionA ({state}) {
state.set('foo', 'foo')
}
function actionB ({state}) {
state.set('bar', 'bar')
}
const controller = Controller({
devtools: new Devtools({
remoteDebugger: 'localhost:8585'
}),
state: {
foo: 'bar',
bar: 'foo'
},
signals: {
testA: [
actionA
],
testB: [
actionB
]
}
})
const TestComponent = connect({
foo: state`foo`,
bar: state`bar`
}, (props) => {
return (
<div>{props.foo}</div>
)
})
TestComponent.displayName = 'TestComponent'
const tree = TestUtils.renderIntoDocument((
<Container controller={controller}>
<TestComponent />
</Container>
))
assert.equal(TestUtils.findRenderedDOMComponentWithTag(tree, 'div').innerHTML, 'bar')
setTimeout(() => {
assert.deepEqual(JSON.parse(controller.devtools.initialModelString), {
foo: 'bar',
bar: 'foo'
})
assert.equal(controller.devtools.isConnected, true)
controller.getSignal('testA')()
assert.deepEqual(controller.model.state, {
foo: 'foo',
bar: 'foo'
})
controller.getSignal('testB')()
assert.deepEqual(controller.model.state, {
foo: 'foo',
bar: 'bar'
})
assert.deepEqual(JSON.parse(controller.devtools.initialModelString), {
foo: 'bar',
bar: 'foo'
})
assert.deepEqual(controller.devtools.debuggerComponentsMap.foo, [{ name: 'TestComponent', renderCount: 2, id: 1 }])
assert.deepEqual(controller.devtools.debuggerComponentsMap.bar, [{ name: 'TestComponent', renderCount: 2, id: 1 }])
assert.equal(controller.devtools.debuggerComponentsMap.test, undefined)
assert.equal(controller.devtools.mutations.length, 2)
}, 70)
setTimeout(() => {
assert.deepEqual(controller.model.state, {
foo: 'foo',
bar: 'foo'
})
assert.equal(controller.devtools.mutations.length, 2)
controller.getSignal('testB')()
assert.deepEqual(controller.model.state, {
foo: 'foo',
bar: 'foo'
})
assert.equal(warnCount, 1)
console.warn = originWarn
mockServer.stop(done)
}, 300)
})
it('should change model state when debugger model state changed', (done) => {
const mockServer = new Server('ws://localhost:8585')
mockServer.on('connection', (server) => {
server.on('message', (event) => {
const message = JSON.parse(event)
switch (message.type) {
case 'pong':
server.send(JSON.stringify({type: 'ping'}))
break
case 'ping':
server.send(JSON.stringify({type: 'pong'}))
break
}
})
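      // Push a state change from the debugger; the client should apply it to its own model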
setTimeout(() => {
server.send(JSON.stringify({type: 'changeModel', data: {path: [ 'foo' ], value: 'baz'}}))
}, 70)
})
const controller = Controller({
devtools: new Devtools({
remoteDebugger: 'localhost:8585'
}),
state: {
foo: 'bar',
bar: 'foo'
}
})
const TestComponent = connect({
foo: state`foo`,
bar: state`bar`
}, (props) => {
return (
<div>{props.foo}</div>
)
})
TestComponent.displayName = 'TestComponent'
const tree = TestUtils.renderIntoDocument((
<Container controller={controller}>
<TestComponent />
</Container>
))
assert.equal(TestUtils.findRenderedDOMComponentWithTag(tree, 'div').innerHTML, 'bar')
setTimeout(() => {
assert.deepEqual(controller.model.state, {
foo: 'baz',
bar: 'foo'
})
mockServer.stop(done)
}, 100)
})
})
| fix(tests): fix cerebral devtools tests
| packages/cerebral/src/devtools/index.test.js | fix(tests): fix cerebral devtools tests | <ide><path>ackages/cerebral/src/devtools/index.test.js
<ide> })
<ide> assert.equal(controller.devtools.isConnected, false)
<ide> setTimeout(() => {
<del> assert.deepEqual(messages, ['ping', 'init', 'bulk', 'components'])
<add> assert.deepEqual(messages, ['ping', 'init', 'components'])
<ide> assert.equal(controller.devtools.isConnected, true)
<ide> assert.equal(controller.devtools.reconnectInterval, 5000)
<ide> assert.equal(controller.devtools.doReconnect, true)
<ide> }, 400)
<ide>
<ide> setTimeout(() => {
<del> assert.deepEqual(messages, ['ping', 'init', 'bulk', 'components'])
<add> assert.deepEqual(messages, ['ping', 'init', 'components'])
<ide> assert.equal(warnCount, 1)
<ide> assert.equal(controller.devtools.isConnected, true)
<ide> console.warn = originWarn
<ide> assert.equal(TestUtils.findRenderedDOMComponentWithTag(tree, 'div').innerHTML, 'bar')
<ide>
<ide> setTimeout(() => {
<del> assert.deepEqual(messageTypes, ['bulk', 'components'])
<add> assert.deepEqual(messageTypes, ['components'])
<ide> assert.equal(controller.devtools.isConnected, true)
<ide>
<ide> assert.deepEqual(controller.devtools.debuggerComponentsMap.foo, [{ name: 'TestComponent', renderCount: 0, id: 1 }])
<ide> assert.deepEqual(messages.components.data.map.bar, [{ name: 'TestComponent', renderCount: 0, id: 1 }])
<ide> assert.deepEqual(messages.components.data.render, { components: [] })
<ide>
<del> assert.equal(messages.bulk.source, 'c')
<del> assert.equal(messages.bulk.version, version)
<del> assert.deepEqual(messages.bulk.data.messages, [])
<del>
<ide> controller.getSignal('test')({
<ide> foo: 'bar'
<ide> })
<ide> assert.deepEqual(controller.devtools.debuggerComponentsMap.bar, [{ name: 'TestComponent', renderCount: 1, id: 1 }])
<ide> assert.equal(controller.devtools.debuggerComponentsMap.test, undefined)
<ide>
<del> assert.deepEqual(messageTypes, ['bulk', 'components', 'executionStart', 'execution', 'execution', 'executionPathStart', 'execution', 'executionFunctionEnd', 'executionEnd'])
<add> assert.deepEqual(messageTypes, ['components', 'executionStart', 'execution', 'execution', 'executionPathStart', 'execution', 'executionFunctionEnd', 'executionEnd'])
<ide> assert.ok(messages.executionStart.data.execution)
<ide> assert.equal(messages.executionStart.source, 'c')
<ide>
<ide> setTimeout(() => {
<ide> controller.getSignal('test')()
<ide> assert.equal(errorCount, 1)
<del> assert.deepEqual(messageTypes, ['bulk', 'components', 'components', 'executionStart', 'execution', 'executionFunctionError', 'executionStart', 'execution', 'executionEnd'])
<add> assert.deepEqual(messageTypes, ['components', 'components', 'executionStart', 'execution', 'executionFunctionError', 'executionStart', 'execution', 'executionEnd'])
<ide> mockServer.stop(done)
<ide> }, 70)
<ide> }) |
|
Java | bsd-2-clause | 40e6604cae644676261beb2a588cfe92d57023fe | 0 | KronosDesign/runelite,l2-/runelite,runelite/runelite,runelite/runelite,devinfrench/runelite,Sethtroll/runelite,abelbriggs1/runelite,Noremac201/runelite,Noremac201/runelite,abelbriggs1/runelite,devinfrench/runelite,KronosDesign/runelite,abelbriggs1/runelite,l2-/runelite,runelite/runelite,Sethtroll/runelite | /*
* Copyright (c) 2016-2017, Adam <[email protected]>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package net.runelite.client.ui;
import java.awt.BorderLayout;
import java.awt.Dimension;
import java.awt.event.WindowAdapter;
import java.awt.event.WindowEvent;
import java.util.Objects;
import javax.swing.JFrame;
import javax.swing.JOptionPane;
import javax.swing.JPanel;
import javax.swing.JPopupMenu;
import javax.swing.UIManager;
import javax.swing.UnsupportedLookAndFeelException;
import net.runelite.api.Client;
import net.runelite.api.GameState;
import net.runelite.client.RuneLite;
public final class ClientUI extends JFrame
{
private static final int PANEL_WIDTH = 805;
private static final int PANEL_HEIGHT = 541;
private static final int EXPANDED_WIDTH = PANEL_WIDTH + PluginPanel.PANEL_WIDTH;
private JPanel container;
private JPanel navContainer;
private ClientPanel panel;
private NavigationPanel navigationPanel;
private PluginPanel pluginPanel;
public ClientUI() throws Exception
{
init();
pack();
setTitle("RuneLite");
setIconImage(RuneLite.ICON);
setLocationRelativeTo(getOwner());
setResizable(true);
setVisible(true);
}
private void init() throws Exception
{
setDefaultCloseOperation(DO_NOTHING_ON_CLOSE);
setMinimumSize(new Dimension(PANEL_WIDTH, PANEL_HEIGHT));
addWindowListener(new WindowAdapter()
{
@Override
public void windowClosing(WindowEvent e)
{
checkExit();
}
});
JPopupMenu.setDefaultLightWeightPopupEnabled(false);
try
{
UIManager.setLookAndFeel(UIManager.getSystemLookAndFeelClassName());
}
catch (ClassNotFoundException | InstantiationException | IllegalAccessException | UnsupportedLookAndFeelException ignored)
{
}
container = new JPanel();
container.setLayout(new BorderLayout(0, 0));
panel = new ClientPanel(!RuneLite.getOptions().has("no-rs"));
container.add(panel, BorderLayout.CENTER);
navContainer = new JPanel();
navContainer.setLayout(new BorderLayout(0, 0));
container.add(navContainer, BorderLayout.EAST);
navigationPanel = new NavigationPanel();
navContainer.add(navigationPanel, BorderLayout.EAST);
add(container);
}
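	/**
	 * Toggles the given plugin panel: collapses it when it is already
	 * showing, otherwise swaps it in and widens the frame to fit it.
	 */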
public void expand(PluginPanel panel)
{
if (Objects.equals(pluginPanel, panel))
{
navContainer.remove(1);
container.validate();
this.setMinimumSize(new Dimension(PANEL_WIDTH, PANEL_HEIGHT));
if (this.getWidth() == EXPANDED_WIDTH)
{
this.setSize(PANEL_WIDTH, PANEL_HEIGHT);
}
pluginPanel = null;
}
else
{
if (pluginPanel != null)
{
navContainer.remove(1);
container.validate();
}
pluginPanel = panel;
navContainer.add(pluginPanel, BorderLayout.WEST);
container.validate();
this.setMinimumSize(new Dimension(EXPANDED_WIDTH, PANEL_HEIGHT));
}
}
private void checkExit()
{
Client client = RuneLite.getClient();
int result = JOptionPane.OK_OPTION;
// only ask if not logged out
if (client != null && client.getGameState() != GameState.LOGIN_SCREEN)
{
result = JOptionPane.showConfirmDialog(this, "Are you sure you want to exit?", "Exit", JOptionPane.OK_CANCEL_OPTION, JOptionPane.QUESTION_MESSAGE);
}
if (result == JOptionPane.OK_OPTION)
{
System.exit(0);
}
}
public NavigationPanel getNavigationPanel()
{
return navigationPanel;
}
public PluginPanel getPluginPanel()
{
return pluginPanel;
}
}
| runelite-client/src/main/java/net/runelite/client/ui/ClientUI.java | /*
* Copyright (c) 2016-2017, Adam <[email protected]>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package net.runelite.client.ui;
import java.awt.BorderLayout;
import java.awt.Dimension;
import java.awt.event.WindowAdapter;
import java.awt.event.WindowEvent;
import java.util.Objects;
import javax.swing.JFrame;
import javax.swing.JOptionPane;
import javax.swing.JPanel;
import javax.swing.JPopupMenu;
import javax.swing.UIManager;
import javax.swing.UnsupportedLookAndFeelException;
import net.runelite.client.RuneLite;
public final class ClientUI extends JFrame
{
private static final int PANEL_WIDTH = 805;
private static final int PANEL_HEIGHT = 541;
private static final int EXPANDED_WIDTH = PANEL_WIDTH + PluginPanel.PANEL_WIDTH;
private JPanel container;
private JPanel navContainer;
private ClientPanel panel;
private NavigationPanel navigationPanel;
private PluginPanel pluginPanel;
public ClientUI() throws Exception
{
init();
pack();
setTitle("RuneLite");
setIconImage(RuneLite.ICON);
setLocationRelativeTo(getOwner());
setResizable(true);
setVisible(true);
}
private void init() throws Exception
{
setDefaultCloseOperation(DO_NOTHING_ON_CLOSE);
setMinimumSize(new Dimension(PANEL_WIDTH, PANEL_HEIGHT));
addWindowListener(new WindowAdapter()
{
@Override
public void windowClosing(WindowEvent e)
{
checkExit();
}
});
JPopupMenu.setDefaultLightWeightPopupEnabled(false);
try
{
UIManager.setLookAndFeel(UIManager.getSystemLookAndFeelClassName());
}
catch (ClassNotFoundException | InstantiationException | IllegalAccessException | UnsupportedLookAndFeelException ignored)
{
}
container = new JPanel();
container.setLayout(new BorderLayout(0, 0));
panel = new ClientPanel(!RuneLite.getOptions().has("no-rs"));
container.add(panel, BorderLayout.CENTER);
navContainer = new JPanel();
navContainer.setLayout(new BorderLayout(0, 0));
container.add(navContainer, BorderLayout.EAST);
navigationPanel = new NavigationPanel();
navContainer.add(navigationPanel, BorderLayout.EAST);
add(container);
}
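	/**
	 * Toggles the given plugin panel: collapses it when it is already
	 * showing, otherwise swaps it in and widens the frame to fit it.
	 */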
public void expand(PluginPanel panel)
{
if (Objects.equals(pluginPanel, panel))
{
navContainer.remove(1);
container.validate();
this.setMinimumSize(new Dimension(PANEL_WIDTH, PANEL_HEIGHT));
if (this.getWidth() == EXPANDED_WIDTH)
{
this.setSize(PANEL_WIDTH, PANEL_HEIGHT);
}
pluginPanel = null;
}
else
{
if (pluginPanel != null)
{
navContainer.remove(1);
container.validate();
}
pluginPanel = panel;
navContainer.add(pluginPanel, BorderLayout.WEST);
container.validate();
this.setMinimumSize(new Dimension(EXPANDED_WIDTH, PANEL_HEIGHT));
}
}
private void checkExit()
{
int result = JOptionPane.showConfirmDialog(this, "Are you sure you want to exit?", "Exit", JOptionPane.OK_CANCEL_OPTION, JOptionPane.QUESTION_MESSAGE);
if (result == JOptionPane.OK_OPTION)
{
System.exit(0);
}
}
public NavigationPanel getNavigationPanel()
{
return navigationPanel;
}
public PluginPanel getPluginPanel()
{
return pluginPanel;
}
}
| runelite-client: only prompt on close if logged in
| runelite-client/src/main/java/net/runelite/client/ui/ClientUI.java | runelite-client: only prompt on close if logged in | <ide><path>unelite-client/src/main/java/net/runelite/client/ui/ClientUI.java
<ide> import javax.swing.JPopupMenu;
<ide> import javax.swing.UIManager;
<ide> import javax.swing.UnsupportedLookAndFeelException;
<add>import net.runelite.api.Client;
<add>import net.runelite.api.GameState;
<ide> import net.runelite.client.RuneLite;
<ide>
<ide> public final class ClientUI extends JFrame
<ide>
<ide> private void checkExit()
<ide> {
<del> int result = JOptionPane.showConfirmDialog(this, "Are you sure you want to exit?", "Exit", JOptionPane.OK_CANCEL_OPTION, JOptionPane.QUESTION_MESSAGE);
<add> Client client = RuneLite.getClient();
<add> int result = JOptionPane.OK_OPTION;
<add>
<add> // only ask if not logged out
<add> if (client != null && client.getGameState() != GameState.LOGIN_SCREEN)
<add> {
<add> result = JOptionPane.showConfirmDialog(this, "Are you sure you want to exit?", "Exit", JOptionPane.OK_CANCEL_OPTION, JOptionPane.QUESTION_MESSAGE);
<add> }
<ide>
<ide> if (result == JOptionPane.OK_OPTION)
<ide> { |
|
Java | apache-2.0 | 5041f81dbb32dcf2d35349e1fa5a4fe3cd674c25 | 0 | smartnews/presto,smartnews/presto,smartnews/presto,smartnews/presto,smartnews/presto | /*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.trino.plugin.iceberg;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSet;
import io.airlift.units.DataSize;
import io.trino.Session;
import io.trino.metadata.Metadata;
import io.trino.metadata.QualifiedObjectName;
import io.trino.metadata.TableHandle;
import io.trino.operator.OperatorStats;
import io.trino.plugin.hive.HdfsEnvironment;
import io.trino.spi.QueryId;
import io.trino.spi.connector.ColumnHandle;
import io.trino.spi.connector.Constraint;
import io.trino.spi.connector.ConstraintApplicationResult;
import io.trino.spi.connector.TableNotFoundException;
import io.trino.spi.predicate.Domain;
import io.trino.spi.predicate.TupleDomain;
import io.trino.testing.BaseConnectorTest;
import io.trino.testing.DataProviders;
import io.trino.testing.MaterializedResult;
import io.trino.testing.MaterializedRow;
import io.trino.testing.QueryRunner;
import io.trino.testing.ResultWithQueryId;
import io.trino.testing.TestingConnectorBehavior;
import io.trino.testing.sql.TestTable;
import io.trino.tpch.TpchTable;
import org.apache.avro.Schema;
import org.apache.avro.file.DataFileReader;
import org.apache.avro.file.DataFileWriter;
import org.apache.avro.generic.GenericData;
import org.apache.avro.generic.GenericDatumReader;
import org.apache.avro.generic.GenericDatumWriter;
import org.apache.hadoop.fs.FileSystem;
import org.intellij.lang.annotations.Language;
import org.testng.SkipException;
import org.testng.annotations.DataProvider;
import org.testng.annotations.Test;
import java.io.File;
import java.io.IOException;
import java.io.OutputStream;
import java.net.URI;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.NoSuchElementException;
import java.util.Optional;
import java.util.Set;
import java.util.function.Consumer;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
import java.util.stream.LongStream;
import java.util.stream.Stream;
import static com.google.common.base.Preconditions.checkArgument;
import static com.google.common.base.Verify.verify;
import static com.google.common.collect.ImmutableList.toImmutableList;
import static com.google.common.collect.ImmutableMap.toImmutableMap;
import static com.google.common.collect.Iterables.concat;
import static com.google.common.collect.Iterables.getOnlyElement;
import static com.google.common.collect.MoreCollectors.onlyElement;
import static io.trino.SystemSessionProperties.JOIN_DISTRIBUTION_TYPE;
import static io.trino.SystemSessionProperties.PREFERRED_WRITE_PARTITIONING_MIN_NUMBER_OF_PARTITIONS;
import static io.trino.SystemSessionProperties.SCALE_WRITERS;
import static io.trino.SystemSessionProperties.TASK_WRITER_COUNT;
import static io.trino.plugin.hive.HdfsEnvironment.HdfsContext;
import static io.trino.plugin.hive.HiveTestUtils.HDFS_ENVIRONMENT;
import static io.trino.plugin.iceberg.IcebergFileFormat.ORC;
import static io.trino.plugin.iceberg.IcebergFileFormat.PARQUET;
import static io.trino.plugin.iceberg.IcebergQueryRunner.ICEBERG_CATALOG;
import static io.trino.plugin.iceberg.IcebergSplitManager.ICEBERG_DOMAIN_COMPACTION_THRESHOLD;
import static io.trino.spi.predicate.Domain.multipleValues;
import static io.trino.spi.predicate.Domain.singleValue;
import static io.trino.spi.type.BigintType.BIGINT;
import static io.trino.spi.type.DoubleType.DOUBLE;
import static io.trino.spi.type.VarcharType.VARCHAR;
import static io.trino.sql.planner.OptimizerConfig.JoinDistributionType.BROADCAST;
import static io.trino.testing.MaterializedResult.resultBuilder;
import static io.trino.testing.QueryAssertions.assertEqualsIgnoreOrder;
import static io.trino.testing.TestingSession.testSessionBuilder;
import static io.trino.testing.assertions.Assert.assertEquals;
import static io.trino.testing.assertions.Assert.assertEventually;
import static io.trino.testing.sql.TestTable.randomTableSuffix;
import static io.trino.tpch.TpchTable.LINE_ITEM;
import static io.trino.transaction.TransactionBuilder.transaction;
import static java.lang.String.format;
import static java.lang.String.join;
import static java.util.Collections.nCopies;
import static java.util.Objects.requireNonNull;
import static java.util.stream.Collectors.joining;
import static java.util.stream.Collectors.toUnmodifiableList;
import static java.util.stream.IntStream.range;
import static org.assertj.core.api.Assertions.assertThat;
import static org.assertj.core.api.Assertions.assertThatThrownBy;
import static org.testng.Assert.assertFalse;
import static org.testng.Assert.assertNotEquals;
import static org.testng.Assert.assertTrue;
public abstract class BaseIcebergConnectorTest
extends BaseConnectorTest
{
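    // Captures a trailing "WITH (...)" table-properties clause, e.g. from SHOW CREATE TABLE output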
private static final Pattern WITH_CLAUSE_EXTRACTOR = Pattern.compile(".*(WITH\\s*\\([^)]*\\))\\s*$", Pattern.DOTALL);
private final IcebergFileFormat format;
protected BaseIcebergConnectorTest(IcebergFileFormat format)
{
this.format = requireNonNull(format, "format is null");
}
@Override
protected QueryRunner createQueryRunner()
throws Exception
{
return IcebergQueryRunner.builder()
.setIcebergProperties(Map.of("iceberg.file-format", format.name()))
.setInitialTables(ImmutableList.<TpchTable<?>>builder()
.addAll(REQUIRED_TPCH_TABLES)
.add(LINE_ITEM)
.build())
.build();
}
@Override
protected boolean hasBehavior(TestingConnectorBehavior connectorBehavior)
{
switch (connectorBehavior) {
case SUPPORTS_TOPN_PUSHDOWN:
return false;
case SUPPORTS_CREATE_VIEW:
return true;
case SUPPORTS_CREATE_MATERIALIZED_VIEW:
case SUPPORTS_RENAME_MATERIALIZED_VIEW:
return true;
case SUPPORTS_RENAME_MATERIALIZED_VIEW_ACROSS_SCHEMAS:
return false;
case SUPPORTS_DELETE:
case SUPPORTS_UPDATE:
return true;
default:
return super.hasBehavior(connectorBehavior);
}
}
@Override
protected void verifyVersionedQueryFailurePermissible(Exception e)
{
assertThat(e)
.hasMessageMatching("Version pointer type is not supported: .*|" +
"Unsupported type for temporal table version: .*|" +
"Unsupported type for table version: .*|" +
"No version history table tpch.nation at or before .*|" +
"Iceberg snapshot ID does not exists: .*");
}
@Override
protected void verifyConcurrentUpdateFailurePermissible(Exception e)
{
assertThat(e).hasMessageContaining("Failed to commit Iceberg update to table");
}
@Override
protected void verifyConcurrentAddColumnFailurePermissible(Exception e)
{
assertThat(e)
.hasMessageContaining("Cannot update Iceberg table: supplied previous location does not match current location");
}
@Test
public void testDeleteOnV1Table()
{
try (TestTable table = new TestTable(getQueryRunner()::execute, "test_delete_", "WITH (format_version = 1) AS SELECT * FROM orders")) {
assertQueryFails("DELETE FROM " + table.getName() + " WHERE custkey <= 100", "Iceberg table updates require at least format version 2");
}
}
@Override
public void testCharVarcharComparison()
{
assertThatThrownBy(super::testCharVarcharComparison)
.hasMessage("Type not supported for Iceberg: char(3)");
}
@Test
@Override
public void testShowCreateSchema()
{
assertThat(computeActual("SHOW CREATE SCHEMA tpch").getOnlyValue().toString())
.matches("CREATE SCHEMA iceberg.tpch\n" +
"AUTHORIZATION USER user\n" +
"WITH \\(\n" +
"\\s+location = '.*/iceberg_data/tpch'\n" +
"\\)");
}
@Override
@Test
public void testDescribeTable()
{
MaterializedResult expectedColumns = resultBuilder(getSession(), VARCHAR, VARCHAR, VARCHAR, VARCHAR)
.row("orderkey", "bigint", "", "")
.row("custkey", "bigint", "", "")
.row("orderstatus", "varchar", "", "")
.row("totalprice", "double", "", "")
.row("orderdate", "date", "", "")
.row("orderpriority", "varchar", "", "")
.row("clerk", "varchar", "", "")
.row("shippriority", "integer", "", "")
.row("comment", "varchar", "", "")
.build();
MaterializedResult actualColumns = computeActual("DESCRIBE orders");
assertEquals(actualColumns, expectedColumns);
}
@Override
@Test
public void testShowCreateTable()
{
File tempDir = getDistributedQueryRunner().getCoordinator().getBaseDataDir().toFile();
assertThat(computeActual("SHOW CREATE TABLE orders").getOnlyValue())
.isEqualTo("CREATE TABLE iceberg.tpch.orders (\n" +
" orderkey bigint,\n" +
" custkey bigint,\n" +
" orderstatus varchar,\n" +
" totalprice double,\n" +
" orderdate date,\n" +
" orderpriority varchar,\n" +
" clerk varchar,\n" +
" shippriority integer,\n" +
" comment varchar\n" +
")\n" +
"WITH (\n" +
" format = '" + format.name() + "',\n" +
" format_version = 2,\n" +
" location = '" + tempDir + "/iceberg_data/tpch/orders'\n" +
")");
}
@Override
protected void checkInformationSchemaViewsForMaterializedView(String schemaName, String viewName)
{
// TODO should probably return materialized view, as it's also a view -- to be double checked
assertThatThrownBy(() -> super.checkInformationSchemaViewsForMaterializedView(schemaName, viewName))
.hasMessageFindingMatch("(?s)Expecting.*to contain:.*\\Q[(" + viewName + ")]");
}
@Test
public void testDecimal()
{
testDecimalWithPrecisionAndScale(1, 0);
testDecimalWithPrecisionAndScale(8, 6);
testDecimalWithPrecisionAndScale(9, 8);
testDecimalWithPrecisionAndScale(10, 8);
testDecimalWithPrecisionAndScale(18, 1);
testDecimalWithPrecisionAndScale(18, 8);
testDecimalWithPrecisionAndScale(18, 17);
testDecimalWithPrecisionAndScale(17, 16);
testDecimalWithPrecisionAndScale(18, 17);
testDecimalWithPrecisionAndScale(24, 10);
testDecimalWithPrecisionAndScale(30, 10);
testDecimalWithPrecisionAndScale(37, 26);
testDecimalWithPrecisionAndScale(38, 37);
testDecimalWithPrecisionAndScale(38, 17);
testDecimalWithPrecisionAndScale(38, 37);
}
private void testDecimalWithPrecisionAndScale(int precision, int scale)
{
checkArgument(precision >= 1 && precision <= 38, "Decimal precision (%s) must be between 1 and 38 inclusive", precision);
checkArgument(scale < precision && scale >= 0, "Decimal scale (%s) must be less than the precision (%s) and non-negative", scale, precision);
String decimalType = format("DECIMAL(%d,%d)", precision, scale);
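        // Slice fixed digit strings so the literal exactly fills the requested precision and scale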
String beforeTheDecimalPoint = "12345678901234567890123456789012345678".substring(0, precision - scale);
String afterTheDecimalPoint = "09876543210987654321098765432109876543".substring(0, scale);
String decimalValue = format("%s.%s", beforeTheDecimalPoint, afterTheDecimalPoint);
assertUpdate(format("CREATE TABLE test_iceberg_decimal (x %s)", decimalType));
assertUpdate(format("INSERT INTO test_iceberg_decimal (x) VALUES (CAST('%s' AS %s))", decimalValue, decimalType), 1);
assertQuery("SELECT * FROM test_iceberg_decimal", format("SELECT CAST('%s' AS %s)", decimalValue, decimalType));
dropTable("test_iceberg_decimal");
}
@Test
public void testTime()
{
testSelectOrPartitionedByTime(false);
}
@Test
public void testPartitionedByTime()
{
testSelectOrPartitionedByTime(true);
}
private void testSelectOrPartitionedByTime(boolean partitioned)
{
String tableName = format("test_%s_by_time", partitioned ? "partitioned" : "selected");
String partitioning = partitioned ? "WITH(partitioning = ARRAY['x'])" : "";
assertUpdate(format("CREATE TABLE %s (x TIME(6), y BIGINT) %s", tableName, partitioning));
assertUpdate(format("INSERT INTO %s VALUES (TIME '10:12:34', 12345)", tableName), 1);
assertQuery(format("SELECT COUNT(*) FROM %s", tableName), "SELECT 1");
assertQuery(format("SELECT x FROM %s", tableName), "SELECT CAST('10:12:34' AS TIME)");
assertUpdate(format("INSERT INTO %s VALUES (TIME '9:00:00', 67890)", tableName), 1);
assertQuery(format("SELECT COUNT(*) FROM %s", tableName), "SELECT 2");
assertQuery(format("SELECT x FROM %s WHERE x = TIME '10:12:34'", tableName), "SELECT CAST('10:12:34' AS TIME)");
assertQuery(format("SELECT x FROM %s WHERE x = TIME '9:00:00'", tableName), "SELECT CAST('9:00:00' AS TIME)");
assertQuery(format("SELECT x FROM %s WHERE y = 12345", tableName), "SELECT CAST('10:12:34' AS TIME)");
assertQuery(format("SELECT x FROM %s WHERE y = 67890", tableName), "SELECT CAST('9:00:00' AS TIME)");
dropTable(tableName);
}
@Test
public void testPartitionByTimestamp()
{
testSelectOrPartitionedByTimestamp(true);
}
@Test
public void testSelectByTimestamp()
{
testSelectOrPartitionedByTimestamp(false);
}
private void testSelectOrPartitionedByTimestamp(boolean partitioned)
{
String tableName = format("test_%s_by_timestamp", partitioned ? "partitioned" : "selected");
assertUpdate(format("CREATE TABLE %s (_timestamp timestamp(6)) %s",
tableName, partitioned ? "WITH (partitioning = ARRAY['_timestamp'])" : ""));
@Language("SQL") String select1 = "SELECT TIMESTAMP '2017-05-01 10:12:34' _timestamp";
@Language("SQL") String select2 = "SELECT TIMESTAMP '2017-10-01 10:12:34' _timestamp";
@Language("SQL") String select3 = "SELECT TIMESTAMP '2018-05-01 10:12:34' _timestamp";
assertUpdate(format("INSERT INTO %s %s", tableName, select1), 1);
assertUpdate(format("INSERT INTO %s %s", tableName, select2), 1);
assertUpdate(format("INSERT INTO %s %s", tableName, select3), 1);
assertQuery(format("SELECT COUNT(*) from %s", tableName), "SELECT 3");
assertQuery(format("SELECT * from %s WHERE _timestamp = TIMESTAMP '2017-05-01 10:12:34'", tableName), select1);
assertQuery(format("SELECT * from %s WHERE _timestamp < TIMESTAMP '2017-06-01 10:12:34'", tableName), select1);
assertQuery(format("SELECT * from %s WHERE _timestamp = TIMESTAMP '2017-10-01 10:12:34'", tableName), select2);
assertQuery(format("SELECT * from %s WHERE _timestamp > TIMESTAMP '2017-06-01 10:12:34' AND _timestamp < TIMESTAMP '2018-05-01 10:12:34'", tableName), select2);
assertQuery(format("SELECT * from %s WHERE _timestamp = TIMESTAMP '2018-05-01 10:12:34'", tableName), select3);
assertQuery(format("SELECT * from %s WHERE _timestamp > TIMESTAMP '2018-01-01 10:12:34'", tableName), select3);
dropTable(tableName);
}
@Test
public void testPartitionByTimestampWithTimeZone()
{
testSelectOrPartitionedByTimestampWithTimeZone(true);
}
@Test
public void testSelectByTimestampWithTimeZone()
{
testSelectOrPartitionedByTimestampWithTimeZone(false);
}
private void testSelectOrPartitionedByTimestampWithTimeZone(boolean partitioned)
{
String tableName = format("test_%s_by_timestamptz", partitioned ? "partitioned" : "selected");
assertUpdate(format(
"CREATE TABLE %s (_timestamptz timestamp(6) with time zone) %s",
tableName,
partitioned ? "WITH (partitioning = ARRAY['_timestamptz'])" : ""));
String instant1Utc = "TIMESTAMP '2021-10-31 00:30:00.005000 UTC'";
String instant1La = "TIMESTAMP '2021-10-30 17:30:00.005000 America/Los_Angeles'";
String instant2Utc = "TIMESTAMP '2021-10-31 00:30:00.006000 UTC'";
String instant2La = "TIMESTAMP '2021-10-30 17:30:00.006000 America/Los_Angeles'";
String instant3Utc = "TIMESTAMP '2021-10-31 00:30:00.007000 UTC'";
String instant3La = "TIMESTAMP '2021-10-30 17:30:00.007000 America/Los_Angeles'";
// regression test value for https://github.com/trinodb/trino/issues/12852
String instant4Utc = "TIMESTAMP '1969-12-01 05:06:07.234567 UTC'";
assertUpdate(format("INSERT INTO %s VALUES %s", tableName, instant1Utc), 1);
assertUpdate(format("INSERT INTO %s VALUES %s", tableName, instant2La /* non-UTC for this one */), 1);
assertUpdate(format("INSERT INTO %s VALUES %s", tableName, instant3Utc), 1);
assertUpdate(format("INSERT INTO %s VALUES %s", tableName, instant4Utc), 1);
assertQuery(format("SELECT COUNT(*) from %s", tableName), "SELECT 4");
// =
assertThat(query(format("SELECT * from %s WHERE _timestamptz = %s", tableName, instant1Utc)))
.matches("VALUES " + instant1Utc);
assertThat(query(format("SELECT * from %s WHERE _timestamptz = %s", tableName, instant1La)))
.matches("VALUES " + instant1Utc);
assertThat(query(format("SELECT * from %s WHERE _timestamptz = %s", tableName, instant2Utc)))
.matches("VALUES " + instant2Utc);
assertThat(query(format("SELECT * from %s WHERE _timestamptz = %s", tableName, instant2La)))
.matches("VALUES " + instant2Utc);
assertThat(query(format("SELECT * from %s WHERE _timestamptz = %s", tableName, instant3Utc)))
.matches("VALUES " + instant3Utc);
assertThat(query(format("SELECT * from %s WHERE _timestamptz = %s", tableName, instant3La)))
.matches("VALUES " + instant3Utc);
assertThat(query(format("SELECT * from %s WHERE _timestamptz = %s", tableName, instant4Utc)))
.matches("VALUES " + instant4Utc);
// <
assertThat(query(format("SELECT * from %s WHERE _timestamptz < %s", tableName, instant2Utc)))
.matches(format("VALUES %s, %s", instant1Utc, instant4Utc));
assertThat(query(format("SELECT * from %s WHERE _timestamptz < %s", tableName, instant2La)))
.matches(format("VALUES %s, %s", instant1Utc, instant4Utc));
assertThat(query(format("SELECT * from %s WHERE _timestamptz < %s", tableName, instant3Utc)))
.matches(format("VALUES %s, %s, %s", instant1Utc, instant2Utc, instant4Utc));
assertThat(query(format("SELECT * from %s WHERE _timestamptz < %s", tableName, instant3La)))
.matches(format("VALUES %s, %s, %s", instant1Utc, instant2Utc, instant4Utc));
// <=
assertThat(query(format("SELECT * from %s WHERE _timestamptz <= %s", tableName, instant2Utc)))
.matches(format("VALUES %s, %s, %s", instant1Utc, instant2Utc, instant4Utc));
assertThat(query(format("SELECT * from %s WHERE _timestamptz <= %s", tableName, instant2La)))
.matches(format("VALUES %s, %s, %s", instant1Utc, instant2Utc, instant4Utc));
// >
assertThat(query(format("SELECT * from %s WHERE _timestamptz > %s", tableName, instant2Utc)))
.matches("VALUES " + instant3Utc);
assertThat(query(format("SELECT * from %s WHERE _timestamptz > %s", tableName, instant2La)))
.matches("VALUES " + instant3Utc);
assertThat(query(format("SELECT * from %s WHERE _timestamptz > %s", tableName, instant1Utc)))
.matches(format("VALUES %s, %s", instant2Utc, instant3Utc));
assertThat(query(format("SELECT * from %s WHERE _timestamptz > %s", tableName, instant1La)))
.matches(format("VALUES %s, %s", instant2Utc, instant3Utc));
// >=
assertThat(query(format("SELECT * from %s WHERE _timestamptz >= %s", tableName, instant2Utc)))
.matches(format("VALUES %s, %s", instant2Utc, instant3Utc));
assertThat(query(format("SELECT * from %s WHERE _timestamptz >= %s", tableName, instant2La)))
.matches(format("VALUES %s, %s", instant2Utc, instant3Utc));
// open range
assertThat(query(format("SELECT * from %s WHERE _timestamptz > %s AND _timestamptz < %s", tableName, instant1Utc, instant3Utc)))
.matches("VALUES " + instant2Utc);
assertThat(query(format("SELECT * from %s WHERE _timestamptz > %s AND _timestamptz < %s", tableName, instant1La, instant3La)))
.matches("VALUES " + instant2Utc);
// closed range
assertThat(query(format("SELECT * from %s WHERE _timestamptz BETWEEN %s AND %s", tableName, instant1Utc, instant2Utc)))
.matches(format("VALUES %s, %s", instant1Utc, instant2Utc));
assertThat(query(format("SELECT * from %s WHERE _timestamptz BETWEEN %s AND %s", tableName, instant1La, instant2La)))
.matches(format("VALUES %s, %s", instant1Utc, instant2Utc));
// !=
assertThat(query(format("SELECT * from %s WHERE _timestamptz != %s", tableName, instant1Utc)))
.matches(format("VALUES %s, %s, %s", instant2Utc, instant3Utc, instant4Utc));
assertThat(query(format("SELECT * from %s WHERE _timestamptz != %s", tableName, instant1La)))
.matches(format("VALUES %s, %s, %s", instant2Utc, instant3Utc, instant4Utc));
assertThat(query(format("SELECT * from %s WHERE _timestamptz != %s", tableName, instant2Utc)))
.matches(format("VALUES %s, %s, %s", instant1Utc, instant3Utc, instant4Utc));
assertThat(query(format("SELECT * from %s WHERE _timestamptz != %s", tableName, instant2La)))
.matches(format("VALUES %s, %s, %s", instant1Utc, instant3Utc, instant4Utc));
assertThat(query(format("SELECT * from %s WHERE _timestamptz != %s", tableName, instant4Utc)))
.matches(format("VALUES %s, %s, %s", instant1Utc, instant2Utc, instant3Utc));
// IS DISTINCT FROM
assertThat(query(format("SELECT * from %s WHERE _timestamptz IS DISTINCT FROM %s", tableName, instant1Utc)))
.matches(format("VALUES %s, %s, %s", instant2Utc, instant3Utc, instant4Utc));
assertThat(query(format("SELECT * from %s WHERE _timestamptz IS DISTINCT FROM %s", tableName, instant1La)))
.matches(format("VALUES %s, %s, %s", instant2Utc, instant3Utc, instant4Utc));
assertThat(query(format("SELECT * from %s WHERE _timestamptz IS DISTINCT FROM %s", tableName, instant2Utc)))
.matches(format("VALUES %s, %s, %s", instant1Utc, instant3Utc, instant4Utc));
assertThat(query(format("SELECT * from %s WHERE _timestamptz IS DISTINCT FROM %s", tableName, instant2La)))
.matches(format("VALUES %s, %s, %s", instant1Utc, instant3Utc, instant4Utc));
assertThat(query(format("SELECT * from %s WHERE _timestamptz IS DISTINCT FROM %s", tableName, instant4Utc)))
.matches(format("VALUES %s, %s, %s", instant1Utc, instant2Utc, instant3Utc));
// IS NOT DISTINCT FROM
assertThat(query(format("SELECT * from %s WHERE _timestamptz IS NOT DISTINCT FROM %s", tableName, instant1Utc)))
.matches("VALUES " + instant1Utc);
assertThat(query(format("SELECT * from %s WHERE _timestamptz IS NOT DISTINCT FROM %s", tableName, instant1La)))
.matches("VALUES " + instant1Utc);
assertThat(query(format("SELECT * from %s WHERE _timestamptz IS NOT DISTINCT FROM %s", tableName, instant2Utc)))
.matches("VALUES " + instant2Utc);
assertThat(query(format("SELECT * from %s WHERE _timestamptz IS NOT DISTINCT FROM %s", tableName, instant2La)))
.matches("VALUES " + instant2Utc);
assertThat(query(format("SELECT * from %s WHERE _timestamptz IS NOT DISTINCT FROM %s", tableName, instant3Utc)))
.matches("VALUES " + instant3Utc);
assertThat(query(format("SELECT * from %s WHERE _timestamptz IS NOT DISTINCT FROM %s", tableName, instant3La)))
.matches("VALUES " + instant3Utc);
assertThat(query(format("SELECT * from %s WHERE _timestamptz IS NOT DISTINCT FROM %s", tableName, instant4Utc)))
.matches("VALUES " + instant4Utc);
if (partitioned) {
assertThat(query(format("SELECT record_count, file_count, partition._timestamptz FROM \"%s$partitions\"", tableName)))
.matches(format(
"VALUES (BIGINT '1', BIGINT '1', %s), (BIGINT '1', BIGINT '1', %s), (BIGINT '1', BIGINT '1', %s), (BIGINT '1', BIGINT '1', %s)",
instant1Utc,
instant2Utc,
instant3Utc,
instant4Utc));
}
else {
assertThat(query(format("SELECT record_count, file_count, data._timestamptz FROM \"%s$partitions\"", tableName)))
.matches(format(
"VALUES (BIGINT '4', BIGINT '4', CAST(ROW(%s, %s, 0, NULL) AS row(min timestamp(6) with time zone, max timestamp(6) with time zone, null_count bigint, nan_count bigint)))",
format == ORC ? "TIMESTAMP '1969-12-01 05:06:07.234000 UTC'" : instant4Utc,
format == ORC ? "TIMESTAMP '2021-10-31 00:30:00.007999 UTC'" : instant3Utc));
}
// show stats
assertThat(query("SHOW STATS FOR " + tableName))
.skippingTypesCheck()
.matches("VALUES " +
"('_timestamptz', NULL, NULL, 0e0, NULL, '1969-12-01 05:06:07.234 UTC', '2021-10-31 00:30:00.007 UTC'), " +
"(NULL, NULL, NULL, NULL, 4e0, NULL, NULL)");
if (partitioned) {
// show stats with predicate
assertThat(query("SHOW STATS FOR (SELECT * FROM " + tableName + " WHERE _timestamptz = " + instant1La + ")"))
.skippingTypesCheck()
.matches("VALUES " +
// TODO (https://github.com/trinodb/trino/issues/9716) the min/max values are off by 1 millisecond
"('_timestamptz', NULL, NULL, 0e0, NULL, '2021-10-31 00:30:00.005 UTC', '2021-10-31 00:30:00.005 UTC'), " +
"(NULL, NULL, NULL, NULL, 1e0, NULL, NULL)");
}
else {
// show stats with predicate
assertThat(query("SHOW STATS FOR (SELECT * FROM " + tableName + " WHERE _timestamptz = " + instant1La + ")"))
.skippingTypesCheck()
.matches("VALUES " +
"('_timestamptz', NULL, NULL, NULL, NULL, NULL, NULL), " +
"(NULL, NULL, NULL, NULL, NULL, NULL, NULL)");
}
assertUpdate("DROP TABLE " + tableName);
}
@Test
public void testUuid()
{
testSelectOrPartitionedByUuid(false);
}
@Test
public void testPartitionedByUuid()
{
testSelectOrPartitionedByUuid(true);
}
private void testSelectOrPartitionedByUuid(boolean partitioned)
{
String tableName = format("test_%s_by_uuid", partitioned ? "partitioned" : "selected");
String partitioning = partitioned ? "WITH (partitioning = ARRAY['x'])" : "";
assertUpdate(format("DROP TABLE IF EXISTS %s", tableName));
assertUpdate(format("CREATE TABLE %s (x uuid, y bigint) %s", tableName, partitioning));
assertUpdate(format("INSERT INTO %s VALUES (UUID '406caec7-68b9-4778-81b2-a12ece70c8b1', 12345)", tableName), 1);
assertQuery(format("SELECT count(*) FROM %s", tableName), "SELECT 1");
assertQuery(format("SELECT x FROM %s", tableName), "SELECT CAST('406caec7-68b9-4778-81b2-a12ece70c8b1' AS UUID)");
assertUpdate(format("INSERT INTO %s VALUES (UUID 'f79c3e09-677c-4bbd-a479-3f349cb785e7', 67890)", tableName), 1);
assertUpdate(format("INSERT INTO %s VALUES (NULL, 7531)", tableName), 1);
assertQuery(format("SELECT count(*) FROM %s", tableName), "SELECT 3");
assertQuery(format("SELECT * FROM %s WHERE x = UUID '406caec7-68b9-4778-81b2-a12ece70c8b1'", tableName), "SELECT CAST('406caec7-68b9-4778-81b2-a12ece70c8b1' AS UUID), 12345");
assertQuery(format("SELECT * FROM %s WHERE x = UUID 'f79c3e09-677c-4bbd-a479-3f349cb785e7'", tableName), "SELECT CAST('f79c3e09-677c-4bbd-a479-3f349cb785e7' AS UUID), 67890");
assertQuery(
format("SELECT * FROM %s WHERE x >= UUID '406caec7-68b9-4778-81b2-a12ece70c8b1'", tableName),
(format == ORC && partitioned || format == PARQUET)
// TODO (https://github.com/trinodb/trino/issues/12834): reading Parquet, or partitioned ORC, with UUID filter yields incorrect results
? "VALUES (CAST('406caec7-68b9-4778-81b2-a12ece70c8b1' AS UUID), 12345)"
: "VALUES (CAST('f79c3e09-677c-4bbd-a479-3f349cb785e7' AS UUID), 67890), (CAST('406caec7-68b9-4778-81b2-a12ece70c8b1' AS UUID), 12345)");
assertQuery(
format("SELECT * FROM %s WHERE x >= UUID 'f79c3e09-677c-4bbd-a479-3f349cb785e7'", tableName),
partitioned
? "VALUES (CAST('f79c3e09-677c-4bbd-a479-3f349cb785e7' AS UUID), 67890), (CAST('406caec7-68b9-4778-81b2-a12ece70c8b1' AS UUID), 12345)"
: "SELECT CAST('f79c3e09-677c-4bbd-a479-3f349cb785e7' AS UUID), 67890");
assertQuery(format("SELECT * FROM %s WHERE x IS NULL", tableName), "SELECT NULL, 7531");
assertQuery(format("SELECT x FROM %s WHERE y = 12345", tableName), "SELECT CAST('406caec7-68b9-4778-81b2-a12ece70c8b1' AS UUID)");
assertQuery(format("SELECT x FROM %s WHERE y = 67890", tableName), "SELECT CAST('f79c3e09-677c-4bbd-a479-3f349cb785e7' AS UUID)");
assertQuery(format("SELECT x FROM %s WHERE y = 7531", tableName), "SELECT NULL");
assertUpdate(format("INSERT INTO %s VALUES (UUID '206caec7-68b9-4778-81b2-a12ece70c8b1', 313), (UUID '906caec7-68b9-4778-81b2-a12ece70c8b1', 314)", tableName), 2);
assertThat(query("SELECT y FROM " + tableName + " WHERE x >= UUID '206caec7-68b9-4778-81b2-a12ece70c8b1'"))
.matches(
(partitioned)
// TODO (https://github.com/trinodb/trino/issues/12834): reading Parquet with UUID filter yields incorrect results
? "VALUES BIGINT '12345', 313"
: ((format == PARQUET)
// TODO (https://github.com/trinodb/trino/issues/12834): reading Parquet with UUID filter yields incorrect results
? "VALUES BIGINT '12345'"
// this one is correct
: "VALUES BIGINT '12345', 67890, 313, 314"));
assertUpdate("DROP TABLE " + tableName);
}
@Test
public void testNestedUuid()
{
assertUpdate("CREATE TABLE test_nested_uuid (int_t int, row_t row(uuid_t uuid, int_t int), map_t map(int, uuid), array_t array(uuid))");
String uuid = "UUID '406caec7-68b9-4778-81b2-a12ece70c8b1'";
String value = format("VALUES (2, row(%1$s, 1), map(array[1], array[%1$s]), array[%1$s, %1$s])", uuid);
assertUpdate("INSERT INTO test_nested_uuid " + value, 1);
assertThat(query("SELECT row_t.int_t, row_t.uuid_t FROM test_nested_uuid"))
.matches("VALUES (1, UUID '406caec7-68b9-4778-81b2-a12ece70c8b1')");
assertThat(query("SELECT map_t[1] FROM test_nested_uuid"))
.matches("VALUES UUID '406caec7-68b9-4778-81b2-a12ece70c8b1'");
assertThat(query("SELECT array_t FROM test_nested_uuid"))
.matches("VALUES ARRAY[UUID '406caec7-68b9-4778-81b2-a12ece70c8b1', UUID '406caec7-68b9-4778-81b2-a12ece70c8b1']");
assertQuery("SELECT row_t.int_t FROM test_nested_uuid WHERE row_t.uuid_t = UUID '406caec7-68b9-4778-81b2-a12ece70c8b1'", "VALUES 1");
assertQuery("SELECT int_t FROM test_nested_uuid WHERE row_t.uuid_t = UUID '406caec7-68b9-4778-81b2-a12ece70c8b1'", "VALUES 2");
}
@Test
public void testCreatePartitionedTable()
{
assertUpdate("" +
"CREATE TABLE test_partitioned_table (" +
" a_boolean boolean, " +
" an_integer integer, " +
" a_bigint bigint, " +
" a_real real, " +
" a_double double, " +
" a_short_decimal decimal(5,2), " +
" a_long_decimal decimal(38,20), " +
" a_varchar varchar, " +
" a_varbinary varbinary, " +
" a_date date, " +
" a_time time(6), " +
" a_timestamp timestamp(6), " +
" a_timestamptz timestamp(6) with time zone, " +
" a_uuid uuid, " +
" a_row row(id integer , vc varchar), " +
" an_array array(varchar), " +
" a_map map(integer, varchar) " +
") " +
"WITH (" +
"partitioning = ARRAY[" +
" 'a_boolean', " +
" 'an_integer', " +
" 'a_bigint', " +
" 'a_real', " +
" 'a_double', " +
" 'a_short_decimal', " +
" 'a_long_decimal', " +
" 'a_varchar', " +
" 'a_varbinary', " +
" 'a_date', " +
" 'a_time', " +
" 'a_timestamp', " +
" 'a_timestamptz', " +
" 'a_uuid' " +
// Note: partitioning on non-primitive columns is not allowed in Iceberg
" ]" +
")");
assertQueryReturnsEmptyResult("SELECT * FROM test_partitioned_table");
String values = "VALUES (" +
"true, " +
"1, " +
"BIGINT '1', " +
"REAL '1.0', " +
"DOUBLE '1.0', " +
"CAST(1.0 AS decimal(5,2)), " +
"CAST(11.0 AS decimal(38,20)), " +
"VARCHAR 'onefsadfdsf', " +
"X'000102f0feff', " +
"DATE '2021-07-24'," +
"TIME '02:43:57.987654', " +
"TIMESTAMP '2021-07-24 03:43:57.987654'," +
"TIMESTAMP '2021-07-24 04:43:57.987654 UTC', " +
"UUID '20050910-1330-11e9-ffff-2a86e4085a59', " +
"CAST(ROW(42, 'this is a random value') AS ROW(id int, vc varchar)), " +
"ARRAY[VARCHAR 'uno', 'dos', 'tres'], " +
"map(ARRAY[1,2], ARRAY['ek', VARCHAR 'one'])) ";
String nullValues = nCopies(17, "NULL").stream()
.collect(joining(", ", "VALUES (", ")"));
assertUpdate("INSERT INTO test_partitioned_table " + values, 1);
assertUpdate("INSERT INTO test_partitioned_table " + nullValues, 1);
// SELECT
assertThat(query("SELECT * FROM test_partitioned_table"))
.matches(values + " UNION ALL " + nullValues);
// SELECT with predicates
assertThat(query("SELECT * FROM test_partitioned_table WHERE " +
" a_boolean = true " +
"AND an_integer = 1 " +
"AND a_bigint = BIGINT '1' " +
"AND a_real = REAL '1.0' " +
"AND a_double = DOUBLE '1.0' " +
"AND a_short_decimal = CAST(1.0 AS decimal(5,2)) " +
"AND a_long_decimal = CAST(11.0 AS decimal(38,20)) " +
"AND a_varchar = VARCHAR 'onefsadfdsf' " +
"AND a_varbinary = X'000102f0feff' " +
"AND a_date = DATE '2021-07-24' " +
"AND a_time = TIME '02:43:57.987654' " +
"AND a_timestamp = TIMESTAMP '2021-07-24 03:43:57.987654' " +
"AND a_timestamptz = TIMESTAMP '2021-07-24 04:43:57.987654 UTC' " +
"AND a_uuid = UUID '20050910-1330-11e9-ffff-2a86e4085a59' " +
"AND a_row = CAST(ROW(42, 'this is a random value') AS ROW(id int, vc varchar)) " +
"AND an_array = ARRAY[VARCHAR 'uno', 'dos', 'tres'] " +
"AND a_map = map(ARRAY[1,2], ARRAY['ek', VARCHAR 'one']) " +
""))
.matches(values);
assertThat(query("SELECT * FROM test_partitioned_table WHERE " +
" a_boolean IS NULL " +
"AND an_integer IS NULL " +
"AND a_bigint IS NULL " +
"AND a_real IS NULL " +
"AND a_double IS NULL " +
"AND a_short_decimal IS NULL " +
"AND a_long_decimal IS NULL " +
"AND a_varchar IS NULL " +
"AND a_varbinary IS NULL " +
"AND a_date IS NULL " +
"AND a_time IS NULL " +
"AND a_timestamp IS NULL " +
"AND a_timestamptz IS NULL " +
"AND a_uuid IS NULL " +
"AND a_row IS NULL " +
"AND an_array IS NULL " +
"AND a_map IS NULL " +
""))
.skippingTypesCheck()
.matches(nullValues);
// SHOW STATS
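        // Expected values differ by format: the ORC variant reports no data size for varchar/varbinary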
if (format == ORC) {
assertQuery("SHOW STATS FOR test_partitioned_table",
"VALUES " +
" ('a_boolean', NULL, NULL, 0.5, NULL, 'true', 'true'), " +
" ('an_integer', NULL, NULL, 0.5, NULL, '1', '1'), " +
" ('a_bigint', NULL, NULL, 0.5, NULL, '1', '1'), " +
" ('a_real', NULL, NULL, 0.5, NULL, '1.0', '1.0'), " +
" ('a_double', NULL, NULL, 0.5, NULL, '1.0', '1.0'), " +
" ('a_short_decimal', NULL, NULL, 0.5, NULL, '1.0', '1.0'), " +
" ('a_long_decimal', NULL, NULL, 0.5, NULL, '11.0', '11.0'), " +
" ('a_varchar', NULL, NULL, 0.5, NULL, NULL, NULL), " +
" ('a_varbinary', NULL, NULL, 0.5, NULL, NULL, NULL), " +
" ('a_date', NULL, NULL, 0.5, NULL, '2021-07-24', '2021-07-24'), " +
" ('a_time', NULL, NULL, 0.5, NULL, NULL, NULL), " +
" ('a_timestamp', NULL, NULL, 0.5, NULL, '2021-07-24 03:43:57.987654', '2021-07-24 03:43:57.987654'), " +
" ('a_timestamptz', NULL, NULL, 0.5, NULL, '2021-07-24 04:43:57.987 UTC', '2021-07-24 04:43:57.987 UTC'), " +
" ('a_uuid', NULL, NULL, 0.5, NULL, NULL, NULL), " +
" ('a_row', NULL, NULL, 0.5, NULL, NULL, NULL), " +
" ('an_array', NULL, NULL, 0.5, NULL, NULL, NULL), " +
" ('a_map', NULL, NULL, 0.5, NULL, NULL, NULL), " +
" (NULL, NULL, NULL, NULL, 2e0, NULL, NULL)");
}
else {
assertThat(query("SHOW STATS FOR test_partitioned_table"))
.skippingTypesCheck()
.matches("VALUES " +
" ('a_boolean', NULL, NULL, 0.5e0, NULL, 'true', 'true'), " +
" ('an_integer', NULL, NULL, 0.5e0, NULL, '1', '1'), " +
" ('a_bigint', NULL, NULL, 0.5e0, NULL, '1', '1'), " +
" ('a_real', NULL, NULL, 0.5e0, NULL, '1.0', '1.0'), " +
" ('a_double', NULL, NULL, 0.5e0, NULL, '1.0', '1.0'), " +
" ('a_short_decimal', NULL, NULL, 0.5e0, NULL, '1.0', '1.0'), " +
" ('a_long_decimal', NULL, NULL, 0.5e0, NULL, '11.0', '11.0'), " +
" ('a_varchar', 87e0, NULL, 0.5e0, NULL, NULL, NULL), " +
" ('a_varbinary', 82e0, NULL, 0.5e0, NULL, NULL, NULL), " +
" ('a_date', NULL, NULL, 0.5e0, NULL, '2021-07-24', '2021-07-24'), " +
" ('a_time', NULL, NULL, 0.5e0, NULL, NULL, NULL), " +
" ('a_timestamp', NULL, NULL, 0.5e0, NULL, '2021-07-24 03:43:57.987654', '2021-07-24 03:43:57.987654'), " +
" ('a_timestamptz', NULL, NULL, 0.5e0, NULL, '2021-07-24 04:43:57.987 UTC', '2021-07-24 04:43:57.987 UTC'), " +
" ('a_uuid', NULL, NULL, 0.5e0, NULL, NULL, NULL), " +
" ('a_row', NULL, NULL, NULL, NULL, NULL, NULL), " +
" ('an_array', NULL, NULL, NULL, NULL, NULL, NULL), " +
" ('a_map', NULL, NULL, NULL, NULL, NULL, NULL), " +
" (NULL, NULL, NULL, NULL, 2e0, NULL, NULL)");
}
// $partitions
String schema = getSession().getSchema().orElseThrow();
assertThat(query("SELECT column_name FROM information_schema.columns WHERE table_schema = '" + schema + "' AND table_name = 'test_partitioned_table$partitions' "))
.skippingTypesCheck()
.matches("VALUES 'partition', 'record_count', 'file_count', 'total_size'");
assertThat(query("SELECT " +
" record_count," +
" file_count, " +
" partition.a_boolean, " +
" partition.an_integer, " +
" partition.a_bigint, " +
" partition.a_real, " +
" partition.a_double, " +
" partition.a_short_decimal, " +
" partition.a_long_decimal, " +
" partition.a_varchar, " +
" partition.a_varbinary, " +
" partition.a_date, " +
" partition.a_time, " +
" partition.a_timestamp, " +
" partition.a_timestamptz, " +
" partition.a_uuid " +
// Note: partitioning on non-primitive columns is not allowed in Iceberg
" FROM \"test_partitioned_table$partitions\" "))
.matches("" +
"VALUES (" +
" BIGINT '1', " +
" BIGINT '1', " +
" true, " +
" 1, " +
" BIGINT '1', " +
" REAL '1.0', " +
" DOUBLE '1.0', " +
" CAST(1.0 AS decimal(5,2)), " +
" CAST(11.0 AS decimal(38,20)), " +
" VARCHAR 'onefsadfdsf', " +
" X'000102f0feff', " +
" DATE '2021-07-24'," +
" TIME '02:43:57.987654', " +
" TIMESTAMP '2021-07-24 03:43:57.987654'," +
" TIMESTAMP '2021-07-24 04:43:57.987654 UTC', " +
" UUID '20050910-1330-11e9-ffff-2a86e4085a59' " +
")" +
"UNION ALL " +
"VALUES (" +
" BIGINT '1', " +
" BIGINT '1', " +
" NULL, " +
" NULL, " +
" NULL, " +
" NULL, " +
" NULL, " +
" NULL, " +
" NULL, " +
" NULL, " +
" NULL, " +
" NULL, " +
" NULL, " +
" NULL, " +
" NULL, " +
" NULL " +
")");
assertUpdate("DROP TABLE test_partitioned_table");
}
@Test
public void testCreatePartitionedTableWithNestedTypes()
{
assertUpdate("" +
"CREATE TABLE test_partitioned_table_nested_type (" +
" _string VARCHAR" +
", _struct ROW(_field1 INT, _field2 VARCHAR)" +
", _date DATE" +
") " +
"WITH (" +
" partitioning = ARRAY['_date']" +
")");
dropTable("test_partitioned_table_nested_type");
}
@Test
public void testCreatePartitionedTableAs()
{
File tempDir = getDistributedQueryRunner().getCoordinator().getBaseDataDir().toFile();
String tempDirPath = tempDir.toURI().toASCIIString() + randomTableSuffix();
assertUpdate(
"CREATE TABLE test_create_partitioned_table_as " +
"WITH (" +
"format_version = 2," +
"location = '" + tempDirPath + "', " +
"partitioning = ARRAY['ORDER_STATUS', 'Ship_Priority', 'Bucket(order_key,9)']" +
") " +
"AS " +
"SELECT orderkey AS order_key, shippriority AS ship_priority, orderstatus AS order_status " +
"FROM tpch.tiny.orders",
"SELECT count(*) from orders");
assertEquals(
computeScalar("SHOW CREATE TABLE test_create_partitioned_table_as"),
format(
"CREATE TABLE %s.%s.%s (\n" +
" order_key bigint,\n" +
" ship_priority integer,\n" +
" order_status varchar\n" +
")\n" +
"WITH (\n" +
" format = '%s',\n" +
" format_version = 2,\n" +
" location = '%s',\n" +
" partitioning = ARRAY['order_status','ship_priority','bucket(order_key, 9)']\n" +
")",
getSession().getCatalog().orElseThrow(),
getSession().getSchema().orElseThrow(),
"test_create_partitioned_table_as",
format,
tempDirPath));
assertQuery("SELECT * from test_create_partitioned_table_as", "SELECT orderkey, shippriority, orderstatus FROM orders");
dropTable("test_create_partitioned_table_as");
}
@Test
public void testTableComments()
{
File tempDir = getDistributedQueryRunner().getCoordinator().getBaseDataDir().toFile();
String tempDirPath = tempDir.toURI().toASCIIString() + randomTableSuffix();
String createTableTemplate = "" +
"CREATE TABLE iceberg.tpch.test_table_comments (\n" +
" _x bigint\n" +
")\n" +
"COMMENT '%s'\n" +
"WITH (\n" +
format(" format = '%s',\n", format) +
" format_version = 2,\n" +
format(" location = '%s'\n", tempDirPath) +
")";
String createTableWithoutComment = "" +
"CREATE TABLE iceberg.tpch.test_table_comments (\n" +
" _x bigint\n" +
")\n" +
"WITH (\n" +
" format = '" + format + "',\n" +
" format_version = 2,\n" +
" location = '" + tempDirPath + "'\n" +
")";
String createTableSql = format(createTableTemplate, "test table comment", format);
assertUpdate(createTableSql);
assertEquals(computeScalar("SHOW CREATE TABLE test_table_comments"), createTableSql);
assertUpdate("COMMENT ON TABLE test_table_comments IS 'different test table comment'");
assertEquals(computeScalar("SHOW CREATE TABLE test_table_comments"), format(createTableTemplate, "different test table comment", format));
assertUpdate("COMMENT ON TABLE test_table_comments IS NULL");
assertEquals(computeScalar("SHOW CREATE TABLE test_table_comments"), createTableWithoutComment);
dropTable("iceberg.tpch.test_table_comments");
assertUpdate(createTableWithoutComment);
assertEquals(computeScalar("SHOW CREATE TABLE test_table_comments"), createTableWithoutComment);
dropTable("iceberg.tpch.test_table_comments");
}
@Test
public void testRollbackSnapshot()
{
assertUpdate("CREATE TABLE test_rollback (col0 INTEGER, col1 BIGINT)");
long afterCreateTableId = getLatestSnapshotId("test_rollback");
assertUpdate("INSERT INTO test_rollback (col0, col1) VALUES (123, CAST(987 AS BIGINT))", 1);
long afterFirstInsertId = getLatestSnapshotId("test_rollback");
assertUpdate("INSERT INTO test_rollback (col0, col1) VALUES (456, CAST(654 AS BIGINT))", 1);
assertQuery("SELECT * FROM test_rollback ORDER BY col0", "VALUES (123, CAST(987 AS BIGINT)), (456, CAST(654 AS BIGINT))");
assertUpdate(format("CALL system.rollback_to_snapshot('tpch', 'test_rollback', %s)", afterFirstInsertId));
assertQuery("SELECT * FROM test_rollback ORDER BY col0", "VALUES (123, CAST(987 AS BIGINT))");
assertUpdate(format("CALL system.rollback_to_snapshot('tpch', 'test_rollback', %s)", afterCreateTableId));
assertEquals((long) computeActual("SELECT COUNT(*) FROM test_rollback").getOnlyValue(), 0);
assertUpdate("INSERT INTO test_rollback (col0, col1) VALUES (789, CAST(987 AS BIGINT))", 1);
long afterSecondInsertId = getLatestSnapshotId("test_rollback");
// extra insert which should be dropped on rollback
assertUpdate("INSERT INTO test_rollback (col0, col1) VALUES (999, CAST(999 AS BIGINT))", 1);
assertUpdate(format("CALL system.rollback_to_snapshot('tpch', 'test_rollback', %s)", afterSecondInsertId));
assertQuery("SELECT * FROM test_rollback ORDER BY col0", "VALUES (789, CAST(987 AS BIGINT))");
dropTable("test_rollback");
}
private long getLatestSnapshotId(String tableName)
{
return (long) computeActual(format("SELECT snapshot_id FROM \"%s$snapshots\" ORDER BY committed_at DESC LIMIT 1", tableName))
.getOnlyValue();
}
@Override
protected String errorMessageForInsertIntoNotNullColumn(String columnName)
{
return "NULL value not allowed for NOT NULL column: " + columnName;
}
@Test
public void testSchemaEvolution()
{
assertUpdate("CREATE TABLE test_schema_evolution_drop_end (col0 INTEGER, col1 INTEGER, col2 INTEGER)");
assertUpdate("INSERT INTO test_schema_evolution_drop_end VALUES (0, 1, 2)", 1);
assertQuery("SELECT * FROM test_schema_evolution_drop_end", "VALUES(0, 1, 2)");
assertUpdate("ALTER TABLE test_schema_evolution_drop_end DROP COLUMN col2");
assertQuery("SELECT * FROM test_schema_evolution_drop_end", "VALUES(0, 1)");
assertUpdate("ALTER TABLE test_schema_evolution_drop_end ADD COLUMN col2 INTEGER");
assertQuery("SELECT * FROM test_schema_evolution_drop_end", "VALUES(0, 1, NULL)");
assertUpdate("INSERT INTO test_schema_evolution_drop_end VALUES (3, 4, 5)", 1);
assertQuery("SELECT * FROM test_schema_evolution_drop_end", "VALUES(0, 1, NULL), (3, 4, 5)");
dropTable("test_schema_evolution_drop_end");
assertUpdate("CREATE TABLE test_schema_evolution_drop_middle (col0 INTEGER, col1 INTEGER, col2 INTEGER)");
assertUpdate("INSERT INTO test_schema_evolution_drop_middle VALUES (0, 1, 2)", 1);
assertQuery("SELECT * FROM test_schema_evolution_drop_middle", "VALUES(0, 1, 2)");
assertUpdate("ALTER TABLE test_schema_evolution_drop_middle DROP COLUMN col1");
assertQuery("SELECT * FROM test_schema_evolution_drop_middle", "VALUES(0, 2)");
assertUpdate("ALTER TABLE test_schema_evolution_drop_middle ADD COLUMN col1 INTEGER");
assertUpdate("INSERT INTO test_schema_evolution_drop_middle VALUES (3, 4, 5)", 1);
assertQuery("SELECT * FROM test_schema_evolution_drop_middle", "VALUES(0, 2, NULL), (3, 4, 5)");
dropTable("test_schema_evolution_drop_middle");
}
@Test
public void testShowStatsAfterAddColumn()
{
assertUpdate("CREATE TABLE test_show_stats_after_add_column (col0 INTEGER, col1 INTEGER, col2 INTEGER)");
// Insert separately to ensure the table has multiple data files
assertUpdate("INSERT INTO test_show_stats_after_add_column VALUES (1, 2, 3)", 1);
assertUpdate("INSERT INTO test_show_stats_after_add_column VALUES (4, 5, 6)", 1);
assertUpdate("INSERT INTO test_show_stats_after_add_column VALUES (NULL, NULL, NULL)", 1);
assertUpdate("INSERT INTO test_show_stats_after_add_column VALUES (7, 8, 9)", 1);
assertThat(query("SHOW STATS FOR test_show_stats_after_add_column"))
.skippingTypesCheck()
.matches("VALUES " +
" ('col0', NULL, NULL, 25e-2, NULL, '1', '7')," +
" ('col1', NULL, NULL, 25e-2, NULL, '2', '8'), " +
" ('col2', NULL, NULL, 25e-2, NULL, '3', '9'), " +
" (NULL, NULL, NULL, NULL, 4e0, NULL, NULL)");
// Columns added after data files already exist will not have valid statistics, because not all files record min/max/null counts for the new column
assertUpdate("ALTER TABLE test_show_stats_after_add_column ADD COLUMN col3 INTEGER");
assertUpdate("INSERT INTO test_show_stats_after_add_column VALUES (10, 11, 12, 13)", 1);
assertThat(query("SHOW STATS FOR test_show_stats_after_add_column"))
.skippingTypesCheck()
.matches("VALUES " +
" ('col0', NULL, NULL, 2e-1, NULL, '1', '10')," +
" ('col1', NULL, NULL, 2e-1, NULL, '2', '11'), " +
" ('col2', NULL, NULL, 2e-1, NULL, '3', '12'), " +
" ('col3', NULL, NULL, NULL, NULL, NULL, NULL), " +
" (NULL, NULL, NULL, NULL, 5e0, NULL, NULL)");
dropTable("test_show_stats_after_add_column");
}
@Test
public void testLargeInOnPartitionedColumns()
{
assertUpdate("CREATE TABLE test_in_predicate_large_set (col1 BIGINT, col2 BIGINT) WITH (partitioning = ARRAY['col2'])");
assertUpdate("INSERT INTO test_in_predicate_large_set VALUES (1, 10)", 1L);
assertUpdate("INSERT INTO test_in_predicate_large_set VALUES (2, 20)", 1L);
List<String> predicates = IntStream.range(0, 25_000).boxed()
.map(Object::toString)
.collect(toImmutableList());
String filter = format("col2 IN (%s)", join(",", predicates));
assertThat(query("SELECT * FROM test_in_predicate_large_set WHERE " + filter))
.matches("TABLE test_in_predicate_large_set");
dropTable("test_in_predicate_large_set");
}
@Test
public void testCreateTableFailsOnNonEmptyPath()
{
String tableName = "test_rename_table_" + randomTableSuffix();
String tmpName = "test_rename_table_tmp_" + randomTableSuffix();
try {
assertUpdate("CREATE TABLE " + tmpName + " AS SELECT 1 as a", 1);
assertUpdate("ALTER TABLE " + tmpName + " RENAME TO " + tableName);
assertQueryFails("CREATE TABLE " + tmpName + " AS SELECT 1 as a", "Cannot create a table on a non-empty location.*");
}
finally {
assertUpdate("DROP TABLE IF EXISTS " + tableName);
assertUpdate("DROP TABLE IF EXISTS " + tmpName);
}
}
@Test
public void testCreateTableSucceedsOnEmptyDirectory()
{
File tempDir = getDistributedQueryRunner().getCoordinator().getBaseDataDir().toFile();
String tmpName = "test_rename_table_tmp_" + randomTableSuffix();
Path newPath = tempDir.toPath().resolve(tmpName);
File directory = newPath.toFile();
verify(directory.mkdirs(), "Could not create directory on the filesystem");
try {
assertUpdate("CREATE TABLE " + tmpName + " WITH (location='" + directory + "') AS SELECT 1 as a", 1);
}
finally {
assertUpdate("DROP TABLE IF EXISTS " + tmpName);
}
}
@Test
public void testCreateTableLike()
{
IcebergFileFormat otherFormat = (format == PARQUET) ? ORC : PARQUET;
testCreateTableLikeForFormat(otherFormat);
}
private void testCreateTableLikeForFormat(IcebergFileFormat otherFormat)
{
File tempDir = getDistributedQueryRunner().getCoordinator().getBaseDataDir().toFile();
String tempDirPath = tempDir.toURI().toASCIIString() + randomTableSuffix();
// LIKE source INCLUDING PROPERTIES copies all the properties of the source table, including the `location`.
// For this reason the source and the copied table will share the same directory.
// This test intentionally does not drop the created tables, to avoid affecting the source table or the information_schema.
assertUpdate(format("CREATE TABLE test_create_table_like_original (col1 INTEGER, aDate DATE) WITH(format = '%s', location = '%s', partitioning = ARRAY['aDate'])", format, tempDirPath));
assertEquals(getTablePropertiesString("test_create_table_like_original"), "WITH (\n" +
format(" format = '%s',\n", format) +
" format_version = 2,\n" +
format(" location = '%s',\n", tempDirPath) +
" partitioning = ARRAY['adate']\n" +
")");
assertUpdate("CREATE TABLE test_create_table_like_copy0 (LIKE test_create_table_like_original, col2 INTEGER)");
assertUpdate("INSERT INTO test_create_table_like_copy0 (col1, aDate, col2) VALUES (1, CAST('1950-06-28' AS DATE), 3)", 1);
assertQuery("SELECT * from test_create_table_like_copy0", "VALUES(1, CAST('1950-06-28' AS DATE), 3)");
assertUpdate("CREATE TABLE test_create_table_like_copy1 (LIKE test_create_table_like_original)");
assertEquals(getTablePropertiesString("test_create_table_like_copy1"), "WITH (\n" +
format(" format = '%s',\n format_version = 2,\n location = '%s'\n)", format, tempDir + "/iceberg_data/tpch/test_create_table_like_copy1"));
assertUpdate("CREATE TABLE test_create_table_like_copy2 (LIKE test_create_table_like_original EXCLUDING PROPERTIES)");
assertEquals(getTablePropertiesString("test_create_table_like_copy2"), "WITH (\n" +
format(" format = '%s',\n format_version = 2,\n location = '%s'\n)", format, tempDir + "/iceberg_data/tpch/test_create_table_like_copy2"));
dropTable("test_create_table_like_copy2");
assertQueryFails("CREATE TABLE test_create_table_like_copy3 (LIKE test_create_table_like_original INCLUDING PROPERTIES)",
"Cannot create a table on a non-empty location.*");
assertQueryFails(format("CREATE TABLE test_create_table_like_copy4 (LIKE test_create_table_like_original INCLUDING PROPERTIES) WITH (format = '%s')", otherFormat),
"Cannot create a table on a non-empty location.*");
}
private String getTablePropertiesString(String tableName)
{
MaterializedResult showCreateTable = computeActual("SHOW CREATE TABLE " + tableName);
String createTable = (String) getOnlyElement(showCreateTable.getOnlyColumnAsSet());
Matcher matcher = WITH_CLAUSE_EXTRACTOR.matcher(createTable);
return matcher.matches() ? matcher.group(1) : null;
}
@Test
public void testPredicating()
{
assertUpdate("CREATE TABLE test_predicating_on_real (col REAL)");
assertUpdate("INSERT INTO test_predicating_on_real VALUES 1.2", 1);
assertQuery("SELECT * FROM test_predicating_on_real WHERE col = 1.2", "VALUES 1.2");
dropTable("test_predicating_on_real");
}
@Test
public void testHourTransform()
{
assertUpdate("CREATE TABLE test_hour_transform (d TIMESTAMP(6), b BIGINT) WITH (partitioning = ARRAY['hour(d)'])");
@Language("SQL") String values = "VALUES " +
"(NULL, 101)," +
"(TIMESTAMP '1969-12-31 22:22:22.222222', 8)," +
"(TIMESTAMP '1969-12-31 23:33:11.456789', 9)," +
"(TIMESTAMP '1969-12-31 23:44:55.567890', 10)," +
"(TIMESTAMP '1970-01-01 00:55:44.765432', 11)," +
"(TIMESTAMP '2015-01-01 10:01:23.123456', 1)," +
"(TIMESTAMP '2015-01-01 10:10:02.987654', 2)," +
"(TIMESTAMP '2015-01-01 10:55:00.456789', 3)," +
"(TIMESTAMP '2015-05-15 12:05:01.234567', 4)," +
"(TIMESTAMP '2015-05-15 12:21:02.345678', 5)," +
"(TIMESTAMP '2020-02-21 13:11:11.876543', 6)," +
"(TIMESTAMP '2020-02-21 13:12:12.654321', 7)";
assertUpdate("INSERT INTO test_hour_transform " + values, 12);
assertQuery("SELECT * FROM test_hour_transform", values);
@Language("SQL") String expected = "VALUES " +
"(NULL, 1, NULL, NULL, 101, 101), " +
"(-2, 1, TIMESTAMP '1969-12-31 22:22:22.222222', TIMESTAMP '1969-12-31 22:22:22.222222', 8, 8), " +
"(-1, 2, TIMESTAMP '1969-12-31 23:33:11.456789', TIMESTAMP '1969-12-31 23:44:55.567890', 9, 10), " +
"(0, 1, TIMESTAMP '1970-01-01 00:55:44.765432', TIMESTAMP '1970-01-01 00:55:44.765432', 11, 11), " +
"(394474, 3, TIMESTAMP '2015-01-01 10:01:23.123456', TIMESTAMP '2015-01-01 10:55:00.456789', 1, 3), " +
"(397692, 2, TIMESTAMP '2015-05-15 12:05:01.234567', TIMESTAMP '2015-05-15 12:21:02.345678', 4, 5), " +
"(439525, 2, TIMESTAMP '2020-02-21 13:11:11.876543', TIMESTAMP '2020-02-21 13:12:12.654321', 6, 7)";
String expectedTimestampStats = "'1969-12-31 22:22:22.222222', '2020-02-21 13:12:12.654321'";
if (format == ORC) {
expected = "VALUES " +
"(NULL, 1, NULL, NULL, 101, 101), " +
"(-2, 1, TIMESTAMP '1969-12-31 22:22:22.222000', TIMESTAMP '1969-12-31 22:22:22.222999', 8, 8), " +
"(-1, 2, TIMESTAMP '1969-12-31 23:33:11.456000', TIMESTAMP '1969-12-31 23:44:55.567999', 9, 10), " +
"(0, 1, TIMESTAMP '1970-01-01 00:55:44.765000', TIMESTAMP '1970-01-01 00:55:44.765999', 11, 11), " +
"(394474, 3, TIMESTAMP '2015-01-01 10:01:23.123000', TIMESTAMP '2015-01-01 10:55:00.456999', 1, 3), " +
"(397692, 2, TIMESTAMP '2015-05-15 12:05:01.234000', TIMESTAMP '2015-05-15 12:21:02.345999', 4, 5), " +
"(439525, 2, TIMESTAMP '2020-02-21 13:11:11.876000', TIMESTAMP '2020-02-21 13:12:12.654999', 6, 7)";
expectedTimestampStats = "'1969-12-31 22:22:22.222000', '2020-02-21 13:12:12.654999'";
}
assertQuery("SELECT partition.d_hour, record_count, data.d.min, data.d.max, data.b.min, data.b.max FROM \"test_hour_transform$partitions\"", expected);
// Exercise IcebergMetadata.applyFilter with non-empty Constraint.predicate, via non-pushdownable predicates
assertQuery(
"SELECT * FROM test_hour_transform WHERE day_of_week(d) = 3 AND b % 7 = 3",
"VALUES (TIMESTAMP '1969-12-31 23:44:55.567890', 10)");
assertThat(query("SHOW STATS FOR test_hour_transform"))
.skippingTypesCheck()
.matches("VALUES " +
" ('d', NULL, NULL, 0.0833333e0, NULL, " + expectedTimestampStats + "), " +
" ('b', NULL, NULL, 0e0, NULL, '1', '101'), " +
" (NULL, NULL, NULL, NULL, 12e0, NULL, NULL)");
dropTable("test_hour_transform");
}
@Test
public void testDayTransformDate()
{
assertUpdate("CREATE TABLE test_day_transform_date (d DATE, b BIGINT) WITH (partitioning = ARRAY['day(d)'])");
@Language("SQL") String values = "VALUES " +
"(NULL, 101)," +
"(DATE '1969-01-01', 10), " +
"(DATE '1969-12-31', 11), " +
"(DATE '1970-01-01', 1), " +
"(DATE '1970-03-04', 2), " +
"(DATE '2015-01-01', 3), " +
"(DATE '2015-01-13', 4), " +
"(DATE '2015-01-13', 5), " +
"(DATE '2015-05-15', 6), " +
"(DATE '2015-05-15', 7), " +
"(DATE '2020-02-21', 8), " +
"(DATE '2020-02-21', 9)";
assertUpdate("INSERT INTO test_day_transform_date " + values, 12);
assertQuery("SELECT * FROM test_day_transform_date", values);
assertQuery(
"SELECT partition.d_day, record_count, data.d.min, data.d.max, data.b.min, data.b.max FROM \"test_day_transform_date$partitions\"",
"VALUES " +
"(NULL, 1, NULL, NULL, 101, 101), " +
"(DATE '1969-01-01', 1, DATE '1969-01-01', DATE '1969-01-01', 10, 10), " +
"(DATE '1969-12-31', 1, DATE '1969-12-31', DATE '1969-12-31', 11, 11), " +
"(DATE '1970-01-01', 1, DATE '1970-01-01', DATE '1970-01-01', 1, 1), " +
"(DATE '1970-03-04', 1, DATE '1970-03-04', DATE '1970-03-04', 2, 2), " +
"(DATE '2015-01-01', 1, DATE '2015-01-01', DATE '2015-01-01', 3, 3), " +
"(DATE '2015-01-13', 2, DATE '2015-01-13', DATE '2015-01-13', 4, 5), " +
"(DATE '2015-05-15', 2, DATE '2015-05-15', DATE '2015-05-15', 6, 7), " +
"(DATE '2020-02-21', 2, DATE '2020-02-21', DATE '2020-02-21', 8, 9)");
// Exercise IcebergMetadata.applyFilter with non-empty Constraint.predicate, via non-pushdownable predicates
assertQuery(
"SELECT * FROM test_day_transform_date WHERE day_of_week(d) = 3 AND b % 7 = 3",
"VALUES (DATE '1969-01-01', 10)");
assertThat(query("SHOW STATS FOR test_day_transform_date"))
.skippingTypesCheck()
.matches("VALUES " +
" ('d', NULL, NULL, 0.0833333e0, NULL, '1969-01-01', '2020-02-21'), " +
" ('b', NULL, NULL, 0e0, NULL, '1', '101'), " +
" (NULL, NULL, NULL, NULL, 12e0, NULL, NULL)");
dropTable("test_day_transform_date");
}
@Test
public void testDayTransformTimestamp()
{
assertUpdate("CREATE TABLE test_day_transform_timestamp (d TIMESTAMP(6), b BIGINT) WITH (partitioning = ARRAY['day(d)'])");
@Language("SQL") String values = "VALUES " +
"(NULL, 101)," +
"(TIMESTAMP '1969-12-25 15:13:12.876543', 8)," +
"(TIMESTAMP '1969-12-30 18:47:33.345678', 9)," +
"(TIMESTAMP '1969-12-31 00:00:00.000000', 10)," +
"(TIMESTAMP '1969-12-31 05:06:07.234567', 11)," +
"(TIMESTAMP '1970-01-01 12:03:08.456789', 12)," +
"(TIMESTAMP '2015-01-01 10:01:23.123456', 1)," +
"(TIMESTAMP '2015-01-01 11:10:02.987654', 2)," +
"(TIMESTAMP '2015-01-01 12:55:00.456789', 3)," +
"(TIMESTAMP '2015-05-15 13:05:01.234567', 4)," +
"(TIMESTAMP '2015-05-15 14:21:02.345678', 5)," +
"(TIMESTAMP '2020-02-21 15:11:11.876543', 6)," +
"(TIMESTAMP '2020-02-21 16:12:12.654321', 7)";
assertUpdate("INSERT INTO test_day_transform_timestamp " + values, 13);
assertQuery("SELECT * FROM test_day_transform_timestamp", values);
@Language("SQL") String expected = "VALUES " +
"(NULL, 1, NULL, NULL, 101, 101), " +
"(DATE '1969-12-25', 1, TIMESTAMP '1969-12-25 15:13:12.876543', TIMESTAMP '1969-12-25 15:13:12.876543', 8, 8), " +
"(DATE '1969-12-30', 1, TIMESTAMP '1969-12-30 18:47:33.345678', TIMESTAMP '1969-12-30 18:47:33.345678', 9, 9), " +
"(DATE '1969-12-31', 2, TIMESTAMP '1969-12-31 00:00:00.000000', TIMESTAMP '1969-12-31 05:06:07.234567', 10, 11), " +
"(DATE '1970-01-01', 1, TIMESTAMP '1970-01-01 12:03:08.456789', TIMESTAMP '1970-01-01 12:03:08.456789', 12, 12), " +
"(DATE '2015-01-01', 3, TIMESTAMP '2015-01-01 10:01:23.123456', TIMESTAMP '2015-01-01 12:55:00.456789', 1, 3), " +
"(DATE '2015-05-15', 2, TIMESTAMP '2015-05-15 13:05:01.234567', TIMESTAMP '2015-05-15 14:21:02.345678', 4, 5), " +
"(DATE '2020-02-21', 2, TIMESTAMP '2020-02-21 15:11:11.876543', TIMESTAMP '2020-02-21 16:12:12.654321', 6, 7)";
String expectedTimestampStats = "'1969-12-25 15:13:12.876543', '2020-02-21 16:12:12.654321'";
if (format == ORC) {
expected = "VALUES " +
"(NULL, 1, NULL, NULL, 101, 101), " +
"(DATE '1969-12-25', 1, TIMESTAMP '1969-12-25 15:13:12.876000', TIMESTAMP '1969-12-25 15:13:12.876999', 8, 8), " +
"(DATE '1969-12-30', 1, TIMESTAMP '1969-12-30 18:47:33.345000', TIMESTAMP '1969-12-30 18:47:33.345999', 9, 9), " +
"(DATE '1969-12-31', 2, TIMESTAMP '1969-12-31 00:00:00.000000', TIMESTAMP '1969-12-31 05:06:07.234999', 10, 11), " +
"(DATE '1970-01-01', 1, TIMESTAMP '1970-01-01 12:03:08.456000', TIMESTAMP '1970-01-01 12:03:08.456999', 12, 12), " +
"(DATE '2015-01-01', 3, TIMESTAMP '2015-01-01 10:01:23.123000', TIMESTAMP '2015-01-01 12:55:00.456999', 1, 3), " +
"(DATE '2015-05-15', 2, TIMESTAMP '2015-05-15 13:05:01.234000', TIMESTAMP '2015-05-15 14:21:02.345999', 4, 5), " +
"(DATE '2020-02-21', 2, TIMESTAMP '2020-02-21 15:11:11.876000', TIMESTAMP '2020-02-21 16:12:12.654999', 6, 7)";
expectedTimestampStats = "'1969-12-25 15:13:12.876000', '2020-02-21 16:12:12.654999'";
}
assertQuery("SELECT partition.d_day, record_count, data.d.min, data.d.max, data.b.min, data.b.max FROM \"test_day_transform_timestamp$partitions\"", expected);
// Exercise IcebergMetadata.applyFilter with non-empty Constraint.predicate, via non-pushdownable predicates
assertQuery(
"SELECT * FROM test_day_transform_timestamp WHERE day_of_week(d) = 3 AND b % 7 = 3",
"VALUES (TIMESTAMP '1969-12-31 00:00:00.000000', 10)");
assertThat(query("SHOW STATS FOR test_day_transform_timestamp"))
.skippingTypesCheck()
.matches("VALUES " +
" ('d', NULL, NULL, 0.0769231e0, NULL, " + expectedTimestampStats + "), " +
" ('b', NULL, NULL, 0e0, NULL, '1', '101'), " +
" (NULL, NULL, NULL, NULL, 13e0, NULL, NULL)");
dropTable("test_day_transform_timestamp");
}
@Test
public void testDayTransformTimestampWithTimeZone()
{
assertUpdate("CREATE TABLE test_day_transform_timestamptz (d timestamp(6) with time zone, b integer) WITH (partitioning = ARRAY['day(d)'])");
String values = "VALUES " +
"(NULL, 101)," +
"(TIMESTAMP '1969-12-25 15:13:12.876543 UTC', 8)," +
"(TIMESTAMP '1969-12-30 18:47:33.345678 UTC', 9)," +
"(TIMESTAMP '1969-12-31 00:00:00.000000 UTC', 10)," +
"(TIMESTAMP '1969-12-31 05:06:07.234567 UTC', 11)," +
"(TIMESTAMP '1970-01-01 12:03:08.456789 UTC', 12)," +
"(TIMESTAMP '2015-01-01 10:01:23.123456 UTC', 1)," +
"(TIMESTAMP '2015-01-01 11:10:02.987654 UTC', 2)," +
"(TIMESTAMP '2015-01-01 12:55:00.456789 UTC', 3)," +
"(TIMESTAMP '2015-05-15 13:05:01.234567 UTC', 4)," +
"(TIMESTAMP '2015-05-15 14:21:02.345678 UTC', 5)," +
"(TIMESTAMP '2020-02-21 15:11:11.876543 UTC', 6)," +
"(TIMESTAMP '2020-02-21 16:12:12.654321 UTC', 7)";
assertUpdate("INSERT INTO test_day_transform_timestamptz " + values, 13);
assertThat(query("SELECT * FROM test_day_transform_timestamptz"))
.matches(values);
String expected = "VALUES " +
"(NULL, BIGINT '1', NULL, NULL, 101, 101), " +
"(DATE '1969-12-25', 1, TIMESTAMP '1969-12-25 15:13:12.876543 UTC', TIMESTAMP '1969-12-25 15:13:12.876543 UTC', 8, 8), " +
"(DATE '1969-12-30', 1, TIMESTAMP '1969-12-30 18:47:33.345678 UTC', TIMESTAMP '1969-12-30 18:47:33.345678 UTC', 9, 9), " +
"(DATE '1969-12-31', 2, TIMESTAMP '1969-12-31 00:00:00.000000 UTC', TIMESTAMP '1969-12-31 05:06:07.234567 UTC', 10, 11), " +
"(DATE '1970-01-01', 1, TIMESTAMP '1970-01-01 12:03:08.456789 UTC', TIMESTAMP '1970-01-01 12:03:08.456789 UTC', 12, 12), " +
"(DATE '2015-01-01', 3, TIMESTAMP '2015-01-01 10:01:23.123456 UTC', TIMESTAMP '2015-01-01 12:55:00.456789 UTC', 1, 3), " +
"(DATE '2015-05-15', 2, TIMESTAMP '2015-05-15 13:05:01.234567 UTC', TIMESTAMP '2015-05-15 14:21:02.345678 UTC', 4, 5), " +
"(DATE '2020-02-21', 2, TIMESTAMP '2020-02-21 15:11:11.876543 UTC', TIMESTAMP '2020-02-21 16:12:12.654321 UTC', 6, 7)";
String expectedTimestampStats = "'1969-12-25 15:13:12.876 UTC', '2020-02-21 16:12:12.654 UTC'";
if (format == ORC) {
expected = "VALUES " +
"(NULL, BIGINT '1', NULL, NULL, 101, 101), " +
"(DATE '1969-12-25', 1, TIMESTAMP '1969-12-25 15:13:12.876000 UTC', TIMESTAMP '1969-12-25 15:13:12.876999 UTC', 8, 8), " +
"(DATE '1969-12-30', 1, TIMESTAMP '1969-12-30 18:47:33.345000 UTC', TIMESTAMP '1969-12-30 18:47:33.345999 UTC', 9, 9), " +
"(DATE '1969-12-31', 2, TIMESTAMP '1969-12-31 00:00:00.000000 UTC', TIMESTAMP '1969-12-31 05:06:07.234999 UTC', 10, 11), " +
"(DATE '1970-01-01', 1, TIMESTAMP '1970-01-01 12:03:08.456000 UTC', TIMESTAMP '1970-01-01 12:03:08.456999 UTC', 12, 12), " +
"(DATE '2015-01-01', 3, TIMESTAMP '2015-01-01 10:01:23.123000 UTC', TIMESTAMP '2015-01-01 12:55:00.456999 UTC', 1, 3), " +
"(DATE '2015-05-15', 2, TIMESTAMP '2015-05-15 13:05:01.234000 UTC', TIMESTAMP '2015-05-15 14:21:02.345999 UTC', 4, 5), " +
"(DATE '2020-02-21', 2, TIMESTAMP '2020-02-21 15:11:11.876000 UTC', TIMESTAMP '2020-02-21 16:12:12.654999 UTC', 6, 7)";
}
assertThat(query("SELECT partition.d_day, record_count, data.d.min, data.d.max, data.b.min, data.b.max FROM \"test_day_transform_timestamptz$partitions\""))
.matches(expected);
// Exercise IcebergMetadata.applyFilter with non-empty Constraint.predicate, via non-pushdownable predicates
assertThat(query("SELECT * FROM test_day_transform_timestamptz WHERE day_of_week(d) = 3 AND b % 7 = 3"))
.matches("VALUES (TIMESTAMP '1969-12-31 00:00:00.000000 UTC', 10)");
assertThat(query("SHOW STATS FOR test_day_transform_timestamptz"))
.skippingTypesCheck()
.matches("VALUES " +
" ('d', NULL, NULL, 0.0769231e0, NULL, " + expectedTimestampStats + "), " +
" ('b', NULL, NULL, 0e0, NULL, '1', '101'), " +
" (NULL, NULL, NULL, NULL, 13e0, NULL, NULL)");
assertUpdate("DROP TABLE test_day_transform_timestamptz");
}
@Test
public void testMonthTransformDate()
{
assertUpdate("CREATE TABLE test_month_transform_date (d DATE, b BIGINT) WITH (partitioning = ARRAY['month(d)'])");
@Language("SQL") String values = "VALUES " +
"(NULL, 101)," +
"(DATE '1969-11-13', 1)," +
"(DATE '1969-12-01', 2)," +
"(DATE '1969-12-02', 3)," +
"(DATE '1969-12-31', 4)," +
"(DATE '1970-01-01', 5), " +
"(DATE '1970-05-13', 6), " +
"(DATE '1970-12-31', 7), " +
"(DATE '2020-01-01', 8), " +
"(DATE '2020-06-16', 9), " +
"(DATE '2020-06-28', 10), " +
"(DATE '2020-06-06', 11), " +
"(DATE '2020-07-18', 12), " +
"(DATE '2020-07-28', 13), " +
"(DATE '2020-12-31', 14)";
assertUpdate("INSERT INTO test_month_transform_date " + values, 15);
assertQuery("SELECT * FROM test_month_transform_date", values);
assertQuery(
"SELECT partition.d_month, record_count, data.d.min, data.d.max, data.b.min, data.b.max FROM \"test_month_transform_date$partitions\"",
"VALUES " +
"(NULL, 1, NULL, NULL, 101, 101), " +
"(-2, 1, DATE '1969-11-13', DATE '1969-11-13', 1, 1), " +
"(-1, 3, DATE '1969-12-01', DATE '1969-12-31', 2, 4), " +
"(0, 1, DATE '1970-01-01', DATE '1970-01-01', 5, 5), " +
"(4, 1, DATE '1970-05-13', DATE '1970-05-13', 6, 6), " +
"(11, 1, DATE '1970-12-31', DATE '1970-12-31', 7, 7), " +
"(600, 1, DATE '2020-01-01', DATE '2020-01-01', 8, 8), " +
"(605, 3, DATE '2020-06-06', DATE '2020-06-28', 9, 11), " +
"(606, 2, DATE '2020-07-18', DATE '2020-07-28', 12, 13), " +
"(611, 1, DATE '2020-12-31', DATE '2020-12-31', 14, 14)");
// Exercise IcebergMetadata.applyFilter with non-empty Constraint.predicate, via non-pushdownable predicates
assertQuery(
"SELECT * FROM test_month_transform_date WHERE day_of_week(d) = 7 AND b % 7 = 3",
"VALUES (DATE '2020-06-28', 10)");
assertThat(query("SHOW STATS FOR test_month_transform_date"))
.skippingTypesCheck()
.matches("VALUES " +
" ('d', NULL, NULL, 0.0666667e0, NULL, '1969-11-13', '2020-12-31'), " +
" ('b', NULL, NULL, 0e0, NULL, '1', '101'), " +
" (NULL, NULL, NULL, NULL, 15e0, NULL, NULL)");
dropTable("test_month_transform_date");
}
@Test
public void testMonthTransformTimestamp()
{
assertUpdate("CREATE TABLE test_month_transform_timestamp (d TIMESTAMP(6), b BIGINT) WITH (partitioning = ARRAY['month(d)'])");
@Language("SQL") String values = "VALUES " +
"(NULL, 101)," +
"(TIMESTAMP '1969-11-15 15:13:12.876543', 8)," +
"(TIMESTAMP '1969-11-19 18:47:33.345678', 9)," +
"(TIMESTAMP '1969-12-01 00:00:00.000000', 10)," +
"(TIMESTAMP '1969-12-01 05:06:07.234567', 11)," +
"(TIMESTAMP '1970-01-01 12:03:08.456789', 12)," +
"(TIMESTAMP '2015-01-01 10:01:23.123456', 1)," +
"(TIMESTAMP '2015-01-01 11:10:02.987654', 2)," +
"(TIMESTAMP '2015-01-01 12:55:00.456789', 3)," +
"(TIMESTAMP '2015-05-15 13:05:01.234567', 4)," +
"(TIMESTAMP '2015-05-15 14:21:02.345678', 5)," +
"(TIMESTAMP '2020-02-21 15:11:11.876543', 6)," +
"(TIMESTAMP '2020-02-21 16:12:12.654321', 7)";
assertUpdate("INSERT INTO test_month_transform_timestamp " + values, 13);
assertQuery("SELECT * FROM test_month_transform_timestamp", values);
@Language("SQL") String expected = "VALUES " +
"(NULL, 1, NULL, NULL, 101, 101), " +
"(-2, 2, TIMESTAMP '1969-11-15 15:13:12.876543', TIMESTAMP '1969-11-19 18:47:33.345678', 8, 9), " +
"(-1, 2, TIMESTAMP '1969-12-01 00:00:00.000000', TIMESTAMP '1969-12-01 05:06:07.234567', 10, 11), " +
"(0, 1, TIMESTAMP '1970-01-01 12:03:08.456789', TIMESTAMP '1970-01-01 12:03:08.456789', 12, 12), " +
"(540, 3, TIMESTAMP '2015-01-01 10:01:23.123456', TIMESTAMP '2015-01-01 12:55:00.456789', 1, 3), " +
"(544, 2, TIMESTAMP '2015-05-15 13:05:01.234567', TIMESTAMP '2015-05-15 14:21:02.345678', 4, 5), " +
"(601, 2, TIMESTAMP '2020-02-21 15:11:11.876543', TIMESTAMP '2020-02-21 16:12:12.654321', 6, 7)";
String expectedTimestampStats = "'1969-11-15 15:13:12.876543', '2020-02-21 16:12:12.654321'";
if (format == ORC) {
expected = "VALUES " +
"(NULL, 1, NULL, NULL, 101, 101), " +
"(-2, 2, TIMESTAMP '1969-11-15 15:13:12.876000', TIMESTAMP '1969-11-19 18:47:33.345999', 8, 9), " +
"(-1, 2, TIMESTAMP '1969-12-01 00:00:00.000000', TIMESTAMP '1969-12-01 05:06:07.234999', 10, 11), " +
"(0, 1, TIMESTAMP '1970-01-01 12:03:08.456000', TIMESTAMP '1970-01-01 12:03:08.456999', 12, 12), " +
"(540, 3, TIMESTAMP '2015-01-01 10:01:23.123000', TIMESTAMP '2015-01-01 12:55:00.456999', 1, 3), " +
"(544, 2, TIMESTAMP '2015-05-15 13:05:01.234000', TIMESTAMP '2015-05-15 14:21:02.345999', 4, 5), " +
"(601, 2, TIMESTAMP '2020-02-21 15:11:11.876000', TIMESTAMP '2020-02-21 16:12:12.654999', 6, 7)";
expectedTimestampStats = "'1969-11-15 15:13:12.876000', '2020-02-21 16:12:12.654999'";
}
assertQuery("SELECT partition.d_month, record_count, data.d.min, data.d.max, data.b.min, data.b.max FROM \"test_month_transform_timestamp$partitions\"", expected);
// Exercise IcebergMetadata.applyFilter with non-empty Constraint.predicate, via non-pushdownable predicates
assertQuery(
"SELECT * FROM test_month_transform_timestamp WHERE day_of_week(d) = 1 AND b % 7 = 3",
"VALUES (TIMESTAMP '1969-12-01 00:00:00.000000', 10)");
assertThat(query("SHOW STATS FOR test_month_transform_timestamp"))
.skippingTypesCheck()
.matches("VALUES " +
" ('d', NULL, NULL, 0.0769231e0, NULL, " + expectedTimestampStats + "), " +
" ('b', NULL, NULL, 0e0, NULL, '1', '101'), " +
" (NULL, NULL, NULL, NULL, 13e0, NULL, NULL)");
dropTable("test_month_transform_timestamp");
}
@Test
public void testMonthTransformTimestampWithTimeZone()
{
assertUpdate("CREATE TABLE test_month_transform_timestamptz (d timestamp(6) with time zone, b integer) WITH (partitioning = ARRAY['month(d)'])");
String values = "VALUES " +
"(NULL, 101)," +
"(TIMESTAMP '1969-11-15 15:13:12.876543 UTC', 8)," +
"(TIMESTAMP '1969-11-19 18:47:33.345678 UTC', 9)," +
"(TIMESTAMP '1969-12-01 00:00:00.000000 UTC', 10)," +
"(TIMESTAMP '1969-12-01 05:06:07.234567 UTC', 11)," +
"(TIMESTAMP '1970-01-01 12:03:08.456789 UTC', 12)," +
"(TIMESTAMP '2015-01-01 10:01:23.123456 UTC', 1)," +
"(TIMESTAMP '2015-01-01 11:10:02.987654 UTC', 2)," +
"(TIMESTAMP '2015-01-01 12:55:00.456789 UTC', 3)," +
"(TIMESTAMP '2015-05-15 13:05:01.234567 UTC', 4)," +
"(TIMESTAMP '2015-05-15 14:21:02.345678 UTC', 5)," +
"(TIMESTAMP '2020-02-21 15:11:11.876543 UTC', 6)," +
"(TIMESTAMP '2020-02-21 16:12:12.654321 UTC', 7)";
assertUpdate("INSERT INTO test_month_transform_timestamptz " + values, 13);
assertThat(query("SELECT * FROM test_month_transform_timestamptz"))
.matches(values);
String expected = "VALUES " +
"(NULL, BIGINT '1', NULL, NULL, 101, 101), " +
"(-2, 2, TIMESTAMP '1969-11-15 15:13:12.876543 UTC', TIMESTAMP '1969-11-19 18:47:33.345678 UTC', 8, 9), " +
"(-1, 2, TIMESTAMP '1969-12-01 00:00:00.000000 UTC', TIMESTAMP '1969-12-01 05:06:07.234567 UTC', 10, 11), " +
"(0, 1, TIMESTAMP '1970-01-01 12:03:08.456789 UTC', TIMESTAMP '1970-01-01 12:03:08.456789 UTC', 12, 12), " +
"(540, 3, TIMESTAMP '2015-01-01 10:01:23.123456 UTC', TIMESTAMP '2015-01-01 12:55:00.456789 UTC', 1, 3), " +
"(544, 2, TIMESTAMP '2015-05-15 13:05:01.234567 UTC', TIMESTAMP '2015-05-15 14:21:02.345678 UTC', 4, 5), " +
"(601, 2, TIMESTAMP '2020-02-21 15:11:11.876543 UTC', TIMESTAMP '2020-02-21 16:12:12.654321 UTC', 6, 7)";
String expectedTimestampStats = "'1969-11-15 15:13:12.876 UTC', '2020-02-21 16:12:12.654 UTC'";
if (format == ORC) {
expected = "VALUES " +
"(NULL, BIGINT '1', NULL, NULL, 101, 101), " +
"(-2, 2, TIMESTAMP '1969-11-15 15:13:12.876000 UTC', TIMESTAMP '1969-11-19 18:47:33.345999 UTC', 8, 9), " +
"(-1, 2, TIMESTAMP '1969-12-01 00:00:00.000000 UTC', TIMESTAMP '1969-12-01 05:06:07.234999 UTC', 10, 11), " +
"(0, 1, TIMESTAMP '1970-01-01 12:03:08.456000 UTC', TIMESTAMP '1970-01-01 12:03:08.456999 UTC', 12, 12), " +
"(540, 3, TIMESTAMP '2015-01-01 10:01:23.123000 UTC', TIMESTAMP '2015-01-01 12:55:00.456999 UTC', 1, 3), " +
"(544, 2, TIMESTAMP '2015-05-15 13:05:01.234000 UTC', TIMESTAMP '2015-05-15 14:21:02.345999 UTC', 4, 5), " +
"(601, 2, TIMESTAMP '2020-02-21 15:11:11.876000 UTC', TIMESTAMP '2020-02-21 16:12:12.654999 UTC', 6, 7)";
}
assertThat(query("SELECT partition.d_month, record_count, data.d.min, data.d.max, data.b.min, data.b.max FROM \"test_month_transform_timestamptz$partitions\""))
.matches(expected);
// Exercise IcebergMetadata.applyFilter with non-empty Constraint.predicate, via non-pushdownable predicates
assertThat(query("SELECT * FROM test_month_transform_timestamptz WHERE day_of_week(d) = 1 AND b % 7 = 3"))
.matches("VALUES (TIMESTAMP '1969-12-01 00:00:00.000000 UTC', 10)");
assertThat(query("SHOW STATS FOR test_month_transform_timestamptz"))
.skippingTypesCheck()
.matches("VALUES " +
" ('d', NULL, NULL, 0.0769231e0, NULL, " + expectedTimestampStats + "), " +
" ('b', NULL, NULL, 0e0, NULL, '1', '101'), " +
" (NULL, NULL, NULL, NULL, 13e0, NULL, NULL)");
assertUpdate("DROP TABLE test_month_transform_timestamptz");
}
@Test
public void testYearTransformDate()
{
assertUpdate("CREATE TABLE test_year_transform_date (d DATE, b BIGINT) WITH (partitioning = ARRAY['year(d)'])");
@Language("SQL") String values = "VALUES " +
"(NULL, 101)," +
"(DATE '1968-10-13', 1), " +
"(DATE '1969-01-01', 2), " +
"(DATE '1969-03-15', 3), " +
"(DATE '1970-01-01', 4), " +
"(DATE '1970-03-05', 5), " +
"(DATE '2015-01-01', 6), " +
"(DATE '2015-06-16', 7), " +
"(DATE '2015-07-28', 8), " +
"(DATE '2016-05-15', 9), " +
"(DATE '2016-06-06', 10), " +
"(DATE '2020-02-21', 11), " +
"(DATE '2020-11-10', 12)";
assertUpdate("INSERT INTO test_year_transform_date " + values, 13);
assertQuery("SELECT * FROM test_year_transform_date", values);
assertQuery(
"SELECT partition.d_year, record_count, data.d.min, data.d.max, data.b.min, data.b.max FROM \"test_year_transform_date$partitions\"",
"VALUES " +
"(NULL, 1, NULL, NULL, 101, 101), " +
"(-2, 1, DATE '1968-10-13', DATE '1968-10-13', 1, 1), " +
"(-1, 2, DATE '1969-01-01', DATE '1969-03-15', 2, 3), " +
"(0, 2, DATE '1970-01-01', DATE '1970-03-05', 4, 5), " +
"(45, 3, DATE '2015-01-01', DATE '2015-07-28', 6, 8), " +
"(46, 2, DATE '2016-05-15', DATE '2016-06-06', 9, 10), " +
"(50, 2, DATE '2020-02-21', DATE '2020-11-10', 11, 12)");
// Exercise IcebergMetadata.applyFilter with non-empty Constraint.predicate, via non-pushdownable predicates
assertQuery(
"SELECT * FROM test_year_transform_date WHERE day_of_week(d) = 1 AND b % 7 = 3",
"VALUES (DATE '2016-06-06', 10)");
assertThat(query("SHOW STATS FOR test_year_transform_date"))
.skippingTypesCheck()
.matches("VALUES " +
" ('d', NULL, NULL, 0.0769231e0, NULL, '1968-10-13', '2020-11-10'), " +
" ('b', NULL, NULL, 0e0, NULL, '1', '101'), " +
" (NULL, NULL, NULL, NULL, 13e0, NULL, NULL)");
dropTable("test_year_transform_date");
}
@Test
public void testYearTransformTimestamp()
{
assertUpdate("CREATE TABLE test_year_transform_timestamp (d TIMESTAMP(6), b BIGINT) WITH (partitioning = ARRAY['year(d)'])");
@Language("SQL") String values = "VALUES " +
"(NULL, 101)," +
"(TIMESTAMP '1968-03-15 15:13:12.876543', 1)," +
"(TIMESTAMP '1968-11-19 18:47:33.345678', 2)," +
"(TIMESTAMP '1969-01-01 00:00:00.000000', 3)," +
"(TIMESTAMP '1969-01-01 05:06:07.234567', 4)," +
"(TIMESTAMP '1970-01-18 12:03:08.456789', 5)," +
"(TIMESTAMP '1970-03-14 10:01:23.123456', 6)," +
"(TIMESTAMP '1970-08-19 11:10:02.987654', 7)," +
"(TIMESTAMP '1970-12-31 12:55:00.456789', 8)," +
"(TIMESTAMP '2015-05-15 13:05:01.234567', 9)," +
"(TIMESTAMP '2015-09-15 14:21:02.345678', 10)," +
"(TIMESTAMP '2020-02-21 15:11:11.876543', 11)," +
"(TIMESTAMP '2020-08-21 16:12:12.654321', 12)";
assertUpdate("INSERT INTO test_year_transform_timestamp " + values, 13);
assertQuery("SELECT * FROM test_year_transform_timestamp", values);
@Language("SQL") String expected = "VALUES " +
"(NULL, 1, NULL, NULL, 101, 101), " +
"(-2, 2, TIMESTAMP '1968-03-15 15:13:12.876543', TIMESTAMP '1968-11-19 18:47:33.345678', 1, 2), " +
"(-1, 2, TIMESTAMP '1969-01-01 00:00:00.000000', TIMESTAMP '1969-01-01 05:06:07.234567', 3, 4), " +
"(0, 4, TIMESTAMP '1970-01-18 12:03:08.456789', TIMESTAMP '1970-12-31 12:55:00.456789', 5, 8), " +
"(45, 2, TIMESTAMP '2015-05-15 13:05:01.234567', TIMESTAMP '2015-09-15 14:21:02.345678', 9, 10), " +
"(50, 2, TIMESTAMP '2020-02-21 15:11:11.876543', TIMESTAMP '2020-08-21 16:12:12.654321', 11, 12)";
String expectedTimestampStats = "'1968-03-15 15:13:12.876543', '2020-08-21 16:12:12.654321'";
if (format == ORC) {
expected = "VALUES " +
"(NULL, 1, NULL, NULL, 101, 101), " +
"(-2, 2, TIMESTAMP '1968-03-15 15:13:12.876000', TIMESTAMP '1968-11-19 18:47:33.345999', 1, 2), " +
"(-1, 2, TIMESTAMP '1969-01-01 00:00:00.000000', TIMESTAMP '1969-01-01 05:06:07.234999', 3, 4), " +
"(0, 4, TIMESTAMP '1970-01-18 12:03:08.456000', TIMESTAMP '1970-12-31 12:55:00.456999', 5, 8), " +
"(45, 2, TIMESTAMP '2015-05-15 13:05:01.234000', TIMESTAMP '2015-09-15 14:21:02.345999', 9, 10), " +
"(50, 2, TIMESTAMP '2020-02-21 15:11:11.876000', TIMESTAMP '2020-08-21 16:12:12.654999', 11, 12)";
expectedTimestampStats = "'1968-03-15 15:13:12.876000', '2020-08-21 16:12:12.654999'";
}
assertQuery("SELECT partition.d_year, record_count, data.d.min, data.d.max, data.b.min, data.b.max FROM \"test_year_transform_timestamp$partitions\"", expected);
// Exercise IcebergMetadata.applyFilter with non-empty Constraint.predicate, via non-pushdownable predicates
assertQuery(
"SELECT * FROM test_year_transform_timestamp WHERE day_of_week(d) = 2 AND b % 7 = 3",
"VALUES (TIMESTAMP '2015-09-15 14:21:02.345678', 10)");
assertThat(query("SHOW STATS FOR test_year_transform_timestamp"))
.skippingTypesCheck()
.matches("VALUES " +
" ('d', NULL, NULL, 0.0769231e0, NULL, " + expectedTimestampStats + "), " +
" ('b', NULL, NULL, 0e0, NULL, '1', '101'), " +
" (NULL, NULL, NULL, NULL, 13e0, NULL, NULL)");
dropTable("test_year_transform_timestamp");
}
@Test
public void testYearTransformTimestampWithTimeZone()
{
assertUpdate("CREATE TABLE test_year_transform_timestamptz (d timestamp(6) with time zone, b integer) WITH (partitioning = ARRAY['year(d)'])");
String values = "VALUES " +
"(NULL, 101)," +
"(TIMESTAMP '1968-03-15 15:13:12.876543 UTC', 1)," +
"(TIMESTAMP '1968-11-19 18:47:33.345678 UTC', 2)," +
"(TIMESTAMP '1969-01-01 00:00:00.000000 UTC', 3)," +
"(TIMESTAMP '1969-01-01 05:06:07.234567 UTC', 4)," +
"(TIMESTAMP '1970-01-18 12:03:08.456789 UTC', 5)," +
"(TIMESTAMP '1970-03-14 10:01:23.123456 UTC', 6)," +
"(TIMESTAMP '1970-08-19 11:10:02.987654 UTC', 7)," +
"(TIMESTAMP '1970-12-31 12:55:00.456789 UTC', 8)," +
"(TIMESTAMP '2015-05-15 13:05:01.234567 UTC', 9)," +
"(TIMESTAMP '2015-09-15 14:21:02.345678 UTC', 10)," +
"(TIMESTAMP '2020-02-21 15:11:11.876543 UTC', 11)," +
"(TIMESTAMP '2020-08-21 16:12:12.654321 UTC', 12)";
assertUpdate("INSERT INTO test_year_transform_timestamptz " + values, 13);
assertThat(query("SELECT * FROM test_year_transform_timestamptz"))
.matches(values);
String expected = "VALUES " +
"(NULL, BIGINT '1', NULL, NULL, 101, 101), " +
"(-2, 2, TIMESTAMP '1968-03-15 15:13:12.876543 UTC', TIMESTAMP '1968-11-19 18:47:33.345678 UTC', 1, 2), " +
"(-1, 2, TIMESTAMP '1969-01-01 00:00:00.000000 UTC', TIMESTAMP '1969-01-01 05:06:07.234567 UTC', 3, 4), " +
"(0, 4, TIMESTAMP '1970-01-18 12:03:08.456789 UTC', TIMESTAMP '1970-12-31 12:55:00.456789 UTC', 5, 8), " +
"(45, 2, TIMESTAMP '2015-05-15 13:05:01.234567 UTC', TIMESTAMP '2015-09-15 14:21:02.345678 UTC', 9, 10), " +
"(50, 2, TIMESTAMP '2020-02-21 15:11:11.876543 UTC', TIMESTAMP '2020-08-21 16:12:12.654321 UTC', 11, 12)";
String expectedTimestampStats = "'1968-03-15 15:13:12.876 UTC', '2020-08-21 16:12:12.654 UTC'";
if (format == ORC) {
expected = "VALUES " +
"(NULL, BIGINT '1', NULL, NULL, 101, 101), " +
"(-2, 2, TIMESTAMP '1968-03-15 15:13:12.876000 UTC', TIMESTAMP '1968-11-19 18:47:33.345999 UTC', 1, 2), " +
"(-1, 2, TIMESTAMP '1969-01-01 00:00:00.000000 UTC', TIMESTAMP '1969-01-01 05:06:07.234999 UTC', 3, 4), " +
"(0, 4, TIMESTAMP '1970-01-18 12:03:08.456000 UTC', TIMESTAMP '1970-12-31 12:55:00.456999 UTC', 5, 8), " +
"(45, 2, TIMESTAMP '2015-05-15 13:05:01.234000 UTC', TIMESTAMP '2015-09-15 14:21:02.345999 UTC', 9, 10), " +
"(50, 2, TIMESTAMP '2020-02-21 15:11:11.876000 UTC', TIMESTAMP '2020-08-21 16:12:12.654999 UTC', 11, 12)";
}
assertThat(query("SELECT partition.d_year, record_count, data.d.min, data.d.max, data.b.min, data.b.max FROM \"test_year_transform_timestamptz$partitions\""))
.matches(expected);
// Exercise IcebergMetadata.applyFilter with non-empty Constraint.predicate, via non-pushdownable predicates
assertThat(query("SELECT * FROM test_year_transform_timestamptz WHERE day_of_week(d) = 2 AND b % 7 = 3"))
.matches("VALUES (TIMESTAMP '2015-09-15 14:21:02.345678 UTC', 10)");
assertThat(query("SHOW STATS FOR test_year_transform_timestamptz"))
.skippingTypesCheck()
.matches("VALUES " +
" ('d', NULL, NULL, 0.0769231e0, NULL, " + expectedTimestampStats + "), " +
" ('b', NULL, NULL, 0e0, NULL, '1', '101'), " +
" (NULL, NULL, NULL, NULL, 13e0, NULL, NULL)");
assertUpdate("DROP TABLE test_year_transform_timestamptz");
}
@Test
public void testTruncateTextTransform()
{
assertUpdate("CREATE TABLE test_truncate_text_transform (d VARCHAR, b BIGINT) WITH (partitioning = ARRAY['truncate(d, 2)'])");
String select = "SELECT partition.d_trunc, record_count, data.d.min AS d_min, data.d.max AS d_max, data.b.min AS b_min, data.b.max AS b_max FROM \"test_truncate_text_transform$partitions\"";
assertUpdate("INSERT INTO test_truncate_text_transform VALUES" +
"(NULL, 101)," +
"('abcd', 1)," +
"('abxy', 2)," +
"('ab598', 3)," +
"('mommy', 4)," +
"('moscow', 5)," +
"('Greece', 6)," +
"('Grozny', 7)", 8);
assertQuery("SELECT partition.d_trunc FROM \"test_truncate_text_transform$partitions\"", "VALUES NULL, 'ab', 'mo', 'Gr'");
assertQuery("SELECT b FROM test_truncate_text_transform WHERE substring(d, 1, 2) = 'ab'", "VALUES 1, 2, 3");
assertQuery(select + " WHERE partition.d_trunc = 'ab'", "VALUES ('ab', 3, 'ab598', 'abxy', 1, 3)");
assertQuery("SELECT b FROM test_truncate_text_transform WHERE substring(d, 1, 2) = 'mo'", "VALUES 4, 5");
assertQuery(select + " WHERE partition.d_trunc = 'mo'", "VALUES ('mo', 2, 'mommy', 'moscow', 4, 5)");
assertQuery("SELECT b FROM test_truncate_text_transform WHERE substring(d, 1, 2) = 'Gr'", "VALUES 6, 7");
assertQuery(select + " WHERE partition.d_trunc = 'Gr'", "VALUES ('Gr', 2, 'Greece', 'Grozny', 6, 7)");
// Exercise IcebergMetadata.applyFilter with non-empty Constraint.predicate, via non-pushdownable predicates
assertQuery(
"SELECT * FROM test_truncate_text_transform WHERE length(d) = 4 AND b % 7 = 2",
"VALUES ('abxy', 2)");
assertThat(query("SHOW STATS FOR test_truncate_text_transform"))
.skippingTypesCheck()
.matches("VALUES " +
" ('d', " + (format == PARQUET ? "205e0" : "NULL") + ", NULL, 0.125e0, NULL, NULL, NULL), " +
" ('b', NULL, NULL, 0e0, NULL, '1', '101'), " +
" (NULL, NULL, NULL, NULL, 8e0, NULL, NULL)");
dropTable("test_truncate_text_transform");
}
@Test(dataProvider = "truncateNumberTypesProvider")
public void testTruncateIntegerTransform(String dataType)
{
String table = format("test_truncate_%s_transform", dataType);
assertUpdate(format("CREATE TABLE " + table + " (d %s, b BIGINT) WITH (partitioning = ARRAY['truncate(d, 10)'])", dataType));
String select = "SELECT partition.d_trunc, record_count, data.d.min AS d_min, data.d.max AS d_max, data.b.min AS b_min, data.b.max AS b_max FROM \"" + table + "$partitions\"";
assertUpdate("INSERT INTO " + table + " VALUES" +
"(NULL, 101)," +
"(0, 1)," +
"(1, 2)," +
"(5, 3)," +
"(9, 4)," +
"(10, 5)," +
"(11, 6)," +
"(120, 7)," +
"(121, 8)," +
"(123, 9)," +
"(-1, 10)," +
"(-5, 11)," +
"(-10, 12)," +
"(-11, 13)," +
"(-123, 14)," +
"(-130, 15)", 16);
assertQuery("SELECT partition.d_trunc FROM \"" + table + "$partitions\"", "VALUES NULL, 0, 10, 120, -10, -20, -130");
assertQuery("SELECT b FROM " + table + " WHERE d IN (0, 1, 5, 9)", "VALUES 1, 2, 3, 4");
assertQuery(select + " WHERE partition.d_trunc = 0", "VALUES (0, 4, 0, 9, 1, 4)");
assertQuery("SELECT b FROM " + table + " WHERE d IN (10, 11)", "VALUES 5, 6");
assertQuery(select + " WHERE partition.d_trunc = 10", "VALUES (10, 2, 10, 11, 5, 6)");
assertQuery("SELECT b FROM " + table + " WHERE d IN (120, 121, 123)", "VALUES 7, 8, 9");
assertQuery(select + " WHERE partition.d_trunc = 120", "VALUES (120, 3, 120, 123, 7, 9)");
assertQuery("SELECT b FROM " + table + " WHERE d IN (-1, -5, -10)", "VALUES 10, 11, 12");
assertQuery(select + " WHERE partition.d_trunc = -10", "VALUES (-10, 3, -10, -1, 10, 12)");
assertQuery("SELECT b FROM " + table + " WHERE d = -11", "VALUES 13");
assertQuery(select + " WHERE partition.d_trunc = -20", "VALUES (-20, 1, -11, -11, 13, 13)");
assertQuery("SELECT b FROM " + table + " WHERE d IN (-123, -130)", "VALUES 14, 15");
assertQuery(select + " WHERE partition.d_trunc = -130", "VALUES (-130, 2, -130, -123, 14, 15)");
// Exercise IcebergMetadata.applyFilter with non-empty Constraint.predicate, via non-pushdownable predicates
assertQuery(
"SELECT * FROM " + table + " WHERE d % 10 = -1 AND b % 7 = 3",
"VALUES (-1, 10)");
assertThat(query("SHOW STATS FOR " + table))
.skippingTypesCheck()
.matches("VALUES " +
" ('d', NULL, NULL, 0.0625e0, NULL, '-130', '123'), " +
" ('b', NULL, NULL, 0e0, NULL, '1', '101'), " +
" (NULL, NULL, NULL, NULL, 16e0, NULL, NULL)");
dropTable(table);
}
@DataProvider
public Object[][] truncateNumberTypesProvider()
{
return new Object[][] {
{"integer"},
{"bigint"},
};
}
@Test
public void testTruncateDecimalTransform()
{
assertUpdate("CREATE TABLE test_truncate_decimal_transform (d DECIMAL(9, 2), b BIGINT) WITH (partitioning = ARRAY['truncate(d, 10)'])");
String select = "SELECT partition.d_trunc, record_count, data.d.min AS d_min, data.d.max AS d_max, data.b.min AS b_min, data.b.max AS b_max FROM \"test_truncate_decimal_transform$partitions\"";
assertUpdate("INSERT INTO test_truncate_decimal_transform VALUES" +
"(NULL, 101)," +
"(12.34, 1)," +
"(12.30, 2)," +
"(12.29, 3)," +
"(0.05, 4)," +
"(-0.05, 5)", 6);
assertQuery("SELECT partition.d_trunc FROM \"test_truncate_decimal_transform$partitions\"", "VALUES NULL, 12.30, 12.20, 0.00, -0.10");
assertQuery("SELECT b FROM test_truncate_decimal_transform WHERE d IN (12.34, 12.30)", "VALUES 1, 2");
assertQuery(select + " WHERE partition.d_trunc = 12.30", "VALUES (12.30, 2, 12.30, 12.34, 1, 2)");
assertQuery("SELECT b FROM test_truncate_decimal_transform WHERE d = 12.29", "VALUES 3");
assertQuery(select + " WHERE partition.d_trunc = 12.20", "VALUES (12.20, 1, 12.29, 12.29, 3, 3)");
assertQuery("SELECT b FROM test_truncate_decimal_transform WHERE d = 0.05", "VALUES 4");
assertQuery(select + " WHERE partition.d_trunc = 0.00", "VALUES (0.00, 1, 0.05, 0.05, 4, 4)");
assertQuery("SELECT b FROM test_truncate_decimal_transform WHERE d = -0.05", "VALUES 5");
assertQuery(select + " WHERE partition.d_trunc = -0.10", "VALUES (-0.10, 1, -0.05, -0.05, 5, 5)");
// Exercise IcebergMetadata.applyFilter with non-empty Constraint.predicate, via non-pushdownable predicates
assertQuery(
"SELECT * FROM test_truncate_decimal_transform WHERE d * 100 % 10 = 9 AND b % 7 = 3",
"VALUES (12.29, 3)");
assertThat(query("SHOW STATS FOR test_truncate_decimal_transform"))
.skippingTypesCheck()
.matches("VALUES " +
" ('d', NULL, NULL, 0.166667e0, NULL, '-0.05', '12.34'), " +
" ('b', NULL, NULL, 0e0, NULL, '1', '101'), " +
" (NULL, NULL, NULL, NULL, 6e0, NULL, NULL)");
dropTable("test_truncate_decimal_transform");
}
@Test
public void testBucketTransform()
{
testBucketTransformForType("DATE", "DATE '2020-05-19'", "DATE '2020-08-19'", "DATE '2020-11-19'");
testBucketTransformForType("VARCHAR", "CAST('abcd' AS VARCHAR)", "CAST('mommy' AS VARCHAR)", "CAST('abxy' AS VARCHAR)");
testBucketTransformForType("BIGINT", "CAST(100000000 AS BIGINT)", "CAST(200000002 AS BIGINT)", "CAST(400000001 AS BIGINT)");
testBucketTransformForType(
"UUID",
"CAST('206caec7-68b9-4778-81b2-a12ece70c8b1' AS UUID)",
"CAST('906caec7-68b9-4778-81b2-a12ece70c8b1' AS UUID)",
"CAST('406caec7-68b9-4778-81b2-a12ece70c8b1' AS UUID)");
}
protected void testBucketTransformForType(
String type,
String value,
String greaterValueInSameBucket,
String valueInOtherBucket)
{
String tableName = format("test_bucket_transform%s", type.toLowerCase(Locale.ENGLISH));
assertUpdate(format("CREATE TABLE %s (d %s) WITH (partitioning = ARRAY['bucket(d, 2)'])", tableName, type));
assertUpdate(format("INSERT INTO %s VALUES (NULL), (%s), (%s), (%s)", tableName, value, greaterValueInSameBucket, valueInOtherBucket), 4);
assertThat(query(format("SELECT * FROM %s", tableName))).matches(format("VALUES (NULL), (%s), (%s), (%s)", value, greaterValueInSameBucket, valueInOtherBucket));
String selectFromPartitions = format("SELECT partition.d_bucket, record_count, data.d.min AS d_min, data.d.max AS d_max FROM \"%s$partitions\"", tableName);
if (supportsIcebergFileStatistics(type)) {
assertQuery(selectFromPartitions + " WHERE partition.d_bucket = 0", format("VALUES(0, %d, %s, %s)", 2, value, greaterValueInSameBucket));
assertQuery(selectFromPartitions + " WHERE partition.d_bucket = 1", format("VALUES(1, %d, %s, %s)", 1, valueInOtherBucket, valueInOtherBucket));
}
else {
assertQuery(selectFromPartitions + " WHERE partition.d_bucket = 0", format("VALUES(0, %d, null, null)", 2));
assertQuery(selectFromPartitions + " WHERE partition.d_bucket = 1", format("VALUES(1, %d, null, null)", 1));
}
assertThat(query("SHOW STATS FOR " + tableName))
.skippingTypesCheck()
.projected(0, 2, 3, 4) // data size, min and max may vary between types
.matches("VALUES " +
" ('d', NULL, 0.25e0, NULL), " +
" (NULL, NULL, NULL, 4e0)");
dropTable(tableName);
}
@Test
public void testApplyFilterWithNonEmptyConstraintPredicate()
{
assertUpdate("CREATE TABLE test_apply_functional_constraint (d VARCHAR, b BIGINT) WITH (partitioning = ARRAY['bucket(d, 2)'])");
assertUpdate(
"INSERT INTO test_apply_functional_constraint VALUES" +
"('abcd', 1)," +
"('abxy', 2)," +
"('ab598', 3)," +
"('mommy', 4)," +
"('moscow', 5)," +
"('Greece', 6)," +
"('Grozny', 7)",
7);
assertQuery(
"SELECT * FROM test_apply_functional_constraint WHERE length(d) = 4 AND b % 7 = 2",
"VALUES ('abxy', 2)");
assertUpdate("DROP TABLE test_apply_functional_constraint");
}
@Test
public void testVoidTransform()
{
assertUpdate("CREATE TABLE test_void_transform (d VARCHAR, b BIGINT) WITH (partitioning = ARRAY['void(d)'])");
String values = "VALUES " +
"('abcd', 1)," +
"('abxy', 2)," +
"('ab598', 3)," +
"('mommy', 4)," +
"('Warsaw', 5)," +
"(NULL, 6)," +
"(NULL, 7)";
assertUpdate("INSERT INTO test_void_transform " + values, 7);
assertQuery("SELECT * FROM test_void_transform", values);
assertQuery("SELECT COUNT(*) FROM \"test_void_transform$partitions\"", "SELECT 1");
assertQuery(
"SELECT partition.d_null, record_count, file_count, data.d.min, data.d.max, data.d.null_count, data.d.nan_count, data.b.min, data.b.max, data.b.null_count, data.b.nan_count FROM \"test_void_transform$partitions\"",
"VALUES (NULL, 7, 1, 'Warsaw', 'mommy', 2, NULL, 1, 7, 0, NULL)");
assertQuery(
"SELECT d, b FROM test_void_transform WHERE d IS NOT NULL",
"VALUES " +
"('abcd', 1)," +
"('abxy', 2)," +
"('ab598', 3)," +
"('mommy', 4)," +
"('Warsaw', 5)");
assertQuery("SELECT b FROM test_void_transform WHERE d IS NULL", "VALUES 6, 7");
assertThat(query("SHOW STATS FOR test_void_transform"))
.skippingTypesCheck()
.matches("VALUES " +
" ('d', " + (format == PARQUET ? "76e0" : "NULL") + ", NULL, 0.2857142857142857, NULL, NULL, NULL), " +
" ('b', NULL, NULL, 0e0, NULL, '1', '7'), " +
" (NULL, NULL, NULL, NULL, 7e0, NULL, NULL)");
assertUpdate("DROP TABLE " + "test_void_transform");
}
@Test
public void testMetadataDeleteSimple()
{
assertUpdate("CREATE TABLE test_metadata_delete_simple (col1 BIGINT, col2 BIGINT) WITH (partitioning = ARRAY['col1'])");
assertUpdate("INSERT INTO test_metadata_delete_simple VALUES(1, 100), (1, 101), (1, 102), (2, 200), (2, 201), (3, 300)", 6);
assertQuery("SELECT sum(col2) FROM test_metadata_delete_simple", "SELECT 1004");
assertQuery("SELECT count(*) FROM \"test_metadata_delete_simple$partitions\"", "SELECT 3");
assertUpdate("DELETE FROM test_metadata_delete_simple WHERE col1 = 1", 3);
assertQuery("SELECT sum(col2) FROM test_metadata_delete_simple", "SELECT 701");
assertQuery("SELECT count(*) FROM \"test_metadata_delete_simple$partitions\"", "SELECT 2");
dropTable("test_metadata_delete_simple");
}
@Test
public void testMetadataDelete()
{
assertUpdate("CREATE TABLE test_metadata_delete (" +
" orderkey BIGINT," +
" linenumber INTEGER," +
" linestatus VARCHAR" +
") " +
"WITH (" +
" partitioning = ARRAY[ 'linenumber', 'linestatus' ]" +
")");
assertUpdate(
"" +
"INSERT INTO test_metadata_delete " +
"SELECT orderkey, linenumber, linestatus " +
"FROM tpch.tiny.lineitem",
"SELECT count(*) FROM lineitem");
assertQuery("SELECT COUNT(*) FROM \"test_metadata_delete$partitions\"", "SELECT 14");
assertUpdate("DELETE FROM test_metadata_delete WHERE linestatus = 'F' AND linenumber = 3", 5378);
assertQuery("SELECT * FROM test_metadata_delete", "SELECT orderkey, linenumber, linestatus FROM lineitem WHERE linestatus <> 'F' or linenumber <> 3");
assertQuery("SELECT count(*) FROM \"test_metadata_delete$partitions\"", "SELECT 13");
assertUpdate("DELETE FROM test_metadata_delete WHERE linestatus='O'", 30049);
assertQuery("SELECT count(*) FROM \"test_metadata_delete$partitions\"", "SELECT 6");
assertQuery("SELECT * FROM test_metadata_delete", "SELECT orderkey, linenumber, linestatus FROM lineitem WHERE linestatus <> 'O' AND linenumber <> 3");
dropTable("test_metadata_delete");
}
@Test
public void testInSet()
{
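// 31 and 35 presumably straddle an internal IN-list size threshold (e.g. 32), exercising both the small- and large-list code paths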
testInSet(31);
testInSet(35);
}
private void testInSet(int inCount)
{
String values = range(1, inCount + 1)
.mapToObj(n -> format("(%s, %s)", n, n + 10))
.collect(joining(", "));
String inList = range(1, inCount + 1)
.mapToObj(Integer::toString)
.collect(joining(", "));
assertUpdate("CREATE TABLE test_in_set (col1 INTEGER, col2 BIGINT)");
assertUpdate(format("INSERT INTO test_in_set VALUES %s", values), inCount);
// This verifies that SELECT queries with large IN lists are handled correctly
computeActual(format("SELECT col1 FROM test_in_set WHERE col1 IN (%s)", inList));
dropTable("test_in_set");
}
@Test
public void testBasicTableStatistics()
{
String tableName = "test_basic_table_statistics";
assertUpdate(format("CREATE TABLE %s (col REAL)", tableName));
assertThat(query("SHOW STATS FOR " + tableName))
.skippingTypesCheck()
.matches("VALUES " +
" ('col', 0e0, 0e0, 1e0, NULL, NULL, NULL), " +
" (NULL, NULL, NULL, NULL, 0e0, NULL, NULL)");
assertUpdate("INSERT INTO " + tableName + " VALUES -10", 1);
assertUpdate("INSERT INTO " + tableName + " VALUES 100", 1);
assertThat(query("SHOW STATS FOR " + tableName))
.skippingTypesCheck()
.matches("VALUES " +
" ('col', NULL, NULL, 0e0, NULL, '-10.0', '100.0'), " +
" (NULL, NULL, NULL, NULL, 2e0, NULL, NULL)");
assertUpdate("INSERT INTO " + tableName + " VALUES 200", 1);
assertThat(query("SHOW STATS FOR " + tableName))
.skippingTypesCheck()
.matches("VALUES " +
" ('col', NULL, NULL, 0e0, NULL, '-10.0', '200.0'), " +
" (NULL, NULL, NULL, NULL, 3e0, NULL, NULL)");
dropTable(tableName);
}
@Test
public void testMultipleColumnTableStatistics()
{
String tableName = "test_multiple_table_statistics";
assertUpdate(format("CREATE TABLE %s (col1 REAL, col2 INTEGER, col3 DATE)", tableName));
assertUpdate("INSERT INTO " + tableName + " VALUES (-10, -1, DATE '2019-06-28')", 1);
assertUpdate("INSERT INTO " + tableName + " VALUES (100, 10, DATE '2020-01-01')", 1);
MaterializedResult result = computeActual("SHOW STATS FOR " + tableName);
MaterializedResult expectedStatistics =
resultBuilder(getSession(), VARCHAR, DOUBLE, DOUBLE, DOUBLE, DOUBLE, VARCHAR, VARCHAR)
.row("col1", null, null, 0.0, null, "-10.0", "100.0")
.row("col2", null, null, 0.0, null, "-1", "10")
.row("col3", null, null, 0.0, null, "2019-06-28", "2020-01-01")
.row(null, null, null, null, 2.0, null, null)
.build();
assertEquals(result, expectedStatistics);
assertUpdate("INSERT INTO " + tableName + " VALUES (200, 20, DATE '2020-06-28')", 1);
result = computeActual("SHOW STATS FOR " + tableName);
expectedStatistics =
resultBuilder(getSession(), VARCHAR, DOUBLE, DOUBLE, DOUBLE, DOUBLE, VARCHAR, VARCHAR)
.row("col1", null, null, 0.0, null, "-10.0", "200.0")
.row("col2", null, null, 0.0, null, "-1", "20")
.row("col3", null, null, 0.0, null, "2019-06-28", "2020-06-28")
.row(null, null, null, null, 3.0, null, null)
.build();
assertEquals(result, expectedStatistics);
assertUpdate("INSERT INTO " + tableName + " VALUES " + IntStream.rangeClosed(21, 25)
.mapToObj(i -> format("(200, %d, DATE '2020-07-%d')", i, i))
.collect(joining(", ")), 5);
assertUpdate("INSERT INTO " + tableName + " VALUES " + IntStream.rangeClosed(26, 30)
.mapToObj(i -> format("(NULL, %d, DATE '2020-06-%d')", i, i))
.collect(joining(", ")), 5);
result = computeActual("SHOW STATS FOR " + tableName);
expectedStatistics =
resultBuilder(getSession(), VARCHAR, DOUBLE, DOUBLE, DOUBLE, DOUBLE, VARCHAR, VARCHAR)
.row("col1", null, null, 5.0 / 13.0, null, "-10.0", "200.0")
.row("col2", null, null, 0.0, null, "-1", "30")
.row("col3", null, null, 0.0, null, "2019-06-28", "2020-07-25")
.row(null, null, null, null, 13.0, null, null)
.build();
assertEquals(result, expectedStatistics);
dropTable(tableName);
}
@Test
public void testPartitionedTableStatistics()
{
assertUpdate("CREATE TABLE iceberg.tpch.test_partitioned_table_statistics (col1 REAL, col2 BIGINT) WITH (partitioning = ARRAY['col2'])");
assertUpdate("INSERT INTO test_partitioned_table_statistics VALUES (-10, -1)", 1);
assertUpdate("INSERT INTO test_partitioned_table_statistics VALUES (100, 10)", 1);
MaterializedResult result = computeActual("SHOW STATS FOR iceberg.tpch.test_partitioned_table_statistics");
assertEquals(result.getRowCount(), 3);
MaterializedRow row0 = result.getMaterializedRows().get(0);
assertEquals(row0.getField(0), "col1");
assertEquals(row0.getField(3), 0.0);
assertEquals(row0.getField(5), "-10.0");
assertEquals(row0.getField(6), "100.0");
MaterializedRow row1 = result.getMaterializedRows().get(1);
assertEquals(row1.getField(0), "col2");
assertEquals(row1.getField(3), 0.0);
assertEquals(row1.getField(5), "-1");
assertEquals(row1.getField(6), "10");
MaterializedRow row2 = result.getMaterializedRows().get(2);
assertEquals(row2.getField(4), 2.0);
assertUpdate("INSERT INTO test_partitioned_table_statistics VALUES " + IntStream.rangeClosed(1, 5)
.mapToObj(i -> format("(%d, 10)", i + 100))
.collect(joining(", ")), 5);
assertUpdate("INSERT INTO test_partitioned_table_statistics VALUES " + IntStream.rangeClosed(6, 10)
.mapToObj(i -> "(NULL, 10)")
.collect(joining(", ")), 5);
result = computeActual("SHOW STATS FOR iceberg.tpch.test_partitioned_table_statistics");
assertEquals(result.getRowCount(), 3);
row0 = result.getMaterializedRows().get(0);
assertEquals(row0.getField(0), "col1");
assertEquals(row0.getField(3), 5.0 / 12.0);
assertEquals(row0.getField(5), "-10.0");
assertEquals(row0.getField(6), "105.0");
row1 = result.getMaterializedRows().get(1);
assertEquals(row1.getField(0), "col2");
assertEquals(row1.getField(3), 0.0);
assertEquals(row1.getField(5), "-1");
assertEquals(row1.getField(6), "10");
row2 = result.getMaterializedRows().get(2);
assertEquals(row2.getField(4), 12.0);
assertUpdate("INSERT INTO test_partitioned_table_statistics VALUES " + IntStream.rangeClosed(6, 10)
.mapToObj(i -> "(100, NULL)")
.collect(joining(", ")), 5);
result = computeActual("SHOW STATS FOR iceberg.tpch.test_partitioned_table_statistics");
row0 = result.getMaterializedRows().get(0);
assertEquals(row0.getField(0), "col1");
assertEquals(row0.getField(3), 5.0 / 17.0);
assertEquals(row0.getField(5), "-10.0");
assertEquals(row0.getField(6), "105.0");
row1 = result.getMaterializedRows().get(1);
assertEquals(row1.getField(0), "col2");
assertEquals(row1.getField(3), 5.0 / 17.0);
assertEquals(row1.getField(5), "-1");
assertEquals(row1.getField(6), "10");
row2 = result.getMaterializedRows().get(2);
assertEquals(row2.getField(4), 17.0);
dropTable("iceberg.tpch.test_partitioned_table_statistics");
}
@Test
public void testPredicatePushdown()
{
QualifiedObjectName tableName = new QualifiedObjectName("iceberg", "tpch", "test_predicate");
assertUpdate(format("CREATE TABLE %s (col1 BIGINT, col2 BIGINT, col3 BIGINT) WITH (partitioning = ARRAY['col2', 'col3'])", tableName));
assertUpdate(format("INSERT INTO %s VALUES (1, 10, 100)", tableName), 1L);
assertUpdate(format("INSERT INTO %s VALUES (2, 20, 200)", tableName), 1L);
assertQuery(format("SELECT * FROM %s WHERE col1 = 1", tableName), "VALUES (1, 10, 100)");
assertFilterPushdown(
tableName,
ImmutableMap.of("col1", singleValue(BIGINT, 1L)),
ImmutableMap.of(),
ImmutableMap.of("col1", singleValue(BIGINT, 1L)));
assertQuery(format("SELECT * FROM %s WHERE col2 = 10", tableName), "VALUES (1, 10, 100)");
assertFilterPushdown(
tableName,
ImmutableMap.of("col2", singleValue(BIGINT, 10L)),
ImmutableMap.of("col2", singleValue(BIGINT, 10L)),
ImmutableMap.of());
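// with predicates on both, the partition-column part is enforced while the data-column part remains unenforced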
assertQuery(format("SELECT * FROM %s WHERE col1 = 1 AND col2 = 10", tableName), "VALUES (1, 10, 100)");
assertFilterPushdown(
tableName,
ImmutableMap.of("col1", singleValue(BIGINT, 1L), "col2", singleValue(BIGINT, 10L)),
ImmutableMap.of("col2", singleValue(BIGINT, 10L)),
ImmutableMap.of("col1", singleValue(BIGINT, 1L)));
// Assert pushdown for an IN predicate with value count above the default compaction threshold
List<Long> values = LongStream.range(1L, 1010L).boxed()
.filter(index -> index != 20L)
.collect(toImmutableList());
assertThat(values).hasSizeGreaterThan(ICEBERG_DOMAIN_COMPACTION_THRESHOLD);
String valuesString = join(",", values.stream().map(Object::toString).collect(toImmutableList()));
String inPredicate = "%s IN (" + valuesString + ")";
assertQuery(
format("SELECT * FROM %s WHERE %s AND %s", tableName, format(inPredicate, "col1"), format(inPredicate, "col2")),
"VALUES (1, 10, 100)");
assertFilterPushdown(
tableName,
ImmutableMap.of("col1", multipleValues(BIGINT, values), "col2", multipleValues(BIGINT, values)),
ImmutableMap.of("col2", multipleValues(BIGINT, values)),
// Unenforced predicate is simplified during split generation, but not reflected here
ImmutableMap.of("col1", multipleValues(BIGINT, values)));
dropTable(tableName.getObjectName());
}
@Test
public void testPredicatesWithStructuralTypes()
{
String tableName = "test_predicate_with_structural_types";
assertUpdate("CREATE TABLE " + tableName + " (id INT, array_t ARRAY(BIGINT), map_t MAP(BIGINT, BIGINT), struct_t ROW(f1 BIGINT, f2 BIGINT))");
assertUpdate("INSERT INTO " + tableName + " VALUES " +
"(1, ARRAY[1, 2, 3], MAP(ARRAY[1,3], ARRAY[2,4]), ROW(1, 2)), " +
"(11, ARRAY[11, 12, 13], MAP(ARRAY[11, 13], ARRAY[12, 14]), ROW(11, 12)), " +
"(11, ARRAY[111, 112, 113], MAP(ARRAY[111, 13], ARRAY[112, 114]), ROW(111, 112)), " +
"(21, ARRAY[21, 22, 23], MAP(ARRAY[21, 23], ARRAY[22, 24]), ROW(21, 22))",
4);
assertQuery("SELECT id FROM " + tableName + " WHERE array_t = ARRAY[1, 2, 3]", "VALUES 1");
assertQuery("SELECT id FROM " + tableName + " WHERE map_t = MAP(ARRAY[11, 13], ARRAY[12, 14])", "VALUES 11");
assertQuery("SELECT id FROM " + tableName + " WHERE struct_t = ROW(21, 22)", "VALUES 21");
assertQuery("SELECT struct_t.f1 FROM " + tableName + " WHERE id = 11 AND map_t = MAP(ARRAY[11, 13], ARRAY[12, 14])", "VALUES 11");
dropTable(tableName);
}
@Test(dataProviderClass = DataProviders.class, dataProvider = "trueFalse")
public void testPartitionsTableWithColumnNameConflict(boolean partitioned)
{
assertUpdate("DROP TABLE IF EXISTS test_partitions_with_conflict");
assertUpdate("CREATE TABLE test_partitions_with_conflict (" +
" p integer, " +
" row_count integer, " +
" record_count integer, " +
" file_count integer, " +
" total_size integer " +
") " +
(partitioned ? "WITH(partitioning = ARRAY['p'])" : ""));
assertUpdate("INSERT INTO test_partitions_with_conflict VALUES (11, 12, 13, 14, 15)", 1);
// sanity check
assertThat(query("SELECT * FROM test_partitions_with_conflict"))
.matches("VALUES (11, 12, 13, 14, 15)");
// test $partitions
assertThat(query("SELECT * FROM \"test_partitions_with_conflict$partitions\""))
.matches("SELECT " +
(partitioned ? "CAST(ROW(11) AS row(p integer)), " : "") +
"BIGINT '1', " +
"BIGINT '1', " +
// total_size is not exactly deterministic, so grab whatever value there is
"(SELECT total_size FROM \"test_partitions_with_conflict$partitions\"), " +
"CAST(" +
" ROW (" +
(partitioned ? "" : " ROW(11, 11, 0, NULL), ") +
" ROW(12, 12, 0, NULL), " +
" ROW(13, 13, 0, NULL), " +
" ROW(14, 14, 0, NULL), " +
" ROW(15, 15, 0, NULL) " +
" ) " +
" AS row(" +
(partitioned ? "" : " p row(min integer, max integer, null_count bigint, nan_count bigint), ") +
" row_count row(min integer, max integer, null_count bigint, nan_count bigint), " +
" record_count row(min integer, max integer, null_count bigint, nan_count bigint), " +
" file_count row(min integer, max integer, null_count bigint, nan_count bigint), " +
" total_size row(min integer, max integer, null_count bigint, nan_count bigint) " +
" )" +
")");
assertUpdate("DROP TABLE test_partitions_with_conflict");
}
private void assertFilterPushdown(
QualifiedObjectName tableName,
Map<String, Domain> filter,
Map<String, Domain> expectedEnforcedPredicate,
Map<String, Domain> expectedUnenforcedPredicate)
{
Metadata metadata = getQueryRunner().getMetadata();
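// applyFilter is expected to split the constraint into an enforced predicate (satisfied by pruning)
// and an unenforced remainder that the engine re-evaluates during the scan.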
newTransaction().execute(getSession(), session -> {
TableHandle table = metadata.getTableHandle(session, tableName)
.orElseThrow(() -> new TableNotFoundException(tableName.asSchemaTableName()));
Map<String, ColumnHandle> columns = metadata.getColumnHandles(session, table);
TupleDomain<ColumnHandle> domains = TupleDomain.withColumnDomains(
filter.entrySet().stream()
.collect(toImmutableMap(entry -> columns.get(entry.getKey()), Map.Entry::getValue)));
Optional<ConstraintApplicationResult<TableHandle>> result = metadata.applyFilter(session, table, new Constraint(domains));
assertTrue(result.isEmpty() == (expectedUnenforcedPredicate == null && expectedEnforcedPredicate == null));
if (result.isPresent()) {
IcebergTableHandle newTable = (IcebergTableHandle) result.get().getHandle().getConnectorHandle();
assertEquals(
newTable.getEnforcedPredicate(),
TupleDomain.withColumnDomains(expectedEnforcedPredicate.entrySet().stream()
.collect(toImmutableMap(entry -> columns.get(entry.getKey()), Map.Entry::getValue))));
assertEquals(
newTable.getUnenforcedPredicate(),
TupleDomain.withColumnDomains(expectedUnenforcedPredicate.entrySet().stream()
.collect(toImmutableMap(entry -> columns.get(entry.getKey()), Map.Entry::getValue))));
}
});
}
@Test
public void testCreateNestedPartitionedTable()
{
assertUpdate("CREATE TABLE test_nested_table_1 (" +
" bool BOOLEAN" +
", int INTEGER" +
", arr ARRAY(VARCHAR)" +
", big BIGINT" +
", rl REAL" +
", dbl DOUBLE" +
", mp MAP(INTEGER, VARCHAR)" +
", dec DECIMAL(5,2)" +
", vc VARCHAR" +
", vb VARBINARY" +
", ts TIMESTAMP(6)" +
", tstz TIMESTAMP(6) WITH TIME ZONE" +
", str ROW(id INTEGER , vc VARCHAR)" +
", dt DATE)" +
" WITH (partitioning = ARRAY['int'])");
assertUpdate(
"INSERT INTO test_nested_table_1 " +
" select true, 1, array['uno', 'dos', 'tres'], BIGINT '1', REAL '1.0', DOUBLE '1.0', map(array[1,2,3,4], array['ek','don','teen','char'])," +
" CAST(1.0 as DECIMAL(5,2))," +
" 'one', VARBINARY 'binary0/1values',\n" +
" TIMESTAMP '2021-07-24 02:43:57.348000'," +
" TIMESTAMP '2021-07-24 02:43:57.348000 UTC'," +
" (CAST(ROW(null, 'this is a random value') AS ROW(int, varchar))), " +
" DATE '2021-07-24'",
1);
assertEquals(computeActual("SELECT * from test_nested_table_1").getRowCount(), 1);
assertThat(query("SHOW STATS FOR test_nested_table_1"))
.skippingTypesCheck()
.matches("VALUES " +
" ('bool', NULL, NULL, 0e0, NULL, 'true', 'true'), " +
" ('int', NULL, NULL, 0e0, NULL, '1', '1'), " +
" ('arr', NULL, NULL, " + (format == ORC ? "0e0" : "NULL") + ", NULL, NULL, NULL), " +
" ('big', NULL, NULL, 0e0, NULL, '1', '1'), " +
" ('rl', NULL, NULL, 0e0, NULL, '1.0', '1.0'), " +
" ('dbl', NULL, NULL, 0e0, NULL, '1.0', '1.0'), " +
" ('mp', NULL, NULL, " + (format == ORC ? "0e0" : "NULL") + ", NULL, NULL, NULL), " +
" ('dec', NULL, NULL, 0e0, NULL, '1.0', '1.0'), " +
" ('vc', " + (format == PARQUET ? "43e0" : "NULL") + ", NULL, 0e0, NULL, NULL, NULL), " +
" ('vb', " + (format == PARQUET ? "55e0" : "NULL") + ", NULL, 0e0, NULL, NULL, NULL), " +
" ('ts', NULL, NULL, 0e0, NULL, '2021-07-24 02:43:57.348000', " + (format == ORC ? "'2021-07-24 02:43:57.348999'" : "'2021-07-24 02:43:57.348000'") + "), " +
" ('tstz', NULL, NULL, 0e0, NULL, '2021-07-24 02:43:57.348 UTC', '2021-07-24 02:43:57.348 UTC'), " +
" ('str', NULL, NULL, " + (format == ORC ? "0e0" : "NULL") + ", NULL, NULL, NULL), " +
" ('dt', NULL, NULL, 0e0, NULL, '2021-07-24', '2021-07-24'), " +
" (NULL, NULL, NULL, NULL, 1e0, NULL, NULL)");
dropTable("test_nested_table_1");
assertUpdate("" +
"CREATE TABLE test_nested_table_2 (" +
" int INTEGER" +
", arr ARRAY(ROW(id INTEGER, vc VARCHAR))" +
", big BIGINT" +
", rl REAL" +
", dbl DOUBLE" +
", mp MAP(INTEGER, ARRAY(VARCHAR))" +
", dec DECIMAL(5,2)" +
", str ROW(id INTEGER, vc VARCHAR, arr ARRAY(INTEGER))" +
", vc VARCHAR)" +
" WITH (partitioning = ARRAY['int'])");
assertUpdate(
"INSERT INTO test_nested_table_2 " +
" select 1, array[cast(row(1, null) as row(int, varchar)), cast(row(2, 'dos') as row(int, varchar))], BIGINT '1', REAL '1.0', DOUBLE '1.0', " +
"map(array[1,2], array[array['ek', 'one'], array['don', 'do', 'two']]), CAST(1.0 as DECIMAL(5,2)), " +
"CAST(ROW(1, 'this is a random value', null) AS ROW(int, varchar, array(int))), 'one'",
1);
assertEquals(computeActual("SELECT * from test_nested_table_2").getRowCount(), 1);
assertThat(query("SHOW STATS FOR test_nested_table_2"))
.skippingTypesCheck()
.matches("VALUES " +
" ('int', NULL, NULL, 0e0, NULL, '1', '1'), " +
" ('arr', NULL, NULL, " + (format == ORC ? "0e0" : "NULL") + ", NULL, NULL, NULL), " +
" ('big', NULL, NULL, 0e0, NULL, '1', '1'), " +
" ('rl', NULL, NULL, 0e0, NULL, '1.0', '1.0'), " +
" ('dbl', NULL, NULL, 0e0, NULL, '1.0', '1.0'), " +
" ('mp', NULL, NULL, " + (format == ORC ? "0e0" : "NULL") + ", NULL, NULL, NULL), " +
" ('dec', NULL, NULL, 0e0, NULL, '1.0', '1.0'), " +
" ('vc', " + (format == PARQUET ? "43e0" : "NULL") + ", NULL, 0e0, NULL, NULL, NULL), " +
" ('str', NULL, NULL, " + (format == ORC ? "0e0" : "NULL") + ", NULL, NULL, NULL), " +
" (NULL, NULL, NULL, NULL, 1e0, NULL, NULL)");
assertUpdate("CREATE TABLE test_nested_table_3 WITH (partitioning = ARRAY['int']) AS SELECT * FROM test_nested_table_2", 1);
assertEquals(computeActual("SELECT * FROM test_nested_table_3").getRowCount(), 1);
assertThat(query("SHOW STATS FOR test_nested_table_3"))
.matches("SHOW STATS FOR test_nested_table_2");
dropTable("test_nested_table_2");
dropTable("test_nested_table_3");
}
@Test
public void testSerializableReadIsolation()
{
assertUpdate("CREATE TABLE test_read_isolation (x int)");
assertUpdate("INSERT INTO test_read_isolation VALUES 123, 456", 2);
withTransaction(session -> {
assertQuery(session, "SELECT * FROM test_read_isolation", "VALUES 123, 456");
assertUpdate("INSERT INTO test_read_isolation VALUES 789", 1);
assertQuery("SELECT * FROM test_read_isolation", "VALUES 123, 456, 789");
assertQuery(session, "SELECT * FROM test_read_isolation", "VALUES 123, 456");
});
assertQuery("SELECT * FROM test_read_isolation", "VALUES 123, 456, 789");
dropTable("test_read_isolation");
}
private void withTransaction(Consumer<Session> consumer)
{
transaction(getQueryRunner().getTransactionManager(), getQueryRunner().getAccessControl())
.readCommitted()
.execute(getSession(), consumer);
}
private void dropTable(String table)
{
Session session = getSession();
assertUpdate(session, "DROP TABLE " + table);
assertFalse(getQueryRunner().tableExists(session, table));
}
@Test
public void testOptimizedMetadataQueries()
{
Session session = Session.builder(getSession())
.setSystemProperty("optimize_metadata_queries", "true")
.build();
assertUpdate("CREATE TABLE test_metadata_optimization (a BIGINT, b BIGINT, c BIGINT) WITH (PARTITIONING = ARRAY['b', 'c'])");
assertUpdate("INSERT INTO test_metadata_optimization VALUES (5, 6, 7), (8, 9, 10)", 2);
assertQuery(session, "SELECT DISTINCT b FROM test_metadata_optimization", "VALUES (6), (9)");
assertQuery(session, "SELECT DISTINCT b, c FROM test_metadata_optimization", "VALUES (6, 7), (9, 10)");
assertQuery(session, "SELECT DISTINCT b FROM test_metadata_optimization WHERE b < 7", "VALUES (6)");
assertQuery(session, "SELECT DISTINCT b FROM test_metadata_optimization WHERE c > 8", "VALUES (9)");
// Assert behavior after metadata delete
assertUpdate("DELETE FROM test_metadata_optimization WHERE b = 6", 1);
assertQuery(session, "SELECT DISTINCT b FROM test_metadata_optimization", "VALUES (9)");
// TODO: assert behavior after deleting the last row of a partition, once row-level deletes are supported.
// i.e. a query like 'DELETE FROM test_metadata_optimization WHERE b = 6 AND a = 5'
dropTable("test_metadata_optimization");
}
@Test
public void testFileSizeInManifest()
throws Exception
{
assertUpdate("CREATE TABLE test_file_size_in_manifest (" +
"a_bigint bigint, " +
"a_varchar varchar, " +
"a_long_decimal decimal(38,20), " +
"a_map map(varchar, integer))");
assertUpdate(
"INSERT INTO test_file_size_in_manifest VALUES " +
"(NULL, NULL, NULL, NULL), " +
"(42, 'some varchar value', DECIMAL '123456789123456789.123456789123456789', map(ARRAY['abc', 'def'], ARRAY[113, -237843832]))",
2);
MaterializedResult files = computeActual("SELECT file_path, record_count, file_size_in_bytes FROM \"test_file_size_in_manifest$files\"");
long totalRecordCount = 0;
for (MaterializedRow row : files.getMaterializedRows()) {
String path = (String) row.getField(0);
Long recordCount = (Long) row.getField(1);
Long fileSizeInBytes = (Long) row.getField(2);
totalRecordCount += recordCount;
assertThat(fileSizeInBytes).isEqualTo(Files.size(Paths.get(path)));
}
// Verify sum(record_count) to make sure we have all the files.
assertThat(totalRecordCount).isEqualTo(2);
}
@Test
public void testIncorrectIcebergFileSizes()
throws Exception
{
// Create a table with a single insert
assertUpdate("CREATE TABLE test_iceberg_file_size (x BIGINT)");
assertUpdate("INSERT INTO test_iceberg_file_size VALUES (123), (456), (758)", 3);
// Get manifest file
MaterializedResult result = computeActual("SELECT path FROM \"test_iceberg_file_size$manifests\"");
assertEquals(result.getRowCount(), 1);
String manifestFile = (String) result.getOnlyValue();
// Read manifest file
Schema schema;
GenericData.Record entry = null;
try (DataFileReader<GenericData.Record> dataFileReader = new DataFileReader<>(new File(manifestFile), new GenericDatumReader<>())) {
schema = dataFileReader.getSchema();
int recordCount = 0;
while (dataFileReader.hasNext()) {
entry = dataFileReader.next();
recordCount++;
}
assertEquals(recordCount, 1);
}
// Alter data file entry to store incorrect file size
GenericData.Record dataFile = (GenericData.Record) entry.get("data_file");
long alteredValue = 50L;
assertNotEquals((long) dataFile.get("file_size_in_bytes"), alteredValue);
dataFile.put("file_size_in_bytes", alteredValue);
// Replace the file through HDFS client. This is required for correct checksums.
HdfsEnvironment.HdfsContext context = new HdfsContext(getSession().toConnectorSession());
org.apache.hadoop.fs.Path manifestFilePath = new org.apache.hadoop.fs.Path(manifestFile);
FileSystem fs = HDFS_ENVIRONMENT.getFileSystem(context, manifestFilePath);
// Write altered metadata
try (OutputStream out = fs.create(manifestFilePath);
DataFileWriter<GenericData.Record> dataFileWriter = new DataFileWriter<>(new GenericDatumWriter<>(schema))) {
dataFileWriter.create(schema, out);
dataFileWriter.append(entry);
}
// Ignoring the Iceberg-provided file size makes the query succeed
Session session = Session.builder(getSession())
.setCatalogSessionProperty("iceberg", "use_file_size_from_metadata", "false")
.build();
assertQuery(session, "SELECT * FROM test_iceberg_file_size", "VALUES (123), (456), (758)");
// Using the Iceberg-provided file size fails the query
assertQueryFails("SELECT * FROM test_iceberg_file_size",
format == ORC
? format(".*Error opening Iceberg split.*\\QIncorrect file size (%s) for file (end of stream not reached)\\E.*", alteredValue)
: format("Error reading tail from .* with length %d", alteredValue));
dropTable("test_iceberg_file_size");
}
@Test
public void testSplitPruningForFilterOnPartitionColumn()
{
String tableName = "nation_partitioned_pruning";
assertUpdate("DROP TABLE IF EXISTS " + tableName);
// disable write redistribution to get a predictable number of files written per partition (one).
Session noRedistributeWrites = Session.builder(getSession())
.setSystemProperty("redistribute_writes", "false")
.build();
assertUpdate(noRedistributeWrites, "CREATE TABLE " + tableName + " WITH (partitioning = ARRAY['regionkey']) AS SELECT * FROM nation", 25);
// sanity check that the table contains exactly 5 files
assertThat(query("SELECT count(*) FROM \"" + tableName + "$files\"")).matches("VALUES CAST(5 AS BIGINT)");
verifySplitCount("SELECT * FROM " + tableName, 5);
verifySplitCount("SELECT * FROM " + tableName + " WHERE regionkey = 3", 1);
verifySplitCount("SELECT * FROM " + tableName + " WHERE regionkey < 2", 2);
verifySplitCount("SELECT * FROM " + tableName + " WHERE regionkey < 0", 0);
verifySplitCount("SELECT * FROM " + tableName + " WHERE regionkey > 1 AND regionkey < 4", 2);
verifySplitCount("SELECT * FROM " + tableName + " WHERE regionkey % 5 = 3", 1);
assertUpdate("DROP TABLE " + tableName);
}
@Test
public void testAllAvailableTypes()
{
assertUpdate("CREATE TABLE test_all_types (" +
" a_boolean boolean, " +
" an_integer integer, " +
" a_bigint bigint, " +
" a_real real, " +
" a_double double, " +
" a_short_decimal decimal(5,2), " +
" a_long_decimal decimal(38,20), " +
" a_varchar varchar, " +
" a_varbinary varbinary, " +
" a_date date, " +
" a_time time(6), " +
" a_timestamp timestamp(6), " +
" a_timestamptz timestamp(6) with time zone, " +
" a_uuid uuid, " +
" a_row row(id integer , vc varchar), " +
" an_array array(varchar), " +
" a_map map(integer, varchar) " +
")");
String values = "VALUES (" +
"true, " +
"1, " +
"BIGINT '1', " +
"REAL '1.0', " +
"DOUBLE '1.0', " +
"CAST(1.0 AS decimal(5,2)), " +
"CAST(11.0 AS decimal(38,20)), " +
"VARCHAR 'onefsadfdsf', " +
"X'000102f0feff', " +
"DATE '2021-07-24'," +
"TIME '02:43:57.987654', " +
"TIMESTAMP '2021-07-24 03:43:57.987654'," +
"TIMESTAMP '2021-07-24 04:43:57.987654 UTC', " +
"UUID '20050910-1330-11e9-ffff-2a86e4085a59', " +
"CAST(ROW(42, 'this is a random value') AS ROW(id int, vc varchar)), " +
"ARRAY[VARCHAR 'uno', 'dos', 'tres'], " +
"map(ARRAY[1,2], ARRAY['ek', VARCHAR 'one'])) ";
String nullValues = nCopies(17, "NULL").stream()
.collect(joining(", ", "VALUES (", ")"));
assertUpdate("INSERT INTO test_all_types " + values, 1);
assertUpdate("INSERT INTO test_all_types " + nullValues, 1);
// SELECT
assertThat(query("SELECT * FROM test_all_types"))
.matches(values + " UNION ALL " + nullValues);
// SELECT with predicates
assertThat(query("SELECT * FROM test_all_types WHERE " +
" a_boolean = true " +
"AND an_integer = 1 " +
"AND a_bigint = BIGINT '1' " +
"AND a_real = REAL '1.0' " +
"AND a_double = DOUBLE '1.0' " +
"AND a_short_decimal = CAST(1.0 AS decimal(5,2)) " +
"AND a_long_decimal = CAST(11.0 AS decimal(38,20)) " +
"AND a_varchar = VARCHAR 'onefsadfdsf' " +
"AND a_varbinary = X'000102f0feff' " +
"AND a_date = DATE '2021-07-24' " +
"AND a_time = TIME '02:43:57.987654' " +
"AND a_timestamp = TIMESTAMP '2021-07-24 03:43:57.987654' " +
"AND a_timestamptz = TIMESTAMP '2021-07-24 04:43:57.987654 UTC' " +
"AND a_uuid = UUID '20050910-1330-11e9-ffff-2a86e4085a59' " +
"AND a_row = CAST(ROW(42, 'this is a random value') AS ROW(id int, vc varchar)) " +
"AND an_array = ARRAY[VARCHAR 'uno', 'dos', 'tres'] " +
"AND a_map = map(ARRAY[1,2], ARRAY['ek', VARCHAR 'one']) " +
""))
.matches(values);
assertThat(query("SELECT * FROM test_all_types WHERE " +
" a_boolean IS NULL " +
"AND an_integer IS NULL " +
"AND a_bigint IS NULL " +
"AND a_real IS NULL " +
"AND a_double IS NULL " +
"AND a_short_decimal IS NULL " +
"AND a_long_decimal IS NULL " +
"AND a_varchar IS NULL " +
"AND a_varbinary IS NULL " +
"AND a_date IS NULL " +
"AND a_time IS NULL " +
"AND a_timestamp IS NULL " +
"AND a_timestamptz IS NULL " +
"AND a_uuid IS NULL " +
"AND a_row IS NULL " +
"AND an_array IS NULL " +
"AND a_map IS NULL " +
""))
.skippingTypesCheck()
.matches(nullValues);
// SHOW STATS
assertThat(query("SHOW STATS FOR test_all_types"))
.skippingTypesCheck()
.matches("VALUES " +
" ('a_boolean', NULL, NULL, 0.5e0, NULL, 'true', 'true'), " +
" ('an_integer', NULL, NULL, 0.5e0, NULL, '1', '1'), " +
" ('a_bigint', NULL, NULL, 0.5e0, NULL, '1', '1'), " +
" ('a_real', NULL, NULL, 0.5e0, NULL, '1.0', '1.0'), " +
" ('a_double', NULL, NULL, 0.5e0, NULL, '1.0', '1.0'), " +
" ('a_short_decimal', NULL, NULL, 0.5e0, NULL, '1.0', '1.0'), " +
" ('a_long_decimal', NULL, NULL, 0.5e0, NULL, '11.0', '11.0'), " +
" ('a_varchar', " + (format == PARQUET ? "87e0" : "NULL") + ", NULL, 0.5e0, NULL, NULL, NULL), " +
" ('a_varbinary', " + (format == PARQUET ? "82e0" : "NULL") + ", NULL, 0.5e0, NULL, NULL, NULL), " +
" ('a_date', NULL, NULL, 0.5e0, NULL, '2021-07-24', '2021-07-24'), " +
" ('a_time', NULL, NULL, 0.5e0, NULL, NULL, NULL), " +
" ('a_timestamp', NULL, NULL, 0.5e0, NULL, " + (format == ORC ? "'2021-07-24 03:43:57.987000', '2021-07-24 03:43:57.987999'" : "'2021-07-24 03:43:57.987654', '2021-07-24 03:43:57.987654'") + "), " +
" ('a_timestamptz', NULL, NULL, 0.5e0, NULL, '2021-07-24 04:43:57.987 UTC', '2021-07-24 04:43:57.987 UTC'), " +
" ('a_uuid', NULL, NULL, 0.5e0, NULL, NULL, NULL), " +
" ('a_row', NULL, NULL, " + (format == ORC ? "0.5" : "NULL") + ", NULL, NULL, NULL), " +
" ('an_array', NULL, NULL, " + (format == ORC ? "0.5" : "NULL") + ", NULL, NULL, NULL), " +
" ('a_map', NULL, NULL, " + (format == ORC ? "0.5" : "NULL") + ", NULL, NULL, NULL), " +
" (NULL, NULL, NULL, NULL, 2e0, NULL, NULL)");
// $partitions
String schema = getSession().getSchema().orElseThrow();
assertThat(query("SELECT column_name FROM information_schema.columns WHERE table_schema = '" + schema + "' AND table_name = 'test_all_types$partitions' "))
.skippingTypesCheck()
.matches("VALUES 'record_count', 'file_count', 'total_size', 'data'");
assertThat(query("SELECT " +
" record_count," +
" file_count, " +
" data.a_boolean, " +
" data.an_integer, " +
" data.a_bigint, " +
" data.a_real, " +
" data.a_double, " +
" data.a_short_decimal, " +
" data.a_long_decimal, " +
" data.a_varchar, " +
" data.a_varbinary, " +
" data.a_date, " +
" data.a_time, " +
" data.a_timestamp, " +
" data.a_timestamptz, " +
" data.a_uuid " +
" FROM \"test_all_types$partitions\" "))
.matches(
"VALUES (" +
" BIGINT '2', " +
" BIGINT '2', " +
" CAST(ROW(true, true, 1, NULL) AS ROW(min boolean, max boolean, null_count bigint, nan_count bigint)), " +
" CAST(ROW(1, 1, 1, NULL) AS ROW(min integer, max integer, null_count bigint, nan_count bigint)), " +
" CAST(ROW(1, 1, 1, NULL) AS ROW(min bigint, max bigint, null_count bigint, nan_count bigint)), " +
" CAST(ROW(1, 1, 1, NULL) AS ROW(min real, max real, null_count bigint, nan_count bigint)), " +
" CAST(ROW(1, 1, 1, NULL) AS ROW(min double, max double, null_count bigint, nan_count bigint)), " +
" CAST(ROW(1, 1, 1, NULL) AS ROW(min decimal(5,2), max decimal(5,2), null_count bigint, nan_count bigint)), " +
" CAST(ROW(11, 11, 1, NULL) AS ROW(min decimal(38,20), max decimal(38,20), null_count bigint, nan_count bigint)), " +
" CAST(ROW('onefsadfdsf', 'onefsadfdsf', 1, NULL) AS ROW(min varchar, max varchar, null_count bigint, nan_count bigint)), " +
(format == ORC ?
" CAST(ROW(NULL, NULL, 1, NULL) AS ROW(min varbinary, max varbinary, null_count bigint, nan_count bigint)), " :
" CAST(ROW(X'000102f0feff', X'000102f0feff', 1, NULL) AS ROW(min varbinary, max varbinary, null_count bigint, nan_count bigint)), ") +
" CAST(ROW(DATE '2021-07-24', DATE '2021-07-24', 1, NULL) AS ROW(min date, max date, null_count bigint, nan_count bigint)), " +
" CAST(ROW(TIME '02:43:57.987654', TIME '02:43:57.987654', 1, NULL) AS ROW(min time(6), max time(6), null_count bigint, nan_count bigint)), " +
(format == ORC ?
" CAST(ROW(TIMESTAMP '2021-07-24 03:43:57.987000', TIMESTAMP '2021-07-24 03:43:57.987999', 1, NULL) AS ROW(min timestamp(6), max timestamp(6), null_count bigint, nan_count bigint)), " :
" CAST(ROW(TIMESTAMP '2021-07-24 03:43:57.987654', TIMESTAMP '2021-07-24 03:43:57.987654', 1, NULL) AS ROW(min timestamp(6), max timestamp(6), null_count bigint, nan_count bigint)), ") +
(format == ORC ?
" CAST(ROW(TIMESTAMP '2021-07-24 04:43:57.987000 UTC', TIMESTAMP '2021-07-24 04:43:57.987999 UTC', 1, NULL) AS ROW(min timestamp(6) with time zone, max timestamp(6) with time zone, null_count bigint, nan_count bigint)), " :
" CAST(ROW(TIMESTAMP '2021-07-24 04:43:57.987654 UTC', TIMESTAMP '2021-07-24 04:43:57.987654 UTC', 1, NULL) AS ROW(min timestamp(6) with time zone, max timestamp(6) with time zone, null_count bigint, nan_count bigint)), ") +
(format == ORC ?
" CAST(ROW(NULL, NULL, 1, NULL) AS ROW(min uuid, max uuid, null_count bigint, nan_count bigint)) " :
" CAST(ROW(UUID '20050910-1330-11e9-ffff-2a86e4085a59', UUID '20050910-1330-11e9-ffff-2a86e4085a59', 1, NULL) AS ROW(min uuid, max uuid, null_count bigint, nan_count bigint)) "
) +
")");
assertUpdate("DROP TABLE test_all_types");
}
@Test(timeOut = 25_000)
public void testLocalDynamicFilteringWithSelectiveBuildSideJoin()
{
// We need to prepare tables for this test. The test is required to use tables that are backed by at least two files
Session session = Session.builder(getSession())
.setSystemProperty(TASK_WRITER_COUNT, "2")
.build();
getQueryRunner().execute(session, format("CREATE TABLE IF NOT EXISTS %s AS SELECT * FROM %s", "linetime_multiple_file_backed", "tpch.tiny.lineitem")).getMaterializedRows();
getQueryRunner().execute(session, format("CREATE TABLE IF NOT EXISTS %s AS SELECT * FROM %s", "orders_multiple_file_backed", "tpch.tiny.orders")).getMaterializedRows();
long fullTableScan = (Long) computeActual("SELECT count(*) FROM linetime_multiple_file_backed").getOnlyValue();
// Pick a value for totalprice where file-level stats will not be able to filter out any data.
// This assumes the totalprice ranges in every file have some overlap; otherwise this test will fail.
MaterializedRow range = getOnlyElement(computeActual("SELECT max(lower_bounds[4]), min(upper_bounds[4]) FROM \"orders_multiple_file_backed$files\"").getMaterializedRows());
double totalPrice = (Double) computeActual(format(
"SELECT totalprice FROM orders_multiple_file_backed WHERE totalprice > %s AND totalprice < %s LIMIT 1",
range.getField(0),
range.getField(1)))
.getOnlyValue();
session = Session.builder(getSession())
.setSystemProperty(JOIN_DISTRIBUTION_TYPE, BROADCAST.name())
.setCatalogSessionProperty(ICEBERG_CATALOG, "dynamic_filtering_wait_timeout", "1h")
.build();
ResultWithQueryId<MaterializedResult> result = getDistributedQueryRunner().executeWithQueryId(
session,
"SELECT * FROM linetime_multiple_file_backed JOIN orders_multiple_file_backed ON linetime_multiple_file_backed.orderkey = orders_multiple_file_backed.orderkey AND orders_multiple_file_backed.totalprice = " + totalPrice);
OperatorStats probeStats = searchScanFilterAndProjectOperatorStats(
result.getQueryId(),
new QualifiedObjectName(ICEBERG_CATALOG, "tpch", "linetime_multiple_file_backed"));
// Assert some lineitem rows were filtered out on file level
assertThat(probeStats.getInputPositions()).isLessThan(fullTableScan);
}
@Test(dataProvider = "repartitioningDataProvider")
public void testRepartitionDataOnCtas(Session session, String partitioning, int expectedFiles)
{
testRepartitionData(session, "tpch.tiny.orders", true, partitioning, expectedFiles);
}
@Test(dataProvider = "repartitioningDataProvider")
public void testRepartitionDataOnInsert(Session session, String partitioning, int expectedFiles)
{
testRepartitionData(session, "tpch.tiny.orders", false, partitioning, expectedFiles);
}
@DataProvider
public Object[][] repartitioningDataProvider()
{
Session defaultSession = getSession();
// For identity-only partitioning, the Iceberg connector returns a ConnectorTableLayout with partitionColumns set, but without partitioning.
// The engine treats this as "preferred", not mandatory, partitioning, and ignores it if stats suggest the number of partitions
// written is low. Without partitioning, the number of files created is nondeterministic, as a writer (worker node) may or may not receive data.
Session obeyConnectorPartitioning = Session.builder(defaultSession)
.setSystemProperty(PREFERRED_WRITE_PARTITIONING_MIN_NUMBER_OF_PARTITIONS, "1")
.build();
return new Object[][] {
// identity partitioning column
{obeyConnectorPartitioning, "'orderstatus'", 3},
// bucketing
{defaultSession, "'bucket(custkey, 13)'", 13},
// varchar-based
{defaultSession, "'truncate(comment, 1)'", 35},
// complex; would exceed 100 open writers limit in IcebergPageSink without write repartitioning
{defaultSession, "'bucket(custkey, 4)', 'truncate(comment, 1)'", 131},
// same column multiple times
{defaultSession, "'truncate(comment, 1)', 'orderstatus', 'bucket(comment, 2)'", 180},
};
}
@Test
public void testStatsBasedRepartitionDataOnCtas()
{
testStatsBasedRepartitionData(true);
}
@Test
public void testStatsBasedRepartitionDataOnInsert()
{
testStatsBasedRepartitionData(false);
}
private void testStatsBasedRepartitionData(boolean ctas)
{
Session sessionRepartitionSmall = Session.builder(getSession())
.setSystemProperty(PREFERRED_WRITE_PARTITIONING_MIN_NUMBER_OF_PARTITIONS, "2")
.build();
Session sessionRepartitionMany = Session.builder(getSession())
.setSystemProperty(PREFERRED_WRITE_PARTITIONING_MIN_NUMBER_OF_PARTITIONS, "5")
.setSystemProperty(SCALE_WRITERS, "false")
.build();
// Use DISTINCT to add data redistribution between source table and the writer. This makes it more likely that all writers get some data.
String sourceRelation = "(SELECT DISTINCT orderkey, custkey, orderstatus FROM tpch.tiny.orders)";
testRepartitionData(
sessionRepartitionSmall,
sourceRelation,
ctas,
"'orderstatus'",
3);
// The test uses a relatively small table (60K rows). When the engine doesn't redistribute data for writes,
// occasionally a worker node doesn't get any data and fewer files get created.
assertEventually(() -> {
testRepartitionData(
sessionRepartitionMany,
sourceRelation,
ctas,
"'orderstatus'",
9);
});
}
private void testRepartitionData(Session session, String sourceRelation, boolean ctas, String partitioning, int expectedFiles)
{
String tableName = "repartition" +
"_" + sourceRelation.replaceAll("[^a-zA-Z0-9]", "") +
(ctas ? "ctas" : "insert") +
"_" + partitioning.replaceAll("[^a-zA-Z0-9]", "") +
"_" + randomTableSuffix();
long rowCount = (long) computeScalar(session, "SELECT count(*) FROM " + sourceRelation);
if (ctas) {
assertUpdate(
session,
"CREATE TABLE " + tableName + " WITH (partitioning = ARRAY[" + partitioning + "]) " +
"AS SELECT * FROM " + sourceRelation,
rowCount);
}
else {
assertUpdate(
session,
"CREATE TABLE " + tableName + " WITH (partitioning = ARRAY[" + partitioning + "]) " +
"AS SELECT * FROM " + sourceRelation + " WITH NO DATA",
0);
// Use a source table big enough that multiple pages get written.
assertUpdate(session, "INSERT INTO " + tableName + " SELECT * FROM " + sourceRelation, rowCount);
}
// verify written data
assertThat(query(session, "TABLE " + tableName))
.skippingTypesCheck()
.matches("SELECT * FROM " + sourceRelation);
// verify data files, i.e. repartitioning took place
assertThat(query(session, "SELECT count(*) FROM \"" + tableName + "$files\""))
.matches("VALUES BIGINT '" + expectedFiles + "'");
assertUpdate(session, "DROP TABLE " + tableName);
}
@Test(dataProvider = "testDataMappingSmokeTestDataProvider")
public void testSplitPruningForFilterOnNonPartitionColumn(DataMappingTestSetup testSetup)
{
if (testSetup.isUnsupportedType()) {
return;
}
try (TestTable table = new TestTable(getQueryRunner()::execute, "test_split_pruning_non_partitioned", "(row_id int, col " + testSetup.getTrinoTypeName() + ")")) {
String tableName = table.getName();
String sampleValue = testSetup.getSampleValueLiteral();
String highValue = testSetup.getHighValueLiteral();
// Insert separately to ensure two files with one value each
assertUpdate("INSERT INTO " + tableName + " VALUES (1, " + sampleValue + ")", 1);
assertUpdate("INSERT INTO " + tableName + " VALUES (2, " + highValue + ")", 1);
assertQuery("select count(*) from \"" + tableName + "$files\"", "VALUES 2");
int expectedSplitCount = supportsIcebergFileStatistics(testSetup.getTrinoTypeName()) ? 1 : 2;
verifySplitCount("SELECT row_id FROM " + tableName, 2);
verifySplitCount("SELECT row_id FROM " + tableName + " WHERE col = " + sampleValue, expectedSplitCount);
verifySplitCount("SELECT row_id FROM " + tableName + " WHERE col = " + highValue, expectedSplitCount);
// ORC max timestamp statistics are truncated to millisecond precision and then padded with 999 microseconds.
// Therefore, sampleValue and highValue are within the max timestamp and there will be 2 splits.
verifySplitCount("SELECT row_id FROM " + tableName + " WHERE col > " + sampleValue,
(format == ORC && testSetup.getTrinoTypeName().contains("timestamp") ? 2 : expectedSplitCount));
verifySplitCount("SELECT row_id FROM " + tableName + " WHERE col < " + highValue,
(format == ORC && testSetup.getTrinoTypeName().contains("timestamp") ? 2 : expectedSplitCount));
}
}
@Test
public void testGetIcebergTableProperties()
{
assertUpdate("CREATE TABLE test_iceberg_get_table_props (x BIGINT)");
assertThat(query("SELECT * FROM \"test_iceberg_get_table_props$properties\""))
.matches(format("VALUES (VARCHAR 'write.format.default', VARCHAR '%s')", format.name()));
dropTable("test_iceberg_get_table_props");
}
protected abstract boolean supportsIcebergFileStatistics(String typeName);
@Test(dataProvider = "testDataMappingSmokeTestDataProvider")
public void testSplitPruningFromDataFileStatistics(DataMappingTestSetup testSetup)
{
if (testSetup.isUnsupportedType()) {
return;
}
try (TestTable table = new TestTable(
getQueryRunner()::execute,
"test_split_pruning_data_file_statistics",
// Random double is needed to make sure rows are different. Otherwise compression may deduplicate rows, resulting in only one row group
"(col " + testSetup.getTrinoTypeName() + ", r double)")) {
String tableName = table.getName();
String values =
Stream.concat(
nCopies(100, testSetup.getSampleValueLiteral()).stream(),
nCopies(100, testSetup.getHighValueLiteral()).stream())
.map(value -> "(" + value + ", rand())")
.collect(Collectors.joining(", "));
assertUpdate(withSmallRowGroups(getSession()), "INSERT INTO " + tableName + " VALUES " + values, 200);
String query = "SELECT * FROM " + tableName + " WHERE col = " + testSetup.getSampleValueLiteral();
verifyPredicatePushdownDataRead(query, supportsRowGroupStatistics(testSetup.getTrinoTypeName()));
}
}
protected abstract Session withSmallRowGroups(Session session);
protected abstract boolean supportsRowGroupStatistics(String typeName);
private void verifySplitCount(String query, int expectedSplitCount)
{
ResultWithQueryId<MaterializedResult> selectAllPartitionsResult = getDistributedQueryRunner().executeWithQueryId(getSession(), query);
assertEqualsIgnoreOrder(selectAllPartitionsResult.getResult().getMaterializedRows(), computeActual(withoutPredicatePushdown(getSession()), query).getMaterializedRows());
verifySplitCount(selectAllPartitionsResult.getQueryId(), expectedSplitCount);
}
private void verifyPredicatePushdownDataRead(@Language("SQL") String query, boolean supportsPushdown)
{
ResultWithQueryId<MaterializedResult> resultWithPredicatePushdown = getDistributedQueryRunner().executeWithQueryId(getSession(), query);
ResultWithQueryId<MaterializedResult> resultWithoutPredicatePushdown = getDistributedQueryRunner().executeWithQueryId(
withoutPredicatePushdown(getSession()),
query);
DataSize withPushdownDataSize = getOperatorStats(resultWithPredicatePushdown.getQueryId()).getInputDataSize();
DataSize withoutPushdownDataSize = getOperatorStats(resultWithoutPredicatePushdown.getQueryId()).getInputDataSize();
if (supportsPushdown) {
assertThat(withPushdownDataSize).isLessThan(withoutPushdownDataSize);
}
else {
assertThat(withPushdownDataSize).isEqualTo(withoutPushdownDataSize);
}
}
private Session withoutPredicatePushdown(Session session)
{
return Session.builder(session)
.setSystemProperty("allow_pushdown_into_connectors", "false")
.build();
}
private void verifySplitCount(QueryId queryId, long expectedSplitCount)
{
checkArgument(expectedSplitCount >= 0);
OperatorStats operatorStats = getOperatorStats(queryId);
if (expectedSplitCount > 0) {
assertThat(operatorStats.getTotalDrivers()).isEqualTo(expectedSplitCount);
assertThat(operatorStats.getPhysicalInputPositions()).isGreaterThan(0);
}
else {
// expectedSplitCount == 0
assertThat(operatorStats.getTotalDrivers()).isEqualTo(1);
assertThat(operatorStats.getPhysicalInputPositions()).isEqualTo(0);
}
}
private OperatorStats getOperatorStats(QueryId queryId)
{
try {
return getDistributedQueryRunner().getCoordinator()
.getQueryManager()
.getFullQueryInfo(queryId)
.getQueryStats()
.getOperatorSummaries()
.stream()
.filter(summary -> summary.getOperatorType().startsWith("TableScan") || summary.getOperatorType().startsWith("Scan"))
.collect(onlyElement());
}
catch (NoSuchElementException e) {
throw new RuntimeException("Couldn't find operator summary, probably due to query statistic collection error", e);
}
}
@Override
protected TestTable createTableWithDefaultColumns()
{
throw new SkipException("Iceberg connector does not support column default values");
}
@Override
protected Optional<DataMappingTestSetup> filterDataMappingSmokeTestData(DataMappingTestSetup dataMappingTestSetup)
{
String typeName = dataMappingTestSetup.getTrinoTypeName();
if (typeName.equals("tinyint")
|| typeName.equals("smallint")
|| typeName.startsWith("char(")) {
// These types are not supported by Iceberg
return Optional.of(dataMappingTestSetup.asUnsupported());
}
// According to Iceberg specification all time and timestamp values are stored with microsecond precision.
if (typeName.equals("time") ||
typeName.equals("timestamp") ||
typeName.equals("timestamp(3) with time zone")) {
return Optional.of(dataMappingTestSetup.asUnsupported());
}
return Optional.of(dataMappingTestSetup);
}
@Override
protected Optional<DataMappingTestSetup> filterCaseSensitiveDataMappingTestData(DataMappingTestSetup dataMappingTestSetup)
{
String typeName = dataMappingTestSetup.getTrinoTypeName();
if (typeName.equals("char(1)")) {
return Optional.of(dataMappingTestSetup.asUnsupported());
}
return Optional.of(dataMappingTestSetup);
}
@Test
public void testAmbiguousColumnsWithDots()
{
assertThatThrownBy(() -> assertUpdate("CREATE TABLE ambiguous (\"a.cow\" BIGINT, a ROW(cow BIGINT))"))
.hasMessage("Invalid schema: multiple fields for name a.cow: 1 and 3");
assertUpdate("CREATE TABLE ambiguous (\"a.cow\" BIGINT, b ROW(cow BIGINT))");
assertThatThrownBy(() -> assertUpdate("ALTER TABLE ambiguous RENAME COLUMN b TO a"))
.hasMessage("Invalid schema: multiple fields for name a.cow: 1 and 3");
assertUpdate("DROP TABLE ambiguous");
assertUpdate("CREATE TABLE ambiguous (a ROW(cow BIGINT))");
assertThatThrownBy(() -> assertUpdate("ALTER TABLE ambiguous ADD COLUMN \"a.cow\" BIGINT"))
.hasMessage("Cannot add column with ambiguous name: a.cow, use addColumn(parent, name, type)");
assertUpdate("DROP TABLE ambiguous");
}
@Test
public void testSchemaEvolutionWithDereferenceProjections()
{
// Fields are identified by unique IDs. If a column is dropped and recreated with the same name, it should not return the dropped data.
assertUpdate("CREATE TABLE evolve_test (dummy BIGINT, a row(b BIGINT, c VARCHAR))");
assertUpdate("INSERT INTO evolve_test VALUES (1, ROW(1, 'abc'))", 1);
assertUpdate("ALTER TABLE evolve_test DROP COLUMN a");
assertUpdate("ALTER TABLE evolve_test ADD COLUMN a ROW(b VARCHAR, c BIGINT)");
assertQuery("SELECT a.b FROM evolve_test", "VALUES NULL");
assertUpdate("DROP TABLE evolve_test");
// Verify that changing subfield ordering does not revive dropped data
assertUpdate("CREATE TABLE evolve_test (dummy BIGINT, a ROW(b BIGINT, c VARCHAR), d BIGINT) with (partitioning = ARRAY['d'])");
assertUpdate("INSERT INTO evolve_test VALUES (1, ROW(2, 'abc'), 3)", 1);
assertUpdate("ALTER TABLE evolve_test DROP COLUMN a");
assertUpdate("ALTER TABLE evolve_test ADD COLUMN a ROW(c VARCHAR, b BIGINT)");
assertUpdate("INSERT INTO evolve_test VALUES (4, 5, ROW('def', 6))", 1);
assertQuery("SELECT a.b FROM evolve_test WHERE d = 3", "VALUES NULL");
assertQuery("SELECT a.b FROM evolve_test WHERE d = 5", "VALUES 6");
assertUpdate("DROP TABLE evolve_test");
}
@Test
public void testHighlyNestedData()
{
assertUpdate("CREATE TABLE nested_data (id INT, row_t ROW(f1 INT, f2 INT, row_t ROW (f1 INT, f2 INT, row_t ROW(f1 INT, f2 INT))))");
assertUpdate("INSERT INTO nested_data VALUES (1, ROW(2, 3, ROW(4, 5, ROW(6, 7)))), (11, ROW(12, 13, ROW(14, 15, ROW(16, 17))))", 2);
assertUpdate("INSERT INTO nested_data VALUES (21, ROW(22, 23, ROW(24, 25, ROW(26, 27))))", 1);
// Test select projected columns, with and without their parent column
assertQuery("SELECT id, row_t.row_t.row_t.f2 FROM nested_data", "VALUES (1, 7), (11, 17), (21, 27)");
assertQuery("SELECT id, row_t.row_t.row_t.f2, CAST(row_t AS JSON) FROM nested_data",
"VALUES (1, 7, '{\"f1\":2,\"f2\":3,\"row_t\":{\"f1\":4,\"f2\":5,\"row_t\":{\"f1\":6,\"f2\":7}}}'), " +
"(11, 17, '{\"f1\":12,\"f2\":13,\"row_t\":{\"f1\":14,\"f2\":15,\"row_t\":{\"f1\":16,\"f2\":17}}}'), " +
"(21, 27, '{\"f1\":22,\"f2\":23,\"row_t\":{\"f1\":24,\"f2\":25,\"row_t\":{\"f1\":26,\"f2\":27}}}')");
// Test predicates on immediate child column and deeper nested column
assertQuery("SELECT id, CAST(row_t.row_t.row_t AS JSON) FROM nested_data WHERE row_t.row_t.row_t.f2 = 27", "VALUES (21, '{\"f1\":26,\"f2\":27}')");
assertQuery("SELECT id, CAST(row_t.row_t.row_t AS JSON) FROM nested_data WHERE row_t.row_t.row_t.f2 > 20", "VALUES (21, '{\"f1\":26,\"f2\":27}')");
assertQuery("SELECT id, CAST(row_t AS JSON) FROM nested_data WHERE row_t.row_t.row_t.f2 = 27",
"VALUES (21, '{\"f1\":22,\"f2\":23,\"row_t\":{\"f1\":24,\"f2\":25,\"row_t\":{\"f1\":26,\"f2\":27}}}')");
assertQuery("SELECT id, CAST(row_t AS JSON) FROM nested_data WHERE row_t.row_t.row_t.f2 > 20",
"VALUES (21, '{\"f1\":22,\"f2\":23,\"row_t\":{\"f1\":24,\"f2\":25,\"row_t\":{\"f1\":26,\"f2\":27}}}')");
// Test predicates on parent columns
assertQuery("SELECT id, row_t.row_t.row_t.f1 FROM nested_data WHERE row_t.row_t.row_t = ROW(16, 17)", "VALUES (11, 16)");
assertQuery("SELECT id, row_t.row_t.row_t.f1 FROM nested_data WHERE row_t = ROW(22, 23, ROW(24, 25, ROW(26, 27)))", "VALUES (21, 26)");
assertUpdate("DROP TABLE IF EXISTS nested_data");
}
@Test
public void testProjectionPushdownAfterRename()
{
assertUpdate("CREATE TABLE projection_pushdown_after_rename (id INT, a ROW(b INT, c ROW (d INT)))");
assertUpdate("INSERT INTO projection_pushdown_after_rename VALUES (1, ROW(2, ROW(3))), (11, ROW(12, ROW(13)))", 2);
assertUpdate("INSERT INTO projection_pushdown_after_rename VALUES (21, ROW(22, ROW(23)))", 1);
String expected = "VALUES (11, JSON '{\"b\":12,\"c\":{\"d\":13}}', 13)";
assertQuery("SELECT id, CAST(a AS JSON), a.c.d FROM projection_pushdown_after_rename WHERE a.b = 12", expected);
assertUpdate("ALTER TABLE projection_pushdown_after_rename RENAME COLUMN a TO row_t");
assertQuery("SELECT id, CAST(row_t AS JSON), row_t.c.d FROM projection_pushdown_after_rename WHERE row_t.b = 12", expected);
assertUpdate("DROP TABLE IF EXISTS projection_pushdown_after_rename");
}
@Test
public void testProjectionWithCaseSensitiveField()
{
assertUpdate("CREATE TABLE projection_with_case_sensitive_field (id INT, a ROW(\"UPPER_CASE\" INT, \"lower_case\" INT, \"MiXeD_cAsE\" INT))");
assertUpdate("INSERT INTO projection_with_case_sensitive_field VALUES (1, ROW(2, 3, 4)), (5, ROW(6, 7, 8))", 2);
String expected = "VALUES (2, 3, 4), (6, 7, 8)";
assertQuery("SELECT a.UPPER_CASE, a.lower_case, a.MiXeD_cAsE FROM projection_with_case_sensitive_field", expected);
assertQuery("SELECT a.upper_case, a.lower_case, a.mixed_case FROM projection_with_case_sensitive_field", expected);
assertQuery("SELECT a.UPPER_CASE, a.LOWER_CASE, a.MIXED_CASE FROM projection_with_case_sensitive_field", expected);
assertUpdate("DROP TABLE IF EXISTS projection_with_case_sensitive_field");
}
@Test
public void testProjectionPushdownReadsLessData()
{
String largeVarchar = "ZZZ".repeat(1000);
assertUpdate("CREATE TABLE projection_pushdown_reads_less_data (id INT, a ROW(b VARCHAR, c INT))");
assertUpdate(
format("INSERT INTO projection_pushdown_reads_less_data VALUES (1, ROW('%s', 3)), (11, ROW('%1$s', 13)), (21, ROW('%1$s', 23)), (31, ROW('%1$s', 33))", largeVarchar),
4);
String selectQuery = "SELECT a.c FROM projection_pushdown_reads_less_data";
Set<Integer> expected = ImmutableSet.of(3, 13, 23, 33);
Session sessionWithoutPushdown = Session.builder(getSession())
.setCatalogSessionProperty(ICEBERG_CATALOG, "projection_pushdown_enabled", "false")
.build();
assertQueryStats(
getSession(),
selectQuery,
statsWithPushdown -> {
DataSize processedDataSizeWithPushdown = statsWithPushdown.getProcessedInputDataSize();
assertQueryStats(
sessionWithoutPushdown,
selectQuery,
statsWithoutPushdown -> assertThat(statsWithoutPushdown.getProcessedInputDataSize()).isGreaterThan(processedDataSizeWithPushdown),
results -> assertEquals(results.getOnlyColumnAsSet(), expected));
},
results -> assertEquals(results.getOnlyColumnAsSet(), expected));
assertUpdate("DROP TABLE IF EXISTS projection_pushdown_reads_less_data");
}
@Test
public void testProjectionPushdownOnPartitionedTables()
{
assertUpdate("CREATE TABLE table_with_partition_at_beginning (id BIGINT, root ROW(f1 BIGINT, f2 BIGINT)) WITH (partitioning = ARRAY['id'])");
assertUpdate("INSERT INTO table_with_partition_at_beginning VALUES (1, ROW(1, 2)), (1, ROW(2, 3)), (1, ROW(3, 4))", 3);
assertQuery("SELECT id, root.f2 FROM table_with_partition_at_beginning", "VALUES (1, 2), (1, 3), (1, 4)");
assertUpdate("DROP TABLE table_with_partition_at_beginning");
assertUpdate("CREATE TABLE table_with_partition_at_end (root ROW(f1 BIGINT, f2 BIGINT), id BIGINT) WITH (partitioning = ARRAY['id'])");
assertUpdate("INSERT INTO table_with_partition_at_end VALUES (ROW(1, 2), 1), (ROW(2, 3), 1), (ROW(3, 4), 1)", 3);
assertQuery("SELECT root.f2, id FROM table_with_partition_at_end", "VALUES (2, 1), (3, 1), (4, 1)");
assertUpdate("DROP TABLE table_with_partition_at_end");
}
@Test
public void testProjectionPushdownOnPartitionedTableWithComments()
{
assertUpdate("CREATE TABLE test_projection_pushdown_comments (id BIGINT COMMENT 'id', qid BIGINT COMMENT 'QID', root ROW(f1 BIGINT, f2 BIGINT) COMMENT 'root') WITH (partitioning = ARRAY['id'])");
assertUpdate("INSERT INTO test_projection_pushdown_comments VALUES (1, 1, ROW(1, 2)), (1, 2, ROW(2, 3)), (1, 3, ROW(3, 4))", 3);
assertQuery("SELECT id, root.f2 FROM test_projection_pushdown_comments", "VALUES (1, 2), (1, 3), (1, 4)");
// Query with predicates on both nested and top-level columns (with partition column)
assertQuery("SELECT id, root.f2 FROM test_projection_pushdown_comments WHERE id = 1 AND qid = 1 AND root.f1 = 1", "VALUES (1, 2)");
// Query with predicates on both nested and top-level columns (no partition column)
assertQuery("SELECT id, root.f2 FROM test_projection_pushdown_comments WHERE qid = 2 AND root.f1 = 2", "VALUES (1, 3)");
// Query with predicates on top-level columns only
assertQuery("SELECT id, root.f2 FROM test_projection_pushdown_comments WHERE id = 1 AND qid = 1", "VALUES (1, 2)");
// Query with predicates on nested columns only
assertQuery("SELECT id, root.f2 FROM test_projection_pushdown_comments WHERE root.f1 = 2", "VALUES (1, 3)");
assertUpdate("DROP TABLE IF EXISTS test_projection_pushdown_comments");
}
@Test(dataProvider = "tableFormatVersion")
public void testOptimize(int formatVersion)
throws Exception
{
String tableName = "test_optimize_" + randomTableSuffix();
assertUpdate("CREATE TABLE " + tableName + " (key integer, value varchar) WITH (format_version = " + formatVersion + ")");
// DistributedQueryRunner sets node-scheduler.include-coordinator by default, so count the coordinator as a worker
int workerCount = getQueryRunner().getNodeCount();
// optimize an empty table
assertQuerySucceeds("ALTER TABLE " + tableName + " EXECUTE OPTIMIZE");
assertThat(getActiveFiles(tableName)).isEmpty();
assertUpdate("INSERT INTO " + tableName + " VALUES (11, 'eleven')", 1);
assertUpdate("INSERT INTO " + tableName + " VALUES (12, 'zwölf')", 1);
assertUpdate("INSERT INTO " + tableName + " VALUES (13, 'trzynaście')", 1);
assertUpdate("INSERT INTO " + tableName + " VALUES (14, 'quatorze')", 1);
assertUpdate("INSERT INTO " + tableName + " VALUES (15, 'пʼятнадцять')", 1);
List<String> initialFiles = getActiveFiles(tableName);
assertThat(initialFiles)
.hasSize(5)
// Verify we have sufficiently many test rows with respect to worker count.
.hasSizeGreaterThan(workerCount);
computeActual("ALTER TABLE " + tableName + " EXECUTE OPTIMIZE");
assertThat(query("SELECT sum(key), listagg(value, ' ') WITHIN GROUP (ORDER BY key) FROM " + tableName))
.matches("VALUES (BIGINT '65', VARCHAR 'eleven zwölf trzynaście quatorze пʼятнадцять')");
List<String> updatedFiles = getActiveFiles(tableName);
assertThat(updatedFiles)
.hasSizeBetween(1, workerCount)
.doesNotContainAnyElementsOf(initialFiles);
// No files should be removed (this is expire_snapshots's job, when it exists)
assertThat(getAllDataFilesFromTableDirectory(tableName))
.containsExactlyInAnyOrderElementsOf(concat(initialFiles, updatedFiles));
// optimize with a low file size threshold; nothing should change
computeActual("ALTER TABLE " + tableName + " EXECUTE OPTIMIZE (file_size_threshold => '33B')");
assertThat(query("SELECT sum(key), listagg(value, ' ') WITHIN GROUP (ORDER BY key) FROM " + tableName))
.matches("VALUES (BIGINT '65', VARCHAR 'eleven zwölf trzynaście quatorze пʼятнадцять')");
assertThat(getActiveFiles(tableName)).isEqualTo(updatedFiles);
assertThat(getAllDataFilesFromTableDirectory(tableName))
.containsExactlyInAnyOrderElementsOf(concat(initialFiles, updatedFiles));
// optimize with delimited procedure name
assertQueryFails("ALTER TABLE " + tableName + " EXECUTE \"optimize\"", "Procedure optimize not registered for catalog iceberg");
assertUpdate("ALTER TABLE " + tableName + " EXECUTE \"OPTIMIZE\"");
// optimize with delimited parameter name (and procedure name)
assertUpdate("ALTER TABLE " + tableName + " EXECUTE \"OPTIMIZE\" (\"file_size_threshold\" => '33B')"); // TODO (https://github.com/trinodb/trino/issues/11326) this should fail
assertUpdate("ALTER TABLE " + tableName + " EXECUTE \"OPTIMIZE\" (\"FILE_SIZE_THRESHOLD\" => '33B')");
assertUpdate("DROP TABLE " + tableName);
}
@Test(dataProvider = "tableFormatVersion")
public void testOptimizeForPartitionedTable(int formatVersion)
throws IOException
{
// This test will have its own session to make sure partitioning is indeed forced and is not a result
// of session configuration
Session session = testSessionBuilder()
.setCatalog(getQueryRunner().getDefaultSession().getCatalog())
.setSchema(getQueryRunner().getDefaultSession().getSchema())
.setSystemProperty("use_preferred_write_partitioning", "true")
.setSystemProperty("preferred_write_partitioning_min_number_of_partitions", "100")
.build();
String tableName = "test_repartitiong_during_optimize_" + randomTableSuffix();
assertUpdate(session, "CREATE TABLE " + tableName + " (key varchar, value integer) WITH (format_version = " + formatVersion + ", partitioning = ARRAY['key'])");
// optimize an empty table
assertQuerySucceeds(session, "ALTER TABLE " + tableName + " EXECUTE OPTIMIZE");
assertUpdate(session, "INSERT INTO " + tableName + " VALUES ('one', 1)", 1);
assertUpdate(session, "INSERT INTO " + tableName + " VALUES ('one', 2)", 1);
assertUpdate(session, "INSERT INTO " + tableName + " VALUES ('one', 3)", 1);
assertUpdate(session, "INSERT INTO " + tableName + " VALUES ('one', 4)", 1);
assertUpdate(session, "INSERT INTO " + tableName + " VALUES ('one', 5)", 1);
assertUpdate(session, "INSERT INTO " + tableName + " VALUES ('one', 6)", 1);
assertUpdate(session, "INSERT INTO " + tableName + " VALUES ('one', 7)", 1);
assertUpdate(session, "INSERT INTO " + tableName + " VALUES ('two', 8)", 1);
assertUpdate(session, "INSERT INTO " + tableName + " VALUES ('two', 9)", 1);
assertUpdate(session, "INSERT INTO " + tableName + " VALUES ('three', 10)", 1);
List<String> initialFiles = getActiveFiles(tableName);
assertThat(initialFiles).hasSize(10);
computeActual(session, "ALTER TABLE " + tableName + " EXECUTE OPTIMIZE");
assertThat(query(session, "SELECT sum(value), listagg(key, ' ') WITHIN GROUP (ORDER BY key) FROM " + tableName))
.matches("VALUES (BIGINT '55', VARCHAR 'one one one one one one one three two two')");
List<String> updatedFiles = getActiveFiles(tableName);
// as we force repartitioning, there should be only 3 files, one per partition
assertThat(updatedFiles).hasSize(3);
assertThat(getAllDataFilesFromTableDirectory(tableName)).containsExactlyInAnyOrderElementsOf(concat(initialFiles, updatedFiles));
assertUpdate("DROP TABLE " + tableName);
}
@DataProvider
public Object[][] tableFormatVersion()
{
return IntStream.rangeClosed(IcebergConfig.FORMAT_VERSION_SUPPORT_MIN, IcebergConfig.FORMAT_VERSION_SUPPORT_MAX).boxed()
.collect(DataProviders.toDataProvider());
}
@Test
public void testOptimizeTableAfterDeleteWithFormatVersion2()
{
String tableName = "test_optimize_" + randomTableSuffix();
assertUpdate("CREATE TABLE " + tableName + " AS SELECT * FROM nation", 25);
List<String> initialFiles = getActiveFiles(tableName);
assertUpdate("DELETE FROM " + tableName + " WHERE nationkey = 7", 1);
// Verify that delete files exist
assertQuery(
"SELECT summary['total-delete-files'] FROM \"" + tableName + "$snapshots\" WHERE snapshot_id = " + getCurrentSnapshotId(tableName),
"VALUES '1'");
computeActual("ALTER TABLE " + tableName + " EXECUTE OPTIMIZE");
List<String> updatedFiles = getActiveFiles(tableName);
assertThat(updatedFiles)
.hasSize(1)
.isNotEqualTo(initialFiles);
assertThat(query("SELECT * FROM " + tableName))
.matches("SELECT * FROM nation WHERE nationkey != 7");
assertUpdate("DROP TABLE " + tableName);
}
private List<String> getActiveFiles(String tableName)
{
return computeActual(format("SELECT file_path FROM \"%s$files\"", tableName)).getOnlyColumn()
.map(String.class::cast)
.collect(toImmutableList());
}
private List<String> getAllDataFilesFromTableDirectory(String tableName)
throws IOException
{
String schema = getSession().getSchema().orElseThrow();
Path tableDataDir = getDistributedQueryRunner().getCoordinator().getBaseDataDir().resolve("iceberg_data").resolve(schema).resolve(tableName).resolve("data");
try (Stream<Path> walk = Files.walk(tableDataDir)) {
return walk
.filter(Files::isRegularFile)
.filter(path -> !path.getFileName().toString().matches("\\..*\\.crc"))
.map(Path::toString)
.collect(toImmutableList());
}
}
@Test
public void testOptimizeParameterValidation()
{
assertQueryFails(
"ALTER TABLE no_such_table_exists EXECUTE OPTIMIZE",
"\\Qline 1:1: Table 'iceberg.tpch.no_such_table_exists' does not exist");
assertQueryFails(
"ALTER TABLE nation EXECUTE OPTIMIZE (file_size_threshold => '33')",
"\\QUnable to set catalog 'iceberg' table procedure 'OPTIMIZE' property 'file_size_threshold' to ['33']: size is not a valid data size string: 33");
assertQueryFails(
"ALTER TABLE nation EXECUTE OPTIMIZE (file_size_threshold => '33s')",
"\\QUnable to set catalog 'iceberg' table procedure 'OPTIMIZE' property 'file_size_threshold' to ['33s']: Unknown unit: s");
}
@Test
public void testTargetMaxFileSize()
{
String tableName = "test_default_max_file_size" + randomTableSuffix();
@Language("SQL") String createTableSql = format("CREATE TABLE %s AS SELECT * FROM tpch.sf1.lineitem LIMIT 100000", tableName);
Session session = Session.builder(getSession())
.setSystemProperty("task_writer_count", "1")
.build();
assertUpdate(session, createTableSql, 100000);
List<String> initialFiles = getActiveFiles(tableName);
assertThat(initialFiles.size()).isLessThanOrEqualTo(3);
assertUpdate(format("DROP TABLE %s", tableName));
DataSize maxSize = DataSize.of(40, DataSize.Unit.KILOBYTE);
session = Session.builder(getSession())
.setSystemProperty("task_writer_count", "1")
.setCatalogSessionProperty("iceberg", "target_max_file_size", maxSize.toString())
.build();
assertUpdate(session, createTableSql, 100000);
assertThat(query(format("SELECT count(*) FROM %s", tableName))).matches("VALUES BIGINT '100000'");
List<String> updatedFiles = getActiveFiles(tableName);
assertThat(updatedFiles.size()).isGreaterThan(10);
computeActual(format("SELECT file_size_in_bytes FROM \"%s$files\"", tableName))
.getMaterializedRows()
// as target_max_file_size is set to a quite low value, the created files can end up bigger,
// so to be safe we only check that they are not much bigger
.forEach(row -> assertThat((Long) row.getField(0)).isBetween(1L, maxSize.toBytes() * 3));
}
@Test
public void testDroppingIcebergAndCreatingANewTableWithTheSameNameShouldBePossible()
{
assertUpdate("CREATE TABLE test_iceberg_recreate (a_int) AS VALUES (1)", 1);
assertThat(query("SELECT min(a_int) FROM test_iceberg_recreate")).matches("VALUES 1");
dropTable("test_iceberg_recreate");
assertUpdate("CREATE TABLE test_iceberg_recreate (a_varchar) AS VALUES ('Trino')", 1);
assertThat(query("SELECT min(a_varchar) FROM test_iceberg_recreate")).matches("VALUES CAST('Trino' AS varchar)");
dropTable("test_iceberg_recreate");
}
@Test
public void testPathHiddenColumn()
{
String tableName = "test_path_" + randomTableSuffix();
@Language("SQL") String createTable = "CREATE TABLE " + tableName + " " +
"WITH ( partitioning = ARRAY['zip'] ) AS " +
"SELECT * FROM (VALUES " +
"(0, 0), (3, 0), (6, 0), " +
"(1, 1), (4, 1), (7, 1), " +
"(2, 2), (5, 2) " +
" ) t(userid, zip)";
assertUpdate(createTable, 8);
MaterializedResult expectedColumns = resultBuilder(getSession(), VARCHAR, VARCHAR, VARCHAR, VARCHAR)
.row("userid", "integer", "", "")
.row("zip", "integer", "", "")
.build();
MaterializedResult actualColumns = computeActual(format("DESCRIBE %s", tableName));
// Describe output should not have the $path hidden column
assertEquals(actualColumns, expectedColumns);
assertThat(query("SELECT file_path FROM \"" + tableName + "$files\""))
.matches("SELECT DISTINCT \"$path\" as file_path FROM " + tableName);
String somePath = (String) computeScalar("SELECT \"$path\" FROM " + tableName + " WHERE userid = 2");
assertThat(query("SELECT userid FROM " + tableName + " WHERE \"$path\" = '" + somePath + "'"))
.matches("VALUES 2, 5");
assertThat(query("SELECT userid FROM " + tableName + " WHERE \"$path\" = '" + somePath + "' AND userid > 0"))
.matches("VALUES 2, 5");
assertUpdate("DROP TABLE " + tableName);
}
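// EXPIRE_SNAPSHOTS with retention_threshold => '0s' should remove all but the current snapshot and its metadata while keeping the table readable.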
@Test
public void testExpireSnapshots()
throws Exception
{
String tableName = "test_expiring_snapshots_" + randomTableSuffix();
Session sessionWithShortRetentionUnlocked = prepareCleanUpSession();
assertUpdate("CREATE TABLE " + tableName + " (key varchar, value integer)");
assertUpdate("INSERT INTO " + tableName + " VALUES ('one', 1)", 1);
assertUpdate("INSERT INTO " + tableName + " VALUES ('two', 2)", 1);
assertThat(query("SELECT sum(value), listagg(key, ' ') WITHIN GROUP (ORDER BY key) FROM " + tableName))
.matches("VALUES (BIGINT '3', VARCHAR 'one two')");
List<Long> initialSnapshots = getSnapshotIds(tableName);
List<String> initialFiles = getAllMetadataFilesFromTableDirectoryForTable(tableName);
assertQuerySucceeds(sessionWithShortRetentionUnlocked, "ALTER TABLE " + tableName + " EXECUTE EXPIRE_SNAPSHOTS (retention_threshold => '0s')");
assertThat(query("SELECT sum(value), listagg(key, ' ') WITHIN GROUP (ORDER BY key) FROM " + tableName))
.matches("VALUES (BIGINT '3', VARCHAR 'one two')");
List<String> updatedFiles = getAllMetadataFilesFromTableDirectoryForTable(tableName);
List<Long> updatedSnapshots = getSnapshotIds(tableName);
assertThat(updatedFiles.size()).isEqualTo(initialFiles.size() - 1);
assertThat(updatedSnapshots.size()).isLessThan(initialSnapshots.size());
assertThat(updatedSnapshots.size()).isEqualTo(1);
assertThat(initialSnapshots).containsAll(updatedSnapshots);
}
@Test
public void testExpireSnapshotsPartitionedTable()
throws Exception
{
String tableName = "test_expiring_snapshots_partitioned_table" + randomTableSuffix();
Session sessionWithShortRetentionUnlocked = prepareCleanUpSession();
assertUpdate("CREATE TABLE " + tableName + " (col1 BIGINT, col2 BIGINT) WITH (partitioning = ARRAY['col1'])");
assertUpdate("INSERT INTO " + tableName + " VALUES(1, 100), (1, 101), (1, 102), (2, 200), (2, 201), (3, 300)", 6);
assertUpdate("DELETE FROM " + tableName + " WHERE col1 = 1", 3);
assertUpdate("INSERT INTO " + tableName + " VALUES(4, 400)", 1);
assertQuery("SELECT sum(col2) FROM " + tableName, "SELECT 1101");
List<String> initialDataFiles = getAllDataFilesFromTableDirectory(tableName);
List<Long> initialSnapshots = getSnapshotIds(tableName);
assertQuerySucceeds(sessionWithShortRetentionUnlocked, "ALTER TABLE " + tableName + " EXECUTE EXPIRE_SNAPSHOTS (retention_threshold => '0s')");
List<String> updatedDataFiles = getAllDataFilesFromTableDirectory(tableName);
List<Long> updatedSnapshots = getSnapshotIds(tableName);
assertQuery("SELECT sum(col2) FROM " + tableName, "SELECT 1101");
assertThat(updatedDataFiles.size()).isLessThan(initialDataFiles.size());
assertThat(updatedSnapshots.size()).isLessThan(initialSnapshots.size());
}
@Test
public void testExplainExpireSnapshotOutput()
{
String tableName = "test_expiring_snapshots_output" + randomTableSuffix();
assertUpdate("CREATE TABLE " + tableName + " (key varchar, value integer) WITH (partitioning = ARRAY['key'])");
assertUpdate("INSERT INTO " + tableName + " VALUES ('one', 1)", 1);
assertUpdate("INSERT INTO " + tableName + " VALUES ('two', 2)", 1);
assertExplain("EXPLAIN ALTER TABLE " + tableName + " EXECUTE EXPIRE_SNAPSHOTS (retention_threshold => '0s')",
"SimpleTableExecute\\[iceberg:schemaTableName:tpch.test_expiring_snapshots.*\\{retentionThreshold=0\\.00s}.*");
}
@Test
public void testExpireSnapshotsParameterValidation()
{
assertQueryFails(
"ALTER TABLE no_such_table_exists EXECUTE EXPIRE_SNAPSHOTS",
"\\Qline 1:1: Table 'iceberg.tpch.no_such_table_exists' does not exist");
assertQueryFails(
"ALTER TABLE nation EXECUTE EXPIRE_SNAPSHOTS (retention_threshold => '33')",
"\\QUnable to set catalog 'iceberg' table procedure 'EXPIRE_SNAPSHOTS' property 'retention_threshold' to ['33']: duration is not a valid data duration string: 33");
assertQueryFails(
"ALTER TABLE nation EXECUTE EXPIRE_SNAPSHOTS (retention_threshold => '33mb')",
"\\QUnable to set catalog 'iceberg' table procedure 'EXPIRE_SNAPSHOTS' property 'retention_threshold' to ['33mb']: Unknown time unit: mb");
assertQueryFails(
"ALTER TABLE nation EXECUTE EXPIRE_SNAPSHOTS (retention_threshold => '33s')",
"\\QRetention specified (33.00s) is shorter than the minimum retention configured in the system (7.00d). Minimum retention can be changed with iceberg.expire_snapshots.min-retention configuration property or iceberg.expire_snapshots_min_retention session property");
}
@Test
public void testRemoveOrphanFiles()
throws Exception
{
String tableName = "test_deleting_orphan_files_unnecessary_files" + randomTableSuffix();
Session sessionWithShortRetentionUnlocked = prepareCleanUpSession();
assertUpdate("CREATE TABLE " + tableName + " (key varchar, value integer)");
assertUpdate("INSERT INTO " + tableName + " VALUES ('one', 1)", 1);
Path orphanFile = Files.createFile(Path.of(getIcebergTableDataPath(tableName).toString(), "invalidData." + format));
List<String> initialDataFiles = getAllDataFilesFromTableDirectory(tableName);
assertQuerySucceeds(sessionWithShortRetentionUnlocked, "ALTER TABLE " + tableName + " EXECUTE REMOVE_ORPHAN_FILES (retention_threshold => '0s')");
List<String> updatedDataFiles = getAllDataFilesFromTableDirectory(tableName);
assertThat(updatedDataFiles.size()).isLessThan(initialDataFiles.size());
assertThat(updatedDataFiles).doesNotContain(orphanFile.toString());
}
@Test
public void testIfRemoveOrphanFilesCleansUnnecessaryDataFilesInPartitionedTable()
throws Exception
{
String tableName = "test_deleting_orphan_files_unnecessary_files" + randomTableSuffix();
Session sessionWithShortRetentionUnlocked = prepareCleanUpSession();
assertUpdate("CREATE TABLE " + tableName + " (key varchar, value integer) WITH (partitioning = ARRAY['key'])");
assertUpdate("INSERT INTO " + tableName + " VALUES ('one', 1)", 1);
assertUpdate("INSERT INTO " + tableName + " VALUES ('two', 2)", 1);
Path orphanFile = Files.createFile(Path.of(getIcebergTableDataPath(tableName) + "/key=one/", "invalidData." + format));
List<String> initialDataFiles = getAllDataFilesFromTableDirectory(tableName);
assertQuerySucceeds(sessionWithShortRetentionUnlocked, "ALTER TABLE " + tableName + " EXECUTE REMOVE_ORPHAN_FILES (retention_threshold => '0s')");
List<String> updatedDataFiles = getAllDataFilesFromTableDirectory(tableName);
assertThat(updatedDataFiles.size()).isLessThan(initialDataFiles.size());
assertThat(updatedDataFiles).doesNotContain(orphanFile.toString());
}
@Test
public void testIfRemoveOrphanFilesCleansUnnecessaryMetadataFilesInPartitionedTable()
throws Exception
{
String tableName = "test_deleting_orphan_files_unnecessary_files" + randomTableSuffix();
Session sessionWithShortRetentionUnlocked = prepareCleanUpSession();
assertUpdate("CREATE TABLE " + tableName + " (key varchar, value integer) WITH (partitioning = ARRAY['key'])");
assertUpdate("INSERT INTO " + tableName + " VALUES ('one', 1)", 1);
assertUpdate("INSERT INTO " + tableName + " VALUES ('two', 2)", 1);
Path orphanMetadataFile = Files.createFile(Path.of(getIcebergTableMetadataPath(tableName).toString(), "invalidData." + format));
List<String> initialMetadataFiles = getAllMetadataFilesFromTableDirectoryForTable(tableName);
assertQuerySucceeds(sessionWithShortRetentionUnlocked, "ALTER TABLE " + tableName + " EXECUTE REMOVE_ORPHAN_FILES (retention_threshold => '0s')");
List<String> updatedMetadataFiles = getAllMetadataFilesFromTableDirectoryForTable(tableName);
assertThat(updatedMetadataFiles.size()).isLessThan(initialMetadataFiles.size());
assertThat(updatedMetadataFiles).doesNotContain(orphanMetadataFile.toString());
}
@Test
public void testCleaningUpWithTableWithSpecifiedLocationWithSlashAtTheEnd()
throws IOException
{
testCleaningUpWithTableWithSpecifiedLocation("/");
}
@Test
public void testCleaningUpWithTableWithSpecifiedLocationWithoutSlashAtTheEnd()
throws IOException
{
testCleaningUpWithTableWithSpecifiedLocation("");
}
private void testCleaningUpWithTableWithSpecifiedLocation(String suffix)
throws IOException
{
File tempDir = getDistributedQueryRunner().getCoordinator().getBaseDataDir().toFile();
String tempDirPath = tempDir.toURI().toASCIIString() + randomTableSuffix() + suffix;
String tableName = "test_table_cleaning_up_with_location" + randomTableSuffix();
assertUpdate(format("CREATE TABLE %s (key varchar, value integer) WITH(location = '%s')", tableName, tempDirPath));
assertUpdate("INSERT INTO " + tableName + " VALUES ('one', 1)", 1);
assertUpdate("INSERT INTO " + tableName + " VALUES ('two', 2)", 1);
List<String> initialFiles = getAllMetadataFilesFromTableDirectory(tempDirPath);
List<Long> initialSnapshots = getSnapshotIds(tableName);
Session sessionWithShortRetentionUnlocked = prepareCleanUpSession();
assertQuerySucceeds(sessionWithShortRetentionUnlocked, "ALTER TABLE " + tableName + " EXECUTE EXPIRE_SNAPSHOTS (retention_threshold => '0s')");
assertQuerySucceeds(sessionWithShortRetentionUnlocked, "ALTER TABLE " + tableName + " EXECUTE REMOVE_ORPHAN_FILES (retention_threshold => '0s')");
List<String> updatedFiles = getAllMetadataFilesFromTableDirectory(tempDirPath);
List<Long> updatedSnapshots = getSnapshotIds(tableName);
assertThat(updatedFiles.size()).isEqualTo(initialFiles.size() - 1);
assertThat(updatedSnapshots.size()).isLessThan(initialSnapshots.size());
assertThat(updatedSnapshots.size()).isEqualTo(1);
assertThat(initialSnapshots).containsAll(updatedSnapshots);
}
@Test
public void testExplainRemoveOrphanFilesOutput()
{
String tableName = "test_remove_orphan_files_output" + randomTableSuffix();
assertUpdate("CREATE TABLE " + tableName + " (key varchar, value integer) WITH (partitioning = ARRAY['key'])");
assertUpdate("INSERT INTO " + tableName + " VALUES ('one', 1)", 1);
assertUpdate("INSERT INTO " + tableName + " VALUES ('two', 2)", 1);
assertExplain("EXPLAIN ALTER TABLE " + tableName + " EXECUTE REMOVE_ORPHAN_FILES (retention_threshold => '0s')",
"SimpleTableExecute\\[iceberg:schemaTableName:tpch.test_remove_orphan_files.*\\{retentionThreshold=0\\.00s}.*");
}
@Test
public void testRemoveOrphanFilesParameterValidation()
{
assertQueryFails(
"ALTER TABLE no_such_table_exists EXECUTE REMOVE_ORPHAN_FILES",
"\\Qline 1:1: Table 'iceberg.tpch.no_such_table_exists' does not exist");
assertQueryFails(
"ALTER TABLE nation EXECUTE REMOVE_ORPHAN_FILES (retention_threshold => '33')",
"\\QUnable to set catalog 'iceberg' table procedure 'REMOVE_ORPHAN_FILES' property 'retention_threshold' to ['33']: duration is not a valid data duration string: 33");
assertQueryFails(
"ALTER TABLE nation EXECUTE REMOVE_ORPHAN_FILES (retention_threshold => '33mb')",
"\\QUnable to set catalog 'iceberg' table procedure 'REMOVE_ORPHAN_FILES' property 'retention_threshold' to ['33mb']: Unknown time unit: mb");
assertQueryFails(
"ALTER TABLE nation EXECUTE REMOVE_ORPHAN_FILES (retention_threshold => '33s')",
"\\QRetention specified (33.00s) is shorter than the minimum retention configured in the system (7.00d). Minimum retention can be changed with iceberg.remove_orphan_files.min-retention configuration property or iceberg.remove_orphan_files_min_retention session property");
}
@Test
public void testIfDeletesReturnsNumberOfRemovedRows()
{
String tableName = "test_delete_returns_number_of_rows_" + randomTableSuffix();
assertUpdate("CREATE TABLE " + tableName + " (key varchar, value integer) WITH (partitioning = ARRAY['key'])");
assertUpdate("INSERT INTO " + tableName + " VALUES ('one', 1)", 1);
assertUpdate("INSERT INTO " + tableName + " VALUES ('one', 2)", 1);
assertUpdate("INSERT INTO " + tableName + " VALUES ('one', 3)", 1);
assertUpdate("INSERT INTO " + tableName + " VALUES ('two', 1)", 1);
assertUpdate("INSERT INTO " + tableName + " VALUES ('two', 2)", 1);
assertUpdate("DELETE FROM " + tableName + " WHERE key = 'one'", 3);
assertUpdate("DELETE FROM " + tableName + " WHERE key = 'one'"); // TODO change this when iceberg will guarantee to always return this (https://github.com/apache/iceberg/issues/4647)
assertUpdate("DELETE FROM " + tableName + " WHERE key = 'three'");
assertUpdate("DELETE FROM " + tableName + " WHERE key = 'two'", 2);
}
@Test
public void testUpdatingFileFormat()
{
String tableName = "test_updating_file_format_" + randomTableSuffix();
assertUpdate("CREATE TABLE " + tableName + " WITH (format = 'orc') AS SELECT * FROM nation WHERE nationkey < 10", "SELECT count(*) FROM nation WHERE nationkey < 10");
assertQuery("SELECT value FROM \"" + tableName + "$properties\" WHERE key = 'write.format.default'", "VALUES 'ORC'");
assertUpdate("ALTER TABLE " + tableName + " SET PROPERTIES format = 'parquet'");
assertQuery("SELECT value FROM \"" + tableName + "$properties\" WHERE key = 'write.format.default'", "VALUES 'PARQUET'");
assertUpdate("INSERT INTO " + tableName + " SELECT * FROM nation WHERE nationkey >= 10", "SELECT count(*) FROM nation WHERE nationkey >= 10");
assertQuery("SELECT * FROM " + tableName, "SELECT * FROM nation");
assertQuery("SELECT count(*) FROM \"" + tableName + "$files\" WHERE file_path LIKE '%.orc'", "VALUES 1");
assertQuery("SELECT count(*) FROM \"" + tableName + "$files\" WHERE file_path LIKE '%.parquet'", "VALUES 1");
assertUpdate("DROP TABLE " + tableName);
}
@Test
public void testUpdatingInvalidTableProperty()
{
String tableName = "test_updating_invalid_table_property_" + randomTableSuffix();
assertUpdate("CREATE TABLE " + tableName + " (a INT, b INT)");
assertThatThrownBy(() -> query("ALTER TABLE " + tableName + " SET PROPERTIES not_a_valid_table_property = 'a value'"))
.hasMessage("Catalog 'iceberg' table property 'not_a_valid_table_property' does not exist");
assertUpdate("DROP TABLE " + tableName);
}
@Test
public void testEmptyCreateTableAsSelect()
{
String tableName = "test_empty_ctas_" + randomTableSuffix();
assertUpdate("CREATE TABLE " + tableName + " AS SELECT * FROM nation WHERE false", 0);
List<Long> initialTableSnapshots = getSnapshotIds(tableName);
assertThat(initialTableSnapshots.size())
.withFailMessage("CTAS operations must create Iceberg snapshot independently whether the selection is empty or not")
.isEqualTo(1);
assertQueryReturnsEmptyResult("SELECT * FROM " + tableName);
assertUpdate("DROP TABLE " + tableName);
}
@Test
public void testEmptyInsert()
{
String tableName = "test_empty_insert_" + randomTableSuffix();
assertUpdate("CREATE TABLE " + tableName + " AS SELECT * FROM nation", "SELECT count(*) FROM nation");
List<Long> initialTableSnapshots = getSnapshotIds(tableName);
assertUpdate("INSERT INTO " + tableName + " SELECT * FROM nation WHERE false", 0);
List<Long> updatedTableSnapshots = getSnapshotIds(tableName);
assertThat(initialTableSnapshots)
.withFailMessage("INSERT operations that are not changing the state of the table must not cause the creation of a new Iceberg snapshot")
.hasSize(1)
.isEqualTo(updatedTableSnapshots);
assertUpdate("DROP TABLE " + tableName);
}
@Test
public void testEmptyUpdate()
{
String tableName = "test_empty_update_" + randomTableSuffix();
assertUpdate("CREATE TABLE " + tableName + " AS SELECT * FROM nation", "SELECT count(*) FROM nation");
List<Long> initialTableSnapshots = getSnapshotIds(tableName);
assertUpdate("UPDATE " + tableName + " SET comment = 'new comment' WHERE nationkey IS NULL", 0);
List<Long> updatedTableSnapshots = getSnapshotIds(tableName);
assertThat(initialTableSnapshots)
.withFailMessage("UPDATE operations that are not changing the state of the table must not cause the creation of a new Iceberg snapshot")
.hasSize(1)
.isEqualTo(updatedTableSnapshots);
assertUpdate("DROP TABLE " + tableName);
}
@Test
public void testEmptyDelete()
{
String tableName = "test_empty_delete_" + randomTableSuffix();
assertUpdate("CREATE TABLE " + tableName + " WITH (format = '" + format.name() + "') AS SELECT * FROM nation", "SELECT count(*) FROM nation");
List<Long> initialTableSnapshots = getSnapshotIds(tableName);
assertUpdate("DELETE FROM " + tableName + " WHERE nationkey IS NULL", 0);
List<Long> updatedTableSnapshots = getSnapshotIds(tableName);
assertThat(initialTableSnapshots)
.withFailMessage("DELETE operations that are not changing the state of the table must not cause the creation of a new Iceberg snapshot")
.hasSize(1)
.isEqualTo(updatedTableSnapshots);
assertUpdate("DROP TABLE " + tableName);
}
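// INSERT, DELETE, UPDATE and OPTIMIZE addressed at an old snapshot via "table@snapshotId" must fail; the same operations against the current snapshot ID succeed.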
@Test
public void testModifyingOldSnapshotIsNotPossible()
{
String tableName = "test_modifying_old_snapshot_" + randomTableSuffix();
assertUpdate(format("CREATE TABLE %s (col int)", tableName));
assertUpdate(format("INSERT INTO %s VALUES 1,2,3", tableName), 3);
long oldSnapshotId = getCurrentSnapshotId(tableName);
assertUpdate(format("INSERT INTO %s VALUES 4,5,6", tableName), 3);
assertQuery(format("SELECT * FROM \"%s@%d\"", tableName, oldSnapshotId), "VALUES 1,2,3");
assertThatThrownBy(() -> query(format("INSERT INTO \"%s@%d\" VALUES 7,8,9", tableName, oldSnapshotId)))
.hasMessage("Modifying old snapshot is not supported in Iceberg.");
assertThatThrownBy(() -> query(format("DELETE FROM \"%s@%d\" WHERE col = 5", tableName, oldSnapshotId)))
.hasMessage("Modifying old snapshot is not supported in Iceberg.");
assertThatThrownBy(() -> query(format("UPDATE \"%s@%d\" SET col = 50 WHERE col = 5", tableName, oldSnapshotId)))
.hasMessage("Modifying old snapshot is not supported in Iceberg.");
assertThatThrownBy(() -> query(format("ALTER TABLE \"%s@%d\" EXECUTE OPTIMIZE", tableName, oldSnapshotId)))
.hasMessage("Modifying old snapshot is not supported in Iceberg.");
assertUpdate(format("INSERT INTO \"%s@%d\" VALUES 7,8,9", tableName, getCurrentSnapshotId(tableName)), 3);
assertUpdate(format("DELETE FROM \"%s@%d\" WHERE col = 9", tableName, getCurrentSnapshotId(tableName)), 1);
assertUpdate(format("UPDATE \"%s@%d\" set col = 50 WHERE col = 5", tableName, getCurrentSnapshotId(tableName)), 1);
assertQuerySucceeds(format("ALTER TABLE \"%s@%d\" EXECUTE OPTIMIZE", tableName, getCurrentSnapshotId(tableName)));
assertQuery(format("SELECT * FROM %s", tableName), "VALUES 1,2,3,4,50,6,7,8");
assertUpdate("DROP TABLE " + tableName);
}
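// Session with the minimum-retention guards lowered to 0s, so EXPIRE_SNAPSHOTS and REMOVE_ORPHAN_FILES can run with a zero retention threshold.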
private Session prepareCleanUpSession()
{
return Session.builder(getSession())
.setCatalogSessionProperty("iceberg", "expire_snapshots_min_retention", "0s")
.setCatalogSessionProperty("iceberg", "remove_orphan_files_min_retention", "0s")
.build();
}
private List<String> getAllMetadataFilesFromTableDirectoryForTable(String tableName)
throws IOException
{
String schema = getSession().getSchema().orElseThrow();
Path tableDataDir = getDistributedQueryRunner().getCoordinator().getBaseDataDir().resolve("iceberg_data").resolve(schema).resolve(tableName).resolve("metadata");
return listAllTableFilesInDirectory(tableDataDir);
}
private List<String> getAllMetadataFilesFromTableDirectory(String tableDataDir)
throws IOException
{
return listAllTableFilesInDirectory(Path.of(URI.create(tableDataDir).getPath()));
}
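// Lists all regular files under the given directory tree, skipping Hadoop-generated .crc checksum files.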
private List<String> listAllTableFilesInDirectory(Path tableDataPath)
throws IOException
{
try (Stream<Path> walk = Files.walk(tableDataPath)) {
return walk
.filter(Files::isRegularFile)
.filter(path -> !path.getFileName().toString().matches("\\..*\\.crc"))
.map(Path::toString)
.collect(toImmutableList());
}
}
private List<Long> getSnapshotIds(String tableName)
{
return getQueryRunner().execute(format("SELECT snapshot_id FROM \"%s$snapshots\"", tableName))
.getOnlyColumn()
.map(Long.class::cast)
.collect(toUnmodifiableList());
}
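// Snapshot ID of the most recent commit, taken from the $snapshots metadata table.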
private long getCurrentSnapshotId(String tableName)
{
return (long) computeScalar("SELECT snapshot_id FROM \"" + tableName + "$snapshots\" ORDER BY committed_at DESC LIMIT 1");
}
private Path getIcebergTableDataPath(String tableName)
{
return getIcebergTablePath(tableName, "data");
}
private Path getIcebergTableMetadataPath(String tableName)
{
return getIcebergTablePath(tableName, "metadata");
}
private Path getIcebergTablePath(String tableName, String suffix)
{
String schema = getSession().getSchema().orElseThrow();
return getDistributedQueryRunner().getCoordinator().getBaseDataDir().resolve("iceberg_data").resolve(schema).resolve(tableName).resolve(suffix);
}
}
| plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/BaseIcebergConnectorTest.java | /*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.trino.plugin.iceberg;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSet;
import io.airlift.units.DataSize;
import io.trino.Session;
import io.trino.metadata.Metadata;
import io.trino.metadata.QualifiedObjectName;
import io.trino.metadata.TableHandle;
import io.trino.operator.OperatorStats;
import io.trino.plugin.hive.HdfsEnvironment;
import io.trino.spi.QueryId;
import io.trino.spi.connector.ColumnHandle;
import io.trino.spi.connector.Constraint;
import io.trino.spi.connector.ConstraintApplicationResult;
import io.trino.spi.connector.TableNotFoundException;
import io.trino.spi.predicate.Domain;
import io.trino.spi.predicate.TupleDomain;
import io.trino.testing.BaseConnectorTest;
import io.trino.testing.DataProviders;
import io.trino.testing.MaterializedResult;
import io.trino.testing.MaterializedRow;
import io.trino.testing.QueryRunner;
import io.trino.testing.ResultWithQueryId;
import io.trino.testing.TestingConnectorBehavior;
import io.trino.testing.sql.TestTable;
import io.trino.tpch.TpchTable;
import org.apache.avro.Schema;
import org.apache.avro.file.DataFileReader;
import org.apache.avro.file.DataFileWriter;
import org.apache.avro.generic.GenericData;
import org.apache.avro.generic.GenericDatumReader;
import org.apache.avro.generic.GenericDatumWriter;
import org.apache.hadoop.fs.FileSystem;
import org.intellij.lang.annotations.Language;
import org.testng.SkipException;
import org.testng.annotations.DataProvider;
import org.testng.annotations.Test;
import java.io.File;
import java.io.IOException;
import java.io.OutputStream;
import java.net.URI;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.NoSuchElementException;
import java.util.Optional;
import java.util.Set;
import java.util.function.Consumer;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
import java.util.stream.LongStream;
import java.util.stream.Stream;
import static com.google.common.base.Preconditions.checkArgument;
import static com.google.common.base.Verify.verify;
import static com.google.common.collect.ImmutableList.toImmutableList;
import static com.google.common.collect.ImmutableMap.toImmutableMap;
import static com.google.common.collect.Iterables.concat;
import static com.google.common.collect.Iterables.getOnlyElement;
import static com.google.common.collect.MoreCollectors.onlyElement;
import static io.trino.SystemSessionProperties.JOIN_DISTRIBUTION_TYPE;
import static io.trino.SystemSessionProperties.PREFERRED_WRITE_PARTITIONING_MIN_NUMBER_OF_PARTITIONS;
import static io.trino.SystemSessionProperties.SCALE_WRITERS;
import static io.trino.SystemSessionProperties.TASK_WRITER_COUNT;
import static io.trino.plugin.hive.HdfsEnvironment.HdfsContext;
import static io.trino.plugin.hive.HiveTestUtils.HDFS_ENVIRONMENT;
import static io.trino.plugin.iceberg.IcebergFileFormat.ORC;
import static io.trino.plugin.iceberg.IcebergFileFormat.PARQUET;
import static io.trino.plugin.iceberg.IcebergQueryRunner.ICEBERG_CATALOG;
import static io.trino.plugin.iceberg.IcebergSplitManager.ICEBERG_DOMAIN_COMPACTION_THRESHOLD;
import static io.trino.spi.predicate.Domain.multipleValues;
import static io.trino.spi.predicate.Domain.singleValue;
import static io.trino.spi.type.BigintType.BIGINT;
import static io.trino.spi.type.DoubleType.DOUBLE;
import static io.trino.spi.type.VarcharType.VARCHAR;
import static io.trino.sql.planner.OptimizerConfig.JoinDistributionType.BROADCAST;
import static io.trino.testing.MaterializedResult.resultBuilder;
import static io.trino.testing.QueryAssertions.assertEqualsIgnoreOrder;
import static io.trino.testing.TestingSession.testSessionBuilder;
import static io.trino.testing.assertions.Assert.assertEquals;
import static io.trino.testing.assertions.Assert.assertEventually;
import static io.trino.testing.sql.TestTable.randomTableSuffix;
import static io.trino.tpch.TpchTable.LINE_ITEM;
import static io.trino.transaction.TransactionBuilder.transaction;
import static java.lang.String.format;
import static java.lang.String.join;
import static java.util.Collections.nCopies;
import static java.util.Objects.requireNonNull;
import static java.util.stream.Collectors.joining;
import static java.util.stream.Collectors.toUnmodifiableList;
import static java.util.stream.IntStream.range;
import static org.assertj.core.api.Assertions.assertThat;
import static org.assertj.core.api.Assertions.assertThatThrownBy;
import static org.testng.Assert.assertFalse;
import static org.testng.Assert.assertNotEquals;
import static org.testng.Assert.assertTrue;
public abstract class BaseIcebergConnectorTest
extends BaseConnectorTest
{
private static final Pattern WITH_CLAUSE_EXTRACTOR = Pattern.compile(".*(WITH\\s*\\([^)]*\\))\\s*$", Pattern.DOTALL);
private final IcebergFileFormat format;
protected BaseIcebergConnectorTest(IcebergFileFormat format)
{
this.format = requireNonNull(format, "format is null");
}
@Override
protected QueryRunner createQueryRunner()
throws Exception
{
return IcebergQueryRunner.builder()
.setIcebergProperties(Map.of("iceberg.file-format", format.name()))
.setInitialTables(ImmutableList.<TpchTable<?>>builder()
.addAll(REQUIRED_TPCH_TABLES)
.add(LINE_ITEM)
.build())
.build();
}
@Override
protected boolean hasBehavior(TestingConnectorBehavior connectorBehavior)
{
switch (connectorBehavior) {
case SUPPORTS_TOPN_PUSHDOWN:
return false;
case SUPPORTS_CREATE_VIEW:
return true;
case SUPPORTS_CREATE_MATERIALIZED_VIEW:
case SUPPORTS_RENAME_MATERIALIZED_VIEW:
return true;
case SUPPORTS_RENAME_MATERIALIZED_VIEW_ACROSS_SCHEMAS:
return false;
case SUPPORTS_DELETE:
case SUPPORTS_UPDATE:
return true;
default:
return super.hasBehavior(connectorBehavior);
}
}
@Override
protected void verifyVersionedQueryFailurePermissible(Exception e)
{
assertThat(e)
.hasMessageMatching("Version pointer type is not supported: .*|" +
"Unsupported type for temporal table version: .*|" +
"Unsupported type for table version: .*|" +
"No version history table tpch.nation at or before .*|" +
"Iceberg snapshot ID does not exists: .*");
}
@Override
protected void verifyConcurrentUpdateFailurePermissible(Exception e)
{
assertThat(e).hasMessageContaining("Failed to commit Iceberg update to table");
}
@Override
protected void verifyConcurrentAddColumnFailurePermissible(Exception e)
{
assertThat(e)
.hasMessageContaining("Cannot update Iceberg table: supplied previous location does not match current location");
}
@Test
public void testDeleteOnV1Table()
{
try (TestTable table = new TestTable(getQueryRunner()::execute, "test_delete_", "WITH (format_version = 1) AS SELECT * FROM orders")) {
assertQueryFails("DELETE FROM " + table.getName() + " WHERE custkey <= 100", "Iceberg table updates require at least format version 2");
}
}
@Override
public void testCharVarcharComparison()
{
assertThatThrownBy(super::testCharVarcharComparison)
.hasMessage("Type not supported for Iceberg: char(3)");
}
@Test
@Override
public void testShowCreateSchema()
{
assertThat(computeActual("SHOW CREATE SCHEMA tpch").getOnlyValue().toString())
.matches("CREATE SCHEMA iceberg.tpch\n" +
"AUTHORIZATION USER user\n" +
"WITH \\(\n" +
"\\s+location = '.*/iceberg_data/tpch'\n" +
"\\)");
}
@Override
@Test
public void testDescribeTable()
{
MaterializedResult expectedColumns = resultBuilder(getSession(), VARCHAR, VARCHAR, VARCHAR, VARCHAR)
.row("orderkey", "bigint", "", "")
.row("custkey", "bigint", "", "")
.row("orderstatus", "varchar", "", "")
.row("totalprice", "double", "", "")
.row("orderdate", "date", "", "")
.row("orderpriority", "varchar", "", "")
.row("clerk", "varchar", "", "")
.row("shippriority", "integer", "", "")
.row("comment", "varchar", "", "")
.build();
MaterializedResult actualColumns = computeActual("DESCRIBE orders");
assertEquals(actualColumns, expectedColumns);
}
@Override
@Test
public void testShowCreateTable()
{
File tempDir = getDistributedQueryRunner().getCoordinator().getBaseDataDir().toFile();
assertThat(computeActual("SHOW CREATE TABLE orders").getOnlyValue())
.isEqualTo("CREATE TABLE iceberg.tpch.orders (\n" +
" orderkey bigint,\n" +
" custkey bigint,\n" +
" orderstatus varchar,\n" +
" totalprice double,\n" +
" orderdate date,\n" +
" orderpriority varchar,\n" +
" clerk varchar,\n" +
" shippriority integer,\n" +
" comment varchar\n" +
")\n" +
"WITH (\n" +
" format = '" + format.name() + "',\n" +
" format_version = 2,\n" +
" location = '" + tempDir + "/iceberg_data/tpch/orders'\n" +
")");
}
@Override
protected void checkInformationSchemaViewsForMaterializedView(String schemaName, String viewName)
{
// TODO should probably return materialized view, as it's also a view -- to be double-checked
assertThatThrownBy(() -> super.checkInformationSchemaViewsForMaterializedView(schemaName, viewName))
.hasMessageFindingMatch("(?s)Expecting.*to contain:.*\\Q[(" + viewName + ")]");
}
@Test
public void testDecimal()
{
testDecimalWithPrecisionAndScale(1, 0);
testDecimalWithPrecisionAndScale(8, 6);
testDecimalWithPrecisionAndScale(9, 8);
testDecimalWithPrecisionAndScale(10, 8);
testDecimalWithPrecisionAndScale(18, 1);
testDecimalWithPrecisionAndScale(18, 8);
testDecimalWithPrecisionAndScale(18, 17);
testDecimalWithPrecisionAndScale(17, 16);
testDecimalWithPrecisionAndScale(18, 17);
testDecimalWithPrecisionAndScale(24, 10);
testDecimalWithPrecisionAndScale(30, 10);
testDecimalWithPrecisionAndScale(37, 26);
testDecimalWithPrecisionAndScale(38, 37);
testDecimalWithPrecisionAndScale(38, 17);
testDecimalWithPrecisionAndScale(38, 37);
}
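// Round-trips a maximal-length decimal literal of the given precision and scale through CREATE TABLE, INSERT and SELECT.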
private void testDecimalWithPrecisionAndScale(int precision, int scale)
{
checkArgument(precision >= 1 && precision <= 38, "Decimal precision (%s) must be between 1 and 38 inclusive", precision);
checkArgument(scale < precision && scale >= 0, "Decimal scale (%s) must be less than the precision (%s) and non-negative", scale, precision);
String decimalType = format("DECIMAL(%d,%d)", precision, scale);
String beforeTheDecimalPoint = "12345678901234567890123456789012345678".substring(0, precision - scale);
String afterTheDecimalPoint = "09876543210987654321098765432109876543".substring(0, scale);
String decimalValue = format("%s.%s", beforeTheDecimalPoint, afterTheDecimalPoint);
assertUpdate(format("CREATE TABLE test_iceberg_decimal (x %s)", decimalType));
assertUpdate(format("INSERT INTO test_iceberg_decimal (x) VALUES (CAST('%s' AS %s))", decimalValue, decimalType), 1);
assertQuery("SELECT * FROM test_iceberg_decimal", format("SELECT CAST('%s' AS %s)", decimalValue, decimalType));
dropTable("test_iceberg_decimal");
}
@Test
public void testTime()
{
testSelectOrPartitionedByTime(false);
}
@Test
public void testPartitionedByTime()
{
testSelectOrPartitionedByTime(true);
}
private void testSelectOrPartitionedByTime(boolean partitioned)
{
String tableName = format("test_%s_by_time", partitioned ? "partitioned" : "selected");
String partitioning = partitioned ? "WITH(partitioning = ARRAY['x'])" : "";
assertUpdate(format("CREATE TABLE %s (x TIME(6), y BIGINT) %s", tableName, partitioning));
assertUpdate(format("INSERT INTO %s VALUES (TIME '10:12:34', 12345)", tableName), 1);
assertQuery(format("SELECT COUNT(*) FROM %s", tableName), "SELECT 1");
assertQuery(format("SELECT x FROM %s", tableName), "SELECT CAST('10:12:34' AS TIME)");
assertUpdate(format("INSERT INTO %s VALUES (TIME '9:00:00', 67890)", tableName), 1);
assertQuery(format("SELECT COUNT(*) FROM %s", tableName), "SELECT 2");
assertQuery(format("SELECT x FROM %s WHERE x = TIME '10:12:34'", tableName), "SELECT CAST('10:12:34' AS TIME)");
assertQuery(format("SELECT x FROM %s WHERE x = TIME '9:00:00'", tableName), "SELECT CAST('9:00:00' AS TIME)");
assertQuery(format("SELECT x FROM %s WHERE y = 12345", tableName), "SELECT CAST('10:12:34' AS TIME)");
assertQuery(format("SELECT x FROM %s WHERE y = 67890", tableName), "SELECT CAST('9:00:00' AS TIME)");
dropTable(tableName);
}
@Test
public void testPartitionByTimestamp()
{
testSelectOrPartitionedByTimestamp(true);
}
@Test
public void testSelectByTimestamp()
{
testSelectOrPartitionedByTimestamp(false);
}
private void testSelectOrPartitionedByTimestamp(boolean partitioned)
{
String tableName = format("test_%s_by_timestamp", partitioned ? "partitioned" : "selected");
assertUpdate(format("CREATE TABLE %s (_timestamp timestamp(6)) %s",
tableName, partitioned ? "WITH (partitioning = ARRAY['_timestamp'])" : ""));
@Language("SQL") String select1 = "SELECT TIMESTAMP '2017-05-01 10:12:34' _timestamp";
@Language("SQL") String select2 = "SELECT TIMESTAMP '2017-10-01 10:12:34' _timestamp";
@Language("SQL") String select3 = "SELECT TIMESTAMP '2018-05-01 10:12:34' _timestamp";
assertUpdate(format("INSERT INTO %s %s", tableName, select1), 1);
assertUpdate(format("INSERT INTO %s %s", tableName, select2), 1);
assertUpdate(format("INSERT INTO %s %s", tableName, select3), 1);
assertQuery(format("SELECT COUNT(*) from %s", tableName), "SELECT 3");
assertQuery(format("SELECT * from %s WHERE _timestamp = TIMESTAMP '2017-05-01 10:12:34'", tableName), select1);
assertQuery(format("SELECT * from %s WHERE _timestamp < TIMESTAMP '2017-06-01 10:12:34'", tableName), select1);
assertQuery(format("SELECT * from %s WHERE _timestamp = TIMESTAMP '2017-10-01 10:12:34'", tableName), select2);
assertQuery(format("SELECT * from %s WHERE _timestamp > TIMESTAMP '2017-06-01 10:12:34' AND _timestamp < TIMESTAMP '2018-05-01 10:12:34'", tableName), select2);
assertQuery(format("SELECT * from %s WHERE _timestamp = TIMESTAMP '2018-05-01 10:12:34'", tableName), select3);
assertQuery(format("SELECT * from %s WHERE _timestamp > TIMESTAMP '2018-01-01 10:12:34'", tableName), select3);
dropTable(tableName);
}
@Test
public void testPartitionByTimestampWithTimeZone()
{
testSelectOrPartitionedByTimestampWithTimeZone(true);
}
@Test
public void testSelectByTimestampWithTimeZone()
{
testSelectOrPartitionedByTimestampWithTimeZone(false);
}
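// Exercises equality, range and distinctness predicates on timestamp(6) with time zone, using equivalent UTC and America/Los_Angeles literals.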
private void testSelectOrPartitionedByTimestampWithTimeZone(boolean partitioned)
{
String tableName = format("test_%s_by_timestamptz", partitioned ? "partitioned" : "selected");
assertUpdate(format(
"CREATE TABLE %s (_timestamptz timestamp(6) with time zone) %s",
tableName,
partitioned ? "WITH (partitioning = ARRAY['_timestamptz'])" : ""));
String instant1Utc = "TIMESTAMP '2021-10-31 00:30:00.005000 UTC'";
String instant1La = "TIMESTAMP '2021-10-30 17:30:00.005000 America/Los_Angeles'";
String instant2Utc = "TIMESTAMP '2021-10-31 00:30:00.006000 UTC'";
String instant2La = "TIMESTAMP '2021-10-30 17:30:00.006000 America/Los_Angeles'";
String instant3Utc = "TIMESTAMP '2021-10-31 00:30:00.007000 UTC'";
String instant3La = "TIMESTAMP '2021-10-30 17:30:00.007000 America/Los_Angeles'";
// regression test value for https://github.com/trinodb/trino/issues/12852
String instant4Utc = "TIMESTAMP '1969-12-01 05:06:07.234567 UTC'";
assertUpdate(format("INSERT INTO %s VALUES %s", tableName, instant1Utc), 1);
assertUpdate(format("INSERT INTO %s VALUES %s", tableName, instant2La /* non-UTC for this one */), 1);
assertUpdate(format("INSERT INTO %s VALUES %s", tableName, instant3Utc), 1);
assertUpdate(format("INSERT INTO %s VALUES %s", tableName, instant4Utc), 1);
assertQuery(format("SELECT COUNT(*) from %s", tableName), "SELECT 4");
// =
assertThat(query(format("SELECT * from %s WHERE _timestamptz = %s", tableName, instant1Utc)))
.matches("VALUES " + instant1Utc);
assertThat(query(format("SELECT * from %s WHERE _timestamptz = %s", tableName, instant1La)))
.matches("VALUES " + instant1Utc);
assertThat(query(format("SELECT * from %s WHERE _timestamptz = %s", tableName, instant2Utc)))
.matches("VALUES " + instant2Utc);
assertThat(query(format("SELECT * from %s WHERE _timestamptz = %s", tableName, instant2La)))
.matches("VALUES " + instant2Utc);
assertThat(query(format("SELECT * from %s WHERE _timestamptz = %s", tableName, instant3Utc)))
.matches("VALUES " + instant3Utc);
assertThat(query(format("SELECT * from %s WHERE _timestamptz = %s", tableName, instant3La)))
.matches("VALUES " + instant3Utc);
assertThat(query(format("SELECT * from %s WHERE _timestamptz = %s", tableName, instant4Utc)))
.matches("VALUES " + instant4Utc);
// <
assertThat(query(format("SELECT * from %s WHERE _timestamptz < %s", tableName, instant2Utc)))
.matches(format("VALUES %s, %s", instant1Utc, instant4Utc));
assertThat(query(format("SELECT * from %s WHERE _timestamptz < %s", tableName, instant2La)))
.matches(format("VALUES %s, %s", instant1Utc, instant4Utc));
assertThat(query(format("SELECT * from %s WHERE _timestamptz < %s", tableName, instant3Utc)))
.matches(format("VALUES %s, %s, %s", instant1Utc, instant2Utc, instant4Utc));
assertThat(query(format("SELECT * from %s WHERE _timestamptz < %s", tableName, instant3La)))
.matches(format("VALUES %s, %s, %s", instant1Utc, instant2Utc, instant4Utc));
// <=
assertThat(query(format("SELECT * from %s WHERE _timestamptz <= %s", tableName, instant2Utc)))
.matches(format("VALUES %s, %s, %s", instant1Utc, instant2Utc, instant4Utc));
assertThat(query(format("SELECT * from %s WHERE _timestamptz <= %s", tableName, instant2La)))
.matches(format("VALUES %s, %s, %s", instant1Utc, instant2Utc, instant4Utc));
// >
assertThat(query(format("SELECT * from %s WHERE _timestamptz > %s", tableName, instant2Utc)))
.matches("VALUES " + instant3Utc);
assertThat(query(format("SELECT * from %s WHERE _timestamptz > %s", tableName, instant2La)))
.matches("VALUES " + instant3Utc);
assertThat(query(format("SELECT * from %s WHERE _timestamptz > %s", tableName, instant1Utc)))
.matches(format("VALUES %s, %s", instant2Utc, instant3Utc));
assertThat(query(format("SELECT * from %s WHERE _timestamptz > %s", tableName, instant1La)))
.matches(format("VALUES %s, %s", instant2Utc, instant3Utc));
// >=
assertThat(query(format("SELECT * from %s WHERE _timestamptz >= %s", tableName, instant2Utc)))
.matches(format("VALUES %s, %s", instant2Utc, instant3Utc));
assertThat(query(format("SELECT * from %s WHERE _timestamptz >= %s", tableName, instant2La)))
.matches(format("VALUES %s, %s", instant2Utc, instant3Utc));
// open range
assertThat(query(format("SELECT * from %s WHERE _timestamptz > %s AND _timestamptz < %s", tableName, instant1Utc, instant3Utc)))
.matches("VALUES " + instant2Utc);
assertThat(query(format("SELECT * from %s WHERE _timestamptz > %s AND _timestamptz < %s", tableName, instant1La, instant3La)))
.matches("VALUES " + instant2Utc);
// closed range
assertThat(query(format("SELECT * from %s WHERE _timestamptz BETWEEN %s AND %s", tableName, instant1Utc, instant2Utc)))
.matches(format("VALUES %s, %s", instant1Utc, instant2Utc));
assertThat(query(format("SELECT * from %s WHERE _timestamptz BETWEEN %s AND %s", tableName, instant1La, instant2La)))
.matches(format("VALUES %s, %s", instant1Utc, instant2Utc));
// !=
assertThat(query(format("SELECT * from %s WHERE _timestamptz != %s", tableName, instant1Utc)))
.matches(format("VALUES %s, %s, %s", instant2Utc, instant3Utc, instant4Utc));
assertThat(query(format("SELECT * from %s WHERE _timestamptz != %s", tableName, instant1La)))
.matches(format("VALUES %s, %s, %s", instant2Utc, instant3Utc, instant4Utc));
assertThat(query(format("SELECT * from %s WHERE _timestamptz != %s", tableName, instant2Utc)))
.matches(format("VALUES %s, %s, %s", instant1Utc, instant3Utc, instant4Utc));
assertThat(query(format("SELECT * from %s WHERE _timestamptz != %s", tableName, instant2La)))
.matches(format("VALUES %s, %s, %s", instant1Utc, instant3Utc, instant4Utc));
assertThat(query(format("SELECT * from %s WHERE _timestamptz != %s", tableName, instant4Utc)))
.matches(format("VALUES %s, %s, %s", instant1Utc, instant2Utc, instant3Utc));
// IS DISTINCT FROM
assertThat(query(format("SELECT * from %s WHERE _timestamptz IS DISTINCT FROM %s", tableName, instant1Utc)))
.matches(format("VALUES %s, %s, %s", instant2Utc, instant3Utc, instant4Utc));
assertThat(query(format("SELECT * from %s WHERE _timestamptz IS DISTINCT FROM %s", tableName, instant1La)))
.matches(format("VALUES %s, %s, %s", instant2Utc, instant3Utc, instant4Utc));
assertThat(query(format("SELECT * from %s WHERE _timestamptz IS DISTINCT FROM %s", tableName, instant2Utc)))
.matches(format("VALUES %s, %s, %s", instant1Utc, instant3Utc, instant4Utc));
assertThat(query(format("SELECT * from %s WHERE _timestamptz IS DISTINCT FROM %s", tableName, instant2La)))
.matches(format("VALUES %s, %s, %s", instant1Utc, instant3Utc, instant4Utc));
assertThat(query(format("SELECT * from %s WHERE _timestamptz IS DISTINCT FROM %s", tableName, instant4Utc)))
.matches(format("VALUES %s, %s, %s", instant1Utc, instant2Utc, instant3Utc));
// IS NOT DISTINCT FROM
assertThat(query(format("SELECT * from %s WHERE _timestamptz IS NOT DISTINCT FROM %s", tableName, instant1Utc)))
.matches("VALUES " + instant1Utc);
assertThat(query(format("SELECT * from %s WHERE _timestamptz IS NOT DISTINCT FROM %s", tableName, instant1La)))
.matches("VALUES " + instant1Utc);
assertThat(query(format("SELECT * from %s WHERE _timestamptz IS NOT DISTINCT FROM %s", tableName, instant2Utc)))
.matches("VALUES " + instant2Utc);
assertThat(query(format("SELECT * from %s WHERE _timestamptz IS NOT DISTINCT FROM %s", tableName, instant2La)))
.matches("VALUES " + instant2Utc);
assertThat(query(format("SELECT * from %s WHERE _timestamptz IS NOT DISTINCT FROM %s", tableName, instant3Utc)))
.matches("VALUES " + instant3Utc);
assertThat(query(format("SELECT * from %s WHERE _timestamptz IS NOT DISTINCT FROM %s", tableName, instant3La)))
.matches("VALUES " + instant3Utc);
assertThat(query(format("SELECT * from %s WHERE _timestamptz IS NOT DISTINCT FROM %s", tableName, instant4Utc)))
.matches("VALUES " + instant4Utc);
if (partitioned) {
assertThat(query(format("SELECT record_count, file_count, partition._timestamptz FROM \"%s$partitions\"", tableName)))
.matches(format(
"VALUES (BIGINT '1', BIGINT '1', %s), (BIGINT '1', BIGINT '1', %s), (BIGINT '1', BIGINT '1', %s), (BIGINT '1', BIGINT '1', %s)",
instant1Utc,
instant2Utc,
instant3Utc,
instant4Utc));
}
else {
assertThat(query(format("SELECT record_count, file_count, data._timestamptz FROM \"%s$partitions\"", tableName)))
.matches(format(
"VALUES (BIGINT '4', BIGINT '4', CAST(ROW(%s, %s, 0, NULL) AS row(min timestamp(6) with time zone, max timestamp(6) with time zone, null_count bigint, nan_count bigint)))",
format == ORC ? "TIMESTAMP '1969-12-01 05:06:07.234000 UTC'" : instant4Utc,
format == ORC ? "TIMESTAMP '2021-10-31 00:30:00.007999 UTC'" : instant3Utc));
}
// show stats
assertThat(query("SHOW STATS FOR " + tableName))
.skippingTypesCheck()
.matches("VALUES " +
"('_timestamptz', NULL, NULL, 0e0, NULL, '1969-12-01 05:06:07.234 UTC', '2021-10-31 00:30:00.007 UTC'), " +
"(NULL, NULL, NULL, NULL, 4e0, NULL, NULL)");
if (partitioned) {
// show stats with predicate
assertThat(query("SHOW STATS FOR (SELECT * FROM " + tableName + " WHERE _timestamptz = " + instant1La + ")"))
.skippingTypesCheck()
.matches("VALUES " +
// TODO (https://github.com/trinodb/trino/issues/9716) the min/max values are off by 1 millisecond
"('_timestamptz', NULL, NULL, 0e0, NULL, '2021-10-31 00:30:00.005 UTC', '2021-10-31 00:30:00.005 UTC'), " +
"(NULL, NULL, NULL, NULL, 1e0, NULL, NULL)");
}
else {
// show stats with predicate
assertThat(query("SHOW STATS FOR (SELECT * FROM " + tableName + " WHERE _timestamptz = " + instant1La + ")"))
.skippingTypesCheck()
.matches("VALUES " +
"('_timestamptz', NULL, NULL, NULL, NULL, NULL, NULL), " +
"(NULL, NULL, NULL, NULL, NULL, NULL, NULL)");
}
assertUpdate("DROP TABLE " + tableName);
}
@Test
public void testUuid()
{
testSelectOrPartitionedByUuid(false);
}
@Test
public void testPartitionedByUuid()
{
testSelectOrPartitionedByUuid(true);
}
private void testSelectOrPartitionedByUuid(boolean partitioned)
{
String tableName = format("test_%s_by_uuid", partitioned ? "partitioned" : "selected");
String partitioning = partitioned ? "WITH (partitioning = ARRAY['x'])" : "";
assertUpdate(format("DROP TABLE IF EXISTS %s", tableName));
assertUpdate(format("CREATE TABLE %s (x uuid, y bigint) %s", tableName, partitioning));
assertUpdate(format("INSERT INTO %s VALUES (UUID '406caec7-68b9-4778-81b2-a12ece70c8b1', 12345)", tableName), 1);
assertQuery(format("SELECT count(*) FROM %s", tableName), "SELECT 1");
assertQuery(format("SELECT x FROM %s", tableName), "SELECT CAST('406caec7-68b9-4778-81b2-a12ece70c8b1' AS UUID)");
assertUpdate(format("INSERT INTO %s VALUES (UUID 'f79c3e09-677c-4bbd-a479-3f349cb785e7', 67890)", tableName), 1);
assertUpdate(format("INSERT INTO %s VALUES (NULL, 7531)", tableName), 1);
assertQuery(format("SELECT count(*) FROM %s", tableName), "SELECT 3");
assertQuery(format("SELECT * FROM %s WHERE x = UUID '406caec7-68b9-4778-81b2-a12ece70c8b1'", tableName), "SELECT CAST('406caec7-68b9-4778-81b2-a12ece70c8b1' AS UUID), 12345");
assertQuery(format("SELECT * FROM %s WHERE x = UUID 'f79c3e09-677c-4bbd-a479-3f349cb785e7'", tableName), "SELECT CAST('f79c3e09-677c-4bbd-a479-3f349cb785e7' AS UUID), 67890");
assertQuery(
format("SELECT * FROM %s WHERE x >= UUID '406caec7-68b9-4778-81b2-a12ece70c8b1'", tableName),
(format == ORC && partitioned || format == PARQUET)
// TODO (https://github.com/trinodb/trino/issues/12834): reading Parquet, or partitioned ORC, with UUID filter yields incorrect results
? "VALUES (CAST('406caec7-68b9-4778-81b2-a12ece70c8b1' AS UUID), 12345)"
: "VALUES (CAST('f79c3e09-677c-4bbd-a479-3f349cb785e7' AS UUID), 67890), (CAST('406caec7-68b9-4778-81b2-a12ece70c8b1' AS UUID), 12345)");
assertQuery(
format("SELECT * FROM %s WHERE x >= UUID 'f79c3e09-677c-4bbd-a479-3f349cb785e7'", tableName),
partitioned
? "VALUES (CAST('f79c3e09-677c-4bbd-a479-3f349cb785e7' AS UUID), 67890), (CAST('406caec7-68b9-4778-81b2-a12ece70c8b1' AS UUID), 12345)"
: "SELECT CAST('f79c3e09-677c-4bbd-a479-3f349cb785e7' AS UUID), 67890");
assertQuery(format("SELECT * FROM %s WHERE x IS NULL", tableName), "SELECT NULL, 7531");
assertQuery(format("SELECT x FROM %s WHERE y = 12345", tableName), "SELECT CAST('406caec7-68b9-4778-81b2-a12ece70c8b1' AS UUID)");
assertQuery(format("SELECT x FROM %s WHERE y = 67890", tableName), "SELECT CAST('f79c3e09-677c-4bbd-a479-3f349cb785e7' AS UUID)");
assertQuery(format("SELECT x FROM %s WHERE y = 7531", tableName), "SELECT NULL");
assertUpdate(format("INSERT INTO %s VALUES (UUID '206caec7-68b9-4778-81b2-a12ece70c8b1', 313), (UUID '906caec7-68b9-4778-81b2-a12ece70c8b1', 314)", tableName), 2);
assertThat(query("SELECT y FROM " + tableName + " WHERE x >= UUID '206caec7-68b9-4778-81b2-a12ece70c8b1'"))
.matches(
(partitioned)
// TODO (https://github.com/trinodb/trino/issues/12834): reading Parquet with UUID filter yields incorrect results
? "VALUES BIGINT '12345', 313"
: ((format == PARQUET)
// TODO (https://github.com/trinodb/trino/issues/12834): reading Parquet with UUID filter yields incorrect results
? "VALUES BIGINT '12345'"
// this one is correct
: "VALUES BIGINT '12345', 67890, 313, 314"));
assertUpdate("DROP TABLE " + tableName);
}
@Test
public void testNestedUuid()
{
assertUpdate("CREATE TABLE test_nested_uuid (int_t int, row_t row(uuid_t uuid, int_t int), map_t map(int, uuid), array_t array(uuid))");
String uuid = "UUID '406caec7-68b9-4778-81b2-a12ece70c8b1'";
String value = format("VALUES (2, row(%1$s, 1), map(array[1], array[%1$s]), array[%1$s, %1$s])", uuid);
assertUpdate("INSERT INTO test_nested_uuid " + value, 1);
assertThat(query("SELECT row_t.int_t, row_t.uuid_t FROM test_nested_uuid"))
.matches("VALUES (1, UUID '406caec7-68b9-4778-81b2-a12ece70c8b1')");
assertThat(query("SELECT map_t[1] FROM test_nested_uuid"))
.matches("VALUES UUID '406caec7-68b9-4778-81b2-a12ece70c8b1'");
assertThat(query("SELECT array_t FROM test_nested_uuid"))
.matches("VALUES ARRAY[UUID '406caec7-68b9-4778-81b2-a12ece70c8b1', UUID '406caec7-68b9-4778-81b2-a12ece70c8b1']");
assertQuery("SELECT row_t.int_t FROM test_nested_uuid WHERE row_t.uuid_t = UUID '406caec7-68b9-4778-81b2-a12ece70c8b1'", "VALUES 1");
assertQuery("SELECT int_t FROM test_nested_uuid WHERE row_t.uuid_t = UUID '406caec7-68b9-4778-81b2-a12ece70c8b1'", "VALUES 2");
}
@Test
public void testCreatePartitionedTable()
{
assertUpdate("" +
"CREATE TABLE test_partitioned_table (" +
" a_boolean boolean, " +
" an_integer integer, " +
" a_bigint bigint, " +
" a_real real, " +
" a_double double, " +
" a_short_decimal decimal(5,2), " +
" a_long_decimal decimal(38,20), " +
" a_varchar varchar, " +
" a_varbinary varbinary, " +
" a_date date, " +
" a_time time(6), " +
" a_timestamp timestamp(6), " +
" a_timestamptz timestamp(6) with time zone, " +
" a_uuid uuid, " +
" a_row row(id integer , vc varchar), " +
" an_array array(varchar), " +
" a_map map(integer, varchar) " +
") " +
"WITH (" +
"partitioning = ARRAY[" +
" 'a_boolean', " +
" 'an_integer', " +
" 'a_bigint', " +
" 'a_real', " +
" 'a_double', " +
" 'a_short_decimal', " +
" 'a_long_decimal', " +
" 'a_varchar', " +
" 'a_varbinary', " +
" 'a_date', " +
" 'a_time', " +
" 'a_timestamp', " +
" 'a_timestamptz', " +
" 'a_uuid' " +
// Note: partitioning on non-primitive columns is not allowed in Iceberg
" ]" +
")");
assertQueryReturnsEmptyResult("SELECT * FROM test_partitioned_table");
String values = "VALUES (" +
"true, " +
"1, " +
"BIGINT '1', " +
"REAL '1.0', " +
"DOUBLE '1.0', " +
"CAST(1.0 AS decimal(5,2)), " +
"CAST(11.0 AS decimal(38,20)), " +
"VARCHAR 'onefsadfdsf', " +
"X'000102f0feff', " +
"DATE '2021-07-24'," +
"TIME '02:43:57.987654', " +
"TIMESTAMP '2021-07-24 03:43:57.987654'," +
"TIMESTAMP '2021-07-24 04:43:57.987654 UTC', " +
"UUID '20050910-1330-11e9-ffff-2a86e4085a59', " +
"CAST(ROW(42, 'this is a random value') AS ROW(id int, vc varchar)), " +
"ARRAY[VARCHAR 'uno', 'dos', 'tres'], " +
"map(ARRAY[1,2], ARRAY['ek', VARCHAR 'one'])) ";
String nullValues = nCopies(17, "NULL").stream()
.collect(joining(", ", "VALUES (", ")"));
assertUpdate("INSERT INTO test_partitioned_table " + values, 1);
assertUpdate("INSERT INTO test_partitioned_table " + nullValues, 1);
// SELECT
assertThat(query("SELECT * FROM test_partitioned_table"))
.matches(values + " UNION ALL " + nullValues);
// SELECT with predicates
assertThat(query("SELECT * FROM test_partitioned_table WHERE " +
" a_boolean = true " +
"AND an_integer = 1 " +
"AND a_bigint = BIGINT '1' " +
"AND a_real = REAL '1.0' " +
"AND a_double = DOUBLE '1.0' " +
"AND a_short_decimal = CAST(1.0 AS decimal(5,2)) " +
"AND a_long_decimal = CAST(11.0 AS decimal(38,20)) " +
"AND a_varchar = VARCHAR 'onefsadfdsf' " +
"AND a_varbinary = X'000102f0feff' " +
"AND a_date = DATE '2021-07-24' " +
"AND a_time = TIME '02:43:57.987654' " +
"AND a_timestamp = TIMESTAMP '2021-07-24 03:43:57.987654' " +
"AND a_timestamptz = TIMESTAMP '2021-07-24 04:43:57.987654 UTC' " +
"AND a_uuid = UUID '20050910-1330-11e9-ffff-2a86e4085a59' " +
"AND a_row = CAST(ROW(42, 'this is a random value') AS ROW(id int, vc varchar)) " +
"AND an_array = ARRAY[VARCHAR 'uno', 'dos', 'tres'] " +
"AND a_map = map(ARRAY[1,2], ARRAY['ek', VARCHAR 'one']) " +
""))
.matches(values);
assertThat(query("SELECT * FROM test_partitioned_table WHERE " +
" a_boolean IS NULL " +
"AND an_integer IS NULL " +
"AND a_bigint IS NULL " +
"AND a_real IS NULL " +
"AND a_double IS NULL " +
"AND a_short_decimal IS NULL " +
"AND a_long_decimal IS NULL " +
"AND a_varchar IS NULL " +
"AND a_varbinary IS NULL " +
"AND a_date IS NULL " +
"AND a_time IS NULL " +
"AND a_timestamp IS NULL " +
"AND a_timestamptz IS NULL " +
"AND a_uuid IS NULL " +
"AND a_row IS NULL " +
"AND an_array IS NULL " +
"AND a_map IS NULL " +
""))
.skippingTypesCheck()
.matches(nullValues);
// SHOW STATS
if (format == ORC) {
assertQuery("SHOW STATS FOR test_partitioned_table",
"VALUES " +
" ('a_boolean', NULL, NULL, 0.5, NULL, 'true', 'true'), " +
" ('an_integer', NULL, NULL, 0.5, NULL, '1', '1'), " +
" ('a_bigint', NULL, NULL, 0.5, NULL, '1', '1'), " +
" ('a_real', NULL, NULL, 0.5, NULL, '1.0', '1.0'), " +
" ('a_double', NULL, NULL, 0.5, NULL, '1.0', '1.0'), " +
" ('a_short_decimal', NULL, NULL, 0.5, NULL, '1.0', '1.0'), " +
" ('a_long_decimal', NULL, NULL, 0.5, NULL, '11.0', '11.0'), " +
" ('a_varchar', NULL, NULL, 0.5, NULL, NULL, NULL), " +
" ('a_varbinary', NULL, NULL, 0.5, NULL, NULL, NULL), " +
" ('a_date', NULL, NULL, 0.5, NULL, '2021-07-24', '2021-07-24'), " +
" ('a_time', NULL, NULL, 0.5, NULL, NULL, NULL), " +
" ('a_timestamp', NULL, NULL, 0.5, NULL, '2021-07-24 03:43:57.987654', '2021-07-24 03:43:57.987654'), " +
" ('a_timestamptz', NULL, NULL, 0.5, NULL, '2021-07-24 04:43:57.987 UTC', '2021-07-24 04:43:57.987 UTC'), " +
" ('a_uuid', NULL, NULL, 0.5, NULL, NULL, NULL), " +
" ('a_row', NULL, NULL, 0.5, NULL, NULL, NULL), " +
" ('an_array', NULL, NULL, 0.5, NULL, NULL, NULL), " +
" ('a_map', NULL, NULL, 0.5, NULL, NULL, NULL), " +
" (NULL, NULL, NULL, NULL, 2e0, NULL, NULL)");
}
else {
assertThat(query("SHOW STATS FOR test_partitioned_table"))
.skippingTypesCheck()
.matches("VALUES " +
" ('a_boolean', NULL, NULL, 0.5e0, NULL, 'true', 'true'), " +
" ('an_integer', NULL, NULL, 0.5e0, NULL, '1', '1'), " +
" ('a_bigint', NULL, NULL, 0.5e0, NULL, '1', '1'), " +
" ('a_real', NULL, NULL, 0.5e0, NULL, '1.0', '1.0'), " +
" ('a_double', NULL, NULL, 0.5e0, NULL, '1.0', '1.0'), " +
" ('a_short_decimal', NULL, NULL, 0.5e0, NULL, '1.0', '1.0'), " +
" ('a_long_decimal', NULL, NULL, 0.5e0, NULL, '11.0', '11.0'), " +
" ('a_varchar', 87e0, NULL, 0.5e0, NULL, NULL, NULL), " +
" ('a_varbinary', 82e0, NULL, 0.5e0, NULL, NULL, NULL), " +
" ('a_date', NULL, NULL, 0.5e0, NULL, '2021-07-24', '2021-07-24'), " +
" ('a_time', NULL, NULL, 0.5e0, NULL, NULL, NULL), " +
" ('a_timestamp', NULL, NULL, 0.5e0, NULL, '2021-07-24 03:43:57.987654', '2021-07-24 03:43:57.987654'), " +
" ('a_timestamptz', NULL, NULL, 0.5e0, NULL, '2021-07-24 04:43:57.987 UTC', '2021-07-24 04:43:57.987 UTC'), " +
" ('a_uuid', NULL, NULL, 0.5e0, NULL, NULL, NULL), " +
" ('a_row', NULL, NULL, NULL, NULL, NULL, NULL), " +
" ('an_array', NULL, NULL, NULL, NULL, NULL, NULL), " +
" ('a_map', NULL, NULL, NULL, NULL, NULL, NULL), " +
" (NULL, NULL, NULL, NULL, 2e0, NULL, NULL)");
}
// $partitions
String schema = getSession().getSchema().orElseThrow();
assertThat(query("SELECT column_name FROM information_schema.columns WHERE table_schema = '" + schema + "' AND table_name = 'test_partitioned_table$partitions' "))
.skippingTypesCheck()
.matches("VALUES 'partition', 'record_count', 'file_count', 'total_size'");
assertThat(query("SELECT " +
" record_count," +
" file_count, " +
" partition.a_boolean, " +
" partition.an_integer, " +
" partition.a_bigint, " +
" partition.a_real, " +
" partition.a_double, " +
" partition.a_short_decimal, " +
" partition.a_long_decimal, " +
" partition.a_varchar, " +
" partition.a_varbinary, " +
" partition.a_date, " +
" partition.a_time, " +
" partition.a_timestamp, " +
" partition.a_timestamptz, " +
" partition.a_uuid " +
// Note: partitioning on non-primitive columns is not allowed in Iceberg
" FROM \"test_partitioned_table$partitions\" "))
.matches("" +
"VALUES (" +
" BIGINT '1', " +
" BIGINT '1', " +
" true, " +
" 1, " +
" BIGINT '1', " +
" REAL '1.0', " +
" DOUBLE '1.0', " +
" CAST(1.0 AS decimal(5,2)), " +
" CAST(11.0 AS decimal(38,20)), " +
" VARCHAR 'onefsadfdsf', " +
" X'000102f0feff', " +
" DATE '2021-07-24'," +
" TIME '02:43:57.987654', " +
" TIMESTAMP '2021-07-24 03:43:57.987654'," +
" TIMESTAMP '2021-07-24 04:43:57.987654 UTC', " +
" UUID '20050910-1330-11e9-ffff-2a86e4085a59' " +
")" +
"UNION ALL " +
"VALUES (" +
" BIGINT '1', " +
" BIGINT '1', " +
" NULL, " +
" NULL, " +
" NULL, " +
" NULL, " +
" NULL, " +
" NULL, " +
" NULL, " +
" NULL, " +
" NULL, " +
" NULL, " +
" NULL, " +
" NULL, " +
" NULL, " +
" NULL " +
")");
assertUpdate("DROP TABLE test_partitioned_table");
}
@Test
public void testCreatePartitionedTableWithNestedTypes()
{
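// Nested types such as ROW can be stored, but partitioning must use top-level primitive columns, hence partitioning on _date only.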
assertUpdate("" +
"CREATE TABLE test_partitioned_table_nested_type (" +
" _string VARCHAR" +
", _struct ROW(_field1 INT, _field2 VARCHAR)" +
", _date DATE" +
") " +
"WITH (" +
" partitioning = ARRAY['_date']" +
")");
dropTable("test_partitioned_table_nested_type");
}
@Test
public void testCreatePartitionedTableAs()
{
File tempDir = getDistributedQueryRunner().getCoordinator().getBaseDataDir().toFile();
String tempDirPath = tempDir.toURI().toASCIIString() + randomTableSuffix();
assertUpdate(
"CREATE TABLE test_create_partitioned_table_as " +
"WITH (" +
"format_version = 2," +
"location = '" + tempDirPath + "', " +
"partitioning = ARRAY['ORDER_STATUS', 'Ship_Priority', 'Bucket(order_key,9)']" +
") " +
"AS " +
"SELECT orderkey AS order_key, shippriority AS ship_priority, orderstatus AS order_status " +
"FROM tpch.tiny.orders",
"SELECT count(*) from orders");
assertEquals(
computeScalar("SHOW CREATE TABLE test_create_partitioned_table_as"),
format(
"CREATE TABLE %s.%s.%s (\n" +
" order_key bigint,\n" +
" ship_priority integer,\n" +
" order_status varchar\n" +
")\n" +
"WITH (\n" +
" format = '%s',\n" +
" format_version = 2,\n" +
" location = '%s',\n" +
" partitioning = ARRAY['order_status','ship_priority','bucket(order_key, 9)']\n" +
")",
getSession().getCatalog().orElseThrow(),
getSession().getSchema().orElseThrow(),
"test_create_partitioned_table_as",
format,
tempDirPath));
assertQuery("SELECT * from test_create_partitioned_table_as", "SELECT orderkey, shippriority, orderstatus FROM orders");
dropTable("test_create_partitioned_table_as");
}
@Test
public void testTableComments()
{
File tempDir = getDistributedQueryRunner().getCoordinator().getBaseDataDir().toFile();
String tempDirPath = tempDir.toURI().toASCIIString() + randomTableSuffix();
String createTableTemplate = "" +
"CREATE TABLE iceberg.tpch.test_table_comments (\n" +
" _x bigint\n" +
")\n" +
"COMMENT '%s'\n" +
"WITH (\n" +
format(" format = '%s',\n", format) +
" format_version = 2,\n" +
format(" location = '%s'\n", tempDirPath) +
")";
String createTableWithoutComment = "" +
"CREATE TABLE iceberg.tpch.test_table_comments (\n" +
" _x bigint\n" +
")\n" +
"WITH (\n" +
" format = '" + format + "',\n" +
" format_version = 2,\n" +
" location = '" + tempDirPath + "'\n" +
")";
String createTableSql = format(createTableTemplate, "test table comment");
assertUpdate(createTableSql);
assertEquals(computeScalar("SHOW CREATE TABLE test_table_comments"), createTableSql);
assertUpdate("COMMENT ON TABLE test_table_comments IS 'different test table comment'");
assertEquals(computeScalar("SHOW CREATE TABLE test_table_comments"), format(createTableTemplate, "different test table comment", format));
assertUpdate("COMMENT ON TABLE test_table_comments IS NULL");
assertEquals(computeScalar("SHOW CREATE TABLE test_table_comments"), createTableWithoutComment);
dropTable("iceberg.tpch.test_table_comments");
assertUpdate(createTableWithoutComment);
assertEquals(computeScalar("SHOW CREATE TABLE test_table_comments"), createTableWithoutComment);
dropTable("iceberg.tpch.test_table_comments");
}
@Test
public void testRollbackSnapshot()
{
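// Capture the snapshot id after each commit so rollback_to_snapshot can restore the table to each earlier state.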
assertUpdate("CREATE TABLE test_rollback (col0 INTEGER, col1 BIGINT)");
long afterCreateTableId = getLatestSnapshotId("test_rollback");
assertUpdate("INSERT INTO test_rollback (col0, col1) VALUES (123, CAST(987 AS BIGINT))", 1);
long afterFirstInsertId = getLatestSnapshotId("test_rollback");
assertUpdate("INSERT INTO test_rollback (col0, col1) VALUES (456, CAST(654 AS BIGINT))", 1);
assertQuery("SELECT * FROM test_rollback ORDER BY col0", "VALUES (123, CAST(987 AS BIGINT)), (456, CAST(654 AS BIGINT))");
assertUpdate(format("CALL system.rollback_to_snapshot('tpch', 'test_rollback', %s)", afterFirstInsertId));
assertQuery("SELECT * FROM test_rollback ORDER BY col0", "VALUES (123, CAST(987 AS BIGINT))");
assertUpdate(format("CALL system.rollback_to_snapshot('tpch', 'test_rollback', %s)", afterCreateTableId));
assertEquals((long) computeActual("SELECT COUNT(*) FROM test_rollback").getOnlyValue(), 0);
assertUpdate("INSERT INTO test_rollback (col0, col1) VALUES (789, CAST(987 AS BIGINT))", 1);
long afterSecondInsertId = getLatestSnapshotId("test_rollback");
// extra insert which should be dropped on rollback
assertUpdate("INSERT INTO test_rollback (col0, col1) VALUES (999, CAST(999 AS BIGINT))", 1);
assertUpdate(format("CALL system.rollback_to_snapshot('tpch', 'test_rollback', %s)", afterSecondInsertId));
assertQuery("SELECT * FROM test_rollback ORDER BY col0", "VALUES (789, CAST(987 AS BIGINT))");
dropTable("test_rollback");
}
private long getLatestSnapshotId(String tableName)
{
return (long) computeActual(format("SELECT snapshot_id FROM \"%s$snapshots\" ORDER BY committed_at DESC LIMIT 1", tableName))
.getOnlyValue();
}
@Override
protected String errorMessageForInsertIntoNotNullColumn(String columnName)
{
return "NULL value not allowed for NOT NULL column: " + columnName;
}
@Test
public void testSchemaEvolution()
{
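// In Iceberg, a re-added column gets a fresh field id, so rows written before the ADD COLUMN read back as NULL for it.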
assertUpdate("CREATE TABLE test_schema_evolution_drop_end (col0 INTEGER, col1 INTEGER, col2 INTEGER)");
assertUpdate("INSERT INTO test_schema_evolution_drop_end VALUES (0, 1, 2)", 1);
assertQuery("SELECT * FROM test_schema_evolution_drop_end", "VALUES(0, 1, 2)");
assertUpdate("ALTER TABLE test_schema_evolution_drop_end DROP COLUMN col2");
assertQuery("SELECT * FROM test_schema_evolution_drop_end", "VALUES(0, 1)");
assertUpdate("ALTER TABLE test_schema_evolution_drop_end ADD COLUMN col2 INTEGER");
assertQuery("SELECT * FROM test_schema_evolution_drop_end", "VALUES(0, 1, NULL)");
assertUpdate("INSERT INTO test_schema_evolution_drop_end VALUES (3, 4, 5)", 1);
assertQuery("SELECT * FROM test_schema_evolution_drop_end", "VALUES(0, 1, NULL), (3, 4, 5)");
dropTable("test_schema_evolution_drop_end");
assertUpdate("CREATE TABLE test_schema_evolution_drop_middle (col0 INTEGER, col1 INTEGER, col2 INTEGER)");
assertUpdate("INSERT INTO test_schema_evolution_drop_middle VALUES (0, 1, 2)", 1);
assertQuery("SELECT * FROM test_schema_evolution_drop_middle", "VALUES(0, 1, 2)");
assertUpdate("ALTER TABLE test_schema_evolution_drop_middle DROP COLUMN col1");
assertQuery("SELECT * FROM test_schema_evolution_drop_middle", "VALUES(0, 2)");
assertUpdate("ALTER TABLE test_schema_evolution_drop_middle ADD COLUMN col1 INTEGER");
assertUpdate("INSERT INTO test_schema_evolution_drop_middle VALUES (3, 4, 5)", 1);
assertQuery("SELECT * FROM test_schema_evolution_drop_middle", "VALUES(0, 2, NULL), (3, 4, 5)");
dropTable("test_schema_evolution_drop_middle");
}
@Test
public void testShowStatsAfterAddColumn()
{
assertUpdate("CREATE TABLE test_show_stats_after_add_column (col0 INTEGER, col1 INTEGER, col2 INTEGER)");
// Insert separately to ensure the table has multiple data files
assertUpdate("INSERT INTO test_show_stats_after_add_column VALUES (1, 2, 3)", 1);
assertUpdate("INSERT INTO test_show_stats_after_add_column VALUES (4, 5, 6)", 1);
assertUpdate("INSERT INTO test_show_stats_after_add_column VALUES (NULL, NULL, NULL)", 1);
assertUpdate("INSERT INTO test_show_stats_after_add_column VALUES (7, 8, 9)", 1);
assertThat(query("SHOW STATS FOR test_show_stats_after_add_column"))
.skippingTypesCheck()
.matches("VALUES " +
" ('col0', NULL, NULL, 25e-2, NULL, '1', '7')," +
" ('col1', NULL, NULL, 25e-2, NULL, '2', '8'), " +
" ('col2', NULL, NULL, 25e-2, NULL, '3', '9'), " +
" (NULL, NULL, NULL, NULL, 4e0, NULL, NULL)");
// Columns added after some data files exist will not have valid statistics because not all files have min/max/null count statistics for the new column
assertUpdate("ALTER TABLE test_show_stats_after_add_column ADD COLUMN col3 INTEGER");
assertUpdate("INSERT INTO test_show_stats_after_add_column VALUES (10, 11, 12, 13)", 1);
assertThat(query("SHOW STATS FOR test_show_stats_after_add_column"))
.skippingTypesCheck()
.matches("VALUES " +
" ('col0', NULL, NULL, 2e-1, NULL, '1', '10')," +
" ('col1', NULL, NULL, 2e-1, NULL, '2', '11'), " +
" ('col2', NULL, NULL, 2e-1, NULL, '3', '12'), " +
" ('col3', NULL, NULL, NULL, NULL, NULL, NULL), " +
" (NULL, NULL, NULL, NULL, 5e0, NULL, NULL)");
dropTable("test_show_stats_after_add_column");
}
@Test
public void testLargeInOnPartitionedColumns()
{
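// A 25,000-element IN list on the partition column exercises predicate handling far above any domain compaction threshold.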
assertUpdate("CREATE TABLE test_in_predicate_large_set (col1 BIGINT, col2 BIGINT) WITH (partitioning = ARRAY['col2'])");
assertUpdate("INSERT INTO test_in_predicate_large_set VALUES (1, 10)", 1L);
assertUpdate("INSERT INTO test_in_predicate_large_set VALUES (2, 20)", 1L);
List<String> predicates = IntStream.range(0, 25_000).boxed()
.map(Object::toString)
.collect(toImmutableList());
String filter = format("col2 IN (%s)", join(",", predicates));
assertThat(query("SELECT * FROM test_in_predicate_large_set WHERE " + filter))
.matches("TABLE test_in_predicate_large_set");
dropTable("test_in_predicate_large_set");
}
@Test
public void testCreateTableFailsOnNonEmptyPath()
{
String tableName = "test_rename_table_" + randomTableSuffix();
String tmpName = "test_rename_table_tmp_" + randomTableSuffix();
try {
assertUpdate("CREATE TABLE " + tmpName + " AS SELECT 1 as a", 1);
assertUpdate("ALTER TABLE " + tmpName + " RENAME TO " + tableName);
assertQueryFails("CREATE TABLE " + tmpName + " AS SELECT 1 as a", "Cannot create a table on a non-empty location.*");
}
finally {
assertUpdate("DROP TABLE IF EXISTS " + tableName);
assertUpdate("DROP TABLE IF EXISTS " + tmpName);
}
}
@Test
public void testCreateTableSucceedsOnEmptyDirectory()
{
File tempDir = getDistributedQueryRunner().getCoordinator().getBaseDataDir().toFile();
String tmpName = "test_rename_table_tmp_" + randomTableSuffix();
Path newPath = tempDir.toPath().resolve(tmpName);
File directory = newPath.toFile();
verify(directory.mkdirs(), "Could not make directory on filesystem");
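// Unlike a non-empty location, an explicitly provided empty directory is a valid target for CREATE TABLE.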
try {
assertUpdate("CREATE TABLE " + tmpName + " WITH (location='" + directory + "') AS SELECT 1 as a", 1);
}
finally {
assertUpdate("DROP TABLE IF EXISTS " + tmpName);
}
}
@Test
public void testCreateTableLike()
{
IcebergFileFormat otherFormat = (format == PARQUET) ? ORC : PARQUET;
testCreateTableLikeForFormat(otherFormat);
}
private void testCreateTableLikeForFormat(IcebergFileFormat otherFormat)
{
File tempDir = getDistributedQueryRunner().getCoordinator().getBaseDataDir().toFile();
String tempDirPath = tempDir.toURI().toASCIIString() + randomTableSuffix();
// LIKE source INCLUDING PROPERTIES copies all the properties of the source table, including the `location`.
// For this reason the source and the copied table will share the same directory.
// This test intentionally does not drop the created tables, to avoid affecting the source table or the information_schema.
assertUpdate(format("CREATE TABLE test_create_table_like_original (col1 INTEGER, aDate DATE) WITH(format = '%s', location = '%s', partitioning = ARRAY['aDate'])", format, tempDirPath));
assertEquals(getTablePropertiesString("test_create_table_like_original"), "WITH (\n" +
format(" format = '%s',\n", format) +
" format_version = 2,\n" +
format(" location = '%s',\n", tempDirPath) +
" partitioning = ARRAY['adate']\n" +
")");
assertUpdate("CREATE TABLE test_create_table_like_copy0 (LIKE test_create_table_like_original, col2 INTEGER)");
assertUpdate("INSERT INTO test_create_table_like_copy0 (col1, aDate, col2) VALUES (1, CAST('1950-06-28' AS DATE), 3)", 1);
assertQuery("SELECT * from test_create_table_like_copy0", "VALUES(1, CAST('1950-06-28' AS DATE), 3)");
assertUpdate("CREATE TABLE test_create_table_like_copy1 (LIKE test_create_table_like_original)");
assertEquals(getTablePropertiesString("test_create_table_like_copy1"), "WITH (\n" +
format(" format = '%s',\n format_version = 2,\n location = '%s'\n)", format, tempDir + "/iceberg_data/tpch/test_create_table_like_copy1"));
assertUpdate("CREATE TABLE test_create_table_like_copy2 (LIKE test_create_table_like_original EXCLUDING PROPERTIES)");
assertEquals(getTablePropertiesString("test_create_table_like_copy2"), "WITH (\n" +
format(" format = '%s',\n format_version = 2,\n location = '%s'\n)", format, tempDir + "/iceberg_data/tpch/test_create_table_like_copy2"));
dropTable("test_create_table_like_copy2");
assertQueryFails("CREATE TABLE test_create_table_like_copy3 (LIKE test_create_table_like_original INCLUDING PROPERTIES)",
"Cannot create a table on a non-empty location.*");
assertQueryFails(format("CREATE TABLE test_create_table_like_copy4 (LIKE test_create_table_like_original INCLUDING PROPERTIES) WITH (format = '%s')", otherFormat),
"Cannot create a table on a non-empty location.*");
}
private String getTablePropertiesString(String tableName)
{
MaterializedResult showCreateTable = computeActual("SHOW CREATE TABLE " + tableName);
String createTable = (String) getOnlyElement(showCreateTable.getOnlyColumnAsSet());
Matcher matcher = WITH_CLAUSE_EXTRACTOR.matcher(createTable);
return matcher.matches() ? matcher.group(1) : null;
}
@Test
public void testPredicating()
{
assertUpdate("CREATE TABLE test_predicating_on_real (col REAL)");
assertUpdate("INSERT INTO test_predicating_on_real VALUES 1.2", 1);
assertQuery("SELECT * FROM test_predicating_on_real WHERE col = 1.2", "VALUES 1.2");
dropTable("test_predicating_on_real");
}
@Test
public void testHourTransform()
{
assertUpdate("CREATE TABLE test_hour_transform (d TIMESTAMP(6), b BIGINT) WITH (partitioning = ARRAY['hour(d)'])");
@Language("SQL") String values = "VALUES " +
"(NULL, 101)," +
"(TIMESTAMP '1969-12-31 22:22:22.222222', 8)," +
"(TIMESTAMP '1969-12-31 23:33:11.456789', 9)," +
"(TIMESTAMP '1969-12-31 23:44:55.567890', 10)," +
"(TIMESTAMP '1970-01-01 00:55:44.765432', 11)," +
"(TIMESTAMP '2015-01-01 10:01:23.123456', 1)," +
"(TIMESTAMP '2015-01-01 10:10:02.987654', 2)," +
"(TIMESTAMP '2015-01-01 10:55:00.456789', 3)," +
"(TIMESTAMP '2015-05-15 12:05:01.234567', 4)," +
"(TIMESTAMP '2015-05-15 12:21:02.345678', 5)," +
"(TIMESTAMP '2020-02-21 13:11:11.876543', 6)," +
"(TIMESTAMP '2020-02-21 13:12:12.654321', 7)";
assertUpdate("INSERT INTO test_hour_transform " + values, 12);
assertQuery("SELECT * FROM test_hour_transform", values);
@Language("SQL") String expected = "VALUES " +
"(NULL, 1, NULL, NULL, 101, 101), " +
"(-2, 1, TIMESTAMP '1969-12-31 22:22:22.222222', TIMESTAMP '1969-12-31 22:22:22.222222', 8, 8), " +
"(-1, 2, TIMESTAMP '1969-12-31 23:33:11.456789', TIMESTAMP '1969-12-31 23:44:55.567890', 9, 10), " +
"(0, 1, TIMESTAMP '1970-01-01 00:55:44.765432', TIMESTAMP '1970-01-01 00:55:44.765432', 11, 11), " +
"(394474, 3, TIMESTAMP '2015-01-01 10:01:23.123456', TIMESTAMP '2015-01-01 10:55:00.456789', 1, 3), " +
"(397692, 2, TIMESTAMP '2015-05-15 12:05:01.234567', TIMESTAMP '2015-05-15 12:21:02.345678', 4, 5), " +
"(439525, 2, TIMESTAMP '2020-02-21 13:11:11.876543', TIMESTAMP '2020-02-21 13:12:12.654321', 6, 7)";
String expectedTimestampStats = "'1969-12-31 22:22:22.222222', '2020-02-21 13:12:12.654321'";
if (format == ORC) {
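// ORC file statistics keep timestamps at millisecond precision, so the expected min/max are widened to whole-millisecond bounds.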
expected = "VALUES " +
"(NULL, 1, NULL, NULL, 101, 101), " +
"(-2, 1, TIMESTAMP '1969-12-31 22:22:22.222000', TIMESTAMP '1969-12-31 22:22:22.222999', 8, 8), " +
"(-1, 2, TIMESTAMP '1969-12-31 23:33:11.456000', TIMESTAMP '1969-12-31 23:44:55.567999', 9, 10), " +
"(0, 1, TIMESTAMP '1970-01-01 00:55:44.765000', TIMESTAMP '1970-01-01 00:55:44.765999', 11, 11), " +
"(394474, 3, TIMESTAMP '2015-01-01 10:01:23.123000', TIMESTAMP '2015-01-01 10:55:00.456999', 1, 3), " +
"(397692, 2, TIMESTAMP '2015-05-15 12:05:01.234000', TIMESTAMP '2015-05-15 12:21:02.345999', 4, 5), " +
"(439525, 2, TIMESTAMP '2020-02-21 13:11:11.876000', TIMESTAMP '2020-02-21 13:12:12.654999', 6, 7)";
expectedTimestampStats = "'1969-12-31 22:22:22.222000', '2020-02-21 13:12:12.654999'";
}
assertQuery("SELECT partition.d_hour, record_count, data.d.min, data.d.max, data.b.min, data.b.max FROM \"test_hour_transform$partitions\"", expected);
// Exercise IcebergMetadata.applyFilter with non-empty Constraint.predicate, via non-pushdownable predicates
assertQuery(
"SELECT * FROM test_hour_transform WHERE day_of_week(d) = 3 AND b % 7 = 3",
"VALUES (TIMESTAMP '1969-12-31 23:44:55.567890', 10)");
assertThat(query("SHOW STATS FOR test_hour_transform"))
.skippingTypesCheck()
.matches("VALUES " +
" ('d', NULL, NULL, 0.0833333e0, NULL, " + expectedTimestampStats + "), " +
" ('b', NULL, NULL, 0e0, NULL, '1', '101'), " +
" (NULL, NULL, NULL, NULL, 12e0, NULL, NULL)");
dropTable("test_hour_transform");
}
@Test
public void testDayTransformDate()
{
assertUpdate("CREATE TABLE test_day_transform_date (d DATE, b BIGINT) WITH (partitioning = ARRAY['day(d)'])");
@Language("SQL") String values = "VALUES " +
"(NULL, 101)," +
"(DATE '1969-01-01', 10), " +
"(DATE '1969-12-31', 11), " +
"(DATE '1970-01-01', 1), " +
"(DATE '1970-03-04', 2), " +
"(DATE '2015-01-01', 3), " +
"(DATE '2015-01-13', 4), " +
"(DATE '2015-01-13', 5), " +
"(DATE '2015-05-15', 6), " +
"(DATE '2015-05-15', 7), " +
"(DATE '2020-02-21', 8), " +
"(DATE '2020-02-21', 9)";
assertUpdate("INSERT INTO test_day_transform_date " + values, 12);
assertQuery("SELECT * FROM test_day_transform_date", values);
assertQuery(
"SELECT partition.d_day, record_count, data.d.min, data.d.max, data.b.min, data.b.max FROM \"test_day_transform_date$partitions\"",
"VALUES " +
"(NULL, 1, NULL, NULL, 101, 101), " +
"(DATE '1969-01-01', 1, DATE '1969-01-01', DATE '1969-01-01', 10, 10), " +
"(DATE '1969-12-31', 1, DATE '1969-12-31', DATE '1969-12-31', 11, 11), " +
"(DATE '1970-01-01', 1, DATE '1970-01-01', DATE '1970-01-01', 1, 1), " +
"(DATE '1970-03-04', 1, DATE '1970-03-04', DATE '1970-03-04', 2, 2), " +
"(DATE '2015-01-01', 1, DATE '2015-01-01', DATE '2015-01-01', 3, 3), " +
"(DATE '2015-01-13', 2, DATE '2015-01-13', DATE '2015-01-13', 4, 5), " +
"(DATE '2015-05-15', 2, DATE '2015-05-15', DATE '2015-05-15', 6, 7), " +
"(DATE '2020-02-21', 2, DATE '2020-02-21', DATE '2020-02-21', 8, 9)");
// Exercise IcebergMetadata.applyFilter with non-empty Constraint.predicate, via non-pushdownable predicates
assertQuery(
"SELECT * FROM test_day_transform_date WHERE day_of_week(d) = 3 AND b % 7 = 3",
"VALUES (DATE '1969-01-01', 10)");
assertThat(query("SHOW STATS FOR test_day_transform_date"))
.skippingTypesCheck()
.matches("VALUES " +
" ('d', NULL, NULL, 0.0833333e0, NULL, '1969-01-01', '2020-02-21'), " +
" ('b', NULL, NULL, 0e0, NULL, '1', '101'), " +
" (NULL, NULL, NULL, NULL, 12e0, NULL, NULL)");
dropTable("test_day_transform_date");
}
@Test
public void testDayTransformTimestamp()
{
assertUpdate("CREATE TABLE test_day_transform_timestamp (d TIMESTAMP(6), b BIGINT) WITH (partitioning = ARRAY['day(d)'])");
@Language("SQL") String values = "VALUES " +
"(NULL, 101)," +
"(TIMESTAMP '1969-12-25 15:13:12.876543', 8)," +
"(TIMESTAMP '1969-12-30 18:47:33.345678', 9)," +
"(TIMESTAMP '1969-12-31 00:00:00.000000', 10)," +
"(TIMESTAMP '1969-12-31 05:06:07.234567', 11)," +
"(TIMESTAMP '1970-01-01 12:03:08.456789', 12)," +
"(TIMESTAMP '2015-01-01 10:01:23.123456', 1)," +
"(TIMESTAMP '2015-01-01 11:10:02.987654', 2)," +
"(TIMESTAMP '2015-01-01 12:55:00.456789', 3)," +
"(TIMESTAMP '2015-05-15 13:05:01.234567', 4)," +
"(TIMESTAMP '2015-05-15 14:21:02.345678', 5)," +
"(TIMESTAMP '2020-02-21 15:11:11.876543', 6)," +
"(TIMESTAMP '2020-02-21 16:12:12.654321', 7)";
assertUpdate("INSERT INTO test_day_transform_timestamp " + values, 13);
assertQuery("SELECT * FROM test_day_transform_timestamp", values);
@Language("SQL") String expected = "VALUES " +
"(NULL, 1, NULL, NULL, 101, 101), " +
"(DATE '1969-12-25', 1, TIMESTAMP '1969-12-25 15:13:12.876543', TIMESTAMP '1969-12-25 15:13:12.876543', 8, 8), " +
"(DATE '1969-12-30', 1, TIMESTAMP '1969-12-30 18:47:33.345678', TIMESTAMP '1969-12-30 18:47:33.345678', 9, 9), " +
"(DATE '1969-12-31', 2, TIMESTAMP '1969-12-31 00:00:00.000000', TIMESTAMP '1969-12-31 05:06:07.234567', 10, 11), " +
"(DATE '1970-01-01', 1, TIMESTAMP '1970-01-01 12:03:08.456789', TIMESTAMP '1970-01-01 12:03:08.456789', 12, 12), " +
"(DATE '2015-01-01', 3, TIMESTAMP '2015-01-01 10:01:23.123456', TIMESTAMP '2015-01-01 12:55:00.456789', 1, 3), " +
"(DATE '2015-05-15', 2, TIMESTAMP '2015-05-15 13:05:01.234567', TIMESTAMP '2015-05-15 14:21:02.345678', 4, 5), " +
"(DATE '2020-02-21', 2, TIMESTAMP '2020-02-21 15:11:11.876543', TIMESTAMP '2020-02-21 16:12:12.654321', 6, 7)";
String expectedTimestampStats = "'1969-12-25 15:13:12.876543', '2020-02-21 16:12:12.654321'";
if (format == ORC) {
expected = "VALUES " +
"(NULL, 1, NULL, NULL, 101, 101), " +
"(DATE '1969-12-25', 1, TIMESTAMP '1969-12-25 15:13:12.876000', TIMESTAMP '1969-12-25 15:13:12.876999', 8, 8), " +
"(DATE '1969-12-30', 1, TIMESTAMP '1969-12-30 18:47:33.345000', TIMESTAMP '1969-12-30 18:47:33.345999', 9, 9), " +
"(DATE '1969-12-31', 2, TIMESTAMP '1969-12-31 00:00:00.000000', TIMESTAMP '1969-12-31 05:06:07.234999', 10, 11), " +
"(DATE '1970-01-01', 1, TIMESTAMP '1970-01-01 12:03:08.456000', TIMESTAMP '1970-01-01 12:03:08.456999', 12, 12), " +
"(DATE '2015-01-01', 3, TIMESTAMP '2015-01-01 10:01:23.123000', TIMESTAMP '2015-01-01 12:55:00.456999', 1, 3), " +
"(DATE '2015-05-15', 2, TIMESTAMP '2015-05-15 13:05:01.234000', TIMESTAMP '2015-05-15 14:21:02.345999', 4, 5), " +
"(DATE '2020-02-21', 2, TIMESTAMP '2020-02-21 15:11:11.876000', TIMESTAMP '2020-02-21 16:12:12.654999', 6, 7)";
expectedTimestampStats = "'1969-12-25 15:13:12.876000', '2020-02-21 16:12:12.654999'";
}
assertQuery("SELECT partition.d_day, record_count, data.d.min, data.d.max, data.b.min, data.b.max FROM \"test_day_transform_timestamp$partitions\"", expected);
// Exercise IcebergMetadata.applyFilter with non-empty Constraint.predicate, via non-pushdownable predicates
assertQuery(
"SELECT * FROM test_day_transform_timestamp WHERE day_of_week(d) = 3 AND b % 7 = 3",
"VALUES (TIMESTAMP '1969-12-31 00:00:00.000000', 10)");
assertThat(query("SHOW STATS FOR test_day_transform_timestamp"))
.skippingTypesCheck()
.matches("VALUES " +
" ('d', NULL, NULL, 0.0769231e0, NULL, " + expectedTimestampStats + "), " +
" ('b', NULL, NULL, 0e0, NULL, '1', '101'), " +
" (NULL, NULL, NULL, NULL, 13e0, NULL, NULL)");
dropTable("test_day_transform_timestamp");
}
@Test
public void testMonthTransformDate()
{
assertUpdate("CREATE TABLE test_month_transform_date (d DATE, b BIGINT) WITH (partitioning = ARRAY['month(d)'])");
@Language("SQL") String values = "VALUES " +
"(NULL, 101)," +
"(DATE '1969-11-13', 1)," +
"(DATE '1969-12-01', 2)," +
"(DATE '1969-12-02', 3)," +
"(DATE '1969-12-31', 4)," +
"(DATE '1970-01-01', 5), " +
"(DATE '1970-05-13', 6), " +
"(DATE '1970-12-31', 7), " +
"(DATE '2020-01-01', 8), " +
"(DATE '2020-06-16', 9), " +
"(DATE '2020-06-28', 10), " +
"(DATE '2020-06-06', 11), " +
"(DATE '2020-07-18', 12), " +
"(DATE '2020-07-28', 13), " +
"(DATE '2020-12-31', 14)";
assertUpdate("INSERT INTO test_month_transform_date " + values, 15);
assertQuery("SELECT * FROM test_month_transform_date", values);
assertQuery(
"SELECT partition.d_month, record_count, data.d.min, data.d.max, data.b.min, data.b.max FROM \"test_month_transform_date$partitions\"",
"VALUES " +
"(NULL, 1, NULL, NULL, 101, 101), " +
"(-2, 1, DATE '1969-11-13', DATE '1969-11-13', 1, 1), " +
"(-1, 3, DATE '1969-12-01', DATE '1969-12-31', 2, 4), " +
"(0, 1, DATE '1970-01-01', DATE '1970-01-01', 5, 5), " +
"(4, 1, DATE '1970-05-13', DATE '1970-05-13', 6, 6), " +
"(11, 1, DATE '1970-12-31', DATE '1970-12-31', 7, 7), " +
"(600, 1, DATE '2020-01-01', DATE '2020-01-01', 8, 8), " +
"(605, 3, DATE '2020-06-06', DATE '2020-06-28', 9, 11), " +
"(606, 2, DATE '2020-07-18', DATE '2020-07-28', 12, 13), " +
"(611, 1, DATE '2020-12-31', DATE '2020-12-31', 14, 14)");
// Exercise IcebergMetadata.applyFilter with non-empty Constraint.predicate, via non-pushdownable predicates
assertQuery(
"SELECT * FROM test_month_transform_date WHERE day_of_week(d) = 7 AND b % 7 = 3",
"VALUES (DATE '2020-06-28', 10)");
assertThat(query("SHOW STATS FOR test_month_transform_date"))
.skippingTypesCheck()
.matches("VALUES " +
" ('d', NULL, NULL, 0.0666667e0, NULL, '1969-11-13', '2020-12-31'), " +
" ('b', NULL, NULL, 0e0, NULL, '1', '101'), " +
" (NULL, NULL, NULL, NULL, 15e0, NULL, NULL)");
dropTable("test_month_transform_date");
}
@Test
public void testMonthTransformTimestamp()
{
assertUpdate("CREATE TABLE test_month_transform_timestamp (d TIMESTAMP(6), b BIGINT) WITH (partitioning = ARRAY['month(d)'])");
@Language("SQL") String values = "VALUES " +
"(NULL, 101)," +
"(TIMESTAMP '1969-11-15 15:13:12.876543', 8)," +
"(TIMESTAMP '1969-11-19 18:47:33.345678', 9)," +
"(TIMESTAMP '1969-12-01 00:00:00.000000', 10)," +
"(TIMESTAMP '1969-12-01 05:06:07.234567', 11)," +
"(TIMESTAMP '1970-01-01 12:03:08.456789', 12)," +
"(TIMESTAMP '2015-01-01 10:01:23.123456', 1)," +
"(TIMESTAMP '2015-01-01 11:10:02.987654', 2)," +
"(TIMESTAMP '2015-01-01 12:55:00.456789', 3)," +
"(TIMESTAMP '2015-05-15 13:05:01.234567', 4)," +
"(TIMESTAMP '2015-05-15 14:21:02.345678', 5)," +
"(TIMESTAMP '2020-02-21 15:11:11.876543', 6)," +
"(TIMESTAMP '2020-02-21 16:12:12.654321', 7)";
assertUpdate("INSERT INTO test_month_transform_timestamp " + values, 13);
assertQuery("SELECT * FROM test_month_transform_timestamp", values);
@Language("SQL") String expected = "VALUES " +
"(NULL, 1, NULL, NULL, 101, 101), " +
"(-2, 2, TIMESTAMP '1969-11-15 15:13:12.876543', TIMESTAMP '1969-11-19 18:47:33.345678', 8, 9), " +
"(-1, 2, TIMESTAMP '1969-12-01 00:00:00.000000', TIMESTAMP '1969-12-01 05:06:07.234567', 10, 11), " +
"(0, 1, TIMESTAMP '1970-01-01 12:03:08.456789', TIMESTAMP '1970-01-01 12:03:08.456789', 12, 12), " +
"(540, 3, TIMESTAMP '2015-01-01 10:01:23.123456', TIMESTAMP '2015-01-01 12:55:00.456789', 1, 3), " +
"(544, 2, TIMESTAMP '2015-05-15 13:05:01.234567', TIMESTAMP '2015-05-15 14:21:02.345678', 4, 5), " +
"(601, 2, TIMESTAMP '2020-02-21 15:11:11.876543', TIMESTAMP '2020-02-21 16:12:12.654321', 6, 7)";
String expectedTimestampStats = "'1969-11-15 15:13:12.876543', '2020-02-21 16:12:12.654321'";
if (format == ORC) {
expected = "VALUES " +
"(NULL, 1, NULL, NULL, 101, 101), " +
"(-2, 2, TIMESTAMP '1969-11-15 15:13:12.876000', TIMESTAMP '1969-11-19 18:47:33.345999', 8, 9), " +
"(-1, 2, TIMESTAMP '1969-12-01 00:00:00.000000', TIMESTAMP '1969-12-01 05:06:07.234999', 10, 11), " +
"(0, 1, TIMESTAMP '1970-01-01 12:03:08.456000', TIMESTAMP '1970-01-01 12:03:08.456999', 12, 12), " +
"(540, 3, TIMESTAMP '2015-01-01 10:01:23.123000', TIMESTAMP '2015-01-01 12:55:00.456999', 1, 3), " +
"(544, 2, TIMESTAMP '2015-05-15 13:05:01.234000', TIMESTAMP '2015-05-15 14:21:02.345999', 4, 5), " +
"(601, 2, TIMESTAMP '2020-02-21 15:11:11.876000', TIMESTAMP '2020-02-21 16:12:12.654999', 6, 7)";
expectedTimestampStats = "'1969-11-15 15:13:12.876000', '2020-02-21 16:12:12.654999'";
}
assertQuery("SELECT partition.d_month, record_count, data.d.min, data.d.max, data.b.min, data.b.max FROM \"test_month_transform_timestamp$partitions\"", expected);
// Exercise IcebergMetadata.applyFilter with non-empty Constraint.predicate, via non-pushdownable predicates
assertQuery(
"SELECT * FROM test_month_transform_timestamp WHERE day_of_week(d) = 1 AND b % 7 = 3",
"VALUES (TIMESTAMP '1969-12-01 00:00:00.000000', 10)");
assertThat(query("SHOW STATS FOR test_month_transform_timestamp"))
.skippingTypesCheck()
.matches("VALUES " +
" ('d', NULL, NULL, 0.0769231e0, NULL, " + expectedTimestampStats + "), " +
" ('b', NULL, NULL, 0e0, NULL, '1', '101'), " +
" (NULL, NULL, NULL, NULL, 13e0, NULL, NULL)");
dropTable("test_month_transform_timestamp");
}
@Test
public void testYearTransformDate()
{
assertUpdate("CREATE TABLE test_year_transform_date (d DATE, b BIGINT) WITH (partitioning = ARRAY['year(d)'])");
@Language("SQL") String values = "VALUES " +
"(NULL, 101)," +
"(DATE '1968-10-13', 1), " +
"(DATE '1969-01-01', 2), " +
"(DATE '1969-03-15', 3), " +
"(DATE '1970-01-01', 4), " +
"(DATE '1970-03-05', 5), " +
"(DATE '2015-01-01', 6), " +
"(DATE '2015-06-16', 7), " +
"(DATE '2015-07-28', 8), " +
"(DATE '2016-05-15', 9), " +
"(DATE '2016-06-06', 10), " +
"(DATE '2020-02-21', 11), " +
"(DATE '2020-11-10', 12)";
assertUpdate("INSERT INTO test_year_transform_date " + values, 13);
assertQuery("SELECT * FROM test_year_transform_date", values);
assertQuery(
"SELECT partition.d_year, record_count, data.d.min, data.d.max, data.b.min, data.b.max FROM \"test_year_transform_date$partitions\"",
"VALUES " +
"(NULL, 1, NULL, NULL, 101, 101), " +
"(-2, 1, DATE '1968-10-13', DATE '1968-10-13', 1, 1), " +
"(-1, 2, DATE '1969-01-01', DATE '1969-03-15', 2, 3), " +
"(0, 2, DATE '1970-01-01', DATE '1970-03-05', 4, 5), " +
"(45, 3, DATE '2015-01-01', DATE '2015-07-28', 6, 8), " +
"(46, 2, DATE '2016-05-15', DATE '2016-06-06', 9, 10), " +
"(50, 2, DATE '2020-02-21', DATE '2020-11-10', 11, 12)");
// Exercise IcebergMetadata.applyFilter with non-empty Constraint.predicate, via non-pushdownable predicates
assertQuery(
"SELECT * FROM test_year_transform_date WHERE day_of_week(d) = 1 AND b % 7 = 3",
"VALUES (DATE '2016-06-06', 10)");
assertThat(query("SHOW STATS FOR test_year_transform_date"))
.skippingTypesCheck()
.matches("VALUES " +
" ('d', NULL, NULL, 0.0769231e0, NULL, '1968-10-13', '2020-11-10'), " +
" ('b', NULL, NULL, 0e0, NULL, '1', '101'), " +
" (NULL, NULL, NULL, NULL, 13e0, NULL, NULL)");
dropTable("test_year_transform_date");
}
@Test
public void testYearTransformTimestamp()
{
assertUpdate("CREATE TABLE test_year_transform_timestamp (d TIMESTAMP(6), b BIGINT) WITH (partitioning = ARRAY['year(d)'])");
@Language("SQL") String values = "VALUES " +
"(NULL, 101)," +
"(TIMESTAMP '1968-03-15 15:13:12.876543', 1)," +
"(TIMESTAMP '1968-11-19 18:47:33.345678', 2)," +
"(TIMESTAMP '1969-01-01 00:00:00.000000', 3)," +
"(TIMESTAMP '1969-01-01 05:06:07.234567', 4)," +
"(TIMESTAMP '1970-01-18 12:03:08.456789', 5)," +
"(TIMESTAMP '1970-03-14 10:01:23.123456', 6)," +
"(TIMESTAMP '1970-08-19 11:10:02.987654', 7)," +
"(TIMESTAMP '1970-12-31 12:55:00.456789', 8)," +
"(TIMESTAMP '2015-05-15 13:05:01.234567', 9)," +
"(TIMESTAMP '2015-09-15 14:21:02.345678', 10)," +
"(TIMESTAMP '2020-02-21 15:11:11.876543', 11)," +
"(TIMESTAMP '2020-08-21 16:12:12.654321', 12)";
assertUpdate("INSERT INTO test_year_transform_timestamp " + values, 13);
assertQuery("SELECT * FROM test_year_transform_timestamp", values);
@Language("SQL") String expected = "VALUES " +
"(NULL, 1, NULL, NULL, 101, 101), " +
"(-2, 2, TIMESTAMP '1968-03-15 15:13:12.876543', TIMESTAMP '1968-11-19 18:47:33.345678', 1, 2), " +
"(-1, 2, TIMESTAMP '1969-01-01 00:00:00.000000', TIMESTAMP '1969-01-01 05:06:07.234567', 3, 4), " +
"(0, 4, TIMESTAMP '1970-01-18 12:03:08.456789', TIMESTAMP '1970-12-31 12:55:00.456789', 5, 8), " +
"(45, 2, TIMESTAMP '2015-05-15 13:05:01.234567', TIMESTAMP '2015-09-15 14:21:02.345678', 9, 10), " +
"(50, 2, TIMESTAMP '2020-02-21 15:11:11.876543', TIMESTAMP '2020-08-21 16:12:12.654321', 11, 12)";
String expectedTimestampStats = "'1968-03-15 15:13:12.876543', '2020-08-21 16:12:12.654321'";
if (format == ORC) {
expected = "VALUES " +
"(NULL, 1, NULL, NULL, 101, 101), " +
"(-2, 2, TIMESTAMP '1968-03-15 15:13:12.876000', TIMESTAMP '1968-11-19 18:47:33.345999', 1, 2), " +
"(-1, 2, TIMESTAMP '1969-01-01 00:00:00.000000', TIMESTAMP '1969-01-01 05:06:07.234999', 3, 4), " +
"(0, 4, TIMESTAMP '1970-01-18 12:03:08.456000', TIMESTAMP '1970-12-31 12:55:00.456999', 5, 8), " +
"(45, 2, TIMESTAMP '2015-05-15 13:05:01.234000', TIMESTAMP '2015-09-15 14:21:02.345999', 9, 10), " +
"(50, 2, TIMESTAMP '2020-02-21 15:11:11.876000', TIMESTAMP '2020-08-21 16:12:12.654999', 11, 12)";
expectedTimestampStats = "'1968-03-15 15:13:12.876000', '2020-08-21 16:12:12.654999'";
}
assertQuery("SELECT partition.d_year, record_count, data.d.min, data.d.max, data.b.min, data.b.max FROM \"test_year_transform_timestamp$partitions\"", expected);
// Exercise IcebergMetadata.applyFilter with non-empty Constraint.predicate, via non-pushdownable predicates
assertQuery(
"SELECT * FROM test_year_transform_timestamp WHERE day_of_week(d) = 2 AND b % 7 = 3",
"VALUES (TIMESTAMP '2015-09-15 14:21:02.345678', 10)");
assertThat(query("SHOW STATS FOR test_year_transform_timestamp"))
.skippingTypesCheck()
.matches("VALUES " +
" ('d', NULL, NULL, 0.0769231e0, NULL, " + expectedTimestampStats + "), " +
" ('b', NULL, NULL, 0e0, NULL, '1', '101'), " +
" (NULL, NULL, NULL, NULL, 13e0, NULL, NULL)");
dropTable("test_year_transform_timestamp");
}
@Test
public void testTruncateTextTransform()
{
assertUpdate("CREATE TABLE test_truncate_text_transform (d VARCHAR, b BIGINT) WITH (partitioning = ARRAY['truncate(d, 2)'])");
String select = "SELECT partition.d_trunc, record_count, data.d.min AS d_min, data.d.max AS d_max, data.b.min AS b_min, data.b.max AS b_max FROM \"test_truncate_text_transform$partitions\"";
assertUpdate("INSERT INTO test_truncate_text_transform VALUES" +
"(NULL, 101)," +
"('abcd', 1)," +
"('abxy', 2)," +
"('ab598', 3)," +
"('mommy', 4)," +
"('moscow', 5)," +
"('Greece', 6)," +
"('Grozny', 7)", 8);
assertQuery("SELECT partition.d_trunc FROM \"test_truncate_text_transform$partitions\"", "VALUES NULL, 'ab', 'mo', 'Gr'");
assertQuery("SELECT b FROM test_truncate_text_transform WHERE substring(d, 1, 2) = 'ab'", "VALUES 1, 2, 3");
assertQuery(select + " WHERE partition.d_trunc = 'ab'", "VALUES ('ab', 3, 'ab598', 'abxy', 1, 3)");
assertQuery("SELECT b FROM test_truncate_text_transform WHERE substring(d, 1, 2) = 'mo'", "VALUES 4, 5");
assertQuery(select + " WHERE partition.d_trunc = 'mo'", "VALUES ('mo', 2, 'mommy', 'moscow', 4, 5)");
assertQuery("SELECT b FROM test_truncate_text_transform WHERE substring(d, 1, 2) = 'Gr'", "VALUES 6, 7");
assertQuery(select + " WHERE partition.d_trunc = 'Gr'", "VALUES ('Gr', 2, 'Greece', 'Grozny', 6, 7)");
// Exercise IcebergMetadata.applyFilter with non-empty Constraint.predicate, via non-pushdownable predicates
assertQuery(
"SELECT * FROM test_truncate_text_transform WHERE length(d) = 4 AND b % 7 = 2",
"VALUES ('abxy', 2)");
assertThat(query("SHOW STATS FOR test_truncate_text_transform"))
.skippingTypesCheck()
.matches("VALUES " +
" ('d', " + (format == PARQUET ? "205e0" : "NULL") + ", NULL, 0.125e0, NULL, NULL, NULL), " +
" ('b', NULL, NULL, 0e0, NULL, '1', '101'), " +
" (NULL, NULL, NULL, NULL, 8e0, NULL, NULL)");
dropTable("test_truncate_text_transform");
}
@Test(dataProvider = "truncateNumberTypesProvider")
public void testTruncateIntegerTransform(String dataType)
{
String table = format("test_truncate_%s_transform", dataType);
assertUpdate(format("CREATE TABLE " + table + " (d %s, b BIGINT) WITH (partitioning = ARRAY['truncate(d, 10)'])", dataType));
String select = "SELECT partition.d_trunc, record_count, data.d.min AS d_min, data.d.max AS d_max, data.b.min AS b_min, data.b.max AS b_max FROM \"" + table + "$partitions\"";
assertUpdate("INSERT INTO " + table + " VALUES" +
"(NULL, 101)," +
"(0, 1)," +
"(1, 2)," +
"(5, 3)," +
"(9, 4)," +
"(10, 5)," +
"(11, 6)," +
"(120, 7)," +
"(121, 8)," +
"(123, 9)," +
"(-1, 10)," +
"(-5, 11)," +
"(-10, 12)," +
"(-11, 13)," +
"(-123, 14)," +
"(-130, 15)", 16);
assertQuery("SELECT partition.d_trunc FROM \"" + table + "$partitions\"", "VALUES NULL, 0, 10, 120, -10, -20, -130");
assertQuery("SELECT b FROM " + table + " WHERE d IN (0, 1, 5, 9)", "VALUES 1, 2, 3, 4");
assertQuery(select + " WHERE partition.d_trunc = 0", "VALUES (0, 4, 0, 9, 1, 4)");
assertQuery("SELECT b FROM " + table + " WHERE d IN (10, 11)", "VALUES 5, 6");
assertQuery(select + " WHERE partition.d_trunc = 10", "VALUES (10, 2, 10, 11, 5, 6)");
assertQuery("SELECT b FROM " + table + " WHERE d IN (120, 121, 123)", "VALUES 7, 8, 9");
assertQuery(select + " WHERE partition.d_trunc = 120", "VALUES (120, 3, 120, 123, 7, 9)");
assertQuery("SELECT b FROM " + table + " WHERE d IN (-1, -5, -10)", "VALUES 10, 11, 12");
assertQuery(select + " WHERE partition.d_trunc = -10", "VALUES (-10, 3, -10, -1, 10, 12)");
assertQuery("SELECT b FROM " + table + " WHERE d = -11", "VALUES 13");
assertQuery(select + " WHERE partition.d_trunc = -20", "VALUES (-20, 1, -11, -11, 13, 13)");
assertQuery("SELECT b FROM " + table + " WHERE d IN (-123, -130)", "VALUES 14, 15");
assertQuery(select + " WHERE partition.d_trunc = -130", "VALUES (-130, 2, -130, -123, 14, 15)");
// Exercise IcebergMetadata.applyFilter with non-empty Constraint.predicate, via non-pushdownable predicates
assertQuery(
"SELECT * FROM " + table + " WHERE d % 10 = -1 AND b % 7 = 3",
"VALUES (-1, 10)");
assertThat(query("SHOW STATS FOR " + table))
.skippingTypesCheck()
.matches("VALUES " +
" ('d', NULL, NULL, 0.0625e0, NULL, '-130', '123'), " +
" ('b', NULL, NULL, 0e0, NULL, '1', '101'), " +
" (NULL, NULL, NULL, NULL, 16e0, NULL, NULL)");
dropTable(table);
}
@DataProvider
public Object[][] truncateNumberTypesProvider()
{
return new Object[][] {
{"integer"},
{"bigint"},
};
}
@Test
public void testTruncateDecimalTransform()
{
assertUpdate("CREATE TABLE test_truncate_decimal_transform (d DECIMAL(9, 2), b BIGINT) WITH (partitioning = ARRAY['truncate(d, 10)'])");
String select = "SELECT partition.d_trunc, record_count, data.d.min AS d_min, data.d.max AS d_max, data.b.min AS b_min, data.b.max AS b_max FROM \"test_truncate_decimal_transform$partitions\"";
assertUpdate("INSERT INTO test_truncate_decimal_transform VALUES" +
"(NULL, 101)," +
"(12.34, 1)," +
"(12.30, 2)," +
"(12.29, 3)," +
"(0.05, 4)," +
"(-0.05, 5)", 6);
assertQuery("SELECT partition.d_trunc FROM \"test_truncate_decimal_transform$partitions\"", "VALUES NULL, 12.30, 12.20, 0.00, -0.10");
assertQuery("SELECT b FROM test_truncate_decimal_transform WHERE d IN (12.34, 12.30)", "VALUES 1, 2");
assertQuery(select + " WHERE partition.d_trunc = 12.30", "VALUES (12.30, 2, 12.30, 12.34, 1, 2)");
assertQuery("SELECT b FROM test_truncate_decimal_transform WHERE d = 12.29", "VALUES 3");
assertQuery(select + " WHERE partition.d_trunc = 12.20", "VALUES (12.20, 1, 12.29, 12.29, 3, 3)");
assertQuery("SELECT b FROM test_truncate_decimal_transform WHERE d = 0.05", "VALUES 4");
assertQuery(select + " WHERE partition.d_trunc = 0.00", "VALUES (0.00, 1, 0.05, 0.05, 4, 4)");
assertQuery("SELECT b FROM test_truncate_decimal_transform WHERE d = -0.05", "VALUES 5");
assertQuery(select + " WHERE partition.d_trunc = -0.10", "VALUES (-0.10, 1, -0.05, -0.05, 5, 5)");
// Exercise IcebergMetadata.applyFilter with non-empty Constraint.predicate, via non-pushdownable predicates
assertQuery(
"SELECT * FROM test_truncate_decimal_transform WHERE d * 100 % 10 = 9 AND b % 7 = 3",
"VALUES (12.29, 3)");
assertThat(query("SHOW STATS FOR test_truncate_decimal_transform"))
.skippingTypesCheck()
.matches("VALUES " +
" ('d', NULL, NULL, 0.166667e0, NULL, '-0.05', '12.34'), " +
" ('b', NULL, NULL, 0e0, NULL, '1', '101'), " +
" (NULL, NULL, NULL, NULL, 6e0, NULL, NULL)");
dropTable("test_truncate_decimal_transform");
}
@Test
public void testBucketTransform()
{
testBucketTransformForType("DATE", "DATE '2020-05-19'", "DATE '2020-08-19'", "DATE '2020-11-19'");
testBucketTransformForType("VARCHAR", "CAST('abcd' AS VARCHAR)", "CAST('mommy' AS VARCHAR)", "CAST('abxy' AS VARCHAR)");
testBucketTransformForType("BIGINT", "CAST(100000000 AS BIGINT)", "CAST(200000002 AS BIGINT)", "CAST(400000001 AS BIGINT)");
testBucketTransformForType(
"UUID",
"CAST('206caec7-68b9-4778-81b2-a12ece70c8b1' AS UUID)",
"CAST('906caec7-68b9-4778-81b2-a12ece70c8b1' AS UUID)",
"CAST('406caec7-68b9-4778-81b2-a12ece70c8b1' AS UUID)");
}
protected void testBucketTransformForType(
String type,
String value,
String greaterValueInSameBucket,
String valueInOtherBucket)
{
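// Callers choose literals so that value and greaterValueInSameBucket hash to bucket 0 while valueInOtherBucket hashes to bucket 1 under bucket(d, 2).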
String tableName = format("test_bucket_transform%s", type.toLowerCase(Locale.ENGLISH));
assertUpdate(format("CREATE TABLE %s (d %s) WITH (partitioning = ARRAY['bucket(d, 2)'])", tableName, type));
assertUpdate(format("INSERT INTO %s VALUES (NULL), (%s), (%s), (%s)", tableName, value, greaterValueInSameBucket, valueInOtherBucket), 4);
assertThat(query(format("SELECT * FROM %s", tableName))).matches(format("VALUES (NULL), (%s), (%s), (%s)", value, greaterValueInSameBucket, valueInOtherBucket));
String selectFromPartitions = format("SELECT partition.d_bucket, record_count, data.d.min AS d_min, data.d.max AS d_max FROM \"%s$partitions\"", tableName);
if (supportsIcebergFileStatistics(type)) {
assertQuery(selectFromPartitions + " WHERE partition.d_bucket = 0", format("VALUES(0, %d, %s, %s)", 2, value, greaterValueInSameBucket));
assertQuery(selectFromPartitions + " WHERE partition.d_bucket = 1", format("VALUES(1, %d, %s, %s)", 1, valueInOtherBucket, valueInOtherBucket));
}
else {
assertQuery(selectFromPartitions + " WHERE partition.d_bucket = 0", format("VALUES(0, %d, null, null)", 2));
assertQuery(selectFromPartitions + " WHERE partition.d_bucket = 1", format("VALUES(1, %d, null, null)", 1));
}
assertThat(query("SHOW STATS FOR " + tableName))
.skippingTypesCheck()
.projected(0, 2, 3, 4) // data size, min and max may vary between types
.matches("VALUES " +
" ('d', NULL, 0.25e0, NULL), " +
" (NULL, NULL, NULL, 4e0)");
dropTable(tableName);
}
@Test
public void testApplyFilterWithNonEmptyConstraintPredicate()
{
assertUpdate("CREATE TABLE test_apply_functional_constraint (d VARCHAR, b BIGINT) WITH (partitioning = ARRAY['bucket(d, 2)'])");
assertUpdate(
"INSERT INTO test_apply_functional_constraint VALUES" +
"('abcd', 1)," +
"('abxy', 2)," +
"('ab598', 3)," +
"('mommy', 4)," +
"('moscow', 5)," +
"('Greece', 6)," +
"('Grozny', 7)",
7);
assertQuery(
"SELECT * FROM test_apply_functional_constraint WHERE length(d) = 4 AND b % 7 = 2",
"VALUES ('abxy', 2)");
assertUpdate("DROP TABLE test_apply_functional_constraint");
}
@Test
public void testVoidTransform()
{
assertUpdate("CREATE TABLE test_void_transform (d VARCHAR, b BIGINT) WITH (partitioning = ARRAY['void(d)'])");
String values = "VALUES " +
"('abcd', 1)," +
"('abxy', 2)," +
"('ab598', 3)," +
"('mommy', 4)," +
"('Warsaw', 5)," +
"(NULL, 6)," +
"(NULL, 7)";
assertUpdate("INSERT INTO test_void_transform " + values, 7);
assertQuery("SELECT * FROM test_void_transform", values);
assertQuery("SELECT COUNT(*) FROM \"test_void_transform$partitions\"", "SELECT 1");
assertQuery(
"SELECT partition.d_null, record_count, file_count, data.d.min, data.d.max, data.d.null_count, data.d.nan_count, data.b.min, data.b.max, data.b.null_count, data.b.nan_count FROM \"test_void_transform$partitions\"",
"VALUES (NULL, 7, 1, 'Warsaw', 'mommy', 2, NULL, 1, 7, 0, NULL)");
assertQuery(
"SELECT d, b FROM test_void_transform WHERE d IS NOT NULL",
"VALUES " +
"('abcd', 1)," +
"('abxy', 2)," +
"('ab598', 3)," +
"('mommy', 4)," +
"('Warsaw', 5)");
assertQuery("SELECT b FROM test_void_transform WHERE d IS NULL", "VALUES 6, 7");
assertThat(query("SHOW STATS FOR test_void_transform"))
.skippingTypesCheck()
.matches("VALUES " +
" ('d', " + (format == PARQUET ? "76e0" : "NULL") + ", NULL, 0.2857142857142857, NULL, NULL, NULL), " +
" ('b', NULL, NULL, 0e0, NULL, '1', '7'), " +
" (NULL, NULL, NULL, NULL, 7e0, NULL, NULL)");
assertUpdate("DROP TABLE " + "test_void_transform");
}
@Test
public void testMetadataDeleteSimple()
{
assertUpdate("CREATE TABLE test_metadata_delete_simple (col1 BIGINT, col2 BIGINT) WITH (partitioning = ARRAY['col1'])");
assertUpdate("INSERT INTO test_metadata_delete_simple VALUES(1, 100), (1, 101), (1, 102), (2, 200), (2, 201), (3, 300)", 6);
assertQuery("SELECT sum(col2) FROM test_metadata_delete_simple", "SELECT 1004");
assertQuery("SELECT count(*) FROM \"test_metadata_delete_simple$partitions\"", "SELECT 3");
assertUpdate("DELETE FROM test_metadata_delete_simple WHERE col1 = 1", 3);
assertQuery("SELECT sum(col2) FROM test_metadata_delete_simple", "SELECT 701");
assertQuery("SELECT count(*) FROM \"test_metadata_delete_simple$partitions\"", "SELECT 2");
dropTable("test_metadata_delete_simple");
}
@Test
public void testMetadataDelete()
{
assertUpdate("CREATE TABLE test_metadata_delete (" +
" orderkey BIGINT," +
" linenumber INTEGER," +
" linestatus VARCHAR" +
") " +
"WITH (" +
" partitioning = ARRAY[ 'linenumber', 'linestatus' ]" +
")");
assertUpdate(
"" +
"INSERT INTO test_metadata_delete " +
"SELECT orderkey, linenumber, linestatus " +
"FROM tpch.tiny.lineitem",
"SELECT count(*) FROM lineitem");
assertQuery("SELECT COUNT(*) FROM \"test_metadata_delete$partitions\"", "SELECT 14");
assertUpdate("DELETE FROM test_metadata_delete WHERE linestatus = 'F' AND linenumber = 3", 5378);
assertQuery("SELECT * FROM test_metadata_delete", "SELECT orderkey, linenumber, linestatus FROM lineitem WHERE linestatus <> 'F' or linenumber <> 3");
assertQuery("SELECT count(*) FROM \"test_metadata_delete$partitions\"", "SELECT 13");
assertUpdate("DELETE FROM test_metadata_delete WHERE linestatus='O'", 30049);
assertQuery("SELECT count(*) FROM \"test_metadata_delete$partitions\"", "SELECT 6");
assertQuery("SELECT * FROM test_metadata_delete", "SELECT orderkey, linenumber, linestatus FROM lineitem WHERE linestatus <> 'O' AND linenumber <> 3");
dropTable("test_metadata_delete");
}
@Test
public void testInSet()
{
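// 31 and 35 sit on either side of 32, presumably chosen to exercise both sides of an IN-list size boundary.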
testInSet(31);
testInSet(35);
}
private void testInSet(int inCount)
{
String values = range(1, inCount + 1)
.mapToObj(n -> format("(%s, %s)", n, n + 10))
.collect(joining(", "));
String inList = range(1, inCount + 1)
.mapToObj(Integer::toString)
.collect(joining(", "));
assertUpdate("CREATE TABLE test_in_set (col1 INTEGER, col2 BIGINT)");
assertUpdate(format("INSERT INTO test_in_set VALUES %s", values), inCount);
// This verifies that SELECT statements with large IN lists work correctly
computeActual(format("SELECT col1 FROM test_in_set WHERE col1 IN (%s)", inList));
dropTable("test_in_set");
}
@Test
public void testBasicTableStatistics()
{
String tableName = "test_basic_table_statistics";
assertUpdate(format("CREATE TABLE %s (col REAL)", tableName));
assertThat(query("SHOW STATS FOR " + tableName))
.skippingTypesCheck()
.matches("VALUES " +
" ('col', 0e0, 0e0, 1e0, NULL, NULL, NULL), " +
" (NULL, NULL, NULL, NULL, 0e0, NULL, NULL)");
assertUpdate("INSERT INTO " + tableName + " VALUES -10", 1);
assertUpdate("INSERT INTO " + tableName + " VALUES 100", 1);
assertThat(query("SHOW STATS FOR " + tableName))
.skippingTypesCheck()
.matches("VALUES " +
" ('col', NULL, NULL, 0e0, NULL, '-10.0', '100.0'), " +
" (NULL, NULL, NULL, NULL, 2e0, NULL, NULL)");
assertUpdate("INSERT INTO " + tableName + " VALUES 200", 1);
assertThat(query("SHOW STATS FOR " + tableName))
.skippingTypesCheck()
.matches("VALUES " +
" ('col', NULL, NULL, 0e0, NULL, '-10.0', '200.0'), " +
" (NULL, NULL, NULL, NULL, 3e0, NULL, NULL)");
dropTable(tableName);
}
@Test
public void testMultipleColumnTableStatistics()
{
String tableName = "test_multiple_table_statistics";
assertUpdate(format("CREATE TABLE %s (col1 REAL, col2 INTEGER, col3 DATE)", tableName));
assertUpdate("INSERT INTO " + tableName + " VALUES (-10, -1, DATE '2019-06-28')", 1);
assertUpdate("INSERT INTO " + tableName + " VALUES (100, 10, DATE '2020-01-01')", 1);
MaterializedResult result = computeActual("SHOW STATS FOR " + tableName);
MaterializedResult expectedStatistics =
resultBuilder(getSession(), VARCHAR, DOUBLE, DOUBLE, DOUBLE, DOUBLE, VARCHAR, VARCHAR)
.row("col1", null, null, 0.0, null, "-10.0", "100.0")
.row("col2", null, null, 0.0, null, "-1", "10")
.row("col3", null, null, 0.0, null, "2019-06-28", "2020-01-01")
.row(null, null, null, null, 2.0, null, null)
.build();
assertEquals(result, expectedStatistics);
assertUpdate("INSERT INTO " + tableName + " VALUES (200, 20, DATE '2020-06-28')", 1);
result = computeActual("SHOW STATS FOR " + tableName);
expectedStatistics =
resultBuilder(getSession(), VARCHAR, DOUBLE, DOUBLE, DOUBLE, DOUBLE, VARCHAR, VARCHAR)
.row("col1", null, null, 0.0, null, "-10.0", "200.0")
.row("col2", null, null, 0.0, null, "-1", "20")
.row("col3", null, null, 0.0, null, "2019-06-28", "2020-06-28")
.row(null, null, null, null, 3.0, null, null)
.build();
assertEquals(result, expectedStatistics);
assertUpdate("INSERT INTO " + tableName + " VALUES " + IntStream.rangeClosed(21, 25)
.mapToObj(i -> format("(200, %d, DATE '2020-07-%d')", i, i))
.collect(joining(", ")), 5);
assertUpdate("INSERT INTO " + tableName + " VALUES " + IntStream.rangeClosed(26, 30)
.mapToObj(i -> format("(NULL, %d, DATE '2020-06-%d')", i, i))
.collect(joining(", ")), 5);
result = computeActual("SHOW STATS FOR " + tableName);
expectedStatistics =
resultBuilder(getSession(), VARCHAR, DOUBLE, DOUBLE, DOUBLE, DOUBLE, VARCHAR, VARCHAR)
.row("col1", null, null, 5.0 / 13.0, null, "-10.0", "200.0")
.row("col2", null, null, 0.0, null, "-1", "30")
.row("col3", null, null, 0.0, null, "2019-06-28", "2020-07-25")
.row(null, null, null, null, 13.0, null, null)
.build();
assertEquals(result, expectedStatistics);
dropTable(tableName);
}
@Test
public void testPartitionedTableStatistics()
{
assertUpdate("CREATE TABLE iceberg.tpch.test_partitioned_table_statistics (col1 REAL, col2 BIGINT) WITH (partitioning = ARRAY['col2'])");
assertUpdate("INSERT INTO test_partitioned_table_statistics VALUES (-10, -1)", 1);
assertUpdate("INSERT INTO test_partitioned_table_statistics VALUES (100, 10)", 1);
MaterializedResult result = computeActual("SHOW STATS FOR iceberg.tpch.test_partitioned_table_statistics");
assertEquals(result.getRowCount(), 3);
MaterializedRow row0 = result.getMaterializedRows().get(0);
assertEquals(row0.getField(0), "col1");
assertEquals(row0.getField(3), 0.0);
assertEquals(row0.getField(5), "-10.0");
assertEquals(row0.getField(6), "100.0");
MaterializedRow row1 = result.getMaterializedRows().get(1);
assertEquals(row1.getField(0), "col2");
assertEquals(row1.getField(3), 0.0);
assertEquals(row1.getField(5), "-1");
assertEquals(row1.getField(6), "10");
MaterializedRow row2 = result.getMaterializedRows().get(2);
assertEquals(row2.getField(4), 2.0);
assertUpdate("INSERT INTO test_partitioned_table_statistics VALUES " + IntStream.rangeClosed(1, 5)
.mapToObj(i -> format("(%d, 10)", i + 100))
.collect(joining(", ")), 5);
assertUpdate("INSERT INTO test_partitioned_table_statistics VALUES " + IntStream.rangeClosed(6, 10)
.mapToObj(i -> "(NULL, 10)")
.collect(joining(", ")), 5);
result = computeActual("SHOW STATS FOR iceberg.tpch.test_partitioned_table_statistics");
assertEquals(result.getRowCount(), 3);
row0 = result.getMaterializedRows().get(0);
assertEquals(row0.getField(0), "col1");
assertEquals(row0.getField(3), 5.0 / 12.0);
assertEquals(row0.getField(5), "-10.0");
assertEquals(row0.getField(6), "105.0");
row1 = result.getMaterializedRows().get(1);
assertEquals(row1.getField(0), "col2");
assertEquals(row1.getField(3), 0.0);
assertEquals(row1.getField(5), "-1");
assertEquals(row1.getField(6), "10");
row2 = result.getMaterializedRows().get(2);
assertEquals(row2.getField(4), 12.0);
assertUpdate("INSERT INTO test_partitioned_table_statistics VALUES " + IntStream.rangeClosed(6, 10)
.mapToObj(i -> "(100, NULL)")
.collect(joining(", ")), 5);
result = computeActual("SHOW STATS FOR iceberg.tpch.test_partitioned_table_statistics");
row0 = result.getMaterializedRows().get(0);
assertEquals(row0.getField(0), "col1");
assertEquals(row0.getField(3), 5.0 / 17.0);
assertEquals(row0.getField(5), "-10.0");
assertEquals(row0.getField(6), "105.0");
row1 = result.getMaterializedRows().get(1);
assertEquals(row1.getField(0), "col2");
assertEquals(row1.getField(3), 5.0 / 17.0);
assertEquals(row1.getField(5), "-1");
assertEquals(row1.getField(6), "10");
row2 = result.getMaterializedRows().get(2);
assertEquals(row2.getField(4), 17.0);
dropTable("iceberg.tpch.test_partitioned_table_statistics");
}
@Test
public void testPredicatePushdown()
{
QualifiedObjectName tableName = new QualifiedObjectName("iceberg", "tpch", "test_predicate");
assertUpdate(format("CREATE TABLE %s (col1 BIGINT, col2 BIGINT, col3 BIGINT) WITH (partitioning = ARRAY['col2', 'col3'])", tableName));
assertUpdate(format("INSERT INTO %s VALUES (1, 10, 100)", tableName), 1L);
assertUpdate(format("INSERT INTO %s VALUES (2, 20, 200)", tableName), 1L);
assertQuery(format("SELECT * FROM %s WHERE col1 = 1", tableName), "VALUES (1, 10, 100)");
assertFilterPushdown(
tableName,
ImmutableMap.of("col1", singleValue(BIGINT, 1L)),
ImmutableMap.of(),
ImmutableMap.of("col1", singleValue(BIGINT, 1L)));
assertQuery(format("SELECT * FROM %s WHERE col2 = 10", tableName), "VALUES (1, 10, 100)");
assertFilterPushdown(
tableName,
ImmutableMap.of("col2", singleValue(BIGINT, 10L)),
ImmutableMap.of("col2", singleValue(BIGINT, 10L)),
ImmutableMap.of());
assertQuery(format("SELECT * FROM %s WHERE col1 = 1 AND col2 = 10", tableName), "VALUES (1, 10, 100)");
assertFilterPushdown(
tableName,
ImmutableMap.of("col1", singleValue(BIGINT, 1L), "col2", singleValue(BIGINT, 10L)),
ImmutableMap.of("col2", singleValue(BIGINT, 10L)),
ImmutableMap.of("col1", singleValue(BIGINT, 1L)));
// Assert pushdown for an IN predicate with value count above the default compaction threshold
List<Long> values = LongStream.range(1L, 1010L).boxed()
.filter(index -> index != 20L)
.collect(toImmutableList());
assertThat(values).hasSizeGreaterThan(ICEBERG_DOMAIN_COMPACTION_THRESHOLD);
String valuesString = join(",", values.stream().map(Object::toString).collect(toImmutableList()));
String inPredicate = "%s IN (" + valuesString + ")";
assertQuery(
format("SELECT * FROM %s WHERE %s AND %s", tableName, format(inPredicate, "col1"), format(inPredicate, "col2")),
"VALUES (1, 10, 100)");
assertFilterPushdown(
tableName,
ImmutableMap.of("col1", multipleValues(BIGINT, values), "col2", multipleValues(BIGINT, values)),
ImmutableMap.of("col2", multipleValues(BIGINT, values)),
// Unenforced predicate is simplified during split generation, but not reflected here
ImmutableMap.of("col1", multipleValues(BIGINT, values)));
dropTable(tableName.getObjectName());
}
@Test
public void testPredicatesWithStructuralTypes()
{
String tableName = "test_predicate_with_structural_types";
assertUpdate("CREATE TABLE " + tableName + " (id INT, array_t ARRAY(BIGINT), map_t MAP(BIGINT, BIGINT), struct_t ROW(f1 BIGINT, f2 BIGINT))");
assertUpdate("INSERT INTO " + tableName + " VALUES " +
"(1, ARRAY[1, 2, 3], MAP(ARRAY[1,3], ARRAY[2,4]), ROW(1, 2)), " +
"(11, ARRAY[11, 12, 13], MAP(ARRAY[11, 13], ARRAY[12, 14]), ROW(11, 12)), " +
"(11, ARRAY[111, 112, 113], MAP(ARRAY[111, 13], ARRAY[112, 114]), ROW(111, 112)), " +
"(21, ARRAY[21, 22, 23], MAP(ARRAY[21, 23], ARRAY[22, 24]), ROW(21, 22))",
4);
assertQuery("SELECT id FROM " + tableName + " WHERE array_t = ARRAY[1, 2, 3]", "VALUES 1");
assertQuery("SELECT id FROM " + tableName + " WHERE map_t = MAP(ARRAY[11, 13], ARRAY[12, 14])", "VALUES 11");
assertQuery("SELECT id FROM " + tableName + " WHERE struct_t = ROW(21, 22)", "VALUES 21");
assertQuery("SELECT struct_t.f1 FROM " + tableName + " WHERE id = 11 AND map_t = MAP(ARRAY[11, 13], ARRAY[12, 14])", "VALUES 11");
dropTable(tableName);
}
@Test(dataProviderClass = DataProviders.class, dataProvider = "trueFalse")
public void testPartitionsTableWithColumnNameConflict(boolean partitioned)
{
assertUpdate("DROP TABLE IF EXISTS test_partitions_with_conflict");
assertUpdate("CREATE TABLE test_partitions_with_conflict (" +
" p integer, " +
" row_count integer, " +
" record_count integer, " +
" file_count integer, " +
" total_size integer " +
") " +
(partitioned ? "WITH(partitioning = ARRAY['p'])" : ""));
assertUpdate("INSERT INTO test_partitions_with_conflict VALUES (11, 12, 13, 14, 15)", 1);
// sanity check
assertThat(query("SELECT * FROM test_partitions_with_conflict"))
.matches("VALUES (11, 12, 13, 14, 15)");
// test $partitions
assertThat(query("SELECT * FROM \"test_partitions_with_conflict$partitions\""))
.matches("SELECT " +
(partitioned ? "CAST(ROW(11) AS row(p integer)), " : "") +
"BIGINT '1', " +
"BIGINT '1', " +
// total_size is not exactly deterministic, so grab whatever value there is
"(SELECT total_size FROM \"test_partitions_with_conflict$partitions\"), " +
"CAST(" +
" ROW (" +
(partitioned ? "" : " ROW(11, 11, 0, NULL), ") +
" ROW(12, 12, 0, NULL), " +
" ROW(13, 13, 0, NULL), " +
" ROW(14, 14, 0, NULL), " +
" ROW(15, 15, 0, NULL) " +
" ) " +
" AS row(" +
(partitioned ? "" : " p row(min integer, max integer, null_count bigint, nan_count bigint), ") +
" row_count row(min integer, max integer, null_count bigint, nan_count bigint), " +
" record_count row(min integer, max integer, null_count bigint, nan_count bigint), " +
" file_count row(min integer, max integer, null_count bigint, nan_count bigint), " +
" total_size row(min integer, max integer, null_count bigint, nan_count bigint) " +
" )" +
")");
assertUpdate("DROP TABLE test_partitions_with_conflict");
}
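// Applies the given filter via Metadata.applyFilter and asserts how the connector splits it into
// an enforced predicate (guaranteed by Iceberg partition pruning, so the engine may drop it) and
// an unenforced predicate (which the engine must still evaluate on the returned rows).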
private void assertFilterPushdown(
QualifiedObjectName tableName,
Map<String, Domain> filter,
Map<String, Domain> expectedEnforcedPredicate,
Map<String, Domain> expectedUnenforcedPredicate)
{
Metadata metadata = getQueryRunner().getMetadata();
newTransaction().execute(getSession(), session -> {
TableHandle table = metadata.getTableHandle(session, tableName)
.orElseThrow(() -> new TableNotFoundException(tableName.asSchemaTableName()));
Map<String, ColumnHandle> columns = metadata.getColumnHandles(session, table);
TupleDomain<ColumnHandle> domains = TupleDomain.withColumnDomains(
filter.entrySet().stream()
.collect(toImmutableMap(entry -> columns.get(entry.getKey()), Map.Entry::getValue)));
Optional<ConstraintApplicationResult<TableHandle>> result = metadata.applyFilter(session, table, new Constraint(domains));
assertTrue(result.isEmpty() == (expectedUnenforcedPredicate == null && expectedEnforcedPredicate == null));
if (result.isPresent()) {
IcebergTableHandle newTable = (IcebergTableHandle) result.get().getHandle().getConnectorHandle();
assertEquals(
newTable.getEnforcedPredicate(),
TupleDomain.withColumnDomains(expectedEnforcedPredicate.entrySet().stream()
.collect(toImmutableMap(entry -> columns.get(entry.getKey()), Map.Entry::getValue))));
assertEquals(
newTable.getUnenforcedPredicate(),
TupleDomain.withColumnDomains(expectedUnenforcedPredicate.entrySet().stream()
.collect(toImmutableMap(entry -> columns.get(entry.getKey()), Map.Entry::getValue))));
}
});
}
@Test
public void testCreateNestedPartitionedTable()
{
assertUpdate("CREATE TABLE test_nested_table_1 (" +
" bool BOOLEAN" +
", int INTEGER" +
", arr ARRAY(VARCHAR)" +
", big BIGINT" +
", rl REAL" +
", dbl DOUBLE" +
", mp MAP(INTEGER, VARCHAR)" +
", dec DECIMAL(5,2)" +
", vc VARCHAR" +
", vb VARBINARY" +
", ts TIMESTAMP(6)" +
", tstz TIMESTAMP(6) WITH TIME ZONE" +
", str ROW(id INTEGER , vc VARCHAR)" +
", dt DATE)" +
" WITH (partitioning = ARRAY['int'])");
assertUpdate(
"INSERT INTO test_nested_table_1 " +
" select true, 1, array['uno', 'dos', 'tres'], BIGINT '1', REAL '1.0', DOUBLE '1.0', map(array[1,2,3,4], array['ek','don','teen','char'])," +
" CAST(1.0 as DECIMAL(5,2))," +
" 'one', VARBINARY 'binary0/1values',\n" +
" TIMESTAMP '2021-07-24 02:43:57.348000'," +
" TIMESTAMP '2021-07-24 02:43:57.348000 UTC'," +
" (CAST(ROW(null, 'this is a random value') AS ROW(int, varchar))), " +
" DATE '2021-07-24'",
1);
assertEquals(computeActual("SELECT * from test_nested_table_1").getRowCount(), 1);
assertThat(query("SHOW STATS FOR test_nested_table_1"))
.skippingTypesCheck()
.matches("VALUES " +
" ('bool', NULL, NULL, 0e0, NULL, 'true', 'true'), " +
" ('int', NULL, NULL, 0e0, NULL, '1', '1'), " +
" ('arr', NULL, NULL, " + (format == ORC ? "0e0" : "NULL") + ", NULL, NULL, NULL), " +
" ('big', NULL, NULL, 0e0, NULL, '1', '1'), " +
" ('rl', NULL, NULL, 0e0, NULL, '1.0', '1.0'), " +
" ('dbl', NULL, NULL, 0e0, NULL, '1.0', '1.0'), " +
" ('mp', NULL, NULL, " + (format == ORC ? "0e0" : "NULL") + ", NULL, NULL, NULL), " +
" ('dec', NULL, NULL, 0e0, NULL, '1.0', '1.0'), " +
" ('vc', " + (format == PARQUET ? "43e0" : "NULL") + ", NULL, 0e0, NULL, NULL, NULL), " +
" ('vb', " + (format == PARQUET ? "55e0" : "NULL") + ", NULL, 0e0, NULL, NULL, NULL), " +
" ('ts', NULL, NULL, 0e0, NULL, '2021-07-24 02:43:57.348000', " + (format == ORC ? "'2021-07-24 02:43:57.348999'" : "'2021-07-24 02:43:57.348000'") + "), " +
" ('tstz', NULL, NULL, 0e0, NULL, '2021-07-24 02:43:57.348 UTC', '2021-07-24 02:43:57.348 UTC'), " +
" ('str', NULL, NULL, " + (format == ORC ? "0e0" : "NULL") + ", NULL, NULL, NULL), " +
" ('dt', NULL, NULL, 0e0, NULL, '2021-07-24', '2021-07-24'), " +
" (NULL, NULL, NULL, NULL, 1e0, NULL, NULL)");
dropTable("test_nested_table_1");
assertUpdate("" +
"CREATE TABLE test_nested_table_2 (" +
" int INTEGER" +
", arr ARRAY(ROW(id INTEGER, vc VARCHAR))" +
", big BIGINT" +
", rl REAL" +
", dbl DOUBLE" +
", mp MAP(INTEGER, ARRAY(VARCHAR))" +
", dec DECIMAL(5,2)" +
", str ROW(id INTEGER, vc VARCHAR, arr ARRAY(INTEGER))" +
", vc VARCHAR)" +
" WITH (partitioning = ARRAY['int'])");
assertUpdate(
"INSERT INTO test_nested_table_2 " +
" select 1, array[cast(row(1, null) as row(int, varchar)), cast(row(2, 'dos') as row(int, varchar))], BIGINT '1', REAL '1.0', DOUBLE '1.0', " +
"map(array[1,2], array[array['ek', 'one'], array['don', 'do', 'two']]), CAST(1.0 as DECIMAL(5,2)), " +
"CAST(ROW(1, 'this is a random value', null) AS ROW(int, varchar, array(int))), 'one'",
1);
assertEquals(computeActual("SELECT * from test_nested_table_2").getRowCount(), 1);
assertThat(query("SHOW STATS FOR test_nested_table_2"))
.skippingTypesCheck()
.matches("VALUES " +
" ('int', NULL, NULL, 0e0, NULL, '1', '1'), " +
" ('arr', NULL, NULL, " + (format == ORC ? "0e0" : "NULL") + ", NULL, NULL, NULL), " +
" ('big', NULL, NULL, 0e0, NULL, '1', '1'), " +
" ('rl', NULL, NULL, 0e0, NULL, '1.0', '1.0'), " +
" ('dbl', NULL, NULL, 0e0, NULL, '1.0', '1.0'), " +
" ('mp', NULL, NULL, " + (format == ORC ? "0e0" : "NULL") + ", NULL, NULL, NULL), " +
" ('dec', NULL, NULL, 0e0, NULL, '1.0', '1.0'), " +
" ('vc', " + (format == PARQUET ? "43e0" : "NULL") + ", NULL, 0e0, NULL, NULL, NULL), " +
" ('str', NULL, NULL, " + (format == ORC ? "0e0" : "NULL") + ", NULL, NULL, NULL), " +
" (NULL, NULL, NULL, NULL, 1e0, NULL, NULL)");
assertUpdate("CREATE TABLE test_nested_table_3 WITH (partitioning = ARRAY['int']) AS SELECT * FROM test_nested_table_2", 1);
assertEquals(computeActual("SELECT * FROM test_nested_table_3").getRowCount(), 1);
assertThat(query("SHOW STATS FOR test_nested_table_3"))
.matches("SHOW STATS FOR test_nested_table_2");
dropTable("test_nested_table_2");
dropTable("test_nested_table_3");
}
@Test
public void testSerializableReadIsolation()
{
assertUpdate("CREATE TABLE test_read_isolation (x int)");
assertUpdate("INSERT INTO test_read_isolation VALUES 123, 456", 2);
withTransaction(session -> {
assertQuery(session, "SELECT * FROM test_read_isolation", "VALUES 123, 456");
assertUpdate("INSERT INTO test_read_isolation VALUES 789", 1);
assertQuery("SELECT * FROM test_read_isolation", "VALUES 123, 456, 789");
assertQuery(session, "SELECT * FROM test_read_isolation", "VALUES 123, 456");
});
assertQuery("SELECT * FROM test_read_isolation", "VALUES 123, 456, 789");
dropTable("test_read_isolation");
}
private void withTransaction(Consumer<Session> consumer)
{
transaction(getQueryRunner().getTransactionManager(), getQueryRunner().getAccessControl())
.readCommitted()
.execute(getSession(), consumer);
}
private void dropTable(String table)
{
Session session = getSession();
assertUpdate(session, "DROP TABLE " + table);
assertFalse(getQueryRunner().tableExists(session, table));
}
@Test
public void testOptimizedMetadataQueries()
{
Session session = Session.builder(getSession())
.setSystemProperty("optimize_metadata_queries", "true")
.build();
assertUpdate("CREATE TABLE test_metadata_optimization (a BIGINT, b BIGINT, c BIGINT) WITH (PARTITIONING = ARRAY['b', 'c'])");
assertUpdate("INSERT INTO test_metadata_optimization VALUES (5, 6, 7), (8, 9, 10)", 2);
assertQuery(session, "SELECT DISTINCT b FROM test_metadata_optimization", "VALUES (6), (9)");
assertQuery(session, "SELECT DISTINCT b, c FROM test_metadata_optimization", "VALUES (6, 7), (9, 10)");
assertQuery(session, "SELECT DISTINCT b FROM test_metadata_optimization WHERE b < 7", "VALUES (6)");
assertQuery(session, "SELECT DISTINCT b FROM test_metadata_optimization WHERE c > 8", "VALUES (9)");
// Assert behavior after metadata delete
assertUpdate("DELETE FROM test_metadata_optimization WHERE b = 6", 1);
assertQuery(session, "SELECT DISTINCT b FROM test_metadata_optimization", "VALUES (9)");
// TODO: assert behavior after deleting the last row of a partition, once row-level deletes are supported.
// i.e. a query like 'DELETE FROM test_metadata_optimization WHERE b = 6 AND a = 5'
dropTable("test_metadata_optimization");
}
@Test
public void testFileSizeInManifest()
throws Exception
{
assertUpdate("CREATE TABLE test_file_size_in_manifest (" +
"a_bigint bigint, " +
"a_varchar varchar, " +
"a_long_decimal decimal(38,20), " +
"a_map map(varchar, integer))");
assertUpdate(
"INSERT INTO test_file_size_in_manifest VALUES " +
"(NULL, NULL, NULL, NULL), " +
"(42, 'some varchar value', DECIMAL '123456789123456789.123456789123456789', map(ARRAY['abc', 'def'], ARRAY[113, -237843832]))",
2);
MaterializedResult files = computeActual("SELECT file_path, record_count, file_size_in_bytes FROM \"test_file_size_in_manifest$files\"");
long totalRecordCount = 0;
for (MaterializedRow row : files.getMaterializedRows()) {
String path = (String) row.getField(0);
Long recordCount = (Long) row.getField(1);
Long fileSizeInBytes = (Long) row.getField(2);
totalRecordCount += recordCount;
assertThat(fileSizeInBytes).isEqualTo(Files.size(Paths.get(path)));
}
// Verify sum(record_count) to make sure we have all the files.
assertThat(totalRecordCount).isEqualTo(2);
}
@Test
public void testIncorrectIcebergFileSizes()
throws Exception
{
// Create a table with a single insert
assertUpdate("CREATE TABLE test_iceberg_file_size (x BIGINT)");
assertUpdate("INSERT INTO test_iceberg_file_size VALUES (123), (456), (758)", 3);
// Get manifest file
MaterializedResult result = computeActual("SELECT path FROM \"test_iceberg_file_size$manifests\"");
assertEquals(result.getRowCount(), 1);
String manifestFile = (String) result.getOnlyValue();
// Read manifest file
Schema schema;
GenericData.Record entry = null;
try (DataFileReader<GenericData.Record> dataFileReader = new DataFileReader<>(new File(manifestFile), new GenericDatumReader<>())) {
schema = dataFileReader.getSchema();
int recordCount = 0;
while (dataFileReader.hasNext()) {
entry = dataFileReader.next();
recordCount++;
}
assertEquals(recordCount, 1);
}
// Alter data file entry to store incorrect file size
GenericData.Record dataFile = (GenericData.Record) entry.get("data_file");
long alteredValue = 50L;
assertNotEquals((long) dataFile.get("file_size_in_bytes"), alteredValue);
dataFile.put("file_size_in_bytes", alteredValue);
// Replace the file through HDFS client. This is required for correct checksums.
HdfsEnvironment.HdfsContext context = new HdfsContext(getSession().toConnectorSession());
org.apache.hadoop.fs.Path manifestFilePath = new org.apache.hadoop.fs.Path(manifestFile);
FileSystem fs = HDFS_ENVIRONMENT.getFileSystem(context, manifestFilePath);
// Write altered metadata
try (OutputStream out = fs.create(manifestFilePath);
DataFileWriter<GenericData.Record> dataFileWriter = new DataFileWriter<>(new GenericDatumWriter<>(schema))) {
dataFileWriter.create(schema, out);
dataFileWriter.append(entry);
}
// Ignoring Iceberg provided file size makes the query succeed
Session session = Session.builder(getSession())
.setCatalogSessionProperty("iceberg", "use_file_size_from_metadata", "false")
.build();
assertQuery(session, "SELECT * FROM test_iceberg_file_size", "VALUES (123), (456), (758)");
// Using Iceberg provided file size fails the query
assertQueryFails("SELECT * FROM test_iceberg_file_size",
format == ORC
? format(".*Error opening Iceberg split.*\\QIncorrect file size (%s) for file (end of stream not reached)\\E.*", alteredValue)
: format("Error reading tail from .* with length %d", alteredValue));
dropTable("test_iceberg_file_size");
}
@Test
public void testSplitPruningForFilterOnPartitionColumn()
{
String tableName = "nation_partitioned_pruning";
assertUpdate("DROP TABLE IF EXISTS " + tableName);
// disable redistribution of writes to have a predictable number of files written per partition (one).
Session noRedistributeWrites = Session.builder(getSession())
.setSystemProperty("redistribute_writes", "false")
.build();
assertUpdate(noRedistributeWrites, "CREATE TABLE " + tableName + " WITH (partitioning = ARRAY['regionkey']) AS SELECT * FROM nation", 25);
// sanity check that table contains exactly 5 files
assertThat(query("SELECT count(*) FROM \"" + tableName + "$files\"")).matches("VALUES CAST(5 AS BIGINT)");
verifySplitCount("SELECT * FROM " + tableName, 5);
verifySplitCount("SELECT * FROM " + tableName + " WHERE regionkey = 3", 1);
verifySplitCount("SELECT * FROM " + tableName + " WHERE regionkey < 2", 2);
verifySplitCount("SELECT * FROM " + tableName + " WHERE regionkey < 0", 0);
verifySplitCount("SELECT * FROM " + tableName + " WHERE regionkey > 1 AND regionkey < 4", 2);
verifySplitCount("SELECT * FROM " + tableName + " WHERE regionkey % 5 = 3", 1);
assertUpdate("DROP TABLE " + tableName);
}
@Test
public void testAllAvailableTypes()
{
assertUpdate("CREATE TABLE test_all_types (" +
" a_boolean boolean, " +
" an_integer integer, " +
" a_bigint bigint, " +
" a_real real, " +
" a_double double, " +
" a_short_decimal decimal(5,2), " +
" a_long_decimal decimal(38,20), " +
" a_varchar varchar, " +
" a_varbinary varbinary, " +
" a_date date, " +
" a_time time(6), " +
" a_timestamp timestamp(6), " +
" a_timestamptz timestamp(6) with time zone, " +
" a_uuid uuid, " +
" a_row row(id integer , vc varchar), " +
" an_array array(varchar), " +
" a_map map(integer, varchar) " +
")");
String values = "VALUES (" +
"true, " +
"1, " +
"BIGINT '1', " +
"REAL '1.0', " +
"DOUBLE '1.0', " +
"CAST(1.0 AS decimal(5,2)), " +
"CAST(11.0 AS decimal(38,20)), " +
"VARCHAR 'onefsadfdsf', " +
"X'000102f0feff', " +
"DATE '2021-07-24'," +
"TIME '02:43:57.987654', " +
"TIMESTAMP '2021-07-24 03:43:57.987654'," +
"TIMESTAMP '2021-07-24 04:43:57.987654 UTC', " +
"UUID '20050910-1330-11e9-ffff-2a86e4085a59', " +
"CAST(ROW(42, 'this is a random value') AS ROW(id int, vc varchar)), " +
"ARRAY[VARCHAR 'uno', 'dos', 'tres'], " +
"map(ARRAY[1,2], ARRAY['ek', VARCHAR 'one'])) ";
String nullValues = nCopies(17, "NULL").stream()
.collect(joining(", ", "VALUES (", ")"));
assertUpdate("INSERT INTO test_all_types " + values, 1);
assertUpdate("INSERT INTO test_all_types " + nullValues, 1);
// SELECT
assertThat(query("SELECT * FROM test_all_types"))
.matches(values + " UNION ALL " + nullValues);
// SELECT with predicates
assertThat(query("SELECT * FROM test_all_types WHERE " +
" a_boolean = true " +
"AND an_integer = 1 " +
"AND a_bigint = BIGINT '1' " +
"AND a_real = REAL '1.0' " +
"AND a_double = DOUBLE '1.0' " +
"AND a_short_decimal = CAST(1.0 AS decimal(5,2)) " +
"AND a_long_decimal = CAST(11.0 AS decimal(38,20)) " +
"AND a_varchar = VARCHAR 'onefsadfdsf' " +
"AND a_varbinary = X'000102f0feff' " +
"AND a_date = DATE '2021-07-24' " +
"AND a_time = TIME '02:43:57.987654' " +
"AND a_timestamp = TIMESTAMP '2021-07-24 03:43:57.987654' " +
"AND a_timestamptz = TIMESTAMP '2021-07-24 04:43:57.987654 UTC' " +
"AND a_uuid = UUID '20050910-1330-11e9-ffff-2a86e4085a59' " +
"AND a_row = CAST(ROW(42, 'this is a random value') AS ROW(id int, vc varchar)) " +
"AND an_array = ARRAY[VARCHAR 'uno', 'dos', 'tres'] " +
"AND a_map = map(ARRAY[1,2], ARRAY['ek', VARCHAR 'one']) " +
""))
.matches(values);
assertThat(query("SELECT * FROM test_all_types WHERE " +
" a_boolean IS NULL " +
"AND an_integer IS NULL " +
"AND a_bigint IS NULL " +
"AND a_real IS NULL " +
"AND a_double IS NULL " +
"AND a_short_decimal IS NULL " +
"AND a_long_decimal IS NULL " +
"AND a_varchar IS NULL " +
"AND a_varbinary IS NULL " +
"AND a_date IS NULL " +
"AND a_time IS NULL " +
"AND a_timestamp IS NULL " +
"AND a_timestamptz IS NULL " +
"AND a_uuid IS NULL " +
"AND a_row IS NULL " +
"AND an_array IS NULL " +
"AND a_map IS NULL " +
""))
.skippingTypesCheck()
.matches(nullValues);
// SHOW STATS
assertThat(query("SHOW STATS FOR test_all_types"))
.skippingTypesCheck()
.matches("VALUES " +
" ('a_boolean', NULL, NULL, 0.5e0, NULL, 'true', 'true'), " +
" ('an_integer', NULL, NULL, 0.5e0, NULL, '1', '1'), " +
" ('a_bigint', NULL, NULL, 0.5e0, NULL, '1', '1'), " +
" ('a_real', NULL, NULL, 0.5e0, NULL, '1.0', '1.0'), " +
" ('a_double', NULL, NULL, 0.5e0, NULL, '1.0', '1.0'), " +
" ('a_short_decimal', NULL, NULL, 0.5e0, NULL, '1.0', '1.0'), " +
" ('a_long_decimal', NULL, NULL, 0.5e0, NULL, '11.0', '11.0'), " +
" ('a_varchar', " + (format == PARQUET ? "87e0" : "NULL") + ", NULL, 0.5e0, NULL, NULL, NULL), " +
" ('a_varbinary', " + (format == PARQUET ? "82e0" : "NULL") + ", NULL, 0.5e0, NULL, NULL, NULL), " +
" ('a_date', NULL, NULL, 0.5e0, NULL, '2021-07-24', '2021-07-24'), " +
" ('a_time', NULL, NULL, 0.5e0, NULL, NULL, NULL), " +
" ('a_timestamp', NULL, NULL, 0.5e0, NULL, " + (format == ORC ? "'2021-07-24 03:43:57.987000', '2021-07-24 03:43:57.987999'" : "'2021-07-24 03:43:57.987654', '2021-07-24 03:43:57.987654'") + "), " +
" ('a_timestamptz', NULL, NULL, 0.5e0, NULL, '2021-07-24 04:43:57.987 UTC', '2021-07-24 04:43:57.987 UTC'), " +
" ('a_uuid', NULL, NULL, 0.5e0, NULL, NULL, NULL), " +
" ('a_row', NULL, NULL, " + (format == ORC ? "0.5" : "NULL") + ", NULL, NULL, NULL), " +
" ('an_array', NULL, NULL, " + (format == ORC ? "0.5" : "NULL") + ", NULL, NULL, NULL), " +
" ('a_map', NULL, NULL, " + (format == ORC ? "0.5" : "NULL") + ", NULL, NULL, NULL), " +
" (NULL, NULL, NULL, NULL, 2e0, NULL, NULL)");
// $partitions
String schema = getSession().getSchema().orElseThrow();
assertThat(query("SELECT column_name FROM information_schema.columns WHERE table_schema = '" + schema + "' AND table_name = 'test_all_types$partitions' "))
.skippingTypesCheck()
.matches("VALUES 'record_count', 'file_count', 'total_size', 'data'");
assertThat(query("SELECT " +
" record_count," +
" file_count, " +
" data.a_boolean, " +
" data.an_integer, " +
" data.a_bigint, " +
" data.a_real, " +
" data.a_double, " +
" data.a_short_decimal, " +
" data.a_long_decimal, " +
" data.a_varchar, " +
" data.a_varbinary, " +
" data.a_date, " +
" data.a_time, " +
" data.a_timestamp, " +
" data.a_timestamptz, " +
" data.a_uuid " +
" FROM \"test_all_types$partitions\" "))
.matches(
"VALUES (" +
" BIGINT '2', " +
" BIGINT '2', " +
" CAST(ROW(true, true, 1, NULL) AS ROW(min boolean, max boolean, null_count bigint, nan_count bigint)), " +
" CAST(ROW(1, 1, 1, NULL) AS ROW(min integer, max integer, null_count bigint, nan_count bigint)), " +
" CAST(ROW(1, 1, 1, NULL) AS ROW(min bigint, max bigint, null_count bigint, nan_count bigint)), " +
" CAST(ROW(1, 1, 1, NULL) AS ROW(min real, max real, null_count bigint, nan_count bigint)), " +
" CAST(ROW(1, 1, 1, NULL) AS ROW(min double, max double, null_count bigint, nan_count bigint)), " +
" CAST(ROW(1, 1, 1, NULL) AS ROW(min decimal(5,2), max decimal(5,2), null_count bigint, nan_count bigint)), " +
" CAST(ROW(11, 11, 1, NULL) AS ROW(min decimal(38,20), max decimal(38,20), null_count bigint, nan_count bigint)), " +
" CAST(ROW('onefsadfdsf', 'onefsadfdsf', 1, NULL) AS ROW(min varchar, max varchar, null_count bigint, nan_count bigint)), " +
(format == ORC ?
" CAST(ROW(NULL, NULL, 1, NULL) AS ROW(min varbinary, max varbinary, null_count bigint, nan_count bigint)), " :
" CAST(ROW(X'000102f0feff', X'000102f0feff', 1, NULL) AS ROW(min varbinary, max varbinary, null_count bigint, nan_count bigint)), ") +
" CAST(ROW(DATE '2021-07-24', DATE '2021-07-24', 1, NULL) AS ROW(min date, max date, null_count bigint, nan_count bigint)), " +
" CAST(ROW(TIME '02:43:57.987654', TIME '02:43:57.987654', 1, NULL) AS ROW(min time(6), max time(6), null_count bigint, nan_count bigint)), " +
(format == ORC ?
" CAST(ROW(TIMESTAMP '2021-07-24 03:43:57.987000', TIMESTAMP '2021-07-24 03:43:57.987999', 1, NULL) AS ROW(min timestamp(6), max timestamp(6), null_count bigint, nan_count bigint)), " :
" CAST(ROW(TIMESTAMP '2021-07-24 03:43:57.987654', TIMESTAMP '2021-07-24 03:43:57.987654', 1, NULL) AS ROW(min timestamp(6), max timestamp(6), null_count bigint, nan_count bigint)), ") +
(format == ORC ?
" CAST(ROW(TIMESTAMP '2021-07-24 04:43:57.987000 UTC', TIMESTAMP '2021-07-24 04:43:57.987999 UTC', 1, NULL) AS ROW(min timestamp(6) with time zone, max timestamp(6) with time zone, null_count bigint, nan_count bigint)), " :
" CAST(ROW(TIMESTAMP '2021-07-24 04:43:57.987654 UTC', TIMESTAMP '2021-07-24 04:43:57.987654 UTC', 1, NULL) AS ROW(min timestamp(6) with time zone, max timestamp(6) with time zone, null_count bigint, nan_count bigint)), ") +
(format == ORC ?
" CAST(ROW(NULL, NULL, 1, NULL) AS ROW(min uuid, max uuid, null_count bigint, nan_count bigint)) " :
" CAST(ROW(UUID '20050910-1330-11e9-ffff-2a86e4085a59', UUID '20050910-1330-11e9-ffff-2a86e4085a59', 1, NULL) AS ROW(min uuid, max uuid, null_count bigint, nan_count bigint)) "
) +
")");
assertUpdate("DROP TABLE test_all_types");
}
@Test(timeOut = 25_000)
public void testLocalDynamicFilteringWithSelectiveBuildSideJoin()
{
// We need to prepare tables for this test. The test requires tables that are backed by at least two files.
Session session = Session.builder(getSession())
.setSystemProperty(TASK_WRITER_COUNT, "2")
.build();
getQueryRunner().execute(session, format("CREATE TABLE IF NOT EXISTS %s AS SELECT * FROM %s", "linetime_multiple_file_backed", "tpch.tiny.lineitem")).getMaterializedRows();
getQueryRunner().execute(session, format("CREATE TABLE IF NOT EXISTS %s AS SELECT * FROM %s", "orders_multiple_file_backed", "tpch.tiny.orders")).getMaterializedRows();
long fullTableScan = (Long) computeActual("SELECT count(*) FROM linetime_multiple_file_backed").getOnlyValue();
// Pick a value for totalprice where file level stats will not be able to filter out any data
// This assumes the totalprice ranges in every file have some overlap, otherwise this test will fail.
MaterializedRow range = getOnlyElement(computeActual("SELECT max(lower_bounds[4]), min(upper_bounds[4]) FROM \"orders_multiple_file_backed$files\"").getMaterializedRows());
double totalPrice = (Double) computeActual(format(
"SELECT totalprice FROM orders_multiple_file_backed WHERE totalprice > %s AND totalprice < %s LIMIT 1",
range.getField(0),
range.getField(1)))
.getOnlyValue();
session = Session.builder(getSession())
.setSystemProperty(JOIN_DISTRIBUTION_TYPE, BROADCAST.name())
.setCatalogSessionProperty(ICEBERG_CATALOG, "dynamic_filtering_wait_timeout", "1h")
.build();
ResultWithQueryId<MaterializedResult> result = getDistributedQueryRunner().executeWithQueryId(
session,
"SELECT * FROM linetime_multiple_file_backed JOIN orders_multiple_file_backed ON linetime_multiple_file_backed.orderkey = orders_multiple_file_backed.orderkey AND orders_multiple_file_backed.totalprice = " + totalPrice);
OperatorStats probeStats = searchScanFilterAndProjectOperatorStats(
result.getQueryId(),
new QualifiedObjectName(ICEBERG_CATALOG, "tpch", "linetime_multiple_file_backed"));
// Assert some lineitem rows were filtered out on file level
assertThat(probeStats.getInputPositions()).isLessThan(fullTableScan);
}
@Test(dataProvider = "repartitioningDataProvider")
public void testRepartitionDataOnCtas(Session session, String partitioning, int expectedFiles)
{
testRepartitionData(session, "tpch.tiny.orders", true, partitioning, expectedFiles);
}
@Test(dataProvider = "repartitioningDataProvider")
public void testRepartitionDataOnInsert(Session session, String partitioning, int expectedFiles)
{
testRepartitionData(session, "tpch.tiny.orders", false, partitioning, expectedFiles);
}
@DataProvider
public Object[][] repartitioningDataProvider()
{
Session defaultSession = getSession();
// For identity-only partitioning, the Iceberg connector returns ConnectorTableLayout with partitionColumns set, but without partitioning.
// This is treated by the engine as "preferred", but not mandatory, partitioning, and gets ignored if stats suggest the number of partitions
// written is low. Without partitioning, the number of files created is nondeterministic, as a writer (worker node) may or may not receive data.
Session obeyConnectorPartitioning = Session.builder(defaultSession)
.setSystemProperty(PREFERRED_WRITE_PARTITIONING_MIN_NUMBER_OF_PARTITIONS, "1")
.build();
return new Object[][] {
// identity partitioning column
{obeyConnectorPartitioning, "'orderstatus'", 3},
// bucketing
{defaultSession, "'bucket(custkey, 13)'", 13},
// varchar-based
{defaultSession, "'truncate(comment, 1)'", 35},
// complex; would exceed 100 open writers limit in IcebergPageSink without write repartitioning
{defaultSession, "'bucket(custkey, 4)', 'truncate(comment, 1)'", 131},
// same column multiple times
{defaultSession, "'truncate(comment, 1)', 'orderstatus', 'bucket(comment, 2)'", 180},
};
}
@Test
public void testStatsBasedRepartitionDataOnCtas()
{
testStatsBasedRepartitionData(true);
}
@Test
public void testStatsBasedRepartitionDataOnInsert()
{
testStatsBasedRepartitionData(false);
}
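// Exercises stats-based preferred write partitioning: with the min-partitions threshold at 2, the
// 3 estimated partitions trigger write repartitioning (one file per partition). Raising the
// threshold to 5 makes the engine skip the preferred partitioning, so multiple writers may emit
// files for the same partition (9 files expected here).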
private void testStatsBasedRepartitionData(boolean ctas)
{
Session sessionRepartitionSmall = Session.builder(getSession())
.setSystemProperty(PREFERRED_WRITE_PARTITIONING_MIN_NUMBER_OF_PARTITIONS, "2")
.build();
Session sessionRepartitionMany = Session.builder(getSession())
.setSystemProperty(PREFERRED_WRITE_PARTITIONING_MIN_NUMBER_OF_PARTITIONS, "5")
.setSystemProperty(SCALE_WRITERS, "false")
.build();
// Use DISTINCT to add data redistribution between source table and the writer. This makes it more likely that all writers get some data.
String sourceRelation = "(SELECT DISTINCT orderkey, custkey, orderstatus FROM tpch.tiny.orders)";
testRepartitionData(
sessionRepartitionSmall,
sourceRelation,
ctas,
"'orderstatus'",
3);
// The test uses a relatively small table (60K rows). When the engine doesn't redistribute data for writes,
// a worker node occasionally gets no data and fewer files are created.
assertEventually(() -> {
testRepartitionData(
sessionRepartitionMany,
sourceRelation,
ctas,
"'orderstatus'",
9);
});
}
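// Shared driver for the repartitioning tests: writes sourceRelation into a table partitioned by
// the given transform(s), either via CTAS or CREATE + INSERT, then verifies both the data and the
// resulting number of data files.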
private void testRepartitionData(Session session, String sourceRelation, boolean ctas, String partitioning, int expectedFiles)
{
String tableName = "repartition" +
"_" + sourceRelation.replaceAll("[^a-zA-Z0-9]", "") +
(ctas ? "ctas" : "insert") +
"_" + partitioning.replaceAll("[^a-zA-Z0-9]", "") +
"_" + randomTableSuffix();
long rowCount = (long) computeScalar(session, "SELECT count(*) FROM " + sourceRelation);
if (ctas) {
assertUpdate(
session,
"CREATE TABLE " + tableName + " WITH (partitioning = ARRAY[" + partitioning + "]) " +
"AS SELECT * FROM " + sourceRelation,
rowCount);
}
else {
assertUpdate(
session,
"CREATE TABLE " + tableName + " WITH (partitioning = ARRAY[" + partitioning + "]) " +
"AS SELECT * FROM " + sourceRelation + " WITH NO DATA",
0);
// Use a source table big enough that multiple pages get written.
assertUpdate(session, "INSERT INTO " + tableName + " SELECT * FROM " + sourceRelation, rowCount);
}
// verify written data
assertThat(query(session, "TABLE " + tableName))
.skippingTypesCheck()
.matches("SELECT * FROM " + sourceRelation);
// verify data files, i.e. repartitioning took place
assertThat(query(session, "SELECT count(*) FROM \"" + tableName + "$files\""))
.matches("VALUES BIGINT '" + expectedFiles + "'");
assertUpdate(session, "DROP TABLE " + tableName);
}
@Test(dataProvider = "testDataMappingSmokeTestDataProvider")
public void testSplitPruningForFilterOnNonPartitionColumn(DataMappingTestSetup testSetup)
{
if (testSetup.isUnsupportedType()) {
return;
}
try (TestTable table = new TestTable(getQueryRunner()::execute, "test_split_pruning_non_partitioned", "(row_id int, col " + testSetup.getTrinoTypeName() + ")")) {
String tableName = table.getName();
String sampleValue = testSetup.getSampleValueLiteral();
String highValue = testSetup.getHighValueLiteral();
// Insert separately to ensure two files with one value each
assertUpdate("INSERT INTO " + tableName + " VALUES (1, " + sampleValue + ")", 1);
assertUpdate("INSERT INTO " + tableName + " VALUES (2, " + highValue + ")", 1);
assertQuery("select count(*) from \"" + tableName + "$files\"", "VALUES 2");
int expectedSplitCount = supportsIcebergFileStatistics(testSetup.getTrinoTypeName()) ? 1 : 2;
verifySplitCount("SELECT row_id FROM " + tableName, 2);
verifySplitCount("SELECT row_id FROM " + tableName + " WHERE col = " + sampleValue, expectedSplitCount);
verifySplitCount("SELECT row_id FROM " + tableName + " WHERE col = " + highValue, expectedSplitCount);
// ORC max timestamp statistics are truncated to millisecond precision and then appended with 999 microseconds.
// Therefore, sampleValue and highValue are within the max timestamp & there will be 2 splits.
verifySplitCount("SELECT row_id FROM " + tableName + " WHERE col > " + sampleValue,
(format == ORC && testSetup.getTrinoTypeName().contains("timestamp") ? 2 : expectedSplitCount));
verifySplitCount("SELECT row_id FROM " + tableName + " WHERE col < " + highValue,
(format == ORC && testSetup.getTrinoTypeName().contains("timestamp") ? 2 : expectedSplitCount));
}
}
@Test
public void testGetIcebergTableProperties()
{
assertUpdate("CREATE TABLE test_iceberg_get_table_props (x BIGINT)");
assertThat(query("SELECT * FROM \"test_iceberg_get_table_props$properties\""))
.matches(format("VALUES (VARCHAR 'write.format.default', VARCHAR '%s')", format.name()));
dropTable("test_iceberg_get_table_props");
}
protected abstract boolean supportsIcebergFileStatistics(String typeName);
@Test(dataProvider = "testDataMappingSmokeTestDataProvider")
public void testSplitPruningFromDataFileStatistics(DataMappingTestSetup testSetup)
{
if (testSetup.isUnsupportedType()) {
return;
}
try (TestTable table = new TestTable(
getQueryRunner()::execute,
"test_split_pruning_data_file_statistics",
// Random double is needed to make sure rows are different. Otherwise compression may deduplicate rows, resulting in only one row group
"(col " + testSetup.getTrinoTypeName() + ", r double)")) {
String tableName = table.getName();
String values =
Stream.concat(
nCopies(100, testSetup.getSampleValueLiteral()).stream(),
nCopies(100, testSetup.getHighValueLiteral()).stream())
.map(value -> "(" + value + ", rand())")
.collect(Collectors.joining(", "));
assertUpdate(withSmallRowGroups(getSession()), "INSERT INTO " + tableName + " VALUES " + values, 200);
String query = "SELECT * FROM " + tableName + " WHERE col = " + testSetup.getSampleValueLiteral();
verifyPredicatePushdownDataRead(query, supportsRowGroupStatistics(testSetup.getTrinoTypeName()));
}
}
protected abstract Session withSmallRowGroups(Session session);
protected abstract boolean supportsRowGroupStatistics(String typeName);
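// Runs the query, cross-checks its result against an execution with pushdown disabled, and then
// asserts how many splits (drivers) the table scan produced.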
private void verifySplitCount(String query, int expectedSplitCount)
{
ResultWithQueryId<MaterializedResult> selectAllPartitionsResult = getDistributedQueryRunner().executeWithQueryId(getSession(), query);
assertEqualsIgnoreOrder(selectAllPartitionsResult.getResult().getMaterializedRows(), computeActual(withoutPredicatePushdown(getSession()), query).getMaterializedRows());
verifySplitCount(selectAllPartitionsResult.getQueryId(), expectedSplitCount);
}
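// Compares the scan's input data size with and without pushdown: if the format supports
// row-group statistics, the pushdown run must read strictly less data; otherwise both runs must
// read the same amount.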
private void verifyPredicatePushdownDataRead(@Language("SQL") String query, boolean supportsPushdown)
{
ResultWithQueryId<MaterializedResult> resultWithPredicatePushdown = getDistributedQueryRunner().executeWithQueryId(getSession(), query);
ResultWithQueryId<MaterializedResult> resultWithoutPredicatePushdown = getDistributedQueryRunner().executeWithQueryId(
withoutPredicatePushdown(getSession()),
query);
DataSize withPushdownDataSize = getOperatorStats(resultWithPredicatePushdown.getQueryId()).getInputDataSize();
DataSize withoutPushdownDataSize = getOperatorStats(resultWithoutPredicatePushdown.getQueryId()).getInputDataSize();
if (supportsPushdown) {
assertThat(withPushdownDataSize).isLessThan(withoutPushdownDataSize);
}
else {
assertThat(withPushdownDataSize).isEqualTo(withoutPushdownDataSize);
}
}
private Session withoutPredicatePushdown(Session session)
{
return Session.builder(session)
.setSystemProperty("allow_pushdown_into_connectors", "false")
.build();
}
private void verifySplitCount(QueryId queryId, long expectedSplitCount)
{
checkArgument(expectedSplitCount >= 0);
OperatorStats operatorStats = getOperatorStats(queryId);
if (expectedSplitCount > 0) {
assertThat(operatorStats.getTotalDrivers()).isEqualTo(expectedSplitCount);
assertThat(operatorStats.getPhysicalInputPositions()).isGreaterThan(0);
}
else {
// expectedSplitCount == 0
assertThat(operatorStats.getTotalDrivers()).isEqualTo(1);
assertThat(operatorStats.getPhysicalInputPositions()).isEqualTo(0);
}
}
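// Finds the single TableScan/Scan operator summary for the given query.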
private OperatorStats getOperatorStats(QueryId queryId)
{
try {
return getDistributedQueryRunner().getCoordinator()
.getQueryManager()
.getFullQueryInfo(queryId)
.getQueryStats()
.getOperatorSummaries()
.stream()
.filter(summary -> summary.getOperatorType().startsWith("TableScan") || summary.getOperatorType().startsWith("Scan"))
.collect(onlyElement());
}
catch (NoSuchElementException e) {
throw new RuntimeException("Couldn't find operator summary, probably due to query statistic collection error", e);
}
}
@Override
protected TestTable createTableWithDefaultColumns()
{
throw new SkipException("Iceberg connector does not support column default values");
}
@Override
protected Optional<DataMappingTestSetup> filterDataMappingSmokeTestData(DataMappingTestSetup dataMappingTestSetup)
{
String typeName = dataMappingTestSetup.getTrinoTypeName();
if (typeName.equals("tinyint")
|| typeName.equals("smallint")
|| typeName.startsWith("char(")) {
// These types are not supported by Iceberg
return Optional.of(dataMappingTestSetup.asUnsupported());
}
// According to Iceberg specification all time and timestamp values are stored with microsecond precision.
if (typeName.equals("time") ||
typeName.equals("timestamp") ||
typeName.equals("timestamp(3) with time zone")) {
return Optional.of(dataMappingTestSetup.asUnsupported());
}
return Optional.of(dataMappingTestSetup);
}
@Override
protected Optional<DataMappingTestSetup> filterCaseSensitiveDataMappingTestData(DataMappingTestSetup dataMappingTestSetup)
{
String typeName = dataMappingTestSetup.getTrinoTypeName();
if (typeName.equals("char(1)")) {
return Optional.of(dataMappingTestSetup.asUnsupported());
}
return Optional.of(dataMappingTestSetup);
}
@Test
public void testAmbiguousColumnsWithDots()
{
assertThatThrownBy(() -> assertUpdate("CREATE TABLE ambiguous (\"a.cow\" BIGINT, a ROW(cow BIGINT))"))
.hasMessage("Invalid schema: multiple fields for name a.cow: 1 and 3");
assertUpdate("CREATE TABLE ambiguous (\"a.cow\" BIGINT, b ROW(cow BIGINT))");
assertThatThrownBy(() -> assertUpdate("ALTER TABLE ambiguous RENAME COLUMN b TO a"))
.hasMessage("Invalid schema: multiple fields for name a.cow: 1 and 3");
assertUpdate("DROP TABLE ambiguous");
assertUpdate("CREATE TABLE ambiguous (a ROW(cow BIGINT))");
assertThatThrownBy(() -> assertUpdate("ALTER TABLE ambiguous ADD COLUMN \"a.cow\" BIGINT"))
.hasMessage("Cannot add column with ambiguous name: a.cow, use addColumn(parent, name, type)");
assertUpdate("DROP TABLE ambiguous");
}
@Test
public void testSchemaEvolutionWithDereferenceProjections()
{
// Fields are identified uniquely based on unique IDs. If a column is dropped and recreated with the same name, it should not return dropped data.
assertUpdate("CREATE TABLE evolve_test (dummy BIGINT, a row(b BIGINT, c VARCHAR))");
assertUpdate("INSERT INTO evolve_test VALUES (1, ROW(1, 'abc'))", 1);
assertUpdate("ALTER TABLE evolve_test DROP COLUMN a");
assertUpdate("ALTER TABLE evolve_test ADD COLUMN a ROW(b VARCHAR, c BIGINT)");
assertQuery("SELECT a.b FROM evolve_test", "VALUES NULL");
assertUpdate("DROP TABLE evolve_test");
// Verify that changing subfield ordering does not revive dropped data
assertUpdate("CREATE TABLE evolve_test (dummy BIGINT, a ROW(b BIGINT, c VARCHAR), d BIGINT) with (partitioning = ARRAY['d'])");
assertUpdate("INSERT INTO evolve_test VALUES (1, ROW(2, 'abc'), 3)", 1);
assertUpdate("ALTER TABLE evolve_test DROP COLUMN a");
assertUpdate("ALTER TABLE evolve_test ADD COLUMN a ROW(c VARCHAR, b BIGINT)");
assertUpdate("INSERT INTO evolve_test VALUES (4, 5, ROW('def', 6))", 1);
assertQuery("SELECT a.b FROM evolve_test WHERE d = 3", "VALUES NULL");
assertQuery("SELECT a.b FROM evolve_test WHERE d = 5", "VALUES 6");
assertUpdate("DROP TABLE evolve_test");
}
@Test
public void testHighlyNestedData()
{
assertUpdate("CREATE TABLE nested_data (id INT, row_t ROW(f1 INT, f2 INT, row_t ROW (f1 INT, f2 INT, row_t ROW(f1 INT, f2 INT))))");
assertUpdate("INSERT INTO nested_data VALUES (1, ROW(2, 3, ROW(4, 5, ROW(6, 7)))), (11, ROW(12, 13, ROW(14, 15, ROW(16, 17))))", 2);
assertUpdate("INSERT INTO nested_data VALUES (21, ROW(22, 23, ROW(24, 25, ROW(26, 27))))", 1);
// Test select projected columns, with and without their parent column
assertQuery("SELECT id, row_t.row_t.row_t.f2 FROM nested_data", "VALUES (1, 7), (11, 17), (21, 27)");
assertQuery("SELECT id, row_t.row_t.row_t.f2, CAST(row_t AS JSON) FROM nested_data",
"VALUES (1, 7, '{\"f1\":2,\"f2\":3,\"row_t\":{\"f1\":4,\"f2\":5,\"row_t\":{\"f1\":6,\"f2\":7}}}'), " +
"(11, 17, '{\"f1\":12,\"f2\":13,\"row_t\":{\"f1\":14,\"f2\":15,\"row_t\":{\"f1\":16,\"f2\":17}}}'), " +
"(21, 27, '{\"f1\":22,\"f2\":23,\"row_t\":{\"f1\":24,\"f2\":25,\"row_t\":{\"f1\":26,\"f2\":27}}}')");
// Test predicates on immediate child column and deeper nested column
assertQuery("SELECT id, CAST(row_t.row_t.row_t AS JSON) FROM nested_data WHERE row_t.row_t.row_t.f2 = 27", "VALUES (21, '{\"f1\":26,\"f2\":27}')");
assertQuery("SELECT id, CAST(row_t.row_t.row_t AS JSON) FROM nested_data WHERE row_t.row_t.row_t.f2 > 20", "VALUES (21, '{\"f1\":26,\"f2\":27}')");
assertQuery("SELECT id, CAST(row_t AS JSON) FROM nested_data WHERE row_t.row_t.row_t.f2 = 27",
"VALUES (21, '{\"f1\":22,\"f2\":23,\"row_t\":{\"f1\":24,\"f2\":25,\"row_t\":{\"f1\":26,\"f2\":27}}}')");
assertQuery("SELECT id, CAST(row_t AS JSON) FROM nested_data WHERE row_t.row_t.row_t.f2 > 20",
"VALUES (21, '{\"f1\":22,\"f2\":23,\"row_t\":{\"f1\":24,\"f2\":25,\"row_t\":{\"f1\":26,\"f2\":27}}}')");
// Test predicates on parent columns
assertQuery("SELECT id, row_t.row_t.row_t.f1 FROM nested_data WHERE row_t.row_t.row_t = ROW(16, 17)", "VALUES (11, 16)");
assertQuery("SELECT id, row_t.row_t.row_t.f1 FROM nested_data WHERE row_t = ROW(22, 23, ROW(24, 25, ROW(26, 27)))", "VALUES (21, 26)");
assertUpdate("DROP TABLE IF EXISTS nested_data");
}
@Test
public void testProjectionPushdownAfterRename()
{
assertUpdate("CREATE TABLE projection_pushdown_after_rename (id INT, a ROW(b INT, c ROW (d INT)))");
assertUpdate("INSERT INTO projection_pushdown_after_rename VALUES (1, ROW(2, ROW(3))), (11, ROW(12, ROW(13)))", 2);
assertUpdate("INSERT INTO projection_pushdown_after_rename VALUES (21, ROW(22, ROW(23)))", 1);
String expected = "VALUES (11, JSON '{\"b\":12,\"c\":{\"d\":13}}', 13)";
assertQuery("SELECT id, CAST(a AS JSON), a.c.d FROM projection_pushdown_after_rename WHERE a.b = 12", expected);
assertUpdate("ALTER TABLE projection_pushdown_after_rename RENAME COLUMN a TO row_t");
assertQuery("SELECT id, CAST(row_t AS JSON), row_t.c.d FROM projection_pushdown_after_rename WHERE row_t.b = 12", expected);
assertUpdate("DROP TABLE IF EXISTS projection_pushdown_after_rename");
}
@Test
public void testProjectionWithCaseSensitiveField()
{
assertUpdate("CREATE TABLE projection_with_case_sensitive_field (id INT, a ROW(\"UPPER_CASE\" INT, \"lower_case\" INT, \"MiXeD_cAsE\" INT))");
assertUpdate("INSERT INTO projection_with_case_sensitive_field VALUES (1, ROW(2, 3, 4)), (5, ROW(6, 7, 8))", 2);
String expected = "VALUES (2, 3, 4), (6, 7, 8)";
assertQuery("SELECT a.UPPER_CASE, a.lower_case, a.MiXeD_cAsE FROM projection_with_case_sensitive_field", expected);
assertQuery("SELECT a.upper_case, a.lower_case, a.mixed_case FROM projection_with_case_sensitive_field", expected);
assertQuery("SELECT a.UPPER_CASE, a.LOWER_CASE, a.MIXED_CASE FROM projection_with_case_sensitive_field", expected);
assertUpdate("DROP TABLE IF EXISTS projection_with_case_sensitive_field");
}
@Test
public void testProjectionPushdownReadsLessData()
{
String largeVarchar = "ZZZ".repeat(1000);
assertUpdate("CREATE TABLE projection_pushdown_reads_less_data (id INT, a ROW(b VARCHAR, c INT))");
assertUpdate(
format("INSERT INTO projection_pushdown_reads_less_data VALUES (1, ROW('%s', 3)), (11, ROW('%1$s', 13)), (21, ROW('%1$s', 23)), (31, ROW('%1$s', 33))", largeVarchar),
4);
String selectQuery = "SELECT a.c FROM projection_pushdown_reads_less_data";
Set<Integer> expected = ImmutableSet.of(3, 13, 23, 33);
Session sessionWithoutPushdown = Session.builder(getSession())
.setCatalogSessionProperty(ICEBERG_CATALOG, "projection_pushdown_enabled", "false")
.build();
assertQueryStats(
getSession(),
selectQuery,
statsWithPushdown -> {
DataSize processedDataSizeWithPushdown = statsWithPushdown.getProcessedInputDataSize();
assertQueryStats(
sessionWithoutPushdown,
selectQuery,
statsWithoutPushdown -> assertThat(statsWithoutPushdown.getProcessedInputDataSize()).isGreaterThan(processedDataSizeWithPushdown),
results -> assertEquals(results.getOnlyColumnAsSet(), expected));
},
results -> assertEquals(results.getOnlyColumnAsSet(), expected));
assertUpdate("DROP TABLE IF EXISTS projection_pushdown_reads_less_data");
}
@Test
public void testProjectionPushdownOnPartitionedTables()
{
assertUpdate("CREATE TABLE table_with_partition_at_beginning (id BIGINT, root ROW(f1 BIGINT, f2 BIGINT)) WITH (partitioning = ARRAY['id'])");
assertUpdate("INSERT INTO table_with_partition_at_beginning VALUES (1, ROW(1, 2)), (1, ROW(2, 3)), (1, ROW(3, 4))", 3);
assertQuery("SELECT id, root.f2 FROM table_with_partition_at_beginning", "VALUES (1, 2), (1, 3), (1, 4)");
assertUpdate("DROP TABLE table_with_partition_at_beginning");
assertUpdate("CREATE TABLE table_with_partition_at_end (root ROW(f1 BIGINT, f2 BIGINT), id BIGINT) WITH (partitioning = ARRAY['id'])");
assertUpdate("INSERT INTO table_with_partition_at_end VALUES (ROW(1, 2), 1), (ROW(2, 3), 1), (ROW(3, 4), 1)", 3);
assertQuery("SELECT root.f2, id FROM table_with_partition_at_end", "VALUES (2, 1), (3, 1), (4, 1)");
assertUpdate("DROP TABLE table_with_partition_at_end");
}
@Test
public void testProjectionPushdownOnPartitionedTableWithComments()
{
assertUpdate("CREATE TABLE test_projection_pushdown_comments (id BIGINT COMMENT 'id', qid BIGINT COMMENT 'QID', root ROW(f1 BIGINT, f2 BIGINT) COMMENT 'root') WITH (partitioning = ARRAY['id'])");
assertUpdate("INSERT INTO test_projection_pushdown_comments VALUES (1, 1, ROW(1, 2)), (1, 2, ROW(2, 3)), (1, 3, ROW(3, 4))", 3);
assertQuery("SELECT id, root.f2 FROM test_projection_pushdown_comments", "VALUES (1, 2), (1, 3), (1, 4)");
// Query with predicates on both nested and top-level columns (with partition column)
assertQuery("SELECT id, root.f2 FROM test_projection_pushdown_comments WHERE id = 1 AND qid = 1 AND root.f1 = 1", "VALUES (1, 2)");
// Query with predicates on both nested and top-level columns (no partition column)
assertQuery("SELECT id, root.f2 FROM test_projection_pushdown_comments WHERE qid = 2 AND root.f1 = 2", "VALUES (1, 3)");
// Query with predicates on top-level columns only
assertQuery("SELECT id, root.f2 FROM test_projection_pushdown_comments WHERE id = 1 AND qid = 1", "VALUES (1, 2)");
// Query with predicates on nested columns only
assertQuery("SELECT id, root.f2 FROM test_projection_pushdown_comments WHERE root.f1 = 2", "VALUES (1, 3)");
assertUpdate("DROP TABLE IF EXISTS test_projection_pushdown_comments");
}
@Test(dataProvider = "tableFormatVersion")
public void testOptimize(int formatVersion)
throws Exception
{
String tableName = "test_optimize_" + randomTableSuffix();
assertUpdate("CREATE TABLE " + tableName + " (key integer, value varchar) WITH (format_version = " + formatVersion + ")");
// DistributedQueryRunner sets node-scheduler.include-coordinator by default, so the coordinator counts as a worker too
int workerCount = getQueryRunner().getNodeCount();
// optimize an empty table
assertQuerySucceeds("ALTER TABLE " + tableName + " EXECUTE OPTIMIZE");
assertThat(getActiveFiles(tableName)).isEmpty();
assertUpdate("INSERT INTO " + tableName + " VALUES (11, 'eleven')", 1);
assertUpdate("INSERT INTO " + tableName + " VALUES (12, 'zwölf')", 1);
assertUpdate("INSERT INTO " + tableName + " VALUES (13, 'trzynaście')", 1);
assertUpdate("INSERT INTO " + tableName + " VALUES (14, 'quatorze')", 1);
assertUpdate("INSERT INTO " + tableName + " VALUES (15, 'пʼятнадцять')", 1);
List<String> initialFiles = getActiveFiles(tableName);
assertThat(initialFiles)
.hasSize(5)
// Verify we have sufficiently many test rows with respect to worker count.
.hasSizeGreaterThan(workerCount);
computeActual("ALTER TABLE " + tableName + " EXECUTE OPTIMIZE");
assertThat(query("SELECT sum(key), listagg(value, ' ') WITHIN GROUP (ORDER BY key) FROM " + tableName))
.matches("VALUES (BIGINT '65', VARCHAR 'eleven zwölf trzynaście quatorze пʼятнадцять')");
List<String> updatedFiles = getActiveFiles(tableName);
assertThat(updatedFiles)
.hasSizeBetween(1, workerCount)
.doesNotContainAnyElementsOf(initialFiles);
// No files should be removed (this is expire_snapshots's job, when it exists)
assertThat(getAllDataFilesFromTableDirectory(tableName))
.containsExactlyInAnyOrderElementsOf(concat(initialFiles, updatedFiles));
// optimize with a low file size threshold, nothing should change
computeActual("ALTER TABLE " + tableName + " EXECUTE OPTIMIZE (file_size_threshold => '33B')");
assertThat(query("SELECT sum(key), listagg(value, ' ') WITHIN GROUP (ORDER BY key) FROM " + tableName))
.matches("VALUES (BIGINT '65', VARCHAR 'eleven zwölf trzynaście quatorze пʼятнадцять')");
assertThat(getActiveFiles(tableName)).isEqualTo(updatedFiles);
assertThat(getAllDataFilesFromTableDirectory(tableName))
.containsExactlyInAnyOrderElementsOf(concat(initialFiles, updatedFiles));
// optimize with delimited procedure name
assertQueryFails("ALTER TABLE " + tableName + " EXECUTE \"optimize\"", "Procedure optimize not registered for catalog iceberg");
assertUpdate("ALTER TABLE " + tableName + " EXECUTE \"OPTIMIZE\"");
// optimize with delimited parameter name (and procedure name)
assertUpdate("ALTER TABLE " + tableName + " EXECUTE \"OPTIMIZE\" (\"file_size_threshold\" => '33B')"); // TODO (https://github.com/trinodb/trino/issues/11326) this should fail
assertUpdate("ALTER TABLE " + tableName + " EXECUTE \"OPTIMIZE\" (\"FILE_SIZE_THRESHOLD\" => '33B')");
assertUpdate("DROP TABLE " + tableName);
}
@Test(dataProvider = "tableFormatVersion")
public void testOptimizeForPartitionedTable(int formatVersion)
throws IOException
{
// This test will have its own session to make sure partitioning is indeed forced and is not a result
// of session configuration
Session session = testSessionBuilder()
.setCatalog(getQueryRunner().getDefaultSession().getCatalog())
.setSchema(getQueryRunner().getDefaultSession().getSchema())
.setSystemProperty("use_preferred_write_partitioning", "true")
.setSystemProperty("preferred_write_partitioning_min_number_of_partitions", "100")
.build();
String tableName = "test_repartitiong_during_optimize_" + randomTableSuffix();
assertUpdate(session, "CREATE TABLE " + tableName + " (key varchar, value integer) WITH (format_version = " + formatVersion + ", partitioning = ARRAY['key'])");
// optimize an empty table
assertQuerySucceeds(session, "ALTER TABLE " + tableName + " EXECUTE OPTIMIZE");
assertUpdate(session, "INSERT INTO " + tableName + " VALUES ('one', 1)", 1);
assertUpdate(session, "INSERT INTO " + tableName + " VALUES ('one', 2)", 1);
assertUpdate(session, "INSERT INTO " + tableName + " VALUES ('one', 3)", 1);
assertUpdate(session, "INSERT INTO " + tableName + " VALUES ('one', 4)", 1);
assertUpdate(session, "INSERT INTO " + tableName + " VALUES ('one', 5)", 1);
assertUpdate(session, "INSERT INTO " + tableName + " VALUES ('one', 6)", 1);
assertUpdate(session, "INSERT INTO " + tableName + " VALUES ('one', 7)", 1);
assertUpdate(session, "INSERT INTO " + tableName + " VALUES ('two', 8)", 1);
assertUpdate(session, "INSERT INTO " + tableName + " VALUES ('two', 9)", 1);
assertUpdate(session, "INSERT INTO " + tableName + " VALUES ('three', 10)", 1);
List<String> initialFiles = getActiveFiles(tableName);
assertThat(initialFiles).hasSize(10);
computeActual(session, "ALTER TABLE " + tableName + " EXECUTE OPTIMIZE");
assertThat(query(session, "SELECT sum(value), listagg(key, ' ') WITHIN GROUP (ORDER BY key) FROM " + tableName))
.matches("VALUES (BIGINT '55', VARCHAR 'one one one one one one one three two two')");
List<String> updatedFiles = getActiveFiles(tableName);
// as we force repartitioning there should be only 3 partitions
assertThat(updatedFiles).hasSize(3);
assertThat(getAllDataFilesFromTableDirectory(tableName)).containsExactlyInAnyOrderElementsOf(concat(initialFiles, updatedFiles));
assertUpdate("DROP TABLE " + tableName);
}
@DataProvider
public Object[][] tableFormatVersion()
{
return IntStream.rangeClosed(IcebergConfig.FORMAT_VERSION_SUPPORT_MIN, IcebergConfig.FORMAT_VERSION_SUPPORT_MAX).boxed()
.collect(DataProviders.toDataProvider());
}
@Test
public void testOptimizeTableAfterDeleteWithFormatVersion2()
{
String tableName = "test_optimize_" + randomTableSuffix();
assertUpdate("CREATE TABLE " + tableName + " AS SELECT * FROM nation", 25);
List<String> initialFiles = getActiveFiles(tableName);
assertUpdate("DELETE FROM " + tableName + " WHERE nationkey = 7", 1);
// Verify that delete files exist
assertQuery(
"SELECT summary['total-delete-files'] FROM \"" + tableName + "$snapshots\" WHERE snapshot_id = " + getCurrentSnapshotId(tableName),
"VALUES '1'");
computeActual("ALTER TABLE " + tableName + " EXECUTE OPTIMIZE");
List<String> updatedFiles = getActiveFiles(tableName);
assertThat(updatedFiles)
.hasSize(1)
.isNotEqualTo(initialFiles);
assertThat(query("SELECT * FROM " + tableName))
.matches("SELECT * FROM nation WHERE nationkey != 7");
assertUpdate("DROP TABLE " + tableName);
}
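// Data files referenced by the table's current snapshot, read through the $files metadata table.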
private List<String> getActiveFiles(String tableName)
{
return computeActual(format("SELECT file_path FROM \"%s$files\"", tableName)).getOnlyColumn()
.map(String.class::cast)
.collect(toImmutableList());
}
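// All data files physically present under the table's data directory, including files that are no
// longer referenced by the current snapshot (e.g. inputs replaced by OPTIMIZE); checksum files are
// filtered out.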
private List<String> getAllDataFilesFromTableDirectory(String tableName)
throws IOException
{
String schema = getSession().getSchema().orElseThrow();
Path tableDataDir = getDistributedQueryRunner().getCoordinator().getBaseDataDir().resolve("iceberg_data").resolve(schema).resolve(tableName).resolve("data");
try (Stream<Path> walk = Files.walk(tableDataDir)) {
return walk
.filter(Files::isRegularFile)
.filter(path -> !path.getFileName().toString().matches("\\..*\\.crc"))
.map(Path::toString)
.collect(toImmutableList());
}
}
@Test
public void testOptimizeParameterValidation()
{
assertQueryFails(
"ALTER TABLE no_such_table_exists EXECUTE OPTIMIZE",
"\\Qline 1:1: Table 'iceberg.tpch.no_such_table_exists' does not exist");
assertQueryFails(
"ALTER TABLE nation EXECUTE OPTIMIZE (file_size_threshold => '33')",
"\\QUnable to set catalog 'iceberg' table procedure 'OPTIMIZE' property 'file_size_threshold' to ['33']: size is not a valid data size string: 33");
assertQueryFails(
"ALTER TABLE nation EXECUTE OPTIMIZE (file_size_threshold => '33s')",
"\\QUnable to set catalog 'iceberg' table procedure 'OPTIMIZE' property 'file_size_threshold' to ['33s']: Unknown unit: s");
}
@Test
public void testTargetMaxFileSize()
{
String tableName = "test_default_max_file_size" + randomTableSuffix();
@Language("SQL") String createTableSql = format("CREATE TABLE %s AS SELECT * FROM tpch.sf1.lineitem LIMIT 100000", tableName);
Session session = Session.builder(getSession())
.setSystemProperty("task_writer_count", "1")
.build();
assertUpdate(session, createTableSql, 100000);
List<String> initialFiles = getActiveFiles(tableName);
assertThat(initialFiles.size()).isLessThanOrEqualTo(3);
assertUpdate(format("DROP TABLE %s", tableName));
DataSize maxSize = DataSize.of(40, DataSize.Unit.KILOBYTE);
session = Session.builder(getSession())
.setSystemProperty("task_writer_count", "1")
.setCatalogSessionProperty("iceberg", "target_max_file_size", maxSize.toString())
.build();
assertUpdate(session, createTableSql, 100000);
assertThat(query(format("SELECT count(*) FROM %s", tableName))).matches("VALUES BIGINT '100000'");
List<String> updatedFiles = getActiveFiles(tableName);
assertThat(updatedFiles.size()).isGreaterThan(10);
computeActual(format("SELECT file_size_in_bytes FROM \"%s$files\"", tableName))
.getMaterializedRows()
// as target_max_file_size is set to a quite low value, the created files can be bigger,
// so just to be safe we check that they are not much bigger
.forEach(row -> assertThat((Long) row.getField(0)).isBetween(1L, maxSize.toBytes() * 3));
}
@Test
public void testDroppingIcebergAndCreatingANewTableWithTheSameNameShouldBePossible()
{
assertUpdate("CREATE TABLE test_iceberg_recreate (a_int) AS VALUES (1)", 1);
assertThat(query("SELECT min(a_int) FROM test_iceberg_recreate")).matches("VALUES 1");
dropTable("test_iceberg_recreate");
assertUpdate("CREATE TABLE test_iceberg_recreate (a_varchar) AS VALUES ('Trino')", 1);
assertThat(query("SELECT min(a_varchar) FROM test_iceberg_recreate")).matches("VALUES CAST('Trino' AS varchar)");
dropTable("test_iceberg_recreate");
}
@Test
public void testPathHiddenColumn()
{
String tableName = "test_path_" + randomTableSuffix();
@Language("SQL") String createTable = "CREATE TABLE " + tableName + " " +
"WITH ( partitioning = ARRAY['zip'] ) AS " +
"SELECT * FROM (VALUES " +
"(0, 0), (3, 0), (6, 0), " +
"(1, 1), (4, 1), (7, 1), " +
"(2, 2), (5, 2) " +
" ) t(userid, zip)";
assertUpdate(createTable, 8);
MaterializedResult expectedColumns = resultBuilder(getSession(), VARCHAR, VARCHAR, VARCHAR, VARCHAR)
.row("userid", "integer", "", "")
.row("zip", "integer", "", "")
.build();
MaterializedResult actualColumns = computeActual(format("DESCRIBE %s", tableName));
// Describe output should not have the $path hidden column
assertEquals(actualColumns, expectedColumns);
assertThat(query("SELECT file_path FROM \"" + tableName + "$files\""))
.matches("SELECT DISTINCT \"$path\" as file_path FROM " + tableName);
String somePath = (String) computeScalar("SELECT \"$path\" FROM " + tableName + " WHERE userid = 2");
assertThat(query("SELECT userid FROM " + tableName + " WHERE \"$path\" = '" + somePath + "'"))
.matches("VALUES 2, 5");
assertThat(query("SELECT userid FROM " + tableName + " WHERE \"$path\" = '" + somePath + "' AND userid > 0"))
.matches("VALUES 2, 5");
assertUpdate("DROP TABLE " + tableName);
}
@Test
public void testExpireSnapshots()
throws Exception
{
String tableName = "test_expiring_snapshots_" + randomTableSuffix();
Session sessionWithShortRetentionUnlocked = prepareCleanUpSession();
assertUpdate("CREATE TABLE " + tableName + " (key varchar, value integer)");
assertUpdate("INSERT INTO " + tableName + " VALUES ('one', 1)", 1);
assertUpdate("INSERT INTO " + tableName + " VALUES ('two', 2)", 1);
assertThat(query("SELECT sum(value), listagg(key, ' ') WITHIN GROUP (ORDER BY key) FROM " + tableName))
.matches("VALUES (BIGINT '3', VARCHAR 'one two')");
List<Long> initialSnapshots = getSnapshotIds(tableName);
List<String> initialFiles = getAllMetadataFilesFromTableDirectoryForTable(tableName);
assertQuerySucceeds(sessionWithShortRetentionUnlocked, "ALTER TABLE " + tableName + " EXECUTE EXPIRE_SNAPSHOTS (retention_threshold => '0s')");
assertThat(query("SELECT sum(value), listagg(key, ' ') WITHIN GROUP (ORDER BY key) FROM " + tableName))
.matches("VALUES (BIGINT '3', VARCHAR 'one two')");
List<String> updatedFiles = getAllMetadataFilesFromTableDirectoryForTable(tableName);
List<Long> updatedSnapshots = getSnapshotIds(tableName);
assertThat(updatedFiles.size()).isEqualTo(initialFiles.size() - 1);
assertThat(updatedSnapshots.size()).isLessThan(initialSnapshots.size());
assertThat(updatedSnapshots.size()).isEqualTo(1);
assertThat(initialSnapshots).containsAll(updatedSnapshots);
}
@Test
public void testExpireSnapshotsPartitionedTable()
throws Exception
{
String tableName = "test_expiring_snapshots_partitioned_table" + randomTableSuffix();
Session sessionWithShortRetentionUnlocked = prepareCleanUpSession();
assertUpdate("CREATE TABLE " + tableName + " (col1 BIGINT, col2 BIGINT) WITH (partitioning = ARRAY['col1'])");
assertUpdate("INSERT INTO " + tableName + " VALUES(1, 100), (1, 101), (1, 102), (2, 200), (2, 201), (3, 300)", 6);
assertUpdate("DELETE FROM " + tableName + " WHERE col1 = 1", 3);
assertUpdate("INSERT INTO " + tableName + " VALUES(4, 400)", 1);
assertQuery("SELECT sum(col2) FROM " + tableName, "SELECT 1101");
List<String> initialDataFiles = getAllDataFilesFromTableDirectory(tableName);
List<Long> initialSnapshots = getSnapshotIds(tableName);
assertQuerySucceeds(sessionWithShortRetentionUnlocked, "ALTER TABLE " + tableName + " EXECUTE EXPIRE_SNAPSHOTS (retention_threshold => '0s')");
List<String> updatedDataFiles = getAllDataFilesFromTableDirectory(tableName);
List<Long> updatedSnapshots = getSnapshotIds(tableName);
assertQuery("SELECT sum(col2) FROM " + tableName, "SELECT 1101");
assertThat(updatedDataFiles.size()).isLessThan(initialDataFiles.size());
assertThat(updatedSnapshots.size()).isLessThan(initialSnapshots.size());
}
@Test
public void testExplainExpireSnapshotOutput()
{
String tableName = "test_expiring_snapshots_output" + randomTableSuffix();
assertUpdate("CREATE TABLE " + tableName + " (key varchar, value integer) WITH (partitioning = ARRAY['key'])");
assertUpdate("INSERT INTO " + tableName + " VALUES ('one', 1)", 1);
assertUpdate("INSERT INTO " + tableName + " VALUES ('two', 2)", 1);
assertExplain("EXPLAIN ALTER TABLE " + tableName + " EXECUTE EXPIRE_SNAPSHOTS (retention_threshold => '0s')",
"SimpleTableExecute\\[iceberg:schemaTableName:tpch.test_expiring_snapshots.*\\{retentionThreshold=0\\.00s}.*");
}
@Test
public void testExpireSnapshotsParameterValidation()
{
assertQueryFails(
"ALTER TABLE no_such_table_exists EXECUTE EXPIRE_SNAPSHOTS",
"\\Qline 1:1: Table 'iceberg.tpch.no_such_table_exists' does not exist");
assertQueryFails(
"ALTER TABLE nation EXECUTE EXPIRE_SNAPSHOTS (retention_threshold => '33')",
"\\QUnable to set catalog 'iceberg' table procedure 'EXPIRE_SNAPSHOTS' property 'retention_threshold' to ['33']: duration is not a valid data duration string: 33");
assertQueryFails(
"ALTER TABLE nation EXECUTE EXPIRE_SNAPSHOTS (retention_threshold => '33mb')",
"\\QUnable to set catalog 'iceberg' table procedure 'EXPIRE_SNAPSHOTS' property 'retention_threshold' to ['33mb']: Unknown time unit: mb");
assertQueryFails(
"ALTER TABLE nation EXECUTE EXPIRE_SNAPSHOTS (retention_threshold => '33s')",
"\\QRetention specified (33.00s) is shorter than the minimum retention configured in the system (7.00d). Minimum retention can be changed with iceberg.expire_snapshots.min-retention configuration property or iceberg.expire_snapshots_min_retention session property");
}
@Test
public void testRemoveOrphanFiles()
throws Exception
{
String tableName = "test_deleting_orphan_files_unnecessary_files" + randomTableSuffix();
Session sessionWithShortRetentionUnlocked = prepareCleanUpSession();
assertUpdate("CREATE TABLE " + tableName + " (key varchar, value integer)");
assertUpdate("INSERT INTO " + tableName + " VALUES ('one', 1)", 1);
Path orphanFile = Files.createFile(Path.of(getIcebergTableDataPath(tableName).toString(), "invalidData." + format));
List<String> initialDataFiles = getAllDataFilesFromTableDirectory(tableName);
assertQuerySucceeds(sessionWithShortRetentionUnlocked, "ALTER TABLE " + tableName + " EXECUTE REMOVE_ORPHAN_FILES (retention_threshold => '0s')");
List<String> updatedDataFiles = getAllDataFilesFromTableDirectory(tableName);
assertThat(updatedDataFiles.size()).isLessThan(initialDataFiles.size());
assertThat(updatedDataFiles).doesNotContain(orphanFile.toString());
}
@Test
public void testIfRemoveOrphanFilesCleansUnnecessaryDataFilesInPartitionedTable()
throws Exception
{
String tableName = "test_deleting_orphan_files_unnecessary_files" + randomTableSuffix();
Session sessionWithShortRetentionUnlocked = prepareCleanUpSession();
assertUpdate("CREATE TABLE " + tableName + " (key varchar, value integer) WITH (partitioning = ARRAY['key'])");
assertUpdate("INSERT INTO " + tableName + " VALUES ('one', 1)", 1);
assertUpdate("INSERT INTO " + tableName + " VALUES ('two', 2)", 1);
Path orphanFile = Files.createFile(Path.of(getIcebergTableDataPath(tableName) + "/key=one/", "invalidData." + format));
List<String> initialDataFiles = getAllDataFilesFromTableDirectory(tableName);
assertQuerySucceeds(sessionWithShortRetentionUnlocked, "ALTER TABLE " + tableName + " EXECUTE REMOVE_ORPHAN_FILES (retention_threshold => '0s')");
List<String> updatedDataFiles = getAllDataFilesFromTableDirectory(tableName);
assertThat(updatedDataFiles.size()).isLessThan(initialDataFiles.size());
assertThat(updatedDataFiles).doesNotContain(orphanFile.toString());
}
@Test
public void testIfRemoveOrphanFilesCleansUnnecessaryMetadataFilesInPartitionedTable()
throws Exception
{
String tableName = "test_deleting_orphan_files_unnecessary_files" + randomTableSuffix();
Session sessionWithShortRetentionUnlocked = prepareCleanUpSession();
assertUpdate("CREATE TABLE " + tableName + " (key varchar, value integer) WITH (partitioning = ARRAY['key'])");
assertUpdate("INSERT INTO " + tableName + " VALUES ('one', 1)", 1);
assertUpdate("INSERT INTO " + tableName + " VALUES ('two', 2)", 1);
Path orphanMetadataFile = Files.createFile(Path.of(getIcebergTableMetadataPath(tableName).toString(), "invalidData." + format));
List<String> initialMetadataFiles = getAllMetadataFilesFromTableDirectoryForTable(tableName);
assertQuerySucceeds(sessionWithShortRetentionUnlocked, "ALTER TABLE " + tableName + " EXECUTE REMOVE_ORPHAN_FILES (retention_threshold => '0s')");
List<String> updatedMetadataFiles = getAllMetadataFilesFromTableDirectoryForTable(tableName);
assertThat(updatedMetadataFiles.size()).isLessThan(initialMetadataFiles.size());
assertThat(updatedMetadataFiles).doesNotContain(orphanMetadataFile.toString());
}
@Test
public void testCleaningUpWithTableWithSpecifiedLocationWithSlashAtTheEnd()
throws IOException
{
testCleaningUpWithTableWithSpecifiedLocation("/");
}
@Test
public void testCleaningUpWithTableWithSpecifiedLocationWithoutSlashAtTheEnd()
throws IOException
{
testCleaningUpWithTableWithSpecifiedLocation("");
}
private void testCleaningUpWithTableWithSpecifiedLocation(String suffix)
throws IOException
{
File tempDir = getDistributedQueryRunner().getCoordinator().getBaseDataDir().toFile();
String tempDirPath = tempDir.toURI().toASCIIString() + randomTableSuffix() + suffix;
String tableName = "test_table_cleaning_up_with_location" + randomTableSuffix();
assertUpdate(format("CREATE TABLE %s (key varchar, value integer) WITH(location = '%s')", tableName, tempDirPath));
assertUpdate("INSERT INTO " + tableName + " VALUES ('one', 1)", 1);
assertUpdate("INSERT INTO " + tableName + " VALUES ('two', 2)", 1);
List<String> initialFiles = getAllMetadataFilesFromTableDirectory(tempDirPath);
List<Long> initialSnapshots = getSnapshotIds(tableName);
Session sessionWithShortRetentionUnlocked = prepareCleanUpSession();
assertQuerySucceeds(sessionWithShortRetentionUnlocked, "ALTER TABLE " + tableName + " EXECUTE EXPIRE_SNAPSHOTS (retention_threshold => '0s')");
assertQuerySucceeds(sessionWithShortRetentionUnlocked, "ALTER TABLE " + tableName + " EXECUTE REMOVE_ORPHAN_FILES (retention_threshold => '0s')");
List<String> updatedFiles = getAllMetadataFilesFromTableDirectory(tempDirPath);
List<Long> updatedSnapshots = getSnapshotIds(tableName);
assertThat(updatedFiles.size()).isEqualTo(initialFiles.size() - 1);
assertThat(updatedSnapshots.size()).isLessThan(initialSnapshots.size());
assertThat(updatedSnapshots.size()).isEqualTo(1);
assertThat(initialSnapshots).containsAll(updatedSnapshots);
}
@Test
public void testExplainRemoveOrphanFilesOutput()
{
String tableName = "test_remove_orphan_files_output" + randomTableSuffix();
assertUpdate("CREATE TABLE " + tableName + " (key varchar, value integer) WITH (partitioning = ARRAY['key'])");
assertUpdate("INSERT INTO " + tableName + " VALUES ('one', 1)", 1);
assertUpdate("INSERT INTO " + tableName + " VALUES ('two', 2)", 1);
assertExplain("EXPLAIN ALTER TABLE " + tableName + " EXECUTE REMOVE_ORPHAN_FILES (retention_threshold => '0s')",
"SimpleTableExecute\\[iceberg:schemaTableName:tpch.test_remove_orphan_files.*\\{retentionThreshold=0\\.00s}.*");
}
@Test
public void testRemoveOrphanFilesParameterValidation()
{
assertQueryFails(
"ALTER TABLE no_such_table_exists EXECUTE REMOVE_ORPHAN_FILES",
"\\Qline 1:1: Table 'iceberg.tpch.no_such_table_exists' does not exist");
assertQueryFails(
"ALTER TABLE nation EXECUTE REMOVE_ORPHAN_FILES (retention_threshold => '33')",
"\\QUnable to set catalog 'iceberg' table procedure 'REMOVE_ORPHAN_FILES' property 'retention_threshold' to ['33']: duration is not a valid data duration string: 33");
assertQueryFails(
"ALTER TABLE nation EXECUTE REMOVE_ORPHAN_FILES (retention_threshold => '33mb')",
"\\QUnable to set catalog 'iceberg' table procedure 'REMOVE_ORPHAN_FILES' property 'retention_threshold' to ['33mb']: Unknown time unit: mb");
assertQueryFails(
"ALTER TABLE nation EXECUTE REMOVE_ORPHAN_FILES (retention_threshold => '33s')",
"\\QRetention specified (33.00s) is shorter than the minimum retention configured in the system (7.00d). Minimum retention can be changed with iceberg.remove_orphan_files.min-retention configuration property or iceberg.remove_orphan_files_min_retention session property");
}
@Test
public void testIfDeletesReturnsNumberOfRemovedRows()
{
String tableName = "test_delete_returns_number_of_rows_" + randomTableSuffix();
assertUpdate("CREATE TABLE " + tableName + " (key varchar, value integer) WITH (partitioning = ARRAY['key'])");
assertUpdate("INSERT INTO " + tableName + " VALUES ('one', 1)", 1);
assertUpdate("INSERT INTO " + tableName + " VALUES ('one', 2)", 1);
assertUpdate("INSERT INTO " + tableName + " VALUES ('one', 3)", 1);
assertUpdate("INSERT INTO " + tableName + " VALUES ('two', 1)", 1);
assertUpdate("INSERT INTO " + tableName + " VALUES ('two', 2)", 1);
assertUpdate("DELETE FROM " + tableName + " WHERE key = 'one'", 3);
assertUpdate("DELETE FROM " + tableName + " WHERE key = 'one'"); // TODO change this when iceberg will guarantee to always return this (https://github.com/apache/iceberg/issues/4647)
assertUpdate("DELETE FROM " + tableName + " WHERE key = 'three'");
assertUpdate("DELETE FROM " + tableName + " WHERE key = 'two'", 2);
}
@Test
public void testUpdatingFileFormat()
{
String tableName = "test_updating_file_format_" + randomTableSuffix();
assertUpdate("CREATE TABLE " + tableName + " WITH (format = 'orc') AS SELECT * FROM nation WHERE nationkey < 10", "SELECT count(*) FROM nation WHERE nationkey < 10");
assertQuery("SELECT value FROM \"" + tableName + "$properties\" WHERE key = 'write.format.default'", "VALUES 'ORC'");
assertUpdate("ALTER TABLE " + tableName + " SET PROPERTIES format = 'parquet'");
assertQuery("SELECT value FROM \"" + tableName + "$properties\" WHERE key = 'write.format.default'", "VALUES 'PARQUET'");
assertUpdate("INSERT INTO " + tableName + " SELECT * FROM nation WHERE nationkey >= 10", "SELECT count(*) FROM nation WHERE nationkey >= 10");
assertQuery("SELECT * FROM " + tableName, "SELECT * FROM nation");
assertQuery("SELECT count(*) FROM \"" + tableName + "$files\" WHERE file_path LIKE '%.orc'", "VALUES 1");
assertQuery("SELECT count(*) FROM \"" + tableName + "$files\" WHERE file_path LIKE '%.parquet'", "VALUES 1");
assertUpdate("DROP TABLE " + tableName);
}
@Test
public void testUpdatingInvalidTableProperty()
{
String tableName = "test_updating_invalid_table_property_" + randomTableSuffix();
assertUpdate("CREATE TABLE " + tableName + " (a INT, b INT)");
assertThatThrownBy(() -> query("ALTER TABLE " + tableName + " SET PROPERTIES not_a_valid_table_property = 'a value'"))
.hasMessage("Catalog 'iceberg' table property 'not_a_valid_table_property' does not exist");
assertUpdate("DROP TABLE " + tableName);
}
@Test
public void testEmptyCreateTableAsSelect()
{
String tableName = "test_empty_ctas_" + randomTableSuffix();
assertUpdate("CREATE TABLE " + tableName + " AS SELECT * FROM nation WHERE false", 0);
List<Long> initialTableSnapshots = getSnapshotIds(tableName);
assertThat(initialTableSnapshots.size())
.withFailMessage("CTAS operations must create Iceberg snapshot independently whether the selection is empty or not")
.isEqualTo(1);
assertQueryReturnsEmptyResult("SELECT * FROM " + tableName);
assertUpdate("DROP TABLE " + tableName);
}
@Test
public void testEmptyInsert()
{
String tableName = "test_empty_insert_" + randomTableSuffix();
assertUpdate("CREATE TABLE " + tableName + " AS SELECT * FROM nation", "SELECT count(*) FROM nation");
List<Long> initialTableSnapshots = getSnapshotIds(tableName);
assertUpdate("INSERT INTO " + tableName + " SELECT * FROM nation WHERE false", 0);
List<Long> updatedTableSnapshots = getSnapshotIds(tableName);
assertThat(initialTableSnapshots)
.withFailMessage("INSERT operations that are not changing the state of the table must not cause the creation of a new Iceberg snapshot")
.hasSize(1)
.isEqualTo(updatedTableSnapshots);
assertUpdate("DROP TABLE " + tableName);
}
@Test
public void testEmptyUpdate()
{
String tableName = "test_empty_update_" + randomTableSuffix();
assertUpdate("CREATE TABLE " + tableName + " AS SELECT * FROM nation", "SELECT count(*) FROM nation");
List<Long> initialTableSnapshots = getSnapshotIds(tableName);
assertUpdate("UPDATE " + tableName + " SET comment = 'new comment' WHERE nationkey IS NULL", 0);
List<Long> updatedTableSnapshots = getSnapshotIds(tableName);
assertThat(initialTableSnapshots)
.withFailMessage("UPDATE operations that are not changing the state of the table must not cause the creation of a new Iceberg snapshot")
.hasSize(1)
.isEqualTo(updatedTableSnapshots);
assertUpdate("DROP TABLE " + tableName);
}
@Test
public void testEmptyDelete()
{
String tableName = "test_empty_delete_" + randomTableSuffix();
assertUpdate("CREATE TABLE " + tableName + " WITH (format = '" + format.name() + "') AS SELECT * FROM nation", "SELECT count(*) FROM nation");
List<Long> initialTableSnapshots = getSnapshotIds(tableName);
assertUpdate("DELETE FROM " + tableName + " WHERE nationkey IS NULL", 0);
List<Long> updatedTableSnapshots = getSnapshotIds(tableName);
assertThat(initialTableSnapshots)
.withFailMessage("DELETE operations that are not changing the state of the table must not cause the creation of a new Iceberg snapshot")
.hasSize(1)
.isEqualTo(updatedTableSnapshots);
assertUpdate("DROP TABLE " + tableName);
}
@Test
public void testModifyingOldSnapshotIsNotPossible()
{
String tableName = "test_modifying_old_snapshot_" + randomTableSuffix();
assertUpdate(format("CREATE TABLE %s (col int)", tableName));
assertUpdate(format("INSERT INTO %s VALUES 1,2,3", tableName), 3);
long oldSnapshotId = getCurrentSnapshotId(tableName);
assertUpdate(format("INSERT INTO %s VALUES 4,5,6", tableName), 3);
assertQuery(format("SELECT * FROM \"%s@%d\"", tableName, oldSnapshotId), "VALUES 1,2,3");
assertThatThrownBy(() -> query(format("INSERT INTO \"%s@%d\" VALUES 7,8,9", tableName, oldSnapshotId)))
.hasMessage("Modifying old snapshot is not supported in Iceberg.");
assertThatThrownBy(() -> query(format("DELETE FROM \"%s@%d\" WHERE col = 5", tableName, oldSnapshotId)))
.hasMessage("Modifying old snapshot is not supported in Iceberg.");
assertThatThrownBy(() -> query(format("UPDATE \"%s@%d\" SET col = 50 WHERE col = 5", tableName, oldSnapshotId)))
.hasMessage("Modifying old snapshot is not supported in Iceberg.");
assertThatThrownBy(() -> query(format("ALTER TABLE \"%s@%d\" EXECUTE OPTIMIZE", tableName, oldSnapshotId)))
.hasMessage("Modifying old snapshot is not supported in Iceberg.");
assertUpdate(format("INSERT INTO \"%s@%d\" VALUES 7,8,9", tableName, getCurrentSnapshotId(tableName)), 3);
assertUpdate(format("DELETE FROM \"%s@%d\" WHERE col = 9", tableName, getCurrentSnapshotId(tableName)), 1);
assertUpdate(format("UPDATE \"%s@%d\" set col = 50 WHERE col = 5", tableName, getCurrentSnapshotId(tableName)), 1);
assertQuerySucceeds(format("ALTER TABLE \"%s@%d\" EXECUTE OPTIMIZE", tableName, getCurrentSnapshotId(tableName)));
assertQuery(format("SELECT * FROM %s", tableName), "VALUES 1,2,3,4,50,6,7,8");
assertUpdate("DROP TABLE " + tableName);
}
private Session prepareCleanUpSession()
{
return Session.builder(getSession())
.setCatalogSessionProperty("iceberg", "expire_snapshots_min_retention", "0s")
.setCatalogSessionProperty("iceberg", "remove_orphan_files_min_retention", "0s")
.build();
}
private List<String> getAllMetadataFilesFromTableDirectoryForTable(String tableName)
throws IOException
{
String schema = getSession().getSchema().orElseThrow();
Path tableDataDir = getDistributedQueryRunner().getCoordinator().getBaseDataDir().resolve("iceberg_data").resolve(schema).resolve(tableName).resolve("metadata");
return listAllTableFilesInDirectory(tableDataDir);
}
private List<String> getAllMetadataFilesFromTableDirectory(String tableDataDir)
throws IOException
{
return listAllTableFilesInDirectory(Path.of(URI.create(tableDataDir).getPath()));
}
private List<String> listAllTableFilesInDirectory(Path tableDataPath)
throws IOException
{
try (Stream<Path> walk = Files.walk(tableDataPath)) {
return walk
.filter(Files::isRegularFile)
.filter(path -> !path.getFileName().toString().matches("\\..*\\.crc"))
.map(Path::toString)
.collect(toImmutableList());
}
}
private List<Long> getSnapshotIds(String tableName)
{
return getQueryRunner().execute(format("SELECT snapshot_id FROM \"%s$snapshots\"", tableName))
.getOnlyColumn()
.map(Long.class::cast)
.collect(toUnmodifiableList());
}
private long getCurrentSnapshotId(String tableName)
{
return (long) computeScalar("SELECT snapshot_id FROM \"" + tableName + "$snapshots\" ORDER BY committed_at DESC LIMIT 1");
}
private Path getIcebergTableDataPath(String tableName)
{
return getIcebergTablePath(tableName, "data");
}
private Path getIcebergTableMetadataPath(String tableName)
{
return getIcebergTablePath(tableName, "metadata");
}
private Path getIcebergTablePath(String tableName, String suffix)
{
String schema = getSession().getSchema().orElseThrow();
return getDistributedQueryRunner().getCoordinator().getBaseDataDir().resolve("iceberg_data").resolve(schema).resolve(tableName).resolve(suffix);
}
}
| Test Iceberg transforms with timestamp with time zone
This adds tests for the `day`, `month` and `year` partitioning transforms
over `timestamp with time zone`. They are similar to the existing tests for
the `timestamp` type.
| plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/BaseIcebergConnectorTest.java | Test Iceberg transforms with timestamp with time zone | <ide><path>lugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/BaseIcebergConnectorTest.java
<ide> }
<ide>
<ide> @Test
<add> public void testDayTransformTimestampWithTimeZone()
<add> {
<add> assertUpdate("CREATE TABLE test_day_transform_timestamptz (d timestamp(6) with time zone, b integer) WITH (partitioning = ARRAY['day(d)'])");
<add>
<add> String values = "VALUES " +
<add> "(NULL, 101)," +
<add> "(TIMESTAMP '1969-12-25 15:13:12.876543 UTC', 8)," +
<add> "(TIMESTAMP '1969-12-30 18:47:33.345678 UTC', 9)," +
<add> "(TIMESTAMP '1969-12-31 00:00:00.000000 UTC', 10)," +
<add> "(TIMESTAMP '1969-12-31 05:06:07.234567 UTC', 11)," +
<add> "(TIMESTAMP '1970-01-01 12:03:08.456789 UTC', 12)," +
<add> "(TIMESTAMP '2015-01-01 10:01:23.123456 UTC', 1)," +
<add> "(TIMESTAMP '2015-01-01 11:10:02.987654 UTC', 2)," +
<add> "(TIMESTAMP '2015-01-01 12:55:00.456789 UTC', 3)," +
<add> "(TIMESTAMP '2015-05-15 13:05:01.234567 UTC', 4)," +
<add> "(TIMESTAMP '2015-05-15 14:21:02.345678 UTC', 5)," +
<add> "(TIMESTAMP '2020-02-21 15:11:11.876543 UTC', 6)," +
<add> "(TIMESTAMP '2020-02-21 16:12:12.654321 UTC', 7)";
<add> assertUpdate("INSERT INTO test_day_transform_timestamptz " + values, 13);
<add> assertThat(query("SELECT * FROM test_day_transform_timestamptz"))
<add> .matches(values);
<add>
<add> String expected = "VALUES " +
<add> "(NULL, BIGINT '1', NULL, NULL, 101, 101), " +
<add> "(DATE '1969-12-25', 1, TIMESTAMP '1969-12-25 15:13:12.876543 UTC', TIMESTAMP '1969-12-25 15:13:12.876543 UTC', 8, 8), " +
<add> "(DATE '1969-12-30', 1, TIMESTAMP '1969-12-30 18:47:33.345678 UTC', TIMESTAMP '1969-12-30 18:47:33.345678 UTC', 9, 9), " +
<add> "(DATE '1969-12-31', 2, TIMESTAMP '1969-12-31 00:00:00.000000 UTC', TIMESTAMP '1969-12-31 05:06:07.234567 UTC', 10, 11), " +
<add> "(DATE '1970-01-01', 1, TIMESTAMP '1970-01-01 12:03:08.456789 UTC', TIMESTAMP '1970-01-01 12:03:08.456789 UTC', 12, 12), " +
<add> "(DATE '2015-01-01', 3, TIMESTAMP '2015-01-01 10:01:23.123456 UTC', TIMESTAMP '2015-01-01 12:55:00.456789 UTC', 1, 3), " +
<add> "(DATE '2015-05-15', 2, TIMESTAMP '2015-05-15 13:05:01.234567 UTC', TIMESTAMP '2015-05-15 14:21:02.345678 UTC', 4, 5), " +
<add> "(DATE '2020-02-21', 2, TIMESTAMP '2020-02-21 15:11:11.876543 UTC', TIMESTAMP '2020-02-21 16:12:12.654321 UTC', 6, 7)";
<add> String expectedTimestampStats = "'1969-12-25 15:13:12.876 UTC', '2020-02-21 16:12:12.654 UTC'";
<add> if (format == ORC) {
<add> expected = "VALUES " +
<add> "(NULL, BIGINT '1', NULL, NULL, 101, 101), " +
<add> "(DATE '1969-12-25', 1, TIMESTAMP '1969-12-25 15:13:12.876000 UTC', TIMESTAMP '1969-12-25 15:13:12.876999 UTC', 8, 8), " +
<add> "(DATE '1969-12-30', 1, TIMESTAMP '1969-12-30 18:47:33.345000 UTC', TIMESTAMP '1969-12-30 18:47:33.345999 UTC', 9, 9), " +
<add> "(DATE '1969-12-31', 2, TIMESTAMP '1969-12-31 00:00:00.000000 UTC', TIMESTAMP '1969-12-31 05:06:07.234999 UTC', 10, 11), " +
<add> "(DATE '1970-01-01', 1, TIMESTAMP '1970-01-01 12:03:08.456000 UTC', TIMESTAMP '1970-01-01 12:03:08.456999 UTC', 12, 12), " +
<add> "(DATE '2015-01-01', 3, TIMESTAMP '2015-01-01 10:01:23.123000 UTC', TIMESTAMP '2015-01-01 12:55:00.456999 UTC', 1, 3), " +
<add> "(DATE '2015-05-15', 2, TIMESTAMP '2015-05-15 13:05:01.234000 UTC', TIMESTAMP '2015-05-15 14:21:02.345999 UTC', 4, 5), " +
<add> "(DATE '2020-02-21', 2, TIMESTAMP '2020-02-21 15:11:11.876000 UTC', TIMESTAMP '2020-02-21 16:12:12.654999 UTC', 6, 7)";
<add> }
<add>
<add> assertThat(query("SELECT partition.d_day, record_count, data.d.min, data.d.max, data.b.min, data.b.max FROM \"test_day_transform_timestamptz$partitions\""))
<add> .matches(expected);
<add>
<add> // Exercise IcebergMetadata.applyFilter with non-empty Constraint.predicate, via non-pushdownable predicates
<add> assertThat(query("SELECT * FROM test_day_transform_timestamptz WHERE day_of_week(d) = 3 AND b % 7 = 3"))
<add> .matches("VALUES (TIMESTAMP '1969-12-31 00:00:00.000000 UTC', 10)");
<add>
<add> assertThat(query("SHOW STATS FOR test_day_transform_timestamptz"))
<add> .skippingTypesCheck()
<add> .matches("VALUES " +
<add> " ('d', NULL, NULL, 0.0769231e0, NULL, " + expectedTimestampStats + "), " +
<add> " ('b', NULL, NULL, 0e0, NULL, '1', '101'), " +
<add> " (NULL, NULL, NULL, NULL, 13e0, NULL, NULL)");
<add>
<add> assertUpdate("DROP TABLE test_day_transform_timestamptz");
<add> }
<add>
<add> @Test
<ide> public void testMonthTransformDate()
<ide> {
<ide> assertUpdate("CREATE TABLE test_month_transform_date (d DATE, b BIGINT) WITH (partitioning = ARRAY['month(d)'])");
<ide> }
<ide>
<ide> @Test
<add> public void testMonthTransformTimestampWithTimeZone()
<add> {
<add> assertUpdate("CREATE TABLE test_month_transform_timestamptz (d timestamp(6) with time zone, b integer) WITH (partitioning = ARRAY['month(d)'])");
<add>
<add> String values = "VALUES " +
<add> "(NULL, 101)," +
<add> "(TIMESTAMP '1969-11-15 15:13:12.876543 UTC', 8)," +
<add> "(TIMESTAMP '1969-11-19 18:47:33.345678 UTC', 9)," +
<add> "(TIMESTAMP '1969-12-01 00:00:00.000000 UTC', 10)," +
<add> "(TIMESTAMP '1969-12-01 05:06:07.234567 UTC', 11)," +
<add> "(TIMESTAMP '1970-01-01 12:03:08.456789 UTC', 12)," +
<add> "(TIMESTAMP '2015-01-01 10:01:23.123456 UTC', 1)," +
<add> "(TIMESTAMP '2015-01-01 11:10:02.987654 UTC', 2)," +
<add> "(TIMESTAMP '2015-01-01 12:55:00.456789 UTC', 3)," +
<add> "(TIMESTAMP '2015-05-15 13:05:01.234567 UTC', 4)," +
<add> "(TIMESTAMP '2015-05-15 14:21:02.345678 UTC', 5)," +
<add> "(TIMESTAMP '2020-02-21 15:11:11.876543 UTC', 6)," +
<add> "(TIMESTAMP '2020-02-21 16:12:12.654321 UTC', 7)";
<add> assertUpdate("INSERT INTO test_month_transform_timestamptz " + values, 13);
<add> assertThat(query("SELECT * FROM test_month_transform_timestamptz"))
<add> .matches(values);
<add>
<add> String expected = "VALUES " +
<add> "(NULL, BIGINT '1', NULL, NULL, 101, 101), " +
<add> "(-2, 2, TIMESTAMP '1969-11-15 15:13:12.876543 UTC', TIMESTAMP '1969-11-19 18:47:33.345678 UTC', 8, 9), " +
<add> "(-1, 2, TIMESTAMP '1969-12-01 00:00:00.000000 UTC', TIMESTAMP '1969-12-01 05:06:07.234567 UTC', 10, 11), " +
<add> "(0, 1, TIMESTAMP '1970-01-01 12:03:08.456789 UTC', TIMESTAMP '1970-01-01 12:03:08.456789 UTC', 12, 12), " +
<add> "(540, 3, TIMESTAMP '2015-01-01 10:01:23.123456 UTC', TIMESTAMP '2015-01-01 12:55:00.456789 UTC', 1, 3), " +
<add> "(544, 2, TIMESTAMP '2015-05-15 13:05:01.234567 UTC', TIMESTAMP '2015-05-15 14:21:02.345678 UTC', 4, 5), " +
<add> "(601, 2, TIMESTAMP '2020-02-21 15:11:11.876543 UTC', TIMESTAMP '2020-02-21 16:12:12.654321 UTC', 6, 7)";
<add> String expectedTimestampStats = "'1969-11-15 15:13:12.876 UTC', '2020-02-21 16:12:12.654 UTC'";
<add> if (format == ORC) {
<add> expected = "VALUES " +
<add> "(NULL, BIGINT '1', NULL, NULL, 101, 101), " +
<add> "(-2, 2, TIMESTAMP '1969-11-15 15:13:12.876000 UTC', TIMESTAMP '1969-11-19 18:47:33.345999 UTC', 8, 9), " +
<add> "(-1, 2, TIMESTAMP '1969-12-01 00:00:00.000000 UTC', TIMESTAMP '1969-12-01 05:06:07.234999 UTC', 10, 11), " +
<add> "(0, 1, TIMESTAMP '1970-01-01 12:03:08.456000 UTC', TIMESTAMP '1970-01-01 12:03:08.456999 UTC', 12, 12), " +
<add> "(540, 3, TIMESTAMP '2015-01-01 10:01:23.123000 UTC', TIMESTAMP '2015-01-01 12:55:00.456999 UTC', 1, 3), " +
<add> "(544, 2, TIMESTAMP '2015-05-15 13:05:01.234000 UTC', TIMESTAMP '2015-05-15 14:21:02.345999 UTC', 4, 5), " +
<add> "(601, 2, TIMESTAMP '2020-02-21 15:11:11.876000 UTC', TIMESTAMP '2020-02-21 16:12:12.654999 UTC', 6, 7)";
<add> }
<add>
<add> assertThat(query("SELECT partition.d_month, record_count, data.d.min, data.d.max, data.b.min, data.b.max FROM \"test_month_transform_timestamptz$partitions\""))
<add> .matches(expected);
<add>
<add> // Exercise IcebergMetadata.applyFilter with non-empty Constraint.predicate, via non-pushdownable predicates
<add> assertThat(query("SELECT * FROM test_month_transform_timestamptz WHERE day_of_week(d) = 1 AND b % 7 = 3"))
<add> .matches("VALUES (TIMESTAMP '1969-12-01 00:00:00.000000 UTC', 10)");
<add>
<add> assertThat(query("SHOW STATS FOR test_month_transform_timestamptz"))
<add> .skippingTypesCheck()
<add> .matches("VALUES " +
<add> " ('d', NULL, NULL, 0.0769231e0, NULL, " + expectedTimestampStats + "), " +
<add> " ('b', NULL, NULL, 0e0, NULL, '1', '101'), " +
<add> " (NULL, NULL, NULL, NULL, 13e0, NULL, NULL)");
<add>
<add> assertUpdate("DROP TABLE test_month_transform_timestamptz");
<add> }
<add>
<add> @Test
<ide> public void testYearTransformDate()
<ide> {
<ide> assertUpdate("CREATE TABLE test_year_transform_date (d DATE, b BIGINT) WITH (partitioning = ARRAY['year(d)'])");
<ide> " (NULL, NULL, NULL, NULL, 13e0, NULL, NULL)");
<ide>
<ide> dropTable("test_year_transform_timestamp");
<add> }
<add>
<add> @Test
<add> public void testYearTransformTimestampWithTimeZone()
<add> {
<add> assertUpdate("CREATE TABLE test_year_transform_timestamptz (d timestamp(6) with time zone, b integer) WITH (partitioning = ARRAY['year(d)'])");
<add>
<add> String values = "VALUES " +
<add> "(NULL, 101)," +
<add> "(TIMESTAMP '1968-03-15 15:13:12.876543 UTC', 1)," +
<add> "(TIMESTAMP '1968-11-19 18:47:33.345678 UTC', 2)," +
<add> "(TIMESTAMP '1969-01-01 00:00:00.000000 UTC', 3)," +
<add> "(TIMESTAMP '1969-01-01 05:06:07.234567 UTC', 4)," +
<add> "(TIMESTAMP '1970-01-18 12:03:08.456789 UTC', 5)," +
<add> "(TIMESTAMP '1970-03-14 10:01:23.123456 UTC', 6)," +
<add> "(TIMESTAMP '1970-08-19 11:10:02.987654 UTC', 7)," +
<add> "(TIMESTAMP '1970-12-31 12:55:00.456789 UTC', 8)," +
<add> "(TIMESTAMP '2015-05-15 13:05:01.234567 UTC', 9)," +
<add> "(TIMESTAMP '2015-09-15 14:21:02.345678 UTC', 10)," +
<add> "(TIMESTAMP '2020-02-21 15:11:11.876543 UTC', 11)," +
<add> "(TIMESTAMP '2020-08-21 16:12:12.654321 UTC', 12)";
<add> assertUpdate("INSERT INTO test_year_transform_timestamptz " + values, 13);
<add> assertThat(query("SELECT * FROM test_year_transform_timestamptz"))
<add> .matches(values);
<add>
<add> String expected = "VALUES " +
<add> "(NULL, BIGINT '1', NULL, NULL, 101, 101), " +
<add> "(-2, 2, TIMESTAMP '1968-03-15 15:13:12.876543 UTC', TIMESTAMP '1968-11-19 18:47:33.345678 UTC', 1, 2), " +
<add> "(-1, 2, TIMESTAMP '1969-01-01 00:00:00.000000 UTC', TIMESTAMP '1969-01-01 05:06:07.234567 UTC', 3, 4), " +
<add> "(0, 4, TIMESTAMP '1970-01-18 12:03:08.456789 UTC', TIMESTAMP '1970-12-31 12:55:00.456789 UTC', 5, 8), " +
<add> "(45, 2, TIMESTAMP '2015-05-15 13:05:01.234567 UTC', TIMESTAMP '2015-09-15 14:21:02.345678 UTC', 9, 10), " +
<add> "(50, 2, TIMESTAMP '2020-02-21 15:11:11.876543 UTC', TIMESTAMP '2020-08-21 16:12:12.654321 UTC', 11, 12)";
<add> String expectedTimestampStats = "'1968-03-15 15:13:12.876 UTC', '2020-08-21 16:12:12.654 UTC'";
<add> if (format == ORC) {
<add> expected = "VALUES " +
<add> "(NULL, BIGINT '1', NULL, NULL, 101, 101), " +
<add> "(-2, 2, TIMESTAMP '1968-03-15 15:13:12.876000 UTC', TIMESTAMP '1968-11-19 18:47:33.345999 UTC', 1, 2), " +
<add> "(-1, 2, TIMESTAMP '1969-01-01 00:00:00.000000 UTC', TIMESTAMP '1969-01-01 05:06:07.234999 UTC', 3, 4), " +
<add> "(0, 4, TIMESTAMP '1970-01-18 12:03:08.456000 UTC', TIMESTAMP '1970-12-31 12:55:00.456999 UTC', 5, 8), " +
<add> "(45, 2, TIMESTAMP '2015-05-15 13:05:01.234000 UTC', TIMESTAMP '2015-09-15 14:21:02.345999 UTC', 9, 10), " +
<add> "(50, 2, TIMESTAMP '2020-02-21 15:11:11.876000 UTC', TIMESTAMP '2020-08-21 16:12:12.654999 UTC', 11, 12)";
<add> }
<add>
<add> assertThat(query("SELECT partition.d_year, record_count, data.d.min, data.d.max, data.b.min, data.b.max FROM \"test_year_transform_timestamptz$partitions\""))
<add> .matches(expected);
<add>
<add> // Exercise IcebergMetadata.applyFilter with non-empty Constraint.predicate, via non-pushdownable predicates
<add> assertThat(query("SELECT * FROM test_year_transform_timestamptz WHERE day_of_week(d) = 2 AND b % 7 = 3"))
<add> .matches("VALUES (TIMESTAMP '2015-09-15 14:21:02.345678 UTC', 10)");
<add>
<add> assertThat(query("SHOW STATS FOR test_year_transform_timestamptz"))
<add> .skippingTypesCheck()
<add> .matches("VALUES " +
<add> " ('d', NULL, NULL, 0.0769231e0, NULL, " + expectedTimestampStats + "), " +
<add> " ('b', NULL, NULL, 0e0, NULL, '1', '101'), " +
<add> " (NULL, NULL, NULL, NULL, 13e0, NULL, NULL)");
<add>
<add> assertUpdate("DROP TABLE test_year_transform_timestamptz");
<ide> }
<ide>
<ide> @Test |
|
JavaScript | agpl-3.0 | 185fddd59841f0d689593581bf5ff92aaf83edb2 | 0 | euqip/emoncms,emoncms/emoncms,inverse/emoncms,jeremypoulter/emoncms,vinipletsch/emoncms,thaipowertech/emoncms,jesjimher/emoncms,JFPayeur/emoncms,saydulk/emoncms,euqip/emoncmsbase,inverse/emoncms,JFPayeur/emoncms,saydulk/emoncms,emoncms/emoncms,nmgeek/emoncms,euqip/emoncmsbase,chaveiro/emoncms,emoncms/emoncms,0ddie/emoncms,euqip/emoncms,vinipletsch/emoncms,inverse/emoncms,saydulk/emoncms,euqip/emoncmsbase,JFPayeur/emoncms,jeremypoulter/emoncms,jesjimher/emoncms,euqip/emoncms,inverse/emoncms,chaveiro/emoncms,0ddie/emoncms,vinipletsch/emoncms,jesjimher/emoncms,thaipowertech/emoncms,jesjimher/emoncms,sittipol123/emoncms,emoncms/emoncms,emoncms/emoncms,euqip/emoncms,nmgeek/emoncms,0ddie/emoncms,arjenhiemstra/emoncms,arjenhiemstra/emoncms,0ddie/emoncms,sittipol123/emoncms,JFPayeur/emoncms,nmgeek/emoncms,jeremypoulter/emoncms,jeremypoulter/emoncms,sittipol123/emoncms,chaveiro/emoncms,arjenhiemstra/emoncms,thaipowertech/emoncms,chaveiro/emoncms,thaipowertech/emoncms |
var input = {
'list':function()
{
var result = {};
$.ajax({ url: path+"input/list.json", dataType: 'json', async: false, success: function(data) {result = data;} });
return result;
},
'set':function(id, fields)
{
var result = {};
$.ajax({ url: path+"input/set.json", data: "inputid="+id+"&fields="+JSON.stringify(fields), async: false, success: function(data){} });
return result;
},
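// named "remove" rather than "delete": "delete" is a reserved word in JavaScript,
// and calling input.delete(...) via dot notation throws an error in older IE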
'remove':function(id)
{
$.ajax({ url: path+"input/delete.json", data: "inputid="+id, async: false, success: function(data){} });
},
// Process
'add_process':function(inputid,processid,arg,newfeedname)
{
var result = {};
$.ajax({ url: path+"input/process/add.json", data: "inputid="+inputid+"&processid="+processid+"&arg="+arg+"&newfeedname="+newfeedname, async: false, success: function(data){result = data;} });
return result;
},
'processlist':function(inputid)
{
var result = {};
$.ajax({ url: path+"input/process/list.json", data: "inputid="+inputid, async: false, success: function(data){result = data;} });
return result;
},
'delete_process':function(inputid,processid)
{
var result = {};
$.ajax({ url: path+"input/process/delete.json", data: "inputid="+inputid+"&processid="+processid, async: false, success: function(data){result = data;} });
return result;
},
'move_process':function(inputid,processid,moveby)
{
var result = {};
$.ajax({ url: path+"input/process/moveby.json", data: "inputid="+inputid+"&processid="+processid+"&moveby="+moveby, async: false, success: function(data){result = data;} });
return result;
},
'reset_processlist':function(inputid,processid,moveby)
{
var result = {};
$.ajax({ url: path+"input/process/reset.json", data: "inputid="+inputid, async: false, success: function(data){result = data;} });
return result;
}
}
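// Minimal usage sketch (hypothetical ids and field names; every call blocks,
// because the requests above are issued with async: false):
// var inputs = input.list(); // fetch all inputs
// input.set(5, {description: "house power"}); // update fields on input 5
// var processes = input.processlist(5); // read the process list of input 5
// input.remove(5); // delete input 5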
| Modules/input/Views/input.js |
var input = {
'list':function()
{
var result = {};
$.ajax({ url: path+"input/list.json", dataType: 'json', async: false, success: function(data) {result = data;} });
return result;
},
'set':function(id, fields)
{
var result = {};
$.ajax({ url: path+"input/set.json", data: "inputid="+id+"&fields="+JSON.stringify(fields), async: false, success: function(data){} });
return result;
},
'delete':function(id)
{
$.ajax({ url: path+"input/delete.json", data: "inputid="+id, async: false, success: function(data){} });
},
// Process
'add_process':function(inputid,processid,arg,newfeedname)
{
var result = {};
$.ajax({ url: path+"input/process/add.json", data: "inputid="+inputid+"&processid="+processid+"&arg="+arg+"&newfeedname="+newfeedname, async: false, success: function(data){result = data;} });
return result;
},
'processlist':function(inputid)
{
var result = {};
$.ajax({ url: path+"input/process/list.json", data: "inputid="+inputid, async: false, success: function(data){result = data;} });
return result;
},
'delete_process':function(inputid,processid)
{
var result = {};
$.ajax({ url: path+"input/process/delete.json", data: "inputid="+inputid+"&processid="+processid, async: false, success: function(data){result = data;} });
return result;
},
'move_process':function(inputid,processid,moveby)
{
var result = {};
$.ajax({ url: path+"input/process/moveby.json", data: "inputid="+inputid+"&processid="+processid+"&moveby="+moveby, async: false, success: function(data){result = data;} });
return result;
},
'reset_processlist':function(inputid,processid,moveby)
{
var result = {};
$.ajax({ url: path+"input/process/reset.json", data: "inputid="+inputid, async: false, success: function(data){result = data;} });
return result;
}
}
| rename delete function to remove so that it does not produce an error in IE
| Modules/input/Views/input.js | rename delete function to remove so that it does not produce an error in IE | <ide><path>odules/input/Views/input.js
<ide> return result;
<ide> },
<ide>
<del> 'delete':function(id)
<add> 'remove':function(id)
<ide> {
<ide> $.ajax({ url: path+"input/delete.json", data: "inputid="+id, async: false, success: function(data){} });
<ide> }, |
|
JavaScript | mit | 50ad04c29e0023f2b1ba583a8602e8d8b0c8fd4c | 0 | massimiliano76/brackets,NickersF/brackets,GHackAnonymous/brackets,ricciozhang/brackets,Andrey-Pavlov/brackets,Rynaro/brackets,sedge/nimble,Pomax/brackets,dtcom/MyPSDBracket,iamchathu/brackets,82488059/brackets,veveykocute/brackets,shiyamkumar/brackets,sedge/nimble,siddharta1337/brackets,andrewnc/brackets,Wikunia/brackets,zaggino/brackets-electron,petetnt/brackets,No9/brackets,NGHGithub/brackets,brianjking/brackets,dangkhue27/brackets,JordanTheriault/brackets,goldcase/brackets,RobertJGabriel/brackets,simon66/brackets,zhukaixy/brackets,dtcom/MyPSDBracket,petetnt/brackets,riselabs-ufba/RiPLE-HC-ExperimentalData,siddharta1337/brackets,jacobnash/brackets,kolipka/brackets,udhayam/brackets,Rajat-dhyani/brackets,Lojsan123/brackets,fashionsun/brackets,MahadevanSrinivasan/brackets,adobe/brackets,lunode/brackets,michaeljayt/brackets,falcon1812/brackets,pratts/brackets,goldcase/brackets,humphd/brackets,wangjun/brackets,quasto/ArduinoStudio,thr0w/brackets,GHackAnonymous/brackets,alexkid64/brackets,resir014/brackets,stowball/brackets,busykai/brackets,dangkhue27/brackets,lovewitty/brackets,StephanieMak/brackets,emanziano/brackets,stowball/brackets,NGHGithub/brackets,tan9/brackets,treejames/brackets,ChaofengZhou/brackets,goldcase/brackets,SidBala/brackets,jiawenbo/brackets,jacobnash/brackets,kilroy23/brackets,TylerL-uxai/brackets,veveykocute/brackets,amrelnaggar/brackets,gupta-tarun/brackets,alicoding/nimble,mozilla/brackets,RobertJGabriel/brackets,jacobnash/brackets,chinnyannieb/brackets,NKcentinel/brackets,iamchathu/brackets,falcon1812/brackets,IAmAnubhavSaini/brackets,fvntr/brackets,xantage/brackets,ropik/brackets,ashleygwilliams/brackets,Th30/brackets,nucliweb/brackets,thehogfather/brackets,massimiliano76/brackets,ScalaInc/brackets,ricciozhang/brackets,eric-stanley/brackets,Pomax/brackets,TylerL-uxai/brackets,Cartman0/brackets,sgupta7857/brackets,netlams/brackets,rafaelstz/brackets,Rajat-dhyani/brackets,82488059/brackets,Real-Currents/brackets,pkdevbox/brackets,wangjun/brackets,SidBala/brackets,Lojsan123/brackets,zLeonjo/brackets,wesleifreitas/brackets,rafaelstz/brackets,rafaelstz/brackets,82488059/brackets,jiawenbo/brackets,pratts/brackets,iamchathu/brackets,resir014/brackets,gcommetti/brackets,ScalaInc/brackets,pratts/brackets,y12uc231/brackets,ChaofengZhou/brackets,JordanTheriault/brackets,NickersF/brackets,flukeout/brackets,falcon1812/brackets,srhbinion/brackets,m66n/brackets,riselabs-ufba/RiPLE-HC-ExperimentalData,zhukaixy/brackets,siddharta1337/brackets,MahadevanSrinivasan/brackets,lovewitty/brackets,Wikunia/brackets,gupta-tarun/brackets,Andrey-Pavlov/brackets,chambej/brackets,thehogfather/brackets,cdot-brackets-extensions/nimble-htmlLint,karevn/brackets,IAmAnubhavSaini/brackets,revi/brackets,srhbinion/brackets,jiawenbo/brackets,eric-stanley/brackets,tan9/brackets,Jonavin/brackets,dangkhue27/brackets,ropik/brackets,youprofit/brackets,jmarkina/brackets,lunode/brackets,simon66/brackets,gideonthomas/brackets,StephanieMak/brackets,weebygames/brackets,Free-Technology-Guild/brackets,quasto/ArduinoStudio,Live4Code/brackets,RamirezWillow/brackets,pkdevbox/brackets,flukeout/brackets,sprintr/brackets,srhbinion/brackets,shal1y/brackets,arduino-org/ArduinoStudio,Fcmam5/brackets,mjurczyk/brackets,iamchathu/brackets,zLeonjo/brackets,raygervais/brackets,sophiacaspar/brackets,TylerL-uxai/brackets,uwsd/brackets,sprintr/brackets,macdg/brackets,ForkedRepos/brackets,shal1y/brackets,robertkarlsson/brackets,mcanthony/brackets,fcjailybo/brackets,NKcentin
el/brackets,NKcentinel/brackets,flukeout/brackets,alexkid64/brackets,Rynaro/brackets,No9/brackets,wakermahmud/brackets,uwsd/brackets,NickersF/brackets,abhisekp/brackets,JordanTheriault/brackets,82488059/brackets,humphd/brackets,thehogfather/brackets,stowball/brackets,keir-rex/brackets,shiyamkumar/brackets,rlugojr/brackets,shiyamkumar/brackets,petetnt/brackets,chrisle/brackets,jacobnash/brackets,Jonavin/brackets,jiimaho/brackets,NGHGithub/brackets,gupta-tarun/brackets,wesleifreitas/brackets,pomadgw/brackets,andrewnc/brackets,SebastianBoyd/sebastianboyd.github.io-OLD,gideonthomas/brackets,youprofit/brackets,simon66/brackets,Real-Currents/brackets,adrianhartanto0/brackets,StephanieMak/brackets,nucliweb/brackets,wakermahmud/brackets,raygervais/brackets,riselabs-ufba/RiPLE-HC-ExperimentalData,cdot-brackets-extensions/nimble-htmlLint,ecwebservices/brackets,chambej/brackets,phillipalexander/brackets,eric-stanley/brackets,show0017/brackets,jiawenbo/brackets,jacobnash/brackets,FTG-003/brackets,simon66/brackets,albertinad/brackets,gcommetti/brackets,MantisWare/brackets,ggusman/present,ricciozhang/brackets,chinnyannieb/brackets,y12uc231/brackets,kolipka/brackets,udhayam/brackets,albertinad/brackets,humphd/brackets,gcommetti/brackets,fabricadeaplicativos/brackets,No9/brackets,jmarkina/brackets,sophiacaspar/brackets,netlams/brackets,fcjailybo/brackets,Rajat-dhyani/brackets,MarcelGerber/brackets,phillipalexander/brackets,adobe/brackets,alicoding/nimble,andrewnc/brackets,GHackAnonymous/brackets,Real-Currents/brackets,nucliweb/brackets,fastrde/brackets,fastrde/brackets,CapeSepias/brackets,srinivashappy/brackets,chambej/brackets,FTG-003/brackets,nucliweb/brackets,robertkarlsson/brackets,IAmAnubhavSaini/brackets,michaeljayt/brackets,show0017/brackets,kilroy23/brackets,karevn/brackets,gideonthomas/brackets,malinkie/brackets,adrianhartanto0/brackets,ecwebservices/brackets,sgupta7857/brackets,srinivashappy/brackets,amrelnaggar/brackets,lunode/brackets,malinkie/brackets,MantisWare/brackets,emanziano/brackets,2youyouo2/cocoslite,chrisle/brackets,raygervais/brackets,alexkid64/brackets,Pomax/brackets,alexkid64/brackets,Rynaro/brackets,Fcmam5/brackets,MarcelGerber/brackets,Live4Code/brackets,flukeout/brackets,xantage/brackets,chrismoulton/brackets,youprofit/brackets,srinivashappy/brackets,ecwebservices/brackets,xantage/brackets,wakermahmud/brackets,weebygames/brackets,karevn/brackets,MarcelGerber/brackets,sedge/nimble,pomadgw/brackets,jmarkina/brackets,treejames/brackets,rafaelstz/brackets,resir014/brackets,baig/brackets,ralic/brackets,sophiacaspar/brackets,m66n/brackets,MahadevanSrinivasan/brackets,ashleygwilliams/brackets,IAmAnubhavSaini/brackets,RamirezWillow/brackets,lunode/brackets,eric-stanley/brackets,CapeSepias/brackets,RobertJGabriel/brackets,netlams/brackets,Free-Technology-Guild/brackets,bidle/brackets,nucliweb/brackets,Fcmam5/brackets,macdg/brackets,ls2uper/brackets,fastrde/brackets,ecwebservices/brackets,NGHGithub/brackets,hanmichael/brackets,srhbinion/brackets,amrelnaggar/brackets,baig/brackets,revi/brackets,keir-rex/brackets,richmondgozarin/brackets,lovewitty/brackets,MarcelGerber/brackets,eric-stanley/brackets,Lojsan123/brackets,ficristo/brackets,ggusman/present,Real-Currents/brackets,kilroy23/brackets,emanziano/brackets,ashleygwilliams/brackets,MahadevanSrinivasan/brackets,weebygames/brackets,mjurczyk/brackets,RamirezWillow/brackets,2youyouo2/cocoslite,ralic/brackets,show0017/brackets,richmondgozarin/brackets,albertinad/brackets,busykai/brackets,tan9/brackets,m66n/brackets,ecwebservices/brackets,Jon
avin/brackets,wangjun/brackets,FTG-003/brackets,kolipka/brackets,srhbinion/brackets,zLeonjo/brackets,lovewitty/brackets,StephanieMak/brackets,zaggino/brackets-electron,adrianhartanto0/brackets,fvntr/brackets,richmondgozarin/brackets,robertkarlsson/brackets,treejames/brackets,dtcom/MyPSDBracket,Rajat-dhyani/brackets,kolipka/brackets,humphd/brackets,riselabs-ufba/RiPLE-HC-ExperimentalData,macdg/brackets,veveykocute/brackets,phillipalexander/brackets,ashleygwilliams/brackets,mcanthony/brackets,fashionsun/brackets,youprofit/brackets,pratts/brackets,MahadevanSrinivasan/brackets,jmarkina/brackets,SebastianBoyd/sebastianboyd.github.io-OLD,albertinad/brackets,thr0w/brackets,keir-rex/brackets,falcon1812/brackets,sprintr/brackets,thr0w/brackets,fabricadeaplicativos/brackets,alexkid64/brackets,ficristo/brackets,hanmichael/brackets,ScalaInc/brackets,malinkie/brackets,bidle/brackets,quasto/ArduinoStudio,MantisWare/brackets,jiawenbo/brackets,chrisle/brackets,fabricadeaplicativos/brackets,ls2uper/brackets,zhukaixy/brackets,adrianhartanto0/brackets,mcanthony/brackets,ForkedRepos/brackets,dangkhue27/brackets,andrewnc/brackets,simon66/brackets,uwsd/brackets,baig/brackets,MantisWare/brackets,ForkedRepos/brackets,wesleifreitas/brackets,srinivashappy/brackets,ashleygwilliams/brackets,GHackAnonymous/brackets,thr0w/brackets,abhisekp/brackets,gwynndesign/brackets,82488059/brackets,michaeljayt/brackets,Andrey-Pavlov/brackets,Mosoc/brackets,gwynndesign/brackets,mjurczyk/brackets,ropik/brackets,resir014/brackets,NGHGithub/brackets,Pomax/brackets,siddharta1337/brackets,No9/brackets,quasto/ArduinoStudio,chinnyannieb/brackets,pomadgw/brackets,wangjun/brackets,pomadgw/brackets,RobertJGabriel/brackets,goldcase/brackets,ficristo/brackets,keir-rex/brackets,adrianhartanto0/brackets,udhayam/brackets,jiimaho/brackets,busykai/brackets,shiyamkumar/brackets,MantisWare/brackets,fronzec/brackets,uwsd/brackets,Mosoc/brackets,cdot-brackets-extensions/nimble-htmlLint,wangjun/brackets,xantage/brackets,SebastianBoyd/sebastianboyd.github.io-OLD,adobe/brackets,mozilla/brackets,fashionsun/brackets,fcjailybo/brackets,ralic/brackets,FTG-003/brackets,SidBala/brackets,Live4Code/brackets,gcommetti/brackets,shal1y/brackets,dangkhue27/brackets,ralic/brackets,mjurczyk/brackets,emanziano/brackets,Wikunia/brackets,treejames/brackets,fvntr/brackets,zLeonjo/brackets,fcjailybo/brackets,RamirezWillow/brackets,TylerL-uxai/brackets,FTG-003/brackets,weebygames/brackets,hanmichael/brackets,iamchathu/brackets,baig/brackets,goldcase/brackets,fronzec/brackets,jiimaho/brackets,Mosoc/brackets,chambej/brackets,fronzec/brackets,alicoding/nimble,2youyouo2/cocoslite,RobertJGabriel/brackets,ChaofengZhou/brackets,TylerL-uxai/brackets,rlugojr/brackets,mjurczyk/brackets,GHackAnonymous/brackets,arduino-org/ArduinoStudio,adobe/brackets,chrismoulton/brackets,Free-Technology-Guild/brackets,mat-mcloughlin/brackets,petetnt/brackets,Th30/brackets,massimiliano76/brackets,xantage/brackets,StephanieMak/brackets,alicoding/nimble,Real-Currents/brackets,richmondgozarin/brackets,massimiliano76/brackets,Th30/brackets,mcanthony/brackets,2youyouo2/cocoslite,chinnyannieb/brackets,resir014/brackets,show0017/brackets,Fcmam5/brackets,Free-Technology-Guild/brackets,MarcelGerber/brackets,revi/brackets,fronzec/brackets,sedge/nimble,adobe/brackets,fastrde/brackets,ropik/brackets,sophiacaspar/brackets,tan9/brackets,treejames/brackets,marcominetti/brackets,arduino-org/ArduinoStudio,rafaelstz/brackets,uwsd/brackets,hanmichael/brackets,ScalaInc/brackets,Andrey-Pavlov/brackets,zaggino/brackets-elect
ron,sedge/nimble,y12uc231/brackets,fashionsun/brackets,kilroy23/brackets,flukeout/brackets,fcjailybo/brackets,NKcentinel/brackets,petetnt/brackets,chinnyannieb/brackets,Live4Code/brackets,riselabs-ufba/RiPLE-HC-ExperimentalData,arduino-org/ArduinoStudio,Real-Currents/brackets,wakermahmud/brackets,mozilla/brackets,wesleifreitas/brackets,pkdevbox/brackets,busykai/brackets,marcominetti/brackets,m66n/brackets,sprintr/brackets,Wikunia/brackets,Jonavin/brackets,karevn/brackets,keir-rex/brackets,emanziano/brackets,udhayam/brackets,macdg/brackets,revi/brackets,srinivashappy/brackets,chrisle/brackets,shal1y/brackets,Pomax/brackets,pomadgw/brackets,malinkie/brackets,chrismoulton/brackets,y12uc231/brackets,thehogfather/brackets,ls2uper/brackets,lunode/brackets,arduino-org/ArduinoStudio,raygervais/brackets,chrismoulton/brackets,robertkarlsson/brackets,quasto/ArduinoStudio,massimiliano76/brackets,dtcom/MyPSDBracket,jiimaho/brackets,bidle/brackets,amrelnaggar/brackets,bidle/brackets,thehogfather/brackets,fabricadeaplicativos/brackets,chrismoulton/brackets,CapeSepias/brackets,wesleifreitas/brackets,zLeonjo/brackets,SebastianBoyd/sebastianboyd.github.io-OLD,sgupta7857/brackets,gcommetti/brackets,abhisekp/brackets,Rajat-dhyani/brackets,SebastianBoyd/sebastianboyd.github.io-OLD,sgupta7857/brackets,zhukaixy/brackets,abhisekp/brackets,SidBala/brackets,macdg/brackets,fronzec/brackets,pkdevbox/brackets,Live4Code/brackets,Lojsan123/brackets,ggusman/present,wakermahmud/brackets,ricciozhang/brackets,tan9/brackets,albertinad/brackets,stowball/brackets,baig/brackets,2youyouo2/cocoslite,Rynaro/brackets,jiimaho/brackets,y12uc231/brackets,thr0w/brackets,rlugojr/brackets,NickersF/brackets,andrewnc/brackets,zaggino/brackets-electron,ForkedRepos/brackets,fvntr/brackets,hanmichael/brackets,busykai/brackets,ScalaInc/brackets,gupta-tarun/brackets,Fcmam5/brackets,zaggino/brackets-electron,bidle/brackets,m66n/brackets,mat-mcloughlin/brackets,NickersF/brackets,Cartman0/brackets,ForkedRepos/brackets,Mosoc/brackets,brianjking/brackets,Andrey-Pavlov/brackets,lovewitty/brackets,chambej/brackets,shiyamkumar/brackets,Cartman0/brackets,udhayam/brackets,falcon1812/brackets,pkdevbox/brackets,fashionsun/brackets,shal1y/brackets,Wikunia/brackets,CapeSepias/brackets,karevn/brackets,ChaofengZhou/brackets,Th30/brackets,ralic/brackets,michaeljayt/brackets,zaggino/brackets-electron,fastrde/brackets,IAmAnubhavSaini/brackets,sophiacaspar/brackets,netlams/brackets,amrelnaggar/brackets,ficristo/brackets,sgupta7857/brackets,CapeSepias/brackets,cdot-brackets-extensions/nimble-htmlLint,Th30/brackets,brianjking/brackets,mcanthony/brackets,Free-Technology-Guild/brackets,Rynaro/brackets,siddharta1337/brackets,fvntr/brackets,revi/brackets,robertkarlsson/brackets,mozilla/brackets,Lojsan123/brackets,RamirezWillow/brackets,stowball/brackets,kilroy23/brackets,rlugojr/brackets,veveykocute/brackets,gideonthomas/brackets,mat-mcloughlin/brackets,weebygames/brackets,mat-mcloughlin/brackets,abhisekp/brackets,gwynndesign/brackets,gwynndesign/brackets,brianjking/brackets,phillipalexander/brackets,fabricadeaplicativos/brackets,ls2uper/brackets,Cartman0/brackets,gupta-tarun/brackets,rlugojr/brackets,ChaofengZhou/brackets,youprofit/brackets,sprintr/brackets,ricciozhang/brackets,ggusman/present,ficristo/brackets,No9/brackets,chrisle/brackets,SidBala/brackets,NKcentinel/brackets,michaeljayt/brackets,Cartman0/brackets,richmondgozarin/brackets,kolipka/brackets,phillipalexander/brackets,JordanTheriault/brackets,zhukaixy/brackets,gideonthomas/brackets,mozilla/brackets,netlams
/brackets,humphd/brackets,malinkie/brackets,jmarkina/brackets,Mosoc/brackets,veveykocute/brackets,riselabs-ufba/RiPLE-HC-ExperimentalData,Jonavin/brackets,ls2uper/brackets,JordanTheriault/brackets,raygervais/brackets,brianjking/brackets,pratts/brackets | /*
* Copyright (c) 2012 Adobe Systems Incorporated. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
*/
/*jslint vars: true, plusplus: true, devel: true, nomen: true, indent: 4, maxerr: 50 */
/*global define, $, PathUtils */
define(function (require, exports, module) {
"use strict";
var EditorManager = require("editor/EditorManager"),
FileUtils = require("file/FileUtils"),
PerfUtils = require("utils/PerfUtils"),
LanguageManager = require("language/LanguageManager");
/**
* @constructor
* Model for the contents of a single file and its current modification state.
* See DocumentManager documentation for important usage notes.
*
* Document dispatches these events:
*
* change -- When the text of the editor changes (including due to undo/redo).
*
* Passes ({Document}, {ChangeList}), where ChangeList is a linked list (NOT an array)
* of change record objects. Each change record looks like:
*
* { from: start of change, expressed as {line: <line number>, ch: <character offset>},
* to: end of change, expressed as {line: <line number>, ch: <character offset>},
* text: array of lines of text to replace existing text,
* next: next change record in the linked list, or undefined if this is the last record }
*
* The line and ch offsets are both 0-based.
*
* The ch offset in "from" is inclusive, but the ch offset in "to" is exclusive. For example,
* an insertion of new content (without replacing existing content) is expressed by a range
* where from and to are the same.
*
* If "from" and "to" are undefined, then this is a replacement of the entire text content.
*
* IMPORTANT: If you listen for the "change" event, you MUST also addRef() the document
* (and releaseRef() it whenever you stop listening). You should also listen to the "deleted"
* event.
*
* (FUTURE: this is a modified version of the raw CodeMirror change event format; may want to make
* it an ordinary array)
*
* deleted -- When the file for this document has been deleted. All views onto the document should
* be closed. The document will no longer be editable or dispatch "change" events.
*
* @param {!FileEntry} file Need not lie within the project.
* @param {!Date} initialTimestamp File's timestamp when we read it off disk.
* @param {!string} rawText Text content of the file.
*/
function Document(file, initialTimestamp, rawText) {
if (!(this instanceof Document)) { // error if constructor called without 'new'
throw new Error("Document constructor must be called with 'new'");
}
this.file = file;
this._updateLanguage();
this.refreshText(rawText, initialTimestamp);
}
/**
* Number of clients who want this Document to stay alive. The Document is listed in
* DocumentManager._openDocuments whenever refCount > 0.
*/
Document.prototype._refCount = 0;
/**
* The FileEntry for this document. Need not lie within the project.
* @type {!FileEntry}
*/
Document.prototype.file = null;
/**
* The Language for this document. Will be resolved by file extension in the constructor
* @type {!Language}
*/
Document.prototype.language = null;
/**
* Whether this document has unsaved changes or not.
* When this changes on any Document, DocumentManager dispatches a "dirtyFlagChange" event.
* @type {boolean}
*/
Document.prototype.isDirty = false;
/**
* What we expect the file's timestamp to be on disk. If the timestamp differs from this, then
* it means the file was modified by an app other than Brackets.
* @type {!Date}
*/
Document.prototype.diskTimestamp = null;
/**
* The text contents of the file, or null if our backing model is _masterEditor.
* @type {?string}
*/
Document.prototype._text = null;
/**
* Editor object representing the full-size editor UI for this document. May be null if Document
* has not yet been modified or been the currentDocument; in that case, our backing model is the
* string _text.
* @type {?Editor}
*/
Document.prototype._masterEditor = null;
/**
* The content's line-endings style. If a Document is created on empty text, or text with
* inconsistent line endings, defaults to the current platform's standard endings.
* @type {FileUtils.LINE_ENDINGS_CRLF|FileUtils.LINE_ENDINGS_LF}
*/
Document.prototype._lineEndings = null;
/**
* Whether this document's language was forced (manually selected) or not.
* If true, the language will not change when _updateLanguage() is called.
* @type {boolean}
*/
Document.prototype._languageWasForced = false;
/** Add a ref to keep this Document alive */
Document.prototype.addRef = function () {
//console.log("+++REF+++ "+this);
if (this._refCount === 0) {
//console.log("+++ adding to open list");
if ($(exports).triggerHandler("_afterDocumentCreate", this)) {
return;
}
}
this._refCount++;
};
/** Remove a ref that was keeping this Document alive */
Document.prototype.releaseRef = function () {
//console.log("---REF--- "+this);
this._refCount--;
if (this._refCount < 0) {
console.error("Document ref count has fallen below zero!");
return;
}
if (this._refCount === 0) {
//console.log("--- removing from open list");
if ($(exports).triggerHandler("_beforeDocumentDelete", this)) {
return;
}
}
};
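// A sketch of the intended addRef()/releaseRef() pairing for "change" listeners
// (hypothetical client code, following the contract documented on the constructor):
// doc.addRef(); // keep the Document alive while listening
// $(doc).on("change", onChange);
// ... later, when done listening ...
// $(doc).off("change", onChange);
// doc.releaseRef();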
/**
* Attach a backing Editor to the Document, enabling setText() to be called. Assumes Editor has
* already been initialized with the value of getText(). ONLY Editor should call this (and only
* when EditorManager has told it to act as the master editor).
* @param {!Editor} masterEditor
*/
Document.prototype._makeEditable = function (masterEditor) {
if (this._masterEditor) {
console.error("Document is already editable");
} else {
this._text = null;
this._masterEditor = masterEditor;
$(masterEditor).on("change", this._handleEditorChange.bind(this));
}
};
/**
* Detach the backing Editor from the Document, disallowing setText(). The text content is
* stored back onto _text so other Document clients continue to have read-only access. ONLY
* Editor.destroy() should call this.
*/
Document.prototype._makeNonEditable = function () {
if (!this._masterEditor) {
console.error("Document is already non-editable");
} else {
// _text represents the raw text, so fetch without normalized line endings
this._text = this.getText(true);
this._masterEditor = null;
}
};
/**
* Guarantees that _masterEditor is non-null. If needed, asks EditorManager to create a new master
* editor bound to this Document (which in turn causes Document._makeEditable() to be called).
* Should ONLY be called by Editor and Document.
*/
Document.prototype._ensureMasterEditor = function () {
if (!this._masterEditor) {
EditorManager._createFullEditorForDocument(this);
}
};
/**
* Returns the document's current contents; may not be saved to disk yet. Whenever this
* value changes, the Document dispatches a "change" event.
*
* @param {boolean=} useOriginalLineEndings If true, line endings in the result depend on the
* Document's line endings setting (based on OS & the original text loaded from disk).
* If false, line endings are always \n (like all the other Document text getter methods).
* @return {string}
*/
Document.prototype.getText = function (useOriginalLineEndings) {
if (this._masterEditor) {
// CodeMirror.getValue() always returns text with LF line endings; fix up to match line
// endings preferred by the document, if necessary
var codeMirrorText = this._masterEditor._codeMirror.getValue();
if (useOriginalLineEndings) {
if (this._lineEndings === FileUtils.LINE_ENDINGS_CRLF) {
return codeMirrorText.replace(/\n/g, "\r\n");
}
}
return codeMirrorText;
} else {
// Optimized path that doesn't require creating master editor
if (useOriginalLineEndings) {
return this._text;
} else {
return this._text.replace(/\r\n/g, "\n");
}
}
};
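    // Example (sketch, assuming a document loaded from a CRLF file with two lines):
    //     doc.getText()      // -> "line1\nline2"   (endings normalized to \n)
    //     doc.getText(true)  // -> "line1\r\nline2" (original CRLF endings kept)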
/**
* Sets the contents of the document. Treated as an edit. Line endings will be rewritten to
* match the document's current line-ending style.
* @param {!string} text The text to replace the contents of the document with.
*/
Document.prototype.setText = function (text) {
this._ensureMasterEditor();
this._masterEditor._codeMirror.setValue(text);
// _handleEditorChange() triggers "change" event
};
/**
* Sets the contents of the document. Treated as reloading the document from disk: the document
* will be marked clean with a new timestamp, the undo/redo history is cleared, and we re-check
* the text's line-ending style. CAN be called even if there is no backing editor.
* @param {!string} text The text to replace the contents of the document with.
* @param {!Date} newTimestamp Timestamp of file at the time we read its new contents from disk.
*/
Document.prototype.refreshText = function (text, newTimestamp) {
var perfTimerName = PerfUtils.markStart("refreshText:\t" + (!this.file || this.file.fullPath));
if (this._masterEditor) {
this._masterEditor._resetText(text);
// _handleEditorChange() triggers "change" event for us
} else {
this._text = text;
// We fake a change record here that looks like CodeMirror's text change records, but
// omits "from" and "to", by which we mean the entire text has changed.
// TODO: Dumb to split it here just to join it again in the change handler, but this is
// the CodeMirror change format. Should we document our change format to allow this to
// either be an array of lines or a single string?
$(this).triggerHandler("change", [this, {text: text.split(/\r?\n/)}]);
}
this._markClean();
this.diskTimestamp = newTimestamp;
// Sniff line-ending style
this._lineEndings = FileUtils.sniffLineEndings(text);
if (!this._lineEndings) {
this._lineEndings = FileUtils.getPlatformLineEndings();
}
$(exports).triggerHandler("_documentRefreshed", this);
PerfUtils.addMeasurement(perfTimerName);
};
/**
* Adds, replaces, or removes text. If a range is given, the text at that range is replaced with the
* given new text; if text == "", then the entire range is effectively deleted. If 'end' is omitted,
* then the new text is inserted at that point and all existing text is preserved. Line endings will
* be rewritten to match the document's current line-ending style.
*
* IMPORTANT NOTE: Because of #1688, do not use this in cases where you might be
* operating on a linked document (like the main document for an inline editor)
* during an outer CodeMirror operation (like a key event that's handled by the
* editor itself). A common case of this is code hints in inline editors. In
* such cases, use `editor._codeMirror.replaceRange()` instead. This should be
* fixed when we migrate to use CodeMirror's native document-linking functionality.
*
* @param {!string} text Text to insert or replace the range with
* @param {!{line:number, ch:number}} start Start of range, inclusive (if 'to' specified) or insertion point (if not)
* @param {?{line:number, ch:number}} end End of range, exclusive; optional
* @param {?string} origin Optional string used to batch consecutive edits for undo.
* If origin starts with "+", then consecutive edits with the same origin will be batched for undo if
* they are close enough together in time.
     * If origin starts with "*", then all consecutive edits with the same origin will be batched for
     * undo.
* Edits with origins starting with other characters will not be batched.
* (Note that this is a higher level of batching than batchOperation(), which already batches all
* edits within it for undo. Origin batching works across operations.)
*/
Document.prototype.replaceRange = function (text, start, end, origin) {
this._ensureMasterEditor();
this._masterEditor._codeMirror.replaceRange(text, start, end, origin);
// _handleEditorChange() triggers "change" event
};
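    // Usage sketch for origin batching (the "+typing" origin name is hypothetical):
    //     doc.replaceRange("f", {line: 0, ch: 0}, null, "+typing");
    //     doc.replaceRange("o", {line: 0, ch: 1}, null, "+typing");
    // Both edits share a "+" origin and happen close together in time, so a single
    // undo reverts them together.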
/**
* Returns the characters in the given range. Line endings are normalized to '\n'.
* @param {!{line:number, ch:number}} start Start of range, inclusive
* @param {!{line:number, ch:number}} end End of range, exclusive
* @return {!string}
*/
Document.prototype.getRange = function (start, end) {
this._ensureMasterEditor();
return this._masterEditor._codeMirror.getRange(start, end);
};
/**
* Returns the text of the given line (excluding any line ending characters)
     * @param {number} lineNum Zero-based line number
* @return {!string}
*/
Document.prototype.getLine = function (lineNum) {
this._ensureMasterEditor();
return this._masterEditor._codeMirror.getLine(lineNum);
};
/**
* Batches a series of related Document changes. Repeated calls to replaceRange() should be wrapped in a
* batch for efficiency. Begins the batch, calls doOperation(), ends the batch, and then returns.
* @param {function()} doOperation
*/
Document.prototype.batchOperation = function (doOperation) {
this._ensureMasterEditor();
var self = this;
self._masterEditor._codeMirror.operation(doOperation);
};
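    // Sketch: wrap a series of related edits so they run as one CodeMirror operation:
    //     doc.batchOperation(function () {
    //         doc.replaceRange("x", {line: 0, ch: 0});
    //         doc.replaceRange("y", {line: 1, ch: 0});
    //     });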
/**
* Handles changes from the master backing Editor. Changes are triggered either by direct edits
* to that Editor's UI, OR by our setText()/refreshText() methods.
* @private
*/
Document.prototype._handleEditorChange = function (event, editor, changeList) {
// On any change, mark the file dirty. In the future, we should make it so that if you
// undo back to the last saved state, we mark the file clean.
var wasDirty = this.isDirty;
this.isDirty = !editor._codeMirror.isClean();
// If file just became dirty, notify listeners, and add it to working set (if not already there)
if (wasDirty !== this.isDirty) {
$(exports).triggerHandler("_dirtyFlagChange", [this]);
}
// Notify that Document's text has changed
// TODO: This needs to be kept in sync with SpecRunnerUtils.createMockDocument(). In the
// future, we should fix things so that we either don't need mock documents or that this
// is factored so it will just run in both.
$(this).triggerHandler("change", [this, changeList]);
};
/**
* @private
*/
Document.prototype._markClean = function () {
this.isDirty = false;
if (this._masterEditor) {
this._masterEditor._codeMirror.markClean();
}
$(exports).triggerHandler("_dirtyFlagChange", this);
};
/**
* Called when the document is saved (which currently happens in DocumentCommandHandlers). Marks the
* document not dirty and notifies listeners of the save.
*/
Document.prototype.notifySaved = function () {
if (!this._masterEditor) {
console.log("### Warning: saving a Document that is not modifiable!");
}
this._markClean();
// TODO: (issue #295) fetching timestamp async creates race conditions (albeit unlikely ones)
var thisDoc = this;
this.file.getMetadata(
function (metadata) {
thisDoc.diskTimestamp = metadata.modificationTime;
$(exports).triggerHandler("_documentSaved", thisDoc);
},
function (error) {
console.log("Error updating timestamp after saving file: " + thisDoc.file.fullPath);
$(exports).triggerHandler("_documentSaved", thisDoc);
}
);
};
/* (pretty toString(), to aid debugging) */
Document.prototype.toString = function () {
var dirtyInfo = (this.isDirty ? " (dirty!)" : " (clean)");
var editorInfo = (this._masterEditor ? " (Editable)" : " (Non-editable)");
var refInfo = " refs:" + this._refCount;
return "[Document " + this.file.fullPath + dirtyInfo + editorInfo + refInfo + "]";
};
/**
* Returns the language this document is written in.
* The language returned is based on the file extension.
* @return {Language} An object describing the language used in this document
*/
Document.prototype.getLanguage = function () {
return this.language;
};
/**
* Overrides the default language of this document and sets it to the given
* language.
* @param {?Language} language The language to be set for this document; if
* null, the language will be set back to the default.
*/
Document.prototype.forceLanguage = function (language) {
if (language) {
var oldLanguage = this.language;
this._languageWasForced = true;
this.language = language;
$(this).triggerHandler("languageChanged", [oldLanguage, this.language]);
} else { // if language was null, reset to default language
this._languageWasForced = false;
this._updateLanguage();
}
};
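    // Sketch (the LanguageManager lookup is illustrative only):
    //     doc.forceLanguage(LanguageManager.getLanguage("javascript")); // pin language
    //     doc.forceLanguage(null); // revert to extension-based detection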
/**
* Updates the language according to the file extension. If the current
* language was forced (set manually by user), don't change it.
*/
Document.prototype._updateLanguage = function () {
if (this._languageWasForced) {
return;
}
var oldLanguage = this.language;
this.language = LanguageManager.getLanguageForPath(this.file.fullPath);
if (oldLanguage && oldLanguage !== this.language) {
$(this).triggerHandler("languageChanged", [oldLanguage, this.language]);
}
};
/** Called when Document.file has been modified (due to a rename) */
Document.prototype._notifyFilePathChanged = function () {
// File extension may have changed
this._updateLanguage();
};
// Define public API
exports.Document = Document;
});
| src/document/Document.js | /*
* Copyright (c) 2012 Adobe Systems Incorporated. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
*/
/*jslint vars: true, plusplus: true, devel: true, nomen: true, indent: 4, maxerr: 50 */
/*global define, $, PathUtils */
define(function (require, exports, module) {
"use strict";
var EditorManager = require("editor/EditorManager"),
FileUtils = require("file/FileUtils"),
PerfUtils = require("utils/PerfUtils"),
LanguageManager = require("language/LanguageManager");
/**
* @constructor
* Model for the contents of a single file and its current modification state.
* See DocumentManager documentation for important usage notes.
*
* Document dispatches these events:
*
* change -- When the text of the editor changes (including due to undo/redo).
*
* Passes ({Document}, {ChangeList}), where ChangeList is a linked list (NOT an array)
* of change record objects. Each change record looks like:
*
* { from: start of change, expressed as {line: <line number>, ch: <character offset>},
     *       to: end of change, expressed as {line: <line number>, ch: <character offset>},
* text: array of lines of text to replace existing text,
* next: next change record in the linked list, or undefined if this is the last record }
*
* The line and ch offsets are both 0-based.
*
* The ch offset in "from" is inclusive, but the ch offset in "to" is exclusive. For example,
* an insertion of new content (without replacing existing content) is expressed by a range
* where from and to are the same.
*
* If "from" and "to" are undefined, then this is a replacement of the entire text content.
*
* IMPORTANT: If you listen for the "change" event, you MUST also addRef() the document
* (and releaseRef() it whenever you stop listening). You should also listen to the "deleted"
* event.
*
* (FUTURE: this is a modified version of the raw CodeMirror change event format; may want to make
* it an ordinary array)
*
* deleted -- When the file for this document has been deleted. All views onto the document should
* be closed. The document will no longer be editable or dispatch "change" events.
*
* @param {!FileEntry} file Need not lie within the project.
* @param {!Date} initialTimestamp File's timestamp when we read it off disk.
* @param {!string} rawText Text content of the file.
*/
function Document(file, initialTimestamp, rawText) {
if (!(this instanceof Document)) { // error if constructor called without 'new'
throw new Error("Document constructor must be called with 'new'");
}
this.file = file;
this._updateLanguage();
this.refreshText(rawText, initialTimestamp);
}
/**
* Number of clients who want this Document to stay alive. The Document is listed in
* DocumentManager._openDocuments whenever refCount > 0.
*/
Document.prototype._refCount = 0;
/**
* The FileEntry for this document. Need not lie within the project.
* @type {!FileEntry}
*/
Document.prototype.file = null;
/**
* The Language for this document. Will be resolved by file extension in the constructor
* @type {!Language}
*/
Document.prototype.language = null;
/**
* Whether this document has unsaved changes or not.
* When this changes on any Document, DocumentManager dispatches a "dirtyFlagChange" event.
* @type {boolean}
*/
Document.prototype.isDirty = false;
/**
* What we expect the file's timestamp to be on disk. If the timestamp differs from this, then
* it means the file was modified by an app other than Brackets.
* @type {!Date}
*/
Document.prototype.diskTimestamp = null;
/**
* The text contents of the file, or null if our backing model is _masterEditor.
* @type {?string}
*/
Document.prototype._text = null;
/**
* Editor object representing the full-size editor UI for this document. May be null if Document
* has not yet been modified or been the currentDocument; in that case, our backing model is the
* string _text.
* @type {?Editor}
*/
Document.prototype._masterEditor = null;
/**
* The content's line-endings style. If a Document is created on empty text, or text with
* inconsistent line endings, defaults to the current platform's standard endings.
* @type {FileUtils.LINE_ENDINGS_CRLF|FileUtils.LINE_ENDINGS_LF}
*/
Document.prototype._lineEndings = null;
/** Add a ref to keep this Document alive */
Document.prototype.addRef = function () {
//console.log("+++REF+++ "+this);
if (this._refCount === 0) {
//console.log("+++ adding to open list");
if ($(exports).triggerHandler("_afterDocumentCreate", this)) {
return;
}
}
this._refCount++;
};
/** Remove a ref that was keeping this Document alive */
Document.prototype.releaseRef = function () {
//console.log("---REF--- "+this);
this._refCount--;
if (this._refCount < 0) {
console.error("Document ref count has fallen below zero!");
return;
}
if (this._refCount === 0) {
//console.log("--- removing from open list");
if ($(exports).triggerHandler("_beforeDocumentDelete", this)) {
return;
}
}
};
/**
* Attach a backing Editor to the Document, enabling setText() to be called. Assumes Editor has
* already been initialized with the value of getText(). ONLY Editor should call this (and only
* when EditorManager has told it to act as the master editor).
* @param {!Editor} masterEditor
*/
Document.prototype._makeEditable = function (masterEditor) {
if (this._masterEditor) {
console.error("Document is already editable");
} else {
this._text = null;
this._masterEditor = masterEditor;
$(masterEditor).on("change", this._handleEditorChange.bind(this));
}
};
/**
* Detach the backing Editor from the Document, disallowing setText(). The text content is
* stored back onto _text so other Document clients continue to have read-only access. ONLY
* Editor.destroy() should call this.
*/
Document.prototype._makeNonEditable = function () {
if (!this._masterEditor) {
console.error("Document is already non-editable");
} else {
// _text represents the raw text, so fetch without normalized line endings
this._text = this.getText(true);
this._masterEditor = null;
}
};
/**
* Guarantees that _masterEditor is non-null. If needed, asks EditorManager to create a new master
* editor bound to this Document (which in turn causes Document._makeEditable() to be called).
* Should ONLY be called by Editor and Document.
*/
Document.prototype._ensureMasterEditor = function () {
if (!this._masterEditor) {
EditorManager._createFullEditorForDocument(this);
}
};
/**
* Returns the document's current contents; may not be saved to disk yet. Whenever this
* value changes, the Document dispatches a "change" event.
*
* @param {boolean=} useOriginalLineEndings If true, line endings in the result depend on the
* Document's line endings setting (based on OS & the original text loaded from disk).
* If false, line endings are always \n (like all the other Document text getter methods).
* @return {string}
*/
Document.prototype.getText = function (useOriginalLineEndings) {
if (this._masterEditor) {
// CodeMirror.getValue() always returns text with LF line endings; fix up to match line
// endings preferred by the document, if necessary
var codeMirrorText = this._masterEditor._codeMirror.getValue();
if (useOriginalLineEndings) {
if (this._lineEndings === FileUtils.LINE_ENDINGS_CRLF) {
return codeMirrorText.replace(/\n/g, "\r\n");
}
}
return codeMirrorText;
} else {
// Optimized path that doesn't require creating master editor
if (useOriginalLineEndings) {
return this._text;
} else {
return this._text.replace(/\r\n/g, "\n");
}
}
};
/**
* Sets the contents of the document. Treated as an edit. Line endings will be rewritten to
* match the document's current line-ending style.
* @param {!string} text The text to replace the contents of the document with.
*/
Document.prototype.setText = function (text) {
this._ensureMasterEditor();
this._masterEditor._codeMirror.setValue(text);
// _handleEditorChange() triggers "change" event
};
/**
* Sets the contents of the document. Treated as reloading the document from disk: the document
* will be marked clean with a new timestamp, the undo/redo history is cleared, and we re-check
* the text's line-ending style. CAN be called even if there is no backing editor.
* @param {!string} text The text to replace the contents of the document with.
* @param {!Date} newTimestamp Timestamp of file at the time we read its new contents from disk.
*/
Document.prototype.refreshText = function (text, newTimestamp) {
var perfTimerName = PerfUtils.markStart("refreshText:\t" + (!this.file || this.file.fullPath));
if (this._masterEditor) {
this._masterEditor._resetText(text);
// _handleEditorChange() triggers "change" event for us
} else {
this._text = text;
// We fake a change record here that looks like CodeMirror's text change records, but
// omits "from" and "to", by which we mean the entire text has changed.
// TODO: Dumb to split it here just to join it again in the change handler, but this is
// the CodeMirror change format. Should we document our change format to allow this to
// either be an array of lines or a single string?
$(this).triggerHandler("change", [this, {text: text.split(/\r?\n/)}]);
}
this._markClean();
this.diskTimestamp = newTimestamp;
// Sniff line-ending style
this._lineEndings = FileUtils.sniffLineEndings(text);
if (!this._lineEndings) {
this._lineEndings = FileUtils.getPlatformLineEndings();
}
$(exports).triggerHandler("_documentRefreshed", this);
PerfUtils.addMeasurement(perfTimerName);
};
/**
* Adds, replaces, or removes text. If a range is given, the text at that range is replaced with the
* given new text; if text == "", then the entire range is effectively deleted. If 'end' is omitted,
* then the new text is inserted at that point and all existing text is preserved. Line endings will
* be rewritten to match the document's current line-ending style.
*
* IMPORTANT NOTE: Because of #1688, do not use this in cases where you might be
* operating on a linked document (like the main document for an inline editor)
* during an outer CodeMirror operation (like a key event that's handled by the
* editor itself). A common case of this is code hints in inline editors. In
* such cases, use `editor._codeMirror.replaceRange()` instead. This should be
* fixed when we migrate to use CodeMirror's native document-linking functionality.
*
* @param {!string} text Text to insert or replace the range with
* @param {!{line:number, ch:number}} start Start of range, inclusive (if 'to' specified) or insertion point (if not)
* @param {?{line:number, ch:number}} end End of range, exclusive; optional
* @param {?string} origin Optional string used to batch consecutive edits for undo.
* If origin starts with "+", then consecutive edits with the same origin will be batched for undo if
* they are close enough together in time.
     * If origin starts with "*", then all consecutive edits with the same origin will be batched for
     * undo.
* Edits with origins starting with other characters will not be batched.
* (Note that this is a higher level of batching than batchOperation(), which already batches all
* edits within it for undo. Origin batching works across operations.)
*/
Document.prototype.replaceRange = function (text, start, end, origin) {
this._ensureMasterEditor();
this._masterEditor._codeMirror.replaceRange(text, start, end, origin);
// _handleEditorChange() triggers "change" event
};
/**
* Returns the characters in the given range. Line endings are normalized to '\n'.
* @param {!{line:number, ch:number}} start Start of range, inclusive
* @param {!{line:number, ch:number}} end End of range, exclusive
* @return {!string}
*/
Document.prototype.getRange = function (start, end) {
this._ensureMasterEditor();
return this._masterEditor._codeMirror.getRange(start, end);
};
/**
* Returns the text of the given line (excluding any line ending characters)
     * @param {number} lineNum Zero-based line number
* @return {!string}
*/
Document.prototype.getLine = function (lineNum) {
this._ensureMasterEditor();
return this._masterEditor._codeMirror.getLine(lineNum);
};
/**
* Batches a series of related Document changes. Repeated calls to replaceRange() should be wrapped in a
* batch for efficiency. Begins the batch, calls doOperation(), ends the batch, and then returns.
* @param {function()} doOperation
*/
Document.prototype.batchOperation = function (doOperation) {
this._ensureMasterEditor();
var self = this;
self._masterEditor._codeMirror.operation(doOperation);
};
/**
* Handles changes from the master backing Editor. Changes are triggered either by direct edits
* to that Editor's UI, OR by our setText()/refreshText() methods.
* @private
*/
Document.prototype._handleEditorChange = function (event, editor, changeList) {
// On any change, mark the file dirty. In the future, we should make it so that if you
// undo back to the last saved state, we mark the file clean.
var wasDirty = this.isDirty;
this.isDirty = !editor._codeMirror.isClean();
// If file just became dirty, notify listeners, and add it to working set (if not already there)
if (wasDirty !== this.isDirty) {
$(exports).triggerHandler("_dirtyFlagChange", [this]);
}
// Notify that Document's text has changed
// TODO: This needs to be kept in sync with SpecRunnerUtils.createMockDocument(). In the
// future, we should fix things so that we either don't need mock documents or that this
// is factored so it will just run in both.
$(this).triggerHandler("change", [this, changeList]);
};
/**
* @private
*/
Document.prototype._markClean = function () {
this.isDirty = false;
if (this._masterEditor) {
this._masterEditor._codeMirror.markClean();
}
$(exports).triggerHandler("_dirtyFlagChange", this);
};
/**
* Called when the document is saved (which currently happens in DocumentCommandHandlers). Marks the
* document not dirty and notifies listeners of the save.
*/
Document.prototype.notifySaved = function () {
if (!this._masterEditor) {
console.log("### Warning: saving a Document that is not modifiable!");
}
this._markClean();
// TODO: (issue #295) fetching timestamp async creates race conditions (albeit unlikely ones)
var thisDoc = this;
this.file.getMetadata(
function (metadata) {
thisDoc.diskTimestamp = metadata.modificationTime;
$(exports).triggerHandler("_documentSaved", thisDoc);
},
function (error) {
console.log("Error updating timestamp after saving file: " + thisDoc.file.fullPath);
$(exports).triggerHandler("_documentSaved", thisDoc);
}
);
};
/* (pretty toString(), to aid debugging) */
Document.prototype.toString = function () {
var dirtyInfo = (this.isDirty ? " (dirty!)" : " (clean)");
var editorInfo = (this._masterEditor ? " (Editable)" : " (Non-editable)");
var refInfo = " refs:" + this._refCount;
return "[Document " + this.file.fullPath + dirtyInfo + editorInfo + refInfo + "]";
};
/**
* Returns the language this document is written in.
* The language returned is based on the file extension.
* @return {Language} An object describing the language used in this document
*/
Document.prototype.getLanguage = function () {
return this.language;
};
/**
* Overrides the default language of this document and sets it to the given
* language.
* @param {?Language} language The language to be set for this document; if
* null, the language will be set back to the default.
*/
Document.prototype.forceLanguage = function (language) {
var oldLanguage = this.language;
if (language) {
language.forced = true;
this.language = language;
$(this).triggerHandler("languageChanged", [oldLanguage, this.language]);
} else { // if language was null, reset to default language
if (oldLanguage.forced) {
delete oldLanguage.forced;
}
this._updateLanguage();
}
};
/**
* Updates the language according to the file extension. If the current
* language was forced (set manually by user), don't change it.
*/
Document.prototype._updateLanguage = function () {
var oldLanguage = this.language;
if (oldLanguage && oldLanguage.forced) {
return;
}
this.language = LanguageManager.getLanguageForPath(this.file.fullPath);
if (oldLanguage && oldLanguage !== this.language) {
$(this).triggerHandler("languageChanged", [oldLanguage, this.language]);
}
};
/** Called when Document.file has been modified (due to a rename) */
Document.prototype._notifyFilePathChanged = function () {
// File extension may have changed
this._updateLanguage();
};
// Define public API
exports.Document = Document;
});
| Move forced flag to Document
| src/document/Document.js | Move forced flag to Document | <ide><path>rc/document/Document.js
<ide> * @type {FileUtils.LINE_ENDINGS_CRLF|FileUtils.LINE_ENDINGS_LF}
<ide> */
<ide> Document.prototype._lineEndings = null;
<add>
<add> /**
<add> * Whether this document's language was forced (manually selected) or not.
<add> * If true, the language will not change when _updateLanguage() is called.
<add> * @type {boolean}
<add> */
<add> Document.prototype._languageWasForced = false;
<ide>
<ide> /** Add a ref to keep this Document alive */
<ide> Document.prototype.addRef = function () {
<ide> * null, the language will be set back to the default.
<ide> */
<ide> Document.prototype.forceLanguage = function (language) {
<del> var oldLanguage = this.language;
<ide> if (language) {
<del> language.forced = true;
<add> var oldLanguage = this.language;
<add> this._languageWasForced = true;
<ide> this.language = language;
<ide> $(this).triggerHandler("languageChanged", [oldLanguage, this.language]);
<ide> } else { // if language was null, reset to default language
<del> if (oldLanguage.forced) {
<del> delete oldLanguage.forced;
<del> }
<add> this._languageWasForced = false;
<ide> this._updateLanguage();
<ide> }
<ide> };
<ide> * language was forced (set manually by user), don't change it.
<ide> */
<ide> Document.prototype._updateLanguage = function () {
<add> if (this._languageWasForced) {
<add> return;
<add> }
<ide> var oldLanguage = this.language;
<del> if (oldLanguage && oldLanguage.forced) {
<del> return;
<del> }
<ide> this.language = LanguageManager.getLanguageForPath(this.file.fullPath);
<ide> if (oldLanguage && oldLanguage !== this.language) {
<ide> $(this).triggerHandler("languageChanged", [oldLanguage, this.language]); |
|
Java | apache-2.0 | 71b0aa9ba987174166f1312e8fdcc8f10df16b64 | 0 | b2ihealthcare/snow-owl,b2ihealthcare/snow-owl,b2ihealthcare/snow-owl,b2ihealthcare/snow-owl | /*
* Copyright 2019-2021 B2i Healthcare Pte Ltd, http://b2i.sg
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.b2international.snowowl.test.commons.rest;
import static io.restassured.RestAssured.given;
import java.lang.reflect.Field;
import java.lang.reflect.Type;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.TimeZone;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.stream.Collectors;
import org.apache.commons.lang.text.StrSubstitutor;
import org.hamcrest.CoreMatchers;
import com.b2international.snowowl.core.ApplicationContext;
import com.b2international.snowowl.core.identity.JWTGenerator;
import com.b2international.snowowl.core.identity.Permission;
import com.b2international.snowowl.core.identity.Role;
import com.b2international.snowowl.core.identity.User;
import com.b2international.snowowl.core.util.PlatformUtil;
import com.fasterxml.jackson.databind.SerializationFeature;
import com.fasterxml.jackson.databind.util.StdDateFormat;
import com.fasterxml.jackson.datatype.jsr310.JavaTimeModule;
import com.google.common.base.*;
import com.google.common.collect.Iterables;
import io.restassured.RestAssured;
import io.restassured.config.LogConfig;
import io.restassured.config.ObjectMapperConfig;
import io.restassured.config.RestAssuredConfig;
import io.restassured.http.ContentType;
import io.restassured.internal.mapping.Jackson2Mapper;
import io.restassured.path.json.mapper.factory.Jackson2ObjectMapperFactory;
import io.restassured.response.Response;
import io.restassured.response.ValidatableResponse;
import io.restassured.specification.RequestSpecification;
/**
 * Useful extension methods for testing Snow Owl's RESTful API. High-level, REST-related syntactic
 * sugar belongs in here; other API-related helpers should go in a separate extension class.
*
* @since 1.0
*/
public class RestExtensions {
public static final Joiner COMMA_JOINER = Joiner.on(",");
public static final String JSON_UTF8 = ContentType.JSON.withCharset(Charsets.UTF_8);
// HTTP and REST API
private static final AtomicBoolean INITIALIZE_ONCE = new AtomicBoolean(false);
public static final String CONTEXT = "snowowl";
public static final int OK = 200;
public static final int NO_CONTENT = 204;
public static final int UNAUTHORIZED = 401;
public static final int FORBIDDEN = 403;
public static final int NOT_FOUND = 404;
public static final String LOCATION = "Location";
// Auth
public static final String DEFAULT_USER = "snowowl";
public static final String DEFAULT_PASS = "snowowl";
public static final String WRONG_PASS = "wrong";
public static final String USER;
public static final String PASS;
static {
if (!Strings.isNullOrEmpty(System.getProperty("test.user"))) {
USER = System.getProperty("test.user");
} else {
USER = DEFAULT_USER;
}
if (!Strings.isNullOrEmpty(System.getProperty("test.password"))) {
PASS = System.getProperty("test.password");
} else {
PASS = DEFAULT_PASS;
}
}
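// The defaults above can be overridden from the command line when running tests,
// e.g. -Dtest.user=admin -Dtest.password=secret (and -Dtest.server.location=<base URI>
// to point the suite at a remote server, see givenUnauthenticatedRequest below).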
public static RequestSpecification givenUnauthenticatedRequest(String api) {
if (INITIALIZE_ONCE.compareAndSet(false, true)) {
// change Base URI if defined as sysarg
final String serverLocation = System.getProperty("test.server.location");
if (!Strings.isNullOrEmpty(serverLocation)) {
RestAssured.baseURI = serverLocation;
}
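// Register the Jackson2 mapper explicitly as REST-assured's default: after the
// upgrade to REST-assured 4.x the classpath-scanning mechanism no longer finds
// the Jackson2 backend on its own, but a registered default mapper is used as-is.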
RestAssured.config = RestAssuredConfig.config()
.objectMapperConfig(
ObjectMapperConfig.objectMapperConfig().defaultObjectMapper(new Jackson2Mapper(new Jackson2ObjectMapperFactory() {
@Override
public com.fasterxml.jackson.databind.ObjectMapper create(Type cls, String charset) {
com.fasterxml.jackson.databind.ObjectMapper mapper = new com.fasterxml.jackson.databind.ObjectMapper();
mapper.registerModule(new JavaTimeModule());
//bbanfai: added date format
final StdDateFormat dateFormat = new StdDateFormat();
dateFormat.setTimeZone(TimeZone.getTimeZone("UTC"));
mapper.setDateFormat(dateFormat);
mapper.configure(SerializationFeature.WRITE_DATES_AS_TIMESTAMPS, false);
return mapper;
}
}))
)
.logConfig(
LogConfig.logConfig().enableLoggingOfRequestAndResponseIfValidationFails()
);
}
Preconditions.checkArgument(api.startsWith("/"), "Api param should start with a forward slash: '/'");
return given().port(getPort()).basePath(CONTEXT + api);
}
public static RequestSpecification givenAuthenticatedRequest(String api) {
return givenRequestWithPassword(api, PASS);
}
public static RequestSpecification givenInvalidPasswordRequest(String api) {
return givenRequestWithPassword(api, WRONG_PASS);
}
private static RequestSpecification givenRequestWithPassword(String api, String password) {
return givenUnauthenticatedRequest(api).auth().preemptive().basic(USER, password);
}
public static RequestSpecification givenRequestWithToken(String api, String token) {
return givenUnauthenticatedRequest(api).auth().preemptive().oauth2(token);
}
public static String asPath(List<? extends String> values) {
return ("/" + values.stream().collect(Collectors.joining("/"))).replaceAll("//", "/");
}
public static String location(Response it) {
final String header = it.header(LOCATION);
return Strings.isNullOrEmpty(header) ? "" : header;
}
public static String renderWithFields(String it, Object object) {
return render(it, getFieldValueMap(object));
}
public static Map<String, Object> getFieldValueMap(Object object) {
return Arrays.asList(object.getClass().getFields()).stream().collect(Collectors.toMap(Field::getName, f -> {
try {
return f.get(object);
} catch (IllegalArgumentException | IllegalAccessException e) {
throw new RuntimeException(e);
}
}));
}
public static String render(String it, Map<String, Object> fieldValueMap) {
return new StrSubstitutor(fieldValueMap).replace(it);
}
/**
 * Asserts that the JSON body inside the given Response object has the same "state" field
 * value as the given state parameter.
public static void inState(Response it, String state) {
it.then().body("state", CoreMatchers.equalTo(state));
}
/**
 * Returns the port used in the test environment; this is equivalent to the jetty.port configuration parameter, or 8080 if no jetty.port parameter is found.
*/
public static int getPort() {
final String jettyPortProp = System.getProperty("jetty.port");
return jettyPortProp != null ? Integer.valueOf(jettyPortProp) : 8080;
}
public static ValidatableResponse expectStatus(Response it, int expectedStatus) {
if (it.statusCode() != expectedStatus) {
System.err.println("Web server may reject your request, check access log");
System.err.println("Headers: " + it.headers());
System.err.println("Content-Type: " + it.getContentType());
System.err.println("Body: " + it.body().asString());
}
return it.then().statusCode(expectedStatus);
}
public static String lastPathSegment(String path) {
return Iterables.getLast(Splitter.on('/').splitToList(path));
}
public static String joinPath(String... segments) {
return Joiner.on('/').join(segments);
}
public static RequestSpecification withFile(RequestSpecification it, String file, Class<?> cp) {
return it.multiPart(PlatformUtil.toAbsolutePath(cp, file).toFile());
}
// Simple REST operations
public static Response get(String api, String...segments) {
return givenAuthenticatedRequest(api).get(asPath(Arrays.asList(segments)));
}
public static Response delete(String api, String...segments) {
return givenAuthenticatedRequest(api).delete(asPath(Arrays.asList(segments)));
}
public static String assertCreated(ValidatableResponse response) {
return lastPathSegment(response.statusCode(201)
.extract()
.header("Location"));
}
public static String generateToken(Permission...permissions) {
return ApplicationContext.getServiceForClass(JWTGenerator.class).generate(new User(RestExtensions.USER, List.of(new Role("custom", List.of(permissions)))));
}
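// Usage sketch (illustrative only; the "/admin" API and "info" segment are hypothetical):
//
// Response response = get("/admin", "info");
// expectStatus(response, OK);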
}
| tests/com.b2international.snowowl.test.commons/src/com/b2international/snowowl/test/commons/rest/RestExtensions.java | /*
* Copyright 2019-2021 B2i Healthcare Pte Ltd, http://b2i.sg
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.b2international.snowowl.test.commons.rest;
import static io.restassured.RestAssured.given;
import java.lang.reflect.Field;
import java.lang.reflect.Type;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.TimeZone;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.stream.Collectors;
import org.apache.commons.lang.text.StrSubstitutor;
import org.hamcrest.CoreMatchers;
import com.b2international.snowowl.core.ApplicationContext;
import com.b2international.snowowl.core.identity.JWTGenerator;
import com.b2international.snowowl.core.identity.Permission;
import com.b2international.snowowl.core.identity.Role;
import com.b2international.snowowl.core.identity.User;
import com.b2international.snowowl.core.util.PlatformUtil;
import com.fasterxml.jackson.databind.SerializationFeature;
import com.fasterxml.jackson.databind.util.StdDateFormat;
import com.fasterxml.jackson.datatype.jsr310.JavaTimeModule;
import com.google.common.base.*;
import com.google.common.collect.Iterables;
import io.restassured.RestAssured;
import io.restassured.config.LogConfig;
import io.restassured.config.ObjectMapperConfig;
import io.restassured.config.RestAssuredConfig;
import io.restassured.http.ContentType;
import io.restassured.mapper.factory.Jackson2ObjectMapperFactory;
import io.restassured.response.Response;
import io.restassured.response.ValidatableResponse;
import io.restassured.specification.RequestSpecification;
/**
 * Useful extension methods for testing Snow Owl's RESTful API. High-level, REST-related syntactic
 * sugar belongs in here; other API-related helpers should go in a separate extension class.
*
* @since 1.0
*/
public class RestExtensions {
public static final Joiner COMMA_JOINER = Joiner.on(",");
public static final String JSON_UTF8 = ContentType.JSON.withCharset(Charsets.UTF_8);
// HTTP and REST API
private static final AtomicBoolean INITIALIZE_ONCE = new AtomicBoolean(false);
public static final String CONTEXT = "snowowl";
public static final int OK = 200;
public static final int NO_CONTENT = 204;
public static final int UNAUTHORIZED = 401;
public static final int FORBIDDEN = 403;
public static final int NOT_FOUND = 404;
public static final String LOCATION = "Location";
// Auth
public static final String DEFAULT_USER = "snowowl";
public static final String DEFAULT_PASS = "snowowl";
public static final String WRONG_PASS = "wrong";
public static final String USER;
public static final String PASS;
static {
if (!Strings.isNullOrEmpty(System.getProperty("test.user"))) {
USER = System.getProperty("test.user");
} else {
USER = DEFAULT_USER;
}
if (!Strings.isNullOrEmpty(System.getProperty("test.password"))) {
PASS = System.getProperty("test.password");
} else {
PASS = DEFAULT_PASS;
}
}
public static RequestSpecification givenUnauthenticatedRequest(String api) {
if (INITIALIZE_ONCE.compareAndSet(false, true)) {
// change Base URI if defined as sysarg
final String serverLocation = System.getProperty("test.server.location");
if (!Strings.isNullOrEmpty(serverLocation)) {
RestAssured.baseURI = serverLocation;
}
RestAssured.config = RestAssuredConfig.config()
.objectMapperConfig(
ObjectMapperConfig.objectMapperConfig().jackson2ObjectMapperFactory(new Jackson2ObjectMapperFactory() {
@Override
public com.fasterxml.jackson.databind.ObjectMapper create(Type arg0, String arg1) {
com.fasterxml.jackson.databind.ObjectMapper mapper = new com.fasterxml.jackson.databind.ObjectMapper();
mapper.registerModule(new JavaTimeModule());
//bbanfai: added date format
final StdDateFormat dateFormat = new StdDateFormat();
dateFormat.setTimeZone(TimeZone.getTimeZone("UTC"));
mapper.setDateFormat(dateFormat);
mapper.configure(SerializationFeature.WRITE_DATES_AS_TIMESTAMPS, false);
return mapper;
}
})
)
.logConfig(
LogConfig.logConfig().enableLoggingOfRequestAndResponseIfValidationFails()
);
}
Preconditions.checkArgument(api.startsWith("/"), "Api param should start with a forward slash: '/'");
return given().port(getPort()).basePath(CONTEXT + api);
}
public static RequestSpecification givenAuthenticatedRequest(String api) {
return givenRequestWithPassword(api, PASS);
}
public static RequestSpecification givenInvalidPasswordRequest(String api) {
return givenRequestWithPassword(api, WRONG_PASS);
}
private static RequestSpecification givenRequestWithPassword(String api, String password) {
return givenUnauthenticatedRequest(api).auth().preemptive().basic(USER, password);
}
public static RequestSpecification givenRequestWithToken(String api, String token) {
return givenUnauthenticatedRequest(api).auth().preemptive().oauth2(token);
}
public static String asPath(List<? extends String> values) {
return ("/" + values.stream().collect(Collectors.joining("/"))).replaceAll("//", "/");
}
public static String location(Response it) {
final String header = it.header(LOCATION);
return Strings.isNullOrEmpty(header) ? "" : header;
}
public static String renderWithFields(String it, Object object) {
return render(it, getFieldValueMap(object));
}
public static Map<String, Object> getFieldValueMap(Object object) {
return Arrays.asList(object.getClass().getFields()).stream().collect(Collectors.toMap(Field::getName, f -> {
try {
return f.get(object);
} catch (IllegalArgumentException | IllegalAccessException e) {
throw new RuntimeException(e);
}
}));
}
public static String render(String it, Map<String, Object> fieldValueMap) {
return new StrSubstitutor(fieldValueMap).replace(it);
}
/**
 * Asserts that the JSON body inside the given Response object has the same "state" field
 * value as the given state parameter.
public static void inState(Response it, String state) {
it.then().body("state", CoreMatchers.equalTo(state));
}
/**
 * Returns the port used in the test environment; this is equivalent to the jetty.port configuration parameter, or 8080 if no jetty.port parameter is found.
*/
public static int getPort() {
final String jettyPortProp = System.getProperty("jetty.port");
return jettyPortProp != null ? Integer.valueOf(jettyPortProp) : 8080;
}
public static ValidatableResponse expectStatus(Response it, int expectedStatus) {
if (it.statusCode() != expectedStatus) {
System.err.println("Web server may reject your request, check access log");
System.err.println("Headers: " + it.headers());
System.err.println("Content-Type: " + it.getContentType());
System.err.println("Body: " + it.body().asString());
}
return it.then().statusCode(expectedStatus);
}
public static String lastPathSegment(String path) {
return Iterables.getLast(Splitter.on('/').splitToList(path));
}
public static String joinPath(String... segments) {
return Joiner.on('/').join(segments);
}
public static RequestSpecification withFile(RequestSpecification it, String file, Class<?> cp) {
return it.multiPart(PlatformUtil.toAbsolutePath(cp, file).toFile());
}
// Simple REST operations
public static Response get(String api, String...segments) {
return givenAuthenticatedRequest(api).get(asPath(Arrays.asList(segments)));
}
public static Response delete(String api, String...segments) {
return givenAuthenticatedRequest(api).delete(asPath(Arrays.asList(segments)));
}
public static String assertCreated(ValidatableResponse response) {
return lastPathSegment(response.statusCode(201)
.extract()
.header("Location"));
}
public static String generateToken(Permission...permissions) {
return ApplicationContext.getServiceForClass(JWTGenerator.class).generate(new User(RestExtensions.USER, List.of(new Role("custom", List.of(permissions)))));
}
}
| fix(test.commons): Add default object mapper to REST-assured config
The classpath scanning-based mechanism does not find the Jackson2
backend after upgrading to 4.0, however if a default mapper is
registered, it can use it without any issue. | tests/com.b2international.snowowl.test.commons/src/com/b2international/snowowl/test/commons/rest/RestExtensions.java | fix(test.commons): Add default object mapper to REST-assured config | <ide><path>ests/com.b2international.snowowl.test.commons/src/com/b2international/snowowl/test/commons/rest/RestExtensions.java
<ide> import io.restassured.config.ObjectMapperConfig;
<ide> import io.restassured.config.RestAssuredConfig;
<ide> import io.restassured.http.ContentType;
<del>import io.restassured.mapper.factory.Jackson2ObjectMapperFactory;
<add>import io.restassured.internal.mapping.Jackson2Mapper;
<add>import io.restassured.path.json.mapper.factory.Jackson2ObjectMapperFactory;
<ide> import io.restassured.response.Response;
<ide> import io.restassured.response.ValidatableResponse;
<ide> import io.restassured.specification.RequestSpecification;
<ide>
<ide> RestAssured.config = RestAssuredConfig.config()
<ide> .objectMapperConfig(
<del> ObjectMapperConfig.objectMapperConfig().jackson2ObjectMapperFactory(new Jackson2ObjectMapperFactory() {
<add> ObjectMapperConfig.objectMapperConfig().defaultObjectMapper(new Jackson2Mapper(new Jackson2ObjectMapperFactory() {
<ide> @Override
<del> public com.fasterxml.jackson.databind.ObjectMapper create(Type arg0, String arg1) {
<add> public com.fasterxml.jackson.databind.ObjectMapper create(Type cls, String charset) {
<ide> com.fasterxml.jackson.databind.ObjectMapper mapper = new com.fasterxml.jackson.databind.ObjectMapper();
<ide> mapper.registerModule(new JavaTimeModule());
<ide>
<ide>
<ide> return mapper;
<ide> }
<del> })
<add> }))
<ide> )
<ide> .logConfig(
<ide> LogConfig.logConfig().enableLoggingOfRequestAndResponseIfValidationFails() |
|
Java | apache-2.0 | b2a1f464a57db4a7d82df0386f2afef94f3b6aff | 0 | ptrd/jmeter-plugins,Sausageo/jmeter-plugins,Sausageo/jmeter-plugins,ptrd/jmeter-plugins,ptrd/jmeter-plugins,Sausageo/jmeter-plugins,Sausageo/jmeter-plugins,Sausageo/jmeter-plugins,ptrd/jmeter-plugins,ptrd/jmeter-plugins | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package org.jmeterplugins.protocol.http.control;
import junit.framework.TestCase;
import org.apache.commons.io.IOUtils;
import java.io.*;
import java.util.HashMap;
import java.util.Map;
/**
* @author Felix Henry
* @author Vincent Daburon
*/
public class HttpSimpleTableServerTest extends TestCase {
private static final String DATA_DIR = System.getProperty("user.dir");
private static final String CRLF = HttpSimpleTableServer.lineSeparator;
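// The Simple Table Server under /sts is driven entirely by query/form parameters:
// /sts/<COMMAND> where COMMAND is one of INITFILE, READ, ADD (POST only), LENGTH,
// SAVE, RESET or STATUS, with parameters such as FILENAME, READ_MODE
// (FIRST|LAST|RANDOM), KEEP (TRUE|FALSE), ADD_MODE (FIRST|LAST) and LINE, as
// exercised by the assertions below.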
public void testGetRequest() throws Exception {
// create a file to test the STS
String filename = "test-login.csv";
BufferedWriter out = new BufferedWriter(new FileWriter(new File(DATA_DIR, filename)));
out.write("login1;password1");
out.write(CRLF);
out.write("login2;password2");
out.write(CRLF);
out.close();
HttpSimpleTableServer obj = new HttpSimpleTableServerEmul(-1, true, DATA_DIR);
// HELP (GET)
String result = sendHttpGet(obj, ""
+ "/sts");
assertTrue(0 < result.length()
&& result
.startsWith("<html><head><title>URL for the dataset</title><head>"));
// HELP (GET)
result = sendHttpGet(obj, "/sts/");
assertTrue(0 < result.length()
&& result
.startsWith("<html><head><title>URL for the dataset</title><head>"));
// STATUS (GET) : ERROR EMPTY DATABASE
result = sendHttpGet(obj, "/sts/STATUS");
assertEquals("<html><title>KO</title>" + CRLF + "<body>"
+ "Error : Database was empty !</body>" + CRLF + "</html>",
result);
// INITFILE (GET)
result = sendHttpGet(obj, "/sts/INITFILE", this.createParm("FILENAME", filename));
assertEquals("<html><title>OK</title>" + CRLF + "<body>2</body>" + CRLF
+ "</html>", result);
// INITFILE (GET) : ERROR FILE NOT FOUND
result = sendHttpGet(obj, "/sts/INITFILE", this.createParm("FILENAME", "unknown.txt"));
assertEquals("<html><title>KO</title>" + CRLF
+ "<body>Error : file not found !</body>" + CRLF + "</html>",
result);
// INITFILE (GET) : ERROR MISSING FILENAME
result = sendHttpGet(obj, "/sts/INITFILE", new HashMap<String, String>());
assertEquals("<html><title>KO</title>" + CRLF
+ "<body>Error : FILENAME parameter was missing !</body>"
+ CRLF + "</html>", result);
// Delete the file test-login.csv
File dataset = new File(DATA_DIR, filename);
dataset.delete();
// READ LAST KEEP=TRUE (GET)
Map<String, String> map1 = this.createParm("FILENAME", filename);
map1.put("READ_MODE", "LAST");
result = sendHttpGet(obj, "/sts/READ", map1);
assertEquals("<html><title>OK</title>" + CRLF
+ "<body>login2;password2</body>" + CRLF + "</html>", result);
// READ FIRST KEEP=FALSE (GET)
Map<String, String> map2 = this.createParm("FILENAME", filename);
map2.put("READ_MODE", "FIRST");
map2.put("KEEP", "FALSE");
result = sendHttpGet(obj, "/sts/READ", map2);
assertEquals("<html><title>OK</title>" + CRLF
+ "<body>login1;password1</body>" + CRLF + "</html>", result);
// READ (GET) : ERROR UNKNOWN READ_MODE
Map<String, String> map3 = this.createParm("FILENAME", filename);
map3.put("READ_MODE", "SECOND");
result = sendHttpGet(obj, "/sts/READ", map3);
assertEquals(
"<html><title>KO</title>"
+ CRLF
+ "<body>Error : READ_MODE value has to be FIRST, LAST or RANDOM !</body>"
+ CRLF + "</html>", result);
// READ (GET) : ERROR MISSING FILENAME
Map<String, String> map4 = this.createParm("A", filename);
map4.put("READ_MODE", "LAST");
result = sendHttpGet(obj, "/sts/READ", map4);
assertEquals("<html><title>KO</title>" + CRLF
+ "<body>Error : FILENAME parameter was missing !</body>"
+ CRLF + "</html>", result);
// READ (GET) : ERROR UNKNOWN FILENAME
result = sendHttpGet(obj, "/sts/READ", this.createParm("FILENAME", "unexpected.txt"));
assertEquals("<html><title>KO</title>" + CRLF
+ "<body>Error : unexpected.txt not loaded yet !</body>" + CRLF
+ "</html>", result);
// READ (GET) : ERROR UNKNOWN KEEP
Map<String, String> map5 = this.createParm("FILENAME", filename);
map5.put("KEEP", "NO");
result = sendHttpGet(obj, "/sts/READ", map5);
assertEquals("<html><title>KO</title>" + CRLF
+ "<body>Error : KEEP value has to be TRUE or FALSE !</body>"
+ CRLF + "</html>", result);
// LENGTH (GET)
result = sendHttpGet(obj, "/sts/LENGTH", this.createParm("FILENAME", filename));
assertEquals("<html><title>OK</title>" + CRLF + "<body>1</body>" + CRLF
+ "</html>", result);
// LENGTH (POST)
result = sendHttpPost(obj, "/sts/LENGTH", this.createParm("FILENAME", filename));
assertEquals("<html><title>OK</title>" + CRLF + "<body>1</body>" + CRLF
+ "</html>", result);
// LENGTH (GET) ERROR FILE NOT FOUND
result = sendHttpGet(obj, "/sts/LENGTH", this.createParm("FILENAME", "unknown.txt"));
assertEquals("<html><title>KO</title>" + CRLF
+ "<body>Error : unknown.txt not loaded yet !</body>" + CRLF
+ "</html>", result);
// LENGTH (GET) ERROR MISSING FILENAME
result = sendHttpGet(obj, "/sts/LENGTH", this.createParm("A", "unknown.txt"));
assertEquals("<html><title>KO</title>" + CRLF
+ "<body>Error : FILENAME parameter was missing !</body>"
+ CRLF + "</html>", result);
// ADD (POST)
Map<String, String> urlParameters = this.createParm("FILENAME", "unknown.txt");
urlParameters.put("ADD_MODE", "LAST");
urlParameters.put("FILENAME", "test-login.csv");
urlParameters.put("LINE", "login3;password3");
result = sendHttpPost(obj, "/sts/ADD", urlParameters);
assertEquals("<html><title>OK</title>" + CRLF + "<body></body>" + CRLF
+ "</html>", result);
// ADD (GET) : ERROR ADD SHOULD USE POST METHOD
result = sendHttpGet(obj, "/sts/ADD?LINE=login4;password4&FILENAME=" + filename);
assertEquals("<html><title>KO</title>" + CRLF
+ "<body>Error : unknown command !</body>" + CRLF + "</html>",
result);
// ADD (POST) : ERROR MISSING LINE
Map<String, String> urlParameters2 = this.createParm("FILENAME", "unknown.txt");
urlParameters2.put("ADD_MODE", "LAST");
urlParameters2.put("FILENAME", "test-login.csv");
result = sendHttpPost(obj, "/sts/ADD", urlParameters2);
assertEquals("<html><title>KO</title>" + CRLF
+ "<body>Error : LINE parameter was missing !</body>" + CRLF
+ "</html>", result);
// ADD (POST) : MISSING ADD_MODE
Map<String, String> urlParameters3 = this.createParm("FILENAME", "unknown.txt");
urlParameters3.put("FILENAME", "test-login.csv");
urlParameters3.put("LINE", "login3;password3");
result = sendHttpPost(obj, "/sts/ADD", urlParameters3);
assertEquals("<html><title>OK</title>" + CRLF + "<body></body>" + CRLF
+ "</html>", result);
// ADD (POST) : ERROR WRONG ADD MODE
Map<String, String> urlParameters4 = this.createParm("FILENAME", "unknown.txt");
urlParameters4.put("ADD_MODE", "RANDOM");
urlParameters4.put("FILENAME", "test-login.csv");
urlParameters4.put("LINE", "login3;password3");
result = sendHttpPost(obj, "/sts/ADD", urlParameters4);
assertEquals(
"<html><title>KO</title>"
+ CRLF
+ "<body>Error : ADD_MODE value has to be FIRST or LAST !</body>"
+ CRLF + "</html>", result);
// READ RANDOM KEEP=TRUE (GET)
result = sendHttpGet(obj, "/sts/READ?READ_MODE=RANDOM&FILENAME=" + filename);
assertTrue(result.startsWith("<html><title>OK</title>"));
// SAVE (GET)
result = sendHttpGet(obj, "/sts/SAVE?FILENAME=" + filename);
assertEquals("<html><title>OK</title>" + CRLF + "<body>3</body>" + CRLF
+ "</html>", result);
// SAVE (GET) : ERROR MAX SIZE REACHED
result = sendHttpGet(obj, "/sts/SAVE?FILENAME=aaaaaaaaaabbbbbbbbbbccccccccccddddddddddeeeeeeeeeeffffffffffgggggggggghhhhhhhhhhiiiiiiiiiijjjjjjjjjjkkkkkkkkkkllllllllllmmmmmmmmmm.txt"
+ filename);
assertEquals("<html><title>KO</title>" + CRLF
+ "<body>Error : Maximum size reached (128) !</body>" + CRLF
+ "</html>", result);
// SAVE (GET) : ERROR ILLEGAL CHAR
result = sendHttpGet(obj, ""
+ "/sts/SAVE?FILENAME=logins:passwords.csv");
assertEquals("<html><title>KO</title>" + CRLF
+ "<body>Error : Illegal character found !</body>" + CRLF
+ "</html>", result);
// SAVE (GET) : ERROR ILLEGAL FILENAME .
result = sendHttpGet(obj, ""
+ "/sts/SAVE?FILENAME=.");
assertEquals("<html><title>KO</title>" + CRLF
+ "<body>Error : Illegal character found !</body>" + CRLF
+ "</html>", result);
// SAVE (GET) : ERROR ILLEGAL FILENAME ..
result = sendHttpGet(obj, ""
+ "/sts/SAVE?FILENAME=..");
assertEquals("<html><title>KO</title>" + CRLF
+ "<body>Error : Illegal character found !</body>" + CRLF
+ "</html>", result);
// Delete the newly saved file test-login.csv
dataset = new File(DATA_DIR, filename);
dataset.delete();
// RESET (GET)
result = sendHttpGet(obj, ""
+ "/sts/RESET?FILENAME=" + filename);
assertEquals("<html><title>OK</title>" + CRLF + "<body></body>" + CRLF
+ "</html>", result);
// RESET (GET) ERROR MISSING FILENAME
result = sendHttpGet(obj, ""
+ "/sts/RESET");
assertEquals("<html><title>KO</title>" + CRLF
+ "<body>Error : FILENAME parameter was missing !</body>"
+ CRLF + "</html>", result);
// READ (GET) : ERROR LIST IS EMPTY
result = sendHttpGet(obj, ""
+ "/sts/READ?FILENAME=" + filename);
assertEquals("<html><title>KO</title>" + CRLF
+ "<body>Error : No more line !</body>" + CRLF + "</html>",
result);
// STATUS (GET)
result = sendHttpGet(obj, ""
+ "/sts/STATUS");
assertEquals("<html><title>OK</title>" + CRLF + "<body>" + CRLF
+ filename + " = 0<br />" + CRLF + "</body></html>", result);
}
private Map<String, String> createParm(String key, String value) {
Map<String, String> res = new HashMap<String, String>();
res.put(key, value);
return res;
}
private String sendHttpGet(HttpSimpleTableServer obj, String url, Map<String, String> params) throws IOException {
SessionEmulator sess = new SessionEmulator(url);
if (params != null) {
sess.setParms(params);
}
NanoHTTPD.Response resp = obj.serve(sess);
InputStream inputStream = resp.getData();
StringWriter writer = new StringWriter();
IOUtils.copy(inputStream, writer);
return writer.toString();
}
private String sendHttpGet(HttpSimpleTableServer obj, String url) throws Exception {
return sendHttpGet(obj, url, null);
}
private String sendHttpPost(HttpSimpleTableServer obj, String url, Map<String, String> parms)
throws Exception {
SessionEmulator sess = new SessionEmulator(url);
sess.setMethod(NanoHTTPD.Method.POST);
sess.setBody(parms);
NanoHTTPD.Response resp = obj.serve(sess);
InputStream inputStream = resp.getData();
StringWriter writer = new StringWriter();
IOUtils.copy(inputStream, writer);
String resp_entity = writer.toString();
return resp_entity;
}
private class SessionEmulator implements NanoHTTPD.IHTTPSession {
private final String url;
private Map<String, String> parms;
private NanoHTTPD.Method method;
private Map<String, String> body;
public SessionEmulator(String url) {
this.url = url;
}
@Override
public void execute() throws IOException {
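// No-op: the tests invoke serve() directly through the helper methods, so there is no request loop to run.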
}
@Override
public Map<String, String> getParms() {
return this.parms;
}
@Override
public Map<String, String> getHeaders() {
return null;
}
@Override
public String getUri() {
return this.url;
}
@Override
public String getQueryParameterString() {
return null;
}
@Override
public NanoHTTPD.Method getMethod() {
return this.method;
}
@Override
public InputStream getInputStream() {
return null;
}
@Override
public NanoHTTPD.CookieHandler getCookies() {
return null;
}
@Override
public void parseBody(Map<String, String> files) throws IOException, NanoHTTPD.ResponseException {
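// Simplified stand-in for NanoHTTPD's parseBody: expose the POST body as both the parsed files map and the request parameters.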
files.putAll(this.body);
parms = body;
}
public void setParms(Map<String, String> parms) {
this.parms = parms;
}
public void setMethod(NanoHTTPD.Method method) {
this.method = method;
}
public void setBody(Map<String, String> body) {
this.body = body;
}
}
}
| extras/test/org/jmeterplugins/protocol/http/control/HttpSimpleTableServerTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package org.jmeterplugins.protocol.http.control;
import junit.framework.TestCase;
import org.apache.commons.io.IOUtils;
import org.apache.http.HttpEntity;
import org.apache.http.HttpResponse;
import org.apache.http.NameValuePair;
import org.apache.http.client.HttpClient;
import org.apache.http.client.entity.UrlEncodedFormEntity;
import org.apache.http.client.methods.HttpPost;
import org.apache.http.impl.client.DefaultHttpClient;
import org.apache.http.message.BasicNameValuePair;
import org.apache.http.util.EntityUtils;
import java.io.*;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
/**
* @author Felix Henry
* @author Vincent Daburon
*/
public class HttpSimpleTableServerTest extends TestCase {
private final String USER_AGENT = "Mozilla/5.0";
private static final int HTTP_SERVER_PORT = -1;
private static final String DATA_DIR = System.getProperty("user.dir");
private static final String CRLF = HttpSimpleTableServer.lineSeparator;
public void testGetRequest() throws Exception {
// create a file to test the STS
BufferedWriter out = null;
String filename = "test-login.csv";
out = new BufferedWriter(new FileWriter(new File(DATA_DIR, filename)));
out.write("login1;password1");
out.write(CRLF);
out.write("login2;password2");
out.write(CRLF);
if (null != out) {
out.close();
}
HttpSimpleTableServer obj = new HttpSimpleTableServerEmul(-1, true, DATA_DIR);
// HELP (GET)
String result = sendHttpGet(obj, ""
+ "/sts");
assertTrue(0 < result.length()
&& result
.startsWith("<html><head><title>URL for the dataset</title><head>"));
// HELP (GET)
result = sendHttpGet(obj, "" + "/sts/");
assertTrue(0 < result.length()
&& result
.startsWith("<html><head><title>URL for the dataset</title><head>"));
// STATUS (GET) : ERROR EMPTY DATABASE
result = sendHttpGet(obj, ""
+ "/sts/STATUS");
assertEquals("<html><title>KO</title>" + CRLF + "<body>"
+ "Error : Database was empty !</body>" + CRLF + "</html>",
result);
// INITFILE (GET)
result = sendHttpGet(obj, ""
+ "/sts/INITFILE?FILENAME=" + filename);
assertEquals("<html><title>OK</title>" + CRLF + "<body>2</body>" + CRLF
+ "</html>", result);
// INITFILE (GET) : ERROR FILE NOT FOUND
result = sendHttpGet(obj, ""
+ "/sts/INITFILE?FILENAME=unknown.txt");
assertEquals("<html><title>KO</title>" + CRLF
+ "<body>Error : file not found !</body>" + CRLF + "</html>",
result);
// INITFILE (GET) : ERROR MISSING FILENAME
result = sendHttpGet(obj, ""
+ "/sts/INITFILE");
assertEquals("<html><title>KO</title>" + CRLF
+ "<body>Error : FILENAME parameter was missing !</body>"
+ CRLF + "</html>", result);
// Delete the file test-login.csv
File dataset = new File(DATA_DIR, filename);
dataset.delete();
// READ LAST KEEP=TRUE (GET)
result = sendHttpGet(obj, ""
+ "/sts/READ?READ_MODE=LAST&FILENAME=" + filename);
assertEquals("<html><title>OK</title>" + CRLF
+ "<body>login2;password2</body>" + CRLF + "</html>", result);
// READ FIRST KEEP=FALSE (GET)
result = sendHttpGet(obj, ""
+ "/sts/READ?READ_MODE=FIRST&KEEP=FALSE&FILENAME=" + filename);
assertEquals("<html><title>OK</title>" + CRLF
+ "<body>login1;password1</body>" + CRLF + "</html>", result);
// READ (GET) : ERROR UNKNOWN READ_MODE
result = sendHttpGet(obj, ""
+ "/sts/READ?READ_MODE=SECOND&FILENAME=" + filename);
assertEquals(
"<html><title>KO</title>"
+ CRLF
+ "<body>Error : READ_MODE value has to be FIRST, LAST or RANDOM !</body>"
+ CRLF + "</html>", result);
// READ (GET) : ERROR MISSING FILENAME
result = sendHttpGet(obj, ""
+ "/sts/READ?READ_MODE=LAST");
assertEquals("<html><title>KO</title>" + CRLF
+ "<body>Error : FILENAME parameter was missing !</body>"
+ CRLF + "</html>", result);
// READ (GET) : ERROR UNKNOWN FILENAME
result = sendHttpGet(obj, ""
+ "/sts/READ?FILENAME=unexpected.txt");
assertEquals("<html><title>KO</title>" + CRLF
+ "<body>Error : unexpected.txt not loaded yet !</body>" + CRLF
+ "</html>", result);
// READ (GET) : ERROR UNKNOWN KEEP
result = sendHttpGet(obj, ""
+ "/sts/READ?KEEP=NO&FILENAME=" + filename);
assertEquals("<html><title>KO</title>" + CRLF
+ "<body>Error : KEEP value has to be TRUE or FALSE !</body>"
+ CRLF + "</html>", result);
// LENGTH (GET)
result = sendHttpGet(obj, ""
+ "/sts/LENGTH?FILENAME=" + filename);
assertEquals("<html><title>OK</title>" + CRLF + "<body>1</body>" + CRLF
+ "</html>", result);
// LENGTH (POST)
List<NameValuePair> urlParameters = new ArrayList<NameValuePair>();
urlParameters.add(new BasicNameValuePair("FILENAME", filename));
result = sendHttpPost(""
+ "/sts/LENGTH", urlParameters);
assertEquals("<html><title>OK</title>" + CRLF + "<body>1</body>" + CRLF
+ "</html>", result);
// LENGTH (GET) ERROR FILE NOT FOUND
result = sendHttpGet(obj, ""
+ "/sts/LENGTH?FILENAME=unknown.txt");
assertEquals("<html><title>KO</title>" + CRLF
+ "<body>Error : unknown.txt not loaded yet !</body>" + CRLF
+ "</html>", result);
// LENGTH (GET) ERROR MISSING FILENAME
result = sendHttpGet(obj, ""
+ "/sts/LENGTH");
assertEquals("<html><title>KO</title>" + CRLF
+ "<body>Error : FILENAME parameter was missing !</body>"
+ CRLF + "</html>", result);
// ADD (POST)
urlParameters = new ArrayList<NameValuePair>();
urlParameters.add(new BasicNameValuePair("ADD_MODE", "LAST"));
urlParameters.add(new BasicNameValuePair("FILENAME", "test-login.csv"));
urlParameters.add(new BasicNameValuePair("LINE", "login3;password3"));
result = sendHttpPost(""
+ "/sts/ADD", urlParameters);
assertEquals("<html><title>OK</title>" + CRLF + "<body></body>" + CRLF
+ "</html>", result);
// ADD (GET) : ERROR ADD SHOULD USE POST METHOD
result = sendHttpGet(obj, ""
+ "/sts/ADD?LINE=login4;password4&FILENAME=" + filename);
assertEquals("<html><title>KO</title>" + CRLF
+ "<body>Error : unknown command !</body>" + CRLF + "</html>",
result);
// ADD (POST) : ERROR MISSING LINE
urlParameters = new ArrayList<NameValuePair>();
urlParameters.add(new BasicNameValuePair("ADD_MODE", "LAST"));
urlParameters.add(new BasicNameValuePair("FILENAME", "test-login.csv"));
result = sendHttpPost(""
+ "/sts/ADD", urlParameters);
assertEquals("<html><title>KO</title>" + CRLF
+ "<body>Error : LINE parameter was missing !</body>" + CRLF
+ "</html>", result);
// ADD (POST) : MISSING ADD_MODE
urlParameters = new ArrayList<NameValuePair>();
urlParameters.add(new BasicNameValuePair("FILENAME", "test-login.csv"));
urlParameters.add(new BasicNameValuePair("LINE", "login4;password4"));
result = sendHttpPost(""
+ "/sts/ADD", urlParameters);
assertEquals("<html><title>OK</title>" + CRLF + "<body></body>" + CRLF
+ "</html>", result);
// ADD (POST) : ERROR WRONG ADD MODE
urlParameters = new ArrayList<NameValuePair>();
urlParameters.add(new BasicNameValuePair("ADD_MODE", "RANDOM"));
urlParameters.add(new BasicNameValuePair("FILENAME", "test-login.csv"));
urlParameters.add(new BasicNameValuePair("LINE", "login3;password3"));
result = sendHttpPost(""
+ "/sts/ADD", urlParameters);
assertEquals(
"<html><title>KO</title>"
+ CRLF
+ "<body>Error : ADD_MODE value has to be FIRST or LAST !</body>"
+ CRLF + "</html>", result);
// READ RANDOM KEEP=TRUE (GET)
result = sendHttpGet(obj, ""
+ "/sts/READ?READ_MODE=RANDOM&FILENAME=" + filename);
assertTrue(result.startsWith("<html><title>OK</title>"));
// SAVE (GET)
result = sendHttpGet(obj, ""
+ "/sts/SAVE?FILENAME=" + filename);
assertEquals("<html><title>OK</title>" + CRLF + "<body>3</body>" + CRLF
+ "</html>", result);
// SAVE (GET) : ERROR MAX SIZE REACHED
result = sendHttpGet(obj, "http://localhost:"
+ HTTP_SERVER_PORT
+ "/sts/SAVE?FILENAME=aaaaaaaaaabbbbbbbbbbccccccccccddddddddddeeeeeeeeeeffffffffffgggggggggghhhhhhhhhhiiiiiiiiiijjjjjjjjjjkkkkkkkkkkllllllllllmmmmmmmmmm.txt"
+ filename);
assertEquals("<html><title>KO</title>" + CRLF
+ "<body>Error : Maximum size reached (128) !</body>" + CRLF
+ "</html>", result);
// SAVE (GET) : ERROR ILLEGAL CHAR
result = sendHttpGet(obj, ""
+ "/sts/SAVE?FILENAME=logins:passwords.csv");
assertEquals("<html><title>KO</title>" + CRLF
+ "<body>Error : Illegal character found !</body>" + CRLF
+ "</html>", result);
// SAVE (GET) : ERROR ILLEGAL FILENAME .
result = sendHttpGet(obj, ""
+ "/sts/SAVE?FILENAME=.");
assertEquals("<html><title>KO</title>" + CRLF
+ "<body>Error : Illegal character found !</body>" + CRLF
+ "</html>", result);
// SAVE (GET) : ERROR ILLEGAL FILENAME ..
result = sendHttpGet(obj, ""
+ "/sts/SAVE?FILENAME=..");
assertEquals("<html><title>KO</title>" + CRLF
+ "<body>Error : Illegal character found !</body>" + CRLF
+ "</html>", result);
// Delete the newly saved file test-login.csv
dataset = new File(DATA_DIR, filename);
dataset.delete();
// RESET (GET)
result = sendHttpGet(obj, ""
+ "/sts/RESET?FILENAME=" + filename);
assertEquals("<html><title>OK</title>" + CRLF + "<body></body>" + CRLF
+ "</html>", result);
// RESET (GET) ERROR MISSING FILENAME
result = sendHttpGet(obj, ""
+ "/sts/RESET");
assertEquals("<html><title>KO</title>" + CRLF
+ "<body>Error : FILENAME parameter was missing !</body>"
+ CRLF + "</html>", result);
// READ (GET) : ERROR LIST IS EMPTY
result = sendHttpGet(obj, ""
+ "/sts/READ?FILENAME=" + filename);
assertEquals("<html><title>KO</title>" + CRLF
+ "<body>Error : No more line !</body>" + CRLF + "</html>",
result);
// STATUS (GET)
result = sendHttpGet(obj, ""
+ "/sts/STATUS");
assertEquals("<html><title>OK</title>" + CRLF + "<body>" + CRLF
+ filename + " = 0<br />" + CRLF + "</body></html>", result);
}
private String sendHttpGet(HttpSimpleTableServer obj, String url) throws Exception {
NanoHTTPD.IHTTPSession sess = new SessionEmulator(url);
NanoHTTPD.Response resp = obj.serve(sess);
InputStream inputStream = resp.getData();
StringWriter writer = new StringWriter();
IOUtils.copy(inputStream, writer);
String resp_entity = writer.toString();
return resp_entity;
}
private String sendHttpPost(String url, List<NameValuePair> parms)
throws Exception {
HttpClient client = new DefaultHttpClient();
HttpPost post = new HttpPost(url);
// add header
post.setHeader("User-Agent", USER_AGENT);
post.setEntity(new UrlEncodedFormEntity(parms, "UTF-8"));
HttpResponse resp = client.execute(post);
HttpEntity resp_entity = resp.getEntity();
String result = EntityUtils.toString(resp_entity);
return result;
}
private class SessionEmulator implements NanoHTTPD.IHTTPSession {
private final String url;
public SessionEmulator(String url) {
this.url = url;
}
@Override
public void execute() throws IOException {
}
@Override
public Map<String, String> getParms() {
return null;
}
@Override
public Map<String, String> getHeaders() {
return null;
}
@Override
public String getUri() {
return this.url;
}
@Override
public String getQueryParameterString() {
return null;
}
@Override
public NanoHTTPD.Method getMethod() {
return null;
}
@Override
public InputStream getInputStream() {
return null;
}
@Override
public NanoHTTPD.CookieHandler getCookies() {
return null;
}
@Override
public void parseBody(Map<String, String> files) throws IOException, NanoHTTPD.ResponseException {
}
}
}
| Improving the test
| extras/test/org/jmeterplugins/protocol/http/control/HttpSimpleTableServerTest.java | Improving the test | <ide><path>xtras/test/org/jmeterplugins/protocol/http/control/HttpSimpleTableServerTest.java
<ide>
<ide> import junit.framework.TestCase;
<ide> import org.apache.commons.io.IOUtils;
<del>import org.apache.http.HttpEntity;
<del>import org.apache.http.HttpResponse;
<del>import org.apache.http.NameValuePair;
<del>import org.apache.http.client.HttpClient;
<del>import org.apache.http.client.entity.UrlEncodedFormEntity;
<del>import org.apache.http.client.methods.HttpPost;
<del>import org.apache.http.impl.client.DefaultHttpClient;
<del>import org.apache.http.message.BasicNameValuePair;
<del>import org.apache.http.util.EntityUtils;
<ide>
<ide> import java.io.*;
<del>import java.util.ArrayList;
<del>import java.util.List;
<add>import java.util.HashMap;
<ide> import java.util.Map;
<ide>
<ide> /**
<ide> * @author Vincent Daburon
<ide> */
<ide> public class HttpSimpleTableServerTest extends TestCase {
<del> private final String USER_AGENT = "Mozilla/5.0";
<del> private static final int HTTP_SERVER_PORT = -1;
<add>
<ide> private static final String DATA_DIR = System.getProperty("user.dir");
<ide> private static final String CRLF = HttpSimpleTableServer.lineSeparator;
<ide>
<ide> result);
<ide>
<ide> // INITFILE (GET)
<del> result = sendHttpGet(obj, ""
<del> + "/sts/INITFILE?FILENAME=" + filename);
<add> result = sendHttpGet(obj, "/sts/INITFILE", this.createParm("FILENAME", filename));
<ide> assertEquals("<html><title>OK</title>" + CRLF + "<body>2</body>" + CRLF
<ide> + "</html>", result);
<ide>
<ide> // INITFILE (GET) : ERROR FILE NOT FOUND
<del> result = sendHttpGet(obj, ""
<del> + "/sts/INITFILE?FILENAME=unknown.txt");
<add> result = sendHttpGet(obj, "/sts/INITFILE", this.createParm("FILENAME", "unknown.txt"));
<ide> assertEquals("<html><title>KO</title>" + CRLF
<ide> + "<body>Error : file not found !</body>" + CRLF + "</html>",
<ide> result);
<ide>
<ide> // INITFILE (GET) : ERROR MISSING FILENAME
<del> result = sendHttpGet(obj, ""
<del> + "/sts/INITFILE");
<add> result = sendHttpGet(obj, "/sts/INITFILE", new HashMap<String, String>());
<ide> assertEquals("<html><title>KO</title>" + CRLF
<ide> + "<body>Error : FILENAME parameter was missing !</body>"
<ide> + CRLF + "</html>", result);
<ide> dataset.delete();
<ide>
<ide> // READ LAST KEEP=TRUE (GET)
<del> result = sendHttpGet(obj, ""
<del> + "/sts/READ?READ_MODE=LAST&FILENAME=" + filename);
<add> Map<String, String> map1 = this.createParm("FILENAME", filename);
<add> map1.put("READ_MODE", "LAST");
<add> result = sendHttpGet(obj, "/sts/READ", map1);
<ide> assertEquals("<html><title>OK</title>" + CRLF
<ide> + "<body>login2;password2</body>" + CRLF + "</html>", result);
<ide>
<ide> // READ FIRST KEEP=FALSE (GET)
<del> result = sendHttpGet(obj, ""
<del> + "/sts/READ?READ_MODE=FIRST&KEEP=FALSE&FILENAME=" + filename);
<add> Map<String, String> map2 = this.createParm("FILENAME", filename);
<add> map2.put("READ_MODE", "FIRST");
<add> map2.put("KEEP", "FALSE");
<add> result = sendHttpGet(obj, "/sts/READ", map2);
<ide> assertEquals("<html><title>OK</title>" + CRLF
<ide> + "<body>login1;password1</body>" + CRLF + "</html>", result);
<ide>
<ide> // READ (GET) : ERROR UNKNOWN READ_MODE
<del> result = sendHttpGet(obj, ""
<del> + "/sts/READ?READ_MODE=SECOND&FILENAME=" + filename);
<add> Map<String, String> map3 = this.createParm("FILENAME", filename);
<add> map3.put("READ_MODE", "SECOND");
<add> result = sendHttpGet(obj, "/sts/READ", map3);
<ide> assertEquals(
<ide> "<html><title>KO</title>"
<ide> + CRLF
<ide> + CRLF + "</html>", result);
<ide>
<ide> // READ (GET) : ERROR MISSING FILENAME
<del> result = sendHttpGet(obj, ""
<del> + "/sts/READ?READ_MODE=LAST");
<add> Map<String, String> map4 = this.createParm("A", filename);
<add> map4.put("READ_MODE", "LAST");
<add> result = sendHttpGet(obj, "/sts/READ", map4);
<ide> assertEquals("<html><title>KO</title>" + CRLF
<ide> + "<body>Error : FILENAME parameter was missing !</body>"
<ide> + CRLF + "</html>", result);
<ide>
<ide> // READ (GET) : ERROR UNKNOWN FILENAME
<del> result = sendHttpGet(obj, ""
<del> + "/sts/READ?FILENAME=unexpected.txt");
<add> result = sendHttpGet(obj, "/sts/READ", this.createParm("FILENAME", "unexpected.txt"));
<ide> assertEquals("<html><title>KO</title>" + CRLF
<ide> + "<body>Error : unexpected.txt not loaded yet !</body>" + CRLF
<ide> + "</html>", result);
<ide>
<ide> // READ (GET) : ERROR UNKNOWN KEEP
<del> result = sendHttpGet(obj, ""
<del> + "/sts/READ?KEEP=NO&FILENAME=" + filename);
<add> Map<String, String> map5 = this.createParm("FILENAME", filename);
<add> map5.put("KEEP", "NO");
<add> result = sendHttpGet(obj, "/sts/READ", map5);
<ide> assertEquals("<html><title>KO</title>" + CRLF
<ide> + "<body>Error : KEEP value has to be TRUE or FALSE !</body>"
<ide> + CRLF + "</html>", result);
<ide>
<ide> // LENGTH (GET)
<del> result = sendHttpGet(obj, ""
<del> + "/sts/LENGTH?FILENAME=" + filename);
<add> result = sendHttpGet(obj, "/sts/LENGTH", this.createParm("FILENAME", filename));
<ide> assertEquals("<html><title>OK</title>" + CRLF + "<body>1</body>" + CRLF
<ide> + "</html>", result);
<ide>
<ide> // LENGTH (POST)
<del> List<NameValuePair> urlParameters = new ArrayList<NameValuePair>();
<del> urlParameters.add(new BasicNameValuePair("FILENAME", filename));
<del> result = sendHttpPost(""
<del> + "/sts/LENGTH", urlParameters);
<add> result = sendHttpPost(obj, "/sts/LENGTH", this.createParm("FILENAME", filename));
<ide> assertEquals("<html><title>OK</title>" + CRLF + "<body>1</body>" + CRLF
<ide> + "</html>", result);
<ide>
<ide> // LENGTH (GET) ERROR FILE NOT FOUND
<del> result = sendHttpGet(obj, ""
<del> + "/sts/LENGTH?FILENAME=unknown.txt");
<add> result = sendHttpGet(obj, "/sts/LENGTH", this.createParm("FILENAME", "unknown.txt"));
<ide> assertEquals("<html><title>KO</title>" + CRLF
<ide> + "<body>Error : unknown.txt not loaded yet !</body>" + CRLF
<ide> + "</html>", result);
<ide>
<ide> // LENGTH (GET) ERROR MISSING FILENAME
<del> result = sendHttpGet(obj, ""
<del> + "/sts/LENGTH");
<add> result = sendHttpGet(obj, "/sts/LENGTH", this.createParm("A", "unknown.txt"));
<ide> assertEquals("<html><title>KO</title>" + CRLF
<ide> + "<body>Error : FILENAME parameter was missing !</body>"
<ide> + CRLF + "</html>", result);
<ide>
<ide> // ADD (POST)
<del> urlParameters = new ArrayList<NameValuePair>();
<del> urlParameters.add(new BasicNameValuePair("ADD_MODE", "LAST"));
<del> urlParameters.add(new BasicNameValuePair("FILENAME", "test-login.csv"));
<del> urlParameters.add(new BasicNameValuePair("LINE", "login3;password3"));
<del> result = sendHttpPost(""
<del> + "/sts/ADD", urlParameters);
<add> Map<String, String> urlParameters = this.createParm("FILENAME", "test-login.csv");
<add> urlParameters.put("ADD_MODE", "LAST");
<add> urlParameters.put("LINE", "login3;password3");
<add> result = sendHttpPost(obj, "/sts/ADD", urlParameters);
<ide> assertEquals("<html><title>OK</title>" + CRLF + "<body></body>" + CRLF
<ide> + "</html>", result);
<ide>
<ide> result);
<ide>
<ide> // ADD (POST) : ERROR MISSING LINE
<del> urlParameters = new ArrayList<NameValuePair>();
<del> urlParameters.add(new BasicNameValuePair("ADD_MODE", "LAST"));
<del> urlParameters.add(new BasicNameValuePair("FILENAME", "test-login.csv"));
<del> result = sendHttpPost(""
<del> + "/sts/ADD", urlParameters);
<add> Map<String, String> urlParameters2 = this.createParm("FILENAME", "test-login.csv");
<add> urlParameters2.put("ADD_MODE", "LAST");
<add> result = sendHttpPost(obj, "/sts/ADD", urlParameters2);
<ide> assertEquals("<html><title>KO</title>" + CRLF
<ide> + "<body>Error : LINE parameter was missing !</body>" + CRLF
<ide> + "</html>", result);
<ide>
<ide> // ADD (POST) : MISSING ADD_MODE
<del> urlParameters = new ArrayList<NameValuePair>();
<del> urlParameters.add(new BasicNameValuePair("FILENAME", "test-login.csv"));
<del> urlParameters.add(new BasicNameValuePair("LINE", "login4;password4"));
<del> result = sendHttpPost(""
<del> + "/sts/ADD", urlParameters);
<add> Map<String, String> urlParameters3 = this.createParm("FILENAME", "test-login.csv");
<add> urlParameters3.put("LINE", "login3;password3");
<add> result = sendHttpPost(obj, "/sts/ADD", urlParameters3);
<ide> assertEquals("<html><title>OK</title>" + CRLF + "<body></body>" + CRLF
<ide> + "</html>", result);
<ide>
<ide> // ADD (POST) : ERROR WRONG ADD MODE
<del> urlParameters = new ArrayList<NameValuePair>();
<del> urlParameters.add(new BasicNameValuePair("ADD_MODE", "RANDOM"));
<del> urlParameters.add(new BasicNameValuePair("FILENAME", "test-login.csv"));
<del> urlParameters.add(new BasicNameValuePair("LINE", "login3;password3"));
<del> result = sendHttpPost(""
<del> + "/sts/ADD", urlParameters);
<add> Map<String, String> urlParameters4 = this.createParm("FILENAME", "test-login.csv");
<add> urlParameters4.put("ADD_MODE", "RANDOM");
<add> urlParameters4.put("LINE", "login3;password3");
<add>
<add> result = sendHttpPost(obj, "/sts/ADD", urlParameters4);
<ide> assertEquals(
<ide> "<html><title>KO</title>"
<ide> + CRLF
<ide> + CRLF + "</html>", result);
<ide>
<ide> // READ RANDOM KEEP=TRUE (GET)
<del> result = sendHttpGet(obj, ""
<del> + "/sts/READ?READ_MODE=RANDOM&FILENAME=" + filename);
<add> result = sendHttpGet(obj, "/sts/READ?READ_MODE=RANDOM&FILENAME=" + filename);
<ide> assertTrue(result.startsWith("<html><title>OK</title>"));
<ide>
<ide> // SAVE (GET)
<ide> + "</html>", result);
<ide>
<ide> // SAVE (GET) : ERROR MAX SIZE REACHED
<del> result = sendHttpGet(obj, "http://localhost:"
<del> + HTTP_SERVER_PORT
<del> + "/sts/SAVE?FILENAME=aaaaaaaaaabbbbbbbbbbccccccccccddddddddddeeeeeeeeeeffffffffffgggggggggghhhhhhhhhhiiiiiiiiiijjjjjjjjjjkkkkkkkkkkllllllllllmmmmmmmmmm.txt"
<add> result = sendHttpGet(obj, "/sts/SAVE?FILENAME=aaaaaaaaaabbbbbbbbbbccccccccccddddddddddeeeeeeeeeeffffffffffgggggggggghhhhhhhhhhiiiiiiiiiijjjjjjjjjjkkkkkkkkkkllllllllllmmmmmmmmmm.txt"
<ide> + filename);
<ide> assertEquals("<html><title>KO</title>" + CRLF
<ide> + "<body>Error : Maximum size reached (128) !</body>" + CRLF
<ide> + filename + " = 0<br />" + CRLF + "</body></html>", result);
<ide> }
<ide>
<add>
<add> private Map<String, String> createParm(String key, String value) {
<add> Map<String, String> res = new HashMap<String, String>();
<add> res.put(key, value);
<add> return res;
<add> }
<add>
<add> private String sendHttpGet(HttpSimpleTableServer obj, String url, Map<String, String> params) throws IOException {
<add> SessionEmulator sess = new SessionEmulator(url);
<add>
<add> if (params != null) {
<add> sess.setParms(params);
<add> }
<add> NanoHTTPD.Response resp = obj.serve(sess);
<add> InputStream inputStream = resp.getData();
<add> StringWriter writer = new StringWriter();
<add> IOUtils.copy(inputStream, writer);
<add> return writer.toString();
<add> }
<add>
<ide> private String sendHttpGet(HttpSimpleTableServer obj, String url) throws Exception {
<del>
<del> NanoHTTPD.IHTTPSession sess = new SessionEmulator(url);
<add> return sendHttpGet(obj, url, null);
<add> }
<add>
<add> private String sendHttpPost(HttpSimpleTableServer obj, String url, Map<String, String> parms)
<add> throws Exception {
<add> SessionEmulator sess = new SessionEmulator(url);
<add> sess.setMethod(NanoHTTPD.Method.POST);
<add> sess.setBody(parms);
<ide> NanoHTTPD.Response resp = obj.serve(sess);
<ide> InputStream inputStream = resp.getData();
<ide> StringWriter writer = new StringWriter();
<ide> return resp_entity;
<ide> }
<ide>
<del> private String sendHttpPost(String url, List<NameValuePair> parms)
<del> throws Exception {
<del> HttpClient client = new DefaultHttpClient();
<del> HttpPost post = new HttpPost(url);
<del>
<del> // add header
<del> post.setHeader("User-Agent", USER_AGENT);
<del>
<del> post.setEntity(new UrlEncodedFormEntity(parms, "UTF-8"));
<del> HttpResponse resp = client.execute(post);
<del> HttpEntity resp_entity = resp.getEntity();
<del> String result = EntityUtils.toString(resp_entity);
<del> return result;
<del> }
<del>
<ide> private class SessionEmulator implements NanoHTTPD.IHTTPSession {
<ide> private final String url;
<add> private Map<String, String> parms;
<add> private NanoHTTPD.Method method;
<add> private Map<String, String> body;
<ide>
<ide> public SessionEmulator(String url) {
<ide> this.url = url;
<ide>
<ide> @Override
<ide> public Map<String, String> getParms() {
<del> return null;
<add> return this.parms;
<ide> }
<ide>
<ide> @Override
<ide>
<ide> @Override
<ide> public NanoHTTPD.Method getMethod() {
<del> return null;
<add> return this.method;
<ide> }
<ide>
<ide> @Override
<ide>
<ide> @Override
<ide> public void parseBody(Map<String, String> files) throws IOException, NanoHTTPD.ResponseException {
<del>
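<add> // Simplified stand-in for NanoHTTPD's parseBody: expose the POST body as both the parsed files map and the request parameters.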
<add> files.putAll(this.body);
<add> parms = body;
<add> }
<add>
<add> public void setParms(Map<String, String> parms) {
<add> this.parms = parms;
<add> }
<add>
<add> public void setMethod(NanoHTTPD.Method method) {
<add> this.method = method;
<add> }
<add>
<add> public void setBody(Map<String, String> body) {
<add> this.body = body;
<ide> }
<ide> }
<ide> } |
|
Java | bsd-3-clause | dd39f312da69f5260b8a519b5ea3688228fd7e92 | 0 | benblank/Ice | package com.five35.minecraft.ice;
import net.minecraft.block.material.MapColor;
import net.minecraft.block.material.Material;
final class DeathMarkerMaterial extends Material {
private static final DeathMarkerMaterial INSTANCE = new DeathMarkerMaterial();
static DeathMarkerMaterial getInstance() {
return DeathMarkerMaterial.INSTANCE;
}
private DeathMarkerMaterial() {
super(MapColor.airColor);
this.setImmovableMobility();
}
@Override
public boolean blocksLight() {
return false;
}
@Override
public boolean blocksMovement() {
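// Death markers must not block movement; a material that blocks movement could suffocate entities (fixes #3).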
return false;
}
@Override
public boolean isSolid() {
return false;
}
}
| src/main/java/com/five35/minecraft/ice/DeathMarkerMaterial.java | package com.five35.minecraft.ice;
import net.minecraft.block.material.MapColor;
import net.minecraft.block.material.Material;
final class DeathMarkerMaterial extends Material {
private static final DeathMarkerMaterial INSTANCE = new DeathMarkerMaterial();
static DeathMarkerMaterial getInstance() {
return DeathMarkerMaterial.INSTANCE;
}
private DeathMarkerMaterial() {
super(MapColor.airColor);
this.setImmovableMobility();
}
@Override
public boolean blocksLight() {
return false;
}
@Override
public boolean isSolid() {
return false;
}
}
| death marker material doesn't block movement, so can't suffocate entities (fixes #3)
| src/main/java/com/five35/minecraft/ice/DeathMarkerMaterial.java | death marker material doesn't block movement, so can't suffocate entities (fixes #3) | <ide><path>rc/main/java/com/five35/minecraft/ice/DeathMarkerMaterial.java
<ide> }
<ide>
<ide> @Override
<add> public boolean blocksMovement() {
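<add> // Death markers must not block movement; a material that blocks movement could suffocate entities (fixes #3).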
<add> return false;
<add> }
<add>
<add> @Override
<ide> public boolean isSolid() {
<ide> return false;
<ide> } |
|
Java | apache-2.0 | 25d7233fe32821994fd5b30a7e6209a0631c0a61 | 0 | google/guice,google/guice,google/guice | /*
* Copyright (C) 2008 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.inject.util;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Iterables;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.collect.Sets;
import com.google.errorprone.annotations.CheckReturnValue;
import com.google.inject.AbstractModule;
import com.google.inject.Binder;
import com.google.inject.Binding;
import com.google.inject.Key;
import com.google.inject.Module;
import com.google.inject.PrivateBinder;
import com.google.inject.PrivateModule;
import com.google.inject.Scope;
import com.google.inject.internal.Errors;
import com.google.inject.spi.DefaultBindingScopingVisitor;
import com.google.inject.spi.DefaultElementVisitor;
import com.google.inject.spi.Element;
import com.google.inject.spi.ElementVisitor;
import com.google.inject.spi.Elements;
import com.google.inject.spi.ModuleAnnotatedMethodScannerBinding;
import com.google.inject.spi.PrivateElements;
import com.google.inject.spi.ScopeBinding;
import java.lang.annotation.Annotation;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.Set;
/**
* Static utility methods for creating and working with instances of {@link Module}.
*
* @author [email protected] (Jesse Wilson)
* @since 2.0
*/
@CheckReturnValue
public final class Modules {
private Modules() {}
public static final Module EMPTY_MODULE = new EmptyModule();
private static class EmptyModule implements Module {
@Override
public void configure(Binder binder) {}
}
/**
* Returns a builder that creates a module that overlays override modules over the given modules.
* If a key is bound in both sets of modules, only the binding from the override modules is kept.
* If a single {@link PrivateModule} is supplied or all elements are from a single {@link
* PrivateBinder}, then this will overwrite the private bindings. Otherwise, private bindings will
* not be overwritten unless they are exposed. This can be used to replace the bindings of a
* production module with test bindings:
*
* <pre>
* Module functionalTestModule
* = Modules.override(new ProductionModule()).with(new TestModule());
* </pre>
*
* <p>Prefer to write smaller modules that can be reused and tested without overrides.
*
* @param modules the modules whose bindings are open to be overridden
*/
@CheckReturnValue
public static OverriddenModuleBuilder override(Module... modules) {
return override(Arrays.asList(modules));
}
/** @deprecated there's no reason to use {@code Modules.override()} without any arguments. */
@Deprecated
public static OverriddenModuleBuilder override() {
return override(Arrays.asList());
}
/**
* Returns a builder that creates a module that overlays override modules over the given modules.
* If a key is bound in both sets of modules, only the binding from the override modules is kept.
* If a single {@link PrivateModule} is supplied or all elements are from a single {@link
* PrivateBinder}, then this will overwrite the private bindings. Otherwise, private bindings will
* not be overwritten unless they are exposed. This can be used to replace the bindings of a
* production module with test bindings:
*
* <pre>
* Module functionalTestModule
* = Modules.override(getProductionModules()).with(getTestModules());
* </pre>
*
* <p>Prefer to write smaller modules that can be reused and tested without overrides.
*
* @param modules the modules whose bindings are open to be overridden
*/
public static OverriddenModuleBuilder override(Iterable<? extends Module> modules) {
return new RealOverriddenModuleBuilder(modules);
}
/**
* Returns a new module that installs all of {@code modules}.
*
* <p>Although sometimes helpful, this method is rarely necessary. Most Guice APIs accept multiple
* arguments or (like {@code install()}) can be called repeatedly. Where possible, external APIs
* that require a single module should similarly be adapted to permit multiple modules.
*/
@CheckReturnValue
public static Module combine(Module... modules) {
return combine(ImmutableSet.copyOf(modules));
}
/** @deprecated there's no need to "combine" one module; just install it directly. */
@Deprecated
public static Module combine(Module module) {
return module;
}
/** @deprecated this method call is effectively a no-op, just remove it. */
@Deprecated
public static Module combine() {
return EMPTY_MODULE;
}
/**
* Returns a new module that installs all of {@code modules}.
*
* <p>Although sometimes helpful, this method is rarely necessary. Most Guice APIs accept multiple
* arguments or (like {@code install()}) can be called repeatedly. Where possible, external APIs
* that require a single module should similarly be adapted to permit multiple modules.
*/
public static Module combine(Iterable<? extends Module> modules) {
return new CombinedModule(modules);
}
private static class CombinedModule implements Module {
final Set<Module> modulesSet;
CombinedModule(Iterable<? extends Module> modules) {
this.modulesSet = ImmutableSet.copyOf(modules);
}
@Override
public void configure(Binder binder) {
binder = binder.skipSources(getClass());
for (Module module : modulesSet) {
binder.install(module);
}
}
}
/** See the EDSL example at {@link Modules#override(Module[]) override()}. */
public interface OverriddenModuleBuilder {
/** See the EDSL example at {@link Modules#override(Module[]) override()}. */
Module with(Module... overrides);
/** @deprecated there's no reason to use {@code .with()} without any arguments. */
@Deprecated
public Module with();
/** See the EDSL example at {@link Modules#override(Module[]) override()}. */
Module with(Iterable<? extends Module> overrides);
}
private static final class RealOverriddenModuleBuilder implements OverriddenModuleBuilder {
private final ImmutableSet<Module> baseModules;
// TODO(diamondm) checkArgument(!baseModules.isEmpty())?
private RealOverriddenModuleBuilder(Iterable<? extends Module> baseModules) {
this.baseModules = ImmutableSet.copyOf(baseModules);
}
@Override
public Module with(Module... overrides) {
return with(Arrays.asList(overrides));
}
@Override
public Module with() {
return with(Arrays.asList());
}
@Override
public Module with(Iterable<? extends Module> overrides) {
return new OverrideModule(overrides, baseModules);
}
}
static class OverrideModule extends AbstractModule {
private final ImmutableSet<Module> overrides;
private final ImmutableSet<Module> baseModules;
// TODO(diamondm) checkArgument(!overrides.isEmpty())?
OverrideModule(Iterable<? extends Module> overrides, ImmutableSet<Module> baseModules) {
this.overrides = ImmutableSet.copyOf(overrides);
this.baseModules = baseModules;
}
@Override
public void configure() {
Binder baseBinder = binder();
List<Element> baseElements = Elements.getElements(currentStage(), baseModules);
// If the sole element was a PrivateElements, we want to override
// the private elements within that -- so refocus our elements
// and binder.
if (baseElements.size() == 1) {
Element element = Iterables.getOnlyElement(baseElements);
if (element instanceof PrivateElements) {
PrivateElements privateElements = (PrivateElements) element;
PrivateBinder privateBinder =
baseBinder.newPrivateBinder().withSource(privateElements.getSource());
for (Key<?> exposed : privateElements.getExposedKeys()) {
privateBinder.withSource(privateElements.getExposedSource(exposed)).expose(exposed);
}
baseBinder = privateBinder;
baseElements = privateElements.getElements();
}
}
final Binder binder = baseBinder.skipSources(this.getClass());
final ImmutableSet<Element> elements = ImmutableSet.copyOf(baseElements);
final Module scannersModule = extractScanners(elements);
final List<Element> overrideElements =
Elements.getElements(
currentStage(),
ImmutableList.<Module>builder().addAll(overrides).add(scannersModule).build());
final Set<Key<?>> overriddenKeys = Sets.newHashSet();
final Map<Class<? extends Annotation>, ScopeBinding> overridesScopeAnnotations =
Maps.newHashMap();
// execute the overrides module, keeping track of which keys and scopes are bound
new ModuleWriter(binder) {
@Override
public <T> Void visit(Binding<T> binding) {
overriddenKeys.add(binding.getKey());
return super.visit(binding);
}
@Override
public Void visit(ScopeBinding scopeBinding) {
overridesScopeAnnotations.put(scopeBinding.getAnnotationType(), scopeBinding);
return super.visit(scopeBinding);
}
@Override
public Void visit(PrivateElements privateElements) {
overriddenKeys.addAll(privateElements.getExposedKeys());
return super.visit(privateElements);
}
}.writeAll(overrideElements);
// execute the original module, skipping all scopes and overridden keys. We only skip each
// overridden binding once so things still blow up if the module binds the same thing
// multiple times.
final Map<Scope, List<Object>> scopeInstancesInUse = Maps.newHashMap();
final List<ScopeBinding> scopeBindings = Lists.newArrayList();
new ModuleWriter(binder) {
@Override
public <T> Void visit(Binding<T> binding) {
if (!overriddenKeys.remove(binding.getKey())) {
super.visit(binding);
// Record when a scope instance is used in a binding
Scope scope = getScopeInstanceOrNull(binding);
if (scope != null) {
scopeInstancesInUse
.computeIfAbsent(scope, k -> Lists.newArrayList())
.add(binding.getSource());
}
}
return null;
}
void rewrite(Binder binder, PrivateElements privateElements, Set<Key<?>> keysToSkip) {
PrivateBinder privateBinder =
binder.withSource(privateElements.getSource()).newPrivateBinder();
Set<Key<?>> skippedExposes = Sets.newHashSet();
for (Key<?> key : privateElements.getExposedKeys()) {
if (keysToSkip.remove(key)) {
skippedExposes.add(key);
} else {
privateBinder.withSource(privateElements.getExposedSource(key)).expose(key);
}
}
for (Element element : privateElements.getElements()) {
if (element instanceof Binding && skippedExposes.remove(((Binding<?>) element).getKey())) {
continue;
}
if (element instanceof PrivateElements) {
rewrite(privateBinder, (PrivateElements) element, skippedExposes);
continue;
}
element.applyTo(privateBinder);
}
}
@Override
public Void visit(PrivateElements privateElements) {
rewrite(binder, privateElements, overriddenKeys);
return null;
}
@Override
public Void visit(ScopeBinding scopeBinding) {
scopeBindings.add(scopeBinding);
return null;
}
}.writeAll(elements);
// execute the scope bindings, skipping scopes that have been overridden. Any scope that
// is overridden and in active use will prompt an error
new ModuleWriter(binder) {
@Override
public Void visit(ScopeBinding scopeBinding) {
ScopeBinding overrideBinding =
overridesScopeAnnotations.remove(scopeBinding.getAnnotationType());
if (overrideBinding == null) {
super.visit(scopeBinding);
} else {
List<Object> usedSources = scopeInstancesInUse.get(scopeBinding.getScope());
if (usedSources != null) {
@SuppressWarnings("OrphanedFormatString") // passed to format method addError below
StringBuilder sb =
new StringBuilder(
"The scope for @%s is bound directly and cannot be overridden.");
sb.append("%n original binding at " + Errors.convert(scopeBinding.getSource()));
for (Object usedSource : usedSources) {
sb.append("%n bound directly at " + Errors.convert(usedSource) + "");
}
binder
.withSource(overrideBinding.getSource())
.addError(sb.toString(), scopeBinding.getAnnotationType().getSimpleName());
}
}
return null;
}
}.writeAll(scopeBindings);
}
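/** Returns the {@link Scope} instance this binding is scoped to, or null if the binding is unscoped or scoped via an annotation. */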
private Scope getScopeInstanceOrNull(Binding<?> binding) {
return binding.acceptScopingVisitor(
new DefaultBindingScopingVisitor<Scope>() {
@Override
public Scope visitScope(Scope scope) {
return scope;
}
});
}
}
private static class ModuleWriter extends DefaultElementVisitor<Void> {
protected final Binder binder;
ModuleWriter(Binder binder) {
this.binder = binder.skipSources(this.getClass());
}
@Override
protected Void visitOther(Element element) {
element.applyTo(binder);
return null;
}
void writeAll(Iterable<? extends Element> elements) {
for (Element element : elements) {
element.acceptVisitor(this);
}
}
}
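/**
 * Gathers the method-scanner bindings found in {@code elements} and returns a module that
 * re-binds them, so the override modules are evaluated with the same
 * {@code ModuleAnnotatedMethodScanner}s as the base modules.
 */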
private static Module extractScanners(Iterable<Element> elements) {
final List<ModuleAnnotatedMethodScannerBinding> scanners = Lists.newArrayList();
ElementVisitor<Void> visitor =
new DefaultElementVisitor<Void>() {
@Override
public Void visit(ModuleAnnotatedMethodScannerBinding binding) {
scanners.add(binding);
return null;
}
};
for (Element element : elements) {
element.acceptVisitor(visitor);
}
return new AbstractModule() {
@Override
protected void configure() {
for (ModuleAnnotatedMethodScannerBinding scanner : scanners) {
scanner.applyTo(binder());
}
}
};
}
/**
* Returns a module that will configure the injector to require explicit bindings.
*
* @since 4.2.3
*/
public static Module requireExplicitBindingsModule() {
return new RequireExplicitBindingsModule();
}
private static final class RequireExplicitBindingsModule implements Module {
@Override
public void configure(Binder binder) {
binder.requireExplicitBindings();
}
}
/**
* Returns a module that will configure the injector to require {@literal @}{@link com.google.inject.Inject Inject} on
* constructors.
*
* @since 4.2.3
* @see Binder#requireAtInjectOnConstructors
*/
public static Module requireAtInjectOnConstructorsModule() {
return new RequireAtInjectOnConstructorsModule();
}
private static final class RequireAtInjectOnConstructorsModule implements Module {
@Override
public void configure(Binder binder) {
binder.requireAtInjectOnConstructors();
}
}
/**
* Returns a module that will configure the injector to require an exactly matching binding
* annotation.
*
* @since 4.2.3
* @see Binder#requireExactBindingAnnotations
*/
public static Module requireExactBindingAnnotationsModule() {
return new RequireExactBindingAnnotationsModule();
}
private static final class RequireExactBindingAnnotationsModule implements Module {
@Override
public void configure(Binder binder) {
binder.requireExactBindingAnnotations();
}
}
/**
* Returns a module that will configure the injector to disable circular proxies.
*
* @since 4.2.3
*/
public static Module disableCircularProxiesModule() {
return new DisableCircularProxiesModule();
}
private static final class DisableCircularProxiesModule implements Module {
@Override
public void configure(Binder binder) {
binder.disableCircularProxies();
}
}
}
| core/src/com/google/inject/util/Modules.java | /*
* Copyright (C) 2008 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.inject.util;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Iterables;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.collect.Sets;
import com.google.errorprone.annotations.CheckReturnValue;
import com.google.inject.AbstractModule;
import com.google.inject.Binder;
import com.google.inject.Binding;
import com.google.inject.Key;
import com.google.inject.Module;
import com.google.inject.PrivateBinder;
import com.google.inject.PrivateModule;
import com.google.inject.Scope;
import com.google.inject.internal.Errors;
import com.google.inject.spi.DefaultBindingScopingVisitor;
import com.google.inject.spi.DefaultElementVisitor;
import com.google.inject.spi.Element;
import com.google.inject.spi.ElementVisitor;
import com.google.inject.spi.Elements;
import com.google.inject.spi.ModuleAnnotatedMethodScannerBinding;
import com.google.inject.spi.PrivateElements;
import com.google.inject.spi.ScopeBinding;
import java.lang.annotation.Annotation;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.Set;
/**
* Static utility methods for creating and working with instances of {@link Module}.
*
* @author [email protected] (Jesse Wilson)
* @since 2.0
*/
public final class Modules {
private Modules() {}
public static final Module EMPTY_MODULE = new EmptyModule();
private static class EmptyModule implements Module {
@Override
public void configure(Binder binder) {}
}
/**
* Returns a builder that creates a module that overlays override modules over the given modules.
* If a key is bound in both sets of modules, only the binding from the override modules is kept.
* If a single {@link PrivateModule} is supplied or all elements are from a single {@link
* PrivateBinder}, then this will overwrite the private bindings. Otherwise, private bindings will
* not be overwritten unless they are exposed. This can be used to replace the bindings of a
* production module with test bindings:
*
* <pre>
* Module functionalTestModule
* = Modules.override(new ProductionModule()).with(new TestModule());
* </pre>
*
* <p>Prefer to write smaller modules that can be reused and tested without overrides.
*
* @param modules the modules whose bindings are open to be overridden
*/
@CheckReturnValue
public static OverriddenModuleBuilder override(Module... modules) {
return override(Arrays.asList(modules));
}
/** @deprecated there's no reason to use {@code Modules.override()} without any arguments. */
@Deprecated
public static OverriddenModuleBuilder override() {
return override(Arrays.asList());
}
/**
* Returns a builder that creates a module that overlays override modules over the given modules.
* If a key is bound in both sets of modules, only the binding from the override modules is kept.
* If a single {@link PrivateModule} is supplied or all elements are from a single {@link
* PrivateBinder}, then this will overwrite the private bindings. Otherwise, private bindings will
* not be overwritten unless they are exposed. This can be used to replace the bindings of a
* production module with test bindings:
*
* <pre>
* Module functionalTestModule
* = Modules.override(getProductionModules()).with(getTestModules());
* </pre>
*
* <p>Prefer to write smaller modules that can be reused and tested without overrides.
*
* @param modules the modules whose bindings are open to be overridden
*/
public static OverriddenModuleBuilder override(Iterable<? extends Module> modules) {
return new RealOverriddenModuleBuilder(modules);
}
/**
* Returns a new module that installs all of {@code modules}.
*
* <p>Although sometimes helpful, this method is rarely necessary. Most Guice APIs accept multiple
* arguments or (like {@code install()}) can be called repeatedly. Where possible, external APIs
* that require a single module should similarly be adapted to permit multiple modules.
*/
@CheckReturnValue
public static Module combine(Module... modules) {
return combine(ImmutableSet.copyOf(modules));
}
/** @deprecated there's no need to "combine" one module; just install it directly. */
@Deprecated
public static Module combine(Module module) {
return module;
}
/** @deprecated this method call is effectively a no-op, just remove it. */
@Deprecated
public static Module combine() {
return EMPTY_MODULE;
}
/**
* Returns a new module that installs all of {@code modules}.
*
* <p>Although sometimes helpful, this method is rarely necessary. Most Guice APIs accept multiple
* arguments or (like {@code install()}) can be called repeatedly. Where possible, external APIs
* that require a single module should similarly be adapted to permit multiple modules.
*/
public static Module combine(Iterable<? extends Module> modules) {
return new CombinedModule(modules);
}
private static class CombinedModule implements Module {
final Set<Module> modulesSet;
CombinedModule(Iterable<? extends Module> modules) {
this.modulesSet = ImmutableSet.copyOf(modules);
}
@Override
public void configure(Binder binder) {
binder = binder.skipSources(getClass());
for (Module module : modulesSet) {
binder.install(module);
}
}
}
/** See the EDSL example at {@link Modules#override(Module[]) override()}. */
public interface OverriddenModuleBuilder {
/** See the EDSL example at {@link Modules#override(Module[]) override()}. */
@CheckReturnValue
Module with(Module... overrides);
/** @deprecated there's no reason to use {@code .with()} without any arguments. */
@Deprecated
public Module with();
/** See the EDSL example at {@link Modules#override(Module[]) override()}. */
Module with(Iterable<? extends Module> overrides);
}
private static final class RealOverriddenModuleBuilder implements OverriddenModuleBuilder {
private final ImmutableSet<Module> baseModules;
// TODO(diamondm) checkArgument(!baseModules.isEmpty())?
private RealOverriddenModuleBuilder(Iterable<? extends Module> baseModules) {
this.baseModules = ImmutableSet.copyOf(baseModules);
}
@Override
public Module with(Module... overrides) {
return with(Arrays.asList(overrides));
}
@Override
public Module with() {
return with(Arrays.asList());
}
@Override
public Module with(Iterable<? extends Module> overrides) {
return new OverrideModule(overrides, baseModules);
}
}
static class OverrideModule extends AbstractModule {
private final ImmutableSet<Module> overrides;
private final ImmutableSet<Module> baseModules;
// TODO(diamondm) checkArgument(!overrides.isEmpty())?
OverrideModule(Iterable<? extends Module> overrides, ImmutableSet<Module> baseModules) {
this.overrides = ImmutableSet.copyOf(overrides);
this.baseModules = baseModules;
}
@Override
public void configure() {
Binder baseBinder = binder();
List<Element> baseElements = Elements.getElements(currentStage(), baseModules);
// If the sole element was a PrivateElements, we want to override
// the private elements within that -- so refocus our elements
// and binder.
if (baseElements.size() == 1) {
Element element = Iterables.getOnlyElement(baseElements);
if (element instanceof PrivateElements) {
PrivateElements privateElements = (PrivateElements) element;
PrivateBinder privateBinder =
baseBinder.newPrivateBinder().withSource(privateElements.getSource());
for (Key<?> exposed : privateElements.getExposedKeys()) {
privateBinder.withSource(privateElements.getExposedSource(exposed)).expose(exposed);
}
baseBinder = privateBinder;
baseElements = privateElements.getElements();
}
}
final Binder binder = baseBinder.skipSources(this.getClass());
final ImmutableSet<Element> elements = ImmutableSet.copyOf(baseElements);
final Module scannersModule = extractScanners(elements);
final List<Element> overrideElements =
Elements.getElements(
currentStage(),
ImmutableList.<Module>builder().addAll(overrides).add(scannersModule).build());
final Set<Key<?>> overriddenKeys = Sets.newHashSet();
final Map<Class<? extends Annotation>, ScopeBinding> overridesScopeAnnotations =
Maps.newHashMap();
// execute the overrides module, keeping track of which keys and scopes are bound
new ModuleWriter(binder) {
@Override
public <T> Void visit(Binding<T> binding) {
overriddenKeys.add(binding.getKey());
return super.visit(binding);
}
@Override
public Void visit(ScopeBinding scopeBinding) {
overridesScopeAnnotations.put(scopeBinding.getAnnotationType(), scopeBinding);
return super.visit(scopeBinding);
}
@Override
public Void visit(PrivateElements privateElements) {
overriddenKeys.addAll(privateElements.getExposedKeys());
return super.visit(privateElements);
}
}.writeAll(overrideElements);
// execute the original module, skipping all scopes and overridden keys. We only skip each
// overridden binding once so things still blow up if the module binds the same thing
// multiple times.
final Map<Scope, List<Object>> scopeInstancesInUse = Maps.newHashMap();
final List<ScopeBinding> scopeBindings = Lists.newArrayList();
new ModuleWriter(binder) {
@Override
public <T> Void visit(Binding<T> binding) {
if (!overriddenKeys.remove(binding.getKey())) {
super.visit(binding);
// Record when a scope instance is used in a binding
Scope scope = getScopeInstanceOrNull(binding);
if (scope != null) {
scopeInstancesInUse
.computeIfAbsent(scope, k -> Lists.newArrayList())
.add(binding.getSource());
}
}
return null;
}
void rewrite(Binder binder, PrivateElements privateElements, Set<Key<?>> keysToSkip) {
PrivateBinder privateBinder =
binder.withSource(privateElements.getSource()).newPrivateBinder();
Set<Key<?>> skippedExposes = Sets.newHashSet();
for (Key<?> key : privateElements.getExposedKeys()) {
if (keysToSkip.remove(key)) {
skippedExposes.add(key);
} else {
privateBinder.withSource(privateElements.getExposedSource(key)).expose(key);
}
}
for (Element element : privateElements.getElements()) {
if (element instanceof Binding && skippedExposes.remove(((Binding<?>) element).getKey())) {
continue;
}
if (element instanceof PrivateElements) {
rewrite(privateBinder, (PrivateElements) element, skippedExposes);
continue;
}
element.applyTo(privateBinder);
}
}
@Override
public Void visit(PrivateElements privateElements) {
rewrite(binder, privateElements, overriddenKeys);
return null;
}
@Override
public Void visit(ScopeBinding scopeBinding) {
scopeBindings.add(scopeBinding);
return null;
}
}.writeAll(elements);
// execute the scope bindings, skipping scopes that have been overridden. Any scope that
// is overridden and in active use will prompt an error
new ModuleWriter(binder) {
@Override
public Void visit(ScopeBinding scopeBinding) {
ScopeBinding overideBinding =
overridesScopeAnnotations.remove(scopeBinding.getAnnotationType());
if (overideBinding == null) {
super.visit(scopeBinding);
} else {
List<Object> usedSources = scopeInstancesInUse.get(scopeBinding.getScope());
if (usedSources != null) {
@SuppressWarnings("OrphanedFormatString") // passed to format method addError below
StringBuilder sb =
new StringBuilder(
"The scope for @%s is bound directly and cannot be overridden.");
sb.append("%n original binding at " + Errors.convert(scopeBinding.getSource()));
for (Object usedSource : usedSources) {
sb.append("%n bound directly at " + Errors.convert(usedSource) + "");
}
binder
.withSource(overideBinding.getSource())
.addError(sb.toString(), scopeBinding.getAnnotationType().getSimpleName());
}
}
return null;
}
}.writeAll(scopeBindings);
}
private Scope getScopeInstanceOrNull(Binding<?> binding) {
return binding.acceptScopingVisitor(
new DefaultBindingScopingVisitor<Scope>() {
@Override
public Scope visitScope(Scope scope) {
return scope;
}
});
}
}
private static class ModuleWriter extends DefaultElementVisitor<Void> {
protected final Binder binder;
ModuleWriter(Binder binder) {
this.binder = binder.skipSources(this.getClass());
}
@Override
protected Void visitOther(Element element) {
element.applyTo(binder);
return null;
}
void writeAll(Iterable<? extends Element> elements) {
for (Element element : elements) {
element.acceptVisitor(this);
}
}
}
private static Module extractScanners(Iterable<Element> elements) {
final List<ModuleAnnotatedMethodScannerBinding> scanners = Lists.newArrayList();
ElementVisitor<Void> visitor =
new DefaultElementVisitor<Void>() {
@Override
public Void visit(ModuleAnnotatedMethodScannerBinding binding) {
scanners.add(binding);
return null;
}
};
for (Element element : elements) {
element.acceptVisitor(visitor);
}
return new AbstractModule() {
@Override
protected void configure() {
for (ModuleAnnotatedMethodScannerBinding scanner : scanners) {
scanner.applyTo(binder());
}
}
};
}
/**
* Returns a module that will configure the injector to require explicit bindings.
*
* @since 4.2.3
*/
public static Module requireExplicitBindingsModule() {
return new RequireExplicitBindingsModule();
}
private static final class RequireExplicitBindingsModule implements Module {
@Override
public void configure(Binder binder) {
binder.requireExplicitBindings();
}
}
/**
* Returns a module that will configure the injector to require {@literal @}{@link Inject} on
* constructors.
*
* @since 4.2.3
* @see Binder#requireAtInjectOnConstructors
*/
public static Module requireAtInjectOnConstructorsModule() {
return new RequireAtInjectOnConstructorsModule();
}
private static final class RequireAtInjectOnConstructorsModule implements Module {
@Override
public void configure(Binder binder) {
binder.requireAtInjectOnConstructors();
}
}
/**
* Returns a module that will configure the injector to require an exactly matching binding
* annotation.
*
* @since 4.2.3
* @see Binder#requireExactBindingAnnotations
*/
public static Module requireExactBindingAnnotationsModule() {
return new RequireExactBindingAnnotationsModule();
}
private static final class RequireExactBindingAnnotationsModule implements Module {
@Override
public void configure(Binder binder) {
binder.requireExactBindingAnnotations();
}
}
/**
* Returns a module that will configure the injector to disable circular proxies.
*
* @since 4.2.3
*/
public static Module disableCircularProxiesModule() {
return new DisableCircularProxiesModule();
}
private static final class DisableCircularProxiesModule implements Module {
@Override
public void configure(Binder binder) {
binder.disableCircularProxies();
}
}
}
| Apply `@CheckReturnValue` to the rest of `com.google.inject.util.Modules.OverriddenModuleBuilder`.
PiperOrigin-RevId: 429668967
| core/src/com/google/inject/util/Modules.java | Apply `@CheckReturnValue` to the rest of `com.google.inject.util.Modules.OverriddenModuleBuilder`. | <ide><path>core/src/com/google/inject/util/Modules.java
<ide> * @author [email protected] (Jesse Wilson)
<ide> * @since 2.0
<ide> */
<add>@CheckReturnValue
<ide> public final class Modules {
<ide> private Modules() {}
<ide>
<ide> public interface OverriddenModuleBuilder {
<ide>
<ide> /** See the EDSL example at {@link Modules#override(Module[]) override()}. */
<del> @CheckReturnValue
<ide> Module with(Module... overrides);
<ide>
<ide> /** @deprecated there's no reason to use {@code .with()} without any arguments. */ |
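For context, a minimal sketch of the pattern the diff above applies (assumptions: Error Prone's com.google.errorprone.annotations dependency is on the classpath, and the builder type below is illustrative, not the real Guice interface). Placing @CheckReturnValue on the class makes Error Prone flag every caller that discards the return value of any of its methods, which is why the per-method annotation on with() becomes redundant and is deleted:

import com.google.errorprone.annotations.CheckReturnValue;

@CheckReturnValue // class-level: applies to every method declared below
final class OverrideBuilderSketch {
    private OverrideBuilderSketch() {}

    // no method-level annotation needed; the class-level one covers it
    static String with(String overrides) {
        return "overridden:" + overrides;
    }

    public static void main(String[] args) {
        String used = with("moduleA"); // fine: the result is consumed
        System.out.println(used);
        // with("moduleA");            // Error Prone would report [CheckReturnValue] here
    }
}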
|
Java | cc0-1.0 | 064df06df16101070f7dd93c493d47a6417b9478 | 0 | iGoodie/iGoodiesFarm | package igoodie.igoodiesfarm.worldgen;
import igoodie.igoodiesfarm.blocks.GoodiesFarmBlocks;
import java.util.Random;
import net.minecraft.block.Block;
import net.minecraft.block.BlockDirt;
import net.minecraft.block.BlockGrass;
import net.minecraft.init.Blocks;
import net.minecraft.world.World;
import net.minecraft.world.biome.BiomeGenBase;
import net.minecraft.world.chunk.IChunkProvider;
import net.minecraft.world.gen.feature.WorldGenFlowers;
import net.minecraft.world.gen.feature.WorldGenMelon;
import net.minecraft.world.gen.feature.WorldGenMinable;
import net.minecraft.world.gen.feature.WorldGenTrees;
import net.minecraft.world.gen.feature.WorldGenerator;
import net.minecraftforge.common.util.ForgeDirection;
import cpw.mods.fml.common.IWorldGenerator;
public class GenBushes implements IWorldGenerator
{
@Override
public void generate(Random random, int chunkX, int chunkZ, World world, IChunkProvider chunkGenerator, IChunkProvider chunkProvider)
{
int perChunk = 9;
if(chunkX%perChunk==0 && chunkZ%perChunk==0 && world.provider.dimensionId==0)
{
int chunkSize = perChunk*16;
int node = random.nextInt(2)+1;
int x = chunkX*16 + random.nextInt(chunkSize);
int z = chunkZ*16 + random.nextInt(chunkSize);
Block[] bushes = {GoodiesFarmBlocks.blockBlackberryBush, GoodiesFarmBlocks.blockBlueberryBush, GoodiesFarmBlocks.blockRaspberryBush};
Block bush = bushes[random.nextInt(3)];
switch(node)
{
case 1:
int[][] coords1 = {{x,z}, {x+1,z}, {x+2,z}, {x+1,z-1}, {x+1,z+1}};
for(int i=0; i<coords1.length; i++)
{
if(findGround(world, coords1[i][0], coords1[i][1]) != -1)
{
int metaRand = random.nextInt(7);
world.setBlock(coords1[i][0], findGround(world, coords1[i][0], coords1[i][1]), coords1[i][1], bush);
world.setBlockMetadataWithNotify(coords1[i][0], findGround(world, coords1[i][0], coords1[i][1]), coords1[i][1], metaRand, 2);
}
}
break;
case 2:
int[][] coords2 = {{x,z}, {x+1,z-1}, {x+2,z-1}, {x+1,z}, {x+2,z}, {x+3,z}, {x,z+1}, {x+1,z+1}, {x+2,z+1}, {x+3,z+1}, {x+1,z+2}, {x+2,z+2}};
for(int i=0; i<coords2.length; i++)
{
if(findGround(world, coords2[i][0], coords2[i][1]) != -1)
{
int metaRand = random.nextInt(7);
world.setBlock(coords2[i][0], findGround(world, coords2[i][0], coords2[i][1]), coords2[i][1], bush);
world.setBlockMetadataWithNotify(coords2[i][0], findGround(world, coords2[i][0], coords2[i][1]), coords2[i][1], metaRand, 2);
}
}
break;
}
}
}
private int findGround(World world, int x, int z)
{
int returnY = -1;
for(int i=0; i<=world.getActualHeight(); i++)
{
if(world.getBlock(x, i, z) == Blocks.grass)
{
returnY = i+1;
break;
}
}
return returnY;
}
}
| src/main/java/igoodie/igoodiesfarm/worldgen/GenBushes.java | package igoodie.igoodiesfarm.worldgen;
import igoodie.igoodiesfarm.blocks.GoodiesFarmBlocks;
import java.util.Random;
import net.minecraft.block.Block;
import net.minecraft.block.BlockDirt;
import net.minecraft.block.BlockGrass;
import net.minecraft.init.Blocks;
import net.minecraft.world.World;
import net.minecraft.world.biome.BiomeGenBase;
import net.minecraft.world.chunk.IChunkProvider;
import net.minecraft.world.gen.feature.WorldGenFlowers;
import net.minecraft.world.gen.feature.WorldGenMelon;
import net.minecraft.world.gen.feature.WorldGenMinable;
import net.minecraft.world.gen.feature.WorldGenTrees;
import net.minecraft.world.gen.feature.WorldGenerator;
import net.minecraftforge.common.util.ForgeDirection;
import cpw.mods.fml.common.IWorldGenerator;
public class GenBushes implements IWorldGenerator
{
@Override
public void generate(Random random, int chunkX, int chunkZ, World world, IChunkProvider chunkGenerator, IChunkProvider chunkProvider)
{
int perChunk = 9;
if(chunkX%perChunk==0 && chunkZ%perChunk==0 && world.provider.dimensionId==0)
{
int chunkSize = perChunk*16;
int node = random.nextInt(2)+1;
int x = chunkX*16 + random.nextInt(chunkSize);
int z = chunkZ*16 + random.nextInt(chunkSize);
Block[] bushes = {GoodiesFarmBlocks.blockBlackberryBush, GoodiesFarmBlocks.blockBlueberryBush, GoodiesFarmBlocks.blockRaspberryBush};
switch(node)
{
case 1:
int[][] coords1 = {{x,z}, {x+1,z}, {x+2,z}, {x+1,z-1}, {x+1,z+1}};
for(int i=0; i<coords1.length; i++)
{
if(findGround(world, coords1[i][0], coords1[i][1]) != -1)
{
int metaRand = random.nextInt(7);
world.setBlock(coords1[i][0], findGround(world, coords1[i][0], coords1[i][1]), coords1[i][1], bushes[random.nextInt(3)]);
world.setBlockMetadataWithNotify(coords1[i][0], findGround(world, coords1[i][0], coords1[i][1]), coords1[i][1], metaRand, 2);
}
}
break;
case 2:
int[][] coords2 = {{x,z}, {x+1,z-1}, {x+2,z-1}, {x+1,z}, {x+2,z}, {x+3,z}, {x,z+1}, {x+1,z+1}, {x+2,z+1}, {x+3,z+1}, {x+1,z+2}, {x+2,z+2}};
for(int i=0; i<coords2.length; i++)
{
if(findGround(world, coords2[i][0], coords2[i][1]) != -1)
{
int metaRand = random.nextInt(7);
world.setBlock(coords2[i][0], findGround(world, coords2[i][0], coords2[i][1]), coords2[i][1], bushes[random.nextInt(3)]);
world.setBlockMetadataWithNotify(coords2[i][0], findGround(world, coords2[i][0], coords2[i][1]), coords2[i][1], metaRand, 2);
}
}
}
}
}
private int findGround(World world, int x, int z)
{
int returnY = -1;
for(int i=0; i<=world.getActualHeight(); i++)
{
if(world.getBlock(x, i, z) == Blocks.grass)
{
returnY = i+1;
}
}
return returnY;
}
}
| Added forgotten code lines.
| src/main/java/igoodie/igoodiesfarm/worldgen/GenBushes.java | Added forgotten code lines. | <ide><path>src/main/java/igoodie/igoodiesfarm/worldgen/GenBushes.java
<ide> int x = chunkX*16 + random.nextInt(chunkSize);
<ide> int z = chunkZ*16 + random.nextInt(chunkSize);
<ide> Block[] bushes = {GoodiesFarmBlocks.blockBlackberryBush, GoodiesFarmBlocks.blockBlueberryBush, GoodiesFarmBlocks.blockRaspberryBush};
<add> Block bush = bushes[random.nextInt(3)];
<ide>
<ide> switch(node)
<ide> {
<ide> if(findGround(world, coords1[i][0], coords1[i][1]) != -1)
<ide> {
<ide> int metaRand = random.nextInt(7);
<del> world.setBlock(coords1[i][0], findGround(world, coords1[i][0], coords1[i][1]), coords1[i][1], bushes[random.nextInt(3)]);
<add> world.setBlock(coords1[i][0], findGround(world, coords1[i][0], coords1[i][1]), coords1[i][1], bush);
<ide> world.setBlockMetadataWithNotify(coords1[i][0], findGround(world, coords1[i][0], coords1[i][1]), coords1[i][1], metaRand, 2);
<ide> }
<ide> }
<ide> if(findGround(world, coords2[i][0], coords2[i][1]) != -1)
<ide> {
<ide> int metaRand = random.nextInt(7);
<del> world.setBlock(coords2[i][0], findGround(world, coords2[i][0], coords2[i][1]), coords2[i][1], bushes[random.nextInt(3)]);
<add> world.setBlock(coords2[i][0], findGround(world, coords2[i][0], coords2[i][1]), coords2[i][1], bush);
<ide> world.setBlockMetadataWithNotify(coords2[i][0], findGround(world, coords2[i][0], coords2[i][1]), coords2[i][1], metaRand, 2);
<ide> }
<ide> }
<add> break;
<ide> }
<ide>
<ide> }
<ide> if(world.getBlock(x, i, z) == Blocks.grass)
<ide> {
<ide> returnY = i+1;
<add> break;
<ide> }
<ide> }
<ide> return returnY; |
|
JavaScript | agpl-3.0 | 4de48b6c23ecf7430bcb10b0ce91c3dd371cbe58 | 0 | Qbix/Platform,Qbix/Platform,Qbix/Platform,Qbix/Platform | (function (window, Q, $, undefined) {
/**
* @module Assets
*/
var Streams = Q.Streams;
var Assets = Q.Assets;
var Web3 = Assets.NFT.Web3;
var NFT = Assets.NFT;
/**
* YUIDoc description goes here
* @class Assets NFT/preview
* @constructor
* @param {Object} [options] Override various options for this tool
* @param {boolean} [poster] URL of poster image for movie (If movie provided)
* @param {boolean} [movie] Movie URL. If no image defined during NFT creation, this movie will be used instead.
* On NFT/view the movie will display instead image (event if image defined).
* @param {boolean} [src] URL of additional image which will use instead default image.
* @param {string} [options.fallback] Error message need to display in tool as content.
* @param {Q.Event} [options.onInvoke] Event occur when user click on tool element.
* @param {Q.Event} [options.onAvatar] Event occur when click on Users/avatar tool inside tool element.
* @param {Q.Event} [options.onClaim] Event occur when user click on "Claim" button
* @param {Q.Event} [options.onCreated] Event occur when NFT created.
* @param {Q.Event} [options.onRender] Event occur after tool content rendered.
*/
Q.Tool.define("Assets/NFT/preview", function(options) {
var tool = this;
var state = tool.state;
var $toolElement = $(this.element);
tool.preview = Q.Tool.from(this.element, "Streams/preview");
var previewState = Q.getObject("preview.state", tool) || {};
var loggedInUserId = Q.Users.loggedInUserId();
var tokenId = Q.getObject("tokenId", state);
var chainId = Q.getObject("chainId", state);
var contractAddress = Q.getObject("contractAddress", state);
// is admin
var roles = Object.keys(Q.getObject("roles", Q.Users) || {});
tool.isAdmin = (roles.includes('Users/owners') || roles.includes('Users/admins'));
$toolElement.attr("data-admin", tool.isAdmin);
tool.isPublisher = (loggedInUserId && loggedInUserId === previewState.publisherId);
$toolElement.attr("data-publisher", tool.isPublisher);
// is claim
state.secondsLeft = parseInt(state.secondsLeft);
if (state.secondsLeft > 0) {
$toolElement.attr("data-claim", false);
} else if (state.secondsLeft <= 0) {
$toolElement.attr("data-claim", true);
}
if (!Q.isEmpty(previewState)) {
// <set Streams/preview imagepicker settings>
previewState.imagepicker.showSize = state.imagepicker.showSize;
previewState.imagepicker.fullSize = state.imagepicker.fullSize;
previewState.imagepicker.save = state.imagepicker.save;
previewState.imagepicker.useAnySize = true;
previewState.imagepicker.sendOriginal = true;
previewState.imagepicker.saveSizeName = {};
Q.each(NFT.icon.sizes, function (i, size) {
previewState.imagepicker.saveSizeName[size] = size;
});
// </set Streams/preview imagepicker settings>
}
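	// wait until both the stylesheet and the text labels have loaded before choosing a render path below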
var pipe = Q.pipe(["stylesheet", "text"], function (params, subjects) {
// get all data from blockchain and refresh
if (state.metadata) {
if (typeof state.metadata !== "object") {
//throw new Error("metadata is not a valid object");
state.fallback = "metadata is not a valid object";
}
tool.refresh();
} else if (state.tokenURI) {
if (!state.tokenURI.matchTypes('url').length) {
//throw new Error("tokenURI is not a valid URL");
state.fallback = "tokenURI is not a valid URL";
}
tool.refresh();
} else if (tokenId) {
if (!chainId) {
//throw new Error("chain id required");
state.fallback = "chain id required";
}
if (!contractAddress) {
//throw new Error("contract address required");
state.fallback = "contract address required";
}
tool.refresh();
} else if (!Q.isEmpty(previewState) && previewState.streamName) {
$toolElement.attr("data-publisherId", previewState.publisherId);
$toolElement.attr("data-streamName", previewState.streamName);
previewState.onRefresh.add(tool.refresh.bind(tool), tool);
} else if (!Q.isEmpty(previewState)) {
previewState.onComposer.add(tool.composer.bind(tool), tool);
}
});
Q.addStylesheet("{{Assets}}/css/tools/NFT/preview.css", pipe.fill('stylesheet'), { slotName: 'Assets' });
Q.Text.get('Assets/content', function(err, text) {
tool.text = text;
pipe.fill('text')();
}, {
ignoreCache: true
});
},
{ // default options here
useWeb3: true,
metadata: null,
tokenId: null,
tokenURI: null,
chainId: null,
contractAddress: null,
owner: null,
ownerUserId: null,
imagepicker: {
showSize: NFT.icon.defaultSize,
save: "NFT/icon"
},
show: {
avatar: true,
title: true,
description: false,
participants: false,
bidInfo: true
},
templates: {
view: {
name: 'Assets/NFT/view',
fields: {}
}
},
movie: null,
imageSrc: null,
secondsLeft: null,
fallback: null,
onClaim: new Q.Event(),
onInvoke: new Q.Event(),
onAvatar: new Q.Event(),
onCreated: new Q.Event(),
onRender: new Q.Event()
},
{
/**
* Get all data from blockchain and refresh
 * @method refresh
* @param {Streams_Stream} stream
*/
refresh: function (stream) {
if (Streams.isStream(stream)) {
return this.renderFromStream(stream);
}
var tool = this;
var state = this.state;
var $toolElement = $(this.element);
var tokenId = Q.getObject("tokenId", state);
var chainId = Q.getObject("chainId", state);
var contractAddress = Q.getObject("contractAddress", state);
var tokenURI = state.tokenURI;
var metadata = state.metadata;
if (state.fallback) {
return tool.renderFallBack();
}
$toolElement.append('<img src="' + Q.url("{{Q}}/img/throbbers/loading.gif") + '">');
if (metadata) {
return tool.renderFromMetadata({metadata: metadata});
} else if (tokenURI) {
Q.req("Assets/NFT", "fetchMetadata", function (err, response) {
if (err) {
return;
}
var metadata = response.slots.fetchMetadata;
tool.renderFromMetadata({metadata: metadata});
}, {
fields: {
tokenURI: tokenURI
}
});
return;
}
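	// funnel the separate lookups (metadata, author, owner, commission and sale info) through one pipe, so rendering happens once after all of them arrive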
var pipeList = ["metadata", "author", "owner", "commissionInfo", "saleInfo", "authorUserId", "ownerUserId"];
var pipe = new Q.pipe(pipeList, function (params, subjects) {
// collect errors
var errors = [];
Q.each(pipeList, function (index, value) {
var err = Q.getObject([value, 0], params);
err && errors.push(err);
});
if (!Q.isEmpty(errors)) {
return console.warn(errors);
}
tool.renderFromMetadata({
metadata: params.metadata[1],
authorAddress: params.author[1],
ownerAddress: params.owner[1],
commissionInfo: params.commissionInfo[1],
saleInfo: params.saleInfo[1],
authorUserId: params.authorUserId[1] || '',
ownerUserId: params.ownerUserId[1] || ''
});
$toolElement.removeClass("Q_working");
//Users.Web3.onAccountsChanged.set(tool.refresh.bind(tool), tool);
});
if (state.useWeb3) {
Q.handle(Assets.batchFunction(), null, ["NFT", "getInfo", tokenId, chainId, contractAddress, state.updateCache, function (err, metadata) {
state.updateCache = false;
var msg = Q.firstErrorMessage(err, metadata);
if (msg) {
return console.error(msg);
}
var currencyToken = Q.getObject(["saleInfo", 0], this);
var price = Q.getObject(["saleInfo", 1], this);
var priceDecimal = price ? parseInt(price)/1e18 : null;
var isSale = Q.getObject(["saleInfo", 2], this);
pipe.fill("authorUserId")(null, this.authorUserId || "");
pipe.fill("ownerUserId")(null, this.ownerUserId || "");
pipe.fill("metadata")(null, this.metadata || "");
pipe.fill("author")(null, this.author || "");
pipe.fill("owner")(null, this.owner || "");
pipe.fill("commissionInfo")(null, this.commissionInfo || "");
pipe.fill("saleInfo")(null, {
isSale: isSale,
price: price,
priceDecimal: priceDecimal,
currencyToken: currencyToken
});
}]);
// get smart contract just to set contract events to update preview
//Web3.getContract(state.chain);
} else {
if (state.chainId !== Q.getObject("ethereum.chainId", window)) {
return console.warn("Chain id selected is not appropriate to NFT chain id " + state.chainId);
}
// if metadata defined, don't request it
if (state.metadata) {
pipe.fill("metadata")(null, state.metadata);
} else {
Q.handle(Web3.getTokenJSON, tool, [state.tokenId, state.chain, pipe.fill("metadata")]);
}
Q.handle(Web3.getAuthor, tool, [state.tokenId, state.chain, function (err, author) {
if (err) {
return console.warn(err);
}
pipe.fill("author")(arguments[0], arguments[1], arguments[2]);
Q.req("Assets/NFT", "getUserIdByWallet", function (err, response) {
if (err) {
return console.warn(err);
}
pipe.fill("authorUserId")(null, response.slots.getUserIdByWallet);
}, {
fields: { wallet: author }
});
}]);
Q.handle(Web3.getOwner, tool, [state.tokenId, state.chain, function (err, owner) {
if (err) {
return console.warn(err);
}
pipe.fill("owner")(arguments[0], arguments[1], arguments[2]);
Q.req("Assets/NFT", "getUserIdByWallet", function (err, response) {
if (err) {
return console.warn(err);
}
pipe.fill("ownerUserId")(null, response.slots.getUserIdByWallet);
}, {
fields: { wallet: owner }
});
}]);
Q.handle(Web3.commissionInfo, tool, [state.tokenId, state.chain, pipe.fill("commissionInfo")]);
Q.handle(Web3.saleInfo, tool, [state.tokenId, state.chain, pipe.fill("saleInfo")]);
}
},
/**
* Render NFT image
* @method renderImage
* @param {jQuery|Element} $container - image container element
* @param {String} imageUrl
*/
renderImage: function ($container, imageUrl) {
if ($container instanceof Element) {
$container = $($container);
}
$container.empty().html('<img alt="icon" class="NFT_preview_icon" src="' + Q.url(imageUrl) + '">');
},
/**
 * Render NFT video element
 * @method renderVideo
* @param {jQuery|Element} $container - video container element
* @param {String} videoUrl
* @param {String} imageUrl - image that would be a poster of video
*/
renderVideo: function ($container, videoUrl, imageUrl) {
if ($container instanceof Element) {
$container = $($container);
}
var $qVideo = $("<div>").on(Q.Pointer.fastclick, function (e) {
e.preventDefault();
e.stopPropagation();
return false;
});
$container.empty().append($qVideo);
var videoOptions = Q.extend({}, {
url: videoUrl,
image: imageUrl && !imageUrl.includes("/img/empty_white.png") ? imageUrl : ""
});
$qVideo.tool("Q/video", videoOptions).activate();
},
/**
* Render NFT audio element
 * @method renderAudio
 * @param {jQuery|Element} $container - audio container element
* @param {String} audioUrl
*/
renderAudio: function ($container, audioUrl) {
var $qAudio = $("<div>").on(Q.Pointer.fastclick, function (e) {
e.preventDefault();
e.stopPropagation();
return false;
});
$container.empty().append($qAudio);
var audioOptions = Q.extend({}, {
url: audioUrl
});
$qAudio.tool("Q/audio", audioOptions).activate();
},
/**
 * Renders the tool completely from an NFT stream
 * @method renderFromStream
* @param {Streams_Stream} stream - NFT stream
*/
renderFromStream: function (stream) {
var tool = this;
var state = tool.state;
var $toolElement = $(this.element);
tool.stream = stream;
var publisherId = stream.fields.publisherId;
var streamName = stream.fields.name;
tool.minted = stream.getAttribute("tokenId");
$toolElement.attr("data-minted", !!tool.minted);
var templateName = state.templates.view.name;
var templateFields = Q.extend({
show: state.show
}, state.templates.view.fields);
Q.Template.render(templateName, templateFields, function (err, html) {
tool.element.innerHTML = html;
$toolElement.activate();
$(".Assets_NFT_author", tool.element).tool("Users/avatar", {
userId: publisherId,
icon: 50,
contents: true,
editable: false
}).activate(function () {
$(this.element).on(Q.Pointer.fastclick, function (e) {
Q.handle(state.onAvatar, this, [e]);
});
});
$(".Assets_NFT_participants", tool.element).tool("Streams/participants", {
showSummary: false,
showControls: true,
showBlanks: true,
publisherId: publisherId,
streamName: streamName
}).activate();
$(".Assets_NFT_title", tool.element).tool("Streams/inplace", {
editable: false,
field: "title",
inplaceType: "text",
publisherId: publisherId,
streamName: streamName
}, "nft_preview_title_" + tool.stream.fields.name.split("/").pop()).activate();
$(".Assets_NFT_description", tool.element).tool("Streams/inplace", {
editable: false,
field: "content",
inplaceType: "text",
publisherId: publisherId,
streamName: streamName
}, "nft_preview_description_" + tool.stream.fields.name.split("/").pop()).activate();
// apply Streams/preview icon behavior
var videoUrl = state.video || stream.getAttribute("videoUrl");
var audioUrl = state.audio || stream.getAttribute("audioUrl");
var videoProvider = stream.getAttribute("videoProvider");
var videoId = stream.getAttribute("videoId");
var imageUrl = state.image || stream.iconUrl(state.imagepicker.showSize);
var $container = $(".video-container", tool.element);
var $previewIcon = $("img.NFT_preview_icon", tool.element);
if (videoUrl) {
tool.renderVideo($container, videoUrl, imageUrl);
} else if (audioUrl) {
tool.renderAudio($container, audioUrl);
} else if (imageUrl) {
tool.renderImage($container, imageUrl);
} else if (videoId) {
videoUrl = Q.getObject(["video", "cloudUpload", videoProvider, "url"], Q).interpolate({videoId: videoId});
tool.renderVideo($container, videoUrl);
} else {
var overrides = NFT.icon.defaultSize ? {
"overrideShowSize": {
'': (stream.fields.defaultSize || state.defaultSize || NFT.icon.defaultSize)
}
} : {};
tool.preview.icon($previewIcon[0], null, overrides);
}
// set onInvoke event
$toolElement.off(Q.Pointer.fastclick).on(Q.Pointer.fastclick, function () {
Q.handle(state.onInvoke, tool, [publisherId, streamName]);
});
// set onMessage Streams/changed to change image or video or audio
stream.onMessage("Streams/changed").set(function (updatedStream, message) {
tool.renderFromStream(updatedStream);
}, [tool.id, Q.normalize(publisherId), Q.normalize(streamName.split("/").pop())].join("_"));
Q.handle(state.onRender, tool);
});
},
/**
 * Render fallback content when the NFT data is missing or invalid
* @method renderFallBack
*/
renderFallBack: function () {
var tool = this;
var state = tool.state;
var $toolElement = $(this.element);
var templateName = state.templates.view.name;
var templateFields = Q.extend({
show: {
avatar: true,
title: false,
description: false,
participants: false,
bidInfo: false
}
}, state.templates.view.fields);
Q.Template.render(templateName, templateFields, (err, html) => {
tool.element.innerHTML = html;
$(".Assets_NFT_author", tool.element).addClass("Q_error").html(state.fallback);
$(".video-container", tool.element).addClass("fallback").html(JSON.stringify({
tokenURI: state.tokenURI,
tokenId: state.tokenId,
metadata: state.metadata,
owner: state.owner,
ownerUserId: state.ownerUserId,
secondsLeft: state.secondsLeft
}));
// set onInvoke event
$toolElement.off(Q.Pointer.fastclick);
Q.handle(state.onRender, tool);
});
},
/**
* Render preview from metadata object
* @method renderFromMetadata
* @param {Object} params
* @param {object} params.metadata
* @param {String} [params.authorAddress]
* @param {String} [params.ownerAddress]
* @param {object} [params.commissionInfo]
* @param {object} [params.saleInfo]
* @param {string} [params.authorUserId] - id of NFT author user
* @param {string} [params.ownerUserId] - id of NFT owner user
*/
renderFromMetadata: function (params) {
var tool = this;
var state = tool.state;
var $toolElement = $(this.element);
var metadata = Q.getObject("metadata", params);
var authorAddress = Q.getObject("authorAddress", params);
var ownerAddress = Q.getObject("ownerAddress", params);
var commissionInfo = Q.getObject("commissionInfo", params);
var saleInfo = Q.getObject("saleInfo", params);
var authorUserId = Q.getObject("authorUserId", params);
var ownerUserId = Q.getObject("ownerUserId", params);
tool.minted = true;
$toolElement.attr("data-minted", tool.minted);
var templateName = state.templates.view.name;
var templateFields = Q.extend({
title: metadata.name,
description: metadata.description,
show: state.show
}, state.templates.view.fields);
Q.Template.render(templateName, templateFields, (err, html) => {
tool.element.innerHTML = html;
$toolElement.activate();
var $Assets_NFT_author = $(".Assets_NFT_author", tool.element);
if ($Assets_NFT_author.length && authorUserId) {
$Assets_NFT_author.tool("Users/avatar", {
userId: authorUserId,
icon: 50,
contents: true,
editable: false
}).activate(function () {
$(this.element).on(Q.Pointer.fastclick, function (e) {
Q.handle(state.onAvatar, this, [e]);
});
});
} else if ($Assets_NFT_author.length) {
Q.Template.render("Assets/NFT/avatar", {
size: 50,
address: Web3.minimizeAddress(authorAddress, 20, 3)
}, (err, html) => {
$Assets_NFT_author.html(html);
});
}
var $Assets_NFT_owner = $(".Assets_NFT_owner", tool.element);
if ($Assets_NFT_owner.length && ownerUserId) {
$Assets_NFT_owner.tool("Users/avatar", {
userId: ownerUserId,
icon: 80,
contents: true,
editable: false
}).activate(function () {
$(this.element).on(Q.Pointer.fastclick, function (e) {
Q.handle(state.onAvatar, this, [e]);
});
});
} else if ($Assets_NFT_owner.length) {
Q.Template.render("Assets/NFT/avatar", {
size: 80,
address: Web3.minimizeAddress(ownerAddress, 20, 3)
}, (err, html) => {
$Assets_NFT_owner.html(html);
});
}
var videoUrl = state.video || metadata.video || metadata.youtube_url;
var audioUrl = state.audio;
var imageUrl = state.image || metadata.image || null;
if (!imageUrl && metadata.image_data) {
imageUrl = 'data:image/svg+xml;utf8,' + metadata.image_data;
}
var $container = $(".video-container", tool.element);
if (metadata.animation_url) {
$.ajax({
type: "HEAD",
url: metadata.animation_url,
}).done(function(message, text, jqXHR){
var contentType = jqXHR.getResponseHeader('Content-Type');
if (contentType.includes("video")) {
tool.renderVideo($container, metadata.animation_url, imageUrl);
} else if (contentType.includes("audio")) {
tool.renderAudio($container, metadata.animation_url, imageUrl);
}
});
} else if (videoUrl) {
tool.renderVideo($container, videoUrl, imageUrl);
} else if (audioUrl) {
tool.renderAudio($container, audioUrl);
} else if (imageUrl) {
tool.renderImage($container, imageUrl);
}
if (state.secondsLeft > 0) {
$(".Assets_NFT_timeout_tool", tool.element).tool("Q/timestamp", {
time: Date.now()/1000 + state.secondsLeft,
beforeRefresh: function (result, diff) {
if (diff <= 0) {
$toolElement.attr("data-claim", true);
}
}
}).activate();
}
$("button[name=claim]", tool.element).on(Q.Pointer.fastclick, function () {
Q.handle(state.onClaim, tool);
return false;
});
// set onInvoke event
$toolElement.off(Q.Pointer.fastclick).on(Q.Pointer.fastclick, function () {
Q.handle(state.onInvoke, tool, [metadata, authorAddress, ownerAddress, commissionInfo, saleInfo, authorUserId]);
});
var $assetsNFTlocked = $(".Assets_NFT_locked", tool.element);
var holderContractAddress = Q.getObject("holder.contractAddress", state);
var holderPathABI = Q.getObject("holder.pathABI", state);
/*if (holderContractAddress.length) {
$assetsNFTlocked.tool("Assets/NFT/locked", {
tokenId: state.tokenId,
seriesIdSource: {
salesAddress: holderContractAddress,
},
NFTAddress: NFT.Web3.chains[state.chainId],
//abiNFT: TokenSociety.NFT.abiNFT
}).activate();
} else {
$assetsNFTlocked.remove();
}*/
Q.handle(state.onRender, tool);
});
},
/**
* Create NFT
* @method composer
*/
composer: function () {
var tool = this;
var $toolElement = $(this.element);
var previewState = tool.preview.state;
previewState.editable = true; // we need to upload icon
Q.Template.render('Assets/NFT/composer', {}, function(err, html) {
tool.element.innerHTML = html;
// get or create composer stream
Q.req("Assets/NFT", "newItem", function (err, response) {
if (err) {
return;
}
var newItem = response.slots.newItem;
previewState.publisherId = newItem.publisherId;
previewState.streamName = newItem.streamName;
// this is needed for the Streams/related tool, to avoid the composer appearing twice
Q.setObject("options.streams_preview.publisherId", newItem.publisherId, tool.element);
Q.setObject("options.streams_preview.streamName", newItem.streamName, tool.element);
Streams.get(previewState.publisherId, previewState.streamName, function (err) {
if (err) {
return;
}
tool.stream = this;
$toolElement.off(Q.Pointer.fastclick).on(Q.Pointer.fastclick, tool.update.bind(tool));
});
}, {
fields: {
publisherId: previewState.publisherId,
category: previewState.related
}
});
});
},
/**
* Update NFT
* @method update
*/
update: function () {
var tool = this;
var $toolElement = $(this.element);
var state = this.state;
var isNew = $toolElement.hasClass("Streams_preview_composer");
var previewState = this.preview.state;
var publisherId = previewState.publisherId;
var streamName = previewState.streamName;
// need to update tool.stream
// at this stage the stream should already be cached, so Streams.get just reads it from the cache and can be treated as synchronous
Streams.get(publisherId, streamName, function () {
tool.stream = this;
});
Q.Dialogs.push({
title: isNew ? tool.text.NFT.CreateNFT : tool.text.NFT.UpdateNFT,
className: "Assets_NFT_preview_composer",
template: {
name: "Assets/NFT/nftCreate",
fields: {
minted: false,
title: Q.getObject("stream.fields.title", tool) || "",
content: Q.getObject("stream.fields.content", tool) || "",
saveButtonText: isNew ? tool.text.NFT.CreateYourNFT : tool.text.NFT.UpdateNFT
}
},
onActivate: function (dialog) {
var $icon = $("img.NFT_preview_icon", dialog);
var $imageContainer = $icon.closest(".Assets_nft_container");
// create new Streams/preview tool to set icon behavior to $icon element
$("<div>").tool("Streams/preview", Q.extend(previewState, {editable: true})).activate(function () {
this.icon($icon[0], function (element) {
var src = element.src;
if (src.includes("empty_white")) {
$imageContainer.plugin("Q/actions", "remove");
} else {
$imageContainer.plugin("Q/actions", {
actions: {
remove: function () {
Q.confirm(tool.text.NFT.AreYouSureDeleteImage, function(result) {
if (!result) {
return;
}
Q.req("Assets/NFT", ["image"], function (err) {
if (err) {
return;
}
Streams.get.force(publisherId, streamName, function (err) {
if (err) {
return;
}
tool.renderFromStream(this);
});
}, {
method: "delete",
fields: {
publisherId: publisherId,
streamName: streamName
}
});
});
}
}
});
}
});
});
// manage attributes
tool.manageAttributes($(".Assets_nft_attributes", dialog), tool.stream.getAttribute("Assets/NFT/attributes"));
$("button[name=addAttribute]", dialog).on(Q.Pointer.fastclick, function (event) {
event.preventDefault();
tool.manageAttributes($(".Assets_nft_attributes", dialog));
return false;
});
// upload image button
$(".Assets_nft_upload_button", dialog).on(Q.Pointer.fastclick, function (event) {
event.preventDefault();
$icon.trigger("click");
});
var videoTool;
var $videoContainer = $(".Assets_nft_movie", dialog).closest(".Assets_nft_container");
var _updateVideoTool = function (options) {
var videoOptions = Q.extend({}, state.video);
var videoId = tool.stream.getAttribute("videoId");
var videoProvider = tool.stream.getAttribute("videoProvider");
var videoUrl = tool.stream.getAttribute("videoUrl");
if (options) {
videoId = Q.getObject("videoId", options) || videoId;
videoProvider = Q.getObject("videoProvider", options) || videoProvider;
videoUrl = Q.getObject("videoUrl", options) || videoUrl;
}
if (videoUrl) {
videoOptions.url = Q.url(videoUrl);
} else if (videoId && videoProvider) {
videoOptions.url = Q.getObject(["video", "cloudUpload", videoProvider, "url"], Q).interpolate({videoId: videoId})
}
var $element = $(".Assets_nft_movie", dialog);
if (Q.Tool.from($element, "Q/video")) {
var $newElement = $("<div class='Assets_nft_movie'></div>").insertAfter($element);
Q.Tool.remove($element, true, true);
$element = $newElement;
}
$videoContainer.plugin("Q/actions", "remove");
if (!videoOptions.url) {
return $videoContainer.removeClass("NFT_preview_loading");
}
$element.tool("Q/video", videoOptions).activate(function () {
videoTool = this;
$videoContainer.plugin("Q/actions", {
actions: {
remove: function () {
Q.confirm(tool.text.NFT.AreYouSureDeleteVideo, function(result) {
if (!result) {
return;
}
Q.req("Assets/NFT", ["video"], function (err) {
if (err) {
return;
}
Streams.get.force(publisherId, streamName, function (err) {
if (err) {
return;
}
tool.renderFromStream(this);
_updateVideoTool();
});
}, {
method: "delete",
fields: {
publisherId: publisherId,
streamName: streamName
}
});
});
}
}
});
$videoContainer.removeClass("NFT_preview_loading");
});
};
_updateVideoTool();
// set video Url
var $inputUrl = $("input[name=movieUrl]", dialog);
$inputUrl.on("change", function () {
if (!this.value.matchTypes('url', {requireScheme: false}).length) {
return _updateVideoTool();
}
_updateVideoTool({
videoId: null,
videoUrl: this.value
});
});
// upload video
$("input[name=movieUpload]", dialog).on("change", function () {
var file = this.files[0];
if (!file) {
return;
}
var reader = new FileReader();
$videoContainer.addClass("NFT_preview_loading");
reader.readAsDataURL(file);
reader.onload = function () {
Q.req(Q.action("Streams/stream"), 'data',function (err, res) {
var msg = Q.firstErrorMessage(err) || Q.firstErrorMessage(res && res.errors);
if (msg) {
$videoContainer.removeClass("NFT_preview_loading");
return Q.handle([state.onError, state.onFinish], tool, [msg]);
}
Streams.get.force(publisherId, streamName, function () {
tool.stream = this;
_updateVideoTool();
$inputUrl.val("");
});
}, {
fields: {
file: {
name: file.name,
data: reader.result,
subpath: publisherId.splitId() + "/" + streamName + "/video"
},
publisherId: publisherId,
streamName: streamName
},
timeout: 100000,
method: 'put'
});
};
reader.onerror = function (error) {
console.log('Error: ', error);
$videoContainer.removeClass("NFT_preview_loading");
};
this.value = null;
});
// create NFT
$("button[name=save]", dialog).on(Q.Pointer.fastclick, function (event) {
event.preventDefault();
$(dialog).addClass("Q_disabled");
// set WEB3_CONNECT_MODAL_ID element z-index
var modalLimit = 5000;
var modalPeriod = 500;
var modalCounter = 0;
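// poll briefly for the web3 connect modal and, once it appears, raise it above the currently open dialog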
tool.modalExist = setInterval(function() {
modalCounter += modalPeriod;
if (modalCounter >= modalLimit) {
clearInterval(tool.modalExist);
}
var $modal = $("#WEB3_CONNECT_MODAL_ID");
if (!$modal.length) {
return;
}
clearInterval(tool.modalExist);
var modalZIndex = $(".Q_overlay_open:visible").css("z-index");
if (!modalZIndex) {
return;
}
modalZIndex = parseInt(modalZIndex) + 1;
$(".web3modal-modal-lightbox", $modal).css("z-index", modalZIndex);
}, modalPeriod);
var attributes = {
"Assets/NFT/attributes": tool.collectAttributes(dialog)
};
if ($inputUrl.val()) {
attributes["videoUrl"] = $inputUrl.val();
}
//if (!tool.minted) {
Q.req("Assets/NFT", ["NFTStream"],function (err, response) {
Q.Dialogs.pop();
if (err) {
return Q.alert(Q.firstErrorMessage(err));
}
var streamData = response.slots.NFTStream;
Q.handle(state.onCreated, tool, [streamData]);
}, {
method: isNew ? "post" : "put",
fields: {
publisherId: publisherId,
streamName: streamName,
title: $("input[name=title]", dialog).val(),
content: $("input[name=description]", dialog).val(),
attributes: attributes,
category: previewState.related
}
});
return;
//}
var price = parseFloat($("input[name=price]", dialog).val());
var royalty = parseFloat($("input[name=royalty]", dialog).val()); // read the royalty input declared in the mint template
var $onMarketPlace = $(".Assets_nft_check", dialog);
var onMarketPlace = $onMarketPlace.prop("checked");
var chainId = $("select[name=chain]", dialog).val();
var currencySymbol = $("select[name=currency]", dialog).val();
var chain = NFT.Web3.chains[chainId];
var currency = {};
Q.each(NFT.currencies, function (i, c) {
if (c.symbol !== currencySymbol) {
return;
}
currency = c;
currency.token = c[chainId];
});
// method to create NFT stream after tokenId created
var _reqCreateNFT = function (params) {
var tokenId = Q.getObject("tokenId", params);
var chainId = Q.getObject("chainId", params);
var attributes = Q.extend({
onMarketPlace: onMarketPlace,
currency: $("select[name=currency] option:selected", dialog).text(),
price: price
}, params);
if (tokenId) {
attributes.tokenId = tokenId;
}
if (chainId) {
attributes.chainId = chainId;
}
// after the token is created, create the NFT stream (actually update the composer stream and change its relation from "new" to "NFT"),
// and set tokenId, chainId, currency and royalty in the attributes
Q.req("Assets/NFT",function (err) {
Q.Dialogs.pop();
if (err) {
return Q.alert(Q.firstErrorMessage(err));
}
Q.Tool.remove(tool.element, true, false);
tool.element.className = "";
tool.element.innerHTML = "";
$toolElement.tool("Assets/NFT/preview", {
tokenId: tokenId,
chainId: chainId
}).activate();
Q.handle(state.onCreated, tool, [tokenId, chainId]);
}, {
method: "post",
fields: {
userId: publisherId,
title: $("input[name=title]", dialog).val(),
content: $("input[name=description]", dialog).val(),
attributes: attributes
}
});
};
if (onMarketPlace) {
// create token for NFT
tool.createToken(price, currency, chain, royalty, onMarketPlace, function (err, tokenId, chainId) {
if (err) {
return $(dialog).removeClass("Q_disabled");
}
Q.Dialogs.pop();
// now that the tokenId is created, create the NFT stream
_reqCreateNFT({
"tokenId": tokenId,
"chainId": chainId
});
});
} else {
_reqCreateNFT();
Q.Dialogs.pop();
}
});
}
});
},
/**
* Create attributes list
* @method manageAttributes
 * @param {Element|jQuery} element - the element to replace with the attributes manager
* @param {Object} attributes - object with defined attributes
*/
manageAttributes: function (element, attributes) {
attributes = Q.isEmpty(attributes) ? [{}] : attributes;
var $element = element instanceof Element ? $(element) : element;
var tool = this;
var previewState = tool.preview.state;
var publisherId = previewState.publisherId;
var streamName = previewState.streamName;
// get default attributes from server
Q.req("Assets/NFT", "attributes", function (err, response) {
var fem = Q.firstErrorMessage(err, response);
if (fem) {
return console.error(fem);
}
var defaultAttributes = response.slots.attributes;
// merge existing attributes with the defaults
Q.each(attributes, function () {
var attribute = this;
if (!attribute.display_type) {
return;
}
if (Q.isEmpty(defaultAttributes[attribute.display_type])) {
defaultAttributes[attribute.display_type] = {};
}
if (Q.isEmpty(defaultAttributes[attribute.display_type][attribute.trait_type])) {
defaultAttributes[attribute.display_type][attribute.trait_type] = [];
}
var arr = defaultAttributes[attribute.display_type][attribute.trait_type];
if (!arr.includes(attribute.value)) {
arr.push(attribute.value);
}
});
Q.each(attributes, function () {
var attribute = this;
Q.Template.render("Assets/NFT/manage/attributes", {
attributes: defaultAttributes,
}, function (err, html) {
var $html = $(html);
$element.append($html);
var $displayType = $("select[name=display_type]", $html);
$displayType.val(Q.getObject("display_type", attribute));
var $traitType = $("select[name=trait_type]", $html);
var $traitType_ = $("option[value=_]", $traitType);
var $value = $("select[name=value]", $html);
$value.val(Q.getObject("value", attribute));
var $value_ = $("option[value=_]", $value);
$(".basic32_remove", $html).on(Q.Pointer.fastclick, function () { $html.remove(); });
var _addItem = function () {
var $this = this instanceof Element ? $(this) : this;
$this.val("");
var titleKey = null;
var $lastOption = $("option[value='_']", $this);
switch ($this.prop("name")) {
case "trait_type":
titleKey = "AddTraitTitle";
break;
case "value":
titleKey = "AddValueTitle";
break;
}
Q.prompt(null, function (title) {
if (!title) {
return;
}
$('<option data-type="attr">' + title + '</option>').insertBefore($lastOption);
$this.val(title).trigger("change");
var displayType = $displayType.val();
var traitType = $traitType.val();
var value = $value.val();
// don't send POST request if some value empty
if (!displayType || !traitType || !value) {
return;
}
Q.req("Assets/NFT", ["attrUpdate"], function (err, response) {
var fem = Q.firstErrorMessage(err, response);
if (fem) {
return Q.alert(fem);
}
if (!response.slots.attrUpdate) {
return;
}
Q.setObject([displayType, traitType], value, defaultAttributes);
}, {
method: "put",
fields: {
publisherId: publisherId,
streamName: streamName,
display_type: displayType,
trait_type: traitType,
value: value
}
});
}, {
placeholder: tool.text.NFT.attributes[titleKey] //AddDisplayType
});
};
$value.on("change", function () {
var val = $value.val();
if (val === "_") {
return Q.handle(_addItem, $value);
}
});
$traitType.on("change", function () {
var dtVal = $displayType.val();
var ttVal = $traitType.val();
if (!ttVal) {
return $value.prop("disabled", true);
} else if (ttVal === "_") {
return Q.handle(_addItem, $traitType);
}
$value.prop("disabled", false);
$("[data-type=attr]", $value).remove();
Q.each(Q.getObject([dtVal, "data", ttVal], defaultAttributes) || [], function (index, value) {
$('<option data-type="attr">' + value + '</option>').insertBefore($value_);
});
var value = Q.getObject("value", attribute);
value && $value.val(value);
});
$displayType.on("change", function () {
var dtVal = $displayType.val();
if (!dtVal) {
$traitType.prop("disabled", true);
$value.prop("disabled", true);
return;
}
$traitType.prop("disabled", false);
$("[data-type=attr]", $traitType).remove();
Q.each(Q.getObject([dtVal, "data"], defaultAttributes) || [], function (index, value) {
$('<option data-type="attr">' + index + '</option>').insertBefore($traitType_);
});
var traitType = Q.getObject("trait_type", attribute);
traitType && $traitType.val(traitType);
$traitType.trigger("change");
}).trigger("change");
});
});
}, {
fields: {
publisherId: publisherId
}
});
},
/**
* Collect attributes under some element
* @method collectAttributes
 * @param {Element|jQuery} element
*/
collectAttributes: function (element) {
// collect NFT attributes
var assetsNFTAttributes = [];
$(".Assets_NFT_attribute", element).each(function () {
var displayType = $("select[name=display_type]", this).val();
var traitType = $("select[name=trait_type]", this).val();
var value = $("select[name=value]", this).val();
var attribute = {};
if (displayType && displayType !== '_') {
attribute.display_type = displayType;
}
if (traitType && traitType !== '_') {
attribute.trait_type = traitType;
}
if (value && value !== '_') {
attribute.value = value;
}
if (!Q.isEmpty(attribute)) {
assetsNFTAttributes.push(attribute);
}
});
return assetsNFTAttributes;
}
});
Q.Template.set('Assets/NFT/composer',
`<div class="title-block Assets_create_titles">
<div class="video-container Assets_create_video">
<h4>{{NFT.CreateNFT}}</h4>
</div>
<div class="Assets_create_video_footer"></div>
</div>`,
{text: ['Assets/content']}
);
Q.Template.set('Assets/NFT/nftCreate',
`<div class="Assets_nft" data-minted="{{minted}}">
<form>
<div class="Assets_nft_form_group">
<input type="text" name="title" value="{{title}}" class="Assets_nft_form_control" placeholder="{{NFT.TitlePlaceholder}}">
</div>
<div class="Assets_nft_form_group">
<input type="text" name="description" value="{{content}}" class="Assets_nft_form_control" placeholder="{{NFT.DescribeYourNFT}}">
</div>
<div class="Assets_nft_form_group" data-type="nft_attributes">
<label>{{NFT.attributes.Title}}:</label>
<div class="Assets_nft_attributes"></div>
<button class="Q_button" name="addAttribute">{{NFT.attributes.NewAttribute}}</button>
</div>
<div class="Assets_nft_form_group">
<label>{{NFT.NftPicture}}:</label>
<div class="Assets_nft_container">
<img class="NFT_preview_icon">
<button class="Assets_nft_upload_button">{{NFT.UploadFile}}</button>
</div>
</div>
<div class="Assets_nft_form_group">
<label>{{NFT.NftMovie}}:</label>
<div class="Assets_nft_container">
<input name="movieUrl" placeholder="{{NFT.MovieURL}}"> <label>{{NFT.UploadMovie}}<input type="file" style="display: none;" name="movieUpload"></label>
<div class="Assets_nft_movie"></div>
</div>
</div>
<button class="Q_button" name="save">{{saveButtonText}}</button>
</form>
</div>`,
{text: ['Assets/content']});
Q.Template.set('Assets/NFT/mint', `
<div class="Assets_nft_market">
<div><label>{{NFT.PutOnMarketplace}} :</label></div>
<label class="switch">
<input type="checkbox" {{#if onMarketPlace}}checked{{/if}} class="Assets_nft_check">
<span class="slider round"></span>
</label>
</div>
<div class="Assets_nft_form_details" data-active="{{onMarketPlace}}">
<div class="Assets_nft_form_group">
<div class="Assets_price">
<input type="text" name="price" class="Assets_nft_form_control" placeholder="{{NFT.EnterPrice}}">
<select name="currency">
{{#each currencies}}
<option>{{this}}</option>
{{/each}}
</select>
{{currency}}
</div>
</div>
<div class="Assets_nft_form_group Assets_nft_royalties">
<div class="Assets_royality">
<input type="number" name="royalty" class="Assets_nft_form_control" placeholder="{{NFT.RoyaltyPlaceholder}}">%
</div>
</div>
<button class="Q_button" name="save">{{NFT.MintNFT}}</button>
</div>
`, {text: ['Assets/content']});
Q.Template.set('Assets/NFT/manage/attributes',
`<div class="Assets_NFT_attribute">
<select name='display_type'><option value="">{{NFT.attributes.DisplayTitle}}</option>` +
'{{#each attributes}}' +
'<option value="{{@key}}">{{this.name}}</option>' +
'{{/each}}' +
`</select>
<select name='trait_type'><option value="">{{NFT.attributes.TraitTitle}}</option><option value="_">{{NFT.attributes.NewTrait}}</option></select>
<select name='value'><option value="">{{NFT.attributes.ValueTitle}}</option><option value="_">{{NFT.attributes.NewValue}}</option></select>
<div class="basic32 basic32_remove"></div>
</div>`,
{text: ['Assets/content']}
);
Q.Template.set('Assets/NFT/view',
`<div class="title-block">
{{#if show.avatar}}
<div class="title_block_header">
<div class="Assets_NFT_author"></div>
</div>
{{/if}}
<div class="video-container"><img class="NFT_preview_icon"></div>
{{#if show.title}}
<div class="Assets_NFT_title">{{title}}</div>
{{/if}}
{{#if show.description}}
<div class="Assets_NFT_description">{{description}}</div>
{{/if}}
{{#if show.participants}}
<div class="Assets_NFT_participants"></div>
{{/if}}
{{#if show.bidInfo}}
<ul class="bid-info">
<li class="Assets_NFT_price">
<p><span class="Assets_NFT_price_value">{{price}}</span> {{currency.symbol}}</p>
<span class="Assets_NFT_comingsoon">Coming Soon</span>
</li>
<li class="action-block">
<button name="buy" class="Q_button">{{NFT.Buy}}</button>
<button name="soldOut" class="Q_button">{{NFT.NotOnSale}}</button>
<button name="update" class="Q_button">{{NFT.Actions}}</button>
<button name="claim" class="Q_button">{{NFT.Claim}}</button>
<div class="Assets_NFT_locked"></div>
</li>
</ul>
<div class="Assets_NFT_claim_timeout"><span>{{NFT.Unlocking}}</span> <span class="Assets_NFT_timeout_tool"></span></div>
{{/if}}
</div>`,
{text: ['Assets/content']}
);
Q.Template.set('Assets/NFT/role',
`<div class="Assets_NFT_role">
<div class="video-container"><img class="NFT_preview_icon"></div>
<div class="Assets_NFT_owner"></div>
{{#if show.title}}
<div class="Assets_NFT_title">{{title}}</div>
{{/if}}
{{#if show.participants}}
<div class="Assets_NFT_participants"></div>
{{/if}}
</div>`,
{text: ['Assets/content']}
);
Q.Template.set('Assets/NFT/avatar',
`<img src="{{baseUrl}}/Q/plugins/Users/img/icons/default/{{size}}.png" class="Users_avatar_icon Users_avatar_icon_{{size}}">
<span class="Users_avatar_name">{{address}}</span>`,
{text: ['Assets/content']}
);
})(window, Q, jQuery); | platform/plugins/Assets/web/js/tools/NFT/preview.js | (function (window, Q, $, undefined) {
/**
* @module Assets
*/
var Streams = Q.Streams;
var Assets = Q.Assets;
var Web3 = Assets.NFT.Web3;
var NFT = Assets.NFT;
/**
 * Renders a preview of an NFT, either from a Streams stream or directly from token metadata on a blockchain
* @class Assets NFT/preview
* @constructor
* @param {Object} [options] Override various options for this tool
 * @param {string} [options.poster] URL of a poster image for the movie (if a movie is provided)
 * @param {string} [options.movie] Movie URL. If no image was defined during NFT creation, this movie will be used instead.
 * On NFT/view the movie is displayed instead of the image (even if an image is defined).
 * @param {string} [options.src] URL of an additional image to use instead of the default image.
 * @param {string} [options.fallback] Error message to display in the tool as its content.
 * @param {Q.Event} [options.onInvoke] Event occurring when the user clicks on the tool element.
 * @param {Q.Event} [options.onAvatar] Event occurring when the user clicks on the Users/avatar tool inside the tool element.
 * @param {Q.Event} [options.onClaim] Event occurring when the user clicks on the "Claim" button.
 * @param {Q.Event} [options.onCreated] Event occurring when the NFT is created.
 * @param {Q.Event} [options.onRender] Event occurring after the tool content is rendered.
*/
Q.Tool.define("Assets/NFT/preview", function(options) {
var tool = this;
var state = tool.state;
var $toolElement = $(this.element);
tool.preview = Q.Tool.from(this.element, "Streams/preview");
var previewState = Q.getObject("preview.state", tool) || {};
var loggedInUserId = Q.Users.loggedInUserId();
var tokenId = Q.getObject("token.id", state);
var chainId = Q.getObject("token.chainId", state);
var contractAddress = Q.getObject("token.contractAddress", state);
// is admin
var roles = Object.keys(Q.getObject("roles", Q.Users) || {});
tool.isAdmin = (roles.includes('Users/owners') || roles.includes('Users/admins'));
$toolElement.attr("data-admin", tool.isAdmin);
tool.isPublisher = (loggedInUserId && loggedInUserId === previewState.publisherId);
$toolElement.attr("data-publisher", tool.isPublisher);
// is claim
state.secondsLeft = parseInt(state.secondsLeft);
if (state.secondsLeft > 0) {
$toolElement.attr("data-claim", false);
} else if (state.secondsLeft <= 0) {
$toolElement.attr("data-claim", true);
}
if (!Q.isEmpty(previewState)) {
// <set Streams/preview imagepicker settings>
previewState.imagepicker.showSize = state.imagepicker.showSize;
previewState.imagepicker.fullSize = state.imagepicker.fullSize;
previewState.imagepicker.save = state.imagepicker.save;
previewState.imagepicker.useAnySize = true;
previewState.imagepicker.sendOriginal = true;
previewState.imagepicker.saveSizeName = {};
Q.each(NFT.icon.sizes, function (i, size) {
previewState.imagepicker.saveSizeName[size] = size;
});
// </set Streams/preview imagepicker settings>
}
var pipe = Q.pipe(["stylesheet", "text"], function (params, subjects) {
// get all data from blockchain and refresh
if (state.metadata) {
if (typeof state.metadata !== "object") {
//throw new Error("metadata is not a valid object");
state.fallback = "metadata is not a valid object";
}
tool.refresh();
} else if (state.tokenURI) {
if (!state.tokenURI.matchTypes('url').length) {
//throw new Error("tokenURI is not a valid URL");
state.fallback = "tokenURI is not a valid URL";
}
tool.refresh();
} else if (tokenId) {
if (!chainId) {
//throw new Error("chain id required");
state.fallback = "chain id required";
}
if (!contractAddress) {
//throw new Error("contract address required");
state.fallback = "contract address required";
}
tool.refresh();
} else if (!Q.isEmpty(previewState) && previewState.streamName) {
$toolElement.attr("data-publisherId", previewState.publisherId);
$toolElement.attr("data-streamName", previewState.streamName);
previewState.onRefresh.add(tool.refresh.bind(tool), tool);
} else if (!Q.isEmpty(previewState)) {
previewState.onComposer.add(tool.composer.bind(tool), tool);
}
});
Q.addStylesheet("{{Assets}}/css/tools/NFT/preview.css", pipe.fill('stylesheet'), { slotName: 'Assets' });
Q.Text.get('Assets/content', function(err, text) {
tool.text = text;
pipe.fill('text')();
}, {
ignoreCache: true
});
},
{ // default options here
useWeb3: true,
metadata: null,
tokenURI: null,
token: {
id: null,
contractAddress: null,
chainId: null
},
imagepicker: {
showSize: NFT.icon.defaultSize,
save: "NFT/icon"
},
show: {
avatar: true,
title: true,
description: false,
participants: false,
bidInfo: true
},
templates: {
view: {
name: 'Assets/NFT/view',
fields: {}
}
},
movie: null,
imageSrc: null,
secondsLeft: null,
fallback: null,
onClaim: new Q.Event(),
onInvoke: new Q.Event(),
onAvatar: new Q.Event(),
onCreated: new Q.Event(),
onRender: new Q.Event()
},
{
/**
* Get all data from blockchain and refresh
 * @method refresh
* @param {Streams_Stream} stream
*/
refresh: function (stream) {
if (Streams.isStream(stream)) {
return this.renderFromStream(stream);
}
var tool = this;
var state = this.state;
var $toolElement = $(this.element);
var tokenId = Q.getObject("token.id", state);
var chainId = Q.getObject("token.chainId", state);
var contractAddress = Q.getObject("token.contractAddress", state);
var tokenURI = state.tokenURI;
var metadata = state.metadata;
if (state.fallback) {
return tool.renderFallBack();
}
$toolElement.append('<img src="' + Q.url("{{Q}}/img/throbbers/loading.gif") + '">');
if (metadata) {
return tool.renderFromMetadata({metadata: metadata});
} else if (tokenURI) {
Q.req("Assets/NFT", "fetchMetadata", function (err, response) {
if (err) {
return;
}
var metadata = response.slots.fetchMetadata;
tool.renderFromMetadata({metadata: metadata});
}, {
fields: {
tokenURI: tokenURI
}
});
return;
}
var pipeList = ["metadata", "author", "owner", "commissionInfo", "saleInfo", "authorUserId", "ownerUserId"];
var pipe = new Q.pipe(pipeList, function (params, subjects) {
// collect errors
var errors = [];
Q.each(pipeList, function (index, value) {
var err = Q.getObject([value, 0], params);
err && errors.push(err);
});
if (!Q.isEmpty(errors)) {
return console.warn(errors);
}
tool.renderFromMetadata({
metadata: params.metadata[1],
authorAddress: params.author[1],
ownerAddress: params.owner[1],
commissionInfo: params.commissionInfo[1],
saleInfo: params.saleInfo[1],
authorUserId: params.authorUserId[1] || '',
ownerUserId: params.ownerUserId[1] || ''
});
$toolElement.removeClass("Q_working");
//Users.Web3.onAccountsChanged.set(tool.refresh.bind(tool), tool);
});
if (state.useWeb3) {
Q.handle(Assets.batchFunction(), null, ["NFT", "getInfo", tokenId, chainId, contractAddress, state.updateCache, function (err, metadata) {
state.updateCache = false;
var msg = Q.firstErrorMessage(err, metadata);
if (msg) {
return console.error(msg);
}
var currencyToken = Q.getObject(["saleInfo", 0], this);
var price = Q.getObject(["saleInfo", 1], this);
var priceDecimal = price ? parseInt(price)/1e18 : null;
var isSale = Q.getObject(["saleInfo", 2], this);
pipe.fill("authorUserId")(null, this.authorUserId || "");
pipe.fill("ownerUserId")(null, this.ownerUserId || "");
pipe.fill("metadata")(null, this.metadata || "");
pipe.fill("author")(null, this.author || "");
pipe.fill("owner")(null, this.owner || "");
pipe.fill("commissionInfo")(null, this.commissionInfo || "");
pipe.fill("saleInfo")(null, {
isSale: isSale,
price: price,
priceDecimal: priceDecimal,
currencyToken: currencyToken
});
}]);
// get smart contract just to set contract events to update preview
//Web3.getContract(state.chain);
} else {
if (state.chainId !== Q.getObject("ethereum.chainId", window)) {
return console.warn("Chain id selected is not appropriate to NFT chain id " + state.chainId);
}
// if metadata defined, don't request it
if (state.metadata) {
pipe.fill("metadata")(null, state.metadata);
} else {
Q.handle(Web3.getTokenJSON, tool, [state.tokenId, state.chain, pipe.fill("metadata")]);
}
Q.handle(Web3.getAuthor, tool, [state.tokenId, state.chain, function (err, author) {
if (err) {
return console.warn(err);
}
pipe.fill("author")(arguments[0], arguments[1], arguments[2]);
Q.req("Assets/NFT", "getUserIdByWallet", function (err, response) {
if (err) {
return console.warn(err);
}
pipe.fill("authorUserId")(null, response.slots.getUserIdByWallet);
}, {
fields: { wallet: author }
});
}]);
Q.handle(Web3.getOwner, tool, [state.tokenId, state.chain, function (err, owner) {
if (err) {
return console.warn(err);
}
pipe.fill("owner")(arguments[0], arguments[1], arguments[2]);
Q.req("Assets/NFT", "getUserIdByWallet", function (err, response) {
if (err) {
return console.warn(err);
}
pipe.fill("ownerUserId")(null, response.slots.getUserIdByWallet);
}, {
fields: { wallet: owner }
});
}]);
Q.handle(Web3.commissionInfo, tool, [state.tokenId, state.chain, pipe.fill("commissionInfo")]);
Q.handle(Web3.saleInfo, tool, [state.tokenId, state.chain, pipe.fill("saleInfo")]);
}
},
/**
* Render NFT image
* @method renderImage
* @param {jQuery|Element} $container - image container element
* @param {String} imageUrl
*/
renderImage: function ($container, imageUrl) {
if ($container instanceof Element) {
$container = $($container);
}
$container.empty().html('<img alt="icon" class="NFT_preview_icon" src="' + Q.url(imageUrl) + '">');
},
/**
* Render NFT video
* @method renderVideo
* @param {jQuery|Element} $container - video container element
* @param {String} videoUrl
* @param {String} [imageUrl] - image used as the video poster
*/
renderVideo: function ($container, videoUrl, imageUrl) {
if ($container instanceof Element) {
$container = $($container);
}
var $qVideo = $("<div>").on(Q.Pointer.fastclick, function (e) {
e.preventDefault();
e.stopPropagation();
return false;
});
$container.empty().append($qVideo);
var videoOptions = Q.extend({}, {
url: videoUrl,
image: imageUrl && !imageUrl.includes("/img/empty_white.png") ? imageUrl : ""
});
$qVideo.tool("Q/video", videoOptions).activate();
},
/**
* Render NFT audio element
* @method renderAudio
* @param {jQuery|Element} $container - audio container element
* @param {String} audioUrl
*/
renderAudio: function ($container, audioUrl) {
var $qAudio = $("<div>").on(Q.Pointer.fastclick, function (e) {
e.preventDefault();
e.stopPropagation();
return false;
});
$container.empty().append($qAudio);
var audioOptions = Q.extend({}, {
url: audioUrl
});
$qAudio.tool("Q/audio", audioOptions).activate();
},
/**
* Render the tool from an NFT stream
* @method renderFromStream
* @param {Streams_Stream} stream - NFT stream
*/
renderFromStream: function (stream) {
var tool = this;
var state = tool.state;
var $toolElement = $(this.element);
tool.stream = stream;
var publisherId = stream.fields.publisherId;
var streamName = stream.fields.name;
tool.minted = stream.getAttribute("tokenId");
$toolElement.attr("data-minted", !!tool.minted);
var templateName = state.templates.view.name;
var templateFields = Q.extend({
show: state.show
}, state.templates.view.fields);
Q.Template.render(templateName, templateFields, function (err, html) {
tool.element.innerHTML = html;
$toolElement.activate();
$(".Assets_NFT_author", tool.element).tool("Users/avatar", {
userId: publisherId,
icon: 50,
contents: true,
editable: false
}).activate(function () {
$(this.element).on(Q.Pointer.fastclick, function (e) {
Q.handle(state.onAvatar, this, [e]);
});
});
$(".Assets_NFT_participants", tool.element).tool("Streams/participants", {
showSummary: false,
showControls: true,
showBlanks: true,
publisherId: publisherId,
streamName: streamName
}).activate();
$(".Assets_NFT_title", tool.element).tool("Streams/inplace", {
editable: false,
field: "title",
inplaceType: "text",
publisherId: publisherId,
streamName: streamName
}, "nft_preview_title_" + tool.stream.fields.name.split("/").pop()).activate();
$(".Assets_NFT_description", tool.element).tool("Streams/inplace", {
editable: false,
field: "content",
inplaceType: "text",
publisherId: publisherId,
streamName: streamName
}, "nft_preview_description_" + tool.stream.fields.name.split("/").pop()).activate();
// apply Streams/preview icon behavior
var videoUrl = state.video || stream.getAttribute("videoUrl");
var audioUrl = state.audio || stream.getAttribute("audioUrl");
var videoProvider = stream.getAttribute("videoProvider");
var videoId = stream.getAttribute("videoId");
var imageUrl = state.image || stream.iconUrl(state.imagepicker.showSize);
var $container = $(".video-container", tool.element);
var $previewIcon = $("img.NFT_preview_icon", tool.element);
if (videoUrl) {
tool.renderVideo($container, videoUrl, imageUrl);
} else if (audioUrl) {
tool.renderAudio($container, audioUrl);
} else if (imageUrl) {
tool.renderImage($container, imageUrl);
} else if (videoId) {
videoUrl = Q.getObject(["video", "cloudUpload", videoProvider, "url"], Q).interpolate({videoId: videoId});
tool.renderVideo($container, videoUrl);
} else {
var overrides = NFT.icon.defaultSize ? {
"overrideShowSize": {
'': (stream.fields.defaultSize || state.defaultSize || NFT.icon.defaultSize)
}
} : {};
tool.preview.icon($previewIcon[0], null, overrides);
}
// set onInvoke event
$toolElement.off(Q.Pointer.fastclick).on(Q.Pointer.fastclick, function () {
Q.handle(state.onInvoke, tool, [publisherId, streamName]);
});
// listen for Streams/changed messages to update the image, video or audio
stream.onMessage("Streams/changed").set(function (updatedStream, message) {
tool.renderFromStream(updatedStream);
}, [tool.id, Q.normalize(publisherId), Q.normalize(streamName.split("/").pop())].join("_"));
Q.handle(state.onRender, tool);
});
},
/**
* Render a fallback preview when required data is missing or invalid
* @method renderFallBack
*/
renderFallBack: function () {
var tool = this;
var state = tool.state;
var $toolElement = $(this.element);
var templateName = state.templates.view.name;
var templateFields = Q.extend({
show: {
avatar: true,
title: false,
description: false,
participants: false,
bidInfo: false
}
}, state.templates.view.fields);
Q.Template.render(templateName, templateFields, (err, html) => {
tool.element.innerHTML = html;
$(".Assets_NFT_author", tool.element).addClass("Q_error").html(state.fallback);
$(".video-container", tool.element).addClass("fallback").html(JSON.stringify({
tokenURI: state.tokenURI,
tokenId: state.tokenId,
metadata: state.metadata,
owner: state.owner,
ownerUserId: state.ownerUserId,
secondsLeft: state.secondsLeft
}));
// set onInvoke event
$toolElement.off(Q.Pointer.fastclick);
Q.handle(state.onRender, tool);
});
},
/**
* Render preview from metadata object
* @method renderFromMetadata
* @param {Object} params
* @param {object} params.metadata
* @param {String} [params.authorAddress]
* @param {String} [params.ownerAddress]
* @param {object} [params.commissionInfo]
* @param {object} [params.saleInfo]
* @param {string} [params.authorUserId] - id of NFT author user
* @param {string} [params.ownerUserId] - id of NFT owner user
*/
renderFromMetadata: function (params) {
var tool = this;
var state = tool.state;
var $toolElement = $(this.element);
var metadata = Q.getObject("metadata", params);
var authorAddress = Q.getObject("authorAddress", params);
var ownerAddress = Q.getObject("ownerAddress", params);
var commissionInfo = Q.getObject("commissionInfo", params);
var saleInfo = Q.getObject("saleInfo", params);
var authorUserId = Q.getObject("authorUserId", params);
var ownerUserId = Q.getObject("ownerUserId", params);
tool.minted = true;
$toolElement.attr("data-minted", tool.minted);
var templateName = state.templates.view.name;
var templateFields = Q.extend({
title: metadata.name,
description: metadata.description,
show: state.show
}, state.templates.view.fields);
Q.Template.render(templateName, templateFields, (err, html) => {
tool.element.innerHTML = html;
$toolElement.activate();
var $Assets_NFT_author = $(".Assets_NFT_author", tool.element);
if ($Assets_NFT_author.length && authorUserId) {
$Assets_NFT_author.tool("Users/avatar", {
userId: authorUserId,
icon: 50,
contents: true,
editable: false
}).activate(function () {
$(this.element).on(Q.Pointer.fastclick, function (e) {
Q.handle(state.onAvatar, this, [e]);
});
});
} else if ($Assets_NFT_author.length) {
Q.Template.render("Assets/NFT/avatar", {
size: 50,
address: Web3.minimizeAddress(authorAddress, 20, 3)
}, (err, html) => {
$Assets_NFT_author.html(html);
});
}
var $Assets_NFT_owner = $(".Assets_NFT_owner", tool.element);
if ($Assets_NFT_owner.length && ownerUserId) {
$Assets_NFT_owner.tool("Users/avatar", {
userId: ownerUserId,
icon: 80,
contents: true,
editable: false
}).activate(function () {
$(this.element).on(Q.Pointer.fastclick, function (e) {
Q.handle(state.onAvatar, this, [e]);
});
});
} else if ($Assets_NFT_owner.length) {
Q.Template.render("Assets/NFT/avatar", {
size: 80,
address: Web3.minimizeAddress(ownerAddress, 20, 3)
}, (err, html) => {
$Assets_NFT_owner.html(html);
});
}
var videoUrl = state.video || metadata.video || metadata.youtube_url;
var audioUrl = state.audio;
var imageUrl = state.image || metadata.image || null;
if (!imageUrl && metadata.image_data) {
imageUrl = 'data:image/svg+xml;utf8,' + metadata.image_data;
}
var $container = $(".video-container", tool.element);
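// animation_url may point to either video or audio; probe it with a HEAD request
// and dispatch on the returned Content-Type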
if (metadata.animation_url) {
$.ajax({
type: "HEAD",
url: metadata.animation_url,
}).done(function(message, text, jqXHR){
var contentType = jqXHR.getResponseHeader('Content-Type');
if (contentType.includes("video")) {
tool.renderVideo($container, metadata.animation_url, imageUrl);
} else if (contentType.includes("audio")) {
tool.renderAudio($container, metadata.animation_url, imageUrl);
}
});
} else if (videoUrl) {
tool.renderVideo($container, videoUrl, imageUrl);
} else if (audioUrl) {
tool.renderAudio($container, audioUrl);
} else if (imageUrl) {
tool.renderImage($container, imageUrl);
}
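// countdown until the NFT can be claimed; flag the element as claimable when it expires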
if (state.secondsLeft > 0) {
$(".Assets_NFT_timeout_tool", tool.element).tool("Q/timestamp", {
time: Date.now()/1000 + state.secondsLeft,
beforeRefresh: function (result, diff) {
if (diff <= 0) {
$toolElement.attr("data-claim", true);
}
}
}).activate();
}
$("button[name=unlock]", tool.element).on(Q.Pointer.fastclick, function () {
Q.handle(state.onClaim, tool);
return false;
});
// set onInvoke event
$toolElement.off(Q.Pointer.fastclick).on(Q.Pointer.fastclick, function () {
Q.handle(state.onInvoke, tool, [metadata, authorAddress, ownerAddress, commissionInfo, saleInfo, authorUserId]);
});
Q.handle(state.onRender, tool);
});
},
/**
* Create NFT
* @method composer
*/
composer: function () {
var tool = this;
var $toolElement = $(this.element);
var previewState = tool.preview.state;
previewState.editable = true; // we need to upload icon
Q.Template.render('Assets/NFT/composer', {}, function(err, html) {
tool.element.innerHTML = html;
// get or create composer stream
Q.req("Assets/NFT", "newItem", function (err, response) {
if (err) {
return;
}
var newItem = response.slots.newItem;
previewState.publisherId = newItem.publisherId;
previewState.streamName = newItem.streamName;
// this is needed for the Streams/related tool, to avoid the composer appearing twice
Q.setObject("options.streams_preview.publisherId", newItem.publisherId, tool.element);
Q.setObject("options.streams_preview.streamName", newItem.streamName, tool.element);
Streams.get(previewState.publisherId, previewState.streamName, function (err) {
if (err) {
return;
}
tool.stream = this;
$toolElement.off(Q.Pointer.fastclick).on(Q.Pointer.fastclick, tool.update.bind(tool));
});
}, {
fields: {
publisherId: previewState.publisherId,
category: previewState.related
}
});
});
},
/**
* Update NFT
* @method update
*/
update: function () {
var tool = this;
var $toolElement = $(this.element);
var state = this.state;
var isNew = $toolElement.hasClass("Streams_preview_composer");
var previewState = this.preview.state;
var publisherId = previewState.publisherId;
var streamName = previewState.streamName;
// need to update tool.stream
// at this stage the stream should already be cached, so Streams.get just reads it from cache and can be treated as synchronous
Streams.get(publisherId, streamName, function () {
tool.stream = this;
});
Q.Dialogs.push({
title: isNew ? tool.text.NFT.CreateNFT : tool.text.NFT.UpdateNFT,
className: "Assets_NFT_preview_composer",
template: {
name: "Assets/NFT/nftCreate",
fields: {
minted: false,
title: Q.getObject("stream.fields.title", tool) || "",
content: Q.getObject("stream.fields.content", tool) || "",
saveButtonText: isNew ? tool.text.NFT.CreateYourNFT : tool.text.NFT.UpdateNFT
}
},
onActivate: function (dialog) {
var $icon = $("img.NFT_preview_icon", dialog);
var $imageContainer = $icon.closest(".Assets_nft_container");
// create a new Streams/preview tool to attach icon behavior to the $icon element
$("<div>").tool("Streams/preview", Q.extend(previewState, {editable: true})).activate(function () {
this.icon($icon[0], function (element) {
var src = element.src;
if (src.includes("empty_white")) {
$imageContainer.plugin("Q/actions", "remove");
} else {
$imageContainer.plugin("Q/actions", {
actions: {
remove: function () {
Q.confirm(tool.text.NFT.AreYouSureDeleteImage, function(result) {
if (!result) {
return;
}
Q.req("Assets/NFT", ["image"], function (err) {
if (err) {
return;
}
Streams.get.force(publisherId, streamName, function (err) {
if (err) {
return;
}
tool.renderFromStream(this);
});
}, {
method: "delete",
fields: {
publisherId: publisherId,
streamName: streamName
}
});
});
}
}
});
}
});
});
// manage attributes
tool.manageAttributes($(".Assets_nft_attributes", dialog), tool.stream.getAttribute("Assets/NFT/attributes"));
$("button[name=addAttribute]", dialog).on(Q.Pointer.fastclick, function (event) {
event.preventDefault();
tool.manageAttributes($(".Assets_nft_attributes", dialog));
return false;
});
// upload image button
$(".Assets_nft_upload_button", dialog).on(Q.Pointer.fastclick, function (event) {
event.preventDefault();
$icon.trigger("click");
});
var videoTool;
var $videoContainer = $(".Assets_nft_movie", dialog).closest(".Assets_nft_container");
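// (re)builds the Q/video preview in the dialog from the stream attributes (optionally
// overridden by options) and wires a "remove" action that deletes the video server-side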
var _updateVideoTool = function (options) {
var videoOptions = Q.extend({}, state.video);
var videoId = tool.stream.getAttribute("videoId");
var videoProvider = tool.stream.getAttribute("videoProvider");
var videoUrl = tool.stream.getAttribute("videoUrl");
if (options) {
videoId = Q.getObject("videoId", options) || videoId;
videoProvider = Q.getObject("videoProvider", options) || videoProvider;
videoUrl = Q.getObject("videoUrl", options) || videoUrl;
}
if (videoUrl) {
videoOptions.url = Q.url(videoUrl);
} else if (videoId && videoProvider) {
videoOptions.url = Q.getObject(["video", "cloudUpload", videoProvider, "url"], Q).interpolate({videoId: videoId})
}
var $element = $(".Assets_nft_movie", dialog);
if (Q.Tool.from($element, "Q/video")) {
var $newElement = $("<div class='Assets_nft_movie'></div>").insertAfter($element);
Q.Tool.remove($element, true, true);
$element = $newElement;
}
$videoContainer.plugin("Q/actions", "remove");
if (!videoOptions.url) {
return $videoContainer.removeClass("NFT_preview_loading");
}
$element.tool("Q/video", videoOptions).activate(function () {
videoTool = this;
$videoContainer.plugin("Q/actions", {
actions: {
remove: function () {
Q.confirm(tool.text.NFT.AreYouSureDeleteVideo, function(result) {
if (!result) {
return;
}
Q.req("Assets/NFT", ["video"], function (err) {
if (err) {
return;
}
Streams.get.force(publisherId, streamName, function (err) {
if (err) {
return;
}
tool.renderFromStream(this);
_updateVideoTool();
});
}, {
method: "delete",
fields: {
publisherId: publisherId,
streamName: streamName
}
});
});
}
}
});
$videoContainer.removeClass("NFT_preview_loading");
});
};
_updateVideoTool();
// set the video URL
var $inputUrl = $("input[name=movieUrl]", dialog);
$inputUrl.on("change", function () {
if (!this.value.matchTypes('url', {requireScheme: false}).length) {
return _updateVideoTool();
}
_updateVideoTool({
videoId: null,
videoUrl: this.value
});
});
// upload video
$("input[name=movieUpload]", dialog).on("change", function () {
var file = this.files[0];
if (!file) {
return;
}
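// read the chosen file as a data URL and upload it under the stream's video subpath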
var reader = new FileReader();
$videoContainer.addClass("NFT_preview_loading");
reader.readAsDataURL(file);
reader.onload = function () {
Q.req(Q.action("Streams/stream"), 'data',function (err, res) {
var msg = Q.firstErrorMessage(err) || Q.firstErrorMessage(res && res.errors);
if (msg) {
$videoContainer.removeClass("NFT_preview_loading");
return Q.handle([state.onError, state.onFinish], tool, [msg]);
}
Streams.get.force(publisherId, streamName, function () {
tool.stream = this;
_updateVideoTool();
$inputUrl.val("");
});
}, {
fields: {
file: {
name: file.name,
data: reader.result,
subpath: publisherId.splitId() + "/" + streamName + "/video"
},
publisherId: publisherId,
streamName: streamName
},
timeout: 100000,
method: 'put'
});
};
reader.onerror = function (error) {
console.log('Error: ', error);
$videoContainer.removeClass("NFT_preview_loading");
};
this.value = null;
});
// create NFT
$("button[name=save]", dialog).on(Q.Pointer.fastclick, function (event) {
event.preventDefault();
$(dialog).addClass("Q_disabled");
// set WEB3_CONNECT_MODAL_ID element z-index
var modalLimit = 5000;
var modalPeriod = 500;
var modalCounter = 0;
tool.modalExist = setInterval(function() {
modalCounter += modalPeriod;
if (modalCounter >= modalLimit) {
clearInterval(tool.modalExist);
}
var $modal = $("#WEB3_CONNECT_MODAL_ID");
if (!$modal.length) {
return;
}
clearInterval(tool.modalExist);
var modalZIndex = $(".Q_overlay_open:visible").css("z-index");
if (!modalZIndex) {
return;
}
modalZIndex = parseInt(modalZIndex) + 1;
$(".web3modal-modal-lightbox", $modal).css("z-index", modalZIndex);
}, modalPeriod);
var attributes = {
"Assets/NFT/attributes": tool.collectAttributes(dialog)
};
if ($inputUrl.val()) {
attributes["videoUrl"] = $inputUrl.val();
}
//if (!tool.minted) {
Q.req("Assets/NFT", ["NFTStream"],function (err, response) {
Q.Dialogs.pop();
if (err) {
return Q.alert(Q.firstErrorMessage(err));
}
var streamData = response.slots.NFTStream;
Q.handle(state.onCreated, tool, [streamData]);
}, {
method: isNew ? "post" : "put",
fields: {
publisherId: publisherId,
streamName: streamName,
title: $("input[name=title]", dialog).val(),
content: $("input[name=description]", dialog).val(),
attributes: attributes,
category: previewState.related
}
});
return;
//}
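// NOTE: the minting flow below is currently unreachable because of the unconditional return above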
var price = parseFloat($("input[name=price]", dialog).val());
var $onMarketPlace = $(".Assets_nft_check", dialog);
var onMarketPlace = $onMarketPlace.prop("checked");
var chainId = $("select[name=chain]", dialog).val();
var currencySymbol = $("select[name=currency]", dialog).val();
var chain = NFT.Web3.chains[chainId];
var currency = {};
Q.each(NFT.currencies, function (i, c) {
if (c.symbol !== currencySymbol) {
return;
}
currency = c;
currency.token = c[chainId];
});
// method to create the NFT stream after the tokenId has been created
var _reqCreateNFT = function (params) {
var tokenId = Q.getObject("tokenId", params);
var chainId = Q.getObject("chainId", params);
var attributes = Q.extend({
onMarketPlace: onMarketPlace,
currency: $("select[name=currency] option:selected", dialog).text(),
price: price
}, params);
if (tokenId) {
attributes.tokenId = tokenId;
}
if (chainId) {
attributes.chainId = chainId;
}
// after the token is created, create the NFT stream (actually update the composer stream and change its relation from "new" to "NFT")
// and set tokenId, chainId, currency, royalty in attributes
Q.req("Assets/NFT",function (err) {
Q.Dialogs.pop();
if (err) {
return Q.alert(Q.firstErrorMessage(err));
}
Q.Tool.remove(tool.element, true, false);
tool.element.className = "";
tool.element.innerHTML = "";
$toolElement.tool("Assets/NFT/preview", {
tokenId: tokenId,
chainId: chainId
}).activate();
Q.handle(state.onCreated, tool, [tokenId, chainId]);
}, {
method: "post",
fields: {
userId: publisherId,
title: $("input[name=title]", dialog).val(),
content: $("input[name=description]", dialog).val(),
attributes: attributes
}
});
};
if (onMarketPlace) {
// create token for NFT
tool.createToken(price, currency, chain, royalty, onMarketPlace, function (err, tokenId, chainId) {
if (err) {
return $(dialog).removeClass("Q_disabled");
}
Q.Dialogs.pop();
// now that the tokenId is created, create the NFT stream
_reqCreateNFT({
"tokenId": tokenId,
"chainId": chainId
});
});
} else {
_reqCreateNFT();
Q.Dialogs.pop();
}
});
}
});
},
/**
* Create attributes list
* @method manageAttributes
* @param {Element|jQuery} element - element which need to replace with manager
* @param {Object} attributes - object with defined attributes
*/
manageAttributes: function (element, attributes) {
attributes = Q.isEmpty(attributes) ? [{}] : attributes;
var $element = element instanceof Element ? $(element) : element;
var tool = this;
var previewState = tool.preview.state;
var publisherId = previewState.publisherId;
var streamName = previewState.streamName;
// get default attributes from server
Q.req("Assets/NFT", "attributes", function (err, response) {
var fem = Q.firstErrorMessage(err, response);
if (fem) {
return console.error(fem);
}
var defaultAttributes = response.slots.attributes;
// merge existing attributes with the defaults
Q.each(attributes, function () {
var attribute = this;
if (!attribute.display_type) {
return;
}
if (Q.isEmpty(defaultAttributes[attribute.display_type])) {
defaultAttributes[attribute.display_type] = {};
}
if (Q.isEmpty(defaultAttributes[attribute.display_type][attribute.trait_type])) {
defaultAttributes[attribute.display_type][attribute.trait_type] = [];
}
var arr = defaultAttributes[attribute.display_type][attribute.trait_type];
if (!arr.includes(attribute.value)) {
arr.push(attribute.value);
}
});
Q.each(attributes, function () {
var attribute = this;
Q.Template.render("Assets/NFT/manage/attributes", {
attributes: defaultAttributes,
}, function (err, html) {
var $html = $(html);
$element.append($html);
var $displayType = $("select[name=display_type]", $html);
$displayType.val(Q.getObject("display_type", attribute));
var $traitType = $("select[name=trait_type]", $html);
var $traitType_ = $("option[value=_]", $traitType);
var $value = $("select[name=value]", $html);
$value.val(Q.getObject("value", attribute));
var $value_ = $("option[value=_]", $value);
$(".basic32_remove", $html).on(Q.Pointer.fastclick, function () { $html.remove(); });
var _addItem = function () {
var $this = this instanceof Element ? $(this) : this;
$this.val("");
var titleKey = null;
var $lastOption = $("option[value='_']", $this);
switch ($this.prop("name")) {
case "trait_type":
titleKey = "AddTraitTitle";
break;
case "value":
titleKey = "AddValueTitle";
break;
}
Q.prompt(null, function (title) {
if (!title) {
return;
}
$('<option data-type="attr">' + title + '</option>').insertBefore($lastOption);
$this.val(title).trigger("change");
var displayType = $displayType.val();
var traitType = $traitType.val();
var value = $value.val();
// don't send the update request if any value is still empty
if (!displayType || !traitType || !value) {
return;
}
Q.req("Assets/NFT", ["attrUpdate"], function (err, response) {
var fem = Q.firstErrorMessage(err, response);
if (fem) {
return Q.alert(fem);
}
if (!response.slots.attrUpdate) {
return;
}
Q.setObject([displayType, traitType], value, defaultAttributes);
}, {
method: "put",
fields: {
publisherId: publisherId,
streamName: streamName,
display_type: displayType,
trait_type: traitType,
value: value
}
});
}, {
placeholder: tool.text.NFT.attributes[titleKey] //AddDisplayType
});
};
$value.on("change", function () {
var val = $value.val();
if (val === "_") {
return Q.handle(_addItem, $value);
}
});
$traitType.on("change", function () {
var dtVal = $displayType.val();
var ttVal = $traitType.val();
if (!ttVal) {
return $value.prop("disabled", true);
} else if (ttVal === "_") {
return Q.handle(_addItem, $traitType);
}
$value.prop("disabled", false);
$("[data-type=attr]", $value).remove();
Q.each(Q.getObject([dtVal, "data", ttVal], defaultAttributes) || [], function (index, value) {
$('<option data-type="attr">' + value + '</option>').insertBefore($value_);
});
var value = Q.getObject("value", attribute);
value && $value.val(value);
});
$displayType.on("change", function () {
var dtVal = $displayType.val();
if (!dtVal) {
$traitType.prop("disabled", true);
$value.prop("disabled", true);
return;
}
$traitType.prop("disabled", false);
$("[data-type=attr]", $traitType).remove();
Q.each(Q.getObject([dtVal, "data"], defaultAttributes) || [], function (index, value) {
$('<option data-type="attr">' + index + '</option>').insertBefore($traitType_);
});
var traitType = Q.getObject("trait_type", attribute);
traitType && $traitType.val(traitType);
$traitType.trigger("change");
}).trigger("change");
});
});
}, {
fields: {
publisherId: publisherId
}
});
},
/**
* Collect attributes under some element
* @method collectAttributes
* @param {Element|jQuery} element
*/
collectAttributes: function (element) {
// collect NFT attributes
var assetsNFTAttributes = [];
$(".Assets_NFT_attribute", element).each(function () {
var displayType = $("select[name=display_type]", this).val();
var traitType = $("select[name=trait_type]", this).val();
var value = $("select[name=value]", this).val();
var attribute = {};
if (displayType && displayType !== '_') {
attribute.display_type = displayType;
}
if (traitType && traitType !== '_') {
attribute.trait_type = traitType;
}
if (value && value !== '_') {
attribute.value = value;
}
if (!Q.isEmpty(attribute)) {
assetsNFTAttributes.push(attribute);
}
});
return assetsNFTAttributes;
}
});
Q.Template.set('Assets/NFT/composer',
`<div class="title-block Assets_create_titles">
<div class="video-container Assets_create_video">
<h4>{{NFT.CreateNFT}}</h4>
</div>
<div class="Assets_create_video_footer"></div>
</div>`,
{text: ['Assets/content']}
);
Q.Template.set('Assets/NFT/nftCreate',
`<div class="Assets_nft" data-minted="{{minted}}">
<form>
<div class="Assets_nft_form_group">
<input type="text" name="title" value="{{title}}" class="Assets_nft_form_control" placeholder="{{NFT.TitlePlaceholder}}">
</div>
<div class="Assets_nft_form_group">
<input type="text" name="description" value="{{content}}" class="Assets_nft_form_control" placeholder="{{NFT.DescribeYourNFT}}">
</div>
<div class="Assets_nft_form_group" data-type="nft_attributes">
<label>{{NFT.attributes.Title}}:</label>
<div class="Assets_nft_attributes"></div>
<button class="Q_button" name="addAttribute">{{NFT.attributes.NewAttribute}}</button>
</div>
<div class="Assets_nft_form_group">
<label>{{NFT.NftPicture}}:</label>
<div class="Assets_nft_container">
<img class="NFT_preview_icon">
<button class="Assets_nft_upload_button">{{NFT.UploadFile}}</button>
</div>
</div>
<div class="Assets_nft_form_group">
<label>{{NFT.NftMovie}}:</label>
<div class="Assets_nft_container">
<input name="movieUrl" placeholder="{{NFT.MovieURL}}"> <label>{{NFT.UploadMovie}}<input type="file" style="display: none;" name="movieUpload"></label>
<div class="Assets_nft_movie"></div>
</div>
</div>
<button class="Q_button" name="save">{{saveButtonText}}</button>
</form>
</div>`,
{text: ['Assets/content']});
Q.Template.set('Assets/NFT/mint', `
<div class="Assets_nft_market">
<div><label>{{NFT.PutOnMarketplace}} :</label></div>
<label class="switch">
<input type="checkbox" {{#if onMarketPlace}}checked{{/if}} class="Assets_nft_check">
<span class="slider round"></span>
</label>
</div>
<div class="Assets_nft_form_details" data-active="{{onMarketPlace}}">
<div class="Assets_nft_form_group">
<div class="Assets_price">
<input type="text" name="price" class="Assets_nft_form_control" placeholder="{{NFT.EnterPrice}}">
<select name="currency">
{{#each currencies}}
<option>{{this}}</option>
{{/each}}
</select>
{{currency}}
</div>
</div>
<div class="Assets_nft_form_group Assets_nft_royalties">
<div class="Assets_royality">
<input type="number" name="royalty" class="Assets_nft_form_control" placeholder="{{NFT.RoyaltyPlaceholder}}">%
</div>
</div>
<button class="Q_button" name="save">{{NFT.MintNFT}}</button>
</div>
`, {text: ['Assets/content']});
Q.Template.set('Assets/NFT/manage/attributes',
`<div class="Assets_NFT_attribute">
<select name='display_type'><option value="">{{NFT.attributes.DisplayTitle}}</option>` +
'{{#each attributes}}' +
'<option value="{{@key}}">{{this.name}}</option>' +
'{{/each}}' +
`</select>
<select name='trait_type'><option value="">{{NFT.attributes.TraitTitle}}</option><option value="_">{{NFT.attributes.NewTrait}}</option></select>
<select name='value'><option value="">{{NFT.attributes.ValueTitle}}</option><option value="_">{{NFT.attributes.NewValue}}</option></select>
<div class="basic32 basic32_remove"></div>
</div>`,
{text: ['Assets/content']}
);
Q.Template.set('Assets/NFT/view',
`<div class="title-block">
{{#if show.avatar}}
<div class="title_block_header">
<div class="Assets_NFT_author"></div>
</div>
{{/if}}
<div class="video-container"><img class="NFT_preview_icon"></div>
{{#if show.title}}
<div class="Assets_NFT_title">{{title}}</div>
{{/if}}
{{#if show.description}}
<div class="Assets_NFT_description">{{description}}</div>
{{/if}}
{{#if show.participants}}
<div class="Assets_NFT_participants"></div>
{{/if}}
{{#if show.bidInfo}}
<ul class="bid-info">
<li class="Assets_NFT_price">
<p><span class="Assets_NFT_price_value">{{price}}</span> {{currency.symbol}}</p>
<span class="Assets_NFT_comingsoon">Coming Soon</span>
</li>
<li class="action-block">
<button name="buy" class="Q_button">{{NFT.Buy}}</button>
<button name="soldOut" class="Q_button">{{NFT.NotOnSale}}</button>
<button name="update" class="Q_button">{{NFT.Actions}}</button>
<button name="unlock" class="Q_button">{{NFT.Unlock}}</button>
</li>
</ul>
<div class="Assets_NFT_claim_timeout"><span>{{NFT.Unlocking}}</span> <span class="Assets_NFT_timeout_tool"></span></div>
{{/if}}
</div>`,
{text: ['Assets/content']}
);
Q.Template.set('Assets/NFT/role',
`<div class="Assets_NFT_role">
<div class="video-container"><img class="NFT_preview_icon"></div>
<div class="Assets_NFT_owner"></div>
{{#if show.title}}
<div class="Assets_NFT_title">{{title}}</div>
{{/if}}
{{#if show.participants}}
<div class="Assets_NFT_participants"></div>
{{/if}}
</div>`,
{text: ['Assets/content']}
);
Q.Template.set('Assets/NFT/avatar',
`<img src="{{baseUrl}}/Q/plugins/Users/img/icons/default/{{size}}.png" class="Users_avatar_icon Users_avatar_icon_{{size}}">
<span class="Users_avatar_name">{{address}}</span>`,
{text: ['Assets/content']}
);
})(window, Q, jQuery); | Updated Assets/NFT/preview tool.
| platform/plugins/Assets/web/js/tools/NFT/preview.js | Updated Assets/NFT/preview tool. | <ide><path>latform/plugins/Assets/web/js/tools/NFT/preview.js
<ide> tool.preview = Q.Tool.from(this.element, "Streams/preview");
<ide> var previewState = Q.getObject("preview.state", tool) || {};
<ide> var loggedInUserId = Q.Users.loggedInUserId();
<del> var tokenId = Q.getObject("token.id", state);
<del> var chainId = Q.getObject("token.chainId", state);
<del> var contractAddress = Q.getObject("token.contractAddress", state);
<add> var tokenId = Q.getObject("tokenId", state);
<add> var chainId = Q.getObject("chainId", state);
<add> var contractAddress = Q.getObject("contractAddress", state);
<ide>
<ide> // is admin
<ide> var roles = Object.keys(Q.getObject("roles", Q.Users) || {});
<ide> { // default options here
<ide> useWeb3: true,
<ide> metadata: null,
<add> tokenId: null,
<ide> tokenURI: null,
<del> token: {
<del> id: null,
<del> contractAddress: null,
<del> chainId: null
<del> },
<add> chainId: null,
<add> contractAddress: null,
<add> owner: null,
<add> ownerUserId: null,
<ide> imagepicker: {
<ide> showSize: NFT.icon.defaultSize,
<ide> save: "NFT/icon"
<ide> var tool = this;
<ide> var state = this.state;
<ide> var $toolElement = $(this.element);
<del> var tokenId = Q.getObject("token.id", state);
<del> var chainId = Q.getObject("token.chainId", state);
<del> var contractAddress = Q.getObject("token.contractAddress", state);
<add> var tokenId = Q.getObject("tokenId", state);
<add> var chainId = Q.getObject("chainId", state);
<add> var contractAddress = Q.getObject("contractAddress", state);
<ide> var tokenURI = state.tokenURI;
<ide> var metadata = state.metadata;
<ide>
<ide> }
<ide> }).activate();
<ide> }
<del> $("button[name=unlock]", tool.element).on(Q.Pointer.fastclick, function () {
<add> $("button[name=claim]", tool.element).on(Q.Pointer.fastclick, function () {
<ide> Q.handle(state.onClaim, tool);
<ide> return false;
<ide> });
<ide> $toolElement.off(Q.Pointer.fastclick).on(Q.Pointer.fastclick, function () {
<ide> Q.handle(state.onInvoke, tool, [metadata, authorAddress, ownerAddress, commissionInfo, saleInfo, authorUserId]);
<ide> });
<add>
<add> var $assetsNFTlocked = $(".Assets_NFT_locked", tool.element);
<add> var holderContractAddress = Q.getObject("holder.contractAddress", state);
<add> var holderPathABI = Q.getObject("holder.pathABI", state);
<add> /*if (holderContractAddress.length) {
<add> $assetsNFTlocked.tool("Assets/NFT/locked", {
<add> tokenId: state.tokenId,
<add> seriesIdSource: {
<add> salesAddress: holderContractAddress,
<add> },
<add> NFTAddress: NFT.Web3.chains[state.chainId],
<add> //abiNFT: TokenSociety.NFT.abiNFT
<add> }).activate();
<add> } else {
<add> $assetsNFTlocked.remove();
<add> }*/
<ide>
<ide> Q.handle(state.onRender, tool);
<ide> });
<ide> <button name="buy" class="Q_button">{{NFT.Buy}}</button>
<ide> <button name="soldOut" class="Q_button">{{NFT.NotOnSale}}</button>
<ide> <button name="update" class="Q_button">{{NFT.Actions}}</button>
<del> <button name="unlock" class="Q_button">{{NFT.Unlock}}</button>
<add> <button name="claim" class="Q_button">{{NFT.Claim}}</button>
<add> <div class="Assets_NFT_locked"></div>
<ide> </li>
<ide> </ul>
<ide> <div class="Assets_NFT_claim_timeout"><span>{{NFT.Unlocking}}</span> <span class="Assets_NFT_timeout_tool"></span></div> |
|
Java | apache-2.0 | a00b11701791a843ab47e1c60ddda036a7df23a3 | 0 | eayun/ovirt-engine,walteryang47/ovirt-engine,OpenUniversity/ovirt-engine,zerodengxinchao/ovirt-engine,yingyun001/ovirt-engine,halober/ovirt-engine,eayun/ovirt-engine,OpenUniversity/ovirt-engine,eayun/ovirt-engine,halober/ovirt-engine,walteryang47/ovirt-engine,OpenUniversity/ovirt-engine,yapengsong/ovirt-engine,zerodengxinchao/ovirt-engine,zerodengxinchao/ovirt-engine,halober/ovirt-engine,eayun/ovirt-engine,walteryang47/ovirt-engine,yingyun001/ovirt-engine,zerodengxinchao/ovirt-engine,walteryang47/ovirt-engine,yapengsong/ovirt-engine,eayun/ovirt-engine,OpenUniversity/ovirt-engine,yapengsong/ovirt-engine,walteryang47/ovirt-engine,halober/ovirt-engine,yingyun001/ovirt-engine,yapengsong/ovirt-engine,yingyun001/ovirt-engine,yingyun001/ovirt-engine,zerodengxinchao/ovirt-engine,yapengsong/ovirt-engine,OpenUniversity/ovirt-engine | package org.ovirt.engine.core.dal.dbbroker;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.commons.lang.StringUtils;
import org.ovirt.engine.core.common.VdcObjectType;
import org.ovirt.engine.core.common.businessentities.BaseDisk;
import org.ovirt.engine.core.common.businessentities.BusinessEntity;
import org.ovirt.engine.core.common.businessentities.DiskImage;
import org.ovirt.engine.core.common.businessentities.DiskImageDynamic;
import org.ovirt.engine.core.common.businessentities.Image;
import org.ovirt.engine.core.common.businessentities.Provider;
import org.ovirt.engine.core.common.businessentities.Role;
import org.ovirt.engine.core.common.businessentities.Snapshot;
import org.ovirt.engine.core.common.businessentities.StorageDomain;
import org.ovirt.engine.core.common.businessentities.StorageDomainDynamic;
import org.ovirt.engine.core.common.businessentities.StorageDomainStatic;
import org.ovirt.engine.core.common.businessentities.StorageDomainStatus;
import org.ovirt.engine.core.common.businessentities.StorageDomainType;
import org.ovirt.engine.core.common.businessentities.StoragePool;
import org.ovirt.engine.core.common.businessentities.StoragePoolIsoMap;
import org.ovirt.engine.core.common.businessentities.VdsDynamic;
import org.ovirt.engine.core.common.businessentities.VdsStatic;
import org.ovirt.engine.core.common.businessentities.VdsStatistics;
import org.ovirt.engine.core.common.businessentities.VmDevice;
import org.ovirt.engine.core.common.businessentities.VmDynamic;
import org.ovirt.engine.core.common.businessentities.VmStatic;
import org.ovirt.engine.core.common.businessentities.VmStatistics;
import org.ovirt.engine.core.common.businessentities.VmTemplate;
import org.ovirt.engine.core.common.businessentities.image_storage_domain_map;
import org.ovirt.engine.core.common.businessentities.network.Network;
import org.ovirt.engine.core.common.businessentities.network.VmNetworkInterface;
import org.ovirt.engine.core.common.businessentities.network.VmNetworkStatistics;
import org.ovirt.engine.core.common.businessentities.permissions;
import org.ovirt.engine.core.common.businessentities.vds_spm_id_map;
import org.ovirt.engine.core.common.job.Job;
import org.ovirt.engine.core.common.job.Step;
import org.ovirt.engine.core.compat.Guid;
import org.ovirt.engine.core.dao.ActionGroupDAO;
import org.ovirt.engine.core.dao.AdGroupDAO;
import org.ovirt.engine.core.dao.AsyncTaskDAO;
import org.ovirt.engine.core.dao.AuditLogDAO;
import org.ovirt.engine.core.dao.BaseDAODbFacade;
import org.ovirt.engine.core.dao.BaseDiskDao;
import org.ovirt.engine.core.dao.BookmarkDAO;
import org.ovirt.engine.core.dao.BusinessEntitySnapshotDAO;
import org.ovirt.engine.core.dao.DAO;
import org.ovirt.engine.core.dao.DaoFactory;
import org.ovirt.engine.core.dao.DbUserDAO;
import org.ovirt.engine.core.dao.DiskDao;
import org.ovirt.engine.core.dao.DiskImageDAO;
import org.ovirt.engine.core.dao.DiskImageDynamicDAO;
import org.ovirt.engine.core.dao.DiskLunMapDao;
import org.ovirt.engine.core.dao.EventDAO;
import org.ovirt.engine.core.dao.GenericDao;
import org.ovirt.engine.core.dao.ImageDao;
import org.ovirt.engine.core.dao.ImageStorageDomainMapDao;
import org.ovirt.engine.core.dao.JobDao;
import org.ovirt.engine.core.dao.JobSubjectEntityDao;
import org.ovirt.engine.core.dao.LunDAO;
import org.ovirt.engine.core.dao.PermissionDAO;
import org.ovirt.engine.core.dao.QuotaDAO;
import org.ovirt.engine.core.dao.RepoFileMetaDataDAO;
import org.ovirt.engine.core.dao.RoleDAO;
import org.ovirt.engine.core.dao.RoleGroupMapDAO;
import org.ovirt.engine.core.dao.SnapshotDao;
import org.ovirt.engine.core.dao.StepDao;
import org.ovirt.engine.core.dao.StorageDomainDAO;
import org.ovirt.engine.core.dao.StorageDomainDynamicDAO;
import org.ovirt.engine.core.dao.StorageDomainStaticDAO;
import org.ovirt.engine.core.dao.StoragePoolDAO;
import org.ovirt.engine.core.dao.StoragePoolIsoMapDAO;
import org.ovirt.engine.core.dao.StorageServerConnectionDAO;
import org.ovirt.engine.core.dao.StorageServerConnectionLunMapDAO;
import org.ovirt.engine.core.dao.TagDAO;
import org.ovirt.engine.core.dao.VdcOptionDAO;
import org.ovirt.engine.core.dao.VdsDAO;
import org.ovirt.engine.core.dao.VdsDynamicDAO;
import org.ovirt.engine.core.dao.VdsGroupDAO;
import org.ovirt.engine.core.dao.VdsSpmIdMapDAO;
import org.ovirt.engine.core.dao.VdsStaticDAO;
import org.ovirt.engine.core.dao.VdsStatisticsDAO;
import org.ovirt.engine.core.dao.VmAndTemplatesGenerationsDAO;
import org.ovirt.engine.core.dao.VmDAO;
import org.ovirt.engine.core.dao.VmDeviceDAO;
import org.ovirt.engine.core.dao.VmDynamicDAO;
import org.ovirt.engine.core.dao.VmGuestAgentInterfaceDao;
import org.ovirt.engine.core.dao.VmPoolDAO;
import org.ovirt.engine.core.dao.VmStaticDAO;
import org.ovirt.engine.core.dao.VmStatisticsDAO;
import org.ovirt.engine.core.dao.VmTemplateDAO;
import org.ovirt.engine.core.dao.gluster.GlusterBrickDao;
import org.ovirt.engine.core.dao.gluster.GlusterClusterServiceDao;
import org.ovirt.engine.core.dao.gluster.GlusterHooksDao;
import org.ovirt.engine.core.dao.gluster.GlusterOptionDao;
import org.ovirt.engine.core.dao.gluster.GlusterServerDao;
import org.ovirt.engine.core.dao.gluster.GlusterServerServiceDao;
import org.ovirt.engine.core.dao.gluster.GlusterServiceDao;
import org.ovirt.engine.core.dao.gluster.GlusterVolumeDao;
import org.ovirt.engine.core.dao.network.InterfaceDao;
import org.ovirt.engine.core.dao.network.NetworkClusterDao;
import org.ovirt.engine.core.dao.network.NetworkDao;
import org.ovirt.engine.core.dao.network.NetworkViewDao;
import org.ovirt.engine.core.dao.network.VmNetworkInterfaceDao;
import org.ovirt.engine.core.dao.network.VmNetworkStatisticsDao;
import org.ovirt.engine.core.dao.provider.ProviderDao;
import org.ovirt.engine.core.utils.linq.LinqUtils;
import org.ovirt.engine.core.utils.linq.Predicate;
import org.ovirt.engine.core.utils.log.Log;
import org.ovirt.engine.core.utils.log.LogFactory;
import org.springframework.jdbc.core.JdbcTemplate;
import org.springframework.jdbc.core.RowMapper;
import org.springframework.jdbc.core.namedparam.MapSqlParameterSource;
import org.springframework.jdbc.core.simple.SimpleJdbcCall;
public class DbFacade {
@SuppressWarnings("unused")
private static final Log log = LogFactory.getLog(DbFacade.class);
@SuppressWarnings("serial")
private final static Map<Class<?>, Class<?>> mapEntityToDao = new HashMap<Class<?>, Class<?>>()
{
{
put(StoragePool.class, StoragePoolDAO.class);
put(StoragePoolIsoMap.class, StoragePoolIsoMapDAO.class);
put(StorageDomainStatic.class, StorageDomainStaticDAO.class);
put(StorageDomainDynamic.class, StorageDomainDynamicDAO.class);
put(VdsStatic.class, VdsStaticDAO.class);
put(VdsDynamic.class, VdsDynamicDAO.class);
put(VdsStatistics.class, VdsStatisticsDAO.class);
put(vds_spm_id_map.class, VdsSpmIdMapDAO.class);
put(Role.class, RoleDAO.class);
put(VmTemplate.class, VmTemplateDAO.class);
put(VmDynamic.class, VmDynamicDAO.class);
put(VmStatic.class, VmStaticDAO.class);
put(VmStatistics.class, VmStatisticsDAO.class);
put(BaseDisk.class, BaseDiskDao.class);
put(DiskImage.class, BaseDiskDao.class);
put(DiskImageDynamic.class, DiskImageDynamicDAO.class);
put(VmNetworkInterface.class, VmNetworkInterfaceDao.class);
put(VmNetworkStatistics.class, VmNetworkStatisticsDao.class);
put(Network.class, NetworkDao.class);
put(Provider.class, ProviderDao.class);
put(Snapshot.class, SnapshotDao.class);
put(VmDevice.class, VmDeviceDAO.class);
put(image_storage_domain_map.class, ImageStorageDomainMapDao.class);
put(permissions.class, PermissionDAO.class);
put(Image.class, ImageDao.class);
put(Job.class, JobDao.class);
put(Step.class, StepDao.class);
}
};
private JdbcTemplate jdbcTemplate;
private DbEngineDialect dbEngineDialect;
private final SimpleJdbcCallsHandler callsHandler = new SimpleJdbcCallsHandler();
private int onStartConnectionTimeout;
private int connectionCheckInterval;
public void setDbEngineDialect(DbEngineDialect dbEngineDialect) {
this.dbEngineDialect = dbEngineDialect;
callsHandler.setDbEngineDialect(dbEngineDialect);
}
public DbEngineDialect getDbEngineDialect() {
return dbEngineDialect;
}
public SimpleJdbcCallsHandler getCallsHandler() {
return callsHandler;
}
/**
* Return the correct DAO for the given {@link BusinessEntity} class.
*
* @param <T>
* The Type of DAO which is returned.
* @param entityClass
* The class of the entity.
* @return The DAO for the entity.
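*
* <p>Illustrative usage (assuming the entity is registered in the map above):</p>
* <pre>
* VmStaticDAO dao = dbFacade.getDaoForEntity(VmStatic.class);
* </pre>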
*/
public <T extends GenericDao<?, ?>> T getDaoForEntity(Class<? extends BusinessEntity<?>> entityClass) {
@SuppressWarnings("unchecked")
Class<T> daoType = (Class<T>) mapEntityToDao.get(entityClass);
return getDao(daoType);
}
protected <T extends DAO> T getDao(Class<T> daoType) {
T dao = DaoFactory.get(daoType);
if (dao instanceof BaseDAODbFacade) {
BaseDAODbFacade dbFacadeDAO = (BaseDAODbFacade) dao;
dbFacadeDAO.setTemplate(jdbcTemplate);
dbFacadeDAO.setDialect(dbEngineDialect);
dbFacadeDAO.setDbFacade(this);
}
return dao;
}
public DbFacade() {
}
public void setTemplate(JdbcTemplate template) {
this.jdbcTemplate = template;
callsHandler.setJdbcTemplate(template);
}
/**
* just convenience so we don't refactor old code
*/
public static DbFacade getInstance() {
return DbFacadeLocator.getDbFacade();
}
private CustomMapSqlParameterSource getCustomMapSqlParameterSource() {
return new CustomMapSqlParameterSource(dbEngineDialect);
}
public String getEntityNameByIdAndType(Guid objectId, VdcObjectType vdcObjectType) {
MapSqlParameterSource parameterSource = getCustomMapSqlParameterSource().addValue("entity_id", objectId)
.addValue("object_type", vdcObjectType.getValue());
Map<String, Object> dbResults =
new SimpleJdbcCall(jdbcTemplate).withFunctionName("fn_get_entity_name").execute(
parameterSource);
String resultKey = dbEngineDialect.getFunctionReturnKey();
return dbResults.get(resultKey) != null ? dbResults.get(resultKey).toString() : null;
}
/**
* Get the column size as defined in database for char/varchar colmuns
*
* @param table
* table name
* @param column
* column name
* @return the column size (number of characters allowed)
*/
public int getColumnSize(String table, String column) {
MapSqlParameterSource parameterSource = getCustomMapSqlParameterSource().addValue("table", table)
.addValue("column", column);
Map<String, Object> dbResults =
new SimpleJdbcCall(jdbcTemplate).withFunctionName("fn_get_column_size").execute(
parameterSource);
String resultKey = dbEngineDialect.getFunctionReturnKey();
return dbResults.get(resultKey) != null ? ((Integer) dbResults.get(resultKey)).intValue() : -1;
}
public boolean isStoragePoolMasterUp(Guid storagePoolId) {
List<StorageDomain> domains = getStorageDomainDao().getAllForStoragePool(storagePoolId);
StorageDomain master = LinqUtils.firstOrNull(domains, new Predicate<StorageDomain>() {
@Override
public boolean eval(StorageDomain storage_domains) {
return storage_domains.getStorageDomainType() == StorageDomainType.Master;
}
});
return master != null
&& (master.getStatus() == StorageDomainStatus.Active || master.getStatus() == StorageDomainStatus.Unknown);
}
public Integer getSystemStatisticsValue(String entity) {
return getSystemStatisticsValue(entity, "");
}
public Integer getSystemStatisticsValue(String entity, String status) {
MapSqlParameterSource parameterSource = getCustomMapSqlParameterSource().addValue("entity", entity).addValue(
"status", status);
RowMapper<Integer> mapper = new RowMapper<Integer>() {
@Override
public Integer mapRow(ResultSet rs, int rowNum) throws SQLException {
return rs.getInt("val");
}
};
Map<String, Object> dbResults =
dbEngineDialect.createJdbcCallForQuery(jdbcTemplate).withProcedureName("Getsystem_statistics")
.returningResultSet("RETURN_VALUE", mapper).execute(parameterSource);
return (Integer) DbFacadeUtils.asSingleResult((List<?>) (dbResults.get("RETURN_VALUE")));
}
/**
* User presentation in the GUI distinguishes between ADMIN and USER users. The distinction is determined by the
* user's permissions or their group's permissions. When a permission with the Admin role type is found, the
* DbUser isAdmin flag is set to ADMIN, otherwise to USER. The change is made only if the value differs from
* what is saved in the db.
*
* @param userIds
*/
public void updateLastAdminCheckStatus(Guid... userIds) {
MapSqlParameterSource parameterSource =
getCustomMapSqlParameterSource().addValue("userIds", StringUtils.join(userIds, ","));
new SimpleJdbcCall(jdbcTemplate).withProcedureName("UpdateLastAdminCheckStatus")
.execute(parameterSource);
}
/**
* CheckDBConnection calls a simple "select 1" SP to verify DB is up & running.
*
* @return True if DB is up & running.
*/
public boolean checkDBConnection() {
return (new SimpleJdbcCall(jdbcTemplate).withProcedureName("CheckDBConnection").execute() != null);
}
/**
* Returns a singleton instance of {@link BookmarkDAO}.
*
* @return the dao
*/
public BookmarkDAO getBookmarkDao() {
return getDao(BookmarkDAO.class);
}
/**
* Returns the singleton instance of {@link DbUserDAO}.
*
* @return the dao
*/
public DbUserDAO getDbUserDao() {
return getDao(DbUserDAO.class);
}
/**
* Returns the singleton instance of {@link VdsDAO}.
*
* @return the dao
*/
public VdsDAO getVdsDao() {
return getDao(VdsDAO.class);
}
/**
* Returns the singleton instance of {@link VmAndTemplatesGenerationsDAO}.
*
* @return the dao
*/
public VmAndTemplatesGenerationsDAO getVmAndTemplatesGenerationsDao() {
return getDao(VmAndTemplatesGenerationsDAO.class);
}
/**
* Returns the singleton instance of {@link VdsStaticDAO}.
*
* @return the dao
*/
public VdsStaticDAO getVdsStaticDao() {
return getDao(VdsStaticDAO.class);
}
/**
* Returns the singleton instance of {@link VdsDynamicDAO}.
*
* @return the dao
*/
public VdsDynamicDAO getVdsDynamicDao() {
return getDao(VdsDynamicDAO.class);
}
/**
* Returns the singleton instance of {@link VdsStatisticsDAO}.
*
* @return the dao
*/
public VdsStatisticsDAO getVdsStatisticsDao() {
return getDao(VdsStatisticsDAO.class);
}
/**
* Returns the singleton instance of {@link VdsSpmIdMapDAO}.
*
* @return the dao
*/
public VdsSpmIdMapDAO getVdsSpmIdMapDao() {
return getDao(VdsSpmIdMapDAO.class);
}
/**
* Returns the singleton instance of {@link VdsGroupDAO}.
*
* @return the dao
*/
public VdsGroupDAO getVdsGroupDao() {
return getDao(VdsGroupDAO.class);
}
/**
* Returns the single instance of {@link AuditLogDAO}.
*
* @return the dao
*/
public AuditLogDAO getAuditLogDao() {
return getDao(AuditLogDAO.class);
}
/**
* Retrieves the singleton instance of {@link LunDAO}.
*
* @return the dao
*/
public LunDAO getLunDao() {
return getDao(LunDAO.class);
}
/**
* Returns the singleton instance of {@link InterfaceDao}.
*
* @return the dao
*/
public InterfaceDao getInterfaceDao() {
return getDao(InterfaceDao.class);
}
/**
* Returns the singleton instance of {@link VmNetworkInterfaceDao}.
*
* @return the dao
*/
public VmNetworkInterfaceDao getVmNetworkInterfaceDao() {
return getDao(VmNetworkInterfaceDao.class);
}
/**
* Returns the singleton instance of {@link VmNetworkInterfaceDao}.
*
* @return the dao
*/
public VmNetworkStatisticsDao getVmNetworkStatisticsDao() {
return getDao(VmNetworkStatisticsDao.class);
}
/**
* Returns the singleton instance of {@link RoleGroupMapDAO}.
*
* @return the dao
*/
public RoleGroupMapDAO getRoleGroupMapDao() {
return getDao(RoleGroupMapDAO.class);
}
/**
* Returns the singleton instance of {@link VmTemplateDAO}.
*
* @return the dao
*/
public VmTemplateDAO getVmTemplateDao() {
return getDao(VmTemplateDAO.class);
}
/**
* Returns the singleton instance of {@link VmDAO}.
*
* @return the dao
*/
public VmDAO getVmDao() {
return getDao(VmDAO.class);
}
/**
* Returns the singleton instance of {@link VmDynamicDAO}.
*
* @return the dao
*/
public VmDynamicDAO getVmDynamicDao() {
return getDao(VmDynamicDAO.class);
}
/**
* Returns the singleton instance of {@link TagDAO}.
*
* @return the dao
*/
public TagDAO getTagDao() {
return getDao(TagDAO.class);
}
/**
* Returns the singleton instance of {@link BaseDiskDao}.
*
* @return the dao
*/
public BaseDiskDao getBaseDiskDao() {
return getDao(BaseDiskDao.class);
}
/**
* Returns the singleton instance of {@link DiskDao}.
*
* @return the dao
*/
public DiskDao getDiskDao() {
return getDao(DiskDao.class);
}
/**
* Returns the singleton instance of {@link DiskLunMapDao}.
*
* @return the dao
*/
public DiskLunMapDao getDiskLunMapDao() {
return getDao(DiskLunMapDao.class);
}
/**
* Returns the singleton instance of {@link ImageDao}.
*
* @return the dao
*/
public ImageDao getImageDao() {
return getDao(ImageDao.class);
}
/**
* Returns the singleton instance of {@link DiskImageDAO}.
*
* @return the dao
*/
public DiskImageDAO getDiskImageDao() {
return getDao(DiskImageDAO.class);
}
/**
* Returns the singleton instance of {@link DiskImageDynamicDAO}.
*
* @return the dao
*/
public DiskImageDynamicDAO getDiskImageDynamicDao() {
return getDao(DiskImageDynamicDAO.class);
}
/**
* Returns the singleton instance of {@link EventDAO}.
*
* @return the dao
*/
public EventDAO getEventDao() {
return getDao(EventDAO.class);
}
/**
* Returns the singleton instance of {@link ActionGroupDAO}.
*
* @return the dao
*/
public ActionGroupDAO getActionGroupDao() {
return getDao(ActionGroupDAO.class);
}
/**
* Retrieves the singleton instance of {@link RoleDAO}.
*
* @return the dao
*/
public RoleDAO getRoleDao() {
return getDao(RoleDAO.class);
}
/**
* Returns the singleton instance of {@link AsyncTaskDAO}.
*
* @return the dao
*/
public AsyncTaskDAO getAsyncTaskDao() {
return getDao(AsyncTaskDAO.class);
}
/**
* Retrieves the singleton instance of {@link AdGroupDAO}.
*
* @return the dao
*/
public AdGroupDAO getAdGroupDao() {
return getDao(AdGroupDAO.class);
}
/**
* Returns the singleton instance of {@link ProviderDao}.
*
* @return the dao
*/
public ProviderDao getProviderDao() {
return getDao(ProviderDao.class);
}
/**
* Returns the singleton instance of {@link NetworkDao}.
*
* @return the dao
*/
public NetworkDao getNetworkDao() {
return getDao(NetworkDao.class);
}
/**
* Returns the singleton instance of {@link NetworkViewDao}.
*
* @return the dao
*/
public NetworkViewDao getNetworkViewDao() {
return getDao(NetworkViewDao.class);
}
/**
* Returns the singleton instance of {@link NetworkClusterDao}.
*
* @return the dao
*/
public NetworkClusterDao getNetworkClusterDao() {
return getDao(NetworkClusterDao.class);
}
/**
* Returns the singleton instance of {@link PermissionDAO}.
*
* @return the dao
*/
public PermissionDAO getPermissionDao() {
return getDao(PermissionDAO.class);
}
/**
* Returns the singleton instance of {@link StorageDomainDAO}.
*
* @return the dao
*/
public StorageDomainDAO getStorageDomainDao() {
return getDao(StorageDomainDAO.class);
}
/**
* Returns the singleton instance of {@link StorageDomainDAO}.
*
* @return the dao
*/
public StorageDomainStaticDAO getStorageDomainStaticDao() {
return getDao(StorageDomainStaticDAO.class);
}
/**
* Returns the singleton instance of {@link StorageDomainDAO}.
*
* @return the dao
*/
public StorageDomainDynamicDAO getStorageDomainDynamicDao() {
return getDao(StorageDomainDynamicDAO.class);
}
/**
* Returns the singleton instance of {@link RepoFileMetaDataDAO}.
*
* @return Repository file meta data dao.
*/
public RepoFileMetaDataDAO getRepoFileMetaDataDao() {
return getDao(RepoFileMetaDataDAO.class);
}
/**
* Retrieves the singleton instance of {@link SnapshotDao}.
*
* @return the dao
*/
public SnapshotDao getSnapshotDao() {
return getDao(SnapshotDao.class);
}
/**
* Retrieves the singleton instance of {@link ImageStorageDomainMapDao}.
*
* @return the dao
*/
public ImageStorageDomainMapDao getImageStorageDomainMapDao() {
return getDao(ImageStorageDomainMapDao.class);
}
/**
* Retrieves the singleton instance of {@link StoragePoolDAO}.
*
* @return the dao
*/
public StoragePoolDAO getStoragePoolDao() {
return getDao(StoragePoolDAO.class);
}
/**
* Retrieves the singleton instance of {@link StoragePoolIsoMapDAO}.
*
* @return the dao
*/
public StoragePoolIsoMapDAO getStoragePoolIsoMapDao() {
return getDao(StoragePoolIsoMapDAO.class);
}
/**
* Retrieves the singleton instance of {@link StorageServerConnectionDAO}.
*
* @return the dao
*/
public StorageServerConnectionDAO getStorageServerConnectionDao() {
return getDao(StorageServerConnectionDAO.class);
}
/**
* Retrieves the singleton instance of {@link StorageServerConnectionLunMapDAO}.
*
* @return the dao
*/
public StorageServerConnectionLunMapDAO getStorageServerConnectionLunMapDao() {
return getDao(StorageServerConnectionLunMapDAO.class);
}
/**
* Returns the singleton instance of {@link VdcOptionDAO}.
*
* @return the dao
*/
public VdcOptionDAO getVdcOptionDao() {
return getDao(VdcOptionDAO.class);
}
/**
* Returns the singleton instance of {@link BusinessEntitySnapshotDAO}.
*
* @return the dao
*/
public BusinessEntitySnapshotDAO getBusinessEntitySnapshotDao() {
return getDao(BusinessEntitySnapshotDAO.class);
}
/**
* Returns the singleton instance of {@link VmPoolDAO}.
*
* @return the dao
*/
public VmPoolDAO getVmPoolDao() {
return getDao(VmPoolDAO.class);
}
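/**
* Returns the singleton instance of {@link VmStaticDAO}.
*
* @return the dao
*/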
public VmStaticDAO getVmStaticDao() {
return getDao(VmStaticDAO.class);
}
/**
* Returns the singleton instance of {@link VmStatisticsDAO}.
*
* @return the dao
*/
public VmStatisticsDAO getVmStatisticsDao() {
return getDao(VmStatisticsDAO.class);
}
/**
* Returns the singleton instance of {@link QuotaDAO}.
*
* @return the dao
*/
public QuotaDAO getQuotaDao() {
return getDao(QuotaDAO.class);
}
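/**
* Returns the singleton instance of {@link VmDeviceDAO}.
*
* @return the dao
*/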
public VmDeviceDAO getVmDeviceDao() {
return getDao(VmDeviceDAO.class);
}
/**
* Returns the singleton instance of {@link JobDao}.
*
* @return the dao
*/
public JobDao getJobDao() {
return getDao(JobDao.class);
}
/**
* Returns the singleton instance of {@link JobSubjectEntityDao}.
*
* @return the dao
*/
public JobSubjectEntityDao getJobSubjectEntityDao() {
return getDao(JobSubjectEntityDao.class);
}
/**
* Returns the singleton instance of {@link StepDao}.
*
* @return the dao
*/
public StepDao getStepDao() {
return getDao(StepDao.class);
}
/**
* Returns the singleton instance of {@link GlusterVolumeDao}.
*
* @return the dao
*/
public GlusterVolumeDao getGlusterVolumeDao() {
return getDao(GlusterVolumeDao.class);
}
/**
* Returns the singleton instance of {@link GlusterBrickDao}.
*
* @return the dao
*/
public GlusterBrickDao getGlusterBrickDao() {
return getDao(GlusterBrickDao.class);
}
/**
* Returns the singleton instance of {@link GlusterOptionDao}.
*
* @return the dao
*/
public GlusterOptionDao getGlusterOptionDao() {
return getDao(GlusterOptionDao.class);
}
/**
* Returns the singleton instance of {@link GlusterServiceDao}.
*
* @return the dao
*/
public GlusterServiceDao getGlusterServiceDao() {
return getDao(GlusterServiceDao.class);
}
/**
* Returns the singleton instance of {@link GlusterServerServiceDao}.
*
* @return the dao
*/
public GlusterServerServiceDao getGlusterServerServiceDao() {
return getDao(GlusterServerServiceDao.class);
}
/**
* Returns the singleton instance of {@link GlusterClusterServiceDao}.
*
* @return the dao
*/
public GlusterClusterServiceDao getGlusterClusterServiceDao() {
return getDao(GlusterClusterServiceDao.class);
}
/**
* Returns the singleton instance of {@link GlusterHooksDao}.
*
* @return the dao
*/
public GlusterHooksDao getGlusterHooksDao() {
return getDao(GlusterHooksDao.class);
}
/**
* Returns the singleton instance of {@link GlusterServerDao}.
*
* @return the dao
*/
public GlusterServerDao getGlusterServerDao() {
return getDao(GlusterServerDao.class);
}
public void setOnStartConnectionTimeout(int onStartConnectionTimeout) {
this.onStartConnectionTimeout = onStartConnectionTimeout;
}
public int getOnStartConnectionTimeout() {
return onStartConnectionTimeout;
}
public void setConnectionCheckInterval(int connectionCheckInterval) {
this.connectionCheckInterval = connectionCheckInterval;
}
public int getConnectionCheckInterval() {
return connectionCheckInterval;
}
/**
* Returns the singleton instance of {@link VmGuestAgentInterfaceDao}.
*
* @return the dao
*/
public VmGuestAgentInterfaceDao getVmGuestAgentInterfaceDao() {
return getDao(VmGuestAgentInterfaceDao.class);
}
}
| backend/manager/modules/dal/src/main/java/org/ovirt/engine/core/dal/dbbroker/DbFacade.java | package org.ovirt.engine.core.dal.dbbroker;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.commons.lang.StringUtils;
import org.ovirt.engine.core.common.VdcObjectType;
import org.ovirt.engine.core.common.businessentities.BaseDisk;
import org.ovirt.engine.core.common.businessentities.BusinessEntity;
import org.ovirt.engine.core.common.businessentities.DiskImage;
import org.ovirt.engine.core.common.businessentities.DiskImageDynamic;
import org.ovirt.engine.core.common.businessentities.Image;
import org.ovirt.engine.core.common.businessentities.Provider;
import org.ovirt.engine.core.common.businessentities.Role;
import org.ovirt.engine.core.common.businessentities.Snapshot;
import org.ovirt.engine.core.common.businessentities.StorageDomain;
import org.ovirt.engine.core.common.businessentities.StorageDomainDynamic;
import org.ovirt.engine.core.common.businessentities.StorageDomainStatic;
import org.ovirt.engine.core.common.businessentities.StorageDomainStatus;
import org.ovirt.engine.core.common.businessentities.StorageDomainType;
import org.ovirt.engine.core.common.businessentities.StoragePool;
import org.ovirt.engine.core.common.businessentities.StoragePoolIsoMap;
import org.ovirt.engine.core.common.businessentities.VdsDynamic;
import org.ovirt.engine.core.common.businessentities.VdsStatic;
import org.ovirt.engine.core.common.businessentities.VdsStatistics;
import org.ovirt.engine.core.common.businessentities.VmDevice;
import org.ovirt.engine.core.common.businessentities.VmDynamic;
import org.ovirt.engine.core.common.businessentities.VmStatic;
import org.ovirt.engine.core.common.businessentities.VmStatistics;
import org.ovirt.engine.core.common.businessentities.VmTemplate;
import org.ovirt.engine.core.common.businessentities.image_storage_domain_map;
import org.ovirt.engine.core.common.businessentities.network.Network;
import org.ovirt.engine.core.common.businessentities.network.VmNetworkInterface;
import org.ovirt.engine.core.common.businessentities.network.VmNetworkStatistics;
import org.ovirt.engine.core.common.businessentities.permissions;
import org.ovirt.engine.core.common.businessentities.vds_spm_id_map;
import org.ovirt.engine.core.compat.Guid;
import org.ovirt.engine.core.dao.ActionGroupDAO;
import org.ovirt.engine.core.dao.AdGroupDAO;
import org.ovirt.engine.core.dao.AsyncTaskDAO;
import org.ovirt.engine.core.dao.AuditLogDAO;
import org.ovirt.engine.core.dao.BaseDAODbFacade;
import org.ovirt.engine.core.dao.BaseDiskDao;
import org.ovirt.engine.core.dao.BookmarkDAO;
import org.ovirt.engine.core.dao.BusinessEntitySnapshotDAO;
import org.ovirt.engine.core.dao.DAO;
import org.ovirt.engine.core.dao.DaoFactory;
import org.ovirt.engine.core.dao.DbUserDAO;
import org.ovirt.engine.core.dao.DiskDao;
import org.ovirt.engine.core.dao.DiskImageDAO;
import org.ovirt.engine.core.dao.DiskImageDynamicDAO;
import org.ovirt.engine.core.dao.DiskLunMapDao;
import org.ovirt.engine.core.dao.EventDAO;
import org.ovirt.engine.core.dao.GenericDao;
import org.ovirt.engine.core.dao.ImageDao;
import org.ovirt.engine.core.dao.ImageStorageDomainMapDao;
import org.ovirt.engine.core.dao.JobDao;
import org.ovirt.engine.core.dao.JobSubjectEntityDao;
import org.ovirt.engine.core.dao.LunDAO;
import org.ovirt.engine.core.dao.PermissionDAO;
import org.ovirt.engine.core.dao.QuotaDAO;
import org.ovirt.engine.core.dao.RepoFileMetaDataDAO;
import org.ovirt.engine.core.dao.RoleDAO;
import org.ovirt.engine.core.dao.RoleGroupMapDAO;
import org.ovirt.engine.core.dao.SnapshotDao;
import org.ovirt.engine.core.dao.StepDao;
import org.ovirt.engine.core.dao.StorageDomainDAO;
import org.ovirt.engine.core.dao.StorageDomainDynamicDAO;
import org.ovirt.engine.core.dao.StorageDomainStaticDAO;
import org.ovirt.engine.core.dao.StoragePoolDAO;
import org.ovirt.engine.core.dao.StoragePoolIsoMapDAO;
import org.ovirt.engine.core.dao.StorageServerConnectionDAO;
import org.ovirt.engine.core.dao.StorageServerConnectionLunMapDAO;
import org.ovirt.engine.core.dao.TagDAO;
import org.ovirt.engine.core.dao.VdcOptionDAO;
import org.ovirt.engine.core.dao.VdsDAO;
import org.ovirt.engine.core.dao.VdsDynamicDAO;
import org.ovirt.engine.core.dao.VdsGroupDAO;
import org.ovirt.engine.core.dao.VdsSpmIdMapDAO;
import org.ovirt.engine.core.dao.VdsStaticDAO;
import org.ovirt.engine.core.dao.VdsStatisticsDAO;
import org.ovirt.engine.core.dao.VmAndTemplatesGenerationsDAO;
import org.ovirt.engine.core.dao.VmDAO;
import org.ovirt.engine.core.dao.VmDeviceDAO;
import org.ovirt.engine.core.dao.VmDynamicDAO;
import org.ovirt.engine.core.dao.VmGuestAgentInterfaceDao;
import org.ovirt.engine.core.dao.VmPoolDAO;
import org.ovirt.engine.core.dao.VmStaticDAO;
import org.ovirt.engine.core.dao.VmStatisticsDAO;
import org.ovirt.engine.core.dao.VmTemplateDAO;
import org.ovirt.engine.core.dao.gluster.GlusterBrickDao;
import org.ovirt.engine.core.dao.gluster.GlusterClusterServiceDao;
import org.ovirt.engine.core.dao.gluster.GlusterHooksDao;
import org.ovirt.engine.core.dao.gluster.GlusterOptionDao;
import org.ovirt.engine.core.dao.gluster.GlusterServerDao;
import org.ovirt.engine.core.dao.gluster.GlusterServerServiceDao;
import org.ovirt.engine.core.dao.gluster.GlusterServiceDao;
import org.ovirt.engine.core.dao.gluster.GlusterVolumeDao;
import org.ovirt.engine.core.dao.network.InterfaceDao;
import org.ovirt.engine.core.dao.network.NetworkClusterDao;
import org.ovirt.engine.core.dao.network.NetworkDao;
import org.ovirt.engine.core.dao.network.NetworkViewDao;
import org.ovirt.engine.core.dao.network.VmNetworkInterfaceDao;
import org.ovirt.engine.core.dao.network.VmNetworkStatisticsDao;
import org.ovirt.engine.core.dao.provider.ProviderDao;
import org.ovirt.engine.core.utils.linq.LinqUtils;
import org.ovirt.engine.core.utils.linq.Predicate;
import org.ovirt.engine.core.utils.log.Log;
import org.ovirt.engine.core.utils.log.LogFactory;
import org.springframework.jdbc.core.JdbcTemplate;
import org.springframework.jdbc.core.RowMapper;
import org.springframework.jdbc.core.namedparam.MapSqlParameterSource;
import org.springframework.jdbc.core.simple.SimpleJdbcCall;
public class DbFacade {
@SuppressWarnings("unused")
private static final Log log = LogFactory.getLog(DbFacade.class);
@SuppressWarnings("serial")
private final static Map<Class<?>, Class<?>> mapEntityToDao = new HashMap<Class<?>, Class<?>>()
{
{
put(StoragePool.class, StoragePoolDAO.class);
put(StoragePoolIsoMap.class, StoragePoolIsoMapDAO.class);
put(StorageDomainStatic.class, StorageDomainStaticDAO.class);
put(StorageDomainDynamic.class, StorageDomainDynamicDAO.class);
put(VdsStatic.class, VdsStaticDAO.class);
put(VdsDynamic.class, VdsDynamicDAO.class);
put(VdsStatistics.class, VdsStatisticsDAO.class);
put(vds_spm_id_map.class, VdsSpmIdMapDAO.class);
put(Role.class, RoleDAO.class);
put(VmTemplate.class, VmTemplateDAO.class);
put(VmDynamic.class, VmDynamicDAO.class);
put(VmStatic.class, VmStaticDAO.class);
put(VmStatistics.class, VmStatisticsDAO.class);
put(BaseDisk.class, BaseDiskDao.class);
put(DiskImage.class, BaseDiskDao.class);
put(DiskImageDynamic.class, DiskImageDynamicDAO.class);
put(VmNetworkInterface.class, VmNetworkInterfaceDao.class);
put(VmNetworkStatistics.class, VmNetworkStatisticsDao.class);
put(Network.class, NetworkDao.class);
put(Provider.class, ProviderDao.class);
put(Snapshot.class, SnapshotDao.class);
put(VmDevice.class, VmDeviceDAO.class);
put(image_storage_domain_map.class, ImageStorageDomainMapDao.class);
put(permissions.class, PermissionDAO.class);
put(Image.class, ImageDao.class);
}
};
private JdbcTemplate jdbcTemplate;
private DbEngineDialect dbEngineDialect;
private final SimpleJdbcCallsHandler callsHandler = new SimpleJdbcCallsHandler();
private int onStartConnectionTimeout;
private int connectionCheckInterval;
public void setDbEngineDialect(DbEngineDialect dbEngineDialect) {
this.dbEngineDialect = dbEngineDialect;
callsHandler.setDbEngineDialect(dbEngineDialect);
}
public DbEngineDialect getDbEngineDialect() {
return dbEngineDialect;
}
public SimpleJdbcCallsHandler getCallsHandler() {
return callsHandler;
}
/**
* Return the correct DAO for the given {@link BusinessEntity} class.
*
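* For example, {@code getDaoForEntity(VmStatic.class)} returns the {@link VmStaticDAO} instance.
*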
* @param <T>
* The Type of DAO which is returned.
* @param entityClass
* The class of the entity.
* @return The DAO for the entity.
*/
public <T extends GenericDao<?, ?>> T getDaoForEntity(Class<? extends BusinessEntity<?>> entityClass) {
@SuppressWarnings("unchecked")
Class<T> daoType = (Class<T>) mapEntityToDao.get(entityClass);
return getDao(daoType);
}
protected <T extends DAO> T getDao(Class<T> daoType) {
T dao = DaoFactory.get(daoType);
if (dao instanceof BaseDAODbFacade) {
BaseDAODbFacade dbFacadeDAO = (BaseDAODbFacade) dao;
dbFacadeDAO.setTemplate(jdbcTemplate);
dbFacadeDAO.setDialect(dbEngineDialect);
dbFacadeDAO.setDbFacade(this);
}
return dao;
}
public DbFacade() {
}
public void setTemplate(JdbcTemplate template) {
this.jdbcTemplate = template;
callsHandler.setJdbcTemplate(template);
}
/**
* just convenience so we don't refactor old code
*/
public static DbFacade getInstance() {
return DbFacadeLocator.getDbFacade();
}
private CustomMapSqlParameterSource getCustomMapSqlParameterSource() {
return new CustomMapSqlParameterSource(dbEngineDialect);
}
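/**
* Resolves the display name of an entity by its id and type, via the fn_get_entity_name DB function.
*
* @param objectId the id of the entity
* @param vdcObjectType the type of the entity
* @return the entity name, or null if no name was resolved
*/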
public String getEntityNameByIdAndType(Guid objectId, VdcObjectType vdcObjectType) {
MapSqlParameterSource parameterSource = getCustomMapSqlParameterSource().addValue("entity_id", objectId)
.addValue("object_type", vdcObjectType.getValue());
Map<String, Object> dbResults =
new SimpleJdbcCall(jdbcTemplate).withFunctionName("fn_get_entity_name").execute(
parameterSource);
String resultKey = dbEngineDialect.getFunctionReturnKey();
return dbResults.get(resultKey) != null ? dbResults.get(resultKey).toString() : null;
}
/**
* Get the column size as defined in the database for char/varchar columns
*
* @param table
* table name
* @param column
* column name
* @return the column size (number of characters allowed)
*/
public int getColumnSize(String table, String column) {
MapSqlParameterSource parameterSource = getCustomMapSqlParameterSource().addValue("table", table)
.addValue("column", column);
Map<String, Object> dbResults =
new SimpleJdbcCall(jdbcTemplate).withFunctionName("fn_get_column_size").execute(
parameterSource);
String resultKey = dbEngineDialect.getFunctionReturnKey();
return dbResults.get(resultKey) != null ? ((Integer) dbResults.get(resultKey)).intValue() : -1;
}
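/**
* Checks whether the master storage domain of the given storage pool is up.
*
* @param storagePoolId the id of the storage pool to check
* @return true if a master storage domain exists and its status is Active or Unknown
*/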
public boolean isStoragePoolMasterUp(Guid storagePoolId) {
List<StorageDomain> domains = getStorageDomainDao().getAllForStoragePool(storagePoolId);
StorageDomain master = LinqUtils.firstOrNull(domains, new Predicate<StorageDomain>() {
@Override
public boolean eval(StorageDomain storage_domains) {
return storage_domains.getStorageDomainType() == StorageDomainType.Master;
}
});
return master != null
&& (master.getStatus() == StorageDomainStatus.Active || master.getStatus() == StorageDomainStatus.Unknown);
}
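/**
* Returns the value of the given system statistic, regardless of entity status.
*/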
public Integer getSystemStatisticsValue(String entity) {
return getSystemStatisticsValue(entity, "");
}
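/**
* Returns the value of the given system statistic for entities in the given status, using the
* Getsystem_statistics stored procedure.
*/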
public Integer getSystemStatisticsValue(String entity, String status) {
MapSqlParameterSource parameterSource = getCustomMapSqlParameterSource().addValue("entity", entity).addValue(
"status", status);
RowMapper<Integer> mapper = new RowMapper<Integer>() {
@Override
public Integer mapRow(ResultSet rs, int rowNum) throws SQLException {
return rs.getInt("val");
}
};
Map<String, Object> dbResults =
dbEngineDialect.createJdbcCallForQuery(jdbcTemplate).withProcedureName("Getsystem_statistics")
.returningResultSet("RETURN_VALUE", mapper).execute(parameterSource);
return (Integer) DbFacadeUtils.asSingleResult((List<?>) (dbResults.get("RETURN_VALUE")));
}
/**
* User presentation in the GUI distinguishes between ADMIN and USER users. The distinction is determined by their
* permissions or their groups' permissions. When a permission with the Admin role type is found, the DbUser
* isAdmin flag is set to the ADMIN type, or to USER otherwise. The change is made only if the value differs from
* what is saved in the db.
*
* @param userIds the ids of the users to check and update
*/
public void updateLastAdminCheckStatus(Guid... userIds) {
MapSqlParameterSource parameterSource =
getCustomMapSqlParameterSource().addValue("userIds", StringUtils.join(userIds, ","));
new SimpleJdbcCall(jdbcTemplate).withProcedureName("UpdateLastAdminCheckStatus")
.execute(parameterSource);
}
/**
* CheckDBConnection calls a simple "select 1" SP to verify DB is up & running.
*
* @return True if DB is up & running.
*/
public boolean checkDBConnection() {
return (new SimpleJdbcCall(jdbcTemplate).withProcedureName("CheckDBConnection").execute() != null);
}
/**
* Returns a singleton instance of {@link BookmarkDAO}.
*
* @return the dao
*/
public BookmarkDAO getBookmarkDao() {
return getDao(BookmarkDAO.class);
}
/**
* Returns the singleton instance of {@link DbUserDAO}.
*
* @return the dao
*/
public DbUserDAO getDbUserDao() {
return getDao(DbUserDAO.class);
}
/**
* Returns the singleton instance of {@link VdsDAO}.
*
* @return the dao
*/
public VdsDAO getVdsDao() {
return getDao(VdsDAO.class);
}
/**
* Returns the singleton instance of {@link VmAndTemplatesGenerationsDAO}.
*
* @return the dao
*/
public VmAndTemplatesGenerationsDAO getVmAndTemplatesGenerationsDao() {
return getDao(VmAndTemplatesGenerationsDAO.class);
}
/**
* Returns the singleton instance of {@link VdsStaticDAO}.
*
* @return the dao
*/
public VdsStaticDAO getVdsStaticDao() {
return getDao(VdsStaticDAO.class);
}
/**
* Returns the singleton instance of {@link VdsDynamicDAO}.
*
* @return the dao
*/
public VdsDynamicDAO getVdsDynamicDao() {
return getDao(VdsDynamicDAO.class);
}
/**
* Returns the singleton instance of {@link VdsStatisticsDAO}.
*
* @return the dao
*/
public VdsStatisticsDAO getVdsStatisticsDao() {
return getDao(VdsStatisticsDAO.class);
}
/**
* Returns the singleton instance of {@link VdsSpmIdMapDAO}.
*
* @return the dao
*/
public VdsSpmIdMapDAO getVdsSpmIdMapDao() {
return getDao(VdsSpmIdMapDAO.class);
}
/**
* Returns the singleton instance of {@link VdsGroupDAO}.
*
* @return the dao
*/
public VdsGroupDAO getVdsGroupDao() {
return getDao(VdsGroupDAO.class);
}
/**
* Returns the singleton instance of {@link AuditLogDAO}.
*
* @return the dao
*/
public AuditLogDAO getAuditLogDao() {
return getDao(AuditLogDAO.class);
}
/**
* Retrieves the singleton instance of {@link LunDAO}.
*
* @return the dao
*/
public LunDAO getLunDao() {
return getDao(LunDAO.class);
}
/**
* Returns the singleton instance of {@link InterfaceDao}.
*
* @return the dao
*/
public InterfaceDao getInterfaceDao() {
return getDao(InterfaceDao.class);
}
/**
* Returns the singleton instance of {@link VmNetworkInterfaceDao}.
*
* @return the dao
*/
public VmNetworkInterfaceDao getVmNetworkInterfaceDao() {
return getDao(VmNetworkInterfaceDao.class);
}
/**
* Returns the singleton instance of {@link VmNetworkStatisticsDao}.
*
* @return the dao
*/
public VmNetworkStatisticsDao getVmNetworkStatisticsDao() {
return getDao(VmNetworkStatisticsDao.class);
}
/**
* Returns the singleton instance of {@link RoleGroupMapDAO}.
*
* @return the dao
*/
public RoleGroupMapDAO getRoleGroupMapDao() {
return getDao(RoleGroupMapDAO.class);
}
/**
* Returns the singleton instance of {@link VmTemplateDAO}.
*
* @return the dao
*/
public VmTemplateDAO getVmTemplateDao() {
return getDao(VmTemplateDAO.class);
}
/**
* Returns the singleton instance of {@link VmDAO}.
*
* @return the dao
*/
public VmDAO getVmDao() {
return getDao(VmDAO.class);
}
/**
* Returns the singleton instance of {@link VmDynamicDAO}.
*
* @return the dao
*/
public VmDynamicDAO getVmDynamicDao() {
return getDao(VmDynamicDAO.class);
}
/**
* Returns the singleton instance of {@link TagDAO}.
*
* @return the dao
*/
public TagDAO getTagDao() {
return getDao(TagDAO.class);
}
/**
* Returns the singleton instance of {@link BaseDiskDao}.
*
* @return the dao
*/
public BaseDiskDao getBaseDiskDao() {
return getDao(BaseDiskDao.class);
}
/**
* Returns the singleton instance of {@link DiskDao}.
*
* @return the dao
*/
public DiskDao getDiskDao() {
return getDao(DiskDao.class);
}
/**
* Returns the singleton instance of {@link DiskLunMapDao}.
*
* @return the dao
*/
public DiskLunMapDao getDiskLunMapDao() {
return getDao(DiskLunMapDao.class);
}
/**
* Returns the singleton instance of {@link ImageDao}.
*
* @return the dao
*/
public ImageDao getImageDao() {
return getDao(ImageDao.class);
}
/**
* Returns the singleton instance of {@link DiskImageDAO}.
*
* @return the dao
*/
public DiskImageDAO getDiskImageDao() {
return getDao(DiskImageDAO.class);
}
/**
* Returns the singleton instance of {@link DiskImageDynamicDAO}.
*
* @return the dao
*/
public DiskImageDynamicDAO getDiskImageDynamicDao() {
return getDao(DiskImageDynamicDAO.class);
}
/**
* Returns the singleton instance of {@link EventDAO}.
*
* @return the dao
*/
public EventDAO getEventDao() {
return getDao(EventDAO.class);
}
/**
* Returns the singleton instance of {@link ActionGroupDAO}.
*
* @return the dao
*/
public ActionGroupDAO getActionGroupDao() {
return getDao(ActionGroupDAO.class);
}
/**
* Retrieves the singleton instance of {@link RoleDAO}.
*
* @return the dao
*/
public RoleDAO getRoleDao() {
return getDao(RoleDAO.class);
}
/**
* Returns the singleton instance of {@link AsyncTaskDAO}.
*
* @return the dao
*/
public AsyncTaskDAO getAsyncTaskDao() {
return getDao(AsyncTaskDAO.class);
}
/**
* Retrieves the singleton instance of {@link AdGroupDAO}.
*
* @return the dao
*/
public AdGroupDAO getAdGroupDao() {
return getDao(AdGroupDAO.class);
}
/**
* Returns the singleton instance of {@link ProviderDao}.
*
* @return the dao
*/
public ProviderDao getProviderDao() {
return getDao(ProviderDao.class);
}
/**
* Returns the singleton instance of {@link NetworkDao}.
*
* @return the dao
*/
public NetworkDao getNetworkDao() {
return getDao(NetworkDao.class);
}
/**
* Returns the singleton instance of {@link NetworkViewDao}.
*
* @return the dao
*/
public NetworkViewDao getNetworkViewDao() {
return getDao(NetworkViewDao.class);
}
/**
* Returns the singleton instance of {@link NetworkClusterDao}.
*
* @return the dao
*/
public NetworkClusterDao getNetworkClusterDao() {
return getDao(NetworkClusterDao.class);
}
/**
* Returns the singleton instance of {@link PermissionDAO}.
*
* @return the dao
*/
public PermissionDAO getPermissionDao() {
return getDao(PermissionDAO.class);
}
/**
* Returns the singleton instance of {@link StorageDomainDAO}.
*
* @return the dao
*/
public StorageDomainDAO getStorageDomainDao() {
return getDao(StorageDomainDAO.class);
}
/**
* Returns the singleton instance of {@link StorageDomainStaticDAO}.
*
* @return the dao
*/
public StorageDomainStaticDAO getStorageDomainStaticDao() {
return getDao(StorageDomainStaticDAO.class);
}
/**
* Returns the singleton instance of {@link StorageDomainDynamicDAO}.
*
* @return the dao
*/
public StorageDomainDynamicDAO getStorageDomainDynamicDao() {
return getDao(StorageDomainDynamicDAO.class);
}
/**
* Returns the singleton instance of {@link RepoFileMetaDataDAO}.
*
* @return Repository file meta data dao.
*/
public RepoFileMetaDataDAO getRepoFileMetaDataDao() {
return getDao(RepoFileMetaDataDAO.class);
}
/**
* Retrieves the singleton instance of {@link SnapshotDao}.
*
* @return the dao
*/
public SnapshotDao getSnapshotDao() {
return getDao(SnapshotDao.class);
}
/**
* Retrieves the singleton instance of {@link ImageStorageDomainMapDao}.
*
* @return the dao
*/
public ImageStorageDomainMapDao getImageStorageDomainMapDao() {
return getDao(ImageStorageDomainMapDao.class);
}
/**
* Retrieves the singleton instance of {@link StoragePoolDAO}.
*
* @return the dao
*/
public StoragePoolDAO getStoragePoolDao() {
return getDao(StoragePoolDAO.class);
}
/**
* Retrieves the singleton instance of {@link StoragePoolIsoMapDAO}.
*
* @return the dao
*/
public StoragePoolIsoMapDAO getStoragePoolIsoMapDao() {
return getDao(StoragePoolIsoMapDAO.class);
}
/**
* Retrieves the singleton instance of {@link StorageServerConnectionDAO}.
*
* @return the dao
*/
public StorageServerConnectionDAO getStorageServerConnectionDao() {
return getDao(StorageServerConnectionDAO.class);
}
/**
* Retrieves the singleton instance of {@link StorageServerConnectionLunMapDAO}.
*
* @return the dao
*/
public StorageServerConnectionLunMapDAO getStorageServerConnectionLunMapDao() {
return getDao(StorageServerConnectionLunMapDAO.class);
}
/**
* Returns the singleton instance of {@link VdcOptionDAO}.
*
* @return the dao
*/
public VdcOptionDAO getVdcOptionDao() {
return getDao(VdcOptionDAO.class);
}
/**
* Returns the singleton instance of {@link BusinessEntitySnapshotDAO}.
*
* @return the dao
*/
public BusinessEntitySnapshotDAO getBusinessEntitySnapshotDao() {
return getDao(BusinessEntitySnapshotDAO.class);
}
/**
* Returns the singleton instance of {@link VmPoolDAO}.
*
* @return the dao
*/
public VmPoolDAO getVmPoolDao() {
return getDao(VmPoolDAO.class);
}
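/**
* Returns the singleton instance of {@link VmStaticDAO}.
*
* @return the dao
*/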
public VmStaticDAO getVmStaticDao() {
return getDao(VmStaticDAO.class);
}
/**
* Returns the singleton instance of {@link VmStatisticsDAO}.
*
* @return the dao
*/
public VmStatisticsDAO getVmStatisticsDao() {
return getDao(VmStatisticsDAO.class);
}
/**
* Returns the singleton instance of {@link QuotaDAO}.
*
* @return the dao
*/
public QuotaDAO getQuotaDao() {
return getDao(QuotaDAO.class);
}
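/**
* Returns the singleton instance of {@link VmDeviceDAO}.
*
* @return the dao
*/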
public VmDeviceDAO getVmDeviceDao() {
return getDao(VmDeviceDAO.class);
}
/**
* Returns the singleton instance of {@link JobDao}.
*
* @return the dao
*/
public JobDao getJobDao() {
return getDao(JobDao.class);
}
/**
* Returns the singleton instance of {@link JobSubjectEntityDao}.
*
* @return the dao
*/
public JobSubjectEntityDao getJobSubjectEntityDao() {
return getDao(JobSubjectEntityDao.class);
}
/**
* Returns the singleton instance of {@link StepDao}.
*
* @return the dao
*/
public StepDao getStepDao() {
return getDao(StepDao.class);
}
/**
* Returns the singleton instance of {@link GlusterVolumeDao}.
*
* @return the dao
*/
public GlusterVolumeDao getGlusterVolumeDao() {
return getDao(GlusterVolumeDao.class);
}
/**
* Returns the singleton instance of {@link GlusterBrickDao}.
*
* @return the dao
*/
public GlusterBrickDao getGlusterBrickDao() {
return getDao(GlusterBrickDao.class);
}
/**
* Returns the singleton instance of {@link GlusterOptionDao}.
*
* @return the dao
*/
public GlusterOptionDao getGlusterOptionDao() {
return getDao(GlusterOptionDao.class);
}
/**
* Returns the singleton instance of {@link GlusterServiceDao}.
*
* @return the dao
*/
public GlusterServiceDao getGlusterServiceDao() {
return getDao(GlusterServiceDao.class);
}
/**
* Returns the singleton instance of {@link GlusterServerServiceDao}.
*
* @return the dao
*/
public GlusterServerServiceDao getGlusterServerServiceDao() {
return getDao(GlusterServerServiceDao.class);
}
/**
* Returns the singleton instance of {@link GlusterClusterServiceDao}.
*
* @return the dao
*/
public GlusterClusterServiceDao getGlusterClusterServiceDao() {
return getDao(GlusterClusterServiceDao.class);
}
/**
* Returns the singleton instance of {@link GlusterHooksDao}.
*
* @return the dao
*/
public GlusterHooksDao getGlusterHooksDao() {
return getDao(GlusterHooksDao.class);
}
/**
* Returns the singleton instance of {@link GlusterServerDao}.
*
* @return the dao
*/
public GlusterServerDao getGlusterServerDao() {
return getDao(GlusterServerDao.class);
}
public void setOnStartConnectionTimeout(int onStartConnectionTimeout) {
this.onStartConnectionTimeout = onStartConnectionTimeout;
}
public int getOnStartConnectionTimeout() {
return onStartConnectionTimeout;
}
public void setConnectionCheckInterval(int connectionCheckInterval) {
this.connectionCheckInterval = connectionCheckInterval;
}
public int getConnectionCheckInterval() {
return connectionCheckInterval;
}
/**
* Returns the singleton instance of {@link VmGuestAgentInterfaceDao}.
*
* @return the dao
*/
public VmGuestAgentInterfaceDao getVmGuestAgentInterfaceDao() {
return getDao(VmGuestAgentInterfaceDao.class);
}
}
| core: adding JobDao and StepDao to DAO map.
Change-Id: I42231d0a58e9629e848dc602445798d3e674d9a6
Signed-off-by: Eli Mesika <[email protected]>
Bug-Url: https://bugzilla.redhat.com/show_bug.cgi?id=872719
| backend/manager/modules/dal/src/main/java/org/ovirt/engine/core/dal/dbbroker/DbFacade.java | core: adding JobDao and StepDao to DAO map. | <ide><path>ackend/manager/modules/dal/src/main/java/org/ovirt/engine/core/dal/dbbroker/DbFacade.java
<ide> import org.ovirt.engine.core.common.businessentities.network.VmNetworkStatistics;
<ide> import org.ovirt.engine.core.common.businessentities.permissions;
<ide> import org.ovirt.engine.core.common.businessentities.vds_spm_id_map;
<add>import org.ovirt.engine.core.common.job.Job;
<add>import org.ovirt.engine.core.common.job.Step;
<ide> import org.ovirt.engine.core.compat.Guid;
<ide> import org.ovirt.engine.core.dao.ActionGroupDAO;
<ide> import org.ovirt.engine.core.dao.AdGroupDAO;
<ide> put(image_storage_domain_map.class, ImageStorageDomainMapDao.class);
<ide> put(permissions.class, PermissionDAO.class);
<ide> put(Image.class, ImageDao.class);
<add> put(Job.class, JobDao.class);
<add> put(Step.class, StepDao.class);
<ide> }
<ide> };
<ide> |
|
Java | mit | error: pathspec 'src/main/java/com/rarchives/ripme/ripper/rippers/NhentaiRipper.java' did not match any file(s) known to git
| 98438521a8f817de2162783e21e6eb1e48a71bb2 | 1 | metaprime/ripme,Wiiplay123/ripme_da_desc,4pr0n/ripme,sleaze/ripme,metaprime/ripme,cyian-1756/ripme,rephormat/ripme,4pr0n/ripme,rephormat/ripme,sleaze/ripme,sleaze/ripme,rephormat/ripme,metaprime/ripme | package com.rarchives.ripme.ripper.rippers;
import com.rarchives.ripme.ripper.AbstractHTMLRipper;
import com.rarchives.ripme.ripper.DownloadThreadPool;
import com.rarchives.ripme.utils.Http;
import com.rarchives.ripme.utils.Utils;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import org.jsoup.select.Elements;
import java.io.File;
import java.io.IOException;
import java.net.MalformedURLException;
import java.net.URL;
import java.util.ArrayList;
import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
public class NhentaiRipper extends AbstractHTMLRipper {
// All sleep times are in milliseconds
private static final int IMAGE_SLEEP_TIME = 1500;
private String albumTitle;
private Document firstPage;
// Thread pool for finding direct image links from "image" pages (html)
private DownloadThreadPool nhentaiThreadPool = new DownloadThreadPool("nhentai");
@Override
public DownloadThreadPool getThreadPool() {
return nhentaiThreadPool;
}
public NhentaiRipper(URL url) throws IOException {
super(url);
}
@Override
public String getDomain() {
return "nhentai.net";
}
@Override
public String getHost() {
return "nhentai";
}
@Override
public String getAlbumTitle(URL url) throws MalformedURLException {
if (firstPage == null) {
try {
firstPage = Http.url(url).get();
} catch (IOException e) {
e.printStackTrace();
}
}
String title = firstPage.select("#info > h1").text();
if (title == null) {
return getAlbumTitle(url);
}
return title;
}
@Override
public String getGID(URL url) throws MalformedURLException {
// Ex: https://nhentai.net/g/159174/
Pattern p = Pattern.compile("^https?://nhentai\\.net/g/(\\d+).*$");
Matcher m = p.matcher(url.toExternalForm());
if (m.matches()) {
// Return the text contained between () in the regex - 159174 in this case
return m.group(1);
}
throw new MalformedURLException("Expected nhentai.net URL format: " +
"nhentai.net/g/albumid - got " + url + " instead");
}
@Override
public Document getFirstPage() throws IOException {
if (firstPage == null) {
firstPage = Http.url(url).get();
}
return firstPage;
}
@Override
public List<String> getURLsFromPage(Document page) {
List<String> imageURLs = new ArrayList<String>();
Elements thumbs = page.select(".gallerythumb");
for (Element el : thumbs) {
String imageUrl = el.attr("href");
imageURLs.add("https://nhentai.net" + imageUrl);
}
return imageURLs;
}
@Override
public void downloadURL(URL url, int index) {
NHentaiImageThread t = new NHentaiImageThread(url, index, this.workingDir);
nhentaiThreadPool.addThread(t);
try {
Thread.sleep(IMAGE_SLEEP_TIME);
} catch (InterruptedException e) {
logger.warn("Interrupted while waiting to load next image", e);
}
}
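// Worker thread: loads a single "image" page (html) and downloads the direct image it links to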
private class NHentaiImageThread extends Thread {
private URL url;
private int index;
private File workingDir;
NHentaiImageThread(URL url, int index, File workingDir) {
super();
this.url = url;
this.index = index;
this.workingDir = workingDir;
}
@Override
public void run() {
fetchImage();
}
private void fetchImage() {
try {
//Document doc = getPageWithRetries(this.url);
Document doc = Http.url(this.url).get();
// Find image
Elements images = doc.select("#image-container > a > img");
if (images.size() == 0) {
// Attempt to find the image elsewhere (Issue #41)
images = doc.select("img#img");
if (images.size() == 0) {
logger.warn("Image not found at " + this.url);
return;
}
}
Element image = images.first();
String imgsrc = image.attr("src");
logger.info("Found URL " + imgsrc + " via " + images.get(0));
Pattern p = Pattern.compile("^https?://i.nhentai.net/galleries/\\d+/(.+)$");
Matcher m = p.matcher(imgsrc);
if (m.matches()) {
// Manually discover filename from URL
String savePath = this.workingDir + File.separator;
if (Utils.getConfigBoolean("download.save_order", true)) {
savePath += String.format("%03d_", index);
}
savePath += m.group(1);
addURLToDownload(new URL(imgsrc), new File(savePath));
} else {
// Provide prefix and let the AbstractRipper "guess" the filename
String prefix = "";
if (Utils.getConfigBoolean("download.save_order", true)) {
prefix = String.format("%03d_", index);
}
addURLToDownload(new URL(imgsrc), prefix);
}
} catch (IOException e) {
logger.error("[!] Exception while loading/parsing " + this.url, e);
}
}
}
}
| src/main/java/com/rarchives/ripme/ripper/rippers/NhentaiRipper.java | Added new ripper for http://nhentai.net/
| src/main/java/com/rarchives/ripme/ripper/rippers/NhentaiRipper.java | Added new ripper for http://nhentai.net/ | <ide><path>rc/main/java/com/rarchives/ripme/ripper/rippers/NhentaiRipper.java
<add>package com.rarchives.ripme.ripper.rippers;
<add>
<add>import com.rarchives.ripme.ripper.AbstractHTMLRipper;
<add>import com.rarchives.ripme.ripper.DownloadThreadPool;
<add>import com.rarchives.ripme.utils.Http;
<add>import com.rarchives.ripme.utils.Utils;
<add>import org.jsoup.nodes.Document;
<add>import org.jsoup.nodes.Element;
<add>import org.jsoup.select.Elements;
<add>
<add>import java.io.File;
<add>import java.io.IOException;
<add>import java.net.MalformedURLException;
<add>import java.net.URL;
<add>import java.util.ArrayList;
<add>import java.util.List;
<add>import java.util.regex.Matcher;
<add>import java.util.regex.Pattern;
<add>
<add>public class NhentaiRipper extends AbstractHTMLRipper {
<add>
<add> // All sleep times are in milliseconds
<add> private static final int IMAGE_SLEEP_TIME = 1500;
<add>
<add> private String albumTitle;
<add> private Document firstPage;
<add>
<add> // Thread pool for finding direct image links from "image" pages (html)
<add> private DownloadThreadPool nhentaiThreadPool = new DownloadThreadPool("nhentai");
<add>
<add> @Override
<add> public DownloadThreadPool getThreadPool() {
<add> return nhentaiThreadPool;
<add> }
<add>
<add> public NhentaiRipper(URL url) throws IOException {
<add> super(url);
<add> }
<add>
<add> @Override
<add> public String getDomain() {
<add> return "nhentai.net";
<add> }
<add>
<add> @Override
<add> public String getHost() {
<add> return "nhentai";
<add> }
<add>
<add> @Override
<add> public String getAlbumTitle(URL url) throws MalformedURLException {
<add> if (firstPage == null) {
<add> try {
<add> firstPage = Http.url(url).get();
<add> } catch (IOException e) {
<add> e.printStackTrace();
<add> }
<add> }
<add>
<add> String title = firstPage.select("#info > h1").text();
<add> if (title == null) {
<add> return getAlbumTitle(url);
<add> }
<add> return title;
<add> }
<add>
<add> @Override
<add> public String getGID(URL url) throws MalformedURLException {
<add> // Ex: https://nhentai.net/g/159174/
<add> Pattern p = Pattern.compile("^https?://nhentai\\.net/g/(\\d+).*$");
<add> Matcher m = p.matcher(url.toExternalForm());
<add> if (m.matches()) {
<add> // Return the text contained between () in the regex - 159174 in this case
<add> return m.group(1);
<add> }
<add> throw new MalformedURLException("Expected nhentai.net URL format: " +
<add> "nhentai.net/g/albumid - got " + url + " instead");
<add> }
<add>
<add> @Override
<add> public Document getFirstPage() throws IOException {
<add> if (firstPage == null) {
<add> firstPage = Http.url(url).get();
<add> }
<add> return firstPage;
<add> }
<add>
<add> @Override
<add> public List<String> getURLsFromPage(Document page) {
<add> List<String> imageURLs = new ArrayList<String>();
<add> Elements thumbs = page.select(".gallerythumb");
<add> for (Element el : thumbs) {
<add> String imageUrl = el.attr("href");
<add> imageURLs.add("https://nhentai.net" + imageUrl);
<add> }
<add> return imageURLs;
<add> }
<add>
<add> @Override
<add> public void downloadURL(URL url, int index) {
<add> NHentaiImageThread t = new NHentaiImageThread(url, index, this.workingDir);
<add> nhentaiThreadPool.addThread(t);
<add> try {
<add> Thread.sleep(IMAGE_SLEEP_TIME);
<add> } catch (InterruptedException e) {
<add> logger.warn("Interrupted while waiting to load next image", e);
<add> }
<add> }
<add>
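<add> // Worker thread: loads a single "image" page (html) and downloads the direct image it links to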
<add> private class NHentaiImageThread extends Thread {
<add>
<add> private URL url;
<add> private int index;
<add> private File workingDir;
<add>
<add> NHentaiImageThread(URL url, int index, File workingDir) {
<add> super();
<add> this.url = url;
<add> this.index = index;
<add> this.workingDir = workingDir;
<add> }
<add>
<add> @Override
<add> public void run() {
<add> fetchImage();
<add> }
<add>
<add> private void fetchImage() {
<add> try {
<add> //Document doc = getPageWithRetries(this.url);
<add> Document doc = Http.url(this.url).get();
<add>
<add> // Find image
<add> Elements images = doc.select("#image-container > a > img");
<add> if (images.size() == 0) {
<add> // Attempt to find image elsewise (Issue #41)
<add> images = doc.select("img#img");
<add> if (images.size() == 0) {
<add> logger.warn("Image not found at " + this.url);
<add> return;
<add> }
<add> }
<add> Element image = images.first();
<add> String imgsrc = image.attr("src");
<add> logger.info("Found URL " + imgsrc + " via " + images.get(0));
<add>
<add> Pattern p = Pattern.compile("^https?://i.nhentai.net/galleries/\\d+/(.+)$");
<add> Matcher m = p.matcher(imgsrc);
<add> if (m.matches()) {
<add> // Manually discover filename from URL
<add> String savePath = this.workingDir + File.separator;
<add> if (Utils.getConfigBoolean("download.save_order", true)) {
<add> savePath += String.format("%03d_", index);
<add> }
<add> savePath += m.group(1);
<add> addURLToDownload(new URL(imgsrc), new File(savePath));
<add> } else {
<add> // Provide prefix and let the AbstractRipper "guess" the filename
<add> String prefix = "";
<add> if (Utils.getConfigBoolean("download.save_order", true)) {
<add> prefix = String.format("%03d_", index);
<add> }
<add> addURLToDownload(new URL(imgsrc), prefix);
<add> }
<add> } catch (IOException e) {
<add> logger.error("[!] Exception while loading/parsing " + this.url, e);
<add> }
<add> }
<add>
<add> }
<add>} |
|
Java | apache-2.0 | 7879f66e784546325b775bd1cd7f22e9a09b023e | 0 | Athou/commafeed,Hubcapp/commafeed,RavenB/commafeed,ebraminio/commafeed,syshk/commafeed,ahmadassaf/CommaFeed-RSS-Reader,zhangzuoqiang/commafeed,ebraminio/commafeed,wesley1001/commafeed,ahmadassaf/CommaFeed-RSS-Reader,Hubcapp/commafeed,ebraminio/commafeed,syshk/commafeed,zhangzuoqiang/commafeed,RavenB/commafeed,ahmadassaf/CommaFeed-RSS-Reader,Athou/commafeed,syshk/commafeed,zhangzuoqiang/commafeed,wesley1001/commafeed,Athou/commafeed,Athou/commafeed,wesley1001/commafeed,Hubcapp/commafeed,ebraminio/commafeed,RavenB/commafeed,syshk/commafeed,zhangzuoqiang/commafeed,Hubcapp/commafeed,wesley1001/commafeed,ahmadassaf/CommaFeed-RSS-Reader,RavenB/commafeed | package com.commafeed.backend.feed;
import org.junit.Assert;
import org.junit.Test;
public class FeedUtilsTest {
@Test
public void testNormalization() {
String urla1 = "http://example.com/hello?a=1&b=2";
String urla2 = "http://www.example.com/hello?a=1&b=2";
String urla3 = "http://EXAmPLe.com/HELLo?a=1&b=2";
String urla4 = "http://example.com/hello?b=2&a=1";
String urla5 = "https://example.com/hello?a=1&b=2";
String urlb1 = "http://ftr.fivefilters.org/makefulltextfeed.php?url=http%3A%2F%2Ffeeds.howtogeek.com%2FHowToGeek&max=10&summary=1";
String urlb2 = "http://ftr.fivefilters.org/makefulltextfeed.php?url=http://feeds.howtogeek.com/HowToGeek&max=10&summary=1";
String urlc1 = "http://feeds.feedburner.com/Frandroid";
String urlc2 = "http://feeds2.feedburner.com/frandroid";
String urlc3 = "http://feedproxy.google.com/frandroid";
String urlc4 = "http://feeds.feedburner.com/Frandroid/";
String urlc5 = "http://feeds.feedburner.com/Frandroid?format=rss";
String urld1 = "http://fivefilters.org/content-only/makefulltextfeed.php?url=http://feeds.feedburner.com/Frandroid";
String urld2 = "http://fivefilters.org/content-only/makefulltextfeed.php?url=http://feeds2.feedburner.com/Frandroid";
Assert.assertEquals(FeedUtils.normalizeURL(urla1), FeedUtils.normalizeURL(urla2));
Assert.assertEquals(FeedUtils.normalizeURL(urla1), FeedUtils.normalizeURL(urla3));
Assert.assertEquals(FeedUtils.normalizeURL(urla1), FeedUtils.normalizeURL(urla4));
Assert.assertEquals(FeedUtils.normalizeURL(urla1), FeedUtils.normalizeURL(urla5));
Assert.assertEquals(FeedUtils.normalizeURL(urlb1), FeedUtils.normalizeURL(urlb2));
Assert.assertEquals(FeedUtils.normalizeURL(urlc1), FeedUtils.normalizeURL(urlc2));
Assert.assertEquals(FeedUtils.normalizeURL(urlc1), FeedUtils.normalizeURL(urlc3));
Assert.assertEquals(FeedUtils.normalizeURL(urlc1), FeedUtils.normalizeURL(urlc4));
Assert.assertEquals(FeedUtils.normalizeURL(urlc1), FeedUtils.normalizeURL(urlc5));
Assert.assertNotEquals(FeedUtils.normalizeURL(urld1), FeedUtils.normalizeURL(urld2));
}
@Test
public void testToAbsoluteUrl() {
String expected = "http://a.com/blog/entry/1";
Assert.assertEquals(expected, FeedUtils.toAbsoluteUrl("http://a.com/blog/entry/1", "http://a.com/feed/", "http://a.com/feed/"));
Assert.assertEquals(expected, FeedUtils.toAbsoluteUrl("http://a.com/blog/entry/1", "http://a.com/feed", "http://a.com/feed"));
Assert.assertEquals(expected, FeedUtils.toAbsoluteUrl("../blog/entry/1", "http://a.com/feed/", "http://a.com/feed/"));
Assert.assertEquals(expected, FeedUtils.toAbsoluteUrl("../blog/entry/1", "feed.xml", "http://a.com/feed/feed.xml"));
Assert.assertEquals("http://ergoemacs.org/emacs/elisp_all_about_lines.html",
FeedUtils.toAbsoluteUrl("elisp_all_about_lines.html", "blog.xml", "http://ergoemacs.org/emacs/blog.xml"));
}
@Test
public void testExtractDeclaredEncoding() {
Assert.assertNull(FeedUtils.extractDeclaredEncoding("<?xml ?>".getBytes()));
Assert.assertNull(FeedUtils.extractDeclaredEncoding("<feed></feed>".getBytes()));
Assert.assertEquals("UTF-8", FeedUtils.extractDeclaredEncoding("<?xml encoding=\"UTF-8\" ?>".getBytes()));
Assert.assertEquals("UTF-8", FeedUtils.extractDeclaredEncoding("<?xml encoding='UTF-8' ?>".getBytes()));
Assert.assertEquals("UTF-8", FeedUtils.extractDeclaredEncoding("<?xml encoding='UTF-8'?>".getBytes()));
}
@Test
public void testReplaceHtmlEntitiesWithNumericEntities() {
String source = "<source>T&acute;l&acute;phone &prime;</source>";
Assert.assertEquals("<source>T&#180;l&#180;phone &#8242;</source>", FeedUtils.replaceHtmlEntitiesWithNumericEntities(source));
}
}
| src/test/java/com/commafeed/backend/feed/FeedUtilsTest.java | package com.commafeed.backend.feed;
import org.junit.Assert;
import org.junit.Test;
public class FeedUtilsTest {
@Test
public void testNormalization() {
String urla1 = "http://example.com/hello?a=1&b=2";
String urla2 = "http://www.example.com/hello?a=1&b=2";
String urla3 = "http://EXAmPLe.com/HELLo?a=1&b=2";
String urla4 = "http://example.com/hello?b=2&a=1";
String urla5 = "https://example.com/hello?a=1&b=2";
String urlb1 = "http://ftr.fivefilters.org/makefulltextfeed.php?url=http%3A%2F%2Ffeeds.howtogeek.com%2FHowToGeek&max=10&summary=1";
String urlb2 = "http://ftr.fivefilters.org/makefulltextfeed.php?url=http://feeds.howtogeek.com/HowToGeek&max=10&summary=1";
String urlc1 = "http://feeds.feedburner.com/Frandroid";
String urlc2 = "http://feeds2.feedburner.com/frandroid";
String urlc3 = "http://feedproxy.google.com/frandroid";
String urlc4 = "http://feeds.feedburner.com/Frandroid/";
String urlc5 = "http://feeds.feedburner.com/Frandroid?format=rss";
String urld1 = "http://fivefilters.org/content-only/makefulltextfeed.php?url=http://feeds.feedburner.com/Frandroid";
String urld2 = "http://fivefilters.org/content-only/makefulltextfeed.php?url=http://feeds2.feedburner.com/Frandroid";
Assert.assertEquals(FeedUtils.normalizeURL(urla1), FeedUtils.normalizeURL(urla2));
Assert.assertEquals(FeedUtils.normalizeURL(urla1), FeedUtils.normalizeURL(urla3));
Assert.assertEquals(FeedUtils.normalizeURL(urla1), FeedUtils.normalizeURL(urla4));
Assert.assertEquals(FeedUtils.normalizeURL(urla1), FeedUtils.normalizeURL(urla5));
Assert.assertEquals(FeedUtils.normalizeURL(urlb1), FeedUtils.normalizeURL(urlb2));
Assert.assertEquals(FeedUtils.normalizeURL(urlc1), FeedUtils.normalizeURL(urlc2));
Assert.assertEquals(FeedUtils.normalizeURL(urlc1), FeedUtils.normalizeURL(urlc3));
Assert.assertEquals(FeedUtils.normalizeURL(urlc1), FeedUtils.normalizeURL(urlc4));
Assert.assertEquals(FeedUtils.normalizeURL(urlc1), FeedUtils.normalizeURL(urlc5));
Assert.assertNotEquals(FeedUtils.normalizeURL(urld1), FeedUtils.normalizeURL(urld2));
}
@Test
public void testToAbsoluteUrl() {
String expected = "http://a.com/blog/entry/1";
Assert.assertEquals(expected, FeedUtils.toAbsoluteUrl("http://a.com/blog/entry/1", "http://a.com/feed/", "http://a.com/feed/"));
Assert.assertEquals(expected, FeedUtils.toAbsoluteUrl("http://a.com/blog/entry/1", "http://a.com/feed", "http://a.com/feed"));
Assert.assertEquals(expected, FeedUtils.toAbsoluteUrl("../blog/entry/1", "http://a.com/feed/", "http://a.com/feed/"));
Assert.assertEquals(expected, FeedUtils.toAbsoluteUrl("../blog/entry/1", "feed.xml", "http://a.com/feed/feed.xml"));
Assert.assertEquals("http://ergoemacs.org/emacs/elisp_all_about_lines.html",
FeedUtils.toAbsoluteUrl("elisp_all_about_lines.html", "blog.xml", "http://ergoemacs.org/emacs/blog.xml"));
}
@Test
public void testExtractDeclaredEncoding() {
Assert.assertNull(FeedUtils.extractDeclaredEncoding("<?xml ?>".getBytes()));
Assert.assertNull(FeedUtils.extractDeclaredEncoding("<feed></feed>".getBytes()));
Assert.assertEquals("UTF-8", FeedUtils.extractDeclaredEncoding("<?xml encoding=\"UTF-8\" ?>".getBytes()));
Assert.assertEquals("UTF-8", FeedUtils.extractDeclaredEncoding("<?xml encoding='UTF-8' ?>".getBytes()));
Assert.assertEquals("UTF-8", FeedUtils.extractDeclaredEncoding("<?xml encoding='UTF-8'?>".getBytes()));
}
}
| test for html entities
| src/test/java/com/commafeed/backend/feed/FeedUtilsTest.java | test for html entities | <ide><path>rc/test/java/com/commafeed/backend/feed/FeedUtilsTest.java
<ide> Assert.assertEquals("UTF-8", FeedUtils.extractDeclaredEncoding("<?xml encoding='UTF-8' ?>".getBytes()));
<ide> Assert.assertEquals("UTF-8", FeedUtils.extractDeclaredEncoding("<?xml encoding='UTF-8'?>".getBytes()));
<ide> }
<add>
<add> @Test
<add> public void testReplaceHtmlEntitiesWithNumericEntities() {
<add> String source = "<source>T&acute;l&acute;phone &prime;</source>";
<add> Assert.assertEquals("<source>T&#180;l&#180;phone &#8242;</source>", FeedUtils.replaceHtmlEntitiesWithNumericEntities(source));
<add> }
<ide> } |
|
Java | apache-2.0 | 55120872a3574fd86e1838830c35a19cf06c5b09 | 0 | vase4kin/TeamCityApp,vase4kin/TeamCityApp,vase4kin/TeamCityApp | /*
* Copyright 2016 Andrey Tolpeev
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.github.vase4kin.teamcityapp.login.view;
import android.support.test.InstrumentationRegistry;
import android.support.test.runner.AndroidJUnit4;
import com.github.vase4kin.teamcityapp.R;
import com.github.vase4kin.teamcityapp.TeamCityApplication;
import com.github.vase4kin.teamcityapp.base.extractor.BundleExtractorValues;
import com.github.vase4kin.teamcityapp.crypto.CryptoManager;
import com.github.vase4kin.teamcityapp.dagger.components.AppComponent;
import com.github.vase4kin.teamcityapp.dagger.modules.AppModule;
import com.github.vase4kin.teamcityapp.helper.CustomIntentsTestRule;
import com.github.vase4kin.teamcityapp.root.view.RootProjectsActivity;
import com.github.vase4kin.teamcityapp.storage.SharedUserStorage;
import org.junit.Before;
import org.junit.Ignore;
import org.junit.Rule;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.ArgumentCaptor;
import org.mockito.Captor;
import org.mockito.Matchers;
import org.mockito.Mock;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;
import java.io.IOException;
import javax.inject.Named;
import it.cosenonjaviste.daggermock.DaggerMockRule;
import okhttp3.Call;
import okhttp3.Callback;
import okhttp3.OkHttpClient;
import okhttp3.Protocol;
import okhttp3.Request;
import okhttp3.Response;
import static android.support.test.espresso.Espresso.onView;
import static android.support.test.espresso.action.ViewActions.clearText;
import static android.support.test.espresso.action.ViewActions.click;
import static android.support.test.espresso.action.ViewActions.closeSoftKeyboard;
import static android.support.test.espresso.action.ViewActions.pressImeActionButton;
import static android.support.test.espresso.action.ViewActions.typeText;
import static android.support.test.espresso.assertion.ViewAssertions.matches;
import static android.support.test.espresso.intent.Intents.intended;
import static android.support.test.espresso.intent.matcher.BundleMatchers.hasEntry;
import static android.support.test.espresso.intent.matcher.IntentMatchers.hasComponent;
import static android.support.test.espresso.intent.matcher.IntentMatchers.hasExtras;
import static android.support.test.espresso.matcher.ViewMatchers.assertThat;
import static android.support.test.espresso.matcher.ViewMatchers.isDisplayed;
import static android.support.test.espresso.matcher.ViewMatchers.withId;
import static android.support.test.espresso.matcher.ViewMatchers.withText;
import static com.github.vase4kin.teamcityapp.dagger.modules.AppModule.CLIENT_AUTH;
import static com.github.vase4kin.teamcityapp.dagger.modules.AppModule.CLIENT_BASE;
import static com.github.vase4kin.teamcityapp.dagger.modules.AppModule.CLIENT_BASE_UNSAFE;
import static com.github.vase4kin.teamcityapp.dagger.modules.Mocks.URL;
import static org.hamcrest.CoreMatchers.containsString;
import static org.hamcrest.CoreMatchers.equalTo;
import static org.hamcrest.CoreMatchers.is;
import static org.hamcrest.core.AllOf.allOf;
import static org.mockito.Mockito.doAnswer;
import static org.mockito.Mockito.when;
/**
* Tests for {@link LoginActivity} with mocked internet connection
*/
@RunWith(AndroidJUnit4.class)
public class LoginActivityTest {
private static final String INPUT_URL = URL.replace("https://", "");
private static final String MESSAGE_EMPTY = "";
@Rule
public DaggerMockRule<AppComponent> mDaggerRule = new DaggerMockRule<>(AppComponent.class, new AppModule((TeamCityApplication) InstrumentationRegistry.getInstrumentation().getTargetContext().getApplicationContext()))
.set(new DaggerMockRule.ComponentSetter<AppComponent>() {
@Override
public void setComponent(AppComponent appComponent) {
TeamCityApplication app = (TeamCityApplication) InstrumentationRegistry.getInstrumentation().getTargetContext().getApplicationContext();
app.setAppInjector(appComponent);
}
});
@Rule
public CustomIntentsTestRule<LoginActivity> mActivityRule = new CustomIntentsTestRule<>(LoginActivity.class);
@Captor
private ArgumentCaptor<Callback> mCallbackArgumentCaptor;
@Named(CLIENT_BASE)
@Mock
private OkHttpClient okHttpClient;
@Named(CLIENT_BASE_UNSAFE)
@Mock
private OkHttpClient unsafeOkHttpClient;
@Named(CLIENT_AUTH)
@Mock
private OkHttpClient mClientAuth;
@Mock
private CryptoManager mCryptoManager;
@Mock
private Call mCall;
@Before
public void setUp() {
TeamCityApplication app = (TeamCityApplication) InstrumentationRegistry.getInstrumentation().getTargetContext().getApplicationContext();
app.getAppInjector().sharedUserStorage().clearAll();
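// Both base OkHttp clients hand back the same mocked Call; each test captures its enqueue callback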
when(okHttpClient.newCall(Matchers.any(Request.class))).thenReturn(mCall);
when(unsafeOkHttpClient.newCall(Matchers.any(Request.class))).thenReturn(mCall);
mActivityRule.launchActivity(null);
}
/**
* Verifies that user can be logged in as guest user with correct account url
*/
@Test
public void testUserCanCreateGuestUserAccountWithCorrectUrl() throws Throwable {
final String urlWithPath = "https://teamcity.com/server";
String savedUrl = urlWithPath.concat("/");
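// Stub the server-check call: answer the captured callback with HTTP 200 so account creation succeeds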
doAnswer(new Answer() {
@Override
public Object answer(InvocationOnMock invocation) throws Throwable {
mCallbackArgumentCaptor.getValue().onResponse(
mCall,
new Response.Builder()
.request(new Request.Builder().url(urlWithPath).build())
.protocol(Protocol.HTTP_1_0)
.message(MESSAGE_EMPTY)
.code(200)
.build());
return null;
}
}).when(mCall).enqueue(mCallbackArgumentCaptor.capture());
onView(withId(R.id.teamcity_url)).perform(typeText(urlWithPath.replace("https://", "")), closeSoftKeyboard());
onView(withId(R.id.guest_user_switch)).perform(click());
onView(withId(R.id.btn_login)).perform(click());
intended(allOf(
hasComponent(RootProjectsActivity.class.getName()),
hasExtras(hasEntry(equalTo(BundleExtractorValues.IS_NEW_ACCOUNT_CREATED), equalTo(true)))));
TeamCityApplication app = (TeamCityApplication) InstrumentationRegistry.getInstrumentation().getTargetContext().getApplicationContext();
SharedUserStorage storageUtils = app.getRestApiInjector().sharedUserStorage();
assertThat(storageUtils.hasGuestAccountWithUrl(savedUrl), is(true));
assertThat(storageUtils.getActiveUser().getTeamcityUrl(), is(savedUrl));
assertThat(storageUtils.getActiveUser().isSslDisabled(), is(false));
}
/**
* Verifies that user can be logged in as guest user with correct account url ignoring ssl
*/
@Test
public void testUserCanCreateGuestUserAccountWithCorrectUrlIgnoringSsl() {
final String urlWithPath = "https://teamcity.com/server";
String savedUrl = urlWithPath.concat("/");
doAnswer(new Answer() {
@Override
public Object answer(InvocationOnMock invocation) throws Throwable {
mCallbackArgumentCaptor.getValue().onResponse(
mCall,
new Response.Builder()
.request(new Request.Builder().url(urlWithPath).build())
.protocol(Protocol.HTTP_1_0)
.message(MESSAGE_EMPTY)
.code(200)
.build());
return null;
}
}).when(mCall).enqueue(mCallbackArgumentCaptor.capture());
onView(withId(R.id.teamcity_url)).perform(typeText(urlWithPath.replace("https://", "")), closeSoftKeyboard());
onView(withId(R.id.guest_user_switch)).perform(click());
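// Disabling SSL pops a warning dialog that must be accepted first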
onView(withId(R.id.disable_ssl_switch)).perform(click());
onView(withText(R.string.warning_ssl_dialog_content)).check(matches(isDisplayed()));
onView(withText(R.string.dialog_ok_title)).perform(click());
onView(withId(R.id.btn_login)).perform(click());
intended(allOf(
hasComponent(RootProjectsActivity.class.getName()),
hasExtras(hasEntry(equalTo(BundleExtractorValues.IS_NEW_ACCOUNT_CREATED), equalTo(true)))));
TeamCityApplication app = (TeamCityApplication) InstrumentationRegistry.getInstrumentation().getTargetContext().getApplicationContext();
SharedUserStorage storageUtils = app.getRestApiInjector().sharedUserStorage();
assertThat(storageUtils.hasGuestAccountWithUrl(savedUrl), is(true));
assertThat(storageUtils.getActiveUser().getTeamcityUrl(), is(savedUrl));
assertThat(storageUtils.getActiveUser().isSslDisabled(), is(true));
}
/**
* Verifies that user can be logged in as guest with correct account url
*/
@Test
public void testUserCanCreateAccountWithCorrectUrlByImeButton() throws Throwable {
doAnswer(new Answer() {
@Override
public Object answer(InvocationOnMock invocation) throws Throwable {
mCallbackArgumentCaptor.getValue().onResponse(
mCall,
new Response.Builder()
.request(new Request.Builder().url(URL).build())
.protocol(Protocol.HTTP_1_0)
.message(MESSAGE_EMPTY)
.code(200)
.build());
return null;
}
}).when(mCall).enqueue(mCallbackArgumentCaptor.capture());
onView(withId(R.id.guest_user_switch)).perform(click());
onView(withId(R.id.teamcity_url)).perform(typeText(INPUT_URL), pressImeActionButton());
intended(allOf(
hasComponent(RootProjectsActivity.class.getName()),
hasExtras(hasEntry(equalTo(BundleExtractorValues.IS_NEW_ACCOUNT_CREATED), equalTo(true)))));
TeamCityApplication app = (TeamCityApplication) InstrumentationRegistry.getInstrumentation().getTargetContext().getApplicationContext();
SharedUserStorage storageUtils = app.getRestApiInjector().sharedUserStorage();
assertThat(storageUtils.hasGuestAccountWithUrl(URL), is(true));
assertThat(storageUtils.getActiveUser().getTeamcityUrl(), is(URL));
}
/**
* Verifies that user can be logged in as guest with correct account url
*/
@Test
public void testUserCanCreateAccountWithCorrectUrlWhichContainsPathByImeButton() throws Throwable {
doAnswer(new Answer() {
@Override
public Object answer(InvocationOnMock invocation) throws Throwable {
mCallbackArgumentCaptor.getValue().onResponse(
mCall,
new Response.Builder()
.request(new Request.Builder().url(URL).build())
.protocol(Protocol.HTTP_1_0)
.message(MESSAGE_EMPTY)
.code(200)
.build());
return null;
}
}).when(mCall).enqueue(mCallbackArgumentCaptor.capture());
onView(withId(R.id.guest_user_switch)).perform(click());
onView(withId(R.id.teamcity_url)).perform(typeText(INPUT_URL), pressImeActionButton());
intended(allOf(
hasComponent(RootProjectsActivity.class.getName()),
hasExtras(hasEntry(equalTo(BundleExtractorValues.IS_NEW_ACCOUNT_CREATED), equalTo(true)))));
TeamCityApplication app = (TeamCityApplication) InstrumentationRegistry.getInstrumentation().getTargetContext().getApplicationContext();
SharedUserStorage storageUtils = app.getRestApiInjector().sharedUserStorage();
assertThat(storageUtils.hasGuestAccountWithUrl(URL), is(true));
assertThat(storageUtils.getActiveUser().getTeamcityUrl(), is(URL));
}
/**
* Verifies that the user can log in with a correct account url and credentials
*/
@Ignore
@Test
public void testUserCanCreateUserAccountWithCorrectUrlAndCredentials() throws Throwable {
doAnswer(new Answer() {
@Override
public Object answer(InvocationOnMock invocation) throws Throwable {
mCallbackArgumentCaptor.getValue().onResponse(
mCall,
new Response.Builder()
.request(new Request.Builder().url(URL).build())
.protocol(Protocol.HTTP_1_0)
.code(200)
.build());
return null;
}
}).when(mCall).enqueue(mCallbackArgumentCaptor.capture());
onView(withId(R.id.teamcity_url)).perform(typeText(INPUT_URL), closeSoftKeyboard());
onView(withId(R.id.user_name)).perform(typeText("user"), pressImeActionButton());
onView(withId(R.id.password)).perform(typeText("pass"), pressImeActionButton());
intended(allOf(
hasComponent(RootProjectsActivity.class.getName()),
hasExtras(hasEntry(equalTo(BundleExtractorValues.IS_NEW_ACCOUNT_CREATED), equalTo(true)))));
TeamCityApplication app = (TeamCityApplication) InstrumentationRegistry.getInstrumentation().getTargetContext().getApplicationContext();
SharedUserStorage storageUtils = app.getRestApiInjector().sharedUserStorage();
assertThat(storageUtils.hasAccountWithUrl(URL, "user"), is(true));
assertThat(storageUtils.getActiveUser().getTeamcityUrl(), is(URL));
}
/**
* Verifies that the user is notified with an error message if the server returns a bad response
*/
@Test
public void testUserIsNotifiedIfServerReturnsBadResponse() throws IOException {
doAnswer(new Answer() {
@Override
public Object answer(InvocationOnMock invocation) throws Throwable {
mCallbackArgumentCaptor.getValue().onResponse(
mCall,
new Response.Builder()
.request(new Request.Builder().url(URL).build())
.protocol(Protocol.HTTP_1_0)
.code(404)
.message("Client Error")
.build());
return null;
}
}).when(mCall).enqueue(mCallbackArgumentCaptor.capture());
onView(withId(R.id.teamcity_url)).perform(typeText(INPUT_URL), closeSoftKeyboard());
onView(withId(R.id.guest_user_switch)).perform(click());
onView(withId(R.id.btn_login)).perform(click());
onView(withText(containsString("Client Error"))).check(matches(isDisplayed()));
}
/**
* Verifies that the user is notified with an info dialog for 401 errors
*/
@Test
public void testUserIsNotifiedIfServerReturns401Request() throws IOException {
doAnswer(new Answer() {
@Override
public Object answer(InvocationOnMock invocation) throws Throwable {
mCallbackArgumentCaptor.getValue().onResponse(
mCall,
new Response.Builder()
.request(new Request.Builder().url(URL).build())
.protocol(Protocol.HTTP_1_0)
.code(401)
.message("Unauthorized")
.build());
return null;
}
}).when(mCall).enqueue(mCallbackArgumentCaptor.capture());
onView(withId(R.id.teamcity_url)).perform(typeText(INPUT_URL), closeSoftKeyboard());
onView(withId(R.id.guest_user_switch)).perform(click());
onView(withId(R.id.btn_login)).perform(click());
onView(withText(R.string.info_unauthorized_dialog_title)).check(matches(isDisplayed()));
onView(withText(R.string.info_unauthorized_dialog_content)).check(matches(isDisplayed()));
}
/**
* Verifies that the user can log in as a guest user with a correct but not secure (http) account url
*/
@Test
public void testUserCanCreateGuestUserAccountWithNotSecureUrl() {
final String urlWithPath = "http://teamcity.com/server";
String savedUrl = urlWithPath.concat("/");
doAnswer(new Answer() {
@Override
public Object answer(InvocationOnMock invocation) throws Throwable {
mCallbackArgumentCaptor.getValue().onResponse(
mCall,
new Response.Builder()
.request(new Request.Builder().url(urlWithPath).build())
.protocol(Protocol.HTTP_1_0)
.message(MESSAGE_EMPTY)
.code(200)
.build());
return null;
}
}).when(mCall).enqueue(mCallbackArgumentCaptor.capture());
onView(withId(R.id.teamcity_url)).perform(clearText(), typeText(urlWithPath), closeSoftKeyboard());
onView(withId(R.id.guest_user_switch)).perform(click());
onView(withId(R.id.btn_login)).perform(click());
onView(withText(R.string.warning_ssl_dialog_title)).check(matches(isDisplayed()));
onView(withText(R.string.server_not_secure_http)).check(matches(isDisplayed()));
onView(withText(R.string.dialog_ok_title)).perform(click());
intended(allOf(
hasComponent(RootProjectsActivity.class.getName()),
hasExtras(hasEntry(equalTo(BundleExtractorValues.IS_NEW_ACCOUNT_CREATED), equalTo(true)))));
TeamCityApplication app = (TeamCityApplication) InstrumentationRegistry.getInstrumentation().getTargetContext().getApplicationContext();
SharedUserStorage storageUtils = app.getRestApiInjector().sharedUserStorage();
assertThat(storageUtils.hasGuestAccountWithUrl(savedUrl), is(true));
assertThat(storageUtils.getActiveUser().getTeamcityUrl(), is(savedUrl));
assertThat(storageUtils.getActiveUser().isSslDisabled(), is(false));
}
@Ignore
@Test
public void testUserCanNotCreateAccountIfDataWasNotSaved() throws Throwable {
// You know what to do
}
} | app/src/androidTest/java/com/github/vase4kin/teamcityapp/login/view/LoginActivityTest.java | /*
* Copyright 2016 Andrey Tolpeev
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.github.vase4kin.teamcityapp.login.view;
import android.support.test.InstrumentationRegistry;
import android.support.test.runner.AndroidJUnit4;
import com.github.vase4kin.teamcityapp.R;
import com.github.vase4kin.teamcityapp.TeamCityApplication;
import com.github.vase4kin.teamcityapp.base.extractor.BundleExtractorValues;
import com.github.vase4kin.teamcityapp.crypto.CryptoManager;
import com.github.vase4kin.teamcityapp.dagger.components.AppComponent;
import com.github.vase4kin.teamcityapp.dagger.modules.AppModule;
import com.github.vase4kin.teamcityapp.helper.CustomIntentsTestRule;
import com.github.vase4kin.teamcityapp.root.view.RootProjectsActivity;
import com.github.vase4kin.teamcityapp.storage.SharedUserStorage;
import org.junit.Before;
import org.junit.Ignore;
import org.junit.Rule;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.ArgumentCaptor;
import org.mockito.Captor;
import org.mockito.Matchers;
import org.mockito.Mock;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;
import java.io.IOException;
import javax.inject.Named;
import it.cosenonjaviste.daggermock.DaggerMockRule;
import okhttp3.Call;
import okhttp3.Callback;
import okhttp3.OkHttpClient;
import okhttp3.Protocol;
import okhttp3.Request;
import okhttp3.Response;
import static android.support.test.espresso.Espresso.onView;
import static android.support.test.espresso.action.ViewActions.click;
import static android.support.test.espresso.action.ViewActions.closeSoftKeyboard;
import static android.support.test.espresso.action.ViewActions.pressImeActionButton;
import static android.support.test.espresso.action.ViewActions.typeText;
import static android.support.test.espresso.assertion.ViewAssertions.matches;
import static android.support.test.espresso.intent.Intents.intended;
import static android.support.test.espresso.intent.matcher.BundleMatchers.hasEntry;
import static android.support.test.espresso.intent.matcher.IntentMatchers.hasComponent;
import static android.support.test.espresso.intent.matcher.IntentMatchers.hasExtras;
import static android.support.test.espresso.matcher.ViewMatchers.assertThat;
import static android.support.test.espresso.matcher.ViewMatchers.isDisplayed;
import static android.support.test.espresso.matcher.ViewMatchers.withId;
import static android.support.test.espresso.matcher.ViewMatchers.withText;
import static com.github.vase4kin.teamcityapp.dagger.modules.AppModule.CLIENT_AUTH;
import static com.github.vase4kin.teamcityapp.dagger.modules.AppModule.CLIENT_BASE;
import static com.github.vase4kin.teamcityapp.dagger.modules.AppModule.CLIENT_BASE_UNSAFE;
import static com.github.vase4kin.teamcityapp.dagger.modules.Mocks.URL;
import static org.hamcrest.CoreMatchers.containsString;
import static org.hamcrest.CoreMatchers.equalTo;
import static org.hamcrest.CoreMatchers.is;
import static org.hamcrest.core.AllOf.allOf;
import static org.mockito.Mockito.doAnswer;
import static org.mockito.Mockito.when;
/**
* Tests for {@link LoginActivity} with mocked internet connection
*/
@RunWith(AndroidJUnit4.class)
public class LoginActivityTest {
private static final String INPUT_URL = URL.replace("https://", "");
private static final String MESSAGE_EMPTY = "";
@Rule
public DaggerMockRule<AppComponent> mDaggerRule = new DaggerMockRule<>(AppComponent.class, new AppModule((TeamCityApplication) InstrumentationRegistry.getInstrumentation().getTargetContext().getApplicationContext()))
.set(new DaggerMockRule.ComponentSetter<AppComponent>() {
@Override
public void setComponent(AppComponent appComponent) {
TeamCityApplication app = (TeamCityApplication) InstrumentationRegistry.getInstrumentation().getTargetContext().getApplicationContext();
app.setAppInjector(appComponent);
}
});
@Rule
public CustomIntentsTestRule<LoginActivity> mActivityRule = new CustomIntentsTestRule<>(LoginActivity.class);
@Captor
private ArgumentCaptor<Callback> mCallbackArgumentCaptor;
@Named(CLIENT_BASE)
@Mock
private OkHttpClient okHttpClient;
@Named(CLIENT_BASE_UNSAFE)
@Mock
private OkHttpClient unsafeOkHttpClient;
@Named(CLIENT_AUTH)
@Mock
private OkHttpClient mClientAuth;
@Mock
private CryptoManager mCryptoManager;
@Mock
private Call mCall;
@Before
public void setUp() {
TeamCityApplication app = (TeamCityApplication) InstrumentationRegistry.getInstrumentation().getTargetContext().getApplicationContext();
app.getAppInjector().sharedUserStorage().clearAll();
when(okHttpClient.newCall(Matchers.any(Request.class))).thenReturn(mCall);
when(unsafeOkHttpClient.newCall(Matchers.any(Request.class))).thenReturn(mCall);
mActivityRule.launchActivity(null);
}
/**
* Verifies that the user can log in as a guest user with a correct account url
*/
@Test
public void testUserCanCreateGuestUserAccountWithCorrectUrl() throws Throwable {
final String urlWithPath = "https://teamcity.com/server";
String savedUrl = urlWithPath.concat("/");
doAnswer(new Answer() {
@Override
public Object answer(InvocationOnMock invocation) throws Throwable {
mCallbackArgumentCaptor.getValue().onResponse(
mCall,
new Response.Builder()
.request(new Request.Builder().url(urlWithPath).build())
.protocol(Protocol.HTTP_1_0)
.message(MESSAGE_EMPTY)
.code(200)
.build());
return null;
}
}).when(mCall).enqueue(mCallbackArgumentCaptor.capture());
onView(withId(R.id.teamcity_url)).perform(typeText(urlWithPath.replace("https://", "")), closeSoftKeyboard());
onView(withId(R.id.guest_user_switch)).perform(click());
onView(withId(R.id.btn_login)).perform(click());
intended(allOf(
hasComponent(RootProjectsActivity.class.getName()),
hasExtras(hasEntry(equalTo(BundleExtractorValues.IS_NEW_ACCOUNT_CREATED), equalTo(true)))));
TeamCityApplication app = (TeamCityApplication) InstrumentationRegistry.getInstrumentation().getTargetContext().getApplicationContext();
SharedUserStorage storageUtils = app.getRestApiInjector().sharedUserStorage();
assertThat(storageUtils.hasGuestAccountWithUrl(savedUrl), is(true));
assertThat(storageUtils.getActiveUser().getTeamcityUrl(), is(savedUrl));
assertThat(storageUtils.getActiveUser().isSslDisabled(), is(false));
}
/**
* Verifies that the user can log in as a guest user with a correct account url, ignoring SSL
*/
@Test
public void testUserCanCreateGuestUserAccountWithCorrectUrlIgnoringSsl() {
final String urlWithPath = "https://teamcity.com/server";
String savedUrl = urlWithPath.concat("/");
doAnswer(new Answer() {
@Override
public Object answer(InvocationOnMock invocation) throws Throwable {
mCallbackArgumentCaptor.getValue().onResponse(
mCall,
new Response.Builder()
.request(new Request.Builder().url(urlWithPath).build())
.protocol(Protocol.HTTP_1_0)
.message(MESSAGE_EMPTY)
.code(200)
.build());
return null;
}
}).when(mCall).enqueue(mCallbackArgumentCaptor.capture());
onView(withId(R.id.teamcity_url)).perform(typeText(urlWithPath.replace("https://", "")), closeSoftKeyboard());
onView(withId(R.id.guest_user_switch)).perform(click());
onView(withId(R.id.disable_ssl_switch)).perform(click());
onView(withText(R.string.warning_ssl_dialog_content)).check(matches(isDisplayed()));
onView(withText(R.string.dialog_ok_title)).perform(click());
onView(withId(R.id.btn_login)).perform(click());
intended(allOf(
hasComponent(RootProjectsActivity.class.getName()),
hasExtras(hasEntry(equalTo(BundleExtractorValues.IS_NEW_ACCOUNT_CREATED), equalTo(true)))));
TeamCityApplication app = (TeamCityApplication) InstrumentationRegistry.getInstrumentation().getTargetContext().getApplicationContext();
SharedUserStorage storageUtils = app.getRestApiInjector().sharedUserStorage();
assertThat(storageUtils.hasGuestAccountWithUrl(savedUrl), is(true));
assertThat(storageUtils.getActiveUser().getTeamcityUrl(), is(savedUrl));
assertThat(storageUtils.getActiveUser().isSslDisabled(), is(true));
}
/**
* Verifies that the user can log in as a guest with a correct account url via the IME action button
*/
@Test
public void testUserCanCreateAccountWithCorrectUrlByImeButton() throws Throwable {
doAnswer(new Answer() {
@Override
public Object answer(InvocationOnMock invocation) throws Throwable {
mCallbackArgumentCaptor.getValue().onResponse(
mCall,
new Response.Builder()
.request(new Request.Builder().url(URL).build())
.protocol(Protocol.HTTP_1_0)
.message(MESSAGE_EMPTY)
.code(200)
.build());
return null;
}
}).when(mCall).enqueue(mCallbackArgumentCaptor.capture());
onView(withId(R.id.guest_user_switch)).perform(click());
onView(withId(R.id.teamcity_url)).perform(typeText(INPUT_URL), pressImeActionButton());
intended(allOf(
hasComponent(RootProjectsActivity.class.getName()),
hasExtras(hasEntry(equalTo(BundleExtractorValues.IS_NEW_ACCOUNT_CREATED), equalTo(true)))));
TeamCityApplication app = (TeamCityApplication) InstrumentationRegistry.getInstrumentation().getTargetContext().getApplicationContext();
SharedUserStorage storageUtils = app.getRestApiInjector().sharedUserStorage();
assertThat(storageUtils.hasGuestAccountWithUrl(URL), is(true));
assertThat(storageUtils.getActiveUser().getTeamcityUrl(), is(URL));
}
/**
* Verifies that the user can log in as a guest via the IME action button with a correct account url which contains a path
*/
@Test
public void testUserCanCreateAccountWithCorrectUrlWhichContainsPathByImeButton() throws Throwable {
doAnswer(new Answer() {
@Override
public Object answer(InvocationOnMock invocation) throws Throwable {
mCallbackArgumentCaptor.getValue().onResponse(
mCall,
new Response.Builder()
.request(new Request.Builder().url(URL).build())
.protocol(Protocol.HTTP_1_0)
.message(MESSAGE_EMPTY)
.code(200)
.build());
return null;
}
}).when(mCall).enqueue(mCallbackArgumentCaptor.capture());
onView(withId(R.id.guest_user_switch)).perform(click());
onView(withId(R.id.teamcity_url)).perform(typeText(INPUT_URL), pressImeActionButton());
intended(allOf(
hasComponent(RootProjectsActivity.class.getName()),
hasExtras(hasEntry(equalTo(BundleExtractorValues.IS_NEW_ACCOUNT_CREATED), equalTo(true)))));
TeamCityApplication app = (TeamCityApplication) InstrumentationRegistry.getInstrumentation().getTargetContext().getApplicationContext();
SharedUserStorage storageUtils = app.getRestApiInjector().sharedUserStorage();
assertThat(storageUtils.hasGuestAccountWithUrl(URL), is(true));
assertThat(storageUtils.getActiveUser().getTeamcityUrl(), is(URL));
}
/**
* Verifies that the user can log in with a correct account url and credentials
*/
@Ignore
@Test
public void testUserCanCreateUserAccountWithCorrectUrlAndCredentials() throws Throwable {
doAnswer(new Answer() {
@Override
public Object answer(InvocationOnMock invocation) throws Throwable {
mCallbackArgumentCaptor.getValue().onResponse(
mCall,
new Response.Builder()
.request(new Request.Builder().url(URL).build())
.protocol(Protocol.HTTP_1_0)
.code(200)
.build());
return null;
}
}).when(mCall).enqueue(mCallbackArgumentCaptor.capture());
onView(withId(R.id.teamcity_url)).perform(typeText(INPUT_URL), closeSoftKeyboard());
onView(withId(R.id.user_name)).perform(typeText("user"), pressImeActionButton());
onView(withId(R.id.password)).perform(typeText("pass"), pressImeActionButton());
intended(allOf(
hasComponent(RootProjectsActivity.class.getName()),
hasExtras(hasEntry(equalTo(BundleExtractorValues.IS_NEW_ACCOUNT_CREATED), equalTo(true)))));
TeamCityApplication app = (TeamCityApplication) InstrumentationRegistry.getInstrumentation().getTargetContext().getApplicationContext();
SharedUserStorage storageUtils = app.getRestApiInjector().sharedUserStorage();
assertThat(storageUtils.hasAccountWithUrl(URL, "user"), is(true));
assertThat(storageUtils.getActiveUser().getTeamcityUrl(), is(URL));
}
/**
* Verifies that the user is notified with an error message if the server returns a bad response
*/
@Test
public void testUserIsNotifiedIfServerReturnsBadResponse() throws IOException {
doAnswer(new Answer() {
@Override
public Object answer(InvocationOnMock invocation) throws Throwable {
mCallbackArgumentCaptor.getValue().onResponse(
mCall,
new Response.Builder()
.request(new Request.Builder().url(URL).build())
.protocol(Protocol.HTTP_1_0)
.code(404)
.message("Client Error")
.build());
return null;
}
}).when(mCall).enqueue(mCallbackArgumentCaptor.capture());
onView(withId(R.id.teamcity_url)).perform(typeText(INPUT_URL), closeSoftKeyboard());
onView(withId(R.id.guest_user_switch)).perform(click());
onView(withId(R.id.btn_login)).perform(click());
onView(withText(containsString("Client Error"))).check(matches(isDisplayed()));
}
/**
* Verifies that the user is notified with an info dialog for 401 errors
*/
@Test
public void testUserIsNotifiedIfServerReturns401Request() throws IOException {
doAnswer(new Answer() {
@Override
public Object answer(InvocationOnMock invocation) throws Throwable {
mCallbackArgumentCaptor.getValue().onResponse(
mCall,
new Response.Builder()
.request(new Request.Builder().url(URL).build())
.protocol(Protocol.HTTP_1_0)
.code(401)
.message("Unauthorized")
.build());
return null;
}
}).when(mCall).enqueue(mCallbackArgumentCaptor.capture());
onView(withId(R.id.teamcity_url)).perform(typeText(INPUT_URL), closeSoftKeyboard());
onView(withId(R.id.guest_user_switch)).perform(click());
onView(withId(R.id.btn_login)).perform(click());
onView(withText(R.string.info_unauthorized_dialog_title)).check(matches(isDisplayed()));
onView(withText(R.string.info_unauthorized_dialog_content)).check(matches(isDisplayed()));
}
@Ignore
@Test
public void testUserCanNotCreateAccountIfDataWasNotSaved() throws Throwable {
// You know what to do
}
} | Add UI test
| app/src/androidTest/java/com/github/vase4kin/teamcityapp/login/view/LoginActivityTest.java | Add UI test | <ide><path>pp/src/androidTest/java/com/github/vase4kin/teamcityapp/login/view/LoginActivityTest.java
<ide> import okhttp3.Response;
<ide>
<ide> import static android.support.test.espresso.Espresso.onView;
<add>import static android.support.test.espresso.action.ViewActions.clearText;
<ide> import static android.support.test.espresso.action.ViewActions.click;
<ide> import static android.support.test.espresso.action.ViewActions.closeSoftKeyboard;
<ide> import static android.support.test.espresso.action.ViewActions.pressImeActionButton;
<ide> onView(withText(R.string.info_unauthorized_dialog_content)).check(matches(isDisplayed()));
<ide> }
<ide>
<add> /**
<add> * Verifies that the user can log in as a guest user with a correct but not secure (http) account url
<add> */
<add> @Test
<add> public void testUserCanCreateGuestUserAccountWithNotSecureUrl() {
<add> final String urlWithPath = "http://teamcity.com/server";
<add> String savedUrl = urlWithPath.concat("/");
<add> doAnswer(new Answer() {
<add> @Override
<add> public Object answer(InvocationOnMock invocation) throws Throwable {
<add> mCallbackArgumentCaptor.getValue().onResponse(
<add> mCall,
<add> new Response.Builder()
<add> .request(new Request.Builder().url(urlWithPath).build())
<add> .protocol(Protocol.HTTP_1_0)
<add> .message(MESSAGE_EMPTY)
<add> .code(200)
<add> .build());
<add> return null;
<add> }
<add> }).when(mCall).enqueue(mCallbackArgumentCaptor.capture());
<add>
<add> onView(withId(R.id.teamcity_url)).perform(clearText(), typeText(urlWithPath), closeSoftKeyboard());
<add> onView(withId(R.id.guest_user_switch)).perform(click());
<add> onView(withId(R.id.btn_login)).perform(click());
<add>
<add> onView(withText(R.string.warning_ssl_dialog_title)).check(matches(isDisplayed()));
<add> onView(withText(R.string.server_not_secure_http)).check(matches(isDisplayed()));
<add> onView(withText(R.string.dialog_ok_title)).perform(click());
<add>
<add> intended(allOf(
<add> hasComponent(RootProjectsActivity.class.getName()),
<add> hasExtras(hasEntry(equalTo(BundleExtractorValues.IS_NEW_ACCOUNT_CREATED), equalTo(true)))));
<add>
<add> TeamCityApplication app = (TeamCityApplication) InstrumentationRegistry.getInstrumentation().getTargetContext().getApplicationContext();
<add> SharedUserStorage storageUtils = app.getRestApiInjector().sharedUserStorage();
<add> assertThat(storageUtils.hasGuestAccountWithUrl(savedUrl), is(true));
<add> assertThat(storageUtils.getActiveUser().getTeamcityUrl(), is(savedUrl));
<add> assertThat(storageUtils.getActiveUser().isSslDisabled(), is(false));
<add> }
<add>
<ide> @Ignore
<ide> @Test
<ide> public void testUserCanNotCreateAccountIfDataWasNotSaved() throws Throwable { |
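All of the tests in this record lean on the same stubbing idiom: capture the OkHttp Callback that production code hands to Call.enqueue(), then complete it synchronously with a canned Response. A minimal, self-contained sketch of that idiom follows; the names EnqueueStubSketch and stubbedCall are illustrative assumptions, not project code.

import okhttp3.Call;
import okhttp3.Callback;
import okhttp3.Protocol;
import okhttp3.Request;
import okhttp3.Response;
import org.mockito.ArgumentCaptor;
import static org.mockito.Mockito.doAnswer;
import static org.mockito.Mockito.mock;

public class EnqueueStubSketch {
    public static void main(String[] args) {
        Call stubbedCall = mock(Call.class);
        ArgumentCaptor<Callback> captor = ArgumentCaptor.forClass(Callback.class);
        doAnswer(invocation -> {
            // By the time enqueue() runs, the captor has already recorded the
            // Callback, so it can be completed immediately with a fake HTTP 200.
            captor.getValue().onResponse(stubbedCall, new Response.Builder()
                    .request(new Request.Builder().url("https://teamcity.com/").build())
                    .protocol(Protocol.HTTP_1_0)
                    .message("")
                    .code(200)
                    .build());
            return null;
        }).when(stubbedCall).enqueue(captor.capture());
        // Any code that now calls stubbedCall.enqueue(...) receives an immediate
        // onResponse with the canned 200, without touching the network.
    }
}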
|
Java | apache-2.0 | 05a5524bc2a246511900e60a55f954213d0ea7c6 | 0 | EXEM-OSS/flamingo,EXEM-OSS/flamingo,EXEM-OSS/flamingo,EXEM-OSS/flamingo,EXEM-OSS/flamingo,EXEM-OSS/flamingo,EXEM-OSS/flamingo,EXEM-OSS/flamingo,EXEM-OSS/flamingo,EXEM-OSS/flamingo,EXEM-OSS/flamingo,EXEM-OSS/flamingo,EXEM-OSS/flamingo,EXEM-OSS/flamingo,EXEM-OSS/flamingo,EXEM-OSS/flamingo,EXEM-OSS/flamingo | package org.exem.flamingo.web.oozie.workflow;
import org.exem.flamingo.shared.core.repository.PersistentRepositoryImpl;
import org.exem.flamingo.web.jdbc.FlamingoSessionTemplate;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Repository;
import java.util.List;
import java.util.Map;
/**
* Created by sanghyunbak on 2016. 12. 1..
*/
@Repository
public class OozieWorkflowRepositoryImpl extends PersistentRepositoryImpl implements OozieWorkflowRepository {
@Override
public String getNamespace() {
return this.NAMESPACE;
}
@Autowired
public OozieWorkflowRepositoryImpl(FlamingoSessionTemplate flamingoSessionTemplate) {
super.setSqlSessionTemplate(flamingoSessionTemplate);
}
@Override
public Map selectTreeId(String jobId) {
return this.getSqlSessionTemplate().selectOne(this.getNamespace() + ".selectJobId", jobId);
}
@Override
public List<Map> listWorkflows() {
return this.getSqlSessionTemplate().selectList(this.getNamespace() + ".listWorkflows");
}
@Override
public Map getRecentWorkflow() {
return this.getSqlSessionTemplate().selectOne(this.getNamespace() + ".selectRecentWorkflow");
}
@Override
public void insertWorkflow(Map param) {
this.getSqlSessionTemplate().insert(this.getNamespace() + ".insertWorkflow", param);
}
@Override
public void updateWorkflow(Map param) {
this.getSqlSessionTemplate().insert(this.getNamespace() + ".updateWorkflow", param);
}
@Override
public void deleteWorkflow(long id) {
this.getSqlSessionTemplate().delete(this.getNamespace() + ".deleteWorkflow", id);
}
}
| flamingo-web/src/main/java/org/exem/flamingo/web/oozie/workflow/OozieWorkflowRepositoryImpl.java | package org.exem.flamingo.web.oozie.workflow;
import org.exem.flamingo.shared.core.repository.PersistentRepositoryImpl;
import org.exem.flamingo.web.jdbc.FlamingoSessionTemplate;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Repository;
import java.util.List;
import java.util.Map;
/**
* Created by sanghyunbak on 2016. 12. 1..
*/
@Repository
public class OozieWorkflowRepositoryImpl extends PersistentRepositoryImpl implements OozieWorkflowRepository {
@Override
public String getNamespace() {
return this.NAMESPACE;
}
@Autowired
public OozieWorkflowRepositoryImpl(FlamingoSessionTemplate flamingoSessionTemplate) {
super.setSqlSessionTemplate(flamingoSessionTemplate);
}
@Override
public Map selectTreeId(String jobId) {
return this.getSqlSessionTemplate().selectOne(this.getNamespace() + ".selectJobId", jobId);
}
@Override
public List<Map> listWorkflows() {
return this.getSqlSessionTemplate().selectList(this.getNamespace() + ".listWorkflows");
}
@Override
public Map getRecentWorkflow() {
return this.getSqlSessionTemplate().selectOne(this.getNamespace() + ".getRecentWorkflow");
}
@Override
public void insertWorkflow(Map param) {
this.getSqlSessionTemplate().insert(this.getNamespace() + ".insertWorkflow", param);
}
@Override
public void updateWorkflow(Map param) {
this.getSqlSessionTemplate().insert(this.getNamespace() + ".updateWorkflow", param);
}
@Override
public void deleteWorkflow(int id) {
this.getSqlSessionTemplate().delete(this.getNamespace() + ".deleteWorkflow", id);
}
}
| FL-102 Modified repositoryImpl to receive the most recent id on delete (no UI integration part)
| flamingo-web/src/main/java/org/exem/flamingo/web/oozie/workflow/OozieWorkflowRepositoryImpl.java | FL-102 Modified repositoryImpl to receive the most recent id on delete (no UI integration part) | <ide><path>lamingo-web/src/main/java/org/exem/flamingo/web/oozie/workflow/OozieWorkflowRepositoryImpl.java
<ide>
<ide> @Override
<ide> public Map getRecentWorkflow() {
<del> return this.getSqlSessionTemplate().selectOne(this.getNamespace() + ".getRecentWorkflow");
<add> return this.getSqlSessionTemplate().selectOne(this.getNamespace() + ".selectRecentWorkflow");
<ide> }
<ide>
<ide> @Override
<ide> }
<ide>
<ide> @Override
<del> public void deleteWorkflow(int id) {
<add> public void deleteWorkflow(long id) {
<ide> this.getSqlSessionTemplate().delete(this.getNamespace() + ".deleteWorkflow", id);
<ide> }
<ide> } |
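The commit above renames the mapper statement id behind getRecentWorkflow() to ".selectRecentWorkflow" and widens deleteWorkflow to a long id. A hedged usage sketch of the resulting flow follows — the service wrapper, its names, and the "id" key in the result map are assumptions for illustration, not project code; OozieWorkflowRepository is the interface implemented above.

import java.util.Map;

class OozieWorkflowServiceSketch {
    private final OozieWorkflowRepository repository;

    OozieWorkflowServiceSketch(OozieWorkflowRepository repository) {
        this.repository = repository;
    }

    // Deletes a workflow, then reads back the id of the most recent remaining one.
    long deleteAndGetRecentId(long idToDelete) {
        repository.deleteWorkflow(idToDelete);       // long id, per the diff above
        Map recent = repository.getRecentWorkflow(); // backed by ".selectRecentWorkflow"
        return ((Number) recent.get("id")).longValue(); // "id" key is assumed
    }
}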
|
Java | apache-2.0 | 5f9c41f4a005b4d37130aab94bf91b4dcbd4118d | 0 | diorcety/intellij-community,blademainer/intellij-community,orekyuu/intellij-community,ThiagoGarciaAlves/intellij-community,pwoodworth/intellij-community,asedunov/intellij-community,wreckJ/intellij-community,apixandru/intellij-community,fengbaicanhe/intellij-community,FHannes/intellij-community,Distrotech/intellij-community,ibinti/intellij-community,dslomov/intellij-community,MichaelNedzelsky/intellij-community,mglukhikh/intellij-community,Lekanich/intellij-community,holmes/intellij-community,fitermay/intellij-community,youdonghai/intellij-community,clumsy/intellij-community,kool79/intellij-community,semonte/intellij-community,signed/intellij-community,vvv1559/intellij-community,michaelgallacher/intellij-community,ftomassetti/intellij-community,jagguli/intellij-community,samthor/intellij-community,fnouama/intellij-community,vladmm/intellij-community,TangHao1987/intellij-community,ol-loginov/intellij-community,akosyakov/intellij-community,ernestp/consulo,retomerz/intellij-community,kdwink/intellij-community,adedayo/intellij-community,tmpgit/intellij-community,da1z/intellij-community,ahb0327/intellij-community,MER-GROUP/intellij-community,ivan-fedorov/intellij-community,robovm/robovm-studio,supersven/intellij-community,suncycheng/intellij-community,slisson/intellij-community,xfournet/intellij-community,muntasirsyed/intellij-community,salguarnieri/intellij-community,alphafoobar/intellij-community,hurricup/intellij-community,idea4bsd/idea4bsd,consulo/consulo,lucafavatella/intellij-community,caot/intellij-community,nicolargo/intellij-community,izonder/intellij-community,gnuhub/intellij-community,SerCeMan/intellij-community,petteyg/intellij-community,ryano144/intellij-community,amith01994/intellij-community,allotria/intellij-community | /*
* Copyright 2000-2009 JetBrains s.r.o.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intellij.execution.filters;
import com.intellij.execution.impl.ConsoleViewImpl;
import com.intellij.execution.ui.ConsoleView;
import com.intellij.openapi.project.Project;
import com.intellij.psi.search.GlobalSearchScope;
import java.util.ArrayList;
/**
* @author dyoma
*/
public class TextConsoleBuilderImpl extends TextConsoleBuilder {
private final Project myProject;
private final GlobalSearchScope myScope;
private final ArrayList<Filter> myFilters = new ArrayList<Filter>();
private boolean myViewer;
public TextConsoleBuilderImpl(final Project project) {
this(project, GlobalSearchScope.allScope(project));
}
public TextConsoleBuilderImpl(final Project project, GlobalSearchScope scope) {
myProject = project;
myScope = scope;
}
@Override
public ConsoleView getConsole() {
final ConsoleView consoleView = createConsole();
for (final Filter filter : myFilters) {
consoleView.addMessageFilter(filter);
}
return consoleView;
}
protected ConsoleView createConsole() {
return new ConsoleViewImpl(myProject, myScope, myViewer, null);
}
@Override
public void addFilter(final Filter filter) {
myFilters.add(filter);
}
@Override
public void setViewer(boolean isViewer) {
myViewer = isViewer;
}
protected Project getProject() {
return myProject;
}
protected GlobalSearchScope getScope() {
return myScope;
}
protected ArrayList<Filter> getFilters() {
return myFilters;
}
protected boolean isViewer() {
return myViewer;
}
}
| platform/lang-impl/src/com/intellij/execution/filters/TextConsoleBuilderImpl.java | /*
* Copyright 2000-2009 JetBrains s.r.o.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intellij.execution.filters;
import com.intellij.execution.impl.ConsoleViewImpl;
import com.intellij.execution.ui.ConsoleView;
import com.intellij.openapi.project.Project;
import com.intellij.psi.search.GlobalSearchScope;
import java.util.ArrayList;
/**
* @author dyoma
*/
public class TextConsoleBuilderImpl extends TextConsoleBuilder {
private final Project myProject;
private final GlobalSearchScope myScope;
private final ArrayList<Filter> myFilters = new ArrayList<Filter>();
private boolean myViewer;
public TextConsoleBuilderImpl(final Project project) {
this(project, GlobalSearchScope.allScope(project));
}
public TextConsoleBuilderImpl(final Project project, GlobalSearchScope scope) {
myProject = project;
myScope = scope;
}
public ConsoleView getConsole() {
final ConsoleView consoleView = createConsole();
for (final Filter filter : myFilters) {
consoleView.addMessageFilter(filter);
}
return consoleView;
}
protected ConsoleView createConsole() {
return new ConsoleViewImpl(myProject, myScope, myViewer, null);
}
public void addFilter(final Filter filter) {
myFilters.add(filter);
}
@Override
public void setViewer(boolean isViewer) {
myViewer = isViewer;
}
protected Project getProject() {
return myProject;
}
protected GlobalSearchScope getScope() {
return myScope;
}
protected ArrayList<Filter> getFilters() {
return myFilters;
}
protected boolean isViewer() {
return myViewer;
}
}
| Code cleanup: @Override annotation added
| platform/lang-impl/src/com/intellij/execution/filters/TextConsoleBuilderImpl.java | Code cleanup: @Override annotation added | <ide><path>latform/lang-impl/src/com/intellij/execution/filters/TextConsoleBuilderImpl.java
<ide> myScope = scope;
<ide> }
<ide>
<add> @Override
<ide> public ConsoleView getConsole() {
<ide> final ConsoleView consoleView = createConsole();
<ide> for (final Filter filter : myFilters) {
<ide> return new ConsoleViewImpl(myProject, myScope, myViewer, null);
<ide> }
<ide>
<add> @Override
<ide> public void addFilter(final Filter filter) {
<ide> myFilters.add(filter);
<ide> } |
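The point of this cleanup, shown with a tiny self-contained example (not project code): @Override turns a signature mismatch into a compile error instead of a silent overload.

interface ConsoleLike {
    void addFilter(String filter);
}

class AnnotatedBuilder implements ConsoleLike {
    @Override
    public void addFilter(String filter) { /* ok: the compiler verifies this override */ }

    // @Override
    // public void addFilter(Object filter) { }
    // ^ restoring the annotation on this method would not compile: it merely
    //   overloads addFilter, it does not override ConsoleLike.addFilter(String).
}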
|
Java | apache-2.0 | da4b6e41a03f5bd86ab7eba0fe766521eefe564b | 0 | ldionmarcil/OSRSHelper | package com.infonuascape.osrshelper;
import android.app.Activity;
import android.content.Context;
import android.content.Intent;
import android.os.AsyncTask;
import android.os.Bundle;
import android.view.Gravity;
import android.view.View;
import android.view.View.OnClickListener;
import android.view.Window;
import android.widget.AdapterView;
import android.widget.AdapterView.OnItemSelectedListener;
import android.widget.ArrayAdapter;
import android.widget.ImageView;
import android.widget.Spinner;
import android.widget.TableLayout;
import android.widget.TableRow;
import android.widget.TextView;
import com.infonuascape.osrshelper.tracker.TrackerTimeEnum;
import com.infonuascape.osrshelper.tracker.rt.TrackerHelper;
import com.infonuascape.osrshelper.tracker.rt.Updater;
import com.infonuascape.osrshelper.utils.Skill;
import com.infonuascape.osrshelper.utils.exceptions.PlayerNotFoundException;
import com.infonuascape.osrshelper.utils.players.PlayerSkills;
import java.text.NumberFormat;
public class RTXPTrackerActivity extends Activity implements OnItemSelectedListener, OnClickListener {
private final static String EXTRA_USERNAME = "extra_username";
private String username;
private TextView header;
private Spinner spinner;
public static void show(final Context context, final String username) {
Intent intent = new Intent(context, RTXPTrackerActivity.class);
intent.putExtra(EXTRA_USERNAME, username);
context.startActivity(intent);
}
@Override
protected void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
requestWindowFeature(Window.FEATURE_NO_TITLE);
setContentView(R.layout.xptracker);
username = getIntent().getStringExtra(EXTRA_USERNAME);
header = (TextView) findViewById(R.id.header);
header.setText(getString(R.string.loading_tracking, username));
spinner = (Spinner) findViewById(R.id.time_spinner);
ArrayAdapter<CharSequence> adapter = ArrayAdapter.createFromResource(this, R.array.time_array,
R.layout.spinner_item);
adapter.setDropDownViewResource(android.R.layout.simple_spinner_dropdown_item);
spinner.setAdapter(adapter);
spinner.setSelection(3);
spinner.setOnItemSelectedListener(this);
findViewById(R.id.update).setOnClickListener(this);
}
private void changeHeaderText(final String text, final int visibility) {
runOnUiThread(new Runnable() {
@Override
public void run() {
findViewById(R.id.progressbar).setVisibility(visibility);
header.setText(text);
}
});
}
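/**
 * Loads the tracked stats off the UI thread (optionally forcing a tracker
 * update first via Updater.perform) and hands the result to populateTable()
 * on the UI thread when done.
 */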
private class PopulateTable extends AsyncTask<String, Void, PlayerSkills> {
private TrackerTimeEnum.TrackerTime time;
private PlayerSkills trackedSkills;
private boolean isUpdating;
public PopulateTable(TrackerTimeEnum.TrackerTime time, boolean isUpdating) {
this.time = time;
this.isUpdating = isUpdating;
}
@Override
protected PlayerSkills doInBackground(String... urls) {
TrackerHelper trackerHelper = new TrackerHelper();
trackerHelper.setUserName(username);
try {
if (isUpdating) {
Updater.perform(username);
}
trackedSkills = trackerHelper.getPlayerStats(time);
} catch (PlayerNotFoundException e) {
changeHeaderText(getString(R.string.not_existing_player, username), View.GONE);
} catch (Exception uhe) {
uhe.printStackTrace();
changeHeaderText(getString(R.string.network_error), View.GONE);
}
return trackedSkills;
}
@Override
protected void onPostExecute(PlayerSkills playerSkillsCallback) {
if (trackedSkills != null) {
populateTable(trackedSkills);
}
}
}
private void populateTable(PlayerSkills trackedSkills) {
changeHeaderText(getString(R.string.showing_tracking, username), View.GONE);
if (trackedSkills.sinceWhen != null) {
((TextView) findViewById(R.id.track_since)).setText(getString(R.string.tracking_since,
trackedSkills.sinceWhen));
} else {
((TextView) findViewById(R.id.track_since)).setText(getString(R.string.tracking_starting));
}
TableLayout table = (TableLayout) findViewById(R.id.table_tracking);
table.removeAllViews();
table.addView(createHeadersRow());
table.addView(createRow(trackedSkills.overall));
table.addView(createRow(trackedSkills.attack));
table.addView(createRow(trackedSkills.defence));
table.addView(createRow(trackedSkills.strength));
table.addView(createRow(trackedSkills.hitpoints));
table.addView(createRow(trackedSkills.ranged));
table.addView(createRow(trackedSkills.prayer));
table.addView(createRow(trackedSkills.magic));
table.addView(createRow(trackedSkills.cooking));
table.addView(createRow(trackedSkills.woodcutting));
table.addView(createRow(trackedSkills.fletching));
table.addView(createRow(trackedSkills.fishing));
table.addView(createRow(trackedSkills.firemaking));
table.addView(createRow(trackedSkills.crafting));
table.addView(createRow(trackedSkills.smithing));
table.addView(createRow(trackedSkills.mining));
table.addView(createRow(trackedSkills.herblore));
table.addView(createRow(trackedSkills.agility));
table.addView(createRow(trackedSkills.thieving));
table.addView(createRow(trackedSkills.slayer));
table.addView(createRow(trackedSkills.farming));
table.addView(createRow(trackedSkills.runecraft));
table.addView(createRow(trackedSkills.hunter));
table.addView(createRow(trackedSkills.construction));
}
private TableRow createHeadersRow() {
TableRow tableRow = new TableRow(this);
TableRow.LayoutParams params = new TableRow.LayoutParams();
params.weight = 1;
params.width = 0;
params.topMargin = 10;
params.bottomMargin = 10;
params.gravity = Gravity.CENTER;
// Skill
TextView text = new TextView(this);
text.setText(getString(R.string.skill));
text.setLayoutParams(params);
text.setGravity(Gravity.CENTER);
text.setTextColor(getResources().getColor(R.color.text_normal));
tableRow.addView(text);
// Lvl
text = new TextView(this);
text.setText(getString(R.string.level));
text.setLayoutParams(params);
text.setGravity(Gravity.CENTER);
text.setTextColor(getResources().getColor(R.color.text_normal));
tableRow.addView(text);
// XP
text = new TextView(this);
text.setText(getString(R.string.xp));
text.setLayoutParams(params);
text.setGravity(Gravity.CENTER);
text.setTextColor(getResources().getColor(R.color.text_normal));
tableRow.addView(text);
// Gain
text = new TextView(this);
text.setText(getString(R.string.xp_gain));
text.setLayoutParams(params);
text.setGravity(Gravity.CENTER);
text.setTextColor(getResources().getColor(R.color.text_normal));
tableRow.addView(text);
return tableRow;
}
private TableRow createRow(Skill skillTrack) {
TableRow tableRow = new TableRow(this);
TableRow.LayoutParams params = new TableRow.LayoutParams();
params.weight = 1;
params.width = 0;
params.topMargin = 10;
params.bottomMargin = 10;
params.gravity = Gravity.CENTER;
// Skill image
ImageView image = new ImageView(this);
image.setImageResource(skillTrack.getDrawableInt());
image.setLayoutParams(params);
tableRow.addView(image);
// Lvl
TextView text = new TextView(this);
text.setText(skillTrack.getLevel() + "");
text.setLayoutParams(params);
text.setGravity(Gravity.CENTER);
text.setTextColor(getResources().getColor(R.color.text_normal));
tableRow.addView(text);
// XP
text = new TextView(this);
text.setText(NumberFormat.getInstance().format(skillTrack.getExperience()));
text.setLayoutParams(params);
text.setGravity(Gravity.CENTER);
text.setTextColor(getResources().getColor(R.color.text_normal));
tableRow.addView(text);
// Gain
text = new TextView(this);
text.setLayoutParams(params);
text.setGravity(Gravity.CENTER);
int expDiff = skillTrack.getExperienceDiff();
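			// Display buckets for the XP gain column: zero is greyed out; under 1k is
			// shown as-is; 1k to 10k keeps a decimal (divided by 1000.0f); 10k and up
			// is truncated to whole thousands by integer division.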
if (expDiff == 0) {
text.setTextColor(getResources().getColor(R.color.DarkGray));
text.setText(getString(R.string.gain_small, expDiff));
} else {
text.setTextColor(getResources().getColor(R.color.Green));
if (expDiff < 1000) {
text.setText(getString(R.string.gain_small, expDiff));
} else if (expDiff >= 1000 && expDiff < 10000) {
text.setText(getString(R.string.gain_medium, expDiff / 1000.0f));
} else {
text.setText(getString(R.string.gain, expDiff / 1000));
}
}
tableRow.addView(text);
return tableRow;
}
private void createAsyncTaskToPopulate(String selectedTime, boolean isUpdating) {
TrackerTimeEnum.TrackerTime time = null;
if (selectedTime.equals("Hour")) {
time = TrackerTimeEnum.TrackerTime.Hour;
} else if (selectedTime.equals("Day")) {
time = TrackerTimeEnum.TrackerTime.Day;
} else if (selectedTime.equals("Week")) {
time = TrackerTimeEnum.TrackerTime.Week;
} else if (selectedTime.equals("Month")) {
time = TrackerTimeEnum.TrackerTime.Month;
} else if (selectedTime.equals("Year")) {
time = TrackerTimeEnum.TrackerTime.Year;
}
if (time != null) {
((TableLayout) findViewById(R.id.table_tracking)).removeAllViews();
((TextView) findViewById(R.id.track_since)).setText("");
changeHeaderText(getString(R.string.loading_tracking, username), View.VISIBLE);
new PopulateTable(time, isUpdating).execute();
}
}
@Override
public void onItemSelected(AdapterView<?> parent, View view, int position, long id) {
if(spinner.getSelectedItem() instanceof String) {
createAsyncTaskToPopulate((String) spinner.getSelectedItem(), false);
}
}
@Override
public void onNothingSelected(AdapterView<?> arg0) {
}
@Override
public void onClick(View v) {
if (v.getId() == R.id.update) {
createAsyncTaskToPopulate((String) spinner.getSelectedItem(), true);
}
}
}
| app/src/main/java/com/infonuascape/osrshelper/RTXPTrackerActivity.java | package com.infonuascape.osrshelper;
import android.app.Activity;
import android.content.Context;
import android.content.Intent;
import android.os.AsyncTask;
import android.os.Bundle;
import android.view.Gravity;
import android.view.View;
import android.view.View.OnClickListener;
import android.view.Window;
import android.widget.AdapterView;
import android.widget.AdapterView.OnItemSelectedListener;
import android.widget.ArrayAdapter;
import android.widget.ImageView;
import android.widget.Spinner;
import android.widget.TableLayout;
import android.widget.TableRow;
import android.widget.TextView;
import com.infonuascape.osrshelper.hiscore.HiscoreHelper;
import com.infonuascape.osrshelper.tracker.TrackerTimeEnum;
import com.infonuascape.osrshelper.tracker.rt.TrackerHelper;
import com.infonuascape.osrshelper.tracker.rt.Updater;
import com.infonuascape.osrshelper.utils.Skill;
import com.infonuascape.osrshelper.utils.exceptions.PlayerNotFoundException;
import com.infonuascape.osrshelper.utils.players.PlayerSkills;
import java.text.NumberFormat;
public class RTXPTrackerActivity extends Activity implements OnItemSelectedListener, OnClickListener {
private final static String EXTRA_USERNAME = "extra_username";
private String username;
private TextView header;
private Spinner spinner;
public static void show(final Context context, final String username) {
Intent intent = new Intent(context, RTXPTrackerActivity.class);
intent.putExtra(EXTRA_USERNAME, username);
context.startActivity(intent);
}
@Override
protected void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
requestWindowFeature(Window.FEATURE_NO_TITLE);
setContentView(R.layout.xptracker);
username = getIntent().getStringExtra(EXTRA_USERNAME);
header = (TextView) findViewById(R.id.header);
header.setText(getString(R.string.loading_tracking, username));
spinner = (Spinner) findViewById(R.id.time_spinner);
ArrayAdapter<CharSequence> adapter = ArrayAdapter.createFromResource(this, R.array.time_array,
R.layout.spinner_item);
adapter.setDropDownViewResource(android.R.layout.simple_spinner_dropdown_item);
spinner.setAdapter(adapter);
spinner.setSelection(3);
spinner.setOnItemSelectedListener(this);
findViewById(R.id.update).setOnClickListener(this);
}
private void changeHeaderText(final String text, final int visibility) {
runOnUiThread(new Runnable() {
@Override
public void run() {
findViewById(R.id.progressbar).setVisibility(visibility);
header.setText(text);
}
});
}
private class PopulateTable extends AsyncTask<String, Void, PlayerSkills> {
private TrackerTimeEnum.TrackerTime time;
private PlayerSkills hiscores;
private PlayerSkills trackedSkills;
private boolean isUpdating;
public PopulateTable(TrackerTimeEnum.TrackerTime time, boolean isUpdating) {
this.time = time;
this.isUpdating = isUpdating;
}
@Override
protected PlayerSkills doInBackground(String... urls) {
TrackerHelper trackerHelper = new TrackerHelper();
trackerHelper.setUserName(username);
HiscoreHelper hiscoreHelper = new HiscoreHelper();
hiscoreHelper.setUserName(username);
try {
if (isUpdating) {
Updater.perform(username);
}
hiscores = hiscoreHelper.getPlayerStats();
trackedSkills = trackerHelper.getPlayerStats(time);
} catch (PlayerNotFoundException e) {
changeHeaderText(getString(R.string.not_existing_player, username), View.GONE);
} catch (Exception uhe) {
uhe.printStackTrace();
changeHeaderText(getString(R.string.network_error), View.GONE);
}
return trackedSkills;
}
@Override
protected void onPostExecute(PlayerSkills playerSkillsCallback) {
if (trackedSkills != null && hiscores != null) {
populateTable(hiscores, trackedSkills);
}
}
}
private void populateTable(PlayerSkills hiscores, PlayerSkills trackedSkills) {
changeHeaderText(getString(R.string.showing_tracking, username), View.GONE);
if (trackedSkills.sinceWhen != null) {
((TextView) findViewById(R.id.track_since)).setText(getString(R.string.tracking_since,
trackedSkills.sinceWhen));
} else {
((TextView) findViewById(R.id.track_since)).setText(getString(R.string.tracking_starting));
}
TableLayout table = (TableLayout) findViewById(R.id.table_tracking);
table.removeAllViews();
table.addView(createHeadersRow());
table.addView(createRow(hiscores.overall, trackedSkills.overall));
table.addView(createRow(hiscores.attack, trackedSkills.attack));
table.addView(createRow(hiscores.defence, trackedSkills.defence));
table.addView(createRow(hiscores.strength, trackedSkills.strength));
table.addView(createRow(hiscores.hitpoints, trackedSkills.hitpoints));
table.addView(createRow(hiscores.ranged, trackedSkills.ranged));
table.addView(createRow(hiscores.prayer, trackedSkills.prayer));
table.addView(createRow(hiscores.magic, trackedSkills.magic));
table.addView(createRow(hiscores.cooking, trackedSkills.cooking));
table.addView(createRow(hiscores.woodcutting, trackedSkills.woodcutting));
table.addView(createRow(hiscores.fletching, trackedSkills.fletching));
table.addView(createRow(hiscores.fishing, trackedSkills.fishing));
table.addView(createRow(hiscores.firemaking, trackedSkills.firemaking));
table.addView(createRow(hiscores.crafting, trackedSkills.crafting));
table.addView(createRow(hiscores.smithing, trackedSkills.smithing));
table.addView(createRow(hiscores.mining, trackedSkills.mining));
table.addView(createRow(hiscores.herblore, trackedSkills.herblore));
table.addView(createRow(hiscores.agility, trackedSkills.agility));
table.addView(createRow(hiscores.thieving, trackedSkills.thieving));
table.addView(createRow(hiscores.slayer, trackedSkills.slayer));
table.addView(createRow(hiscores.farming, trackedSkills.farming));
table.addView(createRow(hiscores.runecraft, trackedSkills.runecraft));
table.addView(createRow(hiscores.hunter, trackedSkills.hunter));
table.addView(createRow(hiscores.construction, trackedSkills.construction));
}
private TableRow createHeadersRow() {
TableRow tableRow = new TableRow(this);
TableRow.LayoutParams params = new TableRow.LayoutParams();
params.weight = 1;
params.width = 0;
params.topMargin = 10;
params.bottomMargin = 10;
params.gravity = Gravity.CENTER;
// Skill
TextView text = new TextView(this);
text.setText(getString(R.string.skill));
text.setLayoutParams(params);
text.setGravity(Gravity.CENTER);
text.setTextColor(getResources().getColor(R.color.text_normal));
tableRow.addView(text);
// Lvl
text = new TextView(this);
text.setText(getString(R.string.level));
text.setLayoutParams(params);
text.setGravity(Gravity.CENTER);
text.setTextColor(getResources().getColor(R.color.text_normal));
tableRow.addView(text);
// XP
text = new TextView(this);
text.setText(getString(R.string.xp));
text.setLayoutParams(params);
text.setGravity(Gravity.CENTER);
text.setTextColor(getResources().getColor(R.color.text_normal));
tableRow.addView(text);
// Gain
text = new TextView(this);
text.setText(getString(R.string.xp_gain));
text.setLayoutParams(params);
text.setGravity(Gravity.CENTER);
text.setTextColor(getResources().getColor(R.color.text_normal));
tableRow.addView(text);
return tableRow;
}
private TableRow createRow(Skill skillHiscore, Skill skillTrack) {
TableRow tableRow = new TableRow(this);
TableRow.LayoutParams params = new TableRow.LayoutParams();
params.weight = 1;
params.width = 0;
params.topMargin = 10;
params.bottomMargin = 10;
params.gravity = Gravity.CENTER;
// Skill image
ImageView image = new ImageView(this);
image.setImageResource(skillHiscore.getDrawableInt());
image.setLayoutParams(params);
tableRow.addView(image);
// Lvl
TextView text = new TextView(this);
text.setText(skillHiscore.getLevel() + "");
text.setLayoutParams(params);
text.setGravity(Gravity.CENTER);
text.setTextColor(getResources().getColor(R.color.text_normal));
tableRow.addView(text);
// XP
text = new TextView(this);
text.setText(NumberFormat.getInstance().format(skillHiscore.getExperience()));
text.setLayoutParams(params);
text.setGravity(Gravity.CENTER);
text.setTextColor(getResources().getColor(R.color.text_normal));
tableRow.addView(text);
// Gain
text = new TextView(this);
text.setLayoutParams(params);
text.setGravity(Gravity.CENTER);
if (skillTrack.getExperience() == 0) {
text.setTextColor(getResources().getColor(R.color.DarkGray));
text.setText(getString(R.string.gain_small, skillTrack.getExperience()));
} else {
text.setTextColor(getResources().getColor(R.color.Green));
if (skillTrack.getExperience() < 1000) {
text.setText(getString(R.string.gain_small, skillTrack.getExperience()));
} else if (skillTrack.getExperience() >= 1000 && skillTrack.getExperience() < 10000) {
text.setText(getString(R.string.gain_medium, skillTrack.getExperience() / 1000.0f));
} else {
text.setText(getString(R.string.gain, skillTrack.getExperience() / 1000));
}
}
tableRow.addView(text);
return tableRow;
}
private void createAsyncTaskToPopulate(String selectedTime, boolean isUpdating) {
TrackerTimeEnum.TrackerTime time = null;
if (selectedTime.equals("Hour")) {
time = TrackerTimeEnum.TrackerTime.Hour;
} else if (selectedTime.equals("Day")) {
time = TrackerTimeEnum.TrackerTime.Day;
} else if (selectedTime.equals("Week")) {
time = TrackerTimeEnum.TrackerTime.Week;
} else if (selectedTime.equals("Month")) {
time = TrackerTimeEnum.TrackerTime.Month;
} else if (selectedTime.equals("Year")) {
time = TrackerTimeEnum.TrackerTime.Year;
}
if (time != null) {
((TableLayout) findViewById(R.id.table_tracking)).removeAllViews();
((TextView) findViewById(R.id.track_since)).setText("");
changeHeaderText(getString(R.string.loading_tracking, username), View.VISIBLE);
new PopulateTable(time, isUpdating).execute();
}
}
@Override
public void onItemSelected(AdapterView<?> parent, View view, int position, long id) {
if(spinner.getSelectedItem() instanceof String) {
createAsyncTaskToPopulate((String) spinner.getSelectedItem(), false);
}
}
@Override
public void onNothingSelected(AdapterView<?> arg0) {
}
@Override
public void onClick(View v) {
if (v.getId() == R.id.update) {
createAsyncTaskToPopulate((String) spinner.getSelectedItem(), true);
}
}
}
| Remove mentions of hiscores from RT tracker
| app/src/main/java/com/infonuascape/osrshelper/RTXPTrackerActivity.java | Remove mentions of hiscores from RT tracker | <ide><path>pp/src/main/java/com/infonuascape/osrshelper/RTXPTrackerActivity.java
<ide> import android.widget.TableRow;
<ide> import android.widget.TextView;
<ide>
<del>import com.infonuascape.osrshelper.hiscore.HiscoreHelper;
<ide> import com.infonuascape.osrshelper.tracker.TrackerTimeEnum;
<ide> import com.infonuascape.osrshelper.tracker.rt.TrackerHelper;
<ide> import com.infonuascape.osrshelper.tracker.rt.Updater;
<ide>
<ide> private class PopulateTable extends AsyncTask<String, Void, PlayerSkills> {
<ide> private TrackerTimeEnum.TrackerTime time;
<del> private PlayerSkills hiscores;
<ide> private PlayerSkills trackedSkills;
<ide> private boolean isUpdating;
<ide>
<ide> protected PlayerSkills doInBackground(String... urls) {
<ide> TrackerHelper trackerHelper = new TrackerHelper();
<ide> trackerHelper.setUserName(username);
<del> HiscoreHelper hiscoreHelper = new HiscoreHelper();
<del> hiscoreHelper.setUserName(username);
<ide>
<ide> try {
<ide> if (isUpdating) {
<ide> Updater.perform(username);
<ide> }
<del> hiscores = hiscoreHelper.getPlayerStats();
<ide> trackedSkills = trackerHelper.getPlayerStats(time);
<ide> } catch (PlayerNotFoundException e) {
<ide> changeHeaderText(getString(R.string.not_existing_player, username), View.GONE);
<ide>
<ide> @Override
<ide> protected void onPostExecute(PlayerSkills playerSkillsCallback) {
<del> if (trackedSkills != null && hiscores != null) {
<del> populateTable(hiscores, trackedSkills);
<del> }
<del> }
<del> }
<del>
<del> private void populateTable(PlayerSkills hiscores, PlayerSkills trackedSkills) {
<add> if (trackedSkills != null) {
<add> populateTable(trackedSkills);
<add> }
<add> }
<add> }
<add>
<add> private void populateTable(PlayerSkills trackedSkills) {
<ide> changeHeaderText(getString(R.string.showing_tracking, username), View.GONE);
<ide> if (trackedSkills.sinceWhen != null) {
<ide> ((TextView) findViewById(R.id.track_since)).setText(getString(R.string.tracking_since,
<ide> TableLayout table = (TableLayout) findViewById(R.id.table_tracking);
<ide> table.removeAllViews();
<ide> table.addView(createHeadersRow());
<del> table.addView(createRow(hiscores.overall, trackedSkills.overall));
<del> table.addView(createRow(hiscores.attack, trackedSkills.attack));
<del> table.addView(createRow(hiscores.defence, trackedSkills.defence));
<del> table.addView(createRow(hiscores.strength, trackedSkills.strength));
<del> table.addView(createRow(hiscores.hitpoints, trackedSkills.hitpoints));
<del> table.addView(createRow(hiscores.ranged, trackedSkills.ranged));
<del> table.addView(createRow(hiscores.prayer, trackedSkills.prayer));
<del> table.addView(createRow(hiscores.magic, trackedSkills.magic));
<del> table.addView(createRow(hiscores.cooking, trackedSkills.cooking));
<del> table.addView(createRow(hiscores.woodcutting, trackedSkills.woodcutting));
<del> table.addView(createRow(hiscores.fletching, trackedSkills.fletching));
<del> table.addView(createRow(hiscores.fishing, trackedSkills.fishing));
<del> table.addView(createRow(hiscores.firemaking, trackedSkills.firemaking));
<del> table.addView(createRow(hiscores.crafting, trackedSkills.crafting));
<del> table.addView(createRow(hiscores.smithing, trackedSkills.smithing));
<del> table.addView(createRow(hiscores.mining, trackedSkills.mining));
<del> table.addView(createRow(hiscores.herblore, trackedSkills.herblore));
<del> table.addView(createRow(hiscores.agility, trackedSkills.agility));
<del> table.addView(createRow(hiscores.thieving, trackedSkills.thieving));
<del> table.addView(createRow(hiscores.slayer, trackedSkills.slayer));
<del> table.addView(createRow(hiscores.farming, trackedSkills.farming));
<del> table.addView(createRow(hiscores.runecraft, trackedSkills.runecraft));
<del> table.addView(createRow(hiscores.hunter, trackedSkills.hunter));
<del> table.addView(createRow(hiscores.construction, trackedSkills.construction));
<add> table.addView(createRow(trackedSkills.overall));
<add> table.addView(createRow(trackedSkills.attack));
<add> table.addView(createRow(trackedSkills.defence));
<add> table.addView(createRow(trackedSkills.strength));
<add> table.addView(createRow(trackedSkills.hitpoints));
<add> table.addView(createRow(trackedSkills.ranged));
<add> table.addView(createRow(trackedSkills.prayer));
<add> table.addView(createRow(trackedSkills.magic));
<add> table.addView(createRow(trackedSkills.cooking));
<add> table.addView(createRow(trackedSkills.woodcutting));
<add> table.addView(createRow(trackedSkills.fletching));
<add> table.addView(createRow(trackedSkills.fishing));
<add> table.addView(createRow(trackedSkills.firemaking));
<add> table.addView(createRow(trackedSkills.crafting));
<add> table.addView(createRow(trackedSkills.smithing));
<add> table.addView(createRow(trackedSkills.mining));
<add> table.addView(createRow(trackedSkills.herblore));
<add> table.addView(createRow(trackedSkills.agility));
<add> table.addView(createRow(trackedSkills.thieving));
<add> table.addView(createRow(trackedSkills.slayer));
<add> table.addView(createRow(trackedSkills.farming));
<add> table.addView(createRow(trackedSkills.runecraft));
<add> table.addView(createRow(trackedSkills.hunter));
<add> table.addView(createRow(trackedSkills.construction));
<ide> }
<ide>
<ide> private TableRow createHeadersRow() {
<ide> return tableRow;
<ide> }
<ide>
<del> private TableRow createRow(Skill skillHiscore, Skill skillTrack) {
<add> private TableRow createRow(Skill skillTrack) {
<ide> TableRow tableRow = new TableRow(this);
<ide> TableRow.LayoutParams params = new TableRow.LayoutParams();
<ide> params.weight = 1;
<ide>
<ide> // Skill image
<ide> ImageView image = new ImageView(this);
<del> image.setImageResource(skillHiscore.getDrawableInt());
<add> image.setImageResource(skillTrack.getDrawableInt());
<ide> image.setLayoutParams(params);
<ide> tableRow.addView(image);
<ide>
<ide> // Lvl
<ide> TextView text = new TextView(this);
<del> text.setText(skillHiscore.getLevel() + "");
<add> text.setText(skillTrack.getLevel() + "");
<ide> text.setLayoutParams(params);
<ide> text.setGravity(Gravity.CENTER);
<ide> text.setTextColor(getResources().getColor(R.color.text_normal));
<ide>
<ide> // XP
<ide> text = new TextView(this);
<del> text.setText(NumberFormat.getInstance().format(skillHiscore.getExperience()));
<add> text.setText(NumberFormat.getInstance().format(skillTrack.getExperience()));
<ide> text.setLayoutParams(params);
<ide> text.setGravity(Gravity.CENTER);
<ide> text.setTextColor(getResources().getColor(R.color.text_normal));
<ide> text = new TextView(this);
<ide> text.setLayoutParams(params);
<ide> text.setGravity(Gravity.CENTER);
<del>
<del> if (skillTrack.getExperience() == 0) {
<add> int expDiff = skillTrack.getExperienceDiff();
<add>
<add> if (expDiff == 0) {
<ide> text.setTextColor(getResources().getColor(R.color.DarkGray));
<del> text.setText(getString(R.string.gain_small, skillTrack.getExperience()));
<add> text.setText(getString(R.string.gain_small, expDiff));
<ide> } else {
<ide> text.setTextColor(getResources().getColor(R.color.Green));
<del> if (skillTrack.getExperience() < 1000) {
<del> text.setText(getString(R.string.gain_small, skillTrack.getExperience()));
<del> } else if (skillTrack.getExperience() >= 1000 && skillTrack.getExperience() < 10000) {
<del> text.setText(getString(R.string.gain_medium, skillTrack.getExperience() / 1000.0f));
<add> if (expDiff < 1000) {
<add> text.setText(getString(R.string.gain_small, expDiff));
<add> } else if (expDiff >= 1000 && expDiff < 10000) {
<add> text.setText(getString(R.string.gain_medium, expDiff / 1000.0f));
<ide> } else {
<del> text.setText(getString(R.string.gain, skillTrack.getExperience() / 1000));
<add> text.setText(getString(R.string.gain, expDiff / 1000));
<ide> }
<ide> }
<ide> tableRow.addView(text); |
|
Java | apache-2.0 | error: pathspec 'src/me/dengfengdecao/list/MergeSortedLists.java' did not match any file(s) known to git
| 792f1d1cacbb868f70f6bfb03b5358e19069e665 | 1 | dengfengdecao/InterviewQuestions | package me.dengfengdecao.list;
import org.junit.Test;
/**
 * Merges two ascending sorted linked lists so that the merged list is ascending as well.
 * Recursively compares the head nodes of the two lists and appends the smaller one to the merged list.
*
* @author linyu
*
*/
public class MergeSortedLists {
Node merge(Node head1, Node head2) {
if (head1 == null)
return head2;
else if (head2 == null)
return head1;
Node mergeHead = null;
if (head1.value < head2.value) {
mergeHead = head1;
mergeHead.next = merge(head1.next, head2);
} else {
mergeHead = head2;
mergeHead.next = merge(head1, head2.next);
}
return mergeHead;
}
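	// A minimal iterative sketch of the same merge, added for comparison: it avoids
	// the recursion above growing the call stack linearly with the list length. It
	// assumes the same ascending inputs and reuses the Node inner class; the dummy
	// head only spares a special case for the first node and its value is never read.
	Node mergeIteratively(Node head1, Node head2) {
		Node dummy = new Node(0);
		Node tail = dummy;
		while (head1 != null && head2 != null) {
			if (head1.value < head2.value) {
				tail.next = head1;
				head1 = head1.next;
			} else {
				tail.next = head2;
				head2 = head2.next;
			}
			tail = tail.next;
		}
		tail.next = (head1 != null) ? head1 : head2; // append whatever remains
		return dummy.next;
	}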
	// Builds a singly linked list from the given node array
	// Index n starts from 1; returns the head node
Node buildList(Node[] nodeArray) {
if (nodeArray == null) return null;
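		// Assumes a non-empty array past the null check: nodeArray[0] becomes the head.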
Node head = nodeArray[0];
Node cur = null;
for (int i=1; i < nodeArray.length; i++) {
Node tmp = nodeArray[i];
if (i == 1) {
head.setNext(tmp);
} else {
cur.setNext(tmp);
}
			cur = tmp; // cache the current node
}
return head;
}
class Node {
private int value;
private Node next;
public Node(int value) {
super();
this.value = value;
}
public int getValue() {
return value;
}
public void setValue(int value) {
this.value = value;
}
public Node getNext() {
return next;
}
public void setNext(Node next) {
this.next = next;
}
}
// list1: 1->3->5
// list2: 2->4->6
@Test
public void test1() throws Exception {
Node[] node1 = {new Node(1), new Node(3), new Node(5)};
Node[] node2 = {new Node(2), new Node(4), new Node(6)};
Node head1 = buildList(node1);
Node head2 = buildList(node2);
Node mergeHead = merge(head1, head2);
System.out.println("value of merge head:");
while (mergeHead != null) {
System.out.print(mergeHead.value + ",");
mergeHead = mergeHead.getNext();
}
}
// list1: 1->3->5
// list2: 1->3->5
@Test
public void test2() throws Exception {
Node[] node1 = {new Node(1), new Node(3), new Node(5)};
Node[] node2 = {new Node(1), new Node(3), new Node(5)};
Node head1 = buildList(node1);
Node head2 = buildList(node2);
Node mergeHead = merge(head1, head2);
System.out.println("value of merge head:");
while (mergeHead != null) {
System.out.print(mergeHead.value + ",");
mergeHead = mergeHead.getNext();
}
}
// list1: 1
// list2: 2
@Test
public void test3() throws Exception {
Node[] node1 = {new Node(1)};
Node[] node2 = {new Node(2)};
Node head1 = buildList(node1);
Node head2 = buildList(node2);
Node mergeHead = merge(head1, head2);
System.out.println("value of merge head:");
while (mergeHead != null) {
System.out.print(mergeHead.value + ",");
mergeHead = mergeHead.getNext();
}
}
	// One of the lists is null
// list1: 1->3->5
// list2: null
@Test
public void test4() throws Exception {
Node[] node1 = {new Node(1), new Node(3), new Node(5)};
Node[] node2 = null;
Node head1 = buildList(node1);
Node head2 = buildList(node2);
Node mergeHead = merge(head1, head2);
System.out.println("value of merge head:");
while (mergeHead != null) {
System.out.print(mergeHead.value + ",");
mergeHead = mergeHead.getNext();
}
}
	// Both lists are empty
	// list1: empty list
	// list2: empty list
@Test
public void test5() throws Exception {
Node head1 = buildList(null);
Node head2 = buildList(null);
Node mergeHead = merge(head1, head2);
System.out.println("value of merge head:");
while (mergeHead != null) {
System.out.print(mergeHead.value + ",");
mergeHead = mergeHead.getNext();
}
}
}
 | src/me/dengfengdecao/list/MergeSortedLists.java | Merge two ascending sorted linked lists so that the merged list is ascending as well. | src/me/dengfengdecao/list/MergeSortedLists.java | Merge two ascending sorted linked lists so that the merged list is ascending as well. | <ide><path>rc/me/dengfengdecao/list/MergeSortedLists.java
<add>package me.dengfengdecao.list;
<add>
<add>import org.junit.Test;
<add>
<add>/**
<add> * Merges two ascending sorted linked lists so that the merged list is ascending as well.
<add> * Recursively compares the head nodes of the two lists and appends the smaller one to the merged list.
<add> *
<add> * @author linyu
<add> *
<add> */
<add>public class MergeSortedLists {
<add>
<add> Node merge(Node head1, Node head2) {
<add> if (head1 == null)
<add> return head2;
<add> else if (head2 == null)
<add> return head1;
<add>
<add> Node mergeHead = null;
<add> if (head1.value < head2.value) {
<add> mergeHead = head1;
<add> mergeHead.next = merge(head1.next, head2);
<add> } else {
<add> mergeHead = head2;
<add> mergeHead.next = merge(head1, head2.next);
<add> }
<add>
<add> return mergeHead;
<add> }
<add>
<add>	// Builds a singly linked list from the given node array
<add>	// Index n starts from 1; returns the head node
<add> Node buildList(Node[] nodeArray) {
<add> if (nodeArray == null) return null;
<add>
<add> Node head = nodeArray[0];
<add> Node cur = null;
<add> for (int i=1; i < nodeArray.length; i++) {
<add> Node tmp = nodeArray[i];
<add> if (i == 1) {
<add> head.setNext(tmp);
<add> } else {
<add> cur.setNext(tmp);
<add> }
<add>			cur = tmp; // cache the current node
<add> }
<add>
<add> return head;
<add> }
<add>
<add> class Node {
<add> private int value;
<add> private Node next;
<add>
<add> public Node(int value) {
<add> super();
<add> this.value = value;
<add> }
<add> public int getValue() {
<add> return value;
<add> }
<add> public void setValue(int value) {
<add> this.value = value;
<add> }
<add> public Node getNext() {
<add> return next;
<add> }
<add> public void setNext(Node next) {
<add> this.next = next;
<add> }
<add>
<add> }
<add>
<add> // list1: 1->3->5
<add> // list2: 2->4->6
<add> @Test
<add> public void test1() throws Exception {
<add> Node[] node1 = {new Node(1), new Node(3), new Node(5)};
<add> Node[] node2 = {new Node(2), new Node(4), new Node(6)};
<add> Node head1 = buildList(node1);
<add> Node head2 = buildList(node2);
<add> Node mergeHead = merge(head1, head2);
<add> System.out.println("value of merge head:");
<add> while (mergeHead != null) {
<add> System.out.print(mergeHead.value + ",");
<add> mergeHead = mergeHead.getNext();
<add> }
<add> }
<add> // list1: 1->3->5
<add> // list2: 1->3->5
<add> @Test
<add> public void test2() throws Exception {
<add> Node[] node1 = {new Node(1), new Node(3), new Node(5)};
<add> Node[] node2 = {new Node(1), new Node(3), new Node(5)};
<add> Node head1 = buildList(node1);
<add> Node head2 = buildList(node2);
<add> Node mergeHead = merge(head1, head2);
<add> System.out.println("value of merge head:");
<add> while (mergeHead != null) {
<add> System.out.print(mergeHead.value + ",");
<add> mergeHead = mergeHead.getNext();
<add> }
<add> }
<add> // list1: 1
<add> // list2: 2
<add> @Test
<add> public void test3() throws Exception {
<add> Node[] node1 = {new Node(1)};
<add> Node[] node2 = {new Node(2)};
<add> Node head1 = buildList(node1);
<add> Node head2 = buildList(node2);
<add> Node mergeHead = merge(head1, head2);
<add> System.out.println("value of merge head:");
<add> while (mergeHead != null) {
<add> System.out.print(mergeHead.value + ",");
<add> mergeHead = mergeHead.getNext();
<add> }
<add> }
<add>
<add>	// One of the lists is null
<add> // list1: 1->3->5
<add> // list2: null
<add> @Test
<add> public void test4() throws Exception {
<add> Node[] node1 = {new Node(1), new Node(3), new Node(5)};
<add> Node[] node2 = null;
<add> Node head1 = buildList(node1);
<add> Node head2 = buildList(node2);
<add> Node mergeHead = merge(head1, head2);
<add> System.out.println("value of merge head:");
<add> while (mergeHead != null) {
<add> System.out.print(mergeHead.value + ",");
<add> mergeHead = mergeHead.getNext();
<add> }
<add> }
<add>
<add>	// Both lists are empty
<add>	// list1: empty list
<add>	// list2: empty list
<add> @Test
<add> public void test5() throws Exception {
<add> Node head1 = buildList(null);
<add> Node head2 = buildList(null);
<add> Node mergeHead = merge(head1, head2);
<add> System.out.println("value of merge head:");
<add> while (mergeHead != null) {
<add> System.out.print(mergeHead.value + ",");
<add> mergeHead = mergeHead.getNext();
<add> }
<add> }
<add>} |
|
JavaScript | mit | db4b0deb119ea88aae878cb4355f9481e5f6d970 | 0 | srvrdhn/ChatBotEventNotify,srvrdhn/ChatBotEventNotify | var login = require('../index.js');
var fs = require('fs');
var assert = require('assert');
login({email: "USERNAME", password: "PASS"}, function callback (err, api) {
if(err) return console.error(err);
api.setOptions({listenEvents: true});
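	// listenEvents makes api.listen deliver thread events (joins, renames, etc.)
	// as event.type === "event", which the switch below handles alongside messages.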
var stopListening = api.listen(function(err, event) {
if(err) return console.error(err);
switch(event.type) {
case "message":
if(event.body === '/stop') {
api.sendMessage("Goodbye...", event.threadID);
return stopListening();
}
api.markAsRead(event.threadID, function(err) {
if(err) console.log(err);
});
console.log(event.body);
var n = event.body.search("harambe");
if(n != -1) {
api.sendMessage("#DicksOut", event.threadID);
}
var n = event.body.search("suh");
if(n != -1) {
api.sendMessage("Suh dude", event.threadID);
api.getUserID("USER NAME", function(err, data) {
console.log(data);
if(err) return callback(err);
// Send the message to the best match (best by Facebook's criteria)
var threadID = data[0].userID;
api.sendMessage("if you're reading this its too late", threadID);
});
}
break;
case "event":
console.log(event);
break;
}
});
}); | test/testing.js | var login = require('../index.js');
var fs = require('fs');
var assert = require('assert');
login({email: "USERNAME", password: "PASS"}, function callback (err, api) {
if(err) return console.error(err);
api.setOptions({listenEvents: true});
var stopListening = api.listen(function(err, event) {
if(err) return console.error(err);
switch(event.type) {
case "message":
if(event.body === '/stop') {
api.sendMessage("Goodbye...", event.threadID);
return stopListening();
}
api.markAsRead(event.threadID, function(err) {
if(err) console.log(err);
});
console.log(event.body);
var n = event.body.search("harambe");
if(n != -1) {
api.sendMessage("#DicksOut", event.threadID);
}
var n = event.body.search("suh");
if(n != -1) {
api.sendMessage("Suh dude", event.threadID);
}
break;
case "event":
console.log(event);
break;
}
});
}); | api send personal message
| test/testing.js | api send personal message | <ide><path>est/testing.js
<ide> var login = require('../index.js');
<ide> var fs = require('fs');
<ide> var assert = require('assert');
<add>
<ide>
<ide> login({email: "USERNAME", password: "PASS"}, function callback (err, api) {
<ide> if(err) return console.error(err);
<ide> var n = event.body.search("suh");
<ide> if(n != -1) {
<ide> api.sendMessage("Suh dude", event.threadID);
<add>
<add> api.getUserID("USER NAME", function(err, data) {
<add> console.log(data);
<add> if(err) return callback(err);
<add>
<add> // Send the message to the best match (best by Facebook's criteria)
<add> var threadID = data[0].userID;
<add> api.sendMessage("if you're reading this its too late", threadID);
<add> });
<ide> }
<ide> break;
<ide> case "event": |
|
Java | mit | 8138e1185e08e340e60b336516a32f00e9e79c59 | 0 | HPSoftware/hpaa-octane-dev,HPSoftware/hpaa-octane-dev,HPSoftware/hpaa-octane-dev,HPSoftware/hpaa-octane-dev,HPSoftware/hpaa-octane-dev | /*
* © Copyright 2013 EntIT Software LLC
* Certain versions of software and/or documents (“Material”) accessible here may contain branding from
* Hewlett-Packard Company (now HP Inc.) and Hewlett Packard Enterprise Company. As of September 1, 2017,
* the Material is now offered by Micro Focus, a separately owned and operated company. Any reference to the HP
* and Hewlett Packard Enterprise/HPE marks is historical in nature, and the HP and Hewlett Packard Enterprise/HPE
* marks are the property of their respective owners.
* __________________________________________________________________
* MIT License
*
* © Copyright 2012-2018 Micro Focus or one of its affiliates.
*
* The only warranties for products and services of Micro Focus and its affiliates
* and licensors (“Micro Focus”) are set forth in the express warranty statements
* accompanying such products and services. Nothing herein should be construed as
* constituting an additional warranty. Micro Focus shall not be liable for technical
* or editorial errors or omissions contained herein.
* The information contained herein is subject to change without notice.
* ___________________________________________________________________
*
*/
/*
 * Creates the PcModel and the PcClient and establishes the connection between the job and Performance Center.
 */
package com.microfocus.application.automation.tools.run;
import com.microfocus.adm.performancecenter.plugins.common.pcEntities.*;
import com.microfocus.application.automation.tools.model.PcModel;
import com.microfocus.application.automation.tools.sse.result.model.junit.Error;
import com.microfocus.application.automation.tools.sse.result.model.junit.Failure;
import com.microfocus.application.automation.tools.pc.PcClient;
import com.microfocus.application.automation.tools.sse.result.model.junit.JUnitTestCaseStatus;
import com.microfocus.application.automation.tools.sse.result.model.junit.Testcase;
import com.microfocus.application.automation.tools.sse.result.model.junit.Testsuite;
import com.microfocus.application.automation.tools.sse.result.model.junit.Testsuites;
import com.microfocus.application.automation.tools.octane.configuration.ConfigurationService;
import hudson.Extension;
import hudson.FilePath;
import hudson.Launcher;
import hudson.PluginWrapper;
import hudson.console.HyperlinkNote;
import hudson.model.*;
import hudson.tasks.BuildStepDescriptor;
import hudson.tasks.Builder;
import hudson.util.FormValidation;
import jenkins.model.Jenkins;
import jenkins.tasks.SimpleBuildStep;
import org.apache.commons.lang.StringUtils;
import org.apache.http.client.ClientProtocolException;
import org.jenkinsci.Symbol;
import org.kohsuke.stapler.DataBoundConstructor;
import org.kohsuke.stapler.QueryParameter;
import javax.annotation.Nonnull;
import javax.xml.bind.JAXBContext;
import javax.xml.bind.Marshaller;
import java.beans.IntrospectionException;
import java.io.*;
import java.lang.reflect.Method;
import java.text.Format;
import java.text.SimpleDateFormat;
import java.util.ArrayList;
import java.util.Date;
import java.util.List;
import java.util.Map;
import static com.microfocus.adm.performancecenter.plugins.common.pcEntities.RunState.FINISHED;
import static com.microfocus.adm.performancecenter.plugins.common.pcEntities.RunState.RUN_FAILURE;
public class PcBuilder extends Builder implements SimpleBuildStep{
private static final String artifactsDirectoryName = "archive";
public static final String artifactsResourceName = "artifact";
public static final String runReportStructure = "%s/%s/performanceTestsReports/pcRun";
public static final String trendReportStructure = "%s/%s/performanceTestsReports/TrendReports";
public static final String pcReportArchiveName = "Reports.zip";
public static final String pcReportFileName = "Report.html";
private static final String RUNID_BUILD_VARIABLE = "HP_RUN_ID";
public static final String TRENDED = "Trended";
public static final String PENDING = "Pending";
public static final String PUBLISHING = "Publishing";
public static final String ERROR = "Error";
private PcModel pcModel;
private final String almPassword;
private final String timeslotDurationHours;
private final String timeslotDurationMinutes;
private final boolean statusBySLA;
private int runId;
private String testName;
private FilePath pcReportFile;
private String junitResultsFileName;
private PrintStream logger;
private File WorkspacePath;
private AbstractBuild<?, ?> _build;
@DataBoundConstructor
public PcBuilder(
String serverAndPort,
String pcServerName,
String almUserName,
String almPassword,
String almDomain,
String almProject,
String testId,
String testInstanceId,
String autoTestInstanceID,
String timeslotDurationHours,
String timeslotDurationMinutes,
PostRunAction postRunAction,
boolean vudsMode,
boolean statusBySLA,
String description,
String addRunToTrendReport,
String trendReportId,
boolean HTTPSProtocol,
String proxyOutURL,
String proxyOutUser,
String proxyOutPassword,
String retry,
String retryDelay,
String retryOccurrences) {
this.almUserName = almUserName;
this.almPassword = almPassword;
this.timeslotDurationHours = timeslotDurationHours;
this.timeslotDurationMinutes = timeslotDurationMinutes;
this.statusBySLA = statusBySLA;
pcModel =
new PcModel(
serverAndPort.trim(),
pcServerName.trim(),
almUserName.trim(),
almPassword,
almDomain.trim(),
almProject.trim(),
testId.trim(),
autoTestInstanceID,
testInstanceId.trim(),
timeslotDurationHours.trim(),
timeslotDurationMinutes.trim(),
postRunAction,
vudsMode,
description,
addRunToTrendReport,
trendReportId,
HTTPSProtocol,
proxyOutURL,
proxyOutUser,
proxyOutPassword,
(retry == null || retry.isEmpty())? "NO_RETRY" : retry,
("NO_RETRY".equals(retry)) ? "0" : (retryDelay == null || retryDelay.isEmpty()) ? "5" : retryDelay,
("NO_RETRY".equals(retry)) ? "0" : (retryOccurrences == null || retryOccurrences.isEmpty()) ? "3" : retryOccurrences);
}
@Override
public DescriptorImpl getDescriptor() {
return (DescriptorImpl) super.getDescriptor();
}
@Override
public boolean perform(AbstractBuild<?, ?> build, Launcher launcher, BuildListener listener)
throws InterruptedException, IOException {
_build = build;
if(build.getWorkspace() != null)
WorkspacePath = new File(build.getWorkspace().toURI());
else
WorkspacePath = null;
if((pcModel !=null) && (build != null) && (build instanceof AbstractBuild))
setPcModelBuildParameters(build);
if(build.getWorkspace() != null)
perform(build, build.getWorkspace(), launcher, listener);
else
return false;
return true;
}
private void setPcModelBuildParameters(AbstractBuild<?, ?> build) {
String buildParameters = build.getBuildVariables().toString();
if (!buildParameters.isEmpty())
pcModel.setBuildParameters(buildParameters);
}
public File getWorkspacePath(){
return WorkspacePath;
}
public PcModel getPcModel() {
return pcModel;
}
public String getRunResultsFileName() {
return junitResultsFileName;
}
public static String getArtifactsDirectoryName() {
return artifactsDirectoryName;
}
public static String getArtifactsResourceName() {
return artifactsResourceName;
}
public static String getRunReportStructure() {
return runReportStructure;
}
public static String getPcReportArchiveName() {
return pcReportArchiveName;
}
public static String getPcreportFileName() {
return pcReportFileName;
}
private String getVersion() {
String completeVersion = ConfigurationService.getPluginVersion();
if(completeVersion != null) {
String[] partsOfCompleteVersion = completeVersion.split(" [(]");
return partsOfCompleteVersion[0];
}
return "unknown";
}
private Testsuites execute(PcClient pcClient, Run<?, ?> build)
throws InterruptedException,NullPointerException {
try {
String version = getVersion();
if(!(version == null || version.equals("unknown")))
logger.println(String.format("%s - plugin version is '%s'",simpleDateFormater(), version));
if((pcModel !=null) && (build != null) && (build instanceof AbstractBuild))
setPcModelBuildParameters((AbstractBuild) build);
if (!StringUtils.isBlank(pcModel.getDescription()))
logger.println(String.format("%s - Test description: %s", simpleDateFormater(), pcModel.getDescription()));
if (!beforeRun(pcClient))
return null;
return run(pcClient, build);
} catch (InterruptedException e) {
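			// A Jenkins abort surfaces here as InterruptedException: mark the build
			// aborted and stop the Performance Center run before rethrowing.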
build.setResult(Result.ABORTED);
pcClient.stopRun(runId);
throw e;
} catch (NullPointerException e) {
logger.println(String.format("%s - Error: %s", simpleDateFormater(), e.getMessage()));
} catch (Exception e) {
logger.println(String.format("%s - %s", simpleDateFormater(), e.getMessage()));
} finally {
pcClient.logout();
}
return null;
}
private Testsuites run(PcClient pcClient, Run<?, ?> build)
throws InterruptedException, ClientProtocolException,
IOException, PcException {
if((pcModel !=null) && (build != null) && (build instanceof AbstractBuild))
setPcModelBuildParameters((AbstractBuild) build);
PcRunResponse response = null;
String errorMessage = "";
String eventLogString = "";
boolean trendReportReady = false;
try {
runId = pcClient.startRun();
if (runId == 0)
return null;
}
catch (NumberFormatException ex) {
logger.println(String.format("%s - startRun failed. Error: %s", simpleDateFormater(),ex.getMessage()));
throw ex;
}
catch (ClientProtocolException ex) {
logger.println(String.format("%s - startRun failed. Error: %s", simpleDateFormater(),ex.getMessage()));
throw ex;
}
catch (PcException ex) {
logger.println(String.format("%s - startRun failed. Error: %s", simpleDateFormater(),ex.getMessage()));
throw ex;
}
catch (IOException ex) {
logger.println(String.format("%s - startRun failed. Error: %s", simpleDateFormater(),ex.getMessage()));
throw ex;
}
//getTestName failure should not fail test execution.
try {
testName = pcClient.getTestName();
if(testName == null) {
testName = String.format("TestId_%s", pcModel.getTestId());
logger.println(String.format("%s - getTestName failed. Using '%s' as testname.", simpleDateFormater(), testName));
}
else
logger.println(String.format("%s - test name is %s", simpleDateFormater(), testName));
}
catch (PcException ex) {
testName = String.format("TestId_%s", pcModel.getTestId());
logger.println(String.format("%s - getTestName failed. Using '%s' as testname. Error: %s \n", simpleDateFormater(), testName, ex.getMessage()));
}
catch (IOException ex) {
testName = String.format("TestId_%s", pcModel.getTestId());
logger.println(String.format("%s - getTestName failed. Using '%s' as testname. Error: %s \n", simpleDateFormater(), testName, ex.getMessage()));
}
try {
List<ParameterValue> parameters = new ArrayList<>();
parameters.add(new StringParameterValue(RUNID_BUILD_VARIABLE, "" + runId));
// This allows a user to access the runId from within Jenkins using a build variable.
build.addAction(new AdditionalParametersAction(parameters));
logger.print(String.format("%s - Set %s Environment Variable to %s \n",simpleDateFormater(), RUNID_BUILD_VARIABLE, runId));
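			// Downstream build steps can then read the run ID like any other build
			// variable, e.g. $HP_RUN_ID in a shell step or %HP_RUN_ID% in a batch step.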
response = pcClient.waitForRunCompletion(runId);
if (response != null && RunState.get(response.getRunState()) == FINISHED && pcModel.getPostRunAction() != PostRunAction.DO_NOTHING) {
pcReportFile = pcClient.publishRunReport(runId, getReportDirectory(build));
// Adding the trend report section if ID has been set or if the Associated Trend report is selected.
if(((("USE_ID").equals(pcModel.getAddRunToTrendReport()) && pcModel.getTrendReportId(true) != null) || ("ASSOCIATED").equals(pcModel.getAddRunToTrendReport())) && RunState.get(response.getRunState()) != RUN_FAILURE){
Thread.sleep(5000);
pcClient.addRunToTrendReport(this.runId, pcModel.getTrendReportId(true));
pcClient.waitForRunToPublishOnTrendReport(this.runId, pcModel.getTrendReportId(true));
pcClient.downloadTrendReportAsPdf(pcModel.getTrendReportId(true), getTrendReportsDirectory(build));
trendReportReady = true;
}
} else if (response != null && RunState.get(response.getRunState()).ordinal() > FINISHED.ordinal()) {
PcRunEventLog eventLog = pcClient.getRunEventLog(runId);
eventLogString = buildEventLogString(eventLog);
}
} catch (PcException e) {
logger.println(String.format("%s - Error: %s", simpleDateFormater(), e.getMessage()));
}
Testsuites ret = new Testsuites();
parsePcRunResponse(ret,response, build, errorMessage, eventLogString);
try {
parsePcTrendResponse(ret,build,pcClient,trendReportReady,pcModel.getTrendReportId(true),runId);
} catch (IntrospectionException e) {
e.printStackTrace();
} catch (NoSuchMethodException e) {
e.printStackTrace();
}
return ret;
}
private String buildEventLogString(PcRunEventLog eventLog) {
String logFormat = "%-5s | %-7s | %-19s | %s\n";
StringBuilder eventLogStr = new StringBuilder("Event Log:\n\n" + String.format(logFormat, "ID", "TYPE", "TIME","DESCRIPTION"));
for (PcRunEventLogRecord record : eventLog.getRecordsList()) {
eventLogStr.append(String.format(logFormat, record.getID(), record.getType(), record.getTime(), record.getDescription()));
}
return eventLogStr.toString();
}
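	// A sample row rendered by the format above (illustrative values only):
	// 42    | Error   | 18/06/2018 10:15:00 | Load Generator unreachable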
private boolean beforeRun(PcClient pcClient) {
return validatePcForm() && pcClient.login();
}
private String getReportDirectory(Run<?, ?> build) {
return String.format(
runReportStructure,
build.getRootDir().getPath(),
artifactsDirectoryName);
}
private String getTrendReportsDirectory(Run<?, ?> build) {
return String.format(
trendReportStructure,
build.getRootDir().getPath(),
artifactsDirectoryName);
}
@Override
@Deprecated
public boolean perform(Build<?, ?> build, Launcher launcher, BuildListener listener) throws InterruptedException, IOException {
return super.perform(build, launcher, listener);
}
private boolean validatePcForm() {
logger.println(String.format("%s - Validating parameters before run", simpleDateFormater()));
String prefix = "doCheck";
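		// Reflection-based validation: each DescriptorImpl method named doCheckXxx is
		// paired with the PcModel getter getXxx and invoked on the current value, so
		// every form field with a validator gets checked without per-field wiring.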
boolean ret = true;
Method[] methods = getDescriptor().getClass().getMethods();
Method[] modelMethods = pcModel.getClass().getMethods();
for (Method method : methods) {
String name = method.getName();
if (name.startsWith(prefix)) {
name = name.replace(prefix, "").toLowerCase();
for (Method modelMethod : modelMethods) {
String modelMethodName = modelMethod.getName();
if (modelMethodName.toLowerCase().equals("get" + name) && modelMethod.getParameterTypes().length==0) {
try {
Object obj = FormValidation.ok();
if (
!("testinstanceid".equals(name) && "AUTO".equals(pcModel.getAutoTestInstanceID())) &&
!(("retrydelay".equals(name) && "NO_RETRY".equals(pcModel.getRetry())) || pcModel.getRetry().isEmpty()) &&
!(("retryoccurrences".equals(name) && "NO_RETRY".equals(pcModel.getRetry())) || pcModel.getRetry().isEmpty())
) {
obj = method.invoke(getDescriptor(), modelMethod.invoke(getPcModel()));
}
if (!obj.equals(FormValidation.ok())) {
logger.println(obj);
ret = false;
}
break;
} catch (Exception e) {
logger.println("method.getName() = " + method.getName() + "\nname = " + name + "\nmodelMethodName = " + modelMethodName + "\nexception = " + e + "\n");
}
}
}
}
}
boolean isTrendReportIdValid = validateTrendReportIdIsNumeric(getPcModel().getTrendReportId(true),("USE_ID").equals(getPcModel().getAddRunToTrendReport()));
		boolean isRetryValid = validateRetryIsNumeric(getPcModel().getRetry(),getPcModel().getRetryDelay(),getPcModel().getRetryOccurrences());
		ret &= isTrendReportIdValid;
		ret &= isRetryValid;
return ret;
}
private boolean validateTrendReportIdIsNumeric(String trendReportId, boolean addRunToTrendReport){
FormValidation res = FormValidation.ok();
if(addRunToTrendReport){
if(trendReportId.isEmpty()){
res = FormValidation.error("Parameter Is Missing: trend report ID is missing");
}
else{
try{
Integer.parseInt(trendReportId);
}
catch(NumberFormatException e) {
res = FormValidation.error("Illegal Parameter: trend report ID is not a number");
}
}
}
logger.println(String.format("%s - %s", simpleDateFormater(), res.toString().replace(": <div/>","")));
return res.equals(FormValidation.ok());
}
private boolean validateRetryIsNumeric(String myRetry, String myRetryDelay, String myRetryOccurrences){
FormValidation res = FormValidation.ok();
if("RETRY".equals(myRetry)){
if(myRetryDelay.isEmpty() || myRetryOccurrences.isEmpty()){
res = FormValidation.error("Parameter Is Missing: Retry on failure parameter is missing");
}
else{
try{
if (Integer.parseInt(myRetryDelay)<=0)
res = FormValidation.error("Illegal Parameter: Retry Delay is not a positive number");
}
catch(NumberFormatException e) {
res = FormValidation.error("Illegal Parameter: Retry Delay is not a number");
}
try{
if (Integer.parseInt(myRetryOccurrences)<=0)
res = FormValidation.error("Illegal Parameter: Retry Occurrences is not a positive number");
}
catch(NumberFormatException e) {
res = FormValidation.error("Illegal Parameter: Retry Occurrences is not a number");
}
}
}
logger.println(String.format("%s - %s", simpleDateFormater(), res.toString().replace(": <div/>","")));
return res.equals(FormValidation.ok());
}
private Testsuites parsePcRunResponse(Testsuites ret,
PcRunResponse runResponse,
Run<?, ?> build,
String errorMessage, String eventLogString) throws IOException, InterruptedException {
RunState runState = RunState.get(runResponse.getRunState());
List<Testsuite> testSuites = ret.getTestsuite();
Testsuite testSuite = new Testsuite();
Testcase testCase = new Testcase();
//testCase.setClassname("Performance Tests.Test ID: " + runResponse.getTestID());
testCase.setClassname("Performance Test.Load Test");
testCase.setName(testName + "(ID:" + runResponse.getTestID() + ")");
testCase.setTime(String.valueOf(runResponse.getDuration() * 60));
if (pcReportFile != null && pcReportFile.exists() && runState == FINISHED) {
testCase.getSystemOut().add(getOutputForReportLinks(build));
}
updateTestStatus(testCase, runResponse, errorMessage, eventLogString);
testSuite.getTestcase().add(testCase);
testSuite.setName("Performance Test ID: " + runResponse.getTestID() + ", Run ID: " + runResponse.getID());
testSuites.add(testSuite);
return ret;
}
private Testsuites parsePcTrendResponse(Testsuites ret,Run<?, ?> build,PcClient pcClient,boolean trendReportReady,String TrendReportID, int runID) throws PcException,IntrospectionException,IOException, InterruptedException ,NoSuchMethodException{
if(trendReportReady){
String reportUrlTemp = trendReportStructure.replaceFirst("%s/", "") + "/trendReport%s.pdf";
String reportUrl = String.format(reportUrlTemp, artifactsResourceName, pcModel.getTrendReportId(true));
pcClient.publishTrendReport(reportUrl, pcModel.getTrendReportId(true));
// Updating all CSV files for plot plugin
// this helps to show the transaction of each result
if (isPluginActive("Plot plugin")) {
logger.println(String.format("%s Updating csv files for Trending Charts.", simpleDateFormater()));
updateCSVFilesForPlot(pcClient, runID);
String plotUrlPath = "/job/" + build.getParent().getName() + "/plot";
logger.println(String.format("%s - %s",simpleDateFormater(), HyperlinkNote.encodeTo(plotUrlPath, "Trending Charts"))); // + HyperlinkNote.encodeTo("https://wiki.jenkins-ci.org/display/JENKINS/HP+Application+Automation+Tools#HPApplicationAutomationTools-RunningPerformanceTestsusingHPPerformanceCenter","More Info"));
}else{
logger.println(String.format("%s - You can view Trending Charts directly from Jenkins using Plot Plugin, see more details on the %s (Performance Center 12.55 and Later).",simpleDateFormater(), HyperlinkNote.encodeTo("https://wiki.jenkins.io/display/JENKINS/HPE+Application+Automation+Tools#HPEApplicationAutomationTools-RunningPerformanceTestsusingHPEPerformanceCenter","documentation")));
}
}
return ret;
}
private boolean isPluginActive(String pluginDisplayName){
List<PluginWrapper> allPlugin = Jenkins.getInstance().pluginManager.getPlugins();
for (PluginWrapper pw :
allPlugin) {
if (pw.getDisplayName().toLowerCase().equals(pluginDisplayName.toLowerCase())) {
return pw.isActive();
}
}
return false;
}
private void updateCSVFilesForPlot(PcClient pcClient, int runId) throws IOException, PcException, IntrospectionException, NoSuchMethodException {
//Map<String, String> measurementMap =pcClient.getTrendReportByXML(pcModel.getTrendReportId(true),runId,TrendReportTypes.DataType.Transaction, TrendReportTypes.PctType.TRT, TrendReportTypes.Measurement.PCT_AVERAGE);
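		// Each call below is expected to export one CSV per (data type, PCT type,
		// measurement) combination into the workspace so the Plot plugin can chart it.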
// Transaction - TRT
saveFileToWorkspacePath(pcClient,pcModel.getTrendReportId(true),runId,TrendReportTypes.DataType.Transaction, TrendReportTypes.PctType.TRT, TrendReportTypes.Measurement.PCT_MINIMUM);
saveFileToWorkspacePath(pcClient,pcModel.getTrendReportId(true),runId,TrendReportTypes.DataType.Transaction, TrendReportTypes.PctType.TRT, TrendReportTypes.Measurement.PCT_MAXIMUM);
saveFileToWorkspacePath(pcClient,pcModel.getTrendReportId(true),runId,TrendReportTypes.DataType.Transaction, TrendReportTypes.PctType.TRT, TrendReportTypes.Measurement.PCT_AVERAGE);
saveFileToWorkspacePath(pcClient,pcModel.getTrendReportId(true),runId,TrendReportTypes.DataType.Transaction, TrendReportTypes.PctType.TRT, TrendReportTypes.Measurement.PCT_MEDIAN);
saveFileToWorkspacePath(pcClient,pcModel.getTrendReportId(true),runId,TrendReportTypes.DataType.Transaction, TrendReportTypes.PctType.TRT, TrendReportTypes.Measurement.PCT_STDDEVIATION);
saveFileToWorkspacePath(pcClient,pcModel.getTrendReportId(true),runId,TrendReportTypes.DataType.Transaction, TrendReportTypes.PctType.TRT, TrendReportTypes.Measurement.PCT_COUNT1);
//saveFileToWorkspacePath(pcClient,pcModel.getTrendReportId(true),runId,TrendReportTypes.DataType.Transaction, TrendReportTypes.PctType.TRT, TrendReportTypes.Measurement.PCT_SUM1);
saveFileToWorkspacePath(pcClient,pcModel.getTrendReportId(true),runId,TrendReportTypes.DataType.Transaction, TrendReportTypes.PctType.TRT, TrendReportTypes.Measurement.PCT_PERCENTILE_90);
// Transaction - TPS
saveFileToWorkspacePath(pcClient,pcModel.getTrendReportId(true),runId,TrendReportTypes.DataType.Transaction, TrendReportTypes.PctType.TPS, TrendReportTypes.Measurement.PCT_MINIMUM);
saveFileToWorkspacePath(pcClient,pcModel.getTrendReportId(true),runId,TrendReportTypes.DataType.Transaction, TrendReportTypes.PctType.TPS, TrendReportTypes.Measurement.PCT_MAXIMUM);
saveFileToWorkspacePath(pcClient,pcModel.getTrendReportId(true),runId,TrendReportTypes.DataType.Transaction, TrendReportTypes.PctType.TPS, TrendReportTypes.Measurement.PCT_AVERAGE);
saveFileToWorkspacePath(pcClient,pcModel.getTrendReportId(true),runId,TrendReportTypes.DataType.Transaction, TrendReportTypes.PctType.TPS, TrendReportTypes.Measurement.PCT_MEDIAN);
//saveFileToWorkspacePath(pcClient,pcModel.getTrendReportId(),runId,TrendReportTypes.DataType.Transaction, TrendReportTypes.PctType.TPS, TrendReportTypes.Measurement.PCT_STDDEVIATION);
saveFileToWorkspacePath(pcClient,pcModel.getTrendReportId(true),runId,TrendReportTypes.DataType.Transaction, TrendReportTypes.PctType.TPS, TrendReportTypes.Measurement.PCT_SUM1);
// Transaction - TRS
//saveFileToWorkspacePath(pcClient,pcModel.getTrendReportId(true),runId,TrendReportTypes.DataType.Transaction, TrendReportTypes.PctType.TRS, TrendReportTypes.Measurement.PCT_MINIMUM);
//saveFileToWorkspacePath(pcClient,pcModel.getTrendReportId(true),runId,TrendReportTypes.DataType.Transaction, TrendReportTypes.PctType.TRS, TrendReportTypes.Measurement.PCT_MAXIMUM);
//saveFileToWorkspacePath(pcClient,pcModel.getTrendReportId(true),runId,TrendReportTypes.DataType.Transaction, TrendReportTypes.PctType.TRS, TrendReportTypes.Measurement.PCT_AVERAGE);
//saveFileToWorkspacePath(pcClient,pcModel.getTrendReportId(true),runId,TrendReportTypes.DataType.Transaction, TrendReportTypes.PctType.TRS, TrendReportTypes.Measurement.PCT_MEDIAN);
//saveFileToWorkspacePath(pcClient,pcModel.getTrendReportId(true),runId,TrendReportTypes.DataType.Transaction, TrendReportTypes.PctType.TRS, TrendReportTypes.Measurement.PCT_STDDEVIATION);
saveFileToWorkspacePath(pcClient,pcModel.getTrendReportId(true),runId,TrendReportTypes.DataType.Transaction, TrendReportTypes.PctType.TRS, TrendReportTypes.Measurement.PCT_COUNT1);
// Monitors - UDP
saveFileToWorkspacePath(pcClient,pcModel.getTrendReportId(true),runId,TrendReportTypes.DataType.Monitors, TrendReportTypes.PctType.UDP, TrendReportTypes.Measurement.PCT_MINIMUM);
saveFileToWorkspacePath(pcClient,pcModel.getTrendReportId(true),runId,TrendReportTypes.DataType.Monitors, TrendReportTypes.PctType.UDP, TrendReportTypes.Measurement.PCT_MAXIMUM);
saveFileToWorkspacePath(pcClient,pcModel.getTrendReportId(true),runId,TrendReportTypes.DataType.Monitors, TrendReportTypes.PctType.UDP, TrendReportTypes.Measurement.PCT_AVERAGE);
saveFileToWorkspacePath(pcClient,pcModel.getTrendReportId(true),runId,TrendReportTypes.DataType.Monitors, TrendReportTypes.PctType.UDP, TrendReportTypes.Measurement.PCT_MEDIAN);
saveFileToWorkspacePath(pcClient,pcModel.getTrendReportId(true),runId,TrendReportTypes.DataType.Monitors, TrendReportTypes.PctType.UDP, TrendReportTypes.Measurement.PCT_STDDEVIATION);
saveFileToWorkspacePath(pcClient,pcModel.getTrendReportId(true),runId,TrendReportTypes.DataType.Monitors, TrendReportTypes.PctType.UDP, TrendReportTypes.Measurement.PCT_COUNT1);
saveFileToWorkspacePath(pcClient,pcModel.getTrendReportId(true),runId,TrendReportTypes.DataType.Monitors, TrendReportTypes.PctType.UDP, TrendReportTypes.Measurement.PCT_SUM1);
// Regular - VU
//saveFileToWorkspacePath(pcClient,pcModel.getTrendReportId(true),runId,TrendReportTypes.DataType.Regular, TrendReportTypes.PctType.VU, TrendReportTypes.Measurement.PCT_MINIMUM);
saveFileToWorkspacePath(pcClient,pcModel.getTrendReportId(true),runId,TrendReportTypes.DataType.Regular, TrendReportTypes.PctType.VU, TrendReportTypes.Measurement.PCT_MAXIMUM);
saveFileToWorkspacePath(pcClient,pcModel.getTrendReportId(true),runId,TrendReportTypes.DataType.Regular, TrendReportTypes.PctType.VU, TrendReportTypes.Measurement.PCT_AVERAGE);
//saveFileToWorkspacePath(pcClient,pcModel.getTrendReportId(true),runId,TrendReportTypes.DataType.Regular, TrendReportTypes.PctType.VU, TrendReportTypes.Measurement.PCT_MEDIAN);
//saveFileToWorkspacePath(pcClient,pcModel.getTrendReportId(true),runId,TrendReportTypes.DataType.Regular, TrendReportTypes.PctType.VU, TrendReportTypes.Measurement.PCT_STDDEVIATION);
// Regular - WEB
saveFileToWorkspacePath(pcClient,pcModel.getTrendReportId(true),runId,TrendReportTypes.DataType.Regular, TrendReportTypes.PctType.WEB, TrendReportTypes.Measurement.PCT_MINIMUM);
saveFileToWorkspacePath(pcClient,pcModel.getTrendReportId(true),runId,TrendReportTypes.DataType.Regular, TrendReportTypes.PctType.WEB, TrendReportTypes.Measurement.PCT_MAXIMUM);
saveFileToWorkspacePath(pcClient,pcModel.getTrendReportId(true),runId,TrendReportTypes.DataType.Regular, TrendReportTypes.PctType.WEB, TrendReportTypes.Measurement.PCT_AVERAGE);
saveFileToWorkspacePath(pcClient,pcModel.getTrendReportId(true),runId,TrendReportTypes.DataType.Regular, TrendReportTypes.PctType.WEB, TrendReportTypes.Measurement.PCT_MEDIAN);
//saveFileToWorkspacePath(pcClient,pcModel.getTrendReportId(true),runId,TrendReportTypes.DataType.Regular, TrendReportTypes.PctType.WEB, TrendReportTypes.Measurement.PCT_STDDEVIATION);
saveFileToWorkspacePath(pcClient,pcModel.getTrendReportId(true),runId,TrendReportTypes.DataType.Regular, TrendReportTypes.PctType.WEB, TrendReportTypes.Measurement.PCT_SUM1);
// logger.print(build.getRootDir().getPath());
}
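    // saveFileToWorkspacePath (below) writes each trend measurement as a two-line CSV in the build
    // workspace: the first line holds the measurement keys and the second line the matching values,
    // using a file name of the form <measurement>_<pctType>.csv (e.g. pct_average_trt.csv).
    // Local and remote (agent) workspaces are handled by separate branches.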
private boolean saveFileToWorkspacePath(PcClient pcClient, String trendReportID, int runId,TrendReportTypes.DataType dataType, TrendReportTypes.PctType pctType, TrendReportTypes.Measurement measurement)throws IOException, PcException, IntrospectionException, NoSuchMethodException{
String fileName = measurement.toString().toLowerCase() + "_" + pctType.toString().toLowerCase() + ".csv";
Map<String, String> measurementMap = pcClient.getTrendReportByXML(trendReportID, runId, dataType, pctType, measurement);
if (!_build.getWorkspace().isRemote()) {
try {
File file = new File(getWorkspacePath().getPath() + "/" + fileName);
if (!file.exists()) {
file.createNewFile();
}
PrintWriter writer = new PrintWriter(file);
for (String key : measurementMap.keySet()) {
writer.print(key + ",");
}
writer.print("\r\n");
for (String value : measurementMap.values()) {
writer.print(value + ",");
}
writer.close();
// logger.println(String.format("%s - %s Created.", simpleDateFormater(), fileName));
return true;
} catch (IOException e) {
if (getWorkspacePath().getPath() != null)
logger.println(String.format("%s - Error saving file: %s to workspace path: %s with Error: %s", simpleDateFormater(), getWorkspacePath().getPath(), fileName, e.getMessage()));
else
logger.println(String.format("%s - Error saving file: %s because workspace path is unavailable. Error: %s", simpleDateFormater(), fileName, e.getMessage()));
}
}
else {
try {
FilePath filePath = new FilePath(_build.getWorkspace().getChannel(), getWorkspacePath().getPath() + "/" + fileName);
String filepathContent="";
for (String key : measurementMap.keySet()) {
filepathContent += key + ",";
}
filepathContent += "\r\n";
for (String value : measurementMap.values()) {
filepathContent += value + ",";
}
filePath.write(filepathContent, null);
return true;
} catch (InterruptedException e) {
if (getWorkspacePath().getPath() != null)
logger.println(String.format("%s - Error saving file: %s to remote workspace path: %s with Error: %s", simpleDateFormater(), getWorkspacePath().getPath(), fileName, e.getMessage()));
else
logger.println(String.format("%s - Error saving file: %s because remote workspace path is unavailable. Error: %s", simpleDateFormater(), fileName, e.getMessage()));
return false;
}
}
return false;
}
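    // updateTestStatus (below) maps the PC run outcome onto the JUnit test case, in order of
    // precedence: RUN_FAILURE becomes an error; a finished run that missed its SLA (when
    // statusBySLA is set) becomes a failure; any other failing run state or a non-empty error
    // message becomes a failure; everything else passes.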
private void updateTestStatus(Testcase testCase, PcRunResponse response, String errorMessage, String eventLog) {
RunState runState = RunState.get(response.getRunState());
if (runState == RUN_FAILURE) {
setError(testCase, String.format("%s. %s", runState, errorMessage), eventLog);
} else if (statusBySLA && runState == FINISHED && !(response.getRunSLAStatus().equalsIgnoreCase("passed"))) {
setFailure(testCase, "Run measurements did not reach SLA criteria. Run SLA Status: "
+ response.getRunSLAStatus(), eventLog);
} else if (runState.hasFailure()) {
setFailure(testCase, String.format("%s. %s", runState, errorMessage), eventLog);
} else if(errorMessage != null && !errorMessage.isEmpty()){
setFailure(testCase, String.format("%s. %s", runState, errorMessage), eventLog);
}
else{
testCase.setStatus(JUnitTestCaseStatus.PASS);
}
}
private void setError(Testcase testCase, String message, String eventLog) {
Error error = new Error();
error.setMessage(message);
if (!(eventLog == null || eventLog.isEmpty()))
testCase.getSystemErr().add(eventLog);
testCase.getError().add(error);
testCase.setStatus(JUnitTestCaseStatus.ERROR);
logger.println(String.format("%s - %s %s", simpleDateFormater() , message ,eventLog));
}
private void setFailure(Testcase testCase, String message, String eventLog) {
Failure failure = new Failure();
failure.setMessage(message);
if (!(eventLog == null || eventLog.isEmpty()))
testCase.getSystemErr().add(eventLog);
testCase.getFailure().add(failure);
testCase.setStatus(JUnitTestCaseStatus.FAILURE);
logger.println(String.format("%s - Failure: %s %s", simpleDateFormater(), message ,eventLog));
}
private String getOutputForReportLinks(Run<?, ?> build) {
String urlPattern = getArtifactsUrlPattern(build);
String viewUrl = String.format(urlPattern + "/%s", pcReportFileName);
String downloadUrl = String.format(urlPattern + "/%s", "*zip*/pcRun");
logger.println(String.format("%s - %s", simpleDateFormater(), HyperlinkNote.encodeTo(viewUrl, "View analysis report of run " + runId)));
return String.format("Load Test Run ID: %s\n\nView analysis report:\n%s\n\nDownload Report:\n%s", runId, pcModel.getserverAndPort() + "/" + build.getUrl() + viewUrl, pcModel.getserverAndPort() + "/" + build.getUrl() + downloadUrl);
}
private String getArtifactsUrlPattern(Run<?, ?> build) {
String runReportUrlTemp = runReportStructure.replaceFirst("%s/", "");
return String.format(
runReportUrlTemp,
artifactsResourceName);
}
private void provideStepResultStatus(Result resultStatus, Run<?, ?> build) {
String runIdStr =
(runId > 0) ? String.format(" (PC RunID: %s)", String.valueOf(runId)) : "";
logger.println(String.format("%s - Result Status%s: %s\n- - -",
simpleDateFormater(),
runIdStr,
resultStatus.toString()));
build.setResult(resultStatus);
}
private Result createRunResults(FilePath filePath, Testsuites testsuites) {
Result ret = Result.SUCCESS;
try {
if (testsuites != null) {
StringWriter writer = new StringWriter();
JAXBContext context = JAXBContext.newInstance(Testsuites.class);
Marshaller marshaller = context.createMarshaller();
marshaller.marshal(testsuites, writer);
filePath.write(writer.toString(), null);
if (containsErrorsOrFailures(testsuites.getTestsuite())) {
ret = Result.FAILURE;
}
} else {
logger.println(String.format("%s - Empty Results", simpleDateFormater()));
ret = Result.FAILURE;
}
} catch (Exception cause) {
logger.print(String.format(
"%s - Failed to create run results, Exception: %s",
simpleDateFormater(),
cause.getMessage()));
ret = Result.FAILURE;
}
return ret;
}
private boolean containsErrorsOrFailures(List<Testsuite> testsuites) {
boolean ret = false;
for (Testsuite testsuite : testsuites) {
for (Testcase testcase : testsuite.getTestcase()) {
String status = testcase.getStatus();
if (status.equals(JUnitTestCaseStatus.ERROR)
|| status.equals(JUnitTestCaseStatus.FAILURE)) {
ret = true;
break;
}
}
}
return ret;
}
private String getJunitResultsFileName() {
Format formatter = new SimpleDateFormat("ddMMyyyyHHmmssSSS");
String time = formatter.format(new Date());
junitResultsFileName = String.format("Results%s.xml", time);
return junitResultsFileName;
}
@Override
public void perform(@Nonnull Run<?, ?> build, @Nonnull FilePath workspace, @Nonnull Launcher launcher,
@Nonnull TaskListener listener) throws InterruptedException, IOException {
Result resultStatus = Result.FAILURE;
//trendReportReady = false;
logger = listener.getLogger();
PcClient pcClient = new PcClient(pcModel, logger);
Testsuites testsuites = execute(pcClient, build);
// // Create Trend Report
// if(trendReportReady){
// String reportUrlTemp = trendReportStructure.replaceFirst("%s/", "") + "/trendReport%s.pdf";
// String reportUrl = String.format(reportUrlTemp, artifactsResourceName, pcModel.getTrendReportId(true));
// pcClient.publishTrendReport(reportUrl, pcModel.getTrendReportId(true));
// }
// // End Create Trend Report
FilePath resultsFilePath = workspace.child(getJunitResultsFileName());
resultStatus = createRunResults(resultsFilePath, testsuites);
provideStepResultStatus(resultStatus, build);
if (!Result.SUCCESS.equals(resultStatus) && !Result.FAILURE.equals(resultStatus)) {
return;
}
// // Only do this if the build worked (not unstable or aborted, which might mean there is no report).
// JUnitResultArchiver jUnitResultArchiver = new JUnitResultArchiver(this.getRunResultsFileName());
// jUnitResultArchiver.setKeepLongStdio(true);
// jUnitResultArchiver.perform(build, workspace, launcher, listener);
}
public String getServerAndPort()
{
return getPcModel().getserverAndPort();
}
public String getPcServerName()
{
return getPcModel().getPcServerName();
}
public String getAlmProject()
{
return getPcModel().getAlmProject();
}
public String getTestId()
{
return getPcModel().getTestId();
}
public String getAlmDomain()
{
return getPcModel().getAlmDomain();
}
public String getTimeslotDurationHours()
{
return timeslotDurationHours;
}
public String getTimeslotDurationMinutes()
{
return timeslotDurationMinutes;
}
public PostRunAction getPostRunAction()
{
return getPcModel().getPostRunAction();
}
public String getTrendReportId()
{
return getPcModel().getTrendReportId(true);
}
public String getAutoTestInstanceID()
{
return getPcModel().getAutoTestInstanceID();
}
public String getTestInstanceId()
{
return getPcModel().getTestInstanceId();
}
public String getAddRunToTrendReport()
{
return getPcModel().getAddRunToTrendReport();
}
public boolean isVudsMode()
{
return getPcModel().isVudsMode();
}
public String getRetry () {
return getPcModel().getRetry();
}
public String getRetryOccurrences () {
return getPcModel().getRetryOccurrences();
}
public String getRetryDelay () {
return getPcModel().getRetryDelay();
}
public String getDescription()
{
return getPcModel().getDescription();
}
public String getAlmUserName() {
return almUserName;
}
private final String almUserName;
public String getAlmPassword() {
return almPassword;
}
public boolean isHTTPSProtocol()
{
return getPcModel().httpsProtocol();
}
public boolean isStatusBySLA() {
return statusBySLA;
}
public String getProxyOutURL(){ return getPcModel().getProxyOutURL();}
public String getProxyOutUser(){ return getPcModel().getProxyOutUser();}
public String getProxyOutPassword(){ return getPcModel().getProxyOutPassword();}
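    // simpleDateFormater (below) produces the timestamp prefix used by every log line. Note that
    // the pattern combines the 24-hour field 'HH' with the AM/PM marker 'a', so times render as,
    // e.g., "13:05:00.123 PM"; the null check on the formatted string is redundant, since
    // SimpleDateFormat.format never returns null.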
private String simpleDateFormater()
{
try {
SimpleDateFormat simpleDateFormat = new SimpleDateFormat ("E yyyy MMM dd 'at' HH:mm:ss.SSS a zzz");
String simpleDate = simpleDateFormat.format(new Date());
if (simpleDate != null)
return simpleDate;
else
return "";
}
catch (Exception ex) {
return "";
}
}
// This indicates to Jenkins that this is an implementation of an extension
// point
@Extension
@Symbol("pcBuild")
public static final class DescriptorImpl extends BuildStepDescriptor<Builder> {
public DescriptorImpl() {
load();
}
@Override
public boolean isApplicable(
@SuppressWarnings("rawtypes") Class<? extends AbstractProject> jobType) {
return true;
}
@Override
public String getDisplayName() {
return "Execute performance test using Performance Center";
}
public FormValidation doCheckPcServerName(@QueryParameter String value) {
return validateString(value, "PC Server");
}
public FormValidation doCheckAlmUserName(@QueryParameter String value) {
return validateString(value, "User name");
}
public FormValidation doCheckAlmDomain(@QueryParameter String value) {
return validateString(value, "Domain");
}
public FormValidation doCheckAlmProject(@QueryParameter String value) {
return validateString(value, "Project");
}
public FormValidation doCheckTestId(@QueryParameter String value) {
return validateHigherThanInt(value, "Test ID", 0, true);
}
public FormValidation doCheckRetryDelay(@QueryParameter String value) {
return validateHigherThanInt(value, "Delay between attempts (in minutes)", 0, true);
}
public FormValidation doCheckRetryOccurrences(@QueryParameter String value) {
return validateHigherThanInt(value, "Number of attempts", 0, true);
}
// if autoTestInstanceID is selected we don't need to check the validation of the test instance
// public static FormValidation CheckOnlyAutoTestInstanceId(String autoTestInstanceID){
// if(autoTestInstanceID.equals("AUTO"))
// return FormValidation.ok();
// else
// return FormValidation.error("Error ");
// }
public FormValidation doCheckTestInstanceId(@QueryParameter String value){
return validateHigherThanInt(value, "Test Instance ID", 0, true);
}
public FormValidation doCheckTimeslotDuration(@QueryParameter TimeslotDuration value) {
return validateHigherThanInt(
String.valueOf(value.toMinutes()),
"Timeslot Duration (in minutes)",
30,
false);
}
public FormValidation doCheckTimeslotId(@QueryParameter String value) {
return validateHigherThanInt(value, "Timeslot ID", 0, true);
}
/**
* @param limitIncluded
* if true, value must be higher than limit. if false, value must be equal to or
* higher than limit.
*/
private FormValidation validateHigherThanInt(
String value,
String field,
int limit,
boolean limitIncluded) {
FormValidation ret = FormValidation.ok();
value = value.trim();
String messagePrefix = field + " must be ";
if (StringUtils.isBlank(value)) {
ret = FormValidation.error(messagePrefix + "set");
} else {
try {
//regular expression: parameter (with brackets or not)
if (value.matches("^\\$\\{[\\w-. ]*}$|^\\$[\\w-.]*$"))
return ret;
//regular expression: digits only (the trailing "$" and the empty "|" alternative are redundant when used with String.matches)
else if (value.matches("[0-9]*$|")) {
if (limitIncluded && Integer.parseInt(value) <= limit)
ret = FormValidation.error(messagePrefix + "higher than " + limit);
else if (Integer.parseInt(value) < limit)
ret = FormValidation.error(messagePrefix + "at least " + limit);
}
else
ret = FormValidation.error(messagePrefix + "a whole number or a parameter, e.g.: 23, $TESTID or ${TEST_ID}.");
} catch (Exception e) {
ret = FormValidation.error(messagePrefix + "a whole number or a parameter (e.g.: $TESTID or ${TestID})");
}
}
return ret;
}
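        // Examples for validateHigherThanInt above (limit 0, limitIncluded true): "23" is accepted;
        // "0" is rejected ("must be higher than 0"); "$TESTID" and "${TEST_ID}" are accepted as
        // parameter references; "abc" is rejected as neither a whole number nor a parameter.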
private FormValidation validateString(String value, String field) {
FormValidation ret = FormValidation.ok();
if (StringUtils.isBlank(value.trim())) {
ret = FormValidation.error(field + " must be set");
}
return ret;
}
public List<PostRunAction> getPostRunActions() {
return PcModel.getPostRunActions();
}
}
}
| src/main/java/com/microfocus/application/automation/tools/run/PcBuilder.java | /*
* © Copyright 2013 EntIT Software LLC
* Certain versions of software and/or documents (“Material”) accessible here may contain branding from
* Hewlett-Packard Company (now HP Inc.) and Hewlett Packard Enterprise Company. As of September 1, 2017,
* the Material is now offered by Micro Focus, a separately owned and operated company. Any reference to the HP
* and Hewlett Packard Enterprise/HPE marks is historical in nature, and the HP and Hewlett Packard Enterprise/HPE
* marks are the property of their respective owners.
* __________________________________________________________________
* MIT License
*
* © Copyright 2012-2018 Micro Focus or one of its affiliates.
*
* The only warranties for products and services of Micro Focus and its affiliates
* and licensors (“Micro Focus”) are set forth in the express warranty statements
* accompanying such products and services. Nothing herein should be construed as
* constituting an additional warranty. Micro Focus shall not be liable for technical
* or editorial errors or omissions contained herein.
* The information contained herein is subject to change without notice.
* ___________________________________________________________________
*
*/
/*
 * Creates the PcModel and the PcClient and establishes the connection between the Jenkins job and Performance Center.
 */
package com.microfocus.application.automation.tools.run;
import com.microfocus.adm.performancecenter.plugins.common.pcEntities.*;
import com.microfocus.application.automation.tools.model.PcModel;
import com.microfocus.application.automation.tools.sse.result.model.junit.Error;
import com.microfocus.application.automation.tools.sse.result.model.junit.Failure;
import com.microfocus.application.automation.tools.pc.PcClient;
import com.microfocus.application.automation.tools.sse.result.model.junit.JUnitTestCaseStatus;
import com.microfocus.application.automation.tools.sse.result.model.junit.Testcase;
import com.microfocus.application.automation.tools.sse.result.model.junit.Testsuite;
import com.microfocus.application.automation.tools.sse.result.model.junit.Testsuites;
import com.microfocus.application.automation.tools.octane.configuration.ConfigurationService;
import hudson.Extension;
import hudson.FilePath;
import hudson.Launcher;
import hudson.PluginWrapper;
import hudson.console.HyperlinkNote;
import hudson.model.*;
import hudson.tasks.BuildStepDescriptor;
import hudson.tasks.Builder;
import hudson.util.FormValidation;
import jenkins.model.Jenkins;
import jenkins.tasks.SimpleBuildStep;
import org.apache.commons.lang.StringUtils;
import org.apache.http.client.ClientProtocolException;
import org.jenkinsci.Symbol;
import org.kohsuke.stapler.DataBoundConstructor;
import org.kohsuke.stapler.QueryParameter;
import javax.annotation.Nonnull;
import javax.xml.bind.JAXBContext;
import javax.xml.bind.Marshaller;
import java.beans.IntrospectionException;
import java.io.*;
import java.lang.reflect.Method;
import java.text.Format;
import java.text.SimpleDateFormat;
import java.util.ArrayList;
import java.util.Date;
import java.util.List;
import java.util.Map;
import static com.microfocus.adm.performancecenter.plugins.common.pcEntities.RunState.FINISHED;
import static com.microfocus.adm.performancecenter.plugins.common.pcEntities.RunState.RUN_FAILURE;
public class PcBuilder extends Builder implements SimpleBuildStep{
private static final String artifactsDirectoryName = "archive";
public static final String artifactsResourceName = "artifact";
public static final String runReportStructure = "%s/%s/performanceTestsReports/pcRun";
public static final String trendReportStructure = "%s/%s/performanceTestsReports/TrendReports";
public static final String pcReportArchiveName = "Reports.zip";
public static final String pcReportFileName = "Report.html";
private static final String RUNID_BUILD_VARIABLE = "HP_RUN_ID";
public static final String TRENDED = "Trended";
public static final String PENDING = "Pending";
public static final String PUBLISHING = "Publishing";
public static final String ERROR = "Error";
private PcModel pcModel;
private final String almPassword;
private final String timeslotDurationHours;
private final String timeslotDurationMinutes;
private final boolean statusBySLA;
private int runId;
private String testName;
private FilePath pcReportFile;
private String junitResultsFileName;
private PrintStream logger;
private File WorkspacePath;
private AbstractBuild<?, ?> _build;
@DataBoundConstructor
public PcBuilder(
String serverAndPort,
String pcServerName,
String almUserName,
String almPassword,
String almDomain,
String almProject,
String testId,
String testInstanceId,
String autoTestInstanceID,
String timeslotDurationHours,
String timeslotDurationMinutes,
PostRunAction postRunAction,
boolean vudsMode,
boolean statusBySLA,
String description,
String addRunToTrendReport,
String trendReportId,
boolean HTTPSProtocol,
String proxyOutURL,
String proxyOutUser,
String proxyOutPassword,
String retry,
String retryDelay,
String retryOccurrences) {
this.almUserName = almUserName;
this.almPassword = almPassword;
this.timeslotDurationHours = timeslotDurationHours;
this.timeslotDurationMinutes = timeslotDurationMinutes;
this.statusBySLA = statusBySLA;
pcModel =
new PcModel(
serverAndPort.trim(),
pcServerName.trim(),
almUserName.trim(),
almPassword,
almDomain.trim(),
almProject.trim(),
testId.trim(),
autoTestInstanceID,
testInstanceId.trim(),
timeslotDurationHours.trim(),
timeslotDurationMinutes.trim(),
postRunAction,
vudsMode,
description,
addRunToTrendReport,
trendReportId,
HTTPSProtocol,
proxyOutURL,
proxyOutUser,
proxyOutPassword,
retry.isEmpty()? "NO_RETRY" : retry,
retry.equals("NO_RETRY") ? "0" : retryDelay.isEmpty()? "5" : retryDelay,
retry.equals("NO_RETRY") ? "0" : retryOccurrences.isEmpty()? "3" : retryOccurrences);
}
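    // Note on the retry arguments above: an empty retry selection falls back to "NO_RETRY", and
    // when retries are disabled both the delay and the occurrence count collapse to "0"; otherwise
    // an empty delay defaults to "5" (minutes) and empty occurrences to "3". A later revision of
    // this constructor (see the diff at the end of this record) additionally guards these
    // expressions against null arguments.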
@Override
public DescriptorImpl getDescriptor() {
return (DescriptorImpl) super.getDescriptor();
}
@Override
public boolean perform(AbstractBuild<?, ?> build, Launcher launcher, BuildListener listener)
throws InterruptedException, IOException {
_build = build;
if(build.getWorkspace() != null)
WorkspacePath = new File(build.getWorkspace().toURI());
else
WorkspacePath = null;
if((pcModel !=null) && (build != null) && (build instanceof AbstractBuild))
setPcModelBuildParameters(build);
if(build.getWorkspace() != null)
perform(build, build.getWorkspace(), launcher, listener);
else
return false;
return true;
}
private void setPcModelBuildParameters(AbstractBuild<?, ?> build) {
String buildParameters = build.getBuildVariables().toString();
if (!buildParameters.isEmpty())
pcModel.setBuildParameters(buildParameters);
}
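    // setPcModelBuildParameters above forwards the build variables to the model as a single string,
    // the Map#toString rendering of build.getBuildVariables(), rather than as a structured map; the
    // model is presumably responsible for parsing that representation apart again.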
public File getWorkspacePath(){
return WorkspacePath;
}
public PcModel getPcModel() {
return pcModel;
}
public String getRunResultsFileName() {
return junitResultsFileName;
}
public static String getArtifactsDirectoryName() {
return artifactsDirectoryName;
}
public static String getArtifactsResourceName() {
return artifactsResourceName;
}
public static String getRunReportStructure() {
return runReportStructure;
}
public static String getPcReportArchiveName() {
return pcReportArchiveName;
}
public static String getPcreportFileName() {
return pcReportFileName;
}
private String getVersion() {
String completeVersion = ConfigurationService.getPluginVersion();
if(completeVersion != null) {
String[] partsOfCompleteVersion = completeVersion.split(" [(]");
return partsOfCompleteVersion[0];
}
return "unknown";
}
private Testsuites execute(PcClient pcClient, Run<?, ?> build)
throws InterruptedException,NullPointerException {
try {
String version = getVersion();
if(!(version == null || version.equals("unknown")))
logger.println(String.format("%s - plugin version is '%s'",simpleDateFormater(), version));
if((pcModel !=null) && (build != null) && (build instanceof AbstractBuild))
setPcModelBuildParameters((AbstractBuild) build);
if (!StringUtils.isBlank(pcModel.getDescription()))
logger.println(String.format("%s - Test description: %s", simpleDateFormater(), pcModel.getDescription()));
if (!beforeRun(pcClient))
return null;
return run(pcClient, build);
} catch (InterruptedException e) {
build.setResult(Result.ABORTED);
pcClient.stopRun(runId);
throw e;
} catch (NullPointerException e) {
logger.println(String.format("%s - Error: %s", simpleDateFormater(), e.getMessage()));
} catch (Exception e) {
logger.println(String.format("%s - %s", simpleDateFormater(), e.getMessage()));
} finally {
pcClient.logout();
}
return null;
}
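    // run (below) drives the whole Performance Center interaction for one build: it starts the run,
    // exposes the run id to the job as the HP_RUN_ID build variable, waits for completion, publishes
    // the analysis report when the run finished and a post-run action is configured, optionally
    // trends the run (by explicit id or the associated report), and finally folds everything into a
    // JUnit Testsuites result.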
private Testsuites run(PcClient pcClient, Run<?, ?> build)
throws InterruptedException, ClientProtocolException,
IOException, PcException {
if((pcModel !=null) && (build != null) && (build instanceof AbstractBuild))
setPcModelBuildParameters((AbstractBuild) build);
PcRunResponse response = null;
String errorMessage = "";
String eventLogString = "";
boolean trendReportReady = false;
try {
runId = pcClient.startRun();
if (runId == 0)
return null;
}
catch (NumberFormatException | PcException | IOException ex) {
    // All startRun failure modes get identical handling; ClientProtocolException is a
    // subclass of IOException and is therefore covered by this multi-catch as well.
    logger.println(String.format("%s - startRun failed. Error: %s", simpleDateFormater(), ex.getMessage()));
    throw ex;
}
//getTestName failure should not fail test execution.
try {
testName = pcClient.getTestName();
if(testName == null) {
testName = String.format("TestId_%s", pcModel.getTestId());
logger.println(String.format("%s - getTestName failed. Using '%s' as testname.", simpleDateFormater(), testName));
}
else
logger.println(String.format("%s - test name is %s", simpleDateFormater(), testName));
}
catch (PcException | IOException ex) {
    testName = String.format("TestId_%s", pcModel.getTestId());
    logger.println(String.format("%s - getTestName failed. Using '%s' as testname. Error: %s \n", simpleDateFormater(), testName, ex.getMessage()));
}
try {
List<ParameterValue> parameters = new ArrayList<>();
parameters.add(new StringParameterValue(RUNID_BUILD_VARIABLE, "" + runId));
// This allows a user to access the runId from within Jenkins using a build variable.
build.addAction(new AdditionalParametersAction(parameters));
logger.print(String.format("%s - Set %s Environment Variable to %s \n",simpleDateFormater(), RUNID_BUILD_VARIABLE, runId));
response = pcClient.waitForRunCompletion(runId);
if (response != null && RunState.get(response.getRunState()) == FINISHED && pcModel.getPostRunAction() != PostRunAction.DO_NOTHING) {
pcReportFile = pcClient.publishRunReport(runId, getReportDirectory(build));
// Adding the trend report section if ID has been set or if the Associated Trend report is selected.
if(((("USE_ID").equals(pcModel.getAddRunToTrendReport()) && pcModel.getTrendReportId(true) != null) || ("ASSOCIATED").equals(pcModel.getAddRunToTrendReport())) && RunState.get(response.getRunState()) != RUN_FAILURE){
Thread.sleep(5000);
pcClient.addRunToTrendReport(this.runId, pcModel.getTrendReportId(true));
pcClient.waitForRunToPublishOnTrendReport(this.runId, pcModel.getTrendReportId(true));
pcClient.downloadTrendReportAsPdf(pcModel.getTrendReportId(true), getTrendReportsDirectory(build));
trendReportReady = true;
}
} else if (response != null && RunState.get(response.getRunState()).ordinal() > FINISHED.ordinal()) {
PcRunEventLog eventLog = pcClient.getRunEventLog(runId);
eventLogString = buildEventLogString(eventLog);
}
} catch (PcException e) {
logger.println(String.format("%s - Error: %s", simpleDateFormater(), e.getMessage()));
}
Testsuites ret = new Testsuites();
parsePcRunResponse(ret,response, build, errorMessage, eventLogString);
try {
parsePcTrendResponse(ret,build,pcClient,trendReportReady,pcModel.getTrendReportId(true),runId);
} catch (IntrospectionException e) {
e.printStackTrace();
} catch (NoSuchMethodException e) {
e.printStackTrace();
}
return ret;
}
private String buildEventLogString(PcRunEventLog eventLog) {
String logFormat = "%-5s | %-7s | %-19s | %s\n";
StringBuilder eventLogStr = new StringBuilder("Event Log:\n\n" + String.format(logFormat, "ID", "TYPE", "TIME","DESCRIPTION"));
for (PcRunEventLogRecord record : eventLog.getRecordsList()) {
eventLogStr.append(String.format(logFormat, record.getID(), record.getType(), record.getTime(), record.getDescription()));
}
return eventLogStr.toString();
}
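    // Example of the table produced by buildEventLogString above (record values are illustrative):
    //
    //   Event Log:
    //
    //   ID    | TYPE    | TIME                | DESCRIPTION
    //   1     | Info    | 01/01/2018 10:00:00 | Run started by user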
private boolean beforeRun(PcClient pcClient) {
return validatePcForm() && pcClient.login();
}
private String getReportDirectory(Run<?, ?> build) {
return String.format(
runReportStructure,
build.getRootDir().getPath(),
artifactsDirectoryName);
}
private String getTrendReportsDirectory(Run<?, ?> build) {
return String.format(
trendReportStructure,
build.getRootDir().getPath(),
artifactsDirectoryName);
}
@Override
@Deprecated
public boolean perform(Build<?, ?> build, Launcher launcher, BuildListener listener) throws InterruptedException, IOException {
return super.perform(build, launcher, listener);
}
private boolean validatePcForm() {
logger.println(String.format("%s - Validating parameters before run", simpleDateFormater()));
String prefix = "doCheck";
boolean ret = true;
Method[] methods = getDescriptor().getClass().getMethods();
Method[] modelMethods = pcModel.getClass().getMethods();
for (Method method : methods) {
String name = method.getName();
if (name.startsWith(prefix)) {
name = name.replace(prefix, "").toLowerCase();
for (Method modelMethod : modelMethods) {
String modelMethodName = modelMethod.getName();
if (modelMethodName.toLowerCase().equals("get" + name) && modelMethod.getParameterTypes().length==0) {
try {
Object obj = FormValidation.ok();
if (
!("testinstanceid".equals(name) && "AUTO".equals(pcModel.getAutoTestInstanceID())) &&
!(("retrydelay".equals(name) && "NO_RETRY".equals(pcModel.getRetry())) || pcModel.getRetry().isEmpty()) &&
!(("retryoccurrences".equals(name) && "NO_RETRY".equals(pcModel.getRetry())) || pcModel.getRetry().isEmpty())
) {
obj = method.invoke(getDescriptor(), modelMethod.invoke(getPcModel()));
}
if (!obj.equals(FormValidation.ok())) {
logger.println(obj);
ret = false;
}
break;
} catch (Exception e) {
logger.println("method.getName() = " + method.getName() + "\nname = " + name + "\nmodelMethodName = " + modelMethodName + "\nexception = " + e + "\n");
}
}
}
}
}
boolean isTrendReportIdValid = validateTrendReportIdIsNumeric(getPcModel().getTrendReportId(true),("USE_ID").equals(getPcModel().getAddRunToTrendReport()));
boolean IsRetryValid = validateRetryIsNumeric(getPcModel().getRetry(),getPcModel().getRetryDelay(),getPcModel().getRetryOccurrences());
ret &= isTrendReportIdValid;
ret &= IsRetryValid;
return ret;
}
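    // validatePcForm above validates reflectively: every doCheckXxx method on the descriptor is
    // paired with the model getter getXxx and invoked with its value, so new form fields are
    // validated without extra wiring. One caveat: in the skip conditions, the
    // pcModel.getRetry().isEmpty() test sits outside the per-field clauses, so an empty retry value
    // makes the whole condition false for every field and skips all validation, which looks
    // unintended; the apparent intent was to skip only the retrydelay/retryoccurrences checks.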
private boolean validateTrendReportIdIsNumeric(String trendReportId, boolean addRunToTrendReport){
FormValidation res = FormValidation.ok();
if(addRunToTrendReport){
if(trendReportId.isEmpty()){
res = FormValidation.error("Parameter Is Missing: trend report ID is missing");
}
else{
try{
Integer.parseInt(trendReportId);
}
catch(NumberFormatException e) {
res = FormValidation.error("Illegal Parameter: trend report ID is not a number");
}
}
}
logger.println(String.format("%s - %s", simpleDateFormater(), res.toString().replace(": <div/>","")));
return res.equals(FormValidation.ok());
}
private boolean validateRetryIsNumeric(String myRetry, String myRetryDelay, String myRetryOccurrences){
FormValidation res = FormValidation.ok();
if("RETRY".equals(myRetry)){
if(myRetryDelay.isEmpty() || myRetryOccurrences.isEmpty()){
res = FormValidation.error("Parameter Is Missing: Retry on failure parameter is missing");
}
else{
try{
if (Integer.parseInt(myRetryDelay)<=0)
res = FormValidation.error("Illegal Parameter: Retry Delay is not a positive number");
}
catch(NumberFormatException e) {
res = FormValidation.error("Illegal Parameter: Retry Delay is not a number");
}
try{
if (Integer.parseInt(myRetryOccurrences)<=0)
res = FormValidation.error("Illegal Parameter: Retry Occurrences is not a positive number");
}
catch(NumberFormatException e) {
res = FormValidation.error("Illegal Parameter: Retry Occurrences is not a number");
}
}
}
logger.println(String.format("%s - %s", simpleDateFormater(), res.toString().replace(": <div/>","")));
return res.equals(FormValidation.ok());
}
private Testsuites parsePcRunResponse(Testsuites ret,
PcRunResponse runResponse,
Run<?, ?> build,
String errorMessage, String eventLogString) throws IOException, InterruptedException {
RunState runState = RunState.get(runResponse.getRunState());
List<Testsuite> testSuites = ret.getTestsuite();
Testsuite testSuite = new Testsuite();
Testcase testCase = new Testcase();
//testCase.setClassname("Performance Tests.Test ID: " + runResponse.getTestID());
testCase.setClassname("Performance Test.Load Test");
testCase.setName(testName + "(ID:" + runResponse.getTestID() + ")");
testCase.setTime(String.valueOf(runResponse.getDuration() * 60));
if (pcReportFile != null && pcReportFile.exists() && runState == FINISHED) {
testCase.getSystemOut().add(getOutputForReportLinks(build));
}
updateTestStatus(testCase, runResponse, errorMessage, eventLogString);
testSuite.getTestcase().add(testCase);
testSuite.setName("Performance Test ID: " + runResponse.getTestID() + ", Run ID: " + runResponse.getID());
testSuites.add(testSuite);
return ret;
}
private Testsuites parsePcTrendResponse(Testsuites ret,Run<?, ?> build,PcClient pcClient,boolean trendReportReady,String TrendReportID, int runID) throws PcException,IntrospectionException,IOException, InterruptedException ,NoSuchMethodException{
if(trendReportReady){
String reportUrlTemp = trendReportStructure.replaceFirst("%s/", "") + "/trendReport%s.pdf";
String reportUrl = String.format(reportUrlTemp, artifactsResourceName, pcModel.getTrendReportId(true));
pcClient.publishTrendReport(reportUrl, pcModel.getTrendReportId(true));
// Updating all CSV files for plot plugin
// this helps to show the transaction of each result
if (isPluginActive("Plot plugin")) {
logger.println(String.format("%s Updating csv files for Trending Charts.", simpleDateFormater()));
updateCSVFilesForPlot(pcClient, runID);
String plotUrlPath = "/job/" + build.getParent().getName() + "/plot";
logger.println(String.format("%s - %s",simpleDateFormater(), HyperlinkNote.encodeTo(plotUrlPath, "Trending Charts"))); // + HyperlinkNote.encodeTo("https://wiki.jenkins-ci.org/display/JENKINS/HP+Application+Automation+Tools#HPApplicationAutomationTools-RunningPerformanceTestsusingHPPerformanceCenter","More Info"));
}else{
logger.println(String.format("%s - You can view Trending Charts directly from Jenkins using Plot Plugin, see more details on the %s (Performance Center 12.55 and Later).",simpleDateFormater(), HyperlinkNote.encodeTo("https://wiki.jenkins.io/display/JENKINS/HPE+Application+Automation+Tools#HPEApplicationAutomationTools-RunningPerformanceTestsusingHPEPerformanceCenter","documentation")));
}
}
return ret;
}
private boolean isPluginActive(String pluginDisplayName){
List<PluginWrapper> allPlugin = Jenkins.getInstance().pluginManager.getPlugins();
for (PluginWrapper pw :
allPlugin) {
if (pw.getDisplayName().toLowerCase().equals(pluginDisplayName.toLowerCase())) {
return pw.isActive();
}
}
return false;
}
private void updateCSVFilesForPlot(PcClient pcClient, int runId) throws IOException, PcException, IntrospectionException, NoSuchMethodException {
//Map<String, String> measurementMap =pcClient.getTrendReportByXML(pcModel.getTrendReportId(true),runId,TrendReportTypes.DataType.Transaction, TrendReportTypes.PctType.TRT, TrendReportTypes.Measurement.PCT_AVERAGE);
// Transaction - TRT
saveFileToWorkspacePath(pcClient,pcModel.getTrendReportId(true),runId,TrendReportTypes.DataType.Transaction, TrendReportTypes.PctType.TRT, TrendReportTypes.Measurement.PCT_MINIMUM);
saveFileToWorkspacePath(pcClient,pcModel.getTrendReportId(true),runId,TrendReportTypes.DataType.Transaction, TrendReportTypes.PctType.TRT, TrendReportTypes.Measurement.PCT_MAXIMUM);
saveFileToWorkspacePath(pcClient,pcModel.getTrendReportId(true),runId,TrendReportTypes.DataType.Transaction, TrendReportTypes.PctType.TRT, TrendReportTypes.Measurement.PCT_AVERAGE);
saveFileToWorkspacePath(pcClient,pcModel.getTrendReportId(true),runId,TrendReportTypes.DataType.Transaction, TrendReportTypes.PctType.TRT, TrendReportTypes.Measurement.PCT_MEDIAN);
saveFileToWorkspacePath(pcClient,pcModel.getTrendReportId(true),runId,TrendReportTypes.DataType.Transaction, TrendReportTypes.PctType.TRT, TrendReportTypes.Measurement.PCT_STDDEVIATION);
saveFileToWorkspacePath(pcClient,pcModel.getTrendReportId(true),runId,TrendReportTypes.DataType.Transaction, TrendReportTypes.PctType.TRT, TrendReportTypes.Measurement.PCT_COUNT1);
//saveFileToWorkspacePath(pcClient,pcModel.getTrendReportId(true),runId,TrendReportTypes.DataType.Transaction, TrendReportTypes.PctType.TRT, TrendReportTypes.Measurement.PCT_SUM1);
saveFileToWorkspacePath(pcClient,pcModel.getTrendReportId(true),runId,TrendReportTypes.DataType.Transaction, TrendReportTypes.PctType.TRT, TrendReportTypes.Measurement.PCT_PERCENTILE_90);
// Transaction - TPS
saveFileToWorkspacePath(pcClient,pcModel.getTrendReportId(true),runId,TrendReportTypes.DataType.Transaction, TrendReportTypes.PctType.TPS, TrendReportTypes.Measurement.PCT_MINIMUM);
saveFileToWorkspacePath(pcClient,pcModel.getTrendReportId(true),runId,TrendReportTypes.DataType.Transaction, TrendReportTypes.PctType.TPS, TrendReportTypes.Measurement.PCT_MAXIMUM);
saveFileToWorkspacePath(pcClient,pcModel.getTrendReportId(true),runId,TrendReportTypes.DataType.Transaction, TrendReportTypes.PctType.TPS, TrendReportTypes.Measurement.PCT_AVERAGE);
saveFileToWorkspacePath(pcClient,pcModel.getTrendReportId(true),runId,TrendReportTypes.DataType.Transaction, TrendReportTypes.PctType.TPS, TrendReportTypes.Measurement.PCT_MEDIAN);
//saveFileToWorkspacePath(pcClient,pcModel.getTrendReportId(),runId,TrendReportTypes.DataType.Transaction, TrendReportTypes.PctType.TPS, TrendReportTypes.Measurement.PCT_STDDEVIATION);
saveFileToWorkspacePath(pcClient,pcModel.getTrendReportId(true),runId,TrendReportTypes.DataType.Transaction, TrendReportTypes.PctType.TPS, TrendReportTypes.Measurement.PCT_SUM1);
// Transaction - TRS
//saveFileToWorkspacePath(pcClient,pcModel.getTrendReportId(true),runId,TrendReportTypes.DataType.Transaction, TrendReportTypes.PctType.TRS, TrendReportTypes.Measurement.PCT_MINIMUM);
//saveFileToWorkspacePath(pcClient,pcModel.getTrendReportId(true),runId,TrendReportTypes.DataType.Transaction, TrendReportTypes.PctType.TRS, TrendReportTypes.Measurement.PCT_MAXIMUM);
//saveFileToWorkspacePath(pcClient,pcModel.getTrendReportId(true),runId,TrendReportTypes.DataType.Transaction, TrendReportTypes.PctType.TRS, TrendReportTypes.Measurement.PCT_AVERAGE);
//saveFileToWorkspacePath(pcClient,pcModel.getTrendReportId(true),runId,TrendReportTypes.DataType.Transaction, TrendReportTypes.PctType.TRS, TrendReportTypes.Measurement.PCT_MEDIAN);
//saveFileToWorkspacePath(pcClient,pcModel.getTrendReportId(true),runId,TrendReportTypes.DataType.Transaction, TrendReportTypes.PctType.TRS, TrendReportTypes.Measurement.PCT_STDDEVIATION);
saveFileToWorkspacePath(pcClient,pcModel.getTrendReportId(true),runId,TrendReportTypes.DataType.Transaction, TrendReportTypes.PctType.TRS, TrendReportTypes.Measurement.PCT_COUNT1);
// Monitors - UDP
saveFileToWorkspacePath(pcClient,pcModel.getTrendReportId(true),runId,TrendReportTypes.DataType.Monitors, TrendReportTypes.PctType.UDP, TrendReportTypes.Measurement.PCT_MINIMUM);
saveFileToWorkspacePath(pcClient,pcModel.getTrendReportId(true),runId,TrendReportTypes.DataType.Monitors, TrendReportTypes.PctType.UDP, TrendReportTypes.Measurement.PCT_MAXIMUM);
saveFileToWorkspacePath(pcClient,pcModel.getTrendReportId(true),runId,TrendReportTypes.DataType.Monitors, TrendReportTypes.PctType.UDP, TrendReportTypes.Measurement.PCT_AVERAGE);
saveFileToWorkspacePath(pcClient,pcModel.getTrendReportId(true),runId,TrendReportTypes.DataType.Monitors, TrendReportTypes.PctType.UDP, TrendReportTypes.Measurement.PCT_MEDIAN);
saveFileToWorkspacePath(pcClient,pcModel.getTrendReportId(true),runId,TrendReportTypes.DataType.Monitors, TrendReportTypes.PctType.UDP, TrendReportTypes.Measurement.PCT_STDDEVIATION);
saveFileToWorkspacePath(pcClient,pcModel.getTrendReportId(true),runId,TrendReportTypes.DataType.Monitors, TrendReportTypes.PctType.UDP, TrendReportTypes.Measurement.PCT_COUNT1);
saveFileToWorkspacePath(pcClient,pcModel.getTrendReportId(true),runId,TrendReportTypes.DataType.Monitors, TrendReportTypes.PctType.UDP, TrendReportTypes.Measurement.PCT_SUM1);
// Regular - VU
//saveFileToWorkspacePath(pcClient,pcModel.getTrendReportId(true),runId,TrendReportTypes.DataType.Regular, TrendReportTypes.PctType.VU, TrendReportTypes.Measurement.PCT_MINIMUM);
saveFileToWorkspacePath(pcClient,pcModel.getTrendReportId(true),runId,TrendReportTypes.DataType.Regular, TrendReportTypes.PctType.VU, TrendReportTypes.Measurement.PCT_MAXIMUM);
saveFileToWorkspacePath(pcClient,pcModel.getTrendReportId(true),runId,TrendReportTypes.DataType.Regular, TrendReportTypes.PctType.VU, TrendReportTypes.Measurement.PCT_AVERAGE);
//saveFileToWorkspacePath(pcClient,pcModel.getTrendReportId(true),runId,TrendReportTypes.DataType.Regular, TrendReportTypes.PctType.VU, TrendReportTypes.Measurement.PCT_MEDIAN);
//saveFileToWorkspacePath(pcClient,pcModel.getTrendReportId(true),runId,TrendReportTypes.DataType.Regular, TrendReportTypes.PctType.VU, TrendReportTypes.Measurement.PCT_STDDEVIATION);
// Regular - WEB
saveFileToWorkspacePath(pcClient,pcModel.getTrendReportId(true),runId,TrendReportTypes.DataType.Regular, TrendReportTypes.PctType.WEB, TrendReportTypes.Measurement.PCT_MINIMUM);
saveFileToWorkspacePath(pcClient,pcModel.getTrendReportId(true),runId,TrendReportTypes.DataType.Regular, TrendReportTypes.PctType.WEB, TrendReportTypes.Measurement.PCT_MAXIMUM);
saveFileToWorkspacePath(pcClient,pcModel.getTrendReportId(true),runId,TrendReportTypes.DataType.Regular, TrendReportTypes.PctType.WEB, TrendReportTypes.Measurement.PCT_AVERAGE);
saveFileToWorkspacePath(pcClient,pcModel.getTrendReportId(true),runId,TrendReportTypes.DataType.Regular, TrendReportTypes.PctType.WEB, TrendReportTypes.Measurement.PCT_MEDIAN);
//saveFileToWorkspacePath(pcClient,pcModel.getTrendReportId(true),runId,TrendReportTypes.DataType.Regular, TrendReportTypes.PctType.WEB, TrendReportTypes.Measurement.PCT_STDDEVIATION);
saveFileToWorkspacePath(pcClient,pcModel.getTrendReportId(true),runId,TrendReportTypes.DataType.Regular, TrendReportTypes.PctType.WEB, TrendReportTypes.Measurement.PCT_SUM1);
// logger.print(build.getRootDir().getPath());
}
private boolean saveFileToWorkspacePath(PcClient pcClient, String trendReportID, int runId,TrendReportTypes.DataType dataType, TrendReportTypes.PctType pctType, TrendReportTypes.Measurement measurement)throws IOException, PcException, IntrospectionException, NoSuchMethodException{
String fileName = measurement.toString().toLowerCase() + "_" + pctType.toString().toLowerCase() + ".csv";
Map<String, String> measurementMap = pcClient.getTrendReportByXML(trendReportID, runId, dataType, pctType, measurement);
if (!_build.getWorkspace().isRemote()) {
try {
File file = new File(getWorkspacePath().getPath() + "/" + fileName);
if (!file.exists()) {
file.createNewFile();
}
PrintWriter writer = new PrintWriter(file);
for (String key : measurementMap.keySet()) {
writer.print(key + ",");
}
writer.print("\r\n");
for (String value : measurementMap.values()) {
writer.print(value + ",");
}
writer.close();
// logger.println(String.format("%s - %s Created.", simpleDateFormater(), fileName));
return true;
} catch (IOException e) {
if (getWorkspacePath().getPath() != null)
logger.println(String.format("%s - Error saving file: %s to workspace path: %s with Error: %s", simpleDateFormater(), getWorkspacePath().getPath(), fileName, e.getMessage()));
else
logger.println(String.format("%s - Error saving file: %s because workspace path is unavailable. Error: %s", simpleDateFormater(), fileName, e.getMessage()));
}
}
else {
try {
FilePath filePath = new FilePath(_build.getWorkspace().getChannel(), getWorkspacePath().getPath() + "/" + fileName);
String filepathContent="";
for (String key : measurementMap.keySet()) {
filepathContent += key + ",";
}
filepathContent += "\r\n";
for (String value : measurementMap.values()) {
filepathContent += value + ",";
}
filePath.write(filepathContent, null);
return true;
} catch (InterruptedException e) {
if (getWorkspacePath().getPath() != null)
logger.println(String.format("%s - Error saving file: %s to remote workspace path: %s with Error: %s", simpleDateFormater(), getWorkspacePath().getPath(), fileName, e.getMessage()));
else
logger.println(String.format("%s - Error saving file: %s because remote workspace path is unavailable. Error: %s", simpleDateFormater(), fileName, e.getMessage()));
return false;
}
}
return false;
}
private void updateTestStatus(Testcase testCase, PcRunResponse response, String errorMessage, String eventLog) {
RunState runState = RunState.get(response.getRunState());
if (runState == RUN_FAILURE) {
setError(testCase, String.format("%s. %s", runState, errorMessage), eventLog);
} else if (statusBySLA && runState == FINISHED && !(response.getRunSLAStatus().equalsIgnoreCase("passed"))) {
setFailure(testCase, "Run measurements did not reach SLA criteria. Run SLA Status: "
+ response.getRunSLAStatus(), eventLog);
} else if (runState.hasFailure()) {
setFailure(testCase, String.format("%s. %s", runState, errorMessage), eventLog);
} else if(errorMessage != null && !errorMessage.isEmpty()){
setFailure(testCase, String.format("%s. %s", runState, errorMessage), eventLog);
}
else{
testCase.setStatus(JUnitTestCaseStatus.PASS);
}
}
private void setError(Testcase testCase, String message, String eventLog) {
Error error = new Error();
error.setMessage(message);
if (!(eventLog == null || eventLog.isEmpty()))
testCase.getSystemErr().add(eventLog);
testCase.getError().add(error);
testCase.setStatus(JUnitTestCaseStatus.ERROR);
logger.println(String.format("%s - %s %s", simpleDateFormater() , message ,eventLog));
}
private void setFailure(Testcase testCase, String message, String eventLog) {
Failure failure = new Failure();
failure.setMessage(message);
if (!(eventLog == null || eventLog.isEmpty()))
testCase.getSystemErr().add(eventLog);
testCase.getFailure().add(failure);
testCase.setStatus(JUnitTestCaseStatus.FAILURE);
logger.println(String.format("%s - Failure: %s %s", simpleDateFormater(), message ,eventLog));
}
private String getOutputForReportLinks(Run<?, ?> build) {
String urlPattern = getArtifactsUrlPattern(build);
String viewUrl = String.format(urlPattern + "/%s", pcReportFileName);
String downloadUrl = String.format(urlPattern + "/%s", "*zip*/pcRun");
logger.println(String.format("%s - %s", simpleDateFormater(), HyperlinkNote.encodeTo(viewUrl, "View analysis report of run " + runId)));
return String.format("Load Test Run ID: %s\n\nView analysis report:\n%s\n\nDownload Report:\n%s", runId, pcModel.getserverAndPort() + "/" + build.getUrl() + viewUrl, pcModel.getserverAndPort() + "/" + build.getUrl() + downloadUrl);
}
private String getArtifactsUrlPattern(Run<?, ?> build) {
String runReportUrlTemp = runReportStructure.replaceFirst("%s/", "");
return String.format(
runReportUrlTemp,
artifactsResourceName);
}
private void provideStepResultStatus(Result resultStatus, Run<?, ?> build) {
String runIdStr =
(runId > 0) ? String.format(" (PC RunID: %s)", String.valueOf(runId)) : "";
logger.println(String.format("%s - Result Status%s: %s\n- - -",
simpleDateFormater(),
runIdStr,
resultStatus.toString()));
build.setResult(resultStatus);
}
private Result createRunResults(FilePath filePath, Testsuites testsuites) {
Result ret = Result.SUCCESS;
try {
if (testsuites != null) {
StringWriter writer = new StringWriter();
JAXBContext context = JAXBContext.newInstance(Testsuites.class);
Marshaller marshaller = context.createMarshaller();
marshaller.marshal(testsuites, writer);
filePath.write(writer.toString(), null);
if (containsErrorsOrFailures(testsuites.getTestsuite())) {
ret = Result.FAILURE;
}
} else {
logger.println(String.format("%s - Empty Results", simpleDateFormater()));
ret = Result.FAILURE;
}
} catch (Exception cause) {
logger.print(String.format(
"%s - Failed to create run results, Exception: %s",
simpleDateFormater(),
cause.getMessage()));
ret = Result.FAILURE;
}
return ret;
}
private boolean containsErrorsOrFailures(List<Testsuite> testsuites) {
boolean ret = false;
for (Testsuite testsuite : testsuites) {
for (Testcase testcase : testsuite.getTestcase()) {
String status = testcase.getStatus();
if (status.equals(JUnitTestCaseStatus.ERROR)
|| status.equals(JUnitTestCaseStatus.FAILURE)) {
ret = true;
break;
}
}
}
return ret;
}
private String getJunitResultsFileName() {
Format formatter = new SimpleDateFormat("ddMMyyyyHHmmssSSS");
String time = formatter.format(new Date());
junitResultsFileName = String.format("Results%s.xml", time);
return junitResultsFileName;
}
@Override
public void perform(@Nonnull Run<?, ?> build, @Nonnull FilePath workspace, @Nonnull Launcher launcher,
@Nonnull TaskListener listener) throws InterruptedException, IOException {
Result resultStatus = Result.FAILURE;
//trendReportReady = false;
logger = listener.getLogger();
PcClient pcClient = new PcClient(pcModel, logger);
Testsuites testsuites = execute(pcClient, build);
// // Create Trend Report
// if(trendReportReady){
// String reportUrlTemp = trendReportStructure.replaceFirst("%s/", "") + "/trendReport%s.pdf";
// String reportUrl = String.format(reportUrlTemp, artifactsResourceName, pcModel.getTrendReportId(true));
// pcClient.publishTrendReport(reportUrl, pcModel.getTrendReportId(true));
// }
// // End Create Trend Report
FilePath resultsFilePath = workspace.child(getJunitResultsFileName());
resultStatus = createRunResults(resultsFilePath, testsuites);
provideStepResultStatus(resultStatus, build);
if (!Result.SUCCESS.equals(resultStatus) && !Result.FAILURE.equals(resultStatus)) {
return;
}
// // Only do this if the build worked (not unstable or aborted, which might mean there is no report).
// JUnitResultArchiver jUnitResultArchiver = new JUnitResultArchiver(this.getRunResultsFileName());
// jUnitResultArchiver.setKeepLongStdio(true);
// jUnitResultArchiver.perform(build, workspace, launcher, listener);
}
public String getServerAndPort()
{
return getPcModel().getserverAndPort();
}
public String getPcServerName()
{
return getPcModel().getPcServerName();
}
public String getAlmProject()
{
return getPcModel().getAlmProject();
}
public String getTestId()
{
return getPcModel().getTestId();
}
public String getAlmDomain()
{
return getPcModel().getAlmDomain();
}
public String getTimeslotDurationHours()
{
return timeslotDurationHours;
}
public String getTimeslotDurationMinutes()
{
return timeslotDurationMinutes;
}
public PostRunAction getPostRunAction()
{
return getPcModel().getPostRunAction();
}
public String getTrendReportId()
{
return getPcModel().getTrendReportId(true);
}
public String getAutoTestInstanceID()
{
return getPcModel().getAutoTestInstanceID();
}
public String getTestInstanceId()
{
return getPcModel().getTestInstanceId();
}
public String getAddRunToTrendReport()
{
return getPcModel().getAddRunToTrendReport();
}
public boolean isVudsMode()
{
return getPcModel().isVudsMode();
}
public String getRetry () {
return getPcModel().getRetry();
}
public String getRetryOccurrences () {
return getPcModel().getRetryOccurrences();
}
public String getRetryDelay () {
return getPcModel().getRetryDelay();
}
public String getDescription()
{
return getPcModel().getDescription();
}
public String getAlmUserName() {
return almUserName;
}
private final String almUserName;
public String getAlmPassword() {
return almPassword;
}
public boolean isHTTPSProtocol()
{
return getPcModel().httpsProtocol();
}
public boolean isStatusBySLA() {
return statusBySLA;
}
public String getProxyOutURL(){ return getPcModel().getProxyOutURL();}
public String getProxyOutUser(){ return getPcModel().getProxyOutUser();}
public String getProxyOutPassword(){ return getPcModel().getProxyOutPassword();}
private String simpleDateFormater()
{
try {
SimpleDateFormat simpleDateFormat = new SimpleDateFormat ("E yyyy MMM dd 'at' HH:mm:ss.SSS a zzz");
String simpleDate = simpleDateFormat.format(new Date());
if (simpleDate != null)
return simpleDate;
else
return "";
}
catch (Exception ex) {
return "";
}
}
// This indicates to Jenkins that this is an implementation of an extension
// point
@Extension
@Symbol("pcBuild")
public static final class DescriptorImpl extends BuildStepDescriptor<Builder> {
public DescriptorImpl() {
load();
}
@Override
public boolean isApplicable(
@SuppressWarnings("rawtypes") Class<? extends AbstractProject> jobType) {
return true;
}
@Override
public String getDisplayName() {
return "Execute performance test using Performance Center";
}
public FormValidation doCheckPcServerName(@QueryParameter String value) {
return validateString(value, "PC Server");
}
public FormValidation doCheckAlmUserName(@QueryParameter String value) {
return validateString(value, "User name");
}
public FormValidation doCheckAlmDomain(@QueryParameter String value) {
return validateString(value, "Domain");
}
public FormValidation doCheckAlmProject(@QueryParameter String value) {
return validateString(value, "Project");
}
public FormValidation doCheckTestId(@QueryParameter String value) {
return validateHigherThanInt(value, "Test ID", 0, true);
}
public FormValidation doCheckRetryDelay(@QueryParameter String value) {
return validateHigherThanInt(value, "Delay between attempts (in minutes)", 0, true);
}
public FormValidation doCheckRetryOccurrences(@QueryParameter String value) {
return validateHigherThanInt(value, "Number of attempts", 0, true);
}
// if autoTestInstanceID is selected we don't need to check the validation of the test instance
// public static FormValidation CheckOnlyAutoTestInstanceId(String autoTestInstanceID){
// if(autoTestInstanceID.equals("AUTO"))
// return FormValidation.ok();
// else
// return FormValidation.error("Error ");
// }
public FormValidation doCheckTestInstanceId(@QueryParameter String value){
return validateHigherThanInt(value, "Test Instance ID", 0, true);
}
public FormValidation doCheckTimeslotDuration(@QueryParameter TimeslotDuration value) {
return validateHigherThanInt(
String.valueOf(value.toMinutes()),
"Timeslot Duration (in minutes)",
30,
false);
}
public FormValidation doCheckTimeslotId(@QueryParameter String value) {
return validateHigherThanInt(value, "Timeslot ID", 0, true);
}
/**
* @param limitIncluded
* if true, value must be higher than limit. if false, value must be equal to or
* higher than limit.
*/
private FormValidation validateHigherThanInt(
String value,
String field,
int limit,
boolean limitIncluded) {
FormValidation ret = FormValidation.ok();
value = value.trim();
String messagePrefix = field + " must be ";
if (StringUtils.isBlank(value)) {
ret = FormValidation.error(messagePrefix + "set");
} else {
try {
//regular expression: parameter (with brackets or not)
if (value.matches("^\\$\\{[\\w-. ]*}$|^\\$[\\w-.]*$"))
return ret;
//regular expression: number
else if (value.matches("[0-9]*$|")) {
if (limitIncluded && Integer.parseInt(value) <= limit)
ret = FormValidation.error(messagePrefix + "higher than " + limit);
else if (Integer.parseInt(value) < limit)
ret = FormValidation.error(messagePrefix + "at least " + limit);
}
else
ret = FormValidation.error(messagePrefix + "a whole number or a parameter, e.g.: 23, $TESTID or ${TEST_ID}.");
} catch (Exception e) {
ret = FormValidation.error(messagePrefix + "a whole number or a parameter (e.g.: $TESTID or ${TestID})");
}
}
return ret;
}
private FormValidation validateString(String value, String field) {
FormValidation ret = FormValidation.ok();
if (StringUtils.isBlank(value.trim())) {
ret = FormValidation.error(field + " must be set");
}
return ret;
}
public List<PostRunAction> getPostRunActions() {
return PcModel.getPostRunActions();
}
}
}
| codacy recommendations
| src/main/java/com/microfocus/application/automation/tools/run/PcBuilder.java | codacy recommendations | <ide><path>rc/main/java/com/microfocus/application/automation/tools/run/PcBuilder.java
<ide> proxyOutURL,
<ide> proxyOutUser,
<ide> proxyOutPassword,
<del> retry.isEmpty()? "NO_RETRY" : retry,
<del> retry.equals("NO_RETRY") ? "0" : retryDelay.isEmpty()? "5" : retryDelay,
<del> retry.equals("NO_RETRY") ? "0" : retryOccurrences.isEmpty()? "3" : retryOccurrences);
<add> (retry == null || retry.isEmpty())? "NO_RETRY" : retry,
<add> ("NO_RETRY".equals(retry)) ? "0" : (retryDelay == null || retryDelay.isEmpty()) ? "5" : retryDelay,
<add> ("NO_RETRY".equals(retry)) ? "0" : (retryOccurrences == null || retryOccurrences.isEmpty()) ? "3" : retryOccurrences);
<ide> }
<ide>
<ide> @Override |
|
Java | lgpl-2.1 | 40ce3a6ba11a3aad960ed9300d7d9133d36de2ee | 0 | CloverETL/CloverETL-Engine,CloverETL/CloverETL-Engine,CloverETL/CloverETL-Engine,CloverETL/CloverETL-Engine | /*
* jETeL/CloverETL - Java based ETL application framework.
* Copyright (c) Javlin, a.s. ([email protected])
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
package org.jetel.util.protocols.sandbox;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.net.URL;
import java.net.URLConnection;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.jetel.graph.TransformationGraph;
import org.jetel.graph.runtime.IAuthorityProxy;
public class SandboxConnection extends URLConnection {
private static final Log log = LogFactory.getLog(SandboxConnection.class);
private final TransformationGraph graph;
/**
* SFTP constructor.
* @param graph
*
* @param url
*/
protected SandboxConnection(TransformationGraph graph, URL url) {
super(url);
this.graph = graph;
}
/*
* (non-Javadoc)
* @see java.net.URLConnection#getInputStream()
*/
@Override
public InputStream getInputStream() throws IOException {
String storageCode = url.getHost();
String path = url.getPath();
IAuthorityProxy authorityProxy = IAuthorityProxy.getAuthorityProxy(graph);
if (graph != null) {
long runId = graph.getRuntimeContext().getRunId();
return authorityProxy.getSandboxResourceInput(runId, storageCode, path);
} else {
return authorityProxy.getSandboxResourceInput(0, storageCode, path);
}
}
/*
* (non-Javadoc)
* @see java.net.URLConnection#getOutputStream()
*/
@Override
public OutputStream getOutputStream() throws IOException {
String storageCode = url.getHost();
String path = url.getPath();
if (graph != null) {
long runId = graph.getRuntimeContext().getRunId();
return graph.getAuthorityProxy().getSandboxResourceOutput(runId, storageCode, path);
} else {
return IAuthorityProxy.getDefaultProxy().getSandboxResourceOutput(0, storageCode, path);
}
}
/*
* (non-Javadoc)
* @see java.net.URLConnection#connect()
*/
@Override
public void connect() throws IOException {
}
}
| cloveretl.engine/src/org/jetel/util/protocols/sandbox/SandboxConnection.java | /*
* jETeL/CloverETL - Java based ETL application framework.
* Copyright (c) Javlin, a.s. ([email protected])
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
package org.jetel.util.protocols.sandbox;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.net.URL;
import java.net.URLConnection;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.jetel.graph.TransformationGraph;
import org.jetel.graph.runtime.IAuthorityProxy;
public class SandboxConnection extends URLConnection {
private static final Log log = LogFactory.getLog(SandboxConnection.class);
private final TransformationGraph graph;
/**
* SFTP constructor.
* @param graph
*
* @param url
*/
protected SandboxConnection(TransformationGraph graph, URL url) {
super(url);
this.graph = graph;
}
/*
* (non-Javadoc)
* @see java.net.URLConnection#getInputStream()
*/
@Override
public InputStream getInputStream() throws IOException {
String storageCode = url.getHost();
String path = url.getPath();
if (graph != null) {
long runId = graph.getRuntimeContext().getRunId();
return graph.getAuthorityProxy().getSandboxResourceInput(runId, storageCode, path);
} else {
return IAuthorityProxy.getDefaultProxy().getSandboxResourceInput(0, storageCode, path);
}
}
/*
* (non-Javadoc)
* @see java.net.URLConnection#getOutputStream()
*/
@Override
public OutputStream getOutputStream() throws IOException {
String storageCode = url.getHost();
String path = url.getPath();
if (graph != null) {
long runId = graph.getRuntimeContext().getRunId();
return graph.getAuthorityProxy().getSandboxResourceOutput(runId, storageCode, path);
} else {
return IAuthorityProxy.getDefaultProxy().getSandboxResourceOutput(0, storageCode, path);
}
}
/*
* (non-Javadoc)
* @see java.net.URLConnection#connect()
*/
@Override
public void connect() throws IOException {
}
}
| MINOR: refactoring (get authority proxy logic moved to abstract class)
git-svn-id: 7003860f782148507aa0d02fa3b12992383fb6a5@11379 a09ad3ba-1a0f-0410-b1b9-c67202f10d70
| cloveretl.engine/src/org/jetel/util/protocols/sandbox/SandboxConnection.java | MINOR: refactoring (get authority proxy logic moved to abstract class) | <ide><path>loveretl.engine/src/org/jetel/util/protocols/sandbox/SandboxConnection.java
<ide> public InputStream getInputStream() throws IOException {
<ide> String storageCode = url.getHost();
<ide> String path = url.getPath();
<add> IAuthorityProxy authorityProxy = IAuthorityProxy.getAuthorityProxy(graph);
<ide> if (graph != null) {
<ide> long runId = graph.getRuntimeContext().getRunId();
<del> return graph.getAuthorityProxy().getSandboxResourceInput(runId, storageCode, path);
<add> return authorityProxy.getSandboxResourceInput(runId, storageCode, path);
<ide> } else {
<del> return IAuthorityProxy.getDefaultProxy().getSandboxResourceInput(0, storageCode, path);
<add> return authorityProxy.getSandboxResourceInput(0, storageCode, path);
<ide> }
<ide> }
<ide> |
|
Java | bsd-3-clause | 83ea00ce3035b11e3621049afd8af78e04edc478 | 0 | NCIP/cananolab,NCIP/cananolab,NCIP/cananolab | package gov.nih.nci.calab.service.search;
import gov.nih.nci.calab.db.DataAccessProxy;
import gov.nih.nci.calab.db.IDataAccess;
import gov.nih.nci.calab.domain.nano.characterization.Characterization;
import gov.nih.nci.calab.domain.nano.function.Function;
import gov.nih.nci.calab.domain.nano.particle.Nanoparticle;
import gov.nih.nci.calab.dto.characterization.CharacterizationBean;
import gov.nih.nci.calab.dto.common.SearchableBean;
import gov.nih.nci.calab.dto.common.UserBean;
import gov.nih.nci.calab.dto.function.FunctionBean;
import gov.nih.nci.calab.dto.particle.ParticleBean;
import gov.nih.nci.calab.exception.CalabException;
import gov.nih.nci.calab.service.security.UserService;
import gov.nih.nci.calab.service.util.CaNanoLabComparators;
import gov.nih.nci.calab.service.util.CaNanoLabConstants;
import gov.nih.nci.calab.service.util.StringUtils;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import org.apache.log4j.Logger;
/**
* This class includes methods invovled in searching nanoparticles.
*
* @author pansu
*
*/
public class SearchNanoparticleService {
private static Logger logger = Logger
.getLogger(SearchNanoparticleService.class);
/**
* Search for nanoparticles based on particle source, type, function types,
* characterizationType, characterizations, keywords and filter the
* nanoparticles by user visibility.
*
* @param particleSource
* @param particleType
* @param functionTypes
* @param characterizationType
* @param characterizations
* @param keywords
* @param user
* @return
* @throws Exception
*/
public List<ParticleBean> basicSearch(String particleSource,
String particleType, String[] functionTypes,
String[] characterizations, String[] keywords, String keywordType,
String[] summaries, String summaryType, UserBean user)
throws Exception {
IDataAccess ida = (new DataAccessProxy())
.getInstance(IDataAccess.HIBERNATE);
List<ParticleBean> particles = new ArrayList<ParticleBean>();
try {
List<Object> paramList = new ArrayList<Object>();
List<String> whereList = new ArrayList<String>();
String where = "";
String keywordFrom = "";
String functionFrom = "";
String characterizationFrom = "";
String summaryForm = "";
if (particleSource != null && particleSource.length() > 0) {
where = "where ";
whereList.add("particle.source.organizationName=? ");
paramList.add(particleSource);
}
if (particleType != null && particleType.length() > 0) {
paramList.add(particleType);
where = "where ";
whereList.add("particle.type=? ");
}
if (functionTypes != null && functionTypes.length > 0) {
List<String> inList = new ArrayList<String>();
where = "where ";
for (String functionType : functionTypes) {
paramList.add(functionType);
inList.add("?");
}
functionFrom = "join particle.functionCollection function ";
whereList.add("function.type in ("
+ StringUtils.join(inList, ", ") + ") ");
}
if (keywords != null && keywords.length > 0) {
List<String> inList = new ArrayList<String>();
where = "where ";
for (String keyword : keywords) {
paramList.add(keyword);
inList.add("?");
}
if (keywordType.equals("nanoparticle")) {
keywordFrom = "join particle.keywordCollection keyword ";
} else {
keywordFrom = "join particle.characterizationCollection characterization "
+ "join characterization.derivedBioAssayDataCollection dataCollection "
+ "join dataCollection.keywordCollection keyword ";
}
whereList.add("keyword.name in ("
+ StringUtils.join(inList, ", ") + ") ");
}
if (summaries != null && summaries.length > 0) {
List<String> summaryList = new ArrayList<String>();
where = "where ";
for (String summary : summaries) {
paramList.add("%" + summary + "%");
summaryList.add("?");
}
if (summaryType.equals("characterization")) {
summaryForm = "join particle.characterizationCollection data ";
} else {
summaryForm = "join particle.characterizationCollection characterization "
+ "join characterization.derivedBioAssayDataCollection data ";
}
List<String> summaryWhere = new ArrayList<String>();
for (String summary : summaryList) {
summaryWhere.add("data.description like " + summary);
}
whereList.add(StringUtils.join(summaryWhere, " or "));
}
if (characterizations != null && characterizations.length > 0) {
List<String> inList = new ArrayList<String>();
where = "where ";
for (String characterization : characterizations) {
paramList.add(characterization);
inList.add("?");
}
// to have the if statment, the keyword will only apply to the
// characterization it specified.
if (keywords == null
|| (keywords.length > 0 && keywordType
.equals("nanoparticle"))) {
characterizationFrom = "join particle.characterizationCollection characterization ";
}
whereList.add("characterization.name in ("
+ StringUtils.join(inList, ", ") + ") ");
}
String whereStr = StringUtils.join(whereList, " and ");
String hqlString = "select particle from Nanoparticle particle "
+ functionFrom + keywordFrom + summaryForm
+ characterizationFrom + where + whereStr;
ida.open();
List<? extends Object> results = (List<? extends Object>) ida
.searchByParam(hqlString, paramList);
for (Object obj : new HashSet<Object>(results)) {
Nanoparticle particle = (Nanoparticle) obj;
ParticleBean particleBean = new ParticleBean(particle);
particles.add(particleBean);
}
} catch (Exception e) {
logger
.error("Problem finding particles with thet given search parameters ");
throw e;
} finally {
ida.close();
}
UserService userService = new UserService(
CaNanoLabConstants.CSM_APP_NAME);
List<ParticleBean> filteredParticles = userService
.getFilteredParticles(user, particles);
// sort the list by IDs
Collections.sort(filteredParticles,
new CaNanoLabComparators.SampleBeanComparator());
return filteredParticles;
}
/**
* Query nanoparticle general information such as name, type, keywords and
* visibilities.
*
* @param particleName
* @param particleType
* @return
* @throws Exception
*/
public ParticleBean getGeneralInfo(String particleName, String particleType)
throws Exception {
Nanoparticle particle = null;
IDataAccess ida = (new DataAccessProxy())
.getInstance(IDataAccess.HIBERNATE);
try {
ida.open();
// get the existing particle from database created during sample
// creation
List results = ida
.search("from Nanoparticle as particle left join fetch particle.keywordCollection where particle.name='"
+ particleName
+ "' and particle.type='"
+ particleType + "'");
for (Object obj : results) {
particle = (Nanoparticle) obj;
}
if (particle == null) {
throw new CalabException("No such particle in the database");
}
} catch (Exception e) {
logger.error("Problem finding particle with name: " + particleName);
throw e;
} finally {
ida.close();
}
ParticleBean particleBean = new ParticleBean(particle);
UserService userService = new UserService(
CaNanoLabConstants.CSM_APP_NAME);
List<String> accessibleGroups = userService.getAccessibleGroups(
particleName, CaNanoLabConstants.CSM_READ_ROLE);
String[] visibilityGroups = accessibleGroups.toArray(new String[0]);
particleBean.setVisibilityGroups(visibilityGroups);
return particleBean;
}
public List<CharacterizationBean> getCharacterizationInfo(
String particleName, String particleType) throws Exception {
List<CharacterizationBean> charBeans = new ArrayList<CharacterizationBean>();
IDataAccess ida = (new DataAccessProxy())
.getInstance(IDataAccess.HIBERNATE);
try {
ida.open();
List results = ida
.search("select chara.id, chara.name, chara.identificationName from Nanoparticle particle join particle.characterizationCollection chara where particle.name='"
+ particleName
+ "' and particle.type='"
+ particleType
+ "' order by chara.name, chara.identificationName");
for (Object obj : results) {
String charId = ((Object[]) obj)[0].toString();
String charName = (String) (((Object[]) obj)[1]);
String viewTitle = (String) (((Object[]) obj)[2]);
CharacterizationBean charBean = new CharacterizationBean(
charId, charName, viewTitle);
charBeans.add(charBean);
}
} catch (Exception e) {
logger.error("Problem finding characterization info for particle: "
+ particleName);
throw e;
} finally {
ida.close();
}
return charBeans;
}
public Characterization getCharacterizationBy(String charId)
throws Exception {
IDataAccess ida = (new DataAccessProxy())
.getInstance(IDataAccess.HIBERNATE);
Characterization aChar = null;
try {
ida.open();
List results = ida
.search(" from Characterization chara left join fetch chara.composingElementCollection left join fetch chara.derivedBioAssayDataCollection where chara.id="
+ charId);
for (Object obj : results) {
aChar = (Characterization) obj;
}
} catch (Exception e) {
logger.error("Problem finding characterization");
throw e;
} finally {
ida.close();
}
return aChar;
}
public Characterization getCharacterizationAndDerivedDataBy(String charId)
throws Exception {
IDataAccess ida = (new DataAccessProxy())
.getInstance(IDataAccess.HIBERNATE);
Characterization aChar = null;
try {
ida.open();
List results = ida
.search(" from Characterization chara left join fetch chara.derivedBioAssayDataCollection assayData"
+ " left join fetch assayData.datumCollection datum"
+ " where chara.id=" + charId);
for (Object obj : results) {
aChar = (Characterization) obj;
}
} catch (Exception e) {
logger.error("Problem finding characterization");
throw e;
} finally {
ida.close();
}
return aChar;
}
public Map<String, List<FunctionBean>> getFunctionInfo(String particleName,
String particleType) throws Exception {
Map<String, List<FunctionBean>> funcTypeFuncs = new HashMap<String, List<FunctionBean>>();
IDataAccess ida = (new DataAccessProxy())
.getInstance(IDataAccess.HIBERNATE);
try {
ida.open();
List results = ida
.search("select func.id, func.type, func.identificationName from Nanoparticle particle join particle.functionCollection func where particle.name='"
+ particleName
+ "' and particle.type='"
+ particleType + "'");
List<FunctionBean> funcs = new ArrayList<FunctionBean>();
for (Object obj : results) {
String funcId = ((Object[]) obj)[0].toString();
String funcType = ((Object[]) obj)[1].toString();
String viewTitle = (String) (((Object[]) obj)[2]);
FunctionBean funcBean = new FunctionBean(funcId, funcType,
viewTitle);
if (funcTypeFuncs.get(funcType) != null) {
funcs = (List<FunctionBean>) (funcTypeFuncs.get(funcType));
} else {
funcs = new ArrayList<FunctionBean>();
funcTypeFuncs.put(funcType, funcs);
}
funcs.add(funcBean);
}
} catch (Exception e) {
logger.error("Problem finding characterization info for particle: "
+ particleName);
throw e;
} finally {
ida.close();
}
return funcTypeFuncs;
}
public Function getFunctionBy(String funcId) throws Exception {
IDataAccess ida = (new DataAccessProxy())
.getInstance(IDataAccess.HIBERNATE);
Function func = null;
try {
ida.open();
List results = ida
.search(" from Function func left join fetch func.linkageCollection link left join fetch link.agent.agentTargetCollection where func.id="
+ funcId);
for (Object obj : results) {
func = (Function) obj;
}
} catch (Exception e) {
logger.error("Problem finding functions");
throw e;
} finally {
ida.close();
}
return func;
}
/**
* Avanced nanoparticle search based on more detailed meta data.
*
* @param particleType
* @param functionTypes
* @param searchCriteria
* @return
*/
public List<ParticleBean> advancedSearch(String particleType,
String[] functionTypes, List<SearchableBean> searchCriteria,
UserBean user) throws Exception {
IDataAccess ida = (new DataAccessProxy())
.getInstance(IDataAccess.HIBERNATE);
List<ParticleBean> particleList = null;
try {
// query by particle type and function types first
particleList = this.basicSearch(null, particleType, functionTypes,
null, null, null, null, null, user);
// return if no particles found or no other search criteria entered
if (searchCriteria.isEmpty() || particleList.isEmpty()) {
return particleList;
}
ida.open();
for (SearchableBean searchBean : searchCriteria) {
List<ParticleBean> theParticles = searchParticlesBy(ida,
searchBean);
particleList.retainAll(theParticles);
}
} catch (Exception e) {
logger.error("Problem finding particles.");
throw e;
} finally {
ida.close();
}
return particleList;
}
/**
* Return particles based on search criteria defined in SearchableBean. Used
* in the advanced search function.
*
* @param ida
* @param charInfo
* @return
* @throws Exception
*/
public List<ParticleBean> searchParticlesBy(IDataAccess ida,
SearchableBean charInfo) throws Exception {
List<ParticleBean> particles = new ArrayList<ParticleBean>();
// if no value range, don't query
if (charInfo.getLowValue().length() == 0
&& charInfo.getHighValue().length() == 0) {
return particles;
}
String hqlSelect = "select distinct particle from Nanoparticle particle "
+ "join particle.characterizationCollection char join char.derivedBioAssayDataCollection chart "
+ "join chart.datumCollection data ";
String hqlWhere = "where char.name=? and data.type=?";
List<Object> paramList = new ArrayList<Object>();
paramList.add(charInfo.getClassification());
paramList.add(charInfo.getType());
if (charInfo.getLowValue().length() > 0) {
hqlWhere += " and data.value.value>=?";
paramList.add(charInfo.getLowValue());
}
if (charInfo.getHighValue().length() > 0) {
hqlWhere += " and data.value.value<=?";
paramList.add(charInfo.getHighValue());
}
String hqlString = hqlSelect + hqlWhere;
List results = ida.searchByParam(hqlString, paramList);
for (Object obj : results) {
Nanoparticle particle = (Nanoparticle) obj;
ParticleBean particleBean = new ParticleBean(particle);
particles.add(particleBean);
}
return particles;
}
}
| src/gov/nih/nci/calab/service/search/SearchNanoparticleService.java | package gov.nih.nci.calab.service.search;
import gov.nih.nci.calab.db.DataAccessProxy;
import gov.nih.nci.calab.db.IDataAccess;
import gov.nih.nci.calab.domain.nano.characterization.Characterization;
import gov.nih.nci.calab.domain.nano.function.Function;
import gov.nih.nci.calab.domain.nano.particle.Nanoparticle;
import gov.nih.nci.calab.dto.characterization.CharacterizationBean;
import gov.nih.nci.calab.dto.characterization.DerivedBioAssayDataBean;
import gov.nih.nci.calab.dto.common.SearchableBean;
import gov.nih.nci.calab.dto.common.UserBean;
import gov.nih.nci.calab.dto.function.FunctionBean;
import gov.nih.nci.calab.dto.particle.ParticleBean;
import gov.nih.nci.calab.exception.CalabException;
import gov.nih.nci.calab.service.common.FileService;
import gov.nih.nci.calab.service.security.UserService;
import gov.nih.nci.calab.service.util.CaNanoLabComparators;
import gov.nih.nci.calab.service.util.CaNanoLabConstants;
import gov.nih.nci.calab.service.util.PropertyReader;
import gov.nih.nci.calab.service.util.StringUtils;
import java.io.File;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import org.apache.log4j.Logger;
/**
* This class includes methods invovled in searching nanoparticles.
*
* @author pansu
*
*/
public class SearchNanoparticleService {
private static Logger logger = Logger
.getLogger(SearchNanoparticleService.class);
/**
* Search for nanoparticles based on particle source, type, function types,
* characterizationType, characterizations, keywords and filter the
* nanoparticles by user visibility.
*
* @param particleSource
* @param particleType
* @param functionTypes
* @param characterizationType
* @param characterizations
* @param keywords
* @param user
* @return
* @throws Exception
*/
public List<ParticleBean> basicSearch(String particleSource,
String particleType, String[] functionTypes,
String[] characterizations, String[] keywords, String keywordType,
String[] summaries, String summaryType, UserBean user)
throws Exception {
IDataAccess ida = (new DataAccessProxy())
.getInstance(IDataAccess.HIBERNATE);
List<ParticleBean> particles = new ArrayList<ParticleBean>();
try {
List<Object> paramList = new ArrayList<Object>();
List<String> whereList = new ArrayList<String>();
String where = "";
String keywordFrom = "";
String functionFrom = "";
String characterizationFrom = "";
String summaryForm = "";
if (particleSource != null && particleSource.length() > 0) {
where = "where ";
whereList.add("particle.source.organizationName=? ");
paramList.add(particleSource);
}
if (particleType != null && particleType.length() > 0) {
paramList.add(particleType);
where = "where ";
whereList.add("particle.type=? ");
}
if (functionTypes != null && functionTypes.length > 0) {
List<String> inList = new ArrayList<String>();
where = "where ";
for (String functionType : functionTypes) {
paramList.add(functionType);
inList.add("?");
}
functionFrom = "join particle.functionCollection function ";
whereList.add("function.type in ("
+ StringUtils.join(inList, ", ") + ") ");
}
if (keywords != null && keywords.length > 0) {
List<String> inList = new ArrayList<String>();
where = "where ";
for (String keyword : keywords) {
paramList.add(keyword);
inList.add("?");
}
if (keywordType.equals("nanoparticle")) {
keywordFrom = "join particle.keywordCollection keyword ";
} else {
keywordFrom = "join particle.characterizationCollection characterization "
+ "join characterization.derivedBioAssayDataCollection dataCollection "
+ "join dataCollection.keywordCollection keyword ";
}
whereList.add("keyword.name in ("
+ StringUtils.join(inList, ", ") + ") ");
}
if (summaries != null && summaries.length > 0) {
List<String> summaryList = new ArrayList<String>();
where = "where ";
for (String summary : summaries) {
paramList.add("%" + summary + "%");
summaryList.add("?");
}
if (summaryType.equals("characterization")) {
summaryForm = "join particle.characterizationCollection data ";
} else {
summaryForm = "join particle.characterizationCollection characterization "
+ "join characterization.derivedBioAssayDataCollection data ";
}
List<String> summaryWhere = new ArrayList<String>();
for (String summary : summaryList) {
summaryWhere.add("data.description like " + summary);
}
whereList.add(StringUtils.join(summaryWhere, " or "));
}
if (characterizations != null && characterizations.length > 0) {
List<String> inList = new ArrayList<String>();
where = "where ";
for (String characterization : characterizations) {
paramList.add(characterization);
inList.add("?");
}
// to have the if statment, the keyword will only apply to the
// characterization it specified.
if (keywords == null
|| (keywords.length > 0 && keywordType
.equals("nanoparticle"))) {
characterizationFrom = "join particle.characterizationCollection characterization ";
}
whereList.add("characterization.name in ("
+ StringUtils.join(inList, ", ") + ") ");
}
String whereStr = StringUtils.join(whereList, " and ");
String hqlString = "select particle from Nanoparticle particle "
+ functionFrom + keywordFrom + summaryForm
+ characterizationFrom + where + whereStr;
ida.open();
List<? extends Object> results = (List<? extends Object>) ida
.searchByParam(hqlString, paramList);
for (Object obj : new HashSet<Object>(results)) {
Nanoparticle particle = (Nanoparticle) obj;
ParticleBean particleBean = new ParticleBean(particle);
particles.add(particleBean);
}
} catch (Exception e) {
logger
.error("Problem finding particles with thet given search parameters ");
throw e;
} finally {
ida.close();
}
UserService userService = new UserService(
CaNanoLabConstants.CSM_APP_NAME);
List<ParticleBean> filteredParticles = userService
.getFilteredParticles(user, particles);
// sort the list by IDs
Collections.sort(filteredParticles,
new CaNanoLabComparators.SampleBeanComparator());
return filteredParticles;
}
/**
* Query nanoparticle general information such as name, type, keywords and
* visibilities.
*
* @param particleName
* @param particleType
* @return
* @throws Exception
*/
public ParticleBean getGeneralInfo(String particleName, String particleType)
throws Exception {
Nanoparticle particle = null;
IDataAccess ida = (new DataAccessProxy())
.getInstance(IDataAccess.HIBERNATE);
try {
ida.open();
// get the existing particle from database created during sample
// creation
List results = ida
.search("from Nanoparticle as particle left join fetch particle.keywordCollection where particle.name='"
+ particleName
+ "' and particle.type='"
+ particleType + "'");
for (Object obj : results) {
particle = (Nanoparticle) obj;
}
if (particle == null) {
throw new CalabException("No such particle in the database");
}
} catch (Exception e) {
logger.error("Problem finding particle with name: " + particleName);
throw e;
} finally {
ida.close();
}
ParticleBean particleBean = new ParticleBean(particle);
UserService userService = new UserService(
CaNanoLabConstants.CSM_APP_NAME);
List<String> accessibleGroups = userService.getAccessibleGroups(
particleName, CaNanoLabConstants.CSM_READ_ROLE);
String[] visibilityGroups = accessibleGroups.toArray(new String[0]);
particleBean.setVisibilityGroups(visibilityGroups);
return particleBean;
}
public List<CharacterizationBean> getCharacterizationInfo(
String particleName, String particleType) throws Exception {
List<CharacterizationBean> charBeans = new ArrayList<CharacterizationBean>();
IDataAccess ida = (new DataAccessProxy())
.getInstance(IDataAccess.HIBERNATE);
try {
ida.open();
List results = ida
.search("select chara.id, chara.name, chara.identificationName from Nanoparticle particle join particle.characterizationCollection chara where particle.name='"
+ particleName
+ "' and particle.type='"
+ particleType
+ "' order by chara.name, chara.identificationName");
for (Object obj : results) {
String charId = ((Object[]) obj)[0].toString();
String charName = (String) (((Object[]) obj)[1]);
String viewTitle = (String) (((Object[]) obj)[2]);
CharacterizationBean charBean = new CharacterizationBean(
charId, charName, viewTitle);
charBeans.add(charBean);
}
} catch (Exception e) {
logger.error("Problem finding characterization info for particle: "
+ particleName);
throw e;
} finally {
ida.close();
}
return charBeans;
}
public Characterization getCharacterizationBy(String charId)
throws Exception {
IDataAccess ida = (new DataAccessProxy())
.getInstance(IDataAccess.HIBERNATE);
Characterization aChar = null;
try {
ida.open();
List results = ida
.search(" from Characterization chara left join fetch chara.composingElementCollection left join fetch chara.derivedBioAssayDataCollection where chara.id="
+ charId);
for (Object obj : results) {
aChar = (Characterization) obj;
}
} catch (Exception e) {
logger.error("Problem finding characterization");
throw e;
} finally {
ida.close();
}
return aChar;
}
public Characterization getCharacterizationAndDerivedDataBy(String charId)
throws Exception {
IDataAccess ida = (new DataAccessProxy())
.getInstance(IDataAccess.HIBERNATE);
Characterization aChar = null;
try {
ida.open();
List results = ida
.search(" from Characterization chara left join fetch chara.derivedBioAssayDataCollection assayData"
+ " left join fetch assayData.datumCollection datum"
+ " where chara.id=" + charId);
for (Object obj : results) {
aChar = (Characterization) obj;
}
} catch (Exception e) {
logger.error("Problem finding characterization");
throw e;
} finally {
ida.close();
}
return aChar;
}
public Map<String, List<FunctionBean>> getFunctionInfo(String particleName,
String particleType) throws Exception {
Map<String, List<FunctionBean>> funcTypeFuncs = new HashMap<String, List<FunctionBean>>();
IDataAccess ida = (new DataAccessProxy())
.getInstance(IDataAccess.HIBERNATE);
try {
ida.open();
List results = ida
.search("select func.id, func.type, func.identificationName from Nanoparticle particle join particle.functionCollection func where particle.name='"
+ particleName
+ "' and particle.type='"
+ particleType + "'");
List<FunctionBean> funcs = new ArrayList<FunctionBean>();
for (Object obj : results) {
String funcId = ((Object[]) obj)[0].toString();
String funcType = ((Object[]) obj)[1].toString();
String viewTitle = (String) (((Object[]) obj)[2]);
FunctionBean funcBean = new FunctionBean(funcId, funcType,
viewTitle);
if (funcTypeFuncs.get(funcType) != null) {
funcs = (List<FunctionBean>) (funcTypeFuncs.get(funcType));
} else {
funcs = new ArrayList<FunctionBean>();
funcTypeFuncs.put(funcType, funcs);
}
funcs.add(funcBean);
}
} catch (Exception e) {
logger.error("Problem finding characterization info for particle: "
+ particleName);
throw e;
} finally {
ida.close();
}
return funcTypeFuncs;
}
public Function getFunctionBy(String funcId) throws Exception {
IDataAccess ida = (new DataAccessProxy())
.getInstance(IDataAccess.HIBERNATE);
Function func = null;
try {
ida.open();
List results = ida
.search(" from Function func left join fetch func.linkageCollection link left join fetch link.agent.agentTargetCollection where func.id="
+ funcId);
for (Object obj : results) {
func = (Function) obj;
}
} catch (Exception e) {
logger.error("Problem finding functions");
throw e;
} finally {
ida.close();
}
return func;
}
/**
* Avanced nanoparticle search based on more detailed meta data.
*
* @param particleType
* @param functionTypes
* @param searchCriteria
* @return
*/
public List<ParticleBean> advancedSearch(String particleType,
String[] functionTypes, List<SearchableBean> searchCriteria,
UserBean user) throws Exception {
IDataAccess ida = (new DataAccessProxy())
.getInstance(IDataAccess.HIBERNATE);
List<ParticleBean> particleList = null;
try {
// query by particle type and function types first
particleList = this.basicSearch(null, particleType, functionTypes,
null, null, null, null, null, user);
// return if no particles found or no other search criteria entered
if (searchCriteria.isEmpty() || particleList.isEmpty()) {
return particleList;
}
ida.open();
for (SearchableBean searchBean : searchCriteria) {
List<ParticleBean> theParticles = searchParticlesBy(ida,
searchBean);
particleList.retainAll(theParticles);
}
} catch (Exception e) {
logger.error("Problem finding particles.");
throw e;
} finally {
ida.close();
}
return particleList;
}
/**
* Return particles based on search criteria defined in SearchableBean. Used
* in the advanced search function.
*
* @param ida
* @param charInfo
* @return
* @throws Exception
*/
public List<ParticleBean> searchParticlesBy(IDataAccess ida,
SearchableBean charInfo) throws Exception {
List<ParticleBean> particles = new ArrayList<ParticleBean>();
// if no value range, don't query
if (charInfo.getLowValue().length() == 0
&& charInfo.getHighValue().length() == 0) {
return particles;
}
String hqlSelect = "select distinct particle from Nanoparticle particle "
+ "join particle.characterizationCollection char join char.derivedBioAssayDataCollection chart "
+ "join chart.datumCollection data ";
String hqlWhere = "where char.name=? and data.type=?";
List<Object> paramList = new ArrayList<Object>();
paramList.add(charInfo.getClassification());
paramList.add(charInfo.getType());
if (charInfo.getLowValue().length() > 0) {
hqlWhere += " and data.value.value>=?";
paramList.add(charInfo.getLowValue());
}
if (charInfo.getHighValue().length() > 0) {
hqlWhere += " and data.value.value<=?";
paramList.add(charInfo.getHighValue());
}
String hqlString = hqlSelect + hqlWhere;
List results = ida.searchByParam(hqlString, paramList);
for (Object obj : results) {
Nanoparticle particle = (Nanoparticle) obj;
ParticleBean particleBean = new ParticleBean(particle);
particles.add(particleBean);
}
return particles;
}
}
| updated imports
SVN-Revision: 3639
| src/gov/nih/nci/calab/service/search/SearchNanoparticleService.java | updated imports | <ide><path>rc/gov/nih/nci/calab/service/search/SearchNanoparticleService.java
<ide> import gov.nih.nci.calab.domain.nano.function.Function;
<ide> import gov.nih.nci.calab.domain.nano.particle.Nanoparticle;
<ide> import gov.nih.nci.calab.dto.characterization.CharacterizationBean;
<del>import gov.nih.nci.calab.dto.characterization.DerivedBioAssayDataBean;
<ide> import gov.nih.nci.calab.dto.common.SearchableBean;
<ide> import gov.nih.nci.calab.dto.common.UserBean;
<ide> import gov.nih.nci.calab.dto.function.FunctionBean;
<ide> import gov.nih.nci.calab.dto.particle.ParticleBean;
<ide> import gov.nih.nci.calab.exception.CalabException;
<del>import gov.nih.nci.calab.service.common.FileService;
<ide> import gov.nih.nci.calab.service.security.UserService;
<ide> import gov.nih.nci.calab.service.util.CaNanoLabComparators;
<ide> import gov.nih.nci.calab.service.util.CaNanoLabConstants;
<del>import gov.nih.nci.calab.service.util.PropertyReader;
<ide> import gov.nih.nci.calab.service.util.StringUtils;
<ide>
<del>import java.io.File;
<ide> import java.util.ArrayList;
<ide> import java.util.Collections;
<ide> import java.util.HashMap; |
|
JavaScript | mit | 9aed83683b662e6e009a5d59638ddd6ebb4c91a4 | 0 | looker-open-source/admin_power_pack,looker-open-source/admin_power_pack | /*
* The MIT License (MIT)
*
* Copyright (c) 2020 Looker Data Sciences, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
const path = require("path");
const PATHS = {
app: path.join(__dirname, "src/index.jsx"),
};
module.exports = {
entry: {
app: PATHS.app,
},
output: {
path: __dirname + "/dist",
filename: "looker_admin_power_pack.js",
},
mode: "production",
module: {
rules: [
{
test: /\.(js|jsx|ts|tsx)$/,
loader: "babel-loader",
exclude: /node_modules/,
include: /src/,
},
{
test: /\.css$/i,
use: ["style-loader", "css-loader"],
},
],
},
resolve: {
extensions: [".tsx", ".ts", ".jsx", ".js"],
},
};
| webpack.prod.config.js | /*
* The MIT License (MIT)
*
* Copyright (c) 2020 Looker Data Sciences, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
const path = require("path");
const PATHS = {
app: path.join(__dirname, "src/index.jsx"),
};
module.exports = {
entry: {
app: PATHS.app,
},
output: {
path: __dirname + "/dist",
filename: "bundle.js",
},
mode: "production",
module: {
rules: [
{
test: /\.(js|jsx|ts|tsx)$/,
loader: "babel-loader",
exclude: /node_modules/,
include: /src/,
},
{
test: /\.css$/i,
use: ["style-loader", "css-loader"],
},
],
},
resolve: {
extensions: [".tsx", ".ts", ".jsx", ".js"],
},
};
| change name of build artifact to be more obviously branded
| webpack.prod.config.js | change name of build artifact to be more obviously branded | <ide><path>ebpack.prod.config.js
<ide> },
<ide> output: {
<ide> path: __dirname + "/dist",
<del> filename: "bundle.js",
<add> filename: "looker_admin_power_pack.js",
<ide> },
<ide> mode: "production",
<ide> module: { |
|
Java | bsd-3-clause | 3937343f714932c80ee5d63059c11321a2e54865 | 0 | asamgir/openspecimen,krishagni/openspecimen,asamgir/openspecimen,asamgir/openspecimen,krishagni/openspecimen,krishagni/openspecimen | package edu.wustl.catissuecore.action;
import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.Map;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import javax.servlet.http.HttpSession;
import org.apache.struts.action.ActionForm;
import org.apache.struts.action.ActionForward;
import org.apache.struts.action.ActionMapping;
import edu.wustl.catissuecore.actionForm.ProtocolEventDetailsForm;
import edu.wustl.catissuecore.bean.CollectionProtocolEventBean;
import edu.wustl.catissuecore.util.global.Constants;
import edu.wustl.common.action.BaseAction;
import edu.wustl.common.util.dbManager.DAOException;
public class SaveProtocolEventDetailsAction extends BaseAction
{
public ActionForward executeAction(ActionMapping mapping, ActionForm form,
HttpServletRequest request, HttpServletResponse response) throws DAOException
{
ProtocolEventDetailsForm protocolEventDetailsForm = (ProtocolEventDetailsForm)form;
HttpSession session = request.getSession();
String pageOf = request.getParameter(Constants.PAGE_OF);
Map collectionProtocolEventMap = null;
CollectionProtocolEventBean collectionProtocolEventBean =null;
if(session.getAttribute(Constants.COLLECTION_PROTOCOL_EVENT_SESSION_MAP)!=null)
{
collectionProtocolEventMap = (LinkedHashMap)session.getAttribute(Constants.COLLECTION_PROTOCOL_EVENT_SESSION_MAP);
}
else
{
collectionProtocolEventMap = new LinkedHashMap();
}
if(protocolEventDetailsForm.getCollectionProtocolEventkey().equals(Constants.ADD_NEW_EVENT))
{
int eventmapSize = collectionProtocolEventMap.size();
while(collectionProtocolEventMap.containsKey(Constants.UNIQUE_IDENTIFIER_FOR_EVENTS+(eventmapSize)))
{
eventmapSize = eventmapSize + 1;
}
collectionProtocolEventBean = new CollectionProtocolEventBean();
if(eventmapSize == 0)
{
eventmapSize = eventmapSize + 1;
}
collectionProtocolEventBean.setUniqueIdentifier(Constants.UNIQUE_IDENTIFIER_FOR_EVENTS+(eventmapSize));
setCollectionProtocolBean(collectionProtocolEventBean,protocolEventDetailsForm);
collectionProtocolEventMap.put(collectionProtocolEventBean.getUniqueIdentifier(),collectionProtocolEventBean);
}
else
{
collectionProtocolEventBean = (CollectionProtocolEventBean)collectionProtocolEventMap.get(protocolEventDetailsForm.getCollectionProtocolEventkey());
setCollectionProtocolBean(collectionProtocolEventBean,protocolEventDetailsForm);
collectionProtocolEventMap.put(protocolEventDetailsForm.getCollectionProtocolEventkey(),collectionProtocolEventBean);
}
session.setAttribute(Constants.TREE_NODE_ID, protocolEventDetailsForm.getCollectionPointLabel()+"class_"+collectionProtocolEventBean.getUniqueIdentifier());
String listKey = collectionProtocolEventBean.getUniqueIdentifier();
session.setAttribute(Constants.NEW_EVENT_KEY, listKey);
//request.setAttribute("listKey", listKey);
session.setAttribute(Constants.COLLECTION_PROTOCOL_EVENT_SESSION_MAP, collectionProtocolEventMap);
return (mapping.findForward(pageOf));
}
private void setCollectionProtocolBean(CollectionProtocolEventBean collectionProtocolEventBean, ProtocolEventDetailsForm protocolEventDetailsForm)
{
collectionProtocolEventBean.setClinicalDiagnosis(protocolEventDetailsForm.getClinicalDiagnosis());
collectionProtocolEventBean.setClinicalStatus(protocolEventDetailsForm.getClinicalStatus());
collectionProtocolEventBean.setCollectionPointLabel(protocolEventDetailsForm.getCollectionPointLabel());
collectionProtocolEventBean.setStudyCalenderEventPoint(protocolEventDetailsForm.getStudyCalendarEventPoint());
collectionProtocolEventBean.setCollectedEventComments(protocolEventDetailsForm.getCollectionEventComments());
collectionProtocolEventBean.setCollectionContainer(protocolEventDetailsForm.getCollectionEventContainer());
collectionProtocolEventBean.setReceivedEventComments(protocolEventDetailsForm.getReceivedEventComments());
collectionProtocolEventBean.setReceivedQuality(protocolEventDetailsForm.getReceivedEventReceivedQuality());
collectionProtocolEventBean.setCollectionProcedure(protocolEventDetailsForm.getCollectionEventCollectionProcedure());
}
}
| WEB-INF/src/edu/wustl/catissuecore/action/SaveProtocolEventDetailsAction.java | package edu.wustl.catissuecore.action;
import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.Map;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import javax.servlet.http.HttpSession;
import org.apache.struts.action.ActionForm;
import org.apache.struts.action.ActionForward;
import org.apache.struts.action.ActionMapping;
import edu.wustl.catissuecore.actionForm.ProtocolEventDetailsForm;
import edu.wustl.catissuecore.bean.CollectionProtocolEventBean;
import edu.wustl.catissuecore.util.global.Constants;
import edu.wustl.common.action.BaseAction;
import edu.wustl.common.util.dbManager.DAOException;
public class SaveProtocolEventDetailsAction extends BaseAction
{
public ActionForward executeAction(ActionMapping mapping, ActionForm form,
HttpServletRequest request, HttpServletResponse response) throws DAOException
{
ProtocolEventDetailsForm protocolEventDetailsForm = (ProtocolEventDetailsForm)form;
HttpSession session = request.getSession();
String pageOf = request.getParameter(Constants.PAGE_OF);
Map collectionProtocolEventMap = null;
CollectionProtocolEventBean collectionProtocolEventBean =null;
if(session.getAttribute(Constants.COLLECTION_PROTOCOL_EVENT_SESSION_MAP)!=null)
{
collectionProtocolEventMap = (LinkedHashMap)session.getAttribute(Constants.COLLECTION_PROTOCOL_EVENT_SESSION_MAP);
}
else
{
collectionProtocolEventMap = new LinkedHashMap();
}
if(protocolEventDetailsForm.getCollectionProtocolEventkey().equals(Constants.ADD_NEW_EVENT))
{
int eventmapSize = collectionProtocolEventMap.size();
while(collectionProtocolEventMap.containsKey(Constants.UNIQUE_IDENTIFIER_FOR_EVENTS+(eventmapSize)))
{
eventmapSize = eventmapSize + 1;
}
collectionProtocolEventBean = new CollectionProtocolEventBean();
if(eventmapSize == 0)
{
eventmapSize = eventmapSize + 1;
}
collectionProtocolEventBean.setUniqueIdentifier(Constants.UNIQUE_IDENTIFIER_FOR_EVENTS+(eventmapSize));
setCollectionProtocolBean(collectionProtocolEventBean,protocolEventDetailsForm);
collectionProtocolEventMap.put(collectionProtocolEventBean.getUniqueIdentifier(),collectionProtocolEventBean);
}
else
{
collectionProtocolEventBean = (CollectionProtocolEventBean)collectionProtocolEventMap.get(protocolEventDetailsForm.getCollectionProtocolEventkey());
setCollectionProtocolBean(collectionProtocolEventBean,protocolEventDetailsForm);
collectionProtocolEventMap.put(protocolEventDetailsForm.getCollectionProtocolEventkey(),collectionProtocolEventBean);
session.setAttribute(Constants.TREE_NODE_ID, protocolEventDetailsForm.getCollectionPointLabel()+"class_"+collectionProtocolEventBean.getUniqueIdentifier());
}
String listKey = collectionProtocolEventBean.getUniqueIdentifier();
session.setAttribute(Constants.NEW_EVENT_KEY, listKey);
//request.setAttribute("listKey", listKey);
session.setAttribute(Constants.COLLECTION_PROTOCOL_EVENT_SESSION_MAP, collectionProtocolEventMap);
return (mapping.findForward(pageOf));
}
private void setCollectionProtocolBean(CollectionProtocolEventBean collectionProtocolEventBean, ProtocolEventDetailsForm protocolEventDetailsForm)
{
collectionProtocolEventBean.setClinicalDiagnosis(protocolEventDetailsForm.getClinicalDiagnosis());
collectionProtocolEventBean.setClinicalStatus(protocolEventDetailsForm.getClinicalStatus());
collectionProtocolEventBean.setCollectionPointLabel(protocolEventDetailsForm.getCollectionPointLabel());
collectionProtocolEventBean.setStudyCalenderEventPoint(protocolEventDetailsForm.getStudyCalendarEventPoint());
collectionProtocolEventBean.setCollectedEventComments(protocolEventDetailsForm.getCollectionEventComments());
collectionProtocolEventBean.setCollectionContainer(protocolEventDetailsForm.getCollectionEventContainer());
collectionProtocolEventBean.setReceivedEventComments(protocolEventDetailsForm.getReceivedEventComments());
collectionProtocolEventBean.setReceivedQuality(protocolEventDetailsForm.getReceivedEventReceivedQuality());
collectionProtocolEventBean.setCollectionProcedure(protocolEventDetailsForm.getCollectionEventCollectionProcedure());
}
}
| CP Edit
SVN-Revision: 14422
| WEB-INF/src/edu/wustl/catissuecore/action/SaveProtocolEventDetailsAction.java | CP Edit | <ide><path>EB-INF/src/edu/wustl/catissuecore/action/SaveProtocolEventDetailsAction.java
<ide> collectionProtocolEventBean = (CollectionProtocolEventBean)collectionProtocolEventMap.get(protocolEventDetailsForm.getCollectionProtocolEventkey());
<ide> setCollectionProtocolBean(collectionProtocolEventBean,protocolEventDetailsForm);
<ide> collectionProtocolEventMap.put(protocolEventDetailsForm.getCollectionProtocolEventkey(),collectionProtocolEventBean);
<del> session.setAttribute(Constants.TREE_NODE_ID, protocolEventDetailsForm.getCollectionPointLabel()+"class_"+collectionProtocolEventBean.getUniqueIdentifier());
<ide> }
<add> session.setAttribute(Constants.TREE_NODE_ID, protocolEventDetailsForm.getCollectionPointLabel()+"class_"+collectionProtocolEventBean.getUniqueIdentifier());
<ide> String listKey = collectionProtocolEventBean.getUniqueIdentifier();
<ide> session.setAttribute(Constants.NEW_EVENT_KEY, listKey);
<ide> //request.setAttribute("listKey", listKey); |
|
Java | bsd-3-clause | 79e59b8c95b73d18206c9ad4e9f02e8ac4a5b305 | 0 | NCIP/nci-metathesaurus-browser,NCIP/nci-metathesaurus-browser,NCIP/nci-metathesaurus-browser | package gov.nih.nci.evs.browser.utils;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.TreeMap;
import java.util.Vector;
import org.LexGrid.LexBIG.DataModel.Collections.AssociatedConceptList;
import org.LexGrid.LexBIG.DataModel.Collections.AssociationList;
import org.LexGrid.LexBIG.DataModel.Collections.LocalNameList;
import org.LexGrid.LexBIG.DataModel.Collections.ResolvedConceptReferenceList;
import org.LexGrid.LexBIG.DataModel.Collections.SortOptionList;
import org.LexGrid.LexBIG.DataModel.Core.AssociatedConcept;
import org.LexGrid.LexBIG.DataModel.Core.Association;
import org.LexGrid.LexBIG.DataModel.Core.CodingSchemeSummary;
import org.LexGrid.LexBIG.DataModel.Core.CodingSchemeVersionOrTag;
import org.LexGrid.LexBIG.DataModel.Core.ConceptReference;
import org.LexGrid.LexBIG.DataModel.Core.NameAndValue;
import org.LexGrid.LexBIG.DataModel.Core.ResolvedConceptReference;
import org.LexGrid.LexBIG.Exceptions.LBException;
import org.LexGrid.LexBIG.Extensions.Generic.LexBIGServiceConvenienceMethods;
import org.LexGrid.LexBIG.Impl.LexBIGServiceImpl;
import org.LexGrid.LexBIG.LexBIGService.CodedNodeGraph;
import org.LexGrid.LexBIG.LexBIGService.CodedNodeSet;
import org.LexGrid.LexBIG.LexBIGService.LexBIGService;
import org.LexGrid.LexBIG.LexBIGService.CodedNodeSet.PropertyType;
import org.LexGrid.LexBIG.Utility.Constructors;
import org.LexGrid.LexBIG.Utility.ConvenienceMethods;
import org.LexGrid.commonTypes.Property;
import org.LexGrid.commonTypes.PropertyQualifier;
import org.LexGrid.commonTypes.Source;
import org.LexGrid.concepts.Presentation;
import org.apache.commons.lang.StringUtils;
import org.LexGrid.commonTypes.EntityDescription;
import org.LexGrid.LexBIG.DataModel.Collections.NameAndValueList;
import org.LexGrid.concepts.Concept;
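/**
 * Utility methods for navigating the NCI MetaThesaurus as a tree: locating
 * per-source (SAB) root concepts, walking CHD/hasSubtype relationships, and
 * building hierarchy paths from a focus CUI back to the source root. All
 * queries go through a remote LexBIG/LexEVS service obtained from
 * RemoteServerUtil.
 */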
public class MetaTreeUtils {
static String[] hierAssocToParentNodes_ = new String[] { "PAR", "isa", "branch_of", "part_of", "tributary_of" };
static String[] hierAssocToChildNodes_ = new String[] { "CHD", "hasSubtype" };
static SortOptionList sortByCode_ = Constructors.createSortOptionList(new String[] {"code"});
LocalNameList noopList_ = Constructors.createLocalNameList("_noop_");
LexBIGServiceConvenienceMethods lbscm_ = null;
LexBIGService lbsvc_ = null;
private LexBIGService lbs;
private static String NCI_META_THESAURUS = "NCI MetaThesaurus";
private static String NCI_SOURCE = "NCI";
public MetaTreeUtils(){
init();
}
private void init(){
//lbs = LexBIGServiceImpl.defaultInstance();
lbs = RemoteServerUtil.createLexBIGService();
}
///////////////////
// Source Roots //
///////////////////
    /**
     * Finds and displays the root nodes of a given source abbreviation (SAB).
     *
     * @param sab the source abbreviation whose root concepts are displayed
     * @throws Exception
     */
public void getRoots(String sab) throws Exception {
ResolvedConceptReference root = resolveReferenceGraphForward(getCodingSchemeRoot(sab));
AssociationList assocList = root.getSourceOf();
for(Association assoc : assocList.getAssociation()){
for(AssociatedConcept ac : assoc.getAssociatedConcepts().getAssociatedConcept()){
if(this.isSabQualifiedAssociation(ac, sab)){
displayRoot(ac);
}
}
}
}
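    /**
     * Returns the root concepts of the given source abbreviation (SAB) as a
     * list of lightweight references carrying only the code and entity
     * description.
     */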
public ResolvedConceptReferenceList getSourceRoots(String sab) throws Exception {
ResolvedConceptReferenceList rcrl = new ResolvedConceptReferenceList();
ResolvedConceptReference root = resolveReferenceGraphForward(getCodingSchemeRoot(sab));
AssociationList assocList = root.getSourceOf();
for(Association assoc : assocList.getAssociation()){
for(AssociatedConcept ac : assoc.getAssociatedConcepts().getAssociatedConcept()){
if(this.isSabQualifiedAssociation(ac, sab)){
ResolvedConceptReference r = new ResolvedConceptReference();
EntityDescription entityDescription = new EntityDescription();
entityDescription.setContent(ac.getEntityDescription().getContent());
r.setEntityDescription(entityDescription);
r.setCode(ac.getCode());
rcrl.addResolvedConceptReference(r);
}
}
}
return rcrl;
}
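    // Minimal usage sketch (assumes a reachable LexEVS service and that the
    // named source, e.g. "NCI", is loaded in the NCI MetaThesaurus):
    //
    //   MetaTreeUtils utils = new MetaTreeUtils();
    //   ResolvedConceptReferenceList roots = utils.getSourceRoots("NCI");
    //   for (ResolvedConceptReference ref : roots.getResolvedConceptReference()) {
    //       System.out.println(ref.getCode() + " - "
    //           + ref.getEntityDescription().getContent());
    //   }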
/**
* Displays the root node.
*
* @param ac
*/
protected void displayRoot(AssociatedConcept ac){
System.out.println(ac.getCode() + " - " + ac.getEntityDescription().getContent());
}
/**
* Gets the UMLS root node of a given SAB.
*
* @param sab
* @return
* @throws LBException
*/
private ResolvedConceptReference getCodingSchemeRoot(String sab) throws LBException {
CodedNodeSet cns = lbs.getCodingSchemeConcepts(NCI_META_THESAURUS, null);
cns.restrictToProperties(null, new PropertyType[] {PropertyType.PRESENTATION}, Constructors.createLocalNameList("SRC"), null, Constructors.createNameAndValueList("source-code", "V-"+sab));
ResolvedConceptReference[] refs = cns.resolveToList(null, null, new PropertyType[] {PropertyType.PRESENTATION}, -1).getResolvedConceptReference();
if(refs.length > 1){
throw new LBException("Found more than one Root for SAB: " + sab);
}
if(refs.length == 0){
throw new LBException("Didn't find a Root for SAB: " + sab);
}
return refs[0];
}
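    // Note: UMLS models each source vocabulary with an "SRC" concept whose
    // source-code property is "V-" + SAB (for example "V-NCI"); the lookup in
    // getCodingSchemeRoot depends on that convention holding for every source
    // loaded into this Metathesaurus.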
/**
* Resolve the relationships of a ResolvedConceptReference forward one level.
*
* @param ref
* @return
* @throws Exception
*/
private ResolvedConceptReference resolveReferenceGraphForward(ResolvedConceptReference ref) throws Exception {
CodedNodeGraph cng = lbs.getNodeGraph(NCI_META_THESAURUS, null, null);
cng.restrictToAssociations(Constructors.createNameAndValueList(new String[]{"CHD", "hasSubtype"}), null);
ResolvedConceptReference[] refs = cng.resolveAsList(ref, true, false, 1, 1, null, null, null, -1).getResolvedConceptReference();
return refs[0];
}
/**
* Determines whether the given association to a concept is qualified
* by the given source abbreviation (SAB).
*
* @param ac the associated concept whose qualifiers are examined
* @param sab the source abbreviation
* @return true if a "Source" qualifier naming the SAB exists
*/
private boolean isSabQualifiedAssociation(AssociatedConcept ac, String sab){
NameAndValue[] nvl = ac.getAssociationQualifiers().getNameAndValue();
for(NameAndValue nv : nvl){
if(nv.getName().equals(sab) &&
nv.getContent().equals("Source")){
return true;
}
}
return false;
}
/////////////////////
// Tree
/////////////////////
private static void Util_displayMessage(String s) {
System.out.println(s);
}
private static void Util_displayAndLogError(String s, Exception e) {
System.out.println(s);
e.printStackTrace();
}
/**
* Process the provided code, constraining relationships
* to the given source abbreviation.
* @throws LBException
*/
public void run(String cui, String sab) throws LBException {
// Resolve the coding scheme.
/*
CodingSchemeSummary css = Util.promptForCodeSystem();
if (css == null)
return;
String scheme = css.getCodingSchemeURI();
*/
String scheme = "NCI MetaThesaurus";
CodingSchemeVersionOrTag csvt = new CodingSchemeVersionOrTag();
//csvt.setVersion(css.getRepresentsVersion());
// Resolve the requested concept.
ResolvedConceptReference rcr = resolveConcept(scheme, csvt, cui);
if (rcr == null) {
Util_displayMessage("Unable to resolve a concept for CUI = '" + cui + "'");
return;
}
// Print a header for the item being processed.
Util_displayMessage("============================================================");
Util_displayMessage("Concept Information");;
Util_displayMessage("============================================================");
printHeader(rcr, sab);
// Print the hierarchies for the requested SAB.
Util_displayMessage("");
Util_displayMessage("============================================================");
Util_displayMessage("Hierarchies applicable for CUI " + cui + " for SAB " + sab);
Util_displayMessage("============================================================");
TreeItem ti = new TreeItem("<Start>", "Start of Tree", null);
long ms = System.currentTimeMillis();
int pathsResolved = 0;
int maxLevel = -1;
try {
// Identify the set of all codes on path from root
// to the focus code ...
TreeItem[] pathsFromRoot = buildPathsToRoot(rcr, scheme, csvt, sab, maxLevel);
pathsResolved = pathsFromRoot.length;
for (TreeItem rootItem : pathsFromRoot)
ti.addChild("CHD", rootItem);
} finally {
System.out.println("Run time (milliseconds): " + (System.currentTimeMillis() - ms) + " to resolve "
+ pathsResolved + " paths from root.");
}
printTree(ti, cui, 0);
// Print the neighboring CUIs/AUIs for this SAB.
Util_displayMessage("");
Util_displayMessage("============================================================");
Util_displayMessage("Neighboring CUIs and AUIs for CUI " + cui + " for SAB " + sab);;
Util_displayMessage("============================================================");
printNeighborhood(scheme, csvt, rcr, sab);
}
public HashMap getTreePathData(String scheme, String version, String sab, String code) throws LBException {
if (sab == null) sab = NCI_SOURCE;
return getTreePathData(scheme, version, sab, code, -1);
}
public HashMap getTreePathData(String scheme, String version, String sab, String code, int maxLevel) throws LBException {
if (sab == null) sab = NCI_SOURCE;
LexBIGService lbsvc = RemoteServerUtil.createLexBIGService();
LexBIGServiceConvenienceMethods lbscm = (LexBIGServiceConvenienceMethods) lbsvc
.getGenericExtension("LexBIGServiceConvenienceMethods");
lbscm.setLexBIGService(lbsvc);
CodingSchemeVersionOrTag csvt = new CodingSchemeVersionOrTag();
if (version != null) csvt.setVersion(version);
return getTreePathData(lbsvc, lbscm, scheme, csvt, sab, code, maxLevel);
}
public HashMap getTreePathData(LexBIGService lbsvc, LexBIGServiceConvenienceMethods lbscm, String scheme,
CodingSchemeVersionOrTag csvt, String sab, String focusCode) throws LBException {
if (sab == null) sab = NCI_SOURCE;
return getTreePathData(lbsvc, lbscm, scheme, csvt, sab, focusCode, -1);
}
public HashMap getTreePathData(LexBIGService lbsvc, LexBIGServiceConvenienceMethods lbscm, String scheme,
CodingSchemeVersionOrTag csvt, String sab, String cui, int maxLevel) throws LBException {
if (sab == null) sab = NCI_SOURCE;
HashMap hmap = new HashMap();
long ms = System.currentTimeMillis();
ResolvedConceptReference rcr = resolveConcept(scheme, csvt, cui);
if (rcr == null) {
Util_displayMessage("Unable to resolve a concept for CUI = '" + cui + "'");
return null;
}
// Dummy root (place holder)
TreeItem ti = new TreeItem("<Root>", "Root node", null);
int pathsResolved = 0;
try {
// Identify the set of all codes on path from root
// to the focus code ...
TreeItem[] pathsFromRoot = buildPathsToRoot(rcr, scheme, csvt, sab, maxLevel);
pathsResolved = pathsFromRoot.length;
for (TreeItem rootItem : pathsFromRoot) {
ti.addChild("CHD", rootItem);
}
} finally {
System.out.println("MetaTreeUtils Run time (milliseconds): " + (System.currentTimeMillis() - ms) + " to resolve "
+ pathsResolved + " paths from root.");
}
hmap.put(cui, ti);
return hmap;
}
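/**
* Illustrative usage sketch (added for clarity; not part of the original
* API). Resolves the paths from root for an assumed example CUI and
* renders them with printTree.
*/
private void printTreePathExample() throws LBException {
String cui = "C0007581"; // assumed example CUI (Cell Aging)
HashMap hmap = getTreePathData(NCI_META_THESAURUS, null, NCI_SOURCE, cui);
if (hmap == null) return;
printTree((TreeItem) hmap.get(cui), cui, 0);
}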
/**
* Prints formatted text providing context for
* the given item including CUI, SAB, AUI, and Text.
* @throws LBException
*/
protected void printHeader(ResolvedConceptReference rcr, String sab)
throws LBException {
Util_displayMessage("CUI ....... : " + rcr.getConceptCode());
Util_displayMessage("Description : " + StringUtils.abbreviate(rcr.getEntityDescription().getContent(), 60));
Util_displayMessage("SAB ....... : " + sab);
Util_displayMessage("");
Util_displayMessage("AUIs with this CUI associated for this SAB :");
for (String line : getAtomText(rcr, sab).split("\\|"))
Util_displayMessage(" {" + line + '}');
}
/**
* Prints the given tree item, recursing through all branches.
*
* @param ti
*/
public void printTree(TreeItem ti, String focusCode, int depth) {
StringBuffer indent = new StringBuffer();
for (int i = 0; i < depth * 2; i++)
indent.append("| ");
StringBuffer codeAndText = new StringBuffer(indent)
.append(focusCode.equals(ti.code) ? ">" : " ")
.append(ti.code).append(':')
.append(StringUtils.abbreviate(ti.text, 60))
.append(ti.expandable ? " [+]" : "");
if (ti.auis != null)
for (String line : ti.auis.split("\\|"))
codeAndText.append('\n').append(indent)
.append(" {")
.append(StringUtils.abbreviate(line, 60))
.append('}');
Util_displayMessage(codeAndText.toString());
indent.append("| ");
for (String association : ti.assocToChildMap.keySet()) {
Util_displayMessage(indent.toString() + association);
List<TreeItem> children = ti.assocToChildMap.get(association);
Collections.sort(children);
for (TreeItem childItem : children)
printTree(childItem, focusCode, depth + 1);
}
}
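// For reference, printTree renders one node per line as "code:text [+]",
// where '>' marks the focus code, "[+]" marks expandable nodes, and
// "| " is repeated per depth level; association names appear on their
// own line above their children. Illustrative shape only (the child
// code below is made up):
//
// >C0007581:Cell Aging [+]
// |   CHD
// |   |    C9999999:Example Child [+]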
/**
* Prints formatted text with the CUIs and AUIs of
* neighboring concepts for the requested SAB.
* @throws LBException
*/
protected void printNeighborhood(String scheme, CodingSchemeVersionOrTag csvt,
ResolvedConceptReference rcr, String sab)
throws LBException {
// Resolve neighboring concepts with associations
// qualified by the SAB.
CodedNodeGraph neighborsBySource = getLexBIGService().getNodeGraph(scheme, csvt, null);
neighborsBySource.restrictToAssociations(null, Constructors.createNameAndValueList(sab, "Source"));
ResolvedConceptReferenceList nodes = neighborsBySource.resolveAsList(
rcr, true, true, Integer.MAX_VALUE, 1,
null, new PropertyType[] { PropertyType.PRESENTATION },
sortByCode_, null, -1);
List<AssociatedConcept> neighbors = new ArrayList<AssociatedConcept>();
for (ResolvedConceptReference node : nodes.getResolvedConceptReference()) {
// Process sources and targets ...
if (node.getSourceOf() != null)
for (Association assoc : node.getSourceOf().getAssociation())
for (AssociatedConcept ac : assoc.getAssociatedConcepts().getAssociatedConcept())
if (isValidForSAB(ac, sab))
neighbors.add(ac);
if (node.getTargetOf() != null)
for (Association assoc : node.getTargetOf().getAssociation())
for (AssociatedConcept ac : assoc.getAssociatedConcepts().getAssociatedConcept())
if (isValidForSAB(ac, sab))
neighbors.add(ac);
}
// Print once, after all nodes have been processed; printing inside
// the node loop would repeat previously collected neighbors.
for (ResolvedConceptReference neighbor : neighbors) {
Util_displayMessage(neighbor.getCode() + ':' +
StringUtils.abbreviate(neighbor.getEntityDescription().getContent(), 60));
for (String line : getAtomText(neighbor, sab).split("\\|"))
Util_displayMessage(" {" + StringUtils.abbreviate(line, 60) + '}');
}
}
/**
* Populates child nodes for a single branch of the tree and indicates
* whether further expansion (to grandchildren) is possible.
*/
protected void addChildren(TreeItem ti, String scheme, CodingSchemeVersionOrTag csvt,
String sab, String branchRootCode, Set<String> codesToExclude,
String[] associationsToNavigate, boolean associationsNavigatedFwd) throws LBException {
LexBIGService lbsvc = getLexBIGService();
// Resolve the next branch, representing children of the given
// code, navigated according to the provided relationship and
// direction. Resolve the children as a code graph, looking 2
// levels deep but leaving the final level unresolved.
CodedNodeGraph cng = lbsvc.getNodeGraph(scheme, csvt, null);
ConceptReference focus = Constructors.createConceptReference(branchRootCode, scheme);
cng = cng.restrictToAssociations(
Constructors.createNameAndValueList(associationsToNavigate),
ConvenienceMethods.createNameAndValueList(sab, "Source"));
ResolvedConceptReferenceList branch = cng.resolveAsList(
focus, associationsNavigatedFwd, !associationsNavigatedFwd,
Integer.MAX_VALUE, 2,
null, new PropertyType[] { PropertyType.PRESENTATION },
sortByCode_, null, -1, true);
// The resolved branch will be represented by the first node in
// the resolved list. The node will be subdivided by source or
// target associations (depending on direction). The associated
// nodes define the children.
for (ResolvedConceptReference node : branch.getResolvedConceptReference()) {
AssociationList childAssociationList = associationsNavigatedFwd ? node.getSourceOf() : node.getTargetOf();
// Process each association defining children ...
for (Association child : childAssociationList.getAssociation()) {
String childNavText = getDirectionalLabel(scheme, csvt, child, associationsNavigatedFwd);
// Each association may have multiple children ...
AssociatedConceptList branchItemList = child.getAssociatedConcepts();
for (AssociatedConcept branchItemNode : branchItemList.getAssociatedConcept())
if (isValidForSAB(branchItemNode, sab)) {
String branchItemCode = branchItemNode.getCode();
// Add here if not in the list of excluded codes.
// This is also where we look to see if another level
// was indicated to be available. If so, mark the
// entry with a '+' to indicate it can be expanded.
if (!codesToExclude.contains(branchItemCode)) {
TreeItem childItem =
new TreeItem(branchItemCode,
branchItemNode.getEntityDescription().getContent(),
getAtomText(branchItemNode, sab));
AssociationList grandchildBranch =
associationsNavigatedFwd ? branchItemNode.getSourceOf()
: branchItemNode.getTargetOf();
/*
if (grandchildBranch != null) {
childItem.expandable = true;
}
*/
if (grandchildBranch != null) {
for (Association grandchild : grandchildBranch.getAssociation()) {
java.lang.String association_name = grandchild.getAssociationName();
//System.out.println("association_name: " + association_name);
//String grandchildNavText = getDirectionalLabel(lbscm, scheme, csvt, child, associationsNavigatedFwd);
// Each association may have multiple children ...
AssociatedConceptList grandchildbranchItemList = grandchild.getAssociatedConcepts();
for (AssociatedConcept grandchildbranchItemNode : grandchildbranchItemList.getAssociatedConcept()) {
//System.out.println("\tgrandchildbranchItemNode AssociatedConcept: " + grandchildbranchItemNode.getConceptCode());
if (isValidForSAB(grandchildbranchItemNode, sab)) {
childItem.expandable = true;
break;
}
}
}
}
ti.addChild(childNavText, childItem);
}
}
}
}
}
/**
* Returns a resolved concept for the specified code and
* scheme.
* @throws LBException
*/
protected ResolvedConceptReference resolveConcept(String scheme,
CodingSchemeVersionOrTag csvt, String code)
throws LBException {
CodedNodeSet cns = getLexBIGService().getCodingSchemeConcepts(scheme, csvt);
cns.restrictToMatchingProperties(ConvenienceMethods.createLocalNameList("conceptCode"),
null, code, "exactMatch", null);
ResolvedConceptReferenceList cnsList = cns.resolveToList(
null, null, new PropertyType[] { PropertyType.PRESENTATION },
1);
return (cnsList.getResolvedConceptReferenceCount() == 0) ? null
: cnsList.getResolvedConceptReference(0);
}
/**
* Returns a cached instance of a LexBIG service.
*/
protected LexBIGService getLexBIGService() throws LBException {
if (lbsvc_ == null)
//lbsvc_ = LexBIGServiceImpl.defaultInstance();
lbsvc_ = RemoteServerUtil.createLexBIGService();
return lbsvc_;
}
/**
* Returns a cached instance of convenience methods.
*/
protected LexBIGServiceConvenienceMethods getConvenienceMethods() throws LBException {
if (lbscm_ == null)
lbscm_ = (LexBIGServiceConvenienceMethods)
getLexBIGService().getGenericExtension("LexBIGServiceConvenienceMethods");
lbscm_.setLexBIGService(lbsvc_);
return lbscm_;
}
/**
* Returns the label to display for the given association and directional
* indicator.
*/
protected String getDirectionalLabel(LexBIGServiceConvenienceMethods lbscm, String scheme, CodingSchemeVersionOrTag csvt,
Association assoc, boolean navigatedFwd) throws LBException {
String assocLabel = navigatedFwd ? lbscm.getAssociationForwardName(assoc.getAssociationName(), scheme, csvt)
: lbscm.getAssociationReverseName(assoc.getAssociationName(), scheme, csvt);
if (StringUtils.isBlank(assocLabel))
assocLabel = (navigatedFwd ? "" : "[Inverse]") + assoc.getAssociationName();
return assocLabel;
}
protected String getDirectionalLabel(String scheme, CodingSchemeVersionOrTag csvt,
Association assoc, boolean navigatedFwd) throws LBException {
// Use the cached service and convenience methods rather than creating
// a new remote connection on every call.
LexBIGServiceConvenienceMethods lbscm = getConvenienceMethods();
String assocLabel = navigatedFwd ? lbscm.getAssociationForwardName(assoc.getAssociationName(), scheme, csvt)
: lbscm.getAssociationReverseName(assoc.getAssociationName(), scheme, csvt);
if (StringUtils.isBlank(assocLabel))
assocLabel = (navigatedFwd ? "" : "[Inverse]") + assoc.getAssociationName();
return assocLabel;
}
/**
* Returns a string representing the AUIs and
* text presentations applicable only for the
* given source abbreviation (SAB). All AUI
* text combinations are qualified by SAB and
* delimited by '|'.
*/
protected String getAtomText(ResolvedConceptReference rcr, String sab) {
StringBuffer text = new StringBuffer();
boolean first = true;
for (Presentation p : getSourcePresentations(rcr, sab)) {
if (!first)
text.append('|');
text.append(sab).append(':')
.append(getAtomText(p)).append(':')
.append('\'')
.append(p.getValue().getContent())
.append('\'');
first = false;
}
return
text.length() > 0 ? text.toString()
: "<No Match for SAB>";
}
/**
* Returns text for AUI qualifiers for the given property.
* This method iterates through available property qualifiers.
* Typically only one AUI is expected. If more are
* discovered, returned values are delimited by '|'.
*/
protected String getAtomText(Property prop) {
StringBuffer text = new StringBuffer();
boolean first = true;
for (PropertyQualifier pq : prop.getPropertyQualifier())
if ("AUI".equalsIgnoreCase(pq.getPropertyQualifierName())) {
if (!first)
text.append('|');
text.append(pq.getValue().getContent());
first = false;
}
return
text.length() > 0 ? text.toString()
: "<No AUI>";
}
/**
* Returns all assigned presentations matching the given
* source abbreviation (SAB). This method iterates through the
* available presentations to find any qualified to match
* the specified source.
*/
protected Presentation[] getSourcePresentations(ResolvedConceptReference rcr, String sab) {
// Ensure the associated entity was resolved, and look at each
// assigned presentation for a matching source qualifier.
List<Presentation> matches = new ArrayList<Presentation>();
if (rcr.getEntity() != null)
for (Presentation p : rcr.getEntity().getPresentation())
for (Source src : p.getSource())
if (sab.equalsIgnoreCase(src.getContent()))
matches.add(p);
return matches.toArray(new Presentation[matches.size()]);
}
/**
* Indicates whether the given associated concept contains
* a qualifier for the given source abbreviation (SAB).
* @return true if a qualifier exists; false otherwise.
*/
protected boolean isValidForSAB(AssociatedConcept ac, String sab) {
for (NameAndValue qualifier : ac.getAssociationQualifiers().getNameAndValue())
if ("Source".equalsIgnoreCase(qualifier.getContent())
&& sab.equalsIgnoreCase(qualifier.getName()))
return true;
return false;
}
////////////////////////
public HashMap getSubconcepts(String scheme, String version, String code, String sab)
{
return getSubconcepts(scheme, version, code, sab, true);
}
public HashMap getSubconcepts(String scheme, String version, String code, String sab, boolean associationsNavigatedFwd)
{
HashMap hmap = new HashMap();
TreeItem ti = null;
long ms = System.currentTimeMillis();
Set<String> codesToExclude = Collections.<String>emptySet();
// Always restrict to child-direction associations; the
// associationsNavigatedFwd parameter controls traversal direction.
String[] associationsToNavigate = hierAssocToChildNodes_;
CodingSchemeVersionOrTag csvt = new CodingSchemeVersionOrTag();
if (version != null) csvt.setVersion(version);
try {
LexBIGService lbsvc = RemoteServerUtil.createLexBIGService();
LexBIGServiceConvenienceMethods lbscm = (LexBIGServiceConvenienceMethods) lbsvc
.getGenericExtension("LexBIGServiceConvenienceMethods");
lbscm.setLexBIGService(lbsvc);
String name = getCodeDescription(lbsvc, scheme, csvt, code);
ti = new TreeItem(code, name);
ti.expandable = false;
// Resolve the next branch, representing children of the given
// code, navigated according to the provided relationship and
// direction. Resolve the children as a code graph, looking 2
// levels deep but leaving the final level unresolved.
CodedNodeGraph cng = lbsvc.getNodeGraph(scheme, csvt, null);
ConceptReference focus = Constructors.createConceptReference(code, scheme);
cng = cng.restrictToAssociations(
Constructors.createNameAndValueList(associationsToNavigate),
ConvenienceMethods.createNameAndValueList(sab, "Source"));
ResolvedConceptReferenceList branch = cng.resolveAsList(
focus, associationsNavigatedFwd, !associationsNavigatedFwd,
Integer.MAX_VALUE, 2,
null, new PropertyType[] { PropertyType.PRESENTATION },
sortByCode_, null, -1, true);
// The resolved branch will be represented by the first node in
// the resolved list. The node will be subdivided by source or
// target associations (depending on direction). The associated
// nodes define the children.
for (ResolvedConceptReference node : branch.getResolvedConceptReference()) {
AssociationList childAssociationList = associationsNavigatedFwd ? node.getSourceOf() : node.getTargetOf();
// Process each association defining children ...
for (Association child : childAssociationList.getAssociation()) {
String childNavText = getDirectionalLabel(lbscm, scheme, csvt, child, associationsNavigatedFwd);
// Each association may have multiple children ...
AssociatedConceptList branchItemList = child.getAssociatedConcepts();
for (AssociatedConcept branchItemNode : branchItemList.getAssociatedConcept()) {
if (isValidForSAB(branchItemNode, sab)) {
String branchItemCode = branchItemNode.getCode();
// Add here if not in the list of excluded codes.
// This is also where we look to see if another level
// was indicated to be available. If so, mark the
// entry with a '+' to indicate it can be expanded.
if (!codesToExclude.contains(branchItemCode)) {
ti.expandable = true;
TreeItem childItem =
new TreeItem(branchItemCode,
branchItemNode.getEntityDescription().getContent());
AssociationList grandchildBranch =
associationsNavigatedFwd ? branchItemNode.getSourceOf()
: branchItemNode.getTargetOf();
if (grandchildBranch != null)
childItem.expandable = true;
ti.addChild(childNavText, childItem);
}
}
}
}
}
hmap.put(code, ti);
} catch (Exception ex) {
ex.printStackTrace();
}
System.out.println("Run time (milliseconds) getSubconcepts: " + (System.currentTimeMillis() - ms) + " to resolve " );
return hmap;
}
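/**
* Illustrative usage sketch (added for clarity; not part of the original
* API). Fetches the immediate subconcepts of an assumed example CUI and
* dumps the resulting tree items.
*/
private void dumpSubconceptsExample() {
HashMap hmap = getSubconcepts(NCI_META_THESAURUS, null, "C0007581", NCI_SOURCE);
dumpTreeItems(hmap);
}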
///////////////////////////////////////////////////////
// Helper Methods
///////////////////////////////////////////////////////
/**
* Returns the entity description for the given code.
*/
protected String getCodeDescription(LexBIGService lbsvc, String scheme, CodingSchemeVersionOrTag csvt, String code)
throws LBException {
CodedNodeSet cns = lbsvc.getCodingSchemeConcepts(scheme, csvt);
cns = cns.restrictToCodes(Constructors.createConceptReferenceList(code, scheme));
ResolvedConceptReferenceList rcrl = cns.resolveToList(null, noopList_, null, 1);
if (rcrl.getResolvedConceptReferenceCount() > 0) {
EntityDescription desc = rcrl.getResolvedConceptReference(0).getEntityDescription();
if (desc != null)
return desc.getContent();
}
return "<Not assigned>";
}
/**
* Returns the entity description for the given resolved concept reference.
*/
protected String getCodeDescription(ResolvedConceptReference ref) throws LBException {
EntityDescription desc = ref.getEntityDescription();
if (desc != null)
return desc.getContent();
return "<Not assigned>";
}
public List getTopNodes(TreeItem ti) {
List list = new ArrayList();
getTopNodes(ti, list, 0, 1);
return list;
}
public void getTopNodes(TreeItem ti, List list, int currLevel, int maxLevel) {
if (list == null) list = new ArrayList();
if (currLevel > maxLevel) return;
if (ti.assocToChildMap.keySet().size() > 0) {
if (ti.text.compareTo("Root node") != 0)
{
ResolvedConceptReference rcr = new ResolvedConceptReference();
rcr.setConceptCode(ti.code);
EntityDescription entityDescription = new EntityDescription();
entityDescription.setContent(ti.text);
rcr.setEntityDescription(entityDescription);
list.add(rcr);
}
}
for (String association : ti.assocToChildMap.keySet()) {
List<TreeItem> children = ti.assocToChildMap.get(association);
Collections.sort(children);
for (TreeItem childItem : children) {
getTopNodes(childItem, list, currLevel+1, maxLevel);
}
}
}
public static void dumpTreeItems(HashMap hmap) {
try {
Set keyset = hmap.keySet();
Object[] objs = keyset.toArray();
String code = (String) objs[0];
TreeItem ti = (TreeItem) hmap.get(code);
for (String association : ti.assocToChildMap.keySet()) {
System.out.println("\nassociation: " + association);
List<TreeItem> children = ti.assocToChildMap.get(association);
for (TreeItem childItem : children) {
System.out.println(childItem.text + "(" + childItem.code + ")");
if (childItem.expandable)
{
System.out.println("\tnode.expandable");
} else {
System.out.println("\tnode.NOT expandable");
}
}
}
} catch (Exception e) {
e.printStackTrace();
}
}
public void run(String scheme, String version, String code) {
CodingSchemeVersionOrTag csvt = new CodingSchemeVersionOrTag();
ResolvedConceptReference rcr = null;
try {
rcr = resolveConcept(scheme, csvt, code);
} catch (Exception ex) {
ex.printStackTrace();
}
if (rcr == null) {
Util_displayMessage("Unable to resolve a concept for CUI = '" + code + "'");
System.exit(1);
}
String name = null;
try {
name = getCodeDescription(rcr);
} catch (Exception ex) {
name = "Unknown";
}
System.out.println("Coding scheme: " + scheme);
System.out.println("code: " + code);
System.out.println("name: " + name);
String sab = "NCI";
//boolean associationsNavigatedFwd = true;
Long startTime = System.currentTimeMillis();
HashMap hmap1 = getSubconcepts(scheme, version, code, sab, true);
System.out.println("Call getSubconcepts true took: " + (System.currentTimeMillis() - startTime) + "ms");
dumpTreeItems(hmap1);
startTime = System.currentTimeMillis();
HashMap hmap2 = getSubconcepts(scheme, version, code, sab, false);
System.out.println("Call getSubconcepts false took: " + (System.currentTimeMillis() - startTime) + "ms");
dumpTreeItems(hmap2);
}
protected String getDisplayRef(ResolvedConceptReference ref){
return "[" + ref.getEntityDescription().getContent() + "(" + ref.getConceptCode() + ")]";
}
public HashMap getSubconcepts(String scheme, String version, String code, String sab, String asso_name, boolean associationsNavigatedFwd) {
HashSet hset = new HashSet();
HashMap hmap = new HashMap();
TreeItem ti = null;
long ms = System.currentTimeMillis();
Set<String> codesToExclude = Collections.<String>emptySet();
CodingSchemeVersionOrTag csvt = new CodingSchemeVersionOrTag();
try {
LexBIGService lbSvc = RemoteServerUtil.createLexBIGService();
LexBIGServiceConvenienceMethods lbscm = (LexBIGServiceConvenienceMethods) lbSvc
.getGenericExtension("LexBIGServiceConvenienceMethods");
lbscm.setLexBIGService(lbSvc);
String name = getCodeDescription(lbSvc, scheme, csvt, code);
ti = new TreeItem(code, name);
ti.expandable = false;
CodedNodeGraph cng = null;
ResolvedConceptReferenceList branch = null;
cng = lbSvc.getNodeGraph(scheme, null, null);
NameAndValueList nvl = null;
if (sab != null) nvl = ConvenienceMethods.createNameAndValueList(sab, "Source");
cng = cng.restrictToAssociations(Constructors.createNameAndValueList(new String[]{asso_name}), nvl);
branch = cng.resolveAsList(Constructors.createConceptReference(code, scheme),
associationsNavigatedFwd, !associationsNavigatedFwd,
Integer.MAX_VALUE, 2,
null, new PropertyType[] { PropertyType.PRESENTATION },
null, null, -1);
for (ResolvedConceptReference node : branch.getResolvedConceptReference()) {
AssociationList childAssociationList =
associationsNavigatedFwd ? node.getSourceOf()
: node.getTargetOf();
// Process each association defining children ...
for (Association child : childAssociationList.getAssociation()) {
String childNavText = getDirectionalLabel(lbscm, scheme, csvt, child, associationsNavigatedFwd);
// Each association may have multiple children ...
AssociatedConceptList branchItemList = child.getAssociatedConcepts();
for (AssociatedConcept branchItemNode : branchItemList.getAssociatedConcept()) {
//System.out.println("AssociatedConcept: " + branchItemNode.getConceptCode());
if (isValidForSAB(branchItemNode, sab)) {
String branchItemCode = branchItemNode.getCode();
// Add here if not in the list of excluded codes.
// This is also where we look to see if another level
// was indicated to be available. If so, mark the
// entry with a '+' to indicate it can be expanded.
if (!codesToExclude.contains(branchItemCode)) {
if (!hset.contains(branchItemCode)) {
hset.add(branchItemCode);
TreeItem childItem =
new TreeItem(branchItemCode, branchItemNode.getEntityDescription().getContent());
childItem.expandable = false;
AssociationList grandchildBranch =
associationsNavigatedFwd ? branchItemNode.getSourceOf()
: branchItemNode.getTargetOf();
if (grandchildBranch != null) {
for (Association grandchild : grandchildBranch.getAssociation()) {
java.lang.String association_name = grandchild.getAssociationName();
//System.out.println("association_name: " + association_name);
//String grandchildNavText = getDirectionalLabel(lbscm, scheme, csvt, child, associationsNavigatedFwd);
// Each association may have multiple children ...
AssociatedConceptList grandchildbranchItemList = grandchild.getAssociatedConcepts();
for (AssociatedConcept grandchildbranchItemNode : grandchildbranchItemList.getAssociatedConcept()) {
//System.out.println("\tgrandchildbranchItemNode AssociatedConcept: " + grandchildbranchItemNode.getConceptCode());
if (isValidForSAB(grandchildbranchItemNode, sab)) {
childItem.expandable = true;
break;
}
}
}
}
ti.addChild(childNavText, childItem);
ti.expandable = true;
}
}
}
}
}
}
//System.out.println("\t*** Is " + ti.text + "( " + ti.code + ") expandable?: " + ti.expandable);
hmap.put(code, ti);
} catch (Exception ex) {
ex.printStackTrace();
}
System.out.println("Run time (milliseconds) getSubconcepts: " + (System.currentTimeMillis() - ms) + " to resolve " );
return hmap;
}
protected String getAssociationSourceString(AssociatedConcept ac){
String sources = "";
NameAndValue[] nvl = ac.getAssociationQualifiers().getNameAndValue();
int knt = 0;
for (int i=0; i<nvl.length; i++) {
NameAndValue nv = nvl[i];
if (nv.getContent().compareToIgnoreCase("Source") == 0) {
knt++;
if (knt == 1) {
sources = sources + nv.getName();
} else {
sources = sources + " ;" + nv.getName();
}
}
}
return sources;
}
protected Vector getAssociationSources(AssociatedConcept ac){
Vector sources = new Vector();
NameAndValue[] nvl = ac.getAssociationQualifiers().getNameAndValue();
for (int i=0; i<nvl.length; i++) {
NameAndValue nv = nvl[i];
if (nv.getContent().compareToIgnoreCase("Source") == 0) {
sources.add(nv.getName());
}
}
return sources;
}
/**
* Build and returns tree items that represent the root
* and core concepts of resolved paths for printing.
* @throws LBException
*/
protected TreeItem[] buildPathsToRoot(ResolvedConceptReference rcr,
String scheme, CodingSchemeVersionOrTag csvt,
String sab, int maxLevel) throws LBException {
// Create a starting point for tree building.
TreeItem ti =
new TreeItem(rcr.getCode(), rcr.getEntityDescription().getContent(),
getAtomText(rcr, sab));
// Maintain root tree items.
Set<TreeItem> rootItems = new HashSet<TreeItem>();
Set<String> visited_links = new HashSet<String>();
// Natural flow of hierarchy relations moves forward
// from tree root to leaves. Build the paths to root here
// by processing upstream (child to parent) relationships.
buildPathsToUpperNodes(
ti, rcr, scheme, csvt, sab,
new HashMap<String, TreeItem>(),
rootItems, visited_links, maxLevel, 0);
// Return root items discovered during child to parent
// processing.
return rootItems.toArray(new TreeItem[rootItems.size()]);
}
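/**
* Illustrative usage sketch (added for clarity; not part of the original
* API). Builds the paths to root for an assumed example CUI and lists
* the roots that were discovered.
*/
private void listRootsExample() throws LBException {
CodingSchemeVersionOrTag csvt = new CodingSchemeVersionOrTag();
ResolvedConceptReference rcr = resolveConcept(NCI_META_THESAURUS, csvt, "C0007581");
if (rcr == null) return;
for (TreeItem root : buildPathsToRoot(rcr, NCI_META_THESAURUS, csvt, NCI_SOURCE, -1))
System.out.println("root: " + root.code + " - " + root.text);
}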
protected boolean hasChildren(TreeItem tiParent, String code) {
if (tiParent == null) return false;
if (tiParent.assocToChildMap == null) return false;
for (String association : tiParent.assocToChildMap.keySet()) {
List<TreeItem> children = tiParent.assocToChildMap.get(association);
for (int i=0; i<children.size(); i++) {
TreeItem childItem = (TreeItem) children.get(i);
if (childItem.code.compareTo(code) == 0) return true;
}
}
return false;
}
/**
* Add all hierarchical paths to root that start from the
* referenced concept and move backward in the tree. If
* the natural flow of relations is thought of moving from tree
* root to leaves, this method processes nodes in the
* reverse direction (from child to parent).
* @throws LBException
*/
protected void buildPathsToUpperNodes(TreeItem ti, ResolvedConceptReference rcr,
String scheme, CodingSchemeVersionOrTag csvt,
String sab, Map<String, TreeItem> code2Tree,
Set<TreeItem> roots, Set<String> visited_links, int maxLevel, int currLevel)
throws LBException {
if (maxLevel != -1 && currLevel > maxLevel) {
return;
}
// Only need to process a code once ...
if (code2Tree.containsKey(rcr.getCode()))
return;
// Cache for future reference.
code2Tree.put(rcr.getCode(), ti);
// UMLS relations can be defined with forward direction
// being parent to child or child to parent on a source
// by source basis. Iterate twice to ensure completeness;
// once navigating child to parent relations forward
// and once navigating parent to child relations
// backward. Both have the net effect of navigating
// from the bottom of the hierarchy to the top.
boolean isRoot = true;
for (int i = 0; i <= 1; i++) {
boolean fwd = i < 1;
String[] upstreamAssoc = fwd ? hierAssocToParentNodes_ : hierAssocToChildNodes_;
// Define a code graph for all relationships tagged with
// the specified sab.
CodedNodeGraph graph = getLexBIGService().getNodeGraph(scheme, csvt, null);
graph.restrictToAssociations(
ConvenienceMethods.createNameAndValueList(upstreamAssoc),
ConvenienceMethods.createNameAndValueList(sab, "Source"));
// Resolve one hop, retrieving presentations for
// comparison of source assignments.
ResolvedConceptReference[] refs = graph.resolveAsList(
rcr, fwd, !fwd, Integer.MAX_VALUE, 1,
null, new PropertyType[] { PropertyType.PRESENTATION },
sortByCode_, null, -1).getResolvedConceptReference();
// Create a new tree item for each upstream node, add the current
// tree item as a child, and recurse to go higher (if available).
if (refs.length > 0) {
// Each associated concept represents an upstream branch.
AssociationList aList = fwd ? refs[0].getSourceOf() : refs[0].getTargetOf();
for (Association assoc : aList.getAssociation()) {
// Go through the concepts one by one, adding the
// current tree item as a child of a new tree item
// representing the upstream node. If a tree item
// already exists for the parent, we reuse it to
// keep a single branch per parent.
for (AssociatedConcept refParent : assoc.getAssociatedConcepts().getAssociatedConcept())
if (isValidForSAB(refParent, sab)) {
// Fetch the term for this context ...
Presentation[] sabMatch = getSourcePresentations(refParent, sab);
if (sabMatch.length > 0) {
// We need to take into account direction of
// navigation on each pass to get the right label.
String directionalName = getDirectionalLabel(scheme, csvt, assoc, !fwd);
// Check for a previously registered item for the
// parent. If found, re-use it. Otherwise, create
// a new parent tree item.
String parentCode = refParent.getCode();
String link = rcr.getConceptCode() + "|" + parentCode;
if (!visited_links.contains(link)) {
visited_links.add(link);
TreeItem tiParent = code2Tree.get(parentCode);
if (tiParent == null) {
// Create a new tree item.
tiParent =
new TreeItem(parentCode, refParent.getEntityDescription().getContent(),
getAtomText(refParent, sab));
// Add immediate children of the parent code with an
// indication of sub-nodes (+). Codes already
// processed as part of the path are ignored since
// they are handled through recursion.
String[] downstreamAssoc = fwd ? hierAssocToChildNodes_ : hierAssocToParentNodes_;
addChildren(tiParent, scheme, csvt, sab, parentCode, code2Tree.keySet(),
downstreamAssoc, fwd);
// Try to go higher through recursion.
buildPathsToUpperNodes(tiParent, refParent,
scheme, csvt, sab, code2Tree, roots, visited_links, maxLevel, currLevel+1);
}
// Add the child (eliminate redundancy -- e.g., hasSubtype and CHD)
if (!hasChildren(tiParent, ti.code)) {
tiParent.addChild(directionalName, ti);
//KLO
tiParent.expandable = true;
}
}
isRoot = false;
}
}
}
}
}
if (maxLevel != -1 && currLevel == maxLevel) isRoot = true;
if (isRoot) {
System.out.println("================ Adding " + ti.code + " " + ti.text + " to roots.");
roots.add(ti);
}
}
protected void buildPathsToUpperNodes(TreeItem ti, ResolvedConceptReference rcr,
String scheme, CodingSchemeVersionOrTag csvt,
String sab, Map<String, TreeItem> code2Tree,
Set<TreeItem> roots, Set<String> visited_links, int maxLevel, int currLevel, String[] upstreamAssoc, boolean fwd)
throws LBException {
if (maxLevel != -1 && currLevel > maxLevel) {
return;
}
// Only need to process a code once ...
if (code2Tree.containsKey(rcr.getCode()))
return;
// Cache for future reference.
code2Tree.put(rcr.getCode(), ti);
// Unlike the two-pass overload above, this variant makes a single
// pass: the caller supplies the upstream associations and the
// navigation direction.
boolean isRoot = true;
// Define a code graph for all relationships tagged with
// the specified sab.
CodedNodeGraph graph = getLexBIGService().getNodeGraph(scheme, csvt, null);
graph.restrictToAssociations(
ConvenienceMethods.createNameAndValueList(upstreamAssoc),
ConvenienceMethods.createNameAndValueList(sab, "Source"));
// Resolve one hop, retrieving presentations for
// comparison of source assignments.
ResolvedConceptReference[] refs = graph.resolveAsList(
rcr, fwd, !fwd, Integer.MAX_VALUE, 1,
null, new PropertyType[] { PropertyType.PRESENTATION },
sortByCode_, null, -1).getResolvedConceptReference();
// Create a new tree item for each upstream node, add the current
// tree item as a child, and recurse to go higher (if available).
if (refs.length > 0) {
// Each associated concept represents an upstream branch.
AssociationList aList = fwd ? refs[0].getSourceOf() : refs[0].getTargetOf();
for (Association assoc : aList.getAssociation()) {
// Go through the concepts one by one, adding the
// current tree item as a child of a new tree item
// representing the upstream node. If a tree item
// already exists for the parent, we reuse it to
// keep a single branch per parent.
for (AssociatedConcept refParent : assoc.getAssociatedConcepts().getAssociatedConcept())
if (isValidForSAB(refParent, sab)) {
// Fetch the term for this context ...
Presentation[] sabMatch = getSourcePresentations(refParent, sab);
if (sabMatch.length > 0) {
// We need to take into account direction of
// navigation on each pass to get the right label.
String directionalName = getDirectionalLabel(scheme, csvt, assoc, !fwd);
// Check for a previously registered item for the
// parent. If found, re-use it. Otherwise, create
// a new parent tree item.
String parentCode = refParent.getCode();
String link = rcr.getConceptCode() + "|" + parentCode;
if (!visited_links.contains(link)) {
visited_links.add(link);
TreeItem tiParent = code2Tree.get(parentCode);
if (tiParent == null) {
// Create a new tree item.
tiParent =
new TreeItem(parentCode, refParent.getEntityDescription().getContent(),
getAtomText(refParent, sab));
// Add immediate children of the parent code with an
// indication of sub-nodes (+). Codes already
// processed as part of the path are ignored since
// they are handled through recursion.
String[] downstreamAssoc = fwd ? hierAssocToChildNodes_ : hierAssocToParentNodes_;
addChildren(tiParent, scheme, csvt, sab, parentCode, code2Tree.keySet(),
downstreamAssoc, fwd);
// Try to go higher through recursion.
buildPathsToUpperNodes(tiParent, refParent,
scheme, csvt, sab, code2Tree, roots, visited_links, maxLevel, currLevel+1, upstreamAssoc, fwd);
}
// Add the child (eliminate redundancy -- e.g., hasSubtype and CHD)
if (!hasChildren(tiParent, ti.code)) {
tiParent.addChild(directionalName, ti);
//KLO
tiParent.expandable = true;
}
}
isRoot = false;
}
}
}
}
if (maxLevel != -1 && currLevel == maxLevel) isRoot = true;
if (isRoot) {
System.out.println("================ Adding " + ti.code + " " + ti.text + " to roots.");
roots.add(ti);
}
}
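/**
* Illustrative sketch (added for clarity; not part of the original API).
* Shows how the single-direction overload above can be driven directly,
* navigating parent associations forward only. Argument values are
* assumed examples.
*/
private void buildUpperNodesExample(ResolvedConceptReference rcr) throws LBException {
TreeItem ti = new TreeItem(rcr.getCode(), rcr.getEntityDescription().getContent(), null);
Set<TreeItem> roots = new HashSet<TreeItem>();
buildPathsToUpperNodes(ti, rcr, NCI_META_THESAURUS, new CodingSchemeVersionOrTag(),
NCI_SOURCE, new HashMap<String, TreeItem>(), roots,
new HashSet<String>(), -1, 0, hierAssocToParentNodes_, true);
System.out.println("roots found: " + roots.size());
}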
public void dumpTree(HashMap hmap, String focusCode, int level) {
try {
Set keyset = hmap.keySet();
Object[] objs = keyset.toArray();
String code = (String) objs[0];
TreeItem ti = (TreeItem) hmap.get(code);
for (String association : ti.assocToChildMap.keySet()) {
System.out.println("\nassociation: " + association);
List<TreeItem> children = ti.assocToChildMap.get(association);
for (TreeItem childItem : children) {
System.out.println(childItem.text + "(" + childItem.code + ")");
if (childItem.expandable)
{
System.out.println("\tnode.expandable");
printTree(childItem, focusCode, level);
List list = getTopNodes(childItem);
for (int i=0; i<list.size(); i++) {
Object obj = list.get(i);
String nd_code = "";
String nd_name = "";
if (obj instanceof ResolvedConceptReference)
{
ResolvedConceptReference node = (ResolvedConceptReference) list.get(i);
nd_code = node.getConceptCode();
nd_name = node.getEntityDescription().getContent();
}
else if (obj instanceof Concept) {
Concept node = (Concept) list.get(i);
nd_code = node.getEntityCode();
nd_name = node.getEntityDescription().getContent();
}
System.out.println("TOP NODE: " + nd_name + " (" + nd_code + ")" );
}
} else {
System.out.println("\tnode.NOT expandable");
}
}
}
} catch (Exception e) {
e.printStackTrace();
}
}
public static void main(String[] args) throws Exception {
MetaTreeUtils test = new MetaTreeUtils();
String scheme = "NCI MetaThesaurus";
String version = null;
String code = "C1325880";//"C0001206";
String sab = "NCI";
/*
test.run(scheme, version, code);
System.out.println("\n==============================================================");
code = "C1154313";
test.run(scheme, version, code);
*/
HashMap new_map = null;
code = "C1154313";
/*
new_map = test.getSubconcepts(scheme, version, code, sab, "PAR", false);
test.dumpTreeItems(new_map);
code = "CL354459";
new_map = test.getSubconcepts(scheme, version, code, sab, "PAR", false);
test.dumpTreeItems(new_map);
code = "CL354459";
new_map = test.getSubconcepts(scheme, version, code, sab, "hasSubtype", true);
test.dumpTreeItems(new_map);
code = "C0031308";
new_map = test.getSubconcepts(scheme, version, code, sab, "PAR", false);
test.dumpTreeItems(new_map);
code = "C0031308";
new_map = test.getSubconcepts(scheme, version, code, sab, "hasSubtype", true);
test.dumpTreeItems(new_map);
code = "C0007581";
new_map = test.getSubconcepts(scheme, version, code, sab, "PAR", false);
test.dumpTreeItems(new_map);
code = "C0007581";
new_map = test.getSubconcepts(scheme, version, code, sab, "hasSubtype", true);
test.dumpTreeItems(new_map);
*/
//Cell Aging (CUI C0007581)
code = "C0007581";
new_map = test.getTreePathData(scheme, version, sab, code, -1);
//test.dumpTreeItems(new_map);
test.dumpTree(new_map, code, 5);
}
}
| software/ncimbrowser/src/java/gov/nih/nci/evs/browser/utils/MetaTreeUtils.java |
package gov.nih.nci.evs.browser.utils;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.TreeMap;
import java.util.Vector;
import org.LexGrid.LexBIG.DataModel.Collections.AssociatedConceptList;
import org.LexGrid.LexBIG.DataModel.Collections.AssociationList;
import org.LexGrid.LexBIG.DataModel.Collections.LocalNameList;
import org.LexGrid.LexBIG.DataModel.Collections.ResolvedConceptReferenceList;
import org.LexGrid.LexBIG.DataModel.Collections.SortOptionList;
import org.LexGrid.LexBIG.DataModel.Core.AssociatedConcept;
import org.LexGrid.LexBIG.DataModel.Core.Association;
import org.LexGrid.LexBIG.DataModel.Core.CodingSchemeSummary;
import org.LexGrid.LexBIG.DataModel.Core.CodingSchemeVersionOrTag;
import org.LexGrid.LexBIG.DataModel.Core.ConceptReference;
import org.LexGrid.LexBIG.DataModel.Core.NameAndValue;
import org.LexGrid.LexBIG.DataModel.Core.ResolvedConceptReference;
import org.LexGrid.LexBIG.Exceptions.LBException;
import org.LexGrid.LexBIG.Extensions.Generic.LexBIGServiceConvenienceMethods;
import org.LexGrid.LexBIG.Impl.LexBIGServiceImpl;
import org.LexGrid.LexBIG.LexBIGService.CodedNodeGraph;
import org.LexGrid.LexBIG.LexBIGService.CodedNodeSet;
import org.LexGrid.LexBIG.LexBIGService.LexBIGService;
import org.LexGrid.LexBIG.LexBIGService.CodedNodeSet.PropertyType;
import org.LexGrid.LexBIG.Utility.Constructors;
import org.LexGrid.LexBIG.Utility.ConvenienceMethods;
import org.LexGrid.commonTypes.Property;
import org.LexGrid.commonTypes.PropertyQualifier;
import org.LexGrid.commonTypes.Source;
import org.LexGrid.concepts.Presentation;
import org.apache.commons.lang.StringUtils;
import org.LexGrid.commonTypes.EntityDescription;
import org.LexGrid.LexBIG.DataModel.Collections.NameAndValueList;
import org.LexGrid.concepts.Concept;
public class MetaTreeUtils {
static String[] hierAssocToParentNodes_ = new String[] { "PAR", "isa", "branch_of", "part_of", "tributary_of" };
static String[] hierAssocToChildNodes_ = new String[] { "CHD", "hasSubtype" };
static SortOptionList sortByCode_ = Constructors.createSortOptionList(new String[] {"code"});
LocalNameList noopList_ = Constructors.createLocalNameList("_noop_");
LexBIGServiceConvenienceMethods lbscm_ = null;
LexBIGService lbsvc_ = null;
private LexBIGService lbs;
private static String NCI_META_THESAURUS = "NCI MetaThesaurus";
private static String NCI_SOURCE = "NCI";
public MetaTreeUtils(){
init();
}
private void init(){
//lbs = LexBIGServiceImpl.defaultInstance();
lbs = RemoteServerUtil.createLexBIGService();
}
///////////////////
// Source Roots //
///////////////////
/**
* Finds and displays the root nodes of a given SAB.
*
* @param sab the source abbreviation (SAB) whose roots are displayed
* @throws Exception
*/
public void getRoots(String sab) throws Exception {
ResolvedConceptReference root = resolveReferenceGraphForward(getCodingSchemeRoot(sab));
AssociationList assocList = root.getSourceOf();
for(Association assoc : assocList.getAssociation()){
for(AssociatedConcept ac : assoc.getAssociatedConcepts().getAssociatedConcept()){
if(this.isSabQualifiedAssociation(ac, sab)){
displayRoot(ac);
}
}
}
}
public ResolvedConceptReferenceList getSourceRoots(String sab) throws Exception {
ResolvedConceptReferenceList rcrl = new ResolvedConceptReferenceList();
ResolvedConceptReference root = resolveReferenceGraphForward(getCodingSchemeRoot(sab));
AssociationList assocList = root.getSourceOf();
for(Association assoc : assocList.getAssociation()){
for(AssociatedConcept ac : assoc.getAssociatedConcepts().getAssociatedConcept()){
if(this.isSabQualifiedAssociation(ac, sab)){
ResolvedConceptReference r = new ResolvedConceptReference();
EntityDescription entityDescription = new EntityDescription();
entityDescription.setContent(ac.getEntityDescription().getContent());
r.setEntityDescription(entityDescription);
r.setCode(ac.getCode());
rcrl.addResolvedConceptReference(r);
}
}
}
return rcrl;
}
/**
* Displays the root node.
*
* @param ac
*/
protected void displayRoot(AssociatedConcept ac){
System.out.println(ac.getCode() + " - " + ac.getEntityDescription().getContent());
}
/**
* Gets the UMLS root node of a given SAB.
*
* @param sab
* @return
* @throws LBException
*/
private ResolvedConceptReference getCodingSchemeRoot(String sab) throws LBException {
CodedNodeSet cns = lbs.getCodingSchemeConcepts(NCI_META_THESAURUS, null);
cns.restrictToProperties(null, new PropertyType[] {PropertyType.PRESENTATION}, Constructors.createLocalNameList("SRC"), null, Constructors.createNameAndValueList("source-code", "V-"+sab));
ResolvedConceptReference[] refs = cns.resolveToList(null, null, new PropertyType[] {PropertyType.PRESENTATION}, -1).getResolvedConceptReference();
if(refs.length > 1){
throw new LBException("Found more than one Root for SAB: " + sab);
}
if(refs.length == 0){
throw new LBException("Didn't find a Root for SAB: " + sab);
}
return refs[0];
}
/**
* Resolve the relationships of a ResolvedConceptReference forward one level.
*
* @param ref
* @return
* @throws Exception
*/
private ResolvedConceptReference resolveReferenceGraphForward(ResolvedConceptReference ref) throws Exception {
CodedNodeGraph cng = lbs.getNodeGraph(NCI_META_THESAURUS, null, null);
cng.restrictToAssociations(Constructors.createNameAndValueList(new String[]{"CHD", "hasSubtype"}), null);
ResolvedConceptReference[] refs = cng.resolveAsList(ref, true, false, 1, 1, null, null, null, -1).getResolvedConceptReference();
return refs[0];
}
/**
* Determines whether the given association to a concept is qualified
* by the given source abbreviation (SAB).
*
* @param ac the associated concept whose qualifiers are examined
* @param sab the source abbreviation
* @return true if a "Source" qualifier naming the SAB exists
*/
private boolean isSabQualifiedAssociation(AssociatedConcept ac, String sab){
NameAndValue[] nvl = ac.getAssociationQualifiers().getNameAndValue();
for(NameAndValue nv : nvl){
if(nv.getName().equals(sab) &&
nv.getContent().equals("Source")){
return true;
}
}
return false;
}
/////////////////////
// Tree
/////////////////////
private static void Util_displayMessage(String s) {
System.out.println(s);
}
private static void Util_displayAndLogError(String s, Exception e) {
System.out.println(s);
e.printStackTrace();
}
/**
* Process the provided code, constraining relationships
* to the given source abbreviation.
* @throws LBException
*/
public void run(String cui, String sab) throws LBException {
// Resolve the coding scheme.
/*
CodingSchemeSummary css = Util.promptForCodeSystem();
if (css == null)
return;
String scheme = css.getCodingSchemeURI();
*/
String scheme = "NCI MetaThesaurus";
CodingSchemeVersionOrTag csvt = new CodingSchemeVersionOrTag();
//csvt.setVersion(css.getRepresentsVersion());
// Resolve the requested concept.
ResolvedConceptReference rcr = resolveConcept(scheme, csvt, cui);
if (rcr == null) {
Util_displayMessage("Unable to resolve a concept for CUI = '" + cui + "'");
return;
}
// Print a header for the item being processed.
Util_displayMessage("============================================================");
Util_displayMessage("Concept Information");;
Util_displayMessage("============================================================");
printHeader(rcr, sab);
// Print the hierarchies for the requested SAB.
Util_displayMessage("");
Util_displayMessage("============================================================");
Util_displayMessage("Hierarchies applicable for CUI " + cui + " for SAB " + sab);
Util_displayMessage("============================================================");
TreeItem ti = new TreeItem("<Start>", "Start of Tree", null);
long ms = System.currentTimeMillis();
int pathsResolved = 0;
int maxLevel = -1;
try {
// Identify the set of all codes on path from root
// to the focus code ...
TreeItem[] pathsFromRoot = buildPathsToRoot(rcr, scheme, csvt, sab, maxLevel);
pathsResolved = pathsFromRoot.length;
for (TreeItem rootItem : pathsFromRoot)
ti.addChild("CHD", rootItem);
} finally {
System.out.println("Run time (milliseconds): " + (System.currentTimeMillis() - ms) + " to resolve "
+ pathsResolved + " paths from root.");
}
printTree(ti, cui, 0);
// Print the neighboring CUIs/AUIs for this SAB.
Util_displayMessage("");
Util_displayMessage("============================================================");
Util_displayMessage("Neighboring CUIs and AUIs for CUI " + cui + " for SAB " + sab);;
Util_displayMessage("============================================================");
printNeighborhood(scheme, csvt, rcr, sab);
}
public HashMap getTreePathData(String scheme, String version, String sab, String code) throws LBException {
if (sab == null) sab = NCI_SOURCE;
return getTreePathData(scheme, version, sab, code, -1);
}
public HashMap getTreePathData(String scheme, String version, String sab, String code, int maxLevel) throws LBException {
if (sab == null) sab = NCI_SOURCE;
LexBIGService lbsvc = RemoteServerUtil.createLexBIGService();
LexBIGServiceConvenienceMethods lbscm = (LexBIGServiceConvenienceMethods) lbsvc
.getGenericExtension("LexBIGServiceConvenienceMethods");
lbscm.setLexBIGService(lbsvc);
CodingSchemeVersionOrTag csvt = new CodingSchemeVersionOrTag();
if (version != null) csvt.setVersion(version);
return getTreePathData(lbsvc, lbscm, scheme, csvt, sab, code, maxLevel);
}
public HashMap getTreePathData(LexBIGService lbsvc, LexBIGServiceConvenienceMethods lbscm, String scheme,
CodingSchemeVersionOrTag csvt, String sab, String focusCode) throws LBException {
if (sab == null) sab = NCI_SOURCE;
return getTreePathData(lbsvc, lbscm, scheme, csvt, sab, focusCode, -1);
}
public HashMap getTreePathData(LexBIGService lbsvc, LexBIGServiceConvenienceMethods lbscm, String scheme,
CodingSchemeVersionOrTag csvt, String sab, String cui, int maxLevel) throws LBException {
if (sab == null) sab = NCI_SOURCE;
HashMap hmap = new HashMap();
long ms = System.currentTimeMillis();
ResolvedConceptReference rcr = resolveConcept(scheme, csvt, cui);
if (rcr == null) {
Util_displayMessage("Unable to resolve a concept for CUI = '" + cui + "'");
return null;
}
// Dummy root (place holder)
TreeItem ti = new TreeItem("<Root>", "Root node", null);
int pathsResolved = 0;
try {
// Identify the set of all codes on path from root
// to the focus code ...
TreeItem[] pathsFromRoot = buildPathsToRoot(rcr, scheme, csvt, sab, maxLevel);
pathsResolved = pathsFromRoot.length;
for (TreeItem rootItem : pathsFromRoot) {
ti.addChild("CHD", rootItem);
}
} finally {
System.out.println("MetaTreeUtils Run time (milliseconds): " + (System.currentTimeMillis() - ms) + " to resolve "
+ pathsResolved + " paths from root.");
}
hmap.put(cui, ti);
return hmap;
}
/**
* Prints formatted text providing context for
* the given item including CUI, SAB, AUI, and Text.
* @throws LBException
*/
protected void printHeader(ResolvedConceptReference rcr, String sab)
throws LBException {
Util_displayMessage("CUI ....... : " + rcr.getConceptCode());
Util_displayMessage("Description : " + StringUtils.abbreviate(rcr.getEntityDescription().getContent(), 60));
Util_displayMessage("SAB ....... : " + sab);
Util_displayMessage("");
Util_displayMessage("AUIs with this CUI associated for this SAB :");
for (String line : getAtomText(rcr, sab).split("\\|"))
Util_displayMessage(" {" + line + '}');
}
/**
* Prints the given tree item, recursing through all branches.
*
* @param ti
*/
public void printTree(TreeItem ti, String focusCode, int depth) {
StringBuffer indent = new StringBuffer();
for (int i = 0; i < depth * 2; i++)
indent.append("| ");
StringBuffer codeAndText = new StringBuffer(indent)
.append(focusCode.equals(ti.code) ? ">" : " ")
.append(ti.code).append(':')
.append(StringUtils.abbreviate(ti.text, 60))
.append(ti.expandable ? " [+]" : "");
if (ti.auis != null)
for (String line : ti.auis.split("\\|"))
codeAndText.append('\n').append(indent)
.append(" {")
.append(StringUtils.abbreviate(line, 60))
.append('}');
Util_displayMessage(codeAndText.toString());
indent.append("| ");
for (String association : ti.assocToChildMap.keySet()) {
Util_displayMessage(indent.toString() + association);
List<TreeItem> children = ti.assocToChildMap.get(association);
Collections.sort(children);
for (TreeItem childItem : children)
printTree(childItem, focusCode, depth + 1);
}
}
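	// A hypothetical rendering with one child level (codes invented for
	// illustration; '>' would mark the focus code, '[+]' an expandable node):
	//  C0000001:Neoplasm [+]
	// | CHD
	// | |  C0000002:Benign Neoplasm [+]
	// Association labels print one "| " deeper than their parent row, while
	// child rows indent by two "| " units per depth level.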
/**
* Prints formatted text with the CUIs and AUIs of
* neighboring concepts for the requested SAB.
* @throws LBException
*/
protected void printNeighborhood(String scheme, CodingSchemeVersionOrTag csvt,
ResolvedConceptReference rcr, String sab)
throws LBException {
// Resolve neighboring concepts with associations
// qualified by the SAB.
CodedNodeGraph neighborsBySource = getLexBIGService().getNodeGraph(scheme, csvt, null);
neighborsBySource.restrictToAssociations(null, Constructors.createNameAndValueList(sab, "Source"));
ResolvedConceptReferenceList nodes = neighborsBySource.resolveAsList(
rcr, true, true, Integer.MAX_VALUE, 1,
null, new PropertyType[] { PropertyType.PRESENTATION },
sortByCode_, null, -1);
List<AssociatedConcept> neighbors = new ArrayList<AssociatedConcept>();
for (ResolvedConceptReference node : nodes.getResolvedConceptReference()) {
// Process sources and targets ...
if (node.getSourceOf() != null)
for (Association assoc : node.getSourceOf().getAssociation())
for (AssociatedConcept ac : assoc.getAssociatedConcepts().getAssociatedConcept())
if (isValidForSAB(ac, sab))
neighbors.add(ac);
if (node.getTargetOf() != null)
for (Association assoc : node.getTargetOf().getAssociation())
for (AssociatedConcept ac : assoc.getAssociatedConcepts().getAssociatedConcept())
if (isValidForSAB(ac, sab))
neighbors.add(ac);
// Add to printed output
for (ResolvedConceptReference neighbor : neighbors) {
Util_displayMessage(neighbor.getCode() + ':' +
StringUtils.abbreviate(neighbor.getEntityDescription().getContent(), 60));
for (String line : getAtomText(neighbor, sab).split("\\|"))
Util_displayMessage(" {" + StringUtils.abbreviate(line, 60) + '}');
}
}
}
/**
	 * Populates child nodes for a single branch of the tree, and indicates
* whether further expansion (to grandchildren) is possible.
*/
protected void addChildren(TreeItem ti, String scheme, CodingSchemeVersionOrTag csvt,
String sab, String branchRootCode, Set<String> codesToExclude,
String[] associationsToNavigate, boolean associationsNavigatedFwd) throws LBException {
LexBIGService lbsvc = getLexBIGService();
// Resolve the next branch, representing children of the given
// code, navigated according to the provided relationship and
// direction. Resolve the children as a code graph, looking 2
// levels deep but leaving the final level unresolved.
CodedNodeGraph cng = lbsvc.getNodeGraph(scheme, csvt, null);
ConceptReference focus = Constructors.createConceptReference(branchRootCode, scheme);
cng = cng.restrictToAssociations(
Constructors.createNameAndValueList(associationsToNavigate),
ConvenienceMethods.createNameAndValueList(sab, "Source"));
ResolvedConceptReferenceList branch = cng.resolveAsList(
focus, associationsNavigatedFwd, !associationsNavigatedFwd,
Integer.MAX_VALUE, 2,
null, new PropertyType[] { PropertyType.PRESENTATION },
sortByCode_, null, -1, true);
// The resolved branch will be represented by the first node in
// the resolved list. The node will be subdivided by source or
// target associations (depending on direction). The associated
// nodes define the children.
for (ResolvedConceptReference node : branch.getResolvedConceptReference()) {
AssociationList childAssociationList = associationsNavigatedFwd ? node.getSourceOf() : node.getTargetOf();
// Process each association defining children ...
for (Association child : childAssociationList.getAssociation()) {
String childNavText = getDirectionalLabel(scheme, csvt, child, associationsNavigatedFwd);
// Each association may have multiple children ...
AssociatedConceptList branchItemList = child.getAssociatedConcepts();
for (AssociatedConcept branchItemNode : branchItemList.getAssociatedConcept())
if (isValidForSAB(branchItemNode, sab)) {
String branchItemCode = branchItemNode.getCode();
// Add here if not in the list of excluded codes.
// This is also where we look to see if another level
// was indicated to be available. If so, mark the
// entry with a '+' to indicate it can be expanded.
if (!codesToExclude.contains(branchItemCode)) {
TreeItem childItem =
new TreeItem(branchItemCode,
branchItemNode.getEntityDescription().getContent(),
getAtomText(branchItemNode, sab));
AssociationList grandchildBranch =
associationsNavigatedFwd ? branchItemNode.getSourceOf()
: branchItemNode.getTargetOf();
if (grandchildBranch != null) {
childItem.expandable = true;
}
ti.addChild(childNavText, childItem);
}
}
}
}
}
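	// Hypothetical call (scheme/SAB values borrowed from main() below):
	//   addChildren(item, "NCI MetaThesaurus", csvt, "NCI", "C0007581",
	//               pathCodes, hierAssocToChildNodes_, true);
	// Resolving the graph two levels deep, with the last level unresolved, is
	// what lets each child's 'expandable' flag be set without a second query.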
/**
* Returns a resolved concept for the specified code and
* scheme.
* @throws LBException
*/
protected ResolvedConceptReference resolveConcept(String scheme,
CodingSchemeVersionOrTag csvt, String code)
throws LBException {
CodedNodeSet cns = getLexBIGService().getCodingSchemeConcepts(scheme, csvt);
cns.restrictToMatchingProperties(ConvenienceMethods.createLocalNameList("conceptCode"),
null, code, "exactMatch", null);
ResolvedConceptReferenceList cnsList = cns.resolveToList(
null, null, new PropertyType[] { PropertyType.PRESENTATION },
1);
return (cnsList.getResolvedConceptReferenceCount() == 0) ? null
: cnsList.getResolvedConceptReference(0);
}
/**
* Returns a cached instance of a LexBIG service.
*/
protected LexBIGService getLexBIGService() throws LBException {
if (lbsvc_ == null)
//lbsvc_ = LexBIGServiceImpl.defaultInstance();
lbsvc_ = RemoteServerUtil.createLexBIGService();
return lbsvc_;
}
/**
* Returns a cached instance of convenience methods.
*/
protected LexBIGServiceConvenienceMethods getConvenienceMethods() throws LBException {
if (lbscm_ == null)
lbscm_ = (LexBIGServiceConvenienceMethods)
getLexBIGService().getGenericExtension("LexBIGServiceConvenienceMethods");
lbscm_.setLexBIGService(lbsvc_);
return lbscm_;
}
/**
* Returns the label to display for the given association and directional
* indicator.
*/
protected String getDirectionalLabel(LexBIGServiceConvenienceMethods lbscm, String scheme, CodingSchemeVersionOrTag csvt,
Association assoc, boolean navigatedFwd) throws LBException {
String assocLabel = navigatedFwd ? lbscm.getAssociationForwardName(assoc.getAssociationName(), scheme, csvt)
: lbscm.getAssociationReverseName(assoc.getAssociationName(), scheme, csvt);
if (StringUtils.isBlank(assocLabel))
assocLabel = (navigatedFwd ? "" : "[Inverse]") + assoc.getAssociationName();
return assocLabel;
}
protected String getDirectionalLabel(String scheme, CodingSchemeVersionOrTag csvt,
Association assoc, boolean navigatedFwd) throws LBException {
//LexBIGServiceConvenienceMethods lbscm = getConvenienceMethods();
LexBIGService lbSvc = RemoteServerUtil.createLexBIGService();
LexBIGServiceConvenienceMethods lbscm = (LexBIGServiceConvenienceMethods) lbSvc
.getGenericExtension("LexBIGServiceConvenienceMethods");
lbscm.setLexBIGService(lbSvc);
String assocLabel = navigatedFwd ? lbscm.getAssociationForwardName(assoc.getAssociationName(), scheme, csvt)
: lbscm.getAssociationReverseName(assoc.getAssociationName(), scheme, csvt);
if (StringUtils.isBlank(assocLabel))
assocLabel = (navigatedFwd ? "" : "[Inverse]") + assoc.getAssociationName();
return assocLabel;
}
/**
* Returns a string representing the AUIs and
* text presentations applicable only for the
* given source abbreviation (SAB). All AUI
* text combinations are qualified by SAB and
* delimited by '|'.
*/
protected String getAtomText(ResolvedConceptReference rcr, String sab) {
StringBuffer text = new StringBuffer();
boolean first = true;
for (Presentation p : getSourcePresentations(rcr, sab)) {
if (!first)
text.append('|');
text.append(sab).append(':')
.append(getAtomText(p)).append(':')
.append('\'')
.append(p.getValue().getContent())
.append('\'');
first = false;
}
return
text.length() > 0 ? text.toString()
: "<No Match for SAB>";
}
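	// Illustrative only -- the AUI values below are made-up placeholders. For
	// a concept with two NCI atoms the returned string is shaped like:
	//   NCI:A1234567:'Preferred Name'|NCI:A7654321:'Synonym'
	// Callers (printHeader, printTree, printNeighborhood) split on '|' to
	// print one " {...}" line per atom.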
/**
* Returns text for AUI qualifiers for the given property.
* This method iterates through available property qualifiers.
* Typically only one AUI is expected. If more are
* discovered, returned values are delimited by '|'.
*/
protected String getAtomText(Property prop) {
StringBuffer text = new StringBuffer();
boolean first = true;
for (PropertyQualifier pq : prop.getPropertyQualifier())
if ("AUI".equalsIgnoreCase(pq.getPropertyQualifierName())) {
if (!first)
text.append('|');
text.append(pq.getValue().getContent());
first = false;
}
return
text.length() > 0 ? text.toString()
: "<No AUI>";
}
/**
* Returns all assigned presentations matching the given
* source abbreviation (SAB). This method iterates through the
* available presentations to find any qualified to match
* the specified source.
*/
protected Presentation[] getSourcePresentations(ResolvedConceptReference rcr, String sab) {
// Ensure the associated entity was resolved, and look at each
// assigned presentation for a matching source qualifier.
List<Presentation> matches = new ArrayList<Presentation>();
if (rcr.getEntity() != null)
for (Presentation p : rcr.getEntity().getPresentation())
for (Source src : p.getSource())
if (sab.equalsIgnoreCase(src.getContent()))
matches.add(p);
return matches.toArray(new Presentation[matches.size()]);
}
/**
* Indicates whether the given associated concept contains
* a qualifier for the given source abbreviation (SAB).
* @return true if a qualifier exists; false otherwise.
*/
protected boolean isValidForSAB(AssociatedConcept ac, String sab) {
for (NameAndValue qualifier : ac.getAssociationQualifiers().getNameAndValue())
if ("Source".equalsIgnoreCase(qualifier.getContent())
&& sab.equalsIgnoreCase(qualifier.getName()))
return true;
return false;
}
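	// Note the qualifier orientation: the SAB rides in getName() and the
	// literal "Source" in getContent(), so a qualifier with name "NCI" and
	// content "Source" marks an edge asserted by the NCI source.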
////////////////////////
public HashMap getSubconcepts(String scheme, String version, String code, String sab)
{
return getSubconcepts(scheme, version, code, sab, true);
}
public HashMap getSubconcepts(String scheme, String version, String code, String sab, boolean associationsNavigatedFwd)
{
HashMap hmap = new HashMap();
TreeItem ti = null;
long ms = System.currentTimeMillis();
Set<String> codesToExclude = Collections.EMPTY_SET;
boolean fwd = true;
String[] associationsToNavigate = fwd ? hierAssocToChildNodes_ : hierAssocToParentNodes_;
//boolean associationsNavigatedFwd = true;
CodingSchemeVersionOrTag csvt = new CodingSchemeVersionOrTag();
if (version != null) csvt.setVersion(version);
ResolvedConceptReferenceList matches = null;
//Vector v = new Vector();
try {
LexBIGService lbsvc = RemoteServerUtil.createLexBIGService();
LexBIGServiceConvenienceMethods lbscm = (LexBIGServiceConvenienceMethods) lbsvc
.getGenericExtension("LexBIGServiceConvenienceMethods");
lbscm.setLexBIGService(lbsvc);
String name = getCodeDescription(lbsvc, scheme, csvt, code);
ti = new TreeItem(code, name);
ti.expandable = false;
// Resolve the next branch, representing children of the given
// code, navigated according to the provided relationship and
// direction. Resolve the children as a code graph, looking 2
// levels deep but leaving the final level unresolved.
CodedNodeGraph cng = lbsvc.getNodeGraph(scheme, csvt, null);
ConceptReference focus = Constructors.createConceptReference(code, scheme);
cng = cng.restrictToAssociations(
Constructors.createNameAndValueList(associationsToNavigate),
ConvenienceMethods.createNameAndValueList(sab, "Source"));
ResolvedConceptReferenceList branch = cng.resolveAsList(
focus, associationsNavigatedFwd, !associationsNavigatedFwd,
Integer.MAX_VALUE, 2,
null, new PropertyType[] { PropertyType.PRESENTATION },
sortByCode_, null, -1, true);
// The resolved branch will be represented by the first node in
// the resolved list. The node will be subdivided by source or
// target associations (depending on direction). The associated
// nodes define the children.
for (ResolvedConceptReference node : branch.getResolvedConceptReference()) {
AssociationList childAssociationList = associationsNavigatedFwd ? node.getSourceOf() : node.getTargetOf();
// Process each association defining children ...
for (Association child : childAssociationList.getAssociation()) {
String childNavText = getDirectionalLabel(lbscm, scheme, csvt, child, associationsNavigatedFwd);
// Each association may have multiple children ...
AssociatedConceptList branchItemList = child.getAssociatedConcepts();
for (AssociatedConcept branchItemNode : branchItemList.getAssociatedConcept()) {
if (isValidForSAB(branchItemNode, sab)) {
String branchItemCode = branchItemNode.getCode();
// Add here if not in the list of excluded codes.
// This is also where we look to see if another level
// was indicated to be available. If so, mark the
// entry with a '+' to indicate it can be expanded.
if (!codesToExclude.contains(branchItemCode)) {
ti.expandable = true;
TreeItem childItem =
new TreeItem(branchItemCode,
branchItemNode.getEntityDescription().getContent());
AssociationList grandchildBranch =
associationsNavigatedFwd ? branchItemNode.getSourceOf()
: branchItemNode.getTargetOf();
if (grandchildBranch != null)
childItem.expandable = true;
ti.addChild(childNavText, childItem);
}
}
}
}
}
hmap.put(code, ti);
} catch (Exception ex) {
ex.printStackTrace();
}
System.out.println("Run time (milliseconds) getSubconcepts: " + (System.currentTimeMillis() - ms) + " to resolve " );
return hmap;
}
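	// Note: associationsToNavigate is always the child-direction set; calling
	// this with associationsNavigatedFwd=false walks those same associations
	// in reverse, which yields parents rather than children (see run()).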
///////////////////////////////////////////////////////
// Helper Methods
///////////////////////////////////////////////////////
/**
* Returns the entity description for the given code.
*/
protected String getCodeDescription(LexBIGService lbsvc, String scheme, CodingSchemeVersionOrTag csvt, String code)
throws LBException {
CodedNodeSet cns = lbsvc.getCodingSchemeConcepts(scheme, csvt);
cns = cns.restrictToCodes(Constructors.createConceptReferenceList(code, scheme));
ResolvedConceptReferenceList rcrl = cns.resolveToList(null, noopList_, null, 1);
if (rcrl.getResolvedConceptReferenceCount() > 0) {
EntityDescription desc = rcrl.getResolvedConceptReference(0).getEntityDescription();
if (desc != null)
return desc.getContent();
}
return "<Not assigned>";
}
/**
* Returns the entity description for the given resolved concept reference.
*/
protected String getCodeDescription(ResolvedConceptReference ref) throws LBException {
EntityDescription desc = ref.getEntityDescription();
if (desc != null)
return desc.getContent();
return "<Not assigned>";
}
public List getTopNodes(TreeItem ti) {
List list = new ArrayList();
getTopNodes(ti, list, 0, 1);
return list;
}
public void getTopNodes(TreeItem ti, List list, int currLevel, int maxLevel) {
if (list == null) list = new ArrayList();
if (currLevel > maxLevel) return;
if (ti.assocToChildMap.keySet().size() > 0) {
if (ti.text.compareTo("Root node") != 0)
{
ResolvedConceptReference rcr = new ResolvedConceptReference();
rcr.setConceptCode(ti.code);
EntityDescription entityDescription = new EntityDescription();
entityDescription.setContent(ti.text);
rcr.setEntityDescription(entityDescription);
list.add(rcr);
}
}
for (String association : ti.assocToChildMap.keySet()) {
List<TreeItem> children = ti.assocToChildMap.get(association);
Collections.sort(children);
for (TreeItem childItem : children) {
getTopNodes(childItem, list, currLevel+1, maxLevel);
}
}
}
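	// Collects every node down to maxLevel that still has children of its
	// own, skipping the synthetic "Root node" placeholder; leaf nodes are
	// never reported as top nodes.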
public static void dumpTreeItems(HashMap hmap) {
try {
Set keyset = hmap.keySet();
Object[] objs = keyset.toArray();
String code = (String) objs[0];
TreeItem ti = (TreeItem) hmap.get(code);
for (String association : ti.assocToChildMap.keySet()) {
System.out.println("\nassociation: " + association);
List<TreeItem> children = ti.assocToChildMap.get(association);
for (TreeItem childItem : children) {
System.out.println(childItem.text + "(" + childItem.code + ")");
int knt = 0;
if (childItem.expandable)
{
knt = 1;
System.out.println("\tnode.expandable");
} else {
System.out.println("\tnode.NOT expandable");
}
}
}
		} catch (Exception e) {
			// Ignored: this dump is best-effort debug output.
		}
}
public void run(String scheme, String version, String code) {
CodingSchemeVersionOrTag csvt = new CodingSchemeVersionOrTag();
ResolvedConceptReference rcr = null;
try {
rcr = resolveConcept(scheme, csvt, code);
		} catch (Exception ex) {
			// Resolution failure leaves rcr null; handled just below.
		}
if (rcr == null) {
Util_displayMessage("Unable to resolve a concept for CUI = '" + code + "'");
System.exit(1);
}
String name = null;
try {
name = getCodeDescription(rcr);
} catch (Exception ex) {
name = "Unknown";
}
System.out.println("Coding scheme: " + scheme);
System.out.println("code: " + code);
System.out.println("name: " + name);
String sab = "NCI";
//boolean associationsNavigatedFwd = true;
Long startTime = System.currentTimeMillis();
HashMap hmap1 = getSubconcepts(scheme, version, code, sab, true);
System.out.println("Call getSubconcepts true took: " + (System.currentTimeMillis() - startTime) + "ms");
dumpTreeItems(hmap1);
startTime = System.currentTimeMillis();
HashMap hmap2 = getSubconcepts(scheme, version, code, sab, false);
System.out.println("Call getSubconcepts false took: " + (System.currentTimeMillis() - startTime) + "ms");
dumpTreeItems(hmap2);
}
protected String getDisplayRef(ResolvedConceptReference ref){
return "[" + ref.getEntityDescription().getContent() + "(" + ref.getConceptCode() + ")]";
}
public HashMap getSubconcepts(String scheme, String version, String code, String sab, String asso_name, boolean associationsNavigatedFwd) {
HashSet hset = new HashSet();
HashMap hmap = new HashMap();
TreeItem ti = null;
Vector w = new Vector();
long ms = System.currentTimeMillis();
Set<String> codesToExclude = Collections.EMPTY_SET;
boolean fwd = true;
CodingSchemeVersionOrTag csvt = new CodingSchemeVersionOrTag();
try {
LexBIGService lbSvc = RemoteServerUtil.createLexBIGService();
LexBIGServiceConvenienceMethods lbscm = (LexBIGServiceConvenienceMethods) lbSvc
.getGenericExtension("LexBIGServiceConvenienceMethods");
lbscm.setLexBIGService(lbSvc);
String name = getCodeDescription(lbSvc, scheme, csvt, code);
ti = new TreeItem(code, name);
ti.expandable = false;
CodedNodeGraph cng = null;
ResolvedConceptReferenceList branch = null;
cng = lbSvc.getNodeGraph(scheme, null, null);
NameAndValueList nvl = null;
if (sab != null) nvl = ConvenienceMethods.createNameAndValueList(sab, "Source");
cng = cng.restrictToAssociations(Constructors.createNameAndValueList(new String[]{asso_name}), nvl);
branch = cng.resolveAsList(Constructors.createConceptReference(code, scheme),
associationsNavigatedFwd, !associationsNavigatedFwd,
Integer.MAX_VALUE, 2,
null, new PropertyType[] { PropertyType.PRESENTATION },
null, null, -1);
for (ResolvedConceptReference node : branch.getResolvedConceptReference()) {
AssociationList childAssociationList =
associationsNavigatedFwd ? node.getSourceOf()
: node.getTargetOf();
// Process each association defining children ...
for (Association child : childAssociationList.getAssociation()) {
String childNavText = getDirectionalLabel(lbscm, scheme, csvt, child, associationsNavigatedFwd);
// Each association may have multiple children ...
AssociatedConceptList branchItemList = child.getAssociatedConcepts();
for (AssociatedConcept branchItemNode : branchItemList.getAssociatedConcept()) {
//System.out.println("AssociatedConcept: " + branchItemNode.getConceptCode());
if (isValidForSAB(branchItemNode, sab)) {
String branchItemCode = branchItemNode.getCode();
// Add here if not in the list of excluded codes.
// This is also where we look to see if another level
// was indicated to be available. If so, mark the
// entry with a '+' to indicate it can be expanded.
if (!codesToExclude.contains(branchItemCode)) {
if (!hset.contains(branchItemCode)) {
hset.add(branchItemCode);
TreeItem childItem =
new TreeItem(branchItemCode, branchItemNode.getEntityDescription().getContent());
childItem.expandable = false;
AssociationList grandchildBranch =
associationsNavigatedFwd ? branchItemNode.getSourceOf()
: branchItemNode.getTargetOf();
if (grandchildBranch != null) {
for (Association grandchild : grandchildBranch.getAssociation()) {
java.lang.String association_name = grandchild.getAssociationName();
//System.out.println("association_name: " + association_name);
//String grandchildNavText = getDirectionalLabel(lbscm, scheme, csvt, child, associationsNavigatedFwd);
// Each association may have multiple children ...
AssociatedConceptList grandchildbranchItemList = grandchild.getAssociatedConcepts();
for (AssociatedConcept grandchildbranchItemNode : grandchildbranchItemList.getAssociatedConcept()) {
//System.out.println("\tgrandchildbranchItemNode AssociatedConcept: " + grandchildbranchItemNode.getConceptCode());
if (isValidForSAB(grandchildbranchItemNode, sab)) {
childItem.expandable = true;
break;
}
}
}
}
ti.addChild(childNavText, childItem);
ti.expandable = true;
}
}
}
}
}
}
//System.out.println("\t*** Is " + ti.text + "( " + ti.code + ") expandable?: " + ti.expandable);
hmap.put(code, ti);
} catch (Exception ex) {
ex.printStackTrace();
}
System.out.println("Run time (milliseconds) getSubconcepts: " + (System.currentTimeMillis() - ms) + " to resolve " );
return hmap;
}
protected String getAssociationSourceString(AssociatedConcept ac){
String sources = "";
NameAndValue[] nvl = ac.getAssociationQualifiers().getNameAndValue();
int knt = 0;
for (int i=0; i<nvl.length; i++) {
NameAndValue nv = nvl[i];
if (nv.getContent().compareToIgnoreCase("Source") == 0) {
knt++;
if (knt == 1) {
sources = sources + nv.getName();
} else {
sources = sources + " ;" + nv.getName();
}
}
}
return sources;
}
protected Vector getAssociationSources(AssociatedConcept ac){
Vector sources = new Vector();
NameAndValue[] nvl = ac.getAssociationQualifiers().getNameAndValue();
for (int i=0; i<nvl.length; i++) {
NameAndValue nv = nvl[i];
if (nv.getContent().compareToIgnoreCase("Source") == 0) {
sources.add(nv.getName());
}
}
return sources;
}
/**
	 * Builds and returns tree items that represent the root
* and core concepts of resolved paths for printing.
* @throws LBException
*/
protected TreeItem[] buildPathsToRoot(ResolvedConceptReference rcr,
String scheme, CodingSchemeVersionOrTag csvt,
String sab, int maxLevel) throws LBException {
// Create a starting point for tree building.
TreeItem ti =
new TreeItem(rcr.getCode(), rcr.getEntityDescription().getContent(),
getAtomText(rcr, sab));
// Maintain root tree items.
Set<TreeItem> rootItems = new HashSet<TreeItem>();
Set<String> visited_links = new HashSet<String>();
// Natural flow of hierarchy relations moves forward
// from tree root to leaves. Build the paths to root here
// by processing upstream (child to parent) relationships.
buildPathsToUpperNodes(
ti, rcr, scheme, csvt, sab,
new HashMap<String, TreeItem>(),
rootItems, visited_links, maxLevel, 0);
// Return root items discovered during child to parent
// processing.
return rootItems.toArray(new TreeItem[rootItems.size()]);
}
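	// Shape of the result, assuming hypothetical parents C1 and C2 above a
	// focus C3: the upstream walk discovers C1 and C2 as roots, each already
	// wired back down to C3 through child links, and getTreePathData() then
	// hangs those roots off its dummy <Root> item.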
protected boolean hasChildren(TreeItem tiParent, String code) {
if (tiParent == null) return false;
if (tiParent.assocToChildMap == null) return false;
for (String association : tiParent.assocToChildMap.keySet()) {
List<TreeItem> children = tiParent.assocToChildMap.get(association);
for (int i=0; i<children.size(); i++) {
TreeItem childItem = (TreeItem) children.get(i);
if (childItem.code.compareTo(code) == 0) return true;
}
}
return false;
}
/**
* Add all hierarchical paths to root that start from the
* referenced concept and move backward in the tree. If
* the natural flow of relations is thought of moving from tree
* root to leaves, this method processes nodes in the
* reverse direction (from child to parent).
* @throws LBException
*/
/*
protected void buildPathsToUpperNodes(TreeItem ti, ResolvedConceptReference rcr,
String scheme, CodingSchemeVersionOrTag csvt,
String sab, Map<String, TreeItem> code2Tree,
Set<TreeItem> roots, int maxLevel, int currLevel)
throws LBException {
if (maxLevel != -1 && currLevel >= maxLevel) return;
// Only need to process a code once ...
if (code2Tree.containsKey(rcr.getCode()))
return;
// Cache for future reference.
code2Tree.put(rcr.getCode(), ti);
// UMLS relations can be defined with forward direction
// being parent to child or child to parent on a source
// by source basis. Iterate twice to ensure completeness;
// once navigating child to parent relations forward
// and once navigating parent to child relations
// backward. Both have the net effect of navigating
// from the bottom of the hierarchy to the top.
boolean isRoot = true;
for (int i = 0; i <= 1; i++) {
boolean fwd = i < 1;
String[] upstreamAssoc = fwd ? hierAssocToParentNodes_ : hierAssocToChildNodes_;
// Define a code graph for all relationships tagged with
// the specified sab.
CodedNodeGraph graph = getLexBIGService().getNodeGraph(scheme, csvt, null);
graph.restrictToAssociations(
ConvenienceMethods.createNameAndValueList(upstreamAssoc),
ConvenienceMethods.createNameAndValueList(sab, "Source"));
// Resolve one hop, retrieving presentations for
// comparison of source assignments.
ResolvedConceptReference[] refs = graph.resolveAsList(
rcr, fwd, !fwd, Integer.MAX_VALUE, 1,
null, new PropertyType[] { PropertyType.PRESENTATION },
sortByCode_, null, -1).getResolvedConceptReference();
// Create a new tree item for each upstream node, add the current
// tree item as a child, and recurse to go higher (if available).
if (refs.length > 0) {
// Each associated concept represents an upstream branch.
AssociationList aList = fwd ? refs[0].getSourceOf() : refs[0].getTargetOf();
for (Association assoc : aList.getAssociation()) {
// Go through the concepts one by one, adding the
// current tree item as a child of a new tree item
// representing the upstream node. If a tree item
// already exists for the parent, we reuse it to
// keep a single branch per parent.
for (AssociatedConcept refParent : assoc.getAssociatedConcepts().getAssociatedConcept())
if (isValidForSAB(refParent, sab)) {
// Fetch the term for this context ...
Presentation[] sabMatch = getSourcePresentations(refParent, sab);
if (sabMatch.length > 0) {
// We need to take into account direction of
// navigation on each pass to get the right label.
String directionalName = getDirectionalLabel(scheme, csvt, assoc, !fwd);
// Check for a previously registered item for the
// parent. If found, re-use it. Otherwise, create
// a new parent tree item.
String parentCode = refParent.getCode();
TreeItem tiParent = code2Tree.get(parentCode);
if (tiParent == null) {
// Create a new tree item.
tiParent =
new TreeItem(parentCode, refParent.getEntityDescription().getContent(),
getAtomText(refParent, sab));
// Add immediate children of the parent code with an
// indication of sub-nodes (+). Codes already
// processed as part of the path are ignored since
// they are handled through recursion.
String[] downstreamAssoc = fwd ? hierAssocToChildNodes_ : hierAssocToParentNodes_;
addChildren(tiParent, scheme, csvt, sab, parentCode, code2Tree.keySet(),
downstreamAssoc, fwd);
// Try to go higher through recursion.
buildPathsToUpperNodes(tiParent, refParent,
scheme, csvt, sab, code2Tree, roots, maxLevel, currLevel+1);
}
// Add the child (eliminate redundancy -- e.g., hasSubtype and CHD)
if (!hasChildren(tiParent, ti.code)) {
tiParent.addChild(directionalName, ti);
//KLO
tiParent.expandable = true;
}
isRoot = false;
}
}
}
}
}
if (isRoot) {
System.out.println("================ Adding " + ti.code + " " + ti.text + " to roots.");
roots.add(ti);
}
}
*/
protected void buildPathsToUpperNodes(TreeItem ti, ResolvedConceptReference rcr,
String scheme, CodingSchemeVersionOrTag csvt,
String sab, Map<String, TreeItem> code2Tree,
Set<TreeItem> roots, Set<String> visited_links, int maxLevel, int currLevel)
throws LBException {
//if (maxLevel != -1 && currLevel >= maxLevel)
if (maxLevel != -1 && currLevel > maxLevel)
{
return;
}
// Only need to process a code once ...
if (code2Tree.containsKey(rcr.getCode()))
return;
// Cache for future reference.
code2Tree.put(rcr.getCode(), ti);
// UMLS relations can be defined with forward direction
// being parent to child or child to parent on a source
// by source basis. Iterate twice to ensure completeness;
// once navigating child to parent relations forward
// and once navigating parent to child relations
// backward. Both have the net effect of navigating
// from the bottom of the hierarchy to the top.
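		// E.g. (hypothetical source assignments): one SAB may assert PAR
		// (child-to-parent), followed forward on pass one, while another
		// asserts CHD (parent-to-child), followed backward on pass two;
		// either way the walk climbs toward the roots.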
boolean isRoot = true;
for (int i = 0; i <= 1; i++) {
boolean fwd = i < 1;
String[] upstreamAssoc = fwd ? hierAssocToParentNodes_ : hierAssocToChildNodes_;
// Define a code graph for all relationships tagged with
// the specified sab.
CodedNodeGraph graph = getLexBIGService().getNodeGraph(scheme, csvt, null);
graph.restrictToAssociations(
ConvenienceMethods.createNameAndValueList(upstreamAssoc),
ConvenienceMethods.createNameAndValueList(sab, "Source"));
// Resolve one hop, retrieving presentations for
// comparison of source assignments.
ResolvedConceptReference[] refs = graph.resolveAsList(
rcr, fwd, !fwd, Integer.MAX_VALUE, 1,
null, new PropertyType[] { PropertyType.PRESENTATION },
sortByCode_, null, -1).getResolvedConceptReference();
// Create a new tree item for each upstream node, add the current
// tree item as a child, and recurse to go higher (if available).
if (refs.length > 0) {
// Each associated concept represents an upstream branch.
AssociationList aList = fwd ? refs[0].getSourceOf() : refs[0].getTargetOf();
for (Association assoc : aList.getAssociation()) {
// Go through the concepts one by one, adding the
// current tree item as a child of a new tree item
// representing the upstream node. If a tree item
// already exists for the parent, we reuse it to
// keep a single branch per parent.
for (AssociatedConcept refParent : assoc.getAssociatedConcepts().getAssociatedConcept())
if (isValidForSAB(refParent, sab)) {
// Fetch the term for this context ...
Presentation[] sabMatch = getSourcePresentations(refParent, sab);
if (sabMatch.length > 0) {
// We need to take into account direction of
// navigation on each pass to get the right label.
String directionalName = getDirectionalLabel(scheme, csvt, assoc, !fwd);
// Check for a previously registered item for the
// parent. If found, re-use it. Otherwise, create
// a new parent tree item.
String parentCode = refParent.getCode();
String link = rcr.getConceptCode() + "|" + parentCode;
if (!visited_links.contains(link)) {
visited_links.add(link);
TreeItem tiParent = code2Tree.get(parentCode);
if (tiParent == null) {
// Create a new tree item.
tiParent =
new TreeItem(parentCode, refParent.getEntityDescription().getContent(),
getAtomText(refParent, sab));
// Add immediate children of the parent code with an
// indication of sub-nodes (+). Codes already
// processed as part of the path are ignored since
// they are handled through recursion.
String[] downstreamAssoc = fwd ? hierAssocToChildNodes_ : hierAssocToParentNodes_;
addChildren(tiParent, scheme, csvt, sab, parentCode, code2Tree.keySet(),
downstreamAssoc, fwd);
// Try to go higher through recursion.
buildPathsToUpperNodes(tiParent, refParent,
scheme, csvt, sab, code2Tree, roots, visited_links, maxLevel, currLevel+1);
}
// Add the child (eliminate redundancy -- e.g., hasSubtype and CHD)
if (!hasChildren(tiParent, ti.code)) {
tiParent.addChild(directionalName, ti);
//KLO
tiParent.expandable = true;
}
}
isRoot = false;
}
}
}
}
}
if (maxLevel != -1 && currLevel == maxLevel) isRoot = true;
if (isRoot) {
System.out.println("================ Adding " + ti.code + " " + ti.text + " to roots.");
roots.add(ti);
}
}
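	// The visited_links guard keys each edge as "childCUI|parentCUI", so a
	// parent reachable on both passes (e.g. via PAR forward and hasSubtype
	// backward) is attached only once; hasChildren() catches any remaining
	// duplicate child attachments.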
public void dumpTree(HashMap hmap, String focusCode, int level) {
try {
Set keyset = hmap.keySet();
Object[] objs = keyset.toArray();
String code = (String) objs[0];
TreeItem ti = (TreeItem) hmap.get(code);
for (String association : ti.assocToChildMap.keySet()) {
System.out.println("\nassociation: " + association);
List<TreeItem> children = ti.assocToChildMap.get(association);
for (TreeItem childItem : children) {
System.out.println(childItem.text + "(" + childItem.code + ")");
int knt = 0;
if (childItem.expandable)
{
knt = 1;
System.out.println("\tnode.expandable");
printTree(childItem, focusCode, level);
List list = getTopNodes(childItem);
for (int i=0; i<list.size(); i++) {
Object obj = list.get(i);
String nd_code = "";
String nd_name = "";
if (obj instanceof ResolvedConceptReference)
{
ResolvedConceptReference node = (ResolvedConceptReference) list.get(i);
nd_code = node.getConceptCode();
nd_name = node.getEntityDescription().getContent();
}
else if (obj instanceof Concept) {
Concept node = (Concept) list.get(i);
nd_code = node.getEntityCode();
nd_name = node.getEntityDescription().getContent();
}
System.out.println("TOP NODE: " + nd_name + " (" + nd_code + ")" );
}
} else {
System.out.println("\tnode.NOT expandable");
}
}
}
		} catch (Exception e) {
			// Ignored: this dump is best-effort debug output.
		}
}
public static void main(String[] args) throws Exception {
MetaTreeUtils test = new MetaTreeUtils();
String scheme = "NCI MetaThesaurus";
String version = null;
String code = "C1325880";//"C0001206";
boolean associationsNavigatedFwd = true;
String sab = "NCI";
/*
test.run(scheme, version, code);
System.out.println("\n==============================================================");
code = "C1154313";
test.run(scheme, version, code);
*/
HashMap new_map = null;
code = "C1154313";
/*
new_map = test.getSubconcepts(scheme, version, code, sab, "PAR", false);
test.dumpTreeItems(new_map);
code = "CL354459";
new_map = test.getSubconcepts(scheme, version, code, sab, "PAR", false);
test.dumpTreeItems(new_map);
code = "CL354459";
new_map = test.getSubconcepts(scheme, version, code, sab, "hasSubtype", true);
test.dumpTreeItems(new_map);
code = "C0031308";
new_map = test.getSubconcepts(scheme, version, code, sab, "PAR", false);
test.dumpTreeItems(new_map);
code = "C0031308";
new_map = test.getSubconcepts(scheme, version, code, sab, "hasSubtype", true);
test.dumpTreeItems(new_map);
code = "C0007581";
new_map = test.getSubconcepts(scheme, version, code, sab, "PAR", false);
test.dumpTreeItems(new_map);
code = "C0007581";
new_map = test.getSubconcepts(scheme, version, code, sab, "hasSubtype", true);
test.dumpTreeItems(new_map);
*/
//Cell Aging (CUI C0007581)
code = "C0007581";
new_map = test.getTreePathData(scheme, version, sab, code, -1);
//test.dumpTreeItems(new_map);
test.dumpTree(new_map, code, 5);
}
} | GF#21350 [KLO, 063009]
git-svn-id: 7af4603ee811ab7d38b44ab6bf739118ab0e87f8@330 b145b987-6716-48ef-8e4a-68a7679c2f12
| software/ncimbrowser/src/java/gov/nih/nci/evs/browser/utils/MetaTreeUtils.java | GF#21350 [KLO, 063009] | <ide><path>oftware/ncimbrowser/src/java/gov/nih/nci/evs/browser/utils/MetaTreeUtils.java
<ide> AssociationList grandchildBranch =
<ide> associationsNavigatedFwd ? branchItemNode.getSourceOf()
<ide> : branchItemNode.getTargetOf();
<add>
<add> /*
<ide> if (grandchildBranch != null) {
<del> childItem.expandable = true;
<add> childItem.expandable = true;
<ide> }
<add> */
<add> if (grandchildBranch != null) {
<add> for (Association grandchild : grandchildBranch.getAssociation()) {
<add>
<add> java.lang.String association_name = grandchild.getAssociationName();
<add> //System.out.println("association_name: " + association_name);
<add>
<add> //String grandchildNavText = getDirectionalLabel(lbscm, scheme, csvt, child, associationsNavigatedFwd);
<add> // Each association may have multiple children ...
<add> AssociatedConceptList grandchildbranchItemList = grandchild.getAssociatedConcepts();
<add> for (AssociatedConcept grandchildbranchItemNode : grandchildbranchItemList.getAssociatedConcept()) {
<add>
<add> //System.out.println("\tgrandchildbranchItemNode AssociatedConcept: " + grandchildbranchItemNode.getConceptCode());
<add>
<add> if (isValidForSAB(grandchildbranchItemNode, sab)) {
<add> childItem.expandable = true;
<add> break;
<add> }
<add> }
<add> }
<add> }
<add>
<ide> ti.addChild(childNavText, childItem);
<ide> }
<ide> }
<ide> // Natural flow of hierarchy relations moves forward
<ide> // from tree root to leaves. Build the paths to root here
<ide> // by processing upstream (child to parent) relationships.
<add>
<add> //KLO testing
<add> /*
<ide> buildPathsToUpperNodes(
<ide> ti, rcr, scheme, csvt, sab,
<ide> new HashMap<String, TreeItem>(),
<ide> rootItems, visited_links, maxLevel, 0);
<add> */
<add>
<add> buildPathsToUpperNodes(
<add> ti, rcr, scheme, csvt, sab,
<add> new HashMap<String, TreeItem>(),
<add> rootItems, visited_links, maxLevel, 0);//, hierAssocToParentNodes_, false);
<add>
<ide>
<ide> // Return root items discovered during child to parent
<ide> // processing.
<ide> // backward. Both have the net effect of navigating
<ide> // from the bottom of the hierarchy to the top.
<ide> boolean isRoot = true;
<add>
<ide> for (int i = 0; i <= 1; i++) {
<ide> boolean fwd = i < 1;
<add>
<ide> String[] upstreamAssoc = fwd ? hierAssocToParentNodes_ : hierAssocToChildNodes_;
<ide>
<ide> // Define a code graph for all relationships tagged with
<ide> // indication of sub-nodes (+). Codes already
<ide> // processed as part of the path are ignored since
<ide> // they are handled through recursion.
<add>
<ide> String[] downstreamAssoc = fwd ? hierAssocToChildNodes_ : hierAssocToParentNodes_;
<ide> addChildren(tiParent, scheme, csvt, sab, parentCode, code2Tree.keySet(),
<ide> downstreamAssoc, fwd);
<add>
<ide>
<ide> // Try to go higher through recursion.
<ide> buildPathsToUpperNodes(tiParent, refParent,
<ide> roots.add(ti);
<ide> }
<ide> }
<add>
<add>
<add> protected void buildPathsToUpperNodes(TreeItem ti, ResolvedConceptReference rcr,
<add> String scheme, CodingSchemeVersionOrTag csvt,
<add> String sab, Map<String, TreeItem> code2Tree,
<add> Set<TreeItem> roots, Set<String> visited_links, int maxLevel, int currLevel, String[] upstreamAssoc, boolean fwd)
<add> throws LBException {
<add>
<add> //if (maxLevel != -1 && currLevel >= maxLevel)
<add> if (maxLevel != -1 && currLevel > maxLevel)
<add> {
<add> return;
<add> }
<add>
<add> // Only need to process a code once ...
<add> if (code2Tree.containsKey(rcr.getCode()))
<add> return;
<add>
<add> // Cache for future reference.
<add> code2Tree.put(rcr.getCode(), ti);
<add>
<add> // UMLS relations can be defined with forward direction
<add> // being parent to child or child to parent on a source
<add> // by source basis. Iterate twice to ensure completeness;
<add> // once navigating child to parent relations forward
<add> // and once navigating parent to child relations
<add> // backward. Both have the net effect of navigating
<add> // from the bottom of the hierarchy to the top.
<add> boolean isRoot = true;
<add> /*
<add> for (int i = 0; i <= 1; i++) {
<add> boolean fwd = i < 1;
<add>
<add> String[] upstreamAssoc = fwd ? hierAssocToParentNodes_ : hierAssocToChildNodes_;
<add> */
<add> // Define a code graph for all relationships tagged with
<add> // the specified sab.
<add> CodedNodeGraph graph = getLexBIGService().getNodeGraph(scheme, csvt, null);
<add> graph.restrictToAssociations(
<add> ConvenienceMethods.createNameAndValueList(upstreamAssoc),
<add> ConvenienceMethods.createNameAndValueList(sab, "Source"));
<add>
<add> // Resolve one hop, retrieving presentations for
<add> // comparison of source assignments.
<add> ResolvedConceptReference[] refs = graph.resolveAsList(
<add> rcr, fwd, !fwd, Integer.MAX_VALUE, 1,
<add> null, new PropertyType[] { PropertyType.PRESENTATION },
<add> sortByCode_, null, -1).getResolvedConceptReference();
<add>
<add> // Create a new tree item for each upstream node, add the current
<add> // tree item as a child, and recurse to go higher (if available).
<add> if (refs.length > 0) {
<add>
<add> // Each associated concept represents an upstream branch.
<add> AssociationList aList = fwd ? refs[0].getSourceOf() : refs[0].getTargetOf();
<add> for (Association assoc : aList.getAssociation()) {
<add>
<add> // Go through the concepts one by one, adding the
<add> // current tree item as a child of a new tree item
<add> // representing the upstream node. If a tree item
<add> // already exists for the parent, we reuse it to
<add> // keep a single branch per parent.
<add> for (AssociatedConcept refParent : assoc.getAssociatedConcepts().getAssociatedConcept())
<add> if (isValidForSAB(refParent, sab)) {
<add>
<add> // Fetch the term for this context ...
<add> Presentation[] sabMatch = getSourcePresentations(refParent, sab);
<add> if (sabMatch.length > 0) {
<add>
<add> // We need to take into account direction of
<add> // navigation on each pass to get the right label.
<add> String directionalName = getDirectionalLabel(scheme, csvt, assoc, !fwd);
<add>
<add> // Check for a previously registered item for the
<add> // parent. If found, re-use it. Otherwise, create
<add> // a new parent tree item.
<add> String parentCode = refParent.getCode();
<add>
<add> String link = rcr.getConceptCode() + "|" + parentCode;
<add> if (!visited_links.contains(link)) {
<add> visited_links.add(link);
<add> TreeItem tiParent = code2Tree.get(parentCode);
<add> if (tiParent == null) {
<add>
<add> // Create a new tree item.
<add> tiParent =
<add> new TreeItem(parentCode, refParent.getEntityDescription().getContent(),
<add> getAtomText(refParent, sab));
<add>
<add> // Add immediate children of the parent code with an
<add> // indication of sub-nodes (+). Codes already
<add> // processed as part of the path are ignored since
<add> // they are handled through recursion.
<add>
<add> String[] downstreamAssoc = fwd ? hierAssocToChildNodes_ : hierAssocToParentNodes_;
<add> addChildren(tiParent, scheme, csvt, sab, parentCode, code2Tree.keySet(),
<add> downstreamAssoc, fwd);
<add>
<add>
<add> // Try to go higher through recursion.
<add> buildPathsToUpperNodes(tiParent, refParent,
<add> scheme, csvt, sab, code2Tree, roots, visited_links, maxLevel, currLevel+1, upstreamAssoc, fwd);
<add>
<add> }
<add>
<add> // Add the child (eliminate redundancy -- e.g., hasSubtype and CHD)
<add> if (!hasChildren(tiParent, ti.code)) {
<add> tiParent.addChild(directionalName, ti);
<add> //KLO
<add> tiParent.expandable = true;
<add> }
<add> }
<add> isRoot = false;
<add> }
<add> }
<add> }
<add> }
<add> //}
<add> if (maxLevel != -1 && currLevel == maxLevel) isRoot = true;
<add> if (isRoot) {
<add> System.out.println("================ Adding " + ti.code + " " + ti.text + " to roots.");
<add> roots.add(ti);
<add> }
<add> }
<add>
<ide>
<ide>
<ide> public void dumpTree(HashMap hmap, String focusCode, int level) { |
|
Java | apache-2.0 | dba5ee785a92ed818637c1d76e24fe10ea3680f9 | 0 | godrin/TowerDefense,godrin/TowerDefense | package com.cdm.view.enemy;
import java.util.Arrays;
import java.util.List;
import com.badlogic.gdx.graphics.Color;
import com.badlogic.gdx.math.Vector3;
import com.cdm.view.IRenderer;
import com.cdm.view.Position;
import com.cdm.view.elements.MathTools;
import com.cdm.view.elements.RotatingThing;
public class Truck extends GroundMovingEnemy {
public Position nextStep = null;
public static final float SPEED = 0.21f;
private static final Vector3 c0 = new Vector3(-1.5f, -1, 0);
private static final Vector3 c1 = new Vector3(1, -1, 0);
private static final Vector3 c2 = new Vector3(1, 1, 0);
private static final Vector3 c3 = new Vector3(-1.5f, 1, 0);
private static final Vector3 c01 = new Vector3(-1.25f, -0.75f, 0);
private static final Vector3 c11 = new Vector3(0.75f, -0.75f, 0);
private static final Vector3 c21 = new Vector3(0.75f, 0.75f, 0);
private static final Vector3 c31 = new Vector3(-1.25f, 0.75f, 0);
private static final Vector3 d0 = new Vector3(1f, -0.8f, 0);
private static final Vector3 d1 = new Vector3(2f, -0.8f, 0);
private static final Vector3 d2 = new Vector3(2f, 0.81f, 0);
private static final Vector3 d3 = new Vector3(1f, 0.8f, 0);
private static final Vector3 k0 = new Vector3(-1f, -1.5f, 0);
private static final Vector3 k1 = new Vector3(-0.3f, -1.5f, 0);
private static final Vector3 k2 = new Vector3(-0.3f, -1.1f, 0);
private static final Vector3 k3 = new Vector3(-1f, -1.1f, 0);
private static final Vector3 x0 = new Vector3(-1f, 1.5f, 0);
private static final Vector3 x1 = new Vector3(-0.3f, 1.5f, 0);
private static final Vector3 x2 = new Vector3(-0.3f, 1.1f, 0);
private static final Vector3 x3 = new Vector3(-1f, 1.1f, 0);
private static final Vector3 xx0 = new Vector3(1.7f, 1.5f, 0);
private static final Vector3 xx1 = new Vector3(1f, 1.5f, 0);
private static final Vector3 xx2 = new Vector3(1f, 1.1f, 0);
private static final Vector3 xx3 = new Vector3(1.7f, 1.1f, 0);
private static final Vector3 xy0 = new Vector3(1.7f, -1.5f, 0);
private static final Vector3 xy1 = new Vector3(1f, -1.5f, 0);
private static final Vector3 xy2 = new Vector3(1f, -1.1f, 0);
private static final Vector3 xy3 = new Vector3(1.7f, -1.1f, 0);
private static final List<Vector3> lines = Arrays.asList(new Vector3[] {
c0, c1, c1, c2, c2, c3, c3, c0, c01, c11, c11, c21, c21, c31, c31,
c01, d0, d1, d1, d2, d2, d3, d3, d0, k0, k1, k1, k2, k2, k3, k3,
k0, x0, x1, x1, x2, x2, x3, x3, x0, xx0, xx1, xx1, xx2, xx2, xx3,
xx3, xx0, xy0, xy1, xy1, xy2, xy2, xy3, xy3, xy0 });
private static final List<Vector3> poly = Arrays.asList(new Vector3[] { c0,
c1, c2, c0, c2, c3, d0, d1, d2, d2, d3, d0 });
private static final List<Vector3> chainLines = Arrays
.asList(new Vector3[] { new Vector3(0, -1.5f, 0),
new Vector3(0, -1.1f, 0), new Vector3(0, -1.5f, 0),
new Vector3(0, -1.1f, 0), new Vector3(0, -1.5f, 0),
new Vector3(0, -1.1f, 0), new Vector3(0, -1.5f, 0),
new Vector3(0, -1.1f, 0),
new Vector3(0, 1.5f, 0), new Vector3(0, 1.1f, 0),
new Vector3(0, 1.5f, 0), new Vector3(0, 1.1f, 0),
new Vector3(0, 1.5f, 0), new Vector3(0, 1.1f, 0),
new Vector3(0, 1.5f, 0), new Vector3(0, 1.1f, 0), });
private float chainPhase = 0.0f;
private static final Color innerColor = new Color(0.3f, 0.2f, 0.0f, 1.0f);
private static final Color outerColor = new Color(0.8f, 0.7f, 0f, 1.0f);
private static final Vector3 DEFAULT_DIRECTION = new Vector3(1, 0, 0);
private Vector3 diff = new Vector3();
private Vector3 movingDir = new Vector3();
private RotatingThing rotation = new RotatingThing();
public Truck(Position pos) {
super(pos);
setSize(0.25f);
}
@Override
public void move(float time) {
super.move(time);
chainPhase += time;
while (time > 0) {
if (nextStep == null) {
nextStep = getLevel().getNextPos(getPosition().alignedToGrid());
}
Position nuPos = new Position(getPosition());
diff.set(getPosition().to(nextStep));
float targetAngle = MathTools.angle(diff);
rotation.setTargetAngle(targetAngle);
time -= rotation.move(time);
if (time < 0.00001f)
return;
float len = diff.len();
float delta = time * getSpeed();
if (delta >= len) {
setPosition(nextStep);
				time -= len / getSpeed(); // time actually spent covering len
nextStep = null;
} else {
diff.mul(delta / diff.len());
nuPos.x += diff.x;
nuPos.y += diff.y;
setPosition(nuPos);
time = 0;
}
}
}
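	// Per-tick budget sketch: each slice is first spent turning toward the
	// next waypoint (rotation.move returns the time it consumed), and the
	// remainder advances the truck by time * speed; on reaching a waypoint
	// the leftover budget rolls into the next path segment.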
@Override
public void draw(IRenderer renderer) {
renderer.drawPoly(getPosition(), poly, getAngle(), innerColor,
getSize());
renderer.drawLines(getPosition(), lines, getAngle(), outerColor,
getSize());
drawChain(renderer);
super.draw(renderer);
}
private void drawChain(IRenderer renderer) {
float x;
float startX = -0.9f;
float delta = 0.55f;
int size = 4;
float speed = 0.5f;
for (int i = 0; i < size; i++) {
x = ((float) i) / size * 3.1415f * 0.5f;
x += chainPhase * speed + 3.1415;
x %= 3.1415 * 0.5;
x = (float) Math.sin(x);
x *= delta;
x += startX;
for (int lr = 0; lr < size * 4; lr += size * 2) {
Vector3 a = chainLines.get(lr + i * 2);
Vector3 b = chainLines.get(lr + i * 2 + 1);
a.x = x;
b.x = x;
}
}
renderer.drawLines(getPosition(), chainLines, getAngle(), outerColor,
getSize());
float startX2 = 1f;
for (int i = 0; i < size; i++) {
x = ((float) i) / size * 3.1415f * 0.5f;
x += chainPhase * speed + 3.1415;
x %= 3.1415 * 0.5;
x = (float) Math.sin(x);
x *= delta;
x += startX2;
for (int lr = 0; lr < size * 4; lr += size * 2) {
Vector3 a = chainLines.get(lr + i * 2);
Vector3 b = chainLines.get(lr + i * 2 + 1);
a.x = x;
b.x = x;
}
}
renderer.drawLines(getPosition(), chainLines, getAngle(), outerColor,
getSize());
}
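	// Tread animation sketch: each of the four links gets a phase-shifted
	// sin() value in [0, delta], so links bunch and spread as chainPhase
	// advances; the two passes draw the rear (startX) and front (startX2)
	// tread segments with the same pattern.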
@Override
public int getMoney() {
return 1;
}
@Override
public int getPoints() {
return 1;
}
@Override
public int getBonus() {
return 0;
}
@Override
public float getOriginalSpeed() {
return SPEED;
}
}
| src/com/cdm/view/enemy/Truck.java | package com.cdm.view.enemy;
import java.util.Arrays;
import java.util.List;
import com.badlogic.gdx.graphics.Color;
import com.badlogic.gdx.math.Vector3;
import com.cdm.view.IRenderer;
import com.cdm.view.Position;
import com.cdm.view.elements.MathTools;
import com.cdm.view.elements.RotatingThing;
public class Truck extends GroundMovingEnemy {
public Position nextStep = null;
public static final float SPEED = 0.33f;
private static final Vector3 c0 = new Vector3(-1.5f, -1, 0);
private static final Vector3 c1 = new Vector3(1, -1, 0);
private static final Vector3 c2 = new Vector3(1, 1, 0);
private static final Vector3 c3 = new Vector3(-1.5f, 1, 0);
private static final Vector3 c01 = new Vector3(-1.25f, -0.75f, 0);
private static final Vector3 c11 = new Vector3(0.75f, -0.75f, 0);
private static final Vector3 c21 = new Vector3(0.75f, 0.75f, 0);
private static final Vector3 c31 = new Vector3(-1.25f, 0.75f, 0);
private static final Vector3 d0 = new Vector3(1f, -0.8f, 0);
private static final Vector3 d1 = new Vector3(2f, -0.8f, 0);
private static final Vector3 d2 = new Vector3(2f, 0.81f, 0);
private static final Vector3 d3 = new Vector3(1f, 0.8f, 0);
private static final Vector3 k0 = new Vector3(-1f, -1.5f, 0);
private static final Vector3 k1 = new Vector3(-0.3f, -1.5f, 0);
private static final Vector3 k2 = new Vector3(-0.3f, -1.1f, 0);
private static final Vector3 k3 = new Vector3(-1f, -1.1f, 0);
private static final Vector3 x0 = new Vector3(-1f, 1.5f, 0);
private static final Vector3 x1 = new Vector3(-0.3f, 1.5f, 0);
private static final Vector3 x2 = new Vector3(-0.3f, 1.1f, 0);
private static final Vector3 x3 = new Vector3(-1f, 1.1f, 0);
private static final Vector3 xx0 = new Vector3(1.7f, 1.5f, 0);
private static final Vector3 xx1 = new Vector3(1f, 1.5f, 0);
private static final Vector3 xx2 = new Vector3(1f, 1.1f, 0);
private static final Vector3 xx3 = new Vector3(1.7f, 1.1f, 0);
private static final Vector3 xy0 = new Vector3(1.7f, -1.5f, 0);
private static final Vector3 xy1 = new Vector3(1f, -1.5f, 0);
private static final Vector3 xy2 = new Vector3(1f, -1.1f, 0);
private static final Vector3 xy3 = new Vector3(1.7f, -1.1f, 0);
private static final List<Vector3> lines = Arrays.asList(new Vector3[] {
c0, c1, c1, c2, c2, c3, c3, c0, c01, c11, c11, c21, c21, c31, c31,
c01, d0, d1, d1, d2, d2, d3, d3, d0, k0, k1, k1, k2, k2, k3, k3,
k0, x0, x1, x1, x2, x2, x3, x3, x0, xx0, xx1, xx1, xx2, xx2, xx3,
xx3, xx0, xy0, xy1, xy1, xy2, xy2, xy3, xy3, xy0 });
private static final List<Vector3> poly = Arrays.asList(new Vector3[] { c0,
c1, c2, c0, c2, c3, d0, d1, d2, d2, d3, d0 });
private static final List<Vector3> chainLines = Arrays
.asList(new Vector3[] { new Vector3(0, -1.5f, 0),
new Vector3(0, -1.1f, 0), new Vector3(0, -1.5f, 0),
new Vector3(0, -1.1f, 0), new Vector3(0, -1.5f, 0),
new Vector3(0, -1.1f, 0), new Vector3(0, -1.5f, 0),
new Vector3(0, -1.1f, 0),
new Vector3(0, 1.5f, 0), new Vector3(0, 1.1f, 0),
new Vector3(0, 1.5f, 0), new Vector3(0, 1.1f, 0),
new Vector3(0, 1.5f, 0), new Vector3(0, 1.1f, 0),
new Vector3(0, 1.5f, 0), new Vector3(0, 1.1f, 0), });
private float chainPhase = 0.0f;
private static final Color innerColor = new Color(0.3f, 0.2f, 0.0f, 1.0f);
private static final Color outerColor = new Color(0.8f, 0.7f, 0f, 1.0f);
private static final Vector3 DEFAULT_DIRECTION = new Vector3(1, 0, 0);
private Vector3 diff = new Vector3();
private Vector3 movingDir = new Vector3();
private RotatingThing rotation = new RotatingThing();
public Truck(Position pos) {
super(pos);
setSize(0.25f);
}
@Override
public void move(float time) {
super.move(time);
chainPhase += time;
while (time > 0) {
if (nextStep == null) {
nextStep = getLevel().getNextPos(getPosition().alignedToGrid());
}
Position nuPos = new Position(getPosition());
diff.set(getPosition().to(nextStep));
float targetAngle = MathTools.angle(diff);
rotation.setTargetAngle(targetAngle);
time -= rotation.move(time);
if (time < 0.00001f)
return;
float len = diff.len();
float delta = time * getSpeed();
if (delta >= len) {
setPosition(nextStep);
				time -= len / getSpeed(); // time actually spent covering len
nextStep = null;
} else {
diff.mul(delta / diff.len());
nuPos.x += diff.x;
nuPos.y += diff.y;
setPosition(nuPos);
time = 0;
}
}
}
@Override
public void draw(IRenderer renderer) {
renderer.drawPoly(getPosition(), poly, getAngle(), innerColor,
getSize());
renderer.drawLines(getPosition(), lines, getAngle(), outerColor,
getSize());
drawChain(renderer);
super.draw(renderer);
}
private void drawChain(IRenderer renderer) {
float x;
float startX = -0.9f;
float delta = 0.55f;
int size = 4;
float speed = 0.5f;
for (int i = 0; i < size; i++) {
x = ((float) i) / size * 3.1415f * 0.5f;
x += chainPhase * speed + 3.1415;
x %= 3.1415 * 0.5;
x = (float) Math.sin(x);
x *= delta;
x += startX;
for (int lr = 0; lr < size * 4; lr += size * 2) {
Vector3 a = chainLines.get(lr + i * 2);
Vector3 b = chainLines.get(lr + i * 2 + 1);
a.x = x;
b.x = x;
}
}
renderer.drawLines(getPosition(), chainLines, getAngle(), outerColor,
getSize());
float startX2 = 1f;
for (int i = 0; i < size; i++) {
x = ((float) i) / size * 3.1415f * 0.5f;
x += chainPhase * speed + 3.1415;
x %= 3.1415 * 0.5;
x = (float) Math.sin(x);
x *= delta;
x += startX2;
for (int lr = 0; lr < size * 4; lr += size * 2) {
Vector3 a = chainLines.get(lr + i * 2);
Vector3 b = chainLines.get(lr + i * 2 + 1);
a.x = x;
b.x = x;
}
}
renderer.drawLines(getPosition(), chainLines, getAngle(), outerColor,
getSize());
}
@Override
public int getMoney() {
return 1;
}
@Override
public int getPoints() {
return 1;
}
@Override
public int getBonus() {
return 0;
}
@Override
public float getOriginalSpeed() {
return SPEED;
}
}
| truckspeed... | src/com/cdm/view/enemy/Truck.java | truckspeed... | <ide><path>rc/com/cdm/view/enemy/Truck.java
<ide>
<ide>
<ide> public Position nextStep = null;
<del> public static final float SPEED = 0.33f;
<add> public static final float SPEED = 0.21f;
<ide>
<ide>
<ide> private static final Vector3 c0 = new Vector3(-1.5f, -1, 0);
<ide> getSize());
<ide> renderer.drawLines(getPosition(), lines, getAngle(), outerColor,
<ide> getSize());
<del>
<add>
<ide> drawChain(renderer);
<ide>
<ide> super.draw(renderer); |
|
Java | apache-2.0 | 164cf2f97865b435dfeb595fa9c1238a9e4cd2e5 | 0 | Landmaster/PlusTiC,Landmaster/PlusTiC | package landmaster.plustic.modules;
import java.util.*;
public interface IModule {
public static final Set<IModule> modules = new LinkedHashSet<>();
default void init() {}
default void init2() {}
}
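// Usage sketch (hypothetical registrant): a module adds itself to
// IModule.modules during construction, and the mod's setup code presumably
// walks the set calling init() then init2(); LinkedHashSet keeps that
// iteration in registration order.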
| src/main/java/landmaster/plustic/modules/IModule.java | package landmaster.plustic.modules;
import java.util.*;
// TODO retrofit modules to implement this
public interface IModule {
public static final Set<IModule> modules = new LinkedHashSet<>();
default void init() {}
default void init2() {}
}
| Remove already-resolved TODO
| src/main/java/landmaster/plustic/modules/IModule.java | Remove already-resolved TODO | <ide><path>rc/main/java/landmaster/plustic/modules/IModule.java
<ide>
<ide> import java.util.*;
<ide>
<del>// TODO retrofit modules to implement this
<ide> public interface IModule {
<ide> public static final Set<IModule> modules = new LinkedHashSet<>();
<ide> |
|
Java | mit | db1aaef3533310d59e3f655f236d4efc43a450f3 | 0 | ArekkuusuJerii/Solar | /*******************************************************************************
* Arekkuusu / Solar 2017
*
* This project is licensed under the MIT.
* The source code is available on github:
* https://github.com/ArekkuusuJerii/Solar#solar
******************************************************************************/
package arekkuusu.solar.common.block;
import arekkuusu.solar.api.state.MoonPhase;
import arekkuusu.solar.api.state.State;
import arekkuusu.solar.client.util.baker.DummyBakedRegistry;
import arekkuusu.solar.client.util.baker.baked.BakedCosmicResonator;
import arekkuusu.solar.client.util.helper.ModelHandler;
import arekkuusu.solar.common.lib.LibNames;
import net.minecraft.block.material.Material;
import net.minecraft.block.state.BlockStateContainer;
import net.minecraft.block.state.IBlockState;
import net.minecraft.entity.EntityLivingBase;
import net.minecraft.util.EnumFacing;
import net.minecraft.util.EnumHand;
import net.minecraft.util.math.AxisAlignedBB;
import net.minecraft.util.math.BlockPos;
import net.minecraft.world.IBlockAccess;
import net.minecraft.world.World;
import net.minecraftforge.fml.relauncher.Side;
import net.minecraftforge.fml.relauncher.SideOnly;
import java.util.Random;
/**
* Created by <Arekkuusu> on 29/12/2017.
* It's distributed as part of Solar.
*/
@SuppressWarnings("deprecation")
public class BlockCelestialResonator extends BlockBase {
	private static final AxisAlignedBB BB = new AxisAlignedBB(0.3D, 0.3D, 0.3D, 0.7D, 0.7D, 0.7D);
public BlockCelestialResonator() {
super(LibNames.CELESTIAL_RESONATOR, Material.ROCK);
setDefaultState(getDefaultState().withProperty(MoonPhase.MOON_PHASE, MoonPhase.FULL_MOON).withProperty(State.ACTIVE, false));
setHarvestLevel(Tool.PICK, ToolLevel.WOOD_GOLD);
setHardness(0.5F);
}
@Override
public void onBlockAdded(World world, BlockPos pos, IBlockState state) {
if(!world.isRemote) {
world.scheduleUpdate(pos, this, tickRate(world));
}
}
@Override
public void updateTick(World world, BlockPos pos, IBlockState state, Random rand) {
if(!world.isRemote) {
MoonPhase currentPhase = state.getValue(MoonPhase.MOON_PHASE);
MoonPhase newPhase = MoonPhase.getMoonPhase(world);
if(currentPhase != newPhase) {
world.setBlockState(pos, state.withProperty(MoonPhase.MOON_PHASE, newPhase).withProperty(State.ACTIVE, true));
} else if(state.getValue(State.ACTIVE)) {
world.setBlockState(pos, state.withProperty(State.ACTIVE, false));
}
world.scheduleUpdate(pos, this, tickRate(world));
}
}
@Override
public IBlockState getStateForPlacement(World world, BlockPos pos, EnumFacing facing, float hitX, float hitY, float hitZ, int meta, EntityLivingBase placer, EnumHand hand) {
return getDefaultState().withProperty(MoonPhase.MOON_PHASE, MoonPhase.getMoonPhase(world));
}
@Override
public int tickRate(World world) {
return 1;
}
@Override
public boolean canProvidePower(IBlockState state) {
return state.getValue(State.ACTIVE);
}
@Override
public int getWeakPower(IBlockState state, IBlockAccess world, BlockPos pos, EnumFacing side) {
return state.getValue(State.ACTIVE) ? 1 : 0;
}
@Override
public int getLightValue(IBlockState state, IBlockAccess world, BlockPos pos) {
MoonPhase phase = state.getValue(MoonPhase.MOON_PHASE);
switch(phase) {
case FULL_MOON:
return 15;
case WAXING_GIBBOUS:
case WANING_GIBBOUS:
return 12;
case FIRST_QUARTER:
case LAST_QUARTER:
return 8;
case WAXING_CRESCENT:
case WANING_CRESCENT:
return 3;
case NEW_MOON:
case ECLIPSE:
return 0;
}
return 0;
}
@Override
public int getMetaFromState(IBlockState state) {
int i = state.getValue(MoonPhase.MOON_PHASE).ordinal();
if(state.getValue(State.ACTIVE)) {
i |= 8;
}
return i;
}
@Override
public IBlockState getStateFromMeta(int meta) {
MoonPhase phase = MoonPhase.values()[meta & 7];
return getDefaultState().withProperty(MoonPhase.MOON_PHASE, phase).withProperty(State.ACTIVE, (meta & 8) > 0);
}
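	// Worked example: with phase ordinal 5 and ACTIVE set, meta = 5 | 8 = 13;
	// decoding reads the phase from bits 0-2 (13 & 7 == 5) and ACTIVE from bit 3.
	// Note the three phase bits only cover ordinals 0-7, so if MoonPhase declares
	// more than eight constants (the light values above list nine) the last one
	// cannot round-trip through metadata.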
@Override
protected BlockStateContainer createBlockState() {
return new BlockStateContainer(this, State.ACTIVE, MoonPhase.MOON_PHASE);
}
@Override
public boolean isFullCube(IBlockState state) {
return false;
}
@Override
public boolean isOpaqueCube(IBlockState state) {
return false;
}
@Override
public AxisAlignedBB getBoundingBox(IBlockState state, IBlockAccess source, BlockPos pos) {
return BB;
}
@Override
@SideOnly(Side.CLIENT)
public void registerModel() {
DummyBakedRegistry.register(this, BakedCosmicResonator::new);
ModelHandler.registerModel(this, 0, "");
}
}
| src/main/java/arekkuusu/solar/common/block/BlockCelestialResonator.java | /*******************************************************************************
* Arekkuusu / Solar 2017
*
* This project is licensed under the MIT.
* The source code is available on github:
* https://github.com/ArekkuusuJerii/Solar#solar
******************************************************************************/
package arekkuusu.solar.common.block;
import arekkuusu.solar.api.state.MoonPhase;
import arekkuusu.solar.api.state.State;
import arekkuusu.solar.client.util.baker.DummyBakedRegistry;
import arekkuusu.solar.client.util.baker.baked.BakedCosmicResonator;
import arekkuusu.solar.client.util.helper.ModelHandler;
import arekkuusu.solar.common.lib.LibNames;
import net.minecraft.block.material.Material;
import net.minecraft.block.state.BlockStateContainer;
import net.minecraft.block.state.IBlockState;
import net.minecraft.util.EnumFacing;
import net.minecraft.util.math.AxisAlignedBB;
import net.minecraft.util.math.BlockPos;
import net.minecraft.world.IBlockAccess;
import net.minecraft.world.World;
import net.minecraftforge.fml.relauncher.Side;
import net.minecraftforge.fml.relauncher.SideOnly;
import java.util.Random;
/**
* Created by <Arekkuusu> on 29/12/2017.
* It's distributed as part of Solar.
*/
@SuppressWarnings("deprecation")
public class BlockCelestialResonator extends BlockBase {
	private static final AxisAlignedBB BB = new AxisAlignedBB(0.3D, 0.3D, 0.3D, 0.7D, 0.7D, 0.7D);
public BlockCelestialResonator() {
super(LibNames.CELESTIAL_RESONATOR, Material.ROCK);
setDefaultState(getDefaultState().withProperty(State.ACTIVE, false));
setHarvestLevel(Tool.PICK, ToolLevel.WOOD_GOLD);
setHardness(0.5F);
}
@Override
public void onBlockAdded(World world, BlockPos pos, IBlockState state) {
if(!world.isRemote) {
world.scheduleUpdate(pos, this, tickRate(world));
}
}
@Override
public void updateTick(World world, BlockPos pos, IBlockState state, Random rand) {
if(!world.isRemote) {
MoonPhase currentPhase = state.getValue(MoonPhase.MOON_PHASE);
MoonPhase newPhase = MoonPhase.getMoonPhase(world);
if(currentPhase != newPhase) {
world.setBlockState(pos, state.withProperty(MoonPhase.MOON_PHASE, newPhase).withProperty(State.ACTIVE, true));
} else if(state.getValue(State.ACTIVE)) {
world.setBlockState(pos, state.withProperty(State.ACTIVE, false));
}
world.scheduleUpdate(pos, this, tickRate(world));
}
}
@Override
public int tickRate(World world) {
return 1;
}
@Override
public boolean canProvidePower(IBlockState state) {
return state.getValue(State.ACTIVE);
}
@Override
public int getWeakPower(IBlockState state, IBlockAccess world, BlockPos pos, EnumFacing side) {
return state.getValue(State.ACTIVE) ? 1 : 0;
}
@Override
public int getLightValue(IBlockState state, IBlockAccess world, BlockPos pos) {
MoonPhase phase = state.getValue(MoonPhase.MOON_PHASE);
switch(phase) {
case FULL_MOON:
return 15;
case WAXING_GIBBOUS:
case WANING_GIBBOUS:
return 12;
case FIRST_QUARTER:
case LAST_QUARTER:
return 8;
case WAXING_CRESCENT:
case WANING_CRESCENT:
return 3;
case NEW_MOON:
case ECLIPSE:
return 0;
}
return 0;
}
@Override
public int getMetaFromState(IBlockState state) {
int i = state.getValue(MoonPhase.MOON_PHASE).ordinal();
if(state.getValue(State.ACTIVE)) {
i |= 8;
}
return i;
}
@Override
public IBlockState getStateFromMeta(int meta) {
MoonPhase phase = MoonPhase.values()[meta & 7];
return getDefaultState().withProperty(MoonPhase.MOON_PHASE, phase).withProperty(State.ACTIVE, (meta & 8) > 0);
}
@Override
protected BlockStateContainer createBlockState() {
return new BlockStateContainer(this, State.ACTIVE, MoonPhase.MOON_PHASE);
}
@Override
public boolean isFullCube(IBlockState state) {
return false;
}
@Override
public boolean isOpaqueCube(IBlockState state) {
return false;
}
@Override
public AxisAlignedBB getBoundingBox(IBlockState state, IBlockAccess source, BlockPos pos) {
return BB;
}
@Override
@SideOnly(Side.CLIENT)
public void registerModel() {
DummyBakedRegistry.register(this, BakedCosmicResonator::new);
ModelHandler.registerModel(this, 0, "");
}
}
| Default Moon Phase
| src/main/java/arekkuusu/solar/common/block/BlockCelestialResonator.java | Default Moon Phase | <ide><path>rc/main/java/arekkuusu/solar/common/block/BlockCelestialResonator.java
<ide> import net.minecraft.block.material.Material;
<ide> import net.minecraft.block.state.BlockStateContainer;
<ide> import net.minecraft.block.state.IBlockState;
<add>import net.minecraft.entity.EntityLivingBase;
<ide> import net.minecraft.util.EnumFacing;
<add>import net.minecraft.util.EnumHand;
<ide> import net.minecraft.util.math.AxisAlignedBB;
<ide> import net.minecraft.util.math.BlockPos;
<ide> import net.minecraft.world.IBlockAccess;
<ide>
<ide> public BlockCelestialResonator() {
<ide> super(LibNames.CELESTIAL_RESONATOR, Material.ROCK);
<del> setDefaultState(getDefaultState().withProperty(State.ACTIVE, false));
<add> setDefaultState(getDefaultState().withProperty(MoonPhase.MOON_PHASE, MoonPhase.FULL_MOON).withProperty(State.ACTIVE, false));
<ide> setHarvestLevel(Tool.PICK, ToolLevel.WOOD_GOLD);
<ide> setHardness(0.5F);
<ide> }
<ide> }
<ide> world.scheduleUpdate(pos, this, tickRate(world));
<ide> }
<add> }
<add>
<add> @Override
<add> public IBlockState getStateForPlacement(World world, BlockPos pos, EnumFacing facing, float hitX, float hitY, float hitZ, int meta, EntityLivingBase placer, EnumHand hand) {
<add> return getDefaultState().withProperty(MoonPhase.MOON_PHASE, MoonPhase.getMoonPhase(world));
<ide> }
<ide>
<ide> @Override |
|
Java | apache-2.0 | 2dcef62a48e12bf6c4d8ec4fcb25e12b7f2754f0 | 0 | gchq/stroom,gchq/stroom,gchq/stroom,gchq/stroom,gchq/stroom,gchq/stroom | /*
* Copyright 2016 Crown Copyright
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package stroom.util.shared;
import java.util.Comparator;
public final class ModelStringUtil {
private static final int METRIC_DIV = 1000;
private static final int IEC_BYTE_DIV = 1024;
private static Divider[] SIZE_DIVIDER = new Divider[] {
new Divider(1, ""),
new Divider(METRIC_DIV, "K"),
new Divider(METRIC_DIV, "M"),
new Divider(METRIC_DIV, "G"),
new Divider(METRIC_DIV, "T")
};
private static Divider[] METRIC_BYTE_SIZE_DIVIDER = new Divider[] {
new Divider(1, "B", "b", "bytes", ""),
new Divider(METRIC_DIV, "K", "KB"),
new Divider(METRIC_DIV, "M", "MB"),
new Divider(METRIC_DIV, "G", "GB"),
new Divider(METRIC_DIV, "T", "TB")
};
private static Divider[] IEC_BYTE_SIZE_DIVIDER = new Divider[]{
new Divider(1, "B", "b", "bytes", ""),
new Divider(IEC_BYTE_DIV, "K", "KB", "KiB"),
new Divider(IEC_BYTE_DIV, "M", "MB", "MiB"),
new Divider(IEC_BYTE_DIV, "G", "GB", "GiB"),
new Divider(IEC_BYTE_DIV, "T", "TB", "TiB")
};
/**
     * Format always appends "ms", but parse treats "ms" and an empty suffix as the same thing
*/
private static Divider[] TIME_SIZE_DIVIDER = new Divider[]{
new Divider(1, "ms", ""),
new Divider(1000, "s"),
new Divider(60, "m"),
new Divider(60, "h"),
new Divider(24, "d")
};
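    // e.g. formatDurationString(1500L) -> "1.5 s" and formatDurationString(90000L) -> "1.5 m";
    // parseDurationString accepts "1.5 s" as well as a bare "1500" (empty suffix means ms).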
// private static Divider[] TIME_SIZE_DIVIDER_PARSE = new Divider(1, "", new Divider(1, " ms",
// new Divider(1000, " s", new Divider(60, " m", new Divider(60, " h", new Divider(24, " d", null))))));
private ModelStringUtil() {
// Utility class.
}
/**
* Pad a string out (yes I know apache commons can do this but it's used by
* GWT).
*
* @param amount pad size
* @param in string
* @return padded value.
*/
public static String zeroPad(final int amount, final String in) {
final int left = amount - in.length();
final StringBuilder out = new StringBuilder();
for (int i = 0; i < left; i++) {
out.append("0");
}
out.append(in);
return out.toString();
}
/**
     * Return a nice string like "25 B", "4.0 K", "45 M", etc. (powers of 1000).
*/
public static String formatMetricByteSizeString(final Long streamSize) {
if (streamSize == null) {
return "";
}
return formatNumberString(streamSize, METRIC_BYTE_SIZE_DIVIDER);
}
/**
     * Return a nice string like "25 B", "4.0 K", "45 M", etc. (powers of 1024).
*/
public static String formatIECByteSizeString(final Long streamSize) {
if (streamSize == null) {
return "";
}
return formatNumberString(streamSize, IEC_BYTE_SIZE_DIVIDER);
}
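    // Worked examples: 25 -> "25 B", 1536 -> "1.5 K", 1048576 -> "1.0 M"
    // (successive divisions by 1024; values under 10 keep one decimal place).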
public static String formatDurationString(final Long ms) {
if (ms == null) {
return "";
}
return formatNumberString(ms, TIME_SIZE_DIVIDER);
}
private static final String formatNumberString(final double number, final Divider[] dividers) {
double nextNumber = number;
Divider lastDivider = dividers[0];
for (final Divider divider : dividers) {
if (nextNumber < divider.div) {
break;
}
nextNumber = nextNumber / divider.div;
lastDivider = divider;
}
// Show the first dec place if the number is smaller than 10
if (lastDivider != null) {
if (nextNumber < 10) {
String str = String.valueOf(nextNumber);
final int decPt = str.indexOf(".");
if (decPt > 0 && decPt + 2 < str.length()) {
str = str.substring(0, decPt + 2);
}
return str + " " + lastDivider.unit[0];
} else {
return (long) nextNumber + " " + lastDivider.unit[0];
}
}
return String.valueOf(nextNumber);
}
public static final Long parseNumberString(final String str) throws NumberFormatException {
return parseNumberString(str, SIZE_DIVIDER);
}
public static final Long parseMetricByteSizeString(final String str) throws NumberFormatException {
return parseNumberString(str, METRIC_BYTE_SIZE_DIVIDER);
}
public static final Long parseIECByteSizeString(final String str) throws NumberFormatException {
return parseNumberString(str, IEC_BYTE_SIZE_DIVIDER);
}
public static final Long parseDurationString(final String str) throws NumberFormatException {
return parseNumberString(str, TIME_SIZE_DIVIDER);
}
public static final Integer parseNumberStringAsInt(final String str) throws NumberFormatException {
final Long num = parseNumberString(str, SIZE_DIVIDER);
if (num == null) {
return null;
}
if (num.longValue() > Integer.MAX_VALUE) {
            throw new NumberFormatException(str + " is too big for an int. (Max value " + formatCsv(Integer.MAX_VALUE)
                    + " and your number was " + formatCsv(num) + ")");
}
if (num.longValue() < Integer.MIN_VALUE) {
            throw new NumberFormatException(str + " is too small for an int. (Min value "
                    + formatCsv(Integer.MIN_VALUE) + " and your number was " + formatCsv(num) + ")");
}
return num.intValue();
}
public static final String format(final HasDisplayValue hasDisplayValue) {
if (hasDisplayValue == null) {
return "";
} else {
return hasDisplayValue.getDisplayValue();
}
}
private static final Long parseNumberString(String str, final Divider[] dividers) throws NumberFormatException {
if (str == null) {
return null;
}
        // Can't fix this FindBugs warning as this code is also used in the (GWT) UI
str = str.trim().toUpperCase();
// Kill Quotes
if (str.startsWith("\'") || str.startsWith("\"")) {
str = str.substring(1);
}
if (str.endsWith("\'") || str.endsWith("\"")) {
str = str.substring(0, str.length() - 1);
}
final StringBuilder numPart = new StringBuilder();
final StringBuilder suffixPart = new StringBuilder();
boolean inNum = true;
for (int i = 0; i < str.length(); i++) {
final char c = str.charAt(i);
if (inNum) {
if (Character.isDigit(c) || c == '.') {
numPart.append(c);
} else {
inNum = false;
}
}
if (!inNum) {
suffixPart.append(c);
}
}
if (numPart.length() == 0) {
return null;
}
final double d = Double.parseDouble(numPart.toString());
final String suffix = suffixPart.toString().trim();
long multiplier = 1;
for (final Divider divider : dividers) {
multiplier *= divider.div;
for (final String unit : divider.unit) {
if (unit.equalsIgnoreCase(suffix)) {
return (long) (multiplier * d);
}
}
}
throw new NumberFormatException("Unable to parse " + str + " as suffix not recognised");
}
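    // e.g. parseNumberString("1.5K") == 1500 (metric, x1000 per step), while
    // parseIECByteSizeString("2KB") == 2048 (x1024 per step).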
public static String formatCsv(final Number number) {
if (number == null) {
return "";
}
return formatCsv(number.longValue());
}
public static String formatCsv(final Long number) {
if (number == null) {
return "";
}
final String s = String.valueOf(number);
final StringBuilder sb = new StringBuilder();
for (int i = 0; i < s.length(); i++) {
if ((s.length() - i) % 3 == 0) {
if (sb.length() > 0) {
sb.append(",");
}
}
sb.append(s.charAt(i));
}
return sb.toString();
}
public static String toCamelCase(final String string) {
final char[] chars = string.toCharArray();
for (int i = 0; i < chars.length; i++) {
final char c = chars[i];
if (Character.isUpperCase(c)) {
// If we have moved beyond the first character, aren't yet at
// the end and the next character is lower case then this must
// be the first capital of the next word so don't lower case and
// stop any further modification.
if (i > 0 && i < chars.length - 1 && Character.isLowerCase(chars[i + 1])) {
break;
} else {
chars[i] = Character.toLowerCase(c);
}
} else {
break;
}
}
return new String(chars);
}
public static String toDisplayValue(final String string) {
if (string == null) {
return "null";
}
final char[] chars = string.toCharArray();
final char[] output = new char[chars.length * 2];
int i = 0;
int j = 0;
for (; i < chars.length; i++, j++) {
final char c = chars[i];
if (i > 0 && i < chars.length - 1 && Character.isUpperCase(c) && Character.isLowerCase(chars[i + 1])) {
// If we have moved beyond the first character, aren't yet at
// the end and the next character is lower case then this must
// be the first capital of the next word so insert a space.
output[j++] = ' ';
}
output[j] = c;
}
return new String(output, 0, j);
}
public static Comparator<String> pathComparator() {
return (o1, o2) -> {
final int min = Math.min(o1.length(), o2.length());
for (int i = 0; i < min; i++) {
final int r = ((Character) o1.charAt(i)).compareTo(o2.charAt(i));
if (r != 0) {
return r;
}
}
return ((Integer) o1.length()).compareTo(o2.length());
};
}
private static class Divider {
final int div;
final String[] unit;
public Divider(final int div, final String... unit) {
this.div = div;
this.unit = unit;
}
}
}
| stroom-util-shared/src/main/java/stroom/util/shared/ModelStringUtil.java | /*
* Copyright 2016 Crown Copyright
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package stroom.util.shared;
import java.util.Comparator;
public final class ModelStringUtil {
private static final int METRIC_DIV = 1000;
private static final int IEC_BYTE_DIV = 1024;
private static Divider[] SIZE_DIVIDER = new Divider[] {
new Divider(1, ""),
new Divider(METRIC_DIV, "K"),
new Divider(METRIC_DIV, "M"),
new Divider(METRIC_DIV, "G"),
new Divider(METRIC_DIV, "T")
};
private static Divider[] METRIC_BYTE_SIZE_DIVIDER = new Divider[] {
new Divider(1, "B", "b", "bytes", ""),
new Divider(METRIC_DIV, "K", "KB"),
new Divider(METRIC_DIV, "M", "MB"),
new Divider(METRIC_DIV, "G", "GB"),
new Divider(METRIC_DIV, "T", "TB")
};
private static Divider[] IEC_BYTE_SIZE_DIVIDER = new Divider[]{
new Divider(1, "B", "b", "bytes", ""),
new Divider(IEC_BYTE_DIV, "K", "KB", "KiB"),
new Divider(IEC_BYTE_DIV, "M", "MB", "MiB"),
new Divider(IEC_BYTE_DIV, "G", "GB", "GiB"),
new Divider(IEC_BYTE_DIV, "T", "TB", "TiB")
};
/**
     * Format always appends "ms", but parse treats "ms" and an empty suffix as the same thing
*/
private static Divider[] TIME_SIZE_DIVIDER = new Divider[]{
new Divider(1, "ms", ""),
new Divider(1000, "s"),
new Divider(60, "m"),
new Divider(60, "h"),
new Divider(24, "d")
};
// private static Divider[] TIME_SIZE_DIVIDER_PARSE = new Divider(1, "", new Divider(1, " ms",
// new Divider(1000, " s", new Divider(60, " m", new Divider(60, " h", new Divider(24, " d", null))))));
private ModelStringUtil() {
// Utility class.
}
/**
* Pad a string out (yes I know apache commons can do this but it's used by
* GWT).
*
* @param amount pad size
* @param in string
* @return padded value.
*/
public static String zeroPad(final int amount, final String in) {
final int left = amount - in.length();
final StringBuilder out = new StringBuilder();
for (int i = 0; i < left; i++) {
out.append("0");
}
out.append(in);
return out.toString();
}
/**
     * Return a nice string like "25 B", "4.0 K", "45 M", etc. (powers of 1000).
*/
public static String formatMetricByteSizeString(final Long streamSize) {
if (streamSize == null) {
return "";
}
return formatNumberString(streamSize, METRIC_BYTE_SIZE_DIVIDER);
}
/**
     * Return a nice string like "25 B", "4.0 K", "45 M", etc. (powers of 1024).
*/
public static String formatIECByteSizeString(final Long streamSize) {
if (streamSize == null) {
return "";
}
return formatNumberString(streamSize, IEC_BYTE_SIZE_DIVIDER);
}
public static String formatDurationString(final Long ms) {
if (ms == null) {
return "";
}
return formatNumberString(ms, TIME_SIZE_DIVIDER);
}
private static final String formatNumberString(final double number, final Divider[] dividers) {
double nextNumber = number;
Divider lastDivider = null;
for (final Divider divider : dividers) {
if (nextNumber < divider.div) {
break;
}
nextNumber = nextNumber / divider.div;
lastDivider = divider;
}
// Show the first dec place if the number is smaller than 10
if (lastDivider != null) {
if (nextNumber < 10) {
String str = String.valueOf(nextNumber);
final int decPt = str.indexOf(".");
if (decPt > 0 && decPt + 2 < str.length()) {
str = str.substring(0, decPt + 2);
}
return str + " " + lastDivider.unit[0];
} else {
return (long) nextNumber + " " + lastDivider.unit[0];
}
}
return String.valueOf(nextNumber);
}
public static final Long parseNumberString(final String str) throws NumberFormatException {
return parseNumberString(str, SIZE_DIVIDER);
}
public static final Long parseMetricByteSizeString(final String str) throws NumberFormatException {
return parseNumberString(str, METRIC_BYTE_SIZE_DIVIDER);
}
public static final Long parseIECByteSizeString(final String str) throws NumberFormatException {
return parseNumberString(str, IEC_BYTE_SIZE_DIVIDER);
}
public static final Long parseDurationString(final String str) throws NumberFormatException {
return parseNumberString(str, TIME_SIZE_DIVIDER);
}
public static final Integer parseNumberStringAsInt(final String str) throws NumberFormatException {
final Long num = parseNumberString(str, SIZE_DIVIDER);
if (num == null) {
return null;
}
if (num.longValue() > Integer.MAX_VALUE) {
            throw new NumberFormatException(str + " is too big for an int. (Max value " + formatCsv(Integer.MAX_VALUE)
                    + " and your number was " + formatCsv(num) + ")");
}
if (num.longValue() < Integer.MIN_VALUE) {
            throw new NumberFormatException(str + " is too small for an int. (Min value "
                    + formatCsv(Integer.MIN_VALUE) + " and your number was " + formatCsv(num) + ")");
}
return num.intValue();
}
public static final String format(final HasDisplayValue hasDisplayValue) {
if (hasDisplayValue == null) {
return "";
} else {
return hasDisplayValue.getDisplayValue();
}
}
private static final Long parseNumberString(String str, final Divider[] dividers) throws NumberFormatException {
if (str == null) {
return null;
}
        // Can't fix this FindBugs warning as this code is also used in the (GWT) UI
str = str.trim().toUpperCase();
// Kill Quotes
if (str.startsWith("\'") || str.startsWith("\"")) {
str = str.substring(1);
}
if (str.endsWith("\'") || str.endsWith("\"")) {
str = str.substring(0, str.length() - 1);
}
final StringBuilder numPart = new StringBuilder();
final StringBuilder suffixPart = new StringBuilder();
boolean inNum = true;
for (int i = 0; i < str.length(); i++) {
final char c = str.charAt(i);
if (inNum) {
if (Character.isDigit(c) || c == '.') {
numPart.append(c);
} else {
inNum = false;
}
}
if (!inNum) {
suffixPart.append(c);
}
}
if (numPart.length() == 0) {
return null;
}
final double d = Double.parseDouble(numPart.toString());
final String suffix = suffixPart.toString().trim();
long multiplier = 1;
for (final Divider divider : dividers) {
multiplier *= divider.div;
for (final String unit : divider.unit) {
if (unit.equalsIgnoreCase(suffix)) {
return (long) (multiplier * d);
}
}
}
throw new NumberFormatException("Unable to parse " + str + " as suffix not recognised");
}
public static String formatCsv(final Number number) {
if (number == null) {
return "";
}
return formatCsv(number.longValue());
}
public static String formatCsv(final Long number) {
if (number == null) {
return "";
}
final String s = String.valueOf(number);
final StringBuilder sb = new StringBuilder();
for (int i = 0; i < s.length(); i++) {
if ((s.length() - i) % 3 == 0) {
if (sb.length() > 0) {
sb.append(",");
}
}
sb.append(s.charAt(i));
}
return sb.toString();
}
public static String toCamelCase(final String string) {
final char[] chars = string.toCharArray();
for (int i = 0; i < chars.length; i++) {
final char c = chars[i];
if (Character.isUpperCase(c)) {
// If we have moved beyond the first character, aren't yet at
// the end and the next character is lower case then this must
// be the first capital of the next word so don't lower case and
// stop any further modification.
if (i > 0 && i < chars.length - 1 && Character.isLowerCase(chars[i + 1])) {
break;
} else {
chars[i] = Character.toLowerCase(c);
}
} else {
break;
}
}
return new String(chars);
}
public static String toDisplayValue(final String string) {
if (string == null) {
return "null";
}
final char[] chars = string.toCharArray();
final char[] output = new char[chars.length * 2];
int i = 0;
int j = 0;
for (; i < chars.length; i++, j++) {
final char c = chars[i];
if (i > 0 && i < chars.length - 1 && Character.isUpperCase(c) && Character.isLowerCase(chars[i + 1])) {
// If we have moved beyond the first character, aren't yet at
// the end and the next character is lower case then this must
// be the first capital of the next word so insert a space.
output[j++] = ' ';
}
output[j] = c;
}
return new String(output, 0, j);
}
public static Comparator<String> pathComparator() {
return (o1, o2) -> {
final int min = Math.min(o1.length(), o2.length());
for (int i = 0; i < min; i++) {
final int r = ((Character) o1.charAt(i)).compareTo(o2.charAt(i));
if (r != 0) {
return r;
}
}
return ((Integer) o1.length()).compareTo(o2.length());
};
}
private static class Divider {
final int div;
final String[] unit;
public Divider(final int div, final String... unit) {
this.div = div;
this.unit = unit;
}
}
}
| Issue #191 : Mebibytes (multiples of 1024) etc are now used as standard throughout the application for both memory and disk sizes and have single letter suffixes (B, K, M, G, T).
| stroom-util-shared/src/main/java/stroom/util/shared/ModelStringUtil.java | Issue #191 : Mebibytes (multiples of 1024) etc are now used as standard throughout the application for both memory and disk sizes and have single letter suffixes (B, K, M, G, T). | <ide><path>troom-util-shared/src/main/java/stroom/util/shared/ModelStringUtil.java
<ide>
<ide> private static final String formatNumberString(final double number, final Divider[] dividers) {
<ide> double nextNumber = number;
<del> Divider lastDivider = null;
<add> Divider lastDivider = dividers[0];
<ide>
<ide> for (final Divider divider : dividers) {
<ide> if (nextNumber < divider.div) { |
|
Java | apache-2.0 | 758268a9475a86794aa5d7845a768095a615279b | 0 | mythguided/hydra,mythguided/hydra,mythguided/hydra,mythguided/hydra | /*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.addthis.hydra.query.web;
import java.util.List;
import java.util.concurrent.TimeUnit;
import com.addthis.bundle.core.Bundle;
import com.addthis.bundle.core.BundleField;
import com.addthis.bundle.value.ValueObject;
import static com.addthis.hydra.query.web.HttpUtils.setContentTypeHeader;
import io.netty.channel.ChannelHandlerContext;
class OutputHTML extends AbstractHttpOutput {
OutputHTML(ChannelHandlerContext ctx) {
super(ctx);
setContentTypeHeader(response, "text/html; charset=utf-8");
}
@Override
public void writeStart() {
super.writeStart();
ctx.write("<table border=1 cellpadding=1 cellspacing=0>\n");
}
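    // writeStart() is now the only place that opens the table; the old constructor
    // also wrote the opening <table> tag, which emitted it twice (see diff below).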
@Override
public synchronized void send(Bundle row) {
ctx.write("<tr>");
for (BundleField field : row.getFormat()) {
ValueObject o = row.getValue(field);
ctx.write("<td>" + o + "</td>");
}
ctx.write("</tr>\n");
}
@Override
public void send(List<Bundle> bundles) {
if (bundles != null && !bundles.isEmpty()) {
for (Bundle bundle : bundles) {
send(bundle);
}
}
}
@Override
public void sendComplete() {
ctx.write("</table>");
HttpQueryCallHandler.queryTimes.update(System.currentTimeMillis() - startTime, TimeUnit.MILLISECONDS);
super.sendComplete();
}
}
| hydra-main/src/main/java/com/addthis/hydra/query/web/OutputHTML.java | /*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.addthis.hydra.query.web;
import java.util.List;
import java.util.concurrent.TimeUnit;
import com.addthis.bundle.core.Bundle;
import com.addthis.bundle.core.BundleField;
import com.addthis.bundle.value.ValueObject;
import static com.addthis.hydra.query.web.HttpUtils.setContentTypeHeader;
import io.netty.channel.ChannelHandlerContext;
class OutputHTML extends AbstractHttpOutput {
OutputHTML(ChannelHandlerContext ctx) {
super(ctx);
setContentTypeHeader(response, "text/html; charset=utf-8");
ctx.write("<table border=1 cellpadding=1 cellspacing=0>\n");
}
@Override
public void writeStart() {
super.writeStart();
ctx.write("<table border=1 cellpadding=1 cellspacing=0>\n");
}
@Override
public synchronized void send(Bundle row) {
ctx.write("<tr>");
for (BundleField field : row.getFormat()) {
ValueObject o = row.getValue(field);
ctx.write("<td>" + o + "</td>");
}
ctx.write("</tr>\n");
}
@Override
public void send(List<Bundle> bundles) {
if (bundles != null && !bundles.isEmpty()) {
for (Bundle bundle : bundles) {
send(bundle);
}
}
}
@Override
public void sendComplete() {
ctx.write("</table>");
HttpQueryCallHandler.queryTimes.update(System.currentTimeMillis() - startTime, TimeUnit.MILLISECONDS);
super.sendComplete();
}
}
| don't write the table start twice for OutputHTML
| hydra-main/src/main/java/com/addthis/hydra/query/web/OutputHTML.java | don't write the table start twice for OutputHTML | <ide><path>ydra-main/src/main/java/com/addthis/hydra/query/web/OutputHTML.java
<ide> OutputHTML(ChannelHandlerContext ctx) {
<ide> super(ctx);
<ide> setContentTypeHeader(response, "text/html; charset=utf-8");
<del> ctx.write("<table border=1 cellpadding=1 cellspacing=0>\n");
<ide> }
<ide>
<ide> @Override |
|
Java | apache-2.0 | 7f1f58014f905b0279e9baef4e031806d6266bdc | 0 | strapdata/elassandra,strapdata/elassandra,strapdata/elassandra,vroyer/elassandra,vroyer/elassandra,vroyer/elassandra,strapdata/elassandra,strapdata/elassandra | /*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.ccr;
import org.elasticsearch.ElasticsearchTimeoutException;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest;
import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotRequest;
import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
import org.elasticsearch.action.get.GetResponse;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.action.support.PlainActionFuture;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MappingMetaData;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.common.xcontent.support.XContentMapValues;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.repositories.RepositoriesService;
import org.elasticsearch.repositories.Repository;
import org.elasticsearch.repositories.RepositoryMissingException;
import org.elasticsearch.snapshots.RestoreInfo;
import org.elasticsearch.snapshots.RestoreService;
import org.elasticsearch.test.transport.MockTransportService;
import org.elasticsearch.transport.TransportActionProxy;
import org.elasticsearch.transport.TransportService;
import org.elasticsearch.xpack.CcrIntegTestCase;
import org.elasticsearch.xpack.ccr.action.repositories.GetCcrRestoreFileChunkAction;
import org.elasticsearch.xpack.ccr.action.repositories.PutCcrRestoreSessionAction;
import org.elasticsearch.xpack.ccr.repository.CcrRepository;
import org.elasticsearch.xpack.ccr.repository.CcrRestoreSourceService;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import static java.util.Collections.singletonMap;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.greaterThan;
import static org.hamcrest.Matchers.instanceOf;
import static org.hamcrest.Matchers.lessThan;
public class CcrRepositoryIT extends CcrIntegTestCase {
private final IndicesOptions indicesOptions = IndicesOptions.strictSingleIndexNoExpandForbidClosed();
public void testThatRepositoryIsPutAndRemovedWhenRemoteClusterIsUpdated() throws Exception {
String leaderClusterRepoName = CcrRepository.NAME_PREFIX + "leader_cluster";
final RepositoriesService repositoriesService =
getFollowerCluster().getDataOrMasterNodeInstances(RepositoriesService.class).iterator().next();
try {
Repository repository = repositoriesService.repository(leaderClusterRepoName);
assertEquals(CcrRepository.TYPE, repository.getMetadata().type());
assertEquals(leaderClusterRepoName, repository.getMetadata().name());
} catch (RepositoryMissingException e) {
fail("need repository");
}
ClusterUpdateSettingsRequest putSecondCluster = new ClusterUpdateSettingsRequest();
String address = getFollowerCluster().getDataNodeInstance(TransportService.class).boundAddress().publishAddress().toString();
putSecondCluster.persistentSettings(Settings.builder().put("cluster.remote.follower_cluster_copy.seeds", address));
assertAcked(followerClient().admin().cluster().updateSettings(putSecondCluster).actionGet());
String followerCopyRepoName = CcrRepository.NAME_PREFIX + "follower_cluster_copy";
try {
Repository repository = repositoriesService.repository(followerCopyRepoName);
assertEquals(CcrRepository.TYPE, repository.getMetadata().type());
assertEquals(followerCopyRepoName, repository.getMetadata().name());
} catch (RepositoryMissingException e) {
fail("need repository");
}
ClusterUpdateSettingsRequest deleteLeaderCluster = new ClusterUpdateSettingsRequest();
deleteLeaderCluster.persistentSettings(Settings.builder().put("cluster.remote.leader_cluster.seeds", ""));
assertAcked(followerClient().admin().cluster().updateSettings(deleteLeaderCluster).actionGet());
expectThrows(RepositoryMissingException.class, () -> repositoriesService.repository(leaderClusterRepoName));
ClusterUpdateSettingsRequest deleteSecondCluster = new ClusterUpdateSettingsRequest();
deleteSecondCluster.persistentSettings(Settings.builder().put("cluster.remote.follower_cluster_copy.seeds", ""));
assertAcked(followerClient().admin().cluster().updateSettings(deleteSecondCluster).actionGet());
expectThrows(RepositoryMissingException.class, () -> repositoriesService.repository(followerCopyRepoName));
ClusterUpdateSettingsRequest putLeaderRequest = new ClusterUpdateSettingsRequest();
address = getLeaderCluster().getDataNodeInstance(TransportService.class).boundAddress().publishAddress().toString();
putLeaderRequest.persistentSettings(Settings.builder().put("cluster.remote.leader_cluster.seeds", address));
assertAcked(followerClient().admin().cluster().updateSettings(putLeaderRequest).actionGet());
}
public void testThatRepositoryRecoversEmptyIndexBasedOnLeaderSettings() throws IOException {
String leaderClusterRepoName = CcrRepository.NAME_PREFIX + "leader_cluster";
String leaderIndex = "index1";
String followerIndex = "index2";
final int numberOfPrimaryShards = randomIntBetween(1, 3);
final String leaderIndexSettings = getIndexSettings(numberOfPrimaryShards, between(0, 1),
singletonMap(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), "true"));
assertAcked(leaderClient().admin().indices().prepareCreate(leaderIndex).setSource(leaderIndexSettings, XContentType.JSON));
ensureLeaderGreen(leaderIndex);
final RestoreService restoreService = getFollowerCluster().getCurrentMasterNodeInstance(RestoreService.class);
final ClusterService clusterService = getFollowerCluster().getCurrentMasterNodeInstance(ClusterService.class);
Settings.Builder settingsBuilder = Settings.builder()
.put(IndexMetaData.SETTING_INDEX_PROVIDED_NAME, followerIndex)
.put(CcrSettings.CCR_FOLLOWING_INDEX_SETTING.getKey(), true);
RestoreSnapshotRequest restoreRequest = new RestoreSnapshotRequest(leaderClusterRepoName, CcrRepository.LATEST)
.indices(leaderIndex).indicesOptions(indicesOptions).renamePattern("^(.*)$")
.renameReplacement(followerIndex).masterNodeTimeout(new TimeValue(1L, TimeUnit.HOURS))
.indexSettings(settingsBuilder);
PlainActionFuture<RestoreInfo> future = PlainActionFuture.newFuture();
restoreService.restoreSnapshot(restoreRequest, waitForRestore(clusterService, future));
RestoreInfo restoreInfo = future.actionGet();
assertEquals(restoreInfo.totalShards(), restoreInfo.successfulShards());
assertEquals(0, restoreInfo.failedShards());
ClusterStateResponse leaderState = leaderClient()
.admin()
.cluster()
.prepareState()
.clear()
.setMetaData(true)
.setIndices(leaderIndex)
.get();
ClusterStateResponse followerState = followerClient()
.admin()
.cluster()
.prepareState()
.clear()
.setMetaData(true)
.setIndices(followerIndex)
.get();
IndexMetaData leaderMetadata = leaderState.getState().metaData().index(leaderIndex);
IndexMetaData followerMetadata = followerState.getState().metaData().index(followerIndex);
assertEquals(leaderMetadata.getNumberOfShards(), followerMetadata.getNumberOfShards());
Map<String, String> ccrMetadata = followerMetadata.getCustomData(Ccr.CCR_CUSTOM_METADATA_KEY);
assertEquals(leaderIndex, ccrMetadata.get(Ccr.CCR_CUSTOM_METADATA_LEADER_INDEX_NAME_KEY));
assertEquals(leaderMetadata.getIndexUUID(), ccrMetadata.get(Ccr.CCR_CUSTOM_METADATA_LEADER_INDEX_UUID_KEY));
assertEquals("leader_cluster", ccrMetadata.get(Ccr.CCR_CUSTOM_METADATA_REMOTE_CLUSTER_NAME_KEY));
assertEquals(followerIndex, followerMetadata.getSettings().get(IndexMetaData.SETTING_INDEX_PROVIDED_NAME));
assertEquals(true, IndexSettings.INDEX_SOFT_DELETES_SETTING.get(followerMetadata.getSettings()));
        // UUID is changed so that we can follow indices on the same cluster
assertNotEquals(leaderMetadata.getIndexUUID(), followerMetadata.getIndexUUID());
}
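    // The restore tests here share one pattern: build a RestoreSnapshotRequest against
    // the CCR repository, submit it through RestoreService, and block on a
    // PlainActionFuture that waitForRestore(clusterService, future) completes once the
    // restore finishes.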
public void testDocsAreRecovered() throws Exception {
ClusterUpdateSettingsRequest settingsRequest = new ClusterUpdateSettingsRequest();
String chunkSize = randomFrom("4KB", "128KB", "1MB");
settingsRequest.persistentSettings(Settings.builder().put(CcrSettings.RECOVERY_CHUNK_SIZE.getKey(), chunkSize));
assertAcked(followerClient().admin().cluster().updateSettings(settingsRequest).actionGet());
String leaderClusterRepoName = CcrRepository.NAME_PREFIX + "leader_cluster";
String leaderIndex = "index1";
String followerIndex = "index2";
final int numberOfPrimaryShards = randomIntBetween(1, 3);
final String leaderIndexSettings = getIndexSettings(numberOfPrimaryShards, between(0, 1),
singletonMap(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), "true"));
assertAcked(leaderClient().admin().indices().prepareCreate(leaderIndex).setSource(leaderIndexSettings, XContentType.JSON));
ensureLeaderGreen(leaderIndex);
final RestoreService restoreService = getFollowerCluster().getCurrentMasterNodeInstance(RestoreService.class);
final ClusterService clusterService = getFollowerCluster().getCurrentMasterNodeInstance(ClusterService.class);
final int firstBatchNumDocs = randomIntBetween(1, 64);
logger.info("Indexing [{}] docs as first batch", firstBatchNumDocs);
for (int i = 0; i < firstBatchNumDocs; i++) {
final String source = String.format(Locale.ROOT, "{\"f\":%d}", i);
leaderClient().prepareIndex("index1", "doc", Integer.toString(i)).setSource(source, XContentType.JSON).get();
}
leaderClient().admin().indices().prepareFlush(leaderIndex).setForce(true).setWaitIfOngoing(true).get();
Settings.Builder settingsBuilder = Settings.builder()
.put(IndexMetaData.SETTING_INDEX_PROVIDED_NAME, followerIndex)
.put(CcrSettings.CCR_FOLLOWING_INDEX_SETTING.getKey(), true);
RestoreSnapshotRequest restoreRequest = new RestoreSnapshotRequest(leaderClusterRepoName, CcrRepository.LATEST)
.indices(leaderIndex).indicesOptions(indicesOptions).renamePattern("^(.*)$")
.renameReplacement(followerIndex).masterNodeTimeout(new TimeValue(1L, TimeUnit.HOURS))
.indexSettings(settingsBuilder);
PlainActionFuture<RestoreInfo> future = PlainActionFuture.newFuture();
restoreService.restoreSnapshot(restoreRequest, waitForRestore(clusterService, future));
RestoreInfo restoreInfo = future.actionGet();
assertEquals(restoreInfo.totalShards(), restoreInfo.successfulShards());
assertEquals(0, restoreInfo.failedShards());
for (int i = 0; i < firstBatchNumDocs; ++i) {
assertExpectedDocument(followerIndex, i);
}
settingsRequest = new ClusterUpdateSettingsRequest();
ByteSizeValue defaultValue = CcrSettings.RECOVERY_CHUNK_SIZE.getDefault(Settings.EMPTY);
settingsRequest.persistentSettings(Settings.builder().put(CcrSettings.RECOVERY_CHUNK_SIZE.getKey(), defaultValue));
assertAcked(followerClient().admin().cluster().updateSettings(settingsRequest).actionGet());
}
public void testRateLimitingIsEmployed() throws Exception {
boolean followerRateLimiting = randomBoolean();
ClusterUpdateSettingsRequest settingsRequest = new ClusterUpdateSettingsRequest();
settingsRequest.persistentSettings(Settings.builder().put(CcrSettings.RECOVERY_MAX_BYTES_PER_SECOND.getKey(), "10K"));
if (followerRateLimiting) {
assertAcked(followerClient().admin().cluster().updateSettings(settingsRequest).actionGet());
} else {
assertAcked(leaderClient().admin().cluster().updateSettings(settingsRequest).actionGet());
}
String leaderClusterRepoName = CcrRepository.NAME_PREFIX + "leader_cluster";
String leaderIndex = "index1";
String followerIndex = "index2";
final int numberOfPrimaryShards = randomIntBetween(1, 3);
final String leaderIndexSettings = getIndexSettings(numberOfPrimaryShards, between(0, 1),
singletonMap(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), "true"));
assertAcked(leaderClient().admin().indices().prepareCreate(leaderIndex).setSource(leaderIndexSettings, XContentType.JSON));
ensureLeaderGreen(leaderIndex);
final RestoreService restoreService = getFollowerCluster().getCurrentMasterNodeInstance(RestoreService.class);
final ClusterService clusterService = getFollowerCluster().getCurrentMasterNodeInstance(ClusterService.class);
List<CcrRepository> repositories = new ArrayList<>();
List<CcrRestoreSourceService> restoreSources = new ArrayList<>();
for (RepositoriesService repositoriesService : getFollowerCluster().getDataOrMasterNodeInstances(RepositoriesService.class)) {
Repository repository = repositoriesService.repository(leaderClusterRepoName);
repositories.add((CcrRepository) repository);
}
for (CcrRestoreSourceService restoreSource : getLeaderCluster().getDataOrMasterNodeInstances(CcrRestoreSourceService.class)) {
restoreSources.add(restoreSource);
}
logger.info("--> indexing some data");
for (int i = 0; i < 100; i++) {
final String source = String.format(Locale.ROOT, "{\"f\":%d}", i);
leaderClient().prepareIndex("index1", "doc", Integer.toString(i)).setSource(source, XContentType.JSON).get();
}
leaderClient().admin().indices().prepareFlush(leaderIndex).setForce(true).setWaitIfOngoing(true).get();
Settings.Builder settingsBuilder = Settings.builder()
.put(IndexMetaData.SETTING_INDEX_PROVIDED_NAME, followerIndex)
.put(CcrSettings.CCR_FOLLOWING_INDEX_SETTING.getKey(), true);
RestoreSnapshotRequest restoreRequest = new RestoreSnapshotRequest(leaderClusterRepoName, CcrRepository.LATEST)
.indices(leaderIndex).indicesOptions(indicesOptions).renamePattern("^(.*)$")
.renameReplacement(followerIndex).masterNodeTimeout(new TimeValue(1L, TimeUnit.HOURS))
.indexSettings(settingsBuilder);
PlainActionFuture<RestoreInfo> future = PlainActionFuture.newFuture();
restoreService.restoreSnapshot(restoreRequest, waitForRestore(clusterService, future));
future.actionGet();
if (followerRateLimiting) {
assertTrue(repositories.stream().anyMatch(cr -> cr.getRestoreThrottleTimeInNanos() > 0));
} else {
assertTrue(restoreSources.stream().anyMatch(cr -> cr.getThrottleTime() > 0));
}
settingsRequest = new ClusterUpdateSettingsRequest();
ByteSizeValue defaultValue = CcrSettings.RECOVERY_MAX_BYTES_PER_SECOND.getDefault(Settings.EMPTY);
settingsRequest.persistentSettings(Settings.builder().put(CcrSettings.RECOVERY_MAX_BYTES_PER_SECOND.getKey(), defaultValue));
if (followerRateLimiting) {
assertAcked(followerClient().admin().cluster().updateSettings(settingsRequest).actionGet());
} else {
assertAcked(leaderClient().admin().cluster().updateSettings(settingsRequest).actionGet());
}
}
@AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/39245")
public void testIndividualActionsTimeout() throws Exception {
ClusterUpdateSettingsRequest settingsRequest = new ClusterUpdateSettingsRequest();
TimeValue timeValue = TimeValue.timeValueMillis(100);
settingsRequest.persistentSettings(Settings.builder().put(CcrSettings.INDICES_RECOVERY_ACTION_TIMEOUT_SETTING.getKey(), timeValue));
assertAcked(followerClient().admin().cluster().updateSettings(settingsRequest).actionGet());
String leaderClusterRepoName = CcrRepository.NAME_PREFIX + "leader_cluster";
String leaderIndex = "index1";
String followerIndex = "index2";
final int numberOfPrimaryShards = randomIntBetween(1, 3);
final String leaderIndexSettings = getIndexSettings(numberOfPrimaryShards, between(0, 1),
singletonMap(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), "true"));
assertAcked(leaderClient().admin().indices().prepareCreate(leaderIndex).setSource(leaderIndexSettings, XContentType.JSON));
ensureLeaderGreen(leaderIndex);
List<MockTransportService> transportServices = new ArrayList<>();
for (TransportService transportService : getFollowerCluster().getDataOrMasterNodeInstances(TransportService.class)) {
MockTransportService mockTransportService = (MockTransportService) transportService;
transportServices.add(mockTransportService);
mockTransportService.addSendBehavior((connection, requestId, action, request, options) -> {
if (action.equals(GetCcrRestoreFileChunkAction.NAME) == false &&
action.equals(TransportActionProxy.getProxyAction(GetCcrRestoreFileChunkAction.NAME)) == false) {
connection.sendRequest(requestId, action, request, options);
}
});
}
logger.info("--> indexing some data");
for (int i = 0; i < 100; i++) {
final String source = String.format(Locale.ROOT, "{\"f\":%d}", i);
leaderClient().prepareIndex("index1", "doc", Integer.toString(i)).setSource(source, XContentType.JSON).get();
}
leaderClient().admin().indices().prepareFlush(leaderIndex).setForce(true).setWaitIfOngoing(true).get();
Settings.Builder settingsBuilder = Settings.builder()
.put(IndexMetaData.SETTING_INDEX_PROVIDED_NAME, followerIndex)
.put(CcrSettings.CCR_FOLLOWING_INDEX_SETTING.getKey(), true);
RestoreSnapshotRequest restoreRequest = new RestoreSnapshotRequest(leaderClusterRepoName, CcrRepository.LATEST)
.indices(leaderIndex).indicesOptions(indicesOptions).renamePattern("^(.*)$")
.renameReplacement(followerIndex).masterNodeTimeout(new TimeValue(1L, TimeUnit.HOURS))
.indexSettings(settingsBuilder);
try {
final RestoreService restoreService = getFollowerCluster().getCurrentMasterNodeInstance(RestoreService.class);
final ClusterService clusterService = getFollowerCluster().getCurrentMasterNodeInstance(ClusterService.class);
PlainActionFuture<RestoreInfo> future = PlainActionFuture.newFuture();
restoreService.restoreSnapshot(restoreRequest, waitForRestore(clusterService, future));
            // Depending on when the timeout occurs this can fail in two ways. If it times out when fetching
            // metadata this will throw an exception. If it times out when restoring a shard, the shard will
            // be marked as failed. Either one is a success for the purpose of this test.
try {
RestoreInfo restoreInfo = future.actionGet();
assertThat(restoreInfo.failedShards(), greaterThan(0));
assertThat(restoreInfo.successfulShards(), lessThan(restoreInfo.totalShards()));
assertEquals(numberOfPrimaryShards, restoreInfo.totalShards());
} catch (Exception e) {
assertThat(ExceptionsHelper.unwrapCause(e), instanceOf(ElasticsearchTimeoutException.class));
}
} finally {
for (MockTransportService transportService : transportServices) {
transportService.clearAllRules();
}
settingsRequest = new ClusterUpdateSettingsRequest();
TimeValue defaultValue = CcrSettings.INDICES_RECOVERY_ACTION_TIMEOUT_SETTING.getDefault(Settings.EMPTY);
settingsRequest.persistentSettings(Settings.builder().put(CcrSettings.INDICES_RECOVERY_ACTION_TIMEOUT_SETTING.getKey(),
defaultValue));
assertAcked(followerClient().admin().cluster().updateSettings(settingsRequest).actionGet());
            // This test sets individual action timeouts low to attempt to replicate timeouts. Although the
            // clear session action is not blocked, it is possible that it will still occasionally time out.
// By wiping the leader index here, we ensure we do not trigger the index commit hanging around
// assertion because the commit is released when the index shard is closed.
getLeaderCluster().wipeIndices(leaderIndex);
}
}
public void testFollowerMappingIsUpdated() throws IOException {
String leaderClusterRepoName = CcrRepository.NAME_PREFIX + "leader_cluster";
String leaderIndex = "index1";
String followerIndex = "index2";
final int numberOfPrimaryShards = randomIntBetween(1, 3);
final String leaderIndexSettings = getIndexSettings(numberOfPrimaryShards, between(0, 1),
singletonMap(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), "true"));
assertAcked(leaderClient().admin().indices().prepareCreate(leaderIndex).setSource(leaderIndexSettings, XContentType.JSON));
ensureLeaderGreen(leaderIndex);
final RestoreService restoreService = getFollowerCluster().getCurrentMasterNodeInstance(RestoreService.class);
final ClusterService clusterService = getFollowerCluster().getCurrentMasterNodeInstance(ClusterService.class);
Settings.Builder settingsBuilder = Settings.builder()
.put(IndexMetaData.SETTING_INDEX_PROVIDED_NAME, followerIndex)
.put(CcrSettings.CCR_FOLLOWING_INDEX_SETTING.getKey(), true);
RestoreSnapshotRequest restoreRequest = new RestoreSnapshotRequest(leaderClusterRepoName, CcrRepository.LATEST)
.indices(leaderIndex).indicesOptions(indicesOptions).renamePattern("^(.*)$")
.renameReplacement(followerIndex).masterNodeTimeout(new TimeValue(1L, TimeUnit.HOURS))
.indexSettings(settingsBuilder);
List<MockTransportService> transportServices = new ArrayList<>();
CountDownLatch latch = new CountDownLatch(1);
AtomicBoolean updateSent = new AtomicBoolean(false);
Runnable updateMappings = () -> {
if (updateSent.compareAndSet(false, true)) {
leaderClient()
.admin()
.indices()
.preparePutMapping(leaderIndex)
.setType("doc")
.setSource("{\"properties\":{\"k\":{\"type\":\"long\"}}}", XContentType.JSON)
.execute(ActionListener.wrap(latch::countDown));
}
try {
latch.await();
} catch (InterruptedException e) {
throw ExceptionsHelper.convertToRuntime(e);
}
};
for (TransportService transportService : getFollowerCluster().getDataOrMasterNodeInstances(TransportService.class)) {
MockTransportService mockTransportService = (MockTransportService) transportService;
transportServices.add(mockTransportService);
mockTransportService.addSendBehavior((connection, requestId, action, request, options) -> {
if (action.equals(PutCcrRestoreSessionAction.NAME)) {
updateMappings.run();
connection.sendRequest(requestId, action, request, options);
} else {
connection.sendRequest(requestId, action, request, options);
}
});
}
try {
PlainActionFuture<RestoreInfo> future = PlainActionFuture.newFuture();
restoreService.restoreSnapshot(restoreRequest, waitForRestore(clusterService, future));
RestoreInfo restoreInfo = future.actionGet();
assertEquals(restoreInfo.totalShards(), restoreInfo.successfulShards());
assertEquals(0, restoreInfo.failedShards());
MappingMetaData mappingMetaData = followerClient().admin().indices().prepareGetMappings("index2").get().getMappings()
.get("index2").get("doc");
assertThat(XContentMapValues.extractValue("properties.k.type", mappingMetaData.sourceAsMap()), equalTo("long"));
} finally {
for (MockTransportService transportService : transportServices) {
transportService.clearAllRules();
}
}
}
private void assertExpectedDocument(String followerIndex, final int value) {
final GetResponse getResponse = followerClient().prepareGet(followerIndex, "doc", Integer.toString(value)).get();
assertTrue("Doc with id [" + value + "] is missing", getResponse.isExists());
        assertTrue(getResponse.getSource().containsKey("f"));
assertThat(getResponse.getSource().get("f"), equalTo(value));
}
}
| x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/CcrRepositoryIT.java | /*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.ccr;
import org.elasticsearch.ElasticsearchTimeoutException;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest;
import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotRequest;
import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest;
import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
import org.elasticsearch.action.get.GetResponse;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.action.support.PlainActionFuture;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MappingMetaData;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.common.xcontent.support.XContentMapValues;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.repositories.RepositoriesService;
import org.elasticsearch.repositories.Repository;
import org.elasticsearch.repositories.RepositoryMissingException;
import org.elasticsearch.snapshots.RestoreInfo;
import org.elasticsearch.snapshots.RestoreService;
import org.elasticsearch.test.transport.MockTransportService;
import org.elasticsearch.transport.TransportActionProxy;
import org.elasticsearch.transport.TransportService;
import org.elasticsearch.xpack.CcrIntegTestCase;
import org.elasticsearch.xpack.ccr.action.repositories.GetCcrRestoreFileChunkAction;
import org.elasticsearch.xpack.ccr.action.repositories.PutCcrRestoreSessionAction;
import org.elasticsearch.xpack.ccr.repository.CcrRepository;
import org.elasticsearch.xpack.ccr.repository.CcrRestoreSourceService;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import static java.util.Collections.singletonMap;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.greaterThan;
import static org.hamcrest.Matchers.instanceOf;
import static org.hamcrest.Matchers.lessThan;
public class CcrRepositoryIT extends CcrIntegTestCase {
private final IndicesOptions indicesOptions = IndicesOptions.strictSingleIndexNoExpandForbidClosed();
public void testThatRepositoryIsPutAndRemovedWhenRemoteClusterIsUpdated() throws Exception {
String leaderClusterRepoName = CcrRepository.NAME_PREFIX + "leader_cluster";
final RepositoriesService repositoriesService =
getFollowerCluster().getDataOrMasterNodeInstances(RepositoriesService.class).iterator().next();
try {
Repository repository = repositoriesService.repository(leaderClusterRepoName);
assertEquals(CcrRepository.TYPE, repository.getMetadata().type());
assertEquals(leaderClusterRepoName, repository.getMetadata().name());
} catch (RepositoryMissingException e) {
fail("need repository");
}
ClusterUpdateSettingsRequest putSecondCluster = new ClusterUpdateSettingsRequest();
String address = getFollowerCluster().getDataNodeInstance(TransportService.class).boundAddress().publishAddress().toString();
putSecondCluster.persistentSettings(Settings.builder().put("cluster.remote.follower_cluster_copy.seeds", address));
assertAcked(followerClient().admin().cluster().updateSettings(putSecondCluster).actionGet());
String followerCopyRepoName = CcrRepository.NAME_PREFIX + "follower_cluster_copy";
try {
Repository repository = repositoriesService.repository(followerCopyRepoName);
assertEquals(CcrRepository.TYPE, repository.getMetadata().type());
assertEquals(followerCopyRepoName, repository.getMetadata().name());
} catch (RepositoryMissingException e) {
fail("need repository");
}
ClusterUpdateSettingsRequest deleteLeaderCluster = new ClusterUpdateSettingsRequest();
deleteLeaderCluster.persistentSettings(Settings.builder().put("cluster.remote.leader_cluster.seeds", ""));
assertAcked(followerClient().admin().cluster().updateSettings(deleteLeaderCluster).actionGet());
expectThrows(RepositoryMissingException.class, () -> repositoriesService.repository(leaderClusterRepoName));
ClusterUpdateSettingsRequest deleteSecondCluster = new ClusterUpdateSettingsRequest();
deleteSecondCluster.persistentSettings(Settings.builder().put("cluster.remote.follower_cluster_copy.seeds", ""));
assertAcked(followerClient().admin().cluster().updateSettings(deleteSecondCluster).actionGet());
expectThrows(RepositoryMissingException.class, () -> repositoriesService.repository(followerCopyRepoName));
ClusterUpdateSettingsRequest putLeaderRequest = new ClusterUpdateSettingsRequest();
address = getLeaderCluster().getDataNodeInstance(TransportService.class).boundAddress().publishAddress().toString();
putLeaderRequest.persistentSettings(Settings.builder().put("cluster.remote.leader_cluster.seeds", address));
assertAcked(followerClient().admin().cluster().updateSettings(putLeaderRequest).actionGet());
}
public void testThatRepositoryRecoversEmptyIndexBasedOnLeaderSettings() throws IOException {
String leaderClusterRepoName = CcrRepository.NAME_PREFIX + "leader_cluster";
String leaderIndex = "index1";
String followerIndex = "index2";
final int numberOfPrimaryShards = randomIntBetween(1, 3);
final String leaderIndexSettings = getIndexSettings(numberOfPrimaryShards, between(0, 1),
singletonMap(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), "true"));
assertAcked(leaderClient().admin().indices().prepareCreate(leaderIndex).setSource(leaderIndexSettings, XContentType.JSON));
ensureLeaderGreen(leaderIndex);
final RestoreService restoreService = getFollowerCluster().getCurrentMasterNodeInstance(RestoreService.class);
final ClusterService clusterService = getFollowerCluster().getCurrentMasterNodeInstance(ClusterService.class);
Settings.Builder settingsBuilder = Settings.builder()
.put(IndexMetaData.SETTING_INDEX_PROVIDED_NAME, followerIndex)
.put(CcrSettings.CCR_FOLLOWING_INDEX_SETTING.getKey(), true);
RestoreSnapshotRequest restoreRequest = new RestoreSnapshotRequest(leaderClusterRepoName, CcrRepository.LATEST)
.indices(leaderIndex).indicesOptions(indicesOptions).renamePattern("^(.*)$")
.renameReplacement(followerIndex).masterNodeTimeout(new TimeValue(1L, TimeUnit.HOURS))
.indexSettings(settingsBuilder);
PlainActionFuture<RestoreInfo> future = PlainActionFuture.newFuture();
restoreService.restoreSnapshot(restoreRequest, waitForRestore(clusterService, future));
RestoreInfo restoreInfo = future.actionGet();
assertEquals(restoreInfo.totalShards(), restoreInfo.successfulShards());
assertEquals(0, restoreInfo.failedShards());
ClusterStateResponse leaderState = leaderClient()
.admin()
.cluster()
.prepareState()
.clear()
.setMetaData(true)
.setIndices(leaderIndex)
.get();
ClusterStateResponse followerState = followerClient()
.admin()
.cluster()
.prepareState()
.clear()
.setMetaData(true)
.setIndices(followerIndex)
.get();
IndexMetaData leaderMetadata = leaderState.getState().metaData().index(leaderIndex);
IndexMetaData followerMetadata = followerState.getState().metaData().index(followerIndex);
assertEquals(leaderMetadata.getNumberOfShards(), followerMetadata.getNumberOfShards());
Map<String, String> ccrMetadata = followerMetadata.getCustomData(Ccr.CCR_CUSTOM_METADATA_KEY);
assertEquals(leaderIndex, ccrMetadata.get(Ccr.CCR_CUSTOM_METADATA_LEADER_INDEX_NAME_KEY));
assertEquals(leaderMetadata.getIndexUUID(), ccrMetadata.get(Ccr.CCR_CUSTOM_METADATA_LEADER_INDEX_UUID_KEY));
assertEquals("leader_cluster", ccrMetadata.get(Ccr.CCR_CUSTOM_METADATA_REMOTE_CLUSTER_NAME_KEY));
assertEquals(followerIndex, followerMetadata.getSettings().get(IndexMetaData.SETTING_INDEX_PROVIDED_NAME));
assertEquals(true, IndexSettings.INDEX_SOFT_DELETES_SETTING.get(followerMetadata.getSettings()));
// UUID is changed so that we can follow indices on the same cluster
assertNotEquals(leaderMetadata.getIndexUUID(), followerMetadata.getIndexUUID());
}
public void testDocsAreRecovered() throws Exception {
ClusterUpdateSettingsRequest settingsRequest = new ClusterUpdateSettingsRequest();
String chunkSize = randomFrom("4KB", "128KB", "1MB");
settingsRequest.persistentSettings(Settings.builder().put(CcrSettings.RECOVERY_CHUNK_SIZE.getKey(), chunkSize));
assertAcked(followerClient().admin().cluster().updateSettings(settingsRequest).actionGet());
String leaderClusterRepoName = CcrRepository.NAME_PREFIX + "leader_cluster";
String leaderIndex = "index1";
String followerIndex = "index2";
final int numberOfPrimaryShards = randomIntBetween(1, 3);
final String leaderIndexSettings = getIndexSettings(numberOfPrimaryShards, between(0, 1),
singletonMap(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), "true"));
assertAcked(leaderClient().admin().indices().prepareCreate(leaderIndex).setSource(leaderIndexSettings, XContentType.JSON));
ensureLeaderGreen(leaderIndex);
final RestoreService restoreService = getFollowerCluster().getCurrentMasterNodeInstance(RestoreService.class);
final ClusterService clusterService = getFollowerCluster().getCurrentMasterNodeInstance(ClusterService.class);
final int firstBatchNumDocs = randomIntBetween(1, 64);
logger.info("Indexing [{}] docs as first batch", firstBatchNumDocs);
for (int i = 0; i < firstBatchNumDocs; i++) {
final String source = String.format(Locale.ROOT, "{\"f\":%d}", i);
leaderClient().prepareIndex("index1", "doc", Integer.toString(i)).setSource(source, XContentType.JSON).get();
}
leaderClient().admin().indices().prepareFlush(leaderIndex).setForce(true).setWaitIfOngoing(true).get();
Settings.Builder settingsBuilder = Settings.builder()
.put(IndexMetaData.SETTING_INDEX_PROVIDED_NAME, followerIndex)
.put(CcrSettings.CCR_FOLLOWING_INDEX_SETTING.getKey(), true);
RestoreSnapshotRequest restoreRequest = new RestoreSnapshotRequest(leaderClusterRepoName, CcrRepository.LATEST)
.indices(leaderIndex).indicesOptions(indicesOptions).renamePattern("^(.*)$")
.renameReplacement(followerIndex).masterNodeTimeout(new TimeValue(1L, TimeUnit.HOURS))
.indexSettings(settingsBuilder);
PlainActionFuture<RestoreInfo> future = PlainActionFuture.newFuture();
restoreService.restoreSnapshot(restoreRequest, waitForRestore(clusterService, future));
RestoreInfo restoreInfo = future.actionGet();
assertEquals(restoreInfo.totalShards(), restoreInfo.successfulShards());
assertEquals(0, restoreInfo.failedShards());
for (int i = 0; i < firstBatchNumDocs; ++i) {
assertExpectedDocument(followerIndex, i);
}
settingsRequest = new ClusterUpdateSettingsRequest();
ByteSizeValue defaultValue = CcrSettings.RECOVERY_CHUNK_SIZE.getDefault(Settings.EMPTY);
settingsRequest.persistentSettings(Settings.builder().put(CcrSettings.RECOVERY_CHUNK_SIZE.getKey(), defaultValue));
assertAcked(followerClient().admin().cluster().updateSettings(settingsRequest).actionGet());
}
public void testRateLimitingIsEmployed() throws Exception {
boolean followerRateLimiting = randomBoolean();
ClusterUpdateSettingsRequest settingsRequest = new ClusterUpdateSettingsRequest();
settingsRequest.persistentSettings(Settings.builder().put(CcrSettings.RECOVERY_MAX_BYTES_PER_SECOND.getKey(), "10K"));
if (followerRateLimiting) {
assertAcked(followerClient().admin().cluster().updateSettings(settingsRequest).actionGet());
} else {
assertAcked(leaderClient().admin().cluster().updateSettings(settingsRequest).actionGet());
}
String leaderClusterRepoName = CcrRepository.NAME_PREFIX + "leader_cluster";
String leaderIndex = "index1";
String followerIndex = "index2";
final int numberOfPrimaryShards = randomIntBetween(1, 3);
final String leaderIndexSettings = getIndexSettings(numberOfPrimaryShards, between(0, 1),
singletonMap(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), "true"));
assertAcked(leaderClient().admin().indices().prepareCreate(leaderIndex).setSource(leaderIndexSettings, XContentType.JSON));
ensureLeaderGreen(leaderIndex);
final RestoreService restoreService = getFollowerCluster().getCurrentMasterNodeInstance(RestoreService.class);
final ClusterService clusterService = getFollowerCluster().getCurrentMasterNodeInstance(ClusterService.class);
List<CcrRepository> repositories = new ArrayList<>();
List<CcrRestoreSourceService> restoreSources = new ArrayList<>();
for (RepositoriesService repositoriesService : getFollowerCluster().getDataOrMasterNodeInstances(RepositoriesService.class)) {
Repository repository = repositoriesService.repository(leaderClusterRepoName);
repositories.add((CcrRepository) repository);
}
for (CcrRestoreSourceService restoreSource : getLeaderCluster().getDataOrMasterNodeInstances(CcrRestoreSourceService.class)) {
restoreSources.add(restoreSource);
}
logger.info("--> indexing some data");
for (int i = 0; i < 100; i++) {
final String source = String.format(Locale.ROOT, "{\"f\":%d}", i);
leaderClient().prepareIndex("index1", "doc", Integer.toString(i)).setSource(source, XContentType.JSON).get();
}
leaderClient().admin().indices().prepareFlush(leaderIndex).setForce(true).setWaitIfOngoing(true).get();
Settings.Builder settingsBuilder = Settings.builder()
.put(IndexMetaData.SETTING_INDEX_PROVIDED_NAME, followerIndex)
.put(CcrSettings.CCR_FOLLOWING_INDEX_SETTING.getKey(), true);
RestoreSnapshotRequest restoreRequest = new RestoreSnapshotRequest(leaderClusterRepoName, CcrRepository.LATEST)
.indices(leaderIndex).indicesOptions(indicesOptions).renamePattern("^(.*)$")
.renameReplacement(followerIndex).masterNodeTimeout(new TimeValue(1L, TimeUnit.HOURS))
.indexSettings(settingsBuilder);
PlainActionFuture<RestoreInfo> future = PlainActionFuture.newFuture();
restoreService.restoreSnapshot(restoreRequest, waitForRestore(clusterService, future));
future.actionGet();
if (followerRateLimiting) {
assertTrue(repositories.stream().anyMatch(cr -> cr.getRestoreThrottleTimeInNanos() > 0));
} else {
assertTrue(restoreSources.stream().anyMatch(cr -> cr.getThrottleTime() > 0));
}
settingsRequest = new ClusterUpdateSettingsRequest();
ByteSizeValue defaultValue = CcrSettings.RECOVERY_MAX_BYTES_PER_SECOND.getDefault(Settings.EMPTY);
settingsRequest.persistentSettings(Settings.builder().put(CcrSettings.RECOVERY_MAX_BYTES_PER_SECOND.getKey(), defaultValue));
if (followerRateLimiting) {
assertAcked(followerClient().admin().cluster().updateSettings(settingsRequest).actionGet());
} else {
assertAcked(leaderClient().admin().cluster().updateSettings(settingsRequest).actionGet());
}
}
@AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/39245")
public void testIndividualActionsTimeout() throws Exception {
ClusterUpdateSettingsRequest settingsRequest = new ClusterUpdateSettingsRequest();
TimeValue timeValue = TimeValue.timeValueMillis(100);
settingsRequest.persistentSettings(Settings.builder().put(CcrSettings.INDICES_RECOVERY_ACTION_TIMEOUT_SETTING.getKey(), timeValue));
assertAcked(followerClient().admin().cluster().updateSettings(settingsRequest).actionGet());
String leaderClusterRepoName = CcrRepository.NAME_PREFIX + "leader_cluster";
String leaderIndex = "index1";
String followerIndex = "index2";
final int numberOfPrimaryShards = randomIntBetween(1, 3);
final String leaderIndexSettings = getIndexSettings(numberOfPrimaryShards, between(0, 1),
singletonMap(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), "true"));
assertAcked(leaderClient().admin().indices().prepareCreate(leaderIndex).setSource(leaderIndexSettings, XContentType.JSON));
ensureLeaderGreen(leaderIndex);
List<MockTransportService> transportServices = new ArrayList<>();
for (TransportService transportService : getFollowerCluster().getDataOrMasterNodeInstances(TransportService.class)) {
MockTransportService mockTransportService = (MockTransportService) transportService;
transportServices.add(mockTransportService);
mockTransportService.addSendBehavior((connection, requestId, action, request, options) -> {
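// Swallow file chunk requests (direct and proxied) so those actions run into the low action timeout.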
if (action.equals(GetCcrRestoreFileChunkAction.NAME) == false &&
action.equals(TransportActionProxy.getProxyAction(GetCcrRestoreFileChunkAction.NAME)) == false) {
connection.sendRequest(requestId, action, request, options);
}
});
}
logger.info("--> indexing some data");
for (int i = 0; i < 100; i++) {
final String source = String.format(Locale.ROOT, "{\"f\":%d}", i);
leaderClient().prepareIndex("index1", "doc", Integer.toString(i)).setSource(source, XContentType.JSON).get();
}
leaderClient().admin().indices().prepareFlush(leaderIndex).setForce(true).setWaitIfOngoing(true).get();
Settings.Builder settingsBuilder = Settings.builder()
.put(IndexMetaData.SETTING_INDEX_PROVIDED_NAME, followerIndex)
.put(CcrSettings.CCR_FOLLOWING_INDEX_SETTING.getKey(), true);
RestoreSnapshotRequest restoreRequest = new RestoreSnapshotRequest(leaderClusterRepoName, CcrRepository.LATEST)
.indices(leaderIndex).indicesOptions(indicesOptions).renamePattern("^(.*)$")
.renameReplacement(followerIndex).masterNodeTimeout(new TimeValue(1L, TimeUnit.HOURS))
.indexSettings(settingsBuilder);
try {
final RestoreService restoreService = getFollowerCluster().getCurrentMasterNodeInstance(RestoreService.class);
final ClusterService clusterService = getFollowerCluster().getCurrentMasterNodeInstance(ClusterService.class);
PlainActionFuture<RestoreInfo> future = PlainActionFuture.newFuture();
restoreService.restoreSnapshot(restoreRequest, waitForRestore(clusterService, future));
// Depending on when the timeout occurs this can fail in two ways. If it times out when fetching
// metadata this will throw an exception. If it times out when restoring a shard, the shard will
// be marked as failed. Either one is a success for the purpose of this test.
try {
RestoreInfo restoreInfo = future.actionGet();
assertThat(restoreInfo.failedShards(), greaterThan(0));
assertThat(restoreInfo.successfulShards(), lessThan(restoreInfo.totalShards()));
assertEquals(numberOfPrimaryShards, restoreInfo.totalShards());
} catch (Exception e) {
assertThat(ExceptionsHelper.unwrapCause(e), instanceOf(ElasticsearchTimeoutException.class));
}
} finally {
for (MockTransportService transportService : transportServices) {
transportService.clearAllRules();
}
settingsRequest = new ClusterUpdateSettingsRequest();
TimeValue defaultValue = CcrSettings.INDICES_RECOVERY_ACTION_TIMEOUT_SETTING.getDefault(Settings.EMPTY);
settingsRequest.persistentSettings(Settings.builder().put(CcrSettings.INDICES_RECOVERY_ACTION_TIMEOUT_SETTING.getKey(),
defaultValue));
assertAcked(followerClient().admin().cluster().updateSettings(settingsRequest).actionGet());
}
}
public void testFollowerMappingIsUpdated() throws IOException {
String leaderClusterRepoName = CcrRepository.NAME_PREFIX + "leader_cluster";
String leaderIndex = "index1";
String followerIndex = "index2";
final int numberOfPrimaryShards = randomIntBetween(1, 3);
final String leaderIndexSettings = getIndexSettings(numberOfPrimaryShards, between(0, 1),
singletonMap(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), "true"));
assertAcked(leaderClient().admin().indices().prepareCreate(leaderIndex).setSource(leaderIndexSettings, XContentType.JSON));
ensureLeaderGreen(leaderIndex);
final RestoreService restoreService = getFollowerCluster().getCurrentMasterNodeInstance(RestoreService.class);
final ClusterService clusterService = getFollowerCluster().getCurrentMasterNodeInstance(ClusterService.class);
Settings.Builder settingsBuilder = Settings.builder()
.put(IndexMetaData.SETTING_INDEX_PROVIDED_NAME, followerIndex)
.put(CcrSettings.CCR_FOLLOWING_INDEX_SETTING.getKey(), true);
RestoreSnapshotRequest restoreRequest = new RestoreSnapshotRequest(leaderClusterRepoName, CcrRepository.LATEST)
.indices(leaderIndex).indicesOptions(indicesOptions).renamePattern("^(.*)$")
.renameReplacement(followerIndex).masterNodeTimeout(new TimeValue(1L, TimeUnit.HOURS))
.indexSettings(settingsBuilder);
List<MockTransportService> transportServices = new ArrayList<>();
CountDownLatch latch = new CountDownLatch(1);
AtomicBoolean updateSent = new AtomicBoolean(false);
Runnable updateMappings = () -> {
if (updateSent.compareAndSet(false, true)) {
leaderClient()
.admin()
.indices()
.preparePutMapping(leaderIndex)
.setType("doc")
.setSource("{\"properties\":{\"k\":{\"type\":\"long\"}}}", XContentType.JSON)
.execute(ActionListener.wrap(latch::countDown));
}
try {
latch.await();
} catch (InterruptedException e) {
throw ExceptionsHelper.convertToRuntime(e);
}
};
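// Intercept the restore session request so the mapping update lands before the restore session is opened.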
for (TransportService transportService : getFollowerCluster().getDataOrMasterNodeInstances(TransportService.class)) {
MockTransportService mockTransportService = (MockTransportService) transportService;
transportServices.add(mockTransportService);
mockTransportService.addSendBehavior((connection, requestId, action, request, options) -> {
if (action.equals(PutCcrRestoreSessionAction.NAME)) {
updateMappings.run();
connection.sendRequest(requestId, action, request, options);
} else {
connection.sendRequest(requestId, action, request, options);
}
});
}
try {
PlainActionFuture<RestoreInfo> future = PlainActionFuture.newFuture();
restoreService.restoreSnapshot(restoreRequest, waitForRestore(clusterService, future));
RestoreInfo restoreInfo = future.actionGet();
assertEquals(restoreInfo.totalShards(), restoreInfo.successfulShards());
assertEquals(0, restoreInfo.failedShards());
MappingMetaData mappingMetaData = followerClient().admin().indices().prepareGetMappings(followerIndex).get().getMappings()
.get(followerIndex).get("doc");
assertThat(XContentMapValues.extractValue("properties.k.type", mappingMetaData.sourceAsMap()), equalTo("long"));
} finally {
for (MockTransportService transportService : transportServices) {
transportService.clearAllRules();
}
}
}
private void assertExpectedDocument(String followerIndex, final int value) {
final GetResponse getResponse = followerClient().prepareGet(followerIndex, "doc", Integer.toString(value)).get();
assertTrue("Doc with id [" + value + "] is missing", getResponse.isExists());
assertTrue((getResponse.getSource().containsKey("f")));
assertThat(getResponse.getSource().get("f"), equalTo(value));
}
}
| Ensure index commit released when testing timeouts (#39273)
This fixes #39245. Currently it is possible in this test that the clear
session call times out. This means that the index commit will not be
released and there will be an assertion triggered in the test teardown.
This commit ensures that we wipe the leader index in the test to avoid
this assertion.
It is okay if the clear session call times out in normal usage. This
scenario is unavoidable due to potential network issues. We have a local
timeout on the leader to clean it up when this scenario happens. | x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/CcrRepositoryIT.java | Ensure index commit released when testing timeouts (#39273) | <ide><path>-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/CcrRepositoryIT.java
<ide> settingsRequest.persistentSettings(Settings.builder().put(CcrSettings.INDICES_RECOVERY_ACTION_TIMEOUT_SETTING.getKey(),
<ide> defaultValue));
<ide> assertAcked(followerClient().admin().cluster().updateSettings(settingsRequest).actionGet());
<add> // This test sets individual action timeouts low to attempt to replicate timeouts. Although the
<add> // clear session action is not blocked, it is possible that it will still occasionally time out.
<add> // By wiping the leader index here, we ensure we do not trip the teardown assertion about a
<add> // leaked index commit, because the commit is released when the index shard is closed.
<add> getLeaderCluster().wipeIndices(leaderIndex);
<ide> }
<ide> }
<ide> |
|
JavaScript | mit | 865e8fdd78735b6517f51d7af64e97a6060c6ade | 0 | Mokolea/atom,atom/atom,atom/atom,Mokolea/atom,PKRoma/atom,atom/atom,brettle/atom,brettle/atom,Mokolea/atom,PKRoma/atom,brettle/atom,PKRoma/atom | 'use strict';
const fs = require('fs-extra');
const os = require('os');
const path = require('path');
const spawnSync = require('./spawn-sync');
const template = require('lodash.template');
const CONFIG = require('../config');
module.exports = function(packagedAppPath) {
console.log('Thanks for reaching out!');
const atomExecutableName =
CONFIG.channel === 'stable' ? 'atom' : `atom-${CONFIG.channel}`;
const apmExecutableName =
CONFIG.channel === 'stable' ? 'apm' : `apm-${CONFIG.channel}`;
const appDescription = CONFIG.appMetadata.description;
const appVersion = CONFIG.appMetadata.version;
let arch;
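// Map Node's process.arch values onto Debian architecture names.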
if (process.arch === 'ia32') {
arch = 'i386';
} else if (process.arch === 'x64') {
arch = 'amd64';
} else if (process.arch === 'ppc') {
arch = 'powerpc';
} else {
arch = process.arch;
}
const outputDebianPackageFilePath = path.join(
CONFIG.buildOutputPath,
`atom-${arch}.deb`
);
const debianPackageDirPath = path.join(
os.tmpdir(),
path.basename(packagedAppPath)
);
const debianPackageConfigPath = path.join(debianPackageDirPath, 'DEBIAN');
const debianPackageInstallDirPath = path.join(debianPackageDirPath, 'usr');
const debianPackageBinDirPath = path.join(debianPackageInstallDirPath, 'bin');
const debianPackageShareDirPath = path.join(
debianPackageInstallDirPath,
'share'
);
const debianPackageAtomDirPath = path.join(
debianPackageShareDirPath,
atomExecutableName
);
const debianPackageApplicationsDirPath = path.join(
debianPackageShareDirPath,
'applications'
);
const debianPackageIconsDirPath = path.join(
debianPackageShareDirPath,
'pixmaps'
);
const debianPackageLintianOverridesDirPath = path.join(
debianPackageShareDirPath,
'lintian',
'overrides'
);
const debianPackageDocsDirPath = path.join(
debianPackageShareDirPath,
'doc',
atomExecutableName
);
if (fs.existsSync(debianPackageDirPath)) {
console.log(
`Deleting existing build dir for Debian package at "${debianPackageDirPath}"`
);
fs.removeSync(debianPackageDirPath);
}
if (fs.existsSync(`${debianPackageDirPath}.deb`)) {
console.log(
`Deleting existing Debian package at "${debianPackageDirPath}.deb"`
);
fs.removeSync(`${debianPackageDirPath}.deb`);
}
if (fs.existsSync(outputDebianPackageFilePath)) {
console.log(
`Deleting existing Debian package at "${outputDebianPackageFilePath}"`
);
fs.removeSync(outputDebianPackageFilePath);
}
console.log(
`Creating Debian package directory structure at "${debianPackageDirPath}"`
);
fs.mkdirpSync(debianPackageDirPath);
fs.mkdirpSync(debianPackageConfigPath);
fs.mkdirpSync(debianPackageInstallDirPath);
fs.mkdirpSync(debianPackageShareDirPath);
fs.mkdirpSync(debianPackageApplicationsDirPath);
fs.mkdirpSync(debianPackageIconsDirPath);
fs.mkdirpSync(debianPackageLintianOverridesDirPath);
fs.mkdirpSync(debianPackageDocsDirPath);
fs.mkdirpSync(debianPackageBinDirPath);
console.log(`Copying "${packagedAppPath}" to "${debianPackageAtomDirPath}"`);
fs.copySync(packagedAppPath, debianPackageAtomDirPath);
fs.chmodSync(debianPackageAtomDirPath, '755');
console.log(`Copying binaries into "${debianPackageBinDirPath}"`);
fs.copySync(
path.join(CONFIG.repositoryRootPath, 'atom.sh'),
path.join(debianPackageBinDirPath, atomExecutableName)
);
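// Expose apm via a relative symlink to the copy bundled inside the packaged app.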
fs.symlinkSync(
path.join(
'..',
'share',
atomExecutableName,
'resources',
'app',
'apm',
'node_modules',
'.bin',
'apm'
),
path.join(debianPackageBinDirPath, apmExecutableName)
);
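// The Chromium setuid sandbox helper needs mode 4755 (setuid root) to work.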
fs.chmodSync(path.join(debianPackageAtomDirPath, 'chrome-sandbox'), '4755');
console.log(`Writing control file into "${debianPackageConfigPath}"`);
const packageSizeInKilobytes = spawnSync('du', ['-sk', packagedAppPath])
.stdout.toString()
.split(/\s+/)[0];
const controlFileTemplate = fs.readFileSync(
path.join(
CONFIG.repositoryRootPath,
'resources',
'linux',
'debian',
'control.in'
)
);
const controlFileContents = template(controlFileTemplate)({
appFileName: atomExecutableName,
version: appVersion,
arch: arch,
installedSize: packageSizeInKilobytes,
description: appDescription
});
fs.writeFileSync(
path.join(debianPackageConfigPath, 'control'),
controlFileContents
);
console.log(
`Writing desktop entry file into "${debianPackageApplicationsDirPath}"`
);
const desktopEntryTemplate = fs.readFileSync(
path.join(
CONFIG.repositoryRootPath,
'resources',
'linux',
'atom.desktop.in'
)
);
const desktopEntryContents = template(desktopEntryTemplate)({
appName: CONFIG.appName,
appFileName: atomExecutableName,
description: appDescription,
installDir: '/usr',
iconPath: atomExecutableName
});
fs.writeFileSync(
path.join(
debianPackageApplicationsDirPath,
`${atomExecutableName}.desktop`
),
desktopEntryContents
);
console.log(`Copying icon into "${debianPackageIconsDirPath}"`);
fs.copySync(
path.join(
packagedAppPath,
'resources',
'app.asar.unpacked',
'resources',
'atom.png'
),
path.join(debianPackageIconsDirPath, `${atomExecutableName}.png`)
);
console.log(`Copying license into "${debianPackageDocsDirPath}"`);
fs.copySync(
path.join(packagedAppPath, 'resources', 'LICENSE.md'),
path.join(debianPackageDocsDirPath, 'copyright')
);
console.log(
`Copying lintian overrides into "${debianPackageLintianOverridesDirPath}"`
);
fs.copySync(
path.join(
CONFIG.repositoryRootPath,
'resources',
'linux',
'debian',
'lintian-overrides'
),
path.join(debianPackageLintianOverridesDirPath, atomExecutableName)
);
console.log(
`Copying polkit configuration into "${debianPackageShareDirPath}"`
);
fs.copySync(
path.join(CONFIG.repositoryRootPath, 'resources', 'linux', 'atom.policy'),
path.join(
debianPackageShareDirPath,
'polkit-1',
'actions',
`atom-${CONFIG.channel}.policy`
)
);
console.log(`Generating .deb file from ${debianPackageDirPath}`);
spawnSync('fakeroot', ['dpkg-deb', '-b', debianPackageDirPath], {
stdio: 'inherit'
});
console.log(
`Copying generated package into "${outputDebianPackageFilePath}"`
);
fs.copySync(`${debianPackageDirPath}.deb`, outputDebianPackageFilePath);
};
| script/lib/create-debian-package.js | 'use strict';
const fs = require('fs-extra');
const os = require('os');
const path = require('path');
const spawnSync = require('./spawn-sync');
const template = require('lodash.template');
const CONFIG = require('../config');
module.exports = function(packagedAppPath) {
console.log(`Creating Debian package for "${packagedAppPath}"`);
const atomExecutableName =
CONFIG.channel === 'stable' ? 'atom' : `atom-${CONFIG.channel}`;
const apmExecutableName =
CONFIG.channel === 'stable' ? 'apm' : `apm-${CONFIG.channel}`;
const appDescription = CONFIG.appMetadata.description;
const appVersion = CONFIG.appMetadata.version;
let arch;
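// Map Node's process.arch values onto Debian architecture names.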
if (process.arch === 'ia32') {
arch = 'i386';
} else if (process.arch === 'x64') {
arch = 'amd64';
} else if (process.arch === 'ppc') {
arch = 'powerpc';
} else {
arch = process.arch;
}
const outputDebianPackageFilePath = path.join(
CONFIG.buildOutputPath,
`atom-${arch}.deb`
);
const debianPackageDirPath = path.join(
os.tmpdir(),
path.basename(packagedAppPath)
);
const debianPackageConfigPath = path.join(debianPackageDirPath, 'DEBIAN');
const debianPackageInstallDirPath = path.join(debianPackageDirPath, 'usr');
const debianPackageBinDirPath = path.join(debianPackageInstallDirPath, 'bin');
const debianPackageShareDirPath = path.join(
debianPackageInstallDirPath,
'share'
);
const debianPackageAtomDirPath = path.join(
debianPackageShareDirPath,
atomExecutableName
);
const debianPackageApplicationsDirPath = path.join(
debianPackageShareDirPath,
'applications'
);
const debianPackageIconsDirPath = path.join(
debianPackageShareDirPath,
'pixmaps'
);
const debianPackageLintianOverridesDirPath = path.join(
debianPackageShareDirPath,
'lintian',
'overrides'
);
const debianPackageDocsDirPath = path.join(
debianPackageShareDirPath,
'doc',
atomExecutableName
);
if (fs.existsSync(debianPackageDirPath)) {
console.log(
`Deleting existing build dir for Debian package at "${debianPackageDirPath}"`
);
fs.removeSync(debianPackageDirPath);
}
if (fs.existsSync(`${debianPackageDirPath}.deb`)) {
console.log(
`Deleting existing Debian package at "${debianPackageDirPath}.deb"`
);
fs.removeSync(`${debianPackageDirPath}.deb`);
}
if (fs.existsSync(outputDebianPackageFilePath)) {
console.log(
`Deleting existing Debian package at "${outputDebianPackageFilePath}"`
);
fs.removeSync(outputDebianPackageFilePath);
}
console.log(
`Creating Debian package directory structure at "${debianPackageDirPath}"`
);
fs.mkdirpSync(debianPackageDirPath);
fs.mkdirpSync(debianPackageConfigPath);
fs.mkdirpSync(debianPackageInstallDirPath);
fs.mkdirpSync(debianPackageShareDirPath);
fs.mkdirpSync(debianPackageApplicationsDirPath);
fs.mkdirpSync(debianPackageIconsDirPath);
fs.mkdirpSync(debianPackageLintianOverridesDirPath);
fs.mkdirpSync(debianPackageDocsDirPath);
fs.mkdirpSync(debianPackageBinDirPath);
console.log(`Copying "${packagedAppPath}" to "${debianPackageAtomDirPath}"`);
fs.copySync(packagedAppPath, debianPackageAtomDirPath);
fs.chmodSync(debianPackageAtomDirPath, '755');
console.log(`Copying binaries into "${debianPackageBinDirPath}"`);
fs.copySync(
path.join(CONFIG.repositoryRootPath, 'atom.sh'),
path.join(debianPackageBinDirPath, atomExecutableName)
);
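// Expose apm via a relative symlink to the copy bundled inside the packaged app.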
fs.symlinkSync(
path.join(
'..',
'share',
atomExecutableName,
'resources',
'app',
'apm',
'node_modules',
'.bin',
'apm'
),
path.join(debianPackageBinDirPath, apmExecutableName)
);
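// The Chromium setuid sandbox helper needs mode 4755 (setuid root) to work.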
fs.chmodSync(path.join(debianPackageAtomDirPath, 'chrome-sandbox'), '4755');
console.log(`Writing control file into "${debianPackageConfigPath}"`);
const packageSizeInKilobytes = spawnSync('du', ['-sk', packagedAppPath])
.stdout.toString()
.split(/\s+/)[0];
const controlFileTemplate = fs.readFileSync(
path.join(
CONFIG.repositoryRootPath,
'resources',
'linux',
'debian',
'control.in'
)
);
const controlFileContents = template(controlFileTemplate)({
appFileName: atomExecutableName,
version: appVersion,
arch: arch,
installedSize: packageSizeInKilobytes,
description: appDescription
});
fs.writeFileSync(
path.join(debianPackageConfigPath, 'control'),
controlFileContents
);
console.log(
`Writing desktop entry file into "${debianPackageApplicationsDirPath}"`
);
const desktopEntryTemplate = fs.readFileSync(
path.join(
CONFIG.repositoryRootPath,
'resources',
'linux',
'atom.desktop.in'
)
);
const desktopEntryContents = template(desktopEntryTemplate)({
appName: CONFIG.appName,
appFileName: atomExecutableName,
description: appDescription,
installDir: '/usr',
iconPath: atomExecutableName
});
fs.writeFileSync(
path.join(
debianPackageApplicationsDirPath,
`${atomExecutableName}.desktop`
),
desktopEntryContents
);
console.log(`Copying icon into "${debianPackageIconsDirPath}"`);
fs.copySync(
path.join(
packagedAppPath,
'resources',
'app.asar.unpacked',
'resources',
'atom.png'
),
path.join(debianPackageIconsDirPath, `${atomExecutableName}.png`)
);
console.log(`Copying license into "${debianPackageDocsDirPath}"`);
fs.copySync(
path.join(packagedAppPath, 'resources', 'LICENSE.md'),
path.join(debianPackageDocsDirPath, 'copyright')
);
console.log(
`Copying lintian overrides into "${debianPackageLintianOverridesDirPath}"`
);
fs.copySync(
path.join(
CONFIG.repositoryRootPath,
'resources',
'linux',
'debian',
'lintian-overrides'
),
path.join(debianPackageLintianOverridesDirPath, atomExecutableName)
);
console.log(
`Copying polkit configuration into "${debianPackageShareDirPath}"`
);
fs.copySync(
path.join(CONFIG.repositoryRootPath, 'resources', 'linux', 'atom.policy'),
path.join(
debianPackageShareDirPath,
'polkit-1',
'actions',
`atom-${CONFIG.channel}.policy`
)
);
console.log(`Generating .deb file from ${debianPackageDirPath}`);
spawnSync('fakeroot', ['dpkg-deb', '-b', debianPackageDirPath], {
stdio: 'inherit'
});
console.log(
`Copying generated package into "${outputDebianPackageFilePath}"`
);
fs.copySync(`${debianPackageDirPath}.deb`, outputDebianPackageFilePath);
};
| Trigger CI | script/lib/create-debian-package.js | Trigger CI | <ide><path>cript/lib/create-debian-package.js
<ide> const CONFIG = require('../config');
<ide>
<ide> module.exports = function(packagedAppPath) {
<del> console.log(`Creating Debian package for "${packagedAppPath}"`);
<add> console.log('Thanks for reaching out!');
<ide> const atomExecutableName =
<ide> CONFIG.channel === 'stable' ? 'atom' : `atom-${CONFIG.channel}`;
<ide> const apmExecutableName = |
|
Java | apache-2.0 | f14d4fc54f9350accdd6016e4de0dc72e0a9db16 | 0 | AndyScherzinger/andlytics,willlunniss/andlytics,d4rken/andlytics,TheNephilim88/andlytics,willlunniss/andlytics,ArcadiaConsulting/appstorestats,nelenkov/andlytics,TheNephilim88/andlytics,AndyScherzinger/andlytics | package com.github.andlyticsproject;
import java.io.UnsupportedEncodingException;
import java.net.URLEncoder;
import java.util.ArrayList;
import java.util.Locale;
import android.content.Intent;
import android.net.Uri;
import android.util.Log;
import android.view.LayoutInflater;
import android.view.View;
import android.view.View.OnLongClickListener;
import android.view.ViewGroup;
import android.widget.BaseExpandableListAdapter;
import android.widget.RatingBar;
import android.widget.TextView;
import com.github.andlyticsproject.model.Comment;
import com.github.andlyticsproject.model.CommentGroup;
public class CommentsListAdapter extends BaseExpandableListAdapter {
private LayoutInflater layoutInflater;
private ArrayList<CommentGroup> commentGroups;
private CommentsActivity context;
public CommentsListAdapter(CommentsActivity activity) {
this.setCommentGroups(new ArrayList<CommentGroup>());
this.layoutInflater = activity.getLayoutInflater();
this.context = activity;
}
@Override
public View getChildView(int groupPosition, int childPosition, boolean isLastChild,
View convertView, ViewGroup parent) {
ViewHolderChild holder;
if (convertView == null) {
convertView = layoutInflater.inflate(R.layout.comments_list_item_child, null);
holder = new ViewHolderChild();
holder.text = (TextView) convertView.findViewById(R.id.comments_list_item_text);
holder.user = (TextView) convertView.findViewById(R.id.comments_list_item_username);
holder.date = (TextView) convertView.findViewById(R.id.comments_list_item_date);
holder.device = (TextView) convertView.findViewById(R.id.comments_list_item_device);
holder.rating = (RatingBar) convertView
.findViewById(R.id.comments_list_item_app_ratingbar);
convertView.setTag(holder);
} else {
holder = (ViewHolderChild) convertView.getTag();
}
final Comment comment = getChild(groupPosition, childPosition);
holder.text.setText(comment.getText());
holder.user.setText(comment.getUser());
String version = comment.getAppVersion();
String device = comment.getDevice();
String deviceText = "";
// building string: version X on device: XYZ
if (isNotEmptyOrNull(version)) {
if (isNotEmptyOrNull(device)) {
deviceText = context.getString(R.string.comments_details_full, version, device);
} else {
deviceText = context.getString(R.string.comments_details_version, version);
}
} else if (isNotEmptyOrNull(device)) {
deviceText = context.getString(R.string.comments_details_device, device);
}
holder.device.setText(deviceText);
int rating = comment.getRating();
if (rating > 0 && rating <= 5) {
holder.rating.setRating((float) rating);
holder.rating.setVisibility(View.VISIBLE);
holder.date.setText(null);
} else if (rating == -1) {
// developer reply
holder.rating.setVisibility(View.GONE);
holder.date.setText(comment.getDate());
}
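// Long-pressing a comment opens it in Google Translate using the device's display language.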
convertView.setOnLongClickListener(new OnLongClickListener() {
@Override
public boolean onLongClick(View v) {
String text = comment.getText();
String displayLanguage = Locale.getDefault().getLanguage();
String url = "http://translate.google.de/m/translate?hl=<<lang>>&vi=m&text=<<text>>&langpair=auto|<<lang>>";
try {
url = url.replaceAll("<<lang>>", URLEncoder.encode(displayLanguage, "UTF-8"));
url = url.replaceAll("<<text>>", URLEncoder.encode(text, "UTF-8"));
Log.d("CommentsTranslate", "lang: " + displayLanguage + " url: " + url);
Intent i = new Intent(Intent.ACTION_VIEW);
i.setData(Uri.parse(url));
context.startActivity(i);
} catch (UnsupportedEncodingException e) {
e.printStackTrace();
}
return true;
}
});
return convertView;
}
@Override
public View getGroupView(int groupPosition, boolean isExpanded, View convertView,
ViewGroup parent) {
ViewHolderGroup holder;
if (convertView == null) {
convertView = layoutInflater.inflate(R.layout.comments_list_item, null);
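// Group rows are plain date headers; clear any click listener left over from view recycling.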
convertView.setOnClickListener(null);
holder = new ViewHolderGroup();
holder.date = (TextView) convertView.findViewById(R.id.comments_list_item_date);
convertView.setTag(holder);
} else {
holder = (ViewHolderGroup) convertView.getTag();
}
CommentGroup commentGroup = getGroup(groupPosition);
holder.date.setText(commentGroup.getDateString());
return convertView;
}
private boolean isNotEmptyOrNull(String str) {
return str != null && str.length() > 0;
}
static class ViewHolderGroup {
TextView date;
}
static class ViewHolderChild {
TextView text;
RatingBar rating;
TextView user;
TextView date;
TextView device;
}
@Override
public int getGroupCount() {
return getCommentGroups().size();
}
@Override
public int getChildrenCount(int groupPosition) {
return getCommentGroups().get(groupPosition).getComments().size();
}
@Override
public CommentGroup getGroup(int groupPosition) {
return getCommentGroups().get(groupPosition);
}
@Override
public Comment getChild(int groupPosition, int childPosition) {
return getCommentGroups().get(groupPosition).getComments().get(childPosition);
}
@Override
public long getGroupId(int groupPosition) {
return groupPosition;
}
@Override
public long getChildId(int groupPosition, int childPosition) {
return childPosition;
}
@Override
public boolean hasStableIds() {
return false;
}
@Override
public boolean isChildSelectable(int groupPosition, int childPosition) {
return false;
}
public void setCommentGroups(ArrayList<CommentGroup> commentGroups) {
this.commentGroups = commentGroups;
}
public ArrayList<CommentGroup> getCommentGroups() {
return commentGroups;
}
}
| src/com/github/andlyticsproject/CommentsListAdapter.java | package com.github.andlyticsproject;
import java.io.UnsupportedEncodingException;
import java.net.URLEncoder;
import java.util.ArrayList;
import java.util.Locale;
import android.content.Intent;
import android.net.Uri;
import android.util.Log;
import android.view.LayoutInflater;
import android.view.View;
import android.view.View.OnClickListener;
import android.view.View.OnLongClickListener;
import android.view.ViewGroup;
import android.widget.BaseExpandableListAdapter;
import android.widget.RatingBar;
import android.widget.TextView;
import com.github.andlyticsproject.model.Comment;
import com.github.andlyticsproject.model.CommentGroup;
public class CommentsListAdapter extends BaseExpandableListAdapter {
private LayoutInflater layoutInflater;
private ArrayList<CommentGroup> commentGroups;
private CommentsActivity context;
public CommentsListAdapter(CommentsActivity activity) {
this.setCommentGroups(new ArrayList<CommentGroup>());
this.layoutInflater = activity.getLayoutInflater();
this.context = activity;
}
@Override
public View getChildView(int groupPosition, int childPosition, boolean isLastChild,
View convertView, ViewGroup parent) {
ViewHolderChild holder;
if (convertView == null) {
convertView = layoutInflater.inflate(R.layout.comments_list_item_child, null);
holder = new ViewHolderChild();
holder.text = (TextView) convertView.findViewById(R.id.comments_list_item_text);
holder.user = (TextView) convertView.findViewById(R.id.comments_list_item_username);
holder.date = (TextView) convertView.findViewById(R.id.comments_list_item_date);
holder.device = (TextView) convertView.findViewById(R.id.comments_list_item_device);
holder.rating = (RatingBar) convertView
.findViewById(R.id.comments_list_item_app_ratingbar);
convertView.setTag(holder);
} else {
holder = (ViewHolderChild) convertView.getTag();
}
final Comment comment = getChild(groupPosition, childPosition);
holder.text.setText(comment.getText());
holder.user.setText(comment.getUser());
String version = comment.getAppVersion();
String device = comment.getDevice();
String deviceText = "";
// building string: version X on device: XYZ
if (isNotEmptyOrNull(version)) {
if (isNotEmptyOrNull(device)) {
deviceText = context.getString(R.string.comments_details_full, version, device);
} else {
deviceText = context.getString(R.string.comments_details_version, version);
}
} else if (isNotEmptyOrNull(device)) {
deviceText = context.getString(R.string.comments_details_device, device);
}
holder.device.setText(deviceText);
int rating = comment.getRating();
if (rating > 0 && rating <= 5) {
holder.rating.setRating((float) rating);
holder.rating.setVisibility(View.VISIBLE);
holder.date.setText(null);
} else if (rating == -1) {
// developer reply
holder.rating.setVisibility(View.GONE);
holder.date.setText(comment.getDate());
}
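// Long-pressing a comment opens it in Google Translate using the device's display language.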
convertView.setOnLongClickListener(new OnLongClickListener() {
@Override
public boolean onLongClick(View v) {
String text = comment.getText();
String displayLanguage = Locale.getDefault().getLanguage();
String url = "http://translate.google.de/m/translate?hl=<<lang>>&vi=m&text=<<text>>&langpair=auto|<<lang>>";
try {
url = url.replaceAll("<<lang>>", URLEncoder.encode(displayLanguage, "UTF-8"));
url = url.replaceAll("<<text>>", URLEncoder.encode(text, "UTF-8"));
Log.d("CommentsTranslate", "lang: " + displayLanguage + " url: " + url);
Intent i = new Intent(Intent.ACTION_VIEW);
i.setData(Uri.parse(url));
context.startActivity(i);
} catch (UnsupportedEncodingException e) {
e.printStackTrace();
}
return true;
}
});
return convertView;
}
@Override
public View getGroupView(int groupPosition, boolean isExpanded, View convertView,
ViewGroup parent) {
ViewHolderGroup holder;
if (convertView == null) {
convertView = layoutInflater.inflate(R.layout.comments_list_item, null);
holder = new ViewHolderGroup();
holder.date = (TextView) convertView.findViewById(R.id.comments_list_item_date);
convertView.setTag(holder);
} else {
holder = (ViewHolderGroup) convertView.getTag();
}
CommentGroup commentGroup = getGroup(groupPosition);
holder.date.setText(commentGroup.getDateString());
convertView.setOnClickListener(new OnClickListener() {
@Override
public void onClick(View v) {
}
});
return convertView;
}
private boolean isNotEmptyOrNull(String str) {
return str != null && str.length() > 0;
}
static class ViewHolderGroup {
TextView date;
}
static class ViewHolderChild {
TextView text;
RatingBar rating;
TextView user;
TextView date;
TextView device;
}
@Override
public int getGroupCount() {
return getCommentGroups().size();
}
@Override
public int getChildrenCount(int groupPosition) {
return getCommentGroups().get(groupPosition).getComments().size();
}
@Override
public CommentGroup getGroup(int groupPosition) {
return getCommentGroups().get(groupPosition);
}
@Override
public Comment getChild(int groupPosition, int childPosition) {
return getCommentGroups().get(groupPosition).getComments().get(childPosition);
}
@Override
public long getGroupId(int groupPosition) {
return groupPosition;
}
@Override
public long getChildId(int groupPosition, int childPosition) {
return childPosition;
}
@Override
public boolean hasStableIds() {
return false;
}
@Override
public boolean isChildSelectable(int groupPosition, int childPosition) {
return false;
}
public void setCommentGroups(ArrayList<CommentGroup> commentGroups) {
this.commentGroups = commentGroups;
}
public ArrayList<CommentGroup> getCommentGroups() {
return commentGroups;
}
}
| Minor cleanup. Don't bother registering an empty click listener.
| src/com/github/andlyticsproject/CommentsListAdapter.java | Minor cleanup. Don't bother registering an empty click listener. | <ide><path>rc/com/github/andlyticsproject/CommentsListAdapter.java
<ide> import android.util.Log;
<ide> import android.view.LayoutInflater;
<ide> import android.view.View;
<del>import android.view.View.OnClickListener;
<ide> import android.view.View.OnLongClickListener;
<ide> import android.view.ViewGroup;
<ide> import android.widget.BaseExpandableListAdapter;
<ide>
<ide> if (convertView == null) {
<ide> convertView = layoutInflater.inflate(R.layout.comments_list_item, null);
<del>
<add> convertView.setOnClickListener(null);
<ide> holder = new ViewHolderGroup();
<ide> holder.date = (TextView) convertView.findViewById(R.id.comments_list_item_date);
<ide> convertView.setTag(holder);
<ide> } else {
<del>
<ide> holder = (ViewHolderGroup) convertView.getTag();
<ide> }
<ide>
<ide> CommentGroup commentGroup = getGroup(groupPosition);
<ide> holder.date.setText(commentGroup.getDateString());
<del>
<del> convertView.setOnClickListener(new OnClickListener() {
<del>
<del> @Override
<del> public void onClick(View v) {
<del>
<del> }
<del> });
<ide>
<ide> return convertView;
<ide> } |
|
Java | apache-2.0 | 9f607748d6feaaf2402f7144b8017f3460cf71cb | 0 | firebase/FirebaseUI-Android,firebase/FirebaseUI-Android,firebase/FirebaseUI-Android,firebase/FirebaseUI-Android | /*
* Copyright 2016 Google Inc. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
* in compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the
* License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.firebase.ui.auth.ui.idp;
import android.content.Context;
import android.content.Intent;
import android.os.Bundle;
import android.text.TextUtils;
import android.view.View;
import android.view.ViewGroup;
import android.widget.ImageView;
import android.widget.ProgressBar;
import android.widget.TextView;
import android.widget.Toast;
import com.firebase.ui.auth.AuthMethodPickerLayout;
import com.firebase.ui.auth.AuthUI;
import com.firebase.ui.auth.AuthUI.IdpConfig;
import com.firebase.ui.auth.ErrorCodes;
import com.firebase.ui.auth.FirebaseAuthAnonymousUpgradeException;
import com.firebase.ui.auth.FirebaseUiException;
import com.firebase.ui.auth.IdpResponse;
import com.firebase.ui.auth.R;
import com.firebase.ui.auth.data.model.FlowParameters;
import com.firebase.ui.auth.data.model.UserCancellationException;
import com.firebase.ui.auth.data.remote.AnonymousSignInHandler;
import com.firebase.ui.auth.data.remote.EmailSignInHandler;
import com.firebase.ui.auth.data.remote.FacebookSignInHandler;
import com.firebase.ui.auth.data.remote.GenericIdpSignInHandler;
import com.firebase.ui.auth.data.remote.GoogleSignInHandler;
import com.firebase.ui.auth.data.remote.PhoneSignInHandler;
import com.firebase.ui.auth.ui.AppCompatBase;
import com.firebase.ui.auth.util.ExtraConstants;
import com.firebase.ui.auth.util.data.PrivacyDisclosureUtils;
import com.firebase.ui.auth.viewmodel.ProviderSignInBase;
import com.firebase.ui.auth.viewmodel.ResourceObserver;
import com.firebase.ui.auth.viewmodel.idp.SocialProviderResponseHandler;
import com.google.android.material.snackbar.Snackbar;
import com.google.firebase.auth.EmailAuthProvider;
import com.google.firebase.auth.FacebookAuthProvider;
import com.google.firebase.auth.GoogleAuthProvider;
import com.google.firebase.auth.PhoneAuthProvider;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import androidx.annotation.IdRes;
import androidx.annotation.LayoutRes;
import androidx.annotation.NonNull;
import androidx.annotation.Nullable;
import androidx.annotation.RestrictTo;
import androidx.constraintlayout.widget.ConstraintLayout;
import androidx.constraintlayout.widget.ConstraintSet;
import androidx.lifecycle.ViewModelProvider;
import static com.firebase.ui.auth.util.ExtraConstants.GENERIC_OAUTH_BUTTON_ID;
import static com.firebase.ui.auth.util.ExtraConstants.GENERIC_OAUTH_PROVIDER_ID;
import static com.firebase.ui.auth.AuthUI.EMAIL_LINK_PROVIDER;
/**
* Presents the list of authentication options for this app to the user.
*/
@RestrictTo(RestrictTo.Scope.LIBRARY_GROUP)
public class AuthMethodPickerActivity extends AppCompatBase {
private SocialProviderResponseHandler mHandler;
private List<ProviderSignInBase<?>> mProviders;
private ProgressBar mProgressBar;
private ViewGroup mProviderHolder;
private AuthMethodPickerLayout customLayout;
public static Intent createIntent(Context context, FlowParameters flowParams) {
return createBaseIntent(context, AuthMethodPickerActivity.class, flowParams);
}
@Override
protected void onCreate(@Nullable Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
FlowParameters params = getFlowParams();
customLayout = params.authMethodPickerLayout;
mHandler = new ViewModelProvider(this).get(SocialProviderResponseHandler.class);
mHandler.init(params);
mProviders = new ArrayList<>();
if (customLayout != null) {
setContentView(customLayout.getMainLayout());
// Set up using the custom layout
populateIdpListCustomLayout(params.providers);
} else {
setContentView(R.layout.fui_auth_method_picker_layout);
// UI only with the default layout
mProgressBar = findViewById(R.id.top_progress_bar);
mProviderHolder = findViewById(R.id.btn_holder);
populateIdpList(params.providers);
int logoId = params.logoId;
if (logoId == AuthUI.NO_LOGO) {
findViewById(R.id.logo).setVisibility(View.GONE);
ConstraintLayout layout = findViewById(R.id.root);
ConstraintSet constraints = new ConstraintSet();
constraints.clone(layout);
constraints.setHorizontalBias(R.id.container, 0.5f);
constraints.setVerticalBias(R.id.container, 0.5f);
constraints.applyTo(layout);
} else {
ImageView logo = findViewById(R.id.logo);
logo.setImageResource(logoId);
}
}
boolean tosAndPpConfigured = getFlowParams().isPrivacyPolicyUrlProvided()
&& getFlowParams().isTermsOfServiceUrlProvided();
int termsTextId = customLayout == null
? R.id.main_tos_and_pp
: customLayout.getTosPpView();
if (termsTextId >= 0) {
TextView termsText = findViewById(termsTextId);
// No ToS or PP provided, so we should hide the view entirely
if (!tosAndPpConfigured) {
termsText.setVisibility(View.GONE);
} else {
PrivacyDisclosureUtils.setupTermsOfServiceAndPrivacyPolicyText(this,
getFlowParams(),
termsText);
}
}
// Shared result handler for both the default and custom layouts
mHandler.getOperation().observe(this, new ResourceObserver<IdpResponse>(
this, R.string.fui_progress_dialog_signing_in) {
@Override
protected void onSuccess(@NonNull IdpResponse response) {
startSaveCredentials(mHandler.getCurrentUser(), response, null);
}
@Override
protected void onFailure(@NonNull Exception e) {
if (e instanceof UserCancellationException) {
// User pressed back, there is no error.
return;
}
if (e instanceof FirebaseAuthAnonymousUpgradeException) {
finish(ErrorCodes.ANONYMOUS_UPGRADE_MERGE_CONFLICT,
((FirebaseAuthAnonymousUpgradeException) e).getResponse().toIntent());
} else if (e instanceof FirebaseUiException) {
FirebaseUiException fue = (FirebaseUiException) e;
finish(RESULT_CANCELED, IdpResponse.from(fue).toIntent());
} else {
String text = getString(R.string.fui_error_unknown);
Toast.makeText(AuthMethodPickerActivity.this,
text,
Toast.LENGTH_SHORT).show();
}
}
});
}
private void populateIdpList(List<IdpConfig> providerConfigs) {
mProviders = new ArrayList<>();
for (IdpConfig idpConfig : providerConfigs) {
@LayoutRes int buttonLayout;
final String providerId = idpConfig.getProviderId();
switch (providerId) {
case GoogleAuthProvider.PROVIDER_ID:
buttonLayout = R.layout.fui_idp_button_google;
break;
case FacebookAuthProvider.PROVIDER_ID:
buttonLayout = R.layout.fui_idp_button_facebook;
break;
case EMAIL_LINK_PROVIDER:
case EmailAuthProvider.PROVIDER_ID:
buttonLayout = R.layout.fui_provider_button_email;
break;
case PhoneAuthProvider.PROVIDER_ID:
buttonLayout = R.layout.fui_provider_button_phone;
break;
case AuthUI.ANONYMOUS_PROVIDER:
buttonLayout = R.layout.fui_provider_button_anonymous;
break;
default:
if (!TextUtils.isEmpty(
idpConfig.getParams().getString(GENERIC_OAUTH_PROVIDER_ID))) {
buttonLayout = idpConfig.getParams().getInt(GENERIC_OAUTH_BUTTON_ID);
break;
}
throw new IllegalStateException("Unknown provider: " + providerId);
}
View loginButton = getLayoutInflater().inflate(buttonLayout, mProviderHolder, false);
handleSignInOperation(idpConfig, loginButton);
mProviderHolder.addView(loginButton);
}
}
private void populateIdpListCustomLayout(List<IdpConfig> providerConfigs) {
Map<String, Integer> providerButtonIds = customLayout.getProvidersButton();
for (IdpConfig idpConfig : providerConfigs) {
final String providerId = providerOrEmailLinkProvider(idpConfig.getProviderId());
Integer buttonResId = providerButtonIds.get(providerId);
if (buttonResId == null) {
throw new IllegalStateException("No button found for auth provider: " + idpConfig.getProviderId());
}
@IdRes int buttonId = buttonResId;
View loginButton = findViewById(buttonId);
handleSignInOperation(idpConfig, loginButton);
}
// Hide custom layout buttons that don't have their identity provider set
for (String providerBtnId : providerButtonIds.keySet()) {
if (providerBtnId == null) {
continue;
}
boolean hasProvider = false;
for (IdpConfig idpConfig : providerConfigs) {
String providerId = providerOrEmailLinkProvider(idpConfig.getProviderId());
if (providerBtnId.equals(providerId)) {
hasProvider = true;
break;
}
}
if (!hasProvider) {
Integer resId = providerButtonIds.get(providerBtnId);
if (resId == null) {
continue;
}
@IdRes int buttonId = resId;
findViewById(buttonId).setVisibility(View.GONE);
}
}
}
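// Email link and email/password share one button, so map the link sign-in method back to the email provider id.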
@NonNull
private String providerOrEmailLinkProvider(@NonNull String providerId) {
if (providerId.equals(EmailAuthProvider.EMAIL_LINK_SIGN_IN_METHOD)) {
return EmailAuthProvider.PROVIDER_ID;
}
return providerId;
}
private void handleSignInOperation(final IdpConfig idpConfig, View view) {
ViewModelProvider supplier = new ViewModelProvider(this);
final String providerId = idpConfig.getProviderId();
final ProviderSignInBase<?> provider;
AuthUI authUI = getAuthUI();
switch (providerId) {
case EMAIL_LINK_PROVIDER:
case EmailAuthProvider.PROVIDER_ID:
provider = supplier.get(EmailSignInHandler.class).initWith(null);
break;
case PhoneAuthProvider.PROVIDER_ID:
provider = supplier.get(PhoneSignInHandler.class).initWith(idpConfig);
break;
case AuthUI.ANONYMOUS_PROVIDER:
provider = supplier.get(AnonymousSignInHandler.class).initWith(getFlowParams());
break;
case GoogleAuthProvider.PROVIDER_ID:
if (authUI.isUseEmulator()) {
provider = supplier.get(GenericIdpSignInHandler.class)
.initWith(GenericIdpSignInHandler.getGenericGoogleConfig());
} else {
provider = supplier.get(GoogleSignInHandler.class).initWith(
new GoogleSignInHandler.Params(idpConfig));
}
break;
case FacebookAuthProvider.PROVIDER_ID:
if (authUI.isUseEmulator()) {
provider = supplier.get(GenericIdpSignInHandler.class)
.initWith(GenericIdpSignInHandler.getGenericFacebookConfig());
} else {
provider = supplier.get(FacebookSignInHandler.class).initWith(idpConfig);
}
break;
default:
if (!TextUtils.isEmpty(
idpConfig.getParams().getString(GENERIC_OAUTH_PROVIDER_ID))) {
provider = supplier.get(GenericIdpSignInHandler.class).initWith(idpConfig);
break;
}
throw new IllegalStateException("Unknown provider: " + providerId);
}
mProviders.add(provider);
provider.getOperation().observe(this, new ResourceObserver<IdpResponse>(this) {
@Override
protected void onSuccess(@NonNull IdpResponse response) {
handleResponse(response);
}
@Override
protected void onFailure(@NonNull Exception e) {
if (e instanceof FirebaseAuthAnonymousUpgradeException) {
finish(RESULT_CANCELED, new Intent().putExtra(ExtraConstants.IDP_RESPONSE,
IdpResponse.from(e)));
return;
}
handleResponse(IdpResponse.from(e));
}
private void handleResponse(@NonNull IdpResponse response) {
// If we're using the emulator then the social flows actually use Generic IDP
// instead which means we shouldn't use the social response handler.
boolean isSocialResponse = AuthUI.SOCIAL_PROVIDERS.contains(providerId)
&& !getAuthUI().isUseEmulator();
if (!response.isSuccessful()) {
// We have no idea what provider this error stemmed from so just forward
// this along to the handler.
mHandler.startSignIn(response);
} else if (isSocialResponse) {
// Don't use the response's provider since it can be different than the one
// that launched the sign-in attempt. Ex: the email flow is started, but
// ends up turning into a Google sign-in because that account already
                    // existed. In the previous example, an extra sign-in would incorrectly
                    // be started.
mHandler.startSignIn(response);
} else {
// Email, phone, or generic: the credentials should have already been saved so
// simply move along.
                    // Anonymous sign in also does not require any other operations.
finish(response.isSuccessful() ? RESULT_OK : RESULT_CANCELED,
response.toIntent());
}
}
});
view.setOnClickListener(new View.OnClickListener() {
@Override
public void onClick(View view) {
if (isOffline()) {
Snackbar.make(findViewById(android.R.id.content), getString(R.string.fui_no_internet), Snackbar.LENGTH_SHORT).show();
return;
}
provider.startSignIn(getAuth(), AuthMethodPickerActivity.this,
idpConfig.getProviderId());
}
});
}
@Override
protected void onActivityResult(int requestCode, int resultCode, Intent data) {
super.onActivityResult(requestCode, resultCode, data);
mHandler.onActivityResult(requestCode, resultCode, data);
for (ProviderSignInBase<?> provider : mProviders) {
provider.onActivityResult(requestCode, resultCode, data);
}
}
@Override
public void showProgress(int message) {
//mProgressBar & mProviderHolder might be null if using custom AuthMethodPickerLayout
if (customLayout == null) {
mProgressBar.setVisibility(View.VISIBLE);
for (int i = 0; i < mProviderHolder.getChildCount(); i++) {
View child = mProviderHolder.getChildAt(i);
child.setEnabled(false);
child.setAlpha(0.75f);
}
}
}
@Override
public void hideProgress() {
//mProgressBar & mProviderHolder might be null if using custom AuthMethodPickerLayout
if (customLayout == null) {
mProgressBar.setVisibility(View.INVISIBLE);
for (int i = 0; i < mProviderHolder.getChildCount(); i++) {
View child = mProviderHolder.getChildAt(i);
child.setEnabled(true);
child.setAlpha(1.0f);
}
}
}
}
| auth/src/main/java/com/firebase/ui/auth/ui/idp/AuthMethodPickerActivity.java | /*
* Copyright 2016 Google Inc. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
* in compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the
* License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.firebase.ui.auth.ui.idp;
import android.content.Context;
import android.content.Intent;
import android.os.Bundle;
import android.text.TextUtils;
import android.view.View;
import android.view.ViewGroup;
import android.widget.ImageView;
import android.widget.ProgressBar;
import android.widget.TextView;
import android.widget.Toast;
import com.firebase.ui.auth.AuthMethodPickerLayout;
import com.firebase.ui.auth.AuthUI;
import com.firebase.ui.auth.AuthUI.IdpConfig;
import com.firebase.ui.auth.ErrorCodes;
import com.firebase.ui.auth.FirebaseAuthAnonymousUpgradeException;
import com.firebase.ui.auth.FirebaseUiException;
import com.firebase.ui.auth.IdpResponse;
import com.firebase.ui.auth.R;
import com.firebase.ui.auth.data.model.FlowParameters;
import com.firebase.ui.auth.data.model.UserCancellationException;
import com.firebase.ui.auth.data.remote.AnonymousSignInHandler;
import com.firebase.ui.auth.data.remote.EmailSignInHandler;
import com.firebase.ui.auth.data.remote.FacebookSignInHandler;
import com.firebase.ui.auth.data.remote.GenericIdpSignInHandler;
import com.firebase.ui.auth.data.remote.GoogleSignInHandler;
import com.firebase.ui.auth.data.remote.PhoneSignInHandler;
import com.firebase.ui.auth.ui.AppCompatBase;
import com.firebase.ui.auth.util.ExtraConstants;
import com.firebase.ui.auth.util.data.PrivacyDisclosureUtils;
import com.firebase.ui.auth.viewmodel.ProviderSignInBase;
import com.firebase.ui.auth.viewmodel.ResourceObserver;
import com.firebase.ui.auth.viewmodel.idp.SocialProviderResponseHandler;
import com.google.android.material.snackbar.Snackbar;
import com.google.firebase.auth.EmailAuthProvider;
import com.google.firebase.auth.FacebookAuthProvider;
import com.google.firebase.auth.GoogleAuthProvider;
import com.google.firebase.auth.PhoneAuthProvider;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import androidx.annotation.IdRes;
import androidx.annotation.LayoutRes;
import androidx.annotation.NonNull;
import androidx.annotation.Nullable;
import androidx.annotation.RestrictTo;
import androidx.constraintlayout.widget.ConstraintLayout;
import androidx.constraintlayout.widget.ConstraintSet;
import androidx.lifecycle.ViewModelProvider;
import static com.firebase.ui.auth.util.ExtraConstants.GENERIC_OAUTH_BUTTON_ID;
import static com.firebase.ui.auth.util.ExtraConstants.GENERIC_OAUTH_PROVIDER_ID;
import static com.firebase.ui.auth.AuthUI.EMAIL_LINK_PROVIDER;
/**
* Presents the list of authentication options for this app to the user.
*/
@RestrictTo(RestrictTo.Scope.LIBRARY_GROUP)
public class AuthMethodPickerActivity extends AppCompatBase {
private SocialProviderResponseHandler mHandler;
private List<ProviderSignInBase<?>> mProviders;
private ProgressBar mProgressBar;
private ViewGroup mProviderHolder;
private AuthMethodPickerLayout customLayout;
public static Intent createIntent(Context context, FlowParameters flowParams) {
return createBaseIntent(context, AuthMethodPickerActivity.class, flowParams);
}
@Override
protected void onCreate(@Nullable Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
FlowParameters params = getFlowParams();
customLayout = params.authMethodPickerLayout;
mHandler = new ViewModelProvider(this).get(SocialProviderResponseHandler.class);
mHandler.init(params);
mProviders = new ArrayList<>();
if (customLayout != null) {
setContentView(customLayout.getMainLayout());
//Setup using custom layout
populateIdpListCustomLayout(params.providers);
} else {
setContentView(R.layout.fui_auth_method_picker_layout);
//UI only with default layout
mProgressBar = findViewById(R.id.top_progress_bar);
mProviderHolder = findViewById(R.id.btn_holder);
populateIdpList(params.providers);
int logoId = params.logoId;
if (logoId == AuthUI.NO_LOGO) {
findViewById(R.id.logo).setVisibility(View.GONE);
ConstraintLayout layout = findViewById(R.id.root);
ConstraintSet constraints = new ConstraintSet();
constraints.clone(layout);
constraints.setHorizontalBias(R.id.container, 0.5f);
constraints.setVerticalBias(R.id.container, 0.5f);
constraints.applyTo(layout);
} else {
ImageView logo = findViewById(R.id.logo);
logo.setImageResource(logoId);
}
}
boolean tosAndPpConfigured = getFlowParams().isPrivacyPolicyUrlProvided()
&& getFlowParams().isTermsOfServiceUrlProvided();
int termsTextId = customLayout == null
? R.id.main_tos_and_pp
: customLayout.getTosPpView();
if (termsTextId >= 0) {
TextView termsText = findViewById(termsTextId);
// No ToS or PP provided, so we should hide the view entirely
if (!tosAndPpConfigured) {
termsText.setVisibility(View.GONE);
} else {
PrivacyDisclosureUtils.setupTermsOfServiceAndPrivacyPolicyText(this,
getFlowParams(),
termsText);
}
}
//Handler for both
mHandler.getOperation().observe(this, new ResourceObserver<IdpResponse>(
this, R.string.fui_progress_dialog_signing_in) {
@Override
protected void onSuccess(@NonNull IdpResponse response) {
startSaveCredentials(mHandler.getCurrentUser(), response, null);
}
@Override
protected void onFailure(@NonNull Exception e) {
if (e instanceof UserCancellationException) {
// User pressed back, there is no error.
return;
}
if (e instanceof FirebaseAuthAnonymousUpgradeException) {
finish(ErrorCodes.ANONYMOUS_UPGRADE_MERGE_CONFLICT,
((FirebaseAuthAnonymousUpgradeException) e).getResponse().toIntent());
} else if (e instanceof FirebaseUiException) {
FirebaseUiException fue = (FirebaseUiException) e;
finish(RESULT_CANCELED, IdpResponse.from(fue).toIntent());
} else {
String text = getString(R.string.fui_error_unknown);
Toast.makeText(AuthMethodPickerActivity.this,
text,
Toast.LENGTH_SHORT).show();
}
}
});
}
private void populateIdpList(List<IdpConfig> providerConfigs) {
ViewModelProvider supplier = new ViewModelProvider(this);
mProviders = new ArrayList<>();
for (IdpConfig idpConfig : providerConfigs) {
@LayoutRes int buttonLayout;
final String providerId = idpConfig.getProviderId();
switch (providerId) {
case GoogleAuthProvider.PROVIDER_ID:
buttonLayout = R.layout.fui_idp_button_google;
break;
case FacebookAuthProvider.PROVIDER_ID:
buttonLayout = R.layout.fui_idp_button_facebook;
break;
case EMAIL_LINK_PROVIDER:
case EmailAuthProvider.PROVIDER_ID:
buttonLayout = R.layout.fui_provider_button_email;
break;
case PhoneAuthProvider.PROVIDER_ID:
buttonLayout = R.layout.fui_provider_button_phone;
break;
case AuthUI.ANONYMOUS_PROVIDER:
buttonLayout = R.layout.fui_provider_button_anonymous;
break;
default:
if (!TextUtils.isEmpty(
idpConfig.getParams().getString(GENERIC_OAUTH_PROVIDER_ID))) {
buttonLayout = idpConfig.getParams().getInt(GENERIC_OAUTH_BUTTON_ID);
break;
}
throw new IllegalStateException("Unknown provider: " + providerId);
}
View loginButton = getLayoutInflater().inflate(buttonLayout, mProviderHolder, false);
handleSignInOperation(idpConfig, loginButton);
mProviderHolder.addView(loginButton);
}
}
private void populateIdpListCustomLayout(List<IdpConfig> providerConfigs) {
Map<String, Integer> providerButtonIds = customLayout.getProvidersButton();
for (IdpConfig idpConfig : providerConfigs) {
final String providerId = providerOrEmailLinkProvider(idpConfig.getProviderId());
Integer buttonResId = providerButtonIds.get(providerId);
if (buttonResId == null) {
throw new IllegalStateException("No button found for auth provider: " + idpConfig.getProviderId());
}
@IdRes int buttonId = buttonResId;
View loginButton = findViewById(buttonId);
handleSignInOperation(idpConfig, loginButton);
}
//hide custom layout buttons that don't have their identity provider set
for (String providerBtnId : providerButtonIds.keySet()) {
if (providerBtnId == null) {
continue;
}
boolean hasProvider = false;
for (IdpConfig idpConfig : providerConfigs) {
if (providerBtnId.equals(idpConfig.getProviderId())) {
hasProvider = true;
break;
}
}
if (!hasProvider) {
Integer resId = providerButtonIds.get(providerBtnId);
if (resId == null) {
continue;
}
@IdRes int buttonId = resId;
findViewById(buttonId).setVisibility(View.GONE);
}
}
}
@NonNull
private String providerOrEmailLinkProvider(@NonNull String providerId) {
if (providerId.equals(EmailAuthProvider.EMAIL_LINK_SIGN_IN_METHOD)) {
return EmailAuthProvider.PROVIDER_ID;
}
return providerId;
}
private void handleSignInOperation(final IdpConfig idpConfig, View view) {
ViewModelProvider supplier = new ViewModelProvider(this);
final String providerId = idpConfig.getProviderId();
final ProviderSignInBase<?> provider;
AuthUI authUI = getAuthUI();
switch (providerId) {
case EMAIL_LINK_PROVIDER:
case EmailAuthProvider.PROVIDER_ID:
provider = supplier.get(EmailSignInHandler.class).initWith(null);
break;
case PhoneAuthProvider.PROVIDER_ID:
provider = supplier.get(PhoneSignInHandler.class).initWith(idpConfig);
break;
case AuthUI.ANONYMOUS_PROVIDER:
provider = supplier.get(AnonymousSignInHandler.class).initWith(getFlowParams());
break;
case GoogleAuthProvider.PROVIDER_ID:
if (authUI.isUseEmulator()) {
provider = supplier.get(GenericIdpSignInHandler.class)
.initWith(GenericIdpSignInHandler.getGenericGoogleConfig());
} else {
provider = supplier.get(GoogleSignInHandler.class).initWith(
new GoogleSignInHandler.Params(idpConfig));
}
break;
case FacebookAuthProvider.PROVIDER_ID:
if (authUI.isUseEmulator()) {
provider = supplier.get(GenericIdpSignInHandler.class)
.initWith(GenericIdpSignInHandler.getGenericFacebookConfig());
} else {
provider = supplier.get(FacebookSignInHandler.class).initWith(idpConfig);
}
break;
default:
if (!TextUtils.isEmpty(
idpConfig.getParams().getString(GENERIC_OAUTH_PROVIDER_ID))) {
provider = supplier.get(GenericIdpSignInHandler.class).initWith(idpConfig);
break;
}
throw new IllegalStateException("Unknown provider: " + providerId);
}
mProviders.add(provider);
provider.getOperation().observe(this, new ResourceObserver<IdpResponse>(this) {
@Override
protected void onSuccess(@NonNull IdpResponse response) {
handleResponse(response);
}
@Override
protected void onFailure(@NonNull Exception e) {
if (e instanceof FirebaseAuthAnonymousUpgradeException) {
finish(RESULT_CANCELED, new Intent().putExtra(ExtraConstants.IDP_RESPONSE,
IdpResponse.from(e)));
return;
}
handleResponse(IdpResponse.from(e));
}
private void handleResponse(@NonNull IdpResponse response) {
// If we're using the emulator then the social flows actually use Generic IDP
// instead which means we shouldn't use the social response handler.
boolean isSocialResponse = AuthUI.SOCIAL_PROVIDERS.contains(providerId)
&& !getAuthUI().isUseEmulator();
if (!response.isSuccessful()) {
// We have no idea what provider this error stemmed from so just forward
// this along to the handler.
mHandler.startSignIn(response);
} else if (isSocialResponse) {
// Don't use the response's provider since it can be different than the one
// that launched the sign-in attempt. Ex: the email flow is started, but
// ends up turning into a Google sign-in because that account already
                    // existed. In the previous example, an extra sign-in would incorrectly
                    // be started.
mHandler.startSignIn(response);
} else {
// Email, phone, or generic: the credentials should have already been saved so
// simply move along.
                    // Anonymous sign in also does not require any other operations.
finish(response.isSuccessful() ? RESULT_OK : RESULT_CANCELED,
response.toIntent());
}
}
});
view.setOnClickListener(new View.OnClickListener() {
@Override
public void onClick(View view) {
if (isOffline()) {
Snackbar.make(findViewById(android.R.id.content), getString(R.string.fui_no_internet), Snackbar.LENGTH_SHORT).show();
return;
}
provider.startSignIn(getAuth(), AuthMethodPickerActivity.this,
idpConfig.getProviderId());
}
});
}
@Override
protected void onActivityResult(int requestCode, int resultCode, Intent data) {
super.onActivityResult(requestCode, resultCode, data);
mHandler.onActivityResult(requestCode, resultCode, data);
for (ProviderSignInBase<?> provider : mProviders) {
provider.onActivityResult(requestCode, resultCode, data);
}
}
@Override
public void showProgress(int message) {
//mProgressBar & mProviderHolder might be null if using custom AuthMethodPickerLayout
if (customLayout == null) {
mProgressBar.setVisibility(View.VISIBLE);
for (int i = 0; i < mProviderHolder.getChildCount(); i++) {
View child = mProviderHolder.getChildAt(i);
child.setEnabled(false);
child.setAlpha(0.75f);
}
}
}
@Override
public void hideProgress() {
//mProgressBar & mProviderHolder might be null if using custom AuthMethodPickerLayout
if (customLayout == null) {
mProgressBar.setVisibility(View.INVISIBLE);
for (int i = 0; i < mProviderHolder.getChildCount(); i++) {
View child = mProviderHolder.getChildAt(i);
child.setEnabled(true);
child.setAlpha(1.0f);
}
}
}
}
| Fix email-link with custom layouts (#1955)
| auth/src/main/java/com/firebase/ui/auth/ui/idp/AuthMethodPickerActivity.java | Fix email-link with custom layouts (#1955) | <ide><path>uth/src/main/java/com/firebase/ui/auth/ui/idp/AuthMethodPickerActivity.java
<ide> }
<ide> boolean hasProvider = false;
<ide> for (IdpConfig idpConfig : providerConfigs) {
<del> if (providerBtnId.equals(idpConfig.getProviderId())) {
<add> String providerId = providerOrEmailLinkProvider(idpConfig.getProviderId());
<add> if (providerBtnId.equals(providerId)) {
<ide> hasProvider = true;
<ide> break;
<ide> } |
|
Java | apache-2.0 | error: pathspec 'src/main/java/com/adms/batch/kpireport/app/TsrTrackingValidation.java' did not match any file(s) known to git
| 399416dd8289a0f53be034699e70c4c787c28fcd | 1 | AdamsTHDev/kpi-report-app | package com.adms.batch.kpireport.app;
import java.io.File;
import java.io.FileInputStream;
import java.io.FilenameFilter;
import java.io.InputStream;
import java.util.ArrayList;
import java.util.List;
import org.apache.commons.lang3.StringUtils;
import org.apache.poi.ss.usermodel.Row;
import org.apache.poi.ss.usermodel.Sheet;
import org.apache.poi.ss.usermodel.Workbook;
import org.apache.poi.ss.usermodel.WorkbookFactory;
import org.apache.poi.ss.util.CellReference;
import com.adms.support.FileWalker;
import com.adms.utils.FileUtil;
import com.adms.utils.Logger;
public class TsrTrackingValidation {
private static Logger logger = Logger.getLogger();
private static final String OK = "OK";
private static final String ERR = "ERR";
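	// args: [0] root folder to scan, [1] output directory, [2] output file name, [3] log file path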
public static void main(String[] args) {
List<String[]> records = new ArrayList<>();
String rootPath = args[0];
String outDir = args[1];
String outName = args[2];
InputStream wbIS = null;
try {
logger.setLogFileName(args[3]);
logger.info("Start");
List<String> dirs = getFilePaths(rootPath);
for(String dir : dirs) {
String listLotCode = null;
String valid = null;
wbIS = new FileInputStream(dir);
Workbook wb = WorkbookFactory.create(wbIS);
boolean isSingleSheet = wb.getNumberOfSheets() == 1;
for(int sheetIdx = 0; sheetIdx < wb.getNumberOfSheets(); sheetIdx++) {
Sheet sheet = wb.getSheetAt(sheetIdx);
String listLotName = sheet.getRow(1).getCell(CellReference.convertColStringToIndex("C"), Row.CREATE_NULL_AS_BLANK).getStringCellValue();
if(listLotName.contains(",")) {
valid = isSingleSheet ? ERR : sheetIdx == 0 ? OK : ERR;
} else {
listLotCode = getListLotCode(listLotName);
// <!-- check -->
valid = StringUtils.isBlank(listLotCode) ? ERR : OK;
}
if(valid.equals(ERR))
records.add(new String[]{valid, dir, wb.getSheetName(sheetIdx), listLotName});
}
}
writeDataOut(outDir, outName, records);
} catch(Exception e) {
e.printStackTrace();
} finally {
try { wbIS.close(); } catch(Exception e) {}
}
logger.info("Finish");
}
private static void writeDataOut(String outDir, String outName, List<String[]> records) throws Exception {
FileUtil.getInstance().createDirectory(outDir);
StringBuffer buffer = new StringBuffer();
int row = 0;
int idx = 0;
for(String[] strs : records) {
idx = 0;
			for(String str : strs) {
				buffer.append(toCoveredTxt(str));
				idx++;
				// comma between fields only, no trailing comma
				if(idx < strs.length) {
					buffer.append(",");
				}
			}
			row++;
			// newline between records only, no trailing newline
			if(row < records.size()) {
				buffer.append("\n");
			}
}
String msg = FileUtil.getInstance().writeout(new File(outDir + "/" + outName), buffer);
logger.info("write to: " + msg);
}
private static String toCoveredTxt(String arg) {
return new String("\"" + arg + "\"");
}
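	// Returns the text inside the last pair of parentheses of the list lot name, or an empty string if none is found.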
private static String getListLotCode(String val) {
String result = "";
if(!StringUtils.isEmpty(val)) {
int count = 0;
// <!-- Check -->
for(int i = 0; i < val.length(); i++) {
if(val.charAt(i) == '(') {
count++;
}
}
// <!-- process -->
if(count == 1) {
return val.substring(val.indexOf("(") + 1, val.indexOf(")")).trim();
} else if(count == 2) {
return val.substring(val.indexOf("(", val.indexOf("(") + 1) + 1, val.length() - 1).trim();
} else {
logger.error("Cannot Retrieve: " + val);
}
}
return result;
}
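	// Walks the root folder and collects TSR tracking workbook paths, skipping Office temp (~$) files and aggregate/QA reports.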
private static List<String> getFilePaths(String root) {
FileWalker fw = new FileWalker();
fw.walk(root, new FilenameFilter() {
@Override
public boolean accept(File file, String name) {
if(!name.contains("~$")
&& (name.contains("TsrTracking")
|| name.contains("TSRTracking")
|| name.contains("TSRTRA"))
&& (!name.contains("CTD")
&& !name.contains("MTD")
&& !name.contains("_ALL")
&& !name.contains("QA_Report")
&& !name.contains("QC_Reconfirm")
&& !name.contains("SalesReportByRecords")))
return true;
return false;
}
});
return fw.getFileList();
}
}
| src/main/java/com/adms/batch/kpireport/app/TsrTrackingValidation.java | New Class: For Tsr Tracking Validation | src/main/java/com/adms/batch/kpireport/app/TsrTrackingValidation.java | New Class: For Tsr Tracking Validation | <ide><path>rc/main/java/com/adms/batch/kpireport/app/TsrTrackingValidation.java
<add>package com.adms.batch.kpireport.app;
<add>
<add>import java.io.File;
<add>import java.io.FileInputStream;
<add>import java.io.FilenameFilter;
<add>import java.io.InputStream;
<add>import java.util.ArrayList;
<add>import java.util.List;
<add>
<add>import org.apache.commons.lang3.StringUtils;
<add>import org.apache.poi.ss.usermodel.Row;
<add>import org.apache.poi.ss.usermodel.Sheet;
<add>import org.apache.poi.ss.usermodel.Workbook;
<add>import org.apache.poi.ss.usermodel.WorkbookFactory;
<add>import org.apache.poi.ss.util.CellReference;
<add>
<add>import com.adms.support.FileWalker;
<add>import com.adms.utils.FileUtil;
<add>import com.adms.utils.Logger;
<add>
<add>
<add>public class TsrTrackingValidation {
<add>
<add> private static Logger logger = Logger.getLogger();
<add>
<add> private static final String OK = "OK";
<add> private static final String ERR = "ERR";
<add>
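<add>	// args: [0] root folder to scan, [1] output directory, [2] output file name, [3] log file path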
<add> public static void main(String[] args) {
<add> List<String[]> records = new ArrayList<>();
<add> String rootPath = args[0];
<add>
<add> String outDir = args[1];
<add> String outName = args[2];
<add>
<add> InputStream wbIS = null;
<add> try {
<add> logger.setLogFileName(args[3]);
<add>
<add> logger.info("Start");
<add> List<String> dirs = getFilePaths(rootPath);
<add>
<add> for(String dir : dirs) {
<add> String listLotCode = null;
<add> String valid = null;
<add>
<add> wbIS = new FileInputStream(dir);
<add>
<add> Workbook wb = WorkbookFactory.create(wbIS);
<add> boolean isSingleSheet = wb.getNumberOfSheets() == 1;
<add>
<add> for(int sheetIdx = 0; sheetIdx < wb.getNumberOfSheets(); sheetIdx++) {
<add>
<add> Sheet sheet = wb.getSheetAt(sheetIdx);
<add> String listLotName = sheet.getRow(1).getCell(CellReference.convertColStringToIndex("C"), Row.CREATE_NULL_AS_BLANK).getStringCellValue();
<add>
<add> if(listLotName.contains(",")) {
<add> valid = isSingleSheet ? ERR : sheetIdx == 0 ? OK : ERR;
<add> } else {
<add> listLotCode = getListLotCode(listLotName);
<add>// <!-- check -->
<add> valid = StringUtils.isBlank(listLotCode) ? ERR : OK;
<add> }
<add>
<add> if(valid.equals(ERR))
<add> records.add(new String[]{valid, dir, wb.getSheetName(sheetIdx), listLotName});
<add> }
<add> }
<add>
<add> writeDataOut(outDir, outName, records);
<add>
<add> } catch(Exception e) {
<add> e.printStackTrace();
<add> } finally {
<add> try { wbIS.close(); } catch(Exception e) {}
<add> }
<add> logger.info("Finish");
<add> }
<add>
<add> private static void writeDataOut(String outDir, String outName, List<String[]> records) throws Exception {
<add> FileUtil.getInstance().createDirectory(outDir);
<add>
<add> StringBuffer buffer = new StringBuffer();
<add>
<add> int row = 0;
<add> int idx = 0;
<add>
<add> for(String[] strs : records) {
<add> idx = 0;
<add>
<add>			for(String str : strs) {
<add>				buffer.append(toCoveredTxt(str));
<add>				idx++;
<add>				// comma between fields only, no trailing comma
<add>				if(idx < strs.length) {
<add>					buffer.append(",");
<add>				}
<add>			}
<add>			
<add>			row++;
<add>			// newline between records only, no trailing newline
<add>			if(row < records.size()) {
<add>				buffer.append("\n");
<add>			}
<add> }
<add>
<add> String msg = FileUtil.getInstance().writeout(new File(outDir + "/" + outName), buffer);
<add> logger.info("write to: " + msg);
<add> }
<add>
<add> private static String toCoveredTxt(String arg) {
<add> return new String("\"" + arg + "\"");
<add> }
<add>
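<add>	// Returns the text inside the last pair of parentheses of the list lot name, or an empty string if none is found.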
<add> private static String getListLotCode(String val) {
<add> String result = "";
<add> if(!StringUtils.isEmpty(val)) {
<add> int count = 0;
<add>
<add>// <!-- Check -->
<add> for(int i = 0; i < val.length(); i++) {
<add> if(val.charAt(i) == '(') {
<add> count++;
<add> }
<add> }
<add>
<add>// <!-- process -->
<add> if(count == 1) {
<add> return val.substring(val.indexOf("(") + 1, val.indexOf(")")).trim();
<add> } else if(count == 2) {
<add> return val.substring(val.indexOf("(", val.indexOf("(") + 1) + 1, val.length() - 1).trim();
<add> } else {
<add> logger.error("Cannot Retrieve: " + val);
<add> }
<add> }
<add> return result;
<add> }
<add>
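<add>	// Walks the root folder and collects TSR tracking workbook paths, skipping Office temp (~$) files and aggregate/QA reports.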
<add> private static List<String> getFilePaths(String root) {
<add> FileWalker fw = new FileWalker();
<add>
<add> fw.walk(root, new FilenameFilter() {
<add>
<add> @Override
<add> public boolean accept(File file, String name) {
<add> if(!name.contains("~$")
<add> && (name.contains("TsrTracking")
<add> || name.contains("TSRTracking")
<add> || name.contains("TSRTRA"))
<add> && (!name.contains("CTD")
<add> && !name.contains("MTD")
<add> && !name.contains("_ALL")
<add> && !name.contains("QA_Report")
<add> && !name.contains("QC_Reconfirm")
<add> && !name.contains("SalesReportByRecords")))
<add> return true;
<add> return false;
<add> }
<add> });
<add>
<add> return fw.getFileList();
<add> }
<add>}
<add> |
|
JavaScript | bsd-3-clause | 1bea90c9c362c9006065652a721d6eee778f1b98 | 0 | jeena/Bungloo,jeena/Bungloo,jeena/Bungloo | //
// Core.js
// Tentia
//
// Created by Jeena on 15.04.10.
// Licence: BSD (see attached LICENCE.txt file).
//
function Core(action) {
this.max_length = 200;
// this.timeout = 2 * 60 * 1000;
this.timeout = 10 * 1000; // every 10 seconds
this.action = action;
this.getNewData();
this.unread_mentions = 0;
this.since_id = null;
this.since_id_entity = null;
this.since_time = 0;
this.body = document.createElement("ol");
this.body.className = this.action;
this.cache = {};
this.is_not_init = false;
/*
if (action == "home_timeline") {
this.usernames = [];
this.getUsernames("friends");
this.getUsernames("followers");
}
*/
}
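// Prepends freshly fetched posts to the timeline, trims it to max_length, re-schedules polling, and updates the unread mentions counter.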
Core.prototype.newStatus = function(status, supress_new_with_timeout) {
if(status != null && status.length > 0) {
this.since_id = status[0]["id"];
for(var i = status.length-1, c=0; i>=c; --i) {
if(this.body.childNodes.length > 0) {
if(this.body.childNodes.length > this.max_length) {
this.body.removeChild(this.body.lastChild);
}
this.body.insertBefore(this.getItem(status[i]), this.body.firstChild);
} else {
this.body.appendChild(this.getItem(status[i]));
}
}
}
if(!supress_new_with_timeout) {
var _this = this;
setTimeout(function() { _this.getNewData() }, this.timeout);
}
if(this.action == "mentions" && this.is_not_init) {
this.unread_mentions += status.length;
controller.unreadMentions_(this.unread_mentions);
}
this.is_not_init = true;
}
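// Builds the DOM list item for a single status post from the cloned template.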
Core.prototype.getItem = function(status) {
var _this = this;
this.since_id = status.id;
this.since_id_entity = status.entity;
if (this.since_time < status.published_at) this.since_time = status.published_at;
var original_status = null;
/*
if(status.retweeted_status != null) {
var original_status = status;
var status = status.retweeted_status;
}*/
var template = this.getTemplate();
template.reply_to.onclick = function() {
var mentions = [];
for (var i = 0; i < status.mentions.length; i++) {
var mention = status.mentions[i];
if(mention.entity != controller.stringForKey_("entity"))
mentions.push(mention);
};
replyTo(status.entity, status.id, mentions);
return false;
}
//template.retweet.onclick = function() { template.retweet.className = "hidden"; _this.retweet(status.id_str, template.item); return false; }
//template.image.src = status.user.profile_image_url;
template.username.innerText = status.entity;
template.username.href = status.entity; // FIXME open profile
findProfileURL(status.entity, function(profile_url) {
if (profile_url) {
getURL(profile_url, "GET", function(resp) {
var profile = JSON.parse(resp.responseText);
var basic = profile["https://tent.io/types/info/basic/v0.1.0"];
if (profile && basic) {
if(basic.name) {
template.username.title = template.username.innerText;
template.username.innerText = basic.name;
}
if(basic.avatar_url) template.image.src = basic.avatar_url;
}
});
}
});
/*
if(original_status != null) {
var retweeted = document.createElement("span")
retweeted.className = "retweeted";
var retweeted_icon = document.createElement("span");
retweeted_icon.innerText = " ";
retweeted.appendChild(retweeted_icon);
var retweeted_by = document.createElement("a");
retweeted_by.innerText = original_status.user.screen_name + " ";
retweeted_by.href = WEBSITE_PATH + original_status.user.screen_name;
retweeted.appendChild(document.createTextNode("@"));
retweeted.appendChild(retweeted_by);
template.in_reply.parentNode.parentNode.insertBefore(retweeted, template.in_reply.parent);
}*/
/*if(status.in_reply_to_status_id_str != null) template.in_reply.innerText = status.in_reply_to_screen_name;
else */template.in_reply.parentNode.className = "hidden";
//template.in_reply.href = WEBSITE_PATH + status.in_reply_to_screen_name + "/status/" + status.in_reply_to_status_id_str;
template.message.innerHTML = replaceUsernamesWithLinks(replaceURLWithHTMLLinks(status.content.text, status.entities, template.message));
var time = document.createElement("abbr");
time.innerText = ISODateString(new Date(status.published_at * 1000));
time.title = time.innerText;
time.className = "timeago";
$(time).timeago();
template.ago.appendChild(time);
//template.ago.href = WEBSITE_PATH + status.user.screen_name + "/status/" + status.id_str;
// {"type":"Point","coordinates":[57.10803113,12.25854746]}
if (status.content && status.content.location && status.content.location.type == "Point") {
template.geo.href = "http://maps.google.com/maps?q=" + status.content.location.coordinates[0] + "," + status.content.location.coordinates[1];
template.geo.style.display = "";
}
template.source.href = status.app.url;
template.source.innerHTML = status.app.name;
template.source.title = status.app.url;
/*
if(status.entities.media) {
for(var i=0; i<status.entities.media.length; i++) {
var media = status.entities.media[i];
if(media.type == "photo") {
var a = document.createElement("a");
a.href = media.media_url;
template.message.innerHTML = template.message.innerHTML.replace(media.url, "");
alert(media.url)
var img = document.createElement("img");
img.className = "photo";
img.src = media.media_url + ":small";
a.appendChild(img);
template.images.appendChild(a);
} else if(media.type == "tentia_youtube") {
var a = document.createElement("a");
a.href = media.url;
var img = document.createElement("img");
img.className = "video";
img.src = media.media_url;
a.appendChild(img);
template.images.appendChild(a);
} else if(media.type == "tentia_photo") {
var a = document.createElement("a");
a.href = media.url;
var img = document.createElement("img");
img.className = "photo";
img.src = media.media_url;
a.appendChild(img);
template.images.appendChild(a);
}
}
}
*/
return template.item;
}
Core.prototype.getTemplate = function() {
if(this.template == "undefined") {
return jQuery.extend(true, {}, this.template);
}
var a = document.createElement("a");
var item = document.createElement("li");
var reply_to = a.cloneNode();
reply_to.className = "reply_to"
reply_to.innerText = " ";
reply_to.href = "#";
item.appendChild(reply_to);
var retweet = a.cloneNode();
retweet.className = "retweet";
retweet.innerText = " ";
retweet.href = "#";
// item.appendChild(retweet); // FIXME
var image = document.createElement("img");
image.className = "image";
image.src = "default-avatar.png";
image.onmousedown = function(e) { e.preventDefault(); };
item.appendChild(image);
var image_username = a.cloneNode();
image.appendChild(image_username);
var data = document.createElement("div");
data.className = "data";
item.appendChild(data);
var head = document.createElement("h1");
data.appendChild(head);
var username = a.cloneNode();
head.appendChild(username);
var in_reply = document.createElement("span");
in_reply.className = "reply";
head.appendChild(in_reply);
var space = document.createTextNode(" ");
head.appendChild(space);
var geo = document.createElement("a");
geo.style.display = "none";
head.appendChild(geo);
var pin = document.createElement("img");
pin.src = "pin.png";
pin.alt = "Map link";
geo.appendChild(pin);
var in_reply_text = document.createTextNode(" in reply to ");
in_reply.appendChild(in_reply_text)
var in_reply_a = a.cloneNode();
in_reply.appendChild(in_reply_a);
var message = document.createElement("p");
message.className = "message";
data.appendChild(message);
var images = document.createElement("p")
images.className = "images";
data.appendChild(images);
var date = message.cloneNode();
date.className = "date";
data.appendChild(date);
var ago = a.cloneNode();
date.appendChild(ago);
var from = document.createTextNode(" from ");
date.appendChild(from)
var source = document.createElement("a");
source.className = "source";
date.appendChild(source)
this.template = {
item: item,
reply_to: reply_to,
retweet: retweet,
image: image,
username: username,
in_reply: in_reply_a,
message: message,
ago: ago,
source: source,
geo: geo,
images: images
}
return jQuery.extend(true, {}, this.template);
}
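// Polls the Tent server for new status posts (only mentions for the mentions timeline) since the last seen id.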
Core.prototype.getNewData = function(supress_new_with_timeout) {
var those = this;
var url = URI(controller.stringForKey_("api_root"));
url.path("posts");
url.addSearch("post_types", "https://tent.io/types/post/status/v0.1.0");
url.addSearch("limit", this.max_length);
if(this.since_id) {
url.addSearch("since_id", this.since_id);
url.addSearch("since_id_entity", this.since_id_entity);
}
if (this.action == "mentions") {
url.addSearch("mentioned_entity", controller.stringForKey_("entity"));
}
var http_method = "GET";
var callback = function(resp) {
try {
var json = JSON.parse(resp.responseText)
} catch (e) {
//alert(resp.responseText);
alert(url + " JSON parse error");
throw e;
}
those.newStatus(json, supress_new_with_timeout);
}
var data = null;
getURL(
url.toString(),
http_method,
callback,
data,
makeAuthHeader(
url.toString(),
http_method,
controller.stringForKey_("user_mac_key"),
controller.stringForKey_("user_access_token")
)
); // FIXME: error callback
/*
$.ajax(
{ beforeSend: function(xhr) {
xhr.setRequestHeader("Authorization", );
},
url: url + url2,
dataType: 'json',
success: function(data) {
_this.newStatus(data, supress_new_with_timeout);
},
error:function (xhr, ajaxOptions, thrownError){
alert(xhr.status);
alert(thrownError);
setTimeout(function() { _this.getNewData(supress_new_with_timeout) }, this.timeout);
}
}
);*/
}
Core.prototype.sendNewMessage = function(content, in_reply_to_status_id, in_reply_to_entity) {
var _this = this;
var url = URI(controller.stringForKey_("api_root") + "/posts");
var http_method = "POST";
var callback = function(data) { _this.getNewData(true); }
var data = {
"type": "https://tent.io/types/post/status/v0.1.0",
"published_at": (new Date().getTime() / 1000),
"permissions": {
"public": true
},
"content": {
"text": content,
},
};
var mentions = parseMentions(content, in_reply_to_status_id, in_reply_to_entity);
if (mentions.length > 0) {
data["mentions"] = mentions;
}
getURL(
url.toString(),
http_method,
callback,
JSON.stringify(data),
makeAuthHeader(
url.toString(),
http_method,
controller.stringForKey_("user_mac_key"),
controller.stringForKey_("user_access_token")
)
); // FIXME: error callback
/*
var url = API_PATH + "statuses/update.json";
var data = "source=tentia&status=" + OAuth.percentEncode(tweet);
if(in_reply_to_status_id != '') data += "&in_reply_to_status_id=" + in_reply_to_status_id
var parameters = { source: "tentia", status: tweet };
if(in_reply_to_status_id != '') parameters.in_reply_to_status_id = in_reply_to_status_id;
var _this = this;
var message = { method:"POST" , action:url, parameters:parameters };
OAuth.completeRequest(message,
{ consumerKey : OAUTH_CONSUMER_KEY
, consumerSecret: OAUTH_CONSUMER_SECRET
, token : controller.accessToken.accessToken()
, tokenSecret : controller.accessToken.secret()
});
$.ajax({
beforeSend: function(xhr) {
xhr.setRequestHeader("Authorization", OAuth.getAuthorizationHeader("", message.parameters));
},
url: url,
type: 'POST',
data: data,
dataType: 'json',
success: function(data) {
_this.getNewData(true);
},
error:function (xhr, ajaxOptions, thrownError) {
alert(xhr.status);
alert(thrownError);
}
});*/
}
/*
Core.prototype.retweet = function(status_id, item) {
var url = API_PATH + "statuses/retweet/" + status_id + ".json";
var _this = this;
var message = { method:"POST" , action:url };
OAuth.completeRequest(message,
{ consumerKey : OAUTH_CONSUMER_KEY
, consumerSecret: OAUTH_CONSUMER_SECRET
, token : controller.accessToken.accessToken()
, tokenSecret : controller.accessToken.secret()
});
$.ajax({
beforeSend: function(xhr) {
xhr.setRequestHeader("Authorization", OAuth.getAuthorizationHeader("", message.parameters));
},
url: url,
type: 'POST',
dataType: 'json',
success: function(data) {
item.parentNode.replaceChild(_this.getItem(data), item);
},
error:function (xhr, ajaxOptions, thrownError) {
alert(xhr.status);
alert(thrownError);
}
});
}
Core.prototype.getUsernames = function(type, cursor) {
cursor = typeof cursor == "undefined" ? -1 : cursor;
var url = API_PATH + type + "/ids.json";
var _this = this;
var parameters = { stringify_ids: "true", cursor:cursor };
var message = { method:"GET" , action:url, parameters:parameters };
OAuth.completeRequest(message,
{ consumerKey : OAUTH_CONSUMER_KEY
, consumerSecret: OAUTH_CONSUMER_SECRET
, token : controller.accessToken.accessToken()
, tokenSecret : controller.accessToken.secret()
});
$.ajax({
beforeSend: function(xhr) {
xhr.setRequestHeader("Authorization", OAuth.getAuthorizationHeader("", message.parameters));
},
url: url + "?stringify_ids=true&cursor=" + cursor ,
type: 'GET',
dataType: 'json',
success: function(data) {
for (var i=0; i < data.ids.length; i = i + 100) {
_this.getUsernamesFromIds(data.ids.slice(i, i + 100));
}
if (data.next_cursor > 0) {
_this.getUsernames(type, data.next_cursor);
}
},
error:function (xhr, ajaxOptions, thrownError) {
alert(xhr.status);
alert(thrownError);
}
});
}
Core.prototype.getUsernamesFromIds = function(ids) {
var url = API_PATH + "users/lookup.json";
var _this = this;
var parameters = { user_id:ids.join(",") };
var message = { method:"GET" , action:url, parameters:parameters };
OAuth.completeRequest(message,
{ consumerKey : OAUTH_CONSUMER_KEY
, consumerSecret: OAUTH_CONSUMER_SECRET
, token : controller.accessToken.accessToken()
, tokenSecret : controller.accessToken.secret()
});
$.ajax({
beforeSend: function(xhr) {
xhr.setRequestHeader("Authorization", OAuth.getAuthorizationHeader("", message.parameters));
},
url: url + "?user_id=" + ids.join(","),
type: 'GET',
dataType: 'json',
success: function(data) {
for (var i=0; i < data.length; i++) {
_this.usernames.push(data[i].screen_name);
}
},
error:function (xhr, ajaxOptions, thrownError) {
alert(xhr.status);
alert(thrownError);
}
});
}
Core.prototype.findUsernamesFor = function(query) {
var ret = [];
for (var i=0; i < this.usernames.length; i++) {
if(this.usernames[i].startsWith(query)) {
ret.push(this.usernames[i]);
}
}
return ret;
}
*/
/* Helper functions */
function replaceURLWithHTMLLinks(text, entities, message_node) {
var exp = /(([^\^]https?|ftp|file):\/\/[-A-Z0-9+&@#\/%?=~_()|!:,.;]*[-A-Z0-9+&@#\/%=~_|])/ig;
return text.replace(exp, "<a href='$1'>$1</a>");
/*
var urls = entities.urls;
for(var i = 0; i<urls.length; i++) {
var original = urls[i].url;
var replace = urls[i].expanded_url == null ? original : urls[i].expanded_url;
if(replace.startsWith("http://bit.ly/") || replace.startsWith("http://j.mp/")) {
replaceShortened(replace, message_node);
}
text = text.replace(original, "<a href='" + original + "'>" + replace + "</a>");
var media = null;
// add thumbnail
if(replace.startsWith("http://youtube.com/") || replace.startsWith("http://www.youtube.com/")) {
var v = getUrlVars(replace)["v"];
if (v) {
media = {
type: "tentia_youtube",
url: original,
media_url: "http://img.youtube.com/vi/" + v + "/1.jpg"
}
}
} else if (replace.startsWith("http://twitpic.com/")) {
media = {
type: "tentia_photo",
url: original,
media_url: "http://twitpic.com/show/mini/" + replace.substring("http://twitpic.com/".length)
}
} else if (replace.startsWith("http://yfrog")) {
media = {
type: "tentia_photo",
url: original,
media_url: replace + ":small"
}
} else if (replace.startsWith("http://instagr.am/p/") || replace.startsWith("http://instagram.com/p/")) {
media = {
type: "tentia_photo",
url: original,
media_url: replace + "media?size=t"
}
}
if(media) {
if(entities.media) {
entities.media.push(media);
} else {
entities.media = [media];
}
}
}
return text;*/
}
function replaceUsernamesWithLinks(text, mentions) {
return text; // FIXME!
var username = /(^|\s)(\^)(\w+)/ig;
var hash = /(^|\s)(#)(\w+)/ig;
text = text.replace(username, "$1$2<a href='tentia://profile/$3'>$3</a>");
return text.replace(hash, "$1$2<a href='http://search.twitter.com/search?q=%23$3'>$3</a>");
}
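// Prefills the compose window with ^entity handles for the post's author and every other mentioned entity.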
function replyTo(entity, status_id, mentions) {
var string = "^" + entity + " ";
for (var i = 0; i < mentions.length; i++) {
string += "^" + mentions[i].entity + " ";
}
controller.openNewMessageWindowInReplyTo_statusId_withString_(entity, status_id, string);
}
function loadPlugin(url) {
var plugin = document.createElement("script");
plugin.type = "text/javascript";
plugin.src = url;
document.getElementsByTagName("head")[0].appendChild(plugin);
}
String.prototype.startsWith = function(prefix) {
return this.indexOf(prefix) === 0;
}
String.prototype.endsWith = function(suffix) {
return this.match(suffix+"$") == suffix;
};
function getUrlVars(url)
{
var vars = [], hash;
if(url.indexOf("#") > -1) url = url.slice(0, url.indexOf("#"));
var hashes = url.slice(url.indexOf('?') + 1).split('&');
for(var i = 0; i < hashes.length; i++)
{
hash = hashes[i].split('=');
vars.push(hash[0]);
vars[hash[0]] = hash[1];
}
return vars;
}
function replaceShortened(url, message_node) {
var api = "http://api.bitly.com";
if(url.startsWith("http://j.mp/")) {
api = "http://api.j.mp";
}
var api_url = api + "/v3/expand?format=json&apiKey=R_4fc2a1aa461d076556016390fa6400f6&login=twittia&shortUrl=" + url; // FIXME: new api key
$.ajax({
url: api_url,
success: function(data) {
var new_url = data.data.expand[0].long_url;
if (new_url) {
var regex = new RegExp(url, "g");
message_node.innerHTML = message_node.innerHTML.replace(regex, new_url);
}
},
error:function (xhr, ajaxOptions, thrownError) {
alert(xhr.status);
alert(thrownError);
}
});
}
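// Collects mentions for a new post: the replied-to post (if any) plus every ^entity reference found in the text.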
function parseMentions(text, post_id, entity) {
var mentions = [];
if (post_id && entity) {
mentions.push({
post: post_id,
entity: entity
})
}
var res = text.match(/((\^https?):\/\/\S+)/ig);
if (res) {
for (var i = 0; i < res.length; i++) {
var e = res[i].substring(1);
if (e != entity) {
mentions.push({entity:e});
}
}
}
return mentions;
}
function ISODateString(d){
function pad(n){return n<10 ? '0'+n : n}
return d.getUTCFullYear()+'-'
+ pad(d.getUTCMonth()+1)+'-'
+ pad(d.getUTCDate())+'T'
+ pad(d.getUTCHours())+':'
+ pad(d.getUTCMinutes())+':'
+ pad(d.getUTCSeconds())+'Z'
}
var tentia_instance;
| Core.js | //
// Core.js
// Tentia
//
// Created by Jeena on 15.04.10.
// Licence: BSD (see attached LICENCE.txt file).
//
function Core(action) {
this.max_length = 200;
// this.timeout = 2 * 60 * 1000;
this.timeout = 10 * 1000; // every 10 seconds
this.action = action;
this.getNewData();
this.unread_mentions = 0;
this.since_id = null;
this.since_id_entity = null;
this.since_time = 0;
this.body = document.createElement("ol");
this.body.className = this.action;
this.cache = {};
this.is_not_init = false;
/*
if (action == "home_timeline") {
this.usernames = [];
this.getUsernames("friends");
this.getUsernames("followers");
}
*/
}
Core.prototype.newStatus = function(status, supress_new_with_timeout) {
if(status != null && status.length > 0) {
this.since_id = status[0]["id"];
for(var i = status.length-1, c=0; i>=c; --i) {
if(this.body.childNodes.length > 0) {
if(this.body.childNodes.length > this.max_length) {
this.body.removeChild(this.body.lastChild);
}
this.body.insertBefore(this.getItem(status[i]), this.body.firstChild);
} else {
this.body.appendChild(this.getItem(status[i]));
}
}
}
if(!supress_new_with_timeout) {
var _this = this;
setTimeout(function() { _this.getNewData() }, this.timeout);
}
if(this.action == "mentions" && this.is_not_init) {
this.unread_mentions += status.length;
controller.unreadMentions_(this.unread_mentions);
}
this.is_not_init = true;
}
Core.prototype.getItem = function(status) {
var _this = this;
this.since_id = status.id;
this.since_id_entity = status.entity;
if (this.since_time < status.published_at) this.since_time = status.published_at;
var original_status = null;
/*
if(status.retweeted_status != null) {
var original_status = status;
var status = status.retweeted_status;
}*/
var template = this.getTemplate();
template.reply_to.onclick = function() {
var mentions = [];
for (var i = 0; i < status.mentions.length; i++) {
var mention = status.mentions[i];
if(mention.entity != controller.stringForKey_("entity"))
mentions.push(mention);
};
replyTo(status.entity, status.id, mentions);
return false;
}
//template.retweet.onclick = function() { template.retweet.className = "hidden"; _this.retweet(status.id_str, template.item); return false; }
//template.image.src = status.user.profile_image_url;
template.username.innerText = status.entity;
template.username.href = status.entity; // FIXME open profile
findProfileURL(status.entity, function(profile_url) {
if (profile_url) {
getURL(profile_url, "GET", function(resp) {
var profile = JSON.parse(resp.responseText);
var basic = profile["https://tent.io/types/info/basic/v0.1.0"];
if (profile && basic) {
if(basic.name) {
template.username.title = template.username.innerText;
template.username.innerText = basic.name;
}
if(basic.avatar_url) template.image.src = basic.avatar_url;
}
});
}
});
/*
if(original_status != null) {
var retweeted = document.createElement("span")
retweeted.className = "retweeted";
var retweeted_icon = document.createElement("span");
retweeted_icon.innerText = " ";
retweeted.appendChild(retweeted_icon);
var retweeted_by = document.createElement("a");
retweeted_by.innerText = original_status.user.screen_name + " ";
retweeted_by.href = WEBSITE_PATH + original_status.user.screen_name;
retweeted.appendChild(document.createTextNode("@"));
retweeted.appendChild(retweeted_by);
template.in_reply.parentNode.parentNode.insertBefore(retweeted, template.in_reply.parent);
}*/
/*if(status.in_reply_to_status_id_str != null) template.in_reply.innerText = status.in_reply_to_screen_name;
else */template.in_reply.parentNode.className = "hidden";
//template.in_reply.href = WEBSITE_PATH + status.in_reply_to_screen_name + "/status/" + status.in_reply_to_status_id_str;
template.message.innerHTML = replaceUsernamesWithLinks(replaceURLWithHTMLLinks(status.content.text, status.entities, template.message));
var time = document.createElement("abbr");
time.innerText = ISODateString(new Date(status.published_at * 1000));
time.title = time.innerText;
time.className = "timeago";
$(time).timeago();
template.ago.appendChild(time);
//template.ago.href = WEBSITE_PATH + status.user.screen_name + "/status/" + status.id_str;
// {"type":"Point","coordinates":[57.10803113,12.25854746]}
if (status.content && status.content.location && status.content.location.type == "Point") {
template.geo.href = "http://maps.google.com/maps?q=" + status.content.location.coordinates[0] + "," + status.content.location.coordinates[1];
template.geo.style.display = "";
}
template.source.href = status.app.url;
template.source.innerHTML = status.app.name;
template.source.title = status.app.url;
/*
if(status.entities.media) {
for(var i=0; i<status.entities.media.length; i++) {
var media = status.entities.media[i];
if(media.type == "photo") {
var a = document.createElement("a");
a.href = media.media_url;
template.message.innerHTML = template.message.innerHTML.replace(media.url, "");
alert(media.url)
var img = document.createElement("img");
img.className = "photo";
img.src = media.media_url + ":small";
a.appendChild(img);
template.images.appendChild(a);
} else if(media.type == "tentia_youtube") {
var a = document.createElement("a");
a.href = media.url;
var img = document.createElement("img");
img.className = "video";
img.src = media.media_url;
a.appendChild(img);
template.images.appendChild(a);
} else if(media.type == "tentia_photo") {
var a = document.createElement("a");
a.href = media.url;
var img = document.createElement("img");
img.className = "photo";
img.src = media.media_url;
a.appendChild(img);
template.images.appendChild(a);
}
}
}
*/
return template.item;
}
Core.prototype.getTemplate = function() {
if(this.template == "undefined") {
return jQuery.extend(true, {}, this.template);
}
var a = document.createElement("a");
var item = document.createElement("li");
var reply_to = a.cloneNode();
reply_to.className = "reply_to"
reply_to.innerText = " ";
reply_to.href = "#";
item.appendChild(reply_to);
var retweet = a.cloneNode();
retweet.className = "retweet";
retweet.innerText = " ";
retweet.href = "#";
// item.appendChild(retweet); // FIXME
var image = document.createElement("img");
image.className = "image";
image.src = "default-avatar.png";
image.onmousedown = function(e) { e.preventDefault(); };
item.appendChild(image);
var image_username = a.cloneNode();
image.appendChild(image_username);
var data = document.createElement("div");
data.className = "data";
item.appendChild(data);
var head = document.createElement("h1");
data.appendChild(head);
var username = a.cloneNode();
head.appendChild(username);
var in_reply = document.createElement("span");
in_reply.className = "reply";
head.appendChild(in_reply);
var space = document.createTextNode(" ");
head.appendChild(space);
var geo = document.createElement("a");
geo.style.display = "none";
head.appendChild(geo);
var pin = document.createElement("img");
pin.src = "pin.png";
pin.alt = "Map link";
geo.appendChild(pin);
var in_reply_text = document.createTextNode(" in reply to ");
in_reply.appendChild(in_reply_text)
var in_reply_a = a.cloneNode();
in_reply.appendChild(in_reply_a);
var message = document.createElement("p");
message.className = "message";
data.appendChild(message);
var images = document.createElement("p")
images.className = "images";
data.appendChild(images);
var date = message.cloneNode();
date.className = "date";
data.appendChild(date);
var ago = a.cloneNode();
date.appendChild(ago);
var from = document.createTextNode(" from ");
date.appendChild(from)
var source = document.createElement("a");
source.className = "source";
date.appendChild(source)
this.template = {
item: item,
reply_to: reply_to,
retweet: retweet,
image: image,
username: username,
in_reply: in_reply_a,
message: message,
ago: ago,
source: source,
geo: geo,
images: images
}
return jQuery.extend(true, {}, this.template);
}
Core.prototype.getNewData = function(supress_new_with_timeout) {
var those = this;
var url = URI(controller.stringForKey_("api_root"));
url.path("posts");
url.addSearch("post_types", "https://tent.io/types/post/status/v0.1.0");
url.addSearch("limit", this.max_length);
if(this.since_id) {
url.addSearch("since_id", this.since_id);
url.addSearch("since_id_entity", this.since_id_entity);
}
if (this.action == "mentions") {
url.addSearch("mentioned_entity", controller.stringForKey_("entity"));
}
var http_method = "GET";
var callback = function(resp) {
try {
var json = JSON.parse(resp.responseText)
} catch (e) {
//alert(resp.responseText);
alert(url + " JSON parse error");
throw e;
}
those.newStatus(json, supress_new_with_timeout);
}
var data = null;
getURL(
url.toString(),
http_method,
callback,
data,
makeAuthHeader(
url.toString(),
http_method,
controller.stringForKey_("user_mac_key"),
controller.stringForKey_("user_access_token")
)
); // FIXME: error callback
/*
$.ajax(
{ beforeSend: function(xhr) {
xhr.setRequestHeader("Authorization", );
},
url: url + url2,
dataType: 'json',
success: function(data) {
_this.newStatus(data, supress_new_with_timeout);
},
error:function (xhr, ajaxOptions, thrownError){
alert(xhr.status);
alert(thrownError);
setTimeout(function() { _this.getNewData(supress_new_with_timeout) }, this.timeout);
}
}
);*/
}
Core.prototype.sendNewMessage = function(content, in_reply_to_status_id, in_reply_to_entity) {
var _this = this;
var url = URI(controller.stringForKey_("api_root") + "/posts");
var http_method = "POST";
var callback = function(data) { _this.getNewData(true); }
var data = {
"type": "https://tent.io/types/post/status/v0.1.0",
"published_at": (new Date().getTime() / 1000),
"permissions": {
"public": true
},
"content": {
"text": content,
},
};
var mentions = parseMentions(content, in_reply_to_status_id, in_reply_to_entity);
if (mentions.length > 0) {
data["mentions"] = mentions;
}
getURL(
url.toString(),
http_method,
callback,
JSON.stringify(data),
makeAuthHeader(
url.toString(),
http_method,
controller.stringForKey_("user_mac_key"),
controller.stringForKey_("user_access_token")
)
); // FIXME: error callback
/*
var url = API_PATH + "statuses/update.json";
var data = "source=tentia&status=" + OAuth.percentEncode(tweet);
if(in_reply_to_status_id != '') data += "&in_reply_to_status_id=" + in_reply_to_status_id
var parameters = { source: "tentia", status: tweet };
if(in_reply_to_status_id != '') parameters.in_reply_to_status_id = in_reply_to_status_id;
var _this = this;
var message = { method:"POST" , action:url, parameters:parameters };
OAuth.completeRequest(message,
{ consumerKey : OAUTH_CONSUMER_KEY
, consumerSecret: OAUTH_CONSUMER_SECRET
, token : controller.accessToken.accessToken()
, tokenSecret : controller.accessToken.secret()
});
$.ajax({
beforeSend: function(xhr) {
xhr.setRequestHeader("Authorization", OAuth.getAuthorizationHeader("", message.parameters));
},
url: url,
type: 'POST',
data: data,
dataType: 'json',
success: function(data) {
_this.getNewData(true);
},
error:function (xhr, ajaxOptions, thrownError) {
alert(xhr.status);
alert(thrownError);
}
});*/
}
/*
Core.prototype.retweet = function(status_id, item) {
var url = API_PATH + "statuses/retweet/" + status_id + ".json";
var _this = this;
var message = { method:"POST" , action:url };
OAuth.completeRequest(message,
{ consumerKey : OAUTH_CONSUMER_KEY
, consumerSecret: OAUTH_CONSUMER_SECRET
, token : controller.accessToken.accessToken()
, tokenSecret : controller.accessToken.secret()
});
$.ajax({
beforeSend: function(xhr) {
xhr.setRequestHeader("Authorization", OAuth.getAuthorizationHeader("", message.parameters));
},
url: url,
type: 'POST',
dataType: 'json',
success: function(data) {
item.parentNode.replaceChild(_this.getItem(data), item);
},
error:function (xhr, ajaxOptions, thrownError) {
alert(xhr.status);
alert(thrownError);
}
});
}
Core.prototype.getUsernames = function(type, cursor) {
cursor = typeof cursor == "undefined" ? -1 : cursor;
var url = API_PATH + type + "/ids.json";
var _this = this;
var parameters = { stringify_ids: "true", cursor:cursor };
var message = { method:"GET" , action:url, parameters:parameters };
OAuth.completeRequest(message,
{ consumerKey : OAUTH_CONSUMER_KEY
, consumerSecret: OAUTH_CONSUMER_SECRET
, token : controller.accessToken.accessToken()
, tokenSecret : controller.accessToken.secret()
});
$.ajax({
beforeSend: function(xhr) {
xhr.setRequestHeader("Authorization", OAuth.getAuthorizationHeader("", message.parameters));
},
url: url + "?stringify_ids=true&cursor=" + cursor ,
type: 'GET',
dataType: 'json',
success: function(data) {
for (var i=0; i < data.ids.length; i = i + 100) {
_this.getUsernamesFromIds(data.ids.slice(i, i + 100));
}
if (data.next_cursor > 0) {
_this.getUsernames(type, data.next_cursor);
}
},
error:function (xhr, ajaxOptions, thrownError) {
alert(xhr.status);
alert(thrownError);
}
});
}
Core.prototype.getUsernamesFromIds = function(ids) {
var url = API_PATH + "users/lookup.json";
var _this = this;
var parameters = { user_id:ids.join(",") };
var message = { method:"GET" , action:url, parameters:parameters };
OAuth.completeRequest(message,
{ consumerKey : OAUTH_CONSUMER_KEY
, consumerSecret: OAUTH_CONSUMER_SECRET
, token : controller.accessToken.accessToken()
, tokenSecret : controller.accessToken.secret()
});
$.ajax({
beforeSend: function(xhr) {
xhr.setRequestHeader("Authorization", OAuth.getAuthorizationHeader("", message.parameters));
},
url: url + "?user_id=" + ids.join(","),
type: 'GET',
dataType: 'json',
success: function(data) {
for (var i=0; i < data.length; i++) {
_this.usernames.push(data[i].screen_name);
}
},
error:function (xhr, ajaxOptions, thrownError) {
alert(xhr.status);
alert(thrownError);
}
});
}
Core.prototype.findUsernamesFor = function(query) {
var ret = [];
for (var i=0; i < this.usernames.length; i++) {
if(this.usernames[i].startsWith(query)) {
ret.push(this.usernames[i]);
}
}
return ret;
}
*/
/* Helper functions */
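// Wraps plain-text URLs in the given text in HTML anchor tags. The entities/message_node parameters are only used by the commented-out legacy (Twitter-era) path below.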
function replaceURLWithHTMLLinks(text, entities, message_node) {
var exp = /(\b(https?|ftp|file):\/\/[-A-Z0-9+&@#\/%?=~_()|!:,.;]*[-A-Z0-9+&@#\/%=~_|])/ig;
return text.replace(exp, "<a href='$1'>$1</a>");
/*
var urls = entities.urls;
for(var i = 0; i<urls.length; i++) {
var original = urls[i].url;
var replace = urls[i].expanded_url == null ? original : urls[i].expanded_url;
if(replace.startsWith("http://bit.ly/") || replace.startsWith("http://j.mp/")) {
replaceShortened(replace, message_node);
}
text = text.replace(original, "<a href='" + original + "'>" + replace + "</a>");
var media = null;
// add thumbnail
if(replace.startsWith("http://youtube.com/") || replace.startsWith("http://www.youtube.com/")) {
var v = getUrlVars(replace)["v"];
if (v) {
media = {
type: "tentia_youtube",
url: original,
media_url: "http://img.youtube.com/vi/" + v + "/1.jpg"
}
}
} else if (replace.startsWith("http://twitpic.com/")) {
media = {
type: "tentia_photo",
url: original,
media_url: "http://twitpic.com/show/mini/" + replace.substring("http://twitpic.com/".length)
}
} else if (replace.startsWith("http://yfrog")) {
media = {
type: "tentia_photo",
url: original,
media_url: replace + ":small"
}
} else if (replace.startsWith("http://instagr.am/p/") || replace.startsWith("http://instagram.com/p/")) {
media = {
type: "tentia_photo",
url: original,
media_url: replace + "media?size=t"
}
}
if(media) {
if(entities.media) {
entities.media.push(media);
} else {
entities.media = [media];
}
}
}
return text;*/
}
function replaceUsernamesWithLinks(text, mentions) {
return text; // FIXME!
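	// NOTE: the early return above disables username/hashtag linking, so the statements below are currently unreachable.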
var username = /(^|\s)(\^)(\w+)/ig;
var hash = /(^|\s)(#)(\w+)/ig;
text = text.replace(username, "$1$2<a href='tentia://profile/$3'>$3</a>");
return text.replace(hash, "$1$2<a href='http://search.twitter.com/search?q=%23$3'>$3</a>");
}
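// Opens the native new-message window, pre-filling ^mentions of the original author and of everyone mentioned in the post being replied to.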
function replyTo(entity, status_id, mentions) {
var string = "^" + entity + " ";
for (var i = 0; i < mentions.length; i++) {
string += "^" + mentions[i].entity + " ";
}
controller.openNewMessageWindowInReplyTo_statusId_withString_(entity, status_id, string);
}
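// Injects an external script into the page by appending a <script> tag to <head>.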
function loadPlugin(url) {
var plugin = document.createElement("script");
plugin.type = "text/javascript";
plugin.src = url;
document.getElementsByTagName("head")[0].appendChild(plugin);
}
String.prototype.startsWith = function(prefix) {
return this.indexOf(prefix) === 0;
}
String.prototype.endsWith = function(suffix) {
	// indexOf avoids the old match()-based check, which broke when the suffix contained regex metacharacters.
	return this.indexOf(suffix, this.length - suffix.length) !== -1;
};
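// Parses query-string parameters from a URL (ignoring any #fragment); e.g. getUrlVars("http://x/?a=1&b=2") returns vars with vars["a"] == "1" and vars["b"] == "2" (the keys are also pushed as array elements).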
function getUrlVars(url)
{
var vars = [], hash;
if(url.indexOf("#") > -1) url = url.slice(0, url.indexOf("#"));
var hashes = url.slice(url.indexOf('?') + 1).split('&');
for(var i = 0; i < hashes.length; i++)
{
hash = hashes[i].split('=');
vars.push(hash[0]);
vars[hash[0]] = hash[1];
}
return vars;
}
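// Asynchronously expands a bit.ly/j.mp short link via the bitly expand API and rewrites it inside the already-rendered message node.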
function replaceShortened(url, message_node) {
var api = "http://api.bitly.com";
if(url.startsWith("http://j.mp/")) {
api = "http://api.j.mp";
}
var api_url = api + "/v3/expand?format=json&apiKey=R_4fc2a1aa461d076556016390fa6400f6&login=twittia&shortUrl=" + url; // FIXME: new api key
$.ajax({
url: api_url,
success: function(data) {
var new_url = data.data.expand[0].long_url;
if (new_url) {
var regex = new RegExp(url, "g");
message_node.innerHTML = message_node.innerHTML.replace(regex, new_url);
}
},
error:function (xhr, ajaxOptions, thrownError) {
alert(xhr.status);
alert(thrownError);
}
});
}
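// Builds the Tent mentions array for a new post: the post being replied to (if any) plus every ^entity URI found in the text, excluding the reply-to entity itself.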
function parseMentions(text, post_id, entity) {
var mentions = [];
if (post_id && entity) {
mentions.push({
post: post_id,
entity: entity
})
}
var res = text.match(/((\^https?):\/\/\S+)/ig);
if (res) {
for (var i = 0; i < res.length; i++) {
var e = res[i].substring(1);
if (e != entity) {
mentions.push({entity:e});
}
}
}
return mentions;
}
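// Formats a Date as an ISO 8601 UTC timestamp, e.g. "2012-05-01T13:45:30Z".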
function ISODateString(d){
function pad(n){return n<10 ? '0'+n : n}
return d.getUTCFullYear()+'-'
+ pad(d.getUTCMonth()+1)+'-'
+ pad(d.getUTCDate())+'T'
+ pad(d.getUTCHours())+':'
+ pad(d.getUTCMinutes())+':'
+ pad(d.getUTCSeconds())+'Z'
}
var tentia_instance;
| fixed http links
| Core.js | fixed http links | <ide><path>ore.js
<ide> /* Helper functions */
<ide>
<ide> function replaceURLWithHTMLLinks(text, entities, message_node) {
<del> var exp = /(\b(https?|ftp|file):\/\/[-A-Z0-9+&@#\/%?=~_()|!:,.;]*[-A-Z0-9+&@#\/%=~_|])/ig;
<add> var exp = /(([^\^]https?|ftp|file):\/\/[-A-Z0-9+&@#\/%?=~_()|!:,.;]*[-A-Z0-9+&@#\/%=~_|])/ig;
<ide> return text.replace(exp, "<a href='$1'>$1</a>");
<ide>
<ide> /* |
|
Java | apache-2.0 | 7f17fd1055b520b8d60c9f7439660112e5c3ab9b | 0 | allotria/intellij-community,allotria/intellij-community,allotria/intellij-community,allotria/intellij-community,allotria/intellij-community,allotria/intellij-community,allotria/intellij-community,allotria/intellij-community,allotria/intellij-community,allotria/intellij-community,allotria/intellij-community,allotria/intellij-community,allotria/intellij-community | // Copyright 2000-2018 JetBrains s.r.o. Use of this source code is governed by the Apache 2.0 license that can be found in the LICENSE file.
package com.intellij.core;
import com.intellij.codeInsight.folding.CodeFoldingSettings;
import com.intellij.concurrency.Job;
import com.intellij.concurrency.JobLauncher;
import com.intellij.ide.plugins.PluginManagerCore;
import com.intellij.lang.*;
import com.intellij.lang.impl.PsiBuilderFactoryImpl;
import com.intellij.mock.MockApplication;
import com.intellij.mock.MockApplicationEx;
import com.intellij.mock.MockFileDocumentManagerImpl;
import com.intellij.openapi.Disposable;
import com.intellij.openapi.application.ApplicationInfo;
import com.intellij.openapi.application.ApplicationManager;
import com.intellij.openapi.application.impl.ApplicationInfoImpl;
import com.intellij.openapi.command.CommandProcessor;
import com.intellij.openapi.command.impl.CoreCommandProcessor;
import com.intellij.openapi.components.ExtensionAreas;
import com.intellij.openapi.editor.impl.DocumentImpl;
import com.intellij.openapi.extensions.ExtensionPoint;
import com.intellij.openapi.extensions.ExtensionPointName;
import com.intellij.openapi.extensions.Extensions;
import com.intellij.openapi.extensions.ExtensionsArea;
import com.intellij.openapi.fileEditor.FileDocumentManager;
import com.intellij.openapi.fileTypes.FileType;
import com.intellij.openapi.fileTypes.FileTypeExtension;
import com.intellij.openapi.progress.ProgressIndicator;
import com.intellij.openapi.progress.ProgressManager;
import com.intellij.openapi.progress.impl.CoreProgressManager;
import com.intellij.openapi.util.ClassExtension;
import com.intellij.openapi.util.Disposer;
import com.intellij.openapi.util.KeyedExtensionCollector;
import com.intellij.openapi.util.StaticGetter;
import com.intellij.openapi.vfs.VirtualFileManager;
import com.intellij.openapi.vfs.VirtualFileSystem;
import com.intellij.openapi.vfs.encoding.EncodingManager;
import com.intellij.openapi.vfs.impl.CoreVirtualFilePointerManager;
import com.intellij.openapi.vfs.impl.VirtualFileManagerImpl;
import com.intellij.openapi.vfs.impl.jar.CoreJarFileSystem;
import com.intellij.openapi.vfs.local.CoreLocalFileSystem;
import com.intellij.openapi.vfs.pointers.VirtualFilePointerManager;
import com.intellij.psi.PsiReferenceService;
import com.intellij.psi.PsiReferenceServiceImpl;
import com.intellij.psi.impl.meta.MetaRegistry;
import com.intellij.psi.impl.source.resolve.reference.ReferenceProvidersRegistry;
import com.intellij.psi.impl.source.resolve.reference.ReferenceProvidersRegistryImpl;
import com.intellij.psi.meta.MetaDataRegistrar;
import com.intellij.psi.stubs.CoreStubTreeLoader;
import com.intellij.psi.stubs.StubTreeLoader;
import com.intellij.util.Consumer;
import com.intellij.util.KeyedLazyInstanceEP;
import com.intellij.util.Processor;
import com.intellij.util.graph.GraphAlgorithms;
import com.intellij.util.graph.impl.GraphAlgorithmsImpl;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
import org.picocontainer.MutablePicoContainer;
import java.io.File;
import java.lang.reflect.Modifier;
import java.util.List;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.Future;
/**
* @author yole
*/
public class CoreApplicationEnvironment {
private final CoreFileTypeRegistry myFileTypeRegistry;
protected final MockApplication myApplication;
private final CoreLocalFileSystem myLocalFileSystem;
protected final VirtualFileSystem myJarFileSystem;
private final VirtualFileSystem myJrtFileSystem;
@NotNull private final Disposable myParentDisposable;
private final boolean myUnitTestMode;
public CoreApplicationEnvironment(@NotNull Disposable parentDisposable) {
this(parentDisposable, true);
}
public CoreApplicationEnvironment(@NotNull Disposable parentDisposable, boolean unitTestMode) {
myParentDisposable = parentDisposable;
myUnitTestMode = unitTestMode;
myFileTypeRegistry = new CoreFileTypeRegistry();
myApplication = createApplication(myParentDisposable);
ApplicationManager.setApplication(myApplication,
new StaticGetter<>(myFileTypeRegistry),
myParentDisposable);
myLocalFileSystem = createLocalFileSystem();
myJarFileSystem = createJarFileSystem();
myJrtFileSystem = createJrtFileSystem();
Extensions.registerAreaClass(ExtensionAreas.IDEA_PROJECT, null);
final MutablePicoContainer appContainer = myApplication.getPicoContainer();
registerComponentInstance(appContainer, FileDocumentManager.class, new MockFileDocumentManagerImpl(
charSequence -> new DocumentImpl(charSequence), null));
VirtualFileSystem[] fs = myJrtFileSystem != null
? new VirtualFileSystem[]{myLocalFileSystem, myJarFileSystem, myJrtFileSystem}
: new VirtualFileSystem[]{myLocalFileSystem, myJarFileSystem};
VirtualFileManagerImpl virtualFileManager = new VirtualFileManagerImpl(fs, myApplication.getMessageBus());
registerApplicationComponent(VirtualFileManager.class, virtualFileManager);
    // Fake EP for cleaning up resources after the area is disposed (otherwise the KeyedExtensionCollector listener would be copied to the next area)
registerApplicationExtensionPoint(new ExtensionPointName<>("com.intellij.virtualFileSystem"), KeyedLazyInstanceEP.class);
registerApplicationService(EncodingManager.class, new CoreEncodingRegistry());
registerApplicationService(VirtualFilePointerManager.class, createVirtualFilePointerManager());
registerApplicationService(DefaultASTFactory.class, new CoreASTFactory());
registerApplicationService(PsiBuilderFactory.class, new PsiBuilderFactoryImpl());
registerApplicationService(ReferenceProvidersRegistry.class, new ReferenceProvidersRegistryImpl());
registerApplicationService(StubTreeLoader.class, new CoreStubTreeLoader());
registerApplicationService(PsiReferenceService.class, new PsiReferenceServiceImpl());
registerApplicationService(MetaDataRegistrar.class, new MetaRegistry());
registerApplicationService(ProgressManager.class, createProgressIndicatorProvider());
registerApplicationService(JobLauncher.class, createJobLauncher());
registerApplicationService(CodeFoldingSettings.class, new CodeFoldingSettings());
registerApplicationService(CommandProcessor.class, new CoreCommandProcessor());
registerApplicationService(GraphAlgorithms.class, new GraphAlgorithmsImpl());
myApplication.registerService(ApplicationInfo.class, ApplicationInfoImpl.class);
}
public <T> void registerApplicationService(@NotNull Class<T> serviceInterface, @NotNull T serviceImplementation) {
myApplication.registerService(serviceInterface, serviceImplementation);
}
@NotNull
protected VirtualFilePointerManager createVirtualFilePointerManager() {
return new CoreVirtualFilePointerManager();
}
@NotNull
protected MockApplication createApplication(@NotNull Disposable parentDisposable) {
return new MockApplicationEx(parentDisposable) {
@Override
public boolean isUnitTestMode() {
return myUnitTestMode;
}
};
}
@NotNull
protected JobLauncher createJobLauncher() {
return new JobLauncher() {
@Override
public <T> boolean invokeConcurrentlyUnderProgress(@NotNull List<T> things,
ProgressIndicator progress,
boolean runInReadAction,
boolean failFastOnAcquireReadAction,
@NotNull Processor<? super T> thingProcessor) {
for (T thing : things) {
if (!thingProcessor.process(thing))
return false;
}
return true;
}
@NotNull
@Override
public Job<Void> submitToJobThread(@NotNull Runnable action, Consumer<? super Future<?>> onDoneCallback) {
action.run();
if (onDoneCallback != null) {
onDoneCallback.consume(CompletableFuture.completedFuture(null));
}
return Job.NULL_JOB;
}
};
}
@NotNull
protected ProgressManager createProgressIndicatorProvider() {
return new CoreProgressManager();
}
@NotNull
protected VirtualFileSystem createJarFileSystem() {
return new CoreJarFileSystem();
}
@NotNull
protected CoreLocalFileSystem createLocalFileSystem() {
return new CoreLocalFileSystem();
}
@Nullable
protected VirtualFileSystem createJrtFileSystem() {
return null;
}
@NotNull
public MockApplication getApplication() {
return myApplication;
}
@NotNull
public Disposable getParentDisposable() {
return myParentDisposable;
}
public <T> void registerApplicationComponent(@NotNull Class<T> interfaceClass, @NotNull T implementation) {
registerComponentInstance(myApplication.getPicoContainer(), interfaceClass, implementation);
if (implementation instanceof Disposable) {
Disposer.register(myApplication, (Disposable)implementation);
}
}
public void registerFileType(@NotNull FileType fileType, @NotNull String extension) {
myFileTypeRegistry.registerFileType(fileType, extension);
}
public void registerParserDefinition(@NotNull ParserDefinition definition) {
addExplicitExtension(LanguageParserDefinitions.INSTANCE, definition.getFileNodeType().getLanguage(), definition);
}
public static <T> void registerComponentInstance(@NotNull MutablePicoContainer container, @NotNull Class<T> key, @NotNull T implementation) {
container.unregisterComponent(key);
container.registerComponentInstance(key, implementation);
}
public <T> void addExplicitExtension(@NotNull LanguageExtension<T> instance, @NotNull Language language, @NotNull T object) {
doAddExplicitExtension(instance, language, object);
}
public void registerParserDefinition(@NotNull Language language, @NotNull ParserDefinition parserDefinition) {
addExplicitExtension(LanguageParserDefinitions.INSTANCE, language, parserDefinition);
}
public <T> void addExplicitExtension(@NotNull final FileTypeExtension<T> instance, @NotNull final FileType fileType, @NotNull final T object) {
doAddExplicitExtension(instance, fileType, object);
}
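  // Registers the extension for the given key and unregisters it again when the parent disposable is disposed.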
private <T,U> void doAddExplicitExtension(@NotNull final KeyedExtensionCollector<T,U> instance, @NotNull final U key, @NotNull final T object) {
instance.addExplicitExtension(key, object);
Disposer.register(myParentDisposable, new Disposable() {
@Override
public void dispose() {
instance.removeExplicitExtension(key, object);
}
});
}
public <T> void addExplicitExtension(@NotNull final ClassExtension<T> instance, @NotNull final Class aClass, @NotNull final T object) {
doAddExplicitExtension(instance, aClass, object);
}
public <T> void addExtension(@NotNull ExtensionPointName<T> name, @NotNull final T extension) {
final ExtensionPoint<T> extensionPoint = Extensions.getRootArea().getExtensionPoint(name);
extensionPoint.registerExtension(extension);
Disposer.register(myParentDisposable, new Disposable() {
@Override
public void dispose() {
        // There is a possible case that a particular extension was replaced in a particular environment,
        // e.g. Upsource replaces some IntelliJ extensions.
if (extensionPoint.hasExtension(extension)) {
extensionPoint.unregisterExtension(extension);
}
}
});
}
public static <T> void registerExtensionPoint(@NotNull ExtensionsArea area,
@NotNull ExtensionPointName<T> extensionPointName,
@NotNull Class<? extends T> aClass) {
final String name = extensionPointName.getName();
registerExtensionPoint(area, name, aClass);
}
public static <T> void registerExtensionPoint(@NotNull ExtensionsArea area, @NotNull String name, @NotNull Class<? extends T> aClass) {
if (!area.hasExtensionPoint(name)) {
ExtensionPoint.Kind kind = aClass.isInterface() || Modifier.isAbstract(aClass.getModifiers()) ? ExtensionPoint.Kind.INTERFACE : ExtensionPoint.Kind.BEAN_CLASS;
area.registerExtensionPoint(name, aClass.getName(), kind);
}
}
public static <T> void registerApplicationExtensionPoint(@NotNull ExtensionPointName<T> extensionPointName, @NotNull Class<? extends T> aClass) {
registerExtensionPoint(Extensions.getRootArea(), extensionPointName, aClass);
}
public static void registerExtensionPointAndExtensions(@NotNull File pluginRoot, @NotNull String fileName, @NotNull ExtensionsArea area) {
PluginManagerCore.registerExtensionPointAndExtensions(pluginRoot, fileName, area);
}
@NotNull
public CoreLocalFileSystem getLocalFileSystem() {
return myLocalFileSystem;
}
@NotNull
public VirtualFileSystem getJarFileSystem() {
return myJarFileSystem;
}
@Nullable
public VirtualFileSystem getJrtFileSystem() {
return myJrtFileSystem;
}
}
| platform/core-impl/src/com/intellij/core/CoreApplicationEnvironment.java | // Copyright 2000-2018 JetBrains s.r.o. Use of this source code is governed by the Apache 2.0 license that can be found in the LICENSE file.
package com.intellij.core;
import com.intellij.codeInsight.folding.CodeFoldingSettings;
import com.intellij.concurrency.Job;
import com.intellij.concurrency.JobLauncher;
import com.intellij.ide.plugins.PluginManagerCore;
import com.intellij.lang.*;
import com.intellij.lang.impl.PsiBuilderFactoryImpl;
import com.intellij.mock.MockApplication;
import com.intellij.mock.MockApplicationEx;
import com.intellij.mock.MockFileDocumentManagerImpl;
import com.intellij.openapi.Disposable;
import com.intellij.openapi.application.ApplicationInfo;
import com.intellij.openapi.application.ApplicationManager;
import com.intellij.openapi.application.impl.ApplicationInfoImpl;
import com.intellij.openapi.command.CommandProcessor;
import com.intellij.openapi.command.impl.CoreCommandProcessor;
import com.intellij.openapi.components.ExtensionAreas;
import com.intellij.openapi.editor.impl.DocumentImpl;
import com.intellij.openapi.extensions.ExtensionPoint;
import com.intellij.openapi.extensions.ExtensionPointName;
import com.intellij.openapi.extensions.Extensions;
import com.intellij.openapi.extensions.ExtensionsArea;
import com.intellij.openapi.fileEditor.FileDocumentManager;
import com.intellij.openapi.fileTypes.FileType;
import com.intellij.openapi.fileTypes.FileTypeExtension;
import com.intellij.openapi.progress.ProgressIndicator;
import com.intellij.openapi.progress.ProgressManager;
import com.intellij.openapi.progress.impl.CoreProgressManager;
import com.intellij.openapi.util.ClassExtension;
import com.intellij.openapi.util.Disposer;
import com.intellij.openapi.util.KeyedExtensionCollector;
import com.intellij.openapi.util.StaticGetter;
import com.intellij.openapi.vfs.VirtualFileManager;
import com.intellij.openapi.vfs.VirtualFileSystem;
import com.intellij.openapi.vfs.encoding.EncodingManager;
import com.intellij.openapi.vfs.impl.CoreVirtualFilePointerManager;
import com.intellij.openapi.vfs.impl.VirtualFileManagerImpl;
import com.intellij.openapi.vfs.impl.jar.CoreJarFileSystem;
import com.intellij.openapi.vfs.local.CoreLocalFileSystem;
import com.intellij.openapi.vfs.pointers.VirtualFilePointerManager;
import com.intellij.psi.PsiReferenceService;
import com.intellij.psi.PsiReferenceServiceImpl;
import com.intellij.psi.impl.meta.MetaRegistry;
import com.intellij.psi.impl.source.resolve.reference.ReferenceProvidersRegistry;
import com.intellij.psi.impl.source.resolve.reference.ReferenceProvidersRegistryImpl;
import com.intellij.psi.meta.MetaDataRegistrar;
import com.intellij.psi.stubs.CoreStubTreeLoader;
import com.intellij.psi.stubs.StubTreeLoader;
import com.intellij.util.Consumer;
import com.intellij.util.KeyedLazyInstanceEP;
import com.intellij.util.Processor;
import com.intellij.util.graph.GraphAlgorithms;
import com.intellij.util.graph.impl.GraphAlgorithmsImpl;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
import org.picocontainer.MutablePicoContainer;
import java.io.File;
import java.lang.reflect.Modifier;
import java.util.List;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.Future;
/**
* @author yole
*/
public class CoreApplicationEnvironment {
private final CoreFileTypeRegistry myFileTypeRegistry;
protected final MockApplication myApplication;
private final CoreLocalFileSystem myLocalFileSystem;
protected final VirtualFileSystem myJarFileSystem;
private final VirtualFileSystem myJrtFileSystem;
@NotNull private final Disposable myParentDisposable;
private final boolean myUnitTestMode;
public CoreApplicationEnvironment(@NotNull Disposable parentDisposable) {
this(parentDisposable, true);
}
public CoreApplicationEnvironment(@NotNull Disposable parentDisposable, boolean unitTestMode) {
myParentDisposable = parentDisposable;
myUnitTestMode = unitTestMode;
myFileTypeRegistry = new CoreFileTypeRegistry();
myApplication = createApplication(myParentDisposable);
ApplicationManager.setApplication(myApplication,
new StaticGetter<>(myFileTypeRegistry),
myParentDisposable);
myLocalFileSystem = createLocalFileSystem();
myJarFileSystem = createJarFileSystem();
myJrtFileSystem = createJrtFileSystem();
Extensions.registerAreaClass(ExtensionAreas.IDEA_PROJECT, null);
final MutablePicoContainer appContainer = myApplication.getPicoContainer();
registerComponentInstance(appContainer, FileDocumentManager.class, new MockFileDocumentManagerImpl(
charSequence -> new DocumentImpl(charSequence), null));
VirtualFileSystem[] fs = myJrtFileSystem != null
? new VirtualFileSystem[]{myLocalFileSystem, myJarFileSystem, myJrtFileSystem}
: new VirtualFileSystem[]{myLocalFileSystem, myJarFileSystem};
VirtualFileManagerImpl virtualFileManager = new VirtualFileManagerImpl(fs, myApplication.getMessageBus());
registerComponentInstance(appContainer, VirtualFileManager.class, virtualFileManager);
    // Fake EP for cleaning up resources after the area is disposed (otherwise the KeyedExtensionCollector listener would be copied to the next area)
registerApplicationExtensionPoint(new ExtensionPointName<>("com.intellij.virtualFileSystem"), KeyedLazyInstanceEP.class);
registerApplicationService(EncodingManager.class, new CoreEncodingRegistry());
registerApplicationService(VirtualFilePointerManager.class, createVirtualFilePointerManager());
registerApplicationService(DefaultASTFactory.class, new CoreASTFactory());
registerApplicationService(PsiBuilderFactory.class, new PsiBuilderFactoryImpl());
registerApplicationService(ReferenceProvidersRegistry.class, new ReferenceProvidersRegistryImpl());
registerApplicationService(StubTreeLoader.class, new CoreStubTreeLoader());
registerApplicationService(PsiReferenceService.class, new PsiReferenceServiceImpl());
registerApplicationService(MetaDataRegistrar.class, new MetaRegistry());
registerApplicationService(ProgressManager.class, createProgressIndicatorProvider());
registerApplicationService(JobLauncher.class, createJobLauncher());
registerApplicationService(CodeFoldingSettings.class, new CodeFoldingSettings());
registerApplicationService(CommandProcessor.class, new CoreCommandProcessor());
registerApplicationService(GraphAlgorithms.class, new GraphAlgorithmsImpl());
myApplication.registerService(ApplicationInfo.class, ApplicationInfoImpl.class);
}
public <T> void registerApplicationService(@NotNull Class<T> serviceInterface, @NotNull T serviceImplementation) {
myApplication.registerService(serviceInterface, serviceImplementation);
}
@NotNull
protected VirtualFilePointerManager createVirtualFilePointerManager() {
return new CoreVirtualFilePointerManager();
}
@NotNull
protected MockApplication createApplication(@NotNull Disposable parentDisposable) {
return new MockApplicationEx(parentDisposable) {
@Override
public boolean isUnitTestMode() {
return myUnitTestMode;
}
};
}
@NotNull
protected JobLauncher createJobLauncher() {
return new JobLauncher() {
@Override
public <T> boolean invokeConcurrentlyUnderProgress(@NotNull List<T> things,
ProgressIndicator progress,
boolean runInReadAction,
boolean failFastOnAcquireReadAction,
@NotNull Processor<? super T> thingProcessor) {
for (T thing : things) {
if (!thingProcessor.process(thing))
return false;
}
return true;
}
@NotNull
@Override
public Job<Void> submitToJobThread(@NotNull Runnable action, Consumer<? super Future<?>> onDoneCallback) {
action.run();
if (onDoneCallback != null) {
onDoneCallback.consume(CompletableFuture.completedFuture(null));
}
return Job.NULL_JOB;
}
};
}
@NotNull
protected ProgressManager createProgressIndicatorProvider() {
return new CoreProgressManager();
}
@NotNull
protected VirtualFileSystem createJarFileSystem() {
return new CoreJarFileSystem();
}
@NotNull
protected CoreLocalFileSystem createLocalFileSystem() {
return new CoreLocalFileSystem();
}
@Nullable
protected VirtualFileSystem createJrtFileSystem() {
return null;
}
@NotNull
public MockApplication getApplication() {
return myApplication;
}
@NotNull
public Disposable getParentDisposable() {
return myParentDisposable;
}
public <T> void registerApplicationComponent(@NotNull Class<T> interfaceClass, @NotNull T implementation) {
registerComponentInstance(myApplication.getPicoContainer(), interfaceClass, implementation);
if (implementation instanceof Disposable) {
Disposer.register(myApplication, (Disposable)implementation);
}
}
public void registerFileType(@NotNull FileType fileType, @NotNull String extension) {
myFileTypeRegistry.registerFileType(fileType, extension);
}
public void registerParserDefinition(@NotNull ParserDefinition definition) {
addExplicitExtension(LanguageParserDefinitions.INSTANCE, definition.getFileNodeType().getLanguage(), definition);
}
public static <T> void registerComponentInstance(@NotNull MutablePicoContainer container, @NotNull Class<T> key, @NotNull T implementation) {
container.unregisterComponent(key);
container.registerComponentInstance(key, implementation);
}
public <T> void addExplicitExtension(@NotNull LanguageExtension<T> instance, @NotNull Language language, @NotNull T object) {
doAddExplicitExtension(instance, language, object);
}
public void registerParserDefinition(@NotNull Language language, @NotNull ParserDefinition parserDefinition) {
addExplicitExtension(LanguageParserDefinitions.INSTANCE, language, parserDefinition);
}
public <T> void addExplicitExtension(@NotNull final FileTypeExtension<T> instance, @NotNull final FileType fileType, @NotNull final T object) {
doAddExplicitExtension(instance, fileType, object);
}
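  // Registers the extension for the given key and unregisters it again when the parent disposable is disposed.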
private <T,U> void doAddExplicitExtension(@NotNull final KeyedExtensionCollector<T,U> instance, @NotNull final U key, @NotNull final T object) {
instance.addExplicitExtension(key, object);
Disposer.register(myParentDisposable, new Disposable() {
@Override
public void dispose() {
instance.removeExplicitExtension(key, object);
}
});
}
public <T> void addExplicitExtension(@NotNull final ClassExtension<T> instance, @NotNull final Class aClass, @NotNull final T object) {
doAddExplicitExtension(instance, aClass, object);
}
public <T> void addExtension(@NotNull ExtensionPointName<T> name, @NotNull final T extension) {
final ExtensionPoint<T> extensionPoint = Extensions.getRootArea().getExtensionPoint(name);
extensionPoint.registerExtension(extension);
Disposer.register(myParentDisposable, new Disposable() {
@Override
public void dispose() {
        // There is a possible case that a particular extension was replaced in a particular environment,
        // e.g. Upsource replaces some IntelliJ extensions.
if (extensionPoint.hasExtension(extension)) {
extensionPoint.unregisterExtension(extension);
}
}
});
}
public static <T> void registerExtensionPoint(@NotNull ExtensionsArea area,
@NotNull ExtensionPointName<T> extensionPointName,
@NotNull Class<? extends T> aClass) {
final String name = extensionPointName.getName();
registerExtensionPoint(area, name, aClass);
}
public static <T> void registerExtensionPoint(@NotNull ExtensionsArea area, @NotNull String name, @NotNull Class<? extends T> aClass) {
if (!area.hasExtensionPoint(name)) {
ExtensionPoint.Kind kind = aClass.isInterface() || Modifier.isAbstract(aClass.getModifiers()) ? ExtensionPoint.Kind.INTERFACE : ExtensionPoint.Kind.BEAN_CLASS;
area.registerExtensionPoint(name, aClass.getName(), kind);
}
}
public static <T> void registerApplicationExtensionPoint(@NotNull ExtensionPointName<T> extensionPointName, @NotNull Class<? extends T> aClass) {
registerExtensionPoint(Extensions.getRootArea(), extensionPointName, aClass);
}
public static void registerExtensionPointAndExtensions(@NotNull File pluginRoot, @NotNull String fileName, @NotNull ExtensionsArea area) {
PluginManagerCore.registerExtensionPointAndExtensions(pluginRoot, fileName, area);
}
@NotNull
public CoreLocalFileSystem getLocalFileSystem() {
return myLocalFileSystem;
}
@NotNull
public VirtualFileSystem getJarFileSystem() {
return myJarFileSystem;
}
@Nullable
public VirtualFileSystem getJrtFileSystem() {
return myJrtFileSystem;
}
}
| Correctly dispose VirtualFileManager registered in CoreApplicationEnvironment
| platform/core-impl/src/com/intellij/core/CoreApplicationEnvironment.java | Correctly dispose VirtualFileManager registered in CoreApplicationEnvironment | <ide><path>latform/core-impl/src/com/intellij/core/CoreApplicationEnvironment.java
<ide> ? new VirtualFileSystem[]{myLocalFileSystem, myJarFileSystem, myJrtFileSystem}
<ide> : new VirtualFileSystem[]{myLocalFileSystem, myJarFileSystem};
<ide> VirtualFileManagerImpl virtualFileManager = new VirtualFileManagerImpl(fs, myApplication.getMessageBus());
<del> registerComponentInstance(appContainer, VirtualFileManager.class, virtualFileManager);
<del>
<add> registerApplicationComponent(VirtualFileManager.class, virtualFileManager);
<add>
<ide>     // Fake EP for cleaning up resources after the area is disposed (otherwise the KeyedExtensionCollector listener would be copied to the next area)
<ide> registerApplicationExtensionPoint(new ExtensionPointName<>("com.intellij.virtualFileSystem"), KeyedLazyInstanceEP.class);
<ide> |
|
Java | apache-2.0 | bfb9eb04f07d9af058a5b9cf5a2a2363c77ff36e | 0 | vespa-engine/vespa,vespa-engine/vespa,vespa-engine/vespa,vespa-engine/vespa,vespa-engine/vespa,vespa-engine/vespa,vespa-engine/vespa,vespa-engine/vespa,vespa-engine/vespa,vespa-engine/vespa | // Copyright 2018 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.hosted.controller;
import com.yahoo.component.Version;
import com.yahoo.config.application.api.ValidationId;
import com.yahoo.config.application.api.ValidationOverrides;
import com.yahoo.config.provision.ApplicationId;
import com.yahoo.config.provision.ApplicationName;
import com.yahoo.config.provision.Environment;
import com.yahoo.config.provision.InstanceName;
import com.yahoo.config.provision.RegionName;
import com.yahoo.config.provision.SystemName;
import com.yahoo.config.provision.TenantName;
import com.yahoo.vespa.athenz.api.NToken;
import com.yahoo.vespa.config.SlimeUtils;
import com.yahoo.vespa.hosted.controller.api.application.v4.model.DeployOptions;
import com.yahoo.vespa.hosted.controller.api.application.v4.model.EndpointStatus;
import com.yahoo.vespa.hosted.controller.api.identifiers.DeploymentId;
import com.yahoo.vespa.hosted.controller.api.integration.BuildService;
import com.yahoo.vespa.hosted.controller.api.integration.dns.Record;
import com.yahoo.vespa.hosted.controller.api.integration.dns.RecordName;
import com.yahoo.vespa.hosted.controller.api.integration.stubs.MockBuildService;
import com.yahoo.vespa.hosted.controller.api.integration.zone.ZoneId;
import com.yahoo.vespa.hosted.controller.application.ApplicationPackage;
import com.yahoo.vespa.hosted.controller.application.ApplicationVersion;
import com.yahoo.vespa.hosted.controller.application.Change;
import com.yahoo.vespa.hosted.controller.application.Deployment;
import com.yahoo.vespa.hosted.controller.application.DeploymentJobs.JobError;
import com.yahoo.vespa.hosted.controller.api.integration.deployment.JobType;
import com.yahoo.vespa.hosted.controller.application.JobStatus;
import com.yahoo.vespa.hosted.controller.application.SourceRevision;
import com.yahoo.vespa.hosted.controller.deployment.ApplicationPackageBuilder;
import com.yahoo.vespa.hosted.controller.deployment.BuildJob;
import com.yahoo.vespa.hosted.controller.deployment.DeploymentTester;
import com.yahoo.vespa.hosted.controller.persistence.ApplicationSerializer;
import com.yahoo.vespa.hosted.controller.rotation.RotationId;
import com.yahoo.vespa.hosted.controller.rotation.RotationLock;
import com.yahoo.vespa.hosted.controller.versions.DeploymentStatistics;
import com.yahoo.vespa.hosted.controller.versions.VersionStatus;
import com.yahoo.vespa.hosted.controller.versions.VespaVersion;
import org.junit.Test;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.time.Duration;
import java.time.Instant;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.function.Supplier;
import java.util.stream.Collectors;
import static com.yahoo.config.provision.SystemName.main;
import static com.yahoo.vespa.hosted.controller.ControllerTester.buildJob;
import static com.yahoo.vespa.hosted.controller.api.integration.deployment.JobType.component;
import static com.yahoo.vespa.hosted.controller.api.integration.deployment.JobType.productionCorpUsEast1;
import static com.yahoo.vespa.hosted.controller.api.integration.deployment.JobType.productionUsEast3;
import static com.yahoo.vespa.hosted.controller.api.integration.deployment.JobType.productionUsWest1;
import static com.yahoo.vespa.hosted.controller.api.integration.deployment.JobType.stagingTest;
import static com.yahoo.vespa.hosted.controller.api.integration.deployment.JobType.systemTest;
import static java.time.temporal.ChronoUnit.MILLIS;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
/**
* @author bratseth
* @author mpolden
*/
public class ControllerTest {
private static final ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.environment(Environment.prod)
.region("corp-us-east-1")
.build();
@Test
public void testDeployment() {
// Setup system
DeploymentTester tester = new DeploymentTester();
ApplicationController applications = tester.controller().applications();
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.environment(Environment.prod)
.region("corp-us-east-1")
.region("us-east-3")
.build();
// staging job - succeeding
Version version1 = tester.defaultPlatformVersion();
Application app1 = tester.createApplication("app1", "tenant1", 1, 11L);
tester.jobCompletion(component).application(app1).uploadArtifact(applicationPackage).submit();
assertEquals("Application version is known from completion of initial job",
ApplicationVersion.from(BuildJob.defaultSourceRevision, BuildJob.defaultBuildNumber),
tester.controller().applications().require(app1.id()).change().application().get());
tester.deployAndNotify(app1, applicationPackage, true, systemTest);
tester.deployAndNotify(app1, applicationPackage, true, stagingTest);
assertEquals(4, applications.require(app1.id()).deploymentJobs().jobStatus().size());
ApplicationVersion applicationVersion = tester.controller().applications().require(app1.id()).change().application().get();
assertFalse("Application version has been set during deployment", applicationVersion.isUnknown());
assertStatus(JobStatus.initial(stagingTest)
.withTriggering(version1, applicationVersion, Optional.empty(),"", tester.clock().instant().truncatedTo(MILLIS))
.withCompletion(42, Optional.empty(), tester.clock().instant().truncatedTo(MILLIS)), app1.id(), tester.controller());
// Causes first deployment job to be triggered
assertStatus(JobStatus.initial(productionCorpUsEast1)
.withTriggering(version1, applicationVersion, Optional.empty(), "", tester.clock().instant().truncatedTo(MILLIS)), app1.id(), tester.controller());
tester.clock().advance(Duration.ofSeconds(1));
// production job (failing) after deployment
tester.deploy(productionCorpUsEast1, app1, applicationPackage);
tester.deployAndNotify(app1, applicationPackage, false, productionCorpUsEast1);
assertEquals(4, applications.require(app1.id()).deploymentJobs().jobStatus().size());
JobStatus expectedJobStatus = JobStatus.initial(productionCorpUsEast1)
.withTriggering(version1, applicationVersion, Optional.empty(), "", tester.clock().instant().truncatedTo(MILLIS)) // Triggered first without application version info
.withCompletion(42, Optional.of(JobError.unknown), tester.clock().instant().truncatedTo(MILLIS))
.withTriggering(version1,
applicationVersion,
Optional.of(tester.application(app1.id()).deployments().get(productionCorpUsEast1.zone(main).get())),
"",
tester.clock().instant().truncatedTo(MILLIS)); // Re-triggering (due to failure) has application version info
assertStatus(expectedJobStatus, app1.id(), tester.controller());
// Simulate restart
tester.restartController();
applications = tester.controller().applications();
assertNotNull(tester.controller().tenants().tenant(TenantName.from("tenant1")));
assertNotNull(applications.get(ApplicationId.from(TenantName.from("tenant1"),
ApplicationName.from("application1"),
InstanceName.from("default"))));
assertEquals(4, applications.require(app1.id()).deploymentJobs().jobStatus().size());
tester.clock().advance(Duration.ofHours(1));
        // system and staging test jobs - succeeding
tester.jobCompletion(component).application(app1).nextBuildNumber().uploadArtifact(applicationPackage).submit();
applicationVersion = tester.application("app1").change().application().get();
tester.deployAndNotify(app1, applicationPackage, true, systemTest);
assertStatus(JobStatus.initial(systemTest)
.withTriggering(version1, applicationVersion, productionCorpUsEast1.zone(main).map(tester.application(app1.id()).deployments()::get), "", tester.clock().instant().truncatedTo(MILLIS))
.withCompletion(42, Optional.empty(), tester.clock().instant().truncatedTo(MILLIS)),
app1.id(), tester.controller());
tester.clock().advance(Duration.ofHours(1)); // Stop retrying
tester.jobCompletion(productionCorpUsEast1).application(app1).unsuccessful().submit();
tester.deployAndNotify(app1, applicationPackage, true, stagingTest);
// production job succeeding now
expectedJobStatus = expectedJobStatus
.withTriggering(version1, applicationVersion, productionCorpUsEast1.zone(main).map(tester.application(app1.id()).deployments()::get), "", tester.clock().instant().truncatedTo(MILLIS))
.withCompletion(42, Optional.empty(), tester.clock().instant().truncatedTo(MILLIS));
tester.deployAndNotify(app1, applicationPackage, true, productionCorpUsEast1);
assertStatus(expectedJobStatus, app1.id(), tester.controller());
// causes triggering of next production job
assertStatus(JobStatus.initial(productionUsEast3)
.withTriggering(version1, applicationVersion, Optional.empty(), "", tester.clock().instant().truncatedTo(MILLIS)),
app1.id(), tester.controller());
tester.deployAndNotify(app1, applicationPackage, true, productionUsEast3);
assertEquals(5, applications.get(app1.id()).get().deploymentJobs().jobStatus().size());
// prod zone removal is not allowed
applicationPackage = new ApplicationPackageBuilder()
.environment(Environment.prod)
.region("us-east-3")
.build();
tester.jobCompletion(component).application(app1).nextBuildNumber().nextBuildNumber().uploadArtifact(applicationPackage).submit();
try {
tester.deploy(systemTest, app1, applicationPackage);
fail("Expected exception due to illegal production deployment removal");
}
catch (IllegalArgumentException e) {
assertEquals("deployment-removal: application 'tenant1.app1' is deployed in corp-us-east-1, but does not include this zone in deployment.xml. " +
ValidationOverrides.toAllowMessage(ValidationId.deploymentRemoval),
e.getMessage());
}
assertNotNull("Zone was not removed",
applications.require(app1.id()).deployments().get(productionCorpUsEast1.zone(main).get()));
JobStatus jobStatus = applications.require(app1.id()).deploymentJobs().jobStatus().get(productionCorpUsEast1);
assertNotNull("Deployment job was not removed", jobStatus);
assertEquals(42, jobStatus.lastCompleted().get().id());
assertEquals("New change available", jobStatus.lastCompleted().get().reason());
// prod zone removal is allowed with override
applicationPackage = new ApplicationPackageBuilder()
.allow(ValidationId.deploymentRemoval)
.upgradePolicy("default")
.environment(Environment.prod)
.region("us-east-3")
.build();
tester.jobCompletion(component).application(app1).nextBuildNumber(2).uploadArtifact(applicationPackage).submit();
tester.deployAndNotify(app1, applicationPackage, true, systemTest);
assertNull("Zone was removed",
applications.require(app1.id()).deployments().get(productionCorpUsEast1.zone(main).get()));
assertNull("Deployment job was removed", applications.require(app1.id()).deploymentJobs().jobStatus().get(productionCorpUsEast1));
}
@Test
public void testDeploymentApplicationVersion() {
DeploymentTester tester = new DeploymentTester();
Application app = tester.createApplication("app1", "tenant1", 1, 11L);
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.environment(Environment.prod)
.region("corp-us-east-1")
.region("us-east-3")
.build();
SourceRevision source = new SourceRevision("repo", "master", "commit1");
ApplicationVersion applicationVersion = ApplicationVersion.from(source, 101);
runDeployment(tester, app.id(), applicationVersion, applicationPackage, source,101);
assertEquals("Artifact is downloaded twice in staging and once for other zones", 5,
tester.artifactRepository().hits(app.id(), applicationVersion.id()));
// Application is upgraded. This makes deployment orchestration pick the last successful application version in
// zones which do not have permanent deployments, e.g. test and staging
runUpgrade(tester, app.id(), applicationVersion);
}
@Test
public void testDeployVersion() {
// Setup system
DeploymentTester tester = new DeploymentTester();
ApplicationController applications = tester.controller().applications();
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.environment(Environment.prod)
.region("us-west-1")
.build();
Version systemVersion = tester.controller().versionStatus().systemVersion().get().versionNumber();
Application app1 = tester.createApplication("application1", "tenant1", 1, 1L);
// First deployment: An application change
tester.jobCompletion(component).application(app1).uploadArtifact(applicationPackage).submit();
tester.deployAndNotify(app1, applicationPackage, true, systemTest);
tester.deployAndNotify(app1, applicationPackage, true, stagingTest);
tester.deployAndNotify(app1, applicationPackage, true, productionUsWest1);
app1 = applications.require(app1.id());
assertEquals("First deployment gets system version", systemVersion, app1.oldestDeployedPlatform().get());
assertEquals(systemVersion, tester.configServer().lastPrepareVersion().get());
// Unexpected deployment
tester.deploy(productionUsWest1, app1, applicationPackage);
// applications are immutable, so any change to one, including deployment changes, would give rise to a new instance.
assertEquals("Unexpected deployment is ignored", app1, applications.require(app1.id()));
// Application change after a new system version, and a region added
Version newSystemVersion = incrementSystemVersion(tester.controller());
assertTrue(newSystemVersion.isAfter(systemVersion));
applicationPackage = new ApplicationPackageBuilder()
.environment(Environment.prod)
.region("us-west-1")
.region("us-east-3")
.build();
tester.jobCompletion(component).application(app1).nextBuildNumber().uploadArtifact(applicationPackage).submit();
tester.deployAndNotify(app1, applicationPackage, true, systemTest);
tester.deployAndNotify(app1, applicationPackage, true, stagingTest);
tester.deployAndNotify(app1, applicationPackage, true, productionUsWest1);
app1 = applications.require(app1.id());
assertEquals("Application change preserves version", systemVersion, app1.oldestDeployedPlatform().get());
assertEquals(systemVersion, tester.configServer().lastPrepareVersion().get());
// A deployment to the new region gets the same version
tester.deployAndNotify(app1, applicationPackage, true, productionUsEast3);
app1 = applications.require(app1.id());
assertEquals("Application change preserves version", systemVersion, app1.oldestDeployedPlatform().get());
assertEquals(systemVersion, tester.configServer().lastPrepareVersion().get());
assertFalse("Change deployed", app1.change().isPresent());
// Version upgrade changes system version
applications.deploymentTrigger().triggerChange(app1.id(), Change.of(newSystemVersion));
tester.deploymentTrigger().triggerReadyJobs();
tester.deployAndNotify(app1, applicationPackage, true, systemTest);
tester.deployAndNotify(app1, applicationPackage, true, stagingTest);
tester.deployAndNotify(app1, applicationPackage, true, productionUsWest1);
tester.deployAndNotify(app1, applicationPackage, true, productionUsEast3);
app1 = applications.require(app1.id());
assertEquals("Version upgrade changes version", newSystemVersion, app1.oldestDeployedPlatform().get());
assertEquals(newSystemVersion, tester.configServer().lastPrepareVersion().get());
}
/** Adds a new version, higher than the current system version, makes it the system version and returns it */
private Version incrementSystemVersion(Controller controller) {
Version systemVersion = controller.versionStatus().systemVersion().get().versionNumber();
Version newSystemVersion = new Version(systemVersion.getMajor(), systemVersion.getMinor()+1, 0);
VespaVersion newSystemVespaVersion = new VespaVersion(DeploymentStatistics.empty(newSystemVersion),
"commit1",
Instant.now(),
true,
true,
Collections.emptyList(),
VespaVersion.Confidence.low
);
List<VespaVersion> versions = new ArrayList<>(controller.versionStatus().versions());
for (int i = 0; i < versions.size(); i++) {
VespaVersion c = versions.get(i);
if (c.isSystemVersion())
versions.set(i, new VespaVersion(c.statistics(), c.releaseCommit(), c.committedAt(),
false,
false,
c.systemApplicationHostnames(),
c.confidence()));
}
versions.add(newSystemVespaVersion);
controller.updateVersionStatus(new VersionStatus(versions));
return newSystemVersion;
}
@Test
public void testPullRequestDeployment() {
// Setup system
ControllerTester tester = new ControllerTester();
ApplicationController applications = tester.controller().applications();
// staging deployment
long app1ProjectId = 22;
ApplicationId app1 = tester.createAndDeploy("tenant1", "domain1",
"application1", Environment.staging,
app1ProjectId).id();
// pull-request deployment - uses different instance id
ApplicationId app1pr = tester.createAndDeploy("tenant1", "domain1",
"application1", "1",
Environment.staging, app1ProjectId, null).id();
assertTrue(applications.get(app1).isPresent());
assertEquals(app1, applications.get(app1).get().id());
assertTrue(applications.get(app1pr).isPresent());
assertEquals(app1pr, applications.get(app1pr).get().id());
// Simulate restart
tester.createNewController();
applications = tester.controller().applications();
assertTrue(applications.get(app1).isPresent());
assertEquals(app1, applications.get(app1).get().id());
assertTrue(applications.get(app1pr).isPresent());
assertEquals(app1pr, applications.get(app1pr).get().id());
// Deleting application also removes PR instance
ApplicationId app2 = tester.createAndDeploy("tenant1", "domain1",
"application2", Environment.staging,
33).id();
tester.controller().applications().deleteApplication(app1, Optional.of(new NToken("ntoken")));
assertEquals("All instances deleted", 0,
tester.controller().applications().asList(app1.tenant()).stream()
.filter(app -> app.id().application().equals(app1.application()))
.count());
assertEquals("Other application survives", 1,
tester.controller().applications().asList(app1.tenant()).stream()
.filter(app -> app.id().application().equals(app2.application()))
.count());
}
@Test
public void testFailingSinceUpdates() {
// Setup system
DeploymentTester tester = new DeploymentTester();
// Setup application
Application app = tester.createApplication("app1", "foo", 1, 1L);
// Initial failure
Instant initialFailure = tester.clock().instant().truncatedTo(MILLIS);
tester.jobCompletion(component).application(app).uploadArtifact(applicationPackage).submit();
tester.deployAndNotify(app, applicationPackage, false, systemTest);
assertEquals("Failure age is right at initial failure",
initialFailure, firstFailing(app, tester).get().at());
// Failure again -- failingSince should remain the same
tester.clock().advance(Duration.ofMillis(1000));
tester.deployAndNotify(app, applicationPackage, false, systemTest);
assertEquals("Failure age is right at second consecutive failure",
initialFailure, firstFailing(app, tester).get().at());
// Success resets failingSince
tester.clock().advance(Duration.ofMillis(1000));
tester.deployAndNotify(app, applicationPackage, true, systemTest);
assertFalse(firstFailing(app, tester).isPresent());
// Complete deployment
tester.deployAndNotify(app, applicationPackage, true, stagingTest);
tester.deployAndNotify(app, applicationPackage, true, productionCorpUsEast1);
// Two repeated failures again.
// Initial failure
tester.clock().advance(Duration.ofMillis(1000));
initialFailure = tester.clock().instant().truncatedTo(MILLIS);
tester.jobCompletion(component).application(app).nextBuildNumber().uploadArtifact(applicationPackage).submit();
tester.deployAndNotify(app, applicationPackage, false, systemTest);
assertEquals("Failure age is right at initial failure",
initialFailure, firstFailing(app, tester).get().at());
// Failure again -- failingSince should remain the same
tester.clock().advance(Duration.ofMillis(1000));
tester.deployAndNotify(app, applicationPackage, false, systemTest);
assertEquals("Failure age is right at second consecutive failure",
initialFailure, firstFailing(app, tester).get().at());
}
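    // Returns the first failing run of the given application's system test job, if any.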
private Optional<JobStatus.JobRun> firstFailing(Application application, DeploymentTester tester) {
return tester.controller().applications().get(application.id()).get().deploymentJobs().jobStatus().get(systemTest).firstFailing();
}
@Test
public void requeueOutOfCapacityStagingJob() {
DeploymentTester tester = new DeploymentTester();
long project1 = 1;
long project2 = 2;
long project3 = 3;
Application app1 = tester.createApplication("app1", "tenant1", project1, 1L);
Application app2 = tester.createApplication("app2", "tenant2", project2, 1L);
Application app3 = tester.createApplication("app3", "tenant3", project3, 1L);
MockBuildService mockBuildService = tester.buildService();
// all applications: system-test completes successfully with some time in between, to determine trigger order.
tester.jobCompletion(component).application(app2).uploadArtifact(applicationPackage).submit();
tester.deployAndNotify(app2, applicationPackage, true, systemTest);
tester.clock().advance(Duration.ofMinutes(1));
tester.jobCompletion(component).application(app1).uploadArtifact(applicationPackage).submit();
tester.deployAndNotify(app1, applicationPackage, true, systemTest);
tester.clock().advance(Duration.ofMinutes(1));
tester.jobCompletion(component).application(app3).uploadArtifact(applicationPackage).submit();
tester.deployAndNotify(app3, applicationPackage, true, systemTest);
// all applications: staging test jobs queued
assertEquals(3, mockBuildService.jobs().size());
// Abort all running jobs, so we have three candidate jobs, of which only one should be triggered at a time.
tester.buildService().clear();
List<BuildService.BuildJob> jobs = new ArrayList<>();
assertJobsInOrder(jobs, tester.buildService().jobs());
tester.triggerUntilQuiescence();
jobs.add(buildJob(app2, stagingTest));
jobs.add(buildJob(app1, stagingTest));
jobs.add(buildJob(app3, stagingTest));
assertJobsInOrder(jobs, tester.buildService().jobs());
// Remove the jobs for app1 and app2, and then let app3 fail with outOfCapacity.
// All three jobs are now eligible, but the one for app3 should trigger first as an outOfCapacity-retry.
tester.buildService().remove(buildJob(app1, stagingTest));
tester.buildService().remove(buildJob(app2, stagingTest));
jobs.remove(buildJob(app1, stagingTest));
jobs.remove(buildJob(app2, stagingTest));
tester.jobCompletion(stagingTest).application(app3).error(JobError.outOfCapacity).submit();
assertJobsInOrder(jobs, tester.buildService().jobs());
tester.triggerUntilQuiescence();
jobs.add(buildJob(app2, stagingTest));
jobs.add(buildJob(app1, stagingTest));
assertJobsInOrder(jobs, tester.buildService().jobs());
// Finish deployment for apps 2 and 3, then release a new version, leaving only app1 with an application upgrade.
tester.deployAndNotify(app2, applicationPackage, true, stagingTest);
tester.deployAndNotify(app2, applicationPackage, true, productionCorpUsEast1);
tester.deployAndNotify(app3, applicationPackage, true, stagingTest);
tester.deployAndNotify(app3, applicationPackage, true, productionCorpUsEast1);
tester.upgradeSystem(new Version("6.2"));
// app1 also gets a new application change, so its time of availability is after the version upgrade.
tester.clock().advance(Duration.ofMinutes(1));
tester.buildService().clear();
tester.jobCompletion(component).application(app1).nextBuildNumber().uploadArtifact(applicationPackage).submit();
jobs.clear();
jobs.add(buildJob(app1, stagingTest));
jobs.add(buildJob(app1, systemTest));
// Tests for app1 trigger before the others since it carries an application upgrade.
assertJobsInOrder(jobs, tester.buildService().jobs());
        // Let the test jobs start, remove everything except the system test for app3, which fails with outOfCapacity again.
tester.triggerUntilQuiescence();
tester.buildService().remove(buildJob(app1, systemTest));
tester.buildService().remove(buildJob(app2, systemTest));
tester.buildService().remove(buildJob(app1, stagingTest));
tester.buildService().remove(buildJob(app2, stagingTest));
tester.buildService().remove(buildJob(app3, stagingTest));
tester.jobCompletion(systemTest).application(app3).error(JobError.outOfCapacity).submit();
jobs.clear();
jobs.add(buildJob(app1, stagingTest));
jobs.add(buildJob(app3, systemTest));
assertJobsInOrder(jobs, tester.buildService().jobs());
tester.triggerUntilQuiescence();
jobs.add(buildJob(app2, stagingTest));
jobs.add(buildJob(app1, systemTest));
jobs.add(buildJob(app3, stagingTest));
jobs.add(buildJob(app2, systemTest));
assertJobsInOrder(jobs, tester.buildService().jobs());
}
/** Verifies that the given job lists have the same jobs, ignoring order of jobs that may have been triggered concurrently. */
private static void assertJobsInOrder(List<BuildService.BuildJob> expected, List<BuildService.BuildJob> actual) {
assertEquals(expected.stream().filter(job -> job.jobName().equals("system-test")).collect(Collectors.toList()),
actual.stream().filter(job -> job.jobName().equals("system-test")).collect(Collectors.toList()));
assertEquals(expected.stream().filter(job -> job.jobName().equals("staging-test")).collect(Collectors.toList()),
actual.stream().filter(job -> job.jobName().equals("staging-test")).collect(Collectors.toList()));
assertTrue(expected.containsAll(actual));
assertTrue(actual.containsAll(expected));
}
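    /** Asserts that the application's persisted status for the expected status' job type matches the expected status. */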
private void assertStatus(JobStatus expectedStatus, ApplicationId id, Controller controller) {
Application app = controller.applications().get(id).get();
JobStatus existingStatus = app.deploymentJobs().jobStatus().get(expectedStatus.type());
assertNotNull("Status of type " + expectedStatus.type() + " is present", existingStatus);
assertEquals(expectedStatus, existingStatus);
}
@Test
public void testGlobalRotations() throws IOException {
// Setup tester and app def
ControllerTester tester = new ControllerTester();
ZoneId zone = ZoneId.from(Environment.defaultEnvironment(), RegionName.defaultName());
ApplicationId appId = ApplicationId.from("tenant", "app1", "default");
DeploymentId deployId = new DeploymentId(appId, zone);
// Check initial rotation status
Map<String, EndpointStatus> rotationStatus = tester.controller().applications().getGlobalRotationStatus(deployId);
assertEquals(1, rotationStatus.size());
assertEquals(rotationStatus.get("qrs-endpoint").getStatus(), EndpointStatus.Status.in);
// Set the global rotations out of service
EndpointStatus status = new EndpointStatus(EndpointStatus.Status.out, "unit-test", "Test", tester.clock().instant().getEpochSecond());
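// Constructor arguments are the new status, a reason string ("unit-test", asserted below),
// the acting agent, and the change time in epoch seconds (argument roles inferred from usage here).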
List<String> overrides = tester.controller().applications().setGlobalRotationStatus(deployId, status);
assertEquals(1, overrides.size());
// Recheck the override rotation status
rotationStatus = tester.controller().applications().getGlobalRotationStatus(deployId);
assertEquals(1, rotationStatus.size());
assertEquals(rotationStatus.get("qrs-endpoint").getStatus(), EndpointStatus.Status.out);
assertEquals("unit-test", rotationStatus.get("qrs-endpoint").getReason());
}
@Test
public void testCleanupOfStaleDeploymentData() throws IOException {
DeploymentTester tester = new DeploymentTester();
tester.controllerTester().zoneRegistry().setSystemName(SystemName.cd);
tester.controllerTester().zoneRegistry().setZones(ZoneId.from("prod", "cd-us-central-1"));
Supplier<Map<JobType, JobStatus>> statuses = () ->
tester.application(ApplicationId.from("vespa", "canary", "default"))
.deploymentJobs().jobStatus();
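// A Supplier is used so the job status is re-read on every call: Application instances are
// immutable, so each change produces a new instance with fresh deployment job state.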
// Current system version, matches version in test data
Version version = Version.fromString("6.141.117");
tester.upgradeSystem(version);
assertEquals(version, tester.controller().versionStatus().systemVersion().get().versionNumber());
// Load test data
byte[] json = Files.readAllBytes(Paths.get("src/test/java/com/yahoo/vespa/hosted/controller/maintenance/testdata/canary-with-stale-data.json"));
Application application = tester.controllerTester().createApplication(SlimeUtils.jsonToSlime(json));
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.upgradePolicy("canary")
.region("cd-us-central-1")
.build();
tester.jobCompletion(component).application(application).uploadArtifact(applicationPackage).submit();
long cdJobsCount = statuses.get().keySet().stream()
.filter(type -> type.zone(SystemName.cd).isPresent())
.count();
long mainJobsCount = statuses.get().keySet().stream()
.filter(type -> type.zone(main).isPresent() && ! type.zone(SystemName.cd).isPresent())
.count();
assertEquals("Irrelevant (main) data is present.", 8, mainJobsCount);
// New version is released
version = Version.fromString("6.142.1");
tester.upgradeSystem(version);
assertEquals(version, tester.controller().versionStatus().systemVersion().get().versionNumber());
// Test environment passes
tester.deployAndNotify(application, applicationPackage, true, systemTest);
long newCdJobsCount = statuses.get().keySet().stream()
.filter(type -> type.zone(SystemName.cd).isPresent())
.count();
long newMainJobsCount = statuses.get().keySet().stream()
.filter(type -> type.zone(main).isPresent() && ! type.zone(SystemName.cd).isPresent())
.count();
assertEquals("Irrelevant (main) job data is removed.", 0, newMainJobsCount);
assertEquals("Relevant (cd) data is not removed.", cdJobsCount, newCdJobsCount);
}
@Test
public void testDnsAliasRegistration() {
DeploymentTester tester = new DeploymentTester();
Application application = tester.createApplication("app1", "tenant1", 1, 1L);
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.environment(Environment.prod)
.globalServiceId("foo")
.region("us-west-1")
.region("us-central-1") // Two deployments should result in each DNS alias being registered once
.build();
tester.deployCompletely(application, applicationPackage);
assertEquals(3, tester.controllerTester().nameService().records().size());
Optional<Record> record = tester.controllerTester().nameService().findRecord(
Record.Type.CNAME, RecordName.from("app1--tenant1.global.vespa.yahooapis.com")
);
assertTrue(record.isPresent());
assertEquals("app1--tenant1.global.vespa.yahooapis.com", record.get().name().asString());
assertEquals("rotation-fqdn-01.", record.get().data().asString());
record = tester.controllerTester().nameService().findRecord(
Record.Type.CNAME, RecordName.from("app1--tenant1.global.vespa.oath.cloud")
);
assertTrue(record.isPresent());
assertEquals("app1--tenant1.global.vespa.oath.cloud", record.get().name().asString());
assertEquals("rotation-fqdn-01.", record.get().data().asString());
record = tester.controllerTester().nameService().findRecord(
Record.Type.CNAME, RecordName.from("app1.tenant1.global.vespa.yahooapis.com")
);
assertTrue(record.isPresent());
assertEquals("app1.tenant1.global.vespa.yahooapis.com", record.get().name().asString());
assertEquals("rotation-fqdn-01.", record.get().data().asString());
}
@Test
public void testUpdatesExistingDnsAlias() {
DeploymentTester tester = new DeploymentTester();
// Application 1 is deployed and deleted
{
Application app1 = tester.createApplication("app1", "tenant1", 1, 1L);
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.environment(Environment.prod)
.globalServiceId("foo")
.region("us-west-1")
.region("us-central-1") // Two deployments should result in each DNS alias being registered once
.build();
tester.deployCompletely(app1, applicationPackage);
assertEquals(3, tester.controllerTester().nameService().records().size());
Optional<Record> record = tester.controllerTester().nameService().findRecord(
Record.Type.CNAME, RecordName.from("app1--tenant1.global.vespa.yahooapis.com")
);
assertTrue(record.isPresent());
assertEquals("app1--tenant1.global.vespa.yahooapis.com", record.get().name().asString());
assertEquals("rotation-fqdn-01.", record.get().data().asString());
record = tester.controllerTester().nameService().findRecord(
Record.Type.CNAME, RecordName.from("app1.tenant1.global.vespa.yahooapis.com")
);
assertTrue(record.isPresent());
assertEquals("app1.tenant1.global.vespa.yahooapis.com", record.get().name().asString());
assertEquals("rotation-fqdn-01.", record.get().data().asString());
// Application is deleted and rotation is unassigned
applicationPackage = new ApplicationPackageBuilder()
.environment(Environment.prod)
.allow(ValidationId.deploymentRemoval)
.build();
tester.jobCompletion(component).application(app1).nextBuildNumber().uploadArtifact(applicationPackage).submit();
tester.deployAndNotify(app1, applicationPackage, true, systemTest);
tester.applications().deactivate(app1, ZoneId.from(Environment.test, RegionName.from("us-east-1")));
tester.applications().deactivate(app1, ZoneId.from(Environment.staging, RegionName.from("us-east-3")));
tester.applications().deleteApplication(app1.id(), Optional.of(new NToken("ntoken")));
try (RotationLock lock = tester.applications().rotationRepository().lock()) {
assertTrue("Rotation is unassigned",
tester.applications().rotationRepository().availableRotations(lock)
.containsKey(new RotationId("rotation-id-01")));
}
// Records remain
record = tester.controllerTester().nameService().findRecord(
Record.Type.CNAME, RecordName.from("app1--tenant1.global.vespa.yahooapis.com")
);
assertTrue(record.isPresent());
record = tester.controllerTester().nameService().findRecord(
Record.Type.CNAME, RecordName.from("app1--tenant1.global.vespa.oath.cloud")
);
assertTrue(record.isPresent());
record = tester.controllerTester().nameService().findRecord(
Record.Type.CNAME, RecordName.from("app1.tenant1.global.vespa.yahooapis.com")
);
assertTrue(record.isPresent());
}
// Application 2 is deployed and assigned same rotation as application 1 had before deletion
{
Application app2 = tester.createApplication("app2", "tenant2", 2, 1L);
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.environment(Environment.prod)
.globalServiceId("foo")
.region("us-west-1")
.region("us-central-1")
.build();
tester.deployCompletely(app2, applicationPackage);
assertEquals(6, tester.controllerTester().nameService().records().size());
Optional<Record> record = tester.controllerTester().nameService().findRecord(
Record.Type.CNAME, RecordName.from("app2--tenant2.global.vespa.yahooapis.com")
);
assertTrue(record.isPresent());
assertEquals("app2--tenant2.global.vespa.yahooapis.com", record.get().name().asString());
assertEquals("rotation-fqdn-01.", record.get().data().asString());
record = tester.controllerTester().nameService().findRecord(
Record.Type.CNAME, RecordName.from("app2--tenant2.global.vespa.oath.cloud")
);
assertTrue(record.isPresent());
assertEquals("app2--tenant2.global.vespa.oath.cloud", record.get().name().asString());
assertEquals("rotation-fqdn-01.", record.get().data().asString());
record = tester.controllerTester().nameService().findRecord(
Record.Type.CNAME, RecordName.from("app2.tenant2.global.vespa.yahooapis.com")
);
assertTrue(record.isPresent());
assertEquals("app2.tenant2.global.vespa.yahooapis.com", record.get().name().asString());
assertEquals("rotation-fqdn-01.", record.get().data().asString());
}
// Application 1 is recreated, deployed and assigned a new rotation
{
tester.buildService().clear();
Application app1 = tester.createApplication("app1", "tenant1", 1, 1L);
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.environment(Environment.prod)
.globalServiceId("foo")
.region("us-west-1")
.region("us-central-1")
.build();
tester.deployCompletely(app1, applicationPackage);
app1 = tester.applications().require(app1.id());
assertEquals("rotation-id-02", app1.rotation().get().id().asString());
// Existing DNS records are updated to point to the newly assigned rotation
assertEquals(6, tester.controllerTester().nameService().records().size());
Optional<Record> record = tester.controllerTester().nameService().findRecord(
Record.Type.CNAME, RecordName.from("app1--tenant1.global.vespa.yahooapis.com")
);
assertTrue(record.isPresent());
assertEquals("rotation-fqdn-02.", record.get().data().asString());
record = tester.controllerTester().nameService().findRecord(
Record.Type.CNAME, RecordName.from("app1--tenant1.global.vespa.oath.cloud")
);
assertTrue(record.isPresent());
assertEquals("rotation-fqdn-02.", record.get().data().asString());
record = tester.controllerTester().nameService().findRecord(
Record.Type.CNAME, RecordName.from("app1.tenant1.global.vespa.yahooapis.com")
);
assertTrue(record.isPresent());
assertEquals("rotation-fqdn-02.", record.get().data().asString());
}
}
@Test
public void testDeployDirectly() {
DeploymentTester tester = new DeploymentTester();
tester.controllerTester().zoneRegistry().setSystemName(SystemName.cd);
tester.controllerTester().zoneRegistry().setZones(ZoneId.from("prod", "cd-us-central-1"));
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.environment(Environment.prod)
.region("cd-us-central-1")
.build();
// Create application
Application app = tester.createApplication("app1", "tenant1", 1, 2L);
// Direct deploy is allowed when deployDirectly is true
ZoneId zone = ZoneId.from("prod", "cd-us-central-1");
// Same options as used in our integration tests
DeployOptions options = new DeployOptions(true, Optional.empty(), false,
false);
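// The leading 'true' is the deployDirectly flag mentioned above; the remaining arguments are
// assumed to be the defaults our integration tests pass for version and validation handling.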
tester.controller().applications().deploy(app.id(), zone, Optional.of(applicationPackage), options);
assertTrue("Application deployed and activated",
tester.controllerTester().configServer().application(app.id()).get().activated());
assertTrue("No job status added",
tester.applications().require(app.id()).deploymentJobs().jobStatus().isEmpty());
}
private void runUpgrade(DeploymentTester tester, ApplicationId application, ApplicationVersion version) {
Version next = Version.fromString("6.2");
tester.upgradeSystem(next);
runDeployment(tester, tester.applications().require(application), version, Optional.of(next), Optional.empty());
}
private void runDeployment(DeploymentTester tester, ApplicationId application, ApplicationVersion version,
ApplicationPackage applicationPackage, SourceRevision sourceRevision, long buildNumber) {
Application app = tester.applications().require(application);
tester.jobCompletion(component)
.application(app)
.buildNumber(buildNumber)
.sourceRevision(sourceRevision)
.uploadArtifact(applicationPackage)
.submit();
ApplicationVersion change = ApplicationVersion.from(sourceRevision, buildNumber);
assertEquals(change.id(), tester.controller().applications()
.require(application)
.change().application().get().id());
runDeployment(tester, app, version, Optional.empty(), Optional.of(applicationPackage));
}
private void runDeployment(DeploymentTester tester, Application app, ApplicationVersion version,
Optional<Version> upgrade, Optional<ApplicationPackage> applicationPackage) {
Version vespaVersion = upgrade.orElseGet(tester::defaultPlatformVersion);
// Deploy in test
tester.deployAndNotify(app, applicationPackage, true, systemTest);
tester.deployAndNotify(app, applicationPackage, true, stagingTest);
JobStatus expected = JobStatus.initial(stagingTest)
.withTriggering(vespaVersion, version, productionCorpUsEast1.zone(main).map(tester.application(app.id()).deployments()::get), "",
tester.clock().instant().truncatedTo(MILLIS))
.withCompletion(42, Optional.empty(), tester.clock().instant().truncatedTo(MILLIS));
assertStatus(expected, app.id(), tester.controller());
// Deploy in production
expected = JobStatus.initial(productionCorpUsEast1)
.withTriggering(vespaVersion, version, productionCorpUsEast1.zone(main).map(tester.application(app.id()).deployments()::get), "",
tester.clock().instant().truncatedTo(MILLIS))
.withCompletion(42, Optional.empty(), tester.clock().instant().truncatedTo(MILLIS));
tester.deployAndNotify(app, applicationPackage, true, productionCorpUsEast1);
assertStatus(expected, app.id(), tester.controller());
expected = JobStatus.initial(productionUsEast3)
.withTriggering(vespaVersion, version, productionUsEast3.zone(main).map(tester.application(app.id()).deployments()::get), "",
tester.clock().instant().truncatedTo(MILLIS))
.withCompletion(42, Optional.empty(), tester.clock().instant().truncatedTo(MILLIS));
tester.deployAndNotify(app, applicationPackage, true, productionUsEast3);
assertStatus(expected, app.id(), tester.controller());
// Verify deployed version
app = tester.controller().applications().require(app.id());
for (Deployment deployment : app.productionDeployments().values()) {
assertEquals(version, deployment.applicationVersion());
upgrade.ifPresent(v -> assertEquals(v, deployment.version()));
}
}
@Test
public void testDeploymentOfNewInstanceWithIllegalApplicationName() {
ControllerTester tester = new ControllerTester();
String application = "this_application_name_is_far_too_long_and_has_underscores";
ZoneId zone = ZoneId.from("test", "us-east-1");
DeployOptions options = new DeployOptions(false,
Optional.empty(),
false,
false);
tester.createTenant("tenant", "domain", null);
// Deploy an application which doesn't yet exist, and which has an illegal application name.
try {
tester.controller().applications().deploy(ApplicationId.from("tenant", application, "123"), zone, Optional.empty(), options);
fail("Illegal application name should cause validation exception.");
}
catch (IllegalArgumentException e) {
assertTrue(e.getMessage().contains("Invalid id"));
}
// Sneak an illegal application in the back door.
tester.createApplication(new ApplicationSerializer().toSlime(new Application(ApplicationId.from("tenant", application, "default"))));
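// Writing the serialized application directly to storage bypasses the name validation
// enforced on the deploy path exercised above.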
// Deploy a PR instance for the application, with no NToken.
tester.controller().applications().deploy(ApplicationId.from("tenant", application, "456"), zone, Optional.empty(), options);
assertTrue(tester.controller().applications().get(ApplicationId.from("tenant", application, "456")).isPresent());
}
}
| controller-server/src/test/java/com/yahoo/vespa/hosted/controller/ControllerTest.java | // Copyright 2018 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.hosted.controller;
import com.yahoo.component.Version;
import com.yahoo.config.application.api.ValidationId;
import com.yahoo.config.application.api.ValidationOverrides;
import com.yahoo.config.provision.ApplicationId;
import com.yahoo.config.provision.ApplicationName;
import com.yahoo.config.provision.Environment;
import com.yahoo.config.provision.InstanceName;
import com.yahoo.config.provision.RegionName;
import com.yahoo.config.provision.SystemName;
import com.yahoo.config.provision.TenantName;
import com.yahoo.vespa.athenz.api.NToken;
import com.yahoo.vespa.config.SlimeUtils;
import com.yahoo.vespa.hosted.controller.api.application.v4.model.DeployOptions;
import com.yahoo.vespa.hosted.controller.api.application.v4.model.EndpointStatus;
import com.yahoo.vespa.hosted.controller.api.identifiers.DeploymentId;
import com.yahoo.vespa.hosted.controller.api.integration.BuildService;
import com.yahoo.vespa.hosted.controller.api.integration.dns.Record;
import com.yahoo.vespa.hosted.controller.api.integration.dns.RecordName;
import com.yahoo.vespa.hosted.controller.api.integration.stubs.MockBuildService;
import com.yahoo.vespa.hosted.controller.api.integration.zone.ZoneId;
import com.yahoo.vespa.hosted.controller.application.ApplicationPackage;
import com.yahoo.vespa.hosted.controller.application.ApplicationVersion;
import com.yahoo.vespa.hosted.controller.application.Change;
import com.yahoo.vespa.hosted.controller.application.Deployment;
import com.yahoo.vespa.hosted.controller.application.DeploymentJobs.JobError;
import com.yahoo.vespa.hosted.controller.api.integration.deployment.JobType;
import com.yahoo.vespa.hosted.controller.application.JobStatus;
import com.yahoo.vespa.hosted.controller.application.SourceRevision;
import com.yahoo.vespa.hosted.controller.deployment.ApplicationPackageBuilder;
import com.yahoo.vespa.hosted.controller.deployment.BuildJob;
import com.yahoo.vespa.hosted.controller.deployment.DeploymentTester;
import com.yahoo.vespa.hosted.controller.persistence.ApplicationSerializer;
import com.yahoo.vespa.hosted.controller.rotation.RotationId;
import com.yahoo.vespa.hosted.controller.rotation.RotationLock;
import com.yahoo.vespa.hosted.controller.versions.DeploymentStatistics;
import com.yahoo.vespa.hosted.controller.versions.VersionStatus;
import com.yahoo.vespa.hosted.controller.versions.VespaVersion;
import org.junit.Test;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.time.Duration;
import java.time.Instant;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.function.Supplier;
import java.util.stream.Collectors;
import static com.yahoo.config.provision.SystemName.main;
import static com.yahoo.vespa.hosted.controller.ControllerTester.buildJob;
import static com.yahoo.vespa.hosted.controller.api.integration.deployment.JobType.component;
import static com.yahoo.vespa.hosted.controller.api.integration.deployment.JobType.productionCorpUsEast1;
import static com.yahoo.vespa.hosted.controller.api.integration.deployment.JobType.productionUsEast3;
import static com.yahoo.vespa.hosted.controller.api.integration.deployment.JobType.productionUsWest1;
import static com.yahoo.vespa.hosted.controller.api.integration.deployment.JobType.stagingTest;
import static com.yahoo.vespa.hosted.controller.api.integration.deployment.JobType.systemTest;
import static java.time.temporal.ChronoUnit.MILLIS;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
/**
* @author bratseth
* @author mpolden
*/
public class ControllerTest {
private static final ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.environment(Environment.prod)
.region("corp-us-east-1")
.build();
@Test
public void testDeployment() {
// Setup system
DeploymentTester tester = new DeploymentTester();
ApplicationController applications = tester.controller().applications();
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.environment(Environment.prod)
.region("corp-us-east-1")
.region("us-east-3")
.build();
// staging job - succeeding
Version version1 = tester.defaultPlatformVersion();
Application app1 = tester.createApplication("app1", "tenant1", 1, 11L);
tester.jobCompletion(component).application(app1).uploadArtifact(applicationPackage).submit();
assertEquals("Application version is known from completion of initial job",
ApplicationVersion.from(BuildJob.defaultSourceRevision, BuildJob.defaultBuildNumber),
tester.controller().applications().require(app1.id()).change().application().get());
tester.deployAndNotify(app1, applicationPackage, true, systemTest);
tester.deployAndNotify(app1, applicationPackage, true, stagingTest);
assertEquals(4, applications.require(app1.id()).deploymentJobs().jobStatus().size());
ApplicationVersion applicationVersion = tester.controller().applications().require(app1.id()).change().application().get();
assertFalse("Application version has been set during deployment", applicationVersion.isUnknown());
assertStatus(JobStatus.initial(stagingTest)
.withTriggering(version1, applicationVersion, Optional.empty(),"", tester.clock().instant().truncatedTo(MILLIS))
.withCompletion(42, Optional.empty(), tester.clock().instant().truncatedTo(MILLIS)), app1.id(), tester.controller());
// Causes first deployment job to be triggered
assertStatus(JobStatus.initial(productionCorpUsEast1)
.withTriggering(version1, applicationVersion, Optional.empty(), "", tester.clock().instant().truncatedTo(MILLIS)), app1.id(), tester.controller());
tester.clock().advance(Duration.ofSeconds(1));
// production job (failing) after deployment
tester.deploy(productionCorpUsEast1, app1, applicationPackage);
tester.deployAndNotify(app1, applicationPackage, false, productionCorpUsEast1);
assertEquals(4, applications.require(app1.id()).deploymentJobs().jobStatus().size());
JobStatus expectedJobStatus = JobStatus.initial(productionCorpUsEast1)
.withTriggering(version1, applicationVersion, Optional.empty(), "", tester.clock().instant().truncatedTo(MILLIS)) // Triggered first without application version info
.withCompletion(42, Optional.of(JobError.unknown), tester.clock().instant().truncatedTo(MILLIS))
.withTriggering(version1,
applicationVersion,
Optional.of(tester.application(app1.id()).deployments().get(productionCorpUsEast1.zone(main).get())),
"",
tester.clock().instant().truncatedTo(MILLIS)); // Re-triggering (due to failure) has application version info
assertStatus(expectedJobStatus, app1.id(), tester.controller());
// Simulate restart
tester.restartController();
applications = tester.controller().applications();
assertNotNull(tester.controller().tenants().tenant(TenantName.from("tenant1")));
assertNotNull(applications.get(ApplicationId.from(TenantName.from("tenant1"),
ApplicationName.from("application1"),
InstanceName.from("default"))));
assertEquals(4, applications.require(app1.id()).deploymentJobs().jobStatus().size());
tester.clock().advance(Duration.ofHours(1));
// system and staging test job - succeeding
tester.jobCompletion(component).application(app1).nextBuildNumber().uploadArtifact(applicationPackage).submit();
applicationVersion = tester.application("app1").change().application().get();
tester.deployAndNotify(app1, applicationPackage, true, systemTest);
assertStatus(JobStatus.initial(systemTest)
.withTriggering(version1, applicationVersion, productionCorpUsEast1.zone(main).map(tester.application(app1.id()).deployments()::get), "", tester.clock().instant().truncatedTo(MILLIS))
.withCompletion(42, Optional.empty(), tester.clock().instant().truncatedTo(MILLIS)),
app1.id(), tester.controller());
tester.clock().advance(Duration.ofHours(1)); // Stop retrying
tester.jobCompletion(productionCorpUsEast1).application(app1).unsuccessful().submit();
tester.deployAndNotify(app1, applicationPackage, true, stagingTest);
// production job succeeding now
expectedJobStatus = expectedJobStatus
.withTriggering(version1, applicationVersion, productionCorpUsEast1.zone(main).map(tester.application(app1.id()).deployments()::get), "", tester.clock().instant().truncatedTo(MILLIS))
.withCompletion(42, Optional.empty(), tester.clock().instant().truncatedTo(MILLIS));
tester.deployAndNotify(app1, applicationPackage, true, productionCorpUsEast1);
assertStatus(expectedJobStatus, app1.id(), tester.controller());
// causes triggering of next production job
assertStatus(JobStatus.initial(productionUsEast3)
.withTriggering(version1, applicationVersion, Optional.empty(), "", tester.clock().instant().truncatedTo(MILLIS)),
app1.id(), tester.controller());
tester.deployAndNotify(app1, applicationPackage, true, productionUsEast3);
assertEquals(5, applications.get(app1.id()).get().deploymentJobs().jobStatus().size());
// prod zone removal is not allowed
applicationPackage = new ApplicationPackageBuilder()
.environment(Environment.prod)
.region("us-east-3")
.build();
tester.jobCompletion(component).application(app1).nextBuildNumber().nextBuildNumber().uploadArtifact(applicationPackage).submit();
try {
tester.deploy(systemTest, app1, applicationPackage);
fail("Expected exception due to illegal production deployment removal");
}
catch (IllegalArgumentException e) {
assertEquals("deployment-removal: application 'tenant1.app1' is deployed in corp-us-east-1, but does not include this zone in deployment.xml. " +
ValidationOverrides.toAllowMessage(ValidationId.deploymentRemoval),
e.getMessage());
}
assertNotNull("Zone was not removed",
applications.require(app1.id()).deployments().get(productionCorpUsEast1.zone(main).get()));
JobStatus jobStatus = applications.require(app1.id()).deploymentJobs().jobStatus().get(productionCorpUsEast1);
assertNotNull("Deployment job was not removed", jobStatus);
assertEquals(42, jobStatus.lastCompleted().get().id());
assertEquals("New change available", jobStatus.lastCompleted().get().reason());
// prod zone removal is allowed with override
applicationPackage = new ApplicationPackageBuilder()
.allow(ValidationId.deploymentRemoval)
.upgradePolicy("default")
.environment(Environment.prod)
.region("us-east-3")
.build();
tester.jobCompletion(component).application(app1).nextBuildNumber(2).uploadArtifact(applicationPackage).submit();
tester.deployAndNotify(app1, applicationPackage, true, systemTest);
assertNull("Zone was removed",
applications.require(app1.id()).deployments().get(productionCorpUsEast1.zone(main).get()));
assertNull("Deployment job was removed", applications.require(app1.id()).deploymentJobs().jobStatus().get(productionCorpUsEast1));
}
@Test
public void testDeploymentApplicationVersion() {
DeploymentTester tester = new DeploymentTester();
Application app = tester.createApplication("app1", "tenant1", 1, 11L);
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.environment(Environment.prod)
.region("corp-us-east-1")
.region("us-east-3")
.build();
SourceRevision source = new SourceRevision("repo", "master", "commit1");
ApplicationVersion applicationVersion = ApplicationVersion.from(source, 101);
runDeployment(tester, app.id(), applicationVersion, applicationPackage, source, 101);
assertEquals("Artifact is downloaded twice in staging and once for other zones", 5,
tester.artifactRepository().hits(app.id(), applicationVersion.id()));
// Application is upgraded. This makes deployment orchestration pick the last successful application version in
// zones which do not have permanent deployments, e.g. test and staging
runUpgrade(tester, app.id(), applicationVersion);
}
@Test
public void testDeployVersion() {
// Setup system
DeploymentTester tester = new DeploymentTester();
ApplicationController applications = tester.controller().applications();
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.environment(Environment.prod)
.region("us-west-1")
.build();
Version systemVersion = tester.controller().versionStatus().systemVersion().get().versionNumber();
Application app1 = tester.createApplication("application1", "tenant1", 1, 1L);
// First deployment: An application change
tester.jobCompletion(component).application(app1).uploadArtifact(applicationPackage).submit();
tester.deployAndNotify(app1, applicationPackage, true, systemTest);
tester.deployAndNotify(app1, applicationPackage, true, stagingTest);
tester.deployAndNotify(app1, applicationPackage, true, productionUsWest1);
app1 = applications.require(app1.id());
assertEquals("First deployment gets system version", systemVersion, app1.oldestDeployedPlatform().get());
assertEquals(systemVersion, tester.configServer().lastPrepareVersion().get());
// Unexpected deployment
tester.deploy(productionUsWest1, app1, applicationPackage);
// applications are immutable, so any change to one, including deployment changes, would give rise to a new instance.
assertEquals("Unexpected deployment is ignored", app1, applications.require(app1.id()));
// Application change after a new system version, and a region added
Version newSystemVersion = incrementSystemVersion(tester.controller());
assertTrue(newSystemVersion.isAfter(systemVersion));
applicationPackage = new ApplicationPackageBuilder()
.environment(Environment.prod)
.region("us-west-1")
.region("us-east-3")
.build();
tester.jobCompletion(component).application(app1).nextBuildNumber().uploadArtifact(applicationPackage).submit();
tester.deployAndNotify(app1, applicationPackage, true, systemTest);
tester.deployAndNotify(app1, applicationPackage, true, stagingTest);
tester.deployAndNotify(app1, applicationPackage, true, productionUsWest1);
app1 = applications.require(app1.id());
assertEquals("Application change preserves version", systemVersion, app1.oldestDeployedPlatform().get());
assertEquals(systemVersion, tester.configServer().lastPrepareVersion().get());
// A deployment to the new region gets the same version
tester.deployAndNotify(app1, applicationPackage, true, productionUsEast3);
app1 = applications.require(app1.id());
assertEquals("Application change preserves version", systemVersion, app1.oldestDeployedPlatform().get());
assertEquals(systemVersion, tester.configServer().lastPrepareVersion().get());
assertFalse("Change deployed", app1.change().isPresent());
// Version upgrade changes system version
applications.deploymentTrigger().triggerChange(app1.id(), Change.of(newSystemVersion));
tester.deploymentTrigger().triggerReadyJobs();
tester.deployAndNotify(app1, applicationPackage, true, systemTest);
tester.deployAndNotify(app1, applicationPackage, true, stagingTest);
tester.deployAndNotify(app1, applicationPackage, true, productionUsWest1);
tester.deployAndNotify(app1, applicationPackage, true, productionUsEast3);
app1 = applications.require(app1.id());
assertEquals("Version upgrade changes version", newSystemVersion, app1.oldestDeployedPlatform().get());
assertEquals(newSystemVersion, tester.configServer().lastPrepareVersion().get());
}
/** Adds a new version, higher than the current system version, makes it the system version and returns it */
private Version incrementSystemVersion(Controller controller) {
Version systemVersion = controller.versionStatus().systemVersion().get().versionNumber();
Version newSystemVersion = new Version(systemVersion.getMajor(), systemVersion.getMinor()+1, 0);
VespaVersion newSystemVespaVersion = new VespaVersion(DeploymentStatistics.empty(newSystemVersion),
"commit1",
Instant.now(),
true,
true,
Collections.emptyList(),
VespaVersion.Confidence.low
);
List<VespaVersion> versions = new ArrayList<>(controller.versionStatus().versions());
for (int i = 0; i < versions.size(); i++) {
VespaVersion c = versions.get(i);
if (c.isSystemVersion())
versions.set(i, new VespaVersion(c.statistics(), c.releaseCommit(), c.committedAt(),
false,
false,
c.systemApplicationHostnames(),
c.confidence()));
}
versions.add(newSystemVespaVersion);
controller.updateVersionStatus(new VersionStatus(versions));
return newSystemVersion;
}
@Test
public void testPullRequestDeployment() {
// Setup system
ControllerTester tester = new ControllerTester();
ApplicationController applications = tester.controller().applications();
// staging deployment
long app1ProjectId = 22;
ApplicationId app1 = tester.createAndDeploy("tenant1", "domain1",
"application1", Environment.staging,
app1ProjectId).id();
// pull-request deployment - uses different instance id
ApplicationId app1pr = tester.createAndDeploy("tenant1", "domain1",
"application1", "1",
Environment.staging, app1ProjectId, null).id();
assertTrue(applications.get(app1).isPresent());
assertEquals(app1, applications.get(app1).get().id());
assertTrue(applications.get(app1pr).isPresent());
assertEquals(app1pr, applications.get(app1pr).get().id());
// Simulate restart
tester.createNewController();
applications = tester.controller().applications();
assertTrue(applications.get(app1).isPresent());
assertEquals(app1, applications.get(app1).get().id());
assertTrue(applications.get(app1pr).isPresent());
assertEquals(app1pr, applications.get(app1pr).get().id());
// Deleting application also removes PR instance
ApplicationId app2 = tester.createAndDeploy("tenant1", "domain1",
"application2", Environment.staging,
33).id();
tester.controller().applications().deleteApplication(app1, Optional.of(new NToken("ntoken")));
assertEquals("All instances deleted", 0,
tester.controller().applications().asList(app1.tenant()).stream()
.filter(app -> app.id().application().equals(app1.application()))
.count());
assertEquals("Other application survives", 1,
tester.controller().applications().asList(app1.tenant()).stream()
.filter(app -> app.id().application().equals(app2.application()))
.count());
}
@Test
public void testFailingSinceUpdates() {
// Setup system
DeploymentTester tester = new DeploymentTester();
// Setup application
Application app = tester.createApplication("app1", "foo", 1, 1L);
// Initial failure
Instant initialFailure = tester.clock().instant().truncatedTo(MILLIS);
tester.jobCompletion(component).application(app).uploadArtifact(applicationPackage).submit();
tester.deployAndNotify(app, applicationPackage, false, systemTest);
assertEquals("Failure age is right at initial failure",
initialFailure, firstFailing(app, tester).get().at());
// Failure again -- failingSince should remain the same
tester.clock().advance(Duration.ofMillis(1000));
tester.deployAndNotify(app, applicationPackage, false, systemTest);
assertEquals("Failure age is right at second consecutive failure",
initialFailure, firstFailing(app, tester).get().at());
// Success resets failingSince
tester.clock().advance(Duration.ofMillis(1000));
tester.deployAndNotify(app, applicationPackage, true, systemTest);
assertFalse(firstFailing(app, tester).isPresent());
// Complete deployment
tester.deployAndNotify(app, applicationPackage, true, stagingTest);
tester.deployAndNotify(app, applicationPackage, true, productionCorpUsEast1);
// Two repeated failures again.
// Initial failure
tester.clock().advance(Duration.ofMillis(1000));
initialFailure = tester.clock().instant().truncatedTo(MILLIS);
tester.jobCompletion(component).application(app).nextBuildNumber().uploadArtifact(applicationPackage).submit();
tester.deployAndNotify(app, applicationPackage, false, systemTest);
assertEquals("Failure age is right at initial failure",
initialFailure, firstFailing(app, tester).get().at());
// Failure again -- failingSince should remain the same
tester.clock().advance(Duration.ofMillis(1000));
tester.deployAndNotify(app, applicationPackage, false, systemTest);
assertEquals("Failure age is right at second consecutive failure",
initialFailure, firstFailing(app, tester).get().at());
}
private Optional<JobStatus.JobRun> firstFailing(Application application, DeploymentTester tester) {
return tester.controller().applications().get(application.id()).get().deploymentJobs().jobStatus().get(systemTest).firstFailing();
}
@Test
public void requeueOutOfCapacityStagingJob() {
DeploymentTester tester = new DeploymentTester();
long project1 = 1;
long project2 = 2;
long project3 = 3;
Application app1 = tester.createApplication("app1", "tenant1", project1, 1L);
Application app2 = tester.createApplication("app2", "tenant2", project2, 1L);
Application app3 = tester.createApplication("app3", "tenant3", project3, 1L);
MockBuildService mockBuildService = tester.buildService();
// all applications: system-test completes successfully with some time in between, to determine trigger order.
tester.jobCompletion(component).application(app2).uploadArtifact(applicationPackage).submit();
tester.deployAndNotify(app2, applicationPackage, true, systemTest);
tester.clock().advance(Duration.ofMinutes(1));
tester.jobCompletion(component).application(app1).uploadArtifact(applicationPackage).submit();
tester.deployAndNotify(app1, applicationPackage, true, systemTest);
tester.clock().advance(Duration.ofMinutes(1));
tester.jobCompletion(component).application(app3).uploadArtifact(applicationPackage).submit();
tester.deployAndNotify(app3, applicationPackage, true, systemTest);
// all applications: staging test jobs queued
assertEquals(3, mockBuildService.jobs().size());
// Abort all running jobs, so we have three candidate jobs, of which only one should be triggered at a time.
tester.buildService().clear();
List<BuildService.BuildJob> jobs = new ArrayList<>();
assertJobsInOrder(jobs, tester.buildService().jobs());
tester.triggerUntilQuiescence();
jobs.add(buildJob(app2, stagingTest));
jobs.add(buildJob(app1, stagingTest));
jobs.add(buildJob(app3, stagingTest));
assertJobsInOrder(jobs, tester.buildService().jobs());
// Remove the jobs for app1 and app2, and then let app3 fail with outOfCapacity.
// All three jobs are now eligible, but the one for app3 should trigger first as an outOfCapacity-retry.
tester.buildService().remove(buildJob(app1, stagingTest));
tester.buildService().remove(buildJob(app2, stagingTest));
jobs.remove(buildJob(app1, stagingTest));
jobs.remove(buildJob(app2, stagingTest));
tester.jobCompletion(stagingTest).application(app3).error(JobError.outOfCapacity).submit();
assertJobsInOrder(jobs, tester.buildService().jobs());
tester.triggerUntilQuiescence();
jobs.add(buildJob(app2, stagingTest));
jobs.add(buildJob(app1, stagingTest));
assertJobsInOrder(jobs, tester.buildService().jobs());
// Finish deployment for apps 2 and 3, then release a new version, leaving only app1 with an application upgrade.
tester.deployAndNotify(app2, applicationPackage, true, stagingTest);
tester.deployAndNotify(app2, applicationPackage, true, productionCorpUsEast1);
tester.deployAndNotify(app3, applicationPackage, true, stagingTest);
tester.deployAndNotify(app3, applicationPackage, true, productionCorpUsEast1);
tester.upgradeSystem(new Version("6.2"));
// app1 also gets a new application change, so its time of availability is after the version upgrade.
tester.clock().advance(Duration.ofMinutes(1));
tester.buildService().clear();
tester.jobCompletion(component).application(app1).nextBuildNumber().uploadArtifact(applicationPackage).submit();
jobs.clear();
jobs.add(buildJob(app1, stagingTest));
jobs.add(buildJob(app1, systemTest));
// Tests for app1 trigger before the others since it carries an application upgrade.
assertJobsInOrder(jobs, tester.buildService().jobs());
// Let the test jobs start, then remove everything except the system test for app3, which fails with outOfCapacity again.
tester.triggerUntilQuiescence();
tester.buildService().remove(buildJob(app1, systemTest));
tester.buildService().remove(buildJob(app2, systemTest));
tester.buildService().remove(buildJob(app1, stagingTest));
tester.buildService().remove(buildJob(app2, stagingTest));
tester.buildService().remove(buildJob(app3, stagingTest));
tester.jobCompletion(systemTest).application(app3).error(JobError.outOfCapacity).submit();
jobs.clear();
jobs.add(buildJob(app1, stagingTest));
jobs.add(buildJob(app3, systemTest));
assertJobsInOrder(jobs, tester.buildService().jobs());
tester.triggerUntilQuiescence();
jobs.add(buildJob(app2, stagingTest));
jobs.add(buildJob(app1, systemTest));
jobs.add(buildJob(app3, stagingTest));
jobs.add(buildJob(app2, systemTest));
assertJobsInOrder(jobs, tester.buildService().jobs());
}
/** Verifies that the given job lists have the same jobs, ignoring order of jobs that may have been triggered concurrently. */
private static void assertJobsInOrder(List<BuildService.BuildJob> expected, List<BuildService.BuildJob> actual) {
assertEquals(expected.stream().filter(job -> job.jobName().equals("system-test")).collect(Collectors.toList()),
actual.stream().filter(job -> job.jobName().equals("system-test")).collect(Collectors.toList()));
assertEquals(expected.stream().filter(job -> job.jobName().equals("staging-test")).collect(Collectors.toList()),
actual.stream().filter(job -> job.jobName().equals("staging-test")).collect(Collectors.toList()));
assertTrue(expected.containsAll(actual));
assertTrue(actual.containsAll(expected));
}
private void assertStatus(JobStatus expectedStatus, ApplicationId id, Controller controller) {
Application app = controller.applications().get(id).get();
JobStatus existingStatus = app.deploymentJobs().jobStatus().get(expectedStatus.type());
assertNotNull("Status of type " + expectedStatus.type() + " is present", existingStatus);
assertEquals(expectedStatus, existingStatus);
}
@Test
public void testGlobalRotations() throws IOException {
// Setup tester and app def
ControllerTester tester = new ControllerTester();
ZoneId zone = ZoneId.from(Environment.defaultEnvironment(), RegionName.defaultName());
ApplicationId appId = ApplicationId.from("tenant", "app1", "default");
DeploymentId deployId = new DeploymentId(appId, zone);
// Check initial rotation status
Map<String, EndpointStatus> rotationStatus = tester.controller().applications().getGlobalRotationStatus(deployId);
assertEquals(1, rotationStatus.size());
assertTrue(rotationStatus.get("qrs-endpoint").getStatus().equals(EndpointStatus.Status.in));
// Set the global rotations out of service
EndpointStatus status = new EndpointStatus(EndpointStatus.Status.out, "Testing I said", "Test", tester.clock().instant().getEpochSecond());
List<String> overrides = tester.controller().applications().setGlobalRotationStatus(deployId, status);
assertEquals(1, overrides.size());
// Recheck the override rotation status
rotationStatus = tester.controller().applications().getGlobalRotationStatus(deployId);
assertEquals(1, rotationStatus.size());
assertTrue(rotationStatus.get("qrs-endpoint").getStatus().equals(EndpointStatus.Status.out));
assertTrue(rotationStatus.get("qrs-endpoint").getReason().equals("Testing I said"));
}
@Test
public void testCleanupOfStaleDeploymentData() throws IOException {
DeploymentTester tester = new DeploymentTester();
tester.controllerTester().zoneRegistry().setSystemName(SystemName.cd);
tester.controllerTester().zoneRegistry().setZones(ZoneId.from("prod", "cd-us-central-1"));
Supplier<Map<JobType, JobStatus>> statuses = () ->
tester.application(ApplicationId.from("vespa", "canary", "default"))
.deploymentJobs().jobStatus();
// Current system version, matches version in test data
Version version = Version.fromString("6.141.117");
tester.upgradeSystem(version);
assertEquals(version, tester.controller().versionStatus().systemVersion().get().versionNumber());
// Load test data
byte[] json = Files.readAllBytes(Paths.get("src/test/java/com/yahoo/vespa/hosted/controller/maintenance/testdata/canary-with-stale-data.json"));
Application application = tester.controllerTester().createApplication(SlimeUtils.jsonToSlime(json));
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.upgradePolicy("canary")
.region("cd-us-central-1")
.build();
tester.jobCompletion(component).application(application).uploadArtifact(applicationPackage).submit();
long cdJobsCount = statuses.get().keySet().stream()
.filter(type -> type.zone(SystemName.cd).isPresent())
.count();
long mainJobsCount = statuses.get().keySet().stream()
.filter(type -> type.zone(main).isPresent() && ! type.zone(SystemName.cd).isPresent())
.count();
assertEquals("Irrelevant (main) data is present.", 8, mainJobsCount);
// New version is released
version = Version.fromString("6.142.1");
tester.upgradeSystem(version);
assertEquals(version, tester.controller().versionStatus().systemVersion().get().versionNumber());
// Test environment passes
tester.deployAndNotify(application, applicationPackage, true, systemTest);
long newCdJobsCount = statuses.get().keySet().stream()
.filter(type -> type.zone(SystemName.cd).isPresent())
.count();
long newMainJobsCount = statuses.get().keySet().stream()
.filter(type -> type.zone(main).isPresent() && ! type.zone(SystemName.cd).isPresent())
.count();
assertEquals("Irrelevant (main) job data is removed.", 0, newMainJobsCount);
assertEquals("Relevant (cd) data is not removed.", cdJobsCount, newCdJobsCount);
}
@Test
public void testDnsAliasRegistration() {
DeploymentTester tester = new DeploymentTester();
Application application = tester.createApplication("app1", "tenant1", 1, 1L);
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.environment(Environment.prod)
.globalServiceId("foo")
.region("us-west-1")
.region("us-central-1") // Two deployments should result in each DNS alias being registered once
.build();
tester.deployCompletely(application, applicationPackage);
assertEquals(3, tester.controllerTester().nameService().records().size());
Optional<Record> record = tester.controllerTester().nameService().findRecord(
Record.Type.CNAME, RecordName.from("app1--tenant1.global.vespa.yahooapis.com")
);
assertTrue(record.isPresent());
assertEquals("app1--tenant1.global.vespa.yahooapis.com", record.get().name().asString());
assertEquals("rotation-fqdn-01.", record.get().data().asString());
record = tester.controllerTester().nameService().findRecord(
Record.Type.CNAME, RecordName.from("app1--tenant1.global.vespa.oath.cloud")
);
assertTrue(record.isPresent());
assertEquals("app1--tenant1.global.vespa.oath.cloud", record.get().name().asString());
assertEquals("rotation-fqdn-01.", record.get().data().asString());
record = tester.controllerTester().nameService().findRecord(
Record.Type.CNAME, RecordName.from("app1.tenant1.global.vespa.yahooapis.com")
);
assertTrue(record.isPresent());
assertEquals("app1.tenant1.global.vespa.yahooapis.com", record.get().name().asString());
assertEquals("rotation-fqdn-01.", record.get().data().asString());
}
@Test
public void testUpdatesExistingDnsAlias() {
DeploymentTester tester = new DeploymentTester();
// Application 1 is deployed and deleted
{
Application app1 = tester.createApplication("app1", "tenant1", 1, 1L);
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.environment(Environment.prod)
.globalServiceId("foo")
.region("us-west-1")
.region("us-central-1") // Two deployments should result in each DNS alias being registered once
.build();
tester.deployCompletely(app1, applicationPackage);
assertEquals(3, tester.controllerTester().nameService().records().size());
Optional<Record> record = tester.controllerTester().nameService().findRecord(
Record.Type.CNAME, RecordName.from("app1--tenant1.global.vespa.yahooapis.com")
);
assertTrue(record.isPresent());
assertEquals("app1--tenant1.global.vespa.yahooapis.com", record.get().name().asString());
assertEquals("rotation-fqdn-01.", record.get().data().asString());
record = tester.controllerTester().nameService().findRecord(
Record.Type.CNAME, RecordName.from("app1.tenant1.global.vespa.yahooapis.com")
);
assertTrue(record.isPresent());
assertEquals("app1.tenant1.global.vespa.yahooapis.com", record.get().name().asString());
assertEquals("rotation-fqdn-01.", record.get().data().asString());
// Application is deleted and rotation is unassigned
applicationPackage = new ApplicationPackageBuilder()
.environment(Environment.prod)
.allow(ValidationId.deploymentRemoval)
.build();
tester.jobCompletion(component).application(app1).nextBuildNumber().uploadArtifact(applicationPackage).submit();
tester.deployAndNotify(app1, applicationPackage, true, systemTest);
tester.applications().deactivate(app1, ZoneId.from(Environment.test, RegionName.from("us-east-1")));
tester.applications().deactivate(app1, ZoneId.from(Environment.staging, RegionName.from("us-east-3")));
tester.applications().deleteApplication(app1.id(), Optional.of(new NToken("ntoken")));
try (RotationLock lock = tester.applications().rotationRepository().lock()) {
assertTrue("Rotation is unassigned",
tester.applications().rotationRepository().availableRotations(lock)
.containsKey(new RotationId("rotation-id-01")));
}
// Records remain
record = tester.controllerTester().nameService().findRecord(
Record.Type.CNAME, RecordName.from("app1--tenant1.global.vespa.yahooapis.com")
);
assertTrue(record.isPresent());
record = tester.controllerTester().nameService().findRecord(
Record.Type.CNAME, RecordName.from("app1--tenant1.global.vespa.oath.cloud")
);
assertTrue(record.isPresent());
record = tester.controllerTester().nameService().findRecord(
Record.Type.CNAME, RecordName.from("app1.tenant1.global.vespa.yahooapis.com")
);
assertTrue(record.isPresent());
}
// Application 2 is deployed and assigned same rotation as application 1 had before deletion
{
Application app2 = tester.createApplication("app2", "tenant2", 2, 1L);
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.environment(Environment.prod)
.globalServiceId("foo")
.region("us-west-1")
.region("us-central-1")
.build();
tester.deployCompletely(app2, applicationPackage);
assertEquals(6, tester.controllerTester().nameService().records().size());
Optional<Record> record = tester.controllerTester().nameService().findRecord(
Record.Type.CNAME, RecordName.from("app2--tenant2.global.vespa.yahooapis.com")
);
assertTrue(record.isPresent());
assertEquals("app2--tenant2.global.vespa.yahooapis.com", record.get().name().asString());
assertEquals("rotation-fqdn-01.", record.get().data().asString());
record = tester.controllerTester().nameService().findRecord(
Record.Type.CNAME, RecordName.from("app2--tenant2.global.vespa.oath.cloud")
);
assertTrue(record.isPresent());
assertEquals("app2--tenant2.global.vespa.oath.cloud", record.get().name().asString());
assertEquals("rotation-fqdn-01.", record.get().data().asString());
record = tester.controllerTester().nameService().findRecord(
Record.Type.CNAME, RecordName.from("app2.tenant2.global.vespa.yahooapis.com")
);
assertTrue(record.isPresent());
assertEquals("app2.tenant2.global.vespa.yahooapis.com", record.get().name().asString());
assertEquals("rotation-fqdn-01.", record.get().data().asString());
}
// Application 1 is recreated, deployed and assigned a new rotation
{
tester.buildService().clear();
Application app1 = tester.createApplication("app1", "tenant1", 1, 1L);
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.environment(Environment.prod)
.globalServiceId("foo")
.region("us-west-1")
.region("us-central-1")
.build();
tester.deployCompletely(app1, applicationPackage);
app1 = tester.applications().require(app1.id());
assertEquals("rotation-id-02", app1.rotation().get().id().asString());
// Existing DNS records are updated to point to the newly assigned rotation
assertEquals(6, tester.controllerTester().nameService().records().size());
Optional<Record> record = tester.controllerTester().nameService().findRecord(
Record.Type.CNAME, RecordName.from("app1--tenant1.global.vespa.yahooapis.com")
);
assertTrue(record.isPresent());
assertEquals("rotation-fqdn-02.", record.get().data().asString());
record = tester.controllerTester().nameService().findRecord(
Record.Type.CNAME, RecordName.from("app1--tenant1.global.vespa.oath.cloud")
);
assertTrue(record.isPresent());
assertEquals("rotation-fqdn-02.", record.get().data().asString());
record = tester.controllerTester().nameService().findRecord(
Record.Type.CNAME, RecordName.from("app1.tenant1.global.vespa.yahooapis.com")
);
assertTrue(record.isPresent());
assertEquals("rotation-fqdn-02.", record.get().data().asString());
}
}
@Test
public void testDeployDirectly() {
DeploymentTester tester = new DeploymentTester();
tester.controllerTester().zoneRegistry().setSystemName(SystemName.cd);
tester.controllerTester().zoneRegistry().setZones(ZoneId.from("prod", "cd-us-central-1"));
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.environment(Environment.prod)
.region("cd-us-central-1")
.build();
// Create application
Application app = tester.createApplication("app1", "tenant1", 1, 2L);
// Direct deploy is allowed when deployDirectly is true
ZoneId zone = ZoneId.from("prod", "cd-us-central-1");
// Same options as used in our integration tests
DeployOptions options = new DeployOptions(true, Optional.empty(), false,
false);
tester.controller().applications().deploy(app.id(), zone, Optional.of(applicationPackage), options);
assertTrue("Application deployed and activated",
tester.controllerTester().configServer().application(app.id()).get().activated());
assertTrue("No job status added",
tester.applications().require(app.id()).deploymentJobs().jobStatus().isEmpty());
}
private void runUpgrade(DeploymentTester tester, ApplicationId application, ApplicationVersion version) {
Version next = Version.fromString("6.2");
tester.upgradeSystem(next);
runDeployment(tester, tester.applications().require(application), version, Optional.of(next), Optional.empty());
}
private void runDeployment(DeploymentTester tester, ApplicationId application, ApplicationVersion version,
ApplicationPackage applicationPackage, SourceRevision sourceRevision, long buildNumber) {
Application app = tester.applications().require(application);
tester.jobCompletion(component)
.application(app)
.buildNumber(buildNumber)
.sourceRevision(sourceRevision)
.uploadArtifact(applicationPackage)
.submit();
ApplicationVersion change = ApplicationVersion.from(sourceRevision, buildNumber);
assertEquals(change.id(), tester.controller().applications()
.require(application)
.change().application().get().id());
runDeployment(tester, app, version, Optional.empty(), Optional.of(applicationPackage));
}
private void runDeployment(DeploymentTester tester, Application app, ApplicationVersion version,
Optional<Version> upgrade, Optional<ApplicationPackage> applicationPackage) {
Version vespaVersion = upgrade.orElseGet(tester::defaultPlatformVersion);
// Deploy in test
tester.deployAndNotify(app, applicationPackage, true, systemTest);
tester.deployAndNotify(app, applicationPackage, true, stagingTest);
JobStatus expected = JobStatus.initial(stagingTest)
.withTriggering(vespaVersion, version, productionCorpUsEast1.zone(main).map(tester.application(app.id()).deployments()::get), "",
tester.clock().instant().truncatedTo(MILLIS))
.withCompletion(42, Optional.empty(), tester.clock().instant().truncatedTo(MILLIS));
assertStatus(expected, app.id(), tester.controller());
// Deploy in production
expected = JobStatus.initial(productionCorpUsEast1)
.withTriggering(vespaVersion, version, productionCorpUsEast1.zone(main).map(tester.application(app.id()).deployments()::get), "",
tester.clock().instant().truncatedTo(MILLIS))
.withCompletion(42, Optional.empty(), tester.clock().instant().truncatedTo(MILLIS));
tester.deployAndNotify(app, applicationPackage, true, productionCorpUsEast1);
assertStatus(expected, app.id(), tester.controller());
expected = JobStatus.initial(productionUsEast3)
.withTriggering(vespaVersion, version, productionUsEast3.zone(main).map(tester.application(app.id()).deployments()::get), "",
tester.clock().instant().truncatedTo(MILLIS))
.withCompletion(42, Optional.empty(), tester.clock().instant().truncatedTo(MILLIS));
tester.deployAndNotify(app, applicationPackage, true, productionUsEast3);
assertStatus(expected, app.id(), tester.controller());
// Verify deployed version
app = tester.controller().applications().require(app.id());
for (Deployment deployment : app.productionDeployments().values()) {
assertEquals(version, deployment.applicationVersion());
upgrade.ifPresent(v -> assertEquals(v, deployment.version()));
}
}
@Test
public void testDeploymentOfNewInstanceWithIllegalApplicationName() {
ControllerTester tester = new ControllerTester();
String application = "this_application_name_is_far_too_long_and_has_underscores";
ZoneId zone = ZoneId.from("test", "us-east-1");
DeployOptions options = new DeployOptions(false,
Optional.empty(),
false,
false);
tester.createTenant("tenant", "domain", null);
// Deploy an application which doesn't yet exist, and which has an illegal application name.
try {
tester.controller().applications().deploy(ApplicationId.from("tenant", application, "123"), zone, Optional.empty(), options);
fail("Illegal application name should cause validation exception.");
}
catch (IllegalArgumentException e) {
assertTrue(e.getMessage().contains("Invalid id"));
}
// Sneak an illegal application in the back door.
tester.createApplication(new ApplicationSerializer().toSlime(new Application(ApplicationId.from("tenant", application, "default"))));
// Deploy a PR instance for the application, with no NToken.
tester.controller().applications().deploy(ApplicationId.from("tenant", application, "456"), zone, Optional.empty(), options);
assertTrue(tester.controller().applications().get(ApplicationId.from("tenant", application, "456")).isPresent());
}
}
| Simplify asserts
| controller-server/src/test/java/com/yahoo/vespa/hosted/controller/ControllerTest.java | Simplify asserts | <ide><path>ontroller-server/src/test/java/com/yahoo/vespa/hosted/controller/ControllerTest.java
<ide> Map<String, EndpointStatus> rotationStatus = tester.controller().applications().getGlobalRotationStatus(deployId);
<ide> assertEquals(1, rotationStatus.size());
<ide>
<del> assertTrue(rotationStatus.get("qrs-endpoint").getStatus().equals(EndpointStatus.Status.in));
<add> assertEquals(rotationStatus.get("qrs-endpoint").getStatus(), EndpointStatus.Status.in);
<ide>
<ide> // Set the global rotations out of service
<del> EndpointStatus status = new EndpointStatus(EndpointStatus.Status.out, "Testing I said", "Test", tester.clock().instant().getEpochSecond());
<add> EndpointStatus status = new EndpointStatus(EndpointStatus.Status.out, "unit-test", "Test", tester.clock().instant().getEpochSecond());
<ide> List<String> overrides = tester.controller().applications().setGlobalRotationStatus(deployId, status);
<ide> assertEquals(1, overrides.size());
<ide>
<ide> // Recheck the override rotation status
<ide> rotationStatus = tester.controller().applications().getGlobalRotationStatus(deployId);
<ide> assertEquals(1, rotationStatus.size());
<del> assertTrue(rotationStatus.get("qrs-endpoint").getStatus().equals(EndpointStatus.Status.out));
<del> assertTrue(rotationStatus.get("qrs-endpoint").getReason().equals("Testing I said"));
<add> assertEquals(rotationStatus.get("qrs-endpoint").getStatus(), EndpointStatus.Status.out);
<add> assertEquals("unit-test", rotationStatus.get("qrs-endpoint").getReason());
<ide> }
<ide>
<ide> @Test |
|
Java | apache-2.0 | 42b587402fddf00bd6759b94d6cf76d7676e8141 | 0 | lqbweb/logging-log4j2,pisfly/logging-log4j2,lburgazzoli/apache-logging-log4j2,neuro-sys/logging-log4j2,renchunxiao/logging-log4j2,apache/logging-log4j2,codescale/logging-log4j2,GFriedrich/logging-log4j2,xnslong/logging-log4j2,lburgazzoli/logging-log4j2,lqbweb/logging-log4j2,ChetnaChaudhari/logging-log4j2,codescale/logging-log4j2,xnslong/logging-log4j2,lburgazzoli/apache-logging-log4j2,neuro-sys/logging-log4j2,xnslong/logging-log4j2,lburgazzoli/logging-log4j2,lqbweb/logging-log4j2,jinxuan/logging-log4j2,lburgazzoli/logging-log4j2,jsnikhil/nj-logging-log4j2,apache/logging-log4j2,renchunxiao/logging-log4j2,ChetnaChaudhari/logging-log4j2,MagicWiz/log4j2,jsnikhil/nj-logging-log4j2,codescale/logging-log4j2,pisfly/logging-log4j2,jinxuan/logging-log4j2,apache/logging-log4j2,GFriedrich/logging-log4j2,MagicWiz/log4j2,lburgazzoli/apache-logging-log4j2,GFriedrich/logging-log4j2 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache license, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the license for the specific language governing permissions and
* limitations under the license.
*/
package org.apache.logging.log4j;
import java.net.URI;
import java.util.Map;
import java.util.SortedMap;
import java.util.TreeMap;
import org.apache.logging.log4j.message.MessageFactory;
import org.apache.logging.log4j.message.StringFormatterMessageFactory;
import org.apache.logging.log4j.simple.SimpleLoggerContextFactory;
import org.apache.logging.log4j.spi.LoggerContext;
import org.apache.logging.log4j.spi.LoggerContextFactory;
import org.apache.logging.log4j.spi.Provider;
import org.apache.logging.log4j.status.StatusLogger;
import org.apache.logging.log4j.util.LoaderUtil;
import org.apache.logging.log4j.util.PropertiesUtil;
import org.apache.logging.log4j.util.ProviderUtil;
import org.apache.logging.log4j.util.Strings;
/**
* The anchor point for the logging system. The most common usage of this class is to obtain a named
* {@link Logger}. The method {@link #getLogger()} is provided as the most convenient way to obtain a named Logger
 * based on the calling class name. This class also provides methods for obtaining named Loggers that use
* {@link String#format(String, Object...)} style messages instead of the default type of parameterized messages.
* These are obtained through the {@link #getFormatterLogger(Class)} family of methods. Other service provider methods
* are given through the {@link #getContext()} and {@link #getFactory()} family of methods; these methods are not
* normally useful for typical usage of Log4j.
*/
public class LogManager {
private static volatile LoggerContextFactory factory;
/**
* Log4j property to set to the fully qualified class name of a custom implementation of
* {@link org.apache.logging.log4j.spi.LoggerContextFactory}.
*/
public static final String FACTORY_PROPERTY_NAME = "log4j2.loggerContextFactory";
private static final Logger LOGGER = StatusLogger.getLogger();
/**
* The name of the root Logger.
*/
public static final String ROOT_LOGGER_NAME = Strings.EMPTY;
/**
 * Scans the classpath to find all logging implementations. Currently, only one will
* be used but this could be extended to allow multiple implementations to be used.
*/
static {
// Shortcut binding to force a specific logging implementation.
final PropertiesUtil managerProps = PropertiesUtil.getProperties();
final String factoryClassName = managerProps.getStringProperty(FACTORY_PROPERTY_NAME);
final ClassLoader cl = LoaderUtil.getThreadContextClassLoader();
if (factoryClassName != null) {
try {
final Class<?> clazz = cl.loadClass(factoryClassName);
if (LoggerContextFactory.class.isAssignableFrom(clazz)) {
factory = (LoggerContextFactory) clazz.newInstance();
}
} catch (final ClassNotFoundException cnfe) {
LOGGER.error("Unable to locate configured LoggerContextFactory {}", factoryClassName);
} catch (final Exception ex) {
LOGGER.error("Unable to create configured LoggerContextFactory {}", factoryClassName, ex);
}
}
if (factory == null) {
final SortedMap<Integer, LoggerContextFactory> factories = new TreeMap<Integer, LoggerContextFactory>();
// note that the following initial call to ProviderUtil may block until a Provider has been installed when
// running in an OSGi environment
if (ProviderUtil.hasProviders()) {
for (final Provider provider : ProviderUtil.getProviders()) {
final Class<? extends LoggerContextFactory> factoryClass = provider.loadLoggerContextFactory();
if (factoryClass != null) {
try {
factories.put(provider.getPriority(), factoryClass.newInstance());
} catch (final Exception e) {
LOGGER.error("Unable to create class {} specified in {}", factoryClass.getName(),
provider.getUrl().toString(), e);
}
}
}
if (factories.isEmpty()) {
LOGGER.error("Log4j2 could not find a logging implementation. Please add log4j-core to the classpath. Using SimpleLogger to log to the console...");
factory = new SimpleLoggerContextFactory();
} else {
final StringBuilder sb = new StringBuilder("Multiple logging implementations found: \n");
for (final Map.Entry<Integer, LoggerContextFactory> entry : factories.entrySet()) {
sb.append("Factory: ").append(entry.getValue().getClass().getName());
sb.append(", Weighting: ").append(entry.getKey()).append('\n');
}
factory = factories.get(factories.lastKey());
sb.append("Using factory: ").append(factory.getClass().getName());
LOGGER.warn(sb.toString());
}
} else {
LOGGER.error("Log4j2 could not find a logging implementation. Please add log4j-core to the classpath. Using SimpleLogger to log to the console...");
factory = new SimpleLoggerContextFactory();
}
}
}
/**
* Detects if a Logger with the specified name exists. This is a convenience method for porting from version 1.
*
* @param name
* The Logger name to search for.
* @return true if the Logger exists, false otherwise.
* @see LoggerContext#hasLogger(String)
*/
public static boolean exists(final String name) {
return getContext().hasLogger(name);
}
/**
* Gets the class name of the caller in the current stack at the given {@code depth}.
*
* @param depth a 0-based index in the current stack.
* @return a class name
*/
private static String getClassName(final int depth) {
return new Throwable().getStackTrace()[depth].getClassName();
}
/**
* Returns the current LoggerContext.
* <p>
* WARNING - The LoggerContext returned by this method may not be the LoggerContext used to create a Logger
* for the calling class.
* </p>
* @return The current LoggerContext.
*/
public static LoggerContext getContext() {
return factory.getContext(LogManager.class.getName(), null, null, true);
}
/**
* Returns a LoggerContext.
*
* @param currentContext if false the LoggerContext appropriate for the caller of this method is returned. For
* example, in a web application if the caller is a class in WEB-INF/lib then one LoggerContext may be
* returned and if the caller is a class in the container's classpath then a different LoggerContext may be
* returned. If true then only a single LoggerContext will be returned.
* @return a LoggerContext.
*/
public static LoggerContext getContext(final boolean currentContext) {
return factory.getContext(LogManager.class.getName(), null, null, currentContext, null, null);
}
/**
* Returns a LoggerContext.
*
* @param loader The ClassLoader for the context. If null the context will attempt to determine the appropriate
* ClassLoader.
* @param currentContext if false the LoggerContext appropriate for the caller of this method is returned. For
* example, in a web application if the caller is a class in WEB-INF/lib then one LoggerContext may be
* returned and if the caller is a class in the container's classpath then a different LoggerContext may be
* returned. If true then only a single LoggerContext will be returned.
* @return a LoggerContext.
*/
public static LoggerContext getContext(final ClassLoader loader, final boolean currentContext) {
return factory.getContext(LogManager.class.getName(), loader, null, currentContext);
}
/**
* Returns a LoggerContext.
*
* @param loader The ClassLoader for the context. If null the context will attempt to determine the appropriate
* ClassLoader.
* @param currentContext if false the LoggerContext appropriate for the caller of this method is returned. For
* example, in a web application if the caller is a class in WEB-INF/lib then one LoggerContext may be
* returned and if the caller is a class in the container's classpath then a different LoggerContext may be
* returned. If true then only a single LoggerContext will be returned.
* @param externalContext An external context (such as a ServletContext) to be associated with the LoggerContext.
* @return a LoggerContext.
*/
public static LoggerContext getContext(final ClassLoader loader, final boolean currentContext,
final Object externalContext) {
return factory.getContext(LogManager.class.getName(), loader, externalContext, currentContext);
}
/**
* Returns a LoggerContext.
*
* @param loader The ClassLoader for the context. If null the context will attempt to determine the appropriate
* ClassLoader.
* @param currentContext if false the LoggerContext appropriate for the caller of this method is returned. For
* example, in a web application if the caller is a class in WEB-INF/lib then one LoggerContext may be
* returned and if the caller is a class in the container's classpath then a different LoggerContext may be
* returned. If true then only a single LoggerContext will be returned.
* @param configLocation The URI for the configuration to use.
* @return a LoggerContext.
*/
public static LoggerContext getContext(final ClassLoader loader, final boolean currentContext,
final URI configLocation) {
return factory.getContext(LogManager.class.getName(), loader, null, currentContext, configLocation, null);
}
/**
* Returns a LoggerContext.
*
* @param loader The ClassLoader for the context. If null the context will attempt to determine the appropriate
* ClassLoader.
* @param currentContext if false the LoggerContext appropriate for the caller of this method is returned. For
* example, in a web application if the caller is a class in WEB-INF/lib then one LoggerContext may be
* returned and if the caller is a class in the container's classpath then a different LoggerContext may be
* returned. If true then only a single LoggerContext will be returned.
* @param externalContext An external context (such as a ServletContext) to be associated with the LoggerContext.
* @param configLocation The URI for the configuration to use.
* @return a LoggerContext.
*/
public static LoggerContext getContext(final ClassLoader loader, final boolean currentContext,
final Object externalContext, final URI configLocation) {
return factory.getContext(LogManager.class.getName(), loader, externalContext, currentContext, configLocation,
null);
}
/**
* Returns a LoggerContext.
*
* @param loader The ClassLoader for the context. If null the context will attempt to determine the appropriate
* ClassLoader.
* @param currentContext if false the LoggerContext appropriate for the caller of this method is returned. For
* example, in a web application if the caller is a class in WEB-INF/lib then one LoggerContext may be
* returned and if the caller is a class in the container's classpath then a different LoggerContext may be
* returned. If true then only a single LoggerContext will be returned.
* @param externalContext An external context (such as a ServletContext) to be associated with the LoggerContext.
* @param configLocation The URI for the configuration to use.
* @param name The LoggerContext name.
* @return a LoggerContext.
*/
public static LoggerContext getContext(final ClassLoader loader, final boolean currentContext,
final Object externalContext, final URI configLocation,
final String name) {
return factory.getContext(LogManager.class.getName(), loader, externalContext, currentContext, configLocation,
name);
}
/**
* Returns a LoggerContext
* @param fqcn The fully qualified class name of the Class that this method is a member of.
* @param currentContext if false the LoggerContext appropriate for the caller of this method is returned. For
* example, in a web application if the caller is a class in WEB-INF/lib then one LoggerContext may be
* returned and if the caller is a class in the container's classpath then a different LoggerContext may be
* returned. If true then only a single LoggerContext will be returned.
* @return a LoggerContext.
*/
protected static LoggerContext getContext(final String fqcn, final boolean currentContext) {
return factory.getContext(fqcn, null, null, currentContext);
}
/**
* Returns a LoggerContext
* @param fqcn The fully qualified class name of the Class that this method is a member of.
* @param loader The ClassLoader for the context. If null the context will attempt to determine the appropriate
* ClassLoader.
* @param currentContext if false the LoggerContext appropriate for the caller of this method is returned. For
* example, in a web application if the caller is a class in WEB-INF/lib then one LoggerContext may be
* returned and if the caller is a class in the container's classpath then a different LoggerContext may be
* returned. If true then only a single LoggerContext will be returned.
* @return a LoggerContext.
*/
protected static LoggerContext getContext(final String fqcn, final ClassLoader loader,
final boolean currentContext) {
return factory.getContext(fqcn, loader, null, currentContext);
}
/**
* Returns the current LoggerContextFactory.
* @return The LoggerContextFactory.
*/
public static LoggerContextFactory getFactory() {
return factory;
}
/**
* Sets the current LoggerContextFactory to use. Normally, the appropriate LoggerContextFactory is created at
* startup, but in certain environments, a LoggerContextFactory implementation may not be available at this point.
* Thus, an alternative LoggerContextFactory can be set at runtime.
*
* <p>
* Note that any Logger or LoggerContext objects already created will still be valid, but they will no longer be
* accessible through LogManager. Thus, <strong>it is a bad idea to use this method without a good reason</strong>!
* Generally, this method should be used only during startup before any code starts caching Logger objects.
* </p>
*
* @param factory the LoggerContextFactory to use.
*/
// FIXME: should we allow only one update of the factory?
public static void setFactory(final LoggerContextFactory factory) {
LogManager.factory = factory;
}
/**
* Returns a formatter Logger using the fully qualified name of the Class as the Logger name.
* <p>
 * This logger lets you use a {@link java.util.Formatter} string in the message to format parameters.
* </p>
* <p>
* Short-hand for {@code getLogger(clazz, StringFormatterMessageFactory.INSTANCE)}
* </p>
*
* @param clazz
* The Class whose name should be used as the Logger name.
* @return The Logger, created with a {@link StringFormatterMessageFactory}
* @see Logger#fatal(Marker, String, Object...)
* @see Logger#fatal(String, Object...)
* @see Logger#error(Marker, String, Object...)
* @see Logger#error(String, Object...)
* @see Logger#warn(Marker, String, Object...)
* @see Logger#warn(String, Object...)
* @see Logger#info(Marker, String, Object...)
* @see Logger#info(String, Object...)
* @see Logger#debug(Marker, String, Object...)
* @see Logger#debug(String, Object...)
* @see Logger#trace(Marker, String, Object...)
* @see Logger#trace(String, Object...)
* @see StringFormatterMessageFactory
*/
public static Logger getFormatterLogger(final Class<?> clazz) {
return getLogger(clazz != null ? clazz.getName() : getClassName(2), StringFormatterMessageFactory.INSTANCE);
}
/**
* Returns a formatter Logger using the fully qualified name of the value's Class as the Logger name.
* <p>
 * This logger lets you use a {@link java.util.Formatter} string in the message to format parameters.
* </p>
* <p>
* Short-hand for {@code getLogger(value, StringFormatterMessageFactory.INSTANCE)}
* </p>
*
* @param value
 * The value whose class name should be used as the Logger name.
* @return The Logger, created with a {@link StringFormatterMessageFactory}
* @see Logger#fatal(Marker, String, Object...)
* @see Logger#fatal(String, Object...)
* @see Logger#error(Marker, String, Object...)
* @see Logger#error(String, Object...)
* @see Logger#warn(Marker, String, Object...)
* @see Logger#warn(String, Object...)
* @see Logger#info(Marker, String, Object...)
* @see Logger#info(String, Object...)
* @see Logger#debug(Marker, String, Object...)
* @see Logger#debug(String, Object...)
* @see Logger#trace(Marker, String, Object...)
* @see Logger#trace(String, Object...)
* @see StringFormatterMessageFactory
*/
public static Logger getFormatterLogger(final Object value) {
return getLogger(value != null ? value.getClass().getName() : getClassName(2),
StringFormatterMessageFactory.INSTANCE);
}
/**
* Returns a formatter Logger with the specified name.
* <p>
 * This logger lets you use a {@link java.util.Formatter} string in the message to format parameters.
* </p>
* <p>
* Short-hand for {@code getLogger(name, StringFormatterMessageFactory.INSTANCE)}
* </p>
*
* @param name The logger name. If null it will default to the name of the calling class.
* @return The Logger, created with a {@link StringFormatterMessageFactory}
* @see Logger#fatal(Marker, String, Object...)
* @see Logger#fatal(String, Object...)
* @see Logger#error(Marker, String, Object...)
* @see Logger#error(String, Object...)
* @see Logger#warn(Marker, String, Object...)
* @see Logger#warn(String, Object...)
* @see Logger#info(Marker, String, Object...)
* @see Logger#info(String, Object...)
* @see Logger#debug(Marker, String, Object...)
* @see Logger#debug(String, Object...)
* @see Logger#trace(Marker, String, Object...)
* @see Logger#trace(String, Object...)
* @see StringFormatterMessageFactory
*/
public static Logger getFormatterLogger(final String name) {
return getLogger(name != null ? name : getClassName(2), StringFormatterMessageFactory.INSTANCE);
}
/**
* Returns a Logger with the name of the calling class.
* @return The Logger for the calling class.
*/
public static Logger getLogger() {
return getLogger(getClassName(2));
}
/**
* Returns a Logger using the fully qualified name of the Class as the Logger name.
* @param clazz The Class whose name should be used as the Logger name. If null it will default to the calling
* class.
* @return The Logger.
*/
public static Logger getLogger(final Class<?> clazz) {
return getLogger(clazz != null ? clazz.getName() : getClassName(2));
}
/**
* Returns a Logger using the fully qualified name of the Class as the Logger name.
* @param clazz The Class whose name should be used as the Logger name. If null it will default to the calling
* class.
* @param messageFactory The message factory is used only when creating a logger, subsequent use does not change
* the logger but will log a warning if mismatched.
* @return The Logger.
*/
public static Logger getLogger(final Class<?> clazz, final MessageFactory messageFactory) {
return getLogger(clazz != null ? clazz.getName() : getClassName(2), messageFactory);
}
/**
* Returns a Logger with the name of the calling class.
* @param messageFactory The message factory is used only when creating a logger, subsequent use does not change
* the logger but will log a warning if mismatched.
* @return The Logger for the calling class.
*/
public static Logger getLogger(final MessageFactory messageFactory) {
return getLogger(getClassName(2), messageFactory);
}
/**
* Returns a Logger using the fully qualified class name of the value as the Logger name.
* @param value The value whose class name should be used as the Logger name. If null the name of the calling
* class will be used as the logger name.
* @return The Logger.
*/
public static Logger getLogger(final Object value) {
return getLogger(value != null ? value.getClass().getName() : getClassName(2));
}
/**
* Returns a Logger using the fully qualified class name of the value as the Logger name.
* @param value The value whose class name should be used as the Logger name. If null the name of the calling
* class will be used as the logger name.
* @param messageFactory The message factory is used only when creating a logger, subsequent use does not change
* the logger but will log a warning if mismatched.
* @return The Logger.
*/
public static Logger getLogger(final Object value, final MessageFactory messageFactory) {
return getLogger(value != null ? value.getClass().getName() : getClassName(2), messageFactory);
}
/**
* Returns a Logger with the specified name.
*
* @param name The logger name. If null the name of the calling class will be used.
* @return The Logger.
*/
public static Logger getLogger(final String name) {
final String actualName = name != null ? name : getClassName(2);
return factory.getContext(LogManager.class.getName(), null, null, false).getLogger(actualName);
}
/**
* Returns a Logger with the specified name.
*
* @param name The logger name. If null the name of the calling class will be used.
* @param messageFactory The message factory is used only when creating a logger, subsequent use does not change
* the logger but will log a warning if mismatched.
* @return The Logger.
*/
public static Logger getLogger(final String name, final MessageFactory messageFactory) {
final String actualName = name != null ? name : getClassName(2);
return factory.getContext(LogManager.class.getName(), null, null, false).getLogger(actualName, messageFactory);
}
/**
* Returns a Logger with the specified name.
*
* @param fqcn The fully qualified class name of the class that this method is a member of.
* @param name The logger name.
* @return The Logger.
*/
protected static Logger getLogger(final String fqcn, final String name) {
return factory.getContext(fqcn, null, null, false).getLogger(name);
}
/**
* Returns the root logger.
*
* @return the root logger, named {@link #ROOT_LOGGER_NAME}.
*/
public static Logger getRootLogger() {
return getLogger(ROOT_LOGGER_NAME);
}
/**
* Prevents instantiation
*/
protected LogManager() {
}
}
| log4j-api/src/main/java/org/apache/logging/log4j/LogManager.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache license, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the license for the specific language governing permissions and
* limitations under the license.
*/
package org.apache.logging.log4j;
import java.net.URI;
import java.util.Map;
import java.util.SortedMap;
import java.util.TreeMap;
import org.apache.logging.log4j.message.MessageFactory;
import org.apache.logging.log4j.message.StringFormatterMessageFactory;
import org.apache.logging.log4j.simple.SimpleLoggerContextFactory;
import org.apache.logging.log4j.spi.LoggerContext;
import org.apache.logging.log4j.spi.LoggerContextFactory;
import org.apache.logging.log4j.spi.Provider;
import org.apache.logging.log4j.status.StatusLogger;
import org.apache.logging.log4j.util.LoaderUtil;
import org.apache.logging.log4j.util.PropertiesUtil;
import org.apache.logging.log4j.util.ProviderUtil;
import org.apache.logging.log4j.util.Strings;
/**
* The anchor point for the logging system. The most common usage of this class is to obtain a named
* {@link Logger}. The method {@link #getLogger()} is provided as the most convenient way to obtain a named Logger
 * based on the calling class name. This class also provides methods for obtaining named Loggers that use
* {@link String#format(String, Object...)} style messages instead of the default type of parameterized messages.
* These are obtained through the {@link #getFormatterLogger(Class)} family of methods. Other service provider methods
* are given through the {@link #getContext()} and {@link #getFactory()} family of methods; these methods are not
* normally useful for typical usage of Log4j.
*/
public class LogManager {
private static volatile LoggerContextFactory factory;
/**
* Log4j property to set to the fully qualified class name of a custom implementation of
* {@link org.apache.logging.log4j.spi.LoggerContextFactory}.
*/
public static final String FACTORY_PROPERTY_NAME = "log4j2.loggerContextFactory";
private static final Logger LOGGER = StatusLogger.getLogger();
/**
* The name of the root Logger.
*/
public static final String ROOT_LOGGER_NAME = Strings.EMPTY;
/**
 * Scans the classpath to find all logging implementations. Currently, only one will
* be used but this could be extended to allow multiple implementations to be used.
*/
static {
// Shortcut binding to force a specific logging implementation.
final PropertiesUtil managerProps = PropertiesUtil.getProperties();
final String factoryClassName = managerProps.getStringProperty(FACTORY_PROPERTY_NAME);
final ClassLoader cl = LoaderUtil.getThreadContextClassLoader();
if (factoryClassName != null) {
try {
final Class<?> clazz = cl.loadClass(factoryClassName);
if (LoggerContextFactory.class.isAssignableFrom(clazz)) {
factory = (LoggerContextFactory) clazz.newInstance();
}
} catch (final ClassNotFoundException cnfe) {
LOGGER.error("Unable to locate configured LoggerContextFactory {}", factoryClassName);
} catch (final Exception ex) {
LOGGER.error("Unable to create configured LoggerContextFactory {}", factoryClassName, ex);
}
}
if (factory == null) {
final SortedMap<Integer, LoggerContextFactory> factories = new TreeMap<Integer, LoggerContextFactory>();
if (ProviderUtil.hasProviders()) {
for (final Provider provider : ProviderUtil.getProviders()) {
final Class<? extends LoggerContextFactory> factoryClass = provider.loadLoggerContextFactory();
if (factoryClass != null) {
try {
factories.put(provider.getPriority(), factoryClass.newInstance());
} catch (final Exception e) {
LOGGER.error("Unable to create class {} specified in {}", factoryClass.getName(),
provider.getUrl().toString(), e);
}
}
}
if (factories.isEmpty()) {
LOGGER.error("Log4j2 could not find a logging implementation. Please add log4j-core to the classpath. Using SimpleLogger to log to the console...");
factory = new SimpleLoggerContextFactory();
} else {
final StringBuilder sb = new StringBuilder("Multiple logging implementations found: \n");
for (final Map.Entry<Integer, LoggerContextFactory> entry : factories.entrySet()) {
sb.append("Factory: ").append(entry.getValue().getClass().getName());
sb.append(", Weighting: ").append(entry.getKey()).append('\n');
}
factory = factories.get(factories.lastKey());
sb.append("Using factory: ").append(factory.getClass().getName());
LOGGER.warn(sb.toString());
}
} else {
LOGGER.error("Log4j2 could not find a logging implementation. Please add log4j-core to the classpath. Using SimpleLogger to log to the console...");
factory = new SimpleLoggerContextFactory();
}
}
}
/**
* Detects if a Logger with the specified name exists. This is a convenience method for porting from version 1.
*
* @param name
* The Logger name to search for.
* @return true if the Logger exists, false otherwise.
* @see LoggerContext#hasLogger(String)
*/
public static boolean exists(final String name) {
return getContext().hasLogger(name);
}
/**
* Gets the class name of the caller in the current stack at the given {@code depth}.
*
* @param depth a 0-based index in the current stack.
* @return a class name
*/
private static String getClassName(final int depth) {
return new Throwable().getStackTrace()[depth].getClassName();
}
/**
* Returns the current LoggerContext.
* <p>
* WARNING - The LoggerContext returned by this method may not be the LoggerContext used to create a Logger
* for the calling class.
* </p>
* @return The current LoggerContext.
*/
public static LoggerContext getContext() {
return factory.getContext(LogManager.class.getName(), null, null, true);
}
/**
* Returns a LoggerContext.
*
* @param currentContext if false the LoggerContext appropriate for the caller of this method is returned. For
* example, in a web application if the caller is a class in WEB-INF/lib then one LoggerContext may be
* returned and if the caller is a class in the container's classpath then a different LoggerContext may be
* returned. If true then only a single LoggerContext will be returned.
* @return a LoggerContext.
*/
public static LoggerContext getContext(final boolean currentContext) {
return factory.getContext(LogManager.class.getName(), null, null, currentContext, null, null);
}
/**
* Returns a LoggerContext.
*
* @param loader The ClassLoader for the context. If null the context will attempt to determine the appropriate
* ClassLoader.
* @param currentContext if false the LoggerContext appropriate for the caller of this method is returned. For
* example, in a web application if the caller is a class in WEB-INF/lib then one LoggerContext may be
* returned and if the caller is a class in the container's classpath then a different LoggerContext may be
* returned. If true then only a single LoggerContext will be returned.
* @return a LoggerContext.
*/
public static LoggerContext getContext(final ClassLoader loader, final boolean currentContext) {
return factory.getContext(LogManager.class.getName(), loader, null, currentContext);
}
/**
* Returns a LoggerContext.
*
* @param loader The ClassLoader for the context. If null the context will attempt to determine the appropriate
* ClassLoader.
* @param currentContext if false the LoggerContext appropriate for the caller of this method is returned. For
* example, in a web application if the caller is a class in WEB-INF/lib then one LoggerContext may be
* returned and if the caller is a class in the container's classpath then a different LoggerContext may be
* returned. If true then only a single LoggerContext will be returned.
* @param externalContext An external context (such as a ServletContext) to be associated with the LoggerContext.
* @return a LoggerContext.
*/
public static LoggerContext getContext(final ClassLoader loader, final boolean currentContext,
final Object externalContext) {
return factory.getContext(LogManager.class.getName(), loader, externalContext, currentContext);
}
/**
* Returns a LoggerContext.
*
* @param loader The ClassLoader for the context. If null the context will attempt to determine the appropriate
* ClassLoader.
* @param currentContext if false the LoggerContext appropriate for the caller of this method is returned. For
* example, in a web application if the caller is a class in WEB-INF/lib then one LoggerContext may be
* returned and if the caller is a class in the container's classpath then a different LoggerContext may be
* returned. If true then only a single LoggerContext will be returned.
* @param configLocation The URI for the configuration to use.
* @return a LoggerContext.
*/
public static LoggerContext getContext(final ClassLoader loader, final boolean currentContext,
final URI configLocation) {
return factory.getContext(LogManager.class.getName(), loader, null, currentContext, configLocation, null);
}
/**
* Returns a LoggerContext.
*
* @param loader The ClassLoader for the context. If null the context will attempt to determine the appropriate
* ClassLoader.
* @param currentContext if false the LoggerContext appropriate for the caller of this method is returned. For
* example, in a web application if the caller is a class in WEB-INF/lib then one LoggerContext may be
* returned and if the caller is a class in the container's classpath then a different LoggerContext may be
* returned. If true then only a single LoggerContext will be returned.
* @param externalContext An external context (such as a ServletContext) to be associated with the LoggerContext.
* @param configLocation The URI for the configuration to use.
* @return a LoggerContext.
*/
public static LoggerContext getContext(final ClassLoader loader, final boolean currentContext,
final Object externalContext, final URI configLocation) {
return factory.getContext(LogManager.class.getName(), loader, externalContext, currentContext, configLocation,
null);
}
/**
* Returns a LoggerContext.
*
* @param loader The ClassLoader for the context. If null the context will attempt to determine the appropriate
* ClassLoader.
* @param currentContext if false the LoggerContext appropriate for the caller of this method is returned. For
* example, in a web application if the caller is a class in WEB-INF/lib then one LoggerContext may be
* returned and if the caller is a class in the container's classpath then a different LoggerContext may be
* returned. If true then only a single LoggerContext will be returned.
* @param externalContext An external context (such as a ServletContext) to be associated with the LoggerContext.
* @param configLocation The URI for the configuration to use.
* @param name The LoggerContext name.
* @return a LoggerContext.
*/
public static LoggerContext getContext(final ClassLoader loader, final boolean currentContext,
final Object externalContext, final URI configLocation,
final String name) {
return factory.getContext(LogManager.class.getName(), loader, externalContext, currentContext, configLocation,
name);
}
/**
* Returns a LoggerContext
* @param fqcn The fully qualified class name of the Class that this method is a member of.
* @param currentContext if false the LoggerContext appropriate for the caller of this method is returned. For
* example, in a web application if the caller is a class in WEB-INF/lib then one LoggerContext may be
* returned and if the caller is a class in the container's classpath then a different LoggerContext may be
* returned. If true then only a single LoggerContext will be returned.
* @return a LoggerContext.
*/
protected static LoggerContext getContext(final String fqcn, final boolean currentContext) {
return factory.getContext(fqcn, null, null, currentContext);
}
/**
* Returns a LoggerContext
* @param fqcn The fully qualified class name of the Class that this method is a member of.
* @param loader The ClassLoader for the context. If null the context will attempt to determine the appropriate
* ClassLoader.
* @param currentContext if false the LoggerContext appropriate for the caller of this method is returned. For
* example, in a web application if the caller is a class in WEB-INF/lib then one LoggerContext may be
* returned and if the caller is a class in the container's classpath then a different LoggerContext may be
* returned. If true then only a single LoggerContext will be returned.
* @return a LoggerContext.
*/
protected static LoggerContext getContext(final String fqcn, final ClassLoader loader,
final boolean currentContext) {
return factory.getContext(fqcn, loader, null, currentContext);
}
/**
* Returns the current LoggerContextFactory.
* @return The LoggerContextFactory.
*/
public static LoggerContextFactory getFactory() {
return factory;
}
/**
* Sets the current LoggerContextFactory to use. Normally, the appropriate LoggerContextFactory is created at
* startup, but in certain environments, a LoggerContextFactory implementation may not be available at this point.
* Thus, an alternative LoggerContextFactory can be set at runtime.
*
* <p>
* Note that any Logger or LoggerContext objects already created will still be valid, but they will no longer be
* accessible through LogManager. Thus, <strong>it is a bad idea to use this method without a good reason</strong>!
* Generally, this method should be used only during startup before any code starts caching Logger objects.
* </p>
*
* @param factory the LoggerContextFactory to use.
*/
// FIXME: should we allow only one update of the factory?
public static void setFactory(final LoggerContextFactory factory) {
LogManager.factory = factory;
}
/**
* Returns a formatter Logger using the fully qualified name of the Class as the Logger name.
* <p>
 * This logger lets you use a {@link java.util.Formatter} string in the message to format parameters.
* </p>
* <p>
* Short-hand for {@code getLogger(clazz, StringFormatterMessageFactory.INSTANCE)}
* </p>
*
* @param clazz
* The Class whose name should be used as the Logger name.
* @return The Logger, created with a {@link StringFormatterMessageFactory}
* @see Logger#fatal(Marker, String, Object...)
* @see Logger#fatal(String, Object...)
* @see Logger#error(Marker, String, Object...)
* @see Logger#error(String, Object...)
* @see Logger#warn(Marker, String, Object...)
* @see Logger#warn(String, Object...)
* @see Logger#info(Marker, String, Object...)
* @see Logger#info(String, Object...)
* @see Logger#debug(Marker, String, Object...)
* @see Logger#debug(String, Object...)
* @see Logger#trace(Marker, String, Object...)
* @see Logger#trace(String, Object...)
* @see StringFormatterMessageFactory
*/
public static Logger getFormatterLogger(final Class<?> clazz) {
return getLogger(clazz != null ? clazz.getName() : getClassName(2), StringFormatterMessageFactory.INSTANCE);
}
/**
* Returns a formatter Logger using the fully qualified name of the value's Class as the Logger name.
* <p>
 * This logger lets you use a {@link java.util.Formatter} string in the message to format parameters.
* </p>
* <p>
* Short-hand for {@code getLogger(value, StringFormatterMessageFactory.INSTANCE)}
* </p>
*
* @param value
 * The value whose class name should be used as the Logger name.
* @return The Logger, created with a {@link StringFormatterMessageFactory}
* @see Logger#fatal(Marker, String, Object...)
* @see Logger#fatal(String, Object...)
* @see Logger#error(Marker, String, Object...)
* @see Logger#error(String, Object...)
* @see Logger#warn(Marker, String, Object...)
* @see Logger#warn(String, Object...)
* @see Logger#info(Marker, String, Object...)
* @see Logger#info(String, Object...)
* @see Logger#debug(Marker, String, Object...)
* @see Logger#debug(String, Object...)
* @see Logger#trace(Marker, String, Object...)
* @see Logger#trace(String, Object...)
* @see StringFormatterMessageFactory
*/
public static Logger getFormatterLogger(final Object value) {
return getLogger(value != null ? value.getClass().getName() : getClassName(2),
StringFormatterMessageFactory.INSTANCE);
}
/**
* Returns a formatter Logger with the specified name.
* <p>
 * This logger lets you use a {@link java.util.Formatter} string in the message to format parameters.
* </p>
* <p>
* Short-hand for {@code getLogger(name, StringFormatterMessageFactory.INSTANCE)}
* </p>
*
* @param name The logger name. If null it will default to the name of the calling class.
* @return The Logger, created with a {@link StringFormatterMessageFactory}
* @see Logger#fatal(Marker, String, Object...)
* @see Logger#fatal(String, Object...)
* @see Logger#error(Marker, String, Object...)
* @see Logger#error(String, Object...)
* @see Logger#warn(Marker, String, Object...)
* @see Logger#warn(String, Object...)
* @see Logger#info(Marker, String, Object...)
* @see Logger#info(String, Object...)
* @see Logger#debug(Marker, String, Object...)
* @see Logger#debug(String, Object...)
* @see Logger#trace(Marker, String, Object...)
* @see Logger#trace(String, Object...)
* @see StringFormatterMessageFactory
*/
public static Logger getFormatterLogger(final String name) {
return getLogger(name != null ? name : getClassName(2), StringFormatterMessageFactory.INSTANCE);
}
/**
* Returns a Logger with the name of the calling class.
* @return The Logger for the calling class.
*/
public static Logger getLogger() {
return getLogger(getClassName(2));
}
/**
* Returns a Logger using the fully qualified name of the Class as the Logger name.
* @param clazz The Class whose name should be used as the Logger name. If null it will default to the calling
* class.
* @return The Logger.
*/
public static Logger getLogger(final Class<?> clazz) {
return getLogger(clazz != null ? clazz.getName() : getClassName(2));
}
/**
* Returns a Logger using the fully qualified name of the Class as the Logger name.
* @param clazz The Class whose name should be used as the Logger name. If null it will default to the calling
* class.
* @param messageFactory The message factory is used only when creating a logger, subsequent use does not change
* the logger but will log a warning if mismatched.
* @return The Logger.
*/
public static Logger getLogger(final Class<?> clazz, final MessageFactory messageFactory) {
return getLogger(clazz != null ? clazz.getName() : getClassName(2), messageFactory);
}
/**
* Returns a Logger with the name of the calling class.
* @param messageFactory The message factory is used only when creating a logger, subsequent use does not change
* the logger but will log a warning if mismatched.
* @return The Logger for the calling class.
*/
public static Logger getLogger(final MessageFactory messageFactory) {
return getLogger(getClassName(2), messageFactory);
}
/**
* Returns a Logger using the fully qualified class name of the value as the Logger name.
* @param value The value whose class name should be used as the Logger name. If null the name of the calling
* class will be used as the logger name.
* @return The Logger.
*/
public static Logger getLogger(final Object value) {
return getLogger(value != null ? value.getClass().getName() : getClassName(2));
}
/**
* Returns a Logger using the fully qualified class name of the value as the Logger name.
* @param value The value whose class name should be used as the Logger name. If null the name of the calling
* class will be used as the logger name.
* @param messageFactory The message factory is used only when creating a logger, subsequent use does not change
* the logger but will log a warning if mismatched.
* @return The Logger.
*/
public static Logger getLogger(final Object value, final MessageFactory messageFactory) {
return getLogger(value != null ? value.getClass().getName() : getClassName(2), messageFactory);
}
/**
* Returns a Logger with the specified name.
*
* @param name The logger name. If null the name of the calling class will be used.
* @return The Logger.
*/
public static Logger getLogger(final String name) {
final String actualName = name != null ? name : getClassName(2);
return factory.getContext(LogManager.class.getName(), null, null, false).getLogger(actualName);
}
/**
* Returns a Logger with the specified name.
*
* @param name The logger name. If null the name of the calling class will be used.
* @param messageFactory The message factory is used only when creating a logger, subsequent use does not change
* the logger but will log a warning if mismatched.
* @return The Logger.
*/
public static Logger getLogger(final String name, final MessageFactory messageFactory) {
final String actualName = name != null ? name : getClassName(2);
return factory.getContext(LogManager.class.getName(), null, null, false).getLogger(actualName, messageFactory);
}
/**
* Returns a Logger with the specified name.
*
* @param fqcn The fully qualified class name of the class that this method is a member of.
* @param name The logger name.
* @return The Logger.
*/
protected static Logger getLogger(final String fqcn, final String name) {
return factory.getContext(fqcn, null, null, false).getLogger(name);
}
/**
* Returns the root logger.
*
* @return the root logger, named {@link #ROOT_LOGGER_NAME}.
*/
public static Logger getRootLogger() {
return getLogger(ROOT_LOGGER_NAME);
}
/**
* Prevents instantiation
*/
protected LogManager() {
}
}
| Add comment regarding recent OSGi change.
| log4j-api/src/main/java/org/apache/logging/log4j/LogManager.java | Add comment regarding recent OSGi change. | <ide><path>og4j-api/src/main/java/org/apache/logging/log4j/LogManager.java
<ide>
<ide> if (factory == null) {
<ide> final SortedMap<Integer, LoggerContextFactory> factories = new TreeMap<Integer, LoggerContextFactory>();
<del>
<add> // note that the following initial call to ProviderUtil may block until a Provider has been installed when
<add> // running in an OSGi environment
<ide> if (ProviderUtil.hasProviders()) {
<ide> for (final Provider provider : ProviderUtil.getProviders()) {
<ide> final Class<? extends LoggerContextFactory> factoryClass = provider.loadLoggerContextFactory(); |
|
Java | agpl-3.0 | 4344e3ad96188e68053c61d544a99a0b15807a70 | 0 | duaneking/rockstar_test,duaneking/rockstar_test,duaneking/rockstar_test,duaneking/rockstar_test,duaneking/rockstar_test,duaneking/rockstar_test,duaneking/rockstar_test,duaneking/rockstar_test,duaneking/rockstar_test,duaneking/rockstar_test | 46316a38-2e62-11e5-9284-b827eb9e62be | hello.java | 462c03c2-2e62-11e5-9284-b827eb9e62be | 46316a38-2e62-11e5-9284-b827eb9e62be | hello.java | 46316a38-2e62-11e5-9284-b827eb9e62be | <ide><path>ello.java
<del>462c03c2-2e62-11e5-9284-b827eb9e62be
<add>46316a38-2e62-11e5-9284-b827eb9e62be |
|
JavaScript | mit | bb20ed61f983e78a599c2ac033606e3c69279a68 | 0 | jmettraux/quaderno,jmettraux/quaderno | //
// Copyright (c) 2010, John Mettraux, [email protected]
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
//
var Quaderno = function () {
//
// misc
function clog (o) {
try {
if (arguments.length == 1) console.log(arguments[0]);
else console.log(arguments);
}
catch (e) {
if (o == undefined) print("undefined");
else print(o.toString());
}
}
function isArray (o) {
if (o == null) return false;
return (o.constructor == Array);
}
function dup (o) {
if (isArray(o)) {
var r = [];
for (var i = 0; i < o.length; i++) r.push(o[i]);
return r;
}
return o;
}
function strip (s) {
return s.replace(/^\s+|\s+$/g, '');
}
function hasClass (elt, cname) {
if ( ! cname) return false;
if (cname[0] == '.') cname = cname.slice(1);
var cs = elt.className.split(' ');
for (var i = 0; i < cs.length; i++) { if (cs[i] == cname) return true; }
return false;
}
function addClass (elt, cname) {
if (cname.match(/^\./)) cname = cname.slice(1);
var cs = elt.className.split(' ');
cs.push(cname);
elt.className = cs.join(' ');
}
function removeClass (elt, cname) {
var cs = elt.className.split(' ');
var ncs = [];
for (var i = 0; i < cs.length; i++) {
var cn = cs[i];
if (cn != cname) ncs.push(cn);
}
elt.className = ncs.join(' ');
}
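// Parses a CSS-like selector fragment into its parts. For example,
// identify('div.foo#bar') yields tagName 'div', className 'foo' and
// id 'bar', plus an accepts(elt) function testing an element against
// the parsed class names.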
function identify (path) {
var cs = [];
var i = null;
var t = null;
var s = path;
var m;
if (m = s.match(/^ *([^#\.]+)(.*)$/)) {
t = m[1];
s = m[2];
}
while (m = s.match(/^ *([#\.][^#\. ]+)/)) {
var m1 = m[1];
var ms = m1.slice(1);
if (m1[0] == '.') cs.push(ms);
else if (m1[0] == '#') i = ms;
s = s.slice(m1.length);
}
var cn = null;
if (cs.length > 0) cn = cs.join(' ');
return {
'className': cn,
'id': i,
'tagName': t,
'accepts': function (elt) { return hasClass(elt, cn); }
};
}
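// "select children": keeps the direct children of elt matching the
// selector (an id match returns immediately). With a numeric index
// a single child is returned; -1 means the last match.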
function sc (elt, path, index) {
var i = identify(path);
var a = [];
for (var j = 0; j < elt.children.length; j++) {
var c = elt.children[j];
if (i.id && c.id == i.id) return [ c ];
if (i.accepts(c)) a.push(c);
else if (c.tagName && (c.tagName.toLowerCase() == i.tagName)) a.push(c);
}
if (index === -1) return a.slice(-1)[0];
return (index !== undefined) ? a[index] : a;
}
function scc (elt, cname) {
var child = sc(elt, cname, 0);
if (child) return child;
var div = sc(elt, 'div', 0);
if ( ! div) return undefined;
return sc(div, cname, 0);
}
function spath (elt, path, index) {
path = path.split(' > ');
var start = [ elt ];
var p;
while (p = path.shift()) {
start = start[0];
var c = sc(start, p);
if (c.length == 0) return [];
start = c;
}
if (index === -1) return start.slice(-1)[0];
return (index !== undefined) ? start[index] : start;
}
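// Creates an element, reading class/id from a selector string (or
// attributes from a hash), optionally appending a text node, and
// attaches it to the container. For example,
// create(div, 'span', '.quad_key', 'k') appends
// <span class="quad_key">k</span> to div.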
function create (container, tagName, attributes, innerText) {
var e = document.createElement(tagName);
if (attributes && ((typeof attributes) == 'string')) {
var i = identify(attributes);
if (i.className) e.className = i.className;
else if (i.id) e.id = i.id;
}
else if (attributes) {
for (var k in attributes) e.setAttribute(k, attributes[k]);
}
if (innerText) {
//e.innerHTML = innerText; // doesn't work with Safari
e.appendChild(document.createTextNode(innerText));
}
if (container) {
container.appendChild(e);
}
return e;
}
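// Appends an <input type="hidden"> carrying a value; used to stash
// template attributes (type, label, ...) in the rendered DOM.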
function hide (container, classSel, value) {
var i = create(container, 'input', classSel);
i.setAttribute('type', 'hidden');
i.setAttribute('value', value);
}
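// Digs into nested hashes along a dotted key:
// lookup({ a: { b: 1 } }, 'a.b') returns 1, missing keys yield
// undefined.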
function lookup (hash, key) {
if (hash == undefined) return undefined
if ( ! isArray(key)) key = key.split('.');
if (key.length < 1) return hash;
return lookup(hash[key.shift()], key);
}
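// Translates dotted keys through options.translations when present;
// plain texts pass through unchanged.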
function translate (options, text) {
if (text.indexOf('.') > -1 && options.translations) {
return lookup(options.translations, text);
}
return text;
}
function getValue (template, data, options) {
if (template[1].value) return template[1].value;
if (template[1].id) return lookup(data, template[1].id);
return undefined;
}
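// Renders one of the small action buttons as an <a> element; the
// onclick is completed with 'return false;' so the empty href is
// never followed.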
function button (container, className, onclick) {
if ( ! onclick.match(/return false;$/)) onclick += " return false;";
if (className[0] == '.') className = className.slice(1, className.length);
    var title = {
'quad_plus_button': 'add',
'quad_minus_button': 'remove',
'quad_up_button': 'move up',
'quad_down_button': 'move down',
'quad_copy_button': 'copy',
'quad_cut_button': 'cut',
'quad_paste_button': 'paste',
'quad_go_button': 'go',
}[className];
return create(
container,
'a',
{ 'href': '',
'class': className + ' quad_button',
'title': title,
'onclick': onclick });
}
function createTextInput (container, key, template, data, options) {
create(container, 'span', '.quad_key_e', key);
var input = create(container, 'input', '.quad_' + key);
input.type = 'text';
var v = template[1][key];
if (v) input.value = v;
return input;
}
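// Reads the '.quad_<key>' input under elt and, when it holds a
// non-empty value, copies it into the atts hash.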
function fetchAndSet (elt, key, atts) {
var v = scc(elt, '.quad_' + key);
if ( ! v) return;
v = v.value;
if (v === '') return;
atts[key] = v;
}
//function root (elt) {
// if ( ! elt) return null;
// if (elt.undoStack) return elt;
// return root(elt.parentNode);
//}
//function stack (elt) {
// var r = root(elt);
// var d = toData(r.id);
// r.undoStack.push(d);
//}
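// Templates are triples: [ type, attributes, children ].
// TYPE_BLANKS maps each element type to its blank triple
// (presumably used when a new element gets added in edit mode).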
var TYPE_BLANKS = {
'text_input': [ 'text_input', {}, [] ],
'text': [ 'text', {}, [] ],
'group': [ 'group', {}, [] ]
}
//var TYPES = []; for (var k in TYPE_BLANKS) { TYPES.push(k); }
var TYPES = [
'text_input', 'text', 'group'
];
//
// 'tabs'
function render_tab_label (container, template, data, options) {
var td = create(container, 'td', {});
hide(td, '.quad_label', template[1].label);
var a = create(td, 'a', '.quad_tab', template[1].label);
a.setAttribute('href', '');
a.setAttribute('onclick', 'Quaderno.showTab(this.parentNode); return false;');
}
function edit_tab_label (container, template, data, options) {
var td = create(container, 'td', {});
var div = create(td, 'div', '.quad_tab');
if (template === 'new_tab_tab') {
button(div, '.quad_plus_button', 'Quaderno.addTab(this);');
}
else {
var inp = create(div, 'input', '.quad_label');
inp.setAttribute('type', 'text');
inp.setAttribute('value', template[1].label);
button(div, '.quad_go_button', 'Quaderno.showTab(this.parentNode.parentNode);');
button(div, '.quad_minus_button', 'Quaderno.removeTab(this.parentNode.parentNode);');
}
}
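// Lays the tab group out as a table: a first row with the tab labels
// (plus a "new tab" cell in edit mode), a second row holding the tab
// bodies; only the first body starts visible.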
function render_tabs (container, template, data, options) {
var tabs = dup(template[2]);
if (options.mode === 'edit') tabs.push('new_tab_tab');
var table = create(container, 'table', '.quad_tab_group');
// tabs
var tr0 = create(table, 'tr', '.quad_tab_group');
for (var i = 0; i < tabs.length; i++) {
var f = (options.mode === 'edit') ? edit_tab_label : render_tab_label;
f(tr0, tabs[i], data, options);
}
var tab = spath(tr0, 'td > .quad_tab', 0);
addClass(tab, 'quad_selected');
// content
var tr = create(table, 'tr', '.quad_tab_group');
var td = create(tr, 'td', { 'colspan': tabs.length });
var qtb = create(td, 'div', '.quad_tab_body');
for (i = 0; i < template[2].length; i++) {
var f = (options.mode === 'edit') ? editElement : renderElement;
var div = f(qtb, tabs[i], data, options);
tr0.children[i].tab_body = div;
if (i != 0) div.style.display = 'none';
}
return table;
}
var edit_tabs = render_tabs;
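// Walks the rendered tab table back into a 'tabs' template triple,
// re-attaching each tab label to the corresponding child element.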
function serialize_tabs (elt) {
var tabs = [];
var labels = [];
var tds = spath(elt, 'table > tr > td');
for (var i = 0; i < tds.length; i++) {
var lab =
sc(tds[i], '.quad_label', 0) ||
spath(tds[i], '.quad_tab > .quad_label', 0);
if (lab) labels.push(lab.value);
}
var trs = spath(elt, 'table > tr', 1);
var tab_body = spath(trs, 'td > .quad_tab_body', 0);
var children = serialize_children(tab_body);
for (var i = 0; i < children.length; i++) {
children[i][1].label = labels[i];
}
return [ 'tabs', {}, children ];
}
//
// 'group'
function render_group (container, template, data, options) {
if ( ! hasClass(container.parentNode, 'quad_tab_body')) {
addClass(container, '.quad_group');
}
var children = template[2];
for (var i = 0; i < children.length; i++) {
renderElement(container, children[i], data, options);
}
}
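// appends remove/up/down buttons to an edited element's header div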
function addMoveButtons (elt) {
button(
elt,
'.quad_minus_button',
'Quaderno.removeElement(this.parentNode);');
button(
elt,
'.quad_up_button',
'Quaderno.moveElement(this.parentNode.parentNode, "up");');
button(
elt,
'.quad_down_button',
'Quaderno.moveElement(this.parentNode.parentNode, "down");');
}
function edit_group (container, template, data, options) {
if ( ! hasClass(container.parentNode, 'quad_tab_body')) {
addClass(container, '.quad_group');
}
var children = template[2];
for (var i = 0; i < children.length; i++) {
var c = editElement(container, children[i], data, options);
var cdiv = sc(c, 'div', 0);
addMoveButtons(cdiv);
}
var div = create(container, 'div', {});
var sel = create(div, 'select', '.quad_type');
for (var i = 0; i < TYPES.length; i++) {
var o = create(sel, 'option', {}, TYPES[i]);
if (TYPES[i] === template[0]) o.setAttribute('selected', 'selected');
}
button(div, '.quad_plus_button', 'Quaderno.addElement(this.parentNode);');
return container;
}
//
// 'text'
function render_text (container, template, data, options) {
var text = template[1].label;
hide(container, '.quad_label', text);
text = translate(options, text);
create(container, 'div', '.quad_key.quad_text', text);
}
//
// 'text_input'
function render_text_input (container, template, data, options) {
hide(container, '.quad_label', template[1].label);
create(container, 'span', '.quad_key', template[1].label);
var input = create(container, 'input', '.quad_value');
input.setAttribute('type', 'text');
var value = getValue(template, data, options);
//var value = options.value;
//delete options.value;
if (value != undefined) input.value = value;
}
//
// *
function edit_ (container, template, data, options) {
var div = create(container, 'div', {});
create(div, 'span', '.quad_type', template[0]);
createTextInput(div, 'id', template, data, options);
createTextInput(div, 'label', template, data, options);
createTextInput(div, 'title', template, data, options);
createTextInput(div, 'value', template, data, options);
return div;
}
function serialize_children (elt) {
var children = [];
var elts = sc(elt, '.quad_element');
for (var i = 0; i < elts.length; i++) {
children.push(serializeElement(elts[i]));
}
return children;
}
function serialize_ (elt, serializeChildren) {
if (serializeChildren == undefined) serializeChildren = true;
var type = sc(elt, '.quad_type', 0).value;
var atts = {};
var id = fetchAndSet(elt, 'id', atts);
var label = fetchAndSet(elt, 'label', atts);
var title = fetchAndSet(elt, 'title', atts);
var value = fetchAndSet(elt, 'value', atts);
var values = fetchAndSet(elt, 'values', atts);
var children = [];
if (serializeChildren) children = serialize_children(elt);
return [ type, atts, children ];
}
//
// methods
function setParent (template, parent) {
template.parent = parent;
for (var i = 0; i < template[2].length; i++) {
setParent(template[2][i], template);
}
}
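// resolves funcPrefix + type (e.g. 'render_tabs') via eval(), falling back to the generic function named by funcPrefix alone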
function lookupFunction (funcPrefix, template) {
var type = template;
if (isArray(template)) type = template[0];
try { return eval(funcPrefix + type); }
catch (e) { return eval(funcPrefix); }
}
function editElement (container, template, data, options) {
var div = create(container, 'div', '.quad_element');
hide(div, '.quad_type', template[0]);
var f = lookupFunction('edit_', template);
f(div, template, data, options);
return div;
}
function renderElement (container, template, data, options) {
var f = lookupFunction('render_', template);
var div = create(container, 'div', '.quad_element');
var id = template[1].id;
if (id) {
hide(div, '.quad_id', id);
}
if (template[1].title) {
hide(div, '.quad_title', template[1].title);
div.setAttribute('title', translate(options, template[1].title));
}
hide(div, '.quad_type', template[0]);
f(div, template, data, options);
return div;
}
function serializeElement (container) {
var type = sc(container, '.quad_type', 0).value;
var f = lookupFunction('serialize_', type);
return f(container);
}
//
// onClick public methods
function showTab (td) {
for (var i = 0; i < td.parentNode.children.length; i++) {
var tab = sc(td.parentNode.children[i], '.quad_tab', 0);
removeClass(tab, 'quad_selected');
}
var tab = sc(td, '.quad_tab', 0);
addClass(tab, 'quad_selected');
for (var i = 0; i < td.tab_body.parentNode.children.length; i++) {
td.tab_body.parentNode.children[i].style.display = 'none';
}
td.tab_body.style.display = 'block';
}
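// instantiates a blank template for the selected type and inserts the new editable element just before the type-selector row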
function addElement (elt) {
var type = sc(elt, '.quad_type', 0).value;
var blank = TYPE_BLANKS[type];
var newElement = editElement(elt.parentNode, blank, {}, {});
addMoveButtons(sc(newElement, 'div', 0));
elt.parentNode.insertBefore(newElement, elt);
}
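// swaps the element with its previous/next sibling; moving down stops at the trailing type-selector div, which is not a .quad_element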
function moveElement (elt, direction) {
if (direction === 'up') {
if (elt.previousSibling)
elt.parentNode.insertBefore(elt, elt.previousSibling);
}
else {
if (elt.nextSibling && hasClass(elt.nextSibling, 'quad_element'))
elt.parentNode.insertBefore(elt.nextSibling, elt);
}
}
//
// public methods
function render (container, template, data, options) {
setParent(template);
data = data || {};
options = options || {};
options.mode = options.mode || 'view';
if ((typeof container) == 'string') {
container = document.getElementById(container);
}
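// empty the container before (re)rendering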
var fc; while (fc = container.firstChild) { container.removeChild(fc); }
container.mode = options.mode;
if (options.mode == 'edit') {
editElement(container, template, data, options);
}
else {
renderElement(container, template, data, options);
}
//container.undoStack = [ toTemplateWithData
}
function serialize (container) {
if ((typeof container) == 'string') {
container = document.getElementById(container);
}
return serializeElement(sc(container, '.quad_element', 0));
}
//function extract (container) {
// var s = serialize(container);
// // TODO
// return [ "template", "data" ];
//}
//
// that's all folks...
return {
// public for the sake of testing
identify: identify,
// public for onClick or onChange
showTab: showTab,
addElement: addElement,
moveElement: moveElement,
// public
render: render,
serialize: serialize,
//extract: extract
};
}();
| js/quaderno.js | //
// Copyright (c) 2010, John Mettraux, [email protected]
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
//
var Quaderno = function () {
//
// misc
function clog (o) {
try {
if (arguments.length == 1) console.log(arguments[0]);
else console.log(arguments);
}
catch (e) {
if (o == undefined) print("undefined");
else print(o.toString());
}
}
function isArray (o) {
if (o == null) return false;
return (o.constructor == Array);
}
function dup (o) {
if (isArray(o)) {
var r = [];
for (var i = 0; i < o.length; i++) r.push(o[i]);
return r;
}
return o;
}
function strip (s) {
return s.replace(/^\s+|\s+$/g, '');
}
function hasClass (elt, cname) {
if ( ! cname) return false;
if (cname[0] == '.') cname = cname.slice(1);
var cs = elt.className.split(' ');
for (var i = 0; i < cs.length; i++) { if (cs[i] == cname) return true; }
return false;
}
function addClass (elt, cname) {
var cs = elt.className.split(' ');
cs.push(cname);
elt.className = cs.join(' ');
}
function removeClass (elt, cname) {
var cs = elt.className.split(' ');
var ncs = [];
for (var i = 0; i < cs.length; i++) {
var cn = cs[i];
if (cn != cname) ncs.push(cn);
}
elt.className = ncs.join(' ');
}
function identify (path) {
var cs = [];
var i = null;
var t = null;
var s = path;
var m;
if (m = s.match(/^ *([^#\.]+)(.*)$/)) {
t = m[1];
s = m[2];
}
while (m = s.match(/^ *([#\.][^#\. ]+)/)) {
var m1 = m[1];
var ms = m1.slice(1);
if (m1[0] == '.') cs.push(ms);
else if (m1[0] == '#') i = ms;
s = s.slice(m1.length);
}
var cn = null;
if (cs.length > 0) cn = cs.join(' ');
return {
'className': cn,
'id': i,
'tagName': t,
'accepts': function (elt) { return hasClass(elt, cn); }
};
}
function sc (elt, path, index) {
var i = identify(path);
var a = [];
for (var j = 0; j < elt.children.length; j++) {
var c = elt.children[j];
if (i.id && c.id == i.id) return [ c ];
if (i.accepts(c)) a.push(c);
else if (c.tagName && (c.tagName.toLowerCase() == i.tagName)) a.push(c);
}
if (index === -1) return a.slice(-1)[0];
return (index !== undefined) ? a[index] : a;
}
function scc (elt, cname) {
var child = sc(elt, cname, 0);
if (child) return child;
var div = sc(elt, 'div', 0);
if ( ! div) return undefined;
return sc(div, cname, 0);
}
function spath (elt, path, index) {
path = path.split(' > ');
var start = [ elt ];
var p;
while (p = path.shift()) {
start = start[0];
var c = sc(start, p);
if (c.length == 0) return [];
start = c;
}
if (index === -1) return start.slice(-1)[0];
return (index !== undefined) ? start[index] : start;
}
function create (container, tagName, attributes, innerText) {
var e = document.createElement(tagName);
if (attributes && ((typeof attributes) == 'string')) {
var i = identify(attributes);
if (i.className) e.className = i.className;
else if (i.id) e.id = i.id;
}
else if (attributes) {
for (var k in attributes) e.setAttribute(k, attributes[k]);
}
if (innerText) {
//e.innerHTML = innerText; // doesn't work with Safari
e.appendChild(document.createTextNode(innerText));
}
if (container) {
container.appendChild(e);
}
return e;
}
function hide (container, classSel, value) {
var i = create(container, 'input', classSel);
i.setAttribute('type', 'hidden');
i.setAttribute('value', value);
}
function lookup (hash, key) {
if (hash == undefined) return undefined
if ( ! isArray(key)) key = key.split('.');
if (key.length < 1) return hash;
return lookup(hash[key.shift()], key);
}
function translate (options, text) {
if (text.indexOf('.') > -1 && options.translations) {
return lookup(options.translations, text);
}
return text;
}
function getValue (template, data, options) {
if (template[1].value) return template[1].value;
if (template[1].id) return lookup(data, template[1].id);
return undefined;
}
function button (container, className, onclick) {
if ( ! onclick.match(/return false;$/)) onclick += " return false;";
if (className[0] == '.') className = className.slice(1, className.length);
title = {
'quad_plus_button': 'add',
'quad_minus_button': 'remove',
'quad_up_button': 'move up',
'quad_down_button': 'move down',
'quad_copy_button': 'copy',
'quad_cut_button': 'cut',
'quad_paste_button': 'paste',
'quad_go_button': 'go',
}[className];
return create(
container,
'a',
{ 'href': '',
'class': className + ' quad_button',
'title': title,
'onclick': onclick });
}
function createTextInput (container, key, template, data, options) {
create(container, 'span', '.quad_key_e', key);
var input = create(container, 'input', '.quad_' + key);
input.type = 'text';
var v = template[1][key];
if (v) input.value = v;
return input;
}
function fetchAndSet (elt, key, atts) {
var v = scc(elt, '.quad_' + key);
if ( ! v) return;
v = v.value;
if (v === '') return;
atts[key] = v;
}
//function root (elt) {
// if ( ! elt) return null;
// if (elt.undoStack) return elt;
// return root(elt.parentNode);
//}
//function stack (elt) {
// var r = root(elt);
// var d = toData(r.id);
// r.undoStack.push(d);
//}
var TYPE_BLANKS = {
'text': [ 'text', {}, [] ],
'text_input': [ 'text_input', {}, [] ],
}
var TYPES = []; for (var k in TYPE_BLANKS) { TYPES.push(k); }
//
// 'tabs'
function render_tab_label (container, template, data, options) {
var td = create(container, 'td', {});
hide(td, '.quad_label', template[1].label);
var a = create(td, 'a', '.quad_tab', template[1].label);
a.setAttribute('href', '');
a.setAttribute('onclick', 'Quaderno.showTab(this.parentNode); return false;');
}
function edit_tab_label (container, template, data, options) {
var td = create(container, 'td', {});
var div = create(td, 'div', '.quad_tab');
if (template === 'new_tab_tab') {
button(div, '.quad_plus_button', 'Quaderno.addTab(this);');
}
else {
var inp = create(div, 'input', '.quad_label');
inp.setAttribute('type', 'text');
inp.setAttribute('value', template[1].label);
button(div, '.quad_go_button', 'Quaderno.showTab(this.parentNode.parentNode);');
button(div, '.quad_minus_button', 'Quaderno.removeTab(this.parentNode.parentNode);');
}
}
function render_tabs (container, template, data, options) {
var tabs = dup(template[2]);
if (options.mode === 'edit') tabs.push('new_tab_tab');
var table = create(container, 'table', '.quad_tab_group');
// tabs
var tr0 = create(table, 'tr', '.quad_tab_group');
for (var i = 0; i < tabs.length; i++) {
var f = (options.mode === 'edit') ? edit_tab_label : render_tab_label;
f(tr0, tabs[i], data, options);
}
var tab = spath(tr0, 'td > .quad_tab', 0);
addClass(tab, 'quad_selected');
// content
var tr = create(table, 'tr', '.quad_tab_group');
var td = create(tr, 'td', { 'colspan': tabs.length });
var qtb = create(td, 'div', '.quad_tab_body');
for (i = 0; i < template[2].length; i++) {
var f = (options.mode === 'edit') ? editElement : renderElement;
var div = f(qtb, tabs[i], data, options);
tr0.children[i].tab_body = div;
if (i != 0) div.style.display = 'none';
}
return table;
}
var edit_tabs = render_tabs;
function serialize_tabs (elt) {
var tabs = [];
var labels = [];
var tds = spath(elt, 'table > tr > td');
for (var i = 0; i < tds.length; i++) {
var lab =
sc(tds[i], '.quad_label', 0) ||
spath(tds[i], '.quad_tab > .quad_label', 0);
if (lab) labels.push(lab.value);
}
var trs = spath(elt, 'table > tr', 1);
var tab_body = spath(trs, 'td > .quad_tab_body', 0);
var children = serialize_children(tab_body);
for (var i = 0; i < children.length; i++) {
children[i][1].label = labels[i];
}
return [ 'tabs', {}, children ];
}
//
// 'group'
function render_group (container, template, data, options) {
var children = template[2];
for (var i = 0; i < children.length; i++) {
renderElement(container, children[i], data, options);
}
}
function edit_group (container, template, data, options) {
var children = template[2];
// TODO : buttons for moving stuff up and down
for (var i = 0; i < children.length; i++) {
var c = editElement(container, children[i], data, options);
var cdiv = sc(c, 'div', 0);
button(cdiv, '.quad_minus_button', 'Quaderno.removeElement(this.parentNode);');
button(cdiv, '.quad_up_button', 'Quaderno.moveElement(this.parentNode, "up");');
button(cdiv, '.quad_down_button', 'Quaderno.moveElement(this.parentNode, "down");');
}
var div = create(container, 'div', {});
var sel = create(div, 'select', '.quad_type');
for (var i = 0; i < TYPES.length; i++) {
var o = create(sel, 'option', {}, TYPES[i]);
if (TYPES[i] === template[0]) o.setAttribute('selected', 'selected');
}
button(div, '.quad_plus_button', 'Quaderno.addElement(this.parentNode);');
return container;
}
//
// 'text'
function render_text (container, template, data, options) {
var text = template[1].label;
hide(container, '.quad_label', text);
text = translate(options, text);
create(container, 'div', '.quad_key.quad_text', text);
}
//
// 'text_input'
function render_text_input (container, template, data, options) {
hide(container, '.quad_label', template[1].label);
create(container, 'span', '.quad_key', template[1].label);
var input = create(container, 'input', '.quad_value');
input.setAttribute('type', 'text');
var value = getValue(template, data, options);
//var value = options.value;
//delete options.value;
if (value != undefined) input.value = value;
}
//
// *
function edit_ (container, template, data, options) {
// TODO : finish me
var div = create(container, 'div', {});
create(div, 'span', '.quad_type', template[0]);
createTextInput(div, 'id', template, data, options);
createTextInput(div, 'label', template, data, options);
createTextInput(div, 'title', template, data, options);
createTextInput(div, 'value', template, data, options);
// TODO : repeatable
return div;
}
function serialize_children (elt) {
var children = [];
var elts = sc(elt, '.quad_element');
for (var i = 0; i < elts.length; i++) {
children.push(serializeElement(elts[i]));
}
return children;
}
function serialize_ (elt, serializeChildren) {
if (serializeChildren == undefined) serializeChildren = true;
var type = sc(elt, '.quad_type', 0).value;
// TODO : repeatable
var atts = {};
var id = fetchAndSet(elt, 'id', atts);
var label = fetchAndSet(elt, 'label', atts);
var title = fetchAndSet(elt, 'title', atts);
var value = fetchAndSet(elt, 'value', atts);
var values = fetchAndSet(elt, 'values', atts);
var children = [];
if (serializeChildren) children = serialize_children(elt);
return [ type, atts, children ];
}
//
// methods
function setParent (template, parent) {
template.parent = parent;
for (var i = 0; i < template[2].length; i++) {
setParent(template[2][i], template);
}
}
function lookupFunction (funcPrefix, template) {
var type = template;
if (isArray(template)) type = template[0];
try { return eval(funcPrefix + type); }
catch (e) { return eval(funcPrefix); }
}
function editElement (container, template, data, options) {
var div = create(container, 'div', '.quad_element');
hide(div, '.quad_type', template[0]);
var f = lookupFunction('edit_', template);
f(div, template, data, options);
return div;
}
function renderElement (container, template, data, options) {
var f = lookupFunction('render_', template);
var div = create(container, 'div', '.quad_element');
var id = template[1].id;
if (id) {
hide(div, '.quad_id', id);
}
if (template[1].title) {
hide(div, '.quad_title', template[1].title);
div.setAttribute('title', translate(options, template[1].title));
}
hide(div, '.quad_type', template[0]);
//if (id && id.matches(/\.$/)) {
// id = id.slice(0, -1);
// var value = getValue(id, data);
//}
f(div, template, data, options);
//var value = getValue(template, data, options);
//if (isArray(value)) {
// for (var i = 0; i < value.length; i++) {
// var v = value[i];
// options.value = v;
// f(div, template, data, options);
// }
//}
//else {
// options.value = value;
// f(div, template, data, options);
//}
return div;
}
function serializeElement (container) {
var type = sc(container, '.quad_type', 0).value;
var f = lookupFunction('serialize_', type);
return f(container);
}
//
// public methods
function showTab (td) {
for (var i = 0; i < td.parentNode.children.length; i++) {
var tab = sc(td.parentNode.children[i], '.quad_tab', 0);
removeClass(tab, 'quad_selected');
}
var tab = sc(td, '.quad_tab', 0);
addClass(tab, 'quad_selected');
for (var i = 0; i < td.tab_body.parentNode.children.length; i++) {
td.tab_body.parentNode.children[i].style.display = 'none';
}
td.tab_body.style.display = 'block';
}
function render (container, template, data, options) {
setParent(template);
data = data || {};
options = options || {};
options.mode = options.mode || 'view';
if ((typeof container) == 'string') {
container = document.getElementById(container);
}
var fc; while (fc = container.firstChild) { container.removeChild(fc); }
container.mode = options.mode;
if (options.mode == 'edit') {
editElement(container, template, data, options);
}
else {
renderElement(container, template, data, options);
}
//container.undoStack = [ toTemplateWithData
}
function serialize (container) {
if ((typeof container) == 'string') {
container = document.getElementById(container);
}
return serializeElement(sc(container, '.quad_element', 0));
}
//function extract (container) {
// var s = serialize(container);
// // TODO
// return [ "template", "data" ];
//}
//
// that's all folks...
return {
// public for the sake of testing
identify: identify,
// public for onClick or onChange
showTab: showTab,
// public
render: render,
serialize: serialize,
//extract: extract
};
}();
| moveElement
| js/quaderno.js | moveElement | <ide><path>s/quaderno.js
<ide> }
<ide>
<ide> function addClass (elt, cname) {
<add> if (cname.match(/^\./)) cname = cname.slice(1);
<ide> var cs = elt.className.split(' ');
<ide> cs.push(cname);
<ide> elt.className = cs.join(' ');
<ide> //}
<ide>
<ide> var TYPE_BLANKS = {
<add> 'text_input': [ 'text_input', {}, [] ],
<ide> 'text': [ 'text', {}, [] ],
<del> 'text_input': [ 'text_input', {}, [] ],
<del> }
<del>
<del> var TYPES = []; for (var k in TYPE_BLANKS) { TYPES.push(k); }
<add> 'group': [ 'group', {}, [] ]
<add> }
<add>
<add> //var TYPES = []; for (var k in TYPE_BLANKS) { TYPES.push(k); }
<add> var TYPES = [
<add> 'text_input', 'text', 'group'
<add> ];
<ide>
<ide> //
<ide> // 'tabs'
<ide>
<ide> function render_group (container, template, data, options) {
<ide>
<add> if ( ! hasClass(container.parentNode, 'quad_tab_body')) {
<add> addClass(container, '.quad_group');
<add> }
<add>
<ide> var children = template[2];
<ide>
<ide> for (var i = 0; i < children.length; i++) {
<ide> }
<ide> }
<ide>
<add> function addMoveButtons (elt) {
<add>
<add> button(
<add> elt,
<add> '.quad_minus_button',
<add> 'Quaderno.removeElement(this.parentNode);');
<add> button(
<add> elt,
<add> '.quad_up_button',
<add> 'Quaderno.moveElement(this.parentNode.parentNode, "up");');
<add> button(
<add> elt,
<add> '.quad_down_button',
<add> 'Quaderno.moveElement(this.parentNode.parentNode, "down");');
<add> }
<add>
<ide> function edit_group (container, template, data, options) {
<ide>
<add> if ( ! hasClass(container.parentNode, 'quad_tab_body')) {
<add> addClass(container, '.quad_group');
<add> }
<add>
<ide> var children = template[2];
<del>
<del> // TODO : buttons for moving stuff up and down
<ide>
<ide> for (var i = 0; i < children.length; i++) {
<ide> var c = editElement(container, children[i], data, options);
<ide> var cdiv = sc(c, 'div', 0);
<del> button(cdiv, '.quad_minus_button', 'Quaderno.removeElement(this.parentNode);');
<del> button(cdiv, '.quad_up_button', 'Quaderno.moveElement(this.parentNode, "up");');
<del> button(cdiv, '.quad_down_button', 'Quaderno.moveElement(this.parentNode, "down");');
<add> addMoveButtons(cdiv);
<ide> }
<ide>
<ide> var div = create(container, 'div', {});
<ide> // *
<ide>
<ide> function edit_ (container, template, data, options) {
<del>
<del> // TODO : finish me
<ide>
<ide> var div = create(container, 'div', {});
<ide>
<ide> createTextInput(div, 'title', template, data, options);
<ide> createTextInput(div, 'value', template, data, options);
<ide>
<del> // TODO : repeatable
<del>
<ide> return div;
<ide> }
<ide>
<ide> if (serializeChildren == undefined) serializeChildren = true;
<ide>
<ide> var type = sc(elt, '.quad_type', 0).value;
<del>
<del> // TODO : repeatable
<ide>
<ide> var atts = {};
<ide>
<ide>
<ide> hide(div, '.quad_type', template[0]);
<ide>
<del> //if (id && id.matches(/\.$/)) {
<del> // id = id.slice(0, -1);
<del> // var value = getValue(id, data);
<del> //}
<del>
<ide> f(div, template, data, options);
<del>
<del> //var value = getValue(template, data, options);
<del> //if (isArray(value)) {
<del> // for (var i = 0; i < value.length; i++) {
<del> // var v = value[i];
<del> // options.value = v;
<del> // f(div, template, data, options);
<del> // }
<del> //}
<del> //else {
<del> // options.value = value;
<del> // f(div, template, data, options);
<del> //}
<ide>
<ide> return div;
<ide> }
<ide> }
<ide>
<ide> //
<del> // public methods
<add> // onClick public methods
<ide>
<ide> function showTab (td) {
<ide>
<ide> }
<ide> td.tab_body.style.display = 'block';
<ide> }
<add>
<add> function addElement (elt) {
<add>
<add> var type = sc(elt, '.quad_type', 0).value;
<add> var blank = TYPE_BLANKS[type];
<add>
<add> var newElement = editElement(elt.parentNode, blank, {}, {});
<add> addMoveButtons(sc(newElement, 'div', 0));
<add>
<add> elt.parentNode.insertBefore(newElement, elt);
<add> }
<add>
<add> function moveElement (elt, direction) {
<add> if (direction === 'up') {
<add> if (elt.previousSibling)
<add> elt.parentNode.insertBefore(elt, elt.previousSibling);
<add> }
<add> else {
<add> if (elt.nextSibling && hasClass(elt.nextSibling, 'quad_element'))
<add> elt.parentNode.insertBefore(elt.nextSibling, elt);
<add> }
<add> }
<add>
<add> //
<add> // public methods
<ide>
<ide> function render (container, template, data, options) {
<ide>
<ide> // public for onClick or onChange
<ide>
<ide> showTab: showTab,
<add> addElement: addElement,
<add> moveElement: moveElement,
<ide>
<ide> // public
<ide> |
|
Java | mit | 59399330f95ab76193604712ad0fd46799e34eda | 0 | tcdl/msb-java,tcdl/msb-java | package io.github.tcdl.msb.examples;
import io.github.tcdl.msb.api.MessageTemplate;
import io.github.tcdl.msb.api.MsbContext;
import io.github.tcdl.msb.api.MsbContextBuilder;
import io.github.tcdl.msb.api.RequestOptions;
import io.github.tcdl.msb.api.Requester;
import io.github.tcdl.msb.api.Responder;
import io.github.tcdl.msb.api.message.payload.Payload;
import io.github.tcdl.msb.examples.payload.Request;
import javax.script.ScriptException;
import java.io.FileNotFoundException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
/**
* Microservice that listens for incoming messages, sends requests to other microservices
* (data-extractor, airport-extractor, resort-extractor), concatenates the responses and returns the combined response
*/
public class FacetsAggregator {
public static void main(String[] args) throws ScriptException, FileNotFoundException, NoSuchMethodException {
MsbContext msbContext = new MsbContextBuilder()
.enableChannelMonitorAgent(true)
.enableShutdownHook(true)
.build();
MessageTemplate options = new MessageTemplate();
final String namespace = "search:aggregator:facets:v1";
msbContext.getObjectFactory().createResponderServer(namespace, options, (Request facetsRequest, Responder responder) -> {
String q = facetsRequest.getQuery().getQ();
if (q == null) {
Payload responsePayload = new Payload.Builder()
.withStatusCode(400)
.build();
responder.send(responsePayload);
} else if (q.isEmpty()) {
ResponseBodyAny responseBodyAny = new ResponseBodyAny();
Facet facet = new Facet();
facet.setProbability(1f);
Map<String, Object> map = new HashMap<>();
map.put("depAirport", 0);
map.put("resortCode", "any");
facet.setParams(map);
responseBodyAny.setFacets(Collections.singletonList(facet));
Payload responsePayloadAny = new Payload.Builder<Object, Object, Object, ResponseBodyAny>()
.withStatusCode(200)
.withBody(responseBodyAny)
.build();
responder.send(responsePayloadAny);
} else {
RequestOptions requestOptions = new RequestOptions.Builder()
.withWaitForResponses(1)
.withAckTimeout(200)
.withResponseTimeout(600)
.build();
Requester<Payload> requester = msbContext.getObjectFactory().createRequester("search:parsers:facets:v1",
requestOptions, Payload.class);
final String[] result = {""};
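// a synchronized list, since the requester may deliver responses on its own callback threads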
List<Payload> responses = Collections.synchronizedList(new ArrayList<>());
requester.onResponse(responses::add)
.onEnd(end -> {
for (Payload payload : responses) {
System.out.println(">>> MESSAGE: " + payload);
result[0] += payload;
}
Payload responsePayload = new Payload.Builder<Object, Object, Object, String>()
.withStatusCode(200)
.withBody(result[0])
.build();
responder.send(responsePayload);
});
requester.publish(facetsRequest, responder.getOriginalMessage());
}
}, Request.class).listen();
}
private static class ResponseBodyAny {
private List<Facet> facets;
public List<Facet> getFacets() {
return facets;
}
public void setFacets(List<Facet> facets) {
this.facets = facets;
}
}
private static class Facet {
private float probability;
private int[] offsets = {};
private Map<String, Object> params;
private String[] mappings = {};
public float getProbability() {
return probability;
}
public void setProbability(float probability) {
this.probability = probability;
}
public int[] getOffsets() {
return offsets;
}
public void setOffsets(int[] offsets) {
this.offsets = offsets;
}
public Map<String, Object> getParams() {
return params;
}
public void setParams(Map<String, Object> params) {
this.params = params;
}
public String[] getMappings() {
return mappings;
}
public void setMappings(String[] mappings) {
this.mappings = mappings;
}
}
}
| examples/src/main/java/io/github/tcdl/msb/examples/FacetsAggregator.java | package io.github.tcdl.msb.examples;
import io.github.tcdl.msb.api.MessageTemplate;
import io.github.tcdl.msb.api.MsbContext;
import io.github.tcdl.msb.api.MsbContextBuilder;
import io.github.tcdl.msb.api.RequestOptions;
import io.github.tcdl.msb.api.Requester;
import io.github.tcdl.msb.api.Responder;
import io.github.tcdl.msb.api.message.payload.Payload;
import io.github.tcdl.msb.examples.payload.Request;
import javax.script.ScriptException;
import java.io.FileNotFoundException;
import java.util.Arrays;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
/**
* Microservice that listens for incoming messages, sends requests to other microservices
* (data-extractor, airport-extractor, resort-extractor), concatenates the responses and returns the combined response
*/
public class FacetsAggregator {
public static void main(String[] args) throws ScriptException, FileNotFoundException, NoSuchMethodException {
MsbContext msbContext = new MsbContextBuilder()
.enableChannelMonitorAgent(true)
.enableShutdownHook(true)
.build();
MessageTemplate options = new MessageTemplate();
final String namespace = "search:aggregator:facets:v1";
msbContext.getObjectFactory().createResponderServer(namespace, options, (Request facetsRequest, Responder responder) -> {
String q = facetsRequest.getQuery().getQ();
if (q == null) {
Payload responsePayload = new Payload.Builder()
.withStatusCode(400)
.build();
responder.send(responsePayload);
} else if (q.isEmpty()) {
ResponseBodyAny responseBodyAny = new ResponseBodyAny();
Facet facet = new Facet();
facet.setProbability(1f);
Map<String, Object> map = new HashMap<>();
map.put("depAirport", 0);
map.put("resortCode", "any");
facet.setParams(map);
responseBodyAny.setFacets(Arrays.asList(facet));
Payload responsePayloadAny = new Payload.Builder<Object, Object, Object, ResponseBodyAny>()
.withStatusCode(200)
.withBody(responseBodyAny)
.build();
responder.send(responsePayloadAny);
} else {
RequestOptions requestOptions = new RequestOptions.Builder()
.withWaitForResponses(1)
.withAckTimeout(200)
.withResponseTimeout(600)
.build();
Requester<Payload> requester = msbContext.getObjectFactory().createRequester("search:parsers:facets:v1",
requestOptions, Payload.class);
final String[] result = {""};
requester.onResponse(response -> {
System.out.println(">>> RESPONSE: " + response);
result[0] +=response;
});
List<Payload> responses = new LinkedList<>();
requester.onResponse(response -> responses.add(response))
.onEnd(end -> {
for (Payload payload : responses)
System.out.println(">>> MESSAGE: " + payload);
Payload responsePayload = new Payload.Builder<Object, Object, Object, String>()
.withStatusCode(200)
.withBody(result[0])
.build();
responder.send(responsePayload);
});
requester.publish(facetsRequest, responder.getOriginalMessage());
}
}, Request.class).listen();
}
private static class ResponseBodyAny {
private List<Facet> facets;
public List<Facet> getFacets() {
return facets;
}
public void setFacets(List<Facet> facets) {
this.facets = facets;
}
}
private static class Facet {
private float probability;
private int[] offsets = {};
private Map<String, Object> params;
private String[] mappings = {};
public float getProbability() {
return probability;
}
public void setProbability(float probability) {
this.probability = probability;
}
public int[] getOffsets() {
return offsets;
}
public void setOffsets(int[] offsets) {
this.offsets = offsets;
}
public Map<String, Object> getParams() {
return params;
}
public void setParams(Map<String, Object> params) {
this.params = params;
}
public String[] getMappings() {
return mappings;
}
public void setMappings(String[] mappings) {
this.mappings = mappings;
}
}
}
| Fix for FacetsAggregator
| examples/src/main/java/io/github/tcdl/msb/examples/FacetsAggregator.java | Fix for FacetsAggregator | <ide><path>xamples/src/main/java/io/github/tcdl/msb/examples/FacetsAggregator.java
<ide>
<ide> import javax.script.ScriptException;
<ide> import java.io.FileNotFoundException;
<del>import java.util.Arrays;
<add>import java.util.ArrayList;
<add>import java.util.Collections;
<ide> import java.util.HashMap;
<del>import java.util.LinkedList;
<ide> import java.util.List;
<ide> import java.util.Map;
<ide>
<ide> map.put("resortCode", "any");
<ide> facet.setParams(map);
<ide>
<del> responseBodyAny.setFacets(Arrays.asList(facet));
<add> responseBodyAny.setFacets(Collections.singletonList(facet));
<ide>
<ide> Payload responsePayloadAny = new Payload.Builder<Object, Object, Object, ResponseBodyAny>()
<ide> .withStatusCode(200)
<ide>
<ide> final String[] result = {""};
<ide>
<del> requester.onResponse(response -> {
<del> System.out.println(">>> RESPONSE: " + response);
<del> result[0] +=response;
<del> });
<del>
<del> List<Payload> responses = new LinkedList<>();
<del> requester.onResponse(response -> responses.add(response))
<add> List<Payload> responses = Collections.synchronizedList(new ArrayList<>());
<add> requester.onResponse(responses::add)
<ide> .onEnd(end -> {
<del> for (Payload payload : responses)
<add> for (Payload payload : responses) {
<ide> System.out.println(">>> MESSAGE: " + payload);
<add> result[0] += payload;
<add> }
<ide>
<ide> Payload responsePayload = new Payload.Builder<Object, Object, Object, String>()
<ide> .withStatusCode(200) |
|
Java | lgpl-2.1 | 0cfe8d5d9bb9fcae7a77ea8316a0504bb75748f6 | 0 | languagetool-org/languagetool,meg0man/languagetool,janissl/languagetool,jimregan/languagetool,janissl/languagetool,languagetool-org/languagetool,languagetool-org/languagetool,janissl/languagetool,lopescan/languagetool,lopescan/languagetool,lopescan/languagetool,meg0man/languagetool,meg0man/languagetool,janissl/languagetool,languagetool-org/languagetool,lopescan/languagetool,languagetool-org/languagetool,jimregan/languagetool,janissl/languagetool,meg0man/languagetool,janissl/languagetool,jimregan/languagetool,jimregan/languagetool,jimregan/languagetool,meg0man/languagetool,lopescan/languagetool | /* LanguageTool, a natural language style checker
* Copyright (C) 2005 Daniel Naber (http://www.danielnaber.de)
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301
* USA
*/
package de.danielnaber.languagetool.dev.wikipedia;
import java.io.File;
import java.io.FileInputStream;
import javax.xml.parsers.SAXParser;
import javax.xml.parsers.SAXParserFactory;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.xml.sax.Attributes;
import org.xml.sax.SAXException;
import org.xml.sax.helpers.DefaultHandler;
import de.danielnaber.languagetool.Language;
import de.danielnaber.languagetool.TextFilter;
import de.danielnaber.languagetool.dev.index.Indexer;
import de.danielnaber.languagetool.dev.tools.RomanianDiacriticsModifier;
/**
*
* Wikipedia handler for indexing.
*
* @author Tao Lin
*
*/
public class WikipediaIndexHandler extends DefaultHandler {
protected static final int CONTEXT_SIZE = 50;
protected static final String MARKER_START = "<err>";
protected static final String MARKER_END = "</err>";
protected static final String LANG_MARKER = "XX";
protected static final String URL_PREFIX = "http://" + LANG_MARKER + ".wikipedia.org/wiki/";
private int articleCount = 0;
// the number of the wiki page to start indexing
private int start = 0;
// the number of the wiki page to end indexing
private int end = 0;
private boolean inText = false;
private StringBuilder text = new StringBuilder();
private TextFilter textFilter = new WikipediaTextFilter();
private final Language language;
private final Indexer indexer;
// ===========================================================
// SAX DocumentHandler methods
// ===========================================================
public WikipediaIndexHandler(Directory dir, Language language, int start, int end) {
this.language = language;
this.indexer = new Indexer(dir, language);
this.start = start;
this.end = end;
if (start > end) {
throw new RuntimeException("\"Start\" should be smaller than \"End\"");
}
initTextFilter();
}
/**
* initialize textFilter field
*/
private void initTextFilter() {
if (Language.ROMANIAN == language) {
textFilter = new WikipediaTextFilter() {
@Override
public String filter(String arg0) {
final String tmp = super.filter(arg0);
// diacritics correction (comma-below instead of cedilla for ș and ț)
return RomanianDiacriticsModifier.correctDiacritrics(tmp);
}
};
} else {
textFilter = new WikipediaTextFilter();
}
}
@SuppressWarnings("unused")
public void startElement(String namespaceURI, String lName, String qName, Attributes attrs)
throws SAXException {
if (qName.equals("title")) {
inText = true;
} else if (qName.equals("text")) {
inText = true;
}
}
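// titles are buffered but discarded in endElement(); page text is run through the wiki-markup filter and indexed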
@SuppressWarnings("unused")
public void endElement(String namespaceURI, String sName, String qName) {
if (qName.equals("title")) {
text = new StringBuilder();
} else if (qName.equals("text")) {
try {
System.out.println(articleCount++);
if (articleCount < start) {
return;
} else if (articleCount >= end) {
throw new RuntimeException();
}
// System.err.println(text.length() + " " + text.substring(0, Math.min(50, text.length())));
final String textToCheck = textFilter.filter(text.toString());
// System.out.println(textToCheck);
if (!textToCheck.contains("#REDIRECT") && !textToCheck.trim().equals("")) {
// System.err.println("#########################");
// System.err.println(textToCheck);
indexer.index(textToCheck, false);
}
} catch (Exception e) {
throw new RuntimeException(e);
}
}
text = new StringBuilder();
inText = false;
}
public void characters(char buf[], int offset, int len) {
final String s = new String(buf, offset, len);
if (inText) {
text.append(s);
}
}
public void close() throws Exception {
indexer.close();
}
public static void main(String... strings) throws Exception {
long start = System.currentTimeMillis();
final SAXParserFactory factory = SAXParserFactory.newInstance();
final SAXParser saxParser = factory.newSAXParser();
WikipediaIndexHandler handler = new WikipediaIndexHandler(FSDirectory.open(new File(
//"E:\\project\\data\\index_zh")), Language.CHINESE, 4001, 10000);
"E:\\project\\data\\index_zh")), Language.ENGLISH, 4001, 10000);
try {
saxParser.parse(new FileInputStream(new File(
"E:\\project\\data\\zhwiki-latest-pages-meta-current.xml")), handler);
} catch (RuntimeException e) {
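// expected: endElement() throws a bare RuntimeException to stop SAX parsing once 'end' pages have been processed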
}
handler.close();
long end = System.currentTimeMillis();
System.out.println("It takes " + (start - end) / (1000 * 60) + " minutes");
}
}
| trunk/JLanguageTool/src/dev/de/danielnaber/languagetool/dev/wikipedia/WikipediaIndexHandler.java | /* LanguageTool, a natural language style checker
* Copyright (C) 2005 Daniel Naber (http://www.danielnaber.de)
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301
* USA
*/
package de.danielnaber.languagetool.dev.wikipedia;
import java.io.File;
import java.io.FileInputStream;
import javax.xml.parsers.SAXParser;
import javax.xml.parsers.SAXParserFactory;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.xml.sax.Attributes;
import org.xml.sax.SAXException;
import org.xml.sax.helpers.DefaultHandler;
import de.danielnaber.languagetool.Language;
import de.danielnaber.languagetool.TextFilter;
import de.danielnaber.languagetool.dev.index.Indexer;
import de.danielnaber.languagetool.dev.tools.RomanianDiacriticsModifier;
/**
*
* Wikipedia handler for indexing.
*
* @author Tao Lin
*
*/
public class WikipediaIndexHandler extends DefaultHandler {
protected static final int CONTEXT_SIZE = 50;
protected static final String MARKER_START = "<err>";
protected static final String MARKER_END = "</err>";
protected static final String LANG_MARKER = "XX";
protected static final String URL_PREFIX = "http://" + LANG_MARKER + ".wikipedia.org/wiki/";
private int articleCount = 0;
// the number of the wiki page to start indexing
private int start = 0;
// the number of the wiki page to end indexing
private int end = 0;
private boolean inText = false;
private StringBuilder text = new StringBuilder();
private TextFilter textFilter = new WikipediaTextFilter();
private final Language language;
private final Indexer indexer;
// ===========================================================
// SAX DocumentHandler methods
// ===========================================================
public WikipediaIndexHandler(Directory dir, Language language, int start, int end) {
this.language = language;
this.indexer = new Indexer(dir, language);
this.start = start;
this.end = end;
if (start > end) {
throw new RuntimeException("\"Start\" should be smaller than \"End\"");
}
initTextFilter();
}
/**
* initialize textFilter field
*/
private void initTextFilter() {
if (Language.ROMANIAN == language) {
textFilter = new WikipediaTextFilter() {
@Override
public String filter(String arg0) {
final String tmp = super.filter(arg0);
// diacritics correction (comma-below instead of cedilla for ș and ț)
return RomanianDiacriticsModifier.correctDiacritrics(tmp);
}
};
} else {
textFilter = new WikipediaTextFilter();
}
}
@SuppressWarnings("unused")
public void startElement(String namespaceURI, String lName, String qName, Attributes attrs)
throws SAXException {
if (qName.equals("title")) {
inText = true;
} else if (qName.equals("text")) {
inText = true;
}
}
@SuppressWarnings("unused")
public void endElement(String namespaceURI, String sName, String qName) {
if (qName.equals("title")) {
text = new StringBuilder();
} else if (qName.equals("text")) {
try {
System.out.println(articleCount++);
if (articleCount < start) {
return;
} else if (articleCount >= end) {
throw new RuntimeException();
}
// System.err.println(text.length() + " " + text.substring(0, Math.min(50, text.length())));
final String textToCheck = textFilter.filter(text.toString());
// System.out.println(textToCheck);
if (!textToCheck.contains("#REDIRECT") && !textToCheck.trim().equals("")) {
// System.err.println("#########################");
// System.err.println(textToCheck);
indexer.index(textToCheck, false);
}
} catch (Exception e) {
throw new RuntimeException(e);
}
}
text = new StringBuilder();
inText = false;
}
public void characters(char buf[], int offset, int len) {
final String s = new String(buf, offset, len);
if (inText) {
text.append(s);
}
}
public void close() throws Exception {
indexer.close();
}
public static void main(String... strings) throws Exception {
long start = System.currentTimeMillis();
final SAXParserFactory factory = SAXParserFactory.newInstance();
final SAXParser saxParser = factory.newSAXParser();
WikipediaIndexHandler handler = new WikipediaIndexHandler(FSDirectory.open(new File(
"E:\\project\\data\\index_zh")), Language.CHINESE, 4001, 10000);
try {
saxParser.parse(new FileInputStream(new File(
"E:\\project\\data\\zhwiki-latest-pages-meta-current.xml")), handler);
} catch (RuntimeException e) {
}
handler.close();
long end = System.currentTimeMillis();
System.out.println("It takes " + (start - end) / (1000 * 60) + " minutes");
}
}
| fix compile (Chinese is not known yet)
| trunk/JLanguageTool/src/dev/de/danielnaber/languagetool/dev/wikipedia/WikipediaIndexHandler.java | fix compile (Chinese is not known yet) | <ide><path>runk/JLanguageTool/src/dev/de/danielnaber/languagetool/dev/wikipedia/WikipediaIndexHandler.java
<ide> final SAXParserFactory factory = SAXParserFactory.newInstance();
<ide> final SAXParser saxParser = factory.newSAXParser();
<ide> WikipediaIndexHandler handler = new WikipediaIndexHandler(FSDirectory.open(new File(
<del> "E:\\project\\data\\index_zh")), Language.CHINESE, 4001, 10000);
<add> //"E:\\project\\data\\index_zh")), Language.CHINESE, 4001, 10000);
<add> "E:\\project\\data\\index_zh")), Language.ENGLISH, 4001, 10000);
<ide> try {
<ide> saxParser.parse(new FileInputStream(new File(
<ide> "E:\\project\\data\\zhwiki-latest-pages-meta-current.xml")), handler); |
|
Java | apache-2.0 | e4786e790a9b2444be36455b6aa0e9aba49e000e | 0 | SomeFire/ignite,agura/incubator-ignite,shroman/ignite,kromulan/ignite,zzcclp/ignite,ryanzz/ignite,nivanov/ignite,DoudTechData/ignite,svladykin/ignite,endian675/ignite,thuTom/ignite,samaitra/ignite,dream-x/ignite,SomeFire/ignite,ascherbakoff/ignite,xtern/ignite,VladimirErshov/ignite,ilantukh/ignite,gridgain/apache-ignite,apache/ignite,nizhikov/ignite,adeelmahmood/ignite,gargvish/ignite,vadopolski/ignite,xtern/ignite,samaitra/ignite,StalkXT/ignite,alexzaitzev/ignite,gargvish/ignite,nivanov/ignite,irudyak/ignite,psadusumilli/ignite,alexzaitzev/ignite,BiryukovVA/ignite,nivanov/ignite,nizhikov/ignite,kidaa/incubator-ignite,louishust/incubator-ignite,ntikhonov/ignite,voipp/ignite,leveyj/ignite,agura/incubator-ignite,ascherbakoff/ignite,amirakhmedov/ignite,dream-x/ignite,sylentprayer/ignite,voipp/ignite,amirakhmedov/ignite,daradurvs/ignite,NSAmelchev/ignite,voipp/ignite,VladimirErshov/ignite,NSAmelchev/ignite,irudyak/ignite,endian675/ignite,dmagda/incubator-ignite,ryanzz/ignite,ascherbakoff/ignite,shroman/ignite,dream-x/ignite,sk0x50/ignite,vadopolski/ignite,iveselovskiy/ignite,ashutakGG/incubator-ignite,gridgain/apache-ignite,ptupitsyn/ignite,ryanzz/ignite,andrey-kuznetsov/ignite,WilliamDo/ignite,thuTom/ignite,alexzaitzev/ignite,nivanov/ignite,avinogradovgg/ignite,StalkXT/ignite,vldpyatkov/ignite,zzcclp/ignite,f7753/ignite,nivanov/ignite,afinka77/ignite,dmagda/incubator-ignite,louishust/incubator-ignite,vldpyatkov/ignite,vsuslov/incubator-ignite,f7753/ignite,BiryukovVA/ignite,daradurvs/ignite,afinka77/ignite,chandresh-pancholi/ignite,NSAmelchev/ignite,abhishek-ch/incubator-ignite,irudyak/ignite,zzcclp/ignite,shroman/ignite,WilliamDo/ignite,dmagda/incubator-ignite,apacheignite/ignite,dream-x/ignite,adeelmahmood/ignite,pperalta/ignite,alexzaitzev/ignite,thuTom/ignite,chandresh-pancholi/ignite,amirakhmedov/ignite,ptupitsyn/ignite,dlnufox/ignite,gridgain/apache-ignite,dlnufox/ignite,WilliamDo/ignite,daradurvs/ignite,wmz7year/ignite,StalkXT/ignite,apache/ignite,kidaa/incubator-ignite,agura/incubator-ignite,BiryukovVA/ignite,vladisav/ignite,f7753/ignite,daradurvs/ignite,vladisav/ignite,wmz7year/ignite,nizhikov/ignite,apacheignite/ignite,a1vanov/ignite,vsuslov/incubator-ignite,ntikhonov/ignite,leveyj/ignite,samaitra/ignite,dmagda/incubator-ignite,dream-x/ignite,tkpanther/ignite,mcherkasov/ignite,alexzaitzev/ignite,amirakhmedov/ignite,chandresh-pancholi/ignite,amirakhmedov/ignite,iveselovskiy/ignite,pperalta/ignite,gargvish/ignite,gridgain/apache-ignite,WilliamDo/ignite,afinka77/ignite,thuTom/ignite,shurun19851206/ignite,tkpanther/ignite,f7753/ignite,vsisko/incubator-ignite,agoncharuk/ignite,gargvish/ignite,shroman/ignite,DoudTechData/ignite,apacheignite/ignite,adeelmahmood/ignite,pperalta/ignite,xtern/ignite,psadusumilli/ignite,mcherkasov/ignite,mcherkasov/ignite,kromulan/ignite,f7753/ignite,andrey-kuznetsov/ignite,NSAmelchev/ignite,svladykin/ignite,NSAmelchev/ignite,daradurvs/ignite,arijitt/incubator-ignite,BiryukovVA/ignite,nizhikov/ignite,thuTom/ignite,daradurvs/ignite,adeelmahmood/ignite,chandresh-pancholi/ignite,thuTom/ignite,rfqu/ignite,tkpanther/ignite,agura/incubator-ignite,tkpanther/ignite,zzcclp/ignite,avinogradovgg/ignite,chandresh-pancholi/ignite,afinka77/ignite,nizhikov/ignite,SharplEr/ignite,ilantukh/ignite,DoudTechData/ignite,dlnufox/ignite,ilantukh/ignite,irudyak/ignite,alexzaitzev/ignite,ryanzz/ignite,shurun19851206/ignite,VladimirErshov/ignite,rfqu/ignite,voipp/ignite,wmz7year/ignite,DoudTechData/ignite,vsisko/incubator
-ignite,akuznetsov-gridgain/ignite,ascherbakoff/ignite,agoncharuk/ignite,ashutakGG/incubator-ignite,sk0x50/ignite,svladykin/ignite,avinogradovgg/ignite,wmz7year/ignite,avinogradovgg/ignite,shroman/ignite,chandresh-pancholi/ignite,tkpanther/ignite,vadopolski/ignite,sk0x50/ignite,tkpanther/ignite,SharplEr/ignite,endian675/ignite,endian675/ignite,samaitra/ignite,gridgain/apache-ignite,mcherkasov/ignite,arijitt/incubator-ignite,NSAmelchev/ignite,sylentprayer/ignite,sk0x50/ignite,shurun19851206/ignite,leveyj/ignite,SomeFire/ignite,DoudTechData/ignite,apache/ignite,vldpyatkov/ignite,ascherbakoff/ignite,ascherbakoff/ignite,vadopolski/ignite,endian675/ignite,dlnufox/ignite,iveselovskiy/ignite,akuznetsov-gridgain/ignite,dmagda/incubator-ignite,afinka77/ignite,StalkXT/ignite,shroman/ignite,tkpanther/ignite,ntikhonov/ignite,vldpyatkov/ignite,apache/ignite,ptupitsyn/ignite,pperalta/ignite,BiryukovVA/ignite,amirakhmedov/ignite,WilliamDo/ignite,a1vanov/ignite,zzcclp/ignite,rfqu/ignite,SomeFire/ignite,endian675/ignite,afinka77/ignite,shroman/ignite,vsisko/incubator-ignite,andrey-kuznetsov/ignite,chandresh-pancholi/ignite,rfqu/ignite,ilantukh/ignite,ryanzz/ignite,vadopolski/ignite,ashutakGG/incubator-ignite,WilliamDo/ignite,xtern/ignite,sylentprayer/ignite,xtern/ignite,ptupitsyn/ignite,xtern/ignite,gridgain/apache-ignite,psadusumilli/ignite,shurun19851206/ignite,tkpanther/ignite,ntikhonov/ignite,vldpyatkov/ignite,SharplEr/ignite,amirakhmedov/ignite,StalkXT/ignite,ptupitsyn/ignite,sylentprayer/ignite,apacheignite/ignite,kromulan/ignite,voipp/ignite,NSAmelchev/ignite,chandresh-pancholi/ignite,xtern/ignite,adeelmahmood/ignite,ashutakGG/incubator-ignite,leveyj/ignite,wmz7year/ignite,agoncharuk/ignite,akuznetsov-gridgain/ignite,pperalta/ignite,andrey-kuznetsov/ignite,samaitra/ignite,kromulan/ignite,dream-x/ignite,nizhikov/ignite,ilantukh/ignite,shroman/ignite,SomeFire/ignite,louishust/incubator-ignite,vsisko/incubator-ignite,ntikhonov/ignite,voipp/ignite,vldpyatkov/ignite,abhishek-ch/incubator-ignite,SharplEr/ignite,VladimirErshov/ignite,ascherbakoff/ignite,vadopolski/ignite,gridgain/apache-ignite,irudyak/ignite,StalkXT/ignite,NSAmelchev/ignite,SharplEr/ignite,leveyj/ignite,ptupitsyn/ignite,dlnufox/ignite,ascherbakoff/ignite,ilantukh/ignite,voipp/ignite,agura/incubator-ignite,murador/ignite,murador/ignite,gargvish/ignite,ryanzz/ignite,adeelmahmood/ignite,apache/ignite,akuznetsov-gridgain/ignite,rfqu/ignite,endian675/ignite,sk0x50/ignite,amirakhmedov/ignite,voipp/ignite,SharplEr/ignite,f7753/ignite,apache/ignite,samaitra/ignite,DoudTechData/ignite,mcherkasov/ignite,alexzaitzev/ignite,thuTom/ignite,shurun19851206/ignite,agura/incubator-ignite,shurun19851206/ignite,apacheignite/ignite,apache/ignite,vladisav/ignite,psadusumilli/ignite,samaitra/ignite,louishust/incubator-ignite,gargvish/ignite,samaitra/ignite,gargvish/ignite,ashutakGG/incubator-ignite,samaitra/ignite,avinogradovgg/ignite,andrey-kuznetsov/ignite,agoncharuk/ignite,SomeFire/ignite,leveyj/ignite,andrey-kuznetsov/ignite,nizhikov/ignite,vsuslov/incubator-ignite,StalkXT/ignite,iveselovskiy/ignite,a1vanov/ignite,pperalta/ignite,kidaa/incubator-ignite,SomeFire/ignite,SomeFire/ignite,shroman/ignite,andrey-kuznetsov/ignite,endian675/ignite,mcherkasov/ignite,dlnufox/ignite,ilantukh/ignite,a1vanov/ignite,SomeFire/ignite,murador/ignite,kidaa/incubator-ignite,xtern/ignite,f7753/ignite,ptupitsyn/ignite,VladimirErshov/ignite,xtern/ignite,wmz7year/ignite,amirakhmedov/ignite,apacheignite/ignite,abhishek-ch/incubator-ignite,ntikhonov/ignite,avinogradovgg/ignite,daradur
vs/ignite,ntikhonov/ignite,iveselovskiy/ignite,mcherkasov/ignite,NSAmelchev/ignite,daradurvs/ignite,agoncharuk/ignite,daradurvs/ignite,daradurvs/ignite,ilantukh/ignite,sk0x50/ignite,louishust/incubator-ignite,mcherkasov/ignite,VladimirErshov/ignite,kromulan/ignite,abhishek-ch/incubator-ignite,nivanov/ignite,vsisko/incubator-ignite,arijitt/incubator-ignite,pperalta/ignite,zzcclp/ignite,dmagda/incubator-ignite,irudyak/ignite,sk0x50/ignite,shurun19851206/ignite,shroman/ignite,SharplEr/ignite,irudyak/ignite,abhishek-ch/incubator-ignite,WilliamDo/ignite,wmz7year/ignite,kidaa/incubator-ignite,ptupitsyn/ignite,dream-x/ignite,kidaa/incubator-ignite,vsisko/incubator-ignite,agura/incubator-ignite,sk0x50/ignite,vldpyatkov/ignite,wmz7year/ignite,murador/ignite,voipp/ignite,nivanov/ignite,arijitt/incubator-ignite,vladisav/ignite,vladisav/ignite,DoudTechData/ignite,andrey-kuznetsov/ignite,rfqu/ignite,SharplEr/ignite,ntikhonov/ignite,dmagda/incubator-ignite,nivanov/ignite,agoncharuk/ignite,apache/ignite,psadusumilli/ignite,afinka77/ignite,vadopolski/ignite,psadusumilli/ignite,irudyak/ignite,apacheignite/ignite,zzcclp/ignite,StalkXT/ignite,StalkXT/ignite,arijitt/incubator-ignite,BiryukovVA/ignite,andrey-kuznetsov/ignite,psadusumilli/ignite,vsuslov/incubator-ignite,kromulan/ignite,akuznetsov-gridgain/ignite,ashutakGG/incubator-ignite,VladimirErshov/ignite,vadopolski/ignite,kromulan/ignite,vladisav/ignite,sk0x50/ignite,vsuslov/incubator-ignite,chandresh-pancholi/ignite,ptupitsyn/ignite,akuznetsov-gridgain/ignite,vsuslov/incubator-ignite,louishust/incubator-ignite,agura/incubator-ignite,adeelmahmood/ignite,dmagda/incubator-ignite,vladisav/ignite,alexzaitzev/ignite,ptupitsyn/ignite,rfqu/ignite,samaitra/ignite,ascherbakoff/ignite,shurun19851206/ignite,rfqu/ignite,sylentprayer/ignite,thuTom/ignite,gargvish/ignite,sylentprayer/ignite,BiryukovVA/ignite,a1vanov/ignite,murador/ignite,arijitt/incubator-ignite,andrey-kuznetsov/ignite,murador/ignite,sylentprayer/ignite,kromulan/ignite,WilliamDo/ignite,svladykin/ignite,iveselovskiy/ignite,sylentprayer/ignite,dream-x/ignite,ilantukh/ignite,agoncharuk/ignite,vladisav/ignite,VladimirErshov/ignite,psadusumilli/ignite,vsisko/incubator-ignite,ryanzz/ignite,zzcclp/ignite,afinka77/ignite,alexzaitzev/ignite,vsisko/incubator-ignite,ryanzz/ignite,apacheignite/ignite,a1vanov/ignite,vldpyatkov/ignite,pperalta/ignite,svladykin/ignite,nizhikov/ignite,nizhikov/ignite,f7753/ignite,a1vanov/ignite,leveyj/ignite,murador/ignite,BiryukovVA/ignite,dlnufox/ignite,agoncharuk/ignite,svladykin/ignite,BiryukovVA/ignite,apache/ignite,DoudTechData/ignite,avinogradovgg/ignite,irudyak/ignite,leveyj/ignite,murador/ignite,adeelmahmood/ignite,a1vanov/ignite,BiryukovVA/ignite,svladykin/ignite,dlnufox/ignite,abhishek-ch/incubator-ignite,SharplEr/ignite,ilantukh/ignite,SomeFire/ignite | /* @java.file.header */
/* _________ _____ __________________ _____
* __ ____/___________(_)______ /__ ____/______ ____(_)_______
* _ / __ __ ___/__ / _ __ / _ / __ _ __ `/__ / __ __ \
* / /_/ / _ / _ / / /_/ / / /_/ / / /_/ / _ / _ / / /
* \____/ /_/ /_/ \_,__/ \____/ \__,_/ /_/ /_/ /_/
*/
package org.gridgain.grid.kernal.processors.hadoop;
import org.apache.hadoop.conf.*;
import org.apache.hadoop.fs.*;
import org.apache.hadoop.io.*;
import org.apache.hadoop.mapreduce.*;
import org.apache.hadoop.mapreduce.lib.input.*;
import org.apache.hadoop.mapreduce.lib.output.*;
import org.gridgain.grid.*;
import org.gridgain.grid.ggfs.*;
import org.gridgain.grid.ggfs.hadoop.v1.*;
import org.gridgain.grid.hadoop.*;
import org.gridgain.grid.util.lang.*;
import org.gridgain.grid.util.typedef.*;
import org.gridgain.grid.util.typedef.internal.*;
import org.gridgain.testframework.*;
import java.io.*;
import java.util.*;
import java.util.concurrent.*;
import java.util.concurrent.atomic.*;
/**
* Tests map-reduce task execution basics.
*/
public class GridHadoopTaskExecutionSelfTest extends GridHadoopAbstractSelfTest {
/** Line count. */
private static final AtomicInteger totalLineCnt = new AtomicInteger();
/** Executed tasks. */
private static final AtomicInteger executedTasks = new AtomicInteger();
/** Cancelled tasks. */
private static final AtomicInteger cancelledTasks = new AtomicInteger();
/** Mapper id to fail. */
private static volatile int failMapperId;
/** Test param. */
private static final String MAP_WRITE = "test.map.write";
/** {@inheritDoc} */
@Override public GridGgfsConfiguration ggfsConfiguration() {
GridGgfsConfiguration cfg = super.ggfsConfiguration();
cfg.setFragmentizerEnabled(false);
return cfg;
}
/** {@inheritDoc} */
@Override protected boolean ggfsEnabled() {
return true;
}
/** {@inheritDoc} */
@Override protected void beforeTestsStarted() throws Exception {
super.beforeTestsStarted();
startGrids(gridCount());
}
/** {@inheritDoc} */
@Override protected void afterTestsStopped() throws Exception {
stopAllGrids();
super.afterTestsStopped();
}
/** {@inheritDoc} */
@Override protected void beforeTest() throws Exception {
grid(0).ggfs(ggfsName).format().get();
}
/** {@inheritDoc} */
@Override public GridHadoopConfiguration hadoopConfiguration(String gridName) {
GridHadoopConfiguration cfg = super.hadoopConfiguration(gridName);
cfg.setExternalExecution(false);
return cfg;
}
/**
* @throws Exception If failed.
*/
public void testMapRun() throws Exception {
int lineCnt = 10000;
String fileName = "/testFile";
prepareFile(fileName, lineCnt);
totalLineCnt.set(0);
Configuration cfg = new Configuration();
cfg.setStrings("fs.ggfs.impl", GridGgfsHadoopFileSystem.class.getName());
Job job = Job.getInstance(cfg);
job.setOutputKeyClass(Text.class);
job.setOutputValueClass(IntWritable.class);
job.setMapperClass(TestMapper.class);
job.setNumReduceTasks(0);
job.setInputFormatClass(TextInputFormat.class);
FileInputFormat.setInputPaths(job, new Path("ggfs:///"));
FileOutputFormat.setOutputPath(job, new Path("ggfs:///output/"));
job.setJarByClass(getClass());
GridFuture<?> fut = grid(0).hadoop().submit(new GridHadoopJobId(UUID.randomUUID(), 1),
new GridHadoopDefaultJobInfo(job.getConfiguration()));
fut.get();
assertEquals(lineCnt, totalLineCnt.get());
}
/**
* @throws Exception If failed.
*/
public void testMapCombineRun() throws Exception {
int lineCnt = 10001;
String fileName = "/testFile";
prepareFile(fileName, lineCnt);
totalLineCnt.set(0);
Configuration cfg = new Configuration();
cfg.setStrings("fs.ggfs.impl", GridGgfsHadoopFileSystem.class.getName());
cfg.setBoolean(MAP_WRITE, true);
Job job = Job.getInstance(cfg);
job.setOutputKeyClass(Text.class);
job.setOutputValueClass(IntWritable.class);
job.setMapperClass(TestMapper.class);
job.setCombinerClass(TestCombiner.class);
job.setReducerClass(TestReducer.class);
job.setNumReduceTasks(2);
job.setInputFormatClass(TextInputFormat.class);
FileInputFormat.setInputPaths(job, new Path("ggfs:///"));
FileOutputFormat.setOutputPath(job, new Path("ggfs:///output"));
job.setJarByClass(getClass());
GridHadoopJobId jobId = new GridHadoopJobId(UUID.randomUUID(), 2);
GridFuture<?> fut = grid(0).hadoop().submit(jobId,
new GridHadoopDefaultJobInfo(job.getConfiguration()));
fut.get();
assertEquals(lineCnt, totalLineCnt.get());
for (int g = 0; g < gridCount(); g++)
grid(g).hadoop().finishFuture(jobId).get();
}
/**
* @throws Exception If failed.
*/
public void testMapperException() throws Exception {
prepareFile("/testFile", 1000);
Configuration cfg = new Configuration();
cfg.setStrings("fs.ggfs.impl", GridGgfsHadoopFileSystem.class.getName());
Job job = Job.getInstance(cfg);
job.setOutputKeyClass(Text.class);
job.setOutputValueClass(IntWritable.class);
job.setMapperClass(FailMapper.class);
job.setNumReduceTasks(0);
job.setInputFormatClass(TextInputFormat.class);
FileInputFormat.setInputPaths(job, new Path("ggfs:///"));
FileOutputFormat.setOutputPath(job, new Path("ggfs:///output/"));
job.setJarByClass(getClass());
final GridFuture<?> fut = grid(0).hadoop().submit(new GridHadoopJobId(UUID.randomUUID(), 3),
new GridHadoopDefaultJobInfo(job.getConfiguration()));
GridTestUtils.assertThrows(log, new Callable<Object>() {
@Override public Object call() throws Exception {
fut.get();
return null;
}
}, GridException.class, null);
}
/**
* @param fileName File name.
* @param lineCnt Line count.
* @throws Exception If failed.
*/
private void prepareFile(String fileName, int lineCnt) throws Exception {
GridGgfs ggfs = grid(0).ggfs(ggfsName);
try (OutputStream os = ggfs.create(new GridGgfsPath(fileName), true)) {
PrintWriter w = new PrintWriter(new OutputStreamWriter(os));
for (int i = 0; i < lineCnt; i++)
w.print("Hello, Hadoop map-reduce!\n");
w.flush();
}
}
/**
     * Prepares a job whose mapper tasks can be cancelled.
* @return Fully configured job.
     * @throws Exception If failed.
*/
private Configuration prepareJobForCancelling() throws Exception {
prepareFile("/testFile", 10000);
executedTasks.set(0);
cancelledTasks.set(0);
failMapperId = 0;
Configuration cfg = new Configuration();
cfg.set("fs.ggfs.impl", GridGgfsHadoopFileSystem.class.getName());
Job job = Job.getInstance(cfg);
job.setOutputKeyClass(Text.class);
job.setOutputValueClass(IntWritable.class);
job.setMapperClass(CancellingTestMapper.class);
job.setNumReduceTasks(0);
job.setInputFormatClass(TextInputFormat.class);
FileInputFormat.setInputPaths(job, new Path("ggfs:///"));
FileOutputFormat.setOutputPath(job, new Path("ggfs:///output/"));
job.setJarByClass(getClass());
return job.getConfiguration();
}
/**
* @throws Exception If failed.
*/
public void testTaskCancelling() throws Exception {
Configuration cfg = prepareJobForCancelling();
GridHadoopJobId jobId = new GridHadoopJobId(UUID.randomUUID(), 1);
final GridFuture<?> fut = grid(0).hadoop().submit(jobId, new GridHadoopDefaultJobInfo(cfg));
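        // Wait (up to 20 s) for all 32 expected mapper tasks to start; on timeout, dump threads and fail.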
if (!GridTestUtils.waitForCondition(new GridAbsPredicate() {
@Override public boolean apply() {
return executedTasks.get() == 32;
}
}, 20000)) {
U.dumpThreads(log);
assertTrue(false);
}
        // Fail the mapper with id "1"; the remaining mappers should then be cancelled.
failMapperId = 1;
GridTestUtils.assertThrows(log, new Callable<Object>() {
@Override public Object call() throws Exception {
fut.get();
return null;
}
}, GridException.class, null);
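        // Every mapper that started, except the one that threw, should have been cancelled.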
assertEquals(executedTasks.get(), cancelledTasks.get() + 1);
}
/**
* @throws Exception If failed.
*/
public void testJobKill() throws Exception {
Configuration cfg = prepareJobForCancelling();
GridHadoop hadoop = grid(0).hadoop();
GridHadoopJobId jobId = new GridHadoopJobId(UUID.randomUUID(), 1);
//Kill unknown job.
boolean killRes = hadoop.kill(jobId);
assertFalse(killRes);
final GridFuture<?> fut = hadoop.submit(jobId, new GridHadoopDefaultJobInfo(cfg));
if (!GridTestUtils.waitForCondition(new GridAbsPredicate() {
@Override public boolean apply() {
return executedTasks.get() == 32;
}
}, 20000)) {
U.dumpThreads(log);
assertTrue(false);
}
        //Kill a job that is actually running.
killRes = hadoop.kill(jobId);
assertTrue(killRes);
GridTestUtils.assertThrows(log, new Callable<Object>() {
@Override public Object call() throws Exception {
fut.get();
return null;
}
}, GridException.class, null);
assertEquals(executedTasks.get(), cancelledTasks.get());
//Kill the same job again.
killRes = hadoop.kill(jobId);
assertTrue(killRes);
}
private static class CancellingTestMapper extends Mapper<Object, Text, Text, IntWritable> {
private int mapperId;
/** {@inheritDoc} */
@Override protected void setup(Context context) throws IOException, InterruptedException {
mapperId = executedTasks.incrementAndGet();
}
/** {@inheritDoc} */
@Override public void run(Context context) throws IOException, InterruptedException {
try {
super.run(context);
}
catch (GridHadoopTaskCancelledException e) {
cancelledTasks.incrementAndGet();
throw e;
}
}
/** {@inheritDoc} */
@Override protected void map(Object key, Text value, Context context) throws IOException, InterruptedException {
if (mapperId == failMapperId)
throw new IOException();
Thread.sleep(1000);
}
}
/**
* Test failing mapper.
*/
private static class FailMapper extends Mapper<Object, Text, Text, IntWritable> {
/** {@inheritDoc} */
@Override protected void map(Object key, Text value, Context context)
throws IOException, InterruptedException {
throw new IOException("Expected");
}
}
/**
* Mapper calculates number of lines.
*/
private static class TestMapper extends Mapper<Object, Text, Text, IntWritable> {
/** Writable integer constant of '1'. */
private static final IntWritable ONE = new IntWritable(1);
/** Line count constant. */
public static final Text LINE_COUNT = new Text("lineCount");
/** {@inheritDoc} */
@Override protected void setup(Context context) throws IOException, InterruptedException {
X.println("___ Mapper: " + context.getTaskAttemptID());
}
/** {@inheritDoc} */
@Override protected void map(Object key, Text value, Context ctx) throws IOException, InterruptedException {
if (ctx.getConfiguration().getBoolean(MAP_WRITE, false))
ctx.write(LINE_COUNT, ONE);
else
totalLineCnt.incrementAndGet();
}
}
/**
* Combiner calculates number of lines.
*/
private static class TestCombiner extends Reducer<Text, IntWritable, Text, IntWritable> {
/** */
IntWritable sum = new IntWritable();
/** {@inheritDoc} */
@Override protected void setup(Context context) throws IOException, InterruptedException {
X.println("___ Combiner: ");
}
/** {@inheritDoc} */
@Override protected void reduce(Text key, Iterable<IntWritable> values, Context ctx) throws IOException,
InterruptedException {
int lineCnt = 0;
for (IntWritable value : values)
lineCnt += value.get();
sum.set(lineCnt);
X.println("___ combo: " + lineCnt);
ctx.write(key, sum);
}
}
/**
     * Reducer calculates number of lines.
*/
private static class TestReducer extends Reducer<Text, IntWritable, Text, IntWritable> {
/** */
IntWritable sum = new IntWritable();
/** {@inheritDoc} */
@Override protected void setup(Context context) throws IOException, InterruptedException {
X.println("___ Reducer: " + context.getTaskAttemptID());
}
/** {@inheritDoc} */
@Override protected void reduce(Text key, Iterable<IntWritable> values, Context ctx) throws IOException,
InterruptedException {
int lineCnt = 0;
for (IntWritable value : values) {
lineCnt += value.get();
X.println("___ rdcr: " + value.get());
}
sum.set(lineCnt);
ctx.write(key, sum);
X.println("___ RDCR SUM: " + lineCnt);
totalLineCnt.addAndGet(lineCnt);
}
}
}
| modules/hadoop/src/test/java/org/gridgain/grid/kernal/processors/hadoop/GridHadoopTaskExecutionSelfTest.java | /* @java.file.header */
/* _________ _____ __________________ _____
* __ ____/___________(_)______ /__ ____/______ ____(_)_______
* _ / __ __ ___/__ / _ __ / _ / __ _ __ `/__ / __ __ \
* / /_/ / _ / _ / / /_/ / / /_/ / / /_/ / _ / _ / / /
* \____/ /_/ /_/ \_,__/ \____/ \__,_/ /_/ /_/ /_/
*/
package org.gridgain.grid.kernal.processors.hadoop;
import org.apache.hadoop.conf.*;
import org.apache.hadoop.fs.*;
import org.apache.hadoop.io.*;
import org.apache.hadoop.mapreduce.*;
import org.apache.hadoop.mapreduce.lib.input.*;
import org.apache.hadoop.mapreduce.lib.output.*;
import org.gridgain.grid.*;
import org.gridgain.grid.ggfs.*;
import org.gridgain.grid.ggfs.hadoop.v1.*;
import org.gridgain.grid.hadoop.*;
import org.gridgain.grid.util.lang.GridAbsPredicate;
import org.gridgain.grid.util.typedef.*;
import org.gridgain.grid.util.typedef.internal.U;
import org.gridgain.testframework.*;
import java.io.*;
import java.util.*;
import java.util.concurrent.*;
import java.util.concurrent.atomic.*;
/**
* Tests map-reduce task execution basics.
*/
public class GridHadoopTaskExecutionSelfTest extends GridHadoopAbstractSelfTest {
/** Line count. */
private static final AtomicInteger totalLineCnt = new AtomicInteger();
/** Executed tasks. */
private static final AtomicInteger executedTasks = new AtomicInteger();
/** Cancelled tasks. */
private static final AtomicInteger cancelledTasks = new AtomicInteger();
/** Mapper id to fail. */
private static volatile int failMapperId;
/** Test param. */
private static final String MAP_WRITE = "test.map.write";
/** {@inheritDoc} */
@Override public GridGgfsConfiguration ggfsConfiguration() {
GridGgfsConfiguration cfg = super.ggfsConfiguration();
cfg.setFragmentizerEnabled(false);
return cfg;
}
/** {@inheritDoc} */
@Override protected boolean ggfsEnabled() {
return true;
}
/** {@inheritDoc} */
@Override protected void beforeTestsStarted() throws Exception {
super.beforeTestsStarted();
startGrids(gridCount());
}
/** {@inheritDoc} */
@Override protected void afterTestsStopped() throws Exception {
stopAllGrids();
super.afterTestsStopped();
}
/** {@inheritDoc} */
@Override protected void beforeTest() throws Exception {
grid(0).ggfs(ggfsName).format().get();
}
/** {@inheritDoc} */
@Override public GridHadoopConfiguration hadoopConfiguration(String gridName) {
GridHadoopConfiguration cfg = super.hadoopConfiguration(gridName);
cfg.setExternalExecution(false);
return cfg;
}
/**
* @throws Exception If failed.
*/
public void testMapRun() throws Exception {
int lineCnt = 10000;
String fileName = "/testFile";
prepareFile(fileName, lineCnt);
totalLineCnt.set(0);
Configuration cfg = new Configuration();
cfg.setStrings("fs.ggfs.impl", GridGgfsHadoopFileSystem.class.getName());
Job job = Job.getInstance(cfg);
job.setOutputKeyClass(Text.class);
job.setOutputValueClass(IntWritable.class);
job.setMapperClass(TestMapper.class);
job.setNumReduceTasks(0);
job.setInputFormatClass(TextInputFormat.class);
FileInputFormat.setInputPaths(job, new Path("ggfs:///"));
FileOutputFormat.setOutputPath(job, new Path("ggfs:///output/"));
job.setJarByClass(getClass());
GridFuture<?> fut = grid(0).hadoop().submit(new GridHadoopJobId(UUID.randomUUID(), 1),
new GridHadoopDefaultJobInfo(job.getConfiguration()));
fut.get();
assertEquals(lineCnt, totalLineCnt.get());
}
/**
* @throws Exception If failed.
*/
public void testMapCombineRun() throws Exception {
int lineCnt = 10001;
String fileName = "/testFile";
prepareFile(fileName, lineCnt);
totalLineCnt.set(0);
Configuration cfg = new Configuration();
cfg.setStrings("fs.ggfs.impl", GridGgfsHadoopFileSystem.class.getName());
cfg.setBoolean(MAP_WRITE, true);
Job job = Job.getInstance(cfg);
job.setOutputKeyClass(Text.class);
job.setOutputValueClass(IntWritable.class);
job.setMapperClass(TestMapper.class);
job.setCombinerClass(TestCombiner.class);
job.setReducerClass(TestReducer.class);
job.setNumReduceTasks(2);
job.setInputFormatClass(TextInputFormat.class);
FileInputFormat.setInputPaths(job, new Path("ggfs:///"));
FileOutputFormat.setOutputPath(job, new Path("ggfs:///output"));
job.setJarByClass(getClass());
GridHadoopJobId jobId = new GridHadoopJobId(UUID.randomUUID(), 2);
GridFuture<?> fut = grid(0).hadoop().submit(jobId,
new GridHadoopDefaultJobInfo(job.getConfiguration()));
fut.get();
assertEquals(lineCnt, totalLineCnt.get());
for (int g = 0; g < gridCount(); g++)
grid(g).hadoop().finishFuture(jobId).get();
}
/**
* @throws Exception If failed.
*/
public void testMapperException() throws Exception {
prepareFile("/testFile", 1000);
Configuration cfg = new Configuration();
cfg.setStrings("fs.ggfs.impl", GridGgfsHadoopFileSystem.class.getName());
Job job = Job.getInstance(cfg);
job.setOutputKeyClass(Text.class);
job.setOutputValueClass(IntWritable.class);
job.setMapperClass(FailMapper.class);
job.setNumReduceTasks(0);
job.setInputFormatClass(TextInputFormat.class);
FileInputFormat.setInputPaths(job, new Path("ggfs:///"));
FileOutputFormat.setOutputPath(job, new Path("ggfs:///output/"));
job.setJarByClass(getClass());
final GridFuture<?> fut = grid(0).hadoop().submit(new GridHadoopJobId(UUID.randomUUID(), 3),
new GridHadoopDefaultJobInfo(job.getConfiguration()));
GridTestUtils.assertThrows(log, new Callable<Object>() {
@Override public Object call() throws Exception {
fut.get();
return null;
}
}, GridException.class, null);
}
/**
* @param fileName File name.
* @param lineCnt Line count.
* @throws Exception If failed.
*/
private void prepareFile(String fileName, int lineCnt) throws Exception {
GridGgfs ggfs = grid(0).ggfs(ggfsName);
try (OutputStream os = ggfs.create(new GridGgfsPath(fileName), true)) {
PrintWriter w = new PrintWriter(new OutputStreamWriter(os));
for (int i = 0; i < lineCnt; i++)
w.print("Hello, Hadoop map-reduce!\n");
w.flush();
}
}
/**
     * Prepares a job whose mapper tasks can be cancelled.
* @return Fully configured job.
     * @throws Exception If failed.
*/
private Configuration prepareJobForCancelling() throws Exception {
prepareFile("/testFile", 10000);
executedTasks.set(0);
cancelledTasks.set(0);
failMapperId = 0;
Configuration cfg = new Configuration();
cfg.set("fs.ggfs.impl", GridGgfsHadoopFileSystem.class.getName());
Job job = Job.getInstance(cfg);
job.setOutputKeyClass(Text.class);
job.setOutputValueClass(IntWritable.class);
job.setMapperClass(CancellingTestMapper.class);
job.setNumReduceTasks(0);
job.setInputFormatClass(TextInputFormat.class);
FileInputFormat.setInputPaths(job, new Path("ggfs:///"));
FileOutputFormat.setOutputPath(job, new Path("ggfs:///output/"));
job.setJarByClass(getClass());
return job.getConfiguration();
}
/**
* @throws Exception If failed.
*/
public void testTaskCancelling() throws Exception {
Configuration cfg = prepareJobForCancelling();
GridHadoopJobId jobId = new GridHadoopJobId(UUID.randomUUID(), 1);
final GridFuture<?> fut = grid(0).hadoop().submit(jobId, new GridHadoopDefaultJobInfo(cfg));
if (!GridTestUtils.waitForCondition(new GridAbsPredicate() {
@Override public boolean apply() {
return executedTasks.get() == 32;
}
}, 20000)) {
U.dumpThreads(log);
assertTrue(false);
}
        // Fail the mapper with id "1"; the remaining mappers should then be cancelled.
failMapperId = 1;
GridTestUtils.assertThrows(log, new Callable<Object>() {
@Override public Object call() throws Exception {
fut.get();
return null;
}
}, GridException.class, null);
assertEquals(executedTasks.get(), cancelledTasks.get() + 1);
}
/**
* @throws Exception If failed.
*/
public void testJobKill() throws Exception {
Configuration cfg = prepareJobForCancelling();
GridHadoop hadoop = grid(0).hadoop();
GridHadoopJobId jobId = new GridHadoopJobId(UUID.randomUUID(), 1);
//Kill unknown job.
boolean killRes = hadoop.kill(jobId);
assertFalse(killRes);
final GridFuture<?> fut = hadoop.submit(jobId, new GridHadoopDefaultJobInfo(cfg));
if (!GridTestUtils.waitForCondition(new GridAbsPredicate() {
@Override public boolean apply() {
return executedTasks.get() == 32;
}
}, 20000)) {
U.dumpThreads(log);
assertTrue(false);
}
        //Kill a job that is actually running.
killRes = hadoop.kill(jobId);
assertTrue(killRes);
GridTestUtils.assertThrows(log, new Callable<Object>() {
@Override public Object call() throws Exception {
fut.get();
return null;
}
}, GridException.class, null);
assertEquals(executedTasks.get(), cancelledTasks.get());
//Kill the same job again.
killRes = hadoop.kill(jobId);
assertTrue(killRes);
}
private static class CancellingTestMapper extends Mapper<Object, Text, Text, IntWritable> {
private int mapperId;
/** {@inheritDoc} */
@Override protected void setup(Context context) throws IOException, InterruptedException {
mapperId = executedTasks.incrementAndGet();
}
/** {@inheritDoc} */
@Override public void run(Context context) throws IOException, InterruptedException {
try {
super.run(context);
}
catch (GridHadoopTaskCancelledException e) {
cancelledTasks.incrementAndGet();
throw e;
}
}
/** {@inheritDoc} */
@Override protected void map(Object key, Text value, Context context) throws IOException, InterruptedException {
if (mapperId == failMapperId)
throw new IOException();
Thread.sleep(1000);
}
}
/**
* Test failing mapper.
*/
private static class FailMapper extends Mapper<Object, Text, Text, IntWritable> {
/** {@inheritDoc} */
@Override protected void map(Object key, Text value, Context context)
throws IOException, InterruptedException {
throw new IOException("Expected");
}
}
/**
* Mapper calculates number of lines.
*/
private static class TestMapper extends Mapper<Object, Text, Text, IntWritable> {
/** Writable integer constant of '1'. */
private static final IntWritable ONE = new IntWritable(1);
/** Line count constant. */
public static final Text LINE_COUNT = new Text("lineCount");
/** {@inheritDoc} */
@Override protected void setup(Context context) throws IOException, InterruptedException {
X.println("___ Mapper: " + context.getTaskAttemptID());
}
/** {@inheritDoc} */
@Override protected void map(Object key, Text value, Context ctx) throws IOException, InterruptedException {
if (ctx.getConfiguration().getBoolean(MAP_WRITE, false))
ctx.write(LINE_COUNT, ONE);
else
totalLineCnt.incrementAndGet();
}
}
/**
* Combiner calculates number of lines.
*/
private static class TestCombiner extends Reducer<Text, IntWritable, Text, IntWritable> {
/** */
IntWritable sum = new IntWritable();
/** {@inheritDoc} */
@Override protected void setup(Context context) throws IOException, InterruptedException {
X.println("___ Combiner: ");
}
/** {@inheritDoc} */
@Override protected void reduce(Text key, Iterable<IntWritable> values, Context ctx) throws IOException,
InterruptedException {
int lineCnt = 0;
for (IntWritable value : values)
lineCnt += value.get();
sum.set(lineCnt);
X.println("___ combo: " + lineCnt);
ctx.write(key, sum);
}
}
/**
     * Reducer calculates number of lines.
*/
private static class TestReducer extends Reducer<Text, IntWritable, Text, IntWritable> {
/** */
IntWritable sum = new IntWritable();
/** {@inheritDoc} */
@Override protected void setup(Context context) throws IOException, InterruptedException {
X.println("___ Reducer: " + context.getTaskAttemptID());
}
/** {@inheritDoc} */
@Override protected void reduce(Text key, Iterable<IntWritable> values, Context ctx) throws IOException,
InterruptedException {
int lineCnt = 0;
for (IntWritable value : values) {
lineCnt += value.get();
X.println("___ rdcr: " + value.get());
}
sum.set(lineCnt);
ctx.write(key, sum);
X.println("___ RDCR SUM: " + lineCnt);
totalLineCnt.addAndGet(lineCnt);
}
}
}
| #Correct imports
| modules/hadoop/src/test/java/org/gridgain/grid/kernal/processors/hadoop/GridHadoopTaskExecutionSelfTest.java | #Correct imports | <ide><path>odules/hadoop/src/test/java/org/gridgain/grid/kernal/processors/hadoop/GridHadoopTaskExecutionSelfTest.java
<ide> import org.gridgain.grid.ggfs.*;
<ide> import org.gridgain.grid.ggfs.hadoop.v1.*;
<ide> import org.gridgain.grid.hadoop.*;
<del>import org.gridgain.grid.util.lang.GridAbsPredicate;
<add>import org.gridgain.grid.util.lang.*;
<ide> import org.gridgain.grid.util.typedef.*;
<del>import org.gridgain.grid.util.typedef.internal.U;
<add>import org.gridgain.grid.util.typedef.internal.*;
<ide> import org.gridgain.testframework.*;
<ide>
<ide> import java.io.*; |
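
The diff above is the whole commit: two explicit single-type imports become on-demand imports, matching the rest of the file's import block. A compilable illustration of the two styles, using JDK packages since the GridGain classes are not on a default classpath:

// Explicit single-type imports: the style the commit removes.
// import java.util.ArrayList;
// import java.util.HashMap;

// On-demand import: the style the commit adopts to match the file.
// Import style only changes compile-time name resolution; the bytecode is identical.
import java.util.*;

class ImportStyleSketch {
    Map<String, List<Integer>> byName = new HashMap<>(); // resolves the same either way
}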
|
Java | apache-2.0 | 680843c155665dd2898169e3b31234126a301262 | 0 | MINDS-i/Dashboard,MINDS-i/Dashboard,MINDS-i/Dashboard | package com.ui;
import com.Dashboard;
import com.serial.*;
import com.Context;
import com.remote.*;
import com.table.TableColumn;
import com.table.ColumnTableModel;
import java.awt.*;
import java.awt.event.*;
import java.awt.geom.*;
import java.awt.FlowLayout;
import java.io.*;
import java.nio.file.*;
import java.util.*;
import java.util.Locale;
import java.util.PropertyResourceBundle;
import java.util.ResourceBundle;
import javax.swing.*;
import javax.swing.border.*;
import javax.swing.event.*;
import javax.swing.table.*;
import javax.swing.text.*;
public class DataWindow implements ActionListener{
public static final long PERIOD = 200; //update period in MS
private static final int WINDOW_X = 300;
private static final int WINDOW_Y = 560;
private static final Dimension telemBoxPref = new Dimension(300, 140);
private static final Dimension telemBoxMax = new Dimension(Integer.MAX_VALUE, 140);
private static final Dimension settingBoxPref = new Dimension(300, 300);
private static final Dimension settingBoxMax = new Dimension(Integer.MAX_VALUE, 300);
private static final Dimension descriptionMin = new Dimension(300, 80);
private static final Dimension descriptionPref= new Dimension(300, 200);
private JTable telTable, setTable;
private ColumnTableModel setModel;
private ColumnTableModel telModel;
private Context context;
private java.util.Timer update;
private JPanel logPanel;
private JTextField logInput;
private JTextComponent descriptionBox;
public DataWindow(Context cxt){
context = cxt;
JFrame frame = new JFrame("Telemetry");
frame.setDefaultCloseOperation(JFrame.DISPOSE_ON_CLOSE);
frame.setSize(WINDOW_X,WINDOW_Y);
JPanel panel = new JPanel();
panel.setLayout(new BoxLayout(panel, BoxLayout.PAGE_AXIS));
frame.addWindowListener(new java.awt.event.WindowAdapter() {
@Override
public void windowClosing(java.awt.event.WindowEvent windowEvent) {
System.out.println("Data window closed");
onClose();
}
});
final SettingList settingList = context.settingList;
ArrayList<TableColumn> telem = new ArrayList<TableColumn>();
telem.add( new TableColumn(){
public String getName(){ return "name"; }
public Object getValueAt(int row){
return context.getTelemetryName(row);
}
public int getRowCount(){ return 256; }
public Class getDataClass(){ return String.class; }
public boolean isRowEditable(int row){ return false; }
public void setValueAt(Object val, int row){ ; }
});
telem.add( new TableColumn(){
public String getName(){ return "Value"; }
public Object getValueAt(int row) { return " "+context.getTelemetry(row); }
public int getRowCount(){ return context.getTelemetryCount(); }
public Class getDataClass(){ return String.class; }
public boolean isRowEditable(int row){ return false; }
public void setValueAt(Object val, int row){
;
}
});
ArrayList<TableColumn> settings = new ArrayList<TableColumn>();
settings.add( new TableColumn(){
public String getName(){ return "name"; }
public Object getValueAt(int row){
if(row < settingList.size())
return settingList.get(row).getName();
return "#"+row;
}
public int getRowCount(){ return settingList.size(); }
public Class getDataClass(){ return String.class; }
public boolean isRowEditable(int row){ return false; }
public void setValueAt(Object val, int row){ ; }
});
settings.add( new TableColumn(){
public String getName(){ return "Setting"; }
public Object getValueAt(int row) {
float val = settingList.get(row).getVal();
return " "+val;
}
public int getRowCount(){ return settingList.size(); }
public Class getDataClass(){ return String.class; }
public boolean isRowEditable(int row){ return true; }
public void setValueAt(Object val, int row){
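                // The editor may pass a Float (programmatic update) or the raw String typed into the cell.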
if(val.getClass()==Float.class){
settingList.pushSetting(row,(Float)val);
System.out.println("Setting New Value "+(Float)val);
} else if(val.getClass()==String.class){
try{
Float newVal = Float.valueOf((String)val);
if(settingList.get(row).outsideOfBounds(newVal)){
JFrame mf = new JFrame("Warning");
JOptionPane.showMessageDialog(mf, "Caution: new value is outside of logical bounds");
}
settingList.pushSetting(row,newVal);
} catch(Exception e) {
System.out.println("Bad new value");
}
}
}
});
//JTable telTable, setTable;
JScrollPane telScroll, setScroll;
telModel = new ColumnTableModel(telem);
telTable = new JTable(telModel);
telScroll = new JScrollPane(telTable);
setModel = new ColumnTableModel(settings);
setTable = new JTable(setModel);
setScroll = new JScrollPane(setTable);
telTable.setAutoResizeMode(JTable.AUTO_RESIZE_ALL_COLUMNS);
telTable.setFillsViewportHeight(true);
setTable.setAutoResizeMode(JTable.AUTO_RESIZE_ALL_COLUMNS);
setTable.setFillsViewportHeight(true);
telScroll.setMaximumSize( telemBoxMax);
telScroll.setPreferredSize(telemBoxPref);
telScroll.setMinimumSize( telemBoxPref);
setScroll.setMaximumSize( settingBoxMax);
setScroll.setPreferredSize(settingBoxPref);
setScroll.setMinimumSize( settingBoxPref);
Border tableBorders = BorderFactory.createCompoundBorder(
BorderFactory.createEmptyBorder(5, 5, 5, 5),
BorderFactory.createLineBorder(Color.BLACK) );
setScroll.setBorder(tableBorders);
telScroll.setBorder(tableBorders);
javax.swing.table.TableColumn col;
col = telTable.getColumn(telem.get(1).getName());
col.setPreferredWidth(1);
col = setTable.getColumn(settings.get(1).getName());
col.setPreferredWidth(1);
setTable.getSelectionModel().addListSelectionListener(new ListSelectionListener(){
public void valueChanged(ListSelectionEvent event) {
setDetail(setTable.getSelectedRow());
}
});
JTextPane dBox = new JTextPane();
dBox.setBorder(BorderFactory.createLineBorder(Color.gray));
dBox.setContentType("text/html");
dBox.setMinimumSize(descriptionMin);
dBox.setPreferredSize(descriptionPref);
//dBox.setBorder(tableBorders);
dBox.setOpaque(false);
descriptionBox = dBox;
constructLogPane();
panel.add(logPanel);
panel.add(telScroll);
panel.add(setScroll);
panel.add(descriptionBox);
panel.add(Box.createVerticalGlue());
frame.add(panel);
frame.pack();
frame.setVisible(true);
startUpdateTimer();
}
private void onClose(){
if(update != null) update.cancel();
}
private void constructLogPane(){
logPanel = new JPanel();
logPanel.setLayout(new FlowLayout());
JLabel label = new JLabel("Set logging period (ms)");
logInput = new JTextField();
logInput.addActionListener(this);
logInput.setText(Integer.toString(context.telemetry.getLogPeriod()));
logInput.setColumns(8);
logPanel.add(label);
logPanel.add(logInput);
}
private void setDetail(int row){
StringBuilder detail = new StringBuilder();
if(row >= 0 && row < context.settingList.size()){
Setting set = context.settingList.get(row);
detail.append("min: ");
detail.append(set.getMin());
detail.append(" max: ");
detail.append(set.getMax());
detail.append(" default: ");
detail.append(set.getDefault());
detail.append("<br><hr>");
detail.append(set.getDescription());
}
if(descriptionBox != null) descriptionBox.setText(detail.toString());
}
private void startUpdateTimer(){
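        // Refresh both tables every PERIOD (200 ms) while connected so new telemetry and settings appear.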
update = new java.util.Timer();
update.scheduleAtFixedRate(new TimerTask(){
public void run(){
if(telModel == null) return;
if(setModel == null) return;
if(context.connected){
telModel.fireTableRowsUpdated(0, Serial.MAX_TELEMETRY);
setModel.fireTableRowsUpdated(0, Serial.MAX_SETTINGS);
telTable.invalidate();
setTable.invalidate();
}
}
}, PERIOD, PERIOD);
}
public void actionPerformed(ActionEvent evt) {
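        // Invoked when the log-period field is committed; non-numeric input reverts to the current period.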
if(logInput == null) return;
String inputText = logInput.getText();
int input;
try {
input = Integer.parseInt(inputText);
logInput.setText(Integer.toString(input));
context.telemetry.setLogPeriod(input);
} catch (NumberFormatException e) {
logInput.setText(Integer.toString(context.telemetry.getLogPeriod()));
}
}
}
| src/ui/DataWindow.java | package com.ui;
import com.Dashboard;
import com.serial.*;
import com.Context;
import com.remote.*;
import com.table.TableColumn;
import com.table.ColumnTableModel;
import java.awt.*;
import java.awt.event.*;
import java.awt.geom.*;
import java.awt.FlowLayout;
import java.io.*;
import java.nio.file.*;
import java.util.*;
import java.util.Locale;
import java.util.PropertyResourceBundle;
import java.util.ResourceBundle;
import javax.swing.*;
import javax.swing.border.*;
import javax.swing.event.*;
import javax.swing.table.*;
import javax.swing.text.*;
public class DataWindow implements ActionListener{
public static final long PERIOD = 200; //update period in MS
private static final int WINDOW_X = 300;
private static final int WINDOW_Y = 560;
private static final Dimension telemBoxPref = new Dimension(300, 140);
private static final Dimension telemBoxMax = new Dimension(Integer.MAX_VALUE, 140);
private static final Dimension settingBoxPref = new Dimension(300, 300);
private static final Dimension settingBoxMax = new Dimension(Integer.MAX_VALUE, 300);
private static final Dimension descriptionMin = new Dimension(300, 80);
private static final Dimension descriptionPref= new Dimension(300, 200);
private JTable telTable, setTable;
private ColumnTableModel setModel;
private ColumnTableModel telModel;
private Context context;
private java.util.Timer update;
private JPanel logPanel;
private JTextField logInput;
private JTextComponent descriptionBox;
public DataWindow(Context cxt){
context = cxt;
JFrame frame = new JFrame("Telemetry");
frame.setDefaultCloseOperation(JFrame.DISPOSE_ON_CLOSE);
frame.setSize(WINDOW_X,WINDOW_Y);
JPanel panel = new JPanel();
panel.setLayout(new BoxLayout(panel, BoxLayout.PAGE_AXIS));
frame.addWindowListener(new java.awt.event.WindowAdapter() {
@Override
public void windowClosing(java.awt.event.WindowEvent windowEvent) {
System.out.println("Data window closed");
onClose();
}
});
final SettingList settingList = context.settingList;
ArrayList<TableColumn> telem = new ArrayList<TableColumn>();
telem.add( new TableColumn(){
public String getName(){ return "name"; }
public Object getValueAt(int row){
return context.getTelemetryName(row);
}
public int getRowCount(){ return 256; }
public Class getDataClass(){ return String.class; }
public boolean isRowEditable(int row){ return false; }
public void setValueAt(Object val, int row){ ; }
});
telem.add( new TableColumn(){
public String getName(){ return "Value"; }
public Object getValueAt(int row) { return " "+context.getTelemetry(row); }
public int getRowCount(){ return context.getTelemetryCount(); }
public Class getDataClass(){ return String.class; }
public boolean isRowEditable(int row){ return false; }
public void setValueAt(Object val, int row){
;
}
});
ArrayList<TableColumn> settings = new ArrayList<TableColumn>();
settings.add( new TableColumn(){
public String getName(){ return "name"; }
public Object getValueAt(int row){
if(row < settingList.size())
return settingList.get(row).getName();
return "#"+row;
}
public int getRowCount(){ return settingList.size(); }
public Class getDataClass(){ return String.class; }
public boolean isRowEditable(int row){ return false; }
public void setValueAt(Object val, int row){ ; }
});
settings.add( new TableColumn(){
public String getName(){ return "Setting"; }
public Object getValueAt(int row) {
float val = settingList.get(row).getVal();
return " "+val;
}
public int getRowCount(){ return settingList.size(); }
public Class getDataClass(){ return String.class; }
public boolean isRowEditable(int row){ return true; }
public void setValueAt(Object val, int row){
if(val.getClass()==Float.class){
settingList.pushSetting(row,(Float)val);
System.out.println("Setting New Value "+(Float)val);
} else if(val.getClass()==String.class){
try{
Float newVal = new Float((String)val);
if(settingList.get(row).outsideOfBounds(newVal)){
JFrame mf = new JFrame("Warning");
JOptionPane.showMessageDialog(mf, "Caution: new value is outside of logical bounds");
}
settingList.pushSetting(row,newVal);
} catch(Exception e) {
System.out.println("Bad new value");
}
}
}
});
//JTable telTable, setTable;
JScrollPane telScroll, setScroll;
telModel = new ColumnTableModel(telem);
telTable = new JTable(telModel);
telScroll = new JScrollPane(telTable);
setModel = new ColumnTableModel(settings);
setTable = new JTable(setModel);
setScroll = new JScrollPane(setTable);
telTable.setAutoResizeMode(JTable.AUTO_RESIZE_ALL_COLUMNS);
telTable.setFillsViewportHeight(true);
setTable.setAutoResizeMode(JTable.AUTO_RESIZE_ALL_COLUMNS);
setTable.setFillsViewportHeight(true);
telScroll.setMaximumSize( telemBoxMax);
telScroll.setPreferredSize(telemBoxPref);
telScroll.setMinimumSize( telemBoxPref);
setScroll.setMaximumSize( settingBoxMax);
setScroll.setPreferredSize(settingBoxPref);
setScroll.setMinimumSize( settingBoxPref);
Border tableBorders = BorderFactory.createCompoundBorder(
BorderFactory.createEmptyBorder(5, 5, 5, 5),
BorderFactory.createLineBorder(Color.BLACK) );
setScroll.setBorder(tableBorders);
telScroll.setBorder(tableBorders);
javax.swing.table.TableColumn col;
col = telTable.getColumn(telem.get(1).getName());
col.setPreferredWidth(1);
col = setTable.getColumn(settings.get(1).getName());
col.setPreferredWidth(1);
setTable.getSelectionModel().addListSelectionListener(new ListSelectionListener(){
public void valueChanged(ListSelectionEvent event) {
setDetail(setTable.getSelectedRow());
}
});
JTextPane dBox = new JTextPane();
dBox.setBorder(BorderFactory.createLineBorder(Color.gray));
dBox.setContentType("text/html");
dBox.setMinimumSize(descriptionMin);
dBox.setPreferredSize(descriptionPref);
//dBox.setBorder(tableBorders);
dBox.setOpaque(false);
descriptionBox = dBox;
constructLogPane();
panel.add(logPanel);
panel.add(telScroll);
panel.add(setScroll);
panel.add(descriptionBox);
panel.add(Box.createVerticalGlue());
frame.add(panel);
frame.pack();
frame.setVisible(true);
startUpdateTimer();
}
private void onClose(){
if(update != null) update.cancel();
}
private void constructLogPane(){
logPanel = new JPanel();
logPanel.setLayout(new FlowLayout());
JLabel label = new JLabel("Set logging period (ms)");
logInput = new JTextField();
logInput.addActionListener(this);
logInput.setText(Integer.toString(context.telemetry.getLogPeriod()));
logInput.setColumns(8);
logPanel.add(label);
logPanel.add(logInput);
}
private void setDetail(int row){
StringBuilder detail = new StringBuilder();
if(row >= 0 && row < context.settingList.size()){
Setting set = context.settingList.get(row);
detail.append("min: ");
detail.append(set.getMin());
detail.append(" max: ");
detail.append(set.getMax());
detail.append(" default: ");
detail.append(set.getDefault());
detail.append("<br><hr>");
detail.append(set.getDescription());
}
if(descriptionBox != null) descriptionBox.setText(detail.toString());
}
private void startUpdateTimer(){
update = new java.util.Timer();
update.scheduleAtFixedRate(new TimerTask(){
public void run(){
if(telModel == null) return;
if(setModel == null) return;
if(context.connected){
telModel.fireTableRowsUpdated(0, Serial.MAX_TELEMETRY);
setModel.fireTableRowsUpdated(0, Serial.MAX_SETTINGS);
telTable.invalidate();
setTable.invalidate();
}
}
}, PERIOD, PERIOD);
}
public void actionPerformed(ActionEvent evt) {
if(logInput == null) return;
String inputText = logInput.getText();
int input;
try {
input = Integer.parseInt(inputText);
logInput.setText(Integer.toString(input));
context.telemetry.setLogPeriod(input);
} catch (NumberFormatException e) {
logInput.setText(Integer.toString(context.telemetry.getLogPeriod()));
}
}
}
 | Use more efficient Float.valueOf method
 | src/ui/DataWindow.java | Use more efficient Float.valueOf method | <ide><path>rc/ui/DataWindow.java
<ide> System.out.println("Setting New Value "+(Float)val);
<ide> } else if(val.getClass()==String.class){
<ide> try{
<del> Float newVal = new Float((String)val);
<add> Float newVal = Float.valueOf((String)val);
<ide> if(settingList.get(row).outsideOfBounds(newVal)){
<ide> JFrame mf = new JFrame("Warning");
<ide> JOptionPane.showMessageDialog(mf, "Caution: new value is outside of logical bounds"); |
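
The one-line change above replaces the boxing constructor with the static factory. A small, self-contained sketch of the difference (the demo class is ours, not part of the Dashboard project):

public class FloatParsingDemo {
    public static void main(String[] args) {
        // new Float(String) always allocates a fresh wrapper
        // (the wrapper constructors were later deprecated in Java 9).
        Float viaConstructor = new Float("1.5");

        // Float.valueOf(String) is the recommended factory: it is free to
        // reuse instances, and its float overload is what autoboxing calls.
        Float viaFactory = Float.valueOf("1.5");

        // Both still throw NumberFormatException on bad input, which
        // DataWindow.setValueAt catches in order to reject the edit.
        System.out.println(viaConstructor.equals(viaFactory)); // true
    }
}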
|
JavaScript | mit | 2ac82c86545fb36bad7023321d9297688538cb5e | 0 | Dragnipur/tiny-angular-wordcloud,Dragnipur/tiny-angular-wordcloud,MrHertal/tiny-angular-wordcloud,MrHertal/tiny-angular-wordcloud | angular.module("tangcloud",[]).directive("tangCloud",["$interpolate","$compile","$timeout",function(a,b,c){var d={restrict:"E",scope:{words:"=",onClick:"&",spin:"="},template:function(a,b){var c=angular.isDefined(b.onClick),d=c?'ng-click="onClick({word : entry.word, id : entry.id})"':"";return"<div class='tangcloud'><span ng-repeat='entry in words'"+d+">{{entry.word}}</span></div>"},compile:function(b){return b.children().children().addClass("tangcloud-item-"+a.startSymbol()+"entry.size"+a.endSymbol()).addClass("tangcloud-item-hidden"),function(a,b){function d(a){for(var b=a.length-1;b>0;b--){var c=Math.floor(Math.random()*(b+1)),d=a[b];a[b]=a[c],a[c]=d}return a}function e(){c(function(){for(var a=b.children().eq(0).children(),c=a.length,d=0;c>d;d++)f(a.eq(d))})}function f(a){for(var b=parseInt(window.getComputedStyle(a[0]).lineHeight,10),c=a[0].offsetWidth,d=g(c,b),e=0;i(d)&&50>p;)d=h(d,e),e+=1;50>p&&(q.push(d),l(a,d.startX,d.startY)),p=0}function g(a,b){return{width:a,height:b,startX:n-a/2,startY:o-b/2,endX:n+a/2,endY:o+b/2}}function h(a,b){var c=.1*b;return a.startX=n+1.5*c*Math.cos(c)-a.width/2,a.startY=o+c*Math.sin(c)-a.height/2,a.endX=a.startX+a.width,a.endY=a.startY+a.height,a}function i(b){for(var c={left:n-a.width/2,right:n+a.width/2,bottom:o-a.height/2,top:o+a.height/2},d=0;d<q.length;d++)if(j(b,c)||k(b,q[d]))return!0;return!1}function j(a,b){return a.startX<b.left||a.endX>b.right||a.startY<b.bottom||a.endY>b.top?(p++,!0):!1}function k(a,b){return a.startX>b.endX||a.endX<b.startX?!1:!(a.startY>b.endY||a.endY<b.startY)}function l(a,b,c){var d="position: absolute; left:"+b+"px; top: "+c+"px;";a.attr("style",d),a.removeClass("tangcloud-item-hidden")}var m=b.children().eq(0)[0];a.width=m.offsetWidth,a.height=m.offsetHeight;var n=a.width/2,o=a.height/2,p=0,q=[];a.words&&(a.words=d(a.words),e())}}};return d}]); | dist/tangCloud.min.js | angular.module("tangcloud",[]).directive("tangCloud",["$interpolate","$compile","$timeout",function(a,b,c){var d={restrict:"E",scope:{width:"=",height:"=",words:"=",onClick:"&",spin:"="},template:function(a,b){var c=angular.isDefined(b.onClick),d=c?'ng-click="onClick({word : entry.word, id : entry.id})"':"";return"<div class='tangcloud'><span ng-repeat='entry in words'"+d+">{{entry.word}}</span></div>"},compile:function(b){return b.children().children().addClass("tangcloud-item-"+a.startSymbol()+"entry.size"+a.endSymbol()).addClass("tangcloud-item-hidden"),function(a,b){function d(a){for(var b=a.length-1;b>0;b--){var c=Math.floor(Math.random()*(b+1)),d=a[b];a[b]=a[c],a[c]=d}return a}function e(){c(function(){for(var a=b.children().eq(0).children(),c=a.length,d=0;c>d;d++)f(a.eq(d))})}function f(a){for(var b=parseInt(window.getComputedStyle(a[0]).lineHeight,10),c=a[0].offsetWidth,d=g(c,b),e=0;i(d)&&50>o;)d=h(d,e),e+=1;50>o&&(p.push(d),l(a,d.startX,d.startY)),o=0}function g(a,b){return{width:a,height:b,startX:m-a/2,startY:n-b/2,endX:m+a/2,endY:n+b/2}}function h(a,b){var c=.1*b;return a.startX=m+1.5*c*Math.cos(c)-a.width/2,a.startY=n+c*Math.sin(c)-a.height/2,a.endX=a.startX+a.width,a.endY=a.startY+a.height,a}function i(b){for(var c={left:m-a.width/2,right:m+a.width/2,bottom:n-a.height/2,top:n+a.height/2},d=0;d<p.length;d++)if(j(b,c)||k(b,p[d]))return!0;return!1}function j(a,b){return 
a.startX<b.left||a.endX>b.right||a.startY<b.bottom||a.endY>b.top?(o++,!0):!1}function k(a,b){return a.startX>b.endX||a.endX<b.startX?!1:!(a.startY>b.endY||a.endY<b.startY)}function l(a,b,c){var d="position: absolute; left:"+b+"px; top: "+c+"px;";a.attr("style",d),a.removeClass("tangcloud-item-hidden")}var m=a.width/2,n=a.height/2,o=0,p=[];a.words&&(a.words=d(a.words),e())}}};return d}]); | minified
| dist/tangCloud.min.js | minified | <ide><path>ist/tangCloud.min.js
<del>angular.module("tangcloud",[]).directive("tangCloud",["$interpolate","$compile","$timeout",function(a,b,c){var d={restrict:"E",scope:{width:"=",height:"=",words:"=",onClick:"&",spin:"="},template:function(a,b){var c=angular.isDefined(b.onClick),d=c?'ng-click="onClick({word : entry.word, id : entry.id})"':"";return"<div class='tangcloud'><span ng-repeat='entry in words'"+d+">{{entry.word}}</span></div>"},compile:function(b){return b.children().children().addClass("tangcloud-item-"+a.startSymbol()+"entry.size"+a.endSymbol()).addClass("tangcloud-item-hidden"),function(a,b){function d(a){for(var b=a.length-1;b>0;b--){var c=Math.floor(Math.random()*(b+1)),d=a[b];a[b]=a[c],a[c]=d}return a}function e(){c(function(){for(var a=b.children().eq(0).children(),c=a.length,d=0;c>d;d++)f(a.eq(d))})}function f(a){for(var b=parseInt(window.getComputedStyle(a[0]).lineHeight,10),c=a[0].offsetWidth,d=g(c,b),e=0;i(d)&&50>o;)d=h(d,e),e+=1;50>o&&(p.push(d),l(a,d.startX,d.startY)),o=0}function g(a,b){return{width:a,height:b,startX:m-a/2,startY:n-b/2,endX:m+a/2,endY:n+b/2}}function h(a,b){var c=.1*b;return a.startX=m+1.5*c*Math.cos(c)-a.width/2,a.startY=n+c*Math.sin(c)-a.height/2,a.endX=a.startX+a.width,a.endY=a.startY+a.height,a}function i(b){for(var c={left:m-a.width/2,right:m+a.width/2,bottom:n-a.height/2,top:n+a.height/2},d=0;d<p.length;d++)if(j(b,c)||k(b,p[d]))return!0;return!1}function j(a,b){return a.startX<b.left||a.endX>b.right||a.startY<b.bottom||a.endY>b.top?(o++,!0):!1}function k(a,b){return a.startX>b.endX||a.endX<b.startX?!1:!(a.startY>b.endY||a.endY<b.startY)}function l(a,b,c){var d="position: absolute; left:"+b+"px; top: "+c+"px;";a.attr("style",d),a.removeClass("tangcloud-item-hidden")}var m=a.width/2,n=a.height/2,o=0,p=[];a.words&&(a.words=d(a.words),e())}}};return d}]);
<add>angular.module("tangcloud",[]).directive("tangCloud",["$interpolate","$compile","$timeout",function(a,b,c){var d={restrict:"E",scope:{words:"=",onClick:"&",spin:"="},template:function(a,b){var c=angular.isDefined(b.onClick),d=c?'ng-click="onClick({word : entry.word, id : entry.id})"':"";return"<div class='tangcloud'><span ng-repeat='entry in words'"+d+">{{entry.word}}</span></div>"},compile:function(b){return b.children().children().addClass("tangcloud-item-"+a.startSymbol()+"entry.size"+a.endSymbol()).addClass("tangcloud-item-hidden"),function(a,b){function d(a){for(var b=a.length-1;b>0;b--){var c=Math.floor(Math.random()*(b+1)),d=a[b];a[b]=a[c],a[c]=d}return a}function e(){c(function(){for(var a=b.children().eq(0).children(),c=a.length,d=0;c>d;d++)f(a.eq(d))})}function f(a){for(var b=parseInt(window.getComputedStyle(a[0]).lineHeight,10),c=a[0].offsetWidth,d=g(c,b),e=0;i(d)&&50>p;)d=h(d,e),e+=1;50>p&&(q.push(d),l(a,d.startX,d.startY)),p=0}function g(a,b){return{width:a,height:b,startX:n-a/2,startY:o-b/2,endX:n+a/2,endY:o+b/2}}function h(a,b){var c=.1*b;return a.startX=n+1.5*c*Math.cos(c)-a.width/2,a.startY=o+c*Math.sin(c)-a.height/2,a.endX=a.startX+a.width,a.endY=a.startY+a.height,a}function i(b){for(var c={left:n-a.width/2,right:n+a.width/2,bottom:o-a.height/2,top:o+a.height/2},d=0;d<q.length;d++)if(j(b,c)||k(b,q[d]))return!0;return!1}function j(a,b){return a.startX<b.left||a.endX>b.right||a.startY<b.bottom||a.endY>b.top?(p++,!0):!1}function k(a,b){return a.startX>b.endX||a.endX<b.startX?!1:!(a.startY>b.endY||a.endY<b.startY)}function l(a,b,c){var d="position: absolute; left:"+b+"px; top: "+c+"px;";a.attr("style",d),a.removeClass("tangcloud-item-hidden")}var m=b.children().eq(0)[0];a.width=m.offsetWidth,a.height=m.offsetHeight;var n=a.width/2,o=a.height/2,p=0,q=[];a.words&&(a.words=d(a.words),e())}}};return d}]); |
|
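Both blobs in the record above are minified to a single line, which hides what the commit does: the directive stops taking width/height bindings and instead measures its own container (offsetWidth/offsetHeight). For readability, here is a de-minified Java sketch of the placement loop the directive appears to implement — try the container's center, then walk outward along a stretched Archimedean spiral, giving up after 50 misses. Names are paraphrased from the minified source, the directive's up-front shuffle of the word list is omitted, and the out-of-container check is folded into the try cap for brevity.

import java.util.ArrayList;
import java.util.List;

class WordCloudLayoutSketch {
    static final int MAX_TRIES = 50;                     // mirrors the directive's 50-try cap
    final double centerX, centerY;
    final List<double[]> placed = new ArrayList<>();     // {startX, startY, endX, endY}

    WordCloudLayoutSketch(double width, double height) {
        centerX = width / 2;
        centerY = height / 2;
    }

    /** Returns the {x, y} top-left corner for a word of size w x h, or null if it never fit. */
    double[] place(double w, double h) {
        for (int attempt = 0; attempt < MAX_TRIES; attempt++) {
            double t = 0.1 * attempt;                            // spiral parameter
            double x = centerX + 1.5 * t * Math.cos(t) - w / 2;  // stretched Archimedean spiral
            double y = centerY + t * Math.sin(t) - h / 2;
            if (!collides(x, y, w, h)) {
                placed.add(new double[] { x, y, x + w, y + h });
                return new double[] { x, y };
            }
        }
        return null; // give up; the directive simply leaves such a word hidden
    }

    boolean collides(double x, double y, double w, double h) {
        for (double[] r : placed)
            if (x < r[2] && x + w > r[0] && y < r[3] && y + h > r[1])
                return true;
        return false;
    }
}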
Java | apache-2.0 | df0e759102338e76c6b5a8f2f6cd83d7567b1381 | 0 | Spaceghost/OrientDB,Spaceghost/OrientDB,Spaceghost/OrientDB | /*
* Copyright 1999-2010 Luca Garulli (l.garulli--at--orientechnologies.com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.orientechnologies.orient.core.db.raw;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import com.orientechnologies.common.log.OLogManager;
import com.orientechnologies.orient.core.Orient;
import com.orientechnologies.orient.core.cache.OCacheRecord;
import com.orientechnologies.orient.core.config.OGlobalConfiguration;
import com.orientechnologies.orient.core.db.ODatabase;
import com.orientechnologies.orient.core.db.ODatabaseLifecycleListener;
import com.orientechnologies.orient.core.db.ODatabaseListener;
import com.orientechnologies.orient.core.db.record.ODatabaseRecord;
import com.orientechnologies.orient.core.exception.ODatabaseException;
import com.orientechnologies.orient.core.exception.ORecordNotFoundException;
import com.orientechnologies.orient.core.id.ORecordId;
import com.orientechnologies.orient.core.intent.OIntent;
import com.orientechnologies.orient.core.storage.ORawBuffer;
import com.orientechnologies.orient.core.storage.OStorage;
/**
* Lower level ODatabase implementation. It's extended or wrapped by all the others.
*
* @author Luca Garulli (l.garulli--at--orientechnologies.com)
*
*/
@SuppressWarnings("unchecked")
public class ODatabaseRaw implements ODatabase {
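	// NOTE: "serialId++" below is a read-modify-write on a volatile field and is not
	// atomic; two threads constructing databases at once could receive the same id.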
private static volatile int serialId = 0;
protected int id;
protected String url;
protected OStorage storage;
protected STATUS status;
protected OIntent currentIntent;
private ODatabaseRecord<?> databaseOwner;
private boolean useCache;
private Map<String, Object> properties = new HashMap<String, Object>();
private List<ODatabaseListener> listeners = new ArrayList<ODatabaseListener>();
public enum STATUS {
OPEN, CLOSED
}
public ODatabaseRaw(final String iURL) {
try {
url = iURL;
id = serialId++;
status = STATUS.CLOSED;
// SET DEFAULT PROPERTIES
setProperty("fetch-max", 50);
useCache = OGlobalConfiguration.DB_USE_CACHE.getValueAsBoolean();
} catch (Throwable t) {
throw new ODatabaseException("Error on opening database '" + iURL + "'", t);
}
}
public <DB extends ODatabase> DB open(final String iUserName, final String iUserPassword) {
try {
if (status == STATUS.OPEN)
throw new IllegalStateException("Database " + getName() + " is already open");
if (storage == null)
storage = Orient.instance().loadStorage(url);
storage.open(getId(), iUserName, iUserPassword);
// WAKE UP DB LIFECYCLE LISTENER
for (ODatabaseLifecycleListener it : Orient.instance().getDbLifecycleListeners())
it.onOpen(getDatabaseOwner());
// WAKE UP LISTENERS
for (ODatabaseListener listener : listeners)
try {
listener.onOpen(this);
} catch (Throwable t) {
}
status = STATUS.OPEN;
} catch (ODatabaseException e) {
throw e;
} catch (Exception e) {
throw new ODatabaseException("Can't open database", e);
}
return (DB) this;
}
public <DB extends ODatabase> DB create() {
try {
if (status == STATUS.OPEN)
throw new IllegalStateException("Database " + getName() + " is already open");
if (storage == null)
storage = Orient.instance().loadStorage(url);
storage.create();
// WAKE UP DB LIFECYCLE LISTENER
for (ODatabaseLifecycleListener it : Orient.instance().getDbLifecycleListeners())
it.onOpen(getDatabaseOwner());
// WAKE UP LISTENERS
for (ODatabaseListener listener : listeners)
try {
listener.onCreate(this);
} catch (Throwable t) {
}
status = STATUS.OPEN;
} catch (Exception e) {
throw new ODatabaseException("Can't create database", e);
}
return (DB) this;
}
public void delete() {
close(false);
try {
if (storage == null)
storage = Orient.instance().loadStorage(url);
storage.delete();
// WAKE UP LISTENERS
for (ODatabaseListener listener : listeners)
try {
listener.onDelete(this);
} catch (Throwable t) {
}
status = STATUS.CLOSED;
} catch (Exception e) {
throw new ODatabaseException("Can't delete database", e);
}
}
public boolean exists() {
if (status == STATUS.OPEN)
return true;
if (storage == null)
storage = Orient.instance().loadStorage(url);
return storage.exists();
}
public long countClusterElements(final String iClusterName) {
return storage.count(getClusterIdByName(iClusterName));
}
public long countClusterElements(final int iClusterId) {
return storage.count(iClusterId);
}
public long countClusterElements(final int[] iClusterIds) {
return storage.count(iClusterIds);
}
public ORawBuffer read(final int iClusterId, final long iPosition, final String iFetchPlan) {
if (iClusterId < 0 || iPosition < 0)
return null;
try {
final String recId = ORecordId.generateString(iClusterId, iPosition);
// SEARCH IT IN CACHE
ORawBuffer result;
if (useCache) {
// FIND IN CACHE
result = getCache().popRecord(recId);
if (result != null)
// FOUND: JUST RETURN IT
return result;
}
result = storage.readRecord(databaseOwner, id, iClusterId, iPosition, iFetchPlan);
if (useCache)
// ADD THE RECORD TO THE LOCAL CACHE
getCache().pushRecord(recId, result);
return result;
} catch (Throwable t) {
throw new ODatabaseException("Error on retrieving record #" + iPosition + " in cluster '"
+ storage.getPhysicalClusterNameById(iClusterId) + "'", t);
}
}
public long save(final int iClusterId, long iPosition, final byte[] iContent, final int iVersion, final byte iRecordType) {
try {
if (iPosition < 0) {
// CREATE
iPosition = storage.createRecord(iClusterId, iContent, iRecordType);
if (useCache)
// ADD/UPDATE IT IN CACHE
getCache().pushRecord(ORecordId.generateString(iClusterId, iPosition), new ORawBuffer(iContent, 0, iRecordType));
return iPosition;
} else {
// UPDATE
int newVersion = storage.updateRecord(id, iClusterId, iPosition, iContent, iVersion, iRecordType);
if (useCache)
// ADD/UPDATE IT IN CACHE
getCache().pushRecord(ORecordId.generateString(iClusterId, iPosition), new ORawBuffer(iContent, newVersion, iRecordType));
return newVersion;
}
} catch (Throwable t) {
throw new ODatabaseException("Error on saving record in cluster id: " + iClusterId + ", position: " + iPosition, t);
}
}
public void delete(final String iClusterName, final long iPosition, final int iVersion) {
delete(getClusterIdByName(iClusterName), iPosition, iVersion);
}
public void delete(final int iClusterId, final long iPosition, final int iVersion) {
try {
if (!storage.deleteRecord(id, iClusterId, iPosition, iVersion))
throw new ORecordNotFoundException("The record with id '" + iClusterId + ":" + iPosition + "' was not found");
// DELETE IT ALSO IN CACHE
if (useCache)
getCache().removeRecord(ORecordId.generateString(iClusterId, iPosition));
} catch (Exception e) {
OLogManager.instance().exception("Error on deleting record #%d in cluster '%s'", e, ODatabaseException.class, iPosition,
storage.getPhysicalClusterNameById(iClusterId));
}
}
public OStorage getStorage() {
return storage;
}
public boolean isClosed() {
return status == STATUS.CLOSED;
}
public String getName() {
return storage != null ? storage.getName() : "<no-name>";
}
public String getURL() {
return storage != null ? storage.getURL() : "<no-url>";
}
@Override
public void finalize() {
close();
}
public void close() {
close(true);
}
public int getId() {
return id;
}
public String getClusterType(final String iClusterName) {
return storage.getClusterTypeByName(iClusterName);
}
public int getClusterIdByName(final String iClusterName) {
return storage.getClusterIdByName(iClusterName);
}
public String getClusterNameById(final int iClusterId) {
if (iClusterId == -1)
return null;
		// PHYSICAL CLUSTER
return storage.getPhysicalClusterNameById(iClusterId);
}
public int addLogicalCluster(final String iClusterName, final int iPhyClusterContainerId) {
return storage.addCluster(iClusterName, OStorage.CLUSTER_TYPE.LOGICAL, iPhyClusterContainerId);
}
public int addPhysicalCluster(final String iClusterName, final String iClusterFileName, final int iStartSize) {
return storage.addCluster(iClusterName, OStorage.CLUSTER_TYPE.PHYSICAL, iClusterFileName, iStartSize);
}
public int addDataSegment(final String iSegmentName, final String iSegmentFileName) {
return storage.addDataSegment(iSegmentName, iSegmentFileName);
}
public Collection<String> getClusterNames() {
return storage.getClusterNames();
}
public OCacheRecord getCache() {
return storage.getCache();
}
public int getDefaultClusterId() {
return storage.getDefaultClusterId();
}
public void declareIntent(final OIntent iIntent, final Object... iParams) {
if (currentIntent != null)
// END CURRENT INTENT
currentIntent.end(this);
currentIntent = iIntent;
if (iIntent != null)
iIntent.begin(this, iParams);
}
public ODatabaseRecord<?> getDatabaseOwner() {
return databaseOwner;
}
public ODatabaseRaw setOwner(final ODatabaseRecord<?> iOwner) {
databaseOwner = iOwner;
return this;
}
public boolean isUseCache() {
return useCache;
}
public void setUseCache(boolean useCache) {
this.useCache = useCache;
}
public Object setProperty(final String iName, final Object iValue) {
return properties.put(iName, iValue);
}
public Object getProperty(final String iName) {
return properties.get(iName);
}
public Iterator<Entry<String, Object>> getProperties() {
return properties.entrySet().iterator();
}
public void registerListener(final ODatabaseListener iListener) {
listeners.add(iListener);
}
public List<ODatabaseListener> getListeners() {
return listeners;
}
protected void close(boolean iCloseStorageToo) {
if (status != STATUS.OPEN)
return;
// WAKE UP DB LIFECYCLE LISTENER
for (ODatabaseLifecycleListener it : Orient.instance().getDbLifecycleListeners())
it.onClose(getDatabaseOwner());
// WAKE UP LISTENERS
for (ODatabaseListener listener : listeners)
try {
listener.onClose(this);
} catch (Throwable t) {
}
listeners.clear();
if (storage != null && iCloseStorageToo)
storage.removeUser();
status = STATUS.CLOSED;
}
}
| core/src/main/java/com/orientechnologies/orient/core/db/raw/ODatabaseRaw.java | /*
* Copyright 1999-2010 Luca Garulli (l.garulli--at--orientechnologies.com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.orientechnologies.orient.core.db.raw;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import com.orientechnologies.common.log.OLogManager;
import com.orientechnologies.orient.core.Orient;
import com.orientechnologies.orient.core.cache.OCacheRecord;
import com.orientechnologies.orient.core.config.OGlobalConfiguration;
import com.orientechnologies.orient.core.db.ODatabase;
import com.orientechnologies.orient.core.db.ODatabaseLifecycleListener;
import com.orientechnologies.orient.core.db.ODatabaseListener;
import com.orientechnologies.orient.core.db.record.ODatabaseRecord;
import com.orientechnologies.orient.core.exception.ODatabaseException;
import com.orientechnologies.orient.core.exception.ORecordNotFoundException;
import com.orientechnologies.orient.core.id.ORecordId;
import com.orientechnologies.orient.core.intent.OIntent;
import com.orientechnologies.orient.core.storage.ORawBuffer;
import com.orientechnologies.orient.core.storage.OStorage;
/**
* Lower level ODatabase implementation. It's extended or wrapped by all the others.
*
* @author Luca Garulli (l.garulli--at--orientechnologies.com)
*
*/
@SuppressWarnings("unchecked")
public class ODatabaseRaw implements ODatabase {
private static volatile int serialId = 0;
protected int id;
protected String url;
protected OStorage storage;
protected STATUS status;
protected OIntent currentIntent;
private ODatabaseRecord<?> databaseOwner;
private boolean useCache;
private Map<String, Object> properties = new HashMap<String, Object>();
private List<ODatabaseListener> listeners = new ArrayList<ODatabaseListener>();
public enum STATUS {
OPEN, CLOSED
}
public ODatabaseRaw(final String iURL) {
try {
url = iURL;
id = serialId++;
status = STATUS.CLOSED;
// SET DEFAULT PROPERTIES
setProperty("fetch-max", 50);
useCache = OGlobalConfiguration.DB_USE_CACHE.getValueAsBoolean();
} catch (Throwable t) {
throw new ODatabaseException("Error on opening database '" + iURL + "'", t);
}
}
public <DB extends ODatabase> DB open(final String iUserName, final String iUserPassword) {
try {
if (status == STATUS.OPEN)
throw new IllegalStateException("Database " + getName() + " is already open");
storage = Orient.instance().loadStorage(url);
storage.open(getId(), iUserName, iUserPassword);
// WAKE UP DB LIFECYCLE LISTENER
for (ODatabaseLifecycleListener it : Orient.instance().getDbLifecycleListeners())
it.onOpen(getDatabaseOwner());
// WAKE UP LISTENERS
for (ODatabaseListener listener : listeners)
try {
listener.onOpen(this);
} catch (Throwable t) {
}
status = STATUS.OPEN;
} catch (ODatabaseException e) {
throw e;
} catch (Exception e) {
throw new ODatabaseException("Can't open database", e);
}
return (DB) this;
}
public <DB extends ODatabase> DB create() {
try {
if (status == STATUS.OPEN)
throw new IllegalStateException("Database " + getName() + " is already open");
storage = Orient.instance().loadStorage(url);
storage.create();
// WAKE UP DB LIFECYCLE LISTENER
for (ODatabaseLifecycleListener it : Orient.instance().getDbLifecycleListeners())
it.onOpen(getDatabaseOwner());
// WAKE UP LISTENERS
for (ODatabaseListener listener : listeners)
try {
listener.onCreate(this);
} catch (Throwable t) {
}
status = STATUS.OPEN;
} catch (Exception e) {
throw new ODatabaseException("Can't create database", e);
}
return (DB) this;
}
public void delete() {
close(false);
try {
if (storage == null)
storage = Orient.instance().loadStorage(url);
storage.delete();
// WAKE UP LISTENERS
for (ODatabaseListener listener : listeners)
try {
listener.onDelete(this);
} catch (Throwable t) {
}
status = STATUS.CLOSED;
} catch (Exception e) {
throw new ODatabaseException("Can't delete database", e);
}
}
public boolean exists() {
if (status == STATUS.OPEN)
return true;
return storage.exists();
}
public long countClusterElements(final String iClusterName) {
return storage.count(getClusterIdByName(iClusterName));
}
public long countClusterElements(final int iClusterId) {
return storage.count(iClusterId);
}
public long countClusterElements(final int[] iClusterIds) {
return storage.count(iClusterIds);
}
public ORawBuffer read(final int iClusterId, final long iPosition, final String iFetchPlan) {
if (iClusterId < 0 || iPosition < 0)
return null;
try {
final String recId = ORecordId.generateString(iClusterId, iPosition);
// SEARCH IT IN CACHE
ORawBuffer result;
if (useCache) {
// FIND IN CACHE
result = getCache().popRecord(recId);
if (result != null)
// FOUND: JUST RETURN IT
return result;
}
result = storage.readRecord(databaseOwner, id, iClusterId, iPosition, iFetchPlan);
if (useCache)
// ADD THE RECORD TO THE LOCAL CACHE
getCache().pushRecord(recId, result);
return result;
} catch (Throwable t) {
throw new ODatabaseException("Error on retrieving record #" + iPosition + " in cluster '"
+ storage.getPhysicalClusterNameById(iClusterId) + "'", t);
}
}
public long save(final int iClusterId, long iPosition, final byte[] iContent, final int iVersion, final byte iRecordType) {
try {
if (iPosition < 0) {
// CREATE
iPosition = storage.createRecord(iClusterId, iContent, iRecordType);
if (useCache)
// ADD/UPDATE IT IN CACHE
getCache().pushRecord(ORecordId.generateString(iClusterId, iPosition), new ORawBuffer(iContent, 0, iRecordType));
return iPosition;
} else {
// UPDATE
int newVersion = storage.updateRecord(id, iClusterId, iPosition, iContent, iVersion, iRecordType);
if (useCache)
// ADD/UPDATE IT IN CACHE
getCache().pushRecord(ORecordId.generateString(iClusterId, iPosition), new ORawBuffer(iContent, newVersion, iRecordType));
return newVersion;
}
} catch (Throwable t) {
throw new ODatabaseException("Error on saving record in cluster id: " + iClusterId + ", position: " + iPosition, t);
}
}
public void delete(final String iClusterName, final long iPosition, final int iVersion) {
delete(getClusterIdByName(iClusterName), iPosition, iVersion);
}
public void delete(final int iClusterId, final long iPosition, final int iVersion) {
try {
if (!storage.deleteRecord(id, iClusterId, iPosition, iVersion))
throw new ORecordNotFoundException("The record with id '" + iClusterId + ":" + iPosition + "' was not found");
// DELETE IT ALSO IN CACHE
if (useCache)
getCache().removeRecord(ORecordId.generateString(iClusterId, iPosition));
} catch (Exception e) {
OLogManager.instance().exception("Error on deleting record #%d in cluster '%s'", e, ODatabaseException.class, iPosition,
storage.getPhysicalClusterNameById(iClusterId));
}
}
public OStorage getStorage() {
return storage;
}
public boolean isClosed() {
return status == STATUS.CLOSED;
}
public String getName() {
return storage != null ? storage.getName() : "<no-name>";
}
public String getURL() {
return storage != null ? storage.getURL() : "<no-url>";
}
@Override
public void finalize() {
close();
}
public void close() {
close(true);
}
public int getId() {
return id;
}
public String getClusterType(final String iClusterName) {
return storage.getClusterTypeByName(iClusterName);
}
public int getClusterIdByName(final String iClusterName) {
return storage.getClusterIdByName(iClusterName);
}
public String getClusterNameById(final int iClusterId) {
if (iClusterId == -1)
return null;
// PHYSICAL CLUSTER
return storage.getPhysicalClusterNameById(iClusterId);
}
public int addLogicalCluster(final String iClusterName, final int iPhyClusterContainerId) {
return storage.addCluster(iClusterName, OStorage.CLUSTER_TYPE.LOGICAL, iPhyClusterContainerId);
}
public int addPhysicalCluster(final String iClusterName, final String iClusterFileName, final int iStartSize) {
return storage.addCluster(iClusterName, OStorage.CLUSTER_TYPE.PHYSICAL, iClusterFileName, iStartSize);
}
public int addDataSegment(final String iSegmentName, final String iSegmentFileName) {
return storage.addDataSegment(iSegmentName, iSegmentFileName);
}
public Collection<String> getClusterNames() {
return storage.getClusterNames();
}
public OCacheRecord getCache() {
return storage.getCache();
}
public int getDefaultClusterId() {
return storage.getDefaultClusterId();
}
public void declareIntent(final OIntent iIntent, final Object... iParams) {
if (currentIntent != null)
// END CURRENT INTENT
currentIntent.end(this);
currentIntent = iIntent;
if (iIntent != null)
iIntent.begin(this, iParams);
}
public ODatabaseRecord<?> getDatabaseOwner() {
return databaseOwner;
}
public ODatabaseRaw setOwner(final ODatabaseRecord<?> iOwner) {
databaseOwner = iOwner;
return this;
}
public boolean isUseCache() {
return useCache;
}
public void setUseCache(boolean useCache) {
this.useCache = useCache;
}
public Object setProperty(final String iName, final Object iValue) {
return properties.put(iName, iValue);
}
public Object getProperty(final String iName) {
return properties.get(iName);
}
public Iterator<Entry<String, Object>> getProperties() {
return properties.entrySet().iterator();
}
public void registerListener(final ODatabaseListener iListener) {
listeners.add(iListener);
}
public List<ODatabaseListener> getListeners() {
return listeners;
}
protected void close(boolean iCloseStorageToo) {
if (status != STATUS.OPEN)
return;
// WAKE UP DB LIFECYCLE LISTENER
for (ODatabaseLifecycleListener it : Orient.instance().getDbLifecycleListeners())
it.onClose(getDatabaseOwner());
// WAKE UP LISTENERS
for (ODatabaseListener listener : listeners)
try {
listener.onClose(this);
} catch (Throwable t) {
}
listeners.clear();
if (storage != null && iCloseStorageToo)
storage.removeUser();
status = STATUS.CLOSED;
}
}
| Fixed issue on multiple clients = one socket
| core/src/main/java/com/orientechnologies/orient/core/db/raw/ODatabaseRaw.java | Fixed issue on multiple clients = one socket | <ide><path>ore/src/main/java/com/orientechnologies/orient/core/db/raw/ODatabaseRaw.java
<ide> if (status == STATUS.OPEN)
<ide> throw new IllegalStateException("Database " + getName() + " is already open");
<ide>
<del> storage = Orient.instance().loadStorage(url);
<add> if (storage == null)
<add> storage = Orient.instance().loadStorage(url);
<ide> storage.open(getId(), iUserName, iUserPassword);
<ide>
<ide> // WAKE UP DB LIFECYCLE LISTENER
<ide> if (status == STATUS.OPEN)
<ide> throw new IllegalStateException("Database " + getName() + " is already open");
<ide>
<del> storage = Orient.instance().loadStorage(url);
<add> if (storage == null)
<add> storage = Orient.instance().loadStorage(url);
<ide> storage.create();
<ide>
<ide> // WAKE UP DB LIFECYCLE LISTENER
<ide> public boolean exists() {
<ide> if (status == STATUS.OPEN)
<ide> return true;
<add>
<add> if (storage == null)
<add> storage = Orient.instance().loadStorage(url);
<ide>
<ide> return storage.exists();
<ide> } |
|
Java | apache-2.0 | f3447fb9c976fb801c037a626d2c3d3a4a1c897f | 0 | ChiralBehaviors/Groo | /**
* (C) Copyright 2013 Hal Hildebrand, All Rights Reserved
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.hellblazer.groo;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.Callable;
import java.util.concurrent.CopyOnWriteArraySet;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Executor;
import java.util.concurrent.ExecutorCompletionService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import javax.management.Attribute;
import javax.management.AttributeList;
import javax.management.AttributeNotFoundException;
import javax.management.InstanceNotFoundException;
import javax.management.IntrospectionException;
import javax.management.InvalidAttributeValueException;
import javax.management.ListenerNotFoundException;
import javax.management.MBeanException;
import javax.management.MBeanInfo;
import javax.management.MBeanRegistration;
import javax.management.MBeanServer;
import javax.management.NotificationFilter;
import javax.management.NotificationListener;
import javax.management.ObjectInstance;
import javax.management.ObjectName;
import javax.management.QueryExp;
import javax.management.ReflectionException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* @author hhildebrand
*
*/
public class Node implements NodeMBean, MBeanRegistration {
private static final Logger log = LoggerFactory.getLogger(Node.class);
private final Set<NodeMBean> children = new CopyOnWriteArraySet<>();
private final Executor executor;
private final RegistrationFilter filter;
private MBeanServer mbs;
private ObjectName name;
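    // Design note (inferred from usage below): every MBeanServer-style
    // operation builds a TaskGenerator whose localTask runs against the
    // in-process MBeanServer and whose remoteTask delegates to a child
    // NodeMBean. The forAll(...) helper, defined elsewhere in this class,
    // submits those tasks to the completion service and returns their futures;
    // single-target operations take the first successful answer and cancel the
    // rest, while aggregate operations merge every result.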
public Node() {
this(null, null);
}
public Node(ObjectName sourcePattern, QueryExp sourceQuery) {
this(sourcePattern, sourceQuery, Executors.newCachedThreadPool());
}
public Node(ObjectName sourcePattern, QueryExp sourceQuery,
Executor executor) {
filter = new RegistrationFilter(sourcePattern, sourceQuery);
this.executor = executor;
}
/**
* @param child
*/
public void addChild(NodeMBean child) {
children.add(child);
}
/**
* @param objectName
* @param listener
* @param filter
* @param handback
* @throws InstanceNotFoundException
* @see javax.management.MBeanServer#addNotificationListener(javax.management.ObjectName,
* javax.management.NotificationListener,
* javax.management.NotificationFilter, java.lang.Object)
*/
@Override
public void addNotificationListener(final ObjectName objectName,
final NotificationListener listener,
final NotificationFilter filter,
final Object handback)
throws InstanceNotFoundException {
ExecutorCompletionService<Boolean> completionService = new ExecutorCompletionService<>(
executor);
TaskGenerator<Boolean> generator = new TaskGenerator<Boolean>() {
@Override
public Callable<Boolean> localTask(final ObjectName objectName) {
return new Callable<Boolean>() {
@Override
public Boolean call() throws Exception {
mbs.addNotificationListener(objectName, listener,
filter, handback);
return true;
}
};
}
@Override
public Callable<Boolean> remoteTask(final NodeMBean child) {
return new Callable<Boolean>() {
@Override
public Boolean call() throws Exception {
child.addNotificationListener(objectName, listener,
filter, handback);
return true;
}
};
}
};
List<Future<Boolean>> futures = forAll(completionService, generator,
objectName);
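        // The first node that knows the target MBean registers the listener;
        // cancel the remaining tasks and return as soon as that happens.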
for (int i = 0; i < futures.size(); i++) {
try {
if (completionService.take().get()) {
for (Future<Boolean> future : futures) {
future.cancel(true);
}
return;
}
} catch (InterruptedException e) {
return; // don't even log this ;)
} catch (ExecutionException e) {
log.debug(String.format("%s experienced exception when adding notification listener on %s for %s",
this, objectName, listener), e);
}
}
throw new InstanceNotFoundException(
String.format("Instance not found: %s",
objectName));
}
/**
* @param objectName
* @param listener
* @param filter
* @param handback
* @throws InstanceNotFoundException
* @see javax.management.MBeanServer#addNotificationListener(javax.management.ObjectName,
* javax.management.ObjectName, javax.management.NotificationFilter,
* java.lang.Object)
*/
@Override
public void addNotificationListener(final ObjectName objectName,
final ObjectName listener,
final NotificationFilter filter,
final Object handback)
throws InstanceNotFoundException {
ExecutorCompletionService<Boolean> completionService = new ExecutorCompletionService<>(
executor);
TaskGenerator<Boolean> generator = new TaskGenerator<Boolean>() {
@Override
public Callable<Boolean> localTask(final ObjectName objectName) {
return new Callable<Boolean>() {
@Override
public Boolean call() throws Exception {
mbs.addNotificationListener(objectName, listener,
filter, handback);
return true;
}
};
}
@Override
public Callable<Boolean> remoteTask(final NodeMBean child) {
return new Callable<Boolean>() {
@Override
public Boolean call() throws Exception {
child.addNotificationListener(objectName, listener,
filter, handback);
return true;
}
};
}
};
List<Future<Boolean>> futures = forAll(completionService, generator,
objectName);
for (int i = 0; i < futures.size(); i++) {
try {
if (completionService.take().get()) {
for (Future<Boolean> future : futures) {
future.cancel(true);
}
return;
}
} catch (InterruptedException e) {
return; // don't even log this ;)
} catch (ExecutionException e) {
log.debug(String.format("%s experienced exception when adding notification listener on %s for %s",
this, objectName, listener), e);
}
}
throw new InstanceNotFoundException(
String.format("Instance not found: %s",
objectName));
}
/* (non-Javadoc)
* @see com.hellblazer.groo.NodeMXBean#addNotificationListener(javax.management.ObjectName, javax.management.QueryExp, javax.management.NotificationListener, javax.management.NotificationFilter, java.lang.Object)
*/
@Override
public void addNotificationListener(final ObjectName pattern,
final QueryExp queryExpr,
final NotificationListener listener,
final NotificationFilter filter,
final Object handback)
throws InstanceNotFoundException {
ExecutorCompletionService<Boolean> completionService = new ExecutorCompletionService<>(
executor);
TaskGenerator<Boolean> generator = new TaskGenerator<Boolean>() {
@Override
public Callable<Boolean> localTask(final ObjectName objectName) {
return new Callable<Boolean>() {
@Override
public Boolean call() throws Exception {
mbs.addNotificationListener(objectName, listener,
filter, handback);
return true;
}
};
}
@Override
public Callable<Boolean> remoteTask(final NodeMBean child) {
return new Callable<Boolean>() {
@Override
public Boolean call() throws Exception {
child.addNotificationListener(pattern, queryExpr,
listener, filter,
handback);
return true;
}
};
}
};
List<Future<Boolean>> futures = forAll(completionService, generator,
pattern, queryExpr);
for (int i = 0; i < futures.size(); i++) {
try {
if (completionService.take().get()) {
for (Future<Boolean> future : futures) {
future.cancel(true);
}
return;
}
} catch (InterruptedException e) {
return;
} catch (ExecutionException e) {
log.debug(String.format("%s experienced exception when adding notification listener %s, %s",
this, pattern, queryExpr), e);
}
}
throw new InstanceNotFoundException(
String.format("Instance not found: %s, %s",
pattern, queryExpr));
}
/* (non-Javadoc)
* @see com.hellblazer.groo.NodeMXBean#addNotificationListener(javax.management.ObjectName, javax.management.QueryExp, javax.management.ObjectName, javax.management.NotificationFilter, java.lang.Object)
*/
@Override
public void addNotificationListener(final ObjectName pattern,
final QueryExp queryExpr,
final ObjectName listener,
final NotificationFilter filter,
final Object handback)
throws InstanceNotFoundException,
IOException {
ExecutorCompletionService<Boolean> completionService = new ExecutorCompletionService<>(
executor);
TaskGenerator<Boolean> generator = new TaskGenerator<Boolean>() {
@Override
public Callable<Boolean> localTask(final ObjectName objectName) {
return new Callable<Boolean>() {
@Override
public Boolean call() throws Exception {
mbs.addNotificationListener(objectName, listener,
filter, handback);
return true;
}
};
}
@Override
public Callable<Boolean> remoteTask(final NodeMBean child) {
return new Callable<Boolean>() {
@Override
public Boolean call() throws Exception {
child.addNotificationListener(pattern, queryExpr,
listener, filter,
handback);
return true;
}
};
}
};
List<Future<Boolean>> futures = forAll(completionService, generator,
pattern, queryExpr);
for (int i = 0; i < futures.size(); i++) {
try {
if (completionService.take().get()) {
for (Future<Boolean> future : futures) {
future.cancel(true);
}
return;
}
} catch (InterruptedException e) {
return;
} catch (ExecutionException e) {
log.debug(String.format("%s experienced exception when adding notification listener %s, %s",
this, pattern, queryExpr), e);
}
}
throw new InstanceNotFoundException(
String.format("Instance not found: %s, %s",
pattern, queryExpr));
}
/* (non-Javadoc)
* @see java.lang.Object#equals(java.lang.Object)
*/
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj == null) {
return false;
}
if (getClass() != obj.getClass()) {
return false;
}
Node other = (Node) obj;
if (name == null) {
if (other.name != null) {
return false;
}
} else if (!name.equals(other.name)) {
return false;
}
return true;
}
/* (non-Javadoc)
* @see com.hellblazer.groo.NodeMXBean#getAttribute(javax.management.ObjectName, javax.management.QueryExp, java.lang.String)
*/
@Override
public Map<ObjectName, Object> getAttribute(final ObjectName pattern,
final QueryExp queryExpr,
final String attribute)
throws MBeanException,
AttributeNotFoundException,
InstanceNotFoundException,
ReflectionException,
IOException {
Map<ObjectName, Object> attributes = new HashMap<>();
ExecutorCompletionService<Map<ObjectName, Object>> completionService = new ExecutorCompletionService<>(
executor);
TaskGenerator<Map<ObjectName, Object>> generator = new TaskGenerator<Map<ObjectName, Object>>() {
@Override
public Callable<Map<ObjectName, Object>> localTask(final ObjectName objectName) {
return new Callable<Map<ObjectName, Object>>() {
@Override
public Map<ObjectName, Object> call() throws Exception {
Map<ObjectName, Object> attributes = new HashMap<>();
attributes.put(objectName,
mbs.getAttribute(objectName, attribute));
return attributes;
}
};
}
@Override
public Callable<Map<ObjectName, Object>> remoteTask(final NodeMBean child) {
return new Callable<Map<ObjectName, Object>>() {
@Override
public Map<ObjectName, Object> call() throws Exception {
return child.getAttribute(pattern, queryExpr, attribute);
}
};
}
};
List<Future<Map<ObjectName, Object>>> futures = forAll(completionService,
generator,
pattern,
queryExpr);
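        // A pattern/query can match MBeans on several nodes, so merge every
        // per-node result map rather than stopping at the first answer.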
for (int i = 0; i < futures.size(); i++) {
try {
attributes.putAll(completionService.take().get());
} catch (InterruptedException e) {
return Collections.emptyMap();
} catch (ExecutionException e) {
log.debug(String.format("%s experienced exception when collecting attributes %s, %s",
this, pattern, queryExpr), e);
}
}
if (attributes.size() == 0) {
throw new InstanceNotFoundException(
String.format("Instance not found: %s, %s",
pattern,
queryExpr));
}
return attributes;
}
/**
* @param objectName
* @param attribute
* @return
* @throws MBeanException
* @throws AttributeNotFoundException
* @throws InstanceNotFoundException
* @throws ReflectionException
* @see javax.management.MBeanServer#getAttribute(javax.management.ObjectName,
* java.lang.String)
*/
@Override
public Object getAttribute(final ObjectName objectName,
final String attribute)
throws MBeanException,
AttributeNotFoundException,
InstanceNotFoundException,
ReflectionException {
ExecutorCompletionService<Object> completionService = new ExecutorCompletionService<>(
executor);
TaskGenerator<Object> generator = new TaskGenerator<Object>() {
@Override
public Callable<Object> localTask(final ObjectName objectName) {
return new Callable<Object>() {
@Override
public Object call() throws Exception {
return mbs.getAttribute(objectName, attribute);
}
};
}
@Override
public Callable<Object> remoteTask(final NodeMBean child) {
return new Callable<Object>() {
@Override
public Object call() throws Exception {
return child.getAttribute(objectName, attribute);
}
};
}
};
List<Future<Object>> futures = forAll(completionService, generator,
objectName);
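        // Return the first value found. AttributeNotFoundException is tracked
        // separately so a caller can distinguish "the MBean exists but lacks
        // the attribute" from "no such MBean anywhere in the tree".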
boolean attributeNotFound = false;
for (int i = 0; i < futures.size(); i++) {
try {
Object attributeValue = completionService.take().get();
for (Future<Object> future : futures) {
future.cancel(true);
}
return attributeValue;
} catch (InterruptedException e) {
return null;
} catch (ExecutionException e) {
if (e.getCause() instanceof AttributeNotFoundException) {
attributeNotFound = true;
} else {
log.debug(String.format("%s experienced exception when retriving attribute %s, %s",
this, objectName, attribute), e);
}
}
}
if (attributeNotFound) {
throw new AttributeNotFoundException(
String.format("Attribute not found: %s for %s",
attribute,
objectName));
} else {
throw new InstanceNotFoundException(
String.format("Instance not found: %s",
objectName));
}
}
/* (non-Javadoc)
* @see com.hellblazer.groo.NodeMXBean#getAttributes(javax.management.ObjectName, javax.management.QueryExp, java.lang.String[])
*/
@Override
public Map<ObjectName, AttributeList> getAttributes(final ObjectName pattern,
final QueryExp queryExpr,
final String[] attributes)
throws InstanceNotFoundException,
ReflectionException,
IOException {
Map<ObjectName, AttributeList> attrs = new HashMap<>();
ExecutorCompletionService<Map<ObjectName, AttributeList>> completionService = new ExecutorCompletionService<>(
executor);
TaskGenerator<Map<ObjectName, AttributeList>> generator = new TaskGenerator<Map<ObjectName, AttributeList>>() {
@Override
public Callable<Map<ObjectName, AttributeList>> localTask(final ObjectName objectName) {
return new Callable<Map<ObjectName, AttributeList>>() {
@Override
public Map<ObjectName, AttributeList> call()
throws Exception {
Map<ObjectName, AttributeList> attrs = new HashMap<>();
attrs.put(objectName,
mbs.getAttributes(objectName, attributes));
return attrs;
}
};
}
@Override
public Callable<Map<ObjectName, AttributeList>> remoteTask(final NodeMBean child) {
return new Callable<Map<ObjectName, AttributeList>>() {
@Override
public Map<ObjectName, AttributeList> call()
throws Exception {
return child.getAttributes(pattern, queryExpr,
attributes);
}
};
}
};
List<Future<Map<ObjectName, AttributeList>>> futures = forAll(completionService,
generator,
pattern,
queryExpr);
for (int i = 0; i < futures.size(); i++) {
try {
attrs.putAll(completionService.take().get());
} catch (InterruptedException e) {
return Collections.emptyMap();
} catch (ExecutionException e) {
log.debug(String.format("%s experienced exception when retriving attributes %s, %s, %s, %s",
this, pattern, queryExpr,
Arrays.asList(attributes)), e);
}
}
if (attrs.size() == 0) {
throw new InstanceNotFoundException(
String.format("Instance not found: %s, %s",
pattern,
queryExpr));
}
return attrs;
}
/**
* @param name
* @param attributes
* @return
* @throws InstanceNotFoundException
* @throws ReflectionException
* @see javax.management.MBeanServer#getAttributes(javax.management.ObjectName,
* java.lang.String[])
*/
@Override
public AttributeList getAttributes(final ObjectName objectName,
final String[] attributes)
throws InstanceNotFoundException,
ReflectionException {
ExecutorCompletionService<AttributeList> completionService = new ExecutorCompletionService<>(
executor);
TaskGenerator<AttributeList> generator = new TaskGenerator<AttributeList>() {
@Override
public Callable<AttributeList> localTask(final ObjectName objectName) {
return new Callable<AttributeList>() {
@Override
public AttributeList call() throws Exception {
return mbs.getAttributes(objectName, attributes);
}
};
}
@Override
public Callable<AttributeList> remoteTask(final NodeMBean child) {
return new Callable<AttributeList>() {
@Override
public AttributeList call() throws Exception {
return child.getAttributes(objectName, attributes);
}
};
}
};
List<Future<AttributeList>> futures = forAll(completionService,
generator, objectName);
for (int i = 0; i < futures.size(); i++) {
try {
AttributeList attrs = completionService.take().get();
for (Future<AttributeList> future : futures) {
future.cancel(true);
}
return attrs;
} catch (InterruptedException e) {
return new AttributeList();
} catch (ExecutionException e) {
log.debug(String.format("%s experienced exception when retriving attributes %s, %s",
this, objectName,
Arrays.asList(attributes)), e);
}
}
throw new InstanceNotFoundException(
String.format("Instance not found: %s",
objectName));
}
public Set<NodeMBean> getChildren() {
return Collections.unmodifiableSet(children);
}
/**
* @return the filter
*/
public RegistrationFilter getFilter() {
return filter;
}
/**
* @return
* @see javax.management.MBeanServer#getMBeanCount()
*/
@Override
public Integer getMBeanCount() {
ExecutorCompletionService<Integer> completionService = new ExecutorCompletionService<>(
executor);
TaskGenerator<Integer> generator = new TaskGenerator<Integer>() {
@Override
public Callable<Integer> localTask(final ObjectName objectName) {
return new Callable<Integer>() {
@Override
public Integer call() throws Exception {
return mbs.getMBeanCount();
}
};
}
@Override
public Callable<Integer> remoteTask(final NodeMBean child) {
return new Callable<Integer>() {
@Override
public Integer call() throws Exception {
return child.getMBeanCount();
}
};
}
};
List<Future<Integer>> futures = forAll(completionService, generator,
null);
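        // The total is the local MBean count plus whatever each child reports.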
int count = 0;
for (int i = 0; i < futures.size(); i++) {
try {
count += completionService.take().get();
} catch (InterruptedException e) {
return 0;
} catch (ExecutionException e) {
log.debug(String.format("%s experienced exception when retriving mbean count %s",
this), e);
}
}
return count;
}
/* (non-Javadoc)
* @see com.hellblazer.groo.NodeMXBean#getMBeanCount(javax.management.ObjectName)
*/
@Override
public int getMBeanCount(final ObjectName filter, final QueryExp queryExp) {
ExecutorCompletionService<Integer> completionService = new ExecutorCompletionService<>(
executor);
TaskGenerator<Integer> generator = new TaskGenerator<Integer>() {
@Override
public Callable<Integer> localTask(final ObjectName objectName) {
return new Callable<Integer>() {
@Override
public Integer call() throws Exception {
return mbs.queryNames(filter, queryExp).size();
}
};
}
@Override
public Callable<Integer> remoteTask(final NodeMBean child) {
return new Callable<Integer>() {
@Override
public Integer call() throws Exception {
return child.getMBeanCount(filter, queryExp);
}
};
}
};
List<Future<Integer>> futures = forAll(completionService, generator,
null);
int count = 0;
for (int i = 0; i < futures.size(); i++) {
try {
count += completionService.take().get();
} catch (InterruptedException e) {
return 0;
} catch (ExecutionException e) {
log.debug(String.format("%s experienced exception when retriving mbean count %s, %s, %s",
this, filter, queryExp), e);
}
}
return count;
}
/**
* @param objectName
* @return
* @throws InstanceNotFoundException
* @throws IntrospectionException
* @throws ReflectionException
* @see javax.management.MBeanServer#getMBeanInfo(javax.management.ObjectName)
*/
@Override
public MBeanInfo getMBeanInfo(final ObjectName objectName)
throws InstanceNotFoundException,
IntrospectionException,
ReflectionException {
ExecutorCompletionService<MBeanInfo> completionService = new ExecutorCompletionService<>(
executor);
TaskGenerator<MBeanInfo> generator = new TaskGenerator<MBeanInfo>() {
@Override
public Callable<MBeanInfo> localTask(final ObjectName objectName) {
return new Callable<MBeanInfo>() {
@Override
public MBeanInfo call() throws Exception {
return mbs.getMBeanInfo(objectName);
}
};
}
@Override
public Callable<MBeanInfo> remoteTask(final NodeMBean child) {
return new Callable<MBeanInfo>() {
@Override
public MBeanInfo call() throws Exception {
return child.getMBeanInfo(objectName);
}
};
}
};
List<Future<MBeanInfo>> futures = forAll(completionService, generator,
objectName);
for (int i = 0; i < futures.size(); i++) {
try {
MBeanInfo info = completionService.take().get();
for (Future<MBeanInfo> future : futures) {
future.cancel(true);
}
return info;
} catch (InterruptedException e) {
return null;
} catch (ExecutionException e) {
log.debug(String.format("%s experienced exception when retriving mbean info %s, %s",
this, objectName), e);
}
}
throw new InstanceNotFoundException(
String.format("Instance not found: %s",
objectName));
}
/**
* @return the name
*/
@Override
public ObjectName getName() {
return name;
}
/* (non-Javadoc)
* @see com.hellblazer.groo.NodeMBean#getObjectInstance(javax.management.ObjectName)
*/
@Override
public ObjectInstance getObjectInstance(final ObjectName objectName)
throws InstanceNotFoundException {
ExecutorCompletionService<ObjectInstance> completionService = new ExecutorCompletionService<>(
executor);
TaskGenerator<ObjectInstance> generator = new TaskGenerator<ObjectInstance>() {
@Override
public Callable<ObjectInstance> localTask(final ObjectName objectName) {
return new Callable<ObjectInstance>() {
@Override
public ObjectInstance call() throws Exception {
return mbs.getObjectInstance(objectName);
}
};
}
@Override
public Callable<ObjectInstance> remoteTask(final NodeMBean child) {
return new Callable<ObjectInstance>() {
@Override
public ObjectInstance call() throws Exception {
return child.getObjectInstance(objectName);
}
};
}
};
List<Future<ObjectInstance>> futures = forAll(completionService,
generator, objectName);
for (int i = 0; i < futures.size(); i++) {
try {
ObjectInstance instance = completionService.take().get();
for (Future<ObjectInstance> future : futures) {
future.cancel(true);
}
return instance;
} catch (InterruptedException e) {
return null;
} catch (ExecutionException e) {
log.debug(String.format("%s experienced exception when retriving object instance %s, %s",
this, objectName), e);
}
}
throw new InstanceNotFoundException(
String.format("Instance not found: %s",
objectName));
}
/* (non-Javadoc)
* @see com.hellblazer.groo.NodeMXBean#getObjectInstances(javax.management.ObjectName, javax.management.QueryExp)
*/
@Override
public Set<ObjectInstance> getObjectInstances(final ObjectName filter,
final QueryExp queryExpr)
throws InstanceNotFoundException,
IOException {
ExecutorCompletionService<Set<ObjectInstance>> completionService = new ExecutorCompletionService<>(
executor);
TaskGenerator<Set<ObjectInstance>> generator = new TaskGenerator<Set<ObjectInstance>>() {
@Override
public Callable<Set<ObjectInstance>> localTask(final ObjectName objectName) {
return new Callable<Set<ObjectInstance>>() {
@Override
public Set<ObjectInstance> call() throws Exception {
return mbs.queryMBeans(objectName, queryExpr);
}
};
}
@Override
public Callable<Set<ObjectInstance>> remoteTask(final NodeMBean child) {
return new Callable<Set<ObjectInstance>>() {
@Override
public Set<ObjectInstance> call() throws Exception {
return child.getObjectInstances(filter, queryExpr);
}
};
}
};
List<Future<Set<ObjectInstance>>> futures = forAll(completionService,
generator, filter);
Set<ObjectInstance> instances = new HashSet<>();
for (int i = 0; i < futures.size(); i++) {
try {
instances.addAll(completionService.take().get());
} catch (InterruptedException e) {
return instances;
} catch (ExecutionException e) {
log.debug(String.format("%s experienced exception when retreiving object instances %s, %s, %s",
this, filter, queryExpr), e);
}
}
if (instances.size() == 0) {
throw new InstanceNotFoundException(
String.format("Instance not found: %s, %s",
filter, queryExpr));
}
return instances;
}
/* (non-Javadoc)
* @see java.lang.Object#hashCode()
*/
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
result = prime * result + (name == null ? 0 : name.hashCode());
return result;
}
/* (non-Javadoc)
* @see com.hellblazer.groo.NodeMXBean#invoke(javax.management.ObjectName, javax.management.QueryExp, java.lang.String, java.lang.Object[], java.lang.String[])
*/
@Override
public Map<ObjectName, Object> invoke(final ObjectName filter,
final QueryExp queryExpr,
final String operationName,
final Object[] params,
final String[] signature)
throws InstanceNotFoundException,
MBeanException,
ReflectionException,
IOException {
ExecutorCompletionService<Map<ObjectName, Object>> completionService = new ExecutorCompletionService<>(
executor);
TaskGenerator<Map<ObjectName, Object>> generator = new TaskGenerator<Map<ObjectName, Object>>() {
@Override
public Callable<Map<ObjectName, Object>> localTask(final ObjectName objectName) {
return new Callable<Map<ObjectName, Object>>() {
@Override
public Map<ObjectName, Object> call() throws Exception {
Map<ObjectName, Object> result = new HashMap<>();
result.put(objectName, mbs.invoke(objectName,
operationName,
params, signature));
return result;
}
};
}
@Override
public Callable<Map<ObjectName, Object>> remoteTask(final NodeMBean child) {
return new Callable<Map<ObjectName, Object>>() {
@Override
public Map<ObjectName, Object> call() throws Exception {
return child.invoke(filter, queryExpr, operationName,
params, signature);
}
};
}
};
List<Future<Map<ObjectName, Object>>> futures = forAll(completionService,
generator,
filter,
queryExpr);
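        // Results are keyed by the ObjectName each invocation actually ran
        // against, so callers can correlate return values with individual MBeans.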
Map<ObjectName, Object> results = new HashMap<>();
for (int i = 0; i < futures.size(); i++) {
try {
results.putAll(completionService.take().get());
} catch (InterruptedException e) {
return results;
} catch (ExecutionException e) {
log.debug(String.format("%s experienced exception when invoking %s, %s, %s, %s, %s",
this,
filter,
queryExpr,
operationName,
params != null ? Arrays.asList(params)
: null,
signature != null ? Arrays.asList(signature)
: null), e.getCause());
}
}
if (results.size() == 0) {
throw new InstanceNotFoundException(
String.format("Instance not found: %s, %s",
filter, queryExpr));
}
return results;
}
/**
* @param objectName
* @param operationName
* @param params
* @param signature
* @return
* @throws InstanceNotFoundException
* @throws MBeanException
* @throws ReflectionException
* @see javax.management.MBeanServer#invoke(javax.management.ObjectName,
* java.lang.String, java.lang.Object[], java.lang.String[])
*/
@Override
public Object invoke(final ObjectName objectName,
final String operationName, final Object[] params,
final String[] signature)
throws InstanceNotFoundException,
MBeanException,
ReflectionException {
ExecutorCompletionService<Object> completionService = new ExecutorCompletionService<>(
executor);
TaskGenerator<Object> generator = new TaskGenerator<Object>() {
@Override
public Callable<Object> localTask(final ObjectName objectName) {
return new Callable<Object>() {
@Override
public Object call() throws Exception {
return mbs.invoke(objectName, operationName, params,
signature);
}
};
}
@Override
public Callable<Object> remoteTask(final NodeMBean child) {
return new Callable<Object>() {
@Override
public Object call() throws Exception {
return child.invoke(objectName, operationName, params,
signature);
}
};
}
};
List<Future<Object>> futures = forAll(completionService, generator,
objectName);
for (int i = 0; i < futures.size(); i++) {
try {
Object result = completionService.take().get();
for (Future<Object> future : futures) {
future.cancel(true);
}
return result;
} catch (InterruptedException e) {
return null;
} catch (ExecutionException e) {
log.debug(String.format("%s experienced exception when invoking %s, %s, %s, %s",
this,
objectName,
operationName,
params != null ? Arrays.asList(params)
: null,
signature != null ? Arrays.asList(signature)
: null), e);
}
}
throw new InstanceNotFoundException(
String.format("Instance not found: %s",
objectName));
}
/**
* @param objectName
* @param className
* @return
* @throws InstanceNotFoundException
* @throws IOException
* @see javax.management.MBeanServer#isInstanceOf(javax.management.ObjectName,
* java.lang.String)
*/
@Override
public boolean isInstanceOf(final ObjectName objectName,
final String className)
throws InstanceNotFoundException,
IOException {
ExecutorCompletionService<Boolean> completionService = new ExecutorCompletionService<>(
executor);
TaskGenerator<Boolean> generator = new TaskGenerator<Boolean>() {
@Override
public Callable<Boolean> localTask(final ObjectName objectName) {
return new Callable<Boolean>() {
@Override
public Boolean call() throws Exception {
return mbs.isInstanceOf(objectName, className);
}
};
}
@Override
public Callable<Boolean> remoteTask(final NodeMBean child) {
return new Callable<Boolean>() {
@Override
public Boolean call() throws Exception {
return child.isInstanceOf(objectName, className);
}
};
}
};
List<Future<Boolean>> futures = forAll(completionService, generator,
objectName);
for (int i = 0; i < futures.size(); i++) {
try {
Boolean result = completionService.take().get();
for (Future<Boolean> future : futures) {
future.cancel(true);
}
return result;
} catch (InterruptedException e) {
return false;
} catch (ExecutionException e) {
log.debug(String.format("%s experienced exception when determining instance of %s, %s",
this, objectName, className), e);
}
}
throw new InstanceNotFoundException(
String.format("Instance not found: %s",
objectName));
}
/**
* @param objectName
* @return
* @throws IOException
* @see javax.management.MBeanServer#isRegistered(javax.management.ObjectName)
*/
@Override
public boolean isRegistered(final ObjectName objectName) throws IOException {
ExecutorCompletionService<Boolean> completionService = new ExecutorCompletionService<>(
executor);
TaskGenerator<Boolean> generator = new TaskGenerator<Boolean>() {
@Override
public Callable<Boolean> localTask(final ObjectName objectName) {
return new Callable<Boolean>() {
@Override
public Boolean call() throws Exception {
return mbs.isRegistered(objectName);
}
};
}
@Override
public Callable<Boolean> remoteTask(final NodeMBean child) {
return new Callable<Boolean>() {
@Override
public Boolean call() throws Exception {
return child.isRegistered(objectName);
}
};
}
};
List<Future<Boolean>> futures = forAll(completionService, generator,
objectName);
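        // Unlike most operations, a miss here is not an error: if no node
        // answers positively we simply fall through and report the name as
        // unregistered.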
for (int i = 0; i < futures.size(); i++) {
try {
Boolean result = completionService.take().get();
for (Future<Boolean> future : futures) {
future.cancel(true);
}
return result;
} catch (InterruptedException e) {
return false;
} catch (ExecutionException e) {
log.debug(String.format("%s experienced exception when determining is registered %s",
this, objectName), e);
}
}
return false;
}
/* (non-Javadoc)
* @see javax.management.MBeanRegistration#postDeregister()
*/
@Override
public void postDeregister() {
}
/* (non-Javadoc)
* @see javax.management.MBeanRegistration#postRegister(java.lang.Boolean)
*/
@Override
public void postRegister(Boolean registrationDone) {
}
/* (non-Javadoc)
* @see javax.management.MBeanRegistration#preDeregister()
*/
@Override
public void preDeregister() throws Exception {
}
/* (non-Javadoc)
* @see javax.management.MBeanRegistration#preRegister(javax.management.MBeanServer, javax.management.ObjectName)
*/
@Override
public ObjectName preRegister(MBeanServer server, ObjectName name)
throws Exception {
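        // Remember the hosting MBeanServer and our own ObjectName; every
        // localTask above delegates to this server.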
mbs = server;
this.name = name;
return name;
}
/**
* @param filter
* @param query
* @return
* @throws IOException
* @see javax.management.MBeanServer#queryMBeans(javax.management.ObjectName,
* javax.management.QueryExp)
*/
@Override
public Set<ObjectInstance> queryMBeans(final ObjectName filter,
final QueryExp query)
throws IOException {
ExecutorCompletionService<Set<ObjectInstance>> completionService = new ExecutorCompletionService<>(
executor);
TaskGenerator<Set<ObjectInstance>> generator = new TaskGenerator<Set<ObjectInstance>>() {
@Override
public Callable<Set<ObjectInstance>> localTask(final ObjectName objectName) {
return new Callable<Set<ObjectInstance>>() {
@Override
public Set<ObjectInstance> call() throws Exception {
return mbs.queryMBeans(filter, query);
}
};
}
@Override
public Callable<Set<ObjectInstance>> remoteTask(final NodeMBean child) {
return new Callable<Set<ObjectInstance>>() {
@Override
public Set<ObjectInstance> call() throws Exception {
return child.queryMBeans(filter, query);
}
};
}
};
Set<ObjectInstance> instances = new HashSet<>();
List<Future<Set<ObjectInstance>>> futures = forAll(completionService,
generator, filter,
query);
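        // Take the union of the local and child query results; the HashSet
        // collapses any duplicates reported by more than one node.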
for (int i = 0; i < futures.size(); i++) {
try {
instances.addAll(completionService.take().get());
} catch (InterruptedException e) {
return instances;
} catch (ExecutionException e) {
log.debug(String.format("%s experienced exception when querying mbeans %s, %s",
this, filter, query), e);
}
}
return instances;
}
/**
* @param name
* @param query
* @return
* @throws IOException
* @see javax.management.MBeanServer#queryNames(javax.management.ObjectName,
* javax.management.QueryExp)
*/
@Override
public Set<ObjectName> queryNames(final ObjectName filter,
final QueryExp query) throws IOException {
ExecutorCompletionService<Set<ObjectName>> completionService = new ExecutorCompletionService<>(
executor);
TaskGenerator<Set<ObjectName>> generator = new TaskGenerator<Set<ObjectName>>() {
@Override
public Callable<Set<ObjectName>> localTask(final ObjectName objectName) {
return new Callable<Set<ObjectName>>() {
@Override
public Set<ObjectName> call() throws Exception {
return mbs.queryNames(filter, query);
}
};
}
@Override
public Callable<Set<ObjectName>> remoteTask(final NodeMBean child) {
return new Callable<Set<ObjectName>>() {
@Override
public Set<ObjectName> call() throws Exception {
return child.queryNames(filter, query);
}
};
}
};
Set<ObjectName> names = new HashSet<>();
List<Future<Set<ObjectName>>> futures = forAll(completionService,
generator, filter, query);
for (int i = 0; i < futures.size(); i++) {
try {
names.addAll(completionService.take().get());
} catch (InterruptedException e) {
return names;
} catch (ExecutionException e) {
log.debug(String.format("%s experienced exception when querying names %s, %s",
this, filter, query), e);
}
}
return names;
}
/**
* @param child
*/
public void removeChild(NodeMBean child) {
children.remove(child);
}
/**
* @param name
* @param listener
* @throws InstanceNotFoundException
* @throws ListenerNotFoundException
* @throws IOException
* @see javax.management.MBeanServer#removeNotificationListener(javax.management.ObjectName,
* javax.management.NotificationListener)
*/
@Override
public void removeNotificationListener(final ObjectName objectName,
final NotificationListener listener)
throws InstanceNotFoundException,
ListenerNotFoundException,
IOException {
ExecutorCompletionService<Boolean> completionService = new ExecutorCompletionService<>(
executor);
TaskGenerator<Boolean> generator = new TaskGenerator<Boolean>() {
@Override
public Callable<Boolean> localTask(final ObjectName objectName) {
return new Callable<Boolean>() {
@Override
public Boolean call() throws Exception {
mbs.removeNotificationListener(objectName, listener);
return true;
}
};
}
@Override
public Callable<Boolean> remoteTask(final NodeMBean child) {
return new Callable<Boolean>() {
@Override
public Boolean call() throws Exception {
child.removeNotificationListener(objectName, listener);
return true;
}
};
}
};
List<Future<Boolean>> futures = forAll(completionService, generator,
objectName);
for (int i = 0; i < futures.size(); i++) {
try {
if (completionService.take().get()) {
for (Future<Boolean> future : futures) {
future.cancel(true);
}
return;
}
} catch (InterruptedException e) {
return; // don't even log this ;)
} catch (ExecutionException e) {
log.debug(String.format("%s experienced exception when removing notification listener on %s for %s",
this, objectName, listener), e);
}
}
throw new InstanceNotFoundException(
String.format("Instance not found: %s",
objectName));
}
/**
* @param objectName
* @param listener
* @param filter
* @param handback
* @throws InstanceNotFoundException
* @throws ListenerNotFoundException
* @throws IOException
* @see javax.management.MBeanServer#removeNotificationListener(javax.management.ObjectName,
* javax.management.NotificationListener,
* javax.management.NotificationFilter, java.lang.Object)
*/
@Override
public void removeNotificationListener(final ObjectName objectName,
final NotificationListener listener,
final NotificationFilter filter,
final Object handback)
throws InstanceNotFoundException,
ListenerNotFoundException,
IOException {
ExecutorCompletionService<Boolean> completionService = new ExecutorCompletionService<>(
executor);
TaskGenerator<Boolean> generator = new TaskGenerator<Boolean>() {
@Override
public Callable<Boolean> localTask(final ObjectName objectName) {
return new Callable<Boolean>() {
@Override
public Boolean call() throws Exception {
mbs.removeNotificationListener(objectName, listener,
filter, handback);
return true;
}
};
}
@Override
public Callable<Boolean> remoteTask(final NodeMBean child) {
return new Callable<Boolean>() {
@Override
public Boolean call() throws Exception {
child.removeNotificationListener(objectName, listener,
filter, handback);
return true;
}
};
}
};
List<Future<Boolean>> futures = forAll(completionService, generator,
objectName);
for (int i = 0; i < futures.size(); i++) {
try {
if (completionService.take().get()) {
for (Future<Boolean> future : futures) {
future.cancel(true);
}
return;
}
} catch (InterruptedException e) {
return; // don't even log this ;)
} catch (ExecutionException e) {
log.debug(String.format("%s experienced exception when removing notification listener on %s for %s, %s",
this, objectName, listener, filter), e);
}
}
throw new InstanceNotFoundException(
String.format("Instance not found: %s",
objectName));
}
/**
* @param objectName
* @param listener
* @throws InstanceNotFoundException
* @throws ListenerNotFoundException
* @throws IOException
* @see javax.management.MBeanServer#removeNotificationListener(javax.management.ObjectName,
* javax.management.ObjectName)
*/
@Override
public void removeNotificationListener(final ObjectName objectName,
final ObjectName listener)
throws InstanceNotFoundException,
ListenerNotFoundException,
IOException {
ExecutorCompletionService<Boolean> completionService = new ExecutorCompletionService<>(
executor);
TaskGenerator<Boolean> generator = new TaskGenerator<Boolean>() {
@Override
public Callable<Boolean> localTask(final ObjectName objectName) {
return new Callable<Boolean>() {
@Override
public Boolean call() throws Exception {
mbs.removeNotificationListener(objectName, listener);
return true;
}
};
}
@Override
public Callable<Boolean> remoteTask(final NodeMBean child) {
return new Callable<Boolean>() {
@Override
public Boolean call() throws Exception {
child.removeNotificationListener(objectName, listener);
return true;
}
};
}
};
List<Future<Boolean>> futures = forAll(completionService, generator,
objectName);
for (int i = 0; i < futures.size(); i++) {
try {
if (completionService.take().get()) {
for (Future<Boolean> future : futures) {
future.cancel(true);
}
return;
}
} catch (InterruptedException e) {
return; // don't even log this ;)
} catch (ExecutionException e) {
log.debug(String.format("%s experienced exception when removing notification listener on %s for %s",
this, objectName, listener), e);
}
}
throw new InstanceNotFoundException(
String.format("Instance not found: %s",
objectName));
}
/**
* @param name
* @param listener
* @param filter
* @param handback
* @throws InstanceNotFoundException
* @throws ListenerNotFoundException
* @throws IOException
* @see javax.management.MBeanServer#removeNotificationListener(javax.management.ObjectName,
* javax.management.ObjectName, javax.management.NotificationFilter,
* java.lang.Object)
*/
@Override
public void removeNotificationListener(final ObjectName objectName,
final ObjectName listener,
final NotificationFilter filter,
final Object handback)
throws InstanceNotFoundException,
ListenerNotFoundException,
IOException {
ExecutorCompletionService<Boolean> completionService = new ExecutorCompletionService<>(
executor);
TaskGenerator<Boolean> generator = new TaskGenerator<Boolean>() {
@Override
public Callable<Boolean> localTask(final ObjectName objectName) {
return new Callable<Boolean>() {
@Override
public Boolean call() throws Exception {
mbs.removeNotificationListener(objectName, listener,
filter, handback);
return true;
}
};
}
@Override
public Callable<Boolean> remoteTask(final NodeMBean child) {
return new Callable<Boolean>() {
@Override
public Boolean call() throws Exception {
child.removeNotificationListener(objectName, listener,
filter, handback);
return true;
}
};
}
};
List<Future<Boolean>> futures = forAll(completionService, generator,
objectName);
for (int i = 0; i < futures.size(); i++) {
try {
if (completionService.take().get()) {
for (Future<Boolean> future : futures) {
future.cancel(true);
}
return;
}
} catch (InterruptedException e) {
return; // don't even log this ;)
} catch (ExecutionException e) {
log.debug(String.format("%s experienced exception when removing notification listener on %s for %s, %s, %s",
this, objectName, listener, filter,
handback), e);
}
}
throw new InstanceNotFoundException(
String.format("Instance not found: %s",
objectName));
}
/* (non-Javadoc)
* @see com.hellblazer.groo.NodeMXBean#removeNotificationListener(javax.management.ObjectName, javax.management.QueryExp, javax.management.NotificationListener)
*/
@Override
public void removeNotificationListener(final ObjectName pattern,
final QueryExp queryExpr,
final NotificationListener listener)
throws InstanceNotFoundException,
ListenerNotFoundException,
IOException {
ExecutorCompletionService<Boolean> completionService = new ExecutorCompletionService<>(
executor);
TaskGenerator<Boolean> generator = new TaskGenerator<Boolean>() {
@Override
public Callable<Boolean> localTask(final ObjectName objectName) {
return new Callable<Boolean>() {
@Override
public Boolean call() throws Exception {
mbs.removeNotificationListener(objectName, listener);
return true;
}
};
}
@Override
public Callable<Boolean> remoteTask(final NodeMBean child) {
return new Callable<Boolean>() {
@Override
public Boolean call() throws Exception {
child.removeNotificationListener(pattern, queryExpr,
listener);
return true;
}
};
}
};
List<Future<Boolean>> futures = forAll(completionService, generator,
pattern, queryExpr);
for (int i = 0; i < futures.size(); i++) {
try {
if (completionService.take().get()) {
for (Future<Boolean> future : futures) {
future.cancel(true);
}
return;
}
} catch (InterruptedException e) {
return;
} catch (ExecutionException e) {
log.debug(String.format("%s experienced exception when adding notification listener %s, %s",
this, pattern, queryExpr), e);
}
}
throw new InstanceNotFoundException(
String.format("Instance not found: %s, %s",
pattern, queryExpr));
}
/* (non-Javadoc)
* @see com.hellblazer.groo.NodeMXBean#removeNotificationListener(javax.management.ObjectName, javax.management.QueryExp, javax.management.NotificationListener, javax.management.NotificationFilter, java.lang.Object)
*/
@Override
public void removeNotificationListener(final ObjectName pattern,
final QueryExp queryExpr,
final NotificationListener listener,
final NotificationFilter filter,
final Object handback)
throws InstanceNotFoundException,
ListenerNotFoundException,
IOException {
ExecutorCompletionService<Boolean> completionService = new ExecutorCompletionService<>(
executor);
TaskGenerator<Boolean> generator = new TaskGenerator<Boolean>() {
@Override
public Callable<Boolean> localTask(final ObjectName objectName) {
return new Callable<Boolean>() {
@Override
public Boolean call() throws Exception {
mbs.removeNotificationListener(objectName, listener,
filter, handback);
return true;
}
};
}
@Override
public Callable<Boolean> remoteTask(final NodeMBean child) {
return new Callable<Boolean>() {
@Override
public Boolean call() throws Exception {
child.removeNotificationListener(pattern, queryExpr,
listener, filter,
handback);
return true;
}
};
}
};
List<Future<Boolean>> futures = forAll(completionService, generator,
pattern, queryExpr);
for (int i = 0; i < futures.size(); i++) {
try {
                if (completionService.take().get()) {
                    for (Future<Boolean> future : futures) {
                        future.cancel(true);
                    }
                    return;
                }
} catch (InterruptedException e) {
return;
} catch (ExecutionException e) {
log.debug(String.format("%s experienced exception when removing notification listener %s, %s, %s, %s, %s",
this, pattern, queryExpr, listener,
filter, handback), e);
}
}
throw new InstanceNotFoundException(
String.format("Instance not found: %s, %s",
pattern, queryExpr));
}
/* (non-Javadoc)
* @see com.hellblazer.groo.NodeMXBean#removeNotificationListener(javax.management.ObjectName, javax.management.QueryExp, javax.management.ObjectName)
*/
@Override
public void removeNotificationListener(final ObjectName pattern,
final QueryExp queryExpr,
final ObjectName listener)
throws InstanceNotFoundException,
ListenerNotFoundException,
IOException {
ExecutorCompletionService<Boolean> completionService = new ExecutorCompletionService<>(
executor);
TaskGenerator<Boolean> generator = new TaskGenerator<Boolean>() {
@Override
public Callable<Boolean> localTask(final ObjectName objectName) {
return new Callable<Boolean>() {
@Override
public Boolean call() throws Exception {
mbs.removeNotificationListener(objectName, listener);
return true;
}
};
}
@Override
public Callable<Boolean> remoteTask(final NodeMBean child) {
return new Callable<Boolean>() {
@Override
public Boolean call() throws Exception {
child.removeNotificationListener(pattern, queryExpr,
listener);
return true;
}
};
}
};
List<Future<Boolean>> futures = forAll(completionService, generator,
pattern, queryExpr);
        for (int i = 0; i < futures.size(); i++) {
            try {
                if (completionService.take().get()) {
                    for (Future<Boolean> future : futures) {
                        future.cancel(true);
                    }
                    return;
                }
            } catch (InterruptedException e) {
                return;
            } catch (ExecutionException e) {
                log.debug(String.format("%s experienced exception when removing notification listener %s, %s, %s",
                                        this, pattern, queryExpr, listener), e);
            }
        }
        throw new InstanceNotFoundException(
                                            String.format("Instance not found: %s, %s",
                                                          pattern, queryExpr));
}
/* (non-Javadoc)
* @see com.hellblazer.groo.NodeMXBean#removeNotificationListener(javax.management.ObjectName, javax.management.QueryExp, javax.management.ObjectName, javax.management.NotificationFilter, java.lang.Object)
*/
@Override
public void removeNotificationListener(final ObjectName pattern,
final QueryExp queryExpr,
final ObjectName listener,
final NotificationFilter filter,
final Object handback)
throws InstanceNotFoundException,
ListenerNotFoundException,
IOException {
ExecutorCompletionService<Boolean> completionService = new ExecutorCompletionService<>(
executor);
TaskGenerator<Boolean> generator = new TaskGenerator<Boolean>() {
@Override
public Callable<Boolean> localTask(final ObjectName objectName) {
return new Callable<Boolean>() {
@Override
public Boolean call() throws Exception {
mbs.removeNotificationListener(objectName, listener,
filter, handback);
return true;
}
};
}
@Override
public Callable<Boolean> remoteTask(final NodeMBean child) {
return new Callable<Boolean>() {
@Override
public Boolean call() throws Exception {
child.removeNotificationListener(pattern, queryExpr,
listener, filter,
handback);
return true;
}
};
}
};
List<Future<Boolean>> futures = forAll(completionService, generator,
pattern, queryExpr);
        for (int i = 0; i < futures.size(); i++) {
            try {
                if (completionService.take().get()) {
                    for (Future<Boolean> future : futures) {
                        future.cancel(true);
                    }
                    return;
                }
            } catch (InterruptedException e) {
                return;
            } catch (ExecutionException e) {
                log.debug(String.format("%s experienced exception when removing notification listener %s, %s, %s, %s, %s",
                                        this, pattern, queryExpr, listener,
                                        filter, handback), e);
            }
        }
        throw new InstanceNotFoundException(
                                            String.format("Instance not found: %s, %s",
                                                          pattern, queryExpr));
}
    /**
     * @param objectName the name of the MBean to set the attribute on
     * @param attribute the attribute to set
     * @throws InstanceNotFoundException
     * @throws AttributeNotFoundException
     * @throws InvalidAttributeValueException
     * @throws MBeanException
     * @throws ReflectionException
     * @throws IOException
     * @see javax.management.MBeanServer#setAttribute(javax.management.ObjectName,
     *      javax.management.Attribute)
     */
@Override
public void setAttribute(final ObjectName objectName,
final Attribute attribute)
throws InstanceNotFoundException,
AttributeNotFoundException,
InvalidAttributeValueException,
MBeanException,
ReflectionException,
IOException {
ExecutorCompletionService<Void> completionService = new ExecutorCompletionService<>(
executor);
TaskGenerator<Void> generator = new TaskGenerator<Void>() {
@Override
public Callable<Void> localTask(final ObjectName objectName) {
return new Callable<Void>() {
@Override
public Void call() throws Exception {
mbs.setAttribute(objectName, attribute);
return null;
}
};
}
@Override
public Callable<Void> remoteTask(final NodeMBean child) {
return new Callable<Void>() {
@Override
public Void call() throws Exception {
child.setAttribute(objectName, attribute);
return null;
}
};
}
};
List<Future<Void>> futures = forAll(completionService, generator,
objectName);
boolean attributeNotFound = false;
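        // Gather: the first node to apply the attribute ends the call; an
        // AttributeNotFoundException is remembered separately so the failure
        // can be reported as a missing attribute rather than a missing instance.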
for (int i = 0; i < futures.size(); i++) {
try {
completionService.take().get();
for (Future<Void> future : futures) {
future.cancel(true);
}
return;
} catch (InterruptedException e) {
return;
} catch (ExecutionException e) {
if (e.getCause() instanceof AttributeNotFoundException) {
                    attributeNotFound = true;
} else {
log.debug(String.format("%s experienced exception when setting attribute %s, %s",
this, objectName, attribute), e);
}
}
}
if (attributeNotFound) {
throw new AttributeNotFoundException(
String.format("Attribute not found: %s for %s",
attribute,
objectName));
} else {
throw new InstanceNotFoundException(
String.format("Instance not found: %s",
objectName));
}
}
/* (non-Javadoc)
* @see com.hellblazer.groo.NodeMXBean#setAttribute(javax.management.ObjectName, javax.management.QueryExp, javax.management.Attribute)
*/
@Override
public void setAttribute(final ObjectName pattern,
final QueryExp queryExpr, final Attribute attribute)
throws InstanceNotFoundException,
AttributeNotFoundException,
InvalidAttributeValueException,
MBeanException,
ReflectionException,
IOException {
ExecutorCompletionService<Boolean> completionService = new ExecutorCompletionService<>(
executor);
TaskGenerator<Boolean> generator = new TaskGenerator<Boolean>() {
@Override
public Callable<Boolean> localTask(final ObjectName objectName) {
return new Callable<Boolean>() {
@Override
public Boolean call() throws Exception {
mbs.setAttribute(objectName, attribute);
return true;
}
};
}
@Override
public Callable<Boolean> remoteTask(final NodeMBean child) {
return new Callable<Boolean>() {
@Override
public Boolean call() throws Exception {
child.setAttribute(pattern, queryExpr, attribute);
return true;
}
};
}
};
List<Future<Boolean>> futures = forAll(completionService, generator,
pattern, queryExpr);
        for (int i = 0; i < futures.size(); i++) {
            try {
                completionService.take().get();
                for (Future<Boolean> future : futures) {
                    future.cancel(true);
                }
                return;
            } catch (InterruptedException e) {
                return;
            } catch (ExecutionException e) {
                log.debug(String.format("%s experienced exception when setting attribute %s, %s, %s",
                                        this, pattern, queryExpr, attribute), e);
            }
        }
throw new InstanceNotFoundException(
String.format("Instance not found: %s, %s",
pattern, queryExpr));
}
    /**
     * @param objectName the name of the MBean to set the attributes on
     * @param attributes the attributes to set
     * @return the list of attributes that were set
     * @throws InstanceNotFoundException
     * @throws ReflectionException
     * @throws IOException
     * @see javax.management.MBeanServer#setAttributes(javax.management.ObjectName,
     *      javax.management.AttributeList)
     */
@Override
public AttributeList setAttributes(final ObjectName objectName,
final AttributeList attributes)
throws InstanceNotFoundException,
ReflectionException,
IOException {
ExecutorCompletionService<AttributeList> completionService = new ExecutorCompletionService<>(
executor);
TaskGenerator<AttributeList> generator = new TaskGenerator<AttributeList>() {
@Override
public Callable<AttributeList> localTask(final ObjectName objectName) {
return new Callable<AttributeList>() {
@Override
public AttributeList call() throws Exception {
return mbs.setAttributes(objectName, attributes);
}
};
}
@Override
public Callable<AttributeList> remoteTask(final NodeMBean child) {
return new Callable<AttributeList>() {
@Override
public AttributeList call() throws Exception {
return child.setAttributes(objectName, attributes);
}
};
}
};
List<Future<AttributeList>> futures = forAll(completionService,
generator, objectName);
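        // Gather: the first AttributeList to arrive is returned and the
        // remaining tasks are cancelled.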
for (int i = 0; i < futures.size(); i++) {
try {
AttributeList attrs = completionService.take().get();
for (Future<AttributeList> future : futures) {
future.cancel(true);
}
return attrs;
} catch (InterruptedException e) {
return new AttributeList();
} catch (ExecutionException e) {
log.debug(String.format("%s experienced exception when setting attributes %s, %s",
this, objectName, attributes), e);
}
}
throw new InstanceNotFoundException(
String.format("Instance not found: %s",
objectName));
}
/* (non-Javadoc)
* @see com.hellblazer.groo.NodeMXBean#setAttributes(javax.management.ObjectName, javax.management.QueryExp, javax.management.AttributeList)
*/
@Override
public Map<ObjectName, AttributeList> setAttributes(final ObjectName pattern,
final QueryExp queryExpr,
final AttributeList attributes)
throws InstanceNotFoundException,
ReflectionException,
IOException {
Map<ObjectName, AttributeList> attrs = new HashMap<>();
ExecutorCompletionService<Map<ObjectName, AttributeList>> completionService = new ExecutorCompletionService<>(
executor);
TaskGenerator<Map<ObjectName, AttributeList>> generator = new TaskGenerator<Map<ObjectName, AttributeList>>() {
@Override
public Callable<Map<ObjectName, AttributeList>> localTask(final ObjectName objectName) {
return new Callable<Map<ObjectName, AttributeList>>() {
@Override
public Map<ObjectName, AttributeList> call()
throws Exception {
Map<ObjectName, AttributeList> attrs = new HashMap<>();
attrs.put(objectName,
mbs.setAttributes(objectName, attributes));
return attrs;
}
};
}
@Override
public Callable<Map<ObjectName, AttributeList>> remoteTask(final NodeMBean child) {
return new Callable<Map<ObjectName, AttributeList>>() {
@Override
public Map<ObjectName, AttributeList> call()
throws Exception {
return child.setAttributes(pattern, queryExpr,
attributes);
}
};
}
};
List<Future<Map<ObjectName, AttributeList>>> futures = forAll(completionService,
generator,
pattern,
queryExpr);
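        // Gather: unlike the single-name variant, every node contributes; the
        // per-node result maps are merged and the call fails only when no
        // instance matched anywhere.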
for (int i = 0; i < futures.size(); i++) {
try {
attrs.putAll(completionService.take().get());
} catch (InterruptedException e) {
return Collections.emptyMap();
} catch (ExecutionException e) {
log.debug(String.format("%s experienced exception when setting attributes %s, %s, %s, %s",
this, pattern, queryExpr,
Arrays.asList(attributes)), e);
}
}
if (attrs.size() == 0) {
throw new InstanceNotFoundException(
String.format("Instance not found: %s, %s",
pattern,
queryExpr));
}
return attrs;
}
/* (non-Javadoc)
* @see java.lang.Object#toString()
*/
@Override
public String toString() {
return "Node [" + name + ", " + filter + "]";
}
    /**
     * Scatter: submits the remote task to every child node and the local task
     * against the local MBean server.
     *
     * @param completionService the completion service the tasks are submitted to
     * @param generator produces the local and remote variants of the task
     * @param objectName the name the local task operates on
     * @return the futures of all submitted tasks
     */
private <V> List<Future<V>> forAll(ExecutorCompletionService<V> completionService,
TaskGenerator<V> generator,
ObjectName objectName) {
List<Future<V>> futures = new ArrayList<>();
for (NodeMBean child : children) {
futures.add(completionService.submit(generator.remoteTask(child)));
}
futures.add(completionService.submit(generator.localTask(objectName)));
return futures;
}
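    /**
     * Scatter: submits the remote task to every child node and a local task
     * for each locally registered MBean matching the pattern and query.
     *
     * @param completionService the completion service the tasks are submitted to
     * @param generator produces the local and remote variants of the task
     * @param pattern the object name pattern to match
     * @param queryExpr the query used to narrow the matched names
     * @return the futures of all submitted tasks
     */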
private <V> List<Future<V>> forAll(ExecutorCompletionService<V> completionService,
TaskGenerator<V> generator,
ObjectName pattern, QueryExp queryExpr) {
List<Future<V>> futures = new ArrayList<>();
for (NodeMBean child : children) {
futures.add(completionService.submit(generator.remoteTask(child)));
}
for (ObjectName n : mbs.queryNames(pattern, queryExpr)) {
futures.add(completionService.submit(generator.localTask(n)));
}
return futures;
}
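    // A minimal sketch of the scatter/gather idiom used throughout this class;
    // the names below are illustrative only, not part of the API:
    //
    //   ExecutorCompletionService<V> cs = new ExecutorCompletionService<>(executor);
    //   List<Future<V>> futures = forAll(cs, generator, name);   // scatter
    //   for (int i = 0; i < futures.size(); i++) {
    //       V result = cs.take().get();   // gather in completion order
    //       // ... then either return on the first success and cancel the
    //       // remaining futures, or merge the results from every node
    //   }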
}
| src/main/java/com/hellblazer/groo/Node.java | /**
* (C) Copyright 2013 Hal Hildebrand, All Rights Reserved
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.hellblazer.groo;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.Callable;
import java.util.concurrent.CopyOnWriteArraySet;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Executor;
import java.util.concurrent.ExecutorCompletionService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import javax.management.Attribute;
import javax.management.AttributeList;
import javax.management.AttributeNotFoundException;
import javax.management.InstanceNotFoundException;
import javax.management.IntrospectionException;
import javax.management.InvalidAttributeValueException;
import javax.management.ListenerNotFoundException;
import javax.management.MBeanException;
import javax.management.MBeanInfo;
import javax.management.MBeanRegistration;
import javax.management.MBeanServer;
import javax.management.NotificationFilter;
import javax.management.NotificationListener;
import javax.management.ObjectInstance;
import javax.management.ObjectName;
import javax.management.QueryExp;
import javax.management.ReflectionException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* @author hhildebrand
*
*/
public class Node implements NodeMBean, MBeanRegistration {
private static final Logger log = LoggerFactory.getLogger(Node.class);
private final Set<NodeMBean> children = new CopyOnWriteArraySet<>();
private final Executor executor;
private final RegistrationFilter filter;
private MBeanServer mbs;
private ObjectName name;
public Node() {
this(null, null);
}
public Node(ObjectName sourcePattern, QueryExp sourceQuery) {
this(sourcePattern, sourceQuery, Executors.newCachedThreadPool());
}
public Node(ObjectName sourcePattern, QueryExp sourceQuery,
Executor executor) {
filter = new RegistrationFilter(sourcePattern, sourceQuery);
this.executor = executor;
}
/**
* @param child
*/
public void addChild(NodeMBean child) {
children.add(child);
}
/**
* @param objectName
* @param listener
* @param filter
* @param handback
* @throws InstanceNotFoundException
* @see javax.management.MBeanServer#addNotificationListener(javax.management.ObjectName,
* javax.management.NotificationListener,
* javax.management.NotificationFilter, java.lang.Object)
*/
@Override
public void addNotificationListener(final ObjectName objectName,
final NotificationListener listener,
final NotificationFilter filter,
final Object handback)
throws InstanceNotFoundException {
ExecutorCompletionService<Boolean> completionService = new ExecutorCompletionService<>(
executor);
TaskGenerator<Boolean> generator = new TaskGenerator<Boolean>() {
@Override
public Callable<Boolean> localTask(final ObjectName objectName) {
return new Callable<Boolean>() {
@Override
public Boolean call() throws Exception {
mbs.addNotificationListener(objectName, listener,
filter, handback);
return true;
}
};
}
@Override
public Callable<Boolean> remoteTask(final NodeMBean child) {
return new Callable<Boolean>() {
@Override
public Boolean call() throws Exception {
child.addNotificationListener(objectName, listener,
filter, handback);
return true;
}
};
}
};
List<Future<Boolean>> futures = forAll(completionService, generator,
objectName);
for (int i = 0; i < futures.size(); i++) {
try {
if (completionService.take().get()) {
for (Future<Boolean> future : futures) {
future.cancel(true);
}
return;
}
} catch (InterruptedException e) {
return; // don't even log this ;)
} catch (ExecutionException e) {
log.debug(String.format("%s experienced exception when adding notification listener on %s for %s",
this, objectName, listener), e);
}
}
throw new InstanceNotFoundException(
String.format("Instance not found: %s",
objectName));
}
/**
* @param objectName
* @param listener
* @param filter
* @param handback
* @throws InstanceNotFoundException
* @see javax.management.MBeanServer#addNotificationListener(javax.management.ObjectName,
* javax.management.ObjectName, javax.management.NotificationFilter,
* java.lang.Object)
*/
@Override
public void addNotificationListener(final ObjectName objectName,
final ObjectName listener,
final NotificationFilter filter,
final Object handback)
throws InstanceNotFoundException {
ExecutorCompletionService<Boolean> completionService = new ExecutorCompletionService<>(
executor);
TaskGenerator<Boolean> generator = new TaskGenerator<Boolean>() {
@Override
public Callable<Boolean> localTask(final ObjectName objectName) {
return new Callable<Boolean>() {
@Override
public Boolean call() throws Exception {
mbs.addNotificationListener(objectName, listener,
filter, handback);
return true;
}
};
}
@Override
public Callable<Boolean> remoteTask(final NodeMBean child) {
return new Callable<Boolean>() {
@Override
public Boolean call() throws Exception {
child.addNotificationListener(objectName, listener,
filter, handback);
return true;
}
};
}
};
List<Future<Boolean>> futures = forAll(completionService, generator,
objectName);
for (int i = 0; i < futures.size(); i++) {
try {
if (completionService.take().get()) {
for (Future<Boolean> future : futures) {
future.cancel(true);
}
return;
}
} catch (InterruptedException e) {
return; // don't even log this ;)
} catch (ExecutionException e) {
log.debug(String.format("%s experienced exception when adding notification listener on %s for %s",
this, objectName, listener), e);
}
}
throw new InstanceNotFoundException(
String.format("Instance not found: %s",
objectName));
}
/* (non-Javadoc)
* @see com.hellblazer.groo.NodeMXBean#addNotificationListener(javax.management.ObjectName, javax.management.QueryExp, javax.management.NotificationListener, javax.management.NotificationFilter, java.lang.Object)
*/
@Override
public void addNotificationListener(final ObjectName pattern,
final QueryExp queryExpr,
final NotificationListener listener,
final NotificationFilter filter,
final Object handback)
throws InstanceNotFoundException {
ExecutorCompletionService<Boolean> completionService = new ExecutorCompletionService<>(
executor);
TaskGenerator<Boolean> generator = new TaskGenerator<Boolean>() {
@Override
public Callable<Boolean> localTask(final ObjectName objectName) {
return new Callable<Boolean>() {
@Override
public Boolean call() throws Exception {
mbs.addNotificationListener(objectName, listener,
filter, handback);
return true;
}
};
}
@Override
public Callable<Boolean> remoteTask(final NodeMBean child) {
return new Callable<Boolean>() {
@Override
public Boolean call() throws Exception {
child.addNotificationListener(pattern, queryExpr,
listener, filter,
handback);
return true;
}
};
}
};
List<Future<Boolean>> futures = forAll(completionService, generator,
pattern, queryExpr);
boolean success = false;
for (int i = 0; i < futures.size(); i++) {
try {
if (completionService.take().get()) {
success |= true;
}
} catch (InterruptedException e) {
return;
} catch (ExecutionException e) {
log.debug(String.format("%s experienced exception when adding notification listener %s, %s",
this, pattern, queryExpr), e);
}
}
if (!success) {
throw new InstanceNotFoundException(
String.format("Instance not found: %s, %s",
pattern,
queryExpr));
}
}
/* (non-Javadoc)
* @see com.hellblazer.groo.NodeMXBean#addNotificationListener(javax.management.ObjectName, javax.management.QueryExp, javax.management.ObjectName, javax.management.NotificationFilter, java.lang.Object)
*/
@Override
public void addNotificationListener(final ObjectName pattern,
final QueryExp queryExpr,
final ObjectName listener,
final NotificationFilter filter,
final Object handback)
throws InstanceNotFoundException,
IOException {
ExecutorCompletionService<Boolean> completionService = new ExecutorCompletionService<>(
executor);
TaskGenerator<Boolean> generator = new TaskGenerator<Boolean>() {
@Override
public Callable<Boolean> localTask(final ObjectName objectName) {
return new Callable<Boolean>() {
@Override
public Boolean call() throws Exception {
mbs.addNotificationListener(objectName, listener,
filter, handback);
return true;
}
};
}
@Override
public Callable<Boolean> remoteTask(final NodeMBean child) {
return new Callable<Boolean>() {
@Override
public Boolean call() throws Exception {
child.addNotificationListener(pattern, queryExpr,
listener, filter,
handback);
return true;
}
};
}
};
List<Future<Boolean>> futures = forAll(completionService, generator,
pattern, queryExpr);
for (int i = 0; i < futures.size(); i++) {
try {
if (completionService.take().get()) {
for (Future<Boolean> future : futures) {
future.cancel(true);
}
return;
}
} catch (InterruptedException e) {
return;
} catch (ExecutionException e) {
log.debug(String.format("%s experienced exception when adding notification listener %s, %s",
this, pattern, queryExpr), e);
}
}
throw new InstanceNotFoundException(
String.format("Instance not found: %s, %s",
name, queryExpr));
}
/* (non-Javadoc)
* @see java.lang.Object#equals(java.lang.Object)
*/
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj == null) {
return false;
}
if (getClass() != obj.getClass()) {
return false;
}
Node other = (Node) obj;
if (name == null) {
if (other.name != null) {
return false;
}
} else if (!name.equals(other.name)) {
return false;
}
return true;
}
/* (non-Javadoc)
* @see com.hellblazer.groo.NodeMXBean#getAttribute(javax.management.ObjectName, javax.management.QueryExp, java.lang.String)
*/
@Override
public Map<ObjectName, Object> getAttribute(final ObjectName pattern,
final QueryExp queryExpr,
final String attribute)
throws MBeanException,
AttributeNotFoundException,
InstanceNotFoundException,
ReflectionException,
IOException {
Map<ObjectName, Object> attributes = new HashMap<>();
ExecutorCompletionService<Map<ObjectName, Object>> completionService = new ExecutorCompletionService<>(
executor);
TaskGenerator<Map<ObjectName, Object>> generator = new TaskGenerator<Map<ObjectName, Object>>() {
@Override
public Callable<Map<ObjectName, Object>> localTask(final ObjectName objectName) {
return new Callable<Map<ObjectName, Object>>() {
@Override
public Map<ObjectName, Object> call() throws Exception {
Map<ObjectName, Object> attributes = new HashMap<>();
attributes.put(objectName,
mbs.getAttribute(objectName, attribute));
return attributes;
}
};
}
@Override
public Callable<Map<ObjectName, Object>> remoteTask(final NodeMBean child) {
return new Callable<Map<ObjectName, Object>>() {
@Override
public Map<ObjectName, Object> call() throws Exception {
return child.getAttribute(pattern, queryExpr, attribute);
}
};
}
};
List<Future<Map<ObjectName, Object>>> futures = forAll(completionService,
generator,
pattern,
queryExpr);
for (int i = 0; i < futures.size(); i++) {
try {
attributes.putAll(completionService.take().get());
} catch (InterruptedException e) {
return Collections.emptyMap();
} catch (ExecutionException e) {
log.debug(String.format("%s experienced exception when collecting attributes %s, %s",
this, pattern, queryExpr), e);
}
}
if (attributes.size() == 0) {
throw new InstanceNotFoundException(
String.format("Instance not found: %s, %s",
pattern,
queryExpr));
}
return attributes;
}
/**
* @param objectName
* @param attribute
* @return
* @throws MBeanException
* @throws AttributeNotFoundException
* @throws InstanceNotFoundException
* @throws ReflectionException
* @see javax.management.MBeanServer#getAttribute(javax.management.ObjectName,
* java.lang.String)
*/
@Override
public Object getAttribute(final ObjectName objectName,
final String attribute)
throws MBeanException,
AttributeNotFoundException,
InstanceNotFoundException,
ReflectionException {
ExecutorCompletionService<Object> completionService = new ExecutorCompletionService<>(
executor);
TaskGenerator<Object> generator = new TaskGenerator<Object>() {
@Override
public Callable<Object> localTask(final ObjectName objectName) {
return new Callable<Object>() {
@Override
public Object call() throws Exception {
return mbs.getAttribute(objectName, attribute);
}
};
}
@Override
public Callable<Object> remoteTask(final NodeMBean child) {
return new Callable<Object>() {
@Override
public Object call() throws Exception {
return child.getAttribute(objectName, attribute);
}
};
}
};
List<Future<Object>> futures = forAll(completionService, generator,
objectName);
boolean attributeNotFound = false;
for (int i = 0; i < futures.size(); i++) {
try {
Object attributeValue = completionService.take().get();
for (Future<Object> future : futures) {
future.cancel(true);
}
return attributeValue;
} catch (InterruptedException e) {
                return null;
} catch (ExecutionException e) {
if (e.getCause() instanceof AttributeNotFoundException) {
attributeNotFound |= true;
} else {
log.debug(String.format("%s experienced exception when retriving attribute %s, %s",
this, objectName, attribute), e);
}
}
}
if (attributeNotFound) {
throw new AttributeNotFoundException(
String.format("Attribute not found: %s for %s",
attribute,
objectName));
} else {
throw new InstanceNotFoundException(
String.format("Instance not found: %s",
objectName));
}
}
/* (non-Javadoc)
* @see com.hellblazer.groo.NodeMXBean#getAttributes(javax.management.ObjectName, javax.management.QueryExp, java.lang.String[])
*/
@Override
public Map<ObjectName, AttributeList> getAttributes(final ObjectName pattern,
final QueryExp queryExpr,
final String[] attributes)
throws InstanceNotFoundException,
ReflectionException,
IOException {
Map<ObjectName, AttributeList> attrs = new HashMap<>();
ExecutorCompletionService<Map<ObjectName, AttributeList>> completionService = new ExecutorCompletionService<>(
executor);
TaskGenerator<Map<ObjectName, AttributeList>> generator = new TaskGenerator<Map<ObjectName, AttributeList>>() {
@Override
public Callable<Map<ObjectName, AttributeList>> localTask(final ObjectName objectName) {
return new Callable<Map<ObjectName, AttributeList>>() {
@Override
public Map<ObjectName, AttributeList> call()
throws Exception {
Map<ObjectName, AttributeList> attrs = new HashMap<>();
attrs.put(objectName,
mbs.getAttributes(objectName, attributes));
return attrs;
}
};
}
@Override
public Callable<Map<ObjectName, AttributeList>> remoteTask(final NodeMBean child) {
return new Callable<Map<ObjectName, AttributeList>>() {
@Override
public Map<ObjectName, AttributeList> call()
throws Exception {
return child.getAttributes(pattern, queryExpr,
attributes);
}
};
}
};
List<Future<Map<ObjectName, AttributeList>>> futures = forAll(completionService,
generator,
pattern,
queryExpr);
for (int i = 0; i < futures.size(); i++) {
try {
attrs.putAll(completionService.take().get());
} catch (InterruptedException e) {
return Collections.emptyMap();
} catch (ExecutionException e) {
log.debug(String.format("%s experienced exception when retriving attributes %s, %s, %s, %s",
this, pattern, queryExpr,
Arrays.asList(attributes)), e);
}
}
if (attrs.size() == 0) {
throw new InstanceNotFoundException(
String.format("Instance not found: %s, %s",
pattern,
queryExpr));
}
return attrs;
}
/**
* @param name
* @param attributes
* @return
* @throws InstanceNotFoundException
* @throws ReflectionException
* @see javax.management.MBeanServer#getAttributes(javax.management.ObjectName,
* java.lang.String[])
*/
@Override
public AttributeList getAttributes(final ObjectName objectName,
final String[] attributes)
throws InstanceNotFoundException,
ReflectionException {
ExecutorCompletionService<AttributeList> completionService = new ExecutorCompletionService<>(
executor);
TaskGenerator<AttributeList> generator = new TaskGenerator<AttributeList>() {
@Override
public Callable<AttributeList> localTask(final ObjectName objectName) {
return new Callable<AttributeList>() {
@Override
public AttributeList call() throws Exception {
return mbs.getAttributes(objectName, attributes);
}
};
}
@Override
public Callable<AttributeList> remoteTask(final NodeMBean child) {
return new Callable<AttributeList>() {
@Override
public AttributeList call() throws Exception {
return child.getAttributes(objectName, attributes);
}
};
}
};
List<Future<AttributeList>> futures = forAll(completionService,
generator, objectName);
for (int i = 0; i < futures.size(); i++) {
try {
AttributeList attrs = completionService.take().get();
for (Future<AttributeList> future : futures) {
future.cancel(true);
}
return attrs;
} catch (InterruptedException e) {
return new AttributeList();
} catch (ExecutionException e) {
log.debug(String.format("%s experienced exception when retriving attributes %s, %s",
this, objectName,
Arrays.asList(attributes)), e);
}
}
throw new InstanceNotFoundException(
String.format("Instance not found: %s",
objectName));
}
public Set<NodeMBean> getChildren() {
return Collections.unmodifiableSet(children);
}
/**
* @return the filter
*/
public RegistrationFilter getFilter() {
return filter;
}
/**
* @return
* @see javax.management.MBeanServer#getMBeanCount()
*/
@Override
public Integer getMBeanCount() {
ExecutorCompletionService<Integer> completionService = new ExecutorCompletionService<>(
executor);
TaskGenerator<Integer> generator = new TaskGenerator<Integer>() {
@Override
public Callable<Integer> localTask(final ObjectName objectName) {
return new Callable<Integer>() {
@Override
public Integer call() throws Exception {
return mbs.getMBeanCount();
}
};
}
@Override
public Callable<Integer> remoteTask(final NodeMBean child) {
return new Callable<Integer>() {
@Override
public Integer call() throws Exception {
return child.getMBeanCount();
}
};
}
};
List<Future<Integer>> futures = forAll(completionService, generator,
null);
int count = 0;
for (int i = 0; i < futures.size(); i++) {
try {
count += completionService.take().get();
} catch (InterruptedException e) {
return 0;
} catch (ExecutionException e) {
log.debug(String.format("%s experienced exception when retriving mbean count %s",
this), e);
}
}
return count;
}
/* (non-Javadoc)
* @see com.hellblazer.groo.NodeMXBean#getMBeanCount(javax.management.ObjectName)
*/
@Override
public int getMBeanCount(final ObjectName filter, final QueryExp queryExp) {
ExecutorCompletionService<Integer> completionService = new ExecutorCompletionService<>(
executor);
TaskGenerator<Integer> generator = new TaskGenerator<Integer>() {
@Override
public Callable<Integer> localTask(final ObjectName objectName) {
return new Callable<Integer>() {
@Override
public Integer call() throws Exception {
return mbs.queryNames(filter, queryExp).size();
}
};
}
@Override
public Callable<Integer> remoteTask(final NodeMBean child) {
return new Callable<Integer>() {
@Override
public Integer call() throws Exception {
return child.getMBeanCount(filter, queryExp);
}
};
}
};
List<Future<Integer>> futures = forAll(completionService, generator,
null);
int count = 0;
for (int i = 0; i < futures.size(); i++) {
try {
count += completionService.take().get();
} catch (InterruptedException e) {
return 0;
} catch (ExecutionException e) {
log.debug(String.format("%s experienced exception when retriving mbean count %s, %s, %s",
this, filter, queryExp), e);
}
}
return count;
}
/**
* @param objectName
* @return
* @throws InstanceNotFoundException
* @throws IntrospectionException
* @throws ReflectionException
* @see javax.management.MBeanServer#getMBeanInfo(javax.management.ObjectName)
*/
@Override
public MBeanInfo getMBeanInfo(final ObjectName objectName)
throws InstanceNotFoundException,
IntrospectionException,
ReflectionException {
ExecutorCompletionService<MBeanInfo> completionService = new ExecutorCompletionService<>(
executor);
TaskGenerator<MBeanInfo> generator = new TaskGenerator<MBeanInfo>() {
@Override
public Callable<MBeanInfo> localTask(final ObjectName objectName) {
return new Callable<MBeanInfo>() {
@Override
public MBeanInfo call() throws Exception {
return mbs.getMBeanInfo(objectName);
}
};
}
@Override
public Callable<MBeanInfo> remoteTask(final NodeMBean child) {
return new Callable<MBeanInfo>() {
@Override
public MBeanInfo call() throws Exception {
return child.getMBeanInfo(objectName);
}
};
}
};
List<Future<MBeanInfo>> futures = forAll(completionService, generator,
objectName);
for (int i = 0; i < futures.size(); i++) {
try {
MBeanInfo info = completionService.take().get();
for (Future<MBeanInfo> future : futures) {
future.cancel(true);
}
return info;
} catch (InterruptedException e) {
return null;
} catch (ExecutionException e) {
log.debug(String.format("%s experienced exception when retriving mbean info %s, %s",
this, objectName), e);
}
}
throw new InstanceNotFoundException(
String.format("Instance not found: %s",
objectName));
}
/**
* @return the name
*/
@Override
public ObjectName getName() {
return name;
}
/* (non-Javadoc)
* @see com.hellblazer.groo.NodeMBean#getObjectInstance(javax.management.ObjectName)
*/
@Override
public ObjectInstance getObjectInstance(final ObjectName objectName)
throws InstanceNotFoundException {
ExecutorCompletionService<ObjectInstance> completionService = new ExecutorCompletionService<>(
executor);
TaskGenerator<ObjectInstance> generator = new TaskGenerator<ObjectInstance>() {
@Override
public Callable<ObjectInstance> localTask(final ObjectName objectName) {
return new Callable<ObjectInstance>() {
@Override
public ObjectInstance call() throws Exception {
return mbs.getObjectInstance(objectName);
}
};
}
@Override
public Callable<ObjectInstance> remoteTask(final NodeMBean child) {
return new Callable<ObjectInstance>() {
@Override
public ObjectInstance call() throws Exception {
return child.getObjectInstance(objectName);
}
};
}
};
List<Future<ObjectInstance>> futures = forAll(completionService,
generator, objectName);
for (int i = 0; i < futures.size(); i++) {
try {
ObjectInstance instance = completionService.take().get();
for (Future<ObjectInstance> future : futures) {
future.cancel(true);
}
return instance;
} catch (InterruptedException e) {
return null;
} catch (ExecutionException e) {
log.debug(String.format("%s experienced exception when retriving object instance %s, %s",
this, objectName), e);
}
}
throw new InstanceNotFoundException(
String.format("Instance not found: %s",
objectName));
}
/* (non-Javadoc)
* @see com.hellblazer.groo.NodeMXBean#getObjectInstance(javax.management.ObjectName, javax.management.QueryExp)
*/
@Override
public Set<ObjectInstance> getObjectInstances(final ObjectName filter,
final QueryExp queryExpr)
throws InstanceNotFoundException,
IOException {
ExecutorCompletionService<Set<ObjectInstance>> completionService = new ExecutorCompletionService<>(
executor);
TaskGenerator<Set<ObjectInstance>> generator = new TaskGenerator<Set<ObjectInstance>>() {
@Override
public Callable<Set<ObjectInstance>> localTask(final ObjectName objectName) {
return new Callable<Set<ObjectInstance>>() {
@Override
public Set<ObjectInstance> call() throws Exception {
return mbs.queryMBeans(objectName, queryExpr);
}
};
}
@Override
public Callable<Set<ObjectInstance>> remoteTask(final NodeMBean child) {
return new Callable<Set<ObjectInstance>>() {
@Override
public Set<ObjectInstance> call() throws Exception {
return child.getObjectInstances(filter, queryExpr);
}
};
}
};
List<Future<Set<ObjectInstance>>> futures = forAll(completionService,
generator, filter);
Set<ObjectInstance> instances = new HashSet<>();
for (int i = 0; i < futures.size(); i++) {
try {
instances.addAll(completionService.take().get());
} catch (InterruptedException e) {
return instances;
} catch (ExecutionException e) {
log.debug(String.format("%s experienced exception when retreiving object instances %s, %s, %s",
this, filter, queryExpr), e);
}
}
if (instances.size() == 0) {
throw new InstanceNotFoundException(
String.format("Instance not found: %s, %s",
filter, queryExpr));
}
return instances;
}
/* (non-Javadoc)
* @see java.lang.Object#hashCode()
*/
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
result = prime * result + (name == null ? 0 : name.hashCode());
return result;
}
/* (non-Javadoc)
* @see com.hellblazer.groo.NodeMXBean#invoke(javax.management.ObjectName, javax.management.Query, java.lang.String, java.lang.Object[], java.lang.String[])
*/
@Override
public Map<ObjectName, Object> invoke(final ObjectName filter,
final QueryExp queryExpr,
final String operationName,
final Object[] params,
final String[] signature)
throws InstanceNotFoundException,
MBeanException,
ReflectionException,
IOException {
ExecutorCompletionService<Map<ObjectName, Object>> completionService = new ExecutorCompletionService<>(
executor);
TaskGenerator<Map<ObjectName, Object>> generator = new TaskGenerator<Map<ObjectName, Object>>() {
@Override
public Callable<Map<ObjectName, Object>> localTask(final ObjectName objectName) {
return new Callable<Map<ObjectName, Object>>() {
@Override
public Map<ObjectName, Object> call() throws Exception {
Map<ObjectName, Object> result = new HashMap<>();
result.put(objectName, mbs.invoke(objectName,
operationName,
params, signature));
return result;
}
};
}
@Override
public Callable<Map<ObjectName, Object>> remoteTask(final NodeMBean child) {
return new Callable<Map<ObjectName, Object>>() {
@Override
public Map<ObjectName, Object> call() throws Exception {
return child.invoke(filter, queryExpr, operationName,
params, signature);
}
};
}
};
List<Future<Map<ObjectName, Object>>> futures = forAll(completionService,
generator,
filter,
queryExpr);
Map<ObjectName, Object> results = new HashMap<>();
for (int i = 0; i < futures.size(); i++) {
try {
results.putAll(completionService.take().get());
} catch (InterruptedException e) {
return results;
} catch (ExecutionException e) {
log.debug(String.format("%s experienced exception when invoking %s, %s, %s, %s, %s",
this,
filter,
queryExpr,
operationName,
params != null ? Arrays.asList(params)
: null,
signature != null ? Arrays.asList(signature)
: null), e.getCause());
}
}
if (results.size() == 0) {
throw new InstanceNotFoundException(
String.format("Instance not found: %s, %s",
filter, queryExpr));
}
return results;
}
/**
* @param objectName
* @param operationName
* @param params
* @param signature
* @return
* @throws InstanceNotFoundException
* @throws MBeanException
* @throws ReflectionException
* @see javax.management.MBeanServer#invoke(javax.management.ObjectName,
* java.lang.String, java.lang.Object[], java.lang.String[])
*/
@Override
public Object invoke(final ObjectName objectName,
final String operationName, final Object[] params,
final String[] signature)
throws InstanceNotFoundException,
MBeanException,
ReflectionException {
ExecutorCompletionService<Object> completionService = new ExecutorCompletionService<>(
executor);
TaskGenerator<Object> generator = new TaskGenerator<Object>() {
@Override
public Callable<Object> localTask(final ObjectName objectName) {
return new Callable<Object>() {
@Override
public Object call() throws Exception {
return mbs.invoke(objectName, operationName, params,
signature);
}
};
}
@Override
public Callable<Object> remoteTask(final NodeMBean child) {
return new Callable<Object>() {
@Override
public Object call() throws Exception {
return child.invoke(objectName, operationName, params,
signature);
}
};
}
};
List<Future<Object>> futures = forAll(completionService, generator,
objectName);
for (int i = 0; i < futures.size(); i++) {
try {
Object result = completionService.take().get();
for (Future<Object> future : futures) {
future.cancel(true);
}
return result;
} catch (InterruptedException e) {
return null;
} catch (ExecutionException e) {
log.debug(String.format("%s experienced exception when invoking %s, %s, %s, %s",
this,
objectName,
operationName,
params != null ? Arrays.asList(params)
: null,
signature != null ? Arrays.asList(signature)
: null), e);
}
}
throw new InstanceNotFoundException(
String.format("Instance not found: %s",
objectName));
}
/**
* @param name
* @param className
* @return
* @throws InstanceNotFoundException
* @throws IOException
* @see javax.management.MBeanServer#isInstanceOf(javax.management.ObjectName,
* java.lang.String)
*/
@Override
public boolean isInstanceOf(ObjectName name, String className)
throws InstanceNotFoundException,
IOException {
for (NodeMBean child : children) {
if (child.isInstanceOf(name, className)) {
return true;
}
}
return mbs.isInstanceOf(name, className);
}
/**
* @param name
* @return
* @throws IOException
* @see javax.management.MBeanServer#isRegistered(javax.management.ObjectName)
*/
@Override
public boolean isRegistered(ObjectName name) throws IOException {
for (NodeMBean child : children) {
if (child.isRegistered(name)) {
return true;
}
}
return mbs.isRegistered(name);
}
/* (non-Javadoc)
* @see javax.management.MBeanRegistration#postDeregister()
*/
@Override
public void postDeregister() {
}
/* (non-Javadoc)
* @see javax.management.MBeanRegistration#postRegister(java.lang.Boolean)
*/
@Override
public void postRegister(Boolean registrationDone) {
}
/* (non-Javadoc)
* @see javax.management.MBeanRegistration#preDeregister()
*/
@Override
public void preDeregister() throws Exception {
}
/* (non-Javadoc)
* @see javax.management.MBeanRegistration#preRegister(javax.management.MBeanServer, javax.management.ObjectName)
*/
@Override
public ObjectName preRegister(MBeanServer server, ObjectName name)
throws Exception {
mbs = server;
this.name = name;
return name;
}
/**
* @param name
* @param query
* @return
* @throws IOException
* @see javax.management.MBeanServer#queryMBeans(javax.management.ObjectName,
* javax.management.QueryExp)
*/
@Override
public Set<ObjectInstance> queryMBeans(ObjectName name, QueryExp query)
throws IOException {
Set<ObjectInstance> result = new HashSet<>();
for (NodeMBean child : children) {
result.addAll(child.queryMBeans(name, query));
}
result.addAll(mbs.queryMBeans(name, query));
return result;
}
/**
* @param name
* @param query
* @return
* @throws IOException
* @see javax.management.MBeanServer#queryNames(javax.management.ObjectName,
* javax.management.QueryExp)
*/
@Override
public Set<ObjectName> queryNames(ObjectName name, QueryExp query)
throws IOException {
Set<ObjectName> result = new HashSet<>();
for (NodeMBean child : children) {
result.addAll(child.queryNames(name, query));
}
result.addAll(mbs.queryNames(name, query));
return result;
}
/**
* @param child
*/
public void removeChild(NodeMBean child) {
children.remove(child);
}
/**
* @param name
* @param listener
* @throws InstanceNotFoundException
* @throws ListenerNotFoundException
* @throws IOException
* @see javax.management.MBeanServer#removeNotificationListener(javax.management.ObjectName,
* javax.management.NotificationListener)
*/
@Override
public void removeNotificationListener(ObjectName name,
NotificationListener listener)
throws InstanceNotFoundException,
ListenerNotFoundException,
IOException {
for (NodeMBean child : children) {
try {
child.removeNotificationListener(name, listener);
return;
} catch (InstanceNotFoundException e) {
// ignored
}
}
mbs.removeNotificationListener(name, listener);
}
/**
* @param name
* @param listener
* @param filter
* @param handback
* @throws InstanceNotFoundException
* @throws ListenerNotFoundException
* @throws IOException
* @see javax.management.MBeanServer#removeNotificationListener(javax.management.ObjectName,
* javax.management.NotificationListener,
* javax.management.NotificationFilter, java.lang.Object)
*/
@Override
public void removeNotificationListener(ObjectName name,
NotificationListener listener,
NotificationFilter filter,
Object handback)
throws InstanceNotFoundException,
ListenerNotFoundException,
IOException {
for (NodeMBean child : children) {
try {
child.removeNotificationListener(name, listener, filter,
handback);
return;
} catch (InstanceNotFoundException e) {
// ignored
}
}
mbs.removeNotificationListener(name, listener, filter, handback);
}
/**
* @param name
* @param listener
* @throws InstanceNotFoundException
* @throws ListenerNotFoundException
* @throws IOException
* @see javax.management.MBeanServer#removeNotificationListener(javax.management.ObjectName,
* javax.management.ObjectName)
*/
@Override
public void removeNotificationListener(ObjectName name, ObjectName listener)
throws InstanceNotFoundException,
ListenerNotFoundException,
IOException {
for (NodeMBean child : children) {
try {
child.removeNotificationListener(name, listener);
return;
} catch (InstanceNotFoundException e) {
// ignored
}
}
mbs.removeNotificationListener(name, listener);
}
/**
* @param name
* @param listener
* @param filter
* @param handback
* @throws InstanceNotFoundException
* @throws ListenerNotFoundException
* @throws IOException
* @see javax.management.MBeanServer#removeNotificationListener(javax.management.ObjectName,
* javax.management.ObjectName, javax.management.NotificationFilter,
* java.lang.Object)
*/
@Override
public void removeNotificationListener(ObjectName name,
ObjectName listener,
NotificationFilter filter,
Object handback)
throws InstanceNotFoundException,
ListenerNotFoundException,
IOException {
for (NodeMBean child : children) {
try {
child.removeNotificationListener(name, listener, filter,
handback);
return;
} catch (InstanceNotFoundException e) {
// ignored
}
}
mbs.removeNotificationListener(name, listener, filter, handback);
}
/* (non-Javadoc)
* @see com.hellblazer.groo.NodeMXBean#removeNotificationListener(javax.management.ObjectName, javax.management.QueryExp, javax.management.NotificationListener)
*/
@Override
public void removeNotificationListener(ObjectName name, QueryExp queryExpr,
NotificationListener listener)
throws InstanceNotFoundException,
ListenerNotFoundException,
IOException {
boolean success = false;
for (NodeMBean child : children) {
try {
child.removeNotificationListener(name, queryExpr, listener);
success = true;
} catch (InstanceNotFoundException e) {
// continue
}
}
Set<ObjectName> names = mbs.queryNames(name, queryExpr);
if (!success && names.size() == 0) {
throw new InstanceNotFoundException(
String.format("No instance found for %s, %s",
name, queryExpr));
}
for (ObjectName n : mbs.queryNames(name, queryExpr)) {
mbs.removeNotificationListener(n, listener);
}
}
/* (non-Javadoc)
* @see com.hellblazer.groo.NodeMXBean#removeNotificationListener(javax.management.ObjectName, javax.management.QueryExp, javax.management.NotificationListener, javax.management.NotificationFilter, java.lang.Object)
*/
@Override
public void removeNotificationListener(ObjectName name, QueryExp queryExpr,
NotificationListener listener,
NotificationFilter filter,
Object handback)
throws InstanceNotFoundException,
ListenerNotFoundException,
IOException {
boolean success = false;
for (NodeMBean child : children) {
try {
child.removeNotificationListener(name, queryExpr, listener,
filter, handback);
success = true;
} catch (InstanceNotFoundException e) {
// continue
}
}
Set<ObjectName> names = mbs.queryNames(name, queryExpr);
if (!success && names.size() == 0) {
throw new InstanceNotFoundException(
String.format("No instance found for %s, %s",
name, queryExpr));
}
for (ObjectName n : mbs.queryNames(name, queryExpr)) {
mbs.removeNotificationListener(n, listener, filter, handback);
}
}
/* (non-Javadoc)
* @see com.hellblazer.groo.NodeMXBean#removeNotificationListener(javax.management.ObjectName, javax.management.QueryExp, javax.management.ObjectName)
*/
@Override
public void removeNotificationListener(ObjectName name, QueryExp queryExpr,
ObjectName listener)
throws InstanceNotFoundException,
ListenerNotFoundException,
IOException {
boolean success = false;
for (NodeMBean child : children) {
try {
child.removeNotificationListener(name, queryExpr, listener);
success = true;
} catch (InstanceNotFoundException e) {
// continue
}
}
Set<ObjectName> names = mbs.queryNames(name, queryExpr);
if (!success && names.size() == 0) {
throw new InstanceNotFoundException(
String.format("No instance found for %s, %s",
name, queryExpr));
}
for (ObjectName n : mbs.queryNames(name, queryExpr)) {
mbs.removeNotificationListener(n, listener);
}
}
/* (non-Javadoc)
* @see com.hellblazer.groo.NodeMXBean#removeNotificationListener(javax.management.ObjectName, javax.management.QueryExp, javax.management.ObjectName, javax.management.NotificationFilter, java.lang.Object)
*/
@Override
public void removeNotificationListener(ObjectName name, QueryExp queryExpr,
ObjectName listener,
NotificationFilter filter,
Object handback)
throws InstanceNotFoundException,
ListenerNotFoundException,
IOException {
boolean success = false;
for (NodeMBean child : children) {
try {
child.removeNotificationListener(name, queryExpr, listener,
filter, handback);
success = true;
} catch (InstanceNotFoundException e) {
// continue
}
}
Set<ObjectName> names = mbs.queryNames(name, queryExpr);
if (!success && names.size() == 0) {
throw new InstanceNotFoundException(
String.format("No instance found for %s, %s",
name, queryExpr));
}
for (ObjectName n : mbs.queryNames(name, queryExpr)) {
mbs.removeNotificationListener(n, listener, filter, handback);
}
}
/**
* @param name
* @param attribute
* @throws InstanceNotFoundException
* @throws AttributeNotFoundException
* @throws InvalidAttributeValueException
* @throws MBeanException
* @throws ReflectionException
* @throws IOException
* @see javax.management.MBeanServer#setAttribute(javax.management.ObjectName,
* javax.management.Attribute)
*/
@Override
public void setAttribute(ObjectName name, Attribute attribute)
throws InstanceNotFoundException,
AttributeNotFoundException,
InvalidAttributeValueException,
MBeanException,
ReflectionException,
IOException {
for (NodeMBean child : children) {
try {
child.setAttribute(name, attribute);
return;
} catch (InstanceNotFoundException e) {
// ignored
}
}
mbs.setAttribute(name, attribute);
}
/* (non-Javadoc)
* @see com.hellblazer.groo.NodeMXBean#setAttribute(javax.management.ObjectName, javax.management.QueryExp, javax.management.Attribute)
*/
@Override
public void setAttribute(ObjectName name, QueryExp queryExpr,
Attribute attribute)
throws InstanceNotFoundException,
AttributeNotFoundException,
InvalidAttributeValueException,
MBeanException,
ReflectionException,
IOException {
for (NodeMBean child : children) {
child.setAttribute(name, queryExpr, attribute);
}
for (ObjectName instance : mbs.queryNames(name, queryExpr)) {
mbs.setAttribute(instance, attribute);
}
}
/**
* @param name
* @param attributes
* @return
* @throws InstanceNotFoundException
* @throws ReflectionException
* @throws IOException
* @see javax.management.MBeanServer#setAttributes(javax.management.ObjectName,
* javax.management.AttributeList)
*/
@Override
public AttributeList setAttributes(ObjectName name, AttributeList attributes)
throws InstanceNotFoundException,
ReflectionException,
IOException {
for (NodeMBean child : children) {
AttributeList list = child.setAttributes(name, attributes);
if (list != null) {
return list;
}
}
return mbs.setAttributes(name, attributes);
}
/* (non-Javadoc)
* @see com.hellblazer.groo.NodeMXBean#setAttributes(javax.management.ObjectName, javax.management.QueryExp, javax.management.AttributeList)
*/
@Override
public Map<ObjectName, AttributeList> setAttributes(ObjectName name,
QueryExp queryExpr,
AttributeList attributes)
throws InstanceNotFoundException,
ReflectionException,
IOException {
Map<ObjectName, AttributeList> results = new HashMap<>();
for (NodeMBean child : children) {
results.putAll(child.setAttributes(name, queryExpr, attributes));
}
for (ObjectName instance : mbs.queryNames(name, queryExpr)) {
results.put(instance, mbs.setAttributes(instance, attributes));
}
return results;
}
/* (non-Javadoc)
* @see java.lang.Object#toString()
*/
@Override
public String toString() {
return "Node [" + name + ", " + filter + "]";
}
/**
* @param completionService
* @param generator
* @param objectName
* @return
*/
private <V> List<Future<V>> forAll(ExecutorCompletionService<V> completionService,
TaskGenerator<V> generator,
ObjectName objectName) {
List<Future<V>> futures = new ArrayList<>();
for (NodeMBean child : children) {
futures.add(completionService.submit(generator.remoteTask(child)));
}
futures.add(completionService.submit(generator.localTask(objectName)));
return futures;
}
private <V> List<Future<V>> forAll(ExecutorCompletionService<V> completionService,
TaskGenerator<V> generator,
ObjectName pattern, QueryExp queryExpr) {
List<Future<V>> futures = new ArrayList<>();
for (NodeMBean child : children) {
futures.add(completionService.submit(generator.remoteTask(child)));
}
for (ObjectName n : mbs.queryNames(pattern, queryExpr)) {
futures.add(completionService.submit(generator.localTask(n)));
}
return futures;
}
}
| Finish parallelization | src/main/java/com/hellblazer/groo/Node.java | Finish parallelization | <ide><path>src/main/java/com/hellblazer/groo/Node.java
<ide> };
<ide> List<Future<Boolean>> futures = forAll(completionService, generator,
<ide> pattern, queryExpr);
<del>
<del> boolean success = false;
<ide> for (int i = 0; i < futures.size(); i++) {
<ide> try {
<ide> if (completionService.take().get()) {
<del> success |= true;
<add> for (Future<Boolean> future : futures) {
<add> future.cancel(true);
<add> }
<add> return;
<ide> }
<ide> } catch (InterruptedException e) {
<ide> return;
<ide> this, pattern, queryExpr), e);
<ide> }
<ide> }
<del> if (!success) {
<del> throw new InstanceNotFoundException(
<del> String.format("Instance not found: %s, %s",
<del> pattern,
<del> queryExpr));
<del> }
<add> throw new InstanceNotFoundException(
<add> String.format("Instance not found: %s, %s",
<add> pattern, queryExpr));
<ide> }
<ide>
<ide> /* (non-Javadoc)
<ide> }
<ide>
<ide> /**
<del> * @param name
<add> * @param objectName
<ide> * @param className
<ide> * @return
<ide> * @throws InstanceNotFoundException
<ide> * java.lang.String)
<ide> */
<ide> @Override
<del> public boolean isInstanceOf(ObjectName name, String className)
<del> throws InstanceNotFoundException,
<del> IOException {
<del> for (NodeMBean child : children) {
<del> if (child.isInstanceOf(name, className)) {
<del> return true;
<del> }
<del> }
<del> return mbs.isInstanceOf(name, className);
<del> }
<del>
<del> /**
<del> * @param name
<add> public boolean isInstanceOf(final ObjectName objectName,
<add> final String className)
<add> throws InstanceNotFoundException,
<add> IOException {
<add> ExecutorCompletionService<Boolean> completionService = new ExecutorCompletionService<>(
<add> executor);
<add> TaskGenerator<Boolean> generator = new TaskGenerator<Boolean>() {
<add> @Override
<add> public Callable<Boolean> localTask(final ObjectName objectName) {
<add> return new Callable<Boolean>() {
<add> @Override
<add> public Boolean call() throws Exception {
<add> return mbs.isInstanceOf(objectName, className);
<add> }
<add> };
<add> }
<add>
<add> @Override
<add> public Callable<Boolean> remoteTask(final NodeMBean child) {
<add> return new Callable<Boolean>() {
<add> @Override
<add> public Boolean call() throws Exception {
<add> return child.isInstanceOf(objectName, className);
<add> }
<add> };
<add> }
<add> };
<add> List<Future<Boolean>> futures = forAll(completionService, generator,
<add> objectName);
<add> for (int i = 0; i < futures.size(); i++) {
<add> try {
<add> Boolean result = completionService.take().get();
<add> for (Future<Boolean> future : futures) {
<add> future.cancel(true);
<add> }
<add> return result;
<add> } catch (InterruptedException e) {
<add> return false;
<add> } catch (ExecutionException e) {
<add> log.debug(String.format("%s experienced exception when determining instance of %s, %s",
<add> this, objectName, className), e);
<add> }
<add> }
<add> throw new InstanceNotFoundException(
<add> String.format("Instance not found: %s",
<add> objectName));
<add> }
<add>
<add> /**
<add> * @param objectName
<ide> * @return
<ide> * @throws IOException
<ide> * @see javax.management.MBeanServer#isRegistered(javax.management.ObjectName)
<ide> */
<ide> @Override
<del> public boolean isRegistered(ObjectName name) throws IOException {
<del> for (NodeMBean child : children) {
<del> if (child.isRegistered(name)) {
<del> return true;
<del> }
<del> }
<del> return mbs.isRegistered(name);
<add> public boolean isRegistered(final ObjectName objectName) throws IOException {
<add> ExecutorCompletionService<Boolean> completionService = new ExecutorCompletionService<>(
<add> executor);
<add> TaskGenerator<Boolean> generator = new TaskGenerator<Boolean>() {
<add> @Override
<add> public Callable<Boolean> localTask(final ObjectName objectName) {
<add> return new Callable<Boolean>() {
<add> @Override
<add> public Boolean call() throws Exception {
<add> return mbs.isRegistered(objectName);
<add> }
<add> };
<add> }
<add>
<add> @Override
<add> public Callable<Boolean> remoteTask(final NodeMBean child) {
<add> return new Callable<Boolean>() {
<add> @Override
<add> public Boolean call() throws Exception {
<add> return child.isRegistered(objectName);
<add> }
<add> };
<add> }
<add> };
<add> List<Future<Boolean>> futures = forAll(completionService, generator,
<add> objectName);
<add> for (int i = 0; i < futures.size(); i++) {
<add> try {
<add> Boolean result = completionService.take().get();
<add> for (Future<Boolean> future : futures) {
<add> future.cancel(true);
<add> }
<add> return result;
<add> } catch (InterruptedException e) {
<add> return false;
<add> } catch (ExecutionException e) {
<add>                log.debug(String.format("%s experienced exception when determining registration of %s",
<add>                                        this, objectName), e);
<add> }
<add> }
<add> return false;
<ide> }
<ide>
<ide> /* (non-Javadoc)
<ide> }
<ide>
<ide> /**
<del> * @param name
<add> * @param filter
<ide> * @param query
<ide> * @return
<ide> * @throws IOException
<ide> * javax.management.QueryExp)
<ide> */
<ide> @Override
<del> public Set<ObjectInstance> queryMBeans(ObjectName name, QueryExp query)
<del> throws IOException {
<del> Set<ObjectInstance> result = new HashSet<>();
<del> for (NodeMBean child : children) {
<del> result.addAll(child.queryMBeans(name, query));
<del> }
<del> result.addAll(mbs.queryMBeans(name, query));
<del> return result;
<add> public Set<ObjectInstance> queryMBeans(final ObjectName filter,
<add> final QueryExp query)
<add> throws IOException {
<add> ExecutorCompletionService<Set<ObjectInstance>> completionService = new ExecutorCompletionService<>(
<add> executor);
<add> TaskGenerator<Set<ObjectInstance>> generator = new TaskGenerator<Set<ObjectInstance>>() {
<add> @Override
<add> public Callable<Set<ObjectInstance>> localTask(final ObjectName objectName) {
<add> return new Callable<Set<ObjectInstance>>() {
<add> @Override
<add> public Set<ObjectInstance> call() throws Exception {
<add> return mbs.queryMBeans(filter, query);
<add> }
<add> };
<add> }
<add>
<add> @Override
<add> public Callable<Set<ObjectInstance>> remoteTask(final NodeMBean child) {
<add> return new Callable<Set<ObjectInstance>>() {
<add> @Override
<add> public Set<ObjectInstance> call() throws Exception {
<add> return child.queryMBeans(filter, query);
<add> }
<add> };
<add> }
<add> };
<add> Set<ObjectInstance> instances = new HashSet<>();
<add> List<Future<Set<ObjectInstance>>> futures = forAll(completionService,
<add> generator, filter,
<add> query);
<add> for (int i = 0; i < futures.size(); i++) {
<add> try {
<add> instances.addAll(completionService.take().get());
<add> } catch (InterruptedException e) {
<add> return instances;
<add> } catch (ExecutionException e) {
<add> log.debug(String.format("%s experienced exception when querying mbeans %s, %s",
<add> this, filter, query), e);
<add> }
<add> }
<add> return instances;
<ide> }
<ide>
<ide> /**
<ide> * javax.management.QueryExp)
<ide> */
<ide> @Override
<del> public Set<ObjectName> queryNames(ObjectName name, QueryExp query)
<del> throws IOException {
<del> Set<ObjectName> result = new HashSet<>();
<del> for (NodeMBean child : children) {
<del> result.addAll(child.queryNames(name, query));
<del> }
<del> result.addAll(mbs.queryNames(name, query));
<del> return result;
<add> public Set<ObjectName> queryNames(final ObjectName filter,
<add> final QueryExp query) throws IOException {
<add> ExecutorCompletionService<Set<ObjectName>> completionService = new ExecutorCompletionService<>(
<add> executor);
<add> TaskGenerator<Set<ObjectName>> generator = new TaskGenerator<Set<ObjectName>>() {
<add> @Override
<add> public Callable<Set<ObjectName>> localTask(final ObjectName objectName) {
<add> return new Callable<Set<ObjectName>>() {
<add> @Override
<add> public Set<ObjectName> call() throws Exception {
<add> return mbs.queryNames(filter, query);
<add> }
<add> };
<add> }
<add>
<add> @Override
<add> public Callable<Set<ObjectName>> remoteTask(final NodeMBean child) {
<add> return new Callable<Set<ObjectName>>() {
<add> @Override
<add> public Set<ObjectName> call() throws Exception {
<add> return child.queryNames(filter, query);
<add> }
<add> };
<add> }
<add> };
<add> Set<ObjectName> names = new HashSet<>();
<add> List<Future<Set<ObjectName>>> futures = forAll(completionService,
<add> generator, filter, query);
<add> for (int i = 0; i < futures.size(); i++) {
<add> try {
<add> names.addAll(completionService.take().get());
<add> } catch (InterruptedException e) {
<add> return names;
<add> } catch (ExecutionException e) {
<add> log.debug(String.format("%s experienced exception when querying names %s, %s",
<add> this, filter, query), e);
<add> }
<add> }
<add> return names;
<ide> }
<ide>
<ide> /**
<ide> * javax.management.NotificationListener)
<ide> */
<ide> @Override
<del> public void removeNotificationListener(ObjectName name,
<del> NotificationListener listener)
<del> throws InstanceNotFoundException,
<del> ListenerNotFoundException,
<del> IOException {
<del> for (NodeMBean child : children) {
<del> try {
<del> child.removeNotificationListener(name, listener);
<del> return;
<del> } catch (InstanceNotFoundException e) {
<del> // ignored
<del> }
<del> }
<del> mbs.removeNotificationListener(name, listener);
<del> }
<del>
<del> /**
<del> * @param name
<add> public void removeNotificationListener(final ObjectName objectName,
<add> final NotificationListener listener)
<add> throws InstanceNotFoundException,
<add> ListenerNotFoundException,
<add> IOException {
<add> ExecutorCompletionService<Boolean> completionService = new ExecutorCompletionService<>(
<add> executor);
<add> TaskGenerator<Boolean> generator = new TaskGenerator<Boolean>() {
<add> @Override
<add> public Callable<Boolean> localTask(final ObjectName objectName) {
<add> return new Callable<Boolean>() {
<add> @Override
<add> public Boolean call() throws Exception {
<add> mbs.removeNotificationListener(objectName, listener);
<add> return true;
<add> }
<add> };
<add> }
<add>
<add> @Override
<add> public Callable<Boolean> remoteTask(final NodeMBean child) {
<add> return new Callable<Boolean>() {
<add> @Override
<add> public Boolean call() throws Exception {
<add> child.removeNotificationListener(objectName, listener);
<add> return true;
<add> }
<add> };
<add> }
<add> };
<add> List<Future<Boolean>> futures = forAll(completionService, generator,
<add> objectName);
<add> for (int i = 0; i < futures.size(); i++) {
<add> try {
<add> if (completionService.take().get()) {
<add> for (Future<Boolean> future : futures) {
<add> future.cancel(true);
<add> }
<add> return;
<add> }
<add> } catch (InterruptedException e) {
<add> return; // don't even log this ;)
<add> } catch (ExecutionException e) {
<add> log.debug(String.format("%s experienced exception when removing notification listener on %s for %s",
<add> this, objectName, listener), e);
<add> }
<add> }
<add> throw new InstanceNotFoundException(
<add> String.format("Instance not found: %s",
<add> objectName));
<add> }
<add>
<add> /**
<add> * @param objectName
<ide> * @param listener
<ide> * @param filter
<ide> * @param handback
<ide> * javax.management.NotificationFilter, java.lang.Object)
<ide> */
<ide> @Override
<del> public void removeNotificationListener(ObjectName name,
<del> NotificationListener listener,
<del> NotificationFilter filter,
<del> Object handback)
<del> throws InstanceNotFoundException,
<del> ListenerNotFoundException,
<del> IOException {
<del> for (NodeMBean child : children) {
<del> try {
<del> child.removeNotificationListener(name, listener, filter,
<del> handback);
<del> return;
<del> } catch (InstanceNotFoundException e) {
<del> // ignored
<del> }
<del> }
<del> mbs.removeNotificationListener(name, listener, filter, handback);
<del> }
<del>
<del> /**
<del> * @param name
<add> public void removeNotificationListener(final ObjectName objectName,
<add> final NotificationListener listener,
<add> final NotificationFilter filter,
<add> final Object handback)
<add> throws InstanceNotFoundException,
<add> ListenerNotFoundException,
<add> IOException {
<add> ExecutorCompletionService<Boolean> completionService = new ExecutorCompletionService<>(
<add> executor);
<add> TaskGenerator<Boolean> generator = new TaskGenerator<Boolean>() {
<add> @Override
<add> public Callable<Boolean> localTask(final ObjectName objectName) {
<add> return new Callable<Boolean>() {
<add> @Override
<add> public Boolean call() throws Exception {
<add> mbs.removeNotificationListener(objectName, listener,
<add> filter, handback);
<add> return true;
<add> }
<add> };
<add> }
<add>
<add> @Override
<add> public Callable<Boolean> remoteTask(final NodeMBean child) {
<add> return new Callable<Boolean>() {
<add> @Override
<add> public Boolean call() throws Exception {
<add> child.removeNotificationListener(objectName, listener,
<add> filter, handback);
<add> return true;
<add> }
<add> };
<add> }
<add> };
<add> List<Future<Boolean>> futures = forAll(completionService, generator,
<add> objectName);
<add> for (int i = 0; i < futures.size(); i++) {
<add> try {
<add> if (completionService.take().get()) {
<add> for (Future<Boolean> future : futures) {
<add> future.cancel(true);
<add> }
<add> return;
<add> }
<add> } catch (InterruptedException e) {
<add> return; // don't even log this ;)
<add> } catch (ExecutionException e) {
<add> log.debug(String.format("%s experienced exception when removing notification listener on %s for %s, %s",
<add> this, objectName, listener, filter), e);
<add> }
<add> }
<add> throw new InstanceNotFoundException(
<add> String.format("Instance not found: %s",
<add> objectName));
<add> }
<add>
<add> /**
<add> * @param objectName
<ide> * @param listener
<ide> * @throws InstanceNotFoundException
<ide> * @throws ListenerNotFoundException
<ide> * javax.management.ObjectName)
<ide> */
<ide> @Override
<del> public void removeNotificationListener(ObjectName name, ObjectName listener)
<del> throws InstanceNotFoundException,
<del> ListenerNotFoundException,
<del> IOException {
<del> for (NodeMBean child : children) {
<del> try {
<del> child.removeNotificationListener(name, listener);
<del> return;
<del> } catch (InstanceNotFoundException e) {
<del> // ignored
<del> }
<del> }
<del> mbs.removeNotificationListener(name, listener);
<add> public void removeNotificationListener(final ObjectName objectName,
<add> final ObjectName listener)
<add> throws InstanceNotFoundException,
<add> ListenerNotFoundException,
<add> IOException {
<add> ExecutorCompletionService<Boolean> completionService = new ExecutorCompletionService<>(
<add> executor);
<add> TaskGenerator<Boolean> generator = new TaskGenerator<Boolean>() {
<add> @Override
<add> public Callable<Boolean> localTask(final ObjectName objectName) {
<add> return new Callable<Boolean>() {
<add> @Override
<add> public Boolean call() throws Exception {
<add> mbs.removeNotificationListener(objectName, listener);
<add> return true;
<add> }
<add> };
<add> }
<add>
<add> @Override
<add> public Callable<Boolean> remoteTask(final NodeMBean child) {
<add> return new Callable<Boolean>() {
<add> @Override
<add> public Boolean call() throws Exception {
<add> child.removeNotificationListener(objectName, listener);
<add> return true;
<add> }
<add> };
<add> }
<add> };
<add> List<Future<Boolean>> futures = forAll(completionService, generator,
<add> objectName);
<add> for (int i = 0; i < futures.size(); i++) {
<add> try {
<add> if (completionService.take().get()) {
<add> for (Future<Boolean> future : futures) {
<add> future.cancel(true);
<add> }
<add> return;
<add> }
<add> } catch (InterruptedException e) {
<add> return; // don't even log this ;)
<add> } catch (ExecutionException e) {
<add> log.debug(String.format("%s experienced exception when removing notification listener on %s for %s",
<add> this, objectName, listener), e);
<add> }
<add> }
<add> throw new InstanceNotFoundException(
<add> String.format("Instance not found: %s",
<add> objectName));
<ide> }
<ide>
<ide> /**
<ide> * java.lang.Object)
<ide> */
<ide> @Override
<del> public void removeNotificationListener(ObjectName name,
<del> ObjectName listener,
<del> NotificationFilter filter,
<del> Object handback)
<del> throws InstanceNotFoundException,
<del> ListenerNotFoundException,
<del> IOException {
<del> for (NodeMBean child : children) {
<del> try {
<del> child.removeNotificationListener(name, listener, filter,
<del> handback);
<del> return;
<del> } catch (InstanceNotFoundException e) {
<del> // ignored
<del> }
<del> }
<del> mbs.removeNotificationListener(name, listener, filter, handback);
<add> public void removeNotificationListener(final ObjectName objectName,
<add> final ObjectName listener,
<add> final NotificationFilter filter,
<add> final Object handback)
<add> throws InstanceNotFoundException,
<add> ListenerNotFoundException,
<add> IOException {
<add> ExecutorCompletionService<Boolean> completionService = new ExecutorCompletionService<>(
<add> executor);
<add> TaskGenerator<Boolean> generator = new TaskGenerator<Boolean>() {
<add> @Override
<add> public Callable<Boolean> localTask(final ObjectName objectName) {
<add> return new Callable<Boolean>() {
<add> @Override
<add> public Boolean call() throws Exception {
<add> mbs.removeNotificationListener(objectName, listener,
<add> filter, handback);
<add> return true;
<add> }
<add> };
<add> }
<add>
<add> @Override
<add> public Callable<Boolean> remoteTask(final NodeMBean child) {
<add> return new Callable<Boolean>() {
<add> @Override
<add> public Boolean call() throws Exception {
<add> child.removeNotificationListener(objectName, listener,
<add> filter, handback);
<add> return true;
<add> }
<add> };
<add> }
<add> };
<add> List<Future<Boolean>> futures = forAll(completionService, generator,
<add> objectName);
<add> for (int i = 0; i < futures.size(); i++) {
<add> try {
<add> if (completionService.take().get()) {
<add> for (Future<Boolean> future : futures) {
<add> future.cancel(true);
<add> }
<add> return;
<add> }
<add> } catch (InterruptedException e) {
<add> return; // don't even log this ;)
<add> } catch (ExecutionException e) {
<add> log.debug(String.format("%s experienced exception when removing notification listener on %s for %s, %s, %s",
<add> this, objectName, listener, filter,
<add> handback), e);
<add> }
<add> }
<add> throw new InstanceNotFoundException(
<add> String.format("Instance not found: %s",
<add> objectName));
<ide> }
<ide>
<ide> /* (non-Javadoc)
<ide> * @see com.hellblazer.groo.NodeMXBean#removeNotificationListener(javax.management.ObjectName, javax.management.QueryExp, javax.management.NotificationListener)
<ide> */
<ide> @Override
<del> public void removeNotificationListener(ObjectName name, QueryExp queryExpr,
<del> NotificationListener listener)
<del> throws InstanceNotFoundException,
<del> ListenerNotFoundException,
<del> IOException {
<del> boolean success = false;
<del> for (NodeMBean child : children) {
<del> try {
<del> child.removeNotificationListener(name, queryExpr, listener);
<del> success = true;
<del> } catch (InstanceNotFoundException e) {
<del> // continue
<del> }
<del> }
<del> Set<ObjectName> names = mbs.queryNames(name, queryExpr);
<del> if (!success && names.size() == 0) {
<del> throw new InstanceNotFoundException(
<del> String.format("No instance found for %s, %s",
<del> name, queryExpr));
<del> }
<del> for (ObjectName n : mbs.queryNames(name, queryExpr)) {
<del> mbs.removeNotificationListener(n, listener);
<del> }
<add> public void removeNotificationListener(final ObjectName pattern,
<add> final QueryExp queryExpr,
<add> final NotificationListener listener)
<add> throws InstanceNotFoundException,
<add> ListenerNotFoundException,
<add> IOException {
<add> ExecutorCompletionService<Boolean> completionService = new ExecutorCompletionService<>(
<add> executor);
<add> TaskGenerator<Boolean> generator = new TaskGenerator<Boolean>() {
<add> @Override
<add> public Callable<Boolean> localTask(final ObjectName objectName) {
<add> return new Callable<Boolean>() {
<add> @Override
<add> public Boolean call() throws Exception {
<add> mbs.removeNotificationListener(objectName, listener);
<add> return true;
<add> }
<add> };
<add> }
<add>
<add> @Override
<add> public Callable<Boolean> remoteTask(final NodeMBean child) {
<add> return new Callable<Boolean>() {
<add> @Override
<add> public Boolean call() throws Exception {
<add> child.removeNotificationListener(pattern, queryExpr,
<add> listener);
<add> return true;
<add> }
<add> };
<add> }
<add> };
<add> List<Future<Boolean>> futures = forAll(completionService, generator,
<add> pattern, queryExpr);
<add> for (int i = 0; i < futures.size(); i++) {
<add> try {
<add> if (completionService.take().get()) {
<add> for (Future<Boolean> future : futures) {
<add> future.cancel(true);
<add> }
<add> return;
<add> }
<add> } catch (InterruptedException e) {
<add> return;
<add> } catch (ExecutionException e) {
<add> log.debug(String.format("%s experienced exception when removing notification listener %s, %s",
<add> this, pattern, queryExpr), e);
<add> }
<add> }
<add> throw new InstanceNotFoundException(
<add> String.format("Instance not found: %s, %s",
<add> pattern, queryExpr));
<ide> }
<ide>
<ide> /* (non-Javadoc)
<ide> * @see com.hellblazer.groo.NodeMXBean#removeNotificationListener(javax.management.ObjectName, javax.management.QueryExp, javax.management.NotificationListener, javax.management.NotificationFilter, java.lang.Object)
<ide> */
<ide> @Override
<del> public void removeNotificationListener(ObjectName name, QueryExp queryExpr,
<del> NotificationListener listener,
<del> NotificationFilter filter,
<del> Object handback)
<del> throws InstanceNotFoundException,
<del> ListenerNotFoundException,
<del> IOException {
<del> boolean success = false;
<del> for (NodeMBean child : children) {
<del> try {
<del> child.removeNotificationListener(name, queryExpr, listener,
<del> filter, handback);
<del> success = true;
<del> } catch (InstanceNotFoundException e) {
<del> // continue
<del> }
<del> }
<del> Set<ObjectName> names = mbs.queryNames(name, queryExpr);
<del> if (!success && names.size() == 0) {
<del> throw new InstanceNotFoundException(
<del> String.format("No instance found for %s, %s",
<del> name, queryExpr));
<del> }
<del> for (ObjectName n : mbs.queryNames(name, queryExpr)) {
<del> mbs.removeNotificationListener(n, listener, filter, handback);
<del> }
<add> public void removeNotificationListener(final ObjectName pattern,
<add> final QueryExp queryExpr,
<add> final NotificationListener listener,
<add> final NotificationFilter filter,
<add> final Object handback)
<add> throws InstanceNotFoundException,
<add> ListenerNotFoundException,
<add> IOException {
<add> ExecutorCompletionService<Boolean> completionService = new ExecutorCompletionService<>(
<add> executor);
<add> TaskGenerator<Boolean> generator = new TaskGenerator<Boolean>() {
<add> @Override
<add> public Callable<Boolean> localTask(final ObjectName objectName) {
<add> return new Callable<Boolean>() {
<add> @Override
<add> public Boolean call() throws Exception {
<add> mbs.removeNotificationListener(objectName, listener,
<add> filter, handback);
<add> return true;
<add> }
<add> };
<add> }
<add>
<add> @Override
<add> public Callable<Boolean> remoteTask(final NodeMBean child) {
<add> return new Callable<Boolean>() {
<add> @Override
<add> public Boolean call() throws Exception {
<add> child.removeNotificationListener(pattern, queryExpr,
<add> listener, filter,
<add> handback);
<add> return true;
<add> }
<add> };
<add> }
<add> };
<add> List<Future<Boolean>> futures = forAll(completionService, generator,
<add> pattern, queryExpr);
<add> for (int i = 0; i < futures.size(); i++) {
<add> try {
<add> if (completionService.take().get()) {
<add> for (Future<Boolean> future : futures) {
<add> future.cancel(true);
<add> }
<add> return;
<add> }
<add> } catch (InterruptedException e) {
<add> return;
<add> } catch (ExecutionException e) {
<add> log.debug(String.format("%s experienced exception when removing notification listener %s, %s, %s, %s, %s",
<add> this, pattern, queryExpr, listener,
<add> filter, handback), e);
<add> }
<add> }
<add> throw new InstanceNotFoundException(
<add> String.format("Instance not found: %s, %s",
<add> pattern, queryExpr));
<ide> }
<ide>
<ide> /* (non-Javadoc)
<ide> * @see com.hellblazer.groo.NodeMXBean#removeNotificationListener(javax.management.ObjectName, javax.management.QueryExp, javax.management.ObjectName)
<ide> */
<ide> @Override
<del> public void removeNotificationListener(ObjectName name, QueryExp queryExpr,
<del> ObjectName listener)
<del> throws InstanceNotFoundException,
<del> ListenerNotFoundException,
<del> IOException {
<del> boolean success = false;
<del> for (NodeMBean child : children) {
<del> try {
<del> child.removeNotificationListener(name, queryExpr, listener);
<del> success = true;
<del> } catch (InstanceNotFoundException e) {
<del> // continue
<del> }
<del> }
<del> Set<ObjectName> names = mbs.queryNames(name, queryExpr);
<del> if (!success && names.size() == 0) {
<del> throw new InstanceNotFoundException(
<del> String.format("No instance found for %s, %s",
<del> name, queryExpr));
<del> }
<del> for (ObjectName n : mbs.queryNames(name, queryExpr)) {
<del> mbs.removeNotificationListener(n, listener);
<del> }
<add> public void removeNotificationListener(final ObjectName pattern,
<add> final QueryExp queryExpr,
<add> final ObjectName listener)
<add> throws InstanceNotFoundException,
<add> ListenerNotFoundException,
<add> IOException {
<add> ExecutorCompletionService<Boolean> completionService = new ExecutorCompletionService<>(
<add> executor);
<add> TaskGenerator<Boolean> generator = new TaskGenerator<Boolean>() {
<add> @Override
<add> public Callable<Boolean> localTask(final ObjectName objectName) {
<add> return new Callable<Boolean>() {
<add> @Override
<add> public Boolean call() throws Exception {
<add> mbs.removeNotificationListener(objectName, listener);
<add> return true;
<add> }
<add> };
<add> }
<add>
<add> @Override
<add> public Callable<Boolean> remoteTask(final NodeMBean child) {
<add> return new Callable<Boolean>() {
<add> @Override
<add> public Boolean call() throws Exception {
<add> child.removeNotificationListener(pattern, queryExpr,
<add> listener);
<add> return true;
<add> }
<add> };
<add> }
<add> };
<add> List<Future<Boolean>> futures = forAll(completionService, generator,
<add> pattern, queryExpr);
<add> for (int i = 0; i < futures.size(); i++) {
<add> try {
<add> if (completionService.take().get()) {
<add> for (Future<Boolean> future : futures) {
<add> future.cancel(true);
<add> }
<add> return;
<add> }
<add> } catch (InterruptedException e) {
<add> return;
<add> } catch (ExecutionException e) {
<add> log.debug(String.format("%s experienced exception when removing notification listener %s, %s, %s",
<add> this, pattern, queryExpr, listener), e);
<add> }
<add> }
<add> throw new InstanceNotFoundException(
<add> String.format("Instance not found: %s, %s",
<add> pattern, queryExpr));
<ide> }
<ide>
<ide> /* (non-Javadoc)
<ide> * @see com.hellblazer.groo.NodeMXBean#removeNotificationListener(javax.management.ObjectName, javax.management.QueryExp, javax.management.ObjectName, javax.management.NotificationFilter, java.lang.Object)
<ide> */
<ide> @Override
<del> public void removeNotificationListener(ObjectName name, QueryExp queryExpr,
<del> ObjectName listener,
<del> NotificationFilter filter,
<del> Object handback)
<del> throws InstanceNotFoundException,
<del> ListenerNotFoundException,
<del> IOException {
<del> boolean success = false;
<del> for (NodeMBean child : children) {
<del> try {
<del> child.removeNotificationListener(name, queryExpr, listener,
<del> filter, handback);
<del> success = true;
<del> } catch (InstanceNotFoundException e) {
<del> // continue
<del> }
<del> }
<del> Set<ObjectName> names = mbs.queryNames(name, queryExpr);
<del> if (!success && names.size() == 0) {
<del> throw new InstanceNotFoundException(
<del> String.format("No instance found for %s, %s",
<del> name, queryExpr));
<del> }
<del> for (ObjectName n : mbs.queryNames(name, queryExpr)) {
<del> mbs.removeNotificationListener(n, listener, filter, handback);
<del> }
<add> public void removeNotificationListener(final ObjectName pattern,
<add> final QueryExp queryExpr,
<add> final ObjectName listener,
<add> final NotificationFilter filter,
<add> final Object handback)
<add> throws InstanceNotFoundException,
<add> ListenerNotFoundException,
<add> IOException {
<add> ExecutorCompletionService<Boolean> completionService = new ExecutorCompletionService<>(
<add> executor);
<add> TaskGenerator<Boolean> generator = new TaskGenerator<Boolean>() {
<add> @Override
<add> public Callable<Boolean> localTask(final ObjectName objectName) {
<add> return new Callable<Boolean>() {
<add> @Override
<add> public Boolean call() throws Exception {
<add> mbs.removeNotificationListener(objectName, listener,
<add> filter, handback);
<add> return true;
<add> }
<add> };
<add> }
<add>
<add> @Override
<add> public Callable<Boolean> remoteTask(final NodeMBean child) {
<add> return new Callable<Boolean>() {
<add> @Override
<add> public Boolean call() throws Exception {
<add> child.removeNotificationListener(pattern, queryExpr,
<add> listener, filter,
<add> handback);
<add> return true;
<add> }
<add> };
<add> }
<add> };
<add> List<Future<Boolean>> futures = forAll(completionService, generator,
<add> pattern, queryExpr);
<add> for (int i = 0; i < futures.size(); i++) {
<add> try {
<add> if (completionService.take().get()) {
<add> for (Future<Boolean> future : futures) {
<add> future.cancel(true);
<add> }
<add> return;
<add> }
<add> } catch (InterruptedException e) {
<add> return;
<add> } catch (ExecutionException e) {
<add> log.debug(String.format("%s experienced exception when removing notification listener %s, %s, %s, %s, %s",
<add> this, pattern, queryExpr, listener,
<add> filter, handback), e);
<add> }
<add> }
<add> throw new InstanceNotFoundException(
<add> String.format("Instance not found: %s, %s",
<add> pattern, queryExpr));
<ide> }
<ide>
<ide> /**
<ide> * javax.management.Attribute)
<ide> */
<ide> @Override
<del> public void setAttribute(ObjectName name, Attribute attribute)
<del> throws InstanceNotFoundException,
<del> AttributeNotFoundException,
<del> InvalidAttributeValueException,
<del> MBeanException,
<del> ReflectionException,
<del> IOException {
<del> for (NodeMBean child : children) {
<del> try {
<del> child.setAttribute(name, attribute);
<add> public void setAttribute(final ObjectName objectName,
<add> final Attribute attribute)
<add> throws InstanceNotFoundException,
<add> AttributeNotFoundException,
<add> InvalidAttributeValueException,
<add> MBeanException,
<add> ReflectionException,
<add> IOException {
<add> ExecutorCompletionService<Void> completionService = new ExecutorCompletionService<>(
<add> executor);
<add> TaskGenerator<Void> generator = new TaskGenerator<Void>() {
<add> @Override
<add> public Callable<Void> localTask(final ObjectName objectName) {
<add> return new Callable<Void>() {
<add> @Override
<add> public Void call() throws Exception {
<add> mbs.setAttribute(objectName, attribute);
<add> return null;
<add> }
<add> };
<add> }
<add>
<add> @Override
<add> public Callable<Void> remoteTask(final NodeMBean child) {
<add> return new Callable<Void>() {
<add> @Override
<add> public Void call() throws Exception {
<add> child.setAttribute(objectName, attribute);
<add> return null;
<add> }
<add> };
<add> }
<add> };
<add> List<Future<Void>> futures = forAll(completionService, generator,
<add> objectName);
<add>
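<add> // remember when a node knows the MBean but not the attribute, so the most specific exception is rethrown below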
<add> boolean attributeNotFound = false;
<add> for (int i = 0; i < futures.size(); i++) {
<add> try {
<add> completionService.take().get();
<add> for (Future<Void> future : futures) {
<add> future.cancel(true);
<add> }
<ide> return;
<del> } catch (InstanceNotFoundException e) {
<del> // ignored
<del> }
<del> }
<del> mbs.setAttribute(name, attribute);
<add> } catch (InterruptedException e) {
<add> return;
<add> } catch (ExecutionException e) {
<add> if (e.getCause() instanceof AttributeNotFoundException) {
<add> attributeNotFound = true;
<add> } else {
<add> log.debug(String.format("%s experienced exception when setting attribute %s, %s",
<add> this, objectName, attribute), e);
<add> }
<add> }
<add> }
<add> if (attributeNotFound) {
<add> throw new AttributeNotFoundException(
<add> String.format("Attribute not found: %s for %s",
<add> attribute,
<add> objectName));
<add> } else {
<add> throw new InstanceNotFoundException(
<add> String.format("Instance not found: %s",
<add> objectName));
<add> }
<ide> }
<ide>
<ide> /* (non-Javadoc)
<ide> * @see com.hellblazer.groo.NodeMXBean#setAttribute(javax.management.ObjectName, javax.management.QueryExp, javax.management.Attribute)
<ide> */
<ide> @Override
<del> public void setAttribute(ObjectName name, QueryExp queryExpr,
<del> Attribute attribute)
<del> throws InstanceNotFoundException,
<del> AttributeNotFoundException,
<del> InvalidAttributeValueException,
<del> MBeanException,
<del> ReflectionException,
<del> IOException {
<del> for (NodeMBean child : children) {
<del> child.setAttribute(name, queryExpr, attribute);
<del> }
<del> for (ObjectName instance : mbs.queryNames(name, queryExpr)) {
<del> mbs.setAttribute(instance, attribute);
<del> }
<add> public void setAttribute(final ObjectName pattern,
<add> final QueryExp queryExpr, final Attribute attribute)
<add> throws InstanceNotFoundException,
<add> AttributeNotFoundException,
<add> InvalidAttributeValueException,
<add> MBeanException,
<add> ReflectionException,
<add> IOException {
<add> ExecutorCompletionService<Boolean> completionService = new ExecutorCompletionService<>(
<add> executor);
<add> TaskGenerator<Boolean> generator = new TaskGenerator<Boolean>() {
<add> @Override
<add> public Callable<Boolean> localTask(final ObjectName objectName) {
<add> return new Callable<Boolean>() {
<add> @Override
<add> public Boolean call() throws Exception {
<add> mbs.setAttribute(objectName, attribute);
<add> return true;
<add> }
<add> };
<add> }
<add>
<add> @Override
<add> public Callable<Boolean> remoteTask(final NodeMBean child) {
<add> return new Callable<Boolean>() {
<add> @Override
<add> public Boolean call() throws Exception {
<add> child.setAttribute(pattern, queryExpr, attribute);
<add> return true;
<add> }
<add> };
<add> }
<add> };
<add> List<Future<Boolean>> futures = forAll(completionService, generator,
<add> pattern, queryExpr);
<add> for (int i = 0; i < futures.size(); i++) {
<add> try {
<add> completionService.take().get();
<add> return;
<add> } catch (InterruptedException e) {
<add> return;
<add> } catch (ExecutionException e) {
<add> log.debug(String.format("%s experienced exception when setting attribute %s, %s, %s",
<add> this, pattern, queryExpr, attribute), e);
<add> }
<add> }
<add> throw new InstanceNotFoundException(
<add> String.format("Instance not found: %s, %s",
<add> pattern, queryExpr));
<ide> }
<ide>
<ide> /**
<ide> * javax.management.AttributeList)
<ide> */
<ide> @Override
<del> public AttributeList setAttributes(ObjectName name, AttributeList attributes)
<del> throws InstanceNotFoundException,
<del> ReflectionException,
<del> IOException {
<del> for (NodeMBean child : children) {
<del> AttributeList list = child.setAttributes(name, attributes);
<del> if (list != null) {
<del> return list;
<del> }
<del> }
<del> return mbs.setAttributes(name, attributes);
<add> public AttributeList setAttributes(final ObjectName objectName,
<add> final AttributeList attributes)
<add> throws InstanceNotFoundException,
<add> ReflectionException,
<add> IOException {
<add> ExecutorCompletionService<AttributeList> completionService = new ExecutorCompletionService<>(
<add> executor);
<add> TaskGenerator<AttributeList> generator = new TaskGenerator<AttributeList>() {
<add> @Override
<add> public Callable<AttributeList> localTask(final ObjectName objectName) {
<add> return new Callable<AttributeList>() {
<add> @Override
<add> public AttributeList call() throws Exception {
<add> return mbs.setAttributes(objectName, attributes);
<add> }
<add> };
<add> }
<add>
<add> @Override
<add> public Callable<AttributeList> remoteTask(final NodeMBean child) {
<add> return new Callable<AttributeList>() {
<add> @Override
<add> public AttributeList call() throws Exception {
<add> return child.setAttributes(objectName, attributes);
<add> }
<add> };
<add> }
<add> };
<add> List<Future<AttributeList>> futures = forAll(completionService,
<add> generator, objectName);
<add>
<add> for (int i = 0; i < futures.size(); i++) {
<add> try {
<add> AttributeList attrs = completionService.take().get();
<add> for (Future<AttributeList> future : futures) {
<add> future.cancel(true);
<add> }
<add> return attrs;
<add> } catch (InterruptedException e) {
<add> return new AttributeList();
<add> } catch (ExecutionException e) {
<add> log.debug(String.format("%s experienced exception when setting attributes %s, %s",
<add> this, objectName, attributes), e);
<add> }
<add> }
<add> throw new InstanceNotFoundException(
<add> String.format("Instance not found: %s",
<add> objectName));
<ide> }
<ide>
<ide> /* (non-Javadoc)
<ide> * @see com.hellblazer.groo.NodeMXBean#setAttributes(javax.management.ObjectName, javax.management.QueryExp, javax.management.AttributeList)
<ide> */
<ide> @Override
<del> public Map<ObjectName, AttributeList> setAttributes(ObjectName name,
<del> QueryExp queryExpr,
<del> AttributeList attributes)
<del> throws InstanceNotFoundException,
<del> ReflectionException,
<del> IOException {
<del> Map<ObjectName, AttributeList> results = new HashMap<>();
<del> for (NodeMBean child : children) {
<del> results.putAll(child.setAttributes(name, queryExpr, attributes));
<del> }
<del> for (ObjectName instance : mbs.queryNames(name, queryExpr)) {
<del> results.put(instance, mbs.setAttributes(instance, attributes));
<del> }
<del> return results;
<add> public Map<ObjectName, AttributeList> setAttributes(final ObjectName pattern,
<add> final QueryExp queryExpr,
<add> final AttributeList attributes)
<add> throws InstanceNotFoundException,
<add> ReflectionException,
<add> IOException {
<add> Map<ObjectName, AttributeList> attrs = new HashMap<>();
<add> ExecutorCompletionService<Map<ObjectName, AttributeList>> completionService = new ExecutorCompletionService<>(
<add> executor);
<add> TaskGenerator<Map<ObjectName, AttributeList>> generator = new TaskGenerator<Map<ObjectName, AttributeList>>() {
<add> @Override
<add> public Callable<Map<ObjectName, AttributeList>> localTask(final ObjectName objectName) {
<add> return new Callable<Map<ObjectName, AttributeList>>() {
<add> @Override
<add> public Map<ObjectName, AttributeList> call()
<add> throws Exception {
<add> Map<ObjectName, AttributeList> attrs = new HashMap<>();
<add> attrs.put(objectName,
<add> mbs.setAttributes(objectName, attributes));
<add> return attrs;
<add> }
<add> };
<add> }
<add>
<add> @Override
<add> public Callable<Map<ObjectName, AttributeList>> remoteTask(final NodeMBean child) {
<add> return new Callable<Map<ObjectName, AttributeList>>() {
<add> @Override
<add> public Map<ObjectName, AttributeList> call()
<add> throws Exception {
<add> return child.setAttributes(pattern, queryExpr,
<add> attributes);
<add> }
<add> };
<add> }
<add> };
<add> List<Future<Map<ObjectName, AttributeList>>> futures = forAll(completionService,
<add> generator,
<add> pattern,
<add> queryExpr);
<add> for (int i = 0; i < futures.size(); i++) {
<add> try {
<add> attrs.putAll(completionService.take().get());
<add> } catch (InterruptedException e) {
<add> return Collections.emptyMap();
<add> } catch (ExecutionException e) {
<add> log.debug(String.format("%s experienced exception when setting attributes %s, %s, %s",
<add> this, pattern, queryExpr,
<add> Arrays.asList(attributes)), e);
<add> }
<add> }
<add> if (attrs.size() == 0) {
<add> throw new InstanceNotFoundException(
<add> String.format("Instance not found: %s, %s",
<add> pattern,
<add> queryExpr));
<add> }
<add> return attrs;
<ide> }
<ide>
<ide> /* (non-Javadoc) |
|
JavaScript | bsd-3-clause | bef64582f594dcebee0301939d142c168ed2a725 | 0 | visor841/SkelScratch,Calvin-CS/SkelScratch | (function(ext) {
var firstTime = true;
var boolean = true;
var jsonObject = null;
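// scale factors mapping Kinect camera-space coordinates (meters) onto Scratch stage units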
var xScale = 280;
var yScale = 210;
var zScale = 200;
var status = 0;
alert("BEFORE CLICKING OK: Make sure the kinect is on and KinectinScratchServer has started");
var wsImpl = window.WebSocket || window.MozWebSocket;
console.log("connecting to server ..");
// create a new websocket and connect
window.ws = new wsImpl('ws://153.106.117.84:8181/');
// when data is coming from the server, this method is called
ws.onmessage = function (evt) {
jsonObject = JSON.parse(evt.data);
if(jsonObject.bodies == '')
{
status = 1;
} else
{
status = 2;
}
};
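// Expected message shape, inferred from the handlers in this file (a sketch, not a spec):
// { "bodies": [ { "id": <number>, "lhandstate": 0-4, "rhandstate": 0-4,
//                 "joints": [ { "x": ..., "y": ..., "z": ... }, ... 25 entries ] }, ... up to 6 bodies ] }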
// when the connection is established, this method is called
ws.onopen = function () {
console.log('.. connection open');
};
// when the connection is closed, this method is called
ws.onclose = function () {
console.log('.. connection closed');
status = 0;
};
// Cleanup function when the extension is unloaded
ext._shutdown = function() {};
// Status reporting code
// Use this to report missing hardware, plugin or unsupported browser
ext._getStatus = function() {
if(status == 0)
{
return {status: 0, msg: 'Kinect is not connected to Scratch'};
//polling function for auto-reconnect should go here
}
if(status == 1)
{
return {status: 1, msg: 'Kinect is connected, but is not detecting any bodies'};
}
if(status == 2)
{
return {status: 2, msg: 'Kinect is sending body data'};
}
};
// Block and block menu descriptions
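// Each entry is [opcode, label, handler, defaults...]: '' is a command, 'r' a reporter,
// 'b' a boolean; %n, %s and %m.<menu> mark number, string and menu parameters.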
var descriptor = {
blocks: [
['', 'My First Block', 'my_first_block'],
['r', '%n ^ %n', 'power', 2, 3],
['r', '%m.k body 1 sensor value', 'k', 'Head X'],
['r', '%m.k body 2 sensor value', 'k1', 'Head X'],
['r', '%m.l %m.k1 %m.x', 'joints', 'Body 1', 'Head', 'x'],
['', 'restart local connection', 'restart'],
['', 'Create connection to %s', 'ipconnect', '0.0.0.0'],
['', 'Close connection', 'closeconn'],
['', 'test block', 'test_block'],
['b', 'connected', 'connected'],
['b', '%m.l tracked', 'tracked', 'Body 1'],
['', 'console.log %n', 'write'],
['', 'bad only %n', 'writeB'],
['r', '%m.l id', 'l', 'Body 1'],
['r', '%m.l Left Handstate', 'lhandd', 'Body 1'],
['b', '%m.l Left Handstate is %m.n', 'lhand', 'Body 1', 'Closed'],
['b', '%m.l Right Handstate is %m.n', 'rhand', 'Body 1', 'Closed']
],
menus: {
k: ['Left Ankle X', 'Left Ankle Y', 'Right Ankle X', 'Right Ankle Y', 'Left Elbow X', 'Left Elbow Y', 'Right Elbow X', 'Right Elbow Y', 'Left Foot X', 'Left Foot Y', 'Right Foot X', 'Right Foot Y', 'Left Hand X', 'Left Hand Y', 'Right Hand X', 'Right Hand Y', 'Left Hand Tip X', 'Left Hand Tip Y', 'Right Hand Tip X', 'Right Hand Tip Y', 'Head X', 'Head Y', 'Left Hip X', 'Left Hip Y', 'Right Hip X', 'Right Hip Y', 'Left Knee X', 'Left Knee Y', 'Right Knee X', 'Right Knee Y', 'Neck X', 'Neck Y', 'Left Shoulder X', 'Left Shoulder Y', 'Right Shoulder X', 'Right Shoulder Y', 'Spine Base X', 'Spine Base Y', 'Spine Middle X', 'Spine Middle Y', 'Spine Shoulder X', 'Spine Shoulder Y', 'Left Thumb X', 'Left Thumb Y', 'Right Thumb X', 'Right Thumb Y', 'Left Wrist X', 'Left Wrist Y', 'Right Wrist X', 'Right Wrist Y'],
k1: ['Left Ankle', 'Right Ankle', 'Left Elbow', 'Right Elbow', 'Left Foot', 'Right Foot', 'Left Hand', 'Right Hand', 'Left Hand Tip', 'Right Hand Tip', 'Head', 'Left Hip', 'Right Hip', 'Left Knee', 'Right Knee', 'Neck', 'Left Shoulder', 'Right Shoulder', 'Spine Base', 'Spine Middle', 'Spine Shoulder', 'Left Thumb', 'Right Thumb', 'Left Wrist', 'Right Wrist'],
l: ['Body 1', 'Body 2', 'Body 3', 'Body 4', 'Body 5', 'Body 6'],
n: ['Unknown', 'Not Tracked', 'Open', 'Closed', 'Lasso'],
x: ['x', 'y', 'z'],
}
};
ext.my_first_block = function() {
console.log("My first block");
};
//restarts the client side of the server
ext.restart = function() {
window.ws.close();
console.log("connecting to local server ..");
window.ws = new wsImpl('ws://localhost:8181/');
// when data is coming from the server, this method is called
ws.onmessage = function (evt) {
jsonObject = JSON.parse(evt.data);
if(jsonObject.bodies == '')
{
status = 1;
} else
{
status = 2;
}
};
// when the connection is established, this method is called
ws.onopen = function () {
console.log('.. connection open');
};
// when the connection is closed, this method is called
ws.onclose = function () {
console.log('.. connection closed');
status = 0;
};
};
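//connects to a KinectinScratchServer at the given IP address on port 8181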
ext.ipconnect = function(s) {
window.ws.close();
console.log("connecting to "+s+' ..');
window.ws = new wsImpl('ws://'+s+':8181/');
// when data is coming from the server, this method is called
ws.onmessage = function (evt) {
jsonObject = JSON.parse(evt.data);
if(jsonObject.bodies == '')
{
status = 1;
} else
{
status = 2;
}
};
// when the connection is established, this method is called
ws.onopen = function () {
console.log('.. connection open');
};
// when the connection is closed, this method is called
ws.onclose = function () {
console.log('.. connection closed');
status = 0;
};
}
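//closes the websocket connection to the server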
ext.closeconn = function() {
window.ws.close();
}
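//sample reporter block: returns base raised to exponent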
ext.power = function(base, exponent) {
return Math.pow(base, exponent);
};
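//logs body 1's head x-coordinate (joint 3), scaled to the stage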
ext.test_block = function() {
console.log(jsonObject.bodies[0].joints[3].x*xScale);
};
//True if Scratch is connected to the server (but not necessarily receiving body data)
ext.connected = function()
{
if(status == 0){
return false;
}
if(status == 1 || status == 2){
return true;
}
};
//True if the selected body is currently being tracked by the Kinect
ext.tracked = function(m)
{
var i = -1;
switch(m){
case 'Body 1': i = 0;
break;
case 'Body 2': i = 1;
break;
case 'Body 3': i = 2;
break;
case 'Body 4': i = 3;
break;
case 'Body 5': i = 4;
break;
case 'Body 6': i = 5;
break;
}
return jsonObject.bodies[i].id != 0;
};
//Outputs numeric content to console
ext.write = function(m){
console.log(m);
};
//Writes "bad" in console if the input is 0
ext.writeB = function(m){
if(m == 0)
{
console.log("bad");
}
};
//Gives the id of the selected body
ext.l = function(m)
{
switch(m){
case 'Body 1': return jsonObject.bodies[0].id;
case 'Body 2': return jsonObject.bodies[1].id;
case 'Body 3': return jsonObject.bodies[2].id;
case 'Body 4': return jsonObject.bodies[3].id;
case 'Body 5': return jsonObject.bodies[4].id;
case 'Body 6': return jsonObject.bodies[5].id;
}
}
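//Reports the left handstate code of the selected body (0 Unknown, 1 Not Tracked, 2 Open, 3 Closed, 4 Lasso)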
ext.lhandd = function(l)
{
var i;
switch(l){
case 'Body 1': i=0;
break;
case 'Body 2': i=1;
break;
case 'Body 3': i=2;
break;
case 'Body 4': i=3;
break;
case 'Body 5': i=4;
break;
case 'Body 6': i=5;
break;
}
return jsonObject.bodies[i].lhandstate;
}
//Returns true if the selected body's left handstate is the same as the block-selected one.
ext.lhand = function(l,n)
{
var i;
var j;
switch(l){
case 'Body 1': i=0;
break;
case 'Body 2': i=1;
break;
case 'Body 3': i=2;
break;
case 'Body 4': i=3;
break;
case 'Body 5': i=4;
break;
case 'Body 6': i=5;
break;
}
switch(n)
{
case 'Unknown': j = 0;
break;
case 'Not Tracked': j = 1;
break;
case 'Open': j = 2;
break;
case 'Closed': j = 3;
break;
case 'Lasso': j = 4;
break;
}
return jsonObject.bodies[i].lhandstate == j;
}
//Returns true if the selected body's right handstate is the same as the block-selected one.
ext.rhand = function(l,n)
{
var i;
var j;
switch(l){
case 'Body 1': i=0;
break;
case 'Body 2': i=1;
break;
case 'Body 3': i=2;
break;
case 'Body 4': i=3;
break;
case 'Body 5': i=4;
break;
case 'Body 6': i=5;
break;
}
switch(n)
{
case 'Unknown': j = 0;
break;
case 'Not Tracked': j = 1;
break;
case 'Open': j = 2;
break;
case 'Closed': j = 3;
break;
case 'Lasso': j = 4;
break;
}
return jsonObject.bodies[i].rhandstate == j;
}
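//Reports the x, y, or z coordinate of the selected joint for the selected body, scaled to the stage; joint indices follow the Kinect v2 JointType order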
ext.joints = function(l,k1,x)
{
var a;
var b;
switch(k1){
case 'Left Ankle': a=14;
break;
case 'Right Ankle': a=18;
break;
case 'Left Elbow': a=5;
break;
case 'Right Elbow': a=9;
break;
case 'Left Foot': a=15;
break;
case 'Right Foot': a=19;
break;
case 'Left Hand': a=7;
break;
case 'Right Hand': a=11;
break;
case 'Left Hand Tip': a=21;
break;
case 'Right Hand Tip': a=23;
break;
case 'Head': a=3;
break;
case 'Left Hip': a=12;
break;
case 'Right Hip': a=16;
break;
case 'Left Knee': a=13;
break;
case 'Right Knee': a=17;
break;
case 'Neck': a=2;
break;
case 'Left Shoulder': a=4;
break;
case 'Right Shoulder': a=8;
break;
case 'Spine Base': a=0;
break;
case 'Spine Middle': a=1;
break;
case 'Spine Shoulder': a=20;
break;
case 'Left Thumb': a=22;
break;
case 'Right Thumb': a=24;
break;
case 'Left Wrist': a=6;
break;
case 'Right Wrist': a=10;
break;
}
switch(l){
case 'Body 1': b=0;
break;
case 'Body 2': b=1;
break;
case 'Body 3': b=2;
break;
case 'Body 4': b=3;
break;
case 'Body 5': b=4;
break;
case 'Body 6': b=5;
break;
}
switch(x){
case 'x': return jsonObject.bodies[b].joints[a].x*xScale;
case 'y': return jsonObject.bodies[b].joints[a].y*yScale;
case 'z': return jsonObject.bodies[b].joints[a].z*zScale;
}
}
//returns the selected joint's x or y for the 2nd body
ext.k1 = function(m) {
switch(m){
case 'Left Ankle X': return jsonObject.bodies[1].joints[14].x*xScale;
case 'Left Ankle Y': return jsonObject.bodies[1].joints[14].y*yScale;
case 'Right Ankle X': return jsonObject.bodies[1].joints[18].x*xScale;
case 'Right Ankle Y': return jsonObject.bodies[1].joints[18].y*yScale;
case 'Left Elbow X': return jsonObject.bodies[1].joints[5].x*xScale;
case 'Left Elbow Y': return jsonObject.bodies[1].joints[5].y*yScale;
case 'Right Elbow X': return jsonObject.bodies[1].joints[9].x*xScale;
case 'Right Elbow Y': return jsonObject.bodies[1].joints[9].y*yScale;
case 'Left Foot X': return jsonObject.bodies[1].joints[15].x*xScale;
case 'Left Foot Y': return jsonObject.bodies[1].joints[15].y*yScale;
case 'Right Foot X': return jsonObject.bodies[1].joints[19].x*xScale;
case 'Right Foot Y': return jsonObject.bodies[1].joints[19].y*yScale;
case 'Left Hand X': return jsonObject.bodies[1].joints[7].x*xScale;
case 'Left Hand Y': return jsonObject.bodies[1].joints[7].y*yScale;
case 'Right Hand X': return jsonObject.bodies[1].joints[11].x*xScale;
case 'Right Hand Y': return jsonObject.bodies[1].joints[11].y*yScale;
case 'Left Hand Tip X': return jsonObject.bodies[1].joints[21].x*xScale;
case 'Left Hand Tip Y': return jsonObject.bodies[1].joints[21].y*yScale;
case 'Right Hand Tip X': return jsonObject.bodies[1].joints[23].x*xScale;
case 'Right Hand Tip Y': return jsonObject.bodies[1].joints[23].y*yScale;
case 'Head X': return jsonObject.bodies[1].joints[3].x*xScale;
case 'Head Y': return jsonObject.bodies[1].joints[3].y*yScale;
case 'Left Hip X': return jsonObject.bodies[1].joints[12].x*xScale;
case 'Left Hip Y': return jsonObject.bodies[1].joints[12].y*yScale;
case 'Right Hip X': return jsonObject.bodies[1].joints[16].x*xScale;
case 'Right Hip Y': return jsonObject.bodies[1].joints[16].y*yScale;
case 'Left Knee X': return jsonObject.bodies[1].joints[13].x*xScale;
case 'Left Knee Y': return jsonObject.bodies[1].joints[13].y*yScale;
case 'Right Knee X': return jsonObject.bodies[1].joints[17].x*xScale;
case 'Right Knee Y': return jsonObject.bodies[1].joints[17].y*yScale;
case 'Neck X': return jsonObject.bodies[1].joints[2].x*xScale;
case 'Neck Y': return jsonObject.bodies[1].joints[2].y*yScale;
case 'Left Shoulder X': return jsonObject.bodies[1].joints[4].x*xScale;
case 'Left Shoulder Y': return jsonObject.bodies[1].joints[4].y*yScale;
case 'Right Shoulder X': return jsonObject.bodies[1].joints[8].x*xScale;
case 'Right Shoulder Y': return jsonObject.bodies[1].joints[8].y*yScale;
case 'Spine Base X': return jsonObject.bodies[1].joints[0].x*xScale;
case 'Spine Base Y': return jsonObject.bodies[1].joints[0].y*yScale;
case 'Spine Middle X': return jsonObject.bodies[1].joints[1].x*xScale;
case 'Spine Middle Y': return jsonObject.bodies[1].joints[1].y*yScale;
case 'Spine Shoulder X': return jsonObject.bodies[1].joints[20].x*xScale;
case 'Spine Shoulder Y': return jsonObject.bodies[1].joints[20].y*yScale;
case 'Left Thumb X': return jsonObject.bodies[1].joints[22].x*xScale;
case 'Left Thumb Y': return jsonObject.bodies[1].joints[22].y*yScale;
case 'Right Thumb X': return jsonObject.bodies[1].joints[24].x*xScale;
case 'Right Thumb Y': return jsonObject.bodies[1].joints[24].y*yScale;
case 'Left Wrist X': return jsonObject.bodies[1].joints[6].x*xScale;
case 'Left Wrist Y': return jsonObject.bodies[1].joints[6].y*yScale;
case 'Right Wrist X': return jsonObject.bodies[1].joints[10].x*xScale;
case 'Right Wrist Y': return jsonObject.bodies[1].joints[10].y*yScale;
}
};
//returns the selected joint's x or y for the 1st body
ext.k = function(m) {
switch(m){
case 'Left Ankle X': return jsonObject.bodies[0].joints[14].x*xScale;
case 'Left Ankle Y': return jsonObject.bodies[0].joints[14].y*yScale;
case 'Right Ankle X': return jsonObject.bodies[0].joints[18].x*xScale;
case 'Right Ankle Y': return jsonObject.bodies[0].joints[18].y*yScale;
case 'Left Elbow X': return jsonObject.bodies[0].joints[5].x*xScale;
case 'Left Elbow Y': return jsonObject.bodies[0].joints[5].y*yScale;
case 'Right Elbow X': return jsonObject.bodies[0].joints[9].x*xScale;
case 'Right Elbow Y': return jsonObject.bodies[0].joints[9].y*yScale;
case 'Left Foot X': return jsonObject.bodies[0].joints[15].x*xScale;
case 'Left Foot Y': return jsonObject.bodies[0].joints[15].y*yScale;
case 'Right Foot X': return jsonObject.bodies[0].joints[19].x*xScale;
case 'Right Foot Y': return jsonObject.bodies[0].joints[19].y*yScale;
case 'Left Hand X': return jsonObject.bodies[0].joints[7].x*xScale;
case 'Left Hand Y': return jsonObject.bodies[0].joints[7].y*yScale;
case 'Right Hand X': return jsonObject.bodies[0].joints[11].x*xScale;
case 'Right Hand Y': return jsonObject.bodies[0].joints[11].y*yScale;
case 'Left Hand Tip X': return jsonObject.bodies[0].joints[21].x*xScale;
case 'Left Hand Tip Y': return jsonObject.bodies[0].joints[21].y*yScale;
case 'Right Hand Tip X': return jsonObject.bodies[0].joints[23].x*xScale;
case 'Right Hand Tip Y': return jsonObject.bodies[0].joints[23].y*yScale;
case 'Head X': return jsonObject.bodies[0].joints[3].x*xScale;
case 'Head Y': return jsonObject.bodies[0].joints[3].y*yScale;
case 'Left Hip X': return jsonObject.bodies[0].joints[12].x*xScale;
case 'Left Hip Y': return jsonObject.bodies[0].joints[12].y*yScale;
case 'Right Hip X': return jsonObject.bodies[0].joints[16].x*xScale;
case 'Right Hip Y': return jsonObject.bodies[0].joints[16].y*yScale;
case 'Left Knee X': return jsonObject.bodies[0].joints[13].x*xScale;
case 'Left Knee Y': return jsonObject.bodies[0].joints[13].y*yScale;
case 'Right Knee X': return jsonObject.bodies[0].joints[17].x*xScale;
case 'Right Knee Y': return jsonObject.bodies[0].joints[17].y*yScale;
case 'Neck X': return jsonObject.bodies[0].joints[2].x*xScale;
case 'Neck Y': return jsonObject.bodies[0].joints[2].y*yScale;
case 'Left Shoulder X': return jsonObject.bodies[0].joints[4].x*xScale;
case 'Left Shoulder Y': return jsonObject.bodies[0].joints[4].y*yScale;
case 'Right Shoulder X': return jsonObject.bodies[0].joints[8].x*xScale;
case 'Right Shoulder Y': return jsonObject.bodies[0].joints[8].y*yScale;
case 'Spine Base X': return jsonObject.bodies[0].joints[0].x*xScale;
case 'Spine Base Y': return jsonObject.bodies[0].joints[0].y*yScale;
case 'Spine Middle X': return jsonObject.bodies[0].joints[1].x*xScale;
case 'Spine Middle Y': return jsonObject.bodies[0].joints[1].y*yScale;
case 'Spine Shoulder X': return jsonObject.bodies[0].joints[20].x*xScale;
case 'Spine Shoulder Y': return jsonObject.bodies[0].joints[20].y*yScale;
case 'Left Thumb X': return jsonObject.bodies[0].joints[22].x*xScale;
case 'Left Thumb Y': return jsonObject.bodies[0].joints[22].y*yScale;
case 'Right Thumb X': return jsonObject.bodies[0].joints[24].x*xScale;
case 'Right Thumb Y': return jsonObject.bodies[0].joints[24].y*yScale;
case 'Left Wrist X': return jsonObject.bodies[0].joints[6].x*xScale;
case 'Left Wrist Y': return jsonObject.bodies[0].joints[6].y*yScale;
case 'Right Wrist X': return jsonObject.bodies[0].joints[10].x*xScale;
case 'Right Wrist Y': return jsonObject.bodies[0].joints[10].y*yScale;
}
};
// Register the extension
ScratchExtensions.register('KinectinScratch', descriptor, ext);
})({}); | KinectinScratch.js | (function(ext) {
var firstTime = true;
var boolean = true;
var jsonObject = null;
var xScale = 280;
var yScale = 210;
var zScale = 200;
var status = 0;
alert("BEFORE CLICKING OK: Make sure the kinect is on and KinectinScratchServer has started");
var wsImpl = window.WebSocket || window.MozWebSocket;
console.log("connecting to server ..");
// create a new websocket and connect
window.ws = new wsImpl('ws://153.106.117.84:8181/');
// when data is coming from the server, this method is called
ws.onmessage = function (evt) {
jsonObject = JSON.parse(evt.data);
if(jsonObject.bodies == '')
{
status = 1;
} else
{
status = 2;
}
};
// when the connection is established, this method is called
ws.onopen = function () {
console.log('.. connection open');
};
// when the connection is closed, this method is called
ws.onclose = function () {
console.log('.. connection closed');
status = 0;
};
// Cleanup function when the extension is unloaded
ext._shutdown = function() {};
// Status reporting code
// Use this to report missing hardware, plugin or unsupported browser
ext._getStatus = function() {
if(status == 0)
{
return {status: 0, msg: 'Kinect is not connected to Scratch'};
//polling function for auto-reconnect should go here
}
if(status == 1)
{
return {status: 1, msg: 'Kinect is connected, but is not detecting any bodies'};
}
if(status == 2)
{
return {status: 2, msg: 'Kinect is sending body data'};
}
};
// Block and block menu descriptions
var descriptor = {
blocks: [
['', 'My First Block', 'my_first_block'],
['r', '%n ^ %n', 'power', 2, 3],
['r', '%m.k body 1 sensor value', 'k', 'Head X'],
['r', '%m.k body 2 sensor value', 'k1', 'Head X'],
['r', '%m.l %m.k1 %m.x', 'joints', 'Body 1', 'Head', 'x'],
['', 'restart local connection', 'restart'],
['', 'Create connection to %s', 'ipconnect', '0.0.0.0'],
['', 'Close connection', 'closeconn'],
['', 'test block', 'test_block'],
['b', 'connected', 'connected'],
['b', '%m.l tracked', 'tracked', 'Body 1'],
['', 'console.log %n', 'write'],
['', 'bad only %n', 'writeB'],
['r', '%m.l id', 'l', 'Body 1'],
['r', '%m.l Left Handstate', 'lhandd', 'Body 1'],
['b', '%m.l Left Handstate is %m.n', 'lhand', 'Body 1', 'Closed'],
['b', '%m.l Right Handstate is %m.n', 'rhand', 'Body 1', 'Closed']
],
menus: {
k: ['Left Ankle X', 'Left Ankle Y', 'Right Ankle X', 'Right Ankle Y', 'Left Elbow X', 'Left Elbow Y', 'Right Elbow X', 'Right Elbow Y', 'Left Foot X', 'Left Foot Y', 'Right Foot X', 'Right Foot Y', 'Left Hand X', 'Left Hand Y', 'Right Hand X', 'Right Hand Y', 'Left Hand Tip X', 'Left Hand Tip Y', 'Right Hand Tip X', 'Right Hand Tip Y', 'Head X', 'Head Y', 'Left Hip X', 'Left Hip Y', 'Right Hip X', 'Right Hip Y', 'Left Knee X', 'Left Knee Y', 'Right Knee X', 'Right Knee Y', 'Neck X', 'Neck Y', 'Left Shoulder X', 'Left Shoulder Y', 'Right Shoulder X', 'Right Shoulder Y', 'Spine Base X', 'Spine Base Y', 'Spine Middle X', 'Spine Middle Y', 'Spine Shoulder X', 'Spine Shoulder Y', 'Left Thumb X', 'Left Thumb Y', 'Right Thumb X', 'Right Thumb Y', 'Left Wrist X', 'Left Wrist Y', 'Right Wrist X', 'Right Wrist Y'],
k1: ['Left Ankle', 'Right Ankle', 'Left Elbow', 'Right Elbow', 'Left Foot', 'Right Foot', 'Left Hand', 'Right Hand', 'Left Hand Tip', 'Right Hand Tip', 'Head', 'Left Hip', 'Right Hip', 'Left Knee', 'Right Knee', 'Neck', 'Left Shoulder', 'Right Shoulder', 'Spine Base', 'Spine Middle', 'Spine Shoulder', 'Left Thumb', 'Right Thumb', 'Left Wrist', 'Right Wrist'],
l: ['Body 1', 'Body 2', 'Body 3', 'Body 4', 'Body 5', 'Body 6'],
n: ['Unknown', 'Not Tracked', 'Open', 'Closed', 'Lasso'],
x: ['x', 'y', 'z'],
}
};
ext.my_first_block = function() {
console.log("My first block");
};
//restarts the client side of the server
ext.restart = function() {
window.ws.close();
console.log("connecting to local server ..");
window.ws = new wsImpl('ws://localhost:8181/');
};
ext.ipconnect = function(s) {
window.ws.close();
console.log("connecting to "+s+' ..');
window.ws = new wsImpl('ws://'+s+':8181/');
}
ext.closeconn = function() {
window.ws.close();
}
ext.power = function(base, exponent) {
return Math.pow(base, exponent);
};
ext.test_block = function() {
console.log(jsonObject.bodies[0].joints[3].x*xScale);
};
//True if Scratch is connected to the server (but not necessarily receiving body data)
ext.connected = function()
{
if(status == 0){
return false;
}
if(status == 1 || status == 2){
return true;
}
};
//True if the selected body is currently being tracked by the Kinect
ext.tracked = function(m)
{
var i = -1;
switch(m){
case 'Body 1': i = 0;
break;
case 'Body 2': i = 1;
break;
case 'Body 3': i = 2;
break;
case 'Body 4': i = 3;
break;
case 'Body 5': i = 4;
break;
case 'Body 6': i = 5;
break;
}
return jsonObject.bodies[i].id != 0;
};
//Outputs numeric content to console
ext.write = function(m){
console.log(m);
};
//Writes "bad" in console if the input is 0
ext.writeB = function(m){
if(m == 0)
{
console.log("bad");
}
};
//Gives the id of the selected body
ext.l = function(m)
{
switch(m){
case 'Body 1': return jsonObject.bodies[0].id;
case 'Body 2': return jsonObject.bodies[1].id;
case 'Body 3': return jsonObject.bodies[2].id;
case 'Body 4': return jsonObject.bodies[3].id;
case 'Body 5': return jsonObject.bodies[4].id;
case 'Body 6': return jsonObject.bodies[5].id;
}
}
ext.lhandd = function(l)
{
var i;
var j;
switch(l){
case 'Body 1': i=0;
break;
case 'Body 2': i=1;
break;
case 'Body 3': i=2;
break;
case 'Body 4': i=3;
break;
case 'Body 5': i=4;
break;
case 'Body 6': i=5;
break;
}
return jsonObject.bodies[i].lhandstate;
}
//Returns true if the selected body's left handstate is the same as the block-selected one.
ext.lhand = function(l,n)
{
var i;
var j;
switch(l){
case 'Body 1': i=0;
break;
case 'Body 2': i=1;
break;
case 'Body 3': i=2;
break;
case 'Body 4': i=3;
break;
case 'Body 5': i=4;
break;
case 'Body 6': i=5;
break;
}
switch(n)
{
case 'Unknown': j = 0;
break;
case 'Not Tracked': j = 1;
break;
case 'Open': j = 2;
break;
case 'Closed': j = 3;
break;
case 'Lasso': j = 4;
break;
}
return jsonObject.bodies[i].lhandstate == j;
}
//Returns true if the selected body's right handstate is the same as the block-selected one.
ext.rhand = function(l,n)
{
var i;
var j;
switch(l){
case 'Body 1': i=0;
break;
case 'Body 2': i=1;
break;
case 'Body 3': i=2;
break;
case 'Body 4': i=3;
break;
case 'Body 5': i=4;
break;
case 'Body 6': i=5;
break;
}
switch(n)
{
case 'Unknown': j = 0;
break;
case 'Not Tracked': j = 1;
break;
case 'Open': j = 2;
break;
case 'Closed': j = 3;
break;
case 'Lasso': j = 4;
break;
}
return jsonObject.bodies[i].rhandstate == j;
}
ext.joints = function(l,k1,x)
{
var a;
var b;
switch(k1){
case 'Left Ankle': a=14;
break;
case 'Right Ankle': a=18;
break;
case 'Left Elbow': a=5;
break;
case 'Right Elbow': a=9;
break;
case 'Left Foot': a=15;
break;
case 'Right Foot': a=19;
break;
case 'Left Hand': a=7;
break;
case 'Right Hand': a=11;
break;
case 'Left Hand Tip': a=21;
break;
case 'Right Hand Tip': a=23;
break;
case 'Head': a=3;
break;
case 'Left Hip': a=12;
break;
case 'Right Hip': a=16;
break;
case 'Left Knee': a=13;
break;
case 'Right Knee': a=17;
break;
case 'Neck': a=2;
break;
case 'Left Shoulder': a=4;
break;
case 'Right Shoulder': a=8;
break;
case 'Spine Base': a=0;
break;
case 'Spine Middle': a=1;
break;
case 'Spine Shoulder': a=20;
break;
case 'Left Thumb': a=22;
break;
case 'Right Thumb': a=24;
break;
case 'Left Wrist': a=6;
break;
case 'Right Wrist': a=10;
break;
}
switch(l){
case 'Body 1': b=0;
break;
case 'Body 2': b=1;
break;
case 'Body 3': b=2;
break;
case 'Body 4': b=3;
break;
case 'Body 5': b=4;
break;
case 'Body 6': b=5;
break;
}
switch(x){
case 'x': return jsonObject.bodies[b].joints[a].x*xScale;
case 'y': return jsonObject.bodies[b].joints[a].y*yScale;
case 'z': return jsonObject.bodies[b].joints[a].z*zScale;
}
}
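    //The two reporters below are per-body shortcuts of ext.joints: ext.k1 reads
    //bodies[1] and ext.k reads bodies[0], with the same joint-index mapping.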
    //returns the selected joint's x or y for the 2nd body (bodies[1])
ext.k1 = function(m) {
switch(m){
case 'Left Ankle X': return jsonObject.bodies[1].joints[14].x*xScale;
case 'Left Ankle Y': return jsonObject.bodies[1].joints[14].y*yScale;
case 'Right Ankle X': return jsonObject.bodies[1].joints[18].x*xScale;
case 'Right Ankle Y': return jsonObject.bodies[1].joints[18].y*yScale;
case 'Left Elbow X': return jsonObject.bodies[1].joints[5].x*xScale;
case 'Left Elbow Y': return jsonObject.bodies[1].joints[5].y*yScale;
case 'Right Elbow X': return jsonObject.bodies[1].joints[9].x*xScale;
case 'Right Elbow Y': return jsonObject.bodies[1].joints[9].y*yScale;
case 'Left Foot X': return jsonObject.bodies[1].joints[15].x*xScale;
case 'Left Foot Y': return jsonObject.bodies[1].joints[15].y*yScale;
case 'Right Foot X': return jsonObject.bodies[1].joints[19].x*xScale;
case 'Right Foot Y': return jsonObject.bodies[1].joints[19].y*yScale;
case 'Left Hand X': return jsonObject.bodies[1].joints[7].x*xScale;
case 'Left Hand Y': return jsonObject.bodies[1].joints[7].y*yScale;
case 'Right Hand X': return jsonObject.bodies[1].joints[11].x*xScale;
case 'Right Hand Y': return jsonObject.bodies[1].joints[11].y*yScale;
case 'Left Hand Tip X': return jsonObject.bodies[1].joints[21].x*xScale;
case 'Left Hand Tip Y': return jsonObject.bodies[1].joints[21].y*yScale;
case 'Right Hand Tip X': return jsonObject.bodies[1].joints[23].x*xScale;
case 'Right Hand Tip Y': return jsonObject.bodies[1].joints[23].y*yScale;
case 'Head X': return jsonObject.bodies[1].joints[3].x*xScale;
case 'Head Y': return jsonObject.bodies[1].joints[3].y*yScale;
case 'Left Hip X': return jsonObject.bodies[1].joints[12].x*xScale;
case 'Left Hip Y': return jsonObject.bodies[1].joints[12].y*yScale;
case 'Right Hip X': return jsonObject.bodies[1].joints[16].x*xScale;
case 'Right Hip Y': return jsonObject.bodies[1].joints[16].y*yScale;
case 'Left Knee X': return jsonObject.bodies[1].joints[13].x*xScale;
case 'Left Knee Y': return jsonObject.bodies[1].joints[13].y*yScale;
case 'Right Knee X': return jsonObject.bodies[1].joints[17].x*xScale;
case 'Right Knee Y': return jsonObject.bodies[1].joints[17].y*yScale;
case 'Neck X': return jsonObject.bodies[1].joints[2].x*xScale;
case 'Neck Y': return jsonObject.bodies[1].joints[2].y*yScale;
case 'Left Shoulder X': return jsonObject.bodies[1].joints[4].x*xScale;
case 'Left Shoulder Y': return jsonObject.bodies[1].joints[4].y*yScale;
case 'Right Shoulder X': return jsonObject.bodies[1].joints[8].x*xScale;
case 'Right Shoulder Y': return jsonObject.bodies[1].joints[8].y*yScale;
case 'Spine Base X': return jsonObject.bodies[1].joints[0].x*xScale;
case 'Spine Base Y': return jsonObject.bodies[1].joints[0].y*yScale;
case 'Spine Middle X': return jsonObject.bodies[1].joints[1].x*xScale;
case 'Spine Middle Y': return jsonObject.bodies[1].joints[1].y*yScale;
case 'Spine Shoulder X': return jsonObject.bodies[1].joints[20].x*xScale;
case 'Spine Shoulder Y': return jsonObject.bodies[1].joints[20].y*yScale;
case 'Left Thumb X': return jsonObject.bodies[1].joints[22].x*xScale;
case 'Left Thumb Y': return jsonObject.bodies[1].joints[22].y*yScale;
case 'Right Thumb X': return jsonObject.bodies[1].joints[24].x*xScale;
case 'Right Thumb Y': return jsonObject.bodies[1].joints[24].y*yScale;
case 'Left Wrist X': return jsonObject.bodies[1].joints[6].x*xScale;
case 'Left Wrist Y': return jsonObject.bodies[1].joints[6].y*yScale;
case 'Right Wrist X': return jsonObject.bodies[1].joints[10].x*xScale;
case 'Right Wrist Y': return jsonObject.bodies[1].joints[10].y*yScale;
}
};
    //returns the selected joint's x or y for the 1st body (bodies[0])
ext.k = function(m) {
switch(m){
case 'Left Ankle X': return jsonObject.bodies[0].joints[14].x*xScale;
case 'Left Ankle Y': return jsonObject.bodies[0].joints[14].y*yScale;
case 'Right Ankle X': return jsonObject.bodies[0].joints[18].x*xScale;
case 'Right Ankle Y': return jsonObject.bodies[0].joints[18].y*yScale;
case 'Left Elbow X': return jsonObject.bodies[0].joints[5].x*xScale;
case 'Left Elbow Y': return jsonObject.bodies[0].joints[5].y*yScale;
case 'Right Elbow X': return jsonObject.bodies[0].joints[9].x*xScale;
case 'Right Elbow Y': return jsonObject.bodies[0].joints[9].y*yScale;
case 'Left Foot X': return jsonObject.bodies[0].joints[15].x*xScale;
case 'Left Foot Y': return jsonObject.bodies[0].joints[15].y*yScale;
case 'Right Foot X': return jsonObject.bodies[0].joints[19].x*xScale;
case 'Right Foot Y': return jsonObject.bodies[0].joints[19].y*yScale;
case 'Left Hand X': return jsonObject.bodies[0].joints[7].x*xScale;
case 'Left Hand Y': return jsonObject.bodies[0].joints[7].y*yScale;
case 'Right Hand X': return jsonObject.bodies[0].joints[11].x*xScale;
case 'Right Hand Y': return jsonObject.bodies[0].joints[11].y*yScale;
case 'Left Hand Tip X': return jsonObject.bodies[0].joints[21].x*xScale;
case 'Left Hand Tip Y': return jsonObject.bodies[0].joints[21].y*yScale;
case 'Right Hand Tip X': return jsonObject.bodies[0].joints[23].x*xScale;
case 'Right Hand Tip Y': return jsonObject.bodies[0].joints[23].y*yScale;
case 'Head X': return jsonObject.bodies[0].joints[3].x*xScale;
case 'Head Y': return jsonObject.bodies[0].joints[3].y*yScale;
case 'Left Hip X': return jsonObject.bodies[0].joints[12].x*xScale;
case 'Left Hip Y': return jsonObject.bodies[0].joints[12].y*yScale;
case 'Right Hip X': return jsonObject.bodies[0].joints[16].x*xScale;
case 'Right Hip Y': return jsonObject.bodies[0].joints[16].y*yScale;
case 'Left Knee X': return jsonObject.bodies[0].joints[13].x*xScale;
case 'Left Knee Y': return jsonObject.bodies[0].joints[13].y*yScale;
case 'Right Knee X': return jsonObject.bodies[0].joints[17].x*xScale;
case 'Right Knee Y': return jsonObject.bodies[0].joints[17].y*yScale;
case 'Neck X': return jsonObject.bodies[0].joints[2].x*xScale;
case 'Neck Y': return jsonObject.bodies[0].joints[2].y*yScale;
case 'Left Shoulder X': return jsonObject.bodies[0].joints[4].x*xScale;
case 'Left Shoulder Y': return jsonObject.bodies[0].joints[4].y*yScale;
case 'Right Shoulder X': return jsonObject.bodies[0].joints[8].x*xScale;
case 'Right Shoulder Y': return jsonObject.bodies[0].joints[8].y*yScale;
case 'Spine Base X': return jsonObject.bodies[0].joints[0].x*xScale;
case 'Spine Base Y': return jsonObject.bodies[0].joints[0].y*yScale;
case 'Spine Middle X': return jsonObject.bodies[0].joints[1].x*xScale;
case 'Spine Middle Y': return jsonObject.bodies[0].joints[1].y*yScale;
case 'Spine Shoulder X': return jsonObject.bodies[0].joints[20].x*xScale;
case 'Spine Shoulder Y': return jsonObject.bodies[0].joints[20].y*yScale;
case 'Left Thumb X': return jsonObject.bodies[0].joints[22].x*xScale;
case 'Left Thumb Y': return jsonObject.bodies[0].joints[22].y*yScale;
case 'Right Thumb X': return jsonObject.bodies[0].joints[24].x*xScale;
case 'Right Thumb Y': return jsonObject.bodies[0].joints[24].y*yScale;
case 'Left Wrist X': return jsonObject.bodies[0].joints[6].x*xScale;
case 'Left Wrist Y': return jsonObject.bodies[0].joints[6].y*yScale;
case 'Right Wrist X': return jsonObject.bodies[0].joints[10].x*xScale;
case 'Right Wrist Y': return jsonObject.bodies[0].joints[10].y*yScale;
}
};
// Register the extension
ScratchExtensions.register('KinectinScratch', descriptor, ext);
})({}); | Completely recreate server each time restart is called
| KinectinScratch.js | Completely recreate server each time restart is called | <ide><path>inectinScratch.js
<ide> window.ws.close();
<ide> console.log("connecting to local server ..");
<ide> window.ws = new wsImpl('ws://localhost:8181/');
<add>
<add> // when data is comming from the server, this method is called
<add> ws.onmessage = function (evt) {
<add> jsonObject = JSON.parse(evt.data);
<add> if(jsonObject.bodies == '')
<add> {
<add> status = 1;
<add> } else
<add> {
<add> status = 2;
<add> }
<add> };
<add>
<add> // when the connection is established, this method is called
<add> ws.onopen = function () {
<add> console.log('.. connection open');
<add> };
<add>
<add> // when the connection is closed, this method is called
<add> ws.onclose = function () {
<add> console.log('.. connection closed');
<add> status = 0;
<add> };
<ide> };
<ide>
<ide> ext.ipconnect = function(s) {
<ide> window.ws.close();
<ide> console.log("connecting to "+s+' ..');
<ide> window.ws = new wsImpl('ws://'+s+':8181/');
<add>
<add> // when data is comming from the server, this method is called
<add> ws.onmessage = function (evt) {
<add> jsonObject = JSON.parse(evt.data);
<add> if(jsonObject.bodies == '')
<add> {
<add> status = 1;
<add> } else
<add> {
<add> status = 2;
<add> }
<add> };
<add>
<add> // when the connection is established, this method is called
<add> ws.onopen = function () {
<add> console.log('.. connection open');
<add> };
<add>
<add> // when the connection is closed, this method is called
<add> ws.onclose = function () {
<add> console.log('.. connection closed');
<add> status = 0;
<add> };
<ide> }
<ide>
<ide> ext.closeconn = function() { |
|
Java | apache-2.0 | b74b6f5a78f83bfbdaeb2ee866c83b8f26d52de9 | 0 | petrushy/Orekit,CS-SI/Orekit,CS-SI/Orekit,petrushy/Orekit | package org.orekit.propagation.events;
import org.hipparchus.geometry.euclidean.threed.Vector3D;
import org.hipparchus.ode.events.Action;
import org.hipparchus.util.FastMath;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.orekit.Utils;
import org.orekit.bodies.CelestialBodyFactory;
import org.orekit.bodies.GeodeticPoint;
import org.orekit.bodies.OneAxisEllipsoid;
import org.orekit.errors.OrekitException;
import org.orekit.frames.FramesFactory;
import org.orekit.frames.TopocentricFrame;
import org.orekit.orbits.KeplerianOrbit;
import org.orekit.orbits.Orbit;
import org.orekit.orbits.PositionAngle;
import org.orekit.propagation.Propagator;
import org.orekit.propagation.SpacecraftState;
import org.orekit.propagation.analytical.KeplerianPropagator;
import org.orekit.propagation.events.handlers.EventHandler;
import org.orekit.time.AbsoluteDate;
import org.orekit.time.TimeScalesFactory;
import org.orekit.utils.Constants;
import org.orekit.utils.IERSConventions;
import org.orekit.utils.PVCoordinates;
import org.orekit.utils.PVCoordinatesProvider;
public class AngularSeparationFromSatelliteDetectorTest {
private OneAxisEllipsoid earth;
private TopocentricFrame acatenango;
private AbsoluteDate iniDate;
private Orbit initialOrbit;
private Propagator propagator;
@Test
public void testCentralSunTransit() {
double proximityAngle = FastMath.toRadians(10.0);
double maxCheck = 0.1 * proximityAngle / initialOrbit.getKeplerianMeanMotion();
PVCoordinatesProvider sun = CelestialBodyFactory.getSun();
AngularSeparationFromSatelliteDetector detector =
new AngularSeparationFromSatelliteDetector(sun, acatenango, proximityAngle).
withMaxCheck(maxCheck).
withThreshold(1.0e-6);
Assert.assertEquals(proximityAngle, detector.getProximityAngle(), 1.0e-15);
Assert.assertSame(sun, detector.getPrimaryObject());
Assert.assertSame(acatenango, detector.getSecondaryObject());
Assert.assertEquals(maxCheck, detector.getMaxCheckInterval(), 1.0e-15);
propagator.addEventDetector(detector);
final SpacecraftState finalState = propagator.propagate(iniDate.shiftedBy(3600 * 2));
Assert.assertEquals(4587.6472, finalState.getDate().durationFrom(iniDate), 1.0e-3);
final PVCoordinates sPV = finalState.getPVCoordinates();
final PVCoordinates primaryPV = sun .getPVCoordinates(finalState.getDate(), finalState.getFrame());
final PVCoordinates secondaryPV = acatenango.getPVCoordinates(finalState.getDate(), finalState.getFrame());
final double separation = Vector3D.angle(primaryPV .getPosition().subtract(sPV.getPosition()),
secondaryPV.getPosition().subtract(sPV.getPosition()));
Assert.assertTrue(separation < proximityAngle);
}
@Test
public void testRegularProximity() {
double proximityAngle = FastMath.toRadians(10.0);
double maxCheck = 0.1 * proximityAngle / initialOrbit.getKeplerianMeanMotion();
PVCoordinatesProvider sun = CelestialBodyFactory.getSun();
AngularSeparationFromSatelliteDetector detector =
new AngularSeparationFromSatelliteDetector(sun, acatenango, proximityAngle).
withMaxCheck(maxCheck).
withThreshold(1.0e-6).
withHandler(new EventHandler<AngularSeparationFromSatelliteDetector>() {
public Action eventOccurred(SpacecraftState s, AngularSeparationFromSatelliteDetector detector, boolean increasing) {
if (increasing) {
Assert.assertEquals(5084.4147, s.getDate().durationFrom(iniDate), 1.0e-3);
} else {
Assert.assertEquals(4587.6472, s.getDate().durationFrom(iniDate), 1.0e-3);
}
return Action.CONTINUE;
}
});
Assert.assertEquals(proximityAngle, detector.getProximityAngle(), 1.0e-15);
Assert.assertSame(sun, detector.getPrimaryObject());
Assert.assertSame(acatenango, detector.getSecondaryObject());
Assert.assertEquals(maxCheck, detector.getMaxCheckInterval(), 1.0e-15);
propagator.addEventDetector(detector);
final SpacecraftState finalState = propagator.propagate(iniDate.shiftedBy(3600 * 2));
Assert.assertEquals(7200.0, finalState.getDate().durationFrom(iniDate), 1.0e-3);
}
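    // Illustrative helper (not part of the original tests): the maxCheck values
    // above are chosen so that the detector samples its switching function about
    // ten times while the angular separation sweeps through one proximity angle
    // at the orbit's Keplerian rate; the 0.1 factor is the assumption carried
    // over from the tests above.
    private static double maxCheckFor(final double proximityAngle, final Orbit orbit) {
        return 0.1 * proximityAngle / orbit.getKeplerianMeanMotion();
    }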
@Before
public void setUp() {
try {
Utils.setDataRoot("regular-data");
earth = new OneAxisEllipsoid(Constants.WGS84_EARTH_EQUATORIAL_RADIUS,
Constants.WGS84_EARTH_FLATTENING,
FramesFactory.getITRF(IERSConventions.IERS_2010, true));
acatenango = new TopocentricFrame(earth,
new GeodeticPoint(FastMath.toRadians(14.500833),
FastMath.toRadians(-90.87583),
3976.0),
"Acatenango");
iniDate = new AbsoluteDate(2003, 5, 1, 17, 30, 0.0, TimeScalesFactory.getUTC());
initialOrbit = new KeplerianOrbit(7e6, 1.0e-4, FastMath.toRadians(98.5),
FastMath.toRadians(87.0), FastMath.toRadians(216.59976025619),
FastMath.toRadians(319.7), PositionAngle.MEAN,
FramesFactory.getEME2000(), iniDate,
Constants.EIGEN5C_EARTH_MU);
propagator = new KeplerianPropagator(initialOrbit);
} catch (OrekitException oe) {
Assert.fail(oe.getLocalizedMessage());
}
}
@After
public void tearDown() {
earth = null;
iniDate = null;
initialOrbit = null;
propagator = null;
}
}
| src/test/java/org/orekit/propagation/events/AngularSeparationFromSatelliteDetectorTest.java | package org.orekit.propagation.events;
import org.hipparchus.geometry.euclidean.threed.Vector3D;
import org.hipparchus.ode.events.Action;
import org.hipparchus.util.FastMath;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.orekit.Utils;
import org.orekit.bodies.CelestialBodyFactory;
import org.orekit.bodies.GeodeticPoint;
import org.orekit.bodies.OneAxisEllipsoid;
import org.orekit.errors.OrekitException;
import org.orekit.frames.FramesFactory;
import org.orekit.frames.TopocentricFrame;
import org.orekit.orbits.KeplerianOrbit;
import org.orekit.orbits.Orbit;
import org.orekit.orbits.PositionAngle;
import org.orekit.propagation.Propagator;
import org.orekit.propagation.SpacecraftState;
import org.orekit.propagation.analytical.KeplerianPropagator;
import org.orekit.propagation.events.handlers.EventHandler;
import org.orekit.time.AbsoluteDate;
import org.orekit.time.TimeScalesFactory;
import org.orekit.utils.Constants;
import org.orekit.utils.IERSConventions;
import org.orekit.utils.PVCoordinates;
import org.orekit.utils.PVCoordinatesProvider;
public class AngularSeparationFromSatelliteDetectorTest {
private OneAxisEllipsoid earth;
private TopocentricFrame acatenango;
private AbsoluteDate iniDate;
private Orbit initialOrbit;
private Propagator propagator;
@Test
public void testCentralSunTransit() {
double proximityAngle = FastMath.toRadians(10);
double maxCheck = 0.1 * proximityAngle / initialOrbit.getKeplerianMeanMotion();
PVCoordinatesProvider sun = CelestialBodyFactory.getSun();
AngularSeparationFromSatelliteDetector detector =
new AngularSeparationFromSatelliteDetector(sun, acatenango, proximityAngle).
withMaxCheck(maxCheck).
withThreshold(1.0e-6);
Assert.assertEquals(proximityAngle, detector.getProximityAngle(), 1.0e-15);
Assert.assertSame(sun, detector.getPrimaryObject());
Assert.assertSame(acatenango, detector.getSecondaryObject());
Assert.assertEquals(maxCheck, detector.getMaxCheckInterval(), 1.0e-15);
propagator.addEventDetector(detector);
final SpacecraftState finalState = propagator.propagate(iniDate.shiftedBy(3600 * 2));
Assert.assertEquals(4587.6472, finalState.getDate().durationFrom(iniDate), 1.0e-3);
final PVCoordinates sPV = finalState.getPVCoordinates();
final PVCoordinates primaryPV = sun .getPVCoordinates(finalState.getDate(), finalState.getFrame());
final PVCoordinates secondaryPV = acatenango.getPVCoordinates(finalState.getDate(), finalState.getFrame());
final double separation = Vector3D.angle(primaryPV .getPosition().subtract(sPV.getPosition()),
secondaryPV.getPosition().subtract(sPV.getPosition()));
Assert.assertTrue(separation < proximityAngle);
}
@Test
public void testRegularProximity() {
double proximityAngle = FastMath.toRadians(5.0);
double maxCheck = 0.1 * proximityAngle / initialOrbit.getKeplerianMeanMotion();
PVCoordinatesProvider sun = CelestialBodyFactory.getSun();
AngularSeparationFromSatelliteDetector detector =
new AngularSeparationFromSatelliteDetector(sun, acatenango, proximityAngle).
withMaxCheck(maxCheck).
withThreshold(1.0e-6).
withHandler(new EventHandler<AngularSeparationFromSatelliteDetector>() {
public Action eventOccurred(SpacecraftState s, AngularSeparationFromSatelliteDetector detector, boolean increasing) {
if (increasing) {
Assert.assertEquals(0.0, s.getDate().durationFrom(iniDate), 1.0e-3);
} else {
Assert.assertEquals(1914.1680, s.getDate().durationFrom(iniDate), 1.0e-3);
}
return Action.CONTINUE;
}
});
Assert.assertEquals(proximityAngle, detector.getProximityAngle(), 1.0e-15);
Assert.assertSame(sun, detector.getPrimaryObject());
Assert.assertSame(acatenango, detector.getSecondaryObject());
Assert.assertEquals(maxCheck, detector.getMaxCheckInterval(), 1.0e-15);
propagator.addEventDetector(detector);
final SpacecraftState finalState = propagator.propagate(iniDate.shiftedBy(3600 * 2));
Assert.assertEquals(7200.0, finalState.getDate().durationFrom(iniDate), 1.0e-3);
}
@Before
public void setUp() {
try {
Utils.setDataRoot("regular-data");
earth = new OneAxisEllipsoid(Constants.WGS84_EARTH_EQUATORIAL_RADIUS,
Constants.WGS84_EARTH_FLATTENING,
FramesFactory.getITRF(IERSConventions.IERS_2010, true));
acatenango = new TopocentricFrame(earth,
new GeodeticPoint(FastMath.toRadians(14.500833),
FastMath.toRadians(-90.87583),
3976.0),
"Acatenango");
iniDate = new AbsoluteDate(2003, 5, 1, 17, 30, 0.0, TimeScalesFactory.getUTC());
initialOrbit = new KeplerianOrbit(7e6, 1.0e-4, FastMath.toRadians(98.5),
FastMath.toRadians(87.0), FastMath.toRadians(216.59976025619),
FastMath.toRadians(319.7), PositionAngle.MEAN,
FramesFactory.getEME2000(), iniDate,
Constants.EIGEN5C_EARTH_MU);
propagator = new KeplerianPropagator(initialOrbit);
} catch (OrekitException oe) {
Assert.fail(oe.getLocalizedMessage());
}
}
@After
public void tearDown() {
earth = null;
iniDate = null;
initialOrbit = null;
propagator = null;
}
}
| Completed event detector test class | src/test/java/org/orekit/propagation/events/AngularSeparationFromSatelliteDetectorTest.java | Completed event detector test class | <ide><path>rc/test/java/org/orekit/propagation/events/AngularSeparationFromSatelliteDetectorTest.java
<ide> @Test
<ide> public void testCentralSunTransit() {
<ide>
<del> double proximityAngle = FastMath.toRadians(10);
<add> double proximityAngle = FastMath.toRadians(10.0);
<ide> double maxCheck = 0.1 * proximityAngle / initialOrbit.getKeplerianMeanMotion();
<ide> PVCoordinatesProvider sun = CelestialBodyFactory.getSun();
<ide> AngularSeparationFromSatelliteDetector detector =
<ide> @Test
<ide> public void testRegularProximity() {
<ide>
<del> double proximityAngle = FastMath.toRadians(5.0);
<add> double proximityAngle = FastMath.toRadians(10.0);
<ide> double maxCheck = 0.1 * proximityAngle / initialOrbit.getKeplerianMeanMotion();
<ide> PVCoordinatesProvider sun = CelestialBodyFactory.getSun();
<ide> AngularSeparationFromSatelliteDetector detector =
<ide> withHandler(new EventHandler<AngularSeparationFromSatelliteDetector>() {
<ide> public Action eventOccurred(SpacecraftState s, AngularSeparationFromSatelliteDetector detector, boolean increasing) {
<ide> if (increasing) {
<del> Assert.assertEquals(0.0, s.getDate().durationFrom(iniDate), 1.0e-3);
<add> Assert.assertEquals(5084.4147, s.getDate().durationFrom(iniDate), 1.0e-3);
<ide> } else {
<del> Assert.assertEquals(1914.1680, s.getDate().durationFrom(iniDate), 1.0e-3);
<add> Assert.assertEquals(4587.6472, s.getDate().durationFrom(iniDate), 1.0e-3);
<ide> }
<ide> return Action.CONTINUE;
<ide> } |
|
Java | mit | b7d768370d0770271de3c1cc7d387da777e25f2d | 0 | plackemacher/robolectric,kriegfrj/robolectric,charlesmunger/robolectric,yuzhong-google/robolectric,tuenti/robolectric,gb112211/robolectric,davidsun/robolectric,wyvx/robolectric,rburgst/robolectric,wyvx/robolectric,eric-kansas/robolectric,wyvx/robolectric,svenji/robolectric,ChengCorp/robolectric,gb112211/robolectric,tuenti/robolectric,1zaman/robolectric,tmrudick/robolectric,WonderCsabo/robolectric,macklinu/robolectric,erichaugh/robolectric,1zaman/robolectric,lexs/robolectric,eric-kansas/robolectric,jingle1267/robolectric,erichaugh/robolectric,zhongyu05/robolectric,gb112211/robolectric,macklinu/robolectric,rongou/robolectric,zhongyu05/robolectric,tec27/robolectric,pivotal-oscar/robolectric,davidsun/robolectric,tmrudick/robolectric,amarts/robolectric,svenji/robolectric,cc12703/robolectric,tec27/robolectric,rburgst/robolectric,cc12703/robolectric,paulpv/robolectric,macklinu/robolectric,kriegfrj/robolectric,cesar1000/robolectric,ocadotechnology/robolectric,tyronen/robolectric,trevorrjohn/robolectric,paulpv/robolectric,davidsun/robolectric,lexs/robolectric,plackemacher/robolectric,tyronen/robolectric,cesar1000/robolectric,tuenti/robolectric,mag/robolectric,1zaman/robolectric,cesar1000/robolectric,rongou/robolectric,eric-kansas/robolectric,pivotal-oscar/robolectric,WonderCsabo/robolectric,ocadotechnology/robolectric,charlesmunger/robolectric,fiower/robolectric,diegotori/robolectric,zbsz/robolectric,jingle1267/robolectric,tyronen/robolectric,yuzhong-google/robolectric,VikingDen/robolectric,spotify/robolectric,jongerrish/robolectric,charlesmunger/robolectric,toluju/robolectric,amarts/robolectric,hgl888/robolectric,toluju/robolectric,ocadotechnology/robolectric,paulpv/robolectric,jongerrish/robolectric,amarts/robolectric,pivotal-oscar/robolectric,cc12703/robolectric,yuzhong-google/robolectric,fiower/robolectric,ChengCorp/robolectric,karlicoss/robolectric,ChengCorp/robolectric,holmari/robolectric,rongou/robolectric,tmrudick/robolectric,rburgst/robolectric,holmari/robolectric,karlicoss/robolectric,VikingDen/robolectric,svenji/robolectric,jongerrish/robolectric,hgl888/robolectric,zbsz/robolectric,diegotori/robolectric,kriegfrj/robolectric,tjohn/robolectric,tjohn/robolectric,zbsz/robolectric,tjohn/robolectric,erichaugh/robolectric,holmari/robolectric,zhongyu05/robolectric,trevorrjohn/robolectric,VikingDen/robolectric,fiower/robolectric,hgl888/robolectric,mag/robolectric,mag/robolectric,toluju/robolectric,plackemacher/robolectric,spotify/robolectric,lexs/robolectric,spotify/robolectric,tec27/robolectric,jongerrish/robolectric,karlicoss/robolectric,jingle1267/robolectric,diegotori/robolectric,trevorrjohn/robolectric,WonderCsabo/robolectric | package org.robolectric.fakes;
import android.database.Cursor;
import android.database.DatabaseUtils;
import android.net.Uri;
import java.util.ArrayList;
import java.util.List;
public class RoboCursor extends BaseCursor {
public Uri uri;
public String[] projection;
public String selection;
public String[] selectionArgs;
public String sortOrder;
protected Object[][] results = new Object[0][0];
  protected List<String> columnNames = new ArrayList<String>();
int resultsIndex = -1;
boolean closeWasCalled;
@Override
public void setQuery(Uri uri, String[] projection, String selection, String[] selectionArgs, String sortOrder) {
this.uri = uri;
this.projection = projection;
this.selection = selection;
this.selectionArgs = selectionArgs;
this.sortOrder = sortOrder;
}
@Override
  public int getColumnIndexOrThrow(String columnName) throws IllegalArgumentException {
    int col = getColumnIndex(columnName);
    if (col == -1) {
      throw new IllegalArgumentException("No column with name: " + columnName);
    }
return col;
}
@Override
public int getColumnIndex(String columnName) {
return columnNames.indexOf(columnName);
}
@Override
public String getString(int columnIndex) {
return (String) results[resultsIndex][columnIndex];
}
@Override
public long getLong(int columnIndex) {
return (Long) results[resultsIndex][columnIndex];
}
@Override
public int getInt(int columnIndex) {
return (Integer) results[resultsIndex][columnIndex];
}
@Override
public int getCount() {
return results.length;
}
@Override
public boolean moveToNext() {
++resultsIndex;
return resultsIndex < results.length;
}
@Override
public void close() {
closeWasCalled = true;
}
@Override
public int getColumnCount() {
return results[0].length;
}
@Override
public String getColumnName(int index) {
return columnNames.get(index);
}
@Override
public int getType(int columnIndex) {
return DatabaseUtils.getTypeOfObject(results[0][columnIndex]);
}
public void setColumnNames(List<String> columnNames) {
this.columnNames = columnNames;
}
public void setResults(Object[][] results) {
this.results = results;
}
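  // Typical test wiring (illustrative only): populate the fake before handing it
  // to code under test, e.g.
  //
  //   RoboCursor cursor = new RoboCursor();
  //   cursor.setColumnNames(Arrays.asList("id", "name"));
  //   cursor.setResults(new Object[][]{{1L, "first"}, {2L, "second"}});
  //
  // after which moveToNext()/getLong()/getString() walk the fixed result set.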
public boolean getCloseWasCalled() {
return closeWasCalled;
}
}
| robolectric-shadows/shadows-core/src/main/java/org/robolectric/fakes/RoboCursor.java | package org.robolectric.fakes;
import android.database.Cursor;
import android.database.DatabaseUtils;
import android.net.Uri;
import java.sql.Blob;
import java.util.ArrayList;
import java.util.List;
public class RoboCursor extends BaseCursor {
public Uri uri;
public String[] projection;
public String selection;
public String[] selectionArgs;
public String sortOrder;
protected Object[][] results = new Object[0][0];
protected List<String> columnNames= new ArrayList<String>();
int resultsIndex = -1;
boolean closeWasCalled;
@Override
public void setQuery(Uri uri, String[] projection, String selection, String[] selectionArgs, String sortOrder) {
this.uri = uri;
this.projection = projection;
this.selection = selection;
this.selectionArgs = selectionArgs;
this.sortOrder = sortOrder;
}
@Override
public int getColumnIndexOrThrow(String columnName) throws IllegalArgumentException{
int col = getColumnIndex(columnName);
if(col == -1){
throw new IllegalArgumentException("No column with name: "+columnName);
}
return col;
}
@Override
public int getColumnIndex(String columnName) {
return columnNames.indexOf(columnName);
}
@Override
public String getString(int columnIndex) {
return (String) results[resultsIndex][columnIndex];
}
@Override
public long getLong(int columnIndex) {
return (Long) results[resultsIndex][columnIndex];
}
@Override
public int getInt(int columnIndex) {
return (Integer) results[resultsIndex][columnIndex];
}
@Override
public int getCount() {
return results.length;
}
@Override
public boolean moveToNext() {
++resultsIndex;
return resultsIndex < results.length;
}
@Override
public void close() {
closeWasCalled = true;
}
@Override
public int getCount() {
return results.length;
}
@Override
public int getColumnCount() {
return results[0].length;
}
@Override
public String getColumnName(int index) {
return columnNames.get(index);
}
@Override
public int getType(int columnIndex) {
return DatabaseUtils.getTypeOfObject(results[0][columnIndex]);
}
public void setColumnNames(List<String> columnNames) {
this.columnNames = columnNames;
}
public void setResults(Object[][] results) {
this.results = results;
}
public boolean getCloseWasCalled() {
return closeWasCalled;
}
}
| Remove duplicate RoboCursor.getCount definition
This was breaking the build.
| robolectric-shadows/shadows-core/src/main/java/org/robolectric/fakes/RoboCursor.java | Remove duplicate RoboCursor.getCount definition | <ide><path>obolectric-shadows/shadows-core/src/main/java/org/robolectric/fakes/RoboCursor.java
<ide> }
<ide>
<ide> @Override
<del> public int getCount() {
<del> return results.length;
<del> }
<del>
<del> @Override
<ide> public int getColumnCount() {
<ide> return results[0].length;
<ide> } |
|
Java | epl-1.0 | 4b6cc14cc24760ebfcf6cb43f6f38c876866afed | 0 | collaborative-modeling/egit,collaborative-modeling/egit | /*******************************************************************************
* Copyright (c) 2016 Thomas Wolf <[email protected]>
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Eclipse Public License v1.0
* which accompanies this distribution, and is available at
* http://www.eclipse.org/legal/epl-v10.html
*******************************************************************************/
package org.eclipse.egit.ui.internal.jobs;
import java.text.MessageFormat;
import org.eclipse.core.runtime.IProgressMonitor;
import org.eclipse.core.runtime.IStatus;
import org.eclipse.core.runtime.Status;
import org.eclipse.core.runtime.jobs.Job;
import org.eclipse.egit.ui.Activator;
import org.eclipse.egit.ui.internal.UIText;
import org.eclipse.jface.action.IAction;
import org.eclipse.swt.widgets.Display;
import org.eclipse.ui.PlatformUI;
import org.eclipse.ui.progress.IProgressConstants;
/**
* A {@link Job} operating (solely) on a repository, reporting some result
* beyond a mere {@link IStatus} back to the user via an {@link IAction}. If the
* job is running in a dialog when its {@link #performJob(IProgressMonitor)}
* method returns, the action is invoked directly in the display thread,
* otherwise {@link IProgressConstants#ACTION_PROPERTY} is used to associate the
* action with the finished job and eventual display of the result is left to
* the progress reporting framework.
*/
public abstract class RepositoryJob extends Job {
/**
* Creates a new {@link RepositoryJob}.
*
* @param name
* of the job.
*/
public RepositoryJob(String name) {
super(name);
}
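	// Usage sketch (illustrative, not part of EGit): a concrete subclass does the
	// repository work in performJob() and, on success, supplies an IAction that
	// presents the detailed result:
	//
	//   new RepositoryJob("Fetch") {
	//       @Override
	//       protected IStatus performJob(IProgressMonitor monitor) {
	//           // run the fetch, reporting progress via the monitor
	//           return Status.OK_STATUS;
	//       }
	//
	//       @Override
	//       protected IAction getAction() {
	//           return new Action("Show fetch result") {
	//               @Override
	//               public void run() {
	//                   // open a dialog presenting the fetch result
	//               }
	//           };
	//       }
	//   }.schedule();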
@Override
protected final IStatus run(IProgressMonitor monitor) {
try {
IStatus status = performJob(monitor);
if (status == null) {
return Activator.createErrorStatus(MessageFormat
.format(UIText.RepositoryJob_NullStatus, getName()),
new NullPointerException());
} else if (!status.isOK()) {
return status;
}
IAction action = getAction();
if (action != null) {
if (isModal()) {
showResult(action);
} else {
setProperty(IProgressConstants.KEEP_PROPERTY, Boolean.TRUE);
setProperty(IProgressConstants.ACTION_PROPERTY, action);
return new Status(IStatus.OK, Activator.getPluginId(),
IStatus.OK, action.getText(), null);
}
}
return status;
} finally {
monitor.done();
}
}
/**
* Performs the actual work of the job.
*
* @param monitor
* for progress reporting and cancellation.
* @return an {@link IStatus} describing the outcome of the job
*/
abstract protected IStatus performJob(IProgressMonitor monitor);
/**
* Obtains an {@link IAction} to report the full job result if
* {@link #performJob(IProgressMonitor)} returned an {@link IStatus#isOK()
* isOK()} status.
*
* @return the action, or {@code null} if no action is to be taken
*/
abstract protected IAction getAction();
private boolean isModal() {
Boolean modal = (Boolean) getProperty(
IProgressConstants.PROPERTY_IN_DIALOG);
return modal != null && modal.booleanValue();
}
private void showResult(final IAction action) {
final Display display = PlatformUI.getWorkbench().getDisplay();
if (display != null) {
display.asyncExec(new Runnable() {
@Override
public void run() {
if (!display.isDisposed()) {
action.run();
}
}
});
}
}
}
| org.eclipse.egit.ui/src/org/eclipse/egit/ui/internal/jobs/RepositoryJob.java | /*******************************************************************************
* Copyright (c) 2016 Thomas Wolf <[email protected]>
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Eclipse Public License v1.0
* which accompanies this distribution, and is available at
* http://www.eclipse.org/legal/epl-v10.html
*******************************************************************************/
package org.eclipse.egit.ui.internal.jobs;
import java.text.MessageFormat;
import org.eclipse.core.runtime.IProgressMonitor;
import org.eclipse.core.runtime.IStatus;
import org.eclipse.core.runtime.Status;
import org.eclipse.core.runtime.jobs.Job;
import org.eclipse.egit.ui.Activator;
import org.eclipse.egit.ui.internal.UIText;
import org.eclipse.jface.action.IAction;
import org.eclipse.swt.widgets.Display;
import org.eclipse.ui.PlatformUI;
import org.eclipse.ui.progress.IProgressConstants;
/**
* A {@link Job} operating (solely) on a repository, reporting some result
* beyond a mere {@link IStatus} back to the user via an {@link IAction}. If the
* job is running in a dialog when its {@link #performJob(IProgressMonitor)}
* method returns, the action is invoked directly in the display thread,
* otherwise {@link IProgressConstants#ACTION_PROPERTY} is used to associate the
* action with the finished job and eventual display of the result is left to
* the progress reporting framework.
*/
public abstract class RepositoryJob extends Job {
/**
* Creates a new {@link RepositoryJob}.
*
* @param name
* of the job.
*/
public RepositoryJob(String name) {
super(name);
}
@Override
protected final IStatus run(IProgressMonitor monitor) {
IStatus status = performJob(monitor);
if (status == null) {
return Activator
.createErrorStatus(
MessageFormat.format(
UIText.RepositoryJob_NullStatus, getName()),
new NullPointerException());
} else if (!status.isOK()) {
return status;
}
IAction action = getAction();
if (action != null) {
if (isModal()) {
showResult(action);
} else {
setProperty(IProgressConstants.KEEP_PROPERTY, Boolean.TRUE);
setProperty(IProgressConstants.ACTION_PROPERTY, action);
return new Status(IStatus.OK, Activator.getPluginId(),
IStatus.OK, action.getText(), null);
}
}
return status;
}
/**
* Performs the actual work of the job.
*
* @param monitor
* for progress reporting and cancellation.
* @return an {@link IStatus} describing the outcome of the job
*/
abstract protected IStatus performJob(IProgressMonitor monitor);
/**
* Obtains an {@link IAction} to report the full job result if
* {@link #performJob(IProgressMonitor)} returned an {@link IStatus#isOK()
* isOK()} status.
*
* @return the action, or {@code null} if no action is to be taken
*/
abstract protected IAction getAction();
private boolean isModal() {
Boolean modal = (Boolean) getProperty(
IProgressConstants.PROPERTY_IN_DIALOG);
return modal != null && modal.booleanValue();
}
private void showResult(final IAction action) {
final Display display = PlatformUI.getWorkbench().getDisplay();
if (display != null) {
display.asyncExec(new Runnable() {
@Override
public void run() {
if (!display.isDisposed()) {
action.run();
}
}
});
}
}
}
| RepositoryJob: jobs must call monitor.done()
Jobs must tell the progress framework when they're done by calling
monitor.done(). Without this, the status framework may use the last
subtask's message for the action link in the progress view.
Change-Id: Ic32f2f4fa1840ab1fd835ff82ac9804b03e96bc8
| org.eclipse.egit.ui/src/org/eclipse/egit/ui/internal/jobs/RepositoryJob.java | RepositoryJob: jobs must call monitor.done() | <ide><path>rg.eclipse.egit.ui/src/org/eclipse/egit/ui/internal/jobs/RepositoryJob.java
<ide>
<ide> @Override
<ide> protected final IStatus run(IProgressMonitor monitor) {
<del> IStatus status = performJob(monitor);
<del> if (status == null) {
<del> return Activator
<del> .createErrorStatus(
<del> MessageFormat.format(
<del> UIText.RepositoryJob_NullStatus, getName()),
<del> new NullPointerException());
<del> } else if (!status.isOK()) {
<add> try {
<add> IStatus status = performJob(monitor);
<add> if (status == null) {
<add> return Activator.createErrorStatus(MessageFormat
<add> .format(UIText.RepositoryJob_NullStatus, getName()),
<add> new NullPointerException());
<add> } else if (!status.isOK()) {
<add> return status;
<add> }
<add> IAction action = getAction();
<add> if (action != null) {
<add> if (isModal()) {
<add> showResult(action);
<add> } else {
<add> setProperty(IProgressConstants.KEEP_PROPERTY, Boolean.TRUE);
<add> setProperty(IProgressConstants.ACTION_PROPERTY, action);
<add> return new Status(IStatus.OK, Activator.getPluginId(),
<add> IStatus.OK, action.getText(), null);
<add> }
<add> }
<ide> return status;
<add> } finally {
<add> monitor.done();
<ide> }
<del> IAction action = getAction();
<del> if (action != null) {
<del> if (isModal()) {
<del> showResult(action);
<del> } else {
<del> setProperty(IProgressConstants.KEEP_PROPERTY, Boolean.TRUE);
<del> setProperty(IProgressConstants.ACTION_PROPERTY, action);
<del> return new Status(IStatus.OK, Activator.getPluginId(),
<del> IStatus.OK, action.getText(), null);
<del> }
<del> }
<del> return status;
<ide> }
<ide>
<ide> /** |
|
Java | mit | 33e2040a6ba5ad9ebc627fff041d16cb45a05cef | 0 | caoyang521/bugsnag-android,hgl888/bugsnag-android,ppamorim/bugsnag-android,amikey/bugsnag-bugsnag-android,freefair/advanced-bugsnag-android | package com.bugsnag.android;
import java.util.Collection;
public abstract class BeforeNotify {
public abstract boolean run(Error error);
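    // Usage sketch (assumed client wiring, not part of this file): callbacks are
    // registered on a configured Bugsnag client and may veto delivery, e.g.
    //
    //   client.beforeNotify(new BeforeNotify() {
    //       @Override
    //       public boolean run(Error error) {
    //           // inspect or mutate the report here; return false to discard it
    //           return true;
    //       }
    //   });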
static boolean runAll(Collection<BeforeNotify> beforeNotifyTasks, Error error) {
for (BeforeNotify beforeNotify : beforeNotifyTasks) {
try {
if (!beforeNotify.run(error)) {
return false;
}
} catch (Throwable ex) {
Logger.warn("BeforeNotify threw an Exception", ex);
}
}
// By default, allow the error to be sent if there were no objections
return true;
}
}
| src/main/java/com/bugsnag/android/BeforeNotify.java | package com.bugsnag.android;
import java.util.Collection;
public abstract class BeforeNotify {
abstract boolean run(Error error);
static boolean runAll(Collection<BeforeNotify> beforeNotifyTasks, Error error) {
for (BeforeNotify beforeNotify : beforeNotifyTasks) {
try {
if (!beforeNotify.run(error)) {
return false;
}
} catch (Throwable ex) {
Logger.warn("BeforeNotify threw an Exception", ex);
}
}
// By default, allow the error to be sent if there were no objections
return true;
}
}
| Fix BeforeNotify#run visibility
| src/main/java/com/bugsnag/android/BeforeNotify.java | Fix BeforeNotify#run visibility | <ide><path>rc/main/java/com/bugsnag/android/BeforeNotify.java
<ide> import java.util.Collection;
<ide>
<ide> public abstract class BeforeNotify {
<del> abstract boolean run(Error error);
<add> public abstract boolean run(Error error);
<ide>
<ide> static boolean runAll(Collection<BeforeNotify> beforeNotifyTasks, Error error) {
<ide> for (BeforeNotify beforeNotify : beforeNotifyTasks) { |
|
Java | bsd-2-clause | 65e6df158b5dd98e2b689b99c3876c7e2420f56e | 0 | dmurph/protobee | package org.protobee.examples.broadcast.modules;
import java.net.SocketAddress;
import org.jboss.netty.handler.codec.http.HttpMethod;
import org.protobee.annotation.InjectLogger;
import org.protobee.compatability.Headers;
import org.protobee.events.BasicMessageReceivedEvent;
import org.protobee.examples.emotion.Emotion;
import org.protobee.examples.protos.BroadcasterProtos.BroadcastMessage;
import org.protobee.guice.scopes.SessionScope;
import org.protobee.identity.NetworkIdentityManager;
import org.protobee.modules.ProtocolModule;
import org.protobee.network.ConnectionCreator;
import org.protobee.protocol.Protocol;
import org.protobee.protocol.ProtocolModel;
import org.protobee.util.SocketAddressUtils;
import org.slf4j.Logger;
import com.google.common.base.Preconditions;
import com.google.common.eventbus.Subscribe;
import com.google.inject.Inject;
@SessionScope
@Headers(required = {})
public class FeelingsInitiatorModule extends ProtocolModule {
@InjectLogger
private Logger log;
private final NetworkIdentityManager identityManager;
private final Protocol feelingsProtocol;
private final ConnectionCreator creator;
private final ProtocolModel feelingsModel;
private final SocketAddressUtils addressUtils;
@Inject
public FeelingsInitiatorModule(ConnectionCreator creator,
@Emotion Protocol feelings, @Emotion ProtocolModel feelingsModel, SocketAddressUtils addressUtils,
NetworkIdentityManager manager) {
this.creator = creator;
this.feelingsProtocol = feelings;
this.feelingsModel = feelingsModel;
this.addressUtils = addressUtils;
this.identityManager = manager;
}
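  // Flow sketch: messageReceived() below reacts to broadcast messages whose body
  // is "feelings!" by opening an outbound session (custom "SAY" handshake method)
  // to the sender's advertised listening address, unless that identity already
  // has a session on the emotion protocol.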
@Subscribe
public void messageReceived(BasicMessageReceivedEvent event) {
Preconditions.checkArgument(event.getMessage() instanceof BroadcastMessage,
"Not a broadcast message");
BroadcastMessage message = (BroadcastMessage) event.getMessage();
SocketAddress address =
addressUtils.getAddress(message.getListeningAddress(), message.getListeningPort());
if (message.getMessage().equals("feelings!")
&& (!identityManager.hasNetworkIdentity(address) || !identityManager.getNewtorkIdentity(
address).hasCurrentSession(feelingsProtocol))) {
log.info("Connecting to address " + address + " with feelings protocol");
creator.connect(feelingsModel, address, HttpMethod.valueOf("SAY"), "/");
}
}
}
| examples/src/main/java/org/protobee/examples/broadcast/modules/FeelingsInitiatorModule.java | package org.protobee.examples.broadcast.modules;
import java.net.SocketAddress;
import org.jboss.netty.handler.codec.http.HttpMethod;
import org.protobee.annotation.InjectLogger;
import org.protobee.compatability.Headers;
import org.protobee.events.BasicMessageReceivedEvent;
import org.protobee.examples.protos.BroadcasterProtos.BroadcastMessage;
import org.protobee.guice.scopes.SessionScope;
import org.protobee.identity.NetworkIdentityManager;
import org.protobee.modules.ProtocolModule;
import org.protobee.network.ConnectionCreator;
import org.protobee.protocol.Protocol;
import org.protobee.protocol.ProtocolModel;
import org.protobee.util.SocketAddressUtils;
import org.slf4j.Logger;
import com.google.common.base.Preconditions;
import com.google.common.eventbus.Subscribe;
import com.google.inject.Inject;
@SessionScope
@Headers(required = {})
public class FeelingsInitiatorModule extends ProtocolModule {
@InjectLogger
private Logger log;
private final NetworkIdentityManager identityManager;
private final Protocol feelingsProtocol;
private final ConnectionCreator creator;
private final ProtocolModel feelingsModel;
private final SocketAddressUtils addressUtils;
@Inject
public FeelingsInitiatorModule(ConnectionCreator creator,
Protocol feelings, ProtocolModel feelingsModel, SocketAddressUtils addressUtils,
NetworkIdentityManager manager) {
this.creator = creator;
this.feelingsProtocol = feelings;
this.feelingsModel = feelingsModel;
this.addressUtils = addressUtils;
this.identityManager = manager;
}
@Subscribe
public void messageReceived(BasicMessageReceivedEvent event) {
Preconditions.checkArgument(event.getMessage() instanceof BroadcastMessage,
"Not a broadcast message");
BroadcastMessage message = (BroadcastMessage) event.getMessage();
SocketAddress address =
addressUtils.getAddress(message.getListeningAddress(), message.getListeningPort());
if (message.getMessage().equals("feelings!")
&& (!identityManager.hasNetworkIdentity(address) || !identityManager.getNewtorkIdentity(
address).hasCurrentSession(feelingsProtocol))) {
log.info("Connecting to address " + address + " with feelings protocol");
creator.connect(feelingsModel, address, HttpMethod.valueOf("SAY"), "/");
}
}
}
| fixed feelings initiator
| examples/src/main/java/org/protobee/examples/broadcast/modules/FeelingsInitiatorModule.java | fixed feelings initiator | <ide><path>xamples/src/main/java/org/protobee/examples/broadcast/modules/FeelingsInitiatorModule.java
<ide> import org.protobee.annotation.InjectLogger;
<ide> import org.protobee.compatability.Headers;
<ide> import org.protobee.events.BasicMessageReceivedEvent;
<add>import org.protobee.examples.emotion.Emotion;
<ide> import org.protobee.examples.protos.BroadcasterProtos.BroadcastMessage;
<ide> import org.protobee.guice.scopes.SessionScope;
<ide> import org.protobee.identity.NetworkIdentityManager;
<ide>
<ide> @Inject
<ide> public FeelingsInitiatorModule(ConnectionCreator creator,
<del> Protocol feelings, ProtocolModel feelingsModel, SocketAddressUtils addressUtils,
<add> @Emotion Protocol feelings, @Emotion ProtocolModel feelingsModel, SocketAddressUtils addressUtils,
<ide> NetworkIdentityManager manager) {
<ide> this.creator = creator;
<ide> this.feelingsProtocol = feelings; |
|
Java | apache-2.0 | cb710ef1fbff51d060614d093d6ceb95fa86e50e | 0 | googleapis/java-datastore,googleapis/java-datastore,googleapis/java-datastore | /*
* Copyright 2022 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.cloud.datastore.admin.v1;
import com.google.api.core.ApiFuture;
import com.google.api.core.ApiFutures;
import com.google.api.gax.core.BackgroundResource;
import com.google.api.gax.longrunning.OperationFuture;
import com.google.api.gax.paging.AbstractFixedSizeCollection;
import com.google.api.gax.paging.AbstractPage;
import com.google.api.gax.paging.AbstractPagedListResponse;
import com.google.api.gax.rpc.OperationCallable;
import com.google.api.gax.rpc.PageContext;
import com.google.api.gax.rpc.UnaryCallable;
import com.google.cloud.datastore.admin.v1.stub.DatastoreAdminStub;
import com.google.cloud.datastore.admin.v1.stub.DatastoreAdminStubSettings;
import com.google.common.util.concurrent.MoreExecutors;
import com.google.datastore.admin.v1.CreateIndexRequest;
import com.google.datastore.admin.v1.DeleteIndexRequest;
import com.google.datastore.admin.v1.EntityFilter;
import com.google.datastore.admin.v1.ExportEntitiesMetadata;
import com.google.datastore.admin.v1.ExportEntitiesRequest;
import com.google.datastore.admin.v1.ExportEntitiesResponse;
import com.google.datastore.admin.v1.GetIndexRequest;
import com.google.datastore.admin.v1.ImportEntitiesMetadata;
import com.google.datastore.admin.v1.ImportEntitiesRequest;
import com.google.datastore.admin.v1.Index;
import com.google.datastore.admin.v1.IndexOperationMetadata;
import com.google.datastore.admin.v1.ListIndexesRequest;
import com.google.datastore.admin.v1.ListIndexesResponse;
import com.google.longrunning.Operation;
import com.google.longrunning.OperationsClient;
import com.google.protobuf.Empty;
import java.io.IOException;
import java.util.List;
import java.util.Map;
import java.util.concurrent.TimeUnit;
import javax.annotation.Generated;
// AUTO-GENERATED DOCUMENTATION AND CLASS.
/**
* Service Description: Google Cloud Datastore Admin API
*
* <p>The Datastore Admin API provides several admin services for Cloud Datastore.
*
* <p>----------------------------------------------------------------------------- ## Concepts
*
* <p>Project, namespace, kind, and entity as defined in the Google Cloud Datastore API.
*
* <p>Operation: An Operation represents work being performed in the background.
*
* <p>EntityFilter: Allows specifying a subset of entities in a project. This is specified as a
* combination of kinds and namespaces (either or both of which may be all).
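 *
 * <p>For example (illustrative), a filter covering two kinds in the default namespace could be
 * built as:
 *
 * <pre>{@code
 * EntityFilter filter =
 *     EntityFilter.newBuilder()
 *         .addKinds("Customer")
 *         .addKinds("Order")
 *         .addNamespaceIds("")
 *         .build();
 * }</pre>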
*
* <p>----------------------------------------------------------------------------- ## Services
*
* <p># Export/Import
*
* <p>The Export/Import service provides the ability to copy all or a subset of entities to/from
* Google Cloud Storage.
*
* <p>Exported data may be imported into Cloud Datastore for any Google Cloud Platform project. It
* is not restricted to the export source project. It is possible to export from one project and
* then import into another.
*
* <p>Exported data can also be loaded into Google BigQuery for analysis.
*
* <p>Exports and imports are performed asynchronously. An Operation resource is created for each
* export/import. The state (including any errors encountered) of the export/import may be queried
* via the Operation resource.
*
* <p># Index
*
* <p>The index service manages Cloud Datastore composite indexes.
*
* <p>Index creation and deletion are performed asynchronously. An Operation resource is created for
* each such asynchronous operation. The state of the operation (including any errors encountered)
* may be queried via the Operation resource.
*
* <p># Operation
*
* <p>The Operations collection provides a record of actions performed for the specified project
* (including any operations in progress). Operations are not created directly but through calls on
* other collections or resources.
*
* <p>An operation that is not yet done may be cancelled. The request to cancel is asynchronous and
* the operation may continue to run for some time after the request to cancel is made.
*
* <p>An operation that is done may be deleted so that it is no longer listed as part of the
* Operation collection.
*
* <p>ListOperations returns all pending operations, but not completed operations.
*
* <p>Operations are created by service DatastoreAdmin, but are accessed via service
* google.longrunning.Operations.
*
* <p>This class provides the ability to make remote calls to the backing service through method
* calls that map to API methods. Sample code to get started:
*
* <pre>{@code
* // This snippet has been automatically generated for illustrative purposes only.
* // It may require modifications to work in your environment.
* try (DatastoreAdminClient datastoreAdminClient = DatastoreAdminClient.create()) {
* GetIndexRequest request =
* GetIndexRequest.newBuilder()
* .setProjectId("projectId-894832108")
* .setIndexId("indexId1943291277")
* .build();
* Index response = datastoreAdminClient.getIndex(request);
* }
* }</pre>
*
* <p>Note: close() needs to be called on the DatastoreAdminClient object to clean up resources such
* as threads. In the example above, try-with-resources is used, which automatically calls close().
*
* <p>The surface of this class includes several types of Java methods for each of the API's
* methods:
*
* <ol>
* <li>A "flattened" method. With this type of method, the fields of the request type have been
* converted into function parameters. It may be the case that not all fields are available as
* parameters, and not every API method will have a flattened method entry point.
* <li>A "request object" method. This type of method only takes one parameter, a request object,
* which must be constructed before the call. Not every API method will have a request object
* method.
* <li>A "callable" method. This type of method takes no parameters and returns an immutable API
* callable object, which can be used to initiate calls to the service.
* </ol>
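 *
 * <p>For example (an illustrative sketch reusing the generated methods defined later in this
 * class), the export entry point can be reached through either the request-object or the
 * callable surface:
 *
 * <pre>{@code
 * ExportEntitiesRequest request =
 *     ExportEntitiesRequest.newBuilder().setProjectId("my-project").build();
 * // request-object surface:
 * datastoreAdminClient.exportEntitiesAsync(request).get();
 * // callable surface:
 * datastoreAdminClient.exportEntitiesOperationCallable().futureCall(request).get();
 * }</pre>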
*
* <p>See the individual methods for example code.
*
* <p>Many parameters require resource names to be formatted in a particular way. To assist with
* these names, this class includes a format method for each type of name, and additionally a parse
* method to extract the individual identifiers contained within names that are returned.
*
* <p>This class can be customized by passing in a custom instance of DatastoreAdminSettings to
* create(). For example:
*
* <p>To customize credentials:
*
* <pre>{@code
* // This snippet has been automatically generated for illustrative purposes only.
* // It may require modifications to work in your environment.
* DatastoreAdminSettings datastoreAdminSettings =
* DatastoreAdminSettings.newBuilder()
* .setCredentialsProvider(FixedCredentialsProvider.create(myCredentials))
* .build();
* DatastoreAdminClient datastoreAdminClient = DatastoreAdminClient.create(datastoreAdminSettings);
* }</pre>
*
* <p>To customize the endpoint:
*
* <pre>{@code
* // This snippet has been automatically generated for illustrative purposes only.
* // It may require modifications to work in your environment.
* DatastoreAdminSettings datastoreAdminSettings =
* DatastoreAdminSettings.newBuilder().setEndpoint(myEndpoint).build();
* DatastoreAdminClient datastoreAdminClient = DatastoreAdminClient.create(datastoreAdminSettings);
* }</pre>
*
* <p>Please refer to the GitHub repository's samples for more quickstart code snippets.
*/
@Generated("by gapic-generator-java")
public class DatastoreAdminClient implements BackgroundResource {
private final DatastoreAdminSettings settings;
private final DatastoreAdminStub stub;
private final OperationsClient operationsClient;
/** Constructs an instance of DatastoreAdminClient with default settings. */
public static final DatastoreAdminClient create() throws IOException {
return create(DatastoreAdminSettings.newBuilder().build());
}
/**
* Constructs an instance of DatastoreAdminClient, using the given settings. The channels are
* created based on the settings passed in, or defaults for any settings that are not set.
*/
public static final DatastoreAdminClient create(DatastoreAdminSettings settings)
throws IOException {
return new DatastoreAdminClient(settings);
}
/**
* Constructs an instance of DatastoreAdminClient, using the given stub for making calls. This is
* for advanced usage - prefer using create(DatastoreAdminSettings).
*/
public static final DatastoreAdminClient create(DatastoreAdminStub stub) {
return new DatastoreAdminClient(stub);
}
/**
* Constructs an instance of DatastoreAdminClient, using the given settings. This is protected so
* that it is easy to make a subclass, but otherwise, the static factory methods should be
* preferred.
*/
protected DatastoreAdminClient(DatastoreAdminSettings settings) throws IOException {
this.settings = settings;
this.stub = ((DatastoreAdminStubSettings) settings.getStubSettings()).createStub();
this.operationsClient = OperationsClient.create(this.stub.getOperationsStub());
}
protected DatastoreAdminClient(DatastoreAdminStub stub) {
this.settings = null;
this.stub = stub;
this.operationsClient = OperationsClient.create(this.stub.getOperationsStub());
}
public final DatastoreAdminSettings getSettings() {
return settings;
}
public DatastoreAdminStub getStub() {
return stub;
}
/**
* Returns the OperationsClient that can be used to query the status of a long-running operation
* returned by another API method call.
*/
public final OperationsClient getOperationsClient() {
return operationsClient;
}
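  // Illustrative use (not generated code): poll a long-running export/import via
  // the operation name the service reports, e.g.
  //
  //   Operation op = datastoreAdminClient.getOperationsClient().getOperation(name);
  //   boolean finished = op.getDone();
  //
  // where `name` stands for the operation resource name returned by the service.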
// AUTO-GENERATED DOCUMENTATION AND METHOD.
/**
* Exports a copy of all or a subset of entities from Google Cloud Datastore to another storage
* system, such as Google Cloud Storage. Recent updates to entities may not be reflected in the
* export. The export occurs in the background and its progress can be monitored and managed via
* the Operation resource that is created. The output of an export may only be used once the
* associated operation is done. If an export operation is cancelled before completion it may
* leave partial data behind in Google Cloud Storage.
*
* <p>Sample code:
*
* <pre>{@code
* // This snippet has been automatically generated for illustrative purposes only.
* // It may require modifications to work in your environment.
* try (DatastoreAdminClient datastoreAdminClient = DatastoreAdminClient.create()) {
* String projectId = "projectId-894832108";
* Map<String, String> labels = new HashMap<>();
* EntityFilter entityFilter = EntityFilter.newBuilder().build();
* String outputUrlPrefix = "outputUrlPrefix-1132598048";
* ExportEntitiesResponse response =
* datastoreAdminClient
* .exportEntitiesAsync(projectId, labels, entityFilter, outputUrlPrefix)
* .get();
* }
* }</pre>
*
* @param projectId Required. Project ID against which to make the request.
* @param labels Client-assigned labels.
* @param entityFilter Description of what data from the project is included in the export.
* @param outputUrlPrefix Required. Location for the export metadata and data files.
* <p>The full resource URL of the external storage location. Currently, only Google Cloud
* Storage is supported. So output_url_prefix should be of the form:
* `gs://BUCKET_NAME[/NAMESPACE_PATH]`, where `BUCKET_NAME` is the name of the Cloud Storage
* bucket and `NAMESPACE_PATH` is an optional Cloud Storage namespace path (this is not a
* Cloud Datastore namespace). For more information about Cloud Storage namespace paths, see
* [Object name
* considerations](https://cloud.google.com/storage/docs/naming#object-considerations).
* <p>The resulting files will be nested deeper than the specified URL prefix. The final
* output URL will be provided in the
* [google.datastore.admin.v1.ExportEntitiesResponse.output_url][google.datastore.admin.v1.ExportEntitiesResponse.output_url]
* field. That value should be used for subsequent ImportEntities operations.
* <p>By nesting the data files deeper, the same Cloud Storage bucket can be used in multiple
* ExportEntities operations without conflict.
* @throws com.google.api.gax.rpc.ApiException if the remote call fails
*/
public final OperationFuture<ExportEntitiesResponse, ExportEntitiesMetadata> exportEntitiesAsync(
String projectId,
Map<String, String> labels,
EntityFilter entityFilter,
String outputUrlPrefix) {
ExportEntitiesRequest request =
ExportEntitiesRequest.newBuilder()
.setProjectId(projectId)
.putAllLabels(labels)
.setEntityFilter(entityFilter)
.setOutputUrlPrefix(outputUrlPrefix)
.build();
return exportEntitiesAsync(request);
}
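  // Editorial sketch (illustrative only): a minimal end-to-end export using the
  // flattened overload above. The project ID and bucket are assumed placeholders;
  // an empty EntityFilter exports every kind and namespace.
  private static String exportAllEntitiesSketch(DatastoreAdminClient client)
      throws InterruptedException, java.util.concurrent.ExecutionException {
    ExportEntitiesResponse response =
        client
            .exportEntitiesAsync(
                "my-project",                          // assumed project ID
                java.util.Collections.emptyMap(),      // no client-assigned labels
                EntityFilter.newBuilder().build(),     // empty filter = all entities
                "gs://my-backup-bucket/exports")       // assumed Cloud Storage prefix
            .get();
    // The returned URL points at the overall export metadata file and is the value
    // to hand to a later ImportEntities call.
    return response.getOutputUrl();
  }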
// AUTO-GENERATED DOCUMENTATION AND METHOD.
/**
* Exports a copy of all or a subset of entities from Google Cloud Datastore to another storage
* system, such as Google Cloud Storage. Recent updates to entities may not be reflected in the
* export. The export occurs in the background and its progress can be monitored and managed via
* the Operation resource that is created. The output of an export may only be used once the
   * associated operation is done. If an export operation is cancelled before completion, it may
* leave partial data behind in Google Cloud Storage.
*
* <p>Sample code:
*
* <pre>{@code
* // This snippet has been automatically generated for illustrative purposes only.
* // It may require modifications to work in your environment.
* try (DatastoreAdminClient datastoreAdminClient = DatastoreAdminClient.create()) {
* ExportEntitiesRequest request =
* ExportEntitiesRequest.newBuilder()
* .setProjectId("projectId-894832108")
* .putAllLabels(new HashMap<String, String>())
* .setEntityFilter(EntityFilter.newBuilder().build())
* .setOutputUrlPrefix("outputUrlPrefix-1132598048")
* .build();
* ExportEntitiesResponse response = datastoreAdminClient.exportEntitiesAsync(request).get();
* }
* }</pre>
*
* @param request The request object containing all of the parameters for the API call.
* @throws com.google.api.gax.rpc.ApiException if the remote call fails
*/
public final OperationFuture<ExportEntitiesResponse, ExportEntitiesMetadata> exportEntitiesAsync(
ExportEntitiesRequest request) {
return exportEntitiesOperationCallable().futureCall(request);
}
// AUTO-GENERATED DOCUMENTATION AND METHOD.
/**
* Exports a copy of all or a subset of entities from Google Cloud Datastore to another storage
* system, such as Google Cloud Storage. Recent updates to entities may not be reflected in the
* export. The export occurs in the background and its progress can be monitored and managed via
* the Operation resource that is created. The output of an export may only be used once the
   * associated operation is done. If an export operation is cancelled before completion, it may
* leave partial data behind in Google Cloud Storage.
*
* <p>Sample code:
*
* <pre>{@code
* // This snippet has been automatically generated for illustrative purposes only.
* // It may require modifications to work in your environment.
* try (DatastoreAdminClient datastoreAdminClient = DatastoreAdminClient.create()) {
* ExportEntitiesRequest request =
* ExportEntitiesRequest.newBuilder()
* .setProjectId("projectId-894832108")
* .putAllLabels(new HashMap<String, String>())
* .setEntityFilter(EntityFilter.newBuilder().build())
* .setOutputUrlPrefix("outputUrlPrefix-1132598048")
* .build();
* OperationFuture<ExportEntitiesResponse, ExportEntitiesMetadata> future =
* datastoreAdminClient.exportEntitiesOperationCallable().futureCall(request);
* // Do something.
* ExportEntitiesResponse response = future.get();
* }
* }</pre>
*/
public final OperationCallable<
ExportEntitiesRequest, ExportEntitiesResponse, ExportEntitiesMetadata>
exportEntitiesOperationCallable() {
return stub.exportEntitiesOperationCallable();
}
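  // Editorial sketch: observes export progress through the operation metadata.
  // peekMetadata() comes from the gax OperationFuture interface and may return
  // null before the first poll has completed, hence the guards below.
  private void logExportProgressSketch(
      OperationFuture<ExportEntitiesResponse, ExportEntitiesMetadata> future)
      throws InterruptedException, java.util.concurrent.ExecutionException {
    ApiFuture<ExportEntitiesMetadata> metadataFuture = future.peekMetadata();
    if (metadataFuture != null && metadataFuture.isDone()) {
      ExportEntitiesMetadata metadata = metadataFuture.get();
      // progress_entities reports completed versus estimated work items.
      System.out.println(
          "entities exported so far: " + metadata.getProgressEntities().getWorkCompleted());
    }
  }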
// AUTO-GENERATED DOCUMENTATION AND METHOD.
/**
* Exports a copy of all or a subset of entities from Google Cloud Datastore to another storage
* system, such as Google Cloud Storage. Recent updates to entities may not be reflected in the
* export. The export occurs in the background and its progress can be monitored and managed via
* the Operation resource that is created. The output of an export may only be used once the
   * associated operation is done. If an export operation is cancelled before completion, it may
* leave partial data behind in Google Cloud Storage.
*
* <p>Sample code:
*
* <pre>{@code
* // This snippet has been automatically generated for illustrative purposes only.
* // It may require modifications to work in your environment.
* try (DatastoreAdminClient datastoreAdminClient = DatastoreAdminClient.create()) {
* ExportEntitiesRequest request =
* ExportEntitiesRequest.newBuilder()
* .setProjectId("projectId-894832108")
* .putAllLabels(new HashMap<String, String>())
* .setEntityFilter(EntityFilter.newBuilder().build())
* .setOutputUrlPrefix("outputUrlPrefix-1132598048")
* .build();
* ApiFuture<Operation> future =
* datastoreAdminClient.exportEntitiesCallable().futureCall(request);
* // Do something.
* Operation response = future.get();
* }
* }</pre>
*/
public final UnaryCallable<ExportEntitiesRequest, Operation> exportEntitiesCallable() {
return stub.exportEntitiesCallable();
}
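  // Editorial sketch: the raw callable returns the google.longrunning.Operation
  // directly. Its name can be persisted and used later to resume status checks,
  // for example via getOperationsClient().getOperation(name).
  private String startExportForLaterPollingSketch(ExportEntitiesRequest request) {
    Operation operation = exportEntitiesCallable().call(request);
    return operation.getName();
  }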
// AUTO-GENERATED DOCUMENTATION AND METHOD.
/**
* Imports entities into Google Cloud Datastore. Existing entities with the same key are
* overwritten. The import occurs in the background and its progress can be monitored and managed
* via the Operation resource that is created. If an ImportEntities operation is cancelled, it is
* possible that a subset of the data has already been imported to Cloud Datastore.
*
* <p>Sample code:
*
* <pre>{@code
* // This snippet has been automatically generated for illustrative purposes only.
* // It may require modifications to work in your environment.
* try (DatastoreAdminClient datastoreAdminClient = DatastoreAdminClient.create()) {
* String projectId = "projectId-894832108";
* Map<String, String> labels = new HashMap<>();
* String inputUrl = "inputUrl470706501";
* EntityFilter entityFilter = EntityFilter.newBuilder().build();
* datastoreAdminClient.importEntitiesAsync(projectId, labels, inputUrl, entityFilter).get();
* }
* }</pre>
*
* @param projectId Required. Project ID against which to make the request.
* @param labels Client-assigned labels.
* @param inputUrl Required. The full resource URL of the external storage location. Currently,
* only Google Cloud Storage is supported. So input_url should be of the form:
* `gs://BUCKET_NAME[/NAMESPACE_PATH]/OVERALL_EXPORT_METADATA_FILE`, where `BUCKET_NAME` is
* the name of the Cloud Storage bucket, `NAMESPACE_PATH` is an optional Cloud Storage
* namespace path (this is not a Cloud Datastore namespace), and
* `OVERALL_EXPORT_METADATA_FILE` is the metadata file written by the ExportEntities
* operation. For more information about Cloud Storage namespace paths, see [Object name
* considerations](https://cloud.google.com/storage/docs/naming#object-considerations).
* <p>For more information, see
* [google.datastore.admin.v1.ExportEntitiesResponse.output_url][google.datastore.admin.v1.ExportEntitiesResponse.output_url].
* @param entityFilter Optionally specify which kinds/namespaces are to be imported. If provided,
* the list must be a subset of the EntityFilter used in creating the export, otherwise a
* FAILED_PRECONDITION error will be returned. If no filter is specified then all entities
* from the export are imported.
* @throws com.google.api.gax.rpc.ApiException if the remote call fails
*/
public final OperationFuture<Empty, ImportEntitiesMetadata> importEntitiesAsync(
String projectId, Map<String, String> labels, String inputUrl, EntityFilter entityFilter) {
ImportEntitiesRequest request =
ImportEntitiesRequest.newBuilder()
.setProjectId(projectId)
.putAllLabels(labels)
.setInputUrl(inputUrl)
.setEntityFilter(entityFilter)
.build();
return importEntitiesAsync(request);
}
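  // Editorial sketch: chains an export into an import. The input URL handed to
  // the import is exactly the output_url produced by the prior export, as the
  // parameter documentation above describes. Project ID and bucket are assumed
  // placeholders.
  private static void exportThenImportSketch(DatastoreAdminClient client)
      throws InterruptedException, java.util.concurrent.ExecutionException {
    ExportEntitiesResponse export =
        client
            .exportEntitiesAsync(
                "my-project",
                java.util.Collections.emptyMap(),
                EntityFilter.newBuilder().build(),
                "gs://my-backup-bucket/exports")
            .get();
    client
        .importEntitiesAsync(
            "my-project",
            java.util.Collections.emptyMap(),
            export.getOutputUrl(),               // metadata file written by the export
            EntityFilter.newBuilder().build())   // empty filter imports everything
        .get();
  }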
// AUTO-GENERATED DOCUMENTATION AND METHOD.
/**
* Imports entities into Google Cloud Datastore. Existing entities with the same key are
* overwritten. The import occurs in the background and its progress can be monitored and managed
* via the Operation resource that is created. If an ImportEntities operation is cancelled, it is
* possible that a subset of the data has already been imported to Cloud Datastore.
*
* <p>Sample code:
*
* <pre>{@code
* // This snippet has been automatically generated for illustrative purposes only.
* // It may require modifications to work in your environment.
* try (DatastoreAdminClient datastoreAdminClient = DatastoreAdminClient.create()) {
* ImportEntitiesRequest request =
* ImportEntitiesRequest.newBuilder()
* .setProjectId("projectId-894832108")
* .putAllLabels(new HashMap<String, String>())
* .setInputUrl("inputUrl470706501")
* .setEntityFilter(EntityFilter.newBuilder().build())
* .build();
* datastoreAdminClient.importEntitiesAsync(request).get();
* }
* }</pre>
*
* @param request The request object containing all of the parameters for the API call.
* @throws com.google.api.gax.rpc.ApiException if the remote call fails
*/
public final OperationFuture<Empty, ImportEntitiesMetadata> importEntitiesAsync(
ImportEntitiesRequest request) {
return importEntitiesOperationCallable().futureCall(request);
}
// AUTO-GENERATED DOCUMENTATION AND METHOD.
/**
* Imports entities into Google Cloud Datastore. Existing entities with the same key are
* overwritten. The import occurs in the background and its progress can be monitored and managed
* via the Operation resource that is created. If an ImportEntities operation is cancelled, it is
* possible that a subset of the data has already been imported to Cloud Datastore.
*
* <p>Sample code:
*
* <pre>{@code
* // This snippet has been automatically generated for illustrative purposes only.
* // It may require modifications to work in your environment.
* try (DatastoreAdminClient datastoreAdminClient = DatastoreAdminClient.create()) {
* ImportEntitiesRequest request =
* ImportEntitiesRequest.newBuilder()
* .setProjectId("projectId-894832108")
* .putAllLabels(new HashMap<String, String>())
* .setInputUrl("inputUrl470706501")
* .setEntityFilter(EntityFilter.newBuilder().build())
* .build();
* OperationFuture<Empty, ImportEntitiesMetadata> future =
* datastoreAdminClient.importEntitiesOperationCallable().futureCall(request);
* // Do something.
* future.get();
* }
* }</pre>
*/
public final OperationCallable<ImportEntitiesRequest, Empty, ImportEntitiesMetadata>
importEntitiesOperationCallable() {
return stub.importEntitiesOperationCallable();
}
// AUTO-GENERATED DOCUMENTATION AND METHOD.
/**
* Imports entities into Google Cloud Datastore. Existing entities with the same key are
* overwritten. The import occurs in the background and its progress can be monitored and managed
* via the Operation resource that is created. If an ImportEntities operation is cancelled, it is
* possible that a subset of the data has already been imported to Cloud Datastore.
*
* <p>Sample code:
*
* <pre>{@code
* // This snippet has been automatically generated for illustrative purposes only.
* // It may require modifications to work in your environment.
* try (DatastoreAdminClient datastoreAdminClient = DatastoreAdminClient.create()) {
* ImportEntitiesRequest request =
* ImportEntitiesRequest.newBuilder()
* .setProjectId("projectId-894832108")
* .putAllLabels(new HashMap<String, String>())
* .setInputUrl("inputUrl470706501")
* .setEntityFilter(EntityFilter.newBuilder().build())
* .build();
* ApiFuture<Operation> future =
* datastoreAdminClient.importEntitiesCallable().futureCall(request);
* // Do something.
* future.get();
* }
* }</pre>
*/
public final UnaryCallable<ImportEntitiesRequest, Operation> importEntitiesCallable() {
return stub.importEntitiesCallable();
}
// AUTO-GENERATED DOCUMENTATION AND METHOD.
/**
* Creates the specified index. A newly created index's initial state is `CREATING`. On completion
* of the returned [google.longrunning.Operation][google.longrunning.Operation], the state will be
* `READY`. If the index already exists, the call will return an `ALREADY_EXISTS` status.
*
* <p>During index creation, the process could result in an error, in which case the index will
* move to the `ERROR` state. The process can be recovered by fixing the data that caused the
* error, removing the index with [delete][google.datastore.admin.v1.DatastoreAdmin.DeleteIndex],
   * then re-creating the index with
   * [create][google.datastore.admin.v1.DatastoreAdmin.CreateIndex].
*
* <p>Indexes with a single property cannot be created.
*
* <p>Sample code:
*
* <pre>{@code
* // This snippet has been automatically generated for illustrative purposes only.
* // It may require modifications to work in your environment.
* try (DatastoreAdminClient datastoreAdminClient = DatastoreAdminClient.create()) {
* CreateIndexRequest request =
* CreateIndexRequest.newBuilder()
* .setProjectId("projectId-894832108")
* .setIndex(Index.newBuilder().build())
* .build();
* Index response = datastoreAdminClient.createIndexAsync(request).get();
* }
* }</pre>
*
* @param request The request object containing all of the parameters for the API call.
* @throws com.google.api.gax.rpc.ApiException if the remote call fails
*/
public final OperationFuture<Index, IndexOperationMetadata> createIndexAsync(
CreateIndexRequest request) {
return createIndexOperationCallable().futureCall(request);
}
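  // Editorial sketch: treats ALREADY_EXISTS as a non-fatal outcome when creating
  // an index, since the documentation above says the call fails with that status
  // if the index is already present. With an OperationFuture the ApiException
  // surfaces as the cause of the ExecutionException.
  private Index createIndexIfAbsentSketch(CreateIndexRequest request)
      throws InterruptedException, java.util.concurrent.ExecutionException {
    try {
      return createIndexAsync(request).get();
    } catch (java.util.concurrent.ExecutionException e) {
      Throwable cause = e.getCause();
      if (cause instanceof com.google.api.gax.rpc.ApiException
          && ((com.google.api.gax.rpc.ApiException) cause).getStatusCode().getCode()
              == com.google.api.gax.rpc.StatusCode.Code.ALREADY_EXISTS) {
        return null;  // index already exists; caller may fetch it via getIndex
      }
      throw e;
    }
  }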
// AUTO-GENERATED DOCUMENTATION AND METHOD.
/**
* Creates the specified index. A newly created index's initial state is `CREATING`. On completion
* of the returned [google.longrunning.Operation][google.longrunning.Operation], the state will be
* `READY`. If the index already exists, the call will return an `ALREADY_EXISTS` status.
*
* <p>During index creation, the process could result in an error, in which case the index will
* move to the `ERROR` state. The process can be recovered by fixing the data that caused the
* error, removing the index with [delete][google.datastore.admin.v1.DatastoreAdmin.DeleteIndex],
   * then re-creating the index with
   * [create][google.datastore.admin.v1.DatastoreAdmin.CreateIndex].
*
* <p>Indexes with a single property cannot be created.
*
* <p>Sample code:
*
* <pre>{@code
* // This snippet has been automatically generated for illustrative purposes only.
* // It may require modifications to work in your environment.
* try (DatastoreAdminClient datastoreAdminClient = DatastoreAdminClient.create()) {
* CreateIndexRequest request =
* CreateIndexRequest.newBuilder()
* .setProjectId("projectId-894832108")
* .setIndex(Index.newBuilder().build())
* .build();
* OperationFuture<Index, IndexOperationMetadata> future =
* datastoreAdminClient.createIndexOperationCallable().futureCall(request);
* // Do something.
* Index response = future.get();
* }
* }</pre>
*/
public final OperationCallable<CreateIndexRequest, Index, IndexOperationMetadata>
createIndexOperationCallable() {
return stub.createIndexOperationCallable();
}
// AUTO-GENERATED DOCUMENTATION AND METHOD.
/**
* Creates the specified index. A newly created index's initial state is `CREATING`. On completion
* of the returned [google.longrunning.Operation][google.longrunning.Operation], the state will be
* `READY`. If the index already exists, the call will return an `ALREADY_EXISTS` status.
*
* <p>During index creation, the process could result in an error, in which case the index will
* move to the `ERROR` state. The process can be recovered by fixing the data that caused the
* error, removing the index with [delete][google.datastore.admin.v1.DatastoreAdmin.DeleteIndex],
   * then re-creating the index with
   * [create][google.datastore.admin.v1.DatastoreAdmin.CreateIndex].
*
* <p>Indexes with a single property cannot be created.
*
* <p>Sample code:
*
* <pre>{@code
* // This snippet has been automatically generated for illustrative purposes only.
* // It may require modifications to work in your environment.
* try (DatastoreAdminClient datastoreAdminClient = DatastoreAdminClient.create()) {
* CreateIndexRequest request =
* CreateIndexRequest.newBuilder()
* .setProjectId("projectId-894832108")
* .setIndex(Index.newBuilder().build())
* .build();
* ApiFuture<Operation> future = datastoreAdminClient.createIndexCallable().futureCall(request);
* // Do something.
* Operation response = future.get();
* }
* }</pre>
*/
public final UnaryCallable<CreateIndexRequest, Operation> createIndexCallable() {
return stub.createIndexCallable();
}
// AUTO-GENERATED DOCUMENTATION AND METHOD.
/**
* Deletes an existing index. An index can only be deleted if it is in a `READY` or `ERROR` state.
   * On successful execution of the request, the index will be in a `DELETING`
   * [state][google.datastore.admin.v1.Index.State], and on completion of the returned
   * [google.longrunning.Operation][google.longrunning.Operation], the index will be removed.
*
* <p>During index deletion, the process could result in an error, in which case the index will
* move to the `ERROR` state. The process can be recovered by fixing the data that caused the
* error, followed by calling [delete][google.datastore.admin.v1.DatastoreAdmin.DeleteIndex]
* again.
*
* <p>Sample code:
*
* <pre>{@code
* // This snippet has been automatically generated for illustrative purposes only.
* // It may require modifications to work in your environment.
* try (DatastoreAdminClient datastoreAdminClient = DatastoreAdminClient.create()) {
* DeleteIndexRequest request =
* DeleteIndexRequest.newBuilder()
* .setProjectId("projectId-894832108")
* .setIndexId("indexId1943291277")
* .build();
* Index response = datastoreAdminClient.deleteIndexAsync(request).get();
* }
* }</pre>
*
* @param request The request object containing all of the parameters for the API call.
* @throws com.google.api.gax.rpc.ApiException if the remote call fails
*/
public final OperationFuture<Index, IndexOperationMetadata> deleteIndexAsync(
DeleteIndexRequest request) {
return deleteIndexOperationCallable().futureCall(request);
}
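  // Editorial sketch: deletes an index only when it is in a deletable state. The
  // documentation above says deletion requires `READY` or `ERROR`, so this guard
  // fetches the index first and skips the call otherwise.
  private void deleteIndexIfDeletableSketch(String projectId, String indexId)
      throws InterruptedException, java.util.concurrent.ExecutionException {
    GetIndexRequest getRequest =
        GetIndexRequest.newBuilder().setProjectId(projectId).setIndexId(indexId).build();
    Index index = getIndex(getRequest);
    if (index.getState() == Index.State.READY || index.getState() == Index.State.ERROR) {
      DeleteIndexRequest deleteRequest =
          DeleteIndexRequest.newBuilder().setProjectId(projectId).setIndexId(indexId).build();
      deleteIndexAsync(deleteRequest).get();
    }
  }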
// AUTO-GENERATED DOCUMENTATION AND METHOD.
/**
* Deletes an existing index. An index can only be deleted if it is in a `READY` or `ERROR` state.
   * On successful execution of the request, the index will be in a `DELETING`
   * [state][google.datastore.admin.v1.Index.State], and on completion of the returned
   * [google.longrunning.Operation][google.longrunning.Operation], the index will be removed.
*
* <p>During index deletion, the process could result in an error, in which case the index will
* move to the `ERROR` state. The process can be recovered by fixing the data that caused the
* error, followed by calling [delete][google.datastore.admin.v1.DatastoreAdmin.DeleteIndex]
* again.
*
* <p>Sample code:
*
* <pre>{@code
* // This snippet has been automatically generated for illustrative purposes only.
* // It may require modifications to work in your environment.
* try (DatastoreAdminClient datastoreAdminClient = DatastoreAdminClient.create()) {
* DeleteIndexRequest request =
* DeleteIndexRequest.newBuilder()
* .setProjectId("projectId-894832108")
* .setIndexId("indexId1943291277")
* .build();
* OperationFuture<Index, IndexOperationMetadata> future =
* datastoreAdminClient.deleteIndexOperationCallable().futureCall(request);
* // Do something.
* Index response = future.get();
* }
* }</pre>
*/
public final OperationCallable<DeleteIndexRequest, Index, IndexOperationMetadata>
deleteIndexOperationCallable() {
return stub.deleteIndexOperationCallable();
}
// AUTO-GENERATED DOCUMENTATION AND METHOD.
/**
* Deletes an existing index. An index can only be deleted if it is in a `READY` or `ERROR` state.
   * On successful execution of the request, the index will be in a `DELETING`
   * [state][google.datastore.admin.v1.Index.State], and on completion of the returned
   * [google.longrunning.Operation][google.longrunning.Operation], the index will be removed.
*
* <p>During index deletion, the process could result in an error, in which case the index will
* move to the `ERROR` state. The process can be recovered by fixing the data that caused the
* error, followed by calling [delete][google.datastore.admin.v1.DatastoreAdmin.DeleteIndex]
* again.
*
* <p>Sample code:
*
* <pre>{@code
* // This snippet has been automatically generated for illustrative purposes only.
* // It may require modifications to work in your environment.
* try (DatastoreAdminClient datastoreAdminClient = DatastoreAdminClient.create()) {
* DeleteIndexRequest request =
* DeleteIndexRequest.newBuilder()
* .setProjectId("projectId-894832108")
* .setIndexId("indexId1943291277")
* .build();
* ApiFuture<Operation> future = datastoreAdminClient.deleteIndexCallable().futureCall(request);
* // Do something.
* Operation response = future.get();
* }
* }</pre>
*/
public final UnaryCallable<DeleteIndexRequest, Operation> deleteIndexCallable() {
return stub.deleteIndexCallable();
}
// AUTO-GENERATED DOCUMENTATION AND METHOD.
/**
* Gets an index.
*
* <p>Sample code:
*
* <pre>{@code
* // This snippet has been automatically generated for illustrative purposes only.
* // It may require modifications to work in your environment.
* try (DatastoreAdminClient datastoreAdminClient = DatastoreAdminClient.create()) {
* GetIndexRequest request =
* GetIndexRequest.newBuilder()
* .setProjectId("projectId-894832108")
* .setIndexId("indexId1943291277")
* .build();
* Index response = datastoreAdminClient.getIndex(request);
* }
* }</pre>
*
* @param request The request object containing all of the parameters for the API call.
* @throws com.google.api.gax.rpc.ApiException if the remote call fails
*/
public final Index getIndex(GetIndexRequest request) {
return getIndexCallable().call(request);
}
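  // Editorial sketch: polls getIndex until the index leaves the `CREATING` state.
  // The five-second interval is an assumed value; code that started the creation
  // itself would normally block on the OperationFuture instead of polling.
  private Index waitUntilIndexSettledSketch(GetIndexRequest request) throws InterruptedException {
    Index index = getIndex(request);
    while (index.getState() == Index.State.CREATING) {
      Thread.sleep(5_000L);  // assumed poll interval
      index = getIndex(request);
    }
    return index;
  }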
// AUTO-GENERATED DOCUMENTATION AND METHOD.
/**
* Gets an index.
*
* <p>Sample code:
*
* <pre>{@code
* // This snippet has been automatically generated for illustrative purposes only.
* // It may require modifications to work in your environment.
* try (DatastoreAdminClient datastoreAdminClient = DatastoreAdminClient.create()) {
* GetIndexRequest request =
* GetIndexRequest.newBuilder()
* .setProjectId("projectId-894832108")
* .setIndexId("indexId1943291277")
* .build();
* ApiFuture<Index> future = datastoreAdminClient.getIndexCallable().futureCall(request);
* // Do something.
* Index response = future.get();
* }
* }</pre>
*/
public final UnaryCallable<GetIndexRequest, Index> getIndexCallable() {
return stub.getIndexCallable();
}
// AUTO-GENERATED DOCUMENTATION AND METHOD.
/**
* Lists the indexes that match the specified filters. Datastore uses an eventually consistent
* query to fetch the list of indexes and may occasionally return stale results.
*
* <p>Sample code:
*
* <pre>{@code
* // This snippet has been automatically generated for illustrative purposes only.
* // It may require modifications to work in your environment.
* try (DatastoreAdminClient datastoreAdminClient = DatastoreAdminClient.create()) {
* ListIndexesRequest request =
* ListIndexesRequest.newBuilder()
* .setProjectId("projectId-894832108")
* .setFilter("filter-1274492040")
* .setPageSize(883849137)
* .setPageToken("pageToken873572522")
* .build();
* for (Index element : datastoreAdminClient.listIndexes(request).iterateAll()) {
* // doThingsWith(element);
* }
* }
* }</pre>
*
* @param request The request object containing all of the parameters for the API call.
* @throws com.google.api.gax.rpc.ApiException if the remote call fails
*/
public final ListIndexesPagedResponse listIndexes(ListIndexesRequest request) {
return listIndexesPagedCallable().call(request);
}
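  // Editorial sketch: gathers the IDs of all `READY` indexes in a project using
  // the paged response above; iterateAll() transparently fetches further pages.
  // Because the underlying query is eventually consistent, the result may
  // briefly lag recent index changes.
  private List<String> listReadyIndexIdsSketch(String projectId) {
    ListIndexesRequest request = ListIndexesRequest.newBuilder().setProjectId(projectId).build();
    List<String> ids = new java.util.ArrayList<>();
    for (Index index : listIndexes(request).iterateAll()) {
      if (index.getState() == Index.State.READY) {
        ids.add(index.getIndexId());
      }
    }
    return ids;
  }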
// AUTO-GENERATED DOCUMENTATION AND METHOD.
/**
* Lists the indexes that match the specified filters. Datastore uses an eventually consistent
* query to fetch the list of indexes and may occasionally return stale results.
*
* <p>Sample code:
*
* <pre>{@code
* // This snippet has been automatically generated for illustrative purposes only.
* // It may require modifications to work in your environment.
* try (DatastoreAdminClient datastoreAdminClient = DatastoreAdminClient.create()) {
* ListIndexesRequest request =
* ListIndexesRequest.newBuilder()
* .setProjectId("projectId-894832108")
* .setFilter("filter-1274492040")
* .setPageSize(883849137)
* .setPageToken("pageToken873572522")
* .build();
* ApiFuture<Index> future = datastoreAdminClient.listIndexesPagedCallable().futureCall(request);
* // Do something.
* for (Index element : future.get().iterateAll()) {
* // doThingsWith(element);
* }
* }
* }</pre>
*/
public final UnaryCallable<ListIndexesRequest, ListIndexesPagedResponse>
listIndexesPagedCallable() {
return stub.listIndexesPagedCallable();
}
// AUTO-GENERATED DOCUMENTATION AND METHOD.
/**
* Lists the indexes that match the specified filters. Datastore uses an eventually consistent
* query to fetch the list of indexes and may occasionally return stale results.
*
* <p>Sample code:
*
* <pre>{@code
* // This snippet has been automatically generated for illustrative purposes only.
* // It may require modifications to work in your environment.
* try (DatastoreAdminClient datastoreAdminClient = DatastoreAdminClient.create()) {
* ListIndexesRequest request =
* ListIndexesRequest.newBuilder()
* .setProjectId("projectId-894832108")
* .setFilter("filter-1274492040")
* .setPageSize(883849137)
* .setPageToken("pageToken873572522")
* .build();
* while (true) {
* ListIndexesResponse response = datastoreAdminClient.listIndexesCallable().call(request);
* for (Index element : response.getIndexesList()) {
* // doThingsWith(element);
* }
* String nextPageToken = response.getNextPageToken();
* if (!Strings.isNullOrEmpty(nextPageToken)) {
* request = request.toBuilder().setPageToken(nextPageToken).build();
* } else {
* break;
* }
* }
* }
* }</pre>
*/
public final UnaryCallable<ListIndexesRequest, ListIndexesResponse> listIndexesCallable() {
return stub.listIndexesCallable();
}
@Override
public final void close() {
stub.close();
}
@Override
public void shutdown() {
stub.shutdown();
}
@Override
public boolean isShutdown() {
return stub.isShutdown();
}
@Override
public boolean isTerminated() {
return stub.isTerminated();
}
@Override
public void shutdownNow() {
stub.shutdownNow();
}
@Override
public boolean awaitTermination(long duration, TimeUnit unit) throws InterruptedException {
return stub.awaitTermination(duration, unit);
}
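  // Editorial sketch: a graceful shutdown pattern for long-lived clients that
  // cannot use try-with-resources. The ten-second grace period is an assumed
  // value; shutdownNow() is the forceful fallback.
  private void shutdownGracefullySketch() throws InterruptedException {
    shutdown();
    if (!awaitTermination(10, TimeUnit.SECONDS)) {
      shutdownNow();
    }
  }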
public static class ListIndexesPagedResponse
extends AbstractPagedListResponse<
ListIndexesRequest,
ListIndexesResponse,
Index,
ListIndexesPage,
ListIndexesFixedSizeCollection> {
public static ApiFuture<ListIndexesPagedResponse> createAsync(
PageContext<ListIndexesRequest, ListIndexesResponse, Index> context,
ApiFuture<ListIndexesResponse> futureResponse) {
ApiFuture<ListIndexesPage> futurePage =
ListIndexesPage.createEmptyPage().createPageAsync(context, futureResponse);
return ApiFutures.transform(
futurePage, input -> new ListIndexesPagedResponse(input), MoreExecutors.directExecutor());
}
private ListIndexesPagedResponse(ListIndexesPage page) {
super(page, ListIndexesFixedSizeCollection.createEmptyCollection());
}
}
public static class ListIndexesPage
extends AbstractPage<ListIndexesRequest, ListIndexesResponse, Index, ListIndexesPage> {
private ListIndexesPage(
PageContext<ListIndexesRequest, ListIndexesResponse, Index> context,
ListIndexesResponse response) {
super(context, response);
}
private static ListIndexesPage createEmptyPage() {
return new ListIndexesPage(null, null);
}
@Override
protected ListIndexesPage createPage(
PageContext<ListIndexesRequest, ListIndexesResponse, Index> context,
ListIndexesResponse response) {
return new ListIndexesPage(context, response);
}
@Override
public ApiFuture<ListIndexesPage> createPageAsync(
PageContext<ListIndexesRequest, ListIndexesResponse, Index> context,
ApiFuture<ListIndexesResponse> futureResponse) {
return super.createPageAsync(context, futureResponse);
}
}
public static class ListIndexesFixedSizeCollection
extends AbstractFixedSizeCollection<
ListIndexesRequest,
ListIndexesResponse,
Index,
ListIndexesPage,
ListIndexesFixedSizeCollection> {
private ListIndexesFixedSizeCollection(List<ListIndexesPage> pages, int collectionSize) {
super(pages, collectionSize);
}
private static ListIndexesFixedSizeCollection createEmptyCollection() {
return new ListIndexesFixedSizeCollection(null, 0);
}
@Override
protected ListIndexesFixedSizeCollection createCollection(
List<ListIndexesPage> pages, int collectionSize) {
return new ListIndexesFixedSizeCollection(pages, collectionSize);
}
}
}
| google-cloud-datastore/src/main/java/com/google/cloud/datastore/admin/v1/DatastoreAdminClient.java | /*
* Copyright 2022 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.cloud.datastore.admin.v1;
import com.google.api.core.ApiFuture;
import com.google.api.core.ApiFutures;
import com.google.api.gax.core.BackgroundResource;
import com.google.api.gax.longrunning.OperationFuture;
import com.google.api.gax.paging.AbstractFixedSizeCollection;
import com.google.api.gax.paging.AbstractPage;
import com.google.api.gax.paging.AbstractPagedListResponse;
import com.google.api.gax.rpc.OperationCallable;
import com.google.api.gax.rpc.PageContext;
import com.google.api.gax.rpc.UnaryCallable;
import com.google.cloud.datastore.admin.v1.stub.DatastoreAdminStub;
import com.google.cloud.datastore.admin.v1.stub.DatastoreAdminStubSettings;
import com.google.common.util.concurrent.MoreExecutors;
import com.google.datastore.admin.v1.CreateIndexRequest;
import com.google.datastore.admin.v1.DeleteIndexRequest;
import com.google.datastore.admin.v1.EntityFilter;
import com.google.datastore.admin.v1.ExportEntitiesMetadata;
import com.google.datastore.admin.v1.ExportEntitiesRequest;
import com.google.datastore.admin.v1.ExportEntitiesResponse;
import com.google.datastore.admin.v1.GetIndexRequest;
import com.google.datastore.admin.v1.ImportEntitiesMetadata;
import com.google.datastore.admin.v1.ImportEntitiesRequest;
import com.google.datastore.admin.v1.Index;
import com.google.datastore.admin.v1.IndexOperationMetadata;
import com.google.datastore.admin.v1.ListIndexesRequest;
import com.google.datastore.admin.v1.ListIndexesResponse;
import com.google.longrunning.Operation;
import com.google.longrunning.OperationsClient;
import com.google.protobuf.Empty;
import java.io.IOException;
import java.util.List;
import java.util.Map;
import java.util.concurrent.TimeUnit;
import javax.annotation.Generated;
// AUTO-GENERATED DOCUMENTATION AND CLASS.
/**
* Service Description: Google Cloud Datastore Admin API
*
* <p>The Datastore Admin API provides several admin services for Cloud Datastore.
*
* <p>----------------------------------------------------------------------------- ## Concepts
*
* <p>Project, namespace, kind, and entity as defined in the Google Cloud Datastore API.
*
* <p>Operation: An Operation represents work being performed in the background.
*
* <p>EntityFilter: Allows specifying a subset of entities in a project. This is specified as a
* combination of kinds and namespaces (either or both of which may be all).
*
* <p>----------------------------------------------------------------------------- ## Services
*
* <p># Export/Import
*
* <p>The Export/Import service provides the ability to copy all or a subset of entities to/from
* Google Cloud Storage.
*
* <p>Exported data may be imported into Cloud Datastore for any Google Cloud Platform project. It
* is not restricted to the export source project. It is possible to export from one project and
* then import into another.
*
* <p>Exported data can also be loaded into Google BigQuery for analysis.
*
* <p>Exports and imports are performed asynchronously. An Operation resource is created for each
* export/import. The state (including any errors encountered) of the export/import may be queried
* via the Operation resource.
*
* <p># Index
*
* <p>The index service manages Cloud Datastore composite indexes.
*
* <p>Index creation and deletion are performed asynchronously. An Operation resource is created for
* each such asynchronous operation. The state of the operation (including any errors encountered)
* may be queried via the Operation resource.
*
* <p># Operation
*
* <p>The Operations collection provides a record of actions performed for the specified project
* (including any operations in progress). Operations are not created directly but through calls on
* other collections or resources.
*
* <p>An operation that is not yet done may be cancelled. The request to cancel is asynchronous and
* the operation may continue to run for some time after the request to cancel is made.
*
* <p>An operation that is done may be deleted so that it is no longer listed as part of the
* Operation collection.
*
* <p>ListOperations returns all pending operations, but not completed operations.
*
* <p>Operations are created by service DatastoreAdmin, but are accessed via service
* google.longrunning.Operations.
*
* <p>This class provides the ability to make remote calls to the backing service through method
* calls that map to API methods. Sample code to get started:
*
* <pre>{@code
* // This snippet has been automatically generated for illustrative purposes only.
* // It may require modifications to work in your environment.
* try (DatastoreAdminClient datastoreAdminClient = DatastoreAdminClient.create()) {
* GetIndexRequest request =
* GetIndexRequest.newBuilder()
* .setProjectId("projectId-894832108")
* .setIndexId("indexId1943291277")
* .build();
* Index response = datastoreAdminClient.getIndex(request);
* }
* }</pre>
*
* <p>Note: close() needs to be called on the DatastoreAdminClient object to clean up resources such
* as threads. In the example above, try-with-resources is used, which automatically calls close().
*
* <p>The surface of this class includes several types of Java methods for each of the API's
* methods:
*
* <ol>
* <li>A "flattened" method. With this type of method, the fields of the request type have been
* converted into function parameters. It may be the case that not all fields are available as
* parameters, and not every API method will have a flattened method entry point.
* <li>A "request object" method. This type of method only takes one parameter, a request object,
* which must be constructed before the call. Not every API method will have a request object
* method.
* <li>A "callable" method. This type of method takes no parameters and returns an immutable API
* callable object, which can be used to initiate calls to the service.
* </ol>
*
* <p>See the individual methods for example code.
*
* <p>Many parameters require resource names to be formatted in a particular way. To assist with
* these names, this class includes a format method for each type of name, and additionally a parse
* method to extract the individual identifiers contained within names that are returned.
*
* <p>This class can be customized by passing in a custom instance of DatastoreAdminSettings to
* create(). For example:
*
* <p>To customize credentials:
*
* <pre>{@code
* // This snippet has been automatically generated for illustrative purposes only.
* // It may require modifications to work in your environment.
* DatastoreAdminSettings datastoreAdminSettings =
* DatastoreAdminSettings.newBuilder()
* .setCredentialsProvider(FixedCredentialsProvider.create(myCredentials))
* .build();
* DatastoreAdminClient datastoreAdminClient = DatastoreAdminClient.create(datastoreAdminSettings);
* }</pre>
*
* <p>To customize the endpoint:
*
* <pre>{@code
* // This snippet has been automatically generated for illustrative purposes only.
* // It may require modifications to work in your environment.
* DatastoreAdminSettings datastoreAdminSettings =
* DatastoreAdminSettings.newBuilder().setEndpoint(myEndpoint).build();
* DatastoreAdminClient datastoreAdminClient = DatastoreAdminClient.create(datastoreAdminSettings);
* }</pre>
*
* <p>Please refer to the GitHub repository's samples for more quickstart code snippets.
*/
@Generated("by gapic-generator-java")
public class DatastoreAdminClient implements BackgroundResource {
private final DatastoreAdminSettings settings;
private final DatastoreAdminStub stub;
private final OperationsClient operationsClient;
/** Constructs an instance of DatastoreAdminClient with default settings. */
public static final DatastoreAdminClient create() throws IOException {
return create(DatastoreAdminSettings.newBuilder().build());
}
/**
* Constructs an instance of DatastoreAdminClient, using the given settings. The channels are
* created based on the settings passed in, or defaults for any settings that are not set.
*/
public static final DatastoreAdminClient create(DatastoreAdminSettings settings)
throws IOException {
return new DatastoreAdminClient(settings);
}
/**
* Constructs an instance of DatastoreAdminClient, using the given stub for making calls. This is
* for advanced usage - prefer using create(DatastoreAdminSettings).
*/
public static final DatastoreAdminClient create(DatastoreAdminStub stub) {
return new DatastoreAdminClient(stub);
}
/**
* Constructs an instance of DatastoreAdminClient, using the given settings. This is protected so
* that it is easy to make a subclass, but otherwise, the static factory methods should be
* preferred.
*/
protected DatastoreAdminClient(DatastoreAdminSettings settings) throws IOException {
this.settings = settings;
this.stub = ((DatastoreAdminStubSettings) settings.getStubSettings()).createStub();
this.operationsClient = OperationsClient.create(this.stub.getOperationsStub());
}
protected DatastoreAdminClient(DatastoreAdminStub stub) {
this.settings = null;
this.stub = stub;
this.operationsClient = OperationsClient.create(this.stub.getOperationsStub());
}
public final DatastoreAdminSettings getSettings() {
return settings;
}
public DatastoreAdminStub getStub() {
return stub;
}
/**
* Returns the OperationsClient that can be used to query the status of a long-running operation
* returned by another API method call.
*/
public final OperationsClient getOperationsClient() {
return operationsClient;
}
// AUTO-GENERATED DOCUMENTATION AND METHOD.
/**
* Exports a copy of all or a subset of entities from Google Cloud Datastore to another storage
* system, such as Google Cloud Storage. Recent updates to entities may not be reflected in the
* export. The export occurs in the background and its progress can be monitored and managed via
* the Operation resource that is created. The output of an export may only be used once the
   * associated operation is done. If an export operation is cancelled before completion, it may
* leave partial data behind in Google Cloud Storage.
*
* <p>Sample code:
*
* <pre>{@code
* // This snippet has been automatically generated for illustrative purposes only.
* // It may require modifications to work in your environment.
* try (DatastoreAdminClient datastoreAdminClient = DatastoreAdminClient.create()) {
* String projectId = "projectId-894832108";
* Map<String, String> labels = new HashMap<>();
* EntityFilter entityFilter = EntityFilter.newBuilder().build();
* String outputUrlPrefix = "outputUrlPrefix-1132598048";
* ExportEntitiesResponse response =
* datastoreAdminClient
* .exportEntitiesAsync(projectId, labels, entityFilter, outputUrlPrefix)
* .get();
* }
* }</pre>
*
* @param projectId Required. Project ID against which to make the request.
* @param labels Client-assigned labels.
* @param entityFilter Description of what data from the project is included in the export.
* @param outputUrlPrefix Required. Location for the export metadata and data files.
* <p>The full resource URL of the external storage location. Currently, only Google Cloud
* Storage is supported. So output_url_prefix should be of the form:
* `gs://BUCKET_NAME[/NAMESPACE_PATH]`, where `BUCKET_NAME` is the name of the Cloud Storage
* bucket and `NAMESPACE_PATH` is an optional Cloud Storage namespace path (this is not a
* Cloud Datastore namespace). For more information about Cloud Storage namespace paths, see
* [Object name
* considerations](https://cloud.google.com/storage/docs/naming#object-considerations).
* <p>The resulting files will be nested deeper than the specified URL prefix. The final
* output URL will be provided in the
* [google.datastore.admin.v1.ExportEntitiesResponse.output_url][google.datastore.admin.v1.ExportEntitiesResponse.output_url]
* field. That value should be used for subsequent ImportEntities operations.
* <p>By nesting the data files deeper, the same Cloud Storage bucket can be used in multiple
* ExportEntities operations without conflict.
* @throws com.google.api.gax.rpc.ApiException if the remote call fails
*/
public final OperationFuture<ExportEntitiesResponse, ExportEntitiesMetadata> exportEntitiesAsync(
String projectId,
Map<String, String> labels,
EntityFilter entityFilter,
String outputUrlPrefix) {
ExportEntitiesRequest request =
ExportEntitiesRequest.newBuilder()
.setProjectId(projectId)
.putAllLabels(labels)
.setEntityFilter(entityFilter)
.setOutputUrlPrefix(outputUrlPrefix)
.build();
return exportEntitiesAsync(request);
}
// AUTO-GENERATED DOCUMENTATION AND METHOD.
/**
* Exports a copy of all or a subset of entities from Google Cloud Datastore to another storage
* system, such as Google Cloud Storage. Recent updates to entities may not be reflected in the
* export. The export occurs in the background and its progress can be monitored and managed via
* the Operation resource that is created. The output of an export may only be used once the
   * associated operation is done. If an export operation is cancelled before completion, it may
* leave partial data behind in Google Cloud Storage.
*
* <p>Sample code:
*
* <pre>{@code
* // This snippet has been automatically generated for illustrative purposes only.
* // It may require modifications to work in your environment.
* try (DatastoreAdminClient datastoreAdminClient = DatastoreAdminClient.create()) {
* ExportEntitiesRequest request =
* ExportEntitiesRequest.newBuilder()
* .setProjectId("projectId-894832108")
* .putAllLabels(new HashMap<String, String>())
* .setEntityFilter(EntityFilter.newBuilder().build())
* .setOutputUrlPrefix("outputUrlPrefix-1132598048")
* .build();
* ExportEntitiesResponse response = datastoreAdminClient.exportEntitiesAsync(request).get();
* }
* }</pre>
*
* @param request The request object containing all of the parameters for the API call.
* @throws com.google.api.gax.rpc.ApiException if the remote call fails
*/
public final OperationFuture<ExportEntitiesResponse, ExportEntitiesMetadata> exportEntitiesAsync(
ExportEntitiesRequest request) {
return exportEntitiesOperationCallable().futureCall(request);
}
// AUTO-GENERATED DOCUMENTATION AND METHOD.
/**
* Exports a copy of all or a subset of entities from Google Cloud Datastore to another storage
* system, such as Google Cloud Storage. Recent updates to entities may not be reflected in the
* export. The export occurs in the background and its progress can be monitored and managed via
* the Operation resource that is created. The output of an export may only be used once the
   * associated operation is done. If an export operation is cancelled before completion, it may
* leave partial data behind in Google Cloud Storage.
*
* <p>Sample code:
*
* <pre>{@code
* // This snippet has been automatically generated for illustrative purposes only.
* // It may require modifications to work in your environment.
* try (DatastoreAdminClient datastoreAdminClient = DatastoreAdminClient.create()) {
* ExportEntitiesRequest request =
* ExportEntitiesRequest.newBuilder()
* .setProjectId("projectId-894832108")
* .putAllLabels(new HashMap<String, String>())
* .setEntityFilter(EntityFilter.newBuilder().build())
* .setOutputUrlPrefix("outputUrlPrefix-1132598048")
* .build();
* OperationFuture<ExportEntitiesResponse, ExportEntitiesMetadata> future =
* datastoreAdminClient.exportEntitiesOperationCallable().futureCall(request);
* // Do something.
* ExportEntitiesResponse response = future.get();
* }
* }</pre>
*/
public final OperationCallable<
ExportEntitiesRequest, ExportEntitiesResponse, ExportEntitiesMetadata>
exportEntitiesOperationCallable() {
return stub.exportEntitiesOperationCallable();
}
// AUTO-GENERATED DOCUMENTATION AND METHOD.
/**
* Exports a copy of all or a subset of entities from Google Cloud Datastore to another storage
* system, such as Google Cloud Storage. Recent updates to entities may not be reflected in the
* export. The export occurs in the background and its progress can be monitored and managed via
* the Operation resource that is created. The output of an export may only be used once the
   * associated operation is done. If an export operation is cancelled before completion, it may
* leave partial data behind in Google Cloud Storage.
*
* <p>Sample code:
*
* <pre>{@code
* // This snippet has been automatically generated for illustrative purposes only.
* // It may require modifications to work in your environment.
* try (DatastoreAdminClient datastoreAdminClient = DatastoreAdminClient.create()) {
* ExportEntitiesRequest request =
* ExportEntitiesRequest.newBuilder()
* .setProjectId("projectId-894832108")
* .putAllLabels(new HashMap<String, String>())
* .setEntityFilter(EntityFilter.newBuilder().build())
* .setOutputUrlPrefix("outputUrlPrefix-1132598048")
* .build();
* ApiFuture<Operation> future =
* datastoreAdminClient.exportEntitiesCallable().futureCall(request);
* // Do something.
* Operation response = future.get();
* }
* }</pre>
*/
public final UnaryCallable<ExportEntitiesRequest, Operation> exportEntitiesCallable() {
return stub.exportEntitiesCallable();
}
// AUTO-GENERATED DOCUMENTATION AND METHOD.
/**
* Imports entities into Google Cloud Datastore. Existing entities with the same key are
* overwritten. The import occurs in the background and its progress can be monitored and managed
* via the Operation resource that is created. If an ImportEntities operation is cancelled, it is
* possible that a subset of the data has already been imported to Cloud Datastore.
*
* <p>Sample code:
*
* <pre>{@code
* // This snippet has been automatically generated for illustrative purposes only.
* // It may require modifications to work in your environment.
* try (DatastoreAdminClient datastoreAdminClient = DatastoreAdminClient.create()) {
* String projectId = "projectId-894832108";
* Map<String, String> labels = new HashMap<>();
* String inputUrl = "inputUrl470706501";
* EntityFilter entityFilter = EntityFilter.newBuilder().build();
* datastoreAdminClient.importEntitiesAsync(projectId, labels, inputUrl, entityFilter).get();
* }
* }</pre>
*
* @param projectId Required. Project ID against which to make the request.
* @param labels Client-assigned labels.
* @param inputUrl Required. The full resource URL of the external storage location. Currently,
* only Google Cloud Storage is supported. So input_url should be of the form:
* `gs://BUCKET_NAME[/NAMESPACE_PATH]/OVERALL_EXPORT_METADATA_FILE`, where `BUCKET_NAME` is
* the name of the Cloud Storage bucket, `NAMESPACE_PATH` is an optional Cloud Storage
* namespace path (this is not a Cloud Datastore namespace), and
* `OVERALL_EXPORT_METADATA_FILE` is the metadata file written by the ExportEntities
* operation. For more information about Cloud Storage namespace paths, see [Object name
* considerations](https://cloud.google.com/storage/docs/naming#object-considerations).
* <p>For more information, see
* [google.datastore.admin.v1.ExportEntitiesResponse.output_url][google.datastore.admin.v1.ExportEntitiesResponse.output_url].
* @param entityFilter Optionally specify which kinds/namespaces are to be imported. If provided,
* the list must be a subset of the EntityFilter used in creating the export, otherwise a
* FAILED_PRECONDITION error will be returned. If no filter is specified then all entities
* from the export are imported.
* @throws com.google.api.gax.rpc.ApiException if the remote call fails
*/
public final OperationFuture<Empty, ImportEntitiesMetadata> importEntitiesAsync(
String projectId, Map<String, String> labels, String inputUrl, EntityFilter entityFilter) {
ImportEntitiesRequest request =
ImportEntitiesRequest.newBuilder()
.setProjectId(projectId)
.putAllLabels(labels)
.setInputUrl(inputUrl)
.setEntityFilter(entityFilter)
.build();
return importEntitiesAsync(request);
}
// AUTO-GENERATED DOCUMENTATION AND METHOD.
/**
* Imports entities into Google Cloud Datastore. Existing entities with the same key are
* overwritten. The import occurs in the background and its progress can be monitored and managed
* via the Operation resource that is created. If an ImportEntities operation is cancelled, it is
* possible that a subset of the data has already been imported to Cloud Datastore.
*
* <p>Sample code:
*
* <pre>{@code
* // This snippet has been automatically generated for illustrative purposes only.
* // It may require modifications to work in your environment.
* try (DatastoreAdminClient datastoreAdminClient = DatastoreAdminClient.create()) {
* ImportEntitiesRequest request =
* ImportEntitiesRequest.newBuilder()
* .setProjectId("projectId-894832108")
* .putAllLabels(new HashMap<String, String>())
* .setInputUrl("inputUrl470706501")
* .setEntityFilter(EntityFilter.newBuilder().build())
* .build();
* datastoreAdminClient.importEntitiesAsync(request).get();
* }
* }</pre>
*
* @param request The request object containing all of the parameters for the API call.
* @throws com.google.api.gax.rpc.ApiException if the remote call fails
*/
public final OperationFuture<Empty, ImportEntitiesMetadata> importEntitiesAsync(
ImportEntitiesRequest request) {
return importEntitiesOperationCallable().futureCall(request);
}
// AUTO-GENERATED DOCUMENTATION AND METHOD.
/**
* Imports entities into Google Cloud Datastore. Existing entities with the same key are
* overwritten. The import occurs in the background and its progress can be monitored and managed
* via the Operation resource that is created. If an ImportEntities operation is cancelled, it is
* possible that a subset of the data has already been imported to Cloud Datastore.
*
* <p>Sample code:
*
* <pre>{@code
* // This snippet has been automatically generated for illustrative purposes only.
* // It may require modifications to work in your environment.
* try (DatastoreAdminClient datastoreAdminClient = DatastoreAdminClient.create()) {
* ImportEntitiesRequest request =
* ImportEntitiesRequest.newBuilder()
* .setProjectId("projectId-894832108")
* .putAllLabels(new HashMap<String, String>())
* .setInputUrl("inputUrl470706501")
* .setEntityFilter(EntityFilter.newBuilder().build())
* .build();
* OperationFuture<Empty, ImportEntitiesMetadata> future =
* datastoreAdminClient.importEntitiesOperationCallable().futureCall(request);
* // Do something.
* future.get();
* }
* }</pre>
*/
public final OperationCallable<ImportEntitiesRequest, Empty, ImportEntitiesMetadata>
importEntitiesOperationCallable() {
return stub.importEntitiesOperationCallable();
}
// AUTO-GENERATED DOCUMENTATION AND METHOD.
/**
* Imports entities into Google Cloud Datastore. Existing entities with the same key are
* overwritten. The import occurs in the background and its progress can be monitored and managed
* via the Operation resource that is created. If an ImportEntities operation is cancelled, it is
* possible that a subset of the data has already been imported to Cloud Datastore.
*
* <p>Sample code:
*
* <pre>{@code
* // This snippet has been automatically generated for illustrative purposes only.
* // It may require modifications to work in your environment.
* try (DatastoreAdminClient datastoreAdminClient = DatastoreAdminClient.create()) {
* ImportEntitiesRequest request =
* ImportEntitiesRequest.newBuilder()
* .setProjectId("projectId-894832108")
* .putAllLabels(new HashMap<String, String>())
* .setInputUrl("inputUrl470706501")
* .setEntityFilter(EntityFilter.newBuilder().build())
* .build();
* ApiFuture<Operation> future =
* datastoreAdminClient.importEntitiesCallable().futureCall(request);
* // Do something.
* future.get();
* }
* }</pre>
*/
public final UnaryCallable<ImportEntitiesRequest, Operation> importEntitiesCallable() {
return stub.importEntitiesCallable();
}
// AUTO-GENERATED DOCUMENTATION AND METHOD.
/**
* Creates the specified index. A newly created index's initial state is `CREATING`. On completion
* of the returned [google.longrunning.Operation][google.longrunning.Operation], the state will be
* `READY`. If the index already exists, the call will return an `ALREADY_EXISTS` status.
*
* <p>During index creation, the process could result in an error, in which case the index will
* move to the `ERROR` state. The process can be recovered by fixing the data that caused the
* error, removing the index with [delete][google.datastore.admin.v1.DatastoreAdmin.DeleteIndex],
   * then re-creating the index with
   * [create][google.datastore.admin.v1.DatastoreAdmin.CreateIndex].
*
* <p>Indexes with a single property cannot be created.
*
* <p>Sample code:
*
* <pre>{@code
* // This snippet has been automatically generated for illustrative purposes only.
* // It may require modifications to work in your environment.
* try (DatastoreAdminClient datastoreAdminClient = DatastoreAdminClient.create()) {
* CreateIndexRequest request =
* CreateIndexRequest.newBuilder()
* .setProjectId("projectId-894832108")
* .setIndex(Index.newBuilder().build())
* .build();
* Index response = datastoreAdminClient.createIndexAsync(request).get();
* }
* }</pre>
*
* @param request The request object containing all of the parameters for the API call.
* @throws com.google.api.gax.rpc.ApiException if the remote call fails
*/
public final OperationFuture<Index, IndexOperationMetadata> createIndexAsync(
CreateIndexRequest request) {
return createIndexOperationCallable().futureCall(request);
}
// AUTO-GENERATED DOCUMENTATION AND METHOD.
/**
* Creates the specified index. A newly created index's initial state is `CREATING`. On completion
* of the returned [google.longrunning.Operation][google.longrunning.Operation], the state will be
* `READY`. If the index already exists, the call will return an `ALREADY_EXISTS` status.
*
* <p>During index creation, the process could result in an error, in which case the index will
* move to the `ERROR` state. The process can be recovered by fixing the data that caused the
* error, removing the index with [delete][google.datastore.admin.v1.DatastoreAdmin.DeleteIndex],
   * then re-creating the index with
   * [create][google.datastore.admin.v1.DatastoreAdmin.CreateIndex].
*
* <p>Indexes with a single property cannot be created.
*
* <p>Sample code:
*
* <pre>{@code
* // This snippet has been automatically generated for illustrative purposes only.
* // It may require modifications to work in your environment.
* try (DatastoreAdminClient datastoreAdminClient = DatastoreAdminClient.create()) {
* CreateIndexRequest request =
* CreateIndexRequest.newBuilder()
* .setProjectId("projectId-894832108")
* .setIndex(Index.newBuilder().build())
* .build();
* OperationFuture<Index, IndexOperationMetadata> future =
* datastoreAdminClient.createIndexOperationCallable().futureCall(request);
* // Do something.
* Index response = future.get();
* }
* }</pre>
*/
public final OperationCallable<CreateIndexRequest, Index, IndexOperationMetadata>
createIndexOperationCallable() {
return stub.createIndexOperationCallable();
}
// AUTO-GENERATED DOCUMENTATION AND METHOD.
/**
* Creates the specified index. A newly created index's initial state is `CREATING`. On completion
* of the returned [google.longrunning.Operation][google.longrunning.Operation], the state will be
* `READY`. If the index already exists, the call will return an `ALREADY_EXISTS` status.
*
* <p>During index creation, the process could result in an error, in which case the index will
* move to the `ERROR` state. The process can be recovered by fixing the data that caused the
* error, removing the index with [delete][google.datastore.admin.v1.DatastoreAdmin.DeleteIndex],
* then re-creating the index with [create]
* [google.datastore.admin.v1.DatastoreAdmin.CreateIndex].
*
* <p>Indexes with a single property cannot be created.
*
* <p>Sample code:
*
* <pre>{@code
* // This snippet has been automatically generated for illustrative purposes only.
* // It may require modifications to work in your environment.
* try (DatastoreAdminClient datastoreAdminClient = DatastoreAdminClient.create()) {
* CreateIndexRequest request =
* CreateIndexRequest.newBuilder()
* .setProjectId("projectId-894832108")
* .setIndex(Index.newBuilder().build())
* .build();
* ApiFuture<Operation> future = datastoreAdminClient.createIndexCallable().futureCall(request);
* // Do something.
* Operation response = future.get();
* }
* }</pre>
*/
public final UnaryCallable<CreateIndexRequest, Operation> createIndexCallable() {
return stub.createIndexCallable();
}
// AUTO-GENERATED DOCUMENTATION AND METHOD.
/**
* Deletes an existing index. An index can only be deleted if it is in a `READY` or `ERROR` state.
* On successful execution of the request, the index will be in a `DELETING`
* [state][google.datastore.admin.v1.Index.State]. And on completion of the returned
* [google.longrunning.Operation][google.longrunning.Operation], the index will be removed.
*
* <p>During index deletion, the process could result in an error, in which case the index will
* move to the `ERROR` state. The process can be recovered by fixing the data that caused the
* error, followed by calling [delete][google.datastore.admin.v1.DatastoreAdmin.DeleteIndex]
* again.
*
* <p>Sample code:
*
* <pre>{@code
* // This snippet has been automatically generated for illustrative purposes only.
* // It may require modifications to work in your environment.
* try (DatastoreAdminClient datastoreAdminClient = DatastoreAdminClient.create()) {
* DeleteIndexRequest request =
* DeleteIndexRequest.newBuilder()
* .setProjectId("projectId-894832108")
* .setIndexId("indexId1943291277")
* .build();
* Index response = datastoreAdminClient.deleteIndexAsync(request).get();
* }
* }</pre>
*
* @param request The request object containing all of the parameters for the API call.
* @throws com.google.api.gax.rpc.ApiException if the remote call fails
*/
public final OperationFuture<Index, IndexOperationMetadata> deleteIndexAsync(
DeleteIndexRequest request) {
return deleteIndexOperationCallable().futureCall(request);
}
// AUTO-GENERATED DOCUMENTATION AND METHOD.
/**
* Deletes an existing index. An index can only be deleted if it is in a `READY` or `ERROR` state.
* On successful execution of the request, the index will be in a `DELETING`
* [state][google.datastore.admin.v1.Index.State]. And on completion of the returned
* [google.longrunning.Operation][google.longrunning.Operation], the index will be removed.
*
* <p>During index deletion, the process could result in an error, in which case the index will
* move to the `ERROR` state. The process can be recovered by fixing the data that caused the
* error, followed by calling [delete][google.datastore.admin.v1.DatastoreAdmin.DeleteIndex]
* again.
*
* <p>Sample code:
*
* <pre>{@code
* // This snippet has been automatically generated for illustrative purposes only.
* // It may require modifications to work in your environment.
* try (DatastoreAdminClient datastoreAdminClient = DatastoreAdminClient.create()) {
* DeleteIndexRequest request =
* DeleteIndexRequest.newBuilder()
* .setProjectId("projectId-894832108")
* .setIndexId("indexId1943291277")
* .build();
* OperationFuture<Index, IndexOperationMetadata> future =
* datastoreAdminClient.deleteIndexOperationCallable().futureCall(request);
* // Do something.
* Index response = future.get();
* }
* }</pre>
*/
public final OperationCallable<DeleteIndexRequest, Index, IndexOperationMetadata>
deleteIndexOperationCallable() {
return stub.deleteIndexOperationCallable();
}
// AUTO-GENERATED DOCUMENTATION AND METHOD.
/**
* Deletes an existing index. An index can only be deleted if it is in a `READY` or `ERROR` state.
* On successful execution of the request, the index will be in a `DELETING`
* [state][google.datastore.admin.v1.Index.State]. And on completion of the returned
* [google.longrunning.Operation][google.longrunning.Operation], the index will be removed.
*
* <p>During index deletion, the process could result in an error, in which case the index will
* move to the `ERROR` state. The process can be recovered by fixing the data that caused the
* error, followed by calling [delete][google.datastore.admin.v1.DatastoreAdmin.DeleteIndex]
* again.
*
* <p>Sample code:
*
* <pre>{@code
* // This snippet has been automatically generated for illustrative purposes only.
* // It may require modifications to work in your environment.
* try (DatastoreAdminClient datastoreAdminClient = DatastoreAdminClient.create()) {
* DeleteIndexRequest request =
* DeleteIndexRequest.newBuilder()
* .setProjectId("projectId-894832108")
* .setIndexId("indexId1943291277")
* .build();
* ApiFuture<Operation> future = datastoreAdminClient.deleteIndexCallable().futureCall(request);
* // Do something.
* Operation response = future.get();
* }
* }</pre>
*/
public final UnaryCallable<DeleteIndexRequest, Operation> deleteIndexCallable() {
return stub.deleteIndexCallable();
}
// AUTO-GENERATED DOCUMENTATION AND METHOD.
/**
* Gets an index.
*
* <p>Sample code:
*
* <pre>{@code
* // This snippet has been automatically generated for illustrative purposes only.
* // It may require modifications to work in your environment.
* try (DatastoreAdminClient datastoreAdminClient = DatastoreAdminClient.create()) {
* GetIndexRequest request =
* GetIndexRequest.newBuilder()
* .setProjectId("projectId-894832108")
* .setIndexId("indexId1943291277")
* .build();
* Index response = datastoreAdminClient.getIndex(request);
* }
* }</pre>
*
* @param request The request object containing all of the parameters for the API call.
* @throws com.google.api.gax.rpc.ApiException if the remote call fails
*/
public final Index getIndex(GetIndexRequest request) {
return getIndexCallable().call(request);
}
// AUTO-GENERATED DOCUMENTATION AND METHOD.
/**
* Gets an index.
*
* <p>Sample code:
*
* <pre>{@code
* // This snippet has been automatically generated for illustrative purposes only.
* // It may require modifications to work in your environment.
* try (DatastoreAdminClient datastoreAdminClient = DatastoreAdminClient.create()) {
* GetIndexRequest request =
* GetIndexRequest.newBuilder()
* .setProjectId("projectId-894832108")
* .setIndexId("indexId1943291277")
* .build();
* ApiFuture<Index> future = datastoreAdminClient.getIndexCallable().futureCall(request);
* // Do something.
* Index response = future.get();
* }
* }</pre>
*/
public final UnaryCallable<GetIndexRequest, Index> getIndexCallable() {
return stub.getIndexCallable();
}
// AUTO-GENERATED DOCUMENTATION AND METHOD.
/**
* Lists the indexes that match the specified filters. Datastore uses an eventually consistent
* query to fetch the list of indexes and may occasionally return stale results.
*
* <p>Sample code:
*
* <pre>{@code
* // This snippet has been automatically generated for illustrative purposes only.
* // It may require modifications to work in your environment.
* try (DatastoreAdminClient datastoreAdminClient = DatastoreAdminClient.create()) {
* ListIndexesRequest request =
* ListIndexesRequest.newBuilder()
* .setProjectId("projectId-894832108")
* .setFilter("filter-1274492040")
* .setPageSize(883849137)
* .setPageToken("pageToken873572522")
* .build();
* for (Index element : datastoreAdminClient.listIndexes(request).iterateAll()) {
* // doThingsWith(element);
* }
* }
* }</pre>
*
* @param request The request object containing all of the parameters for the API call.
* @throws com.google.api.gax.rpc.ApiException if the remote call fails
*/
public final ListIndexesPagedResponse listIndexes(ListIndexesRequest request) {
return listIndexesPagedCallable().call(request);
}
// AUTO-GENERATED DOCUMENTATION AND METHOD.
/**
* Lists the indexes that match the specified filters. Datastore uses an eventually consistent
* query to fetch the list of indexes and may occasionally return stale results.
*
* <p>Sample code:
*
* <pre>{@code
* // This snippet has been automatically generated for illustrative purposes only.
* // It may require modifications to work in your environment.
* try (DatastoreAdminClient datastoreAdminClient = DatastoreAdminClient.create()) {
* ListIndexesRequest request =
* ListIndexesRequest.newBuilder()
* .setProjectId("projectId-894832108")
* .setFilter("filter-1274492040")
* .setPageSize(883849137)
* .setPageToken("pageToken873572522")
* .build();
 * ApiFuture<ListIndexesPagedResponse> future =
 *     datastoreAdminClient.listIndexesPagedCallable().futureCall(request);
* // Do something.
* for (Index element : future.get().iterateAll()) {
* // doThingsWith(element);
* }
* }
* }</pre>
*/
public final UnaryCallable<ListIndexesRequest, ListIndexesPagedResponse>
listIndexesPagedCallable() {
return stub.listIndexesPagedCallable();
}
// AUTO-GENERATED DOCUMENTATION AND METHOD.
/**
* Lists the indexes that match the specified filters. Datastore uses an eventually consistent
* query to fetch the list of indexes and may occasionally return stale results.
*
* <p>Sample code:
*
* <pre>{@code
* // This snippet has been automatically generated for illustrative purposes only.
* // It may require modifications to work in your environment.
* try (DatastoreAdminClient datastoreAdminClient = DatastoreAdminClient.create()) {
* ListIndexesRequest request =
* ListIndexesRequest.newBuilder()
* .setProjectId("projectId-894832108")
* .setFilter("filter-1274492040")
* .setPageSize(883849137)
* .setPageToken("pageToken873572522")
* .build();
* while (true) {
* ListIndexesResponse response = datastoreAdminClient.listIndexesCallable().call(request);
* for (Index element : response.getResponsesList()) {
* // doThingsWith(element);
* }
* String nextPageToken = response.getNextPageToken();
* if (!Strings.isNullOrEmpty(nextPageToken)) {
* request = request.toBuilder().setPageToken(nextPageToken).build();
* } else {
* break;
* }
* }
* }
* }</pre>
*/
public final UnaryCallable<ListIndexesRequest, ListIndexesResponse> listIndexesCallable() {
return stub.listIndexesCallable();
}
@Override
public final void close() {
stub.close();
}
@Override
public void shutdown() {
stub.shutdown();
}
@Override
public boolean isShutdown() {
return stub.isShutdown();
}
@Override
public boolean isTerminated() {
return stub.isTerminated();
}
@Override
public void shutdownNow() {
stub.shutdownNow();
}
@Override
public boolean awaitTermination(long duration, TimeUnit unit) throws InterruptedException {
return stub.awaitTermination(duration, unit);
}
public static class ListIndexesPagedResponse
extends AbstractPagedListResponse<
ListIndexesRequest,
ListIndexesResponse,
Index,
ListIndexesPage,
ListIndexesFixedSizeCollection> {
public static ApiFuture<ListIndexesPagedResponse> createAsync(
PageContext<ListIndexesRequest, ListIndexesResponse, Index> context,
ApiFuture<ListIndexesResponse> futureResponse) {
ApiFuture<ListIndexesPage> futurePage =
ListIndexesPage.createEmptyPage().createPageAsync(context, futureResponse);
return ApiFutures.transform(
futurePage, input -> new ListIndexesPagedResponse(input), MoreExecutors.directExecutor());
}
private ListIndexesPagedResponse(ListIndexesPage page) {
super(page, ListIndexesFixedSizeCollection.createEmptyCollection());
}
}
public static class ListIndexesPage
extends AbstractPage<ListIndexesRequest, ListIndexesResponse, Index, ListIndexesPage> {
private ListIndexesPage(
PageContext<ListIndexesRequest, ListIndexesResponse, Index> context,
ListIndexesResponse response) {
super(context, response);
}
private static ListIndexesPage createEmptyPage() {
return new ListIndexesPage(null, null);
}
@Override
protected ListIndexesPage createPage(
PageContext<ListIndexesRequest, ListIndexesResponse, Index> context,
ListIndexesResponse response) {
return new ListIndexesPage(context, response);
}
@Override
public ApiFuture<ListIndexesPage> createPageAsync(
PageContext<ListIndexesRequest, ListIndexesResponse, Index> context,
ApiFuture<ListIndexesResponse> futureResponse) {
return super.createPageAsync(context, futureResponse);
}
}
public static class ListIndexesFixedSizeCollection
extends AbstractFixedSizeCollection<
ListIndexesRequest,
ListIndexesResponse,
Index,
ListIndexesPage,
ListIndexesFixedSizeCollection> {
private ListIndexesFixedSizeCollection(List<ListIndexesPage> pages, int collectionSize) {
super(pages, collectionSize);
}
private static ListIndexesFixedSizeCollection createEmptyCollection() {
return new ListIndexesFixedSizeCollection(null, 0);
}
@Override
protected ListIndexesFixedSizeCollection createCollection(
List<ListIndexesPage> pages, int collectionSize) {
return new ListIndexesFixedSizeCollection(pages, collectionSize);
}
}
}
| chore: Integrate new gapic-generator-java and rules_gapic (#768)
* chore: Integrate new gapic-generator-java and rules_gapic
PiperOrigin-RevId: 454027580
Source-Link: https://github.com/googleapis/googleapis/commit/1b222777baa702e7135610355706570ed2b56318
Source-Link: https://github.com/googleapis/googleapis-gen/commit/e04cea20d0d12eb5c3bdb360a9e72b654edcb638
Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiZTA0Y2VhMjBkMGQxMmViNWMzYmRiMzYwYTllNzJiNjU0ZWRjYjYzOCJ9
* 🦉 Updates from OwlBot post-processor
See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md
Co-authored-by: Owl Bot <7f39581e9a7ddb69f3fa3e6fc69a582ea2916fc9@users.noreply.github.com> | google-cloud-datastore/src/main/java/com/google/cloud/datastore/admin/v1/DatastoreAdminClient.java | chore: Integrate new gapic-generator-java and rules_gapic (#768) | <ide><path>oogle-cloud-datastore/src/main/java/com/google/cloud/datastore/admin/v1/DatastoreAdminClient.java
<ide> * .build();
<ide> * while (true) {
<ide> * ListIndexesResponse response = datastoreAdminClient.listIndexesCallable().call(request);
<del> * for (Index element : response.getResponsesList()) {
<add> * for (Index element : response.getIndexesList()) {
<ide> * // doThingsWith(element);
<ide> * }
<ide> * String nextPageToken = response.getNextPageToken(); |
|
Java | apache-2.0 | 08f91b8563c3e66670abc3bfa0d2a64417bfb243 | 0 | codeaudit/OG-Platform,McLeodMoores/starling,McLeodMoores/starling,ChinaQuants/OG-Platform,McLeodMoores/starling,ChinaQuants/OG-Platform,codeaudit/OG-Platform,jeorme/OG-Platform,nssales/OG-Platform,jerome79/OG-Platform,McLeodMoores/starling,ChinaQuants/OG-Platform,jeorme/OG-Platform,jerome79/OG-Platform,codeaudit/OG-Platform,nssales/OG-Platform,jerome79/OG-Platform,jeorme/OG-Platform,codeaudit/OG-Platform,nssales/OG-Platform,jeorme/OG-Platform,DevStreet/FinanceAnalytics,jerome79/OG-Platform,DevStreet/FinanceAnalytics,ChinaQuants/OG-Platform,nssales/OG-Platform,DevStreet/FinanceAnalytics,DevStreet/FinanceAnalytics | /**
* Copyright (C) 2009 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.component.factory.web;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.concurrent.ScheduledExecutorService;
import org.joda.beans.Bean;
import org.joda.beans.BeanBuilder;
import org.joda.beans.BeanDefinition;
import org.joda.beans.JodaBeanUtils;
import org.joda.beans.MetaProperty;
import org.joda.beans.Property;
import org.joda.beans.PropertyDefinition;
import org.joda.beans.impl.direct.DirectBeanBuilder;
import org.joda.beans.impl.direct.DirectMetaProperty;
import org.joda.beans.impl.direct.DirectMetaPropertyMap;
import com.opengamma.OpenGammaRuntimeException;
import com.opengamma.batch.BatchMaster;
import com.opengamma.component.ComponentRepository;
import com.opengamma.component.factory.AbstractComponentFactory;
import com.opengamma.component.rest.JerseyRestResourceFactory;
import com.opengamma.core.historicaltimeseries.HistoricalTimeSeriesSource;
import com.opengamma.core.security.SecuritySource;
import com.opengamma.engine.ComputationTargetResolver;
import com.opengamma.engine.marketdata.NamedMarketDataSpecificationRepository;
import com.opengamma.engine.marketdata.live.LiveMarketDataProviderFactory;
import com.opengamma.engine.target.ComputationTargetTypeProvider;
import com.opengamma.engine.target.DefaultComputationTargetTypeProvider;
import com.opengamma.engine.view.ViewProcessor;
import com.opengamma.master.config.ConfigMaster;
import com.opengamma.master.config.impl.MasterConfigSource;
import com.opengamma.master.exchange.ExchangeMaster;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesLoader;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesMaster;
import com.opengamma.master.holiday.HolidayMaster;
import com.opengamma.master.marketdatasnapshot.MarketDataSnapshotMaster;
import com.opengamma.master.orgs.OrganizationMaster;
import com.opengamma.master.portfolio.PortfolioMaster;
import com.opengamma.master.position.PositionMaster;
import com.opengamma.master.region.RegionMaster;
import com.opengamma.master.security.SecurityLoader;
import com.opengamma.master.security.SecurityMaster;
import com.opengamma.web.WebAboutResource;
import com.opengamma.web.WebHomeResource;
import com.opengamma.web.analytics.rest.LiveMarketDataProviderNamesResource;
import com.opengamma.web.analytics.rest.LiveMarketDataSpecificationNamesResource;
import com.opengamma.web.config.WebConfigsResource;
import com.opengamma.web.exchange.WebExchangesResource;
import com.opengamma.web.historicaltimeseries.WebAllHistoricalTimeSeriesResource;
import com.opengamma.web.holiday.WebHolidaysResource;
import com.opengamma.web.marketdatasnapshot.WebMarketDataSnapshotsResource;
import com.opengamma.web.orgs.WebOrganizationsResource;
import com.opengamma.web.portfolio.WebPortfoliosResource;
import com.opengamma.web.position.WebPositionsResource;
import com.opengamma.web.region.WebRegionsResource;
import com.opengamma.web.security.WebSecuritiesResource;
import com.opengamma.web.target.WebComputationTargetTypeResource;
import com.opengamma.web.valuerequirementname.WebValueRequirementNamesResource;
/**
* Component factory for the main website.
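 *
 * <p>A minimal usage sketch, for illustration only. The {@code repo} instance and the
 * master/source/loader components are assumed to be supplied by the surrounding server
 * configuration rather than constructed inline:
 *
 * <pre>{@code
 * WebsiteBasicsComponentFactory factory = new WebsiteBasicsComponentFactory();
 * factory.setConfigMaster(configMaster);
 * factory.setSecurityMaster(securityMaster);
 * // ... set the remaining notNull masters, sources, loaders and the scheduler ...
 * factory.setLiveMarketDataProviderFactory(liveMarketDataProviderFactory);
 * factory.init(repo, new LinkedHashMap<String, String>());
 * }</pre>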
*/
@BeanDefinition
public class WebsiteBasicsComponentFactory extends AbstractComponentFactory {
/**
* The config master.
*/
@PropertyDefinition(validate = "notNull")
private ConfigMaster _configMaster;
/**
* The exchange master.
*/
@PropertyDefinition(validate = "notNull")
private ExchangeMaster _exchangeMaster;
/**
* The holiday master.
*/
@PropertyDefinition(validate = "notNull")
private HolidayMaster _holidayMaster;
/**
   * The region master.
*/
@PropertyDefinition(validate = "notNull")
private RegionMaster _regionMaster;
/**
* The security master.
*/
@PropertyDefinition(validate = "notNull")
private SecurityMaster _securityMaster;
/**
* The security source.
*/
@PropertyDefinition(validate = "notNull")
private SecuritySource _securitySource;
/**
* The security loader.
*/
@PropertyDefinition(validate = "notNull")
private SecurityLoader _securityLoader;
/**
* The position master.
*/
@PropertyDefinition(validate = "notNull")
private PositionMaster _positionMaster;
/**
* The portfolio master.
*/
@PropertyDefinition(validate = "notNull")
private PortfolioMaster _portfolioMaster;
/**
* The batch master.
*/
@PropertyDefinition(validate = "notNull")
private BatchMaster _batchMaster;
/**
* The time-series master.
*/
@PropertyDefinition(validate = "notNull")
private HistoricalTimeSeriesMaster _historicalTimeSeriesMaster;
/**
* The time-series source.
*/
@PropertyDefinition(validate = "notNull")
private HistoricalTimeSeriesSource _historicalTimeSeriesSource;
/**
* The time-series loader.
*/
@PropertyDefinition(validate = "notNull")
private HistoricalTimeSeriesLoader _historicalTimeSeriesLoader;
/**
* The scheduler.
*/
@PropertyDefinition(validate = "notNull")
private ScheduledExecutorService _scheduler;
/**
* The available computation target types.
*/
@PropertyDefinition(validate = "notNull")
private ComputationTargetTypeProvider _targetTypes = new DefaultComputationTargetTypeProvider();
/**
* The organization master.
*/
@PropertyDefinition(validate = "notNull")
private OrganizationMaster _organizationMaster;
/**
* The market data snapshot master.
*/
@PropertyDefinition(validate = "notNull")
private MarketDataSnapshotMaster _marketDataSnapshotMaster;
/**
* For obtaining the live market data provider names. Either this or marketDataSpecificationRepository must be set.
*/
@PropertyDefinition
private LiveMarketDataProviderFactory _liveMarketDataProviderFactory;
/**
* For looking up market data provider specifications by name. Either this or liveMarketDataProviderFactory must be set.
*
* @deprecated use liveMarketDataProviderFactory
*/
@PropertyDefinition
@Deprecated
private NamedMarketDataSpecificationRepository _marketDataSpecificationRepository;
/**
* The view processor.
*/
@PropertyDefinition(validate = "notNull")
private ViewProcessor _viewProcessor;
/**
* The computation target resolver.
*/
@PropertyDefinition(validate = "notNull")
private ComputationTargetResolver _computationTargetResolver;
//-------------------------------------------------------------------------
@Override
public void init(ComponentRepository repo, LinkedHashMap<String, String> configuration) {
initBasics(repo);
initMasters(repo);
initValueRequirementNames(repo, configuration);
}
protected void initBasics(ComponentRepository repo) {
repo.getRestComponents().publishResource(new WebHomeResource());
repo.getRestComponents().publishResource(new WebAboutResource());
}
protected void initMasters(ComponentRepository repo) {
if (getLiveMarketDataProviderFactory() == null && getMarketDataSpecificationRepository() == null) {
throw new OpenGammaRuntimeException("Neither " + marketDataSpecificationRepository().name() + " nor " + liveMarketDataProviderFactory().name() + " were specified");
}
JerseyRestResourceFactory resource;
resource = new JerseyRestResourceFactory(WebConfigsResource.class, getConfigMaster());
repo.getRestComponents().publishResource(resource);
resource = new JerseyRestResourceFactory(WebExchangesResource.class, getExchangeMaster());
repo.getRestComponents().publishResource(resource);
resource = new JerseyRestResourceFactory(WebHolidaysResource.class, getHolidayMaster());
repo.getRestComponents().publishResource(resource);
resource = new JerseyRestResourceFactory(WebRegionsResource.class, getRegionMaster());
repo.getRestComponents().publishResource(resource);
resource = new JerseyRestResourceFactory(WebSecuritiesResource.class, getSecurityMaster(), getSecurityLoader(), getHistoricalTimeSeriesMaster(), getOrganizationMaster());
repo.getRestComponents().publishResource(resource);
resource = new JerseyRestResourceFactory(WebPositionsResource.class, getPositionMaster(), getSecurityLoader(), getSecuritySource(), getHistoricalTimeSeriesSource());
repo.getRestComponents().publishResource(resource);
resource = new JerseyRestResourceFactory(WebPortfoliosResource.class, getPortfolioMaster(), getPositionMaster(), getSecuritySource(), getScheduler());
repo.getRestComponents().publishResource(resource);
final MasterConfigSource configSource = new MasterConfigSource(getConfigMaster());
resource = new JerseyRestResourceFactory(WebAllHistoricalTimeSeriesResource.class, getHistoricalTimeSeriesMaster(), getHistoricalTimeSeriesLoader(), configSource);
repo.getRestComponents().publishResource(resource);
resource = new JerseyRestResourceFactory(WebComputationTargetTypeResource.class, getTargetTypes());
repo.getRestComponents().publishResource(resource);
resource = new JerseyRestResourceFactory(WebOrganizationsResource.class, getOrganizationMaster());
repo.getRestComponents().publishResource(resource);
resource = new JerseyRestResourceFactory(WebMarketDataSnapshotsResource.class,
getMarketDataSnapshotMaster(), getConfigMaster(), getLiveMarketDataProviderFactory(), getMarketDataSpecificationRepository(),
configSource, getComputationTargetResolver(), getViewProcessor(), getHistoricalTimeSeriesSource());
repo.getRestComponents().publishResource(resource);
}
  protected void initValueRequirementNames(ComponentRepository repo, LinkedHashMap<String, String> configuration) {
    // The optional configuration entry lists the classes supplying value requirement names;
    // it is consumed here so it is not left behind as an unhandled configuration item.
    String valueRequirementNameClasses = configuration.get(WebValueRequirementNamesResource.VALUE_REQUIREMENT_NAME_CLASSES);
    configuration.remove(WebValueRequirementNamesResource.VALUE_REQUIREMENT_NAME_CLASSES);
    if (valueRequirementNameClasses == null) {
      // not configured: publish the resource with its default classes
      repo.getRestComponents().publishResource(new WebValueRequirementNamesResource());
    } else if (valueRequirementNameClasses.contains(",")) {
      // comma-separated list of class names
      repo.getRestComponents().publishResource(
          new WebValueRequirementNamesResource(valueRequirementNameClasses.split(",")));
    } else {
      // a single class name
      repo.getRestComponents().publishResource(new WebValueRequirementNamesResource(new String[] {valueRequirementNameClasses}));
    }
  }
//------------------------- AUTOGENERATED START -------------------------
///CLOVER:OFF
/**
* The meta-bean for {@code WebsiteBasicsComponentFactory}.
* @return the meta-bean, not null
*/
public static WebsiteBasicsComponentFactory.Meta meta() {
return WebsiteBasicsComponentFactory.Meta.INSTANCE;
}
static {
JodaBeanUtils.registerMetaBean(WebsiteBasicsComponentFactory.Meta.INSTANCE);
}
@Override
public WebsiteBasicsComponentFactory.Meta metaBean() {
return WebsiteBasicsComponentFactory.Meta.INSTANCE;
}
//-----------------------------------------------------------------------
/**
* Gets the config master.
* @return the value of the property, not null
*/
public ConfigMaster getConfigMaster() {
return _configMaster;
}
/**
* Sets the config master.
* @param configMaster the new value of the property, not null
*/
public void setConfigMaster(ConfigMaster configMaster) {
JodaBeanUtils.notNull(configMaster, "configMaster");
this._configMaster = configMaster;
}
/**
   * Gets the {@code configMaster} property.
* @return the property, not null
*/
public final Property<ConfigMaster> configMaster() {
return metaBean().configMaster().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets the exchange master.
* @return the value of the property, not null
*/
public ExchangeMaster getExchangeMaster() {
return _exchangeMaster;
}
/**
* Sets the exchange master.
* @param exchangeMaster the new value of the property, not null
*/
public void setExchangeMaster(ExchangeMaster exchangeMaster) {
JodaBeanUtils.notNull(exchangeMaster, "exchangeMaster");
this._exchangeMaster = exchangeMaster;
}
/**
   * Gets the {@code exchangeMaster} property.
* @return the property, not null
*/
public final Property<ExchangeMaster> exchangeMaster() {
return metaBean().exchangeMaster().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets the holiday master.
* @return the value of the property, not null
*/
public HolidayMaster getHolidayMaster() {
return _holidayMaster;
}
/**
* Sets the holiday master.
* @param holidayMaster the new value of the property, not null
*/
public void setHolidayMaster(HolidayMaster holidayMaster) {
JodaBeanUtils.notNull(holidayMaster, "holidayMaster");
this._holidayMaster = holidayMaster;
}
/**
   * Gets the {@code holidayMaster} property.
* @return the property, not null
*/
public final Property<HolidayMaster> holidayMaster() {
return metaBean().holidayMaster().createProperty(this);
}
//-----------------------------------------------------------------------
/**
   * Gets the region master.
* @return the value of the property, not null
*/
public RegionMaster getRegionMaster() {
return _regionMaster;
}
/**
   * Sets the region master.
* @param regionMaster the new value of the property, not null
*/
public void setRegionMaster(RegionMaster regionMaster) {
JodaBeanUtils.notNull(regionMaster, "regionMaster");
this._regionMaster = regionMaster;
}
/**
   * Gets the {@code regionMaster} property.
* @return the property, not null
*/
public final Property<RegionMaster> regionMaster() {
return metaBean().regionMaster().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets the security master.
* @return the value of the property, not null
*/
public SecurityMaster getSecurityMaster() {
return _securityMaster;
}
/**
* Sets the security master.
* @param securityMaster the new value of the property, not null
*/
public void setSecurityMaster(SecurityMaster securityMaster) {
JodaBeanUtils.notNull(securityMaster, "securityMaster");
this._securityMaster = securityMaster;
}
/**
   * Gets the {@code securityMaster} property.
* @return the property, not null
*/
public final Property<SecurityMaster> securityMaster() {
return metaBean().securityMaster().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets the security source.
* @return the value of the property, not null
*/
public SecuritySource getSecuritySource() {
return _securitySource;
}
/**
* Sets the security source.
* @param securitySource the new value of the property, not null
*/
public void setSecuritySource(SecuritySource securitySource) {
JodaBeanUtils.notNull(securitySource, "securitySource");
this._securitySource = securitySource;
}
/**
   * Gets the {@code securitySource} property.
* @return the property, not null
*/
public final Property<SecuritySource> securitySource() {
return metaBean().securitySource().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets the security loader.
* @return the value of the property, not null
*/
public SecurityLoader getSecurityLoader() {
return _securityLoader;
}
/**
* Sets the security loader.
* @param securityLoader the new value of the property, not null
*/
public void setSecurityLoader(SecurityLoader securityLoader) {
JodaBeanUtils.notNull(securityLoader, "securityLoader");
this._securityLoader = securityLoader;
}
/**
   * Gets the {@code securityLoader} property.
* @return the property, not null
*/
public final Property<SecurityLoader> securityLoader() {
return metaBean().securityLoader().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets the position master.
* @return the value of the property, not null
*/
public PositionMaster getPositionMaster() {
return _positionMaster;
}
/**
* Sets the position master.
* @param positionMaster the new value of the property, not null
*/
public void setPositionMaster(PositionMaster positionMaster) {
JodaBeanUtils.notNull(positionMaster, "positionMaster");
this._positionMaster = positionMaster;
}
/**
   * Gets the {@code positionMaster} property.
* @return the property, not null
*/
public final Property<PositionMaster> positionMaster() {
return metaBean().positionMaster().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets the portfolio master.
* @return the value of the property, not null
*/
public PortfolioMaster getPortfolioMaster() {
return _portfolioMaster;
}
/**
* Sets the portfolio master.
* @param portfolioMaster the new value of the property, not null
*/
public void setPortfolioMaster(PortfolioMaster portfolioMaster) {
JodaBeanUtils.notNull(portfolioMaster, "portfolioMaster");
this._portfolioMaster = portfolioMaster;
}
/**
   * Gets the {@code portfolioMaster} property.
* @return the property, not null
*/
public final Property<PortfolioMaster> portfolioMaster() {
return metaBean().portfolioMaster().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets the batch master.
* @return the value of the property, not null
*/
public BatchMaster getBatchMaster() {
return _batchMaster;
}
/**
* Sets the batch master.
* @param batchMaster the new value of the property, not null
*/
public void setBatchMaster(BatchMaster batchMaster) {
JodaBeanUtils.notNull(batchMaster, "batchMaster");
this._batchMaster = batchMaster;
}
/**
   * Gets the {@code batchMaster} property.
* @return the property, not null
*/
public final Property<BatchMaster> batchMaster() {
return metaBean().batchMaster().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets the time-series master.
* @return the value of the property, not null
*/
public HistoricalTimeSeriesMaster getHistoricalTimeSeriesMaster() {
return _historicalTimeSeriesMaster;
}
/**
* Sets the time-series master.
* @param historicalTimeSeriesMaster the new value of the property, not null
*/
public void setHistoricalTimeSeriesMaster(HistoricalTimeSeriesMaster historicalTimeSeriesMaster) {
JodaBeanUtils.notNull(historicalTimeSeriesMaster, "historicalTimeSeriesMaster");
this._historicalTimeSeriesMaster = historicalTimeSeriesMaster;
}
/**
   * Gets the {@code historicalTimeSeriesMaster} property.
* @return the property, not null
*/
public final Property<HistoricalTimeSeriesMaster> historicalTimeSeriesMaster() {
return metaBean().historicalTimeSeriesMaster().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets the time-series source.
* @return the value of the property, not null
*/
public HistoricalTimeSeriesSource getHistoricalTimeSeriesSource() {
return _historicalTimeSeriesSource;
}
/**
* Sets the time-series source.
* @param historicalTimeSeriesSource the new value of the property, not null
*/
public void setHistoricalTimeSeriesSource(HistoricalTimeSeriesSource historicalTimeSeriesSource) {
JodaBeanUtils.notNull(historicalTimeSeriesSource, "historicalTimeSeriesSource");
this._historicalTimeSeriesSource = historicalTimeSeriesSource;
}
/**
   * Gets the {@code historicalTimeSeriesSource} property.
* @return the property, not null
*/
public final Property<HistoricalTimeSeriesSource> historicalTimeSeriesSource() {
return metaBean().historicalTimeSeriesSource().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets the time-series loader.
* @return the value of the property, not null
*/
public HistoricalTimeSeriesLoader getHistoricalTimeSeriesLoader() {
return _historicalTimeSeriesLoader;
}
/**
* Sets the time-series loader.
* @param historicalTimeSeriesLoader the new value of the property, not null
*/
public void setHistoricalTimeSeriesLoader(HistoricalTimeSeriesLoader historicalTimeSeriesLoader) {
JodaBeanUtils.notNull(historicalTimeSeriesLoader, "historicalTimeSeriesLoader");
this._historicalTimeSeriesLoader = historicalTimeSeriesLoader;
}
/**
   * Gets the {@code historicalTimeSeriesLoader} property.
* @return the property, not null
*/
public final Property<HistoricalTimeSeriesLoader> historicalTimeSeriesLoader() {
return metaBean().historicalTimeSeriesLoader().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets the scheduler.
* @return the value of the property, not null
*/
public ScheduledExecutorService getScheduler() {
return _scheduler;
}
/**
* Sets the scheduler.
* @param scheduler the new value of the property, not null
*/
public void setScheduler(ScheduledExecutorService scheduler) {
JodaBeanUtils.notNull(scheduler, "scheduler");
this._scheduler = scheduler;
}
/**
   * Gets the {@code scheduler} property.
* @return the property, not null
*/
public final Property<ScheduledExecutorService> scheduler() {
return metaBean().scheduler().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets the available computation target types.
* @return the value of the property, not null
*/
public ComputationTargetTypeProvider getTargetTypes() {
return _targetTypes;
}
/**
* Sets the available computation target types.
* @param targetTypes the new value of the property, not null
*/
public void setTargetTypes(ComputationTargetTypeProvider targetTypes) {
JodaBeanUtils.notNull(targetTypes, "targetTypes");
this._targetTypes = targetTypes;
}
/**
   * Gets the {@code targetTypes} property.
* @return the property, not null
*/
public final Property<ComputationTargetTypeProvider> targetTypes() {
return metaBean().targetTypes().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets the organization master.
* @return the value of the property, not null
*/
public OrganizationMaster getOrganizationMaster() {
return _organizationMaster;
}
/**
* Sets the organization master.
* @param organizationMaster the new value of the property, not null
*/
public void setOrganizationMaster(OrganizationMaster organizationMaster) {
JodaBeanUtils.notNull(organizationMaster, "organizationMaster");
this._organizationMaster = organizationMaster;
}
/**
   * Gets the {@code organizationMaster} property.
* @return the property, not null
*/
public final Property<OrganizationMaster> organizationMaster() {
return metaBean().organizationMaster().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets the market data snapshot master.
* @return the value of the property, not null
*/
public MarketDataSnapshotMaster getMarketDataSnapshotMaster() {
return _marketDataSnapshotMaster;
}
/**
* Sets the market data snapshot master.
* @param marketDataSnapshotMaster the new value of the property, not null
*/
public void setMarketDataSnapshotMaster(MarketDataSnapshotMaster marketDataSnapshotMaster) {
JodaBeanUtils.notNull(marketDataSnapshotMaster, "marketDataSnapshotMaster");
this._marketDataSnapshotMaster = marketDataSnapshotMaster;
}
/**
   * Gets the {@code marketDataSnapshotMaster} property.
* @return the property, not null
*/
public final Property<MarketDataSnapshotMaster> marketDataSnapshotMaster() {
return metaBean().marketDataSnapshotMaster().createProperty(this);
}
//-----------------------------------------------------------------------
/**
   * Gets the factory for obtaining the live market data provider names. Either this or marketDataSpecificationRepository must be set.
* @return the value of the property
*/
public LiveMarketDataProviderFactory getLiveMarketDataProviderFactory() {
return _liveMarketDataProviderFactory;
}
/**
   * Sets the factory for obtaining the live market data provider names. Either this or marketDataSpecificationRepository must be set.
* @param liveMarketDataProviderFactory the new value of the property
*/
public void setLiveMarketDataProviderFactory(LiveMarketDataProviderFactory liveMarketDataProviderFactory) {
this._liveMarketDataProviderFactory = liveMarketDataProviderFactory;
}
/**
   * Gets the {@code liveMarketDataProviderFactory} property.
* @return the property, not null
*/
public final Property<LiveMarketDataProviderFactory> liveMarketDataProviderFactory() {
return metaBean().liveMarketDataProviderFactory().createProperty(this);
}
//-----------------------------------------------------------------------
/**
   * Gets the repository for looking up market data provider specifications by name. Either this or liveMarketDataProviderFactory must be set.
*
* @deprecated use liveMarketDataProviderFactory
* @return the value of the property
*/
@Deprecated
public NamedMarketDataSpecificationRepository getMarketDataSpecificationRepository() {
return _marketDataSpecificationRepository;
}
/**
   * Sets the repository for looking up market data provider specifications by name. Either this or liveMarketDataProviderFactory must be set.
*
* @deprecated use liveMarketDataProviderFactory
* @param marketDataSpecificationRepository the new value of the property
*/
@Deprecated
public void setMarketDataSpecificationRepository(NamedMarketDataSpecificationRepository marketDataSpecificationRepository) {
this._marketDataSpecificationRepository = marketDataSpecificationRepository;
}
/**
   * Gets the {@code marketDataSpecificationRepository} property.
*
* @deprecated use liveMarketDataProviderFactory
* @return the property, not null
*/
@Deprecated
public final Property<NamedMarketDataSpecificationRepository> marketDataSpecificationRepository() {
return metaBean().marketDataSpecificationRepository().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets the view processor.
* @return the value of the property, not null
*/
public ViewProcessor getViewProcessor() {
return _viewProcessor;
}
/**
* Sets the view processor.
* @param viewProcessor the new value of the property, not null
*/
public void setViewProcessor(ViewProcessor viewProcessor) {
JodaBeanUtils.notNull(viewProcessor, "viewProcessor");
this._viewProcessor = viewProcessor;
}
/**
   * Gets the {@code viewProcessor} property.
* @return the property, not null
*/
public final Property<ViewProcessor> viewProcessor() {
return metaBean().viewProcessor().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets the computation target resolver.
* @return the value of the property, not null
*/
public ComputationTargetResolver getComputationTargetResolver() {
return _computationTargetResolver;
}
/**
* Sets the computation target resolver.
* @param computationTargetResolver the new value of the property, not null
*/
public void setComputationTargetResolver(ComputationTargetResolver computationTargetResolver) {
JodaBeanUtils.notNull(computationTargetResolver, "computationTargetResolver");
this._computationTargetResolver = computationTargetResolver;
}
/**
   * Gets the {@code computationTargetResolver} property.
* @return the property, not null
*/
public final Property<ComputationTargetResolver> computationTargetResolver() {
return metaBean().computationTargetResolver().createProperty(this);
}
//-----------------------------------------------------------------------
@Override
public WebsiteBasicsComponentFactory clone() {
return (WebsiteBasicsComponentFactory) super.clone();
}
@Override
public boolean equals(Object obj) {
if (obj == this) {
return true;
}
if (obj != null && obj.getClass() == this.getClass()) {
WebsiteBasicsComponentFactory other = (WebsiteBasicsComponentFactory) obj;
return JodaBeanUtils.equal(getConfigMaster(), other.getConfigMaster()) &&
JodaBeanUtils.equal(getExchangeMaster(), other.getExchangeMaster()) &&
JodaBeanUtils.equal(getHolidayMaster(), other.getHolidayMaster()) &&
JodaBeanUtils.equal(getRegionMaster(), other.getRegionMaster()) &&
JodaBeanUtils.equal(getSecurityMaster(), other.getSecurityMaster()) &&
JodaBeanUtils.equal(getSecuritySource(), other.getSecuritySource()) &&
JodaBeanUtils.equal(getSecurityLoader(), other.getSecurityLoader()) &&
JodaBeanUtils.equal(getPositionMaster(), other.getPositionMaster()) &&
JodaBeanUtils.equal(getPortfolioMaster(), other.getPortfolioMaster()) &&
JodaBeanUtils.equal(getBatchMaster(), other.getBatchMaster()) &&
JodaBeanUtils.equal(getHistoricalTimeSeriesMaster(), other.getHistoricalTimeSeriesMaster()) &&
JodaBeanUtils.equal(getHistoricalTimeSeriesSource(), other.getHistoricalTimeSeriesSource()) &&
JodaBeanUtils.equal(getHistoricalTimeSeriesLoader(), other.getHistoricalTimeSeriesLoader()) &&
JodaBeanUtils.equal(getScheduler(), other.getScheduler()) &&
JodaBeanUtils.equal(getTargetTypes(), other.getTargetTypes()) &&
JodaBeanUtils.equal(getOrganizationMaster(), other.getOrganizationMaster()) &&
JodaBeanUtils.equal(getMarketDataSnapshotMaster(), other.getMarketDataSnapshotMaster()) &&
JodaBeanUtils.equal(getLiveMarketDataProviderFactory(), other.getLiveMarketDataProviderFactory()) &&
JodaBeanUtils.equal(getMarketDataSpecificationRepository(), other.getMarketDataSpecificationRepository()) &&
JodaBeanUtils.equal(getViewProcessor(), other.getViewProcessor()) &&
JodaBeanUtils.equal(getComputationTargetResolver(), other.getComputationTargetResolver()) &&
super.equals(obj);
}
return false;
}
@Override
public int hashCode() {
int hash = 7;
hash += hash * 31 + JodaBeanUtils.hashCode(getConfigMaster());
hash += hash * 31 + JodaBeanUtils.hashCode(getExchangeMaster());
hash += hash * 31 + JodaBeanUtils.hashCode(getHolidayMaster());
hash += hash * 31 + JodaBeanUtils.hashCode(getRegionMaster());
hash += hash * 31 + JodaBeanUtils.hashCode(getSecurityMaster());
hash += hash * 31 + JodaBeanUtils.hashCode(getSecuritySource());
hash += hash * 31 + JodaBeanUtils.hashCode(getSecurityLoader());
hash += hash * 31 + JodaBeanUtils.hashCode(getPositionMaster());
hash += hash * 31 + JodaBeanUtils.hashCode(getPortfolioMaster());
hash += hash * 31 + JodaBeanUtils.hashCode(getBatchMaster());
hash += hash * 31 + JodaBeanUtils.hashCode(getHistoricalTimeSeriesMaster());
hash += hash * 31 + JodaBeanUtils.hashCode(getHistoricalTimeSeriesSource());
hash += hash * 31 + JodaBeanUtils.hashCode(getHistoricalTimeSeriesLoader());
hash += hash * 31 + JodaBeanUtils.hashCode(getScheduler());
hash += hash * 31 + JodaBeanUtils.hashCode(getTargetTypes());
hash += hash * 31 + JodaBeanUtils.hashCode(getOrganizationMaster());
hash += hash * 31 + JodaBeanUtils.hashCode(getMarketDataSnapshotMaster());
hash += hash * 31 + JodaBeanUtils.hashCode(getLiveMarketDataProviderFactory());
hash += hash * 31 + JodaBeanUtils.hashCode(getMarketDataSpecificationRepository());
hash += hash * 31 + JodaBeanUtils.hashCode(getViewProcessor());
hash += hash * 31 + JodaBeanUtils.hashCode(getComputationTargetResolver());
return hash ^ super.hashCode();
}
@Override
public String toString() {
StringBuilder buf = new StringBuilder(704);
buf.append("WebsiteBasicsComponentFactory{");
int len = buf.length();
toString(buf);
if (buf.length() > len) {
buf.setLength(buf.length() - 2);
}
buf.append('}');
return buf.toString();
}
@Override
protected void toString(StringBuilder buf) {
super.toString(buf);
buf.append("configMaster").append('=').append(getConfigMaster()).append(',').append(' ');
buf.append("exchangeMaster").append('=').append(getExchangeMaster()).append(',').append(' ');
buf.append("holidayMaster").append('=').append(getHolidayMaster()).append(',').append(' ');
buf.append("regionMaster").append('=').append(getRegionMaster()).append(',').append(' ');
buf.append("securityMaster").append('=').append(getSecurityMaster()).append(',').append(' ');
buf.append("securitySource").append('=').append(getSecuritySource()).append(',').append(' ');
buf.append("securityLoader").append('=').append(getSecurityLoader()).append(',').append(' ');
buf.append("positionMaster").append('=').append(getPositionMaster()).append(',').append(' ');
buf.append("portfolioMaster").append('=').append(getPortfolioMaster()).append(',').append(' ');
buf.append("batchMaster").append('=').append(getBatchMaster()).append(',').append(' ');
buf.append("historicalTimeSeriesMaster").append('=').append(getHistoricalTimeSeriesMaster()).append(',').append(' ');
buf.append("historicalTimeSeriesSource").append('=').append(getHistoricalTimeSeriesSource()).append(',').append(' ');
buf.append("historicalTimeSeriesLoader").append('=').append(getHistoricalTimeSeriesLoader()).append(',').append(' ');
buf.append("scheduler").append('=').append(getScheduler()).append(',').append(' ');
buf.append("targetTypes").append('=').append(getTargetTypes()).append(',').append(' ');
buf.append("organizationMaster").append('=').append(getOrganizationMaster()).append(',').append(' ');
buf.append("marketDataSnapshotMaster").append('=').append(getMarketDataSnapshotMaster()).append(',').append(' ');
buf.append("liveMarketDataProviderFactory").append('=').append(getLiveMarketDataProviderFactory()).append(',').append(' ');
buf.append("marketDataSpecificationRepository").append('=').append(getMarketDataSpecificationRepository()).append(',').append(' ');
buf.append("viewProcessor").append('=').append(getViewProcessor()).append(',').append(' ');
buf.append("computationTargetResolver").append('=').append(getComputationTargetResolver()).append(',').append(' ');
}
//-----------------------------------------------------------------------
/**
* The meta-bean for {@code WebsiteBasicsComponentFactory}.
*/
public static class Meta extends AbstractComponentFactory.Meta {
/**
* The singleton instance of the meta-bean.
*/
static final Meta INSTANCE = new Meta();
/**
* The meta-property for the {@code configMaster} property.
*/
private final MetaProperty<ConfigMaster> _configMaster = DirectMetaProperty.ofReadWrite(
this, "configMaster", WebsiteBasicsComponentFactory.class, ConfigMaster.class);
/**
* The meta-property for the {@code exchangeMaster} property.
*/
private final MetaProperty<ExchangeMaster> _exchangeMaster = DirectMetaProperty.ofReadWrite(
this, "exchangeMaster", WebsiteBasicsComponentFactory.class, ExchangeMaster.class);
/**
* The meta-property for the {@code holidayMaster} property.
*/
private final MetaProperty<HolidayMaster> _holidayMaster = DirectMetaProperty.ofReadWrite(
this, "holidayMaster", WebsiteBasicsComponentFactory.class, HolidayMaster.class);
/**
* The meta-property for the {@code regionMaster} property.
*/
private final MetaProperty<RegionMaster> _regionMaster = DirectMetaProperty.ofReadWrite(
this, "regionMaster", WebsiteBasicsComponentFactory.class, RegionMaster.class);
/**
* The meta-property for the {@code securityMaster} property.
*/
private final MetaProperty<SecurityMaster> _securityMaster = DirectMetaProperty.ofReadWrite(
this, "securityMaster", WebsiteBasicsComponentFactory.class, SecurityMaster.class);
/**
* The meta-property for the {@code securitySource} property.
*/
private final MetaProperty<SecuritySource> _securitySource = DirectMetaProperty.ofReadWrite(
this, "securitySource", WebsiteBasicsComponentFactory.class, SecuritySource.class);
/**
* The meta-property for the {@code securityLoader} property.
*/
private final MetaProperty<SecurityLoader> _securityLoader = DirectMetaProperty.ofReadWrite(
this, "securityLoader", WebsiteBasicsComponentFactory.class, SecurityLoader.class);
/**
* The meta-property for the {@code positionMaster} property.
*/
private final MetaProperty<PositionMaster> _positionMaster = DirectMetaProperty.ofReadWrite(
this, "positionMaster", WebsiteBasicsComponentFactory.class, PositionMaster.class);
/**
* The meta-property for the {@code portfolioMaster} property.
*/
private final MetaProperty<PortfolioMaster> _portfolioMaster = DirectMetaProperty.ofReadWrite(
this, "portfolioMaster", WebsiteBasicsComponentFactory.class, PortfolioMaster.class);
/**
* The meta-property for the {@code batchMaster} property.
*/
private final MetaProperty<BatchMaster> _batchMaster = DirectMetaProperty.ofReadWrite(
this, "batchMaster", WebsiteBasicsComponentFactory.class, BatchMaster.class);
/**
* The meta-property for the {@code historicalTimeSeriesMaster} property.
*/
private final MetaProperty<HistoricalTimeSeriesMaster> _historicalTimeSeriesMaster = DirectMetaProperty.ofReadWrite(
this, "historicalTimeSeriesMaster", WebsiteBasicsComponentFactory.class, HistoricalTimeSeriesMaster.class);
/**
* The meta-property for the {@code historicalTimeSeriesSource} property.
*/
private final MetaProperty<HistoricalTimeSeriesSource> _historicalTimeSeriesSource = DirectMetaProperty.ofReadWrite(
this, "historicalTimeSeriesSource", WebsiteBasicsComponentFactory.class, HistoricalTimeSeriesSource.class);
/**
* The meta-property for the {@code historicalTimeSeriesLoader} property.
*/
private final MetaProperty<HistoricalTimeSeriesLoader> _historicalTimeSeriesLoader = DirectMetaProperty.ofReadWrite(
this, "historicalTimeSeriesLoader", WebsiteBasicsComponentFactory.class, HistoricalTimeSeriesLoader.class);
/**
* The meta-property for the {@code scheduler} property.
*/
private final MetaProperty<ScheduledExecutorService> _scheduler = DirectMetaProperty.ofReadWrite(
this, "scheduler", WebsiteBasicsComponentFactory.class, ScheduledExecutorService.class);
/**
* The meta-property for the {@code targetTypes} property.
*/
private final MetaProperty<ComputationTargetTypeProvider> _targetTypes = DirectMetaProperty.ofReadWrite(
this, "targetTypes", WebsiteBasicsComponentFactory.class, ComputationTargetTypeProvider.class);
/**
* The meta-property for the {@code organizationMaster} property.
*/
private final MetaProperty<OrganizationMaster> _organizationMaster = DirectMetaProperty.ofReadWrite(
this, "organizationMaster", WebsiteBasicsComponentFactory.class, OrganizationMaster.class);
/**
* The meta-property for the {@code marketDataSnapshotMaster} property.
*/
private final MetaProperty<MarketDataSnapshotMaster> _marketDataSnapshotMaster = DirectMetaProperty.ofReadWrite(
this, "marketDataSnapshotMaster", WebsiteBasicsComponentFactory.class, MarketDataSnapshotMaster.class);
/**
* The meta-property for the {@code liveMarketDataProviderFactory} property.
*/
private final MetaProperty<LiveMarketDataProviderFactory> _liveMarketDataProviderFactory = DirectMetaProperty.ofReadWrite(
this, "liveMarketDataProviderFactory", WebsiteBasicsComponentFactory.class, LiveMarketDataProviderFactory.class);
/**
* The meta-property for the {@code marketDataSpecificationRepository} property.
*/
private final MetaProperty<NamedMarketDataSpecificationRepository> _marketDataSpecificationRepository = DirectMetaProperty.ofReadWrite(
this, "marketDataSpecificationRepository", WebsiteBasicsComponentFactory.class, NamedMarketDataSpecificationRepository.class);
/**
* The meta-property for the {@code viewProcessor} property.
*/
private final MetaProperty<ViewProcessor> _viewProcessor = DirectMetaProperty.ofReadWrite(
this, "viewProcessor", WebsiteBasicsComponentFactory.class, ViewProcessor.class);
/**
* The meta-property for the {@code computationTargetResolver} property.
*/
private final MetaProperty<ComputationTargetResolver> _computationTargetResolver = DirectMetaProperty.ofReadWrite(
this, "computationTargetResolver", WebsiteBasicsComponentFactory.class, ComputationTargetResolver.class);
/**
* The meta-properties.
*/
private final Map<String, MetaProperty<?>> _metaPropertyMap$ = new DirectMetaPropertyMap(
this, (DirectMetaPropertyMap) super.metaPropertyMap(),
"configMaster",
"exchangeMaster",
"holidayMaster",
"regionMaster",
"securityMaster",
"securitySource",
"securityLoader",
"positionMaster",
"portfolioMaster",
"batchMaster",
"historicalTimeSeriesMaster",
"historicalTimeSeriesSource",
"historicalTimeSeriesLoader",
"scheduler",
"targetTypes",
"organizationMaster",
"marketDataSnapshotMaster",
"liveMarketDataProviderFactory",
"marketDataSpecificationRepository",
"viewProcessor",
"computationTargetResolver");
/**
* Restricted constructor.
*/
protected Meta() {
}
@Override
protected MetaProperty<?> metaPropertyGet(String propertyName) {
switch (propertyName.hashCode()) {
case 10395716: // configMaster
return _configMaster;
case -652001691: // exchangeMaster
return _exchangeMaster;
case 246258906: // holidayMaster
return _holidayMaster;
case -1820969354: // regionMaster
return _regionMaster;
case -887218750: // securityMaster
return _securityMaster;
case -702456965: // securitySource
return _securitySource;
case -903470221: // securityLoader
return _securityLoader;
case -1840419605: // positionMaster
return _positionMaster;
case -772274742: // portfolioMaster
return _portfolioMaster;
case -252634564: // batchMaster
return _batchMaster;
case 173967376: // historicalTimeSeriesMaster
return _historicalTimeSeriesMaster;
case 358729161: // historicalTimeSeriesSource
return _historicalTimeSeriesSource;
case 157715905: // historicalTimeSeriesLoader
return _historicalTimeSeriesLoader;
case -160710469: // scheduler
return _scheduler;
case -2094577304: // targetTypes
return _targetTypes;
case -1158737547: // organizationMaster
return _organizationMaster;
case 2090650860: // marketDataSnapshotMaster
return _marketDataSnapshotMaster;
case -301472921: // liveMarketDataProviderFactory
return _liveMarketDataProviderFactory;
case 1743800263: // marketDataSpecificationRepository
return _marketDataSpecificationRepository;
case -1697555603: // viewProcessor
return _viewProcessor;
case 1562222174: // computationTargetResolver
return _computationTargetResolver;
}
return super.metaPropertyGet(propertyName);
}
@Override
public BeanBuilder<? extends WebsiteBasicsComponentFactory> builder() {
return new DirectBeanBuilder<WebsiteBasicsComponentFactory>(new WebsiteBasicsComponentFactory());
}
@Override
public Class<? extends WebsiteBasicsComponentFactory> beanType() {
return WebsiteBasicsComponentFactory.class;
}
@Override
public Map<String, MetaProperty<?>> metaPropertyMap() {
return _metaPropertyMap$;
}
//-----------------------------------------------------------------------
/**
* The meta-property for the {@code configMaster} property.
* @return the meta-property, not null
*/
public final MetaProperty<ConfigMaster> configMaster() {
return _configMaster;
}
/**
* The meta-property for the {@code exchangeMaster} property.
* @return the meta-property, not null
*/
public final MetaProperty<ExchangeMaster> exchangeMaster() {
return _exchangeMaster;
}
/**
* The meta-property for the {@code holidayMaster} property.
* @return the meta-property, not null
*/
public final MetaProperty<HolidayMaster> holidayMaster() {
return _holidayMaster;
}
/**
* The meta-property for the {@code regionMaster} property.
* @return the meta-property, not null
*/
public final MetaProperty<RegionMaster> regionMaster() {
return _regionMaster;
}
/**
* The meta-property for the {@code securityMaster} property.
* @return the meta-property, not null
*/
public final MetaProperty<SecurityMaster> securityMaster() {
return _securityMaster;
}
/**
* The meta-property for the {@code securitySource} property.
* @return the meta-property, not null
*/
public final MetaProperty<SecuritySource> securitySource() {
return _securitySource;
}
/**
* The meta-property for the {@code securityLoader} property.
* @return the meta-property, not null
*/
public final MetaProperty<SecurityLoader> securityLoader() {
return _securityLoader;
}
/**
* The meta-property for the {@code positionMaster} property.
* @return the meta-property, not null
*/
public final MetaProperty<PositionMaster> positionMaster() {
return _positionMaster;
}
/**
* The meta-property for the {@code portfolioMaster} property.
* @return the meta-property, not null
*/
public final MetaProperty<PortfolioMaster> portfolioMaster() {
return _portfolioMaster;
}
/**
* The meta-property for the {@code batchMaster} property.
* @return the meta-property, not null
*/
public final MetaProperty<BatchMaster> batchMaster() {
return _batchMaster;
}
/**
* The meta-property for the {@code historicalTimeSeriesMaster} property.
* @return the meta-property, not null
*/
public final MetaProperty<HistoricalTimeSeriesMaster> historicalTimeSeriesMaster() {
return _historicalTimeSeriesMaster;
}
/**
* The meta-property for the {@code historicalTimeSeriesSource} property.
* @return the meta-property, not null
*/
public final MetaProperty<HistoricalTimeSeriesSource> historicalTimeSeriesSource() {
return _historicalTimeSeriesSource;
}
/**
* The meta-property for the {@code historicalTimeSeriesLoader} property.
* @return the meta-property, not null
*/
public final MetaProperty<HistoricalTimeSeriesLoader> historicalTimeSeriesLoader() {
return _historicalTimeSeriesLoader;
}
/**
* The meta-property for the {@code scheduler} property.
* @return the meta-property, not null
*/
public final MetaProperty<ScheduledExecutorService> scheduler() {
return _scheduler;
}
/**
* The meta-property for the {@code targetTypes} property.
* @return the meta-property, not null
*/
public final MetaProperty<ComputationTargetTypeProvider> targetTypes() {
return _targetTypes;
}
/**
* The meta-property for the {@code organizationMaster} property.
* @return the meta-property, not null
*/
public final MetaProperty<OrganizationMaster> organizationMaster() {
return _organizationMaster;
}
/**
* The meta-property for the {@code marketDataSnapshotMaster} property.
* @return the meta-property, not null
*/
public final MetaProperty<MarketDataSnapshotMaster> marketDataSnapshotMaster() {
return _marketDataSnapshotMaster;
}
/**
* The meta-property for the {@code liveMarketDataProviderFactory} property.
* @return the meta-property, not null
*/
public final MetaProperty<LiveMarketDataProviderFactory> liveMarketDataProviderFactory() {
return _liveMarketDataProviderFactory;
}
/**
* The meta-property for the {@code marketDataSpecificationRepository} property.
* @deprecated use liveMarketDataProviderFactory
* @return the meta-property, not null
*/
@Deprecated
public final MetaProperty<NamedMarketDataSpecificationRepository> marketDataSpecificationRepository() {
return _marketDataSpecificationRepository;
}
/**
* The meta-property for the {@code viewProcessor} property.
* @return the meta-property, not null
*/
public final MetaProperty<ViewProcessor> viewProcessor() {
return _viewProcessor;
}
/**
* The meta-property for the {@code computationTargetResolver} property.
* @return the meta-property, not null
*/
public final MetaProperty<ComputationTargetResolver> computationTargetResolver() {
return _computationTargetResolver;
}
//-----------------------------------------------------------------------
@Override
protected Object propertyGet(Bean bean, String propertyName, boolean quiet) {
switch (propertyName.hashCode()) {
case 10395716: // configMaster
return ((WebsiteBasicsComponentFactory) bean).getConfigMaster();
case -652001691: // exchangeMaster
return ((WebsiteBasicsComponentFactory) bean).getExchangeMaster();
case 246258906: // holidayMaster
return ((WebsiteBasicsComponentFactory) bean).getHolidayMaster();
case -1820969354: // regionMaster
return ((WebsiteBasicsComponentFactory) bean).getRegionMaster();
case -887218750: // securityMaster
return ((WebsiteBasicsComponentFactory) bean).getSecurityMaster();
case -702456965: // securitySource
return ((WebsiteBasicsComponentFactory) bean).getSecuritySource();
case -903470221: // securityLoader
return ((WebsiteBasicsComponentFactory) bean).getSecurityLoader();
case -1840419605: // positionMaster
return ((WebsiteBasicsComponentFactory) bean).getPositionMaster();
case -772274742: // portfolioMaster
return ((WebsiteBasicsComponentFactory) bean).getPortfolioMaster();
case -252634564: // batchMaster
return ((WebsiteBasicsComponentFactory) bean).getBatchMaster();
case 173967376: // historicalTimeSeriesMaster
return ((WebsiteBasicsComponentFactory) bean).getHistoricalTimeSeriesMaster();
case 358729161: // historicalTimeSeriesSource
return ((WebsiteBasicsComponentFactory) bean).getHistoricalTimeSeriesSource();
case 157715905: // historicalTimeSeriesLoader
return ((WebsiteBasicsComponentFactory) bean).getHistoricalTimeSeriesLoader();
case -160710469: // scheduler
return ((WebsiteBasicsComponentFactory) bean).getScheduler();
case -2094577304: // targetTypes
return ((WebsiteBasicsComponentFactory) bean).getTargetTypes();
case -1158737547: // organizationMaster
return ((WebsiteBasicsComponentFactory) bean).getOrganizationMaster();
case 2090650860: // marketDataSnapshotMaster
return ((WebsiteBasicsComponentFactory) bean).getMarketDataSnapshotMaster();
case -301472921: // liveMarketDataProviderFactory
return ((WebsiteBasicsComponentFactory) bean).getLiveMarketDataProviderFactory();
case 1743800263: // marketDataSpecificationRepository
return ((WebsiteBasicsComponentFactory) bean).getMarketDataSpecificationRepository();
case -1697555603: // viewProcessor
return ((WebsiteBasicsComponentFactory) bean).getViewProcessor();
case 1562222174: // computationTargetResolver
return ((WebsiteBasicsComponentFactory) bean).getComputationTargetResolver();
}
return super.propertyGet(bean, propertyName, quiet);
}
@Override
protected void propertySet(Bean bean, String propertyName, Object newValue, boolean quiet) {
switch (propertyName.hashCode()) {
case 10395716: // configMaster
((WebsiteBasicsComponentFactory) bean).setConfigMaster((ConfigMaster) newValue);
return;
case -652001691: // exchangeMaster
((WebsiteBasicsComponentFactory) bean).setExchangeMaster((ExchangeMaster) newValue);
return;
case 246258906: // holidayMaster
((WebsiteBasicsComponentFactory) bean).setHolidayMaster((HolidayMaster) newValue);
return;
case -1820969354: // regionMaster
((WebsiteBasicsComponentFactory) bean).setRegionMaster((RegionMaster) newValue);
return;
case -887218750: // securityMaster
((WebsiteBasicsComponentFactory) bean).setSecurityMaster((SecurityMaster) newValue);
return;
case -702456965: // securitySource
((WebsiteBasicsComponentFactory) bean).setSecuritySource((SecuritySource) newValue);
return;
case -903470221: // securityLoader
((WebsiteBasicsComponentFactory) bean).setSecurityLoader((SecurityLoader) newValue);
return;
case -1840419605: // positionMaster
((WebsiteBasicsComponentFactory) bean).setPositionMaster((PositionMaster) newValue);
return;
case -772274742: // portfolioMaster
((WebsiteBasicsComponentFactory) bean).setPortfolioMaster((PortfolioMaster) newValue);
return;
case -252634564: // batchMaster
((WebsiteBasicsComponentFactory) bean).setBatchMaster((BatchMaster) newValue);
return;
case 173967376: // historicalTimeSeriesMaster
((WebsiteBasicsComponentFactory) bean).setHistoricalTimeSeriesMaster((HistoricalTimeSeriesMaster) newValue);
return;
case 358729161: // historicalTimeSeriesSource
((WebsiteBasicsComponentFactory) bean).setHistoricalTimeSeriesSource((HistoricalTimeSeriesSource) newValue);
return;
case 157715905: // historicalTimeSeriesLoader
((WebsiteBasicsComponentFactory) bean).setHistoricalTimeSeriesLoader((HistoricalTimeSeriesLoader) newValue);
return;
case -160710469: // scheduler
((WebsiteBasicsComponentFactory) bean).setScheduler((ScheduledExecutorService) newValue);
return;
case -2094577304: // targetTypes
((WebsiteBasicsComponentFactory) bean).setTargetTypes((ComputationTargetTypeProvider) newValue);
return;
case -1158737547: // organizationMaster
((WebsiteBasicsComponentFactory) bean).setOrganizationMaster((OrganizationMaster) newValue);
return;
case 2090650860: // marketDataSnapshotMaster
((WebsiteBasicsComponentFactory) bean).setMarketDataSnapshotMaster((MarketDataSnapshotMaster) newValue);
return;
case -301472921: // liveMarketDataProviderFactory
((WebsiteBasicsComponentFactory) bean).setLiveMarketDataProviderFactory((LiveMarketDataProviderFactory) newValue);
return;
case 1743800263: // marketDataSpecificationRepository
((WebsiteBasicsComponentFactory) bean).setMarketDataSpecificationRepository((NamedMarketDataSpecificationRepository) newValue);
return;
case -1697555603: // viewProcessor
((WebsiteBasicsComponentFactory) bean).setViewProcessor((ViewProcessor) newValue);
return;
case 1562222174: // computationTargetResolver
((WebsiteBasicsComponentFactory) bean).setComputationTargetResolver((ComputationTargetResolver) newValue);
return;
}
super.propertySet(bean, propertyName, newValue, quiet);
}
@Override
protected void validate(Bean bean) {
JodaBeanUtils.notNull(((WebsiteBasicsComponentFactory) bean)._configMaster, "configMaster");
JodaBeanUtils.notNull(((WebsiteBasicsComponentFactory) bean)._exchangeMaster, "exchangeMaster");
JodaBeanUtils.notNull(((WebsiteBasicsComponentFactory) bean)._holidayMaster, "holidayMaster");
JodaBeanUtils.notNull(((WebsiteBasicsComponentFactory) bean)._regionMaster, "regionMaster");
JodaBeanUtils.notNull(((WebsiteBasicsComponentFactory) bean)._securityMaster, "securityMaster");
JodaBeanUtils.notNull(((WebsiteBasicsComponentFactory) bean)._securitySource, "securitySource");
JodaBeanUtils.notNull(((WebsiteBasicsComponentFactory) bean)._securityLoader, "securityLoader");
JodaBeanUtils.notNull(((WebsiteBasicsComponentFactory) bean)._positionMaster, "positionMaster");
JodaBeanUtils.notNull(((WebsiteBasicsComponentFactory) bean)._portfolioMaster, "portfolioMaster");
JodaBeanUtils.notNull(((WebsiteBasicsComponentFactory) bean)._batchMaster, "batchMaster");
JodaBeanUtils.notNull(((WebsiteBasicsComponentFactory) bean)._historicalTimeSeriesMaster, "historicalTimeSeriesMaster");
JodaBeanUtils.notNull(((WebsiteBasicsComponentFactory) bean)._historicalTimeSeriesSource, "historicalTimeSeriesSource");
JodaBeanUtils.notNull(((WebsiteBasicsComponentFactory) bean)._historicalTimeSeriesLoader, "historicalTimeSeriesLoader");
JodaBeanUtils.notNull(((WebsiteBasicsComponentFactory) bean)._scheduler, "scheduler");
JodaBeanUtils.notNull(((WebsiteBasicsComponentFactory) bean)._targetTypes, "targetTypes");
JodaBeanUtils.notNull(((WebsiteBasicsComponentFactory) bean)._organizationMaster, "organizationMaster");
JodaBeanUtils.notNull(((WebsiteBasicsComponentFactory) bean)._marketDataSnapshotMaster, "marketDataSnapshotMaster");
JodaBeanUtils.notNull(((WebsiteBasicsComponentFactory) bean)._viewProcessor, "viewProcessor");
JodaBeanUtils.notNull(((WebsiteBasicsComponentFactory) bean)._computationTargetResolver, "computationTargetResolver");
super.validate(bean);
}
}
///CLOVER:ON
//-------------------------- AUTOGENERATED END --------------------------
}
| projects/OG-Component/src/main/java/com/opengamma/component/factory/web/WebsiteBasicsComponentFactory.java |
/**
* Copyright (C) 2009 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.component.factory.web;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.concurrent.ScheduledExecutorService;
import org.joda.beans.Bean;
import org.joda.beans.BeanBuilder;
import org.joda.beans.BeanDefinition;
import org.joda.beans.JodaBeanUtils;
import org.joda.beans.MetaProperty;
import org.joda.beans.Property;
import org.joda.beans.PropertyDefinition;
import org.joda.beans.impl.direct.DirectBeanBuilder;
import org.joda.beans.impl.direct.DirectMetaProperty;
import org.joda.beans.impl.direct.DirectMetaPropertyMap;
import com.opengamma.OpenGammaRuntimeException;
import com.opengamma.batch.BatchMaster;
import com.opengamma.component.ComponentRepository;
import com.opengamma.component.factory.AbstractComponentFactory;
import com.opengamma.component.rest.JerseyRestResourceFactory;
import com.opengamma.core.historicaltimeseries.HistoricalTimeSeriesSource;
import com.opengamma.core.security.SecuritySource;
import com.opengamma.engine.ComputationTargetResolver;
import com.opengamma.engine.marketdata.NamedMarketDataSpecificationRepository;
import com.opengamma.engine.marketdata.live.LiveMarketDataProviderFactory;
import com.opengamma.engine.target.ComputationTargetTypeProvider;
import com.opengamma.engine.target.DefaultComputationTargetTypeProvider;
import com.opengamma.engine.view.ViewProcessor;
import com.opengamma.master.config.ConfigMaster;
import com.opengamma.master.config.impl.MasterConfigSource;
import com.opengamma.master.exchange.ExchangeMaster;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesLoader;
import com.opengamma.master.historicaltimeseries.HistoricalTimeSeriesMaster;
import com.opengamma.master.holiday.HolidayMaster;
import com.opengamma.master.marketdatasnapshot.MarketDataSnapshotMaster;
import com.opengamma.master.orgs.OrganizationMaster;
import com.opengamma.master.portfolio.PortfolioMaster;
import com.opengamma.master.position.PositionMaster;
import com.opengamma.master.region.RegionMaster;
import com.opengamma.master.security.SecurityLoader;
import com.opengamma.master.security.SecurityMaster;
import com.opengamma.web.WebAboutResource;
import com.opengamma.web.WebHomeResource;
import com.opengamma.web.analytics.rest.LiveMarketDataProviderNamesResource;
import com.opengamma.web.analytics.rest.LiveMarketDataSpecificationNamesResource;
import com.opengamma.web.config.WebConfigsResource;
import com.opengamma.web.exchange.WebExchangesResource;
import com.opengamma.web.historicaltimeseries.WebAllHistoricalTimeSeriesResource;
import com.opengamma.web.holiday.WebHolidaysResource;
import com.opengamma.web.marketdatasnapshot.WebMarketDataSnapshotsResource;
import com.opengamma.web.orgs.WebOrganizationsResource;
import com.opengamma.web.portfolio.WebPortfoliosResource;
import com.opengamma.web.position.WebPositionsResource;
import com.opengamma.web.region.WebRegionsResource;
import com.opengamma.web.security.WebSecuritiesResource;
import com.opengamma.web.target.WebComputationTargetTypeResource;
import com.opengamma.web.valuerequirementname.WebValueRequirementNamesResource;
/**
* Component factory for the main website.
*/
@BeanDefinition
public class WebsiteBasicsComponentFactory extends AbstractComponentFactory {
/**
* The config master.
*/
@PropertyDefinition(validate = "notNull")
private ConfigMaster _configMaster;
/**
* The exchange master.
*/
@PropertyDefinition(validate = "notNull")
private ExchangeMaster _exchangeMaster;
/**
* The holiday master.
*/
@PropertyDefinition(validate = "notNull")
private HolidayMaster _holidayMaster;
/**
   * The region master.
*/
@PropertyDefinition(validate = "notNull")
private RegionMaster _regionMaster;
/**
* The security master.
*/
@PropertyDefinition(validate = "notNull")
private SecurityMaster _securityMaster;
/**
* The security source.
*/
@PropertyDefinition(validate = "notNull")
private SecuritySource _securitySource;
/**
* The security loader.
*/
@PropertyDefinition(validate = "notNull")
private SecurityLoader _securityLoader;
/**
* The position master.
*/
@PropertyDefinition(validate = "notNull")
private PositionMaster _positionMaster;
/**
* The portfolio master.
*/
@PropertyDefinition(validate = "notNull")
private PortfolioMaster _portfolioMaster;
/**
* The batch master.
*/
@PropertyDefinition(validate = "notNull")
private BatchMaster _batchMaster;
/**
* The time-series master.
*/
@PropertyDefinition(validate = "notNull")
private HistoricalTimeSeriesMaster _historicalTimeSeriesMaster;
/**
* The time-series source.
*/
@PropertyDefinition(validate = "notNull")
private HistoricalTimeSeriesSource _historicalTimeSeriesSource;
/**
* The time-series loader.
*/
@PropertyDefinition(validate = "notNull")
private HistoricalTimeSeriesLoader _historicalTimeSeriesLoader;
/**
* The scheduler.
*/
@PropertyDefinition(validate = "notNull")
private ScheduledExecutorService _scheduler;
/**
* The available computation target types.
*/
@PropertyDefinition(validate = "notNull")
private ComputationTargetTypeProvider _targetTypes = new DefaultComputationTargetTypeProvider();
/**
* The organization master.
*/
@PropertyDefinition(validate = "notNull")
private OrganizationMaster _organizationMaster;
/**
* The market data snapshot master.
*/
@PropertyDefinition(validate = "notNull")
private MarketDataSnapshotMaster _marketDataSnapshotMaster;
/**
* For obtaining the live market data provider names. Either this or marketDataSpecificationRepository must be set.
*/
@PropertyDefinition
private LiveMarketDataProviderFactory _liveMarketDataProviderFactory;
/**
* For looking up market data provider specifications by name. Either this or liveMarketDataProviderFactory must be set.
*
* @deprecated use liveMarketDataProviderFactory
*/
@PropertyDefinition
@Deprecated
private NamedMarketDataSpecificationRepository _marketDataSpecificationRepository;
/**
* The view processor.
*/
@PropertyDefinition(validate = "notNull")
private ViewProcessor _viewProcessor;
/**
* The computation target resolver.
*/
@PropertyDefinition(validate = "notNull")
private ComputationTargetResolver _computationTargetResolver;
//-------------------------------------------------------------------------
@Override
public void init(ComponentRepository repo, LinkedHashMap<String, String> configuration) {
initBasics(repo);
initMasters(repo);
initValueRequirementNames(repo, configuration);
}
protected void initBasics(ComponentRepository repo) {
repo.getRestComponents().publishResource(new WebHomeResource());
repo.getRestComponents().publishResource(new WebAboutResource());
}
protected void initMasters(ComponentRepository repo) {
if (getLiveMarketDataProviderFactory() == null && getMarketDataSpecificationRepository() == null) {
      throw new OpenGammaRuntimeException("Neither " + marketDataSpecificationRepository().name() + " nor " + liveMarketDataProviderFactory().name() + " was specified");
}
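    // Both lookups are handed to the WebMarketDataSnapshotsResource published
    // at the end of this method; the check above guarantees at least one of
    // them is non-null.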
JerseyRestResourceFactory resource;
resource = new JerseyRestResourceFactory(WebConfigsResource.class, getConfigMaster());
repo.getRestComponents().publishResource(resource);
resource = new JerseyRestResourceFactory(WebExchangesResource.class, getExchangeMaster());
repo.getRestComponents().publishResource(resource);
resource = new JerseyRestResourceFactory(WebHolidaysResource.class, getHolidayMaster());
repo.getRestComponents().publishResource(resource);
resource = new JerseyRestResourceFactory(WebRegionsResource.class, getRegionMaster());
repo.getRestComponents().publishResource(resource);
resource = new JerseyRestResourceFactory(WebSecuritiesResource.class, getSecurityMaster(), getSecurityLoader(), getHistoricalTimeSeriesMaster(), getOrganizationMaster());
repo.getRestComponents().publishResource(resource);
resource = new JerseyRestResourceFactory(WebPositionsResource.class, getPositionMaster(), getSecurityLoader(), getSecuritySource(), getHistoricalTimeSeriesSource());
repo.getRestComponents().publishResource(resource);
resource = new JerseyRestResourceFactory(WebPortfoliosResource.class, getPortfolioMaster(), getPositionMaster(), getSecuritySource(), getScheduler());
repo.getRestComponents().publishResource(resource);
final MasterConfigSource configSource = new MasterConfigSource(getConfigMaster());
resource = new JerseyRestResourceFactory(WebAllHistoricalTimeSeriesResource.class, getHistoricalTimeSeriesMaster(), getHistoricalTimeSeriesLoader(), configSource);
repo.getRestComponents().publishResource(resource);
resource = new JerseyRestResourceFactory(WebComputationTargetTypeResource.class, getTargetTypes());
repo.getRestComponents().publishResource(resource);
resource = new JerseyRestResourceFactory(WebOrganizationsResource.class, getOrganizationMaster());
repo.getRestComponents().publishResource(resource);
resource = new JerseyRestResourceFactory(WebMarketDataSnapshotsResource.class,
getMarketDataSnapshotMaster(), getConfigMaster(), getLiveMarketDataProviderFactory(), getMarketDataSpecificationRepository(),
configSource, getComputationTargetResolver(), getViewProcessor(), getHistoricalTimeSeriesSource());
repo.getRestComponents().publishResource(resource);
}
protected void initValueRequirementNames(ComponentRepository repo, LinkedHashMap<String, String> configuration) {
String valueRequirementNameClasses = configuration.get(WebValueRequirementNamesResource.VALUE_REQUIREMENT_NAME_CLASSES);
configuration.remove(WebValueRequirementNamesResource.VALUE_REQUIREMENT_NAME_CLASSES);
if (valueRequirementNameClasses == null) {
repo.getRestComponents().publishResource(new WebValueRequirementNamesResource());
} else if (valueRequirementNameClasses.contains(",")) {
repo.getRestComponents().publishResource(
new WebValueRequirementNamesResource(valueRequirementNameClasses.split(",")));
} else {
repo.getRestComponents().publishResource(new WebValueRequirementNamesResource(new String[] {valueRequirementNameClasses}));
}
}
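  // Illustrative sketch only (not from the original source): a component .ini
  // entry wiring this factory might look like the lines below. The section
  // name and the "::central"/"::main" classifiers are hypothetical, and the
  // key for the value requirement names is whatever string the constant
  // WebValueRequirementNamesResource.VALUE_REQUIREMENT_NAME_CLASSES holds.
  //
  //   [webBasics]
  //   factory = com.opengamma.component.factory.web.WebsiteBasicsComponentFactory
  //   configMaster = ::central
  //   exchangeMaster = ::central
  //   viewProcessor = ::main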
//------------------------- AUTOGENERATED START -------------------------
///CLOVER:OFF
/**
* The meta-bean for {@code WebsiteBasicsComponentFactory}.
* @return the meta-bean, not null
*/
public static WebsiteBasicsComponentFactory.Meta meta() {
return WebsiteBasicsComponentFactory.Meta.INSTANCE;
}
static {
JodaBeanUtils.registerMetaBean(WebsiteBasicsComponentFactory.Meta.INSTANCE);
}
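  // The static initializer above registers the meta-bean so that generic
  // Joda-Beans utilities, e.g. JodaBeanUtils.metaBean(Class), can resolve it.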
@Override
public WebsiteBasicsComponentFactory.Meta metaBean() {
return WebsiteBasicsComponentFactory.Meta.INSTANCE;
}
//-----------------------------------------------------------------------
/**
* Gets the config master.
* @return the value of the property, not null
*/
public ConfigMaster getConfigMaster() {
return _configMaster;
}
/**
* Sets the config master.
* @param configMaster the new value of the property, not null
*/
public void setConfigMaster(ConfigMaster configMaster) {
JodaBeanUtils.notNull(configMaster, "configMaster");
this._configMaster = configMaster;
}
/**
   * Gets the {@code configMaster} property.
* @return the property, not null
*/
public final Property<ConfigMaster> configMaster() {
return metaBean().configMaster().createProperty(this);
}
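  // Illustrative usage (an assumption, not generated code): the Property
  // wrapper returned above gives reflective access equivalent to the getter:
  //   WebsiteBasicsComponentFactory factory = new WebsiteBasicsComponentFactory();
  //   ConfigMaster cm = factory.configMaster().get();  // same as factory.getConfigMaster()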
//-----------------------------------------------------------------------
/**
* Gets the exchange master.
* @return the value of the property, not null
*/
public ExchangeMaster getExchangeMaster() {
return _exchangeMaster;
}
/**
* Sets the exchange master.
* @param exchangeMaster the new value of the property, not null
*/
public void setExchangeMaster(ExchangeMaster exchangeMaster) {
JodaBeanUtils.notNull(exchangeMaster, "exchangeMaster");
this._exchangeMaster = exchangeMaster;
}
/**
   * Gets the {@code exchangeMaster} property.
* @return the property, not null
*/
public final Property<ExchangeMaster> exchangeMaster() {
return metaBean().exchangeMaster().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets the holiday master.
* @return the value of the property, not null
*/
public HolidayMaster getHolidayMaster() {
return _holidayMaster;
}
/**
* Sets the holiday master.
* @param holidayMaster the new value of the property, not null
*/
public void setHolidayMaster(HolidayMaster holidayMaster) {
JodaBeanUtils.notNull(holidayMaster, "holidayMaster");
this._holidayMaster = holidayMaster;
}
/**
   * Gets the {@code holidayMaster} property.
* @return the property, not null
*/
public final Property<HolidayMaster> holidayMaster() {
return metaBean().holidayMaster().createProperty(this);
}
//-----------------------------------------------------------------------
/**
   * Gets the region master.
* @return the value of the property, not null
*/
public RegionMaster getRegionMaster() {
return _regionMaster;
}
/**
   * Sets the region master.
* @param regionMaster the new value of the property, not null
*/
public void setRegionMaster(RegionMaster regionMaster) {
JodaBeanUtils.notNull(regionMaster, "regionMaster");
this._regionMaster = regionMaster;
}
/**
   * Gets the {@code regionMaster} property.
* @return the property, not null
*/
public final Property<RegionMaster> regionMaster() {
return metaBean().regionMaster().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets the security master.
* @return the value of the property, not null
*/
public SecurityMaster getSecurityMaster() {
return _securityMaster;
}
/**
* Sets the security master.
* @param securityMaster the new value of the property, not null
*/
public void setSecurityMaster(SecurityMaster securityMaster) {
JodaBeanUtils.notNull(securityMaster, "securityMaster");
this._securityMaster = securityMaster;
}
/**
   * Gets the {@code securityMaster} property.
* @return the property, not null
*/
public final Property<SecurityMaster> securityMaster() {
return metaBean().securityMaster().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets the security source.
* @return the value of the property, not null
*/
public SecuritySource getSecuritySource() {
return _securitySource;
}
/**
* Sets the security source.
* @param securitySource the new value of the property, not null
*/
public void setSecuritySource(SecuritySource securitySource) {
JodaBeanUtils.notNull(securitySource, "securitySource");
this._securitySource = securitySource;
}
/**
   * Gets the {@code securitySource} property.
* @return the property, not null
*/
public final Property<SecuritySource> securitySource() {
return metaBean().securitySource().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets the security loader.
* @return the value of the property, not null
*/
public SecurityLoader getSecurityLoader() {
return _securityLoader;
}
/**
* Sets the security loader.
* @param securityLoader the new value of the property, not null
*/
public void setSecurityLoader(SecurityLoader securityLoader) {
JodaBeanUtils.notNull(securityLoader, "securityLoader");
this._securityLoader = securityLoader;
}
/**
   * Gets the {@code securityLoader} property.
* @return the property, not null
*/
public final Property<SecurityLoader> securityLoader() {
return metaBean().securityLoader().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets the position master.
* @return the value of the property, not null
*/
public PositionMaster getPositionMaster() {
return _positionMaster;
}
/**
* Sets the position master.
* @param positionMaster the new value of the property, not null
*/
public void setPositionMaster(PositionMaster positionMaster) {
JodaBeanUtils.notNull(positionMaster, "positionMaster");
this._positionMaster = positionMaster;
}
/**
   * Gets the {@code positionMaster} property.
* @return the property, not null
*/
public final Property<PositionMaster> positionMaster() {
return metaBean().positionMaster().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets the portfolio master.
* @return the value of the property, not null
*/
public PortfolioMaster getPortfolioMaster() {
return _portfolioMaster;
}
/**
* Sets the portfolio master.
* @param portfolioMaster the new value of the property, not null
*/
public void setPortfolioMaster(PortfolioMaster portfolioMaster) {
JodaBeanUtils.notNull(portfolioMaster, "portfolioMaster");
this._portfolioMaster = portfolioMaster;
}
/**
   * Gets the {@code portfolioMaster} property.
* @return the property, not null
*/
public final Property<PortfolioMaster> portfolioMaster() {
return metaBean().portfolioMaster().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets the batch master.
* @return the value of the property, not null
*/
public BatchMaster getBatchMaster() {
return _batchMaster;
}
/**
* Sets the batch master.
* @param batchMaster the new value of the property, not null
*/
public void setBatchMaster(BatchMaster batchMaster) {
JodaBeanUtils.notNull(batchMaster, "batchMaster");
this._batchMaster = batchMaster;
}
/**
   * Gets the {@code batchMaster} property.
* @return the property, not null
*/
public final Property<BatchMaster> batchMaster() {
return metaBean().batchMaster().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets the time-series master.
* @return the value of the property, not null
*/
public HistoricalTimeSeriesMaster getHistoricalTimeSeriesMaster() {
return _historicalTimeSeriesMaster;
}
/**
* Sets the time-series master.
* @param historicalTimeSeriesMaster the new value of the property, not null
*/
public void setHistoricalTimeSeriesMaster(HistoricalTimeSeriesMaster historicalTimeSeriesMaster) {
JodaBeanUtils.notNull(historicalTimeSeriesMaster, "historicalTimeSeriesMaster");
this._historicalTimeSeriesMaster = historicalTimeSeriesMaster;
}
/**
   * Gets the {@code historicalTimeSeriesMaster} property.
* @return the property, not null
*/
public final Property<HistoricalTimeSeriesMaster> historicalTimeSeriesMaster() {
return metaBean().historicalTimeSeriesMaster().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets the time-series source.
* @return the value of the property, not null
*/
public HistoricalTimeSeriesSource getHistoricalTimeSeriesSource() {
return _historicalTimeSeriesSource;
}
/**
* Sets the time-series source.
* @param historicalTimeSeriesSource the new value of the property, not null
*/
public void setHistoricalTimeSeriesSource(HistoricalTimeSeriesSource historicalTimeSeriesSource) {
JodaBeanUtils.notNull(historicalTimeSeriesSource, "historicalTimeSeriesSource");
this._historicalTimeSeriesSource = historicalTimeSeriesSource;
}
/**
   * Gets the {@code historicalTimeSeriesSource} property.
* @return the property, not null
*/
public final Property<HistoricalTimeSeriesSource> historicalTimeSeriesSource() {
return metaBean().historicalTimeSeriesSource().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets the time-series loader.
* @return the value of the property, not null
*/
public HistoricalTimeSeriesLoader getHistoricalTimeSeriesLoader() {
return _historicalTimeSeriesLoader;
}
/**
* Sets the time-series loader.
* @param historicalTimeSeriesLoader the new value of the property, not null
*/
public void setHistoricalTimeSeriesLoader(HistoricalTimeSeriesLoader historicalTimeSeriesLoader) {
JodaBeanUtils.notNull(historicalTimeSeriesLoader, "historicalTimeSeriesLoader");
this._historicalTimeSeriesLoader = historicalTimeSeriesLoader;
}
/**
   * Gets the {@code historicalTimeSeriesLoader} property.
* @return the property, not null
*/
public final Property<HistoricalTimeSeriesLoader> historicalTimeSeriesLoader() {
return metaBean().historicalTimeSeriesLoader().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets the scheduler.
* @return the value of the property, not null
*/
public ScheduledExecutorService getScheduler() {
return _scheduler;
}
/**
* Sets the scheduler.
* @param scheduler the new value of the property, not null
*/
public void setScheduler(ScheduledExecutorService scheduler) {
JodaBeanUtils.notNull(scheduler, "scheduler");
this._scheduler = scheduler;
}
/**
   * Gets the {@code scheduler} property.
* @return the property, not null
*/
public final Property<ScheduledExecutorService> scheduler() {
return metaBean().scheduler().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets the available computation target types.
* @return the value of the property, not null
*/
public ComputationTargetTypeProvider getTargetTypes() {
return _targetTypes;
}
/**
* Sets the available computation target types.
* @param targetTypes the new value of the property, not null
*/
public void setTargetTypes(ComputationTargetTypeProvider targetTypes) {
JodaBeanUtils.notNull(targetTypes, "targetTypes");
this._targetTypes = targetTypes;
}
/**
   * Gets the {@code targetTypes} property.
* @return the property, not null
*/
public final Property<ComputationTargetTypeProvider> targetTypes() {
return metaBean().targetTypes().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets the organization master.
* @return the value of the property, not null
*/
public OrganizationMaster getOrganizationMaster() {
return _organizationMaster;
}
/**
* Sets the organization master.
* @param organizationMaster the new value of the property, not null
*/
public void setOrganizationMaster(OrganizationMaster organizationMaster) {
JodaBeanUtils.notNull(organizationMaster, "organizationMaster");
this._organizationMaster = organizationMaster;
}
/**
   * Gets the {@code organizationMaster} property.
* @return the property, not null
*/
public final Property<OrganizationMaster> organizationMaster() {
return metaBean().organizationMaster().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets the market data snapshot master.
* @return the value of the property, not null
*/
public MarketDataSnapshotMaster getMarketDataSnapshotMaster() {
return _marketDataSnapshotMaster;
}
/**
* Sets the market data snapshot master.
* @param marketDataSnapshotMaster the new value of the property, not null
*/
public void setMarketDataSnapshotMaster(MarketDataSnapshotMaster marketDataSnapshotMaster) {
JodaBeanUtils.notNull(marketDataSnapshotMaster, "marketDataSnapshotMaster");
this._marketDataSnapshotMaster = marketDataSnapshotMaster;
}
/**
   * Gets the {@code marketDataSnapshotMaster} property.
* @return the property, not null
*/
public final Property<MarketDataSnapshotMaster> marketDataSnapshotMaster() {
return metaBean().marketDataSnapshotMaster().createProperty(this);
}
//-----------------------------------------------------------------------
/**
   * Gets the factory for obtaining the live market data provider names. Either this or marketDataSpecificationRepository must be set.
* @return the value of the property
*/
public LiveMarketDataProviderFactory getLiveMarketDataProviderFactory() {
return _liveMarketDataProviderFactory;
}
/**
   * Sets the factory for obtaining the live market data provider names. Either this or marketDataSpecificationRepository must be set.
* @param liveMarketDataProviderFactory the new value of the property
*/
public void setLiveMarketDataProviderFactory(LiveMarketDataProviderFactory liveMarketDataProviderFactory) {
this._liveMarketDataProviderFactory = liveMarketDataProviderFactory;
}
/**
   * Gets the {@code liveMarketDataProviderFactory} property.
* @return the property, not null
*/
public final Property<LiveMarketDataProviderFactory> liveMarketDataProviderFactory() {
return metaBean().liveMarketDataProviderFactory().createProperty(this);
}
//-----------------------------------------------------------------------
/**
   * Gets the repository for looking up market data provider specifications by name. Either this or liveMarketDataProviderFactory must be set.
*
* @deprecated use liveMarketDataProviderFactory
* @return the value of the property, not null
*/
@Deprecated
public NamedMarketDataSpecificationRepository getMarketDataSpecificationRepository() {
return _marketDataSpecificationRepository;
}
/**
   * Sets the repository for looking up market data provider specifications by name. Either this or liveMarketDataProviderFactory must be set.
*
* @deprecated use liveMarketDataProviderFactory
* @param marketDataSpecificationRepository the new value of the property, not null
*/
@Deprecated
public void setMarketDataSpecificationRepository(NamedMarketDataSpecificationRepository marketDataSpecificationRepository) {
JodaBeanUtils.notNull(marketDataSpecificationRepository, "marketDataSpecificationRepository");
this._marketDataSpecificationRepository = marketDataSpecificationRepository;
}
/**
   * Gets the {@code marketDataSpecificationRepository} property.
*
* @deprecated use liveMarketDataProviderFactory
* @return the property, not null
*/
@Deprecated
public final Property<NamedMarketDataSpecificationRepository> marketDataSpecificationRepository() {
return metaBean().marketDataSpecificationRepository().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets the view processor.
* @return the value of the property, not null
*/
public ViewProcessor getViewProcessor() {
return _viewProcessor;
}
/**
* Sets the view processor.
* @param viewProcessor the new value of the property, not null
*/
public void setViewProcessor(ViewProcessor viewProcessor) {
JodaBeanUtils.notNull(viewProcessor, "viewProcessor");
this._viewProcessor = viewProcessor;
}
/**
   * Gets the {@code viewProcessor} property.
* @return the property, not null
*/
public final Property<ViewProcessor> viewProcessor() {
return metaBean().viewProcessor().createProperty(this);
}
//-----------------------------------------------------------------------
/**
* Gets the computation target resolver.
* @return the value of the property, not null
*/
public ComputationTargetResolver getComputationTargetResolver() {
return _computationTargetResolver;
}
/**
* Sets the computation target resolver.
* @param computationTargetResolver the new value of the property, not null
*/
public void setComputationTargetResolver(ComputationTargetResolver computationTargetResolver) {
JodaBeanUtils.notNull(computationTargetResolver, "computationTargetResolver");
this._computationTargetResolver = computationTargetResolver;
}
/**
   * Gets the {@code computationTargetResolver} property.
* @return the property, not null
*/
public final Property<ComputationTargetResolver> computationTargetResolver() {
return metaBean().computationTargetResolver().createProperty(this);
}
//-----------------------------------------------------------------------
@Override
public WebsiteBasicsComponentFactory clone() {
return (WebsiteBasicsComponentFactory) super.clone();
}
@Override
public boolean equals(Object obj) {
if (obj == this) {
return true;
}
if (obj != null && obj.getClass() == this.getClass()) {
WebsiteBasicsComponentFactory other = (WebsiteBasicsComponentFactory) obj;
return JodaBeanUtils.equal(getConfigMaster(), other.getConfigMaster()) &&
JodaBeanUtils.equal(getExchangeMaster(), other.getExchangeMaster()) &&
JodaBeanUtils.equal(getHolidayMaster(), other.getHolidayMaster()) &&
JodaBeanUtils.equal(getRegionMaster(), other.getRegionMaster()) &&
JodaBeanUtils.equal(getSecurityMaster(), other.getSecurityMaster()) &&
JodaBeanUtils.equal(getSecuritySource(), other.getSecuritySource()) &&
JodaBeanUtils.equal(getSecurityLoader(), other.getSecurityLoader()) &&
JodaBeanUtils.equal(getPositionMaster(), other.getPositionMaster()) &&
JodaBeanUtils.equal(getPortfolioMaster(), other.getPortfolioMaster()) &&
JodaBeanUtils.equal(getBatchMaster(), other.getBatchMaster()) &&
JodaBeanUtils.equal(getHistoricalTimeSeriesMaster(), other.getHistoricalTimeSeriesMaster()) &&
JodaBeanUtils.equal(getHistoricalTimeSeriesSource(), other.getHistoricalTimeSeriesSource()) &&
JodaBeanUtils.equal(getHistoricalTimeSeriesLoader(), other.getHistoricalTimeSeriesLoader()) &&
JodaBeanUtils.equal(getScheduler(), other.getScheduler()) &&
JodaBeanUtils.equal(getTargetTypes(), other.getTargetTypes()) &&
JodaBeanUtils.equal(getOrganizationMaster(), other.getOrganizationMaster()) &&
JodaBeanUtils.equal(getMarketDataSnapshotMaster(), other.getMarketDataSnapshotMaster()) &&
JodaBeanUtils.equal(getLiveMarketDataProviderFactory(), other.getLiveMarketDataProviderFactory()) &&
JodaBeanUtils.equal(getMarketDataSpecificationRepository(), other.getMarketDataSpecificationRepository()) &&
JodaBeanUtils.equal(getViewProcessor(), other.getViewProcessor()) &&
JodaBeanUtils.equal(getComputationTargetResolver(), other.getComputationTargetResolver()) &&
super.equals(obj);
}
return false;
}
@Override
public int hashCode() {
int hash = 7;
hash += hash * 31 + JodaBeanUtils.hashCode(getConfigMaster());
hash += hash * 31 + JodaBeanUtils.hashCode(getExchangeMaster());
hash += hash * 31 + JodaBeanUtils.hashCode(getHolidayMaster());
hash += hash * 31 + JodaBeanUtils.hashCode(getRegionMaster());
hash += hash * 31 + JodaBeanUtils.hashCode(getSecurityMaster());
hash += hash * 31 + JodaBeanUtils.hashCode(getSecuritySource());
hash += hash * 31 + JodaBeanUtils.hashCode(getSecurityLoader());
hash += hash * 31 + JodaBeanUtils.hashCode(getPositionMaster());
hash += hash * 31 + JodaBeanUtils.hashCode(getPortfolioMaster());
hash += hash * 31 + JodaBeanUtils.hashCode(getBatchMaster());
hash += hash * 31 + JodaBeanUtils.hashCode(getHistoricalTimeSeriesMaster());
hash += hash * 31 + JodaBeanUtils.hashCode(getHistoricalTimeSeriesSource());
hash += hash * 31 + JodaBeanUtils.hashCode(getHistoricalTimeSeriesLoader());
hash += hash * 31 + JodaBeanUtils.hashCode(getScheduler());
hash += hash * 31 + JodaBeanUtils.hashCode(getTargetTypes());
hash += hash * 31 + JodaBeanUtils.hashCode(getOrganizationMaster());
hash += hash * 31 + JodaBeanUtils.hashCode(getMarketDataSnapshotMaster());
hash += hash * 31 + JodaBeanUtils.hashCode(getLiveMarketDataProviderFactory());
hash += hash * 31 + JodaBeanUtils.hashCode(getMarketDataSpecificationRepository());
hash += hash * 31 + JodaBeanUtils.hashCode(getViewProcessor());
hash += hash * 31 + JodaBeanUtils.hashCode(getComputationTargetResolver());
return hash ^ super.hashCode();
}
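  // The generated scheme above folds each property hash into a 31-multiplier
  // accumulator and XORs in the superclass hash at the end.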
@Override
public String toString() {
StringBuilder buf = new StringBuilder(704);
buf.append("WebsiteBasicsComponentFactory{");
int len = buf.length();
toString(buf);
if (buf.length() > len) {
buf.setLength(buf.length() - 2);
}
buf.append('}');
return buf.toString();
}
@Override
protected void toString(StringBuilder buf) {
super.toString(buf);
buf.append("configMaster").append('=').append(getConfigMaster()).append(',').append(' ');
buf.append("exchangeMaster").append('=').append(getExchangeMaster()).append(',').append(' ');
buf.append("holidayMaster").append('=').append(getHolidayMaster()).append(',').append(' ');
buf.append("regionMaster").append('=').append(getRegionMaster()).append(',').append(' ');
buf.append("securityMaster").append('=').append(getSecurityMaster()).append(',').append(' ');
buf.append("securitySource").append('=').append(getSecuritySource()).append(',').append(' ');
buf.append("securityLoader").append('=').append(getSecurityLoader()).append(',').append(' ');
buf.append("positionMaster").append('=').append(getPositionMaster()).append(',').append(' ');
buf.append("portfolioMaster").append('=').append(getPortfolioMaster()).append(',').append(' ');
buf.append("batchMaster").append('=').append(getBatchMaster()).append(',').append(' ');
buf.append("historicalTimeSeriesMaster").append('=').append(getHistoricalTimeSeriesMaster()).append(',').append(' ');
buf.append("historicalTimeSeriesSource").append('=').append(getHistoricalTimeSeriesSource()).append(',').append(' ');
buf.append("historicalTimeSeriesLoader").append('=').append(getHistoricalTimeSeriesLoader()).append(',').append(' ');
buf.append("scheduler").append('=').append(getScheduler()).append(',').append(' ');
buf.append("targetTypes").append('=').append(getTargetTypes()).append(',').append(' ');
buf.append("organizationMaster").append('=').append(getOrganizationMaster()).append(',').append(' ');
buf.append("marketDataSnapshotMaster").append('=').append(getMarketDataSnapshotMaster()).append(',').append(' ');
buf.append("liveMarketDataProviderFactory").append('=').append(getLiveMarketDataProviderFactory()).append(',').append(' ');
buf.append("marketDataSpecificationRepository").append('=').append(getMarketDataSpecificationRepository()).append(',').append(' ');
buf.append("viewProcessor").append('=').append(getViewProcessor()).append(',').append(' ');
buf.append("computationTargetResolver").append('=').append(getComputationTargetResolver()).append(',').append(' ');
}
//-----------------------------------------------------------------------
/**
* The meta-bean for {@code WebsiteBasicsComponentFactory}.
*/
public static class Meta extends AbstractComponentFactory.Meta {
/**
* The singleton instance of the meta-bean.
*/
static final Meta INSTANCE = new Meta();
/**
* The meta-property for the {@code configMaster} property.
*/
private final MetaProperty<ConfigMaster> _configMaster = DirectMetaProperty.ofReadWrite(
this, "configMaster", WebsiteBasicsComponentFactory.class, ConfigMaster.class);
/**
* The meta-property for the {@code exchangeMaster} property.
*/
private final MetaProperty<ExchangeMaster> _exchangeMaster = DirectMetaProperty.ofReadWrite(
this, "exchangeMaster", WebsiteBasicsComponentFactory.class, ExchangeMaster.class);
/**
* The meta-property for the {@code holidayMaster} property.
*/
private final MetaProperty<HolidayMaster> _holidayMaster = DirectMetaProperty.ofReadWrite(
this, "holidayMaster", WebsiteBasicsComponentFactory.class, HolidayMaster.class);
/**
* The meta-property for the {@code regionMaster} property.
*/
private final MetaProperty<RegionMaster> _regionMaster = DirectMetaProperty.ofReadWrite(
this, "regionMaster", WebsiteBasicsComponentFactory.class, RegionMaster.class);
/**
* The meta-property for the {@code securityMaster} property.
*/
private final MetaProperty<SecurityMaster> _securityMaster = DirectMetaProperty.ofReadWrite(
this, "securityMaster", WebsiteBasicsComponentFactory.class, SecurityMaster.class);
/**
* The meta-property for the {@code securitySource} property.
*/
private final MetaProperty<SecuritySource> _securitySource = DirectMetaProperty.ofReadWrite(
this, "securitySource", WebsiteBasicsComponentFactory.class, SecuritySource.class);
/**
* The meta-property for the {@code securityLoader} property.
*/
private final MetaProperty<SecurityLoader> _securityLoader = DirectMetaProperty.ofReadWrite(
this, "securityLoader", WebsiteBasicsComponentFactory.class, SecurityLoader.class);
/**
* The meta-property for the {@code positionMaster} property.
*/
private final MetaProperty<PositionMaster> _positionMaster = DirectMetaProperty.ofReadWrite(
this, "positionMaster", WebsiteBasicsComponentFactory.class, PositionMaster.class);
/**
* The meta-property for the {@code portfolioMaster} property.
*/
private final MetaProperty<PortfolioMaster> _portfolioMaster = DirectMetaProperty.ofReadWrite(
this, "portfolioMaster", WebsiteBasicsComponentFactory.class, PortfolioMaster.class);
/**
* The meta-property for the {@code batchMaster} property.
*/
private final MetaProperty<BatchMaster> _batchMaster = DirectMetaProperty.ofReadWrite(
this, "batchMaster", WebsiteBasicsComponentFactory.class, BatchMaster.class);
/**
* The meta-property for the {@code historicalTimeSeriesMaster} property.
*/
private final MetaProperty<HistoricalTimeSeriesMaster> _historicalTimeSeriesMaster = DirectMetaProperty.ofReadWrite(
this, "historicalTimeSeriesMaster", WebsiteBasicsComponentFactory.class, HistoricalTimeSeriesMaster.class);
/**
* The meta-property for the {@code historicalTimeSeriesSource} property.
*/
private final MetaProperty<HistoricalTimeSeriesSource> _historicalTimeSeriesSource = DirectMetaProperty.ofReadWrite(
this, "historicalTimeSeriesSource", WebsiteBasicsComponentFactory.class, HistoricalTimeSeriesSource.class);
/**
* The meta-property for the {@code historicalTimeSeriesLoader} property.
*/
private final MetaProperty<HistoricalTimeSeriesLoader> _historicalTimeSeriesLoader = DirectMetaProperty.ofReadWrite(
this, "historicalTimeSeriesLoader", WebsiteBasicsComponentFactory.class, HistoricalTimeSeriesLoader.class);
/**
* The meta-property for the {@code scheduler} property.
*/
private final MetaProperty<ScheduledExecutorService> _scheduler = DirectMetaProperty.ofReadWrite(
this, "scheduler", WebsiteBasicsComponentFactory.class, ScheduledExecutorService.class);
/**
* The meta-property for the {@code targetTypes} property.
*/
private final MetaProperty<ComputationTargetTypeProvider> _targetTypes = DirectMetaProperty.ofReadWrite(
this, "targetTypes", WebsiteBasicsComponentFactory.class, ComputationTargetTypeProvider.class);
/**
* The meta-property for the {@code organizationMaster} property.
*/
private final MetaProperty<OrganizationMaster> _organizationMaster = DirectMetaProperty.ofReadWrite(
this, "organizationMaster", WebsiteBasicsComponentFactory.class, OrganizationMaster.class);
/**
* The meta-property for the {@code marketDataSnapshotMaster} property.
*/
private final MetaProperty<MarketDataSnapshotMaster> _marketDataSnapshotMaster = DirectMetaProperty.ofReadWrite(
this, "marketDataSnapshotMaster", WebsiteBasicsComponentFactory.class, MarketDataSnapshotMaster.class);
/**
* The meta-property for the {@code liveMarketDataProviderFactory} property.
*/
private final MetaProperty<LiveMarketDataProviderFactory> _liveMarketDataProviderFactory = DirectMetaProperty.ofReadWrite(
this, "liveMarketDataProviderFactory", WebsiteBasicsComponentFactory.class, LiveMarketDataProviderFactory.class);
/**
* The meta-property for the {@code marketDataSpecificationRepository} property.
*/
private final MetaProperty<NamedMarketDataSpecificationRepository> _marketDataSpecificationRepository = DirectMetaProperty.ofReadWrite(
this, "marketDataSpecificationRepository", WebsiteBasicsComponentFactory.class, NamedMarketDataSpecificationRepository.class);
/**
* The meta-property for the {@code viewProcessor} property.
*/
private final MetaProperty<ViewProcessor> _viewProcessor = DirectMetaProperty.ofReadWrite(
this, "viewProcessor", WebsiteBasicsComponentFactory.class, ViewProcessor.class);
/**
* The meta-property for the {@code computationTargetResolver} property.
*/
private final MetaProperty<ComputationTargetResolver> _computationTargetResolver = DirectMetaProperty.ofReadWrite(
this, "computationTargetResolver", WebsiteBasicsComponentFactory.class, ComputationTargetResolver.class);
/**
* The meta-properties.
*/
private final Map<String, MetaProperty<?>> _metaPropertyMap$ = new DirectMetaPropertyMap(
this, (DirectMetaPropertyMap) super.metaPropertyMap(),
"configMaster",
"exchangeMaster",
"holidayMaster",
"regionMaster",
"securityMaster",
"securitySource",
"securityLoader",
"positionMaster",
"portfolioMaster",
"batchMaster",
"historicalTimeSeriesMaster",
"historicalTimeSeriesSource",
"historicalTimeSeriesLoader",
"scheduler",
"targetTypes",
"organizationMaster",
"marketDataSnapshotMaster",
"liveMarketDataProviderFactory",
"marketDataSpecificationRepository",
"viewProcessor",
"computationTargetResolver");
/**
* Restricted constructor.
*/
protected Meta() {
}
@Override
protected MetaProperty<?> metaPropertyGet(String propertyName) {
switch (propertyName.hashCode()) {
case 10395716: // configMaster
return _configMaster;
case -652001691: // exchangeMaster
return _exchangeMaster;
case 246258906: // holidayMaster
return _holidayMaster;
case -1820969354: // regionMaster
return _regionMaster;
case -887218750: // securityMaster
return _securityMaster;
case -702456965: // securitySource
return _securitySource;
case -903470221: // securityLoader
return _securityLoader;
case -1840419605: // positionMaster
return _positionMaster;
case -772274742: // portfolioMaster
return _portfolioMaster;
case -252634564: // batchMaster
return _batchMaster;
case 173967376: // historicalTimeSeriesMaster
return _historicalTimeSeriesMaster;
case 358729161: // historicalTimeSeriesSource
return _historicalTimeSeriesSource;
case 157715905: // historicalTimeSeriesLoader
return _historicalTimeSeriesLoader;
case -160710469: // scheduler
return _scheduler;
case -2094577304: // targetTypes
return _targetTypes;
case -1158737547: // organizationMaster
return _organizationMaster;
case 2090650860: // marketDataSnapshotMaster
return _marketDataSnapshotMaster;
case -301472921: // liveMarketDataProviderFactory
return _liveMarketDataProviderFactory;
case 1743800263: // marketDataSpecificationRepository
return _marketDataSpecificationRepository;
case -1697555603: // viewProcessor
return _viewProcessor;
case 1562222174: // computationTargetResolver
return _computationTargetResolver;
}
return super.metaPropertyGet(propertyName);
}
@Override
public BeanBuilder<? extends WebsiteBasicsComponentFactory> builder() {
return new DirectBeanBuilder<WebsiteBasicsComponentFactory>(new WebsiteBasicsComponentFactory());
}
@Override
public Class<? extends WebsiteBasicsComponentFactory> beanType() {
return WebsiteBasicsComponentFactory.class;
}
@Override
public Map<String, MetaProperty<?>> metaPropertyMap() {
return _metaPropertyMap$;
}
//-----------------------------------------------------------------------
/**
* The meta-property for the {@code configMaster} property.
* @return the meta-property, not null
*/
public final MetaProperty<ConfigMaster> configMaster() {
return _configMaster;
}
/**
* The meta-property for the {@code exchangeMaster} property.
* @return the meta-property, not null
*/
public final MetaProperty<ExchangeMaster> exchangeMaster() {
return _exchangeMaster;
}
/**
* The meta-property for the {@code holidayMaster} property.
* @return the meta-property, not null
*/
public final MetaProperty<HolidayMaster> holidayMaster() {
return _holidayMaster;
}
/**
* The meta-property for the {@code regionMaster} property.
* @return the meta-property, not null
*/
public final MetaProperty<RegionMaster> regionMaster() {
return _regionMaster;
}
/**
* The meta-property for the {@code securityMaster} property.
* @return the meta-property, not null
*/
public final MetaProperty<SecurityMaster> securityMaster() {
return _securityMaster;
}
/**
* The meta-property for the {@code securitySource} property.
* @return the meta-property, not null
*/
public final MetaProperty<SecuritySource> securitySource() {
return _securitySource;
}
/**
* The meta-property for the {@code securityLoader} property.
* @return the meta-property, not null
*/
public final MetaProperty<SecurityLoader> securityLoader() {
return _securityLoader;
}
/**
* The meta-property for the {@code positionMaster} property.
* @return the meta-property, not null
*/
public final MetaProperty<PositionMaster> positionMaster() {
return _positionMaster;
}
/**
* The meta-property for the {@code portfolioMaster} property.
* @return the meta-property, not null
*/
public final MetaProperty<PortfolioMaster> portfolioMaster() {
return _portfolioMaster;
}
/**
* The meta-property for the {@code batchMaster} property.
* @return the meta-property, not null
*/
public final MetaProperty<BatchMaster> batchMaster() {
return _batchMaster;
}
/**
* The meta-property for the {@code historicalTimeSeriesMaster} property.
* @return the meta-property, not null
*/
public final MetaProperty<HistoricalTimeSeriesMaster> historicalTimeSeriesMaster() {
return _historicalTimeSeriesMaster;
}
/**
* The meta-property for the {@code historicalTimeSeriesSource} property.
* @return the meta-property, not null
*/
public final MetaProperty<HistoricalTimeSeriesSource> historicalTimeSeriesSource() {
return _historicalTimeSeriesSource;
}
/**
* The meta-property for the {@code historicalTimeSeriesLoader} property.
* @return the meta-property, not null
*/
public final MetaProperty<HistoricalTimeSeriesLoader> historicalTimeSeriesLoader() {
return _historicalTimeSeriesLoader;
}
/**
* The meta-property for the {@code scheduler} property.
* @return the meta-property, not null
*/
public final MetaProperty<ScheduledExecutorService> scheduler() {
return _scheduler;
}
/**
* The meta-property for the {@code targetTypes} property.
* @return the meta-property, not null
*/
public final MetaProperty<ComputationTargetTypeProvider> targetTypes() {
return _targetTypes;
}
/**
* The meta-property for the {@code organizationMaster} property.
* @return the meta-property, not null
*/
public final MetaProperty<OrganizationMaster> organizationMaster() {
return _organizationMaster;
}
/**
* The meta-property for the {@code marketDataSnapshotMaster} property.
* @return the meta-property, not null
*/
public final MetaProperty<MarketDataSnapshotMaster> marketDataSnapshotMaster() {
return _marketDataSnapshotMaster;
}
/**
* The meta-property for the {@code liveMarketDataProviderFactory} property.
* @return the meta-property, not null
*/
public final MetaProperty<LiveMarketDataProviderFactory> liveMarketDataProviderFactory() {
return _liveMarketDataProviderFactory;
}
/**
* The meta-property for the {@code marketDataSpecificationRepository} property.
* @deprecated use liveMarketDataProviderFactory
* @return the meta-property, not null
*/
@Deprecated
public final MetaProperty<NamedMarketDataSpecificationRepository> marketDataSpecificationRepository() {
return _marketDataSpecificationRepository;
}
/**
* The meta-property for the {@code viewProcessor} property.
* @return the meta-property, not null
*/
public final MetaProperty<ViewProcessor> viewProcessor() {
return _viewProcessor;
}
/**
* The meta-property for the {@code computationTargetResolver} property.
* @return the meta-property, not null
*/
public final MetaProperty<ComputationTargetResolver> computationTargetResolver() {
return _computationTargetResolver;
}
//-----------------------------------------------------------------------
@Override
protected Object propertyGet(Bean bean, String propertyName, boolean quiet) {
switch (propertyName.hashCode()) {
case 10395716: // configMaster
return ((WebsiteBasicsComponentFactory) bean).getConfigMaster();
case -652001691: // exchangeMaster
return ((WebsiteBasicsComponentFactory) bean).getExchangeMaster();
case 246258906: // holidayMaster
return ((WebsiteBasicsComponentFactory) bean).getHolidayMaster();
case -1820969354: // regionMaster
return ((WebsiteBasicsComponentFactory) bean).getRegionMaster();
case -887218750: // securityMaster
return ((WebsiteBasicsComponentFactory) bean).getSecurityMaster();
case -702456965: // securitySource
return ((WebsiteBasicsComponentFactory) bean).getSecuritySource();
case -903470221: // securityLoader
return ((WebsiteBasicsComponentFactory) bean).getSecurityLoader();
case -1840419605: // positionMaster
return ((WebsiteBasicsComponentFactory) bean).getPositionMaster();
case -772274742: // portfolioMaster
return ((WebsiteBasicsComponentFactory) bean).getPortfolioMaster();
case -252634564: // batchMaster
return ((WebsiteBasicsComponentFactory) bean).getBatchMaster();
case 173967376: // historicalTimeSeriesMaster
return ((WebsiteBasicsComponentFactory) bean).getHistoricalTimeSeriesMaster();
case 358729161: // historicalTimeSeriesSource
return ((WebsiteBasicsComponentFactory) bean).getHistoricalTimeSeriesSource();
case 157715905: // historicalTimeSeriesLoader
return ((WebsiteBasicsComponentFactory) bean).getHistoricalTimeSeriesLoader();
case -160710469: // scheduler
return ((WebsiteBasicsComponentFactory) bean).getScheduler();
case -2094577304: // targetTypes
return ((WebsiteBasicsComponentFactory) bean).getTargetTypes();
case -1158737547: // organizationMaster
return ((WebsiteBasicsComponentFactory) bean).getOrganizationMaster();
case 2090650860: // marketDataSnapshotMaster
return ((WebsiteBasicsComponentFactory) bean).getMarketDataSnapshotMaster();
case -301472921: // liveMarketDataProviderFactory
return ((WebsiteBasicsComponentFactory) bean).getLiveMarketDataProviderFactory();
case 1743800263: // marketDataSpecificationRepository
return ((WebsiteBasicsComponentFactory) bean).getMarketDataSpecificationRepository();
case -1697555603: // viewProcessor
return ((WebsiteBasicsComponentFactory) bean).getViewProcessor();
case 1562222174: // computationTargetResolver
return ((WebsiteBasicsComponentFactory) bean).getComputationTargetResolver();
}
return super.propertyGet(bean, propertyName, quiet);
}
@Override
protected void propertySet(Bean bean, String propertyName, Object newValue, boolean quiet) {
switch (propertyName.hashCode()) {
case 10395716: // configMaster
((WebsiteBasicsComponentFactory) bean).setConfigMaster((ConfigMaster) newValue);
return;
case -652001691: // exchangeMaster
((WebsiteBasicsComponentFactory) bean).setExchangeMaster((ExchangeMaster) newValue);
return;
case 246258906: // holidayMaster
((WebsiteBasicsComponentFactory) bean).setHolidayMaster((HolidayMaster) newValue);
return;
case -1820969354: // regionMaster
((WebsiteBasicsComponentFactory) bean).setRegionMaster((RegionMaster) newValue);
return;
case -887218750: // securityMaster
((WebsiteBasicsComponentFactory) bean).setSecurityMaster((SecurityMaster) newValue);
return;
case -702456965: // securitySource
((WebsiteBasicsComponentFactory) bean).setSecuritySource((SecuritySource) newValue);
return;
case -903470221: // securityLoader
((WebsiteBasicsComponentFactory) bean).setSecurityLoader((SecurityLoader) newValue);
return;
case -1840419605: // positionMaster
((WebsiteBasicsComponentFactory) bean).setPositionMaster((PositionMaster) newValue);
return;
case -772274742: // portfolioMaster
((WebsiteBasicsComponentFactory) bean).setPortfolioMaster((PortfolioMaster) newValue);
return;
case -252634564: // batchMaster
((WebsiteBasicsComponentFactory) bean).setBatchMaster((BatchMaster) newValue);
return;
case 173967376: // historicalTimeSeriesMaster
((WebsiteBasicsComponentFactory) bean).setHistoricalTimeSeriesMaster((HistoricalTimeSeriesMaster) newValue);
return;
case 358729161: // historicalTimeSeriesSource
((WebsiteBasicsComponentFactory) bean).setHistoricalTimeSeriesSource((HistoricalTimeSeriesSource) newValue);
return;
case 157715905: // historicalTimeSeriesLoader
((WebsiteBasicsComponentFactory) bean).setHistoricalTimeSeriesLoader((HistoricalTimeSeriesLoader) newValue);
return;
case -160710469: // scheduler
((WebsiteBasicsComponentFactory) bean).setScheduler((ScheduledExecutorService) newValue);
return;
case -2094577304: // targetTypes
((WebsiteBasicsComponentFactory) bean).setTargetTypes((ComputationTargetTypeProvider) newValue);
return;
case -1158737547: // organizationMaster
((WebsiteBasicsComponentFactory) bean).setOrganizationMaster((OrganizationMaster) newValue);
return;
case 2090650860: // marketDataSnapshotMaster
((WebsiteBasicsComponentFactory) bean).setMarketDataSnapshotMaster((MarketDataSnapshotMaster) newValue);
return;
case -301472921: // liveMarketDataProviderFactory
((WebsiteBasicsComponentFactory) bean).setLiveMarketDataProviderFactory((LiveMarketDataProviderFactory) newValue);
return;
case 1743800263: // marketDataSpecificationRepository
((WebsiteBasicsComponentFactory) bean).setMarketDataSpecificationRepository((NamedMarketDataSpecificationRepository) newValue);
return;
case -1697555603: // viewProcessor
((WebsiteBasicsComponentFactory) bean).setViewProcessor((ViewProcessor) newValue);
return;
case 1562222174: // computationTargetResolver
((WebsiteBasicsComponentFactory) bean).setComputationTargetResolver((ComputationTargetResolver) newValue);
return;
}
super.propertySet(bean, propertyName, newValue, quiet);
}
@Override
protected void validate(Bean bean) {
JodaBeanUtils.notNull(((WebsiteBasicsComponentFactory) bean)._configMaster, "configMaster");
JodaBeanUtils.notNull(((WebsiteBasicsComponentFactory) bean)._exchangeMaster, "exchangeMaster");
JodaBeanUtils.notNull(((WebsiteBasicsComponentFactory) bean)._holidayMaster, "holidayMaster");
JodaBeanUtils.notNull(((WebsiteBasicsComponentFactory) bean)._regionMaster, "regionMaster");
JodaBeanUtils.notNull(((WebsiteBasicsComponentFactory) bean)._securityMaster, "securityMaster");
JodaBeanUtils.notNull(((WebsiteBasicsComponentFactory) bean)._securitySource, "securitySource");
JodaBeanUtils.notNull(((WebsiteBasicsComponentFactory) bean)._securityLoader, "securityLoader");
JodaBeanUtils.notNull(((WebsiteBasicsComponentFactory) bean)._positionMaster, "positionMaster");
JodaBeanUtils.notNull(((WebsiteBasicsComponentFactory) bean)._portfolioMaster, "portfolioMaster");
JodaBeanUtils.notNull(((WebsiteBasicsComponentFactory) bean)._batchMaster, "batchMaster");
JodaBeanUtils.notNull(((WebsiteBasicsComponentFactory) bean)._historicalTimeSeriesMaster, "historicalTimeSeriesMaster");
JodaBeanUtils.notNull(((WebsiteBasicsComponentFactory) bean)._historicalTimeSeriesSource, "historicalTimeSeriesSource");
JodaBeanUtils.notNull(((WebsiteBasicsComponentFactory) bean)._historicalTimeSeriesLoader, "historicalTimeSeriesLoader");
JodaBeanUtils.notNull(((WebsiteBasicsComponentFactory) bean)._scheduler, "scheduler");
JodaBeanUtils.notNull(((WebsiteBasicsComponentFactory) bean)._targetTypes, "targetTypes");
JodaBeanUtils.notNull(((WebsiteBasicsComponentFactory) bean)._organizationMaster, "organizationMaster");
JodaBeanUtils.notNull(((WebsiteBasicsComponentFactory) bean)._marketDataSnapshotMaster, "marketDataSnapshotMaster");
JodaBeanUtils.notNull(((WebsiteBasicsComponentFactory) bean)._marketDataSpecificationRepository, "marketDataSpecificationRepository");
JodaBeanUtils.notNull(((WebsiteBasicsComponentFactory) bean)._viewProcessor, "viewProcessor");
JodaBeanUtils.notNull(((WebsiteBasicsComponentFactory) bean)._computationTargetResolver, "computationTargetResolver");
super.validate(bean);
}
}
///CLOVER:ON
//-------------------------- AUTOGENERATED END --------------------------
}
| [PLAT-4684] Regenerate joda-beans
| projects/OG-Component/src/main/java/com/opengamma/component/factory/web/WebsiteBasicsComponentFactory.java | [PLAT-4684] Regenerate joda-beans | <ide><path>rojects/OG-Component/src/main/java/com/opengamma/component/factory/web/WebsiteBasicsComponentFactory.java
<ide> * Gets for looking up market data provider specifications by name. Either this or liveMarketDataProviderFactory must be set.
<ide> *
<ide> * @deprecated use liveMarketDataProviderFactory
<del> * @return the value of the property, not null
<add> * @return the value of the property
<ide> */
<ide> @Deprecated
<ide> public NamedMarketDataSpecificationRepository getMarketDataSpecificationRepository() {
<ide> * Sets for looking up market data provider specifications by name. Either this or liveMarketDataProviderFactory must be set.
<ide> *
<ide> * @deprecated use liveMarketDataProviderFactory
<del> * @param marketDataSpecificationRepository the new value of the property, not null
<add> * @param marketDataSpecificationRepository the new value of the property
<ide> */
<ide> @Deprecated
<ide> public void setMarketDataSpecificationRepository(NamedMarketDataSpecificationRepository marketDataSpecificationRepository) {
<del> JodaBeanUtils.notNull(marketDataSpecificationRepository, "marketDataSpecificationRepository");
<ide> this._marketDataSpecificationRepository = marketDataSpecificationRepository;
<ide> }
<ide>
<ide> JodaBeanUtils.notNull(((WebsiteBasicsComponentFactory) bean)._targetTypes, "targetTypes");
<ide> JodaBeanUtils.notNull(((WebsiteBasicsComponentFactory) bean)._organizationMaster, "organizationMaster");
<ide> JodaBeanUtils.notNull(((WebsiteBasicsComponentFactory) bean)._marketDataSnapshotMaster, "marketDataSnapshotMaster");
<del> JodaBeanUtils.notNull(((WebsiteBasicsComponentFactory) bean)._marketDataSpecificationRepository, "marketDataSpecificationRepository");
<ide> JodaBeanUtils.notNull(((WebsiteBasicsComponentFactory) bean)._viewProcessor, "viewProcessor");
<ide> JodaBeanUtils.notNull(((WebsiteBasicsComponentFactory) bean)._computationTargetResolver, "computationTargetResolver");
<ide> super.validate(bean); |
|
JavaScript | agpl-3.0 | ed2fb87e870eef8ebe4faef6726c2a1d24ce69c0 | 0 | duaneking/rockstar_test,duaneking/rockstar_test,duaneking/rockstar_test,duaneking/rockstar_test,duaneking/rockstar_test,duaneking/rockstar_test,duaneking/rockstar_test,duaneking/rockstar_test,duaneking/rockstar_test,duaneking/rockstar_test | 9796ad92-2e63-11e5-9284-b827eb9e62be | helloWorld.js | 97911b2a-2e63-11e5-9284-b827eb9e62be | 9796ad92-2e63-11e5-9284-b827eb9e62be | helloWorld.js | 9796ad92-2e63-11e5-9284-b827eb9e62be | <ide><path>elloWorld.js
<del>97911b2a-2e63-11e5-9284-b827eb9e62be
<add>9796ad92-2e63-11e5-9284-b827eb9e62be |
|
JavaScript | mit | b9c9afcfafe4852161d6221ad6290af7176e087a | 0 | mil-tokyo/sushi,mil-tokyo/sushi | var AgentSmith = {};
AgentSmith.Matrix = function(rows, cols, data) {
this.rows = rows;
this.cols = cols;
this.length = rows * cols;
this.datum_type = Float32Array;
this.byte_length = this.length * this.datum_type.BYTES_PER_ELEMENT;
if (data === void 0) {
this.data = new this.datum_type(this.length);
} else {
this.data = data;
}
this.row_wise = true;
};
(function() {
var $M = AgentSmith.Matrix;
var $P = AgentSmith.Matrix.prototype;
/* ##### utilities ##### */
$P.syncData = function() { };
$P.destruct = function() { this.data = void 0; };
$P.copyPropertyFrom = function(original) {
this.rows = original.rows;
this.cols = original.cols;
this.length = original.length;
this.datum_type = original.datum_type;
this.row_wise = original.row_wise;
};
$P.equals = function(mat) {
this.syncData();
mat.syncData();
if (this.rows !== mat.rows || this.cols !== mat.cols) {
return false;
}
if (this.row_wise == mat.row_wise) {
for (var i = 0; i < this.length; i++) {
if (this.data[i] !== mat.data[i]) {
return false;
}
}
} else {
for (var row = 0; row < this.rows; row++) {
for (var col = 0; col < this.cols; col++) {
if (this.get(row, col) !== mat.get(row, col)) {
return false;
}
}
};
}
return true;
};
$P.nearlyEquals = function(mat, epsilon) {
this.syncData();
mat.syncData();
if (epsilon === void 0) {
var epsilon = 0.01;
}
var nearlyEquals = function(a, b) {
var tmp = a - b;
return -epsilon < tmp && tmp < epsilon;
};
if (this.rows !== mat.rows || this.cols !== mat.cols) {
return false;
}
if (this.row_wise == mat.row_wise) {
for (var i = 0; i < this.length; i++) {
if (!nearlyEquals(this.data[i], mat.data[i])) {
return false;
}
}
} else {
for (var row = 0; row < this.rows; row++) {
for (var col = 0; col < this.cols; col++) {
if (!nearlyEquals(this.get(row, col), mat.get(row, col))) {
return false;
}
}
};
}
return true;
};
$P.print = function() {
console.log(this.toString());
};
$P.saveString = function(filename) {
var fs = require('fs');
fs.writeFile(filename, this.toString() , function (err) {
if (err) {
throw new Error(err);
}
});
};
$P.toString = function() {
this.syncData();
var formatWidth = function(str, width) {
while(str.length < width) {
str = ' ' + str;
}
return str;
};
var isInt = function(x) {
return x % 1 === 0;
		};
var write_buf = '-- Matrix (' + this.rows + ' x ' + this.cols + ') --';
write_buf += '\r\n';
var digit = Math.max(1, Math.floor(Math.LOG10E * Math.log(Math.max($M.max(this), -$M.min(this)))));
for (var row = 0; row < this.rows; row++) {
for (var col = 0; col < this.cols; col++) {
var tmp = this.get(row, col);
write_buf += formatWidth(isInt(tmp) ? String(tmp) : tmp.toFixed(6), 10 + digit);
}
if (row != this.rows - 1) { write_buf += '\r\n'; }
}
return write_buf;
};
$P.clone = function() {
this.syncData();
var newM = new $M(this.rows, this.cols);
newM.copyPropertyFrom(this);
newM.data = new this.datum_type(this.data);
return newM;
};
$P.alias = function() {
this.syncData();
var newM = new $M(this.rows, this.cols, null);
newM.copyPropertyFrom(this);
newM.data = this.data;
return newM;
};
$M.hasNaN = function(mat) {
mat.syncData();
for (var i = 0; i < mat.length; i++) {
if (isNaN(mat.data[i])) {
return true;
}
}
return false;
	};
	/* ##### initializer ##### */
$P.zeros = function() {
this.syncData();
for (var i = 0; i < this.length; i++) {
this.data[i] = 0;
}
return this;
};
$P.random = function(min, max) {
this.syncData();
if (min === void 0) {
var min = 0.0;
}
if (max === void 0) {
var max = 1.0;
}
for (var i = 0; i < this.length; i++) {
this.data[i] = min + (max - min) * Math.random();
}
return this;
};
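	// Gaussian sampling below uses the Box-Muller transform: two uniform draws
	// (a, b) become sqrt(-2 ln a) * sin/cos(2*pi*b), scaled by std and shifted by mu.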
$P.gaussRandom = function() {
var getGauss = function(mu, std) {
var a = 1 - Math.random();
var b = 1 - Math.random();
var c = Math.sqrt(-2 * Math.log(a));
if (0.5 - Math.random() > 0) {
return c * Math.sin(Math.PI * 2 * b) * std + mu;
} else {
return c * Math.cos(Math.PI * 2 * b) * std + mu;
}
};
		return function(mu, std) {
			this.syncData();
for (var i = 0; i < this.length; i++) {
this.data[i] = getGauss(mu, std);
}
return this;
}
}();
$P.range = function() {
this.syncData();
for (var i = 0; i < this.data.length; i++) {
this.data[i] = i;
}
return this;
};
$M.fromArray = function(original_array) {
var newM = new $M(original_array.length, original_array[0].length, null);
newM.setArray(original_array);
return newM;
};
$P.setArray = function(original_array) {
this.syncData();
var flatten = Array.prototype.concat.apply([], original_array);
this.data = new this.datum_type(flatten);
return this;
};
$M.fromColVectors = function(original_vectors) {
if (!(original_vectors instanceof Array)) {
throw new Error('input must be an array');
}
if (original_vectors[0].cols !== 1) {
throw new Error('vectors must be col vectors');
}
var newM = new $M(original_vectors[0].length, original_vectors.length);
newM.setEach(function(row, col) {
return original_vectors[col].get(row, 0);
});
return newM;
};
	$M.extract = function(mat, offset_row, offset_col, rows, cols) {
throw new Error('not implemented');
};
$M.writeSubmat = function(mat, submat, offset_row, offset_col) {
throw new Error('not implemented');
};
/* ##### general manipulation ##### */
$P.get = function(row, col) {
this.syncData();
if (row >= this.rows || col >= this.cols) {
throw new Error('out of range');
}
if (this.row_wise) {
return this.data[row * this.cols + col];
} else {
return this.data[col * this.rows + row];
}
};
$P.set = function(row, col, datum) {
this.syncData();
if (row >= this.rows || col >= this.cols) {
throw new Error('out of range');
}
if (this.row_wise) {
this.data[row * this.cols + col] = datum;
} else {
this.data[col * this.rows + row] = datum;
}
return this;
};
$P.map = function(func) {
this.syncData();
for (var i = 0; i < this.length; i++) {
this.data[i] = func(this.data[i]);
};
return this;
};
$P.setEach = function(func) {
this.syncData();
for (var row = 0; row < this.rows; row++) {
for (var col = 0; col < this.cols; col++) {
this.set(row, col, func(row, col));
}
}
return this;
};
$P.forEach = function(func) {
this.syncData();
for (var row = 0; row < this.rows; row++) {
for (var col = 0; col < this.cols; col++) {
func(row, col);
}
}
return this;
	};
/* ##### shape ##### */
$P.reshape = function(rows, cols) {
if (rows * cols !== this.rows * this.cols) {
throw new Error('shape does not match');
}
this.rows = rows;
this.cols = cols;
return this;
};
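	// Transpose is O(1): t() returns an alias that shares this.data and flips
	// the row_wise flag, so writes through the alias affect the original matrix.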
$P.t = function() {
var alias = this.alias();
alias.row_wise = !alias.row_wise;
var tmp = alias.rows;
alias.rows = alias.cols;
alias.cols = tmp;
return alias;
};
$P.getShape = function() {
return { rows : this.rows, cols : this.cols };
};
/* ##### statistics ##### */
$M.max = function(mat) {
mat.syncData();
var max_val = mat.data[0];
for (var row = 0; row < mat.rows; row++) {
for (var col = 0; col < mat.cols; col++) {
if (mat.get(row, col) > max_val) {
max_val = mat.get(row, col);
}
}
}
return max_val;
};
$M.min = function(mat) {
mat.syncData();
var min_val = mat.data[0];
for (var row = 0; row < mat.rows; row++) {
for (var col = 0; col < mat.cols; col++) {
if (mat.get(row, col) < min_val) {
min_val = mat.get(row, col);
}
}
}
return min_val;
};
$M.argmax = function(mat) {
mat.syncData();
var max_val = mat.data[0];
var arg = { row : 0, col : 0 };
for (var row = 0; row < mat.rows; row++) {
for (var col = 0; col < mat.cols; col++) {
if (mat.get(row, col) > max_val) {
max_val = mat.get(row, col);
arg.row = row;
arg.col = col;
}
}
}
return arg;
};
$M.sum = function(mat) {
mat.syncData();
var sum = 0.0;
for (var i = 0; i < mat.length; i++) {
sum += mat.data[i];
}
return sum;
};
$M.sumEachRow = function(mat) {
mat.syncData();
var newM = new $M(mat.rows, 1);
for (var row = 0; row < mat.rows; row++) {
var tmp = 0;
for (var col = 0; col < mat.cols; col++) {
tmp += mat.get(row, col);
}
newM.set(row, 0, tmp);
}
return newM;
};
$M.sumEachCol = function(mat) {
mat.syncData();
var newM = new $M(1, mat.cols);
for (var col = 0; col < mat.cols; col++) {
var tmp = 0;
for (var row = 0; row < mat.rows; row++) {
tmp += mat.get(row, col);
}
newM.set(0, col, tmp);
}
return newM;
};
$M.maxEachRow = function(mat) {
mat.syncData();
var newM = new $M(mat.rows, 1);
for (var row = 0; row < mat.rows; row++) {
var tmp = mat.get(row, 0);
for (var col = 0; col < mat.cols; col++) {
tmp = Math.max(tmp, mat.get(row, col));
}
newM.set(row, 0, tmp);
}
return newM;
};
$M.maxEachCol = function(mat) {
mat.syncData();
var newM = new $M(1, mat.cols);
for (var col = 0; col < mat.cols; col++) {
var tmp = mat.get(0, col);
for (var row = 0; row < mat.rows; row++) {
tmp = Math.max(tmp, mat.get(row, col));
}
newM.set(0, col, tmp);
}
return newM;
};
/* ##### basic calculation ##### */
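	// eachOperationGenerator builds the element-wise operators (add, sub, mulEach,
	// divEach) from a string template via eval; an operand with a single row or a
	// single column is broadcast across this matrix.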
var eachOperationGenerator = function(op) {
return eval(
[
" (function(mat) { ",
" this.syncData(); ",
" mat.syncData(); ",
" if (!( (this.rows === mat.rows && this.cols === mat.cols) || ",
" (this.rows === mat.rows && mat.cols === 1) || ",
" (this.cols === mat.cols && mat.rows === 1) ) ) { ",
" throw new Error('shape does not match'); ",
" } ",
" if (this.rows === mat.rows && this.cols === mat.cols) { ",
" if (this.row_wise == mat.row_wise) { ",
" for (var i = 0; i < this.length; i++) { ",
" this.data[i] " + op + "= mat.data[i]; ",
" } ",
" } else { ",
" this.forEach(function(row, col) { ",
" this.set(row, col, this.get(row, col) " + op + " mat.get(row, col)); ",
" }.bind(this)); ",
" } ",
" } else if (this.row_wise) { ",
" if (mat.cols ===1) { ",
" for (var row = 0; row < mat.rows; row++) { ",
" for (var col = 0; col < this.cols; col++) { ",
" this.data[row * this.cols + col] " + op + "= mat.data[row]; ",
" } ",
" } ",
" } else { ",
" for (var col = 0; col < mat.cols; col++) { ",
" for (var row = 0; row < this.rows; row++) { ",
" this.data[row * this.cols + col] " + op + "= mat.data[col]; ",
" } ",
" } ",
" } ",
" } else { ",
" if (mat.cols ===1) { ",
" for (var row = 0; row < mat.rows; row++) { ",
" for (var col = 0; col < this.cols; col++) { ",
" this.data[col * this.rows + row] " + op + "= mat.data[row]; ",
" } ",
" } ",
" } else { ",
" for (var col = 0; col < mat.cols; col++) { ",
" for (var row = 0; row < this.rows; row++) { ",
" this.data[col * this.rows + row] " + op + "= mat.data[col]; ",
" } ",
" } ",
" } ",
" } ",
" return this; ",
" }); "
].join('\r\n')
);
};
$P.times = function(times) {
this.syncData();
for (var i = 0; i < this.length; i++) {
this.data[i] *= times;
}
return this;
};
$P.add = eachOperationGenerator("+");
$M.add = function(mat1, mat2) {
return mat1.clone().add(mat2);
};
$P.sub = eachOperationGenerator("-");
$M.sub = function(mat1, mat2) {
return mat1.clone().sub(mat2);
};
$P.mulEach = eachOperationGenerator("*");
$M.mulEach = function(mat1, mat2) {
return mat1.clone().mulEach(mat2);
};
$P.divEach = eachOperationGenerator("/");
$M.divEach = function(mat1, mat2) {
return mat1.clone().divEach(mat2);
};
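	// Note: dot() is the element-wise (Frobenius) inner product of two same-shaped
	// matrices and returns a scalar; use mul() for matrix multiplication.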
$P.dot = function(mat) {
this.syncData();
mat.syncData();
if (this.rows !== mat.rows || this.cols !== mat.cols) {
throw new Error('shape does not match');
}
var sum = 0.0;
if (this.row_wise == mat.row_wise) {
for (var i = 0; i < this.length; i++) {
sum += this.data[i] * mat.data[i];
}
} else {
this.forEach(function(row, col) {
sum += this.get(row, col) * mat.get(row, col);
}.bind(this));
}
return sum;
};
$M.dot = function(mat1, mat2) {
return mat1.dot(mat2);
};
$P.mul = function(mat) {
return $M.mul(this, mat);
};
$M.mul = function(mat1, mat2) {
mat1.syncData();
mat2.syncData();
if (mat1.cols !== mat2.rows) {
throw new Error('shape does not match');
}
var newM = new $M(mat1.rows, mat2.cols);
var tmp = 0;
for (var row = 0; row < newM.rows; row++) {
for (var col = 0; col < newM.cols; col++) {
var tmp = 0.0;
for (var i = 0; i < mat1.cols; i++) {
tmp += mat1.get(row, i) * mat2.get(i, col);
}
newM.data[row * newM.cols + col] = tmp;
}
}
return newM;
};
$M.convolve = function(mat1, mat2, mode) {
throw new Error('not implemented');
};
/* ##### large matrix calculation ##### */
$P.largeAdd = $P.add;
$P.largeSub = $P.sub;
$P.largeMulEach = $P.mulEach;
$P.largeDivEach = $P.divEach;
$P.largeMul = $P.mul;
$P.largeTimes = $P.times;
$P.largeClone = $P.clone;
$M.largeAdd = $M.add;
$M.largeSub = $M.sub;
$M.largeMulEach = $M.mulEach;
$M.largeDivEach = $M.divEach;
$M.largeMul = $M.mul;
$M.largeSum = $M.sum;
$M.largeSumEachRow = $M.sumEachRow;
$M.largeSumEachCol = $M.sumEachCol;
$M.largeMaxEachRow = $M.maxEachRow;
$M.largeMaxEachCol = $M.maxEachCol;
$M.largeConvolve = $M.convolve;
$M.largeExtract = $M.extract;
$M.largeWriteSubmat = $M.writeSubmat;
})();
var nodejs = (typeof window === 'undefined');
if (nodejs) {
module.exports = AgentSmith;
}
| src/agent_smith.js | var AgentSmith = {};
AgentSmith.Matrix = function(rows, cols, data) {
this.rows = rows;
this.cols = cols;
this.length = rows * cols;
this.datum_type = Float32Array;
this.byte_length = this.length * this.datum_type.BYTES_PER_ELEMENT;
if (data === void 0) {
this.data = new this.datum_type(this.length);
} else {
this.data = data;
}
this.row_wise = true;
};
(function() {
var $M = AgentSmith.Matrix;
var $P = AgentSmith.Matrix.prototype;
/* ##### utilities ##### */
$P.syncData = function() { };
$P.destruct = function() { this.data = void 0; };
$P.copyPropertyFrom = function(original) {
this.rows = original.rows;
this.cols = original.cols;
this.length = original.length;
this.datum_type = original.datum_type;
this.row_wise = original.row_wise;
};
$P.equals = function(mat) {
this.syncData();
mat.syncData();
if (this.rows !== mat.rows || this.cols !== mat.cols) {
return false;
}
if (this.row_wise == mat.row_wise) {
for (var i = 0; i < this.length; i++) {
if (this.data[i] !== mat.data[i]) {
return false;
}
}
} else {
for (var row = 0; row < this.rows; row++) {
for (var col = 0; col < this.cols; col++) {
if (this.get(row, col) !== mat.get(row, col)) {
return false;
}
}
};
}
return true;
};
$P.nearlyEquals = function(mat, epsilon) {
this.syncData();
mat.syncData();
if (epsilon === void 0) {
var epsilon = 0.01;
}
var nearlyEquals = function(a, b) {
var tmp = a - b;
return -epsilon < tmp && tmp < epsilon;
};
if (this.rows !== mat.rows || this.cols !== mat.cols) {
return false;
}
if (this.row_wise == mat.row_wise) {
for (var i = 0; i < this.length; i++) {
if (!nearlyEquals(this.data[i], mat.data[i])) {
return false;
}
}
} else {
for (var row = 0; row < this.rows; row++) {
for (var col = 0; col < this.cols; col++) {
if (!nearlyEquals(this.get(row, col), mat.get(row, col))) {
return false;
}
}
};
}
return true;
};
$P.print = function() {
console.log(this.toString());
};
$P.saveString = function(filename) {
var fs = require('fs');
fs.writeFile(filename, this.toString() , function (err) {
if (err) {
throw new Error(err);
}
});
};
$P.toString = function() {
this.syncData();
var formatWidth = function(str, width) {
while(str.length < width) {
str = ' ' + str;
}
return str;
};
var isInt = function(x) {
return x % 1 === 0;
		};
var write_buf = '-- Matrix (' + this.rows + ' x ' + this.cols + ') --';
write_buf += '\r\n';
var digit = Math.max(1, Math.floor(Math.LOG10E * Math.log(Math.max($M.max(this), -$M.min(this)))));
for (var row = 0; row < this.rows; row++) {
for (var col = 0; col < this.cols; col++) {
var tmp = this.get(row, col);
write_buf += formatWidth(isInt(tmp) ? String(tmp) : tmp.toFixed(6), 10 + digit);
}
if (row != this.rows - 1) { write_buf += '\r\n'; }
}
return write_buf;
};
$P.clone = function() {
this.syncData();
var newM = new $M(this.rows, this.cols);
newM.copyPropertyFrom(this);
newM.data = new this.datum_type(this.data);
return newM;
};
$P.alias = function() {
this.syncData();
var newM = new $M(this.rows, this.cols, null);
newM.copyPropertyFrom(this);
newM.data = this.data;
return newM;
};
$M.hasNaN = function(mat) {
this.syncData();
for (var i = 0; i < mat.length; i++) {
if (isNaN(mat.data[i])) {
return true;
}
}
return false;
	};
	/* ##### initializer ##### */
$P.zeros = function() {
this.syncData();
for (var i = 0; i < this.length; i++) {
this.data[i] = 0;
}
return this;
};
$P.random = function(min, max) {
this.syncData();
if (min === void 0) {
var min = 0.0;
}
if (max === void 0) {
var max = 1.0;
}
for (var i = 0; i < this.length; i++) {
this.data[i] = min + (max - min) * Math.random();
}
return this;
};
$P.gaussRandom = function() {
var getGauss = function(mu, std) {
var a = 1 - Math.random();
var b = 1 - Math.random();
var c = Math.sqrt(-2 * Math.log(a));
if (0.5 - Math.random() > 0) {
return c * Math.sin(Math.PI * 2 * b) * std + mu;
} else {
return c * Math.cos(Math.PI * 2 * b) * std + mu;
}
};
		return function(mu, std) {
			this.syncData();
for (var i = 0; i < this.length; i++) {
this.data[i] = getGauss(mu, std);
}
return this;
}
}();
$P.range = function() {
this.syncData();
for (var i = 0; i < this.data.length; i++) {
this.data[i] = i;
}
return this;
};
$M.fromArray = function(original_array) {
var newM = new $M(original_array.length, original_array[0].length, null);
newM.setArray(original_array);
return newM;
};
$P.setArray = function(original_array) {
this.syncData();
var flatten = Array.prototype.concat.apply([], original_array);
this.data = new this.datum_type(flatten);
return this;
};
$M.fromColVectors = function(original_vectors) {
if (!(original_vectors instanceof Array)) {
throw new Error('input must be an array');
}
if (original_vectors[0].cols !== 1) {
throw new Error('vectors must be col vectors');
}
var newM = new $M(original_vectors[0].length, original_vectors.length);
newM.setEach(function(row, col) {
return original_vectors[col].get(row, 0);
});
return newM;
};
	$M.extract = function(mat, offset_row, offset_col, rows, cols) {
throw new Error('not implemented');
};
$M.writeSubmat = function(mat, submat, offset_row, offset_col) {
throw new Error('not implemented');
};
/* ##### general manipulation ##### */
$P.get = function(row, col) {
this.syncData();
if (row >= this.rows || col >= this.cols) {
throw new Error('out of range');
}
if (this.row_wise) {
return this.data[row * this.cols + col];
} else {
return this.data[col * this.rows + row];
}
};
$P.set = function(row, col, datum) {
this.syncData();
if (row >= this.rows || col >= this.cols) {
throw new Error('out of range');
}
if (this.row_wise) {
this.data[row * this.cols + col] = datum;
} else {
this.data[col * this.rows + row] = datum;
}
return this;
};
$P.map = function(func) {
this.syncData();
for (var i = 0; i < this.length; i++) {
this.data[i] = func(this.data[i]);
};
return this;
};
$P.setEach = function(func) {
this.syncData();
for (var row = 0; row < this.rows; row++) {
for (var col = 0; col < this.cols; col++) {
this.set(row, col, func(row, col));
}
}
return this;
};
$P.forEach = function(func) {
this.syncData();
for (var row = 0; row < this.rows; row++) {
for (var col = 0; col < this.cols; col++) {
func(row, col);
}
}
return this;
	};
/* ##### shape ##### */
$P.reshape = function(rows, cols) {
if (rows * cols !== this.rows * this.cols) {
throw new Error('shape does not match');
}
this.rows = rows;
this.cols = cols;
return this;
};
$P.t = function() {
var alias = this.alias();
alias.row_wise = !alias.row_wise;
var tmp = alias.rows;
alias.rows = alias.cols;
alias.cols = tmp;
return alias;
};
$P.getShape = function() {
return { rows : this.rows, cols : this.cols };
};
/* ##### statistics ##### */
$M.max = function(mat) {
mat.syncData();
var max_val = mat.data[0];
for (var row = 0; row < mat.rows; row++) {
for (var col = 0; col < mat.cols; col++) {
if (mat.get(row, col) > max_val) {
max_val = mat.get(row, col);
}
}
}
return max_val;
};
$M.min = function(mat) {
mat.syncData();
var min_val = mat.data[0];
for (var row = 0; row < mat.rows; row++) {
for (var col = 0; col < mat.cols; col++) {
if (mat.get(row, col) < min_val) {
min_val = mat.get(row, col);
}
}
}
return min_val;
};
$M.argmax = function(mat) {
mat.syncData();
var max_val = mat.data[0];
var arg = { row : 0, col : 0 };
for (var row = 0; row < mat.rows; row++) {
for (var col = 0; col < mat.cols; col++) {
if (mat.get(row, col) > max_val) {
max_val = mat.get(row, col);
arg.row = row;
arg.col = col;
}
}
}
return arg;
};
$M.sum = function(mat) {
mat.syncData();
var sum = 0.0;
for (var i = 0; i < mat.length; i++) {
sum += mat.data[i];
}
return sum;
};
$M.sumEachRow = function(mat) {
mat.syncData();
var newM = new $M(mat.rows, 1);
for (var row = 0; row < mat.rows; row++) {
var tmp = 0;
for (var col = 0; col < mat.cols; col++) {
tmp += mat.get(row, col);
}
newM.set(row, 0, tmp);
}
return newM;
};
$M.sumEachCol = function(mat) {
mat.syncData();
var newM = new $M(1, mat.cols);
for (var col = 0; col < mat.cols; col++) {
var tmp = 0;
for (var row = 0; row < mat.rows; row++) {
tmp += mat.get(row, col);
}
newM.set(0, col, tmp);
}
return newM;
};
$M.maxEachRow = function(mat) {
mat.syncData();
var newM = new $M(mat.rows, 1);
for (var row = 0; row < mat.rows; row++) {
var tmp = mat.get(row, 0);
for (var col = 0; col < mat.cols; col++) {
tmp = Math.max(tmp, mat.get(row, col));
}
newM.set(row, 0, tmp);
}
return newM;
};
$M.maxEachCol = function(mat) {
mat.syncData();
var newM = new $M(1, mat.cols);
for (var col = 0; col < mat.cols; col++) {
var tmp = mat.get(0, col);
for (var row = 0; row < mat.rows; row++) {
tmp = Math.max(tmp, mat.get(row, col));
}
newM.set(0, col, tmp);
}
return newM;
};
/* ##### basic calculation ##### */
var eachOperationGenerator = function(op) {
return eval(
[
" (function(mat) { ",
" this.syncData(); ",
" mat.syncData(); ",
" if (!( (this.rows === mat.rows && this.cols === mat.cols) || ",
" (this.rows === mat.rows && mat.cols === 1) || ",
" (this.cols === mat.cols && mat.rows === 1) ) ) { ",
" throw new Error('shape does not match'); ",
" } ",
" if (this.rows === mat.rows && this.cols === mat.cols) { ",
" if (this.row_wise == mat.row_wise) { ",
" for (var i = 0; i < this.length; i++) { ",
" this.data[i] " + op + "= mat.data[i]; ",
" } ",
" } else { ",
" this.forEach(function(row, col) { ",
" this.set(row, col, this.get(row, col) " + op + " mat.get(row, col)); ",
" }.bind(this)); ",
" } ",
" } else if (this.row_wise) { ",
" if (mat.cols ===1) { ",
" for (var row = 0; row < mat.rows; row++) { ",
" for (var col = 0; col < this.cols; col++) { ",
" this.data[row * this.cols + col] " + op + "= mat.data[row]; ",
" } ",
" } ",
" } else { ",
" for (var col = 0; col < mat.cols; col++) { ",
" for (var row = 0; row < this.rows; row++) { ",
" this.data[row * this.cols + col] " + op + "= mat.data[col]; ",
" } ",
" } ",
" } ",
" } else { ",
" if (mat.cols ===1) { ",
" for (var row = 0; row < mat.rows; row++) { ",
" for (var col = 0; col < this.cols; col++) { ",
" this.data[col * this.rows + row] " + op + "= mat.data[row]; ",
" } ",
" } ",
" } else { ",
" for (var col = 0; col < mat.cols; col++) { ",
" for (var row = 0; row < this.rows; row++) { ",
" this.data[col * this.rows + row] " + op + "= mat.data[col]; ",
" } ",
" } ",
" } ",
" } ",
" return this; ",
" }); "
].join('\r\n')
);
};
$P.times = function(times) {
this.syncData();
for (var i = 0; i < this.length; i++) {
this.data[i] *= times;
}
return this;
};
$P.add = eachOperationGenerator("+");
$M.add = function(mat1, mat2) {
return mat1.clone().add(mat2);
};
$P.sub = eachOperationGenerator("-");
$M.sub = function(mat1, mat2) {
return mat1.clone().sub(mat2);
};
$P.mulEach = eachOperationGenerator("*");
$M.mulEach = function(mat1, mat2) {
return mat1.clone().mulEach(mat2);
};
$P.divEach = eachOperationGenerator("/");
$M.divEach = function(mat1, mat2) {
return mat1.clone().divEach(mat2);
};
$P.dot = function(mat) {
this.syncData();
mat.syncData();
if (this.rows !== mat.rows || this.cols !== mat.cols) {
throw new Error('shape does not match');
}
var sum = 0.0;
if (this.row_wise == mat.row_wise) {
for (var i = 0; i < this.length; i++) {
sum += this.data[i] * mat.data[i];
}
} else {
this.forEach(function(row, col) {
sum += this.get(row, col) * mat.get(row, col);
}.bind(this));
}
return sum;
};
$M.dot = function(mat1, mat2) {
return mat1.dot(mat2);
};
$P.mul = function(mat) {
return $M.mul(this, mat);
};
$M.mul = function(mat1, mat2) {
mat1.syncData();
mat2.syncData();
if (mat1.cols !== mat2.rows) {
throw new Error('shape does not match');
}
var newM = new $M(mat1.rows, mat2.cols);
var tmp = 0;
for (var row = 0; row < newM.rows; row++) {
for (var col = 0; col < newM.cols; col++) {
var tmp = 0.0;
for (var i = 0; i < mat1.cols; i++) {
tmp += mat1.get(row, i) * mat2.get(i, col);
}
newM.data[row * newM.cols + col] = tmp;
}
}
return newM;
};
$M.convolve = function(mat1, mat2, mode) {
throw new Error('not implemented');
};
/* ##### large matrix calculation ##### */
$P.largeAdd = $P.add;
$P.largeSub = $P.sub;
$P.largeMulEach = $P.mulEach;
$P.largeDivEach = $P.divEach;
$P.largeMul = $P.mul;
$P.largeTimes = $P.times;
$P.largeClone = $P.clone;
$M.largeAdd = $M.add;
$M.largeSub = $M.sub;
$M.largeMulEach = $M.mulEach;
$M.largeDivEach = $M.divEach;
$M.largeMul = $M.mul;
$M.largeSum = $M.sum;
$M.largeSumEachRow = $M.sumEachRow;
$M.largeSumEachCol = $M.sumEachCol;
$M.largeMaxEachRow = $M.maxEachRow;
$M.largeMaxEachCol = $M.maxEachCol;
$M.largeConvolve = $M.convolve;
$M.largeExtract = $M.extract;
$M.largeWriteSubmat = $M.writeSubmat;
})();
var nodejs = (typeof window === 'undefined');
if (nodejs) {
module.exports = AgentSmith;
}
| Bug fix
| src/agent_smith.js | Bug fix | <ide><path>rc/agent_smith.js
<ide> };
<ide>
<ide> $M.hasNaN = function(mat) {
<del> this.syncData();
<add> mat.syncData();
<ide> for (var i = 0; i < mat.length; i++) {
<ide> if (isNaN(mat.data[i])) {
<ide> return true; |
|
Java | mit | 343ab6d3c1d8771612be5717c36eed1df931426d | 0 | ist-dresden/composum-platform,ist-dresden/composum-platform,ist-dresden/composum-platform | package com.composum.platform.commons.request;
import com.composum.sling.core.AbstractSlingBean;
import com.composum.sling.core.CoreConfiguration;
import com.composum.sling.core.util.SlingUrl;
import org.apache.commons.lang3.StringUtils;
import org.apache.sling.api.SlingHttpServletRequest;
import org.apache.sling.api.SlingHttpServletResponse;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.annotation.Nonnull;
import java.io.IOException;
import java.security.Principal;
/**
 * Model for error pages, mainly containing the logic for redirecting to the login page.
*/
public class ErrorPage extends AbstractSlingBean {
private static final Logger LOG = LoggerFactory.getLogger(ErrorPage.class);
/**
* Parameter that is appended to {@link #getLoginUrl()} to save the current rendered resource, to allow
     * re-rendering it after the user has logged in.
*/
public static final String TARGET_PARAMETER = "target";
/**
* @see #getLoginUrl()
*/
private transient String loginUrl;
/**
* @see #getCoreConfiguration()
*/
private transient CoreConfiguration coreConfiguration;
/**
* The {@link CoreConfiguration} service.
*/
public CoreConfiguration getCoreConfiguration() {
if (coreConfiguration == null) {
coreConfiguration = context.getService(CoreConfiguration.class);
}
return coreConfiguration;
}
/**
     * The URL of the login page.
     *
     * @see CoreConfiguration#getLoginUrl()
*/
@Nonnull
public String getLoginUrl() {
if (loginUrl == null) {
loginUrl = getCoreConfiguration() != null ? getCoreConfiguration().getLoginUrl() : null;
loginUrl = StringUtils.defaultIfBlank(loginUrl, "/system/sling/form/login.html");
}
return loginUrl;
}
/**
     * Redirects to the {@link #getLoginUrl()} unless the current request already targets the login page.
*/
public boolean redirectToLogin(SlingHttpServletRequest request, SlingHttpServletResponse response) {
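        // Redirect only when the current request is not itself for the login page,
        // which prevents a redirect loop.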
try {
String requestUrl = request.getRequestURL().toString();
if (!requestUrl.startsWith(getLoginUrl())) {
SlingUrl url = new SlingUrl(request, getLoginUrl()).parameter(TARGET_PARAMETER, requestUrl);
response.sendRedirect(url.toString());
return true;
}
} catch (IOException ex) {
LOG.error(ex.getMessage(), ex);
}
return false;
}
/**
     * Redirects to the {@link #getLoginUrl()} if the user is not logged in yet and the current request does not already target the login page.
*/
public boolean loginIfAnonymous(SlingHttpServletRequest request, SlingHttpServletResponse response) {
Principal currentUser = request.getUserPrincipal();
if (currentUser == null || "anonymous".equals(currentUser.getName())) {
return redirectToLogin(request, response);
}
return false;
}
}
| commons/bundle/src/main/java/com/composum/platform/commons/request/ErrorPage.java | package com.composum.platform.commons.request;
import com.composum.sling.core.AbstractSlingBean;
import com.composum.sling.core.CoreConfiguration;
import com.composum.sling.core.util.SlingUrl;
import org.apache.commons.lang3.StringUtils;
import org.apache.sling.api.SlingHttpServletRequest;
import org.apache.sling.api.SlingHttpServletResponse;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.annotation.Nonnull;
import java.io.IOException;
import java.security.Principal;
/**
 * Model for error pages, mainly containing the logic for redirecting to the login page.
*/
public class ErrorPage extends AbstractSlingBean {
private static final Logger LOG = LoggerFactory.getLogger(ErrorPage.class);
/**
* Parameter that is appended to {@link #getLoginUrl()} to save the current rendered resource, to allow
     * re-rendering it after the user has logged in.
*/
public static final String TARGET_PARAMETER = "target";
/**
* @see #getLoginUrl()
*/
private transient String loginUrl;
/**
* @see #getCoreConfiguration()
*/
private transient CoreConfiguration coreConfiguration;
/**
* The {@link CoreConfiguration} service.
*/
public CoreConfiguration getCoreConfiguration() {
if (coreConfiguration == null) {
coreConfiguration = context.getService(CoreConfiguration.class);
}
return coreConfiguration;
}
/**
     * The URL of the login page.
     *
     * @see CoreConfiguration#getLoginUrl()
*/
@Nonnull
public String getLoginUrl() {
if (loginUrl == null) {
loginUrl = getCoreConfiguration() != null ? getCoreConfiguration().getLoginUrl() : null;
loginUrl = StringUtils.defaultIfBlank(loginUrl, "/system/sling/form/login.html");
}
return loginUrl;
}
/**
     * Redirects to the {@link #getLoginUrl()} unless the current request already targets the login page.
*/
public boolean redirectToLogin(SlingHttpServletRequest request, SlingHttpServletResponse response) {
try {
String requestUrl = request.getRequestURL().toString();
if (!requestUrl.startsWith(getLoginUrl())) {
SlingUrl url = new SlingUrl(request, getLoginUrl()).parameter(TARGET_PARAMETER, requestUrl);
response.sendRedirect(url.getUrl());
return true;
}
} catch (IOException ex) {
LOG.error(ex.getMessage(), ex);
}
return false;
}
/**
     * Redirects to the {@link #getLoginUrl()} if the user is not logged in yet and the current request does not already target the login page.
*/
public boolean loginIfAnonymous(SlingHttpServletRequest request, SlingHttpServletResponse response) {
Principal currentUser = request.getUserPrincipal();
if (currentUser == null || "anonymous".equals(currentUser.getName())) {
return redirectToLogin(request, response);
}
return false;
}
}
| login redirect target URL handling fixed
| commons/bundle/src/main/java/com/composum/platform/commons/request/ErrorPage.java | login redirect target URL handling fixed | <ide><path>ommons/bundle/src/main/java/com/composum/platform/commons/request/ErrorPage.java
<ide> String requestUrl = request.getRequestURL().toString();
<ide> if (!requestUrl.startsWith(getLoginUrl())) {
<ide> SlingUrl url = new SlingUrl(request, getLoginUrl()).parameter(TARGET_PARAMETER, requestUrl);
<del> response.sendRedirect(url.getUrl());
<add> response.sendRedirect(url.toString());
<ide> return true;
<ide> }
<ide> } catch (IOException ex) { |
|
Java | apache-2.0 | cd31ce588ff3bd0dd1ae91aeca126a4e5a9f84c7 | 0 | jerome79/OG-Platform,McLeodMoores/starling,codeaudit/OG-Platform,ChinaQuants/OG-Platform,McLeodMoores/starling,DevStreet/FinanceAnalytics,DevStreet/FinanceAnalytics,jeorme/OG-Platform,ChinaQuants/OG-Platform,jerome79/OG-Platform,McLeodMoores/starling,DevStreet/FinanceAnalytics,ChinaQuants/OG-Platform,nssales/OG-Platform,nssales/OG-Platform,codeaudit/OG-Platform,jerome79/OG-Platform,nssales/OG-Platform,codeaudit/OG-Platform,McLeodMoores/starling,jeorme/OG-Platform,jeorme/OG-Platform,jeorme/OG-Platform,nssales/OG-Platform,DevStreet/FinanceAnalytics,codeaudit/OG-Platform,jerome79/OG-Platform,ChinaQuants/OG-Platform | /**
* Copyright (C) 2009 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.engine.view.worker;
import java.text.MessageFormat;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;
import java.util.concurrent.locks.Lock;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.util.CollectionUtils;
import org.threeten.bp.Duration;
import org.threeten.bp.Instant;
import com.codahale.metrics.MetricRegistry;
import com.codahale.metrics.Timer;
import com.google.common.base.Supplier;
import com.google.common.collect.Maps;
import com.google.common.collect.Sets;
import com.opengamma.OpenGammaRuntimeException;
import com.opengamma.core.position.Portfolio;
import com.opengamma.core.position.PortfolioNode;
import com.opengamma.core.position.Position;
import com.opengamma.core.position.Trade;
import com.opengamma.core.position.impl.PortfolioNodeEquivalenceMapper;
import com.opengamma.engine.ComputationTarget;
import com.opengamma.engine.ComputationTargetResolver;
import com.opengamma.engine.ComputationTargetSpecification;
import com.opengamma.engine.MemoryUtils;
import com.opengamma.engine.depgraph.DependencyGraph;
import com.opengamma.engine.depgraph.DependencyGraphExplorer;
import com.opengamma.engine.depgraph.DependencyNode;
import com.opengamma.engine.depgraph.DependencyNodeFilter;
import com.opengamma.engine.marketdata.MarketDataListener;
import com.opengamma.engine.marketdata.MarketDataSnapshot;
import com.opengamma.engine.marketdata.availability.MarketDataAvailabilityProvider;
import com.opengamma.engine.marketdata.manipulator.DistinctMarketDataSelector;
import com.opengamma.engine.marketdata.manipulator.MarketDataManipulator;
import com.opengamma.engine.marketdata.manipulator.NoOpMarketDataSelector;
import com.opengamma.engine.marketdata.spec.MarketDataSpecification;
import com.opengamma.engine.resource.EngineResourceReference;
import com.opengamma.engine.target.ComputationTargetReference;
import com.opengamma.engine.target.ComputationTargetType;
import com.opengamma.engine.value.ValueRequirement;
import com.opengamma.engine.value.ValueSpecification;
import com.opengamma.engine.view.ViewCalculationConfiguration;
import com.opengamma.engine.view.ViewComputationResultModel;
import com.opengamma.engine.view.ViewDefinition;
import com.opengamma.engine.view.compilation.CompiledViewDefinitionWithGraphs;
import com.opengamma.engine.view.compilation.CompiledViewDefinitionWithGraphsImpl;
import com.opengamma.engine.view.compilation.ViewCompilationServices;
import com.opengamma.engine.view.compilation.ViewDefinitionCompiler;
import com.opengamma.engine.view.cycle.DefaultViewCycleMetadata;
import com.opengamma.engine.view.cycle.SingleComputationCycle;
import com.opengamma.engine.view.cycle.ViewCycle;
import com.opengamma.engine.view.cycle.ViewCycleMetadata;
import com.opengamma.engine.view.cycle.ViewCycleState;
import com.opengamma.engine.view.execution.ViewCycleExecutionOptions;
import com.opengamma.engine.view.execution.ViewExecutionFlags;
import com.opengamma.engine.view.execution.ViewExecutionOptions;
import com.opengamma.engine.view.impl.ViewProcessContext;
import com.opengamma.engine.view.listener.ComputationResultListener;
import com.opengamma.engine.view.worker.cache.PLAT3249;
import com.opengamma.engine.view.worker.cache.ViewExecutionCacheKey;
import com.opengamma.engine.view.worker.trigger.CombinedViewCycleTrigger;
import com.opengamma.engine.view.worker.trigger.FixedTimeTrigger;
import com.opengamma.engine.view.worker.trigger.RecomputationPeriodTrigger;
import com.opengamma.engine.view.worker.trigger.RunAsFastAsPossibleTrigger;
import com.opengamma.engine.view.worker.trigger.SuccessiveDeltaLimitTrigger;
import com.opengamma.engine.view.worker.trigger.ViewCycleEligibility;
import com.opengamma.engine.view.worker.trigger.ViewCycleTrigger;
import com.opengamma.engine.view.worker.trigger.ViewCycleTriggerResult;
import com.opengamma.engine.view.worker.trigger.ViewCycleType;
import com.opengamma.id.ObjectId;
import com.opengamma.id.UniqueId;
import com.opengamma.id.VersionCorrection;
import com.opengamma.id.VersionCorrectionUtils;
import com.opengamma.util.ArgumentChecker;
import com.opengamma.util.NamedThreadPoolFactory;
import com.opengamma.util.PoolExecutor;
import com.opengamma.util.TerminatableJob;
import com.opengamma.util.metric.OpenGammaMetricRegistry;
import com.opengamma.util.monitor.OperationTimer;
import com.opengamma.util.tuple.Pair;
/**
* The job which schedules and executes computation cycles for a view process. See {@link SingleThreadViewProcessWorkerFactory} for a more detailed description.
*/
public class SingleThreadViewProcessWorker implements MarketDataListener, ViewProcessWorker {
private static final Logger s_logger = LoggerFactory.getLogger(SingleThreadViewProcessWorker.class);
private static final ExecutorService s_executor = Executors.newCachedThreadPool(new NamedThreadPoolFactory("Worker"));
/**
* Wrapper that allows a thread to be "borrowed" from an executor service.
*/
  /* package */ static final class BorrowedThread implements Runnable {
private final String _name;
private final Runnable _job;
private final CountDownLatch _join = new CountDownLatch(1);
private Thread _thread;
private String _originalName;
public BorrowedThread(final String name, final Runnable job) {
_name = name;
_job = job;
}
public synchronized Thread.State getState() {
if (_thread != null) {
return _thread.getState();
} else {
return (_originalName != null) ? Thread.State.TERMINATED : Thread.State.NEW;
}
}
public void join() throws InterruptedException {
_join.await();
}
public void join(long timeout) throws InterruptedException {
_join.await(timeout, TimeUnit.MILLISECONDS);
}
public synchronized void interrupt() {
if (_thread != null) {
_thread.interrupt();
}
}
public synchronized boolean isAlive() {
return _thread != null;
}
// Runnable
@Override
public void run() {
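      // Temporarily rename the pooled thread for the duration of the job and
      // restore the original name before the thread is returned to the executor.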
synchronized (this) {
_thread = Thread.currentThread();
_originalName = _thread.getName();
}
try {
_thread.setName(_originalName + "-" + _name);
_job.run();
} finally {
_thread.setName(_originalName);
synchronized (this) {
_thread = null;
}
_join.countDown();
}
}
}
private static final long NANOS_PER_MILLISECOND = 1000000;
private final ViewProcessWorkerContext _context;
private final ViewExecutionOptions _executionOptions;
private final CombinedViewCycleTrigger _masterCycleTrigger = new CombinedViewCycleTrigger();
private final FixedTimeTrigger _compilationExpiryCycleTrigger;
private final boolean _executeCycles;
private final boolean _executeGraphs;
private final boolean _ignoreCompilationValidity;
private final boolean _suppressExecutionOnNoMarketData;
/**
* The changes to the master trigger that must be made during the next cycle.
* <p>
* This has been added as an immediate fix for [PLAT-3291] but could be extended to represent an arbitrary change to add/remove triggers if we wish to support the execution options changing for a
* running worker.
*/
private ViewCycleTrigger _masterCycleTriggerChanges;
private int _cycleCount;
private EngineResourceReference<SingleComputationCycle> _previousCycleReference;
/**
* The current view definition the worker must calculate on.
*/
private ViewDefinition _viewDefinition;
/**
* The most recently compiled form of the view definition. This may have been compiled by this worker, or retrieved from the cache and is being reused.
*/
private CompiledViewDefinitionWithGraphs _latestCompiledViewDefinition;
/**
* The key to use for storing the compiled view definition, or querying it, from the cache shared with other workers. Whenever the market data provider or view definition changes, this must be
* updated.
*/
private ViewExecutionCacheKey _executionCacheKey;
private final Set<ValueSpecification> _marketDataSubscriptions = new HashSet<ValueSpecification>();
private final Set<ValueSpecification> _pendingSubscriptions = Collections.newSetFromMap(new ConcurrentHashMap<ValueSpecification, Boolean>());
private TargetResolverChangeListener _targetResolverChanges;
private volatile boolean _wakeOnCycleRequest;
private volatile boolean _cycleRequested;
private volatile boolean _forceTriggerCycle;
/**
* An updated view definition pushed in by the execution coordinator. When the next cycle runs, this should be used instead of the previous one.
*/
private final AtomicReference<ViewDefinition> _newViewDefinition = new AtomicReference<ViewDefinition>();
private volatile Future<CompiledViewDefinitionWithGraphsImpl> _compilationTask;
/**
   * Total time the job has spent "working". This does not include time spent waiting for a trigger. It is the wall-clock time spent on all I/O involved in a cycle (e.g. database accesses), graph compilation,
* market data subscription, graph execution, result dispatch, etc.
*/
private double _totalTimeNanos;
/**
* The market data provider(s) for the current cycles.
*/
private SnapshottingViewExecutionDataProvider _marketDataProvider;
/**
* Flag indicating the market data provider has changed and any nodes sourcing market data into the dependency graph may now be invalid.
*/
private boolean _marketDataProviderDirty;
/**
* The terminatable job wrapper.
*/
private final TerminatableJob _job;
/**
* The thread running this job.
*/
private final BorrowedThread _thread;
/**
* The manipulator for structured market data.
*/
private final MarketDataManipulator _marketDataManipulator;
/**
   * Timer to track delta cycle execution time.
*/
private Timer _deltaCycleTimer;
/**
* Timer to track full cycle execution time.
*/
private Timer _fullCycleTimer;
public SingleThreadViewProcessWorker(final ViewProcessWorkerContext context, final ViewExecutionOptions executionOptions, final ViewDefinition viewDefinition) {
ArgumentChecker.notNull(context, "context");
ArgumentChecker.notNull(executionOptions, "executionOptions");
ArgumentChecker.notNull(viewDefinition, "viewDefinition");
_context = context;
_executionOptions = executionOptions;
_cycleRequested = !executionOptions.getFlags().contains(ViewExecutionFlags.WAIT_FOR_INITIAL_TRIGGER);
_compilationExpiryCycleTrigger = new FixedTimeTrigger();
addMasterCycleTrigger(_compilationExpiryCycleTrigger);
if (executionOptions.getFlags().contains(ViewExecutionFlags.TRIGGER_CYCLE_ON_TIME_ELAPSED)) {
addMasterCycleTrigger(new RecomputationPeriodTrigger(new Supplier<ViewDefinition>() {
@Override
public ViewDefinition get() {
return getViewDefinition();
}
}));
}
if (executionOptions.getMaxSuccessiveDeltaCycles() != null) {
addMasterCycleTrigger(new SuccessiveDeltaLimitTrigger(executionOptions.getMaxSuccessiveDeltaCycles()));
}
if (executionOptions.getFlags().contains(ViewExecutionFlags.RUN_AS_FAST_AS_POSSIBLE)) {
if (_cycleRequested) {
addMasterCycleTrigger(new RunAsFastAsPossibleTrigger());
} else {
// Defer the trigger until an initial one has happened
_masterCycleTriggerChanges = new RunAsFastAsPossibleTrigger();
}
}
_executeCycles = !executionOptions.getFlags().contains(ViewExecutionFlags.COMPILE_ONLY);
_executeGraphs = !executionOptions.getFlags().contains(ViewExecutionFlags.FETCH_MARKET_DATA_ONLY);
_suppressExecutionOnNoMarketData = executionOptions.getFlags().contains(ViewExecutionFlags.SKIP_CYCLE_ON_NO_MARKET_DATA);
_ignoreCompilationValidity = executionOptions.getFlags().contains(ViewExecutionFlags.IGNORE_COMPILATION_VALIDITY);
_viewDefinition = viewDefinition;
_marketDataManipulator = createMarketDataManipulator();
_job = new Job();
_thread = new BorrowedThread(context.toString(), _job);
_deltaCycleTimer = OpenGammaMetricRegistry.getSummaryInstance().timer("SingleThreadViewProcessWorker.cycle.delta");
_fullCycleTimer = OpenGammaMetricRegistry.getSummaryInstance().timer("SingleThreadViewProcessWorker.cycle.full");
s_executor.submit(_thread);
}
private MarketDataManipulator createMarketDataManipulator() {
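    // Fall back to the no-op selector when no default cycle execution options are
    // supplied, so callers always get a usable manipulator.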
ViewCycleExecutionOptions defaultExecutionOptions = _executionOptions.getDefaultExecutionOptions();
return new MarketDataManipulator(defaultExecutionOptions != null ?
defaultExecutionOptions.getMarketDataSelector() :
NoOpMarketDataSelector.getInstance());
}
private ViewProcessWorkerContext getWorkerContext() {
return _context;
}
private ViewExecutionOptions getExecutionOptions() {
return _executionOptions;
}
private ViewProcessContext getProcessContext() {
return getWorkerContext().getProcessContext();
}
private ViewCycleTrigger getMasterCycleTrigger() {
return _masterCycleTrigger;
}
private void addMasterCycleTrigger(final ViewCycleTrigger trigger) {
_masterCycleTrigger.addTrigger(trigger);
}
public FixedTimeTrigger getCompilationExpiryCycleTrigger() {
return _compilationExpiryCycleTrigger;
}
protected BorrowedThread getThread() {
return _thread;
}
protected TerminatableJob getJob() {
return _job;
}
private final class Job extends TerminatableJob {
/**
* Determines whether to run, and runs if required, a single computation cycle using the following rules:
* <ul>
* <li>A computation cycle can only be triggered if the relevant minimum computation period has passed since the start of the previous cycle.
* <li>A computation cycle will be forced if the relevant maximum computation period has passed since the start of the previous cycle.
* <li>A full computation is preferred over a delta computation if both are possible.
* <li>Performing a full computation also updates the times to the next delta computation; i.e. a full computation is considered to be as good as a delta.
* </ul>
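* <p>
* For illustration only (hypothetical timings): with a minimum delta period of 1s and a maximum full period of 60s, a market data tick arriving 0.5s after the
* previous cycle start is deferred until the 1s boundary, while a full cycle is forced once 60s have elapsed even if no trigger fired, which also resets the
* delta schedule.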
*/
@Override
protected void runOneCycle() {
// Exception handling is important here to ensure that computation jobs do not just die quietly while consumers are
// potentially blocked, waiting for results.
ViewCycleType cycleType;
try {
cycleType = waitForNextCycle();
} catch (final InterruptedException e) {
return;
}
ViewCycleExecutionOptions executionOptions = null;
try {
if (!getExecutionOptions().getExecutionSequence().isEmpty()) {
executionOptions = getExecutionOptions().getExecutionSequence().poll(getExecutionOptions().getDefaultExecutionOptions());
s_logger.debug("Next cycle execution options: {}", executionOptions);
}
if (executionOptions == null) {
s_logger.info("No more view cycle execution options");
jobCompleted();
return;
}
} catch (final Exception e) {
s_logger.error("Error obtaining next view cycle execution options from sequence for " + getWorkerContext(), e);
return;
}
if (executionOptions.getMarketDataSpecifications().isEmpty()) {
s_logger.error("No market data specifications for cycle");
cycleExecutionFailed(executionOptions, new OpenGammaRuntimeException("No market data specifications for cycle"));
return;
}
MarketDataSnapshot marketDataSnapshot;
try {
SnapshottingViewExecutionDataProvider marketDataProvider = getMarketDataProvider();
if (marketDataProvider == null ||
!marketDataProvider.getSpecifications().equals(executionOptions.getMarketDataSpecifications())) {
if (marketDataProvider != null) {
s_logger.info("Replacing market data provider between cycles");
}
replaceMarketDataProvider(executionOptions.getMarketDataSpecifications());
marketDataProvider = getMarketDataProvider();
if (marketDataProvider == null) {
cycleExecutionFailed(executionOptions, new OpenGammaRuntimeException("Market data specifications " + executionOptions.getMarketDataSpecifications() + "invalid"));
return;
}
}
// Obtain the snapshot in case it is needed, but don't explicitly initialise it until the data is required
marketDataSnapshot = marketDataProvider.snapshot();
} catch (final Exception e) {
s_logger.error("Error with market data provider", e);
cycleExecutionFailed(executionOptions, new OpenGammaRuntimeException("Error with market data provider", e));
return;
}
Instant compilationValuationTime;
try {
if (executionOptions.getValuationTime() != null) {
compilationValuationTime = executionOptions.getValuationTime();
} else {
// Neither the cycle-specific options nor the defaults have overridden the valuation time so use the time
// associated with the market data snapshot. To avoid initialising the snapshot perhaps before the required
// inputs are known or even subscribed to, only ask for an indication at the moment.
compilationValuationTime = marketDataSnapshot.getSnapshotTimeIndication();
if (compilationValuationTime == null) {
throw new OpenGammaRuntimeException("Market data snapshot " + marketDataSnapshot + " produced a null indication of snapshot time");
}
}
} catch (final Exception e) {
s_logger.error("Error obtaining compilation valuation time", e);
cycleExecutionFailed(executionOptions, new OpenGammaRuntimeException("Error obtaining compilation valuation time", e));
return;
}
final VersionCorrection versionCorrection = getResolverVersionCorrection(executionOptions);
VersionCorrectionUtils.lock(versionCorrection);
try {
final CompiledViewDefinitionWithGraphs compiledViewDefinition;
try {
// Don't query the cache so that the process gets a "compiled" message even if a cached compilation is used
final CompiledViewDefinitionWithGraphs previous = _latestCompiledViewDefinition;
if (_ignoreCompilationValidity && (previous != null) && CompiledViewDefinitionWithGraphsImpl.isValidFor(previous, compilationValuationTime)) {
compiledViewDefinition = previous;
} else {
compiledViewDefinition = getCompiledViewDefinition(compilationValuationTime, versionCorrection);
if (compiledViewDefinition == null) {
s_logger.warn("Job terminated during view compilation");
return;
}
if ((previous == null) || !previous.getCompilationIdentifier().equals(compiledViewDefinition.getCompilationIdentifier())) {
if (_targetResolverChanges != null) {
// We'll try to register for changes that will wake us up for a cycle if market data is not ticking
if (previous != null) {
final Set<UniqueId> subscribedIds = new HashSet<UniqueId>(previous.getResolvedIdentifiers().values());
for (UniqueId uid : compiledViewDefinition.getResolvedIdentifiers().values()) {
if (!subscribedIds.contains(uid)) {
_targetResolverChanges.watch(uid.getObjectId());
}
}
} else {
for (UniqueId uid : compiledViewDefinition.getResolvedIdentifiers().values()) {
_targetResolverChanges.watch(uid.getObjectId());
}
}
}
viewDefinitionCompiled(executionOptions, compiledViewDefinition);
}
}
} catch (final Exception e) {
final String message = MessageFormat.format("Error obtaining compiled view definition {0} for time {1} at version-correction {2}", getViewDefinition().getUniqueId(),
compilationValuationTime, versionCorrection);
s_logger.error(message, e);
cycleExecutionFailed(executionOptions, new OpenGammaRuntimeException(message, e));
return;
}
// [PLAT-1174] This is necessary to support global injections by ValueRequirement. The use of a process-context level variable will be bad
// if there are multiple worker threads that initialise snapshots concurrently.
getProcessContext().getLiveDataOverrideInjector().setComputationTargetResolver(
getProcessContext().getFunctionCompilationService().getFunctionCompilationContext().getRawComputationTargetResolver().atVersionCorrection(versionCorrection));
boolean marketDataSubscribed = false;
try {
if (getExecutionOptions().getFlags().contains(ViewExecutionFlags.AWAIT_MARKET_DATA)) {
// REVIEW jonathan/andrew -- 2013-03-28 -- if the user wants to wait for market data, then assume they mean
// it and wait as long as it takes. There are mechanisms for cancelling the job.
setMarketDataSubscriptions(compiledViewDefinition.getMarketDataRequirements());
marketDataSubscribed = true;
marketDataSnapshot.init(compiledViewDefinition.getMarketDataRequirements(), Long.MAX_VALUE, TimeUnit.MILLISECONDS);
} else {
marketDataSubscribed = false;
marketDataSnapshot.init();
}
if (executionOptions.getValuationTime() == null) {
executionOptions = executionOptions.copy().setValuationTime(marketDataSnapshot.getSnapshotTime()).create();
}
} catch (final Exception e) {
s_logger.error("Error initializing snapshot {}", marketDataSnapshot);
cycleExecutionFailed(executionOptions, new OpenGammaRuntimeException("Error initializing snapshot" + marketDataSnapshot, e));
}
EngineResourceReference<SingleComputationCycle> cycleReference;
try {
cycleReference = createCycle(executionOptions, compiledViewDefinition, versionCorrection);
} catch (final Exception e) {
s_logger.error("Error creating next view cycle for " + getWorkerContext(), e);
return;
}
if (_executeCycles) {
try {
final SingleComputationCycle singleComputationCycle = cycleReference.get();
final HashMap<String, Collection<ComputationTargetSpecification>> configToComputationTargets = new HashMap<String, Collection<ComputationTargetSpecification>>();
final HashMap<String, Map<ValueSpecification, Set<ValueRequirement>>> configToTerminalOutputs = new HashMap<String, Map<ValueSpecification, Set<ValueRequirement>>>();
for (DependencyGraphExplorer graphExp : compiledViewDefinition.getDependencyGraphExplorers()) {
final DependencyGraph graph = graphExp.getWholeGraph();
configToComputationTargets.put(graph.getCalculationConfigurationName(), graph.getAllComputationTargets());
configToTerminalOutputs.put(graph.getCalculationConfigurationName(), graph.getTerminalOutputs());
}
if (isTerminated()) {
cycleReference.release();
return;
}
cycleStarted(new DefaultViewCycleMetadata(
cycleReference.get().getUniqueId(),
marketDataSnapshot.getUniqueId(),
compiledViewDefinition.getViewDefinition().getUniqueId(),
versionCorrection,
executionOptions.getValuationTime(),
singleComputationCycle.getAllCalculationConfigurationNames(),
configToComputationTargets,
configToTerminalOutputs));
if (isTerminated()) {
cycleReference.release();
return;
}
if (!marketDataSubscribed) {
setMarketDataSubscriptions(compiledViewDefinition.getMarketDataRequirements());
}
executeViewCycle(cycleType, cycleReference, marketDataSnapshot);
} catch (final InterruptedException e) {
// Execution interrupted - don't propagate as failure
s_logger.info("View cycle execution interrupted for {}", getWorkerContext());
cycleReference.release();
return;
} catch (final Exception e) {
// Execution failed
s_logger.error("View cycle execution failed for " + getWorkerContext(), e);
cycleReference.release();
cycleExecutionFailed(executionOptions, e);
return;
}
}
// Don't push the results through if we've been terminated, since another computation job could be running already
// and the fact that we've been terminated means the view is no longer interested in the result. Just die quietly.
if (isTerminated()) {
cycleReference.release();
return;
}
if (_executeCycles) {
cycleCompleted(cycleReference.get());
}
if (getExecutionOptions().getExecutionSequence().isEmpty()) {
jobCompleted();
}
if (_executeCycles) {
if (_previousCycleReference != null) {
_previousCycleReference.release();
}
_previousCycleReference = cycleReference;
}
} finally {
VersionCorrectionUtils.unlock(versionCorrection);
}
}
@Override
protected void postRunCycle() {
if (_previousCycleReference != null) {
_previousCycleReference.release();
}
unsubscribeFromTargetResolverChanges();
removeMarketDataProvider();
cacheCompiledViewDefinition(null);
}
@Override
public void terminate() {
super.terminate();
final Future<CompiledViewDefinitionWithGraphsImpl> task = _compilationTask;
if (task != null) {
task.cancel(true);
}
}
}
private void cycleCompleted(final ViewCycle cycle) {
try {
getWorkerContext().cycleCompleted(cycle);
} catch (final Exception e) {
s_logger.error("Error notifying " + getWorkerContext() + " of view cycle completion", e);
}
}
private void cycleStarted(final ViewCycleMetadata cycleMetadata) {
try {
getWorkerContext().cycleStarted(cycleMetadata);
} catch (final Exception e) {
s_logger.error("Error notifying " + getWorkerContext() + " of view cycle starting", e);
}
}
private void cycleFragmentCompleted(final ViewComputationResultModel result) {
try {
getWorkerContext().cycleFragmentCompleted(result, getViewDefinition());
} catch (final Exception e) {
s_logger.error("Error notifying " + getWorkerContext() + " of cycle fragment completion", e);
}
}
private void cycleExecutionFailed(final ViewCycleExecutionOptions executionOptions, final Exception exception) {
try {
getWorkerContext().cycleExecutionFailed(executionOptions, exception);
} catch (final Exception vpe) {
s_logger.error("Error notifying " + getWorkerContext() + " of the cycle execution error", vpe);
}
}
private void viewDefinitionCompiled(final ViewCycleExecutionOptions executionOptions, final CompiledViewDefinitionWithGraphs compiledViewDefinition) {
try {
getWorkerContext().viewDefinitionCompiled(getMarketDataProvider(), compiledViewDefinition);
} catch (final Exception vpe) {
s_logger.error("Error notifying " + getWorkerContext() + " of view definition compilation");
}
}
private void viewDefinitionCompilationFailed(final Instant compilationTime, final Exception e) {
try {
getWorkerContext().viewDefinitionCompilationFailed(compilationTime, e);
} catch (final Exception vpe) {
s_logger.error("Error notifying " + getWorkerContext() + " of the view definition compilation failure", vpe);
}
}
private synchronized ViewCycleType waitForNextCycle() throws InterruptedException {
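// Runs on the worker thread holding this worker's monitor; triggerCycle() and requestCycle() synchronize on the same monitor and call notifyAll() to cut the
// wait() below short.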
while (true) {
final long currentTimeNanos = System.nanoTime();
final ViewCycleTriggerResult triggerResult = getMasterCycleTrigger().query(currentTimeNanos);
ViewCycleEligibility cycleEligibility = triggerResult.getCycleEligibility();
if (_forceTriggerCycle) {
cycleEligibility = ViewCycleEligibility.FORCE;
_forceTriggerCycle = false;
}
if (cycleEligibility == ViewCycleEligibility.FORCE || (cycleEligibility == ViewCycleEligibility.ELIGIBLE && _cycleRequested)) {
_cycleRequested = false;
ViewCycleType cycleType = triggerResult.getCycleType();
if (_previousCycleReference == null) {
// Cannot do a delta if we have no previous cycle
cycleType = ViewCycleType.FULL;
}
try {
getMasterCycleTrigger().cycleTriggered(currentTimeNanos, cycleType);
} catch (final Exception e) {
s_logger.error("Error notifying trigger of intention to execute cycle", e);
}
s_logger.debug("Eligible for {} cycle", cycleType);
if (_masterCycleTriggerChanges != null) {
// TODO: If we wish to support execution option changes mid-execution, we will need to add/remove any relevant triggers here
// Currently only the run-as-fast-as-possible trigger becomes valid for the second cycle if we've also got wait-for-initial-trigger
addMasterCycleTrigger(_masterCycleTriggerChanges);
_masterCycleTriggerChanges = null;
}
return cycleType;
}
// Going to sleep
final long wakeUpTime = triggerResult.getNextStateChangeNanos();
if (_cycleRequested) {
s_logger.debug("Sleeping until eligible to perform the next computation cycle");
// No amount of market data can make us eligible for a computation cycle any sooner.
_wakeOnCycleRequest = false;
} else {
s_logger.debug("Sleeping until forced to perform the next computation cycle");
_wakeOnCycleRequest = cycleEligibility == ViewCycleEligibility.ELIGIBLE;
}
long sleepTime = wakeUpTime - currentTimeNanos;
sleepTime = Math.max(0, sleepTime);
sleepTime /= NANOS_PER_MILLISECOND;
sleepTime += 1; // Could have been rounded down during division so ensure only woken after state change
s_logger.debug("Waiting for {} ms", sleepTime);
try {
// This could wait until end of time. In this case, only marketDataChanged() or triggerCycle() will wake it up
wait(sleepTime);
} catch (final InterruptedException e) {
// We support interruption as a signal that we have been terminated. If we're interrupted without having been
// terminated, we'll just return to this method and go back to sleep.
Thread.interrupted();
s_logger.info("Interrupted while delaying. Continuing operation.");
throw e;
}
}
}
private void executeViewCycle(final ViewCycleType cycleType,
final EngineResourceReference<SingleComputationCycle> cycleReference,
final MarketDataSnapshot marketDataSnapshot) throws Exception {
SingleComputationCycle deltaCycle;
if (cycleType == ViewCycleType.FULL) {
s_logger.info("Performing full computation");
deltaCycle = null;
} else {
s_logger.info("Performing delta computation");
deltaCycle = _previousCycleReference.get();
if ((deltaCycle != null) && (deltaCycle.getState() != ViewCycleState.EXECUTED)) {
// Can only do a delta cycle if the previous was valid
deltaCycle = null;
}
}
boolean continueExecution = cycleReference.get().preExecute(deltaCycle, marketDataSnapshot, _suppressExecutionOnNoMarketData);
if (_executeGraphs && continueExecution) {
try {
cycleReference.get().execute(s_executor);
} catch (final InterruptedException e) {
Thread.interrupted();
// In reality this means that the job has been terminated, and it will end as soon as we return from this method.
// In case the thread has been interrupted without terminating the job, we tidy everything up as if the
// interrupted cycle never happened so that deltas will be calculated from the previous cycle.
s_logger.info("Interrupted while executing a computation cycle. No results will be output from this cycle.");
throw e;
} catch (final Exception e) {
s_logger.error("Error while executing view cycle", e);
throw e;
}
} else {
s_logger.debug("Skipping graph execution");
}
cycleReference.get().postExecute();
final long durationNanos = cycleReference.get().getDuration().toNanos();
final Timer timer = deltaCycle != null ? _deltaCycleTimer : _fullCycleTimer;
if (timer != null) {
timer.update(durationNanos, TimeUnit.NANOSECONDS);
}
_totalTimeNanos += durationNanos;
_cycleCount += 1;
s_logger.info("Last latency was {} ms, Average latency is {} ms",
durationNanos / NANOS_PER_MILLISECOND,
(_totalTimeNanos / _cycleCount) / NANOS_PER_MILLISECOND);
}
private void jobCompleted() {
s_logger.info("Computation job completed for {}", getWorkerContext());
try {
getWorkerContext().workerCompleted();
} catch (final Exception e) {
s_logger.error("Error notifying " + getWorkerContext() + " of computation job completion", e);
}
getJob().terminate();
}
private EngineResourceReference<SingleComputationCycle> createCycle(final ViewCycleExecutionOptions executionOptions,
final CompiledViewDefinitionWithGraphs compiledViewDefinition, final VersionCorrection versionCorrection) {
// [PLAT-3581] Is the check below still necessary? The logic to create the valuation time for compilation is the same as that for
// populating the valuation time on the execution options that this detects.
// View definition was compiled based on compilation options, which might have only included an indicative
// valuation time. A further check ensures that the compiled view definition is still valid.
if (!CompiledViewDefinitionWithGraphsImpl.isValidFor(compiledViewDefinition, executionOptions.getValuationTime())) {
throw new OpenGammaRuntimeException("Compiled view definition " + compiledViewDefinition + " not valid for execution options " + executionOptions);
}
final UniqueId cycleId = getProcessContext().getCycleIdentifiers().get();
final ComputationResultListener streamingResultListener = new ComputationResultListener() {
@Override
public void resultAvailable(final ViewComputationResultModel result) {
cycleFragmentCompleted(result);
}
};
final SingleComputationCycle cycle = new SingleComputationCycle(cycleId, streamingResultListener, getProcessContext(), compiledViewDefinition, executionOptions, versionCorrection);
return getProcessContext().getCycleManager().manage(cycle);
}
private void subscribeToTargetResolverChanges() {
if (_targetResolverChanges == null) {
_targetResolverChanges = new TargetResolverChangeListener() {
@Override
protected void onChanged() {
requestCycle();
}
};
getProcessContext().getFunctionCompilationService().getFunctionCompilationContext().getRawComputationTargetResolver().changeManager().addChangeListener(_targetResolverChanges);
}
}
private void unsubscribeFromTargetResolverChanges() {
if (_targetResolverChanges != null) {
getProcessContext().getFunctionCompilationService().getFunctionCompilationContext().getRawComputationTargetResolver().changeManager().removeChangeListener(_targetResolverChanges);
_targetResolverChanges = null;
}
}
private static Instant now() {
// TODO: The distributed caches use a message bus for eventual consistency. This should really be (NOW - maximum permitted clock drift - eventual consistency time limit)
return Instant.now();
}
private VersionCorrection getResolverVersionCorrection(final ViewCycleExecutionOptions viewCycleOptions) {
VersionCorrection vc = null;
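// Single-pass do/while(false) used only so that "break" can short-circuit the fallback chain: cycle-specific options -> default execution options -> LATEST.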
do {
vc = viewCycleOptions.getResolverVersionCorrection();
if (vc != null) {
break;
}
final ViewCycleExecutionOptions options = getExecutionOptions().getDefaultExecutionOptions();
if (options != null) {
vc = options.getResolverVersionCorrection();
if (vc != null) {
break;
}
}
vc = VersionCorrection.LATEST;
} while (false);
// Note: in the LATEST cases below, "now" really is the current instant because the caller requested LATEST; the valuation time must not be used instead.
if (vc.getCorrectedTo() == null) {
if (vc.getVersionAsOf() == null) {
if (!_ignoreCompilationValidity) {
subscribeToTargetResolverChanges();
}
return vc.withLatestFixed(now());
} else {
vc = vc.withLatestFixed(now());
}
} else if (vc.getVersionAsOf() == null) {
vc = vc.withLatestFixed(now());
}
unsubscribeFromTargetResolverChanges();
return vc;
}
private PortfolioNodeEquivalenceMapper getNodeEquivalenceMapper() {
return new PortfolioNodeEquivalenceMapper();
}
private void markMappedPositions(final PortfolioNode node, final Map<UniqueId, Position> positions) {
for (Position position : node.getPositions()) {
positions.put(position.getUniqueId(), null);
}
for (PortfolioNode child : node.getChildNodes()) {
markMappedPositions(child, positions);
}
}
private void findUnmapped(final PortfolioNode node, final Map<UniqueId, UniqueId> mapped, final Set<UniqueId> unmapped, final Map<UniqueId, Position> positions) {
if (mapped.containsKey(node.getUniqueId())) {
// This node is mapped; as are the nodes underneath it, so just mark the child positions
markMappedPositions(node, positions);
} else {
// This node is unmapped - mark it as such and check the nodes underneath it
unmapped.add(node.getUniqueId());
for (PortfolioNode child : node.getChildNodes()) {
findUnmapped(child, mapped, unmapped, positions);
}
// Any child positions (and their trades) are unmapped if, and only if, they are not referenced by anything else
for (Position position : node.getPositions()) {
if (!positions.containsKey(position.getUniqueId())) {
positions.put(position.getUniqueId(), position);
}
}
}
}
private void findUnmapped(final PortfolioNode node, final Map<UniqueId, UniqueId> mapped, final Set<UniqueId> unmapped) {
final Map<UniqueId, Position> positions = new HashMap<UniqueId, Position>();
findUnmapped(node, mapped, unmapped, positions);
for (Map.Entry<UniqueId, Position> position : positions.entrySet()) {
if (position.getValue() != null) {
unmapped.add(position.getKey());
for (Trade trade : position.getValue().getTrades()) {
unmapped.add(trade.getUniqueId());
}
}
}
}
private Set<UniqueId> rewritePortfolioNodes(final Map<String, Pair<DependencyGraph, Set<ValueRequirement>>> previousGraphs, final CompiledViewDefinitionWithGraphs compiledViewDefinition,
final Portfolio newPortfolio) {
// Map any nodes from the old portfolio structure to the new one
final Map<UniqueId, UniqueId> mapped;
if (newPortfolio != null) {
mapped = getNodeEquivalenceMapper().getEquivalentNodes(compiledViewDefinition.getPortfolio().getRootNode(), newPortfolio.getRootNode());
} else {
mapped = Collections.emptyMap();
}
// Identify anything not (immediately) mapped to the new portfolio structure
final Set<UniqueId> unmapped = new HashSet<UniqueId>();
findUnmapped(compiledViewDefinition.getPortfolio().getRootNode(), mapped, unmapped);
if (s_logger.isDebugEnabled()) {
s_logger.debug("Mapping {} portfolio nodes to new structure, unmapping {} targets", mapped.size(), unmapped.size());
}
// For anything not mapped, remove the terminal outputs from the graph
for (final ViewCalculationConfiguration calcConfig : compiledViewDefinition.getViewDefinition().getAllCalculationConfigurations()) {
final Set<ValueRequirement> specificRequirements = calcConfig.getSpecificRequirements();
final Pair<DependencyGraph, Set<ValueRequirement>> previousGraphEntry = previousGraphs.get(calcConfig.getName());
if (previousGraphEntry == null) {
continue;
}
final DependencyGraph previousGraph = previousGraphEntry.getFirst();
final Map<ValueSpecification, Set<ValueRequirement>> terminalOutputs = previousGraph.getTerminalOutputs();
final ValueSpecification[] removeSpecifications = new ValueSpecification[terminalOutputs.size()];
@SuppressWarnings("unchecked")
final List<ValueRequirement>[] removeRequirements = new List[terminalOutputs.size()];
int remove = 0;
for (final Map.Entry<ValueSpecification, Set<ValueRequirement>> entry : terminalOutputs.entrySet()) {
if (unmapped.contains(entry.getKey().getTargetSpecification().getUniqueId())) {
List<ValueRequirement> removal = null;
for (final ValueRequirement requirement : entry.getValue()) {
if (!specificRequirements.contains(requirement)) {
if (removal == null) {
removal = new ArrayList<ValueRequirement>(entry.getValue().size());
}
removal.add(requirement);
}
// Anything that was in the specific requirements will be captured by the standard invalid identifier tests
}
if (removal != null) {
removeSpecifications[remove] = entry.getKey();
removeRequirements[remove++] = removal;
}
}
}
for (int i = 0; i < remove; i++) {
previousGraph.removeTerminalOutputs(removeRequirements[i], removeSpecifications[i]);
}
if (!mapped.isEmpty()) {
final ComputationTargetIdentifierRemapVisitor remapper = new ComputationTargetIdentifierRemapVisitor(mapped);
final Collection<Object> replacements = new ArrayList<Object>(mapped.size() * 2);
for (DependencyNode node : previousGraph.getDependencyNodes()) {
final ComputationTargetSpecification newTarget = remapper.remap(node.getComputationTarget());
if (newTarget != null) {
replacements.add(node);
replacements.add(newTarget);
}
}
Iterator<Object> itrReplacements = replacements.iterator();
while (itrReplacements.hasNext()) {
final DependencyNode node = (DependencyNode) itrReplacements.next();
final ComputationTargetSpecification newTarget = (ComputationTargetSpecification) itrReplacements.next();
s_logger.debug("Rewriting {} to {}", node, newTarget);
previousGraph.replaceNode(node, newTarget);
}
// Rewrite the original value requirements that might have referenced the original nodes
for (Map.Entry<ValueSpecification, Set<ValueRequirement>> terminalOutput : previousGraph.getTerminalOutputs().entrySet()) {
final Set<ValueRequirement> oldReqs = terminalOutput.getValue();
replacements.clear();
for (ValueRequirement req : oldReqs) {
final ComputationTargetReference newTarget = req.getTargetReference().accept(remapper);
if (newTarget != null) {
replacements.add(req);
replacements.add(MemoryUtils.instance(new ValueRequirement(req.getValueName(), newTarget, req.getConstraints())));
}
}
if (!replacements.isEmpty()) {
itrReplacements = replacements.iterator();
while (itrReplacements.hasNext()) {
final ValueRequirement oldReq = (ValueRequirement) itrReplacements.next();
final ValueRequirement newReq = (ValueRequirement) itrReplacements.next();
oldReqs.remove(oldReq);
oldReqs.add(newReq);
}
}
}
}
}
// Remove any PORTFOLIO nodes and any unmapped PORTFOLIO_NODE nodes with the filter
filterPreviousGraphs(previousGraphs, new InvalidPortfolioDependencyNodeFilter(unmapped), null);
return new HashSet<UniqueId>(mapped.values());
}
/**
* Returns the set of unique identifiers that were previously used as targets in the dependency graph for object identifiers (or external identifiers) that now resolve differently.
*
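* <p>
* For example (hypothetical identifiers): if a position previously resolved to "DbPos~123~4" but now resolves to version 5, the result maps "DbPos~123~4" to
* the specification for "DbPos~123~5"; a target that no longer resolves at all maps to null.
*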
* @param previousResolutions the previous cycle's resolution of identifiers, not null
* @param versionCorrection the resolver version correction for this cycle, not null
* @return the invalid identifiers as a map from the old unique identifier to its new resolution, or null if none are invalid
*/
private Map<UniqueId, ComputationTargetSpecification> getInvalidIdentifiers(final Map<ComputationTargetReference, UniqueId> previousResolutions, final VersionCorrection versionCorrection) {
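// Timing idiom: t starts at -nanoTime() so that adding nanoTime() back after the resolution pass leaves the elapsed nanos for the log message below.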
long t = -System.nanoTime();
final Set<ComputationTargetReference> toCheck;
if (_targetResolverChanges == null) {
// Change notifications aren't relevant for historical iteration; must recheck all of the resolutions
toCheck = previousResolutions.keySet();
} else {
// Subscribed to LATEST/LATEST so change manager notifications can filter the set to be checked
toCheck = Sets.newHashSetWithExpectedSize(previousResolutions.size());
final Set<ObjectId> allObjectIds = Sets.newHashSetWithExpectedSize(previousResolutions.size());
for (final Map.Entry<ComputationTargetReference, UniqueId> previousResolution : previousResolutions.entrySet()) {
final ObjectId oid = previousResolution.getValue().getObjectId();
if (_targetResolverChanges.isChanged(oid)) {
// A change was seen on this target
s_logger.debug("Change observed on {}", oid);
toCheck.add(previousResolution.getKey());
}
allObjectIds.add(oid);
}
_targetResolverChanges.watchOnly(allObjectIds);
if (toCheck.isEmpty()) {
s_logger.debug("No resolutions (from {}) to check", previousResolutions.size());
return null;
} else {
s_logger.debug("Checking {} of {} resolutions for changed objects", toCheck.size(), previousResolutions.size());
}
}
PoolExecutor previousInstance = PoolExecutor.setInstance(getProcessContext().getFunctionCompilationService().getExecutorService());
final Map<ComputationTargetReference, ComputationTargetSpecification> specifications = getProcessContext().getFunctionCompilationService().getFunctionCompilationContext()
.getRawComputationTargetResolver().getSpecificationResolver().getTargetSpecifications(toCheck, versionCorrection);
PoolExecutor.setInstance(previousInstance);
t += System.nanoTime();
Map<UniqueId, ComputationTargetSpecification> invalidIdentifiers = null;
for (final Map.Entry<ComputationTargetReference, UniqueId> target : previousResolutions.entrySet()) {
final ComputationTargetSpecification resolved = specifications.get(target.getKey());
if ((resolved != null) && target.getValue().equals(resolved.getUniqueId())) {
// No change
s_logger.debug("No change resolving {}", target);
} else if (toCheck.contains(target.getKey())) {
// Identifier no longer resolved, or resolved differently
s_logger.info("New resolution of {} to {}", target, resolved);
if (invalidIdentifiers == null) {
invalidIdentifiers = new HashMap<UniqueId, ComputationTargetSpecification>();
}
invalidIdentifiers.put(target.getValue(), resolved);
}
}
s_logger.info("{} resolutions checked in {}ms", toCheck.size(), t / 1e6);
return invalidIdentifiers;
}
private void getInvalidMarketData(final DependencyGraph graph, final InvalidMarketDataDependencyNodeFilter filter) {
final PoolExecutor.Service<?> slaveJobs = getProcessContext().getFunctionCompilationService().getExecutorService().createService(null);
// 32 was chosen fairly arbitrarily. Before this change 502 node checks took around 700ms; afterwards they took around 180ms.
final int jobSize = 32;
InvalidMarketDataDependencyNodeFilter.VisitBatch visit = filter.visit(jobSize);
for (ValueSpecification marketData : graph.getAllRequiredMarketData()) {
if (visit.isFull()) {
slaveJobs.execute(visit);
visit = filter.visit(jobSize);
}
final DependencyNode node = graph.getNodeProducing(marketData);
visit.add(marketData, node);
}
visit.run();
try {
slaveJobs.join();
} catch (InterruptedException e) {
throw new OpenGammaRuntimeException("Interrupted", e);
}
}
/**
* Identifies the value specifications from market data sourcing nodes that are no longer valid for the new data provider, recording them in the supplied filter.
* <p>
* The cost of applying a filter can be quite high and in the historical simulation case seldom excludes nodes. To optimise this case we consider the market data sourcing nodes first to determine
* whether the filter should be applied.
*
* @param previousGraphs the previous graphs that have already been part processed, null if no preprocessing has occurred
* @param compiledViewDefinition the cached compilation containing previous graphs if {@code previousGraphs} is null
* @param filter the filter to pass details of the nodes to
*/
private void getInvalidMarketData(final Map<String, Pair<DependencyGraph, Set<ValueRequirement>>> previousGraphs,
final CompiledViewDefinitionWithGraphs compiledViewDefinition, final InvalidMarketDataDependencyNodeFilter filter) {
if (previousGraphs != null) {
for (Pair<DependencyGraph, Set<ValueRequirement>> previousGraph : previousGraphs.values()) {
getInvalidMarketData(previousGraph.getFirst(), filter);
}
} else {
for (DependencyGraphExplorer graphExp : compiledViewDefinition.getDependencyGraphExplorers()) {
getInvalidMarketData(graphExp.getWholeGraph(), filter);
}
}
}
/**
* Mark a set of nodes for inclusion (TRUE) or exclusion (FALSE) based on the filter. A node is included if the filter accepts it and all of its inputs are also marked for inclusion. A node is
* excluded if the filter rejects it or any of its inputs are rejected. This will operate recursively, processing all nodes to the leaves of the graph.
* <p>
* The {@link DependencyGraph#subGraph} operation doesn't work for us as it can leave nodes in the sub-graph whose inputs aren't in the graph. An invalid node
* identified by the filter must invalidate everything above it, up to the terminal output root, so that that part of the graph can be rebuilt.
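* <p>
* For illustration: if node C fails the filter and node B consumes one of C's outputs, both B and C are marked FALSE even if B itself would pass, while an
* unrelated node A that passes (and whose inputs all pass) is marked TRUE.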
*
* @param include the map to build the result into
* @param nodes the nodes to process
* @param filter the filter to apply to the nodes
* @return true if all of the nodes in the collection were included
*/
private static boolean includeNodes(final Map<DependencyNode, Boolean> include, final Collection<DependencyNode> nodes, final DependencyNodeFilter filter) {
boolean includedAll = true;
for (final DependencyNode node : nodes) {
final Boolean match = include.get(node);
if (match == null) {
if (filter.accept(node)) {
if (includeNodes(include, node.getInputNodes(), filter)) {
include.put(node, Boolean.TRUE);
} else {
includedAll = false;
include.put(node, Boolean.FALSE);
}
} else {
includedAll = false;
include.put(node, Boolean.FALSE);
}
} else {
if (match == Boolean.FALSE) {
includedAll = false;
}
}
}
return includedAll;
}
private Map<String, Pair<DependencyGraph, Set<ValueRequirement>>> getPreviousGraphs(Map<String, Pair<DependencyGraph, Set<ValueRequirement>>> previousGraphs,
final CompiledViewDefinitionWithGraphs compiledViewDefinition) {
if (previousGraphs == null) {
final Collection<DependencyGraphExplorer> graphExps = compiledViewDefinition.getDependencyGraphExplorers();
previousGraphs = Maps.newHashMapWithExpectedSize(graphExps.size());
for (DependencyGraphExplorer graphExp : graphExps) {
final DependencyGraph graph = graphExp.getWholeGraph();
previousGraphs.put(graph.getCalculationConfigurationName(), Pair.<DependencyGraph, Set<ValueRequirement>>of(graph, new HashSet<ValueRequirement>()));
}
}
return previousGraphs;
}
/**
* Maintain the previously used dependency graphs by applying a node filter that identifies invalid nodes that must be recalculated (implying everything dependent on them must also be rebuilt). The
* first call will extract the previously compiled graphs; subsequent calls will update the structure, invalidating more nodes and increasing the number of missing requirements.
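* <p>
* For illustration (hypothetical configuration name): if an invalid-target pass removes nodes from the "Default" graph, a later invalid-function pass sees the
* already filtered graph, so the missing requirement set for "Default" only ever grows until the next full compilation.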
*
* @param previousGraphs the previously used graphs as a map from calculation configuration name to the graph and the value requirements that need to be recalculated, not null
* @param filter the filter to identify invalid nodes, not null
* @param unchangedNodes optional identifiers of unchanged portfolio nodes; any nodes filtered out must be removed from this
*/
private void filterPreviousGraphs(final Map<String, Pair<DependencyGraph, Set<ValueRequirement>>> previousGraphs, final DependencyNodeFilter filter, final Set<UniqueId> unchangedNodes) {
final Iterator<Map.Entry<String, Pair<DependencyGraph, Set<ValueRequirement>>>> itr = previousGraphs.entrySet().iterator();
while (itr.hasNext()) {
final Map.Entry<String, Pair<DependencyGraph, Set<ValueRequirement>>> entry = itr.next();
final DependencyGraph graph = entry.getValue().getFirst();
if (graph.getSize() == 0) {
continue;
}
final Collection<DependencyNode> nodes = graph.getDependencyNodes();
final Map<DependencyNode, Boolean> include = Maps.newHashMapWithExpectedSize(nodes.size());
includeNodes(include, nodes, filter);
assert nodes.size() == include.size();
final Map<ValueSpecification, Set<ValueRequirement>> terminalOutputs = graph.getTerminalOutputs();
final Set<ValueRequirement> missingRequirements = entry.getValue().getSecond();
final DependencyGraph filtered = graph.subGraph(new DependencyNodeFilter() {
@Override
public boolean accept(final DependencyNode node) {
if (include.get(node) == Boolean.TRUE) {
return true;
} else {
s_logger.debug("Discarding {} from dependency graph for {}", node, entry.getKey());
for (final ValueSpecification output : node.getOutputValues()) {
final Set<ValueRequirement> terminal = terminalOutputs.get(output);
if (terminal != null) {
missingRequirements.addAll(terminal);
}
}
if (unchangedNodes != null) {
unchangedNodes.remove(node.getComputationTarget().getUniqueId());
}
return false;
}
}
});
if (filtered.getSize() == 0) {
s_logger.info("Discarded total dependency graph for {}", entry.getKey());
itr.remove();
} else {
if (s_logger.isInfoEnabled()) {
s_logger.info("Removed {} nodes from dependency graph for {} by {}",
nodes.size() - filtered.getSize(),
entry.getKey(),
filter);
}
entry.setValue(Pair.of(filtered, missingRequirements));
}
}
}
private CompiledViewDefinitionWithGraphs getCompiledViewDefinition(final Instant valuationTime, final VersionCorrection versionCorrection) {
final long functionInitId = getProcessContext().getFunctionCompilationService().getFunctionCompilationContext().getFunctionInitId();
updateViewDefinitionIfRequired();
CompiledViewDefinitionWithGraphs compiledViewDefinition = null;
final Pair<Lock, Lock> executionCacheLocks = getProcessContext().getExecutionCacheLock().get(_executionCacheKey, valuationTime, versionCorrection);
executionCacheLocks.getSecond().lock();
executionCacheLocks.getFirst().lock();
boolean broadLock = true;
try {
compiledViewDefinition = getCachedCompiledViewDefinition(valuationTime, versionCorrection);
Map<String, Pair<DependencyGraph, Set<ValueRequirement>>> previousGraphs = null;
ConcurrentMap<ComputationTargetReference, UniqueId> previousResolutions = null;
Set<UniqueId> changedPositions = null;
Set<UniqueId> unchangedNodes = null;
boolean marketDataProviderDirty = _marketDataProviderDirty;
_marketDataProviderDirty = false;
if (compiledViewDefinition != null) {
executionCacheLocks.getFirst().unlock();
broadLock = false;
do {
// The cast below is bad, but only temporary -- the function initialiser id needs to go
if (functionInitId != ((CompiledViewDefinitionWithGraphsImpl) compiledViewDefinition).getFunctionInitId()) {
// The function repository has been reinitialized which invalidates any previous graphs
// TODO: [PLAT-2237, PLAT-1623, PLAT-2240] Get rid of this
break;
}
final Map<ComputationTargetReference, UniqueId> resolvedIdentifiers = compiledViewDefinition.getResolvedIdentifiers();
// TODO: The check below works well for the historical valuation case, but if the resolver v/c is different for two workers in the
// group for an otherwise identical cache key then including it in the caching detail may become necessary to handle those cases.
if (!versionCorrection.equals(compiledViewDefinition.getResolverVersionCorrection())) {
final Map<UniqueId, ComputationTargetSpecification> invalidIdentifiers = getInvalidIdentifiers(resolvedIdentifiers, versionCorrection);
if (invalidIdentifiers != null) {
previousGraphs = getPreviousGraphs(previousGraphs, compiledViewDefinition);
if ((compiledViewDefinition.getPortfolio() != null) && invalidIdentifiers.containsKey(compiledViewDefinition.getPortfolio().getUniqueId())) {
// The portfolio resolution is different, invalidate or rewrite PORTFOLIO and PORTFOLIO_NODE nodes in the graph. Note that incremental
// compilation under this circumstance can be flawed if the functions have made notable use of the overall portfolio structure such that
// a full re-compilation will yield a different dependency graph to just rewriting the previous one.
final ComputationTargetResolver resolver = getProcessContext().getFunctionCompilationService().getFunctionCompilationContext().getRawComputationTargetResolver();
final ComputationTargetSpecification portfolioSpec = resolver.getSpecificationResolver().getTargetSpecification(
new ComputationTargetSpecification(ComputationTargetType.PORTFOLIO, getViewDefinition().getPortfolioId()), versionCorrection);
final ComputationTarget newPortfolio = resolver.resolve(portfolioSpec, versionCorrection);
unchangedNodes = rewritePortfolioNodes(previousGraphs, compiledViewDefinition, (Portfolio) newPortfolio.getValue());
}
// Invalidate any dependency graph nodes on the invalid targets
filterPreviousGraphs(previousGraphs, new InvalidTargetDependencyNodeFilter(invalidIdentifiers.keySet()), unchangedNodes);
previousResolutions = new ConcurrentHashMap<ComputationTargetReference, UniqueId>(resolvedIdentifiers.size());
for (final Map.Entry<ComputationTargetReference, UniqueId> resolvedIdentifier : resolvedIdentifiers.entrySet()) {
if (invalidIdentifiers.containsKey(resolvedIdentifier.getValue())) {
if ((unchangedNodes == null) && resolvedIdentifier.getKey().getType().isTargetType(ComputationTargetType.POSITION)) {
// At least one position has changed, add all portfolio targets
ComputationTargetSpecification ctspec = invalidIdentifiers.get(resolvedIdentifier.getValue());
if (ctspec != null) {
if (changedPositions == null) {
changedPositions = new HashSet<UniqueId>();
}
changedPositions.add(ctspec.getUniqueId());
}
}
} else {
previousResolutions.put(resolvedIdentifier.getKey(), resolvedIdentifier.getValue());
}
}
} else {
compiledViewDefinition = compiledViewDefinition.withResolverVersionCorrection(versionCorrection);
cacheCompiledViewDefinition(compiledViewDefinition);
}
}
if (!CompiledViewDefinitionWithGraphsImpl.isValidFor(compiledViewDefinition, valuationTime)) {
// Invalidate any dependency graph nodes that use functions that are no longer valid
previousGraphs = getPreviousGraphs(previousGraphs, compiledViewDefinition);
filterPreviousGraphs(previousGraphs, new InvalidFunctionDependencyNodeFilter(valuationTime), unchangedNodes);
}
if (marketDataProviderDirty) {
// Invalidate any market data sourcing nodes that are no longer valid
final InvalidMarketDataDependencyNodeFilter filter = new InvalidMarketDataDependencyNodeFilter(getProcessContext().getFunctionCompilationService().getFunctionCompilationContext()
.getRawComputationTargetResolver().atVersionCorrection(versionCorrection), getMarketDataProvider().getAvailabilityProvider());
getInvalidMarketData(previousGraphs, compiledViewDefinition, filter);
if (filter.hasInvalidNodes()) {
previousGraphs = getPreviousGraphs(previousGraphs, compiledViewDefinition);
filterPreviousGraphs(previousGraphs, filter, unchangedNodes);
}
}
if (previousGraphs == null) {
// Existing cached model is valid (an optimization for the common case of similar, increasing valuation times)
return compiledViewDefinition;
}
if (previousResolutions == null) {
previousResolutions = new ConcurrentHashMap<ComputationTargetReference, UniqueId>(resolvedIdentifiers);
}
} while (false);
executionCacheLocks.getFirst().lock();
broadLock = true;
}
final MarketDataAvailabilityProvider availabilityProvider = getMarketDataProvider().getAvailabilityProvider();
final ViewCompilationServices compilationServices = getProcessContext().asCompilationServices(availabilityProvider);
if (previousGraphs != null) {
s_logger.info("Performing incremental graph compilation");
_compilationTask = ViewDefinitionCompiler.incrementalCompileTask(getViewDefinition(), compilationServices, valuationTime, versionCorrection, previousGraphs,
previousResolutions, changedPositions, unchangedNodes);
} else {
s_logger.info("Performing full graph compilation");
_compilationTask = ViewDefinitionCompiler.fullCompileTask(getViewDefinition(), compilationServices, valuationTime, versionCorrection);
}
try {
if (!getJob().isTerminated()) {
compiledViewDefinition = _compilationTask.get();
compiledViewDefinition = initialiseMarketDataManipulation(compiledViewDefinition);
cacheCompiledViewDefinition(compiledViewDefinition);
} else {
return null;
}
} finally {
_compilationTask = null;
}
} catch (final Exception e) {
final String message = MessageFormat.format("Error compiling view definition {0} for time {1}", getViewDefinition().getUniqueId(), valuationTime);
viewDefinitionCompilationFailed(valuationTime, new OpenGammaRuntimeException(message, e));
throw new OpenGammaRuntimeException(message, e);
} finally {
if (broadLock) {
executionCacheLocks.getFirst().unlock();
}
executionCacheLocks.getSecond().unlock();
}
// [PLAT-984]
// Assume that valuation times are increasing in real-time towards the expiry of the view definition, so that we
// can predict the time to expiry. If this assumption is wrong then the worst we do is trigger an unnecessary
// cycle. In the predicted case, we trigger a cycle on expiry so that any new market data subscriptions are made
// straight away.
if ((compiledViewDefinition.getValidTo() != null) && getExecutionOptions().getFlags().contains(ViewExecutionFlags.TRIGGER_CYCLE_ON_MARKET_DATA_CHANGED)) {
final Duration durationToExpiry = getMarketDataProvider().getRealTimeDuration(valuationTime, compiledViewDefinition.getValidTo());
final long expiryNanos = System.nanoTime() + durationToExpiry.toNanos();
_compilationExpiryCycleTrigger.set(expiryNanos, ViewCycleTriggerResult.forceFull());
// REVIEW Andrew 2012-11-02 -- If we are ticking live, then this is almost right (System.nanoTime will be close to valuationTime, depending on how
// long the compilation took). If we are running through historical data then this is quite a meaningless trigger.
} else {
_compilationExpiryCycleTrigger.reset();
}
return compiledViewDefinition;
}
private CompiledViewDefinitionWithGraphs initialiseMarketDataManipulation(final CompiledViewDefinitionWithGraphs compiledViewDefinition) {
if (_marketDataManipulator.hasManipulationsDefined()) {
Map<DependencyGraph, Map<DistinctMarketDataSelector, Set<ValueSpecification>>> selectionsByGraph = new HashMap<>();
for (DependencyGraphExplorer graphExplorer : compiledViewDefinition.getDependencyGraphExplorers()) {
DependencyGraph graph = graphExplorer.getWholeGraph();
Map<DistinctMarketDataSelector, Set<ValueSpecification>> selectorMapping = _marketDataManipulator.modifyDependencyGraph(graph);
if (!selectorMapping.isEmpty()) {
selectionsByGraph.put(graph, selectorMapping);
}
}
if (!selectionsByGraph.isEmpty()) {
return compiledViewDefinition.withMarketDataManipulationSelections(selectionsByGraph);
}
}
return compiledViewDefinition;
}
/**
* Gets the cached compiled view definition which may be re-used in subsequent computation cycles.
* <p>
* External visibility for tests.
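* <p>
* The lookup consults this worker's local copy first and falls back to the shared execution cache; anything taken from the shared cache is deep cloned (see
* {@code PLAT3249.deepClone}) before being retained locally.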
*
* @param valuationTime the indicative valuation time, not null
* @param resolverVersionCorrection the resolver version correction, not null
* @return the cached compiled view definition, or null if nothing is currently cached
*/
public CompiledViewDefinitionWithGraphs getCachedCompiledViewDefinition(final Instant valuationTime, final VersionCorrection resolverVersionCorrection) {
CompiledViewDefinitionWithGraphs cached = _latestCompiledViewDefinition;
if (cached != null) {
boolean resolverMatch = resolverVersionCorrection.equals(cached.getResolverVersionCorrection());
boolean valuationMatch = CompiledViewDefinitionWithGraphsImpl.isValidFor(cached, valuationTime);
if (!resolverMatch || !valuationMatch) {
// Query the cache in case there is a newer one
cached = getProcessContext().getExecutionCache().getCompiledViewDefinitionWithGraphs(_executionCacheKey);
if (cached != null) {
// Only update ours if the one from the cache has a better validity
if (resolverVersionCorrection.equals(cached.getResolverVersionCorrection())) {
cached = PLAT3249.deepClone(cached);
_latestCompiledViewDefinition = cached;
} else {
if (!resolverMatch && !valuationMatch && CompiledViewDefinitionWithGraphsImpl.isValidFor(cached, valuationTime)) {
cached = PLAT3249.deepClone(cached);
_latestCompiledViewDefinition = cached;
}
}
} else {
// Nothing in the cache; use the one from last time
cached = _latestCompiledViewDefinition;
}
}
} else {
// Query the cache
cached = getProcessContext().getExecutionCache().getCompiledViewDefinitionWithGraphs(_executionCacheKey);
if (cached != null) {
cached = PLAT3249.deepClone(cached);
_latestCompiledViewDefinition = cached;
}
}
return cached;
}
/**
* Replaces the cached compiled view definition.
* <p>
* External visibility for tests.
*
* @param latestCompiledViewDefinition the compiled view definition, may be null
*/
public void cacheCompiledViewDefinition(final CompiledViewDefinitionWithGraphs latestCompiledViewDefinition) {
if (latestCompiledViewDefinition != null) {
getProcessContext().getExecutionCache().setCompiledViewDefinitionWithGraphs(_executionCacheKey, latestCompiledViewDefinition);
}
_latestCompiledViewDefinition = latestCompiledViewDefinition;
}
/**
* Gets the view definition currently in use by the computation job.
*
* @return the view definition, not null
*/
public ViewDefinition getViewDefinition() {
return _viewDefinition;
}
private void updateViewDefinitionIfRequired() {
final ViewDefinition newViewDefinition = _newViewDefinition.getAndSet(null);
if (newViewDefinition != null) {
_viewDefinition = newViewDefinition;
// TODO [PLAT-3215] Might not need to discard the entire compilation at this point
cacheCompiledViewDefinition(null);
SnapshottingViewExecutionDataProvider marketDataProvider = getMarketDataProvider();
_executionCacheKey = ViewExecutionCacheKey.of(newViewDefinition, marketDataProvider.getAvailabilityProvider());
// A change in view definition might mean a change in market data user which could invalidate the resolutions
if (marketDataProvider != null) {
if (!marketDataProvider.getMarketDataUser().equals(newViewDefinition.getMarketDataUser())) {
replaceMarketDataProvider(marketDataProvider.getSpecifications());
}
}
}
}
private void replaceMarketDataProvider(final List<MarketDataSpecification> marketDataSpecs) {
// [PLAT-3186] Not a huge overhead, but we could check compatibility with the new specs and keep the same provider
removeMarketDataProvider();
setMarketDataProvider(marketDataSpecs);
}
private void removeMarketDataProvider() {
if (_marketDataProvider == null) {
return;
}
removeMarketDataSubscriptions();
_marketDataProvider.removeListener(this);
_marketDataProvider = null;
_marketDataProviderDirty = true;
_executionCacheKey = null;
}
private void setMarketDataProvider(final List<MarketDataSpecification> marketDataSpecs) {
try {
_marketDataProvider = new SnapshottingViewExecutionDataProvider(getViewDefinition().getMarketDataUser(),
marketDataSpecs, getProcessContext().getMarketDataProviderResolver());
} catch (final Exception e) {
s_logger.error("Failed to create data provider", e);
_marketDataProvider = null;
}
if (_marketDataProvider != null) {
_marketDataProvider.addListener(this);
_executionCacheKey = ViewExecutionCacheKey.of(getViewDefinition(), _marketDataProvider.getAvailabilityProvider());
}
_marketDataProviderDirty = true;
}
private SnapshottingViewExecutionDataProvider getMarketDataProvider() {
return _marketDataProvider;
}
private void setMarketDataSubscriptions(final Set<ValueSpecification> requiredSubscriptions) {
final Set<ValueSpecification> currentSubscriptions = _marketDataSubscriptions;
final Set<ValueSpecification> unusedMarketData = Sets.difference(currentSubscriptions, requiredSubscriptions);
if (!unusedMarketData.isEmpty()) {
s_logger.debug("{} unused market data subscriptions", unusedMarketData.size());
removeMarketDataSubscriptions(new ArrayList<ValueSpecification>(unusedMarketData));
}
final Set<ValueSpecification> newMarketData = Sets.difference(requiredSubscriptions, currentSubscriptions);
if (!newMarketData.isEmpty()) {
s_logger.debug("{} new market data requirements", newMarketData.size());
addMarketDataSubscriptions(new HashSet<ValueSpecification>(newMarketData));
}
}
//-------------------------------------------------------------------------
private void addMarketDataSubscriptions(final Set<ValueSpecification> requiredSubscriptions) {
final OperationTimer timer = new OperationTimer(s_logger, "Adding {} market data subscriptions", requiredSubscriptions.size());
_pendingSubscriptions.addAll(requiredSubscriptions);
_marketDataProvider.subscribe(requiredSubscriptions);
_marketDataSubscriptions.addAll(requiredSubscriptions);
try {
synchronized (_pendingSubscriptions) {
if (!_pendingSubscriptions.isEmpty()) {
_pendingSubscriptions.wait();
}
}
} catch (final InterruptedException ex) {
s_logger.info("Interrupted while waiting for subscription results.");
} finally {
_pendingSubscriptions.clear();
}
timer.finished();
}
private void removePendingSubscription(final ValueSpecification specification) {
if (_pendingSubscriptions.remove(specification)) {
notifyIfPendingSubscriptionsDone();
}
}
private void removePendingSubscriptions(final Collection<ValueSpecification> specifications) {
// Previously, this used removeAll, but as specifications may be a list, it was observed
// that we may end up iterating over _pendingSubscriptions and calling contains() on
// specifications, resulting in long wait times for a view to load (PLAT-3508)
boolean removalPerformed = false;
for (ValueSpecification specification : specifications) {
removalPerformed = _pendingSubscriptions.remove(specification) || removalPerformed;
}
if (removalPerformed) {
notifyIfPendingSubscriptionsDone();
}
}
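// Double-checked notify: the unsynchronized isEmpty() test is a fast path; it is repeated under the monitor so that a thread blocked in
// addMarketDataSubscriptions cannot miss the notifyAll() between its check and its wait().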
private void notifyIfPendingSubscriptionsDone() {
if (_pendingSubscriptions.isEmpty()) {
synchronized (_pendingSubscriptions) {
if (_pendingSubscriptions.isEmpty()) {
_pendingSubscriptions.notifyAll();
}
}
}
}
private void removeMarketDataSubscriptions() {
removeMarketDataSubscriptions(_marketDataSubscriptions);
}
private void removeMarketDataSubscriptions(final Collection<ValueSpecification> unusedSubscriptions) {
final OperationTimer timer = new OperationTimer(s_logger, "Removing {} market data subscriptions", unusedSubscriptions.size());
// Only unsubscribe the subscriptions that are actually unused, not everything currently subscribed
_marketDataProvider.unsubscribe(new HashSet<ValueSpecification>(unusedSubscriptions));
_marketDataSubscriptions.removeAll(unusedSubscriptions);
timer.finished();
}
// MarketDataListener
@Override
public void subscriptionsSucceeded(final Collection<ValueSpecification> valueSpecifications) {
s_logger.debug("Subscription succeeded: {}", valueSpecifications.size());
removePendingSubscriptions(valueSpecifications);
}
@Override
public void subscriptionFailed(final ValueSpecification valueSpecification, final String msg) {
s_logger.debug("Market data subscription to {} failed. This market data may be missing from computation cycles.", valueSpecification);
removePendingSubscription(valueSpecification);
}
@Override
public void subscriptionStopped(final ValueSpecification valueSpecification) {
}
@Override
public void valuesChanged(final Collection<ValueSpecification> valueSpecifications) {
if (!getExecutionOptions().getFlags().contains(ViewExecutionFlags.TRIGGER_CYCLE_ON_MARKET_DATA_CHANGED)) {
return;
}
// Don't want to query the cache for this; always use the last one
final CompiledViewDefinitionWithGraphs compiledView = _latestCompiledViewDefinition;
if (compiledView == null) {
return;
}
if (CollectionUtils.containsAny(compiledView.getMarketDataRequirements(), valueSpecifications)) {
requestCycle();
}
}
// ViewComputationJob
@Override
public synchronized boolean triggerCycle() {
s_logger.debug("Cycle triggered manually");
_forceTriggerCycle = true;
notifyAll();
return true;
}
@Override
public synchronized boolean requestCycle() {
// REVIEW jonathan 2010-10-04 -- this synchronisation is necessary, but it feels very heavyweight for
// high-frequency market data. See how it goes, but we could take into account the recalc periods and apply a
// heuristic (e.g. only wake up due to market data if max - min < e, for some e) which tries to see whether it's
// worth doing all this.
_cycleRequested = true;
if (!_wakeOnCycleRequest) {
return true;
}
notifyAll();
return true;
}
@Override
public void updateViewDefinition(final ViewDefinition viewDefinition) {
s_logger.debug("Received new view definition {} for next cycle", viewDefinition.getUniqueId());
_newViewDefinition.getAndSet(viewDefinition);
}
@Override
public void terminate() {
getJob().terminate();
s_logger.debug("Interrupting calculation job thread");
getThread().interrupt();
}
@Override
public void join() throws InterruptedException {
getThread().join();
}
@Override
public boolean join(final long timeout) throws InterruptedException {
getThread().join(timeout);
return !getThread().isAlive();
}
@Override
public boolean isTerminated() {
return getJob().isTerminated() && !getThread().isAlive();
}
}
| projects/OG-Engine/src/main/java/com/opengamma/engine/view/worker/SingleThreadViewProcessWorker.java | /**
* Copyright (C) 2009 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.engine.view.worker;
import java.text.MessageFormat;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;
import java.util.concurrent.locks.Lock;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.util.CollectionUtils;
import org.threeten.bp.Duration;
import org.threeten.bp.Instant;
import com.google.common.base.Supplier;
import com.google.common.collect.Maps;
import com.google.common.collect.Sets;
import com.opengamma.OpenGammaRuntimeException;
import com.opengamma.core.position.Portfolio;
import com.opengamma.core.position.PortfolioNode;
import com.opengamma.core.position.Position;
import com.opengamma.core.position.Trade;
import com.opengamma.core.position.impl.PortfolioNodeEquivalenceMapper;
import com.opengamma.engine.ComputationTarget;
import com.opengamma.engine.ComputationTargetResolver;
import com.opengamma.engine.ComputationTargetSpecification;
import com.opengamma.engine.MemoryUtils;
import com.opengamma.engine.depgraph.DependencyGraph;
import com.opengamma.engine.depgraph.DependencyGraphExplorer;
import com.opengamma.engine.depgraph.DependencyNode;
import com.opengamma.engine.depgraph.DependencyNodeFilter;
import com.opengamma.engine.marketdata.MarketDataListener;
import com.opengamma.engine.marketdata.MarketDataSnapshot;
import com.opengamma.engine.marketdata.availability.MarketDataAvailabilityProvider;
import com.opengamma.engine.marketdata.manipulator.DistinctMarketDataSelector;
import com.opengamma.engine.marketdata.manipulator.MarketDataManipulator;
import com.opengamma.engine.marketdata.manipulator.NoOpMarketDataSelector;
import com.opengamma.engine.marketdata.spec.MarketDataSpecification;
import com.opengamma.engine.resource.EngineResourceReference;
import com.opengamma.engine.target.ComputationTargetReference;
import com.opengamma.engine.target.ComputationTargetType;
import com.opengamma.engine.value.ValueRequirement;
import com.opengamma.engine.value.ValueSpecification;
import com.opengamma.engine.view.ViewCalculationConfiguration;
import com.opengamma.engine.view.ViewComputationResultModel;
import com.opengamma.engine.view.ViewDefinition;
import com.opengamma.engine.view.compilation.CompiledViewDefinitionWithGraphs;
import com.opengamma.engine.view.compilation.CompiledViewDefinitionWithGraphsImpl;
import com.opengamma.engine.view.compilation.ViewCompilationServices;
import com.opengamma.engine.view.compilation.ViewDefinitionCompiler;
import com.opengamma.engine.view.cycle.DefaultViewCycleMetadata;
import com.opengamma.engine.view.cycle.SingleComputationCycle;
import com.opengamma.engine.view.cycle.ViewCycle;
import com.opengamma.engine.view.cycle.ViewCycleMetadata;
import com.opengamma.engine.view.cycle.ViewCycleState;
import com.opengamma.engine.view.execution.ViewCycleExecutionOptions;
import com.opengamma.engine.view.execution.ViewExecutionFlags;
import com.opengamma.engine.view.execution.ViewExecutionOptions;
import com.opengamma.engine.view.impl.ViewProcessContext;
import com.opengamma.engine.view.listener.ComputationResultListener;
import com.opengamma.engine.view.worker.cache.PLAT3249;
import com.opengamma.engine.view.worker.cache.ViewExecutionCacheKey;
import com.opengamma.engine.view.worker.trigger.CombinedViewCycleTrigger;
import com.opengamma.engine.view.worker.trigger.FixedTimeTrigger;
import com.opengamma.engine.view.worker.trigger.RecomputationPeriodTrigger;
import com.opengamma.engine.view.worker.trigger.RunAsFastAsPossibleTrigger;
import com.opengamma.engine.view.worker.trigger.SuccessiveDeltaLimitTrigger;
import com.opengamma.engine.view.worker.trigger.ViewCycleEligibility;
import com.opengamma.engine.view.worker.trigger.ViewCycleTrigger;
import com.opengamma.engine.view.worker.trigger.ViewCycleTriggerResult;
import com.opengamma.engine.view.worker.trigger.ViewCycleType;
import com.opengamma.id.ObjectId;
import com.opengamma.id.UniqueId;
import com.opengamma.id.VersionCorrection;
import com.opengamma.id.VersionCorrectionUtils;
import com.opengamma.util.ArgumentChecker;
import com.opengamma.util.NamedThreadPoolFactory;
import com.opengamma.util.PoolExecutor;
import com.opengamma.util.TerminatableJob;
import com.opengamma.util.monitor.OperationTimer;
import com.opengamma.util.tuple.Pair;
/**
* The job which schedules and executes computation cycles for a view process. See {@link SingleThreadViewProcessWorkerFactory} for a more detailed description.
*/
public class SingleThreadViewProcessWorker implements MarketDataListener, ViewProcessWorker {
private static final Logger s_logger = LoggerFactory.getLogger(SingleThreadViewProcessWorker.class);
private static final ExecutorService s_executor = Executors.newCachedThreadPool(new NamedThreadPoolFactory("Worker"));
/**
* Wrapper that allows a thread to be "borrowed" from an executor service.
*/
/* package*/static final class BorrowedThread implements Runnable {
private final String _name;
private final Runnable _job;
private final CountDownLatch _join = new CountDownLatch(1);
private Thread _thread;
private String _originalName;
public BorrowedThread(final String name, final Runnable job) {
_name = name;
_job = job;
}
public synchronized Thread.State getState() {
if (_thread != null) {
return _thread.getState();
} else {
return (_originalName != null) ? Thread.State.TERMINATED : Thread.State.NEW;
}
}
public void join() throws InterruptedException {
_join.await();
}
public void join(long timeout) throws InterruptedException {
_join.await(timeout, TimeUnit.MILLISECONDS);
}
public synchronized void interrupt() {
if (_thread != null) {
_thread.interrupt();
}
}
public synchronized boolean isAlive() {
return _thread != null;
}
// Runnable
@Override
public void run() {
synchronized (this) {
_thread = Thread.currentThread();
_originalName = _thread.getName();
}
try {
_thread.setName(_originalName + "-" + _name);
_job.run();
} finally {
_thread.setName(_originalName);
synchronized (this) {
_thread = null;
}
_join.countDown();
}
}
}
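// A minimal (hypothetical) usage sketch of BorrowedThread: wrap the job, submit the wrapper to an
// executor, and the pooled thread behaves like a dedicated, named thread for the job's duration:
//
// BorrowedThread worker = new BorrowedThread("MyView", new Runnable() {
// @Override
// public void run() {
// // long-running computation loop
// }
// });
// s_executor.submit(worker); // the pooled thread is renamed "<poolThreadName>-MyView" while running
// worker.join(); // blocks until the job finishes and the thread is handed back to the pool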
private static final long NANOS_PER_MILLISECOND = 1000000;
private final ViewProcessWorkerContext _context;
private final ViewExecutionOptions _executionOptions;
private final CombinedViewCycleTrigger _masterCycleTrigger = new CombinedViewCycleTrigger();
private final FixedTimeTrigger _compilationExpiryCycleTrigger;
private final boolean _executeCycles;
private final boolean _executeGraphs;
private final boolean _ignoreCompilationValidity;
private final boolean _suppressExecutionOnNoMarketData;
/**
* The changes to the master trigger that must be made during the next cycle.
* <p>
* This has been added as an immediate fix for [PLAT-3291] but could be extended to represent an arbitrary change to add/remove triggers if we wish to support the execution options changing for a
* running worker.
*/
private ViewCycleTrigger _masterCycleTriggerChanges;
private int _cycleCount;
private EngineResourceReference<SingleComputationCycle> _previousCycleReference;
/**
* The current view definition the worker must calculate on.
*/
private ViewDefinition _viewDefinition;
/**
* The most recently compiled form of the view definition. This may have been compiled by this worker, or retrieved from the cache and is being reused.
*/
private CompiledViewDefinitionWithGraphs _latestCompiledViewDefinition;
/**
* The key to use for storing the compiled view definition, or querying it, from the cache shared with other workers. Whenever the market data provider or view definition changes, this must be
* updated.
*/
private ViewExecutionCacheKey _executionCacheKey;
private final Set<ValueSpecification> _marketDataSubscriptions = new HashSet<ValueSpecification>();
private final Set<ValueSpecification> _pendingSubscriptions = Collections.newSetFromMap(new ConcurrentHashMap<ValueSpecification, Boolean>());
private TargetResolverChangeListener _targetResolverChanges;
private volatile boolean _wakeOnCycleRequest;
private volatile boolean _cycleRequested;
private volatile boolean _forceTriggerCycle;
/**
* An updated view definition pushed in by the execution coordinator. When the next cycle runs, this should be used instead of the previous one.
*/
private final AtomicReference<ViewDefinition> _newViewDefinition = new AtomicReference<ViewDefinition>();
private volatile Future<CompiledViewDefinitionWithGraphsImpl> _compilationTask;
/**
* Total time the job has spent "working". This does not include time spent waiting for a trigger. It is the real (wall-clock) time spent on all I/O involved in a cycle (e.g. database accesses),
* graph compilation, market data subscription, graph execution, result dispatch, etc.
*/
private double _totalTimeNanos;
/**
* The market data provider(s) for the current cycles.
*/
private SnapshottingViewExecutionDataProvider _marketDataProvider;
/**
* Flag indicating the market data provider has changed and any nodes sourcing market data into the dependency graph may now be invalid.
*/
private boolean _marketDataProviderDirty;
/**
* The terminatable job wrapper.
*/
private final TerminatableJob _job;
/**
* The thread running this job.
*/
private final BorrowedThread _thread;
/**
* The manipulator for structured market data.
*/
private final MarketDataManipulator _marketDataManipulator;
public SingleThreadViewProcessWorker(final ViewProcessWorkerContext context, final ViewExecutionOptions executionOptions, final ViewDefinition viewDefinition) {
ArgumentChecker.notNull(context, "context");
ArgumentChecker.notNull(executionOptions, "executionOptions");
ArgumentChecker.notNull(viewDefinition, "viewDefinition");
_context = context;
_executionOptions = executionOptions;
_cycleRequested = !executionOptions.getFlags().contains(ViewExecutionFlags.WAIT_FOR_INITIAL_TRIGGER);
_compilationExpiryCycleTrigger = new FixedTimeTrigger();
addMasterCycleTrigger(_compilationExpiryCycleTrigger);
if (executionOptions.getFlags().contains(ViewExecutionFlags.TRIGGER_CYCLE_ON_TIME_ELAPSED)) {
addMasterCycleTrigger(new RecomputationPeriodTrigger(new Supplier<ViewDefinition>() {
@Override
public ViewDefinition get() {
return getViewDefinition();
}
}));
}
if (executionOptions.getMaxSuccessiveDeltaCycles() != null) {
addMasterCycleTrigger(new SuccessiveDeltaLimitTrigger(executionOptions.getMaxSuccessiveDeltaCycles()));
}
if (executionOptions.getFlags().contains(ViewExecutionFlags.RUN_AS_FAST_AS_POSSIBLE)) {
if (_cycleRequested) {
addMasterCycleTrigger(new RunAsFastAsPossibleTrigger());
} else {
// Defer the trigger until an initial one has happened
_masterCycleTriggerChanges = new RunAsFastAsPossibleTrigger();
}
}
_executeCycles = !executionOptions.getFlags().contains(ViewExecutionFlags.COMPILE_ONLY);
_executeGraphs = !executionOptions.getFlags().contains(ViewExecutionFlags.FETCH_MARKET_DATA_ONLY);
_suppressExecutionOnNoMarketData = executionOptions.getFlags().contains(ViewExecutionFlags.SKIP_CYCLE_ON_NO_MARKET_DATA);
_ignoreCompilationValidity = executionOptions.getFlags().contains(ViewExecutionFlags.IGNORE_COMPILATION_VALIDITY);
_viewDefinition = viewDefinition;
_marketDataManipulator = createMarketDataManipulator();
_job = new Job();
_thread = new BorrowedThread(context.toString(), _job);
s_executor.submit(_thread);
}
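// A hypothetical construction sketch (assuming the standard ExecutionOptions/ExecutionFlags helper
// classes are available), showing how execution flags translate into the triggers wired up above:
//
// ViewExecutionOptions options = ExecutionOptions.infinite(MarketData.live(),
// ExecutionFlags.none().triggerOnMarketData().triggerOnTimeElapsed().get());
// ViewProcessWorker worker = new SingleThreadViewProcessWorker(context, options, viewDefinition);
// // TRIGGER_CYCLE_ON_TIME_ELAPSED adds a RecomputationPeriodTrigger; without
// // WAIT_FOR_INITIAL_TRIGGER the first cycle is requested immediately.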
private MarketDataManipulator createMarketDataManipulator() {
ViewCycleExecutionOptions defaultExecutionOptions = _executionOptions.getDefaultExecutionOptions();
return new MarketDataManipulator(defaultExecutionOptions != null ?
defaultExecutionOptions.getMarketDataSelector() :
NoOpMarketDataSelector.getInstance());
}
private ViewProcessWorkerContext getWorkerContext() {
return _context;
}
private ViewExecutionOptions getExecutionOptions() {
return _executionOptions;
}
private ViewProcessContext getProcessContext() {
return getWorkerContext().getProcessContext();
}
private ViewCycleTrigger getMasterCycleTrigger() {
return _masterCycleTrigger;
}
private void addMasterCycleTrigger(final ViewCycleTrigger trigger) {
_masterCycleTrigger.addTrigger(trigger);
}
public FixedTimeTrigger getCompilationExpiryCycleTrigger() {
return _compilationExpiryCycleTrigger;
}
protected BorrowedThread getThread() {
return _thread;
}
protected TerminatableJob getJob() {
return _job;
}
private final class Job extends TerminatableJob {
/**
* Determines whether to run, and runs if required, a single computation cycle using the following rules:
* <ul>
* <li>A computation cycle can only be triggered if the relevant minimum computation period has passed since the start of the previous cycle.
* <li>A computation cycle will be forced if the relevant maximum computation period has passed since the start of the previous cycle.
* <li>A full computation is preferred over a delta computation if both are possible.
* <li>Performing a full computation also updates the times to the next delta computation; i.e. a full computation is considered to be as good as a delta.
* </ul>
*/
@Override
protected void runOneCycle() {
// Exception handling is important here to ensure that computation jobs do not just die quietly while consumers are
// potentially blocked, waiting for results.
ViewCycleType cycleType;
try {
cycleType = waitForNextCycle();
} catch (final InterruptedException e) {
return;
}
ViewCycleExecutionOptions executionOptions = null;
try {
if (!getExecutionOptions().getExecutionSequence().isEmpty()) {
executionOptions = getExecutionOptions().getExecutionSequence().poll(getExecutionOptions().getDefaultExecutionOptions());
s_logger.debug("Next cycle execution options: {}", executionOptions);
}
if (executionOptions == null) {
s_logger.info("No more view cycle execution options");
jobCompleted();
return;
}
} catch (final Exception e) {
s_logger.error("Error obtaining next view cycle execution options from sequence for " + getWorkerContext(), e);
return;
}
if (executionOptions.getMarketDataSpecifications().isEmpty()) {
s_logger.error("No market data specifications for cycle");
cycleExecutionFailed(executionOptions, new OpenGammaRuntimeException("No market data specifications for cycle"));
return;
}
MarketDataSnapshot marketDataSnapshot;
try {
SnapshottingViewExecutionDataProvider marketDataProvider = getMarketDataProvider();
if (marketDataProvider == null ||
!marketDataProvider.getSpecifications().equals(executionOptions.getMarketDataSpecifications())) {
if (marketDataProvider != null) {
s_logger.info("Replacing market data provider between cycles");
}
replaceMarketDataProvider(executionOptions.getMarketDataSpecifications());
marketDataProvider = getMarketDataProvider();
if (marketDataProvider == null) {
cycleExecutionFailed(executionOptions, new OpenGammaRuntimeException("Market data specifications " + executionOptions.getMarketDataSpecifications() + "invalid"));
return;
}
}
// Obtain the snapshot in case it is needed, but don't explicitly initialise it until the data is required
marketDataSnapshot = marketDataProvider.snapshot();
} catch (final Exception e) {
s_logger.error("Error with market data provider", e);
cycleExecutionFailed(executionOptions, new OpenGammaRuntimeException("Error with market data provider", e));
return;
}
Instant compilationValuationTime;
try {
if (executionOptions.getValuationTime() != null) {
compilationValuationTime = executionOptions.getValuationTime();
} else {
// Neither the cycle-specific options nor the defaults have overridden the valuation time so use the time
// associated with the market data snapshot. To avoid initialising the snapshot perhaps before the required
// inputs are known or even subscribed to, only ask for an indication at the moment.
compilationValuationTime = marketDataSnapshot.getSnapshotTimeIndication();
if (compilationValuationTime == null) {
throw new OpenGammaRuntimeException("Market data snapshot " + marketDataSnapshot + " produced a null indication of snapshot time");
}
}
} catch (final Exception e) {
s_logger.error("Error obtaining compilation valuation time", e);
cycleExecutionFailed(executionOptions, new OpenGammaRuntimeException("Error obtaining compilation valuation time", e));
return;
}
final VersionCorrection versionCorrection = getResolverVersionCorrection(executionOptions);
VersionCorrectionUtils.lock(versionCorrection);
try {
final CompiledViewDefinitionWithGraphs compiledViewDefinition;
try {
// Don't query the cache so that the process gets a "compiled" message even if a cached compilation is used
final CompiledViewDefinitionWithGraphs previous = _latestCompiledViewDefinition;
if (_ignoreCompilationValidity && (previous != null) && CompiledViewDefinitionWithGraphsImpl.isValidFor(previous, compilationValuationTime)) {
compiledViewDefinition = previous;
} else {
compiledViewDefinition = getCompiledViewDefinition(compilationValuationTime, versionCorrection);
if (compiledViewDefinition == null) {
s_logger.warn("Job terminated during view compilation");
return;
}
if ((previous == null) || !previous.getCompilationIdentifier().equals(compiledViewDefinition.getCompilationIdentifier())) {
if (_targetResolverChanges != null) {
// We'll try to register for changes that will wake us up for a cycle if market data is not ticking
if (previous != null) {
final Set<UniqueId> subscribedIds = new HashSet<UniqueId>(previous.getResolvedIdentifiers().values());
for (UniqueId uid : compiledViewDefinition.getResolvedIdentifiers().values()) {
if (!subscribedIds.contains(uid)) {
_targetResolverChanges.watch(uid.getObjectId());
}
}
} else {
for (UniqueId uid : compiledViewDefinition.getResolvedIdentifiers().values()) {
_targetResolverChanges.watch(uid.getObjectId());
}
}
}
viewDefinitionCompiled(executionOptions, compiledViewDefinition);
}
}
} catch (final Exception e) {
final String message = MessageFormat.format("Error obtaining compiled view definition {0} for time {1} at version-correction {2}", getViewDefinition().getUniqueId(),
compilationValuationTime, versionCorrection);
s_logger.error(message, e);
cycleExecutionFailed(executionOptions, new OpenGammaRuntimeException(message, e));
return;
}
// [PLAT-1174] This is necessary to support global injections by ValueRequirement. The use of a process-context level variable will be bad
// if there are multiple worker threads that initialise snapshots concurrently.
getProcessContext().getLiveDataOverrideInjector().setComputationTargetResolver(
getProcessContext().getFunctionCompilationService().getFunctionCompilationContext().getRawComputationTargetResolver().atVersionCorrection(versionCorrection));
boolean marketDataSubscribed = false;
try {
if (getExecutionOptions().getFlags().contains(ViewExecutionFlags.AWAIT_MARKET_DATA)) {
// REVIEW jonathan/andrew -- 2013-03-28 -- if the user wants to wait for market data, then assume they mean
// it and wait as long as it takes. There are mechanisms for cancelling the job.
setMarketDataSubscriptions(compiledViewDefinition.getMarketDataRequirements());
marketDataSubscribed = true;
marketDataSnapshot.init(compiledViewDefinition.getMarketDataRequirements(), Long.MAX_VALUE, TimeUnit.MILLISECONDS);
} else {
marketDataSubscribed = false;
marketDataSnapshot.init();
}
if (executionOptions.getValuationTime() == null) {
executionOptions = executionOptions.copy().setValuationTime(marketDataSnapshot.getSnapshotTime()).create();
}
} catch (final Exception e) {
s_logger.error("Error initializing snapshot {}", marketDataSnapshot);
cycleExecutionFailed(executionOptions, new OpenGammaRuntimeException("Error initializing snapshot" + marketDataSnapshot, e));
}
EngineResourceReference<SingleComputationCycle> cycleReference;
try {
cycleReference = createCycle(executionOptions, compiledViewDefinition, versionCorrection);
} catch (final Exception e) {
s_logger.error("Error creating next view cycle for " + getWorkerContext(), e);
return;
}
if (_executeCycles) {
try {
final SingleComputationCycle singleComputationCycle = cycleReference.get();
final HashMap<String, Collection<ComputationTargetSpecification>> configToComputationTargets = new HashMap<String, Collection<ComputationTargetSpecification>>();
final HashMap<String, Map<ValueSpecification, Set<ValueRequirement>>> configToTerminalOutputs = new HashMap<String, Map<ValueSpecification, Set<ValueRequirement>>>();
for (DependencyGraphExplorer graphExp : compiledViewDefinition.getDependencyGraphExplorers()) {
final DependencyGraph graph = graphExp.getWholeGraph();
configToComputationTargets.put(graph.getCalculationConfigurationName(), graph.getAllComputationTargets());
configToTerminalOutputs.put(graph.getCalculationConfigurationName(), graph.getTerminalOutputs());
}
if (isTerminated()) {
cycleReference.release();
return;
}
cycleStarted(new DefaultViewCycleMetadata(
cycleReference.get().getUniqueId(),
marketDataSnapshot.getUniqueId(),
compiledViewDefinition.getViewDefinition().getUniqueId(),
versionCorrection,
executionOptions.getValuationTime(),
singleComputationCycle.getAllCalculationConfigurationNames(),
configToComputationTargets,
configToTerminalOutputs));
if (isTerminated()) {
cycleReference.release();
return;
}
if (!marketDataSubscribed) {
setMarketDataSubscriptions(compiledViewDefinition.getMarketDataRequirements());
}
executeViewCycle(cycleType, cycleReference, marketDataSnapshot);
} catch (final InterruptedException e) {
// Execution interrupted - don't propagate as failure
s_logger.info("View cycle execution interrupted for {}", getWorkerContext());
cycleReference.release();
return;
} catch (final Exception e) {
// Execution failed
s_logger.error("View cycle execution failed for " + getWorkerContext(), e);
cycleReference.release();
cycleExecutionFailed(executionOptions, e);
return;
}
}
// Don't push the results through if we've been terminated, since another computation job could be running already
// and the fact that we've been terminated means the view is no longer interested in the result. Just die quietly.
if (isTerminated()) {
cycleReference.release();
return;
}
if (_executeCycles) {
cycleCompleted(cycleReference.get());
}
if (getExecutionOptions().getExecutionSequence().isEmpty()) {
jobCompleted();
}
if (_executeCycles) {
if (_previousCycleReference != null) {
_previousCycleReference.release();
}
_previousCycleReference = cycleReference;
}
} finally {
VersionCorrectionUtils.unlock(versionCorrection);
}
}
@Override
protected void postRunCycle() {
if (_previousCycleReference != null) {
_previousCycleReference.release();
}
unsubscribeFromTargetResolverChanges();
removeMarketDataProvider();
cacheCompiledViewDefinition(null);
}
@Override
public void terminate() {
super.terminate();
final Future<CompiledViewDefinitionWithGraphsImpl> task = _compilationTask;
if (task != null) {
task.cancel(true);
}
}
}
private void cycleCompleted(final ViewCycle cycle) {
try {
getWorkerContext().cycleCompleted(cycle);
} catch (final Exception e) {
s_logger.error("Error notifying " + getWorkerContext() + " of view cycle completion", e);
}
}
private void cycleStarted(final ViewCycleMetadata cycleMetadata) {
try {
getWorkerContext().cycleStarted(cycleMetadata);
} catch (final Exception e) {
s_logger.error("Error notifying " + getWorkerContext() + " of view cycle starting", e);
}
}
private void cycleFragmentCompleted(final ViewComputationResultModel result) {
try {
getWorkerContext().cycleFragmentCompleted(result, getViewDefinition());
} catch (final Exception e) {
s_logger.error("Error notifying " + getWorkerContext() + " of cycle fragment completion", e);
}
}
private void cycleExecutionFailed(final ViewCycleExecutionOptions executionOptions, final Exception exception) {
try {
getWorkerContext().cycleExecutionFailed(executionOptions, exception);
} catch (final Exception vpe) {
s_logger.error("Error notifying " + getWorkerContext() + " of the cycle execution error", vpe);
}
}
private void viewDefinitionCompiled(final ViewCycleExecutionOptions executionOptions, final CompiledViewDefinitionWithGraphs compiledViewDefinition) {
try {
getWorkerContext().viewDefinitionCompiled(getMarketDataProvider(), compiledViewDefinition);
} catch (final Exception vpe) {
s_logger.error("Error notifying " + getWorkerContext() + " of view definition compilation");
}
}
private void viewDefinitionCompilationFailed(final Instant compilationTime, final Exception e) {
try {
getWorkerContext().viewDefinitionCompilationFailed(compilationTime, e);
} catch (final Exception vpe) {
s_logger.error("Error notifying " + getWorkerContext() + " of the view definition compilation failure", vpe);
}
}
private synchronized ViewCycleType waitForNextCycle() throws InterruptedException {
while (true) {
final long currentTimeNanos = System.nanoTime();
final ViewCycleTriggerResult triggerResult = getMasterCycleTrigger().query(currentTimeNanos);
ViewCycleEligibility cycleEligibility = triggerResult.getCycleEligibility();
if (_forceTriggerCycle) {
cycleEligibility = ViewCycleEligibility.FORCE;
_forceTriggerCycle = false;
}
if (cycleEligibility == ViewCycleEligibility.FORCE || (cycleEligibility == ViewCycleEligibility.ELIGIBLE && _cycleRequested)) {
_cycleRequested = false;
ViewCycleType cycleType = triggerResult.getCycleType();
if (_previousCycleReference == null) {
// Cannot do a delta if we have no previous cycle
cycleType = ViewCycleType.FULL;
}
try {
getMasterCycleTrigger().cycleTriggered(currentTimeNanos, cycleType);
} catch (final Exception e) {
s_logger.error("Error notifying trigger of intention to execute cycle", e);
}
s_logger.debug("Eligible for {} cycle", cycleType);
if (_masterCycleTriggerChanges != null) {
// TODO: If we wish to support execution option changes mid-execution, we will need to add/remove any relevant triggers here
// Currently only the run-as-fast-as-possible trigger becomes valid for the second cycle if we've also got wait-for-initial-trigger
addMasterCycleTrigger(_masterCycleTriggerChanges);
_masterCycleTriggerChanges = null;
}
return cycleType;
}
// Going to sleep
final long wakeUpTime = triggerResult.getNextStateChangeNanos();
if (_cycleRequested) {
s_logger.debug("Sleeping until eligible to perform the next computation cycle");
// No amount of market data can make us eligible for a computation cycle any sooner.
_wakeOnCycleRequest = false;
} else {
s_logger.debug("Sleeping until forced to perform the next computation cycle");
_wakeOnCycleRequest = cycleEligibility == ViewCycleEligibility.ELIGIBLE;
}
long sleepTime = wakeUpTime - currentTimeNanos;
sleepTime = Math.max(0, sleepTime);
sleepTime /= NANOS_PER_MILLISECOND;
sleepTime += 1; // Could have been rounded down during division so ensure only woken after state change
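// e.g. a state change 1,500,000ns away: 1500000 / 1000000 truncates to 1ms, +1 gives wait(2),
// so we cannot wake before the trigger state has actually changed.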
s_logger.debug("Waiting for {} ms", sleepTime);
try {
// This could wait until end of time. In this case, only marketDataChanged() or triggerCycle() will wake it up
wait(sleepTime);
} catch (final InterruptedException e) {
// We support interruption as a signal that we have been terminated. If we're interrupted without having been
// terminated, we'll just return to this method and go back to sleep.
Thread.interrupted();
s_logger.info("Interrupted while delaying. Continuing operation.");
throw e;
}
}
}
private void executeViewCycle(final ViewCycleType cycleType,
final EngineResourceReference<SingleComputationCycle> cycleReference,
final MarketDataSnapshot marketDataSnapshot) throws Exception {
SingleComputationCycle deltaCycle;
if (cycleType == ViewCycleType.FULL) {
s_logger.info("Performing full computation");
deltaCycle = null;
} else {
s_logger.info("Performing delta computation");
deltaCycle = _previousCycleReference.get();
if ((deltaCycle != null) && (deltaCycle.getState() != ViewCycleState.EXECUTED)) {
// Can only do a delta cycle if the previous was valid
deltaCycle = null;
}
}
boolean continueExecution = cycleReference.get().preExecute(deltaCycle, marketDataSnapshot, _suppressExecutionOnNoMarketData);
if (_executeGraphs && continueExecution) {
try {
cycleReference.get().execute(s_executor);
} catch (final InterruptedException e) {
Thread.interrupted();
// In reality this means that the job has been terminated, and it will end as soon as we return from this method.
// In case the thread has been interrupted without terminating the job, we tidy everything up as if the
// interrupted cycle never happened so that deltas will be calculated from the previous cycle.
s_logger.info("Interrupted while executing a computation cycle. No results will be output from this cycle.");
throw e;
} catch (final Exception e) {
s_logger.error("Error while executing view cycle", e);
throw e;
}
} else {
s_logger.debug("Skipping graph execution");
}
cycleReference.get().postExecute();
final long durationNanos = cycleReference.get().getDuration().toNanos();
_totalTimeNanos += durationNanos;
_cycleCount += 1;
s_logger.info("Last latency was {} ms, Average latency is {} ms",
durationNanos / NANOS_PER_MILLISECOND,
(_totalTimeNanos / _cycleCount) / NANOS_PER_MILLISECOND);
}
private void jobCompleted() {
s_logger.info("Computation job completed for {}", getWorkerContext());
try {
getWorkerContext().workerCompleted();
} catch (final Exception e) {
s_logger.error("Error notifying " + getWorkerContext() + " of computation job completion", e);
}
getJob().terminate();
}
private EngineResourceReference<SingleComputationCycle> createCycle(final ViewCycleExecutionOptions executionOptions,
final CompiledViewDefinitionWithGraphs compiledViewDefinition, final VersionCorrection versionCorrection) {
// [PLAT-3581] Is the check below still necessary? The logic to create the valuation time for compilation is the same as that for
// populating the valuation time on the execution options that this detects.
// View definition was compiled based on compilation options, which might have only included an indicative
// valuation time. A further check ensures that the compiled view definition is still valid.
if (!CompiledViewDefinitionWithGraphsImpl.isValidFor(compiledViewDefinition, executionOptions.getValuationTime())) {
throw new OpenGammaRuntimeException("Compiled view definition " + compiledViewDefinition + " not valid for execution options " + executionOptions);
}
final UniqueId cycleId = getProcessContext().getCycleIdentifiers().get();
final ComputationResultListener streamingResultListener = new ComputationResultListener() {
@Override
public void resultAvailable(final ViewComputationResultModel result) {
cycleFragmentCompleted(result);
}
};
final SingleComputationCycle cycle = new SingleComputationCycle(cycleId, streamingResultListener, getProcessContext(), compiledViewDefinition, executionOptions, versionCorrection);
return getProcessContext().getCycleManager().manage(cycle);
}
private void subscribeToTargetResolverChanges() {
if (_targetResolverChanges == null) {
_targetResolverChanges = new TargetResolverChangeListener() {
@Override
protected void onChanged() {
requestCycle();
}
};
getProcessContext().getFunctionCompilationService().getFunctionCompilationContext().getRawComputationTargetResolver().changeManager().addChangeListener(_targetResolverChanges);
}
}
private void unsubscribeFromTargetResolverChanges() {
if (_targetResolverChanges != null) {
getProcessContext().getFunctionCompilationService().getFunctionCompilationContext().getRawComputationTargetResolver().changeManager().removeChangeListener(_targetResolverChanges);
_targetResolverChanges = null;
}
}
private static Instant now() {
// TODO: The distributed caches use a message bus for eventual consistency. This should really be (NOW - maximum permitted clock drift - eventual consistency time limit)
return Instant.now();
}
private VersionCorrection getResolverVersionCorrection(final ViewCycleExecutionOptions viewCycleOptions) {
VersionCorrection vc = null;
do {
vc = viewCycleOptions.getResolverVersionCorrection();
if (vc != null) {
break;
}
final ViewCycleExecutionOptions options = getExecutionOptions().getDefaultExecutionOptions();
if (options != null) {
vc = options.getResolverVersionCorrection();
if (vc != null) {
break;
}
}
vc = VersionCorrection.LATEST;
} while (false);
// Note: NOW means NOW as the caller has requested LATEST. We should not be using the valuation time.
if (vc.getCorrectedTo() == null) {
if (vc.getVersionAsOf() == null) {
if (!_ignoreCompilationValidity) {
subscribeToTargetResolverChanges();
}
return vc.withLatestFixed(now());
} else {
vc = vc.withLatestFixed(now());
}
} else if (vc.getVersionAsOf() == null) {
vc = vc.withLatestFixed(now());
}
unsubscribeFromTargetResolverChanges();
return vc;
}
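// e.g. VersionCorrection.of(null, correctedTo) -- latest version, fixed correction -- takes the
// final else-if: the version is pinned with withLatestFixed(now()) and the target-resolver change
// subscription is dropped. Only a full LATEST/LATEST request keeps that subscription alive.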
private PortfolioNodeEquivalenceMapper getNodeEquivalenceMapper() {
return new PortfolioNodeEquivalenceMapper();
}
private void markMappedPositions(final PortfolioNode node, final Map<UniqueId, Position> positions) {
for (Position position : node.getPositions()) {
positions.put(position.getUniqueId(), null);
}
for (PortfolioNode child : node.getChildNodes()) {
markMappedPositions(child, positions);
}
}
private void findUnmapped(final PortfolioNode node, final Map<UniqueId, UniqueId> mapped, final Set<UniqueId> unmapped, final Map<UniqueId, Position> positions) {
if (mapped.containsKey(node.getUniqueId())) {
// This node is mapped; as are the nodes underneath it, so just mark the child positions
markMappedPositions(node, positions);
} else {
// This node is unmapped - mark it as such and check the nodes underneath it
unmapped.add(node.getUniqueId());
for (PortfolioNode child : node.getChildNodes()) {
findUnmapped(child, mapped, unmapped, positions);
}
// Any child positions (and their trades) are unmapped if, and only if, they are not referenced by anything else
for (Position position : node.getPositions()) {
if (!positions.containsKey(position.getUniqueId())) {
positions.put(position.getUniqueId(), position);
}
}
}
}
private void findUnmapped(final PortfolioNode node, final Map<UniqueId, UniqueId> mapped, final Set<UniqueId> unmapped) {
final Map<UniqueId, Position> positions = new HashMap<UniqueId, Position>();
findUnmapped(node, mapped, unmapped, positions);
for (Map.Entry<UniqueId, Position> position : positions.entrySet()) {
if (position.getValue() != null) {
unmapped.add(position.getKey());
for (Trade trade : position.getValue().getTrades()) {
unmapped.add(trade.getUniqueId());
}
}
}
}
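// e.g. a position referenced by both an unmapped node and a mapped node: markMappedPositions
// records it with a null value (overwriting any earlier non-null entry), so the loop above skips
// it -- a position and its trades are only unmapped when no surviving node still references them.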
private Set<UniqueId> rewritePortfolioNodes(final Map<String, Pair<DependencyGraph, Set<ValueRequirement>>> previousGraphs, final CompiledViewDefinitionWithGraphs compiledViewDefinition,
final Portfolio newPortfolio) {
// Map any nodes from the old portfolio structure to the new one
final Map<UniqueId, UniqueId> mapped;
if (newPortfolio != null) {
mapped = getNodeEquivalenceMapper().getEquivalentNodes(compiledViewDefinition.getPortfolio().getRootNode(), newPortfolio.getRootNode());
} else {
mapped = Collections.emptyMap();
}
// Identify anything not (immediately) mapped to the new portfolio structure
final Set<UniqueId> unmapped = new HashSet<UniqueId>();
findUnmapped(compiledViewDefinition.getPortfolio().getRootNode(), mapped, unmapped);
if (s_logger.isDebugEnabled()) {
s_logger.debug("Mapping {} portfolio nodes to new structure, unmapping {} targets", mapped.size(), unmapped.size());
}
// For anything not mapped, remove the terminal outputs from the graph
for (final ViewCalculationConfiguration calcConfig : compiledViewDefinition.getViewDefinition().getAllCalculationConfigurations()) {
final Set<ValueRequirement> specificRequirements = calcConfig.getSpecificRequirements();
final Pair<DependencyGraph, Set<ValueRequirement>> previousGraphEntry = previousGraphs.get(calcConfig.getName());
if (previousGraphEntry == null) {
continue;
}
final DependencyGraph previousGraph = previousGraphEntry.getFirst();
final Map<ValueSpecification, Set<ValueRequirement>> terminalOutputs = previousGraph.getTerminalOutputs();
final ValueSpecification[] removeSpecifications = new ValueSpecification[terminalOutputs.size()];
@SuppressWarnings("unchecked")
final List<ValueRequirement>[] removeRequirements = new List[terminalOutputs.size()];
int remove = 0;
for (final Map.Entry<ValueSpecification, Set<ValueRequirement>> entry : terminalOutputs.entrySet()) {
if (unmapped.contains(entry.getKey().getTargetSpecification().getUniqueId())) {
List<ValueRequirement> removal = null;
for (final ValueRequirement requirement : entry.getValue()) {
if (!specificRequirements.contains(requirement)) {
if (removal == null) {
removal = new ArrayList<ValueRequirement>(entry.getValue().size());
}
removal.add(requirement);
}
// Anything that was in the specific requirements will be captured by the standard invalid identifier tests
}
if (removal != null) {
removeSpecifications[remove] = entry.getKey();
removeRequirements[remove++] = removal;
}
}
}
for (int i = 0; i < remove; i++) {
previousGraph.removeTerminalOutputs(removeRequirements[i], removeSpecifications[i]);
}
if (!mapped.isEmpty()) {
final ComputationTargetIdentifierRemapVisitor remapper = new ComputationTargetIdentifierRemapVisitor(mapped);
final Collection<Object> replacements = new ArrayList<Object>(mapped.size() * 2);
for (DependencyNode node : previousGraph.getDependencyNodes()) {
final ComputationTargetSpecification newTarget = remapper.remap(node.getComputationTarget());
if (newTarget != null) {
replacements.add(node);
replacements.add(newTarget);
}
}
Iterator<Object> itrReplacements = replacements.iterator();
while (itrReplacements.hasNext()) {
final DependencyNode node = (DependencyNode) itrReplacements.next();
final ComputationTargetSpecification newTarget = (ComputationTargetSpecification) itrReplacements.next();
s_logger.debug("Rewriting {} to {}", node, newTarget);
previousGraph.replaceNode(node, newTarget);
}
// Rewrite the original value requirements that might have referenced the original nodes
for (Map.Entry<ValueSpecification, Set<ValueRequirement>> terminalOutput : previousGraph.getTerminalOutputs().entrySet()) {
final Set<ValueRequirement> oldReqs = terminalOutput.getValue();
replacements.clear();
for (ValueRequirement req : oldReqs) {
final ComputationTargetReference newTarget = req.getTargetReference().accept(remapper);
if (newTarget != null) {
replacements.add(req);
replacements.add(MemoryUtils.instance(new ValueRequirement(req.getValueName(), newTarget, req.getConstraints())));
}
}
if (!replacements.isEmpty()) {
itrReplacements = replacements.iterator();
while (itrReplacements.hasNext()) {
final ValueRequirement oldReq = (ValueRequirement) itrReplacements.next();
final ValueRequirement newReq = (ValueRequirement) itrReplacements.next();
oldReqs.remove(oldReq);
oldReqs.add(newReq);
}
}
}
}
}
// Remove any PORTFOLIO nodes and any unmapped PORTFOLIO_NODE nodes with the filter
filterPreviousGraphs(previousGraphs, new InvalidPortfolioDependencyNodeFilter(unmapped), null);
return new HashSet<UniqueId>(mapped.values());
}
/**
* Returns the set of unique identifiers that were previously used as targets in the dependency graph for object identifiers (or external identifiers) that now resolve differently.
*
* @param previousResolutions the previous cycle's resolution of identifiers, not null
* @param versionCorrection the resolver version correction for this cycle, not null
* @return a map from each previously used unique identifier to its new resolution, or null if none are invalid
*/
private Map<UniqueId, ComputationTargetSpecification> getInvalidIdentifiers(final Map<ComputationTargetReference, UniqueId> previousResolutions, final VersionCorrection versionCorrection) {
long t = -System.nanoTime();
final Set<ComputationTargetReference> toCheck;
if (_targetResolverChanges == null) {
// Change notifications aren't relevant for historical iteration; must recheck all of the resolutions
toCheck = previousResolutions.keySet();
} else {
// Subscribed to LATEST/LATEST so change manager notifications can filter the set to be checked
toCheck = Sets.newHashSetWithExpectedSize(previousResolutions.size());
final Set<ObjectId> allObjectIds = Sets.newHashSetWithExpectedSize(previousResolutions.size());
for (final Map.Entry<ComputationTargetReference, UniqueId> previousResolution : previousResolutions.entrySet()) {
final ObjectId oid = previousResolution.getValue().getObjectId();
if (_targetResolverChanges.isChanged(oid)) {
// A change was seen on this target
s_logger.debug("Change observed on {}", oid);
toCheck.add(previousResolution.getKey());
}
allObjectIds.add(oid);
}
_targetResolverChanges.watchOnly(allObjectIds);
if (toCheck.isEmpty()) {
s_logger.debug("No resolutions (from {}) to check", previousResolutions.size());
return null;
} else {
s_logger.debug("Checking {} of {} resolutions for changed objects", toCheck.size(), previousResolutions.size());
}
}
PoolExecutor previousInstance = PoolExecutor.setInstance(getProcessContext().getFunctionCompilationService().getExecutorService());
final Map<ComputationTargetReference, ComputationTargetSpecification> specifications = getProcessContext().getFunctionCompilationService().getFunctionCompilationContext()
.getRawComputationTargetResolver().getSpecificationResolver().getTargetSpecifications(toCheck, versionCorrection);
PoolExecutor.setInstance(previousInstance);
t += System.nanoTime();
Map<UniqueId, ComputationTargetSpecification> invalidIdentifiers = null;
for (final Map.Entry<ComputationTargetReference, UniqueId> target : previousResolutions.entrySet()) {
final ComputationTargetSpecification resolved = specifications.get(target.getKey());
if ((resolved != null) && target.getValue().equals(resolved.getUniqueId())) {
// No change
s_logger.debug("No change resolving {}", target);
} else if (toCheck.contains(target.getKey())) {
// Identifier no longer resolved, or resolved differently
s_logger.info("New resolution of {} to {}", target, resolved);
if (invalidIdentifiers == null) {
invalidIdentifiers = new HashMap<UniqueId, ComputationTargetSpecification>();
}
invalidIdentifiers.put(target.getValue(), resolved);
}
}
s_logger.info("{} resolutions checked in {}ms", toCheck.size(), t / 1e6);
return invalidIdentifiers;
}
private void getInvalidMarketData(final DependencyGraph graph, final InvalidMarketDataDependencyNodeFilter filter) {
final PoolExecutor.Service<?> slaveJobs = getProcessContext().getFunctionCompilationService().getExecutorService().createService(null);
// 32 was chosen fairly arbitrarily. Before this batching, 502 node checks took 700ms; with it, they take 180ms.
final int jobSize = 32;
InvalidMarketDataDependencyNodeFilter.VisitBatch visit = filter.visit(jobSize);
for (ValueSpecification marketData : graph.getAllRequiredMarketData()) {
if (visit.isFull()) {
slaveJobs.execute(visit);
visit = filter.visit(jobSize);
}
final DependencyNode node = graph.getNodeProducing(marketData);
visit.add(marketData, node);
}
visit.run();
try {
slaveJobs.join();
} catch (InterruptedException e) {
throw new OpenGammaRuntimeException("Interrupted", e);
}
}
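// The batching above amortises executor overhead: market data nodes are gathered into VisitBatch
// jobs of up to 32 and checked on slave threads, while the final (possibly partial) batch runs
// inline on this thread before join() waits for the slave jobs to complete.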
/**
* Determines the value specifications from market data sourcing nodes that are not valid for the new data provider, passing each node to the supplied filter. The result is reported through the
* filter's state (see {@code InvalidMarketDataDependencyNodeFilter.hasInvalidNodes}) rather than a return value.
* <p>
* The cost of applying a filter can be quite high and in the historical simulation case seldom excludes nodes. To optimise this case we consider the market data sourcing nodes first to determine
* whether the filter should be applied.
*
* @param previousGraphs the previous graphs that have already been part processed, null if no preprocessing has occurred
* @param compiledViewDefinition the cached compilation containing previous graphs if {@code previousGraphs} is null
* @param filter the filter to pass details of the nodes to
*/
private void getInvalidMarketData(final Map<String, Pair<DependencyGraph, Set<ValueRequirement>>> previousGraphs,
final CompiledViewDefinitionWithGraphs compiledViewDefinition, final InvalidMarketDataDependencyNodeFilter filter) {
if (previousGraphs != null) {
for (Pair<DependencyGraph, Set<ValueRequirement>> previousGraph : previousGraphs.values()) {
getInvalidMarketData(previousGraph.getFirst(), filter);
}
} else {
for (DependencyGraphExplorer graphExp : compiledViewDefinition.getDependencyGraphExplorers()) {
getInvalidMarketData(graphExp.getWholeGraph(), filter);
}
}
}
/**
* Mark a set of nodes for inclusion (TRUE) or exclusion (FALSE) based on the filter. A node is included if the filter accepts it and all of its inputs are also marked for inclusion. A node is
* excluded if the filter rejects it or any of its inputs are rejected. This will operate recursively, processing all nodes to the leaves of the graph.
* <p>
* The {@link DependencyGraph#subGraph} operation doesn't work for us as it can leave nodes in the sub-graph that have inputs that aren't in the graph. Invalid nodes identified by the filter require
* everything from them up to the terminal output root to be removed from the graph so that those parts can be rebuilt.
*
* @param include the map to build the result into
* @param nodes the nodes to process
* @param filter the filter to apply to the nodes
* @return true if all of the nodes in the collection were included
*/
private static boolean includeNodes(final Map<DependencyNode, Boolean> include, final Collection<DependencyNode> nodes, final DependencyNodeFilter filter) {
boolean includedAll = true;
for (final DependencyNode node : nodes) {
final Boolean match = include.get(node);
if (match == null) {
if (filter.accept(node)) {
if (includeNodes(include, node.getInputNodes(), filter)) {
include.put(node, Boolean.TRUE);
} else {
includedAll = false;
include.put(node, Boolean.FALSE);
}
} else {
includedAll = false;
include.put(node, Boolean.FALSE);
}
} else {
if (match == Boolean.FALSE) {
includedAll = false;
}
}
}
return includedAll;
}
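// e.g. for a chain A -> B -> C (A takes input from B, B from C): if the filter rejects C then B
// and A are marked FALSE as well, even if the filter accepts them, because a node is only included
// when all of its inputs are included; only fully valid sub-trees survive the subsequent subGraph.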
private Map<String, Pair<DependencyGraph, Set<ValueRequirement>>> getPreviousGraphs(Map<String, Pair<DependencyGraph, Set<ValueRequirement>>> previousGraphs,
final CompiledViewDefinitionWithGraphs compiledViewDefinition) {
if (previousGraphs == null) {
final Collection<DependencyGraphExplorer> graphExps = compiledViewDefinition.getDependencyGraphExplorers();
previousGraphs = Maps.newHashMapWithExpectedSize(graphExps.size());
for (DependencyGraphExplorer graphExp : graphExps) {
final DependencyGraph graph = graphExp.getWholeGraph();
previousGraphs.put(graph.getCalculationConfigurationName(), Pair.<DependencyGraph, Set<ValueRequirement>>of(graph, new HashSet<ValueRequirement>()));
}
}
return previousGraphs;
}
/**
* Maintain the previously used dependency graphs by applying a node filter that identifies invalid nodes that must be recalculated (implying everything dependent on them must also be rebuilt). The
* first call will extract the previously compiled graphs; subsequent calls will update the structure, invalidating more nodes and increasing the number of missing requirements.
*
* @param previousGraphs the previously used graphs as a map from calculation configuration name to the graph and the value requirements that need to be recalculated, not null
* @param filter the filter to identify invalid nodes, not null
* @param unchangedNodes optional identifiers of unchanged portfolio nodes; any nodes filtered out must be removed from this
*/
private void filterPreviousGraphs(final Map<String, Pair<DependencyGraph, Set<ValueRequirement>>> previousGraphs, final DependencyNodeFilter filter, final Set<UniqueId> unchangedNodes) {
final Iterator<Map.Entry<String, Pair<DependencyGraph, Set<ValueRequirement>>>> itr = previousGraphs.entrySet().iterator();
while (itr.hasNext()) {
final Map.Entry<String, Pair<DependencyGraph, Set<ValueRequirement>>> entry = itr.next();
final DependencyGraph graph = entry.getValue().getFirst();
if (graph.getSize() == 0) {
continue;
}
final Collection<DependencyNode> nodes = graph.getDependencyNodes();
final Map<DependencyNode, Boolean> include = Maps.newHashMapWithExpectedSize(nodes.size());
includeNodes(include, nodes, filter);
assert nodes.size() == include.size();
final Map<ValueSpecification, Set<ValueRequirement>> terminalOutputs = graph.getTerminalOutputs();
final Set<ValueRequirement> missingRequirements = entry.getValue().getSecond();
final DependencyGraph filtered = graph.subGraph(new DependencyNodeFilter() {
@Override
public boolean accept(final DependencyNode node) {
if (include.get(node) == Boolean.TRUE) {
return true;
} else {
s_logger.debug("Discarding {} from dependency graph for {}", node, entry.getKey());
for (final ValueSpecification output : node.getOutputValues()) {
final Set<ValueRequirement> terminal = terminalOutputs.get(output);
if (terminal != null) {
missingRequirements.addAll(terminal);
}
}
if (unchangedNodes != null) {
unchangedNodes.remove(node.getComputationTarget().getUniqueId());
}
return false;
}
}
});
if (filtered.getSize() == 0) {
s_logger.info("Discarded total dependency graph for {}", entry.getKey());
itr.remove();
} else {
if (s_logger.isInfoEnabled()) {
s_logger.info("Removed {} nodes from dependency graph for {} by {}",
nodes.size() - filtered.getSize(),
entry.getKey(),
filter);
}
entry.setValue(Pair.of(filtered, missingRequirements));
}
}
}
private CompiledViewDefinitionWithGraphs getCompiledViewDefinition(final Instant valuationTime, final VersionCorrection versionCorrection) {
final long functionInitId = getProcessContext().getFunctionCompilationService().getFunctionCompilationContext().getFunctionInitId();
updateViewDefinitionIfRequired();
CompiledViewDefinitionWithGraphs compiledViewDefinition = null;
final Pair<Lock, Lock> executionCacheLocks = getProcessContext().getExecutionCacheLock().get(_executionCacheKey, valuationTime, versionCorrection);
executionCacheLocks.getSecond().lock();
executionCacheLocks.getFirst().lock();
boolean broadLock = true;
try {
compiledViewDefinition = getCachedCompiledViewDefinition(valuationTime, versionCorrection);
Map<String, Pair<DependencyGraph, Set<ValueRequirement>>> previousGraphs = null;
ConcurrentMap<ComputationTargetReference, UniqueId> previousResolutions = null;
Set<UniqueId> changedPositions = null;
Set<UniqueId> unchangedNodes = null;
boolean marketDataProviderDirty = _marketDataProviderDirty;
_marketDataProviderDirty = false;
if (compiledViewDefinition != null) {
executionCacheLocks.getFirst().unlock();
broadLock = false;
do {
// The cast below is bad, but only temporary -- the function initialiser id needs to go
if (functionInitId != ((CompiledViewDefinitionWithGraphsImpl) compiledViewDefinition).getFunctionInitId()) {
// The function repository has been reinitialized which invalidates any previous graphs
// TODO: [PLAT-2237, PLAT-1623, PLAT-2240] Get rid of this
break;
}
final Map<ComputationTargetReference, UniqueId> resolvedIdentifiers = compiledViewDefinition.getResolvedIdentifiers();
// TODO: The check below works well for the historical valuation case, but if the resolver v/c is different for two workers in the
// group for an otherwise identical cache key then including it in the caching detail may become necessary to handle those cases.
if (!versionCorrection.equals(compiledViewDefinition.getResolverVersionCorrection())) {
final Map<UniqueId, ComputationTargetSpecification> invalidIdentifiers = getInvalidIdentifiers(resolvedIdentifiers, versionCorrection);
if (invalidIdentifiers != null) {
previousGraphs = getPreviousGraphs(previousGraphs, compiledViewDefinition);
if ((compiledViewDefinition.getPortfolio() != null) && invalidIdentifiers.containsKey(compiledViewDefinition.getPortfolio().getUniqueId())) {
// The portfolio resolution is different, invalidate or rewrite PORTFOLIO and PORTFOLIO_NODE nodes in the graph. Note that incremental
// compilation under this circumstance can be flawed if the functions have made notable use of the overall portfolio structure such that
// a full re-compilation will yield a different dependency graph to just rewriting the previous one.
final ComputationTargetResolver resolver = getProcessContext().getFunctionCompilationService().getFunctionCompilationContext().getRawComputationTargetResolver();
final ComputationTargetSpecification portfolioSpec = resolver.getSpecificationResolver().getTargetSpecification(
new ComputationTargetSpecification(ComputationTargetType.PORTFOLIO, getViewDefinition().getPortfolioId()), versionCorrection);
final ComputationTarget newPortfolio = resolver.resolve(portfolioSpec, versionCorrection);
unchangedNodes = rewritePortfolioNodes(previousGraphs, compiledViewDefinition, (Portfolio) newPortfolio.getValue());
}
// Invalidate any dependency graph nodes on the invalid targets
filterPreviousGraphs(previousGraphs, new InvalidTargetDependencyNodeFilter(invalidIdentifiers.keySet()), unchangedNodes);
previousResolutions = new ConcurrentHashMap<ComputationTargetReference, UniqueId>(resolvedIdentifiers.size());
for (final Map.Entry<ComputationTargetReference, UniqueId> resolvedIdentifier : resolvedIdentifiers.entrySet()) {
if (invalidIdentifiers.containsKey(resolvedIdentifier.getValue())) {
if ((unchangedNodes == null) && resolvedIdentifier.getKey().getType().isTargetType(ComputationTargetType.POSITION)) {
// At least one position has changed, add all portfolio targets
ComputationTargetSpecification ctspec = invalidIdentifiers.get(resolvedIdentifier.getValue());
if (ctspec != null) {
if (changedPositions == null) {
changedPositions = new HashSet<UniqueId>();
}
changedPositions.add(ctspec.getUniqueId());
}
}
} else {
previousResolutions.put(resolvedIdentifier.getKey(), resolvedIdentifier.getValue());
}
}
} else {
compiledViewDefinition = compiledViewDefinition.withResolverVersionCorrection(versionCorrection);
cacheCompiledViewDefinition(compiledViewDefinition);
}
}
if (!CompiledViewDefinitionWithGraphsImpl.isValidFor(compiledViewDefinition, valuationTime)) {
// Invalidate any dependency graph nodes that use functions that are no longer valid
previousGraphs = getPreviousGraphs(previousGraphs, compiledViewDefinition);
filterPreviousGraphs(previousGraphs, new InvalidFunctionDependencyNodeFilter(valuationTime), unchangedNodes);
}
if (marketDataProviderDirty) {
// Invalidate any market data sourcing nodes that are no longer valid
final InvalidMarketDataDependencyNodeFilter filter = new InvalidMarketDataDependencyNodeFilter(getProcessContext().getFunctionCompilationService().getFunctionCompilationContext()
.getRawComputationTargetResolver().atVersionCorrection(versionCorrection), getMarketDataProvider().getAvailabilityProvider());
getInvalidMarketData(previousGraphs, compiledViewDefinition, filter);
if (filter.hasInvalidNodes()) {
previousGraphs = getPreviousGraphs(previousGraphs, compiledViewDefinition);
filterPreviousGraphs(previousGraphs, filter, unchangedNodes);
}
}
if (previousGraphs == null) {
// Existing cached model is valid (an optimization for the common case of similar, increasing valuation times)
return compiledViewDefinition;
}
if (previousResolutions == null) {
previousResolutions = new ConcurrentHashMap<ComputationTargetReference, UniqueId>(resolvedIdentifiers);
}
} while (false);
executionCacheLocks.getFirst().lock();
broadLock = true;
}
final MarketDataAvailabilityProvider availabilityProvider = getMarketDataProvider().getAvailabilityProvider();
final ViewCompilationServices compilationServices = getProcessContext().asCompilationServices(availabilityProvider);
if (previousGraphs != null) {
s_logger.info("Performing incremental graph compilation");
_compilationTask = ViewDefinitionCompiler.incrementalCompileTask(getViewDefinition(), compilationServices, valuationTime, versionCorrection, previousGraphs,
previousResolutions, changedPositions, unchangedNodes);
} else {
s_logger.info("Performing full graph compilation");
_compilationTask = ViewDefinitionCompiler.fullCompileTask(getViewDefinition(), compilationServices, valuationTime, versionCorrection);
}
try {
if (!getJob().isTerminated()) {
compiledViewDefinition = _compilationTask.get();
compiledViewDefinition = initialiseMarketDataManipulation(compiledViewDefinition);
cacheCompiledViewDefinition(compiledViewDefinition);
} else {
return null;
}
} finally {
_compilationTask = null;
}
} catch (final Exception e) {
final String message = MessageFormat.format("Error compiling view definition {0} for time {1}", getViewDefinition().getUniqueId(), valuationTime);
viewDefinitionCompilationFailed(valuationTime, new OpenGammaRuntimeException(message, e));
throw new OpenGammaRuntimeException(message, e);
} finally {
if (broadLock) {
executionCacheLocks.getFirst().unlock();
}
executionCacheLocks.getSecond().unlock();
}
// [PLAT-984]
// Assume that valuation times are increasing in real-time towards the expiry of the view definition, so that we
// can predict the time to expiry. If this assumption is wrong then the worst we do is trigger an unnecessary
// cycle. In the predicted case, we trigger a cycle on expiry so that any new market data subscriptions are made
// straight away.
if ((compiledViewDefinition.getValidTo() != null) && getExecutionOptions().getFlags().contains(ViewExecutionFlags.TRIGGER_CYCLE_ON_MARKET_DATA_CHANGED)) {
final Duration durationToExpiry = getMarketDataProvider().getRealTimeDuration(valuationTime, compiledViewDefinition.getValidTo());
final long expiryNanos = System.nanoTime() + durationToExpiry.toNanos();
_compilationExpiryCycleTrigger.set(expiryNanos, ViewCycleTriggerResult.forceFull());
// REVIEW Andrew 2012-11-02 -- If we are ticking live, then this is almost right (System.nanoTime will be close to valuationTime, depending on how
// long the compilation took). If we are running through historical data then this is quite a meaningless trigger.
} else {
_compilationExpiryCycleTrigger.reset();
}
return compiledViewDefinition;
}
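/**
 * Applies any configured market data manipulations to the compiled dependency graphs,
 * returning a compiled view definition that records the selections made for each graph
 * (or the original instance if no manipulations are defined or none matched).
 */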
private CompiledViewDefinitionWithGraphs initialiseMarketDataManipulation(final CompiledViewDefinitionWithGraphs compiledViewDefinition) {
if (_marketDataManipulator.hasManipulationsDefined()) {
Map<DependencyGraph, Map<DistinctMarketDataSelector, Set<ValueSpecification>>> selectionsByGraph = new HashMap<>();
for (DependencyGraphExplorer graphExplorer : compiledViewDefinition.getDependencyGraphExplorers()) {
DependencyGraph graph = graphExplorer.getWholeGraph();
Map<DistinctMarketDataSelector, Set<ValueSpecification>> selectorMapping = _marketDataManipulator.modifyDependencyGraph(graph);
if (!selectorMapping.isEmpty()) {
selectionsByGraph.put(graph, selectorMapping);
}
}
if (!selectionsByGraph.isEmpty()) {
return compiledViewDefinition.withMarketDataManipulationSelections(selectionsByGraph);
}
}
return compiledViewDefinition;
}
/**
* Gets the cached compiled view definition which may be re-used in subsequent computation cycles.
* <p>
* External visibility for tests.
*
* @param valuationTime the indicative valuation time, not null
* @param resolverVersionCorrection the resolver version correction, not null
* @return the cached compiled view definition, or null if nothing is currently cached
*/
public CompiledViewDefinitionWithGraphs getCachedCompiledViewDefinition(final Instant valuationTime, final VersionCorrection resolverVersionCorrection) {
CompiledViewDefinitionWithGraphs cached = _latestCompiledViewDefinition;
if (cached != null) {
boolean resolverMatch = resolverVersionCorrection.equals(cached.getResolverVersionCorrection());
boolean valuationMatch = CompiledViewDefinitionWithGraphsImpl.isValidFor(cached, valuationTime);
if (!resolverMatch || !valuationMatch) {
// Query the cache in case there is a newer one
cached = getProcessContext().getExecutionCache().getCompiledViewDefinitionWithGraphs(_executionCacheKey);
if (cached != null) {
// Only update ours if the one from the cache has a better validity
if (resolverVersionCorrection.equals(cached.getResolverVersionCorrection())) {
cached = PLAT3249.deepClone(cached);
_latestCompiledViewDefinition = cached;
} else {
if (!resolverMatch && !valuationMatch && CompiledViewDefinitionWithGraphsImpl.isValidFor(cached, valuationTime)) {
cached = PLAT3249.deepClone(cached);
_latestCompiledViewDefinition = cached;
}
}
} else {
// Nothing in the cache; use the one from last time
cached = _latestCompiledViewDefinition;
}
}
} else {
// Query the cache
cached = getProcessContext().getExecutionCache().getCompiledViewDefinitionWithGraphs(_executionCacheKey);
if (cached != null) {
cached = PLAT3249.deepClone(cached);
_latestCompiledViewDefinition = cached;
}
}
return cached;
}
/**
* Replaces the cached compiled view definition.
* <p>
* External visibility for tests.
*
* @param latestCompiledViewDefinition the compiled view definition, may be null
*/
public void cacheCompiledViewDefinition(final CompiledViewDefinitionWithGraphs latestCompiledViewDefinition) {
if (latestCompiledViewDefinition != null) {
getProcessContext().getExecutionCache().setCompiledViewDefinitionWithGraphs(_executionCacheKey, latestCompiledViewDefinition);
}
_latestCompiledViewDefinition = latestCompiledViewDefinition;
}
/**
* Gets the view definition currently in use by the computation job.
*
* @return the view definition, not null
*/
public ViewDefinition getViewDefinition() {
return _viewDefinition;
}
private void updateViewDefinitionIfRequired() {
final ViewDefinition newViewDefinition = _newViewDefinition.getAndSet(null);
if (newViewDefinition != null) {
_viewDefinition = newViewDefinition;
// TODO [PLAT-3215] Might not need to discard the entire compilation at this point
cacheCompiledViewDefinition(null);
SnapshottingViewExecutionDataProvider marketDataProvider = getMarketDataProvider();
if (marketDataProvider != null) {
_executionCacheKey = ViewExecutionCacheKey.of(newViewDefinition, marketDataProvider.getAvailabilityProvider());
// A change in view definition might mean a change in market data user which could invalidate the resolutions
if (!marketDataProvider.getMarketDataUser().equals(newViewDefinition.getMarketDataUser())) {
replaceMarketDataProvider(marketDataProvider.getSpecifications());
}
}
}
}
private void replaceMarketDataProvider(final List<MarketDataSpecification> marketDataSpecs) {
// [PLAT-3186] Not a huge overhead, but we could check compatibility with the new specs and keep the same provider
removeMarketDataProvider();
setMarketDataProvider(marketDataSpecs);
}
private void removeMarketDataProvider() {
if (_marketDataProvider == null) {
return;
}
removeMarketDataSubscriptions();
_marketDataProvider.removeListener(this);
_marketDataProvider = null;
_marketDataProviderDirty = true;
_executionCacheKey = null;
}
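/**
 * Creates and registers a market data provider for the given specifications and refreshes
 * the execution cache key; on failure the provider is left unset. Either way the provider
 * is marked dirty so that the next cycle revalidates its market data sourcing nodes.
 */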
private void setMarketDataProvider(final List<MarketDataSpecification> marketDataSpecs) {
try {
_marketDataProvider = new SnapshottingViewExecutionDataProvider(getViewDefinition().getMarketDataUser(),
marketDataSpecs, getProcessContext().getMarketDataProviderResolver());
} catch (final Exception e) {
s_logger.error("Failed to create data provider", e);
_marketDataProvider = null;
}
if (_marketDataProvider != null) {
_marketDataProvider.addListener(this);
_executionCacheKey = ViewExecutionCacheKey.of(getViewDefinition(), _marketDataProvider.getAvailabilityProvider());
}
_marketDataProviderDirty = true;
}
private SnapshottingViewExecutionDataProvider getMarketDataProvider() {
return _marketDataProvider;
}
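/**
 * Diffs the current subscription set against the required one, removing subscriptions
 * that are no longer referenced and adding any newly required ones.
 */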
private void setMarketDataSubscriptions(final Set<ValueSpecification> requiredSubscriptions) {
final Set<ValueSpecification> currentSubscriptions = _marketDataSubscriptions;
final Set<ValueSpecification> unusedMarketData = Sets.difference(currentSubscriptions, requiredSubscriptions);
if (!unusedMarketData.isEmpty()) {
s_logger.debug("{} unused market data subscriptions", unusedMarketData.size());
removeMarketDataSubscriptions(new ArrayList<ValueSpecification>(unusedMarketData));
}
final Set<ValueSpecification> newMarketData = Sets.difference(requiredSubscriptions, currentSubscriptions);
if (!newMarketData.isEmpty()) {
s_logger.debug("{} new market data requirements", newMarketData.size());
addMarketDataSubscriptions(new HashSet<ValueSpecification>(newMarketData));
}
}
//-------------------------------------------------------------------------
private void addMarketDataSubscriptions(final Set<ValueSpecification> requiredSubscriptions) {
final OperationTimer timer = new OperationTimer(s_logger, "Adding {} market data subscriptions", requiredSubscriptions.size());
_pendingSubscriptions.addAll(requiredSubscriptions);
_marketDataProvider.subscribe(requiredSubscriptions);
_marketDataSubscriptions.addAll(requiredSubscriptions);
try {
synchronized (_pendingSubscriptions) {
if (!_pendingSubscriptions.isEmpty()) {
_pendingSubscriptions.wait();
}
}
} catch (final InterruptedException ex) {
s_logger.info("Interrupted while waiting for subscription results.");
} finally {
_pendingSubscriptions.clear();
}
timer.finished();
}
private void removePendingSubscription(final ValueSpecification specification) {
if (_pendingSubscriptions.remove(specification)) {
notifyIfPendingSubscriptionsDone();
}
}
private void removePendingSubscriptions(final Collection<ValueSpecification> specifications) {
// Previously, this used removeAll, but as specifications may be a list, it was observed
// that we may end up iterating over _pendingSubscriptions and calling contains() on
// specifications, resulting in long wait times for a view to load (PLAT-3508)
boolean removalPerformed = false;
for (ValueSpecification specification : specifications) {
removalPerformed = _pendingSubscriptions.remove(specification) || removalPerformed;
}
if (removalPerformed) {
notifyIfPendingSubscriptionsDone();
}
}
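/**
 * Wakes the thread blocked in addMarketDataSubscriptions once the last pending
 * subscription result has arrived. The unsynchronized pre-check keeps the common
 * callback path cheap; the emptiness test is repeated under the monitor before notifying.
 */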
private void notifyIfPendingSubscriptionsDone() {
if (_pendingSubscriptions.isEmpty()) {
synchronized (_pendingSubscriptions) {
if (_pendingSubscriptions.isEmpty()) {
_pendingSubscriptions.notifyAll();
}
}
}
}
private void removeMarketDataSubscriptions() {
removeMarketDataSubscriptions(_marketDataSubscriptions);
}
private void removeMarketDataSubscriptions(final Collection<ValueSpecification> unusedSubscriptions) {
final OperationTimer timer = new OperationTimer(s_logger, "Removing {} market data subscriptions", unusedSubscriptions.size());
_marketDataProvider.unsubscribe(unusedSubscriptions);
_marketDataSubscriptions.removeAll(unusedSubscriptions);
timer.finished();
}
// MarketDataListener
@Override
public void subscriptionsSucceeded(final Collection<ValueSpecification> valueSpecifications) {
s_logger.debug("Subscription succeeded: {}", valueSpecifications.size());
removePendingSubscriptions(valueSpecifications);
}
@Override
public void subscriptionFailed(final ValueSpecification valueSpecification, final String msg) {
s_logger.debug("Market data subscription to {} failed. This market data may be missing from computation cycles.", valueSpecification);
removePendingSubscription(valueSpecification);
}
@Override
public void subscriptionStopped(final ValueSpecification valueSpecification) {
}
@Override
public void valuesChanged(final Collection<ValueSpecification> valueSpecifications) {
if (!getExecutionOptions().getFlags().contains(ViewExecutionFlags.TRIGGER_CYCLE_ON_MARKET_DATA_CHANGED)) {
return;
}
// Don't want to query the cache for this; always use the last one
final CompiledViewDefinitionWithGraphs compiledView = _latestCompiledViewDefinition;
if (compiledView == null) {
return;
}
if (CollectionUtils.containsAny(compiledView.getMarketDataRequirements(), valueSpecifications)) {
requestCycle();
}
}
// ViewComputationJob
@Override
public synchronized boolean triggerCycle() {
s_logger.debug("Cycle triggered manually");
_forceTriggerCycle = true;
notifyAll();
return true;
}
@Override
public synchronized boolean requestCycle() {
// REVIEW jonathan 2010-10-04 -- this synchronisation is necessary, but it feels very heavyweight for
// high-frequency market data. See how it goes, but we could take into account the recalc periods and apply a
// heuristic (e.g. only wake up due to market data if max - min < e, for some e) which tries to see whether it's
// worth doing all this.
_cycleRequested = true;
if (!_wakeOnCycleRequest) {
return true;
}
notifyAll();
return true;
}
@Override
public void updateViewDefinition(final ViewDefinition viewDefinition) {
s_logger.debug("Received new view definition {} for next cycle", viewDefinition.getUniqueId());
_newViewDefinition.getAndSet(viewDefinition);
}
@Override
public void terminate() {
getJob().terminate();
s_logger.debug("Interrupting calculation job thread");
getThread().interrupt();
}
@Override
public void join() throws InterruptedException {
getThread().join();
}
@Override
public boolean join(final long timeout) throws InterruptedException {
getThread().join(timeout);
return !getThread().isAlive();
}
@Override
public boolean isTerminated() {
return getJob().isTerminated() && !getThread().isAlive();
}
}
| [PLAT-3366] Add cycle execution time metric
| projects/OG-Engine/src/main/java/com/opengamma/engine/view/worker/SingleThreadViewProcessWorker.java | [PLAT-3366] Add cycle execution time metric | <ide><path>rojects/OG-Engine/src/main/java/com/opengamma/engine/view/worker/SingleThreadViewProcessWorker.java
<ide> import org.threeten.bp.Duration;
<ide> import org.threeten.bp.Instant;
<ide>
<add>import com.codahale.metrics.MetricRegistry;
<add>import com.codahale.metrics.Timer;
<ide> import com.google.common.base.Supplier;
<ide> import com.google.common.collect.Maps;
<ide> import com.google.common.collect.Sets;
<ide> import com.opengamma.util.NamedThreadPoolFactory;
<ide> import com.opengamma.util.PoolExecutor;
<ide> import com.opengamma.util.TerminatableJob;
<add>import com.opengamma.util.metric.OpenGammaMetricRegistry;
<ide> import com.opengamma.util.monitor.OperationTimer;
<ide> import com.opengamma.util.tuple.Pair;
<ide>
<ide> * The manipulator for structured market data.
<ide> */
<ide> private final MarketDataManipulator _marketDataManipulator;
<add>
<add> /**
<add> * Timer to track delta cycle execution time.
<add> */
<add> private Timer _deltaCycleTimer;
<add> /**
<add> * Timer to track full cycle execution time.
<add> */
<add> private Timer _fullCycleTimer;
<ide>
<ide> public SingleThreadViewProcessWorker(final ViewProcessWorkerContext context, final ViewExecutionOptions executionOptions, final ViewDefinition viewDefinition) {
<ide> ArgumentChecker.notNull(context, "context");
<ide> _marketDataManipulator = createMarketDataManipulator();
<ide> _job = new Job();
<ide> _thread = new BorrowedThread(context.toString(), _job);
<add> _deltaCycleTimer = OpenGammaMetricRegistry.getSummaryInstance().timer("SingleThreadViewProcessWorker.cycle.delta");
<add> _fullCycleTimer = OpenGammaMetricRegistry.getSummaryInstance().timer("SingleThreadViewProcessWorker.cycle.full");
<ide> s_executor.submit(_thread);
<ide> }
<ide>
<ide> }
<ide> cycleReference.get().postExecute();
<ide> final long durationNanos = cycleReference.get().getDuration().toNanos();
<add> final Timer timer = deltaCycle != null ? _deltaCycleTimer : _fullCycleTimer;
<add> if (timer != null) {
<add> timer.update(durationNanos, TimeUnit.NANOSECONDS);
<add> }
<ide> _totalTimeNanos += durationNanos;
<ide> _cycleCount += 1;
<ide> s_logger.info("Last latency was {} ms, Average latency is {} ms", |
|
JavaScript | apache-2.0 | c9a88b9667d4d24d1d65defa1b4148f9113c17d8 | 0 | NLeSC/PattyVis,NLeSC/PattyVis | // Karma configuration
// http://karma-runner.github.io/0.12/config/configuration-file.html
// Generated on 2015-01-07 using
// generator-karma 0.8.3
module.exports = function(config) {
'use strict';
config.set({
// enable / disable watching files and executing tests whenever any file changes
autoWatch: true,
// base path, that will be used to resolve files and exclude
basePath: '../',
// testing framework to use (jasmine/mocha/qunit/...)
frameworks: ['jasmine'],
// list of files / patterns to load in the browser
files: [
'bower_components/angular/angular.js',
'bower_components/angular-mocks/angular-mocks.js',
'bower_components/angular-animate/angular-animate.js',
'bower_components/angular-cookies/angular-cookies.js',
'bower_components/angular-resource/angular-resource.js',
'bower_components/angular-route/angular-route.js',
'bower_components/angular-sanitize/angular-sanitize.js',
'bower_components/angular-touch/angular-touch.js',
'bower_components/proj4/dist/proj4.js',
'bower_components/threejs/build/three.js',
'bower_components/OrbitControls/index.js',
'bower_components/potree/build/js/potree.js',
'bower_components/potree/build/js/laslaz.js',
'bower_components/openlayers3/build/ol.js',
'bower_components/oculus-bridge/web/build/OculusBridge.min.js',
'bower_components/OculusRiftEffect/index.js',
'bower_components/angular-bootstrap/ui-bootstrap-tpls.js',
'app/scripts/**/*.js',
// test for directives need the templates, inside test load pattyApp.templates module to get templates
'.tmp/template.js',
'test/mock/**/*.js',
'test/spec/**/*.js'
],
// list of files / patterns to exclude
exclude: [],
// web server port
port: 8080,
// Start these browsers, currently available:
// - Chrome
// - ChromeCanary
// - Firefox
// - Opera
// - Safari (only Mac)
// - PhantomJS
// - IE (only Windows)
browsers: [
'PhantomJS'
// 'Chrome'
],
// Which plugins to enable
plugins: [
'karma-phantomjs-launcher',
// 'karma-chrome-launcher',
'karma-jasmine',
'karma-coverage',
'karma-junit-reporter'
],
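// To run against Chrome instead, uncomment 'Chrome' and 'karma-chrome-launcher' above
// and install the launcher first: npm install --save-dev karma-chrome-launcher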
preprocessors: {
// source files, that you wanna generate coverage for
// do not include tests or libraries
// (these files will be instrumented by Istanbul)
'app/scripts/**/*.js': ['coverage']
},
reporters: ['dots', 'junit', 'coverage'],
junitReporter: {
outputFile: 'test/reports/TEST-results.xml'
},
coverageReporter: {
dir: 'test/reports/coverage/',
reporters: [{
type: 'lcov' // for viewing html pages and SonarQube
}, {
type: 'cobertura' // for use in Jenkins
}]
},
// Continuous Integration mode
// if true, it capture browsers, run tests and exit
singleRun: false,
colors: true,
// level of logging
// possible values: LOG_DISABLE || LOG_ERROR || LOG_WARN || LOG_INFO || LOG_DEBUG
logLevel: config.LOG_INFO,
// Uncomment the following lines if you are using grunt's server to run the tests
// proxies: {
// '/': 'http://localhost:9000/'
// },
// URL root to prevent conflicts with the site root
// urlRoot: '_karma_'
});
};
| test/karma.conf.js | // Karma configuration
// http://karma-runner.github.io/0.12/config/configuration-file.html
// Generated on 2015-01-07 using
// generator-karma 0.8.3
module.exports = function(config) {
'use strict';
config.set({
// enable / disable watching files and executing tests whenever any file changes
autoWatch: true,
// base path, that will be used to resolve files and exclude
basePath: '../',
// testing framework to use (jasmine/mocha/qunit/...)
frameworks: ['jasmine'],
// list of files / patterns to load in the browser
files: [
'bower_components/angular/angular.js',
'bower_components/angular-mocks/angular-mocks.js',
'bower_components/angular-animate/angular-animate.js',
'bower_components/angular-cookies/angular-cookies.js',
'bower_components/angular-resource/angular-resource.js',
'bower_components/angular-route/angular-route.js',
'bower_components/angular-sanitize/angular-sanitize.js',
'bower_components/angular-touch/angular-touch.js',
'bower_components/proj4/dist/proj4.js',
'bower_components/threejs/build/three.js',
'bower_components/OrbitControls/index.js',
'bower_components/potree/build/js/potree.js',
'bower_components/potree/build/js/laslaz.js',
'bower_components/openlayers3/build/ol.js',
'bower_components/oculus-bridge/web/build/OculusBridge.min.js',
'bower_components/OculusRiftEffect/index.js',
'bower_components/angular-bootstrap/ui-bootstrap-tpls.js',
'app/scripts/**/*.js',
// test for directives need the templates, inside test load pattyApp.templates module to get templates
'.tmp/template.js',
'test/mock/**/*.js',
'test/spec/**/*.js'
],
// list of files / patterns to exclude
exclude: [],
// web server port
port: 8080,
// Start these browsers, currently available:
// - Chrome
// - ChromeCanary
// - Firefox
// - Opera
// - Safari (only Mac)
// - PhantomJS
// - IE (only Windows)
browsers: [
'PhantomJS'
],
// Which plugins to enable
plugins: [
'karma-phantomjs-launcher',
'karma-jasmine',
'karma-coverage',
'karma-junit-reporter'
],
preprocessors: {
// source files, that you wanna generate coverage for
// do not include tests or libraries
// (these files will be instrumented by Istanbul)
'app/scripts/**/*.js': ['coverage']
},
reporters: ['dots', 'junit', 'coverage'],
junitReporter: {
outputFile: 'test/reports/TEST-results.xml'
},
coverageReporter: {
dir: 'test/reports/coverage/',
reporters: [{
type: 'lcov' // for viewing html pages and SonarQube
}, {
type: 'cobertura' // for use in Jenkins
}]
},
// Continuous Integration mode
// if true, it capture browsers, run tests and exit
singleRun: false,
colors: true,
// level of logging
// possible values: LOG_DISABLE || LOG_ERROR || LOG_WARN || LOG_INFO || LOG_DEBUG
logLevel: config.LOG_INFO,
// Uncomment the following lines if you are using grunt's server to run the tests
// proxies: {
// '/': 'http://localhost:9000/'
// },
// URL root to prevent conflicts with the site root
// urlRoot: '_karma_'
});
};
| Added (commented out) options for using Chrome as Karma browser
| test/karma.conf.js | Added (commented out) options for using Chrome as Karma browser | <ide><path>est/karma.conf.js
<ide> // - IE (only Windows)
<ide> browsers: [
<ide> 'PhantomJS'
<add> // 'Chrome'
<ide> ],
<ide>
<ide> // Which plugins to enable
<ide> plugins: [
<ide> 'karma-phantomjs-launcher',
<add> // 'karma-chrome-launcher',
<ide> 'karma-jasmine',
<ide> 'karma-coverage',
<ide> 'karma-junit-reporter' |
|
Java | agpl-3.0 | aaa1744add131e54d376e0a0e64b345540571013 | 0 | mnlipp/jgrapes,mnlipp/jgrapes | /*
* JGrapes Event Driven Framework
* Copyright (C) 2016-2018 Michael N. Lipp
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation; either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License
* for more details.
*
* You should have received a copy of the GNU Affero General Public License along
* with this program; if not, see <http://www.gnu.org/licenses/>.
*/
package org.jgrapes.core.internal;
import java.lang.reflect.Method;
import java.util.concurrent.atomic.AtomicLong;
import java.util.logging.Level;
import org.jgrapes.core.Channel;
import org.jgrapes.core.ComponentType;
import org.jgrapes.core.Components;
import org.jgrapes.core.HandlerScope;
import org.jgrapes.core.InvocationFilter;
/**
 * A variant of handler reference that provides better debug information (at
 * the cost of some CPU cycles).
*
*/
@SuppressWarnings("PMD.DataflowAnomalyAnalysis")
class VerboseHandlerReference extends HandlerReference {
private static AtomicLong invocationCounter = new AtomicLong(1);
private final ComponentType component;
private final String handlerName;
/**
 * @param component the component that owns the handler
 * @param method the handler method
 * @param priority the handler priority
 * @param filter the handler scope filter
*/
public VerboseHandlerReference(ComponentType component, Method method,
int priority, HandlerScope filter) {
super(component, method, priority, filter);
this.component = component;
handlerName = Components.objectName(component)
+ "." + method.getName();
}
/**
* Invoke the handler with the given event as parameter.
*
* @param event the event
*/
@Override
@SuppressWarnings("PMD.NcssCount")
public void invoke(EventBase<?> event) throws Throwable {
if (needsFiltering && !((InvocationFilter) filter).includes(event)) {
return;
}
if (component == ComponentTree.DUMMY_HANDLER) {
reportInvocation(event, false);
return;
}
long invocation;
switch (method.type().parameterCount()) {
case 0:
// No parameters
invocation = reportInvocation(event, false);
method.invoke();
reportResult(event, invocation);
break;
case 1:
// Event parameter
invocation = reportInvocation(event, false);
method.invoke(event);
reportResult(event, invocation);
break;
case 2:
// Event and channel
Class<?> channelParam = method.type().parameterType(1);
boolean handlerFound = false;
for (Channel channel : event.channels()) {
if (channelParam.isAssignableFrom(channel.getClass())) {
handlerFound = true;
invocation = reportInvocation(event, false);
method.invoke(event, channel);
reportResult(event, invocation);
}
}
if (!handlerFound) {
reportInvocation(event, true);
}
break;
default:
throw new IllegalStateException("Handle not usable");
}
}
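// The tracking line built below has the form "[<invocation>] P<pipelineId>: <event> >> <handler>";
// the "[<invocation>]" prefix is only emitted at FINEST level, and events reaching the
// dummy handler are logged as "<event> (unhandled)" instead of naming a handler.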
private long reportInvocation(EventBase<?> event, boolean noChannel) {
if (!event.isTrackable()) {
return 0;
}
long invocation = 0;
StringBuilder builder = new StringBuilder();
if (handlerTracking.isLoggable(Level.FINEST)) {
invocation = invocationCounter.getAndIncrement();
builder.append('[');
builder.append(Long.toString(invocation));
builder.append("] ");
}
builder.append('P')
.append(Components
.objectId(ComponentTree.currentPipeline()))
.append(": ")
.append(event);
if (component == ComponentTree.DUMMY_HANDLER) {
builder.append(" (unhandled)");
} else {
builder.append(" >> ");
if (noChannel) {
builder.append("No matching channels: ");
}
builder.append(this.toString());
}
String trackMsg = builder.toString();
handlerTracking.fine(trackMsg);
return invocation;
}
@SuppressWarnings("PMD.GuardLogStatement")
private void reportResult(EventBase<?> event, long invocation) {
if (!handlerTracking.isLoggable(Level.FINEST) || !event.isTrackable()) {
return;
}
StringBuilder builder = new StringBuilder();
builder.append("Result [")
.append(Long.toString(invocation))
.append("]: ")
.append(event.currentResults());
handlerTracking.fine(builder.toString());
}
@Override
protected String methodToString() {
return handlerName;
}
}
| org.jgrapes.core/src/org/jgrapes/core/internal/VerboseHandlerReference.java | /*
* JGrapes Event Driven Framework
* Copyright (C) 2016-2018 Michael N. Lipp
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation; either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License
* for more details.
*
* You should have received a copy of the GNU Affero General Public License along
* with this program; if not, see <http://www.gnu.org/licenses/>.
*/
package org.jgrapes.core.internal;
import java.lang.reflect.Method;
import java.util.concurrent.atomic.AtomicLong;
import java.util.logging.Level;
import org.jgrapes.core.Channel;
import org.jgrapes.core.ComponentType;
import org.jgrapes.core.Components;
import org.jgrapes.core.HandlerScope;
import org.jgrapes.core.InvocationFilter;
/**
 * A variant of handler reference that provides better debug information (at
 * the cost of some CPU cycles).
*
*/
@SuppressWarnings("PMD.DataflowAnomalyAnalysis")
class VerboseHandlerReference extends HandlerReference {
private static AtomicLong invocationCounter = new AtomicLong(1);
private final ComponentType component;
private final String handlerName;
/**
 * @param component the component that owns the handler
 * @param method the handler method
 * @param priority the handler priority
 * @param filter the handler scope filter
*/
public VerboseHandlerReference(ComponentType component, Method method,
int priority, HandlerScope filter) {
super(component, method, priority, filter);
this.component = component;
handlerName = Components.objectName(component)
+ "." + method.getName();
}
/**
* Invoke the handler with the given event as parameter.
*
* @param event the event
*/
@Override
@SuppressWarnings("PMD.NcssCount")
public void invoke(EventBase<?> event) throws Throwable {
if (needsFiltering && !((InvocationFilter) filter).includes(event)) {
return;
}
if (component == ComponentTree.DUMMY_HANDLER) {
reportInvocation(event, false);
return;
}
long invocation;
switch (method.type().parameterCount()) {
case 0:
// No parameters
invocation = reportInvocation(event, false);
method.invoke();
reportResult(event, invocation);
break;
case 1:
// Event parameter
invocation = reportInvocation(event, false);
method.invoke(event);
reportResult(event, invocation);
break;
case 2:
// Event and channel
Class<?> channelParam = method.type().parameterType(1);
boolean handlerFound = false;
for (Channel channel : event.channels()) {
if (channelParam.isAssignableFrom(channel.getClass())) {
handlerFound = true;
invocation = reportInvocation(event, false);
method.invoke(event, channel);
reportResult(event, invocation);
}
}
if (!handlerFound) {
reportInvocation(event, true);
}
break;
default:
throw new IllegalStateException("Handle not usable");
}
}
private long reportInvocation(EventBase<?> event, boolean noChannel) {
if (!event.isTrackable()) {
return 0;
}
long invocation = 0;
StringBuilder builder = new StringBuilder();
if (handlerTracking.isLoggable(Level.FINEST)) {
invocation = invocationCounter.getAndIncrement();
builder.append('[');
builder.append(Long.toString(invocation));
builder.append("] ");
}
builder.append('P')
.append(Components
.objectId(ComponentTree.currentPipeline()))
.append(": ")
.append(event);
if (component == ComponentTree.DUMMY_HANDLER) {
builder.append(" (unhandled)");
} else {
builder.append(" >> ");
if (noChannel) {
builder.append("No matching channels: ");
}
builder.append(this.toString());
}
String trackMsg = builder.toString();
handlerTracking.fine(trackMsg);
return invocation;
}
private void reportResult(EventBase<?> event, long invocation) {
if (!handlerTracking.isLoggable(Level.FINEST) || !event.isTrackable()) {
return;
}
StringBuilder builder = new StringBuilder();
builder.append("Result [")
.append(Long.toString(invocation))
.append("]: ")
.append(event.currentResults());
handlerTracking.fine(builder.toString());
}
@Override
protected String methodToString() {
return handlerName;
}
}
| Fix warning. | org.jgrapes.core/src/org/jgrapes/core/internal/VerboseHandlerReference.java | Fix warning. | <ide><path>rg.jgrapes.core/src/org/jgrapes/core/internal/VerboseHandlerReference.java
<ide> return invocation;
<ide> }
<ide>
<add> @SuppressWarnings("PMD.GuardLogStatement")
<ide> private void reportResult(EventBase<?> event, long invocation) {
<ide> if (!handlerTracking.isLoggable(Level.FINEST) || !event.isTrackable()) {
<ide> return; |
|
Java | apache-2.0 | b1e2ed970a403e4942048a7d5029241b0adb21f1 | 0 | allotria/intellij-community,allotria/intellij-community,allotria/intellij-community,allotria/intellij-community,allotria/intellij-community,allotria/intellij-community,allotria/intellij-community,allotria/intellij-community,allotria/intellij-community,allotria/intellij-community,allotria/intellij-community,allotria/intellij-community,allotria/intellij-community | /*
* Copyright 2000-2017 JetBrains s.r.o.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intellij.java.codeInsight.intention;
import com.intellij.JavaTestUtil;
import com.intellij.codeInsight.intention.IntentionAction;
import com.intellij.openapi.fileTypes.StdFileTypes;
import com.intellij.psi.codeStyle.CodeStyleSettingsManager;
import com.intellij.psi.codeStyle.JavaCodeStyleSettings;
import com.intellij.testFramework.fixtures.JavaCodeInsightFixtureTestCase;
public class AddSingleStaticImportActionTest extends JavaCodeInsightFixtureTestCase {
public void testInaccessible() {
myFixture.addClass("package foo; class Foo {public static void foo(){}}");
doTest("Add static import for 'impl.FooImpl.foo'");
}
public void testInsideParameterizedReference() {
myFixture.addClass("package foo; " +
"public class Class1 {" +
" public static class Inner1 {}\n" +
" public static class Inner2<T> {}" +
"}");
doTest("Add import for 'foo.Class1.Inner2'");
}
public void testWrongCandidateAfterImport() {
myFixture.addClass("package foo; class Empty {}"); //to ensure package is in the project
doTest("Add static import for 'foo.Test.X.test'");
}
public void testAllowStaticImportWhenAlreadyImported() {
myFixture.addClass("package foo; " +
"public class Clazz {\n" +
" public enum Foo{\n" +
" Const_1, Const_2\n" +
" }\n" +
"}");
doTest("Add import for 'foo.Clazz.Foo'");
}
public void testInsideParameterizedReferenceInsideParameterizedReference() {
myFixture.addClass("package foo; " +
"public class Class1 {" +
" public static class Inner1 {}\n" +
" public static class Inner2<T> {}" +
"}");
doTest("Add import for 'foo.Class1.Inner1'");
}
public void testDisabledInsideParameterizedReference() {
myFixture.addClass("package foo; " +
"public class Class1 {" +
" public static <T> T foo(){return null;}\n" +
"}");
myFixture.configureByFile(getTestName(false) + ".java");
final IntentionAction intentionAction = myFixture.getAvailableIntention("Add static import for 'foo.Class1.foo'");
assertNull(intentionAction);
}
public void testSkipSameNamedNonStaticReferences() {
myFixture.addClass("package foo;" +
"public class Clazz {" +
" public void print(String s) {}" +
" public static void print() {}" +
" public static void print(int i) {}" +
"}");
doTest("Add static import for 'foo.Clazz.print'");
}
public void testAllowSingleStaticImportWhenOnDemandImportOverloadedMethod() {
myFixture.addClass("package foo; class Foo {public static void foo(int i){}}");
myFixture.addClass("package foo; class Bar {public static void foo(String s){}}");
doTest("Add static import for 'foo.Bar.foo'");
}
public void testInvalidInput() {
myFixture.configureByText(StdFileTypes.JAVA, "class X {\n Character.\n" +
" Sub<caret>set\n}");
myFixture.getAvailableIntentions();
}
public void testSingleImportWhenConflictingWithOnDemand() {
myFixture.addClass("package foo; class Foo {public static void foo(int i){}}");
myFixture.addClass("package foo; class Bar {public static void foo(String s){}}");
JavaCodeStyleSettings settings = CodeStyleSettingsManager.getInstance(getProject()).getCurrentSettings().getCustomSettings(JavaCodeStyleSettings.class);
int old = settings.NAMES_COUNT_TO_USE_IMPORT_ON_DEMAND;
settings.NAMES_COUNT_TO_USE_IMPORT_ON_DEMAND = 1;
try {
doTest("Add static import for 'foo.Foo.foo'");
}
finally {
settings.NAMES_COUNT_TO_USE_IMPORT_ON_DEMAND = old;
}
}
public void testConflictingNamesInScope() {
myFixture.addClass("package foo; public class Assert {public static void assertTrue(boolean b) {}}");
myFixture.configureByFile(getTestName(false) + ".java");
IntentionAction intention = myFixture.getAvailableIntention("Add static import for 'foo.Assert.assertTrue'");
assertNull(intention);
}
public void testNonStaticInnerClassImport() {
myFixture.addClass("package foo; public class Foo {public class Bar {}}");
doTest("Add import for 'foo.Foo.Bar'");
}
public void testProhibitWhenMethodWithIdenticalSignatureAlreadyImportedFromAnotherClass() {
myFixture.addClass("package foo; class Foo {public static void foo(int i){}}");
myFixture.addClass("package foo; class Bar {public static void foo(int i){}}");
myFixture.configureByFile(getTestName(false) + ".java");
IntentionAction intention = myFixture.getAvailableIntention("Add static import for 'foo.Bar.foo'");
assertNull(intention);
}
public void testComment() {
doTest("Add static import for 'java.util.Arrays.asList'");
}
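/**
 * Shared fixture for the tests above: configures {@code <testName>.java}, applies the
 * single available intention with the given name and checks the result against
 * {@code <testName>_after.java}.
 */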
private void doTest(String intentionName) {
myFixture.configureByFile(getTestName(false) + ".java");
IntentionAction intention = myFixture.findSingleIntention(intentionName);
assertNotNull(intention);
myFixture.launchAction(intention);
myFixture.checkResultByFile(getTestName(false) + "_after.java");
}
@Override
protected String getTestDataPath() {
return JavaTestUtil.getJavaTestDataPath() + "/codeInsight/daemonCodeAnalyzer/quickFix/addSingleStaticImport";
}
}
| java/java-tests/testSrc/com/intellij/java/codeInsight/intention/AddSingleStaticImportActionTest.java | /*
* Copyright 2000-2017 JetBrains s.r.o.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intellij.java.codeInsight.intention;
import com.intellij.JavaTestUtil;
import com.intellij.codeInsight.intention.IntentionAction;
import com.intellij.openapi.fileTypes.StdFileTypes;
import com.intellij.psi.codeStyle.CodeStyleSettingsManager;
import com.intellij.psi.codeStyle.JavaCodeStyleSettings;
import com.intellij.testFramework.fixtures.JavaCodeInsightFixtureTestCase;
public class AddSingleStaticImportActionTest extends JavaCodeInsightFixtureTestCase {
public void testInaccessible() {
myFixture.addClass("package foo; class Foo {public static void foo(){}}");
myFixture.configureByFile(getTestName(false) + ".java");
final IntentionAction intentionAction = myFixture.findSingleIntention("Add static import for 'impl.FooImpl.foo'");
assertNotNull(intentionAction);
myFixture.launchAction(intentionAction);
myFixture.checkResultByFile(getTestName(false) + "_after.java");
}
public void testInsideParameterizedReference() {
myFixture.addClass("package foo; " +
"public class Class1 {" +
" public static class Inner1 {}\n" +
" public static class Inner2<T> {}" +
"}");
myFixture.configureByFile(getTestName(false) + ".java");
final IntentionAction intentionAction = myFixture.findSingleIntention("Add import for 'foo.Class1.Inner2'");
assertNotNull(intentionAction);
myFixture.launchAction(intentionAction);
myFixture.checkResultByFile(getTestName(false) + "_after.java");
}
public void testWrongCandidateAfterImport() {
myFixture.addClass("package foo; class Empty {}"); //to ensure package is in the project
myFixture.configureByFile(getTestName(false) + ".java");
final IntentionAction intentionAction = myFixture.findSingleIntention("Add static import for 'foo.Test.X.test'");
assertNotNull(intentionAction);
myFixture.launchAction(intentionAction);
myFixture.checkResultByFile(getTestName(false) + "_after.java");
}
public void testAllowStaticImportWhenAlreadyImported() {
myFixture.addClass("package foo; " +
"public class Clazz {\n" +
" public enum Foo{\n" +
" Const_1, Const_2\n" +
" }\n" +
"}");
myFixture.configureByFile(getTestName(false) + ".java");
final IntentionAction intentionAction = myFixture.findSingleIntention("Add import for 'foo.Clazz.Foo'");
assertNotNull(intentionAction);
myFixture.launchAction(intentionAction);
myFixture.checkResultByFile(getTestName(false) + "_after.java");
}
public void testInsideParameterizedReferenceInsideParameterizedReference() {
myFixture.addClass("package foo; " +
"public class Class1 {" +
" public static class Inner1 {}\n" +
" public static class Inner2<T> {}" +
"}");
myFixture.configureByFile(getTestName(false) + ".java");
final IntentionAction intentionAction = myFixture.findSingleIntention("Add import for 'foo.Class1.Inner1'");
assertNotNull(intentionAction);
myFixture.launchAction(intentionAction);
myFixture.checkResultByFile(getTestName(false) + "_after.java");
}
public void testDisabledInsideParameterizedReference() {
myFixture.addClass("package foo; " +
"public class Class1 {" +
" public static <T> T foo(){return null;}\n" +
"}");
myFixture.configureByFile(getTestName(false) + ".java");
final IntentionAction intentionAction = myFixture.getAvailableIntention("Add static import for 'foo.Class1.foo'");
assertNull(intentionAction);
}
public void testSkipSameNamedNonStaticReferences() {
myFixture.addClass("package foo;" +
"public class Clazz {" +
" public void print(String s) {}" +
" public static void print() {}" +
" public static void print(int i) {}" +
"}");
myFixture.configureByFile(getTestName(false) + ".java");
IntentionAction intention = myFixture.findSingleIntention("Add static import for 'foo.Clazz.print'");
assertNotNull(intention);
myFixture.launchAction(intention);
myFixture.checkResultByFile(getTestName(false) + "_after.java");
}
public void testAllowSingleStaticImportWhenOnDemandImportOverloadedMethod() {
myFixture.addClass("package foo; class Foo {public static void foo(int i){}}");
myFixture.addClass("package foo; class Bar {public static void foo(String s){}}");
myFixture.configureByFile(getTestName(false) + ".java");
IntentionAction intention = myFixture.findSingleIntention("Add static import for 'foo.Bar.foo'");
assertNotNull(intention);
myFixture.launchAction(intention);
myFixture.checkResultByFile(getTestName(false) + "_after.java");
}
public void testInvalidInput() {
myFixture.configureByText(StdFileTypes.JAVA, "class X {\n Character.\n" +
" Sub<caret>set\n}");
myFixture.getAvailableIntentions();
}
public void testSingleImportWhenConflictingWithOnDemand() {
myFixture.addClass("package foo; class Foo {public static void foo(int i){}}");
myFixture.addClass("package foo; class Bar {public static void foo(String s){}}");
myFixture.configureByFile(getTestName(false) + ".java");
JavaCodeStyleSettings settings = CodeStyleSettingsManager.getInstance(getProject()).getCurrentSettings().getCustomSettings(JavaCodeStyleSettings.class);
int old = settings.NAMES_COUNT_TO_USE_IMPORT_ON_DEMAND;
settings.NAMES_COUNT_TO_USE_IMPORT_ON_DEMAND = 1;
try {
IntentionAction intention = myFixture.findSingleIntention("Add static import for 'foo.Foo.foo'");
assertNotNull(intention);
myFixture.launchAction(intention);
myFixture.checkResultByFile(getTestName(false) + "_after.java");
}
finally {
settings.NAMES_COUNT_TO_USE_IMPORT_ON_DEMAND = old;
}
}
public void testConflictingNamesInScope() {
myFixture.addClass("package foo; public class Assert {public static void assertTrue(boolean b) {}}");
myFixture.configureByFile(getTestName(false) + ".java");
IntentionAction intention = myFixture.getAvailableIntention("Add static import for 'foo.Assert.assertTrue'");
assertNull(intention);
}
public void testNonStaticInnerClassImport() {
myFixture.addClass("package foo; public class Foo {public class Bar {}}");
myFixture.configureByFile(getTestName(false) + ".java");
IntentionAction intention = myFixture.getAvailableIntention("Add import for 'foo.Foo.Bar'");
assertNotNull(intention);
myFixture.launchAction(intention);
myFixture.checkResultByFile(getTestName(false) + "_after.java");
}
public void testProhibitWhenMethodWithIdenticalSignatureAlreadyImportedFromAnotherClass() {
myFixture.addClass("package foo; class Foo {public static void foo(int i){}}");
myFixture.addClass("package foo; class Bar {public static void foo(int i){}}");
myFixture.configureByFile(getTestName(false) + ".java");
IntentionAction intention = myFixture.getAvailableIntention("Add static import for 'foo.Bar.foo'");
assertNull(intention);
}
public void testComment() {
myFixture.configureByFile(getTestName(false) + ".java");
IntentionAction intention = myFixture.getAvailableIntention("Add static import for 'java.util.Arrays.asList'");
assertNotNull(intention);
myFixture.launchAction(intention);
myFixture.checkResultByFile(getTestName(false) + "_after.java");
}
@Override
protected String getTestDataPath() {
return JavaTestUtil.getJavaTestDataPath() + "/codeInsight/daemonCodeAnalyzer/quickFix/addSingleStaticImport";
}
}
| AddSingleStaticImportActionTest: common code extracted
| java/java-tests/testSrc/com/intellij/java/codeInsight/intention/AddSingleStaticImportActionTest.java | AddSingleStaticImportActionTest: common code extracted | <ide><path>ava/java-tests/testSrc/com/intellij/java/codeInsight/intention/AddSingleStaticImportActionTest.java
<ide>
<ide> public void testInaccessible() {
<ide> myFixture.addClass("package foo; class Foo {public static void foo(){}}");
<del> myFixture.configureByFile(getTestName(false) + ".java");
<del>
<del> final IntentionAction intentionAction = myFixture.findSingleIntention("Add static import for 'impl.FooImpl.foo'");
<del> assertNotNull(intentionAction);
<del> myFixture.launchAction(intentionAction);
<del> myFixture.checkResultByFile(getTestName(false) + "_after.java");
<add> doTest("Add static import for 'impl.FooImpl.foo'");
<ide> }
<ide>
<ide> public void testInsideParameterizedReference() {
<ide> " public static class Inner1 {}\n" +
<ide> " public static class Inner2<T> {}" +
<ide> "}");
<del> myFixture.configureByFile(getTestName(false) + ".java");
<del>
<del> final IntentionAction intentionAction = myFixture.findSingleIntention("Add import for 'foo.Class1.Inner2'");
<del> assertNotNull(intentionAction);
<del> myFixture.launchAction(intentionAction);
<del> myFixture.checkResultByFile(getTestName(false) + "_after.java");
<add> doTest("Add import for 'foo.Class1.Inner2'");
<ide> }
<ide>
<ide> public void testWrongCandidateAfterImport() {
<ide> myFixture.addClass("package foo; class Empty {}"); //to ensure package is in the project
<del> myFixture.configureByFile(getTestName(false) + ".java");
<del>
<del> final IntentionAction intentionAction = myFixture.findSingleIntention("Add static import for 'foo.Test.X.test'");
<del> assertNotNull(intentionAction);
<del> myFixture.launchAction(intentionAction);
<del> myFixture.checkResultByFile(getTestName(false) + "_after.java");
<add> doTest("Add static import for 'foo.Test.X.test'");
<ide> }
<ide>
<ide> public void testAllowStaticImportWhenAlreadyImported() {
<ide> " Const_1, Const_2\n" +
<ide> " }\n" +
<ide> "}");
<del> myFixture.configureByFile(getTestName(false) + ".java");
<del>
<del> final IntentionAction intentionAction = myFixture.findSingleIntention("Add import for 'foo.Clazz.Foo'");
<del> assertNotNull(intentionAction);
<del> myFixture.launchAction(intentionAction);
<del> myFixture.checkResultByFile(getTestName(false) + "_after.java");
<add> doTest("Add import for 'foo.Clazz.Foo'");
<ide> }
<ide>
<ide> public void testInsideParameterizedReferenceInsideParameterizedReference() {
<ide> " public static class Inner1 {}\n" +
<ide> " public static class Inner2<T> {}" +
<ide> "}");
<del> myFixture.configureByFile(getTestName(false) + ".java");
<del>
<del> final IntentionAction intentionAction = myFixture.findSingleIntention("Add import for 'foo.Class1.Inner1'");
<del> assertNotNull(intentionAction);
<del> myFixture.launchAction(intentionAction);
<del> myFixture.checkResultByFile(getTestName(false) + "_after.java");
<add> doTest("Add import for 'foo.Class1.Inner1'");
<ide> }
<ide>
<ide> public void testDisabledInsideParameterizedReference() {
<ide> " public static void print() {}" +
<ide> " public static void print(int i) {}" +
<ide> "}");
<del> myFixture.configureByFile(getTestName(false) + ".java");
<del> IntentionAction intention = myFixture.findSingleIntention("Add static import for 'foo.Clazz.print'");
<del> assertNotNull(intention);
<del> myFixture.launchAction(intention);
<del> myFixture.checkResultByFile(getTestName(false) + "_after.java");
<add> doTest("Add static import for 'foo.Clazz.print'");
<ide> }
<ide>
<ide> public void testAllowSingleStaticImportWhenOnDemandImportOverloadedMethod() {
<ide> myFixture.addClass("package foo; class Foo {public static void foo(int i){}}");
<ide> myFixture.addClass("package foo; class Bar {public static void foo(String s){}}");
<del> myFixture.configureByFile(getTestName(false) + ".java");
<del>
<del> IntentionAction intention = myFixture.findSingleIntention("Add static import for 'foo.Bar.foo'");
<del> assertNotNull(intention);
<del> myFixture.launchAction(intention);
<del> myFixture.checkResultByFile(getTestName(false) + "_after.java");
<add> doTest("Add static import for 'foo.Bar.foo'");
<ide> }
<ide>
<ide> public void testInvalidInput() {
<ide> public void testSingleImportWhenConflictingWithOnDemand() {
<ide> myFixture.addClass("package foo; class Foo {public static void foo(int i){}}");
<ide> myFixture.addClass("package foo; class Bar {public static void foo(String s){}}");
<del> myFixture.configureByFile(getTestName(false) + ".java");
<ide>
<ide> JavaCodeStyleSettings settings = CodeStyleSettingsManager.getInstance(getProject()).getCurrentSettings().getCustomSettings(JavaCodeStyleSettings.class);
<ide> int old = settings.NAMES_COUNT_TO_USE_IMPORT_ON_DEMAND;
<ide> settings.NAMES_COUNT_TO_USE_IMPORT_ON_DEMAND = 1;
<ide> try {
<del> IntentionAction intention = myFixture.findSingleIntention("Add static import for 'foo.Foo.foo'");
<del> assertNotNull(intention);
<del> myFixture.launchAction(intention);
<del> myFixture.checkResultByFile(getTestName(false) + "_after.java");
<add> doTest("Add static import for 'foo.Foo.foo'");
<ide> }
<ide> finally {
<ide> settings.NAMES_COUNT_TO_USE_IMPORT_ON_DEMAND = old;
<ide>
<ide> public void testNonStaticInnerClassImport() {
<ide> myFixture.addClass("package foo; public class Foo {public class Bar {}}");
<del> myFixture.configureByFile(getTestName(false) + ".java");
<del> IntentionAction intention = myFixture.getAvailableIntention("Add import for 'foo.Foo.Bar'");
<del> assertNotNull(intention);
<del> myFixture.launchAction(intention);
<del> myFixture.checkResultByFile(getTestName(false) + "_after.java");
<add> doTest("Add import for 'foo.Foo.Bar'");
<ide> }
<ide>
<ide> public void testProhibitWhenMethodWithIdenticalSignatureAlreadyImportedFromAnotherClass() {
<ide> }
<ide>
<ide> public void testComment() {
<add> doTest("Add static import for 'java.util.Arrays.asList'");
<add> }
<add>
<add> private void doTest(String intentionName) {
<ide> myFixture.configureByFile(getTestName(false) + ".java");
<del> IntentionAction intention = myFixture.getAvailableIntention("Add static import for 'java.util.Arrays.asList'");
<add> IntentionAction intention = myFixture.findSingleIntention(intentionName);
<ide> assertNotNull(intention);
<ide> myFixture.launchAction(intention);
<ide> myFixture.checkResultByFile(getTestName(false) + "_after.java"); |
|
Java | apache-2.0 | 343887e4bf388dc3f8516bb635b0719d3edcbe94 | 0 | gk-brown/WebRPC,gk-brown/WebRPC,gk-brown/WebRPC,gk-brown/WebRPC,gk-brown/WebRPC,gk-brown/HTTP-RPC,gk-brown/HTTP-RPC,gk-brown/WebRPC,gk-brown/HTTP-RPC | /*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.httprpc.beans;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.util.AbstractList;
import java.util.AbstractMap;
import java.util.AbstractSet;
import java.util.Date;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;
/**
* Class that exposes the properties of a Java Bean object as a map.
*/
public class BeanAdapter extends AbstractMap<String, Object> {
// List adapter
private static class ListAdapter extends AbstractList<Object> {
private List<Object> list;
public ListAdapter(List<Object> list) {
this.list = list;
}
@Override
public Object get(int index) {
return adapt(list.get(index));
}
@Override
public int size() {
return list.size();
}
}
// Map adapter
private static class MapAdapter extends AbstractMap<Object, Object> {
private Map<Object, Object> map;
private Set<Entry<Object, Object>> entrySet = new AbstractSet<Entry<Object, Object>>() {
@Override
public int size() {
return map.size();
}
@Override
public Iterator<Entry<Object, Object>> iterator() {
return new Iterator<Entry<Object, Object>>() {
private Iterator<Entry<Object, Object>> iterator = map.entrySet().iterator();
@Override
public boolean hasNext() {
return iterator.hasNext();
}
@Override
public Entry<Object, Object> next() {
return new Entry<Object, Object>() {
private Entry<Object, Object> entry = iterator.next();
@Override
public Object getKey() {
return entry.getKey();
}
@Override
public Object getValue() {
return adapt(entry.getValue());
}
@Override
public Object setValue(Object value) {
throw new UnsupportedOperationException();
}
};
}
};
}
};
public MapAdapter(Map<Object, Object> map) {
this.map = map;
}
@Override
public Set<Entry<Object, Object>> entrySet() {
return entrySet;
}
}
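// Note: both adapters above are lazy; list elements and map values are passed through
// adapt() on access rather than being converted up front.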
private Object bean;
private HashMap<String, Method> accessors = new HashMap<>();
private Set<Entry<String, Object>> entrySet = new AbstractSet<Entry<String, Object>>() {
@Override
public int size() {
return accessors.size();
}
@Override
public Iterator<Entry<String, Object>> iterator() {
return new Iterator<Entry<String, Object>>() {
private Iterator<String> keys = accessors.keySet().iterator();
@Override
public boolean hasNext() {
return keys.hasNext();
}
@Override
public Entry<String, Object> next() {
String key = keys.next();
return new SimpleImmutableEntry<>(key, get(key));
}
};
}
};
private static final String GET_PREFIX = "get";
private static final String IS_PREFIX = "is";
/**
* Constructs a new Bean adapter.
*
* @param bean
* The source Bean.
*/
public BeanAdapter(Object bean) {
if (bean == null) {
throw new IllegalArgumentException();
}
this.bean = bean;
Class<?> type = bean.getClass();
Method[] methods = type.getMethods();
for (int i = 0; i < methods.length; i++) {
Method method = methods[i];
if (type.isAssignableFrom(method.getDeclaringClass())) {
String methodName = method.getName();
String prefix;
if (methodName.startsWith(GET_PREFIX)) {
prefix = GET_PREFIX;
} else if (methodName.startsWith(IS_PREFIX)) {
prefix = IS_PREFIX;
} else {
prefix = null;
}
if (prefix != null) {
int j = prefix.length();
int n = methodName.length();
if (j < n && method.getParameterCount() == 0) {
char c = methodName.charAt(j++);
if (j == n || Character.isLowerCase(methodName.charAt(j))) {
c = Character.toLowerCase(c);
}
String key = c + methodName.substring(j);
accessors.put(key, method);
}
}
}
}
}
@Override
public Object get(Object key) {
if (key == null) {
throw new IllegalArgumentException();
}
Method method = accessors.get(key);
Object value;
if (method != null) {
try {
value = adapt(method.invoke(bean));
} catch (InvocationTargetException | IllegalAccessException exception) {
throw new RuntimeException(exception);
}
} else {
value = null;
}
return value;
}
@Override
public Set<Entry<String, Object>> entrySet() {
return entrySet;
}
/**
* Adapts a value. If the value is <tt>null</tt> or an instance of one of
* the following types, it is returned as-is:
* <ul>
* <li>{@link String}</li>
* <li>{@link Number}</li>
* <li>{@link Boolean}</li>
* </ul>
* If the value is a {@link Date}, it is converted to its numeric
* representation via {@link Date#getTime()}. If the value is a
* {@link List}, it is wrapped in an adapter that will adapt the list's
* elements. If the value is a {@link Map}, it is wrapped in an adapter
* that will adapt the map's values. Otherwise, the value is considered a
* nested Bean and is wrapped in a Bean adapter.
*
* @param <T> The expected type of the adapted value.
*
* @param value
* The value to adapt.
*
* @return
* The adapted value.
*/
@SuppressWarnings("unchecked")
public static <T> T adapt(Object value) {
if (value != null && !(value instanceof String || value instanceof Number || value instanceof Boolean)) {
if (value instanceof Date) {
value = ((Date)value).getTime();
} else if (value instanceof List<?>) {
value = new ListAdapter((List<Object>)value);
} else if (value instanceof Map<?, ?>) {
value = new MapAdapter((Map<Object, Object>)value);
} else {
value = new BeanAdapter(value);
}
}
return (T)value;
}
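// Illustrative usage, assuming a hypothetical bean class (not part of this library):
//
// public class Person {
// public String getName() { return "Alice"; }
// public Date getBirthday() { return new Date(0); }
// }
//
// Map<String, Object> person = new BeanAdapter(new Person());
// person.get("name"); // "Alice"
// person.get("birthday"); // 0L, converted via Date#getTime()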
}
| Server/Java/httprpc-server/src/org/httprpc/beans/BeanAdapter.java | /*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.httprpc.beans;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.util.AbstractList;
import java.util.AbstractMap;
import java.util.AbstractSet;
import java.util.Date;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;
/**
* Class that exposes the properties of a Java Bean object as a map.
*/
public class BeanAdapter extends AbstractMap<String, Object> {
// List adapter
private static class ListAdapter extends AbstractList<Object> {
private List<Object> list;
public ListAdapter(List<Object> list) {
this.list = list;
}
@Override
public Object get(int index) {
return adapt(list.get(index));
}
@Override
public int size() {
return list.size();
}
}
// Map adapter
private static class MapAdapter extends AbstractMap<Object, Object> {
private Map<Object, Object> map;
private Set<Entry<Object, Object>> entrySet = new AbstractSet<Entry<Object, Object>>() {
@Override
public int size() {
return map.size();
}
@Override
public Iterator<Entry<Object, Object>> iterator() {
return new Iterator<Entry<Object, Object>>() {
private Iterator<Entry<Object, Object>> iterator = map.entrySet().iterator();
@Override
public boolean hasNext() {
return iterator.hasNext();
}
@Override
public Entry<Object, Object> next() {
return new Entry<Object, Object>() {
private Entry<Object, Object> entry = iterator.next();
@Override
public Object getKey() {
return entry.getKey();
}
@Override
public Object getValue() {
return adapt(entry.getValue());
}
@Override
public Object setValue(Object value) {
throw new UnsupportedOperationException();
}
};
}
};
}
};
public MapAdapter(Map<Object, Object> map) {
this.map = map;
}
@Override
public Set<Entry<Object, Object>> entrySet() {
return entrySet;
}
}
private Object bean;
private HashMap<String, Method> accessors = new HashMap<>();
private Set<Entry<String, Object>> entrySet = new AbstractSet<Entry<String, Object>>() {
@Override
public int size() {
return accessors.size();
}
@Override
public Iterator<Entry<String, Object>> iterator() {
return new Iterator<Entry<String, Object>>() {
private Iterator<String> keys = accessors.keySet().iterator();
@Override
public boolean hasNext() {
return keys.hasNext();
}
@Override
public Entry<String, Object> next() {
String key = keys.next();
return new SimpleImmutableEntry<>(key, get(key));
}
};
}
};
private static final String GET_PREFIX = "get";
private static final String IS_PREFIX = "is";
/**
* Constructs a new Bean adapter.
*
* @param bean
* The source Bean.
*/
public BeanAdapter(Object bean) {
if (bean == null) {
throw new IllegalArgumentException();
}
this.bean = bean;
Class<?> type = bean.getClass();
Method[] methods = type.getMethods();
for (int i = 0; i < methods.length; i++) {
Method method = methods[i];
if (type.isAssignableFrom(method.getDeclaringClass())) {
String methodName = method.getName();
String prefix;
if (methodName.startsWith(GET_PREFIX)) {
prefix = GET_PREFIX;
} else if (methodName.startsWith(IS_PREFIX)) {
prefix = IS_PREFIX;
} else {
prefix = null;
}
if (prefix != null) {
int j = prefix.length();
int n = methodName.length();
if (j < n && method.getParameterCount() == 0) {
char c = methodName.charAt(j++);
if (j == n || Character.isLowerCase(methodName.charAt(j))) {
c = Character.toLowerCase(c);
}
String key = c + methodName.substring(j);
accessors.put(key, method);
}
}
}
}
}
@Override
public Object get(Object key) {
if (key == null) {
throw new IllegalArgumentException();
}
Method method = accessors.get(key);
Object value;
try {
value = method.invoke(bean);
} catch (InvocationTargetException | IllegalAccessException exception) {
throw new RuntimeException(exception);
}
return adapt(value);
}
@Override
public Set<Entry<String, Object>> entrySet() {
return entrySet;
}
/**
* Adapts a value. If the value is <tt>null</tt> or an instance of one of
* the following types, it is returned as-is:
* <ul>
* <li>{@link String}</li>
* <li>{@link Number}</li>
* <li>{@link Boolean}</li>
* </ul>
* If the value is a {@link Date}, it is converted to its numeric
* representation via {@link Date#getTime()}. If the value is a
* {@link List}, it is wrapped in an adapter that will adapt the list's
* elements. If the value is a {@link Map}, it is wrapped in an adapter
* that will adapt the map's values. Otherwise, the value is considered a
* nested Bean and is wrapped in a Bean adapter.
*
* @param <T> The expected type of the adapted value.
*
* @param value
* The value to adapt.
*
* @return
* The adapted value.
*/
@SuppressWarnings("unchecked")
public static <T> T adapt(Object value) {
if (value != null && !(value instanceof String || value instanceof Number || value instanceof Boolean)) {
if (value instanceof Date) {
value = ((Date)value).getTime();
} else if (value instanceof List<?>) {
value = new ListAdapter((List<Object>)value);
} else if (value instanceof Map<?, ?>) {
value = new MapAdapter((Map<Object, Object>)value);
} else {
value = new BeanAdapter(value);
}
}
return (T)value;
}
}
| Resolve exception in BeanAdapter when get() is called with an invalid property name.
| Server/Java/httprpc-server/src/org/httprpc/beans/BeanAdapter.java | Resolve exception in BeanAdapter when get() is called with an invalid property name. | <ide><path>erver/Java/httprpc-server/src/org/httprpc/beans/BeanAdapter.java
<ide> Method method = accessors.get(key);
<ide>
<ide> Object value;
<del> try {
<del> value = method.invoke(bean);
<del> } catch (InvocationTargetException | IllegalAccessException exception) {
<del> throw new RuntimeException(exception);
<del> }
<del>
<del> return adapt(value);
<add> if (method != null) {
<add> try {
<add> value = adapt(method.invoke(bean));
<add> } catch (InvocationTargetException | IllegalAccessException exception) {
<add> throw new RuntimeException(exception);
<add> }
<add> } else {
<add> value = null;
<add> }
<add>
<add> return value;
<ide> }
<ide>
<ide> @Override |
|
Java | lgpl-2.1 | 2a5cd766318e29c84bd223a46a272248d5d7c322 | 0 | jzuijlek/Lucee4,jzuijlek/Lucee4,jzuijlek/Lucee4,jzuijlek/Lucee4,jzuijlek/Lucee4 | /**
*
* Copyright (c) 2014, the Railo Company Ltd. All rights reserved.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library. If not, see <http://www.gnu.org/licenses/>.
*
**/
package lucee.runtime.interpreter.ref.literal;
import lucee.runtime.PageContext;
import lucee.runtime.exp.PageException;
import lucee.runtime.interpreter.ref.Ref;
import lucee.runtime.interpreter.ref.util.RefUtil;
import lucee.runtime.op.Caster;
/**
* Literal Number
*/
public final class LNumber implements Literal {
public static final LNumber ZERO = new LNumber(new Double(0));
public static final LNumber ONE = new LNumber(new Double(1));
private Object literal;
/**
* constructor of the class
* @param literal
*/
public LNumber(Double literal) {
this.literal=literal;
}
/**
* constructor of the class
* @param literal
* @throws PageException
*/
public LNumber(String literal) throws PageException {
this.literal=Caster.toDouble(literal);
	// in theory this filter (>10) does not really make sense, it is just better for performance!!!
if(literal.length()>10) {
if(!Caster.toString(this.literal).equals(literal))
this.literal=literal;
}
}
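	// Illustrative example (not part of the original source): a literal such as
	// "3.141592653589793238462643" cannot round-trip through double, so
	// Caster.toString(this.literal) no longer equals the original text and the
	// constructor above keeps the String form instead.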
@Override
public Object getValue(PageContext pc) {
return literal;
}
@Override
public Object getCollection(PageContext pc) {
return getValue(pc);
}
@Override
public String getTypeName() {
return "number";
}
@Override
public Object touchValue(PageContext pc) {
return getValue(pc);
}
@Override
public String getString(PageContext pc) {
return toString();
}
@Override
public String toString() {
return literal instanceof String?(String)literal:Caster.toString((Double)literal);
}
@Override
public boolean eeq(PageContext pc,Ref other) throws PageException {
if(other instanceof LNumber){
if(literal instanceof Double) {
// Double|Double
if(((LNumber)other).literal instanceof Double) {
return ((Double)literal).doubleValue()==((Double)((LNumber)other).literal).doubleValue();
}
// Double|String
return Caster.toString(((Double)literal).doubleValue()).equals(((LNumber)other).literal);
}
// String|Double
if(((LNumber)other).literal instanceof Double) {
return ((String)literal).equals(Caster.toString(((Double)((LNumber)other).literal).doubleValue()));
}
// String|String
return ((String)literal).equals((((LNumber)other).literal));
}
return RefUtil.eeq(pc,this,other);
}
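	// Illustrative note (not part of the original source): once one side keeps
	// its String form, eeq compares string representations, converting a
	// Double-backed side via Caster.toString, so exact equality then requires
	// the textual forms to match.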
}
| lucee-java/lucee-core/src/lucee/runtime/interpreter/ref/literal/LNumber.java | /**
*
* Copyright (c) 2014, the Railo Company Ltd. All rights reserved.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library. If not, see <http://www.gnu.org/licenses/>.
*
**/
package lucee.runtime.interpreter.ref.literal;
import lucee.runtime.PageContext;
import lucee.runtime.exp.PageException;
import lucee.runtime.interpreter.ref.Ref;
import lucee.runtime.interpreter.ref.util.RefUtil;
import lucee.runtime.op.Caster;
/**
* Literal Number
*/
public final class LNumber implements Literal {
public static final LNumber ZERO = new LNumber(new Double(0));
public static final LNumber ONE = new LNumber(new Double(1));
private Double literal;
/**
* constructor of the class
* @param literal
*/
public LNumber(Double literal) {
this.literal=literal;
}
/**
* constructor of the class
* @param literal
* @throws PageException
*/
public LNumber(String literal) throws PageException {
this.literal=Caster.toDouble(literal);
}
@Override
public Object getValue(PageContext pc) {
return literal;
}
@Override
public Object getCollection(PageContext pc) {
return getValue(pc);
}
@Override
public String getTypeName() {
return "number";
}
@Override
public Object touchValue(PageContext pc) {
return getValue(pc);
}
@Override
public String getString(PageContext pc) {
return toString();
}
@Override
public String toString() {
return Caster.toString(literal.doubleValue());
}
@Override
public boolean eeq(PageContext pc,Ref other) throws PageException {
if(other instanceof LNumber){
return literal.doubleValue()==((LNumber)other).literal.doubleValue();
}
// TODO Auto-generated method stub
return RefUtil.eeq(pc,this,other);
}
}
 | Recognize if JSON parsing leads to an invalid number and keep it as a string in this case
 | lucee-java/lucee-core/src/lucee/runtime/interpreter/ref/literal/LNumber.java | Recognize if JSON parsing leads to an invalid number and keep it as a string in this case | 
<ide>
<ide>
<ide>
<del> private Double literal;
<add> private Object literal;
<ide>
<ide> /**
<ide> * constructor of the class
<ide> * @throws PageException
<ide> */
<ide> public LNumber(String literal) throws PageException {
<del> this.literal=Caster.toDouble(literal);
<add> this.literal=Caster.toDouble(literal);
<add> // in theory this filter (>10) does not really make sense, it is just better for performance!!!
<add> if(literal.length()>10) {
<add> if(!Caster.toString(this.literal).equals(literal))
<add> this.literal=literal;
<add> }
<ide> }
<ide>
<ide> @Override
<ide>
<ide> @Override
<ide> public String toString() {
<del> return Caster.toString(literal.doubleValue());
<add> return literal instanceof String?(String)literal:Caster.toString((Double)literal);
<ide> }
<ide>
<ide> @Override
<ide> public boolean eeq(PageContext pc,Ref other) throws PageException {
<del> if(other instanceof LNumber){
<del> return literal.doubleValue()==((LNumber)other).literal.doubleValue();
<add> if(other instanceof LNumber){
<add> if(literal instanceof Double) {
<add> // Double|Double
<add> if(((LNumber)other).literal instanceof Double) {
<add> return ((Double)literal).doubleValue()==((Double)((LNumber)other).literal).doubleValue();
<add> }
<add> // Double|String
<add> return Caster.toString(((Double)literal).doubleValue()).equals(((LNumber)other).literal);
<add> }
<add> // String|Double
<add> if(((LNumber)other).literal instanceof Double) {
<add> return ((String)literal).equals(Caster.toString(((Double)((LNumber)other).literal).doubleValue()));
<add> }
<add> // String|String
<add> return ((String)literal).equals((((LNumber)other).literal));
<add>
<ide> }
<del> // TODO Auto-generated method stub
<ide> return RefUtil.eeq(pc,this,other);
<ide> }
<ide> } |
|
Java | bsd-3-clause | 554a95db984e2bf663c5f8d7ea9ea7424aedb8ed | 0 | muloem/xins,muloem/xins,muloem/xins | /*
* $Id$
*/
package org.xins.client;
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.util.Stack;
import javax.xml.parsers.SAXParserFactory;
import javax.xml.parsers.SAXParser;
import org.xml.sax.Attributes;
import org.xml.sax.SAXException;
import org.xml.sax.helpers.DefaultHandler;
import org.xins.common.MandatoryArgumentChecker;
import org.xins.common.Utils;
import org.xins.common.collections.PropertyReader;
import org.xins.common.collections.ProtectedPropertyReader;
import org.xins.common.text.FastStringBuffer;
import org.xins.common.text.ParseException;
import org.xins.common.text.TextUtils;
/**
* XINS call result parser. XML is parsed to produce a {@link XINSCallResult}
* object.
*
* <p>The root element in the XML must be of type <code>result</code>. Inside
* this element, <code>param</code> elements optionally define parameters and
* an optional <code>data</code> element defines a data section.
*
* <p>If the result element contains an <code>errorcode</code> or a
* <code>code</code> attribute, then the value of the attribute is interpreted
* as the error code. If both these attributes are set and conflicting, then
* this is considered a showstopper.
*
* <p>TODO: Describe rest of parse process.
*
* <p>Note: This parser is
* <a href="http://www.w3.org/TR/REC-xml-names/">XML Namespaces</a>-aware.
*
* @version $Revision$ $Date$
*
* @author Anthony Goubard (<a href="mailto:[email protected]">[email protected]</a>)
* @author Ernst de Haan (<a href="mailto:[email protected]">[email protected]</a>)
*
* @since XINS 1.0.0
*/
public class XINSCallResultParser
extends Object {
//-------------------------------------------------------------------------
// Class fields
//-------------------------------------------------------------------------
/**
* Fully-qualified name of this class. This field is not <code>null</code>.
*/
private static final String CLASSNAME = XINSCallResultParser.class.getName();
/**
* Fully-qualified name of the inner class <code>Handler</code>. This field
* is not <code>null</code>.
*/
private static final String HANDLER_CLASSNAME = XINSCallResultParser.Handler.class.getName();
/**
* The key for the <code>ProtectedPropertyReader</code> instances created
* by this class.
*/
private static final Object PROTECTION_KEY = new Object();
/**
* Error state for the SAX event handler.
*/
private static final State ERROR = new State("ERROR");
/**
* Initial state for the SAX event handler, before the root element is
* processed.
*/
private static final State INITIAL = new State("INITIAL");
/**
* State for the SAX event handler just within the root element
* (<code>result</code>).
*/
private static final State AT_ROOT_LEVEL = new State("AT_ROOT_LEVEL");
/**
* State for the SAX event handler at any depth within an ignorable
* element.
*/
private static final State IN_IGNORABLE_ELEMENT = new State("IN_IGNORABLE_ELEMENT");
/**
* State for the SAX event handler within the output parameter element
* (<code>param</code>).
*/
private static final State IN_PARAM_ELEMENT = new State("IN_PARAM_ELEMENT");
/**
* State for the SAX event handler in the data section (at any depth within
* the <code>data</code> element).
*/
private static final State IN_DATA_SECTION = new State("IN_DATA_SECTION");
/**
* State for the SAX event handler for the final state, when parsing is
* finished.
*/
private static final State FINISHED = new State("FINISHED");
/**
* The factory for SAX parsers. This field is never <code>null</code>, it
* is initialized by a class initializer.
*/
private static final SAXParserFactory SAX_PARSER_FACTORY;
//-------------------------------------------------------------------------
// Class functions
//-------------------------------------------------------------------------
/**
* Initializes this class.
*/
static {
SAX_PARSER_FACTORY = SAXParserFactory.newInstance();
SAX_PARSER_FACTORY.setNamespaceAware(true);
}
//-------------------------------------------------------------------------
// Constructors
//-------------------------------------------------------------------------
/**
* Constructs a new <code>XINSCallResultParser</code>.
*/
public XINSCallResultParser() {
// TRACE: Enter constructor
org.xins.common.Log.log_1000(CLASSNAME, null);
// empty
// TRACE: Leave constructor
org.xins.common.Log.log_1002(CLASSNAME, null);
}
//-------------------------------------------------------------------------
// Fields
//-------------------------------------------------------------------------
//-------------------------------------------------------------------------
// Methods
//-------------------------------------------------------------------------
/**
* Parses the given XML string to create a <code>XINSCallResultData</code>
* object.
*
* @param xml
* the XML to be parsed, not <code>null</code>.
*
* @return
* the parsed result of the call, not <code>null</code>.
*
* @throws IllegalArgumentException
* if <code>xml == null</code>.
*
* @throws ParseException
* if the specified string is not valid XML or if it is not a valid XINS
* API function call result.
*/
public XINSCallResultData parse(byte[] xml)
throws IllegalArgumentException, ParseException {
final String THIS_METHOD = "parse(byte[])";
// TRACE: Enter method
org.xins.common.Log.log_1003(CLASSNAME, THIS_METHOD, null);
// Check preconditions
MandatoryArgumentChecker.check("xml", xml);
// Initialize our SAX event handler
Handler handler = new Handler();
ByteArrayInputStream stream = null;
try {
// Construct a SAX parser
SAXParser saxParser = SAX_PARSER_FACTORY.newSAXParser();
// Convert the byte array to an input stream
stream = new ByteArrayInputStream(xml);
// Let SAX parse the XML, using our handler
saxParser.parse(stream, handler);
} catch (Throwable exception) {
// Log: Parsing failed
String detail = exception.getMessage();
Log.log_2205(exception, detail);
// Construct a buffer for the error message
FastStringBuffer buffer = new FastStringBuffer(142, "Unable to convert the specified character string to XML");
// Include the exception message in our error message, if any
if (detail != null && detail.length() > 0) {
buffer.append(": ");
buffer.append(detail);
} else {
buffer.append('.');
}
// Throw exception with message, and register cause exception
throw new ParseException(buffer.toString(), exception, detail);
// Always dispose the ByteArrayInputStream
} finally {
if (stream != null) {
try {
stream.close();
} catch (IOException ioException) {
final String SUBJECT_CLASS = stream.getClass().getName();
final String SUBJECT_METHOD = "close()";
Utils.logProgrammingError(CLASSNAME, THIS_METHOD,
SUBJECT_CLASS, SUBJECT_METHOD,
null, ioException);
}
}
}
// TRACE: Leave method
org.xins.common.Log.log_1005(CLASSNAME, THIS_METHOD, null);
return handler;
}
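   // Usage sketch (illustrative only, not part of the original source; it
   // assumes PropertyReader#get(String) for reading an output parameter):
   //
   //    byte[] xml = "<result><param name=\"a\">1</param></result>".getBytes("UTF-8");
   //    XINSCallResultData result = new XINSCallResultParser().parse(xml);
   //    String errorCode = result.getErrorCode();    // null here, i.e. success
   //    String a = result.getParameters().get("a");  // "1"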
//-------------------------------------------------------------------------
// Inner classes
//-------------------------------------------------------------------------
/**
* SAX event handler that will parse the result from a call to a XINS
* service.
*
* @version $Revision$ $Date$
* @author Anthony Goubard (<a href="mailto:[email protected]">[email protected]</a>)
* @author Ernst de Haan (<a href="mailto:[email protected]">[email protected]</a>)
*
* @since XINS 1.0.0
*/
private class Handler
extends DefaultHandler
implements XINSCallResultData {
//-------------------------------------------------------------------------
// Constructors
//-------------------------------------------------------------------------
/**
* Constructs a new <code>Handler</code> instance.
*/
private Handler() {
// TRACE: Enter constructor
org.xins.common.Log.log_1000(HANDLER_CLASSNAME, null);
_state = INITIAL;
_level = -1;
_characters = new FastStringBuffer(45);
_dataElementStack = new Stack();
// TRACE: Leave constructor
org.xins.common.Log.log_1002(HANDLER_CLASSNAME, null);
}
//-------------------------------------------------------------------------
// Fields
//-------------------------------------------------------------------------
/**
* The current state. Never <code>null</code>.
*/
private State _state;
/**
* The error code returned by the function or <code>null</code>, if no
* error code is returned.
*
       * <p>The value will never be an empty string, so if the result is
* not <code>null</code>, then it is safe to assume the length of the
* string is at least 1 character.
*/
private String _errorCode;
/**
* The list of the parameters (name/value) returned by the function.
* This field is lazily initialized.
*/
private ProtectedPropertyReader _parameters;
/**
* The name of the output parameter that is currently being parsed.
*/
private String _parameterName;
/**
* The character content (CDATA or PCDATA) of the element currently
* being parsed.
*/
private final FastStringBuffer _characters;
/**
* The stack of child elements within the data section. The top element
* is always <code><data/></code>.
*/
private Stack _dataElementStack;
/**
* The level for the element pointer within the XML document. Initially
* this field is <code>-1</code>, which indicates the current element
* pointer is outside the document. The value <code>0</code> is for the
* root element (<code>result</code>), etc.
*/
private int _level;
//-------------------------------------------------------------------------
// Methods
//-------------------------------------------------------------------------
/**
* Receive notification of the beginning of an element.
*
* @param namespaceURI
* the namespace URI, can be <code>null</code>.
*
* @param localName
* the local name (without prefix); cannot be <code>null</code>.
*
* @param qName
* the qualified name (with prefix), can be <code>null</code> since
* <code>namespaceURI</code> and <code>localName</code> are always
* used instead.
*
* @param atts
* the attributes attached to the element; if there are no
* attributes, it shall be an empty {@link Attributes} object; cannot
* be <code>null</code>.
*
* @throws IllegalArgumentException
* if <code>localName == null || atts == null</code>.
*
* @throws SAXException
* if the parsing failed.
*/
public void startElement(String namespaceURI,
String localName,
String qName,
Attributes atts)
throws IllegalArgumentException, SAXException {
final String THIS_METHOD = "startElement(java.lang.String,"
+ "java.lang.String,"
+ "java.lang.String,"
+ Attributes.class.getName()
+ ')';
// Temporarily enter ERROR state, on success this state is left
State currentState = _state;
_state = ERROR;
// Make sure namespaceURI is either null or non-empty
namespaceURI = "".equals(namespaceURI) ? null : namespaceURI;
// Cache quoted version of namespaceURI
String quotedNamespaceURI = TextUtils.quote(namespaceURI);
// TRACE: Enter method
org.xins.common.Log.log_1003(HANDLER_CLASSNAME, THIS_METHOD,
"_state=" + currentState
+ "; _level=" + _level
+ "; namespaceURI=" + quotedNamespaceURI
+ "; localName=" + TextUtils.quote(localName)
+ "; qName=" + TextUtils.quote(qName));
// Check preconditions
MandatoryArgumentChecker.check("localName", localName, "atts", atts);
// Increase the element depth level
_level++;
if (currentState == ERROR) {
final String DETAIL = "_state=" + currentState + "; _level=" + _level;
throw Utils.logProgrammingError(HANDLER_CLASSNAME, THIS_METHOD,
HANDLER_CLASSNAME, THIS_METHOD,
DETAIL);
} else if (currentState == INITIAL) {
// Level and state must comply
if (_level != 0) {
final String DETAIL = "_state=" + currentState + "; _level=" + _level;
throw Utils.logProgrammingError(HANDLER_CLASSNAME, THIS_METHOD,
HANDLER_CLASSNAME, THIS_METHOD,
DETAIL);
}
// Root element must be 'result' without namespace
if (! (namespaceURI == null && localName.equals("result"))) {
Log.log_2200(namespaceURI, localName);
final String DETAIL = "Root element is \""
+ localName
+ "\" with namespace "
+ quotedNamespaceURI
+ " instead of \"result\" with namespace (null).";
throw new SAXException(DETAIL);
}
         // Get the 'errorcode' and 'code' attributes
String code1 = atts.getValue("errorcode");
String code2 = atts.getValue("code");
// Convert an empty string to null
if (code1 == null || code1.length() == 0) {
code1 = null;
}
if (code2 == null || code2.length() == 0) {
code2 = null;
}
// Only one error code attribute set
if (code1 != null && code2 == null) {
_errorCode = code1;
} else if (code1 == null && code2 != null) {
_errorCode = code2;
         // Neither error code attribute set
         } else if (code1 == null && code2 == null) {
            _errorCode = null;

         // Both attributes set, with the same value
         } else if (code1.equals(code2)) {
            _errorCode = code1;
// Conflicting error codes
} else {
// NOTE: No need to log here. This will be logged already (message 2205)
throw new SAXException("Found conflicting duplicate value for error code, since errorcode=\"" + code1 + "\", while code=\"" + code2 + "\".");
}
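            // For example (illustrative): errorcode="Foo" alone, code="Foo"
            // alone, or both attributes set to "Foo" all yield the error code
            // "Foo", while errorcode="Foo" combined with code="Bar" aborts
            // parsing with the SAXException above.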
// Change state
_state = AT_ROOT_LEVEL;
} else if (currentState == AT_ROOT_LEVEL) {
// Output parameter
if (namespaceURI == null && "param".equals(localName)) {
// Store the name of the parameter. It may be null, but that will
// be checked only after the element end tag is processed.
_parameterName = atts.getValue("name");
// TODO: Check parameter name here (null and pattern)
// Reserve buffer for PCDATA
_characters.clear();
// Update the state
_state = IN_PARAM_ELEMENT;
// Start of data section
} else if (namespaceURI == null && "data".equals(localName)) {
// A data element stack should really be empty
if (_dataElementStack.size() > 0) {
throw new SAXException("Found second data section.");
}
// Maintain a list of the elements, with data as the root
_dataElementStack.push(new DataElement(null, "data"));
// Update the state
_state = IN_DATA_SECTION;
// Ignore unrecognized element at root level
} else {
_state = IN_IGNORABLE_ELEMENT;
Log.log_2206(namespaceURI, localName);
}
// Within output parameter element, no elements are allowed
} else if (currentState == IN_PARAM_ELEMENT) {
// NOTE: No need to log here. This will be logged already (message 2205)
throw new SAXException("Found \"" + localName + "\" element with namespace " + quotedNamespaceURI + " within \"param\" element.");
// Within the data section
} else if (currentState == IN_DATA_SECTION) {
// Construct a DataElement
DataElement element = new DataElement(namespaceURI, localName);
// Add all attributes
for (int i = 0; i < atts.getLength(); i++) {
String attrNamespaceURI = atts.getURI(i);
String attrLocalName = atts.getLocalName(i);
String attrValue = atts.getValue(i);
element.setAttribute(attrNamespaceURI, attrLocalName, attrValue);
}
// Push the element on the stack
_dataElementStack.push(element);
// Reserve buffer for PCDATA
_characters.clear();
// Reset the state from ERROR back to IN_DATA_SECTION
_state = IN_DATA_SECTION;
// Deeper level within ignorable element
} else if (currentState == IN_IGNORABLE_ELEMENT) {
_state = IN_IGNORABLE_ELEMENT;
// Unrecognized state
} else {
final String DETAIL = "_state=" + currentState + "; _level=" + _level;
throw Utils.logProgrammingError(HANDLER_CLASSNAME, THIS_METHOD,
HANDLER_CLASSNAME, THIS_METHOD,
DETAIL);
}
org.xins.common.Log.log_1005(HANDLER_CLASSNAME, THIS_METHOD,
"_state=" + _state
+ "; _level=" + _level
+ "; namespaceURI=" + TextUtils.quote(namespaceURI)
+ "; localName=" + TextUtils.quote(localName)
+ "; qName=" + TextUtils.quote(qName));
}
/**
* Receive notification of the end of an element.
*
* @param namespaceURI
* the namespace URI, can be <code>null</code>.
*
* @param localName
* the local name (without prefix); cannot be <code>null</code>.
*
* @param qName
* the qualified name (with prefix), can be <code>null</code> since
       *    <code>namespaceURI</code> and <code>localName</code> are used
       *    instead.
*
* @throws IllegalArgumentException
* if <code>localName == null</code>.
*
* @throws SAXException
* if the parsing failed.
*/
public void endElement(String namespaceURI,
String localName,
String qName)
throws IllegalArgumentException, SAXException {
final String THIS_METHOD = "endElement(java.lang.String,"
+ "java.lang.String,"
+ "java.lang.String)";
// Temporarily enter ERROR state, on success this state is left
State currentState = _state;
_state = ERROR;
// Make sure namespaceURI is either null or non-empty
namespaceURI = "".equals(namespaceURI) ? null : namespaceURI;
// Cache quoted version of namespaceURI
String quotedNamespaceURI = TextUtils.quote(namespaceURI);
// TRACE: Enter method
org.xins.common.Log.log_1003(HANDLER_CLASSNAME, THIS_METHOD,
"_state=" + currentState
+ "; _level=" + _level
+ "; namespaceURI=" + TextUtils.quote(namespaceURI)
+ "; localName=" + TextUtils.quote(localName)
+ "; qName=" + TextUtils.quote(qName));
// Check preconditions
MandatoryArgumentChecker.check("localName", localName);
if (currentState == ERROR) {
final String DETAIL = "_state=" + currentState + "; _level=" + _level;
throw Utils.logProgrammingError(HANDLER_CLASSNAME, THIS_METHOD,
HANDLER_CLASSNAME, THIS_METHOD,
DETAIL);
// At root level
} else if (currentState == AT_ROOT_LEVEL) {
if (! (namespaceURI == null && "result".equals(localName))) {
final String DETAIL = "Expected end of element of type \"result\" with namespace (null) instead of \""
+ localName
+ "\" with namespace "
+ quotedNamespaceURI
+ '.';
throw Utils.logProgrammingError(HANDLER_CLASSNAME, THIS_METHOD,
HANDLER_CLASSNAME, THIS_METHOD,
DETAIL);
}
_state = FINISHED;
// Ignorable element
} else if (currentState == IN_IGNORABLE_ELEMENT) {
if (_level == 1) {
_state = AT_ROOT_LEVEL;
} else {
_state = IN_IGNORABLE_ELEMENT;
}
// Within data section
} else if (currentState == IN_DATA_SECTION) {
// Get the DataElement for which we process the end tag
DataElement child = (DataElement) _dataElementStack.pop();
// If at the <data/> element level, then return to AT_ROOT_LEVEL
if (_dataElementStack.size() == 0) {
if (! (namespaceURI == null && "data".equals(localName))) {
final String DETAIL = "Expected end of element of type \"data\" with namespace (null) instead of \""
+ localName
+ "\" with namespace "
+ quotedNamespaceURI
+ '.';
throw Utils.logProgrammingError(HANDLER_CLASSNAME, THIS_METHOD,
HANDLER_CLASSNAME, THIS_METHOD,
DETAIL);
}
// Push the root DataElement back
_dataElementStack.push(child);
// Reset the state
_state = AT_ROOT_LEVEL;
// Otherwise it's a custom element
} else {
// Set the PCDATA content on the element
if (_characters != null && _characters.getLength() > 0) {
child.setText(_characters.toString());
}
// Add the child to the parent
DataElement parent = (DataElement) _dataElementStack.peek();
parent.addChild(child);
            // Reset the state back from ERROR to IN_DATA_SECTION
_state = IN_DATA_SECTION;
}
// Output parameter
} else if (currentState == IN_PARAM_ELEMENT) {
if (! (namespaceURI == null && "param".equals(localName))) {
final String DETAIL = "Expected end of element of type \"param\" with namespace (null) instead of \""
+ localName
+ "\" with namespace "
+ quotedNamespaceURI
+ '.';
throw Utils.logProgrammingError(HANDLER_CLASSNAME, THIS_METHOD,
HANDLER_CLASSNAME, THIS_METHOD,
DETAIL);
}
// Retrieve name and value for output parameter
String name = _parameterName;
String value = _characters.toString();
// Both name and value should be set
boolean noName = (name == null || name.length() < 1);
boolean noValue = (value == null || value.length() < 1);
if (noName && noValue) {
Log.log_2201();
} else if (noName) {
Log.log_2202(value);
} else if (noValue) {
Log.log_2203(name);
// Name and value are both set, correctly
} else {
Log.log_2204(name, value);
// Previously no parameters, perform (lazy) initialization
if (_parameters == null) {
_parameters = new ProtectedPropertyReader(PROTECTION_KEY);
// Check if parameter is already set
} else {
String existingValue = _parameters.get(name);
if (existingValue != null) {
if (!existingValue.equals(value)) {
// NOTE: This will be logged already (message 2205)
final String DETAIL = "Found conflicting duplicate value for output parameter \""
+ name
+ "\". Initial value is \""
+ existingValue
+ "\". New value is \""
+ value +
"\".";
throw new SAXException(DETAIL);
}
}
}
// Store the name-value combination for the output parameter
_parameters.set(PROTECTION_KEY, name, value);
}
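         // Illustrative note (not part of the original source): a repeated
         // <param name="a">1</param> is accepted, but a second occurrence of
         // the same name with a different value aborts parsing with a
         // SAXException.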
// Reset the state
_parameterName = null;
_state = AT_ROOT_LEVEL;
_characters.clear();
// Unknown state
} else {
final String DETAIL = "Unrecognized state: "
+ currentState
+ ". Programming error suspected.";
throw Utils.logProgrammingError(HANDLER_CLASSNAME, THIS_METHOD,
HANDLER_CLASSNAME, THIS_METHOD,
DETAIL);
}
_level--;
_characters.clear();
// TRACE: Leave method
org.xins.common.Log.log_1005(HANDLER_CLASSNAME, THIS_METHOD,
"_state=" + _state
+ "; _level=" + _level
+ "; namespaceURI=" + TextUtils.quote(namespaceURI)
+ "; localName=" + TextUtils.quote(localName)
+ "; qName=" + TextUtils.quote(qName));
}
/**
* Receive notification of character data.
*
* @param ch
* the <code>char</code> array that contains the characters from the
* XML document, cannot be <code>null</code>.
*
* @param start
* the start index within <code>ch</code>.
*
* @param length
* the number of characters to take from <code>ch</code>.
*
* @throws IndexOutOfBoundsException
* if characters outside the allowed range are specified.
*
* @throws SAXException
* if the parsing failed.
*/
public void characters(char[] ch, int start, int length)
throws IndexOutOfBoundsException, SAXException {
final String THIS_METHOD = "characters(char[],int,int)";
// Temporarily enter ERROR state, on success this state is left
State currentState = _state;
_state = ERROR;
// TRACE: Enter method
org.xins.common.Log.log_1003(HANDLER_CLASSNAME, THIS_METHOD, null);
// Check state
if (currentState != IN_PARAM_ELEMENT
&& currentState != IN_DATA_SECTION
&& currentState != IN_IGNORABLE_ELEMENT) {
String text = new String(ch, start, length);
if (text.trim().length() > 0) {
// NOTE: This will be logged already (message 2205)
throw new SAXException("Found character content \"" + text + "\" in state " + currentState + '.');
}
}
if (_characters != null) {
_characters.append(ch, start, length);
}
// Reset _state
_state = currentState;
}
/**
* Checks if the state is <code>FINISHED</code> and if not throws an
* <code>IllegalStateException</code>.
*
* @throws IllegalStateException
* if the current state is not {@link #FINISHED}.
*/
private void assertFinished()
throws IllegalStateException {
if (_state != FINISHED) {
// TODO: Should SUBJECT_METHOD not be something else?
final String THIS_METHOD = "assertFinished()";
final String SUBJECT_METHOD = Utils.getCallingMethod();
final String DETAIL = "State is "
+ _state
+ " instead of "
+ FINISHED
+ '.';
Utils.logProgrammingError(HANDLER_CLASSNAME, THIS_METHOD,
HANDLER_CLASSNAME, SUBJECT_METHOD,
DETAIL);
throw new IllegalStateException(DETAIL);
}
}
/**
* Returns the error code. If <code>null</code> is returned the call was
* successful and thus no error code was returned. Otherwise the call
* was unsuccessful.
*
* <p>This method will never return an empty string, so if the result is
* not <code>null</code>, then it is safe to assume the length of the
* string is at least 1 character.
*
* @return
* the returned error code, or <code>null</code> if the call was
* successful.
*
* @throws IllegalStateException
* if the current state is invalid.
*/
public String getErrorCode()
throws IllegalStateException {
// Check state
assertFinished();
return _errorCode;
}
/**
* Get the parameters returned by the function.
*
* @return
* the parameters (name/value) or <code>null</code> if the function
* does not have any parameters.
*
* @throws IllegalStateException
* if the current state is invalid.
*/
public PropertyReader getParameters()
throws IllegalStateException {
// Check state
assertFinished();
return _parameters;
}
/**
* Get the data element returned by the function if any.
*
* @return
* the data element, or <code>null</code> if the function did not
* return any data element.
*
* @throws IllegalStateException
* if the current state is invalid.
*/
public DataElement getDataElement()
throws IllegalStateException {
// Check state
assertFinished();
if (_dataElementStack.isEmpty()) {
return null;
} else {
return (DataElement) _dataElementStack.peek();
}
}
}
/**
* State of the event handler.
*
* @version $Revision$ $Date$
* @author Ernst de Haan (<a href="mailto:[email protected]">[email protected]</a>)
*
* @since XINS 1.0.0
*/
private static final class State extends Object {
//----------------------------------------------------------------------
// Constructors
//----------------------------------------------------------------------
/**
* Constructs a new <code>State</code> object.
*
* @param name
* the name of this state, cannot be <code>null</code>.
*
* @throws IllegalArgumentException
* if <code>name == null</code>.
*/
private State(String name) throws IllegalArgumentException {
// Check preconditions
MandatoryArgumentChecker.check("name", name);
_name = name;
}
//----------------------------------------------------------------------
// Fields
//----------------------------------------------------------------------
/**
* The name of this state. Cannot be <code>null</code>.
*/
private final String _name;
//----------------------------------------------------------------------
// Methods
//----------------------------------------------------------------------
/**
* Returns the name of this state.
*
* @return
* the name of this state, cannot be <code>null</code>.
*/
public String getName() {
return _name;
}
/**
* Returns a textual representation of this object.
*
* @return
* the name of this state, never <code>null</code>.
*/
public String toString() {
return _name;
}
}
}
| src/java-client-framework/org/xins/client/XINSCallResultParser.java | /*
* $Id$
*/
package org.xins.client;
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.util.Stack;
import javax.xml.parsers.SAXParserFactory;
import javax.xml.parsers.SAXParser;
import org.xml.sax.Attributes;
import org.xml.sax.SAXException;
import org.xml.sax.helpers.DefaultHandler;
import org.xins.common.MandatoryArgumentChecker;
import org.xins.common.Utils;
import org.xins.common.collections.PropertyReader;
import org.xins.common.collections.ProtectedPropertyReader;
import org.xins.common.text.FastStringBuffer;
import org.xins.common.text.ParseException;
import org.xins.common.text.TextUtils;
/**
* XINS call result parser. XML is parsed to produce a {@link XINSCallResult}
* object.
*
* <p>The root element in the XML must be of type <code>result</code>. Inside
* this element, <code>param</code> elements optionally define parameters and
* an optional <code>data</code> element defines a data section.
*
* <p>If the result element contains an <code>errorcode</code> or a
* <code>code</code> attribute, then the value of the attribute is interpreted
* as the error code. If both these attributes are set and conflicting, then
* this is considered a showstopper.
*
* <p>TODO: Describe rest of parse process.
*
* <p>Note: This parser is
* <a href="http://www.w3.org/TR/REC-xml-names/">XML Namespaces</a>-aware.
*
* @version $Revision$ $Date$
*
* @author Anthony Goubard (<a href="mailto:[email protected]">[email protected]</a>)
* @author Ernst de Haan (<a href="mailto:[email protected]">[email protected]</a>)
*
* @since XINS 1.0.0
*/
public class XINSCallResultParser
extends Object {
//-------------------------------------------------------------------------
// Class fields
//-------------------------------------------------------------------------
/**
* Fully-qualified name of this class. This field is not <code>null</code>.
*/
private static final String CLASSNAME = XINSCallResultParser.class.getName();
/**
* Fully-qualified name of the inner class <code>Handler</code>. This field
* is not <code>null</code>.
*/
private static final String HANDLER_CLASSNAME = XINSCallResultParser.Handler.class.getName();
/**
* The key for the <code>ProtectedPropertyReader</code> instances created
* by this class.
*/
private static final Object PROTECTION_KEY = new Object();
/**
* Error state for the SAX event handler.
*/
private static final State ERROR = new State("ERROR");
/**
* Initial state for the SAX event handler, before the root element is
* processed.
*/
private static final State INITIAL = new State("INITIAL");
/**
* State for the SAX event handler just within the root element
* (<code>result</code>).
*/
private static final State AT_ROOT_LEVEL = new State("AT_ROOT_LEVEL");
/**
* State for the SAX event handler at any depth within an ignorable
* element.
*/
private static final State IN_IGNORABLE_ELEMENT = new State("IN_IGNORABLE_ELEMENT");
/**
* State for the SAX event handler within the output parameter element
* (<code>param</code>).
*/
private static final State IN_PARAM_ELEMENT = new State("IN_PARAM_ELEMENT");
/**
* State for the SAX event handler in the data section (at any depth within
* the <code>data</code> element).
*/
private static final State IN_DATA_SECTION = new State("IN_DATA_SECTION");
/**
* State for the SAX event handler for the final state, when parsing is
* finished.
*/
private static final State FINISHED = new State("FINISHED");
/**
* The factory for SAX parsers. This field is never <code>null</code>, it
* is initialized by a class initializer.
*/
private static final SAXParserFactory SAX_PARSER_FACTORY;
//-------------------------------------------------------------------------
// Class functions
//-------------------------------------------------------------------------
/**
* Initializes this class.
*/
static {
SAX_PARSER_FACTORY = SAXParserFactory.newInstance();
SAX_PARSER_FACTORY.setNamespaceAware(true);
}
//-------------------------------------------------------------------------
// Constructors
//-------------------------------------------------------------------------
/**
* Constructs a new <code>XINSCallResultParser</code>.
*/
public XINSCallResultParser() {
// TRACE: Enter constructor
org.xins.common.Log.log_1000(CLASSNAME, null);
// empty
// TRACE: Leave constructor
org.xins.common.Log.log_1002(CLASSNAME, null);
}
//-------------------------------------------------------------------------
// Fields
//-------------------------------------------------------------------------
//-------------------------------------------------------------------------
// Methods
//-------------------------------------------------------------------------
/**
* Parses the given XML string to create a <code>XINSCallResultData</code>
* object.
*
* @param xml
* the XML to be parsed, not <code>null</code>.
*
* @return
* the parsed result of the call, not <code>null</code>.
*
* @throws IllegalArgumentException
* if <code>xml == null</code>.
*
* @throws ParseException
* if the specified string is not valid XML or if it is not a valid XINS
* API function call result.
*/
public XINSCallResultData parse(byte[] xml)
throws IllegalArgumentException, ParseException {
final String THIS_METHOD = "parse(byte[])";
// TRACE: Enter method
org.xins.common.Log.log_1003(CLASSNAME, THIS_METHOD, null);
// Check preconditions
MandatoryArgumentChecker.check("xml", xml);
// Initialize our SAX event handler
Handler handler = new Handler();
ByteArrayInputStream stream = null;
try {
// Construct a SAX parser
SAXParser saxParser = SAX_PARSER_FACTORY.newSAXParser();
// Convert the byte array to an input stream
stream = new ByteArrayInputStream(xml);
// Let SAX parse the XML, using our handler
saxParser.parse(stream, handler);
} catch (Throwable exception) {
// Log: Parsing failed
String detail = exception.getMessage();
Log.log_2205(exception, detail);
// Construct a buffer for the error message
FastStringBuffer buffer = new FastStringBuffer(142, "Unable to convert the specified character string to XML");
// Include the exception message in our error message, if any
if (detail != null && detail.length() > 0) {
buffer.append(": ");
buffer.append(detail);
} else {
buffer.append('.');
}
// Throw exception with message, and register cause exception
throw new ParseException(buffer.toString(), exception, detail);
// Always dispose the ByteArrayInputStream
} finally {
if (stream != null) {
try {
stream.close();
} catch (IOException ioException) {
final String SUBJECT_CLASS = stream.getClass().getName();
final String SUBJECT_METHOD = "close()";
Utils.logProgrammingError(CLASSNAME, THIS_METHOD,
SUBJECT_CLASS, SUBJECT_METHOD,
null, ioException);
}
}
}
// TRACE: Leave method
org.xins.common.Log.log_1005(CLASSNAME, THIS_METHOD, null);
return handler;
}
//-------------------------------------------------------------------------
// Inner classes
//-------------------------------------------------------------------------
/**
* SAX event handler that will parse the result from a call to a XINS
* service.
*
* @version $Revision$ $Date$
* @author Anthony Goubard (<a href="mailto:[email protected]">[email protected]</a>)
* @author Ernst de Haan (<a href="mailto:[email protected]">[email protected]</a>)
*
* @since XINS 1.0.0
*/
private class Handler
extends DefaultHandler
implements XINSCallResultData {
//-------------------------------------------------------------------------
// Constructors
//-------------------------------------------------------------------------
/**
* Constructs a new <code>Handler</code> instance.
*/
private Handler() {
// TRACE: Enter constructor
org.xins.common.Log.log_1000(HANDLER_CLASSNAME, null);
_state = INITIAL;
_level = -1;
_characters = new FastStringBuffer(45);
_dataElementStack = new Stack();
// TRACE: Leave constructor
org.xins.common.Log.log_1002(HANDLER_CLASSNAME, null);
}
//-------------------------------------------------------------------------
// Fields
//-------------------------------------------------------------------------
/**
* The current state. Never <code>null</code>.
*/
private State _state;
/**
* The error code returned by the function or <code>null</code>, if no
* error code is returned.
*/
private String _errorCode;
/**
* The list of the parameters (name/value) returned by the function.
* This field is lazily initialized.
*/
private ProtectedPropertyReader _parameters;
/**
* The name of the output parameter that is currently being parsed.
*/
private String _parameterName;
/**
* The character content (CDATA or PCDATA) of the element currently
* being parsed.
*/
private final FastStringBuffer _characters;
/**
* The stack of child elements within the data section. The top element
* is always <code><data/></code>.
*/
private Stack _dataElementStack;
/**
* The level for the element pointer within the XML document. Initially
* this field is <code>-1</code>, which indicates the current element
* pointer is outside the document. The value <code>0</code> is for the
* root element (<code>result</code>), etc.
*/
private int _level;
//-------------------------------------------------------------------------
// Methods
//-------------------------------------------------------------------------
/**
* Receive notification of the beginning of an element.
*
* @param namespaceURI
* the namespace URI, can be <code>null</code>.
*
* @param localName
* the local name (without prefix); cannot be <code>null</code>.
*
* @param qName
* the qualified name (with prefix), can be <code>null</code> since
* <code>namespaceURI</code> and <code>localName</code> are always
* used instead.
*
* @param atts
* the attributes attached to the element; if there are no
* attributes, it shall be an empty {@link Attributes} object; cannot
* be <code>null</code>.
*
* @throws IllegalArgumentException
* if <code>localName == null || atts == null</code>.
*
* @throws SAXException
* if the parsing failed.
*/
public void startElement(String namespaceURI,
String localName,
String qName,
Attributes atts)
throws IllegalArgumentException, SAXException {
final String THIS_METHOD = "startElement(java.lang.String,"
+ "java.lang.String,"
+ "java.lang.String,"
+ Attributes.class.getName()
+ ')';
// Temporarily enter ERROR state, on success this state is left
State currentState = _state;
_state = ERROR;
// Make sure namespaceURI is either null or non-empty
namespaceURI = "".equals(namespaceURI) ? null : namespaceURI;
// Cache quoted version of namespaceURI
String quotedNamespaceURI = TextUtils.quote(namespaceURI);
// TRACE: Enter method
org.xins.common.Log.log_1003(HANDLER_CLASSNAME, THIS_METHOD,
"_state=" + currentState
+ "; _level=" + _level
+ "; namespaceURI=" + quotedNamespaceURI
+ "; localName=" + TextUtils.quote(localName)
+ "; qName=" + TextUtils.quote(qName));
// Check preconditions
MandatoryArgumentChecker.check("localName", localName, "atts", atts);
// Increase the element depth level
_level++;
if (currentState == ERROR) {
final String DETAIL = "_state=" + currentState + "; _level=" + _level;
throw Utils.logProgrammingError(HANDLER_CLASSNAME, THIS_METHOD,
HANDLER_CLASSNAME, THIS_METHOD,
DETAIL);
} else if (currentState == INITIAL) {
// Level and state must comply
if (_level != 0) {
final String DETAIL = "_state=" + currentState + "; _level=" + _level;
throw Utils.logProgrammingError(HANDLER_CLASSNAME, THIS_METHOD,
HANDLER_CLASSNAME, THIS_METHOD,
DETAIL);
}
// Root element must be 'result' without namespace
if (! (namespaceURI == null && localName.equals("result"))) {
Log.log_2200(namespaceURI, localName);
final String DETAIL = "Root element is \""
+ localName
+ "\" with namespace "
+ quotedNamespaceURI
+ " instead of \"result\" with namespace (null).";
throw new SAXException(DETAIL);
}
         // Get the 'errorcode' and 'code' attributes
String code1 = atts.getValue("errorcode");
String code2 = atts.getValue("code");
// Only one error code attribute set
if (code1 != null && code2 == null) {
_errorCode = code1;
} else if (code1 == null && code2 != null) {
_errorCode = code2;
         // Neither error code attribute set
         } else if (code1 == null && code2 == null) {
            _errorCode = null;

         // Both attributes set, with the same value
         } else if (code1.equals(code2)) {
            _errorCode = code1;
// Conflicting error codes
} else {
// NOTE: No need to log here. This will be logged already (message 2205)
throw new SAXException("Found conflicting duplicate value for error code, since errorcode=\"" + code1 + "\", while code=\"" + code2 + "\".");
}
// Change state
_state = AT_ROOT_LEVEL;
} else if (currentState == AT_ROOT_LEVEL) {
// Output parameter
if (namespaceURI == null && "param".equals(localName)) {
// Store the name of the parameter. It may be null, but that will
// be checked only after the element end tag is processed.
_parameterName = atts.getValue("name");
// TODO: Check parameter name here (null and pattern)
// Reserve buffer for PCDATA
_characters.clear();
// Update the state
_state = IN_PARAM_ELEMENT;
// Start of data section
} else if (namespaceURI == null && "data".equals(localName)) {
// A data element stack should really be empty
if (_dataElementStack.size() > 0) {
throw new SAXException("Found second data section.");
}
// Maintain a list of the elements, with data as the root
_dataElementStack.push(new DataElement(null, "data"));
// Update the state
_state = IN_DATA_SECTION;
// Ignore unrecognized element at root level
} else {
_state = IN_IGNORABLE_ELEMENT;
Log.log_2206(namespaceURI, localName);
}
// Within output parameter element, no elements are allowed
} else if (currentState == IN_PARAM_ELEMENT) {
// NOTE: No need to log here. This will be logged already (message 2205)
throw new SAXException("Found \"" + localName + "\" element with namespace " + quotedNamespaceURI + " within \"param\" element.");
// Within the data section
} else if (currentState == IN_DATA_SECTION) {
// Construct a DataElement
DataElement element = new DataElement(namespaceURI, localName);
// Add all attributes
for (int i = 0; i < atts.getLength(); i++) {
String attrNamespaceURI = atts.getURI(i);
String attrLocalName = atts.getLocalName(i);
String attrValue = atts.getValue(i);
element.setAttribute(attrNamespaceURI, attrLocalName, attrValue);
}
// Push the element on the stack
_dataElementStack.push(element);
// Reserve buffer for PCDATA
_characters.clear();
// Reset the state from ERROR back to IN_DATA_SECTION
_state = IN_DATA_SECTION;
// Deeper level within ignorable element
} else if (currentState == IN_IGNORABLE_ELEMENT) {
_state = IN_IGNORABLE_ELEMENT;
// Unrecognized state
} else {
final String DETAIL = "_state=" + currentState + "; _level=" + _level;
throw Utils.logProgrammingError(HANDLER_CLASSNAME, THIS_METHOD,
HANDLER_CLASSNAME, THIS_METHOD,
DETAIL);
}
org.xins.common.Log.log_1005(HANDLER_CLASSNAME, THIS_METHOD,
"_state=" + _state
+ "; _level=" + _level
+ "; namespaceURI=" + TextUtils.quote(namespaceURI)
+ "; localName=" + TextUtils.quote(localName)
+ "; qName=" + TextUtils.quote(qName));
}
/**
* Receive notification of the end of an element.
*
* @param namespaceURI
* the namespace URI, can be <code>null</code>.
*
* @param localName
* the local name (without prefix); cannot be <code>null</code>.
*
* @param qName
* the qualified name (with prefix), can be <code>null</code> since
       *    <code>namespaceURI</code> and <code>localName</code> are used
       *    instead.
*
* @throws IllegalArgumentException
* if <code>localName == null</code>.
*
* @throws SAXException
* if the parsing failed.
*/
public void endElement(String namespaceURI,
String localName,
String qName)
throws IllegalArgumentException, SAXException {
final String THIS_METHOD = "endElement(java.lang.String,"
+ "java.lang.String,"
+ "java.lang.String)";
// Temporarily enter ERROR state, on success this state is left
State currentState = _state;
_state = ERROR;
// Make sure namespaceURI is either null or non-empty
namespaceURI = "".equals(namespaceURI) ? null : namespaceURI;
// Cache quoted version of namespaceURI
String quotedNamespaceURI = TextUtils.quote(namespaceURI);
// TRACE: Enter method
org.xins.common.Log.log_1003(HANDLER_CLASSNAME, THIS_METHOD,
"_state=" + currentState
+ "; _level=" + _level
+ "; namespaceURI=" + TextUtils.quote(namespaceURI)
+ "; localName=" + TextUtils.quote(localName)
+ "; qName=" + TextUtils.quote(qName));
// Check preconditions
MandatoryArgumentChecker.check("localName", localName);
if (currentState == ERROR) {
final String DETAIL = "_state=" + currentState + "; _level=" + _level;
throw Utils.logProgrammingError(HANDLER_CLASSNAME, THIS_METHOD,
HANDLER_CLASSNAME, THIS_METHOD,
DETAIL);
// At root level
} else if (currentState == AT_ROOT_LEVEL) {
if (! (namespaceURI == null && "result".equals(localName))) {
final String DETAIL = "Expected end of element of type \"result\" with namespace (null) instead of \""
+ localName
+ "\" with namespace "
+ quotedNamespaceURI
+ '.';
throw Utils.logProgrammingError(HANDLER_CLASSNAME, THIS_METHOD,
HANDLER_CLASSNAME, THIS_METHOD,
DETAIL);
}
_state = FINISHED;
// Ignorable element
} else if (currentState == IN_IGNORABLE_ELEMENT) {
if (_level == 1) {
_state = AT_ROOT_LEVEL;
} else {
_state = IN_IGNORABLE_ELEMENT;
}
// Within data section
} else if (currentState == IN_DATA_SECTION) {
// Get the DataElement for which we process the end tag
DataElement child = (DataElement) _dataElementStack.pop();
// If at the <data/> element level, then return to AT_ROOT_LEVEL
if (_dataElementStack.size() == 0) {
if (! (namespaceURI == null && "data".equals(localName))) {
final String DETAIL = "Expected end of element of type \"data\" with namespace (null) instead of \""
+ localName
+ "\" with namespace "
+ quotedNamespaceURI
+ '.';
throw Utils.logProgrammingError(HANDLER_CLASSNAME, THIS_METHOD,
HANDLER_CLASSNAME, THIS_METHOD,
DETAIL);
}
// Push the root DataElement back
_dataElementStack.push(child);
// Reset the state
_state = AT_ROOT_LEVEL;
// Otherwise it's a custom element
} else {
// Set the PCDATA content on the element
if (_characters != null && _characters.getLength() > 0) {
child.setText(_characters.toString());
}
// Add the child to the parent
DataElement parent = (DataElement) _dataElementStack.peek();
parent.addChild(child);
            // Reset the state back from ERROR to IN_DATA_SECTION
_state = IN_DATA_SECTION;
}
// Output parameter
} else if (currentState == IN_PARAM_ELEMENT) {
if (! (namespaceURI == null && "param".equals(localName))) {
final String DETAIL = "Expected end of element of type \"param\" with namespace (null) instead of \""
+ localName
+ "\" with namespace "
+ quotedNamespaceURI
+ '.';
throw Utils.logProgrammingError(HANDLER_CLASSNAME, THIS_METHOD,
HANDLER_CLASSNAME, THIS_METHOD,
DETAIL);
}
// Retrieve name and value for output parameter
String name = _parameterName;
String value = _characters.toString();
// Both name and value should be set
boolean noName = (name == null || name.length() < 1);
boolean noValue = (value == null || value.length() < 1);
if (noName && noValue) {
Log.log_2201();
} else if (noName) {
Log.log_2202(value);
} else if (noValue) {
Log.log_2203(name);
// Name and value are both set, correctly
} else {
Log.log_2204(name, value);
// Previously no parameters, perform (lazy) initialization
if (_parameters == null) {
_parameters = new ProtectedPropertyReader(PROTECTION_KEY);
// Check if parameter is already set
} else {
String existingValue = _parameters.get(name);
if (existingValue != null) {
if (!existingValue.equals(value)) {
// NOTE: This will be logged already (message 2205)
final String DETAIL = "Found conflicting duplicate value for output parameter \""
+ name
+ "\". Initial value is \""
+ existingValue
+ "\". New value is \""
+ value +
"\".";
throw new SAXException(DETAIL);
}
}
}
// Store the name-value combination for the output parameter
_parameters.set(PROTECTION_KEY, name, value);
}
// Reset the state
_parameterName = null;
_state = AT_ROOT_LEVEL;
_characters.clear();
// Unknown state
} else {
final String DETAIL = "Unrecognized state: "
+ currentState
+ ". Programming error suspected.";
throw Utils.logProgrammingError(HANDLER_CLASSNAME, THIS_METHOD,
HANDLER_CLASSNAME, THIS_METHOD,
DETAIL);
}
_level--;
_characters.clear();
// TRACE: Leave method
org.xins.common.Log.log_1005(HANDLER_CLASSNAME, THIS_METHOD,
"_state=" + _state
+ "; _level=" + _level
+ "; namespaceURI=" + TextUtils.quote(namespaceURI)
+ "; localName=" + TextUtils.quote(localName)
+ "; qName=" + TextUtils.quote(qName));
}
/**
* Receive notification of character data.
*
* @param ch
* the <code>char</code> array that contains the characters from the
* XML document, cannot be <code>null</code>.
*
* @param start
* the start index within <code>ch</code>.
*
* @param length
* the number of characters to take from <code>ch</code>.
*
* @throws IndexOutOfBoundsException
* if characters outside the allowed range are specified.
*
* @throws SAXException
* if the parsing failed.
*/
public void characters(char[] ch, int start, int length)
throws IndexOutOfBoundsException, SAXException {
final String THIS_METHOD = "characters(char[],int,int)";
// Temporarily enter ERROR state, on success this state is left
State currentState = _state;
_state = ERROR;
// TRACE: Enter method
org.xins.common.Log.log_1003(HANDLER_CLASSNAME, THIS_METHOD, null);
// Check state
if (currentState != IN_PARAM_ELEMENT
&& currentState != IN_DATA_SECTION
&& currentState != IN_IGNORABLE_ELEMENT) {
String text = new String(ch, start, length);
if (text.trim().length() > 0) {
// NOTE: This will be logged already (message 2205)
throw new SAXException("Found character content \"" + text + "\" in state " + currentState + '.');
}
}
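      // Buffer the character data; it is consumed when the enclosing element ends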
if (_characters != null) {
_characters.append(ch, start, length);
}
// Reset _state
_state = currentState;
}
/**
* Checks if the state is <code>FINISHED</code> and if not throws an
* <code>IllegalStateException</code>.
*
* @throws IllegalStateException
* if the current state is not {@link #FINISHED}.
*/
private void assertFinished()
throws IllegalStateException {
if (_state != FINISHED) {
// TODO: Should SUBJECT_METHOD not be something else?
final String THIS_METHOD = "assertFinished()";
final String SUBJECT_METHOD = Utils.getCallingMethod();
final String DETAIL = "State is "
+ _state
+ " instead of "
+ FINISHED
+ '.';
Utils.logProgrammingError(HANDLER_CLASSNAME, THIS_METHOD,
HANDLER_CLASSNAME, SUBJECT_METHOD,
DETAIL);
throw new IllegalStateException(DETAIL);
}
}
/**
* Gets the error code returned by the function if any.
*
* @return
    * the error code returned by the function or <code>null</code>
* if no error code has been returned from the function.
*
* @throws IllegalStateException
* if the current state is invalid.
*/
public String getErrorCode()
throws IllegalStateException {
// Check state
assertFinished();
return _errorCode;
}
/**
* Get the parameters returned by the function.
*
* @return
* the parameters (name/value) or <code>null</code> if the function
* does not have any parameters.
*
* @throws IllegalStateException
* if the current state is invalid.
*/
public PropertyReader getParameters()
throws IllegalStateException {
// Check state
assertFinished();
return _parameters;
}
/**
* Get the data element returned by the function if any.
*
* @return
* the data element, or <code>null</code> if the function did not
* return any data element.
*
* @throws IllegalStateException
* if the current state is invalid.
*/
public DataElement getDataElement()
throws IllegalStateException {
// Check state
assertFinished();
if (_dataElementStack.isEmpty()) {
return null;
} else {
return (DataElement) _dataElementStack.peek();
}
}
}
/**
* State of the event handler.
*
* @version $Revision$ $Date$
* @author Ernst de Haan (<a href="mailto:[email protected]">[email protected]</a>)
*
* @since XINS 1.0.0
*/
private static final class State extends Object {
//----------------------------------------------------------------------
// Constructors
//----------------------------------------------------------------------
/**
* Constructs a new <code>State</code> object.
*
* @param name
* the name of this state, cannot be <code>null</code>.
*
* @throws IllegalArgumentException
* if <code>name == null</code>.
*/
private State(String name) throws IllegalArgumentException {
// Check preconditions
MandatoryArgumentChecker.check("name", name);
_name = name;
}
//----------------------------------------------------------------------
// Fields
//----------------------------------------------------------------------
/**
* The name of this state. Cannot be <code>null</code>.
*/
private final String _name;
//----------------------------------------------------------------------
// Methods
//----------------------------------------------------------------------
/**
* Returns the name of this state.
*
* @return
* the name of this state, cannot be <code>null</code>.
*/
public String getName() {
return _name;
}
/**
* Returns a textual representation of this object.
*
* @return
* the name of this state, never <code>null</code>.
*/
public String toString() {
return _name;
}
}
}
| Making sure the error code is never an empty string.
| src/java-client-framework/org/xins/client/XINSCallResultParser.java | Making sure the error code is never an empty string. | <ide><path>rc/java-client-framework/org/xins/client/XINSCallResultParser.java
<ide> /**
<ide> * The error code returned by the function or <code>null</code>, if no
<ide> * error code is returned.
<add> *
<add> * <p>The value will never return an empty string, so if the result is
<add> * not <code>null</code>, then it is safe to assume the length of the
<add> * string is at least 1 character.
<ide> */
<ide> private String _errorCode;
<ide>
<ide>       // Get the 'errorcode' and 'code' attributes
<ide> String code1 = atts.getValue("errorcode");
<ide> String code2 = atts.getValue("code");
<add>
<add> // Convert an empty string to null
<add> if (code1 == null || code1.length() == 0) {
<add> code1 = null;
<add> }
<add> if (code2 == null || code2.length() == 0) {
<add> code2 = null;
<add> }
<ide>
<ide> // Only one error code attribute set
<ide> if (code1 != null && code2 == null) {
<ide> }
<ide>
<ide> /**
<del> * Gets the error code returned by the function if any.
<add> * Returns the error code. If <code>null</code> is returned the call was
<add> * successful and thus no error code was returned. Otherwise the call
<add> * was unsuccessful.
<add> *
<add> * <p>This method will never return an empty string, so if the result is
<add> * not <code>null</code>, then it is safe to assume the length of the
<add> * string is at least 1 character.
<ide> *
<ide> * @return
<del>    * the error code returned by the function or <code>null</code>
<del> * if no error code has been returned from the function.
<add> * the returned error code, or <code>null</code> if the call was
<add> * successful.
<ide> *
<ide> * @throws IllegalStateException
<ide> * if the current state is invalid. |
|
Java | apache-2.0 | c01fefdb8d05100c3cb867f428758f047fb6dad7 | 0 | reportportal/service-api,reportportal/service-api,reportportal/service-api,reportportal/service-api,reportportal/service-api | /*
* Copyright 2017 EPAM Systems
*
*
* This file is part of EPAM Report Portal.
* https://github.com/reportportal/service-api
*
* Report Portal is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Report Portal is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Report Portal. If not, see <http://www.gnu.org/licenses/>.
*/
package com.epam.ta.reportportal.core.item;
import com.epam.ta.reportportal.exception.ReportPortalException;
import com.epam.ta.reportportal.store.commons.Preconditions;
import com.epam.ta.reportportal.store.database.dao.LaunchRepository;
import com.epam.ta.reportportal.store.database.dao.TestItemRepository;
import com.epam.ta.reportportal.store.database.entity.enums.StatusEnum;
import com.epam.ta.reportportal.store.database.entity.item.TestItem;
import com.epam.ta.reportportal.store.database.entity.item.TestItemResults;
import com.epam.ta.reportportal.store.database.entity.item.issue.IssueEntity;
import com.epam.ta.reportportal.store.database.entity.item.issue.IssueType;
import com.epam.ta.reportportal.ws.converter.builders.TestItemBuilder;
import com.epam.ta.reportportal.ws.model.ErrorType;
import com.epam.ta.reportportal.ws.model.FinishTestItemRQ;
import com.epam.ta.reportportal.ws.model.OperationCompletionRS;
import com.epam.ta.reportportal.ws.model.issue.Issue;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import java.util.List;
import java.util.Optional;
import java.util.function.Function;
import static com.epam.ta.reportportal.commons.validation.BusinessRule.expect;
import static com.epam.ta.reportportal.commons.validation.Suppliers.formattedSupplier;
import static com.epam.ta.reportportal.store.commons.Predicates.equalTo;
import static com.epam.ta.reportportal.store.database.entity.enums.StatusEnum.*;
import static com.epam.ta.reportportal.store.database.entity.enums.TestItemIssueType.NOT_ISSUE_FLAG;
import static com.epam.ta.reportportal.store.database.entity.enums.TestItemIssueType.TO_INVESTIGATE;
import static com.epam.ta.reportportal.ws.model.ErrorType.*;
import static java.util.stream.Collectors.toList;
/**
* Default implementation of {@link FinishTestItemHandler}
*
* @author Andrei Varabyeu
* @author Aliaksei Makayed
* @author Dzianis Shlychkou
* @author Andrei_Ramanchuk
*/
@Service
class FinishTestItemHandlerImpl implements FinishTestItemHandler {
private LaunchRepository launchRepository;
private TestItemRepository testItemRepository;
private Function<Issue, IssueEntity> TO_ISSUE = from -> {
IssueEntity issue = new IssueEntity();
issue.setAutoAnalyzed(from.getAutoAnalyzed());
issue.setIgnoreAnalyzer(from.getIgnoreAnalyzer());
issue.setIssueDescription(from.getComment());
return issue;
};
// private ExternalSystemRepository externalSystemRepository;
@Autowired
public void setLaunchRepository(LaunchRepository launchRepository) {
this.launchRepository = launchRepository;
}
@Autowired
public void setTestItemRepository(TestItemRepository testItemRepository) {
this.testItemRepository = testItemRepository;
}
// @Autowired
// public void setExternalSystemRepository(ExternalSystemRepository externalSystemRepository) {
// this.externalSystemRepository = externalSystemRepository;
// }
@Override
public OperationCompletionRS finishTestItem(Long testItemId, FinishTestItemRQ finishExecutionRQ, String username) {
TestItem testItem = testItemRepository.findById(testItemId)
.orElseThrow(() -> new ReportPortalException(TEST_ITEM_NOT_FOUND, testItemId));
verifyTestItem(testItem, testItemId, finishExecutionRQ, fromValue(finishExecutionRQ.getStatus()));
//
// Launch launch = Optional.ofNullable(testItem.getTestItemStructure().getLaunch())
// .orElseThrow(() -> new ReportPortalException(LAUNCH_NOT_FOUND));
//
// if (!launch.getUserRef().equalsIgnoreCase(username)) {
// fail().withError(FINISH_ITEM_NOT_ALLOWED, "You are not launch owner.");
// }
// final Project project = projectRepository.findOne(launch.getProjectRef());
TestItemResults testItemResults = processItemResults(testItem, finishExecutionRQ);
testItem = new TestItemBuilder(testItem).addDescription(finishExecutionRQ.getDescription())
.addTags(finishExecutionRQ.getTags())
.addTestItemResults(testItemResults, finishExecutionRQ.getEndTime())
.get();
testItemRepository.save(testItem);
return new OperationCompletionRS("TestItem with ID = '" + testItemId + "' successfully finished.");
}
/**
	 * If a test item has descendants, its status is resolved from statistics.
	 * A status provided in the request is applied only when the test item has
	 * no descendants.
	 *
	 * @param testItem          Test item
* @param finishExecutionRQ Finish test item request
* @return TestItemResults object
*/
private TestItemResults processItemResults(TestItem testItem, FinishTestItemRQ finishExecutionRQ) {
TestItemResults testItemResults = Optional.ofNullable(testItem.getTestItemResults()).orElse(new TestItemResults());
Optional<StatusEnum> actualStatus = fromValue(finishExecutionRQ.getStatus());
Issue providedIssue = finishExecutionRQ.getIssue();
boolean hasChildren = testItemRepository.hasChildren(testItem.getItemId());
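		// A provided status is applied only to leaf items; items with children derive their status from descendant statistics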
if (actualStatus.isPresent() && !hasChildren) {
testItemResults.setStatus(actualStatus.get());
} else {
testItemResults.setStatus(testItemRepository.identifyStatus(testItem.getItemId()));
}
if (Preconditions.statusIn(FAILED, SKIPPED).test(testItemResults.getStatus()) && !hasChildren) {
if (null != providedIssue) {
				// the provided issue should contain an issue type locator or the NOT_ISSUE value
String locator = providedIssue.getIssueType();
if (!NOT_ISSUE_FLAG.getValue().equalsIgnoreCase(locator)) {
List<IssueType> projectIssueTypes = testItemRepository.selectIssueLocatorsByProject(1L);
IssueType issueType = verifyIssue(testItem.getItemId(), providedIssue, projectIssueTypes);
IssueEntity issue = TO_ISSUE.apply(providedIssue);
issue.setIssueType(issueType.getId());
testItemResults.setIssue(issue);
}
} else {
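				// No issue provided: default to the TO_INVESTIGATE issue type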
List<IssueType> issueTypes = testItemRepository.selectIssueLocatorsByProject(1L);
IssueType toInvestigate = issueTypes.stream()
.filter(it -> it.getLocator().equalsIgnoreCase(TO_INVESTIGATE.getLocator()))
.findFirst()
.orElseThrow(() -> new ReportPortalException(ErrorType.UNCLASSIFIED_ERROR));
IssueEntity issue = new IssueEntity();
issue.setIssueType(toInvestigate.getId());
testItemResults.setIssue(issue);
}
}
return testItemResults;
}
/**
* Validation procedure for specified test item
*
	 * @param testItem          Test item to validate
	 * @param testItemId        ID of test item
	 * @param finishExecutionRQ Request data
	 * @param actualStatus      Actual status of item
*/
private void verifyTestItem(TestItem testItem, final Long testItemId, FinishTestItemRQ finishExecutionRQ,
Optional<StatusEnum> actualStatus) {
expect(testItem.getTestItemResults().getStatus(), Preconditions.statusIn(IN_PROGRESS)).verify(
REPORTING_ITEM_ALREADY_FINISHED, testItem.getItemId());
List<TestItem> items = testItemRepository.selectItemsInStatusByParent(testItem.getItemId(), IN_PROGRESS);
expect(items.isEmpty(), equalTo(true)).verify(FINISH_ITEM_NOT_ALLOWED,
formattedSupplier("Test item '{}' has descendants with '{}' status. All descendants '{}'", testItemId, IN_PROGRESS.name(),
items
)
);
// try {
//
// expect(!actualStatus.isPresent() && !testItem.hasChilds(), equalTo(Boolean.FALSE), formattedSupplier(
// "There is no status provided from request and there are no descendants to check statistics for test item id '{}'",
// testItemId
// )).verify();
expect(finishExecutionRQ.getEndTime(), Preconditions.sameTimeOrLater(testItem.getStartTime())).verify(
FINISH_TIME_EARLIER_THAN_START_TIME, finishExecutionRQ.getEndTime(), testItem.getStartTime(), testItemId);
//
// /*
// * If there is issue provided we have to be sure issue type is
// * correct
// */
// } catch (BusinessRuleViolationException e) {
// fail().withError(AMBIGUOUS_TEST_ITEM_STATUS, e.getMessage());
// }
}
private IssueType verifyIssue(Long testItemId, Issue issue, List<IssueType> projectIssueTypes) {
return projectIssueTypes.stream()
.filter(it -> it.getTestItemIssueType().getLocator().equalsIgnoreCase(issue.getIssueType()))
.findAny()
.orElseThrow(() -> new ReportPortalException(
AMBIGUOUS_TEST_ITEM_STATUS, formattedSupplier(
"Invalid test item issue type definition '{}' is requested for item '{}'. Valid issue types locators are: {}",
issue.getIssueType(), testItemId, projectIssueTypes.stream().map(IssueType::getLocator).collect(toList())
)));
}
}
| src/main/java/com/epam/ta/reportportal/core/item/FinishTestItemHandlerImpl.java | /*
* Copyright 2017 EPAM Systems
*
*
* This file is part of EPAM Report Portal.
* https://github.com/reportportal/service-api
*
* Report Portal is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Report Portal is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Report Portal. If not, see <http://www.gnu.org/licenses/>.
*/
package com.epam.ta.reportportal.core.item;
import com.epam.ta.reportportal.exception.ReportPortalException;
import com.epam.ta.reportportal.store.commons.Preconditions;
import com.epam.ta.reportportal.store.database.dao.LaunchRepository;
import com.epam.ta.reportportal.store.database.dao.TestItemRepository;
import com.epam.ta.reportportal.store.database.entity.enums.StatusEnum;
import com.epam.ta.reportportal.store.database.entity.item.TestItem;
import com.epam.ta.reportportal.store.database.entity.item.TestItemResults;
import com.epam.ta.reportportal.store.database.entity.item.issue.IssueEntity;
import com.epam.ta.reportportal.store.database.entity.item.issue.IssueType;
import com.epam.ta.reportportal.ws.converter.builders.TestItemBuilder;
import com.epam.ta.reportportal.ws.model.ErrorType;
import com.epam.ta.reportportal.ws.model.FinishTestItemRQ;
import com.epam.ta.reportportal.ws.model.OperationCompletionRS;
import com.epam.ta.reportportal.ws.model.issue.Issue;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import java.util.List;
import java.util.Optional;
import java.util.function.Function;
import static com.epam.ta.reportportal.commons.validation.BusinessRule.expect;
import static com.epam.ta.reportportal.commons.validation.Suppliers.formattedSupplier;
import static com.epam.ta.reportportal.store.commons.Predicates.equalTo;
import static com.epam.ta.reportportal.store.database.entity.enums.StatusEnum.*;
import static com.epam.ta.reportportal.store.database.entity.enums.TestItemIssueType.NOT_ISSUE_FLAG;
import static com.epam.ta.reportportal.store.database.entity.enums.TestItemIssueType.TO_INVESTIGATE;
import static com.epam.ta.reportportal.ws.model.ErrorType.*;
import static java.util.stream.Collectors.toList;
/**
* Default implementation of {@link FinishTestItemHandler}
*
* @author Andrei Varabyeu
* @author Aliaksei Makayed
* @author Dzianis Shlychkou
* @author Andrei_Ramanchuk
*/
@Service
class FinishTestItemHandlerImpl implements FinishTestItemHandler {
private LaunchRepository launchRepository;
private TestItemRepository testItemRepository;
private Function<Issue, IssueEntity> TO_ISSUE = from -> {
IssueEntity issue = new IssueEntity();
issue.setAutoAnalyzed(from.getAutoAnalyzed());
issue.setIgnoreAnalyzer(from.getIgnoreAnalyzer());
issue.setIssueDescription(from.getComment());
return issue;
};
// private ExternalSystemRepository externalSystemRepository;
@Autowired
public void setLaunchRepository(LaunchRepository launchRepository) {
this.launchRepository = launchRepository;
}
@Autowired
public void setTestItemRepository(TestItemRepository testItemRepository) {
this.testItemRepository = testItemRepository;
}
// @Autowired
// public void setExternalSystemRepository(ExternalSystemRepository externalSystemRepository) {
// this.externalSystemRepository = externalSystemRepository;
// }
@Override
public OperationCompletionRS finishTestItem(Long testItemId, FinishTestItemRQ finishExecutionRQ, String username) {
TestItem testItem = testItemRepository.findById(testItemId)
.orElseThrow(() -> new ReportPortalException(TEST_ITEM_NOT_FOUND, testItemId));
verifyTestItem(testItem, testItemId, finishExecutionRQ, fromValue(finishExecutionRQ.getStatus()));
//
// Launch launch = Optional.ofNullable(testItem.getTestItemStructure().getLaunch())
// .orElseThrow(() -> new ReportPortalException(LAUNCH_NOT_FOUND));
//
// if (!launch.getUserRef().equalsIgnoreCase(username)) {
// fail().withError(FINISH_ITEM_NOT_ALLOWED, "You are not launch owner.");
// }
// final Project project = projectRepository.findOne(launch.getProjectRef());
TestItemResults testItemResults = processItemResults(testItem, finishExecutionRQ);
testItem = new TestItemBuilder(testItem).addDescription(finishExecutionRQ.getDescription())
.addTags(finishExecutionRQ.getTags())
.addTestItemResults(testItemResults, finishExecutionRQ.getEndTime())
.get();
testItemRepository.save(testItem);
return new OperationCompletionRS("TestItem with ID = '" + testItemId + "' successfully finished.");
}
/**
	 * If a test item has descendants, its status is resolved from statistics.
	 * A status provided in the request is applied only when the test item has
	 * no descendants.
	 *
	 * @param testItem          Test item
* @param finishExecutionRQ Finish test item request
* @return TestItemResults object
*/
private TestItemResults processItemResults(TestItem testItem, FinishTestItemRQ finishExecutionRQ) {
TestItemResults testItemResults = Optional.ofNullable(testItem.getTestItemResults()).orElse(new TestItemResults());
Optional<StatusEnum> actualStatus = fromValue(finishExecutionRQ.getStatus());
Issue providedIssue = finishExecutionRQ.getIssue();
boolean hasChildren = testItemRepository.hasChildren(testItem.getItemId());
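		// A provided status is applied only to leaf items; items with children derive their status from descendant statistics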
if (actualStatus.isPresent() && !hasChildren) {
testItemResults.setStatus(actualStatus.get());
} else {
testItemResults.setStatus(testItemRepository.identifyStatus(testItem.getItemId()));
}
if (Preconditions.statusIn(FAILED, SKIPPED).test(testItemResults.getStatus())) {
if (null != providedIssue) {
				// the provided issue should contain an issue type locator or the NOT_ISSUE value
String locator = providedIssue.getIssueType();
if (!NOT_ISSUE_FLAG.getValue().equalsIgnoreCase(locator)) {
List<IssueType> projectIssueTypes = testItemRepository.selectIssueLocatorsByProject(1L);
IssueType issueType = verifyIssue(testItem.getItemId(), providedIssue, projectIssueTypes);
IssueEntity issue = TO_ISSUE.apply(providedIssue);
issue.setIssueType(issueType.getId());
testItemResults.setIssue(issue);
}
} else {
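				// No issue provided: default to the TO_INVESTIGATE issue type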
List<IssueType> issueTypes = testItemRepository.selectIssueLocatorsByProject(1L);
IssueType toInvestigate = issueTypes.stream()
.filter(it -> it.getLocator().equalsIgnoreCase(TO_INVESTIGATE.getLocator()))
.findFirst()
.orElseThrow(() -> new ReportPortalException(ErrorType.UNCLASSIFIED_ERROR));
IssueEntity issue = new IssueEntity();
issue.setIssueType(toInvestigate.getId());
testItemResults.setIssue(issue);
}
}
return testItemResults;
}
/**
* Validation procedure for specified test item
*
	 * @param testItem          Test item to validate
	 * @param testItemId        ID of test item
	 * @param finishExecutionRQ Request data
	 * @param actualStatus      Actual status of item
*/
private void verifyTestItem(TestItem testItem, final Long testItemId, FinishTestItemRQ finishExecutionRQ,
Optional<StatusEnum> actualStatus) {
expect(testItem.getTestItemResults().getStatus(), Preconditions.statusIn(IN_PROGRESS)).verify(
REPORTING_ITEM_ALREADY_FINISHED, testItem.getItemId());
List<TestItem> items = testItemRepository.selectItemsInStatusByParent(testItem.getItemId(), IN_PROGRESS);
expect(items.isEmpty(), equalTo(true)).verify(FINISH_ITEM_NOT_ALLOWED,
formattedSupplier("Test item '{}' has descendants with '{}' status. All descendants '{}'", testItemId, IN_PROGRESS.name(),
items
)
);
// try {
//
// expect(!actualStatus.isPresent() && !testItem.hasChilds(), equalTo(Boolean.FALSE), formattedSupplier(
// "There is no status provided from request and there are no descendants to check statistics for test item id '{}'",
// testItemId
// )).verify();
expect(finishExecutionRQ.getEndTime(), Preconditions.sameTimeOrLater(testItem.getStartTime())).verify(
FINISH_TIME_EARLIER_THAN_START_TIME, finishExecutionRQ.getEndTime(), testItem.getStartTime(), testItemId);
//
// /*
// * If there is issue provided we have to be sure issue type is
// * correct
// */
// } catch (BusinessRuleViolationException e) {
// fail().withError(AMBIGUOUS_TEST_ITEM_STATUS, e.getMessage());
// }
}
private IssueType verifyIssue(Long testItemId, Issue issue, List<IssueType> projectIssueTypes) {
return projectIssueTypes.stream()
.filter(it -> it.getTestItemIssueType().getLocator().equalsIgnoreCase(issue.getIssueType()))
.findAny()
.orElseThrow(() -> new ReportPortalException(
AMBIGUOUS_TEST_ITEM_STATUS, formattedSupplier(
"Invalid test item issue type definition '{}' is requested for item '{}'. Valid issue types locators are: {}",
issue.getIssueType(), testItemId, projectIssueTypes.stream().map(IssueType::getLocator).collect(toList())
)));
}
}
| fixed missed condition on issue while finishing test item
| src/main/java/com/epam/ta/reportportal/core/item/FinishTestItemHandlerImpl.java | fixed missed condition on issue while finishing test item | <ide><path>rc/main/java/com/epam/ta/reportportal/core/item/FinishTestItemHandlerImpl.java
<ide> testItemResults.setStatus(testItemRepository.identifyStatus(testItem.getItemId()));
<ide> }
<ide>
<del> if (Preconditions.statusIn(FAILED, SKIPPED).test(testItemResults.getStatus())) {
<add> if (Preconditions.statusIn(FAILED, SKIPPED).test(testItemResults.getStatus()) && !hasChildren) {
<ide> if (null != providedIssue) {
<ide> //in provided issue should be locator id or NOT_ISSUE value
<ide> 				// the provided issue should contain an issue type locator or the NOT_ISSUE value
|
Java | epl-1.0 | bb1c005bbf26932bfdaf312010b35e138cca45c9 | 0 | darionct/kura,MMaiero/kura,cdealti/kura,unverbraucht/kura,MMaiero/kura,ymai/kura,MMaiero/kura,gavinying/kura,cdealti/kura,amitjoy/kura,amitjoy/kura,nicolatimeus/kura,nicolatimeus/kura,amitjoy/kura,nicolatimeus/kura,darionct/kura,markcullen/kura_Windows,rohitdubey12/kura,markcullen/kura_Windows,darionct/kura,gavinying/kura,ymai/kura,unverbraucht/kura,MMaiero/kura,gavinying/kura,markoer/kura,ctron/kura,unverbraucht/kura,cdealti/kura,ctron/kura,ymai/kura,cdealti/kura,gavinying/kura,nicolatimeus/kura,MMaiero/kura,nicolatimeus/kura,markoer/kura,nicolatimeus/kura,cdealti/kura,ctron/kura,markcullen/kura_Windows,markcullen/kura_Windows,amitjoy/kura,ctron/kura,ymai/kura,markoer/kura,markcullen/kura_Windows,rohitdubey12/kura,markoer/kura,rohitdubey12/kura,rohitdubey12/kura,amitjoy/kura,darionct/kura,unverbraucht/kura,gavinying/kura,ctron/kura,ymai/kura,markoer/kura,unverbraucht/kura,rohitdubey12/kura,cdealti/kura,MMaiero/kura,ymai/kura,darionct/kura,markoer/kura,ctron/kura,darionct/kura,amitjoy/kura,gavinying/kura | /**
* Copyright (c) 2011, 2015 Eurotech and/or its affiliates
*
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Eclipse Public License v1.0
* which accompanies this distribution, and is available at
* http://www.eclipse.org/legal/epl-v10.html
*
* Contributors:
* Eurotech
*/
package org.eclipse.kura.core.deployment;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.UnsupportedEncodingException;
import java.net.MalformedURLException;
import java.net.URL;
import java.util.Date;
import java.util.Properties;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import org.eclipse.kura.KuraErrorCode;
import org.eclipse.kura.KuraException;
import org.eclipse.kura.cloud.Cloudlet;
import org.eclipse.kura.cloud.CloudletTopic;
import org.eclipse.kura.core.deployment.download.DeploymentPackageDownloadOptions;
import org.eclipse.kura.core.deployment.download.DownloadCountingOutputStream;
import org.eclipse.kura.core.deployment.download.DownloadFileUtilities;
import org.eclipse.kura.core.deployment.download.DownloadImpl;
import org.eclipse.kura.core.deployment.install.DeploymentPackageInstallOptions;
import org.eclipse.kura.core.deployment.install.InstallImpl;
import org.eclipse.kura.core.deployment.uninstall.DeploymentPackageUninstallOptions;
import org.eclipse.kura.core.deployment.uninstall.UninstallImpl;
import org.eclipse.kura.core.deployment.xml.XmlBundle;
import org.eclipse.kura.core.deployment.xml.XmlBundleInfo;
import org.eclipse.kura.core.deployment.xml.XmlBundles;
import org.eclipse.kura.core.deployment.xml.XmlDeploymentPackage;
import org.eclipse.kura.core.deployment.xml.XmlDeploymentPackages;
import org.eclipse.kura.core.deployment.xml.XmlUtil;
import org.eclipse.kura.core.util.ThrowableUtil;
import org.eclipse.kura.data.DataTransportService;
import org.eclipse.kura.message.KuraPayload;
import org.eclipse.kura.message.KuraRequestPayload;
import org.eclipse.kura.message.KuraResponsePayload;
import org.eclipse.kura.ssl.SslManagerService;
import org.osgi.framework.Bundle;
import org.osgi.framework.BundleContext;
import org.osgi.framework.BundleException;
import org.osgi.service.component.ComponentContext;
import org.osgi.service.component.ComponentException;
import org.osgi.service.deploymentadmin.BundleInfo;
import org.osgi.service.deploymentadmin.DeploymentAdmin;
import org.osgi.service.deploymentadmin.DeploymentPackage;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class CloudDeploymentHandlerV2 extends Cloudlet {
private static final Logger s_logger = LoggerFactory.getLogger(CloudDeploymentHandlerV2.class);
public static final String APP_ID = "DEPLOY-V2";
private static final String DPA_CONF_PATH_PROPNAME = "dpa.configuration";
private static final String KURA_CONF_URL_PROPNAME = "kura.configuration";
private static final String PACKAGES_PATH_PROPNAME = "kura.packages";
private static final String KURA_DATA_DIR = "kura.data";
public static final String RESOURCE_PACKAGES = "packages";
public static final String RESOURCE_BUNDLES = "bundles";
/* EXEC */
public static final String RESOURCE_DOWNLOAD = "download";
public static final String RESOURCE_INSTALL = "install";
public static final String RESOURCE_UNINSTALL = "uninstall";
public static final String RESOURCE_CANCEL = "cancel";
public static final String RESOURCE_START = "start";
public static final String RESOURCE_STOP = "stop";
/* Metrics in the REPLY to RESOURCE_DOWNLOAD */
public static final String METRIC_DOWNLOAD_STATUS = "download.status";
public static final String METRIC_REQUESTER_CLIENT_ID = "requester.client.id";
/**
	 * Enum representing the different statuses of the download process.
	 *
	 * {@link DeploymentAgentService.DOWNLOAD_STATUS.PROGRESS} Download in progress
	 * {@link DeploymentAgentService.DOWNLOAD_STATUS.COMPLETE} Download completed
	 * {@link DeploymentAgentService.DOWNLOAD_STATUS.FAILED} Download failed
*/
public static enum DOWNLOAD_STATUS {
IN_PROGRESS("IN_PROGRESS"), COMPLETED("COMPLETED"), FAILED("FAILED"), ALREADY_DONE("ALREADY DONE");
private final String status;
DOWNLOAD_STATUS(String status) {
this.status = status;
}
public String getStatusString() {
return status;
}
}
public static enum INSTALL_STATUS {
IDLE("IDLE"), IN_PROGRESS("IN_PROGRESS"), COMPLETED("COMPLETED"), FAILED("FAILED"), ALREADY_DONE("ALREADY DONE");
private final String status;
INSTALL_STATUS(String status) {
this.status = status;
}
public String getStatusString() {
return status;
}
}
public static enum UNINSTALL_STATUS {
IDLE("IDLE"), IN_PROGRESS("IN_PROGRESS"), COMPLETED("COMPLETED"), FAILED("FAILED"), ALREADY_DONE("ALREADY DONE");
private final String status;
UNINSTALL_STATUS(String status) {
this.status = status;
}
public String getStatusString() {
return status;
}
}
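	// URL of the package currently being downloaded; a non-null value means a download is in progress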
private static String s_pendingPackageUrl = null;
private static DownloadImpl m_downloadImplementation;
private static UninstallImpl m_uninstallImplementation;
public static InstallImpl m_installImplementation;
private SslManagerService m_sslManagerService;
private DeploymentAdmin m_deploymentAdmin;
private static ExecutorService executor = Executors.newSingleThreadExecutor();
private Future<?> downloaderFuture;
private Future<?> installerFuture;
private BundleContext m_bundleContext;
private DataTransportService m_dataTransportService;
private String m_dpaConfPath;
private String m_packagesPath;
private DeploymentPackageDownloadOptions m_downloadOptions;
private boolean m_isInstalling = false;
private DeploymentPackageInstallOptions m_installOptions;
private String m_pendingUninstPackageName;
private String m_installVerificationDir;
private String m_clientId;
// ----------------------------------------------------------------
//
// Dependencies
//
// ----------------------------------------------------------------
public void setSslManagerService(SslManagerService sslManagerService) {
this.m_sslManagerService = sslManagerService;
}
public void unsetSslManagerService(SslManagerService sslManagerService) {
this.m_sslManagerService = null;
}
protected void setDeploymentAdmin(DeploymentAdmin deploymentAdmin) {
m_deploymentAdmin = deploymentAdmin;
}
protected void unsetDeploymentAdmin(DeploymentAdmin deploymentAdmin) {
m_deploymentAdmin = null;
}
public void setDataTransportService(DataTransportService dataTransportService) {
m_dataTransportService = dataTransportService;
}
public void unsetDataTransportService(DataTransportService dataTransportService) {
m_dataTransportService = null;
}
public CloudDeploymentHandlerV2() {
super(APP_ID);
}
// ----------------------------------------------------------------
//
// Activation APIs
//
// ----------------------------------------------------------------
@Override
protected void activate(ComponentContext componentContext) {
s_logger.info("Cloud Deployment v2 is starting");
super.activate(componentContext);
m_bundleContext = componentContext.getBundleContext();
m_clientId= m_dataTransportService.getClientId();
m_dpaConfPath = System.getProperty(DPA_CONF_PATH_PROPNAME);
if (m_dpaConfPath == null || m_dpaConfPath.isEmpty()) {
throw new ComponentException("The value of '" + DPA_CONF_PATH_PROPNAME + "' is not defined");
}
String sKuraConfUrl = System.getProperty(KURA_CONF_URL_PROPNAME);
if (sKuraConfUrl == null || sKuraConfUrl.isEmpty()) {
throw new ComponentException("The value of '" + KURA_CONF_URL_PROPNAME + "' is not defined");
}
URL kuraUrl = null;
try {
kuraUrl = new URL(sKuraConfUrl);
} catch (MalformedURLException e) {
throw new ComponentException("Invalid Kura configuration URL");
}
Properties kuraProperties = new Properties();
try {
kuraProperties.load(kuraUrl.openStream());
} catch (FileNotFoundException e) {
throw new ComponentException("Kura configuration file not found", e);
} catch (IOException e) {
throw new ComponentException("Exception loading Kura configuration file", e);
}
m_packagesPath = kuraProperties.getProperty(PACKAGES_PATH_PROPNAME);
if (m_packagesPath == null || m_packagesPath.isEmpty()) {
throw new ComponentException("The value of '" + PACKAGES_PATH_PROPNAME + "' is not defined");
}
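		// Workaround: replace the known-bad relative default "kura/packages" with the expected absolute location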
if (kuraProperties.getProperty(PACKAGES_PATH_PROPNAME) != null && kuraProperties.getProperty(PACKAGES_PATH_PROPNAME).trim().equals("kura/packages")) {
kuraProperties.setProperty(PACKAGES_PATH_PROPNAME, "/opt/eclipse/kura/kura/packages");
m_packagesPath = kuraProperties.getProperty(PACKAGES_PATH_PROPNAME);
s_logger.warn("Overridding invalid kura.packages location");
}
String kuraDataDir= kuraProperties.getProperty(KURA_DATA_DIR);
m_installImplementation = new InstallImpl(this, kuraDataDir);
m_installImplementation.setPackagesPath(m_packagesPath);
m_installImplementation.setDpaConfPath(m_dpaConfPath);
m_installImplementation.setDeploymentAdmin(m_deploymentAdmin);
m_installImplementation.sendInstallConfirmations();
}
@Override
protected void deactivate(ComponentContext componentContext) {
s_logger.info("Bundle " + APP_ID + " is deactivating!");
if(downloaderFuture != null){
downloaderFuture.cancel(true);
}
if(installerFuture != null){
installerFuture.cancel(true);
}
m_bundleContext = null;
}
// ----------------------------------------------------------------
//
// Public methods
//
// ----------------------------------------------------------------
public void publishMessage(DeploymentPackageOptions options, KuraPayload messagePayload, String messageType){
try {
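			// Progress notifications are published on the NOTIFY/<clientId>/<messageType> control topic of the requester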
String messageTopic = new StringBuilder("NOTIFY/").append(options.getClientId())
.append("/")
.append(messageType)
.toString();
getCloudApplicationClient().controlPublish(options.getRequestClientId(), messageTopic, messagePayload, 2, DFLT_RETAIN, DFLT_PRIORITY);
} catch (KuraException e) {
s_logger.error("Error publishing response for command {} {}", messageType, e);
}
}
// ----------------------------------------------------------------
//
// Protected methods
//
// ----------------------------------------------------------------
@Override
protected void doGet(CloudletTopic reqTopic, KuraRequestPayload reqPayload, KuraResponsePayload respPayload) throws KuraException {
String[] resources = reqTopic.getResources();
if (resources == null || resources.length == 0) {
s_logger.error("Bad request topic: {}", reqTopic.toString());
s_logger.error("Expected one resource but found {}", resources != null ? resources.length : "none");
respPayload.setResponseCode(KuraResponsePayload.RESPONSE_CODE_BAD_REQUEST);
return;
}
if (resources[0].equals(RESOURCE_DOWNLOAD)) {
doGetDownload(reqPayload, respPayload);
} else if (resources[0].equals(RESOURCE_INSTALL)) {
doGetInstall(reqPayload, respPayload);
} else if (resources[0].equals(RESOURCE_PACKAGES)) {
doGetPackages(reqPayload, respPayload);
} else if (resources[0].equals(RESOURCE_BUNDLES)) {
doGetBundles(reqPayload, respPayload);
} else {
s_logger.error("Bad request topic: {}", reqTopic.toString());
s_logger.error("Cannot find resource with name: {}", resources[0]);
respPayload.setResponseCode(KuraResponsePayload.RESPONSE_CODE_NOTFOUND);
return;
}
}
@Override
protected void doExec(CloudletTopic reqTopic, KuraRequestPayload reqPayload, KuraResponsePayload respPayload) throws KuraException {
String[] resources = reqTopic.getResources();
if (resources == null || resources.length == 0) {
s_logger.error("Bad request topic: {}", reqTopic.toString());
s_logger.error("Expected one resource but found {}", resources != null ? resources.length : "none");
respPayload.setResponseCode(KuraResponsePayload.RESPONSE_CODE_BAD_REQUEST);
return;
}
if (resources[0].equals(RESOURCE_DOWNLOAD)) {
doExecDownload(reqPayload, respPayload);
} else if (resources[0].equals(RESOURCE_INSTALL)) {
doExecInstall(reqPayload, respPayload);
} else if (resources[0].equals(RESOURCE_UNINSTALL)) {
doExecUninstall(reqPayload, respPayload);
} else if (resources[0].equals(RESOURCE_START)) {
String bundleId = resources[1];
doExecStartStopBundle(reqPayload, respPayload, true, bundleId);
} else if (resources[0].equals(RESOURCE_STOP)) {
String bundleId = resources[1];
doExecStartStopBundle(reqPayload, respPayload, false, bundleId);
}else {
s_logger.error("Bad request topic: {}", reqTopic.toString());
s_logger.error("Cannot find resource with name: {}", resources[0]);
respPayload.setResponseCode(KuraResponsePayload.RESPONSE_CODE_NOTFOUND);
return;
}
}
@Override
protected void doDel(CloudletTopic reqTopic, KuraRequestPayload reqPayload, KuraResponsePayload respPayload) throws KuraException {
String[] resources = reqTopic.getResources();
if (resources == null || resources.length == 0) {
s_logger.error("Bad request topic: {}", reqTopic.toString());
s_logger.error("Expected one resource but found {}", resources != null ? resources.length : "none");
respPayload.setResponseCode(KuraResponsePayload.RESPONSE_CODE_BAD_REQUEST);
return;
}
if (resources[0].equals(RESOURCE_DOWNLOAD)) {
doDelDownload(reqPayload, respPayload);
} else {
s_logger.error("Bad request topic: {}", reqTopic.toString());
s_logger.error("Cannot find resource with name: {}", resources[0]);
respPayload.setResponseCode(KuraResponsePayload.RESPONSE_CODE_NOTFOUND);
return;
}
}
// ----------------------------------------------------------------
//
// Private methods
//
// ----------------------------------------------------------------
private void doDelDownload(KuraRequestPayload request, KuraResponsePayload response) {
try{
m_downloadImplementation.getDownloadHelper().cancelDownload();
m_downloadImplementation.deleteDownloadedFile();
}catch(Exception ex){
s_logger.info("Error cancelling download!", ex);
}
}
private void doExecDownload(KuraRequestPayload request, KuraResponsePayload response) {
final DeploymentPackageDownloadOptions options;
try {
options = new DeploymentPackageDownloadOptions(request);
options.setClientId(m_clientId);
m_downloadImplementation= new DownloadImpl(options, this);
} catch (Exception ex) {
s_logger.info("Malformed download request!");
response.setResponseCode(KuraResponsePayload.RESPONSE_CODE_ERROR);
response.setTimestamp(new Date());
try {
response.setBody("Malformed donwload request".getBytes("UTF-8"));
} catch (UnsupportedEncodingException e) {
// Ignore
}
response.setException(ex);
return;
}
m_downloadOptions = options;
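		// Only one download at a time: a non-null pending package URL means a download is already in progress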
if (s_pendingPackageUrl != null) {
s_logger.info("Another request seems for the same URL is pending: {}.", s_pendingPackageUrl);
response.setResponseCode(KuraResponsePayload.RESPONSE_CODE_ERROR);
response.setTimestamp(new Date());
response.addMetric(METRIC_DOWNLOAD_STATUS, DOWNLOAD_STATUS.IN_PROGRESS.getStatusString());
try {
response.setBody("Another resource is already in download".getBytes("UTF-8"));
} catch (UnsupportedEncodingException e) {
}
return;
}
boolean alreadyDownloaded = false;
try {
alreadyDownloaded = m_downloadImplementation.isAlreadyDownloaded();
} catch (KuraException ex) {
response.setResponseCode(KuraResponsePayload.RESPONSE_CODE_ERROR);
response.setException(ex);
response.setTimestamp(new Date());
try {
response.setBody("Error checking download status".getBytes("UTF-8"));
} catch (UnsupportedEncodingException e) {
}
return;
}
s_logger.info("About to download and install package at URL {}", options.getDeployUri());
try {
s_pendingPackageUrl = options.getDeployUri();
m_downloadImplementation.setSslManager(m_sslManagerService);
m_downloadImplementation.setAlreadyDownloadedFlag(alreadyDownloaded);
m_downloadImplementation.setVerificationDirectory(m_installVerificationDir);
s_logger.info("Downloading package from URL: " + options.getDeployUri());
downloaderFuture = executor.submit(new Runnable(){
@Override
public void run() {
try {
m_downloadImplementation.downloadDeploymentPackageInternal();
} catch (KuraException e) {
try {
File dpFile = DownloadFileUtilities.getDpDownloadFile(options);
if (dpFile != null){
dpFile.delete();
}
} catch (IOException e1) {
}
} finally{
s_pendingPackageUrl = null;
}
}
});
} catch (Exception e) {
s_logger.error("Failed to download and install package at URL {}: {}", options.getDeployUri(), e);
s_pendingPackageUrl = null;
response.setResponseCode(KuraResponsePayload.RESPONSE_CODE_ERROR);
response.setTimestamp(new Date());
try {
response.setBody(e.getMessage().getBytes("UTF-8"));
} catch (UnsupportedEncodingException uee) {
}
}
return;
}
private void doExecInstall(KuraRequestPayload request, KuraResponsePayload response){
final DeploymentPackageInstallOptions options;
try {
options = new DeploymentPackageInstallOptions(request);
options.setClientId(m_clientId);
} catch (Exception ex) {
s_logger.info("Malformed install request!");
response.setResponseCode(KuraResponsePayload.RESPONSE_CODE_ERROR);
response.setTimestamp(new Date());
try {
response.setBody("Malformed install request".getBytes("UTF-8"));
} catch (UnsupportedEncodingException e) {
// Ignore
}
response.setException(ex);
return;
}
m_installOptions = options;
boolean alreadyDownloaded = false;
try {
alreadyDownloaded = m_downloadImplementation.isAlreadyDownloaded();
} catch (KuraException ex) {
response.setResponseCode(KuraResponsePayload.RESPONSE_CODE_ERROR);
response.setException(ex);
response.setTimestamp(new Date());
try {
response.setBody("Error checking download status".getBytes("UTF-8"));
} catch (UnsupportedEncodingException e) {
}
return;
}
if(alreadyDownloaded && !m_isInstalling){
//Check if file exists
try {
m_isInstalling = true;
final File dpFile = DownloadFileUtilities.getDpDownloadFile(options);
m_installImplementation.setOptions(options);
//if yes, install
installerFuture = executor.submit(new Runnable(){
@Override
public void run() {
try {
installDownloadedFile(dpFile, m_installOptions);
} catch (KuraException e) {
s_logger.error("Impossible to send an exception message to the cloud platform");
if (dpFile != null){
dpFile.delete();
}
} finally {
m_installOptions = null;
m_isInstalling = false;
}
}
});
} catch (IOException e) {
response.setResponseCode(KuraResponsePayload.RESPONSE_CODE_ERROR);
response.setException(e);
response.setTimestamp(new Date());
try {
response.setBody("Exception during install".getBytes("UTF-8"));
} catch (UnsupportedEncodingException e1) {
}
}
} else {
response.setResponseCode(KuraResponsePayload.RESPONSE_CODE_ERROR);
response.setException(new KuraException(KuraErrorCode.INTERNAL_ERROR));
response.setTimestamp(new Date());
try {
response.setBody("Already installing/uninstalling".getBytes("UTF-8"));
} catch (UnsupportedEncodingException e) {
}
return;
}
}
private void doExecUninstall(KuraRequestPayload request, KuraResponsePayload response) {
final DeploymentPackageUninstallOptions options;
try {
options = new DeploymentPackageUninstallOptions(request);
options.setClientId(m_clientId);
} catch (Exception ex) {
s_logger.info("Malformed uninstall request!");
response.setResponseCode(KuraResponsePayload.RESPONSE_CODE_ERROR);
response.setTimestamp(new Date());
try {
response.setBody("Malformed uninstall request".getBytes("UTF-8"));
} catch (UnsupportedEncodingException e) {
// Ignore
}
response.setException(ex);
return;
}
final String packageName = options.getDpName();
//
// We only allow one request at a time
if (!m_isInstalling && m_pendingUninstPackageName != null) {
s_logger.info("Antother request seems still pending: {}. Checking if stale...", m_pendingUninstPackageName);
response = new KuraResponsePayload(KuraResponsePayload.RESPONSE_CODE_ERROR);
response.setTimestamp(new Date());
try {
response.setBody("Only one request at a time is allowed".getBytes("UTF-8"));
} catch (UnsupportedEncodingException e) {
// Ignore
}
} else {
s_logger.info("About to uninstall package {}", packageName);
try {
m_isInstalling = true;
m_pendingUninstPackageName = packageName;
m_uninstallImplementation= new UninstallImpl(this, m_deploymentAdmin);
s_logger.info("Uninstalling package...");
installerFuture = executor.submit(new Runnable(){
@Override
public void run() {
try {
m_uninstallImplementation.uninstaller(options, packageName);
} catch (Exception e) {
try {
m_uninstallImplementation.uninstallFailedAsync(options, packageName, e);
} catch (KuraException e1) {
}
} finally {
m_installOptions = null;
m_isInstalling = false;
}
}
});
} catch (Exception e) {
s_logger.error("Failed to uninstall package {}: {}", packageName, e);
response = new KuraResponsePayload(KuraResponsePayload.RESPONSE_CODE_ERROR);
response.setTimestamp(new Date());
try {
response.setBody(e.getMessage().getBytes("UTF-8"));
} catch (UnsupportedEncodingException uee) {
// Ignore
}
} finally {
m_isInstalling = false;
m_pendingUninstPackageName = null;
}
}
}
private void doExecStartStopBundle(KuraRequestPayload request, KuraResponsePayload response, boolean start, String bundleId) {
if (bundleId == null) {
s_logger.info("EXEC start/stop bundle: null bundle ID");
response.setResponseCode(KuraResponsePayload.RESPONSE_CODE_BAD_REQUEST);
response.setTimestamp(new Date());
} else {
Long id = null;
try {
id = Long.valueOf(bundleId);
} catch (NumberFormatException e){
s_logger.error("EXEC start/stop bundle: bad bundle ID format: {}", e);
response.setResponseCode(KuraResponsePayload.RESPONSE_CODE_BAD_REQUEST);
response.setTimestamp(new Date());
response.setExceptionMessage(e.getMessage());
response.setExceptionStack(ThrowableUtil.stackTraceAsString(e));
}
if (id != null) {
s_logger.info("Executing command {}", start ? RESOURCE_START : RESOURCE_STOP);
Bundle bundle = m_bundleContext.getBundle(id);
if (bundle == null) {
s_logger.error("Bundle ID {} not found", id);
response.setResponseCode(KuraResponsePayload.RESPONSE_CODE_NOTFOUND);
response.setTimestamp(new Date());
} else {
try {
if (start) {
bundle.start();
} else {
bundle.stop();
}
s_logger.info("{} bundle ID {} ({})", new Object[] {start ? "Started" : "Stopped", id, bundle.getSymbolicName()});
response.setResponseCode(KuraResponsePayload.RESPONSE_CODE_OK);
response.setTimestamp(new Date());
} catch (BundleException e) {
s_logger.error("Failed to {} bundle {}: {}", new Object[] {start ? "start" : "stop", id, e});
response.setResponseCode(KuraResponsePayload.RESPONSE_CODE_ERROR);
response.setTimestamp(new Date());
}
}
}
}
}
private void doGetInstall(KuraRequestPayload reqPayload, KuraResponsePayload respPayload) {
if(m_isInstalling){
m_installImplementation.installInProgressSyncMessage(respPayload);
} else {
m_installImplementation.installIdleSyncMessage(respPayload);
}
}
private void doGetDownload(KuraRequestPayload reqPayload, KuraResponsePayload respPayload) {
if (s_pendingPackageUrl != null){ //A download is pending
DownloadCountingOutputStream downloadHelper= m_downloadImplementation.getDownloadHelper();
DownloadImpl.downloadInProgressSyncMessage(respPayload, downloadHelper, m_downloadOptions);
} else { //No pending downloads
			DownloadImpl.downloadAlreadyDoneSyncMessage(respPayload); // is this right? Do we remove the last object?
}
}
private void doGetPackages(KuraRequestPayload request, KuraResponsePayload response) {
DeploymentPackage[] dps = m_deploymentAdmin.listDeploymentPackages();
XmlDeploymentPackages xdps = new XmlDeploymentPackages();
XmlDeploymentPackage[] axdp = new XmlDeploymentPackage[dps.length];
for (int i = 0; i < dps.length; i++) {
DeploymentPackage dp = dps[i];
XmlDeploymentPackage xdp = new XmlDeploymentPackage();
xdp.setName(dp.getName());
xdp.setVersion(dp.getVersion().toString());
BundleInfo[] bis = dp.getBundleInfos();
XmlBundleInfo[] axbi = new XmlBundleInfo[bis.length];
for (int j = 0; j < bis.length; j++) {
BundleInfo bi = bis[j];
XmlBundleInfo xbi = new XmlBundleInfo();
xbi.setName(bi.getSymbolicName());
xbi.setVersion(bi.getVersion().toString());
axbi[j] = xbi;
}
xdp.setBundleInfos(axbi);
axdp[i] = xdp;
}
xdps.setDeploymentPackages(axdp);
try {
String s = XmlUtil.marshal(xdps);
//s_logger.info("Getting resource {}: {}", RESOURCE_PACKAGES, s);
response.setTimestamp(new Date());
try {
response.setBody(s.getBytes("UTF-8"));
} catch (UnsupportedEncodingException e) {
// Ignore
}
} catch (Exception e) {
s_logger.error("Error getting resource {}: {}", RESOURCE_PACKAGES, e);
}
}
private void doGetBundles(KuraRequestPayload request, KuraResponsePayload response) {
Bundle[] bundles = m_bundleContext.getBundles();
XmlBundles xmlBundles = new XmlBundles();
XmlBundle[] axb = new XmlBundle[bundles.length];
for (int i = 0; i < bundles.length; i++) {
Bundle bundle = bundles[i];
XmlBundle xmlBundle = new XmlBundle();
xmlBundle.setName(bundle.getSymbolicName());
xmlBundle.setVersion(bundle.getVersion().toString());
xmlBundle.setId(bundle.getBundleId());
int state = bundle.getState();
switch(state) {
case Bundle.UNINSTALLED:
xmlBundle.setState("UNINSTALLED");
break;
case Bundle.INSTALLED:
xmlBundle.setState("INSTALLED");
break;
case Bundle.RESOLVED:
xmlBundle.setState("RESOLVED");
break;
case Bundle.STARTING:
xmlBundle.setState("STARTING");
break;
case Bundle.STOPPING:
xmlBundle.setState("STOPPING");
break;
case Bundle.ACTIVE:
xmlBundle.setState("ACTIVE");
break;
default:
xmlBundle.setState(String.valueOf(state));
}
axb[i] = xmlBundle;
}
xmlBundles.setBundles(axb);
try {
String s = XmlUtil.marshal(xmlBundles);
//s_logger.info("Getting resource {}: {}", RESOURCE_BUNDLES, s);
response.setTimestamp(new Date());
try {
response.setBody(s.getBytes("UTF-8"));
} catch (UnsupportedEncodingException e) {
// Ignore
}
} catch (Exception e) {
s_logger.error("Error getting resource {}: {}", RESOURCE_BUNDLES, e);
}
}
public void installDownloadedFile(File dpFile, DeploymentPackageInstallOptions options) throws KuraException {
try{
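			// System updates are executed via a shell script; regular deployment packages go through the DeploymentAdmin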
if(options.getSystemUpdate()){
m_installImplementation.installSh(options, dpFile);
} else {
m_installImplementation.installDp(options, dpFile);
}
} catch (Exception e) {
s_logger.info("Install exception");
m_installImplementation.installFailedAsync(options, dpFile.getName(), e);
}
}
}
| kura/org.eclipse.kura.core.deployment/src/main/java/org/eclipse/kura/core/deployment/CloudDeploymentHandlerV2.java | /**
* Copyright (c) 2011, 2015 Eurotech and/or its affiliates
*
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Eclipse Public License v1.0
* which accompanies this distribution, and is available at
* http://www.eclipse.org/legal/epl-v10.html
*
* Contributors:
* Eurotech
*/
package org.eclipse.kura.core.deployment;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.UnsupportedEncodingException;
import java.net.MalformedURLException;
import java.net.URL;
import java.util.Date;
import java.util.Properties;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import org.eclipse.kura.KuraErrorCode;
import org.eclipse.kura.KuraException;
import org.eclipse.kura.cloud.Cloudlet;
import org.eclipse.kura.cloud.CloudletTopic;
import org.eclipse.kura.core.deployment.download.DeploymentPackageDownloadOptions;
import org.eclipse.kura.core.deployment.download.DownloadCountingOutputStream;
import org.eclipse.kura.core.deployment.download.DownloadFileUtilities;
import org.eclipse.kura.core.deployment.download.DownloadImpl;
import org.eclipse.kura.core.deployment.install.DeploymentPackageInstallOptions;
import org.eclipse.kura.core.deployment.install.InstallImpl;
import org.eclipse.kura.core.deployment.uninstall.DeploymentPackageUninstallOptions;
import org.eclipse.kura.core.deployment.uninstall.UninstallImpl;
import org.eclipse.kura.core.deployment.xml.XmlBundle;
import org.eclipse.kura.core.deployment.xml.XmlBundleInfo;
import org.eclipse.kura.core.deployment.xml.XmlBundles;
import org.eclipse.kura.core.deployment.xml.XmlDeploymentPackage;
import org.eclipse.kura.core.deployment.xml.XmlDeploymentPackages;
import org.eclipse.kura.core.deployment.xml.XmlUtil;
import org.eclipse.kura.core.util.ThrowableUtil;
import org.eclipse.kura.data.DataTransportService;
import org.eclipse.kura.message.KuraPayload;
import org.eclipse.kura.message.KuraRequestPayload;
import org.eclipse.kura.message.KuraResponsePayload;
import org.eclipse.kura.ssl.SslManagerService;
import org.osgi.framework.Bundle;
import org.osgi.framework.BundleContext;
import org.osgi.framework.BundleException;
import org.osgi.service.component.ComponentContext;
import org.osgi.service.component.ComponentException;
import org.osgi.service.deploymentadmin.BundleInfo;
import org.osgi.service.deploymentadmin.DeploymentAdmin;
import org.osgi.service.deploymentadmin.DeploymentPackage;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class CloudDeploymentHandlerV2 extends Cloudlet {
private static final Logger s_logger = LoggerFactory.getLogger(CloudDeploymentHandlerV2.class);
public static final String APP_ID = "DEPLOY-V2";
private static final String DPA_CONF_PATH_PROPNAME = "dpa.configuration";
private static final String KURA_CONF_URL_PROPNAME = "kura.configuration";
private static final String PACKAGES_PATH_PROPNAME = "kura.packages";
private static final String KURA_DATA_DIR = "kura.data";
public static final String RESOURCE_PACKAGES = "packages";
public static final String RESOURCE_BUNDLES = "bundles";
/* EXEC */
public static final String RESOURCE_DOWNLOAD = "download";
public static final String RESOURCE_INSTALL = "install";
public static final String RESOURCE_UNINSTALL = "uninstall";
public static final String RESOURCE_CANCEL = "cancel";
public static final String RESOURCE_START = "start";
public static final String RESOURCE_STOP = "stop";
/* Metrics in the REPLY to RESOURCE_DOWNLOAD */
public static final String METRIC_DOWNLOAD_STATUS = "download.status";
public static final String METRIC_REQUESTER_CLIENT_ID = "requester.client.id";
/**
 * Enum representing the different statuses of the download process:
 *
 * {@link DOWNLOAD_STATUS#IN_PROGRESS} download in progress,
 * {@link DOWNLOAD_STATUS#COMPLETED} download completed,
 * {@link DOWNLOAD_STATUS#FAILED} download failed.
*/
public static enum DOWNLOAD_STATUS {
IN_PROGRESS("IN_PROGRESS"), COMPLETED("COMPLETED"), FAILED("FAILED"), ALREADY_DONE("ALREADY DONE");
private final String status;
DOWNLOAD_STATUS(String status) {
this.status = status;
}
public String getStatusString() {
return status;
}
}
public static enum INSTALL_STATUS {
IDLE("IDLE"), IN_PROGRESS("IN_PROGRESS"), COMPLETED("COMPLETED"), FAILED("FAILED"), ALREADY_DONE("ALREADY DONE");
private final String status;
INSTALL_STATUS(String status) {
this.status = status;
}
public String getStatusString() {
return status;
}
}
public static enum UNINSTALL_STATUS {
IDLE("IDLE"), IN_PROGRESS("IN_PROGRESS"), COMPLETED("COMPLETED"), FAILED("FAILED"), ALREADY_DONE("ALREADY DONE");
private final String status;
UNINSTALL_STATUS(String status) {
this.status = status;
}
public String getStatusString() {
return status;
}
}
private static String s_pendingPackageUrl = null;
private static DownloadImpl m_downloadImplementation;
private static UninstallImpl m_uninstallImplementation;
public static InstallImpl m_installImplementation;
private SslManagerService m_sslManagerService;
private DeploymentAdmin m_deploymentAdmin;
private static ExecutorService executor = Executors.newSingleThreadExecutor();
private Future<?> downloaderFuture;
private Future<?> installerFuture;
private BundleContext m_bundleContext;
private DataTransportService m_dataTransportService;
private String m_dpaConfPath;
private String m_packagesPath;
private DeploymentPackageDownloadOptions m_downloadOptions;
private boolean m_isInstalling = false;
private DeploymentPackageInstallOptions m_installOptions;
private String m_pendingUninstPackageName;
private String m_installVerificationDir;
private String m_clientId;
// ----------------------------------------------------------------
//
// Dependencies
//
// ----------------------------------------------------------------
public void setSslManagerService(SslManagerService sslManagerService) {
this.m_sslManagerService = sslManagerService;
}
public void unsetSslManagerService(SslManagerService sslManagerService) {
this.m_sslManagerService = null;
}
protected void setDeploymentAdmin(DeploymentAdmin deploymentAdmin) {
m_deploymentAdmin = deploymentAdmin;
}
protected void unsetDeploymentAdmin(DeploymentAdmin deploymentAdmin) {
m_deploymentAdmin = null;
}
public void setDataTransportService(DataTransportService dataTransportService) {
m_dataTransportService = dataTransportService;
}
public void unsetDataTransportService(DataTransportService dataTransportService) {
m_dataTransportService = null;
}
public CloudDeploymentHandlerV2() {
super(APP_ID);
}
// ----------------------------------------------------------------
//
// Activation APIs
//
// ----------------------------------------------------------------
@Override
protected void activate(ComponentContext componentContext) {
s_logger.info("Cloud Deployment v2 is starting");
super.activate(componentContext);
m_bundleContext = componentContext.getBundleContext();
m_clientId= m_dataTransportService.getClientId();
m_dpaConfPath = System.getProperty(DPA_CONF_PATH_PROPNAME);
if (m_dpaConfPath == null || m_dpaConfPath.isEmpty()) {
throw new ComponentException("The value of '" + DPA_CONF_PATH_PROPNAME + "' is not defined");
}
String sKuraConfUrl = System.getProperty(KURA_CONF_URL_PROPNAME);
if (sKuraConfUrl == null || sKuraConfUrl.isEmpty()) {
throw new ComponentException("The value of '" + KURA_CONF_URL_PROPNAME + "' is not defined");
}
URL kuraUrl = null;
try {
kuraUrl = new URL(sKuraConfUrl);
} catch (MalformedURLException e) {
throw new ComponentException("Invalid Kura configuration URL");
}
Properties kuraProperties = new Properties();
try {
kuraProperties.load(kuraUrl.openStream());
} catch (FileNotFoundException e) {
throw new ComponentException("Kura configuration file not found", e);
} catch (IOException e) {
throw new ComponentException("Exception loading Kura configuration file", e);
}
m_packagesPath = kuraProperties.getProperty(PACKAGES_PATH_PROPNAME);
if (m_packagesPath == null || m_packagesPath.isEmpty()) {
throw new ComponentException("The value of '" + PACKAGES_PATH_PROPNAME + "' is not defined");
}
if (kuraProperties.getProperty(PACKAGES_PATH_PROPNAME) != null && kuraProperties.getProperty(PACKAGES_PATH_PROPNAME).trim().equals("kura/packages")) {
kuraProperties.setProperty(PACKAGES_PATH_PROPNAME, "/opt/eclipse/kura/kura/packages");
m_packagesPath = kuraProperties.getProperty(PACKAGES_PATH_PROPNAME);
s_logger.warn("Overridding invalid kura.packages location");
}
String kuraDataDir= kuraProperties.getProperty(KURA_DATA_DIR);
m_installImplementation = new InstallImpl(this, kuraDataDir);
m_installImplementation.setPackagesPath(m_packagesPath);
m_installImplementation.setDpaConfPath(m_dpaConfPath);
m_installImplementation.setDeploymentAdmin(m_deploymentAdmin);
m_installImplementation.sendInstallConfirmations();
// Thread t = new Thread(new Runnable() {
//
// @Override
// public void run() {
// try {
// Thread.sleep(5000);
// s_logger.info("STARTING DOWNLOAD...");
// CloudletTopic ct = CloudletTopic.parseAppTopic("EXEC/download");
// KuraRequestPayload request = new KuraRequestPayload();
// request.setRequestId("RequestID");
// request.setRequesterClientId("RequesterClientId");
// String url = "https://s3.amazonaws.com/kura-resources/dps/heater.dp";//"http://esfdownload.eurotech-inc.com/update_site/esf3/3.0.2/user_workspace_archive_3.0.2.zip";
// DeploymentPackageDownloadOptions options = new DeploymentPackageDownloadOptions(url, "dpName", "dpVersion");
// options.setUsername("[email protected]");
// options.setPassword("lc2251981");
// // options.setPassword("errata");
// request.addMetric(DeploymentPackageDownloadOptions.METRIC_DP_DOWNLOAD_URI, options.getDeployUri());
// request.addMetric(DeploymentPackageDownloadOptions.METRIC_DP_NAME, options.getDpName());
// request.addMetric(DeploymentPackageDownloadOptions.METRIC_DP_VERSION, options.getDpVersion());
// request.addMetric(DeploymentPackageDownloadOptions.METRIC_DP_DOWNLOAD_BLOCK_DELAY, 3000);
// request.addMetric(DeploymentPackageDownloadOptions.METRIC_DP_DOWNLOAD_BLOCK_SIZE, 3);
// request.addMetric(DeploymentPackageDownloadOptions.METRIC_JOB_ID, Long.parseLong("1111"));
// request.addMetric(DeploymentPackageDownloadOptions.METRIC_DP_INSTALL_SYSTEM_UPDATE, false);
// request.addMetric(DeploymentPackageDownloadOptions.METRIC_DP_REBOOT, false);
//
// KuraResponsePayload response = new KuraResponsePayload(KuraResponsePayload.RESPONSE_CODE_OK);
//
// doExec(ct, request, response);
//
// s_logger.info("*******************************************");
// s_logger.info(response.getMetric(KuraResponsePayload.METRIC_RESPONSE_CODE).toString());
// if(response.getBody() != null){
// s_logger.info(new String(response.getBody()));
// }
// if(response.getMetric(METRIC_DOWNLOAD_STATUS) != null){
// s_logger.info(response.getMetric(METRIC_DOWNLOAD_STATUS).toString());
// }
// s_logger.info("*******************************************");
//
// } catch (InterruptedException e) {
// // TODO Auto-generated catch block
// e.printStackTrace();
// } catch (KuraException e) {
// // TODO Auto-generated catch block
// e.printStackTrace();
// }
// }
//
// });
// t.start();
//
// Thread t2 = new Thread(new Runnable() {
//
// @Override
// public void run() {
// try {
// Thread.sleep(6000);
// s_logger.info("STARTING DOWNLOAD...");
// CloudletTopic ct = CloudletTopic.parseAppTopic("EXEC/download");
// KuraRequestPayload request = new KuraRequestPayload();
// request.setRequestId("RequestID");
// request.setRequesterClientId("RequesterClientId");
// String url = "https://s3.amazonaws.com/kura-resources/dps/heater.dp";//"http://esfdownload.eurotech-inc.com/update_site/esf3/3.0.2/user_workspace_archive_3.0.2.zip";
// DeploymentPackageDownloadOptions options = new DeploymentPackageDownloadOptions(url, "dpName", "dpVersion");
// options.setUsername("[email protected]");
// options.setPassword("lc2251981");
// // options.setPassword("errata");
// request.addMetric(DeploymentPackageDownloadOptions.METRIC_DP_DOWNLOAD_URI, options.getDeployUri());
// request.addMetric(DeploymentPackageDownloadOptions.METRIC_DP_NAME, options.getDpName());
// request.addMetric(DeploymentPackageDownloadOptions.METRIC_DP_VERSION, options.getDpVersion());
// request.addMetric(DeploymentPackageDownloadOptions.METRIC_JOB_ID, Long.parseLong("1111"));
// request.addMetric(DeploymentPackageDownloadOptions.METRIC_DP_INSTALL_SYSTEM_UPDATE, false);
// request.addMetric(DeploymentPackageDownloadOptions.METRIC_DP_REBOOT, false);
//
// KuraResponsePayload response = new KuraResponsePayload(KuraResponsePayload.RESPONSE_CODE_OK);
//
// doExec(ct, request, response);
//
// s_logger.info("*******************************************");
// s_logger.info(response.getMetric(KuraResponsePayload.METRIC_RESPONSE_CODE).toString());
// if(response.getBody() != null){
// s_logger.info(new String(response.getBody()));
// }
// if(response.getMetric(METRIC_DOWNLOAD_STATUS) != null){
// s_logger.info(response.getMetric(METRIC_DOWNLOAD_STATUS).toString());
// }
// s_logger.info("*******************************************");
//
// } catch (InterruptedException e) {
// // TODO Auto-generated catch block
// e.printStackTrace();
// } catch (KuraException e) {
// // TODO Auto-generated catch block
// e.printStackTrace();
// }
// }
//
// });
// t2.start();
}
@Override
protected void deactivate(ComponentContext componentContext) {
s_logger.info("Bundle " + APP_ID + " is deactivating!");
if(downloaderFuture != null){
downloaderFuture.cancel(true);
}
if(installerFuture != null){
installerFuture.cancel(true);
}
m_bundleContext = null;
}
// ----------------------------------------------------------------
//
// Public methods
//
// ----------------------------------------------------------------
public void publishMessage(DeploymentPackageOptions options, KuraPayload messagePayload, String messageType){
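		// Publishes a control notification on a topic of the form NOTIFY/<clientId>/<messageType>,
		// with QoS 2 (DFLT_RETAIN and DFLT_PRIORITY are presumably constants from the Cloudlet base class).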
try {
String messageTopic = new StringBuilder("NOTIFY/").append(options.getClientId())
.append("/")
.append(messageType)
.toString();
getCloudApplicationClient().controlPublish(options.getRequestClientId(), messageTopic, messagePayload, 2, DFLT_RETAIN, DFLT_PRIORITY);
} catch (KuraException e) {
s_logger.error("Error publishing response for command {} {}", messageType, e);
}
}
// ----------------------------------------------------------------
//
// Protected methods
//
// ----------------------------------------------------------------
@Override
protected void doGet(CloudletTopic reqTopic, KuraRequestPayload reqPayload, KuraResponsePayload respPayload) throws KuraException {
//doGetResource(reqTopic, reqPayload);
String[] resources = reqTopic.getResources();
if (resources == null || resources.length == 0) {
s_logger.error("Bad request topic: {}", reqTopic.toString());
s_logger.error("Expected one resource but found {}", resources != null ? resources.length : "none");
respPayload.setResponseCode(KuraResponsePayload.RESPONSE_CODE_BAD_REQUEST);
return;
}
if (resources[0].equals(RESOURCE_DOWNLOAD)) {
doGetDownload(reqPayload, respPayload);
} else if (resources[0].equals(RESOURCE_INSTALL)) {
doGetInstall(reqPayload, respPayload);
} else if (resources[0].equals(RESOURCE_PACKAGES)) {
doGetPackages(reqPayload, respPayload);
} else if (resources[0].equals(RESOURCE_BUNDLES)) {
doGetBundles(reqPayload, respPayload);
} else {
s_logger.error("Bad request topic: {}", reqTopic.toString());
s_logger.error("Cannot find resource with name: {}", resources[0]);
respPayload.setResponseCode(KuraResponsePayload.RESPONSE_CODE_NOTFOUND);
return;
}
}
@Override
protected void doExec(CloudletTopic reqTopic, KuraRequestPayload reqPayload, KuraResponsePayload respPayload) throws KuraException {
String[] resources = reqTopic.getResources();
if (resources == null || resources.length == 0) {
s_logger.error("Bad request topic: {}", reqTopic.toString());
s_logger.error("Expected one resource but found {}", resources != null ? resources.length : "none");
respPayload.setResponseCode(KuraResponsePayload.RESPONSE_CODE_BAD_REQUEST);
return;
}
if (resources[0].equals(RESOURCE_DOWNLOAD)) {
doExecDownload(reqPayload, respPayload);
} else if (resources[0].equals(RESOURCE_INSTALL)) {
doExecInstall(reqPayload, respPayload);
} else if (resources[0].equals(RESOURCE_UNINSTALL)) {
doExecUninstall(reqPayload, respPayload);
} else if (resources[0].equals(RESOURCE_START)) {
String bundleId = resources[1];
doExecStartStopBundle(reqPayload, respPayload, true, bundleId);
} else if (resources[0].equals(RESOURCE_STOP)) {
String bundleId = resources[1];
doExecStartStopBundle(reqPayload, respPayload, false, bundleId);
}else {
s_logger.error("Bad request topic: {}", reqTopic.toString());
s_logger.error("Cannot find resource with name: {}", resources[0]);
respPayload.setResponseCode(KuraResponsePayload.RESPONSE_CODE_NOTFOUND);
return;
}
}
@Override
protected void doDel(CloudletTopic reqTopic, KuraRequestPayload reqPayload, KuraResponsePayload respPayload) throws KuraException {
String[] resources = reqTopic.getResources();
if (resources == null || resources.length == 0) {
s_logger.error("Bad request topic: {}", reqTopic.toString());
s_logger.error("Expected one resource but found {}", resources != null ? resources.length : "none");
respPayload.setResponseCode(KuraResponsePayload.RESPONSE_CODE_BAD_REQUEST);
return;
}
if (resources[0].equals(RESOURCE_DOWNLOAD)) {
doDelDownload(reqPayload, respPayload);
} else {
s_logger.error("Bad request topic: {}", reqTopic.toString());
s_logger.error("Cannot find resource with name: {}", resources[0]);
respPayload.setResponseCode(KuraResponsePayload.RESPONSE_CODE_NOTFOUND);
return;
}
}
// ----------------------------------------------------------------
//
// Private methods
//
// ----------------------------------------------------------------
private void doDelDownload(KuraRequestPayload request, KuraResponsePayload response) {
try{
m_downloadImplementation.getDownloadHelper().cancelDownload();
m_downloadImplementation.deleteDownloadedFile();
}catch(Exception ex){
s_logger.info("Error cancelling download!", ex);
}
}
private void doExecDownload(KuraRequestPayload request, KuraResponsePayload response) {
final DeploymentPackageDownloadOptions options;
try {
options = new DeploymentPackageDownloadOptions(request);
options.setClientId(m_clientId);
m_downloadImplementation= new DownloadImpl(options, this);
} catch (Exception ex) {
s_logger.info("Malformed download request!");
response.setResponseCode(KuraResponsePayload.RESPONSE_CODE_ERROR);
response.setTimestamp(new Date());
try {
response.setBody("Malformed donwload request".getBytes("UTF-8"));
} catch (UnsupportedEncodingException e) {
// Ignore
}
response.setException(ex);
return;
}
m_downloadOptions = options;
if (s_pendingPackageUrl != null) {
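			// Only one download may be in flight at a time; reject this request while another URL is pending.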
s_logger.info("Another request seems for the same URL is pending: {}.", s_pendingPackageUrl);
response.setResponseCode(KuraResponsePayload.RESPONSE_CODE_ERROR);
response.setTimestamp(new Date());
response.addMetric(METRIC_DOWNLOAD_STATUS, DOWNLOAD_STATUS.IN_PROGRESS.getStatusString());
try {
response.setBody("Another resource is already in download".getBytes("UTF-8"));
} catch (UnsupportedEncodingException e) {
}
return;
}
boolean alreadyDownloaded = false;
try {
alreadyDownloaded = m_downloadImplementation.isAlreadyDownloaded();
} catch (KuraException ex) {
response.setResponseCode(KuraResponsePayload.RESPONSE_CODE_ERROR);
response.setException(ex);
response.setTimestamp(new Date());
try {
response.setBody("Error checking download status".getBytes("UTF-8"));
} catch (UnsupportedEncodingException e) {
}
return;
}
s_logger.info("About to download and install package at URL {}", options.getDeployUri());
try {
s_pendingPackageUrl = options.getDeployUri();
m_downloadImplementation.setSslManager(m_sslManagerService);
m_downloadImplementation.setAlreadyDownloadedFlag(alreadyDownloaded);
m_downloadImplementation.setVerificationDirectory(m_installVerificationDir);
s_logger.info("Downloading package from URL: " + options.getDeployUri());
downloaderFuture = executor.submit(new Runnable(){
@Override
public void run() {
try {
m_downloadImplementation.downloadDeploymentPackageInternal();
} catch (KuraException e) {
try {
File dpFile = DownloadFileUtilities.getDpDownloadFile(options);
if (dpFile != null){
dpFile.delete();
}
} catch (IOException e1) {
}
} finally{
s_pendingPackageUrl = null;
}
}
});
} catch (Exception e) {
s_logger.error("Failed to download and install package at URL {}: {}", options.getDeployUri(), e);
s_pendingPackageUrl = null;
response.setResponseCode(KuraResponsePayload.RESPONSE_CODE_ERROR);
response.setTimestamp(new Date());
try {
response.setBody(e.getMessage().getBytes("UTF-8"));
} catch (UnsupportedEncodingException uee) {
}
}
return;
}
private void doExecInstall(KuraRequestPayload request, KuraResponsePayload response){
final DeploymentPackageInstallOptions options;
try {
options = new DeploymentPackageInstallOptions(request);
options.setClientId(m_clientId);
} catch (Exception ex) {
s_logger.info("Malformed install request!");
response.setResponseCode(KuraResponsePayload.RESPONSE_CODE_ERROR);
response.setTimestamp(new Date());
try {
response.setBody("Malformed install request".getBytes("UTF-8"));
} catch (UnsupportedEncodingException e) {
// Ignore
}
response.setException(ex);
return;
}
m_installOptions = options;
boolean alreadyDownloaded = false;
try {
alreadyDownloaded = m_downloadImplementation.isAlreadyDownloaded();
} catch (KuraException ex) {
response.setResponseCode(KuraResponsePayload.RESPONSE_CODE_ERROR);
response.setException(ex);
response.setTimestamp(new Date());
try {
response.setBody("Error checking download status".getBytes("UTF-8"));
} catch (UnsupportedEncodingException e) {
}
return;
}
if(alreadyDownloaded && !m_isInstalling){
//Check if file exists
try {
m_isInstalling = true;
final File dpFile = DownloadFileUtilities.getDpDownloadFile(options);
m_installImplementation.setOptions(options);
//if yes, install
installerFuture = executor.submit(new Runnable(){
@Override
public void run() {
try {
installDownloadedFile(dpFile, m_installOptions);
} catch (KuraException e) {
s_logger.error("Impossible to send an exception message to the cloud platform");
if (dpFile != null){
dpFile.delete();
}
} finally {
m_installOptions = null;
m_isInstalling = false;
}
}
});
} catch (IOException e) {
response.setResponseCode(KuraResponsePayload.RESPONSE_CODE_ERROR);
response.setException(e);
response.setTimestamp(new Date());
try {
response.setBody("Exception during install".getBytes("UTF-8"));
} catch (UnsupportedEncodingException e1) {
}
}
} else {
response.setResponseCode(KuraResponsePayload.RESPONSE_CODE_ERROR);
response.setException(new KuraException(KuraErrorCode.INTERNAL_ERROR));
response.setTimestamp(new Date());
try {
response.setBody("Already installing/uninstalling".getBytes("UTF-8"));
} catch (UnsupportedEncodingException e) {
}
return;
}
}
private void doExecUninstall(KuraRequestPayload request, KuraResponsePayload response) {
final DeploymentPackageUninstallOptions options;
try {
options = new DeploymentPackageUninstallOptions(request);
options.setClientId(m_clientId);
} catch (Exception ex) {
s_logger.info("Malformed uninstall request!");
response.setResponseCode(KuraResponsePayload.RESPONSE_CODE_ERROR);
response.setTimestamp(new Date());
try {
response.setBody("Malformed uninstall request".getBytes("UTF-8"));
} catch (UnsupportedEncodingException e) {
// Ignore
}
response.setException(ex);
return;
}
final String packageName = options.getDpName();
//
// We only allow one request at a time
if (!m_isInstalling && m_pendingUninstPackageName != null) {
s_logger.info("Antother request seems still pending: {}. Checking if stale...", m_pendingUninstPackageName);
response = new KuraResponsePayload(KuraResponsePayload.RESPONSE_CODE_ERROR);
response.setTimestamp(new Date());
try {
response.setBody("Only one request at a time is allowed".getBytes("UTF-8"));
} catch (UnsupportedEncodingException e) {
// Ignore
}
} else {
s_logger.info("About to uninstall package {}", packageName);
try {
m_isInstalling = true;
m_pendingUninstPackageName = packageName;
m_uninstallImplementation= new UninstallImpl(this, m_deploymentAdmin);
s_logger.info("Uninstalling package...");
installerFuture = executor.submit(new Runnable(){
@Override
public void run() {
try {
m_uninstallImplementation.uninstaller(options, packageName);
} catch (Exception e) {
try {
m_uninstallImplementation.uninstallFailedAsync(options, packageName, e);
} catch (KuraException e1) {
}
} finally {
m_installOptions = null;
m_isInstalling = false;
}
}
});
} catch (Exception e) {
s_logger.error("Failed to uninstall package {}: {}", packageName, e);
response = new KuraResponsePayload(KuraResponsePayload.RESPONSE_CODE_ERROR);
response.setTimestamp(new Date());
try {
response.setBody(e.getMessage().getBytes("UTF-8"));
} catch (UnsupportedEncodingException uee) {
// Ignore
}
} finally {
m_isInstalling = false;
m_pendingUninstPackageName = null;
}
}
}
private void doExecStartStopBundle(KuraRequestPayload request, KuraResponsePayload response, boolean start, String bundleId) {
if (bundleId == null) {
s_logger.info("EXEC start/stop bundle: null bundle ID");
response.setResponseCode(KuraResponsePayload.RESPONSE_CODE_BAD_REQUEST);
response.setTimestamp(new Date());
} else {
Long id = null;
try {
id = Long.valueOf(bundleId);
} catch (NumberFormatException e){
s_logger.error("EXEC start/stop bundle: bad bundle ID format: {}", e);
response.setResponseCode(KuraResponsePayload.RESPONSE_CODE_BAD_REQUEST);
response.setTimestamp(new Date());
response.setExceptionMessage(e.getMessage());
response.setExceptionStack(ThrowableUtil.stackTraceAsString(e));
}
if (id != null) {
s_logger.info("Executing command {}", start ? RESOURCE_START : RESOURCE_STOP);
Bundle bundle = m_bundleContext.getBundle(id);
if (bundle == null) {
s_logger.error("Bundle ID {} not found", id);
response.setResponseCode(KuraResponsePayload.RESPONSE_CODE_NOTFOUND);
response.setTimestamp(new Date());
} else {
try {
if (start) {
bundle.start();
} else {
bundle.stop();
}
s_logger.info("{} bundle ID {} ({})", new Object[] {start ? "Started" : "Stopped", id, bundle.getSymbolicName()});
response.setResponseCode(KuraResponsePayload.RESPONSE_CODE_OK);
response.setTimestamp(new Date());
} catch (BundleException e) {
s_logger.error("Failed to {} bundle {}: {}", new Object[] {start ? "start" : "stop", id, e});
response.setResponseCode(KuraResponsePayload.RESPONSE_CODE_ERROR);
response.setTimestamp(new Date());
}
}
}
}
}
private void doGetInstall(KuraRequestPayload reqPayload, KuraResponsePayload respPayload) {
if(m_isInstalling){
m_installImplementation.installInProgressSyncMessage(respPayload);
} else {
m_installImplementation.installIdleSyncMessage(respPayload);
}
}
private void doGetDownload(KuraRequestPayload reqPayload, KuraResponsePayload respPayload) {
if (s_pendingPackageUrl != null){ //A download is pending
DownloadCountingOutputStream downloadHelper= m_downloadImplementation.getDownloadHelper();
DownloadImpl.downloadInProgressSyncMessage(respPayload, downloadHelper, m_downloadOptions);
} else { //No pending downloads
			DownloadImpl.downloadAlreadyDoneSyncMessage(respPayload); // Is this right? Should we remove the last object?
}
}
private void doGetPackages(KuraRequestPayload request, KuraResponsePayload response) {
DeploymentPackage[] dps = m_deploymentAdmin.listDeploymentPackages();
XmlDeploymentPackages xdps = new XmlDeploymentPackages();
XmlDeploymentPackage[] axdp = new XmlDeploymentPackage[dps.length];
for (int i = 0; i < dps.length; i++) {
DeploymentPackage dp = dps[i];
XmlDeploymentPackage xdp = new XmlDeploymentPackage();
xdp.setName(dp.getName());
xdp.setVersion(dp.getVersion().toString());
BundleInfo[] bis = dp.getBundleInfos();
XmlBundleInfo[] axbi = new XmlBundleInfo[bis.length];
for (int j = 0; j < bis.length; j++) {
BundleInfo bi = bis[j];
XmlBundleInfo xbi = new XmlBundleInfo();
xbi.setName(bi.getSymbolicName());
xbi.setVersion(bi.getVersion().toString());
axbi[j] = xbi;
}
xdp.setBundleInfos(axbi);
axdp[i] = xdp;
}
xdps.setDeploymentPackages(axdp);
try {
String s = XmlUtil.marshal(xdps);
//s_logger.info("Getting resource {}: {}", RESOURCE_PACKAGES, s);
response.setTimestamp(new Date());
try {
response.setBody(s.getBytes("UTF-8"));
} catch (UnsupportedEncodingException e) {
// Ignore
}
} catch (Exception e) {
s_logger.error("Error getting resource {}: {}", RESOURCE_PACKAGES, e);
}
}
private void doGetBundles(KuraRequestPayload request, KuraResponsePayload response) {
Bundle[] bundles = m_bundleContext.getBundles();
XmlBundles xmlBundles = new XmlBundles();
XmlBundle[] axb = new XmlBundle[bundles.length];
for (int i = 0; i < bundles.length; i++) {
Bundle bundle = bundles[i];
XmlBundle xmlBundle = new XmlBundle();
xmlBundle.setName(bundle.getSymbolicName());
xmlBundle.setVersion(bundle.getVersion().toString());
xmlBundle.setId(bundle.getBundleId());
int state = bundle.getState();
switch(state) {
case Bundle.UNINSTALLED:
xmlBundle.setState("UNINSTALLED");
break;
case Bundle.INSTALLED:
xmlBundle.setState("INSTALLED");
break;
case Bundle.RESOLVED:
xmlBundle.setState("RESOLVED");
break;
case Bundle.STARTING:
xmlBundle.setState("STARTING");
break;
case Bundle.STOPPING:
xmlBundle.setState("STOPPING");
break;
case Bundle.ACTIVE:
xmlBundle.setState("ACTIVE");
break;
default:
xmlBundle.setState(String.valueOf(state));
}
axb[i] = xmlBundle;
}
xmlBundles.setBundles(axb);
try {
String s = XmlUtil.marshal(xmlBundles);
//s_logger.info("Getting resource {}: {}", RESOURCE_BUNDLES, s);
response.setTimestamp(new Date());
try {
response.setBody(s.getBytes("UTF-8"));
} catch (UnsupportedEncodingException e) {
// Ignore
}
} catch (Exception e) {
s_logger.error("Error getting resource {}: {}", RESOURCE_BUNDLES, e);
}
}
public void installDownloadedFile(File dpFile, DeploymentPackageInstallOptions options) throws KuraException {
try{
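			// Dispatch on the package type: system updates are handled by installSh,
			// regular deployment packages by installDp (both on InstallImpl).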
if(options.getSystemUpdate()){
m_installImplementation.installSh(options, dpFile);
} else {
m_installImplementation.installDp(options, dpFile);
}
} catch (Exception e) {
s_logger.info("Install exception");
m_installImplementation.installFailedAsync(options, dpFile.getName(), e);
}
}
}
| Comments cleanup in CloudDeploymentHandlerV2.
Signed-off-by: MMaiero <[email protected]>
| kura/org.eclipse.kura.core.deployment/src/main/java/org/eclipse/kura/core/deployment/CloudDeploymentHandlerV2.java | Comments cleanup in CloudDeploymentHandlerV2. | <ide><path>ura/org.eclipse.kura.core.deployment/src/main/java/org/eclipse/kura/core/deployment/CloudDeploymentHandlerV2.java
<ide> m_installImplementation.setDpaConfPath(m_dpaConfPath);
<ide> m_installImplementation.setDeploymentAdmin(m_deploymentAdmin);
<ide> m_installImplementation.sendInstallConfirmations();
<del>
<del>// Thread t = new Thread(new Runnable() {
<del>//
<del>// @Override
<del>// public void run() {
<del>// try {
<del>// Thread.sleep(5000);
<del>// s_logger.info("STARTING DOWNLOAD...");
<del>// CloudletTopic ct = CloudletTopic.parseAppTopic("EXEC/download");
<del>// KuraRequestPayload request = new KuraRequestPayload();
<del>// request.setRequestId("RequestID");
<del>// request.setRequesterClientId("RequesterClientId");
<del>// String url = "https://s3.amazonaws.com/kura-resources/dps/heater.dp";//"http://esfdownload.eurotech-inc.com/update_site/esf3/3.0.2/user_workspace_archive_3.0.2.zip";
<del>// DeploymentPackageDownloadOptions options = new DeploymentPackageDownloadOptions(url, "dpName", "dpVersion");
<del>// options.setUsername("[email protected]");
<del>// options.setPassword("lc2251981");
<del>// // options.setPassword("errata");
<del>// request.addMetric(DeploymentPackageDownloadOptions.METRIC_DP_DOWNLOAD_URI, options.getDeployUri());
<del>// request.addMetric(DeploymentPackageDownloadOptions.METRIC_DP_NAME, options.getDpName());
<del>// request.addMetric(DeploymentPackageDownloadOptions.METRIC_DP_VERSION, options.getDpVersion());
<del>// request.addMetric(DeploymentPackageDownloadOptions.METRIC_DP_DOWNLOAD_BLOCK_DELAY, 3000);
<del>// request.addMetric(DeploymentPackageDownloadOptions.METRIC_DP_DOWNLOAD_BLOCK_SIZE, 3);
<del>// request.addMetric(DeploymentPackageDownloadOptions.METRIC_JOB_ID, Long.parseLong("1111"));
<del>// request.addMetric(DeploymentPackageDownloadOptions.METRIC_DP_INSTALL_SYSTEM_UPDATE, false);
<del>// request.addMetric(DeploymentPackageDownloadOptions.METRIC_DP_REBOOT, false);
<del>//
<del>// KuraResponsePayload response = new KuraResponsePayload(KuraResponsePayload.RESPONSE_CODE_OK);
<del>//
<del>// doExec(ct, request, response);
<del>//
<del>// s_logger.info("*******************************************");
<del>// s_logger.info(response.getMetric(KuraResponsePayload.METRIC_RESPONSE_CODE).toString());
<del>// if(response.getBody() != null){
<del>// s_logger.info(new String(response.getBody()));
<del>// }
<del>// if(response.getMetric(METRIC_DOWNLOAD_STATUS) != null){
<del>// s_logger.info(response.getMetric(METRIC_DOWNLOAD_STATUS).toString());
<del>// }
<del>// s_logger.info("*******************************************");
<del>//
<del>// } catch (InterruptedException e) {
<del>// // TODO Auto-generated catch block
<del>// e.printStackTrace();
<del>// } catch (KuraException e) {
<del>// // TODO Auto-generated catch block
<del>// e.printStackTrace();
<del>// }
<del>// }
<del>//
<del>// });
<del>// t.start();
<del>//
<del>// Thread t2 = new Thread(new Runnable() {
<del>//
<del>// @Override
<del>// public void run() {
<del>// try {
<del>// Thread.sleep(6000);
<del>// s_logger.info("STARTING DOWNLOAD...");
<del>// CloudletTopic ct = CloudletTopic.parseAppTopic("EXEC/download");
<del>// KuraRequestPayload request = new KuraRequestPayload();
<del>// request.setRequestId("RequestID");
<del>// request.setRequesterClientId("RequesterClientId");
<del>// String url = "https://s3.amazonaws.com/kura-resources/dps/heater.dp";//"http://esfdownload.eurotech-inc.com/update_site/esf3/3.0.2/user_workspace_archive_3.0.2.zip";
<del>// DeploymentPackageDownloadOptions options = new DeploymentPackageDownloadOptions(url, "dpName", "dpVersion");
<del>// options.setUsername("[email protected]");
<del>// options.setPassword("lc2251981");
<del>// // options.setPassword("errata");
<del>// request.addMetric(DeploymentPackageDownloadOptions.METRIC_DP_DOWNLOAD_URI, options.getDeployUri());
<del>// request.addMetric(DeploymentPackageDownloadOptions.METRIC_DP_NAME, options.getDpName());
<del>// request.addMetric(DeploymentPackageDownloadOptions.METRIC_DP_VERSION, options.getDpVersion());
<del>// request.addMetric(DeploymentPackageDownloadOptions.METRIC_JOB_ID, Long.parseLong("1111"));
<del>// request.addMetric(DeploymentPackageDownloadOptions.METRIC_DP_INSTALL_SYSTEM_UPDATE, false);
<del>// request.addMetric(DeploymentPackageDownloadOptions.METRIC_DP_REBOOT, false);
<del>//
<del>// KuraResponsePayload response = new KuraResponsePayload(KuraResponsePayload.RESPONSE_CODE_OK);
<del>//
<del>// doExec(ct, request, response);
<del>//
<del>// s_logger.info("*******************************************");
<del>// s_logger.info(response.getMetric(KuraResponsePayload.METRIC_RESPONSE_CODE).toString());
<del>// if(response.getBody() != null){
<del>// s_logger.info(new String(response.getBody()));
<del>// }
<del>// if(response.getMetric(METRIC_DOWNLOAD_STATUS) != null){
<del>// s_logger.info(response.getMetric(METRIC_DOWNLOAD_STATUS).toString());
<del>// }
<del>// s_logger.info("*******************************************");
<del>//
<del>// } catch (InterruptedException e) {
<del>// // TODO Auto-generated catch block
<del>// e.printStackTrace();
<del>// } catch (KuraException e) {
<del>// // TODO Auto-generated catch block
<del>// e.printStackTrace();
<del>// }
<del>// }
<del>//
<del>// });
<del>// t2.start();
<ide> }
<ide>
<ide> @Override
<ide>
<ide> @Override
<ide> protected void doGet(CloudletTopic reqTopic, KuraRequestPayload reqPayload, KuraResponsePayload respPayload) throws KuraException {
<del>
<del> //doGetResource(reqTopic, reqPayload);
<ide>
<ide> String[] resources = reqTopic.getResources();
<ide> |
|
Java | agpl-3.0 | abd549db2fc24783e0684f9007706411a74c25d5 | 0 | cstroe/svndumpapi,cstroe/svndumpgui,cstroe/svndumpgui,cstroe/svndumpapi | package com.github.cstroe.svndumpgui.internal.utility.range;
import java.util.ArrayList;
import java.util.List;
import java.util.stream.Collectors;
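/**
 * A collection of integer spans that merges overlapping spans as they are added.
 * A minimal usage sketch (illustrative only; it assumes the {@code SpanImpl(low, high)}
 * constructor used in {@link #clone()} and that {@code Span#merge} joins overlapping spans):
 *
 * <pre>
 * MultiSpan multiSpan = new MultiSpan();
 * multiSpan.add(new SpanImpl(1, 4));
 * multiSpan.add(new SpanImpl(3, 9)); // overlaps, so the two spans are merged
 * multiSpan.contains(7);             // true
 * multiSpan.cutoff(5);               // drops spans starting above 5, truncates the rest
 * </pre>
 */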
public class MultiSpan implements Cloneable {
private List<Span> spans = new ArrayList<>();
public MultiSpan() {}
private MultiSpan(List<Span> spans) {
this.spans = spans;
}
public void add(Span span) {
for(Span currentSpan : spans) {
if(currentSpan.merge(span)) {
reduce();
return;
}
}
spans.add(span);
}
private void reduce() {
MultiSpan multiSpan = new MultiSpan();
for(Span currentSpan : spans) {
multiSpan.add(currentSpan);
}
if(spans.size() > multiSpan.spans.size()) {
spans = multiSpan.spans;
reduce();
}
}
public boolean contains(int value) {
return spans.parallelStream().anyMatch(s -> s.contains(value));
}
public void cutoff(int value) {
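        // Drop spans whose low bound exceeds the cutoff, then apply the cutoff to each remaining span.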
spans = spans.parallelStream().filter(s -> s.low() <= value).collect(Collectors.toList());
spans.parallelStream().forEach(s -> s.cutoff(value));
}
List<Span> getSpans() {
return spans;
}
@SuppressWarnings({"CloneDoesntCallSuperClone", "CloneDoesntDeclareCloneNotSupportedException"})
@Override
public MultiSpan clone() {
List<Span> newSpans = new ArrayList<>(spans.size());
for(Span currentSpan : spans) {
newSpans.add(new SpanImpl(currentSpan.low(), currentSpan.high()));
}
return new MultiSpan(newSpans);
}
@Override
public String toString() {
return String.join(",", spans.parallelStream().map(Object::toString).collect(Collectors.toList()));
}
}
| src/main/java/com/github/cstroe/svndumpgui/internal/utility/range/MultiSpan.java | package com.github.cstroe.svndumpgui.internal.utility.range;
import java.util.ArrayList;
import java.util.List;
import java.util.stream.Collectors;
public class MultiSpan implements Cloneable {
private List<Span> spans = new ArrayList<>();
public MultiSpan() {}
private MultiSpan(List<Span> spans) {
this.spans = spans;
}
public void add(Span span) {
for(Span currentSpan : spans) {
if(currentSpan.merge(span)) {
reduce();
return;
}
}
spans.add(span);
}
private void reduce() {
MultiSpan multiSpan = new MultiSpan();
for(Span currentSpan : spans) {
multiSpan.add(currentSpan);
}
if(spans.size() > multiSpan.spans.size()) {
spans = multiSpan.spans;
reduce();
}
}
public boolean contains(int value) {
return spans.parallelStream().anyMatch(s -> s.contains(value));
}
public void cutoff(int value) {
spans.parallelStream().forEach(s -> s.cutoff(value));
}
List<Span> getSpans() {
return spans;
}
@SuppressWarnings({"CloneDoesntCallSuperClone", "CloneDoesntDeclareCloneNotSupportedException"})
@Override
public MultiSpan clone() {
List<Span> newSpans = new ArrayList<>(spans.size());
for(Span currentSpan : spans) {
newSpans.add(new SpanImpl(currentSpan.low(), currentSpan.high()));
}
return new MultiSpan(newSpans);
}
@Override
public String toString() {
return String.join(",", spans.parallelStream().map(Object::toString).collect(Collectors.toList()));
}
}
| Filter out bad spans.
| src/main/java/com/github/cstroe/svndumpgui/internal/utility/range/MultiSpan.java | Filter out bad spans. | <ide><path>rc/main/java/com/github/cstroe/svndumpgui/internal/utility/range/MultiSpan.java
<ide> }
<ide>
<ide> public void cutoff(int value) {
<add> spans = spans.parallelStream().filter(s -> s.low() <= value).collect(Collectors.toList());
<ide> spans.parallelStream().forEach(s -> s.cutoff(value));
<ide> }
<ide> |
|
Java | apache-2.0 | 7b9f6d0eadb0ce5446a0f2906f425ef6f7aa7fcd | 0 | gbif/dwc-api | package org.gbif.dwc.terms;
import java.net.URI;
/**
* Internal GBIF terms used for processing, fragmenting, crawling, ...
* These are not exposed in downloads or the public API.
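 *
 * <p>Illustrative sketch (assumes the {@code prefixedName()} default of the {@link Term}
 * interface, which joins {@link #prefix()} and {@link #simpleName()} with a colon):
 * <pre>
 * GbifInternalTerm.crawlId.prefixedName(); // "gbint:crawlId"
 * </pre>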
*/
public enum GbifInternalTerm implements Term, AlternativeNames {
identifierCount,
crawlId,
fragment,
fragmentHash,
fragmentCreated,
xmlSchema,
publishingOrgKey,
unitQualifier,
networkKey,
installationKey;
private static final String PREFIX = "gbint";
private static final String NS = "http://rs.gbif.org/terms/internal/";
private static final URI NS_URI = URI.create(NS);
private static final String[] EMPTY = new String[0];
@Override
public String simpleName() {
return name();
}
@Override
public String toString() {
return prefixedName();
}
@Override
public String[] alternativeNames() {
return EMPTY;
}
@Override
public boolean isClass() {
return false;
}
@Override
public String prefix() {
return PREFIX;
}
@Override
public URI namespace() {
return NS_URI;
}
}
| src/main/java/org/gbif/dwc/terms/GbifInternalTerm.java | package org.gbif.dwc.terms;
import java.net.URI;
/**
* Internal GBIF terms used for processing, fragmenting, crawling, ...
* These are not exposed in downloads or the public API.
*/
public enum GbifInternalTerm implements Term, AlternativeNames {
identifierCount,
crawlId,
fragment,
fragmentHash,
fragmentCreated,
xmlSchema,
publishingOrgKey,
unitQualifier,
gbifNetworkKey,
installationKey;
private static final String PREFIX = "gbint";
private static final String NS = "http://rs.gbif.org/terms/internal/";
private static final URI NS_URI = URI.create(NS);
private static final String[] EMPTY = new String[0];
@Override
public String simpleName() {
return name();
}
@Override
public String toString() {
return prefixedName();
}
@Override
public String[] alternativeNames() {
return EMPTY;
}
@Override
public boolean isClass() {
return false;
}
@Override
public String prefix() {
return PREFIX;
}
@Override
public URI namespace() {
return NS_URI;
}
}
| Rename networkKey.
| src/main/java/org/gbif/dwc/terms/GbifInternalTerm.java | Rename networkKey. | <ide><path>rc/main/java/org/gbif/dwc/terms/GbifInternalTerm.java
<ide> xmlSchema,
<ide> publishingOrgKey,
<ide> unitQualifier,
<del> gbifNetworkKey,
<add> networkKey,
<ide> installationKey;
<ide>
<ide> private static final String PREFIX = "gbint"; |
|
Java | apache-2.0 | 7a0e046fd31ce61e8d734b133cdeca4642cd7a7f | 0 | Gigaspaces/xap-openspaces,Gigaspaces/xap-openspaces,Gigaspaces/xap-openspaces | /*
* Copyright 2006-2007 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.openspaces.core.space;
import java.lang.reflect.Field;
import java.net.MalformedURLException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.atomic.AtomicReference;
import com.gigaspaces.client.SpaceProxyFactory;
import net.jini.core.entry.UnusableEntryException;
import org.openspaces.core.GigaSpace;
import org.openspaces.core.GigaSpaceConfigurer;
import org.openspaces.core.cluster.ClusterInfo;
import org.openspaces.core.cluster.ClusterInfoAware;
import org.openspaces.core.config.BlobStoreDataPolicyFactoryBean;
import org.openspaces.core.config.CustomCachePolicyFactoryBean;
import org.openspaces.core.executor.AutowireTask;
import org.openspaces.core.executor.AutowireTaskMarker;
import org.openspaces.core.executor.TaskGigaSpace;
import org.openspaces.core.executor.TaskGigaSpaceAware;
import org.openspaces.core.executor.internal.InternalSpaceTaskWrapper;
import org.openspaces.core.executor.support.DelegatingTask;
import org.openspaces.core.executor.support.ProcessObjectsProvider;
import org.openspaces.core.gateway.GatewayTargetsFactoryBean;
import org.openspaces.core.properties.BeanLevelMergedPropertiesAware;
import org.openspaces.core.space.filter.FilterProviderFactory;
import org.openspaces.core.space.filter.replication.ReplicationFilterProviderFactory;
import org.openspaces.core.transaction.DistributedTransactionProcessingConfigurationFactoryBean;
import org.openspaces.core.util.SpaceUtils;
import org.springframework.beans.factory.config.AutowireCapableBeanFactory;
import org.springframework.context.ApplicationContext;
import org.springframework.context.ApplicationContextAware;
import org.springframework.dao.DataAccessException;
import org.springframework.util.Assert;
import org.springframework.util.ReflectionUtils;
import org.springframework.util.StringUtils;
import com.gigaspaces.datasource.ManagedDataSource;
import com.gigaspaces.datasource.SpaceDataSource;
import com.gigaspaces.internal.reflection.IField;
import com.gigaspaces.internal.reflection.ReflectionUtil;
import com.gigaspaces.internal.utils.collections.CopyOnUpdateMap;
import com.gigaspaces.metadata.SpaceTypeDescriptor;
import com.gigaspaces.security.directory.CredentialsProviderHelper;
import com.gigaspaces.sync.SpaceSynchronizationEndpoint;
import com.j_spaces.core.Constants;
import com.j_spaces.core.IJSpace;
import com.j_spaces.core.SpaceContext;
import com.j_spaces.core.client.FinderException;
import com.j_spaces.core.client.SpaceFinder;
import com.j_spaces.core.client.SpaceURL;
import com.j_spaces.core.client.SpaceURLParser;
import com.j_spaces.core.filters.FilterOperationCodes;
import com.j_spaces.core.filters.FilterProvider;
import com.j_spaces.core.filters.ISpaceFilter;
import com.j_spaces.core.filters.entry.ISpaceFilterEntry;
/**
* A space factory bean that creates a space ({@link IJSpace}) based on a url.
*
* <p>The factory allows to specify url properties using
* {@link #setUrlProperties(java.util.Properties) urlProperties} and space parameters using
* {@link #setParameters(java.util.Map) parameters} or using
* {@link #setProperties(Properties) properties}. It also accepts a {@link ClusterInfo} using
* {@link #setClusterInfo(ClusterInfo)} and translates it into the relevant space url properties
* automatically.
*
* <p>Most url properties are explicitly exposed using different setters. Though they can also be set
 * using the {@link #setUrlProperties(java.util.Properties) urlProperties}, the explicit setters
* allow for more readable and simpler configuration. Some examples of explicit url properties are:
* {@link #setSchema(String)}, {@link #setFifo(boolean)}.
*
* <p>The factory uses the {@link BeanLevelMergedPropertiesAware} in order to be injected with
* properties that were not parameterized in advance (using ${...} notation). This will directly
* inject additional properties in the Space creation/finding process.
*
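 * <p>A minimal configuration sketch (illustrative only; the space name and the OpenSpaces
 * XML namespace prefix are assumptions of this example, not requirements of the class):
 * <pre>
 * &lt;os-core:space id="space" url="/./space" /&gt;
 * </pre>
 *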
* @author kimchy
*/
public class UrlSpaceFactoryBean extends AbstractSpaceFactoryBean implements BeanLevelMergedPropertiesAware, ClusterInfoAware {
private final SpaceProxyFactory factory = new SpaceProxyFactory();
private String url;
private Boolean secured;
private FilterProviderFactory[] filterProviders;
private ReplicationFilterProviderFactory replicationFilterProvider;
private CachePolicy cachePolicy;
private GatewayTargetsFactoryBean gatewayTargets;
private DistributedTransactionProcessingConfigurationFactoryBean distributedTransactionProcessingConfiguration;
private final boolean enableExecutorInjection = true;
private Properties beanLevelProperties;
private ClusterInfo clusterInfo;
private CustomCachePolicyFactoryBean customCachePolicy;
private BlobStoreDataPolicyFactoryBean blobStoreDataPolicy;
/**
     * Creates a new url space factory bean. The url parameter is required, so
* {@link #setUrl(String)} must be called before the bean is initialized.
*/
public UrlSpaceFactoryBean() {
}
/**
* Creates a new url space factory bean based on the url provided.
*
* @param url The url to create the {@link com.j_spaces.core.IJSpace} with.
*/
public UrlSpaceFactoryBean(String url) {
this(url, null);
}
/**
* Creates a new url space factory bean based on the url and map parameters provided.
*
* @param url The url to create the {@link IJSpace} with.
* @param params The parameters to create the {@link IJSpace} with.
*/
public UrlSpaceFactoryBean(String url, Map<String, Object> params) {
this.url = url;
setParameters(params);
}
/**
* Sets the space as secured. Note, when passing userName and password it will
* automatically be secured.
*/
public void setSecured(boolean secured) {
this.secured = secured;
}
/**
     * Sets the url the {@link IJSpace} will be created with. Note, this url does not take effect
     * after the bean has been initialized.
*
* @param url The url to create the {@link IJSpace} with.
*/
public void setUrl(String url) {
this.url = url;
}
/**
     * Sets the parameters the {@link IJSpace} will be created with. Note, these parameters do not
     * take effect after the bean has been initialized.
*
* <p>
* Note, this should not be confused with {@link #setUrlProperties(java.util.Properties)}. The
     * parameters here are the ones referred to as custom properties and allow, for example,
     * controlling the xpath injection into the space schema.
*
* @param parameters The parameters to create the {@link com.j_spaces.core.IJSpace} with.
*/
public void setParameters(Map<String, Object> parameters) {
factory.setParameters(parameters);
}
/**
* Same as {@link #setParameters(java.util.Map) parameters} just with properties for simpler
* configuration.
*/
public void setProperties(Properties properties) {
factory.setProperties(properties);
}
/**
* Sets the url properties. Note, most if not all url level properties can be set using explicit
* setters.
*/
public void setUrlProperties(Properties urlProperties) {
factory.setUrlProperties(urlProperties);
}
/**
* The space instance is created using a space schema file which can be used as a template
* configuration file for creating a space. The user specifies one of the pre-configured schema
* names (to create a space instance from its template) or a custom one using this property.
*
* <p>If a schema name is not defined, a default schema name called <code>default</code> will be
* used.
*/
public void setSchema(String schema) {
factory.setSchema(schema);
}
/**
     * Indicates that all take/write operations are conducted in FIFO mode. Default is
* the Space default (<code>false</code>).
*/
public void setFifo(boolean fifo) {
factory.setFifo(fifo);
}
/**
* The Jini Lookup Service group to find container or space using multicast (jini protocol).
     * Groups are provided as a comma separated list.
*/
public void setLookupGroups(String lookupGroups) {
factory.setLookupGroups(lookupGroups);
}
/**
* The Jini Lookup locators for the Space. In the form of: <code>host1:port1,host2:port2</code>.
*/
public void setLookupLocators(String lookupLocators) {
factory.setLookupLocators(lookupLocators);
}
/**
* The max timeout in <b>milliseconds</b> to find a Container or Space using multicast (jini
* protocol). Defaults to <code>6000</code> (i.e. 6 seconds).
*/
public void setLookupTimeout(Integer lookupTimeout) {
factory.setLookupTimeout(lookupTimeout);
}
/**
     * When <code>false</code>, optimistic locking is disabled. Defaults to the Space default value.
*/
public void setVersioned(boolean versioned) {
factory.setVersioned(versioned);
}
/**
     * If <code>true</code>, a Lease object will not be returned from the write/writeMultiple
* operations. Defaults to the Space default value (<code>false</code>).
*/
public void setNoWriteLease(boolean noWriteLease) {
factory.setNoWriteLease(noWriteLease);
}
/**
* When setting this URL property to <code>true</code> it will allow the space to connect to
* the Mirror service to push its data and operations for asynchronous persistency. Defaults to
* the Space default (which defaults to <code>false</code>).
*/
public void setMirror(boolean mirror) {
factory.setMirror(mirror);
}
/**
* Inject a list of filter provider factories providing the ability to
* inject actual Space filters.
*/
public void setFilterProviders(FilterProviderFactory[] filterProviders) {
this.filterProviders = filterProviders;
}
/**
     * Injects a replication provider, allowing direct injection of actual replication
     * filters.
*/
public void setReplicationFilterProvider(ReplicationFilterProviderFactory replicationFilterProvider) {
this.replicationFilterProvider = replicationFilterProvider;
}
/**
     * Sets an external managed data source to be used by the space for persistency.
*/
public void setExternalDataSource(ManagedDataSource externalDataSource) {
factory.setExternalDataSource(externalDataSource);
}
/**
* Sets the {@link SpaceDataSource} which will be used as a data source for the space.
* @param spaceDataSource The {@link SpaceDataSource} instance.
*/
public void setSpaceDataSource(SpaceDataSource spaceDataSource) {
factory.setSpaceDataSource(spaceDataSource);
}
/**
* Inject a list of space types.
*/
public void setSpaceTypes(SpaceTypeDescriptor[] typeDescriptors) {
factory.setTypeDescriptors(typeDescriptors);
}
/**
* Sets the cache policy that the space will use. If not set, will default to the one configured
* in the space schema.
*
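     * <p>For example (a sketch; assumes the no-arg {@link LruCachePolicy} constructor):
     * <pre>
     * urlSpaceFactoryBean.setCachePolicy(new LruCachePolicy());
     * </pre>
     *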
* @see org.openspaces.core.space.AllInCachePolicy
* @see org.openspaces.core.space.LruCachePolicy
* @see org.openspaces.core.space.CustomCachePolicy
* @see org.openspaces.core.space.BlobStoreDataCachePolicy
*/
public void setCachePolicy(CachePolicy cachePolicy) {
this.cachePolicy = cachePolicy;
}
/**
     * Externally managed override properties using open spaces extended config support. Should not
     * be set directly, but is left settable so that different Spring context containers can set it.
*/
public void setMergedBeanLevelProperties(Properties beanLevelProperties) {
this.beanLevelProperties = beanLevelProperties;
}
/**
     * Injected thanks to this bean implementing {@link ClusterInfoAware}. If set, the
     * cluster information is used to configure the url accordingly.
*/
public void setClusterInfo(ClusterInfo clusterInfo) {
this.clusterInfo = clusterInfo;
}
/**
     * Creates the space by calling {@link #doGetSpaceUrls()}, then finds a space with the
     * returned {@link SpaceURL}s using {@link SpaceFinder#find(SpaceURL)}.
*/
@Override
protected IJSpace doCreateSpace() throws DataAccessException {
SpaceURL[] spaceURLs = doGetSpaceUrls();
try {
return (IJSpace) SpaceFinder.find(spaceURLs, spaceURLs[0].getCustomProperties());
} catch (FinderException e) {
if (SpaceUtils.isRemoteProtocol(spaceURLs[0])) {
                throw new CannotFindSpaceException("Failed to find space with url " + Arrays.toString(spaceURLs), e);
}
            throw new CannotCreateSpaceException("Failed to create space with url " + Arrays.toString(spaceURLs), e);
}
}
/**
* Parses the given space url using {@link SpaceURLParser} and returns the parsed
* {@link SpaceURL}.
*
* <p>
* Uses the {@link #setUrlProperties(java.util.Properties)} and
* {@link #setParameters(java.util.Map)} as parameters for the space. Also uses the
* {@link #setClusterInfo(org.openspaces.core.cluster.ClusterInfo)} by automatically translating
* the cluster information into relevant Space url properties.
*/
@SuppressWarnings("deprecation")
protected SpaceURL[] doGetSpaceUrls() throws DataAccessException {
Assert.notNull(url, "url property is required");
String[] urls = StringUtils.tokenizeToStringArray(url, ";");
SpaceURL[] spacesUrls = new SpaceURL[urls.length];
for (int urlIndex = 0; urlIndex < urls.length; urlIndex++) {
String url = urls[urlIndex];
Properties props = factory.createProperties(SpaceUtils.isRemoteProtocol(url));
if (!SpaceUtils.isRemoteProtocol(url) && enableExecutorInjection) {
if (filterProviders == null) {
filterProviders = new FilterProviderFactory[]{new ExecutorFilterProviderFactory()};
} else {
ArrayList<FilterProviderFactory> tmpProviders = new ArrayList<FilterProviderFactory>(filterProviders.length + 1);
tmpProviders.addAll(Arrays.asList(filterProviders));
tmpProviders.add(new ExecutorFilterProviderFactory());
filterProviders = tmpProviders.toArray(new FilterProviderFactory[tmpProviders.size()]);
}
}
if (filterProviders != null && filterProviders.length > 0) {
if (SpaceUtils.isRemoteProtocol(url)) {
throw new IllegalArgumentException("Filters can only be used with an embedded Space");
}
FilterProvider[] spaceFilterProvider = new FilterProvider[filterProviders.length];
for (int i = 0; i < filterProviders.length; i++) {
spaceFilterProvider[i] = filterProviders[i].getFilterProvider();
}
props.put(Constants.Filter.FILTER_PROVIDERS, spaceFilterProvider);
}
if (replicationFilterProvider != null) {
if (SpaceUtils.isRemoteProtocol(url)) {
throw new IllegalArgumentException("Replication filter provider can only be used with an embedded Space");
}
props.put(Constants.ReplicationFilter.REPLICATION_FILTER_PROVIDER, replicationFilterProvider.getFilterProvider());
}
if (customCachePolicy != null)
cachePolicy = customCachePolicy.asCachePolicy();
if (blobStoreDataPolicy != null)
cachePolicy = blobStoreDataPolicy.asCachePolicy();
if (cachePolicy != null) {
props.putAll(cachePolicy.toProps());
}
// copy over the external config overrides
if (beanLevelProperties != null) {
props.putAll(beanLevelProperties);
}
            // if deploy info is provided, apply it to the space url (only if it is an embedded Space).
if (shouldApplyClusterInfo()) {
if (clusterInfo.getNumberOfInstances() != null && url.indexOf("&" + SpaceURL.CLUSTER_TOTAL_MEMBERS + "=") == -1 && url.indexOf("?" + SpaceURL.CLUSTER_TOTAL_MEMBERS + "=") == -1) {
String totalMembers = clusterInfo.getNumberOfInstances().toString();
if (clusterInfo.getNumberOfBackups() != null && clusterInfo.getNumberOfBackups() > -1) {
totalMembers += "," + clusterInfo.getNumberOfBackups();
}
props.setProperty(SpaceUtils.spaceUrlProperty(SpaceURL.CLUSTER_TOTAL_MEMBERS), totalMembers);
}
if (clusterInfo.getInstanceId() != null && url.indexOf("&" + SpaceURL.CLUSTER_MEMBER_ID + "=") == -1 && url.indexOf("?" + SpaceURL.CLUSTER_MEMBER_ID + "=") == -1) {
props.setProperty(SpaceUtils.spaceUrlProperty(SpaceURL.CLUSTER_MEMBER_ID), clusterInfo.getInstanceId().toString());
}
if (clusterInfo.getBackupId() != null && clusterInfo.getBackupId() != 0 && url.indexOf("&" + SpaceURL.CLUSTER_BACKUP_ID + "=") == -1 && url.indexOf("?" + SpaceURL.CLUSTER_BACKUP_ID + "=") == -1) {
props.setProperty(SpaceUtils.spaceUrlProperty(SpaceURL.CLUSTER_BACKUP_ID), clusterInfo.getBackupId().toString());
}
if (StringUtils.hasText(clusterInfo.getSchema()) && url.indexOf(SpaceURL.CLUSTER_SCHEMA + "=") == -1) {
props.setProperty(SpaceUtils.spaceUrlProperty(SpaceURL.CLUSTER_SCHEMA), clusterInfo.getSchema());
}
}
// no need for a shutdown hook in the space as well
props.setProperty(Constants.Container.CONTAINER_SHUTDOWN_HOOK_PROP, "false");
// handle security
if (beanLevelProperties != null) {
SecurityConfig securityConfig = SecurityConfig.fromMarshalledProperties(beanLevelProperties);
if (securityConfig != null)
setSecurityConfig(securityConfig);
}
if (getSecurityConfig() == null || !getSecurityConfig().isFilled()) {
String username = (String) props.remove(Constants.Security.USERNAME);
String password = (String) props.remove(Constants.Security.PASSWORD);
setSecurityConfig(new SecurityConfig(username, password));
}
if (getSecurityConfig() != null && getSecurityConfig().isFilled()) {
props.put(SpaceURL.SECURED, "true");
CredentialsProviderHelper.appendCredentials(props, getSecurityConfig().getCredentialsProvider());
} else if (secured != null && secured) {
props.put(SpaceURL.SECURED, "true");
}
if (gatewayTargets != null) {
if (SpaceUtils.isRemoteProtocol(url)) {
throw new IllegalArgumentException("Gateway targets can only be used with an embedded Space");
}
props.put(Constants.Replication.REPLICATION_GATEWAYS, gatewayTargets.asGatewaysPolicy());
}
if (distributedTransactionProcessingConfiguration != null) {
if (SpaceUtils.isRemoteProtocol(url)) {
throw new IllegalArgumentException("Distributed transaction processing configuration can only be used with an embedded Space");
}
if (factory.schema == null || !factory.schema.equalsIgnoreCase(Constants.Schemas.MIRROR_SCHEMA)) {
throw new IllegalStateException("Distributed transaction processing configuration can only be set for a Mirror component");
}
if (distributedTransactionProcessingConfiguration.getDistributedTransactionWaitTimeout() != null)
props.put(Constants.Mirror.FULL_MIRROR_DISTRIBUTED_TRANSACTION_TIMEOUT,
distributedTransactionProcessingConfiguration.getDistributedTransactionWaitTimeout().toString());
if (distributedTransactionProcessingConfiguration.getDistributedTransactionWaitForOperations() != null)
props.put(Constants.Mirror.FULL_MIRROR_DISTRIBUTED_TRANSACTION_WAIT_FOR_OPERATIONS,
distributedTransactionProcessingConfiguration.getDistributedTransactionWaitForOperations().toString());
}
if (logger.isDebugEnabled()) {
logger.debug("Finding Space with URL [" + url + "] and properties [" + props + "]");
}
try {
spacesUrls[urlIndex] = SpaceURLParser.parseURL(url, props);
} catch (MalformedURLException e) {
throw new CannotCreateSpaceException("Failed to parse url [" + url + "]", e);
}
}
return spacesUrls;
}
/**
* Checks whether the cluster info should be applied to the space url (only relevant for an embedded Space).
*/
private boolean shouldApplyClusterInfo() {
if (SpaceUtils.isRemoteProtocol(url)) {
return false;
}
if (clusterInfo == null) {
return false;
}
// only apply if we have a specific cluster schema
if (url.indexOf("cluster_schema") != -1 || StringUtils.hasText(clusterInfo.getSchema())) {
return true;
}
return false;
}
/**
* Sets the gateway replication targets to be used with the constructed space.
* @param gatewayTargets The gateway targets.
*/
public void setGatewayTargets(GatewayTargetsFactoryBean gatewayTargets) {
this.gatewayTargets = gatewayTargets;
}
/**
* Sets the distributed transaction processing configuration for the Mirror component.
* @param distributedTransactionProcessingConfiguration The distributed transaction processing configuration to set.
*/
public void setDistributedTransactionProcessingConfiguration(
DistributedTransactionProcessingConfigurationFactoryBean distributedTransactionProcessingConfiguration) {
this.distributedTransactionProcessingConfiguration = distributedTransactionProcessingConfiguration;
}
public void setCustomCachePolicy(CustomCachePolicyFactoryBean customCachePolicy) {
this.customCachePolicy = customCachePolicy;
}
public void setBlobStoreDataPolicy(BlobStoreDataPolicyFactoryBean blobStoreDataPolicy) {
this.blobStoreDataPolicy = blobStoreDataPolicy;
}
private class ExecutorFilterProviderFactory implements FilterProviderFactory {
public FilterProvider getFilterProvider() {
FilterProvider filterProvider = new FilterProvider("InjectionExecutorFilter", new ExecutorSpaceFilter());
filterProvider.setOpCodes(FilterOperationCodes.BEFORE_EXECUTE);
return filterProvider;
}
}
private final Map<Class, Object> tasksGigaSpaceInjectionMap = new CopyOnUpdateMap<Class, Object>();
private static Object NO_FIELD = new Object();
private class ExecutorSpaceFilter implements ISpaceFilter {
private IJSpace space;
private GigaSpace gigaSpace;
public void init(IJSpace space, String filterId, String url, int priority) throws RuntimeException {
this.space = space;
this.gigaSpace = new GigaSpaceConfigurer(space).gigaSpace();
}
public void process(SpaceContext context, ISpaceFilterEntry entry, int operationCode) throws RuntimeException {
if (operationCode != FilterOperationCodes.BEFORE_EXECUTE) {
return;
}
ApplicationContext applicationContext = getApplicationContext();
AutowireCapableBeanFactory beanFactory = null;
if (applicationContext != null) {
beanFactory = applicationContext.getAutowireCapableBeanFactory();
}
try {
Object task = entry.getObject(space);
if (task instanceof InternalSpaceTaskWrapper) {
task = ((InternalSpaceTaskWrapper) task).getTask();
}
// go over the task and inject what can be injected
// break when there is no more DelegatingTasks
while (true) {
if (task instanceof TaskGigaSpaceAware) {
((TaskGigaSpaceAware) task).setGigaSpace(gigaSpace);
} else {
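// Look up the cached injection target for this task class; NO_FIELD marks classes
// that are known to have no @TaskGigaSpace annotated field.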
Object field = tasksGigaSpaceInjectionMap.get(task.getClass());
if (field == NO_FIELD) {
// do nothing
} else if (field != null) {
try {
((IField) field).set(task, gigaSpace);
} catch (IllegalAccessException e) {
throw new RuntimeException("Failed to set task GigaSpace field", e);
}
} else {
final AtomicReference<Field> ref = new AtomicReference<Field>();
ReflectionUtils.doWithFields(task.getClass(), new ReflectionUtils.FieldCallback() {
public void doWith(Field field) throws IllegalArgumentException, IllegalAccessException {
if (field.isAnnotationPresent(TaskGigaSpace.class)) {
ref.set(field);
}
}
});
if (ref.get() == null) {
tasksGigaSpaceInjectionMap.put(task.getClass(), NO_FIELD);
} else {
ref.get().setAccessible(true);
IField fastField = ReflectionUtil.createField(ref.get());
tasksGigaSpaceInjectionMap.put(task.getClass(), fastField);
try {
fastField.set(task, gigaSpace);
} catch (IllegalAccessException e) {
throw new RuntimeException("Failed to set task GigaSpace field", e);
}
}
}
}
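// Tasks marked for autowiring get full Spring bean post-processing below; all other
// tasks only receive the plain ApplicationContextAware/ClusterInfoAware callbacks.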
if (isAutowire(task)) {
if (beanFactory == null) {
throw new IllegalStateException("Task [" + task.getClass().getName() + "] is configured to do autowiring but the space was not started with application context");
}
beanFactory.autowireBeanProperties(task, AutowireCapableBeanFactory.AUTOWIRE_NO, false);
beanFactory.initializeBean(task, task.getClass().getName());
if (task instanceof ProcessObjectsProvider) {
Object[] objects = ((ProcessObjectsProvider) task).getObjectsToProcess();
if (objects != null) {
for (Object obj : objects) {
if (obj != null) {
beanFactory.autowireBeanProperties(obj, AutowireCapableBeanFactory.AUTOWIRE_NO, false);
beanFactory.initializeBean(obj, obj.getClass().getName());
}
}
}
}
} else {
if (applicationContext != null && task instanceof ApplicationContextAware) {
((ApplicationContextAware) task).setApplicationContext(applicationContext);
}
if (clusterInfo != null && task instanceof ClusterInfoAware) {
((ClusterInfoAware) task).setClusterInfo(clusterInfo);
}
}
if (task instanceof DelegatingTask) {
task = ((DelegatingTask) task).getDelegatedTask();
} else {
break;
}
}
} catch (UnusableEntryException e) {
// won't happen
}
}
public void process(SpaceContext context, ISpaceFilterEntry[] entries, int operationCode) throws RuntimeException {
}
public void close() throws RuntimeException {
}
private boolean isAutowire(Object obj) {
if (obj instanceof AutowireTaskMarker) {
return true;
}
return obj.getClass().isAnnotationPresent(AutowireTask.class);
}
}
/**
* Sets the {@link SpaceSynchronizationEndpoint} to be used with the constructed space.
*
* @param spaceSynchronizationEndpoint the synchronization endpoint to set
*/
public void setSpaceSynchronizationEndpoint(SpaceSynchronizationEndpoint spaceSynchronizationEndpoint) {
factory.setSpaceSynchronizationEndpoint(spaceSynchronizationEndpoint);
}
}
| src/main/java/org/openspaces/core/space/UrlSpaceFactoryBean.java | /*
* Copyright 2006-2007 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.openspaces.core.space;
import java.lang.reflect.Field;
import java.net.MalformedURLException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.atomic.AtomicReference;
import com.gigaspaces.client.SpaceProxyFactory;
import net.jini.core.entry.UnusableEntryException;
import org.openspaces.core.GigaSpace;
import org.openspaces.core.GigaSpaceConfigurer;
import org.openspaces.core.cluster.ClusterInfo;
import org.openspaces.core.cluster.ClusterInfoAware;
import org.openspaces.core.config.BlobStoreDataPolicyFactoryBean;
import org.openspaces.core.config.CustomCachePolicyFactoryBean;
import org.openspaces.core.executor.AutowireTask;
import org.openspaces.core.executor.AutowireTaskMarker;
import org.openspaces.core.executor.TaskGigaSpace;
import org.openspaces.core.executor.TaskGigaSpaceAware;
import org.openspaces.core.executor.internal.InternalSpaceTaskWrapper;
import org.openspaces.core.executor.support.DelegatingTask;
import org.openspaces.core.executor.support.ProcessObjectsProvider;
import org.openspaces.core.gateway.GatewayTargetsFactoryBean;
import org.openspaces.core.properties.BeanLevelMergedPropertiesAware;
import org.openspaces.core.space.filter.FilterProviderFactory;
import org.openspaces.core.space.filter.replication.ReplicationFilterProviderFactory;
import org.openspaces.core.transaction.DistributedTransactionProcessingConfigurationFactoryBean;
import org.openspaces.core.util.SpaceUtils;
import org.springframework.beans.factory.config.AutowireCapableBeanFactory;
import org.springframework.context.ApplicationContext;
import org.springframework.context.ApplicationContextAware;
import org.springframework.dao.DataAccessException;
import org.springframework.util.Assert;
import org.springframework.util.ReflectionUtils;
import org.springframework.util.StringUtils;
import com.gigaspaces.datasource.ManagedDataSource;
import com.gigaspaces.datasource.SpaceDataSource;
import com.gigaspaces.internal.reflection.IField;
import com.gigaspaces.internal.reflection.ReflectionUtil;
import com.gigaspaces.internal.utils.collections.CopyOnUpdateMap;
import com.gigaspaces.metadata.SpaceTypeDescriptor;
import com.gigaspaces.security.directory.CredentialsProviderHelper;
import com.gigaspaces.sync.SpaceSynchronizationEndpoint;
import com.j_spaces.core.Constants;
import com.j_spaces.core.IJSpace;
import com.j_spaces.core.SpaceContext;
import com.j_spaces.core.client.FinderException;
import com.j_spaces.core.client.SpaceFinder;
import com.j_spaces.core.client.SpaceURL;
import com.j_spaces.core.client.SpaceURLParser;
import com.j_spaces.core.filters.FilterOperationCodes;
import com.j_spaces.core.filters.FilterProvider;
import com.j_spaces.core.filters.ISpaceFilter;
import com.j_spaces.core.filters.entry.ISpaceFilterEntry;
/**
* A space factory bean that creates a space ({@link IJSpace}) based on a url.
*
* <p>The factory allows specifying url properties using
* {@link #setUrlProperties(java.util.Properties) urlProperties} and space parameters using
* {@link #setParameters(java.util.Map) parameters} or using
* {@link #setProperties(Properties) properties}. It also accepts a {@link ClusterInfo} using
* {@link #setClusterInfo(ClusterInfo)} and translates it into the relevant space url properties
* automatically.
*
* <p>Most url properties are explicitly exposed using different setters. Though they can also be set
* using the {@link #setUrlProperties(java.util.Properties) urlProperties}, the explicit setters
* allow for more readable and simpler configuration. Some examples of explicit url properties are:
* {@link #setSchema(String)}, {@link #setFifo(boolean)}.
*
* <p>The factory uses the {@link BeanLevelMergedPropertiesAware} in order to be injected with
* properties that were not parameterized in advance (using ${...} notation). This will directly
* inject additional properties in the Space creation/finding process.
*
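* <p>A minimal programmatic usage sketch. The space name below is illustrative, and the
* lifecycle calls assume the standard Spring {@code FactoryBean} contract inherited from
* {@link AbstractSpaceFactoryBean}:
*
* <pre>
* UrlSpaceFactoryBean factoryBean = new UrlSpaceFactoryBean("/./mySpace");
* factoryBean.afterPropertiesSet();
* IJSpace space = (IJSpace) factoryBean.getObject();
* </pre>
*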
* @author kimchy
*/
public class UrlSpaceFactoryBean extends AbstractSpaceFactoryBean implements BeanLevelMergedPropertiesAware, ClusterInfoAware {
private final SpaceProxyFactory factory = new SpaceProxyFactory();
private String url;
private Boolean secured;
private FilterProviderFactory[] filterProviders;
private SpaceTypeDescriptor[] typeDescriptors;
private ReplicationFilterProviderFactory replicationFilterProvider;
private ManagedDataSource externalDataSource;
private CachePolicy cachePolicy;
private GatewayTargetsFactoryBean gatewayTargets;
private DistributedTransactionProcessingConfigurationFactoryBean distributedTransactionProcessingConfiguration;
private final boolean enableExecutorInjection = true;
private Properties beanLevelProperties;
private ClusterInfo clusterInfo;
private CustomCachePolicyFactoryBean customCachePolicy;
private BlobStoreDataPolicyFactoryBean blobStoreDataPolicy;
private SpaceDataSource spaceDataSource;
private SpaceSynchronizationEndpoint spaceSynchronizationEndpoint;
/**
* Creates a new url space factory bean. The url parameter is required, so
* {@link #setUrl(String)} must be called before the bean is initialized.
*/
public UrlSpaceFactoryBean() {
}
/**
* Creates a new url space factory bean based on the url provided.
*
* @param url The url to create the {@link com.j_spaces.core.IJSpace} with.
*/
public UrlSpaceFactoryBean(String url) {
this(url, null);
}
/**
* Creates a new url space factory bean based on the url and map parameters provided.
*
* @param url The url to create the {@link IJSpace} with.
* @param params The parameters to create the {@link IJSpace} with.
*/
public UrlSpaceFactoryBean(String url, Map<String, Object> params) {
this.url = url;
setParameters(params);
}
/**
* Sets the space as secured. Note, when passing userName and password it will
* automatically be secured.
*/
public void setSecured(boolean secured) {
this.secured = secured;
}
/**
* Sets the url the {@link IJSpace} will be created with. Note this url does not take effect
* after the bean has been initialized.
*
* @param url The url to create the {@link IJSpace} with.
*/
public void setUrl(String url) {
this.url = url;
}
/**
* Sets the parameters the {@link IJSpace} will be created with. Note these parameters do not
* take effect after the bean has been initialized.
*
* <p>
* Note, this should not be confused with {@link #setUrlProperties(java.util.Properties)}. The
* parameters here are the ones referred to as custom properties and allows for example to
* control the xpath injection to space schema.
*
* @param parameters The parameters to create the {@link com.j_spaces.core.IJSpace} with.
*/
public void setParameters(Map<String, Object> parameters) {
factory.setParameters(parameters);
}
/**
* Same as {@link #setParameters(java.util.Map) parameters} just with properties for simpler
* configuration.
*/
public void setProperties(Properties properties) {
factory.setProperties(properties);
}
/**
* Sets the url properties. Note, most if not all url level properties can be set using explicit
* setters.
*/
public void setUrlProperties(Properties urlProperties) {
factory.setUrlProperties(urlProperties);
}
/**
* The space instance is created using a space schema file which can be used as a template
* configuration file for creating a space. The user specifies one of the pre-configured schema
* names (to create a space instance from its template) or a custom one using this property.
*
* <p>If a schema name is not defined, a default schema name called <code>default</code> will be
* used.
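*
* <p>For example (the schema name is illustrative only):
*
* <pre>
* factoryBean.setSchema("persistent");
* </pre>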
*/
public void setSchema(String schema) {
factory.setSchema(schema);
}
/**
* Indicates that all take/write operations are conducted in FIFO mode. Default is
* the Space default (<code>false</code>).
*/
public void setFifo(boolean fifo) {
factory.setFifo(fifo);
}
/**
* The Jini Lookup Service group to find container or space using multicast (jini protocol).
* Groups are provided as a comma-separated list.
*/
public void setLookupGroups(String lookupGroups) {
factory.setLookupGroups(lookupGroups);
}
/**
* The Jini Lookup locators for the Space. In the form of: <code>host1:port1,host2:port2</code>.
*/
public void setLookupLocators(String lookupLocators) {
factory.setLookupLocators(lookupLocators);
}
/**
* The max timeout in <b>milliseconds</b> to find a Container or Space using multicast (jini
* protocol). Defaults to <code>6000</code> (i.e. 6 seconds).
*/
public void setLookupTimeout(Integer lookupTimeout) {
factory.setLookupTimeout(lookupTimeout);
}
/**
* When <code>false</code>, optimistic lock is disabled. Default to the Space default value.
*/
public void setVersioned(boolean versioned) {
factory.setVersioned(versioned);
}
/**
* If <code>true</code>, a Lease object will not be returned from the write/writeMultiple
* operations. Defaults to the Space default value (<code>false</code>).
*/
public void setNoWriteLease(boolean noWriteLease) {
factory.setNoWriteLease(noWriteLease);
}
/**
* When setting this URL property to <code>true</code> it will allow the space to connect to
* the Mirror service to push its data and operations for asynchronous persistency. Defaults to
* the Space default (which defaults to <code>false</code>).
*/
public void setMirror(boolean mirror) {
factory.setMirror(mirror);
}
/**
* Inject a list of filter provider factories providing the ability to
* inject actual Space filters.
*/
public void setFilterProviders(FilterProviderFactory[] filterProviders) {
this.filterProviders = filterProviders;
}
/**
* Injects a replication provider allowing to directly inject actual replication
* filters.
*/
public void setReplicationFilterProvider(ReplicationFilterProviderFactory replicationFilterProvider) {
this.replicationFilterProvider = replicationFilterProvider;
}
/**
* Sets the external data source the space will use. Deprecated - use
* {@link #setSpaceDataSource(SpaceDataSource)} and/or
* {@link #setSpaceSynchronizationEndpoint(SpaceSynchronizationEndpoint)} instead.
*/
public void setExternalDataSource(ManagedDataSource externalDataSource) {
this.externalDataSource = externalDataSource;
}
/**
* Sets the {@link SpaceDataSource} which will be used as a data source for the space.
* @param spaceDataSource The {@link SpaceDataSource} instance.
*/
public void setSpaceDataSource(SpaceDataSource spaceDataSource) {
this.spaceDataSource = spaceDataSource;
}
/**
* Inject a list of space types.
*/
public void setSpaceTypes(SpaceTypeDescriptor[] typeDescriptors) {
this.typeDescriptors = typeDescriptors;
}
/**
* Sets the cache policy that the space will use. If not set, will default to the one configured
* in the space schema.
*
* @see org.openspaces.core.space.AllInCachePolicy
* @see org.openspaces.core.space.LruCachePolicy
* @see org.openspaces.core.space.CustomCachePolicy
* @see org.openspaces.core.space.BlobStoreDataCachePolicy
*/
public void setCachePolicy(CachePolicy cachePolicy) {
this.cachePolicy = cachePolicy;
}
/**
* Externally managed override properties using open spaces extended config support. Should not
* be set directly; it is exposed so that different Spring context containers can set it.
*/
public void setMergedBeanLevelProperties(Properties beanLevelProperties) {
this.beanLevelProperties = beanLevelProperties;
}
/**
* Injected thanks to this bean implementing {@link ClusterInfoAware}. If set will use the
* cluster information in order to configure the url based on it.
*/
public void setClusterInfo(ClusterInfo clusterInfo) {
this.clusterInfo = clusterInfo;
}
/**
* Creates the space by calling {@link #doGetSpaceUrls()} and then using the returned
* {@link SpaceURL} a space is found using {@link SpaceFinder#find(SpaceURL)}.
*/
@Override
protected IJSpace doCreateSpace() throws DataAccessException {
SpaceURL[] spaceURLs = doGetSpaceUrls();
try {
return (IJSpace) SpaceFinder.find(spaceURLs, spaceURLs[0].getCustomProperties());
} catch (FinderException e) {
if (SpaceUtils.isRemoteProtocol(spaceURLs[0])) {
throw new CannotFindSpaceException("Failed to find space with url " + Arrays.toString(spaceURLs), e);
}
throw new CannotCreateSpaceException("Failed to create space with url " + Arrays.toString(spaceURLs), e);
}
}
/**
* Parses the given space url using {@link SpaceURLParser} and returns the parsed
* {@link SpaceURL}.
*
* <p>
* Uses the {@link #setUrlProperties(java.util.Properties)} and
* {@link #setParameters(java.util.Map)} as parameters for the space. Also uses the
* {@link #setClusterInfo(org.openspaces.core.cluster.ClusterInfo)} by automatically translating
* the cluster information into relevant Space url properties.
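*
* <p>For illustration, an embedded url such as
* <code>/./mySpace?cluster_schema=partitioned&amp;total_members=2,1</code> (values are
* hypothetical) is combined with these properties before being handed to the parser.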
*/
@SuppressWarnings("deprecation")
protected SpaceURL[] doGetSpaceUrls() throws DataAccessException {
Assert.notNull(url, "url property is required");
String[] urls = StringUtils.tokenizeToStringArray(url, ";");
SpaceURL[] spacesUrls = new SpaceURL[urls.length];
for (int urlIndex = 0; urlIndex < urls.length; urlIndex++) {
String url = urls[urlIndex];
Properties props = factory.createProperties();
if (!SpaceUtils.isRemoteProtocol(url) && enableExecutorInjection) {
if (filterProviders == null) {
filterProviders = new FilterProviderFactory[]{new ExecutorFilterProviderFactory()};
} else {
ArrayList<FilterProviderFactory> tmpProviders = new ArrayList<FilterProviderFactory>(filterProviders.length + 1);
tmpProviders.addAll(Arrays.asList(filterProviders));
tmpProviders.add(new ExecutorFilterProviderFactory());
filterProviders = tmpProviders.toArray(new FilterProviderFactory[tmpProviders.size()]);
}
}
if (filterProviders != null && filterProviders.length > 0) {
if (SpaceUtils.isRemoteProtocol(url)) {
throw new IllegalArgumentException("Filters can only be used with an embedded Space");
}
FilterProvider[] spaceFilterProvider = new FilterProvider[filterProviders.length];
for (int i = 0; i < filterProviders.length; i++) {
spaceFilterProvider[i] = filterProviders[i].getFilterProvider();
}
props.put(Constants.Filter.FILTER_PROVIDERS, spaceFilterProvider);
}
if (replicationFilterProvider != null) {
if (SpaceUtils.isRemoteProtocol(url)) {
throw new IllegalArgumentException("Replication filter provider can only be used with an embedded Space");
}
props.put(Constants.ReplicationFilter.REPLICATION_FILTER_PROVIDER, replicationFilterProvider.getFilterProvider());
}
if (externalDataSource != null) {
if (logger.isWarnEnabled())
logger.warn("externalDataSource is deprecated - instead use spaceDataSource and/or spaceSynchronizationEndpoint");
if (SpaceUtils.isRemoteProtocol(url)) {
throw new IllegalArgumentException("External data source can only be used with an embedded Space");
}
props.put(Constants.DataAdapter.DATA_SOURCE, externalDataSource);
props.put(Constants.StorageAdapter.FULL_STORAGE_PERSISTENT_ENABLED_PROP, "true");
if (logger.isDebugEnabled()) {
logger.debug("Data Source [" + externalDataSource + "] provided, enabling data source");
}
}
if (spaceDataSource != null) {
if (SpaceUtils.isRemoteProtocol(url))
throw new IllegalArgumentException("Space data source can only be used with an embedded Space");
props.put(Constants.DataAdapter.SPACE_DATA_SOURCE, spaceDataSource);
props.put(Constants.StorageAdapter.FULL_STORAGE_PERSISTENT_ENABLED_PROP, "true");
}
if (spaceSynchronizationEndpoint != null) {
if (SpaceUtils.isRemoteProtocol(url))
throw new IllegalArgumentException("Synchronization endpoint interceptor can only be used with an embedded Space");
props.put(Constants.DataAdapter.SPACE_SYNC_ENDPOINT, spaceSynchronizationEndpoint);
props.put(Constants.StorageAdapter.FULL_STORAGE_PERSISTENT_ENABLED_PROP, "true");
}
verifyExternalDataSourceIsNotUsedIfNecessary();
if (typeDescriptors != null && typeDescriptors.length >0 ) {
if (SpaceUtils.isRemoteProtocol(url)) {
throw new IllegalArgumentException("Space types can only be introduced on embedded Space");
}
props.put(Constants.Engine.SPACE_TYPES, typeDescriptors);
}
if (customCachePolicy != null)
cachePolicy = customCachePolicy.asCachePolicy();
if (blobStoreDataPolicy != null)
cachePolicy = blobStoreDataPolicy.asCachePolicy();
if (cachePolicy != null) {
props.putAll(cachePolicy.toProps());
}
// copy over the external config overrides
if (beanLevelProperties != null) {
props.putAll(beanLevelProperties);
}
// if deploy info is provided, apply it to the space url (only if it is an embedded Space).
if (shouldApplyClusterInfo()) {
if (clusterInfo.getNumberOfInstances() != null && url.indexOf("&" + SpaceURL.CLUSTER_TOTAL_MEMBERS + "=") == -1 && url.indexOf("?" + SpaceURL.CLUSTER_TOTAL_MEMBERS + "=") == -1) {
String totalMembers = clusterInfo.getNumberOfInstances().toString();
if (clusterInfo.getNumberOfBackups() != null && clusterInfo.getNumberOfBackups() > -1) {
totalMembers += "," + clusterInfo.getNumberOfBackups();
}
props.setProperty(SpaceUtils.spaceUrlProperty(SpaceURL.CLUSTER_TOTAL_MEMBERS), totalMembers);
}
if (clusterInfo.getInstanceId() != null && url.indexOf("&" + SpaceURL.CLUSTER_MEMBER_ID + "=") == -1 && url.indexOf("?" + SpaceURL.CLUSTER_MEMBER_ID + "=") == -1) {
props.setProperty(SpaceUtils.spaceUrlProperty(SpaceURL.CLUSTER_MEMBER_ID), clusterInfo.getInstanceId().toString());
}
if (clusterInfo.getBackupId() != null && clusterInfo.getBackupId() != 0 && url.indexOf("&" + SpaceURL.CLUSTER_BACKUP_ID + "=") == -1 && url.indexOf("?" + SpaceURL.CLUSTER_BACKUP_ID + "=") == -1) {
props.setProperty(SpaceUtils.spaceUrlProperty(SpaceURL.CLUSTER_BACKUP_ID), clusterInfo.getBackupId().toString());
}
if (StringUtils.hasText(clusterInfo.getSchema()) && url.indexOf(SpaceURL.CLUSTER_SCHEMA + "=") == -1) {
props.setProperty(SpaceUtils.spaceUrlProperty(SpaceURL.CLUSTER_SCHEMA), clusterInfo.getSchema());
}
}
// no need for a shutdown hook in the space as well
props.setProperty(Constants.Container.CONTAINER_SHUTDOWN_HOOK_PROP, "false");
// handle security: prefer marshalled bean-level credentials, then explicit username/password properties, then the secured flag
if (beanLevelProperties != null) {
SecurityConfig securityConfig = SecurityConfig.fromMarshalledProperties(beanLevelProperties);
if (securityConfig != null)
setSecurityConfig(securityConfig);
}
if (getSecurityConfig() == null || !getSecurityConfig().isFilled()) {
String username = (String) props.remove(Constants.Security.USERNAME);
String password = (String) props.remove(Constants.Security.PASSWORD);
setSecurityConfig(new SecurityConfig(username, password));
}
if (getSecurityConfig() != null && getSecurityConfig().isFilled()) {
props.put(SpaceURL.SECURED, "true");
CredentialsProviderHelper.appendCredentials(props, getSecurityConfig().getCredentialsProvider());
} else if (secured != null && secured) {
props.put(SpaceURL.SECURED, "true");
}
if (gatewayTargets != null) {
if (SpaceUtils.isRemoteProtocol(url)) {
throw new IllegalArgumentException("Gateway targets can only be used with an embedded Space");
}
props.put(Constants.Replication.REPLICATION_GATEWAYS, gatewayTargets.asGatewaysPolicy());
}
if (distributedTransactionProcessingConfiguration != null) {
if (SpaceUtils.isRemoteProtocol(url)) {
throw new IllegalArgumentException("Distributed transaction processing configuration can only be used with an embedded Space");
}
if (factory.schema == null || !factory.schema.equalsIgnoreCase(Constants.Schemas.MIRROR_SCHEMA)) {
throw new IllegalStateException("Distributed transaction processing configuration can only be set for a Mirror component");
}
if (distributedTransactionProcessingConfiguration.getDistributedTransactionWaitTimeout() != null)
props.put(Constants.Mirror.FULL_MIRROR_DISTRIBUTED_TRANSACTION_TIMEOUT,
distributedTransactionProcessingConfiguration.getDistributedTransactionWaitTimeout().toString());
if (distributedTransactionProcessingConfiguration.getDistributedTransactionWaitForOperations() != null)
props.put(Constants.Mirror.FULL_MIRROR_DISTRIBUTED_TRANSACTION_WAIT_FOR_OPERATIONS,
distributedTransactionProcessingConfiguration.getDistributedTransactionWaitForOperations().toString());
}
if (logger.isDebugEnabled()) {
logger.debug("Finding Space with URL [" + url + "] and properties [" + props + "]");
}
try {
spacesUrls[urlIndex] = SpaceURLParser.parseURL(url, props);
} catch (MalformedURLException e) {
throw new CannotCreateSpaceException("Failed to parse url [" + url + "]", e);
}
}
return spacesUrls;
}
/**
* Checks whether the cluster info should be applied to the space url (only relevant for an embedded Space).
*/
private boolean shouldApplyClusterInfo() {
if (SpaceUtils.isRemoteProtocol(url)) {
return false;
}
if (clusterInfo == null) {
return false;
}
// only apply if we have a specific cluster schema
if (url.indexOf("cluster_schema") != -1 || StringUtils.hasText(clusterInfo.getSchema())) {
return true;
}
return false;
}
/**
* Sets the gateway replication targets to be used with the constructed space.
* @param gatewayTargets The gateway targets.
*/
public void setGatewayTargets(GatewayTargetsFactoryBean gatewayTargets) {
this.gatewayTargets = gatewayTargets;
}
/**
* Sets the distributed transaction processing configuration for the Mirror component.
* @param distributedTransactionProcessingConfiguration The distributed transaction processing configuration to set.
*/
public void setDistributedTransactionProcessingConfiguration(
DistributedTransactionProcessingConfigurationFactoryBean distributedTransactionProcessingConfiguration) {
this.distributedTransactionProcessingConfiguration = distributedTransactionProcessingConfiguration;
}
public void setCustomCachePolicy(CustomCachePolicyFactoryBean customCachePolicy) {
this.customCachePolicy = customCachePolicy;
}
public void setBlobStoreDataPolicy(BlobStoreDataPolicyFactoryBean blobStoreDataPolicy) {
this.blobStoreDataPolicy = blobStoreDataPolicy;
}
private class ExecutorFilterProviderFactory implements FilterProviderFactory {
public FilterProvider getFilterProvider() {
FilterProvider filterProvider = new FilterProvider("InjectionExecutorFilter", new ExecutorSpaceFilter());
filterProvider.setOpCodes(FilterOperationCodes.BEFORE_EXECUTE);
return filterProvider;
}
}
private final Map<Class, Object> tasksGigaSpaceInjectionMap = new CopyOnUpdateMap<Class, Object>();
private static Object NO_FIELD = new Object();
private class ExecutorSpaceFilter implements ISpaceFilter {
private IJSpace space;
private GigaSpace gigaSpace;
public void init(IJSpace space, String filterId, String url, int priority) throws RuntimeException {
this.space = space;
this.gigaSpace = new GigaSpaceConfigurer(space).gigaSpace();
}
public void process(SpaceContext context, ISpaceFilterEntry entry, int operationCode) throws RuntimeException {
if (operationCode != FilterOperationCodes.BEFORE_EXECUTE) {
return;
}
ApplicationContext applicationContext = getApplicationContext();
AutowireCapableBeanFactory beanFactory = null;
if (applicationContext != null) {
beanFactory = applicationContext.getAutowireCapableBeanFactory();
}
try {
Object task = entry.getObject(space);
if (task instanceof InternalSpaceTaskWrapper) {
task = ((InternalSpaceTaskWrapper) task).getTask();
}
// go over the task and inject what can be injected
// break when there is no more DelegatingTasks
while (true) {
if (task instanceof TaskGigaSpaceAware) {
((TaskGigaSpaceAware) task).setGigaSpace(gigaSpace);
} else {
Object field = tasksGigaSpaceInjectionMap.get(task.getClass());
if (field == NO_FIELD) {
// do nothing
} else if (field != null) {
try {
((IField) field).set(task, gigaSpace);
} catch (IllegalAccessException e) {
throw new RuntimeException("Failed to set task GigaSpace field", e);
}
} else {
final AtomicReference<Field> ref = new AtomicReference<Field>();
ReflectionUtils.doWithFields(task.getClass(), new ReflectionUtils.FieldCallback() {
public void doWith(Field field) throws IllegalArgumentException, IllegalAccessException {
if (field.isAnnotationPresent(TaskGigaSpace.class)) {
ref.set(field);
}
}
});
if (ref.get() == null) {
tasksGigaSpaceInjectionMap.put(task.getClass(), NO_FIELD);
} else {
ref.get().setAccessible(true);
IField fastField = ReflectionUtil.createField(ref.get());
tasksGigaSpaceInjectionMap.put(task.getClass(), fastField);
try {
fastField.set(task, gigaSpace);
} catch (IllegalAccessException e) {
throw new RuntimeException("Failed to set task GigaSpace field", e);
}
}
}
}
if (isAutowire(task)) {
if (beanFactory == null) {
throw new IllegalStateException("Task [" + task.getClass().getName() + "] is configured to do autowiring but the space was not started with application context");
}
beanFactory.autowireBeanProperties(task, AutowireCapableBeanFactory.AUTOWIRE_NO, false);
beanFactory.initializeBean(task, task.getClass().getName());
if (task instanceof ProcessObjectsProvider) {
Object[] objects = ((ProcessObjectsProvider) task).getObjectsToProcess();
if (objects != null) {
for (Object obj : objects) {
if (obj != null) {
beanFactory.autowireBeanProperties(obj, AutowireCapableBeanFactory.AUTOWIRE_NO, false);
beanFactory.initializeBean(obj, obj.getClass().getName());
}
}
}
}
} else {
if (applicationContext != null && task instanceof ApplicationContextAware) {
((ApplicationContextAware) task).setApplicationContext(applicationContext);
}
if (clusterInfo != null && task instanceof ClusterInfoAware) {
((ClusterInfoAware) task).setClusterInfo(clusterInfo);
}
}
if (task instanceof DelegatingTask) {
task = ((DelegatingTask) task).getDelegatedTask();
} else {
break;
}
}
} catch (UnusableEntryException e) {
// won't happen
}
}
public void process(SpaceContext context, ISpaceFilterEntry[] entries, int operationCode) throws RuntimeException {
}
public void close() throws RuntimeException {
}
private boolean isAutowire(Object obj) {
if (obj instanceof AutowireTaskMarker) {
return true;
}
return obj.getClass().isAnnotationPresent(AutowireTask.class);
}
}
private void verifyExternalDataSourceIsNotUsedIfNecessary() {
if (externalDataSource != null && (spaceDataSource != null || spaceSynchronizationEndpoint != null))
throw new IllegalArgumentException(
"Cannot set both externalDataSource and spaceDataSource/spaceSynchronizationEndpoint - it is recommended to use spaceDataSource/spaceSynchronizationEndpoint since externalDataSource is deprecated");
}
/**
* Sets the {@link SpaceSynchronizationEndpoint} to be used with the constructed space.
*
* @param spaceSynchronizationEndpoint the synchronization endpoint to set
*/
public void setSpaceSynchronizationEndpoint(
SpaceSynchronizationEndpoint spaceSynchronizationEndpoint) {
this.spaceSynchronizationEndpoint = spaceSynchronizationEndpoint;
}
}
| GS-11758
Moved persistency and space types to new factory.
svn path=/xap/trunk/gigaspaces/; revision=183904
Former-commit-id: 953f8abb932f7722ec4be6122a2b79edde7da544 | src/main/java/org/openspaces/core/space/UrlSpaceFactoryBean.java | GS-11758 Moved persistency and space types to new factory. | <ide><path>rc/main/java/org/openspaces/core/space/UrlSpaceFactoryBean.java
<ide> private Boolean secured;
<ide>
<ide> private FilterProviderFactory[] filterProviders;
<del>
<del> private SpaceTypeDescriptor[] typeDescriptors;
<ide>
<ide> private ReplicationFilterProviderFactory replicationFilterProvider;
<del>
<del> private ManagedDataSource externalDataSource;
<ide>
<ide> private CachePolicy cachePolicy;
<ide>
<ide> private CustomCachePolicyFactoryBean customCachePolicy;
<ide>
<ide> private BlobStoreDataPolicyFactoryBean blobStoreDataPolicy;
<del>
<del> private SpaceDataSource spaceDataSource;
<del>
<del> private SpaceSynchronizationEndpoint spaceSynchronizationEndpoint;
<del>
<ide>
<ide> /**
<ide> * Creates a new url space factory bean. The url parameters is requires so the
<ide> * A data source
<ide> */
<ide> public void setExternalDataSource(ManagedDataSource externalDataSource) {
<del> this.externalDataSource = externalDataSource;
<add> factory.setExternalDataSource(externalDataSource);
<ide> }
<ide>
<ide> /**
<ide> * @param spaceDataSource The {@link SpaceDataSource} instance.
<ide> */
<ide> public void setSpaceDataSource(SpaceDataSource spaceDataSource) {
<del> this.spaceDataSource = spaceDataSource;
<add> factory.setSpaceDataSource(spaceDataSource);
<ide> }
<ide>
<ide> /**
<ide> * Inject a list of space types.
<ide> */
<ide> public void setSpaceTypes(SpaceTypeDescriptor[] typeDescriptors) {
<del> this.typeDescriptors = typeDescriptors;
<add> factory.setTypeDescriptors(typeDescriptors);
<ide> }
<ide>
<ide> /**
<ide> for (int urlIndex = 0; urlIndex < urls.length; urlIndex++) {
<ide> String url = urls[urlIndex];
<ide>
<del> Properties props = factory.createProperties();
<add> Properties props = factory.createProperties(SpaceUtils.isRemoteProtocol(url));
<ide>
<ide> if (!SpaceUtils.isRemoteProtocol(url) && enableExecutorInjection) {
<ide> if (filterProviders == null) {
<ide> props.put(Constants.ReplicationFilter.REPLICATION_FILTER_PROVIDER, replicationFilterProvider.getFilterProvider());
<ide> }
<ide>
<del> if (externalDataSource != null) {
<del> if (logger.isWarnEnabled())
<del> logger.warn("externalDataSource is deprecated - instead use spaceDataSource and/or spaceSynchronizationEndpoint");
<del> if (SpaceUtils.isRemoteProtocol(url)) {
<del> throw new IllegalArgumentException("External data source can only be used with an embedded Space");
<del> }
<del> props.put(Constants.DataAdapter.DATA_SOURCE, externalDataSource);
<del> props.put(Constants.StorageAdapter.FULL_STORAGE_PERSISTENT_ENABLED_PROP, "true");
<del> if (logger.isDebugEnabled()) {
<del> logger.debug("Data Source [" + externalDataSource + "] provided, enabling data source");
<del> }
<del> }
<del>
<del> if (spaceDataSource != null) {
<del> if (SpaceUtils.isRemoteProtocol(url))
<del> throw new IllegalArgumentException("Space data source can only be used with an embedded Space");
<del> props.put(Constants.DataAdapter.SPACE_DATA_SOURCE, spaceDataSource);
<del> props.put(Constants.StorageAdapter.FULL_STORAGE_PERSISTENT_ENABLED_PROP, "true");
<del> }
<del>
<del> if (spaceSynchronizationEndpoint != null) {
<del> if (SpaceUtils.isRemoteProtocol(url))
<del> throw new IllegalArgumentException("Synchronization endpoint interceptor can only be used with an embedded Space");
<del> props.put(Constants.DataAdapter.SPACE_SYNC_ENDPOINT, spaceSynchronizationEndpoint);
<del> props.put(Constants.StorageAdapter.FULL_STORAGE_PERSISTENT_ENABLED_PROP, "true");
<del> }
<del>
<del> verifyExternalDataSourceIsNotUsedIfNecessary();
<del>
<del> if (typeDescriptors != null && typeDescriptors.length >0 ) {
<del> if (SpaceUtils.isRemoteProtocol(url)) {
<del> throw new IllegalArgumentException("Space types can only be introduced on embedded Space");
<del> }
<del> props.put(Constants.Engine.SPACE_TYPES, typeDescriptors);
<del> }
<del>
<ide> if (customCachePolicy != null)
<ide> cachePolicy = customCachePolicy.asCachePolicy();
<ide>
<ide> }
<ide> }
<ide>
<del> private void verifyExternalDataSourceIsNotUsedIfNecessary() {
<del> if (externalDataSource != null && (spaceDataSource != null || spaceSynchronizationEndpoint != null))
<del> throw new IllegalArgumentException(
<del> "Cannot set both externalDataSource and spaceDataSource/spaceSynchronizationEndpoint - it is recommended to use spaceDataSource/spaceSynchronizationEndpoint since externalDataSource is deprecated");
<del> }
<del>
<ide> /**
<ide> * @param spaceSynchronizationEndpoint
<ide> */
<del> public void setSpaceSynchronizationEndpoint(
<del> SpaceSynchronizationEndpoint spaceSynchronizationEndpoint) {
<del> this.spaceSynchronizationEndpoint = spaceSynchronizationEndpoint;
<add> public void setSpaceSynchronizationEndpoint(SpaceSynchronizationEndpoint spaceSynchronizationEndpoint) {
<add> factory.setSpaceSynchronizationEndpoint(spaceSynchronizationEndpoint);
<ide> }
<ide> } |
|
Java | apache-2.0 | 19df75b0c0eb311b286ed997ecdda454bddc05f2 | 0 | twalthr/flink,zimmermatt/flink,hongyuhong/flink,zentol/flink,StephanEwen/incubator-flink,zhangminglei/flink,twalthr/flink,greghogan/flink,ueshin/apache-flink,fhueske/flink,gustavoanatoly/flink,rmetzger/flink,jinglining/flink,mylog00/flink,zjureel/flink,GJL/flink,godfreyhe/flink,haohui/flink,GJL/flink,StephanEwen/incubator-flink,aljoscha/flink,Xpray/flink,bowenli86/flink,godfreyhe/flink,fhueske/flink,shaoxuan-wang/flink,wwjiang007/flink,lincoln-lil/flink,fanyon/flink,yew1eb/flink,jinglining/flink,lincoln-lil/flink,wwjiang007/flink,zhangminglei/flink,godfreyhe/flink,lincoln-lil/flink,GJL/flink,fhueske/flink,haohui/flink,godfreyhe/flink,shaoxuan-wang/flink,gyfora/flink,yew1eb/flink,tzulitai/flink,zjureel/flink,clarkyzl/flink,xccui/flink,hequn8128/flink,apache/flink,gyfora/flink,gustavoanatoly/flink,gyfora/flink,fanzhidongyzby/flink,Xpray/flink,hongyuhong/flink,zhangminglei/flink,twalthr/flink,darionyaphet/flink,zhangminglei/flink,tillrohrmann/flink,bowenli86/flink,mylog00/flink,zohar-mizrahi/flink,gyfora/flink,fhueske/flink,StephanEwen/incubator-flink,shaoxuan-wang/flink,tony810430/flink,yew1eb/flink,twalthr/flink,xccui/flink,mtunique/flink,WangTaoTheTonic/flink,clarkyzl/flink,rmetzger/flink,WangTaoTheTonic/flink,tillrohrmann/flink,zjureel/flink,wwjiang007/flink,bowenli86/flink,lincoln-lil/flink,DieBauer/flink,fanzhidongyzby/flink,jinglining/flink,apache/flink,GJL/flink,tony810430/flink,fanyon/flink,tony810430/flink,zentol/flink,mtunique/flink,DieBauer/flink,twalthr/flink,mbode/flink,zohar-mizrahi/flink,gyfora/flink,tzulitai/flink,gustavoanatoly/flink,wwjiang007/flink,WangTaoTheTonic/flink,lincoln-lil/flink,haohui/flink,kaibozhou/flink,PangZhi/flink,WangTaoTheTonic/flink,hwstreaming/flink,kl0u/flink,mbode/flink,zentol/flink,kl0u/flink,apache/flink,darionyaphet/flink,godfreyhe/flink,aljoscha/flink,tillrohrmann/flink,PangZhi/flink,yew1eb/flink,tony810430/flink,kl0u/flink,xccui/flink,jinglining/flink,tzulitai/flink,bowenli86/flink,WangTaoTheTonic/flink,godfreyhe/flink,shaoxuan-wang/flink,apache/flink,fanzhidongyzby/flink,gyfora/flink,greghogan/flink,wwjiang007/flink,kl0u/flink,zjureel/flink,PangZhi/flink,greghogan/flink,fhueske/flink,zentol/flink,bowenli86/flink,greghogan/flink,clarkyzl/flink,StephanEwen/incubator-flink,PangZhi/flink,twalthr/flink,tony810430/flink,hongyuhong/flink,kaibozhou/flink,wwjiang007/flink,kl0u/flink,mtunique/flink,zimmermatt/flink,kaibozhou/flink,greghogan/flink,PangZhi/flink,aljoscha/flink,ueshin/apache-flink,clarkyzl/flink,hequn8128/flink,sunjincheng121/flink,darionyaphet/flink,rmetzger/flink,apache/flink,hequn8128/flink,mtunique/flink,yew1eb/flink,greghogan/flink,DieBauer/flink,lincoln-lil/flink,xccui/flink,rmetzger/flink,shaoxuan-wang/flink,tzulitai/flink,zimmermatt/flink,zimmermatt/flink,tillrohrmann/flink,ueshin/apache-flink,StephanEwen/incubator-flink,sunjincheng121/flink,tillrohrmann/flink,tillrohrmann/flink,fanyon/flink,darionyaphet/flink,tzulitai/flink,clarkyzl/flink,lincoln-lil/flink,ueshin/apache-flink,hongyuhong/flink,haohui/flink,aljoscha/flink,aljoscha/flink,StephanEwen/incubator-flink,sunjincheng121/flink,gustavoanatoly/flink,kaibozhou/flink,kaibozhou/flink,DieBauer/flink,hequn8128/flink,hequn8128/flink,twalthr/flink,Xpray/flink,tzulitai/flink,mbode/flink,hwstreaming/flink,zentol/flink,hwstreaming/flink,mylog00/flink,mbode/flink,mtunique/flink,shaoxuan-wang/flink,zhangminglei/flink,sunjincheng121/flink,rmetzger/flink,tony810430/flink,tillrohrmann/flink,darionyaphet/flink,jinglining/flink,ji
nglining/flink,tony810430/flink,gyfora/flink,zimmermatt/flink,zohar-mizrahi/flink,ueshin/apache-flink,Xpray/flink,kaibozhou/flink,fanzhidongyzby/flink,GJL/flink,zentol/flink,DieBauer/flink,bowenli86/flink,fanyon/flink,rmetzger/flink,rmetzger/flink,apache/flink,zjureel/flink,zentol/flink,xccui/flink,aljoscha/flink,xccui/flink,mylog00/flink,zohar-mizrahi/flink,wwjiang007/flink,hequn8128/flink,xccui/flink,hongyuhong/flink,Xpray/flink,kl0u/flink,mylog00/flink,hwstreaming/flink,zjureel/flink,zjureel/flink,fhueske/flink,fanyon/flink,haohui/flink,sunjincheng121/flink,gustavoanatoly/flink,hwstreaming/flink,mbode/flink,fanzhidongyzby/flink,zohar-mizrahi/flink,GJL/flink,godfreyhe/flink,sunjincheng121/flink,apache/flink | /***********************************************************************************************************************
*
* Copyright (C) 2010 by the Stratosphere project (http://stratosphere.eu)
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*
**********************************************************************************************************************/
package eu.stratosphere.nephele.executiongraph;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import eu.stratosphere.nephele.configuration.Configuration;
import eu.stratosphere.nephele.execution.Environment;
import eu.stratosphere.nephele.execution.ExecutionListener;
import eu.stratosphere.nephele.execution.ExecutionSignature;
import eu.stratosphere.nephele.execution.ExecutionState;
import eu.stratosphere.nephele.instance.AllocatedResource;
import eu.stratosphere.nephele.instance.DummyInstance;
import eu.stratosphere.nephele.instance.InstanceManager;
import eu.stratosphere.nephele.instance.InstanceType;
import eu.stratosphere.nephele.io.InputGate;
import eu.stratosphere.nephele.io.OutputGate;
import eu.stratosphere.nephele.io.channels.AbstractInputChannel;
import eu.stratosphere.nephele.io.channels.AbstractOutputChannel;
import eu.stratosphere.nephele.io.channels.ChannelID;
import eu.stratosphere.nephele.io.channels.ChannelSetupException;
import eu.stratosphere.nephele.io.channels.ChannelType;
import eu.stratosphere.nephele.io.channels.bytebuffered.NetworkOutputChannel;
import eu.stratosphere.nephele.io.compression.CompressionLevel;
import eu.stratosphere.nephele.jobgraph.AbstractJobVertex;
import eu.stratosphere.nephele.jobgraph.JobEdge;
import eu.stratosphere.nephele.jobgraph.JobFileOutputVertex;
import eu.stratosphere.nephele.jobgraph.JobGraph;
import eu.stratosphere.nephele.jobgraph.JobID;
import eu.stratosphere.nephele.jobgraph.JobInputVertex;
import eu.stratosphere.nephele.template.AbstractInputTask;
import eu.stratosphere.nephele.template.AbstractInvokable;
import eu.stratosphere.nephele.template.IllegalConfigurationException;
import eu.stratosphere.nephele.template.InputSplit;
import eu.stratosphere.nephele.types.Record;
import eu.stratosphere.nephele.util.StringUtils;
/**
* In Nephele an execution graph is the main data structure for scheduling, executing and
* observing a job. An execution graph is created from a job graph. In contrast to a job graph
* it can contain communication edges of specific types, subgroups of vertices and information on
* when and where (on which instance) to run particular tasks.
*
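* <p>A construction sketch (the instance manager wiring is environment-specific and only
* hinted at here):
*
* <pre>
* JobGraph jobGraph = ...; // the user's job graph
* ExecutionGraph executionGraph = new ExecutionGraph(jobGraph, instanceManager); // may throw GraphConversionException
* </pre>
*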
* @author warneke
*/
public class ExecutionGraph implements ExecutionListener {
/**
* The log object used for debugging.
*/
private static final Log LOG = LogFactory.getLog(ExecutionGraph.class);
/**
* The ID of the job this graph has been built for.
*/
private final JobID jobID;
/**
* The name of the original job graph.
*/
private final String jobName;
/**
* Mapping of channel IDs to execution vertices.
*/
private final Map<ChannelID, ExecutionVertex> channelToVertexMap = new HashMap<ChannelID, ExecutionVertex>();
/**
* Mapping of channel IDs to input channels.
*/
private final Map<ChannelID, AbstractInputChannel<? extends Record>> inputChannelMap = new HashMap<ChannelID, AbstractInputChannel<? extends Record>>();
/**
* Mapping of channel IDs to output channels.
*/
private final Map<ChannelID, AbstractOutputChannel<? extends Record>> outputChannelMap = new HashMap<ChannelID, AbstractOutputChannel<? extends Record>>();
/**
* List of stages in the graph.
*/
private final List<ExecutionStage> stages = new ArrayList<ExecutionStage>();
/**
* Index to the current execution stage.
*/
private int indexToCurrentExecutionStage = 0;
/**
* The job configuration that was originally attached to the JobGraph.
*/
private Configuration jobConfiguration;
/**
* The current status of the job which is represented by this execution graph.
*/
private InternalJobStatus jobStatus = InternalJobStatus.CREATED;
/**
* The error description of the first task which causes this job to fail.
*/
private volatile String errorDescription = null;
/**
* List of listeners which are notified in case the status of this job has changed.
*/
private List<JobStatusListener> jobStatusListeners = new ArrayList<JobStatusListener>();
/**
* List of listeners which are notified in case the execution stage of a job has changed.
*/
private List<ExecutionStageListener> executionStageListeners = new ArrayList<ExecutionStageListener>();
/**
* Private constructor used for duplicating execution vertices.
*
* @param jobID
* the ID of the duplicated execution graph
* @param jobName
* the name of the original job graph
*/
private ExecutionGraph(JobID jobID, String jobName) {
this.jobID = jobID;
this.jobName = jobName;
}
/**
* Creates a new execution graph from a job graph.
*
* @param job
* the user's job graph
* @param instanceManager
* the instance manager
* @throws GraphConversionException
* thrown if the job graph is not valid and no execution graph can be constructed from it
*/
public ExecutionGraph(JobGraph job, InstanceManager instanceManager) throws GraphConversionException {
this(job.getJobID(), job.getName());
// Start constructing the new execution graph from given job graph
try {
constructExecutionGraph(job, instanceManager);
} catch (Exception e) {
throw new GraphConversionException(StringUtils.stringifyException(e));
}
}
/**
* Applies the user defined settings to the execution graph.
*
* @param temporaryGroupVertexMap
* mapping between job vertices and the corresponding group vertices.
* @throws GraphConversionException
* thrown if an error occurs while applying the user settings.
*/
private void applyUserDefinedSettings(HashMap<AbstractJobVertex, ExecutionGroupVertex> temporaryGroupVertexMap)
throws GraphConversionException {
// The dependency chain for instance sharing has already been checked for cycles in the
// <code>submitJob</code> method of the job manager
// If there is no cycle, apply the settings to the corresponding group vertices
final Iterator<Map.Entry<AbstractJobVertex, ExecutionGroupVertex>> it = temporaryGroupVertexMap.entrySet()
.iterator();
while (it.hasNext()) {
final Map.Entry<AbstractJobVertex, ExecutionGroupVertex> entry = it.next();
final AbstractJobVertex jobVertex = entry.getKey();
if (jobVertex.getVertexToShareInstancesWith() != null) {
final AbstractJobVertex vertexToShareInstancesWith = jobVertex.getVertexToShareInstancesWith();
final ExecutionGroupVertex groupVertex = entry.getValue();
final ExecutionGroupVertex groupVertexToShareInstancesWith = temporaryGroupVertexMap
.get(vertexToShareInstancesWith);
groupVertex.shareInstancesWith(groupVertexToShareInstancesWith);
}
}
// Second, set the number of members each group vertex is supposed to have
Iterator<ExecutionGroupVertex> it2 = new ExecutionGroupVertexIterator(this, true, -1);
while (it2.hasNext()) {
final ExecutionGroupVertex groupVertex = it2.next();
if (groupVertex.isNumberOfMembersUserDefined()) {
groupVertex.changeNumberOfGroupMembers(groupVertex.getUserDefinedNumberOfMembers());
}
}
repairInstanceAssignment();
// Finally, apply the channel settings
it2 = new ExecutionGroupVertexIterator(this, true, -1);
while (it2.hasNext()) {
final ExecutionGroupVertex groupVertex = it2.next();
for (int i = 0; i < groupVertex.getNumberOfForwardLinks(); i++) {
final ExecutionGroupEdge edge = groupVertex.getForwardEdge(i);
if (edge.isChannelTypeUserDefined()) {
edge.changeChannelType(edge.getChannelType());
}
if (edge.isCompressionLevelUserDefined()) {
edge.changeCompressionLevel(edge.getCompressionLevel());
}
}
}
// TODO: Check if calling this is really necessary, if not set visibility of reassignInstances back to protected
it2 = new ExecutionGroupVertexIterator(this, true, -1);
while (it2.hasNext()) {
final ExecutionGroupVertex groupVertex = it2.next();
if (groupVertex.getVertexToShareInstancesWith() == null) {
groupVertex.reassignInstances();
this.repairInstanceAssignment();
}
}
}
/**
* Sets up an execution graph from a job graph.
*
* @param jobGraph
* the job graph to create the execution graph from
* @param instanceManager
* the instance manager
* @throws GraphConversionException
* thrown if the job graph is not valid and no execution graph can be constructed from it
*/
private void constructExecutionGraph(JobGraph jobGraph, InstanceManager instanceManager)
throws GraphConversionException {
// Temporary data structures used during the conversion
final HashMap<AbstractJobVertex, ExecutionVertex> temporaryVertexMap = new HashMap<AbstractJobVertex, ExecutionVertex>();
final HashMap<AbstractJobVertex, ExecutionGroupVertex> temporaryGroupVertexMap = new HashMap<AbstractJobVertex, ExecutionGroupVertex>();
// First, store job configuration
this.jobConfiguration = jobGraph.getJobConfiguration();
// Initially, create only one execution stage that contains all group vertices
final ExecutionStage initialExecutionStage = new ExecutionStage(this, 0);
this.stages.add(initialExecutionStage);
// Convert job vertices to execution vertices and initialize them
final AbstractJobVertex[] all = jobGraph.getAllJobVertices();
for (int i = 0; i < all.length; i++) {
final ExecutionVertex createdVertex = createVertex(all[i], instanceManager, initialExecutionStage);
temporaryVertexMap.put(all[i], createdVertex);
temporaryGroupVertexMap.put(all[i], createdVertex.getGroupVertex());
}
// Create initial network channels for every vertex
for (int i = 0; i < all.length; i++) {
createInitialChannels(all[i], temporaryVertexMap);
}
// Now that an initial graph is built, apply the user settings
applyUserDefinedSettings(temporaryGroupVertexMap);
}
/**
* Creates the initial channels between all connected job vertices.
*
* @param jobVertex
* the job vertex from which the wiring is determined
* @param vertexMap
* a temporary vertex map
* @throws GraphConversionException
* if the initial wiring cannot be created
*/
private void createInitialChannels(AbstractJobVertex jobVertex,
HashMap<AbstractJobVertex, ExecutionVertex> vertexMap) throws GraphConversionException {
ExecutionVertex ev;
if (!vertexMap.containsKey(jobVertex)) {
throw new GraphConversionException("Cannot find mapping for vertex " + jobVertex.getName());
}
ev = vertexMap.get(jobVertex);
// First compare number of output gates
if (jobVertex.getNumberOfForwardConnections() != ev.getEnvironment().getNumberOfOutputGates()) {
throw new GraphConversionException("Job and execution vertex " + jobVertex.getName()
+ " have different number of outputs");
}
if (jobVertex.getNumberOfBackwardConnections() != ev.getEnvironment().getNumberOfInputGates()) {
throw new GraphConversionException("Job and execution vertex " + jobVertex.getName()
+ " have different number of inputs");
}
// Now wire the gates of connected vertices and record the user-defined channel settings
for (int j = 0; j < jobVertex.getNumberOfForwardConnections(); j++) {
final JobEdge edge = jobVertex.getForwardConnection(j);
final AbstractJobVertex target = edge.getConnectedVertex();
// find output gate of execution vertex
final OutputGate<? extends Record> eog = ev.getEnvironment().getOutputGate(j);
if (eog == null) {
throw new GraphConversionException("Cannot retrieve output gate " + j + " from vertex "
+ jobVertex.getName());
}
final ExecutionVertex executionTarget = vertexMap.get(target);
if (executionTarget == null) {
throw new GraphConversionException("Cannot find mapping for vertex " + target.getName());
}
final InputGate<? extends Record> eig = executionTarget.getEnvironment().getInputGate(
edge.getIndexOfInputGate());
if (eig == null) {
throw new GraphConversionException("Cannot retrieve input gate " + edge.getIndexOfInputGate()
+ " from vertex " + target.getName());
}
ChannelType channelType = ChannelType.NETWORK;
CompressionLevel compressionLevel = CompressionLevel.NO_COMPRESSION;
boolean userDefinedChannelType = false;
boolean userDefinedCompressionLevel = false;
// Create a network channel with no compression by default, user settings will be applied later on
createChannel(ev, eog, executionTarget, eig, channelType, compressionLevel);
if (edge.getChannelType() != null) {
channelType = edge.getChannelType();
userDefinedChannelType = true;
}
if (edge.getCompressionLevel() != null) {
compressionLevel = edge.getCompressionLevel();
userDefinedCompressionLevel = true;
}
// Connect the corresponding group vertices and copy the user settings from the job edge
ev.getGroupVertex().wireTo(executionTarget.getGroupVertex(), edge.getIndexOfInputGate(), j, channelType,
userDefinedChannelType, compressionLevel, userDefinedCompressionLevel);
}
}
/**
* Destroys all the channels originating from the source vertex at the given output gate and arriving at the target
* vertex at the given input gate. All destroyed channels are completely unregistered from the {@link ExecutionGraph}.
*
* @param source
* the source vertex the channels to be removed originate from
* @param indexOfOutputGate
* the index of the output gate the channels to be removed are assigned to
* @param target
* the target vertex the channels to be removed arrive at
* @param indexOfInputGate
* the index of the input gate the channels to be removed are assigned to
* @throws GraphConversionException
* thrown if an inconsistency during the unwiring process occurs
*/
public void unwire(ExecutionGroupVertex source, int indexOfOutputGate, ExecutionGroupVertex target,
int indexOfInputGate) throws GraphConversionException {
// Unwire the respective gate of the source vertices
for (int i = 0; i < source.getCurrentNumberOfGroupMembers(); i++) {
final ExecutionVertex sourceVertex = source.getGroupMember(i);
final OutputGate<? extends Record> outputGate = sourceVertex.getEnvironment().getOutputGate(
indexOfOutputGate);
if (outputGate == null) {
throw new GraphConversionException("unwire: " + sourceVertex.getName()
+ " has no output gate with index " + indexOfOutputGate);
}
for (int j = 0; j < outputGate.getNumberOfOutputChannels(); j++) {
final AbstractOutputChannel<? extends Record> outputChannel = outputGate.getOutputChannel(j);
this.outputChannelMap.remove(outputChannel.getID());
this.channelToVertexMap.remove(outputChannel.getID());
}
outputGate.removeAllOutputChannels();
}
// Unwire the respective gate of the target vertices
for (int i = 0; i < target.getCurrentNumberOfGroupMembers(); i++) {
final ExecutionVertex targetVertex = target.getGroupMember(i);
final InputGate<? extends Record> inputGate = targetVertex.getEnvironment().getInputGate(indexOfInputGate);
if (inputGate == null) {
throw new GraphConversionException("unwire: " + targetVertex.getName()
+ " has no input gate with index " + indexOfInputGate);
}
for (int j = 0; j < inputGate.getNumberOfInputChannels(); j++) {
final AbstractInputChannel<? extends Record> inputChannel = inputGate.getInputChannel(j);
this.inputChannelMap.remove(inputChannel.getID());
this.channelToVertexMap.remove(inputChannel.getID());
}
inputGate.removeAllInputChannels();
}
}
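/**
* Wires the source vertices at the given output gate to the target vertices at the given input gate. Whether a
* channel is created between two particular group members is determined by the distribution pattern of the input
* gate.
*
* @param source
* the source group vertex the channels to be created originate from
* @param indexOfOutputGate
* the index of the output gate the channels to be created are assigned to
* @param target
* the target group vertex the channels to be created arrive at
* @param indexOfInputGate
* the index of the input gate the channels to be created are assigned to
* @param channelType
* the type of the channels to be created
* @param compressionLevel
* the compression level of the channels to be created
* @throws GraphConversionException
* thrown if an inconsistency during the wiring process occurs
*/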
public void wire(ExecutionGroupVertex source, int indexOfOutputGate, ExecutionGroupVertex target,
int indexOfInputGate, ChannelType channelType, CompressionLevel compressionLevel)
throws GraphConversionException {
// Wire the respective gate of the source vertices
for (int i = 0; i < source.getCurrentNumberOfGroupMembers(); i++) {
final ExecutionVertex sourceVertex = source.getGroupMember(i);
final OutputGate<? extends Record> outputGate = sourceVertex.getEnvironment().getOutputGate(
indexOfOutputGate);
if (outputGate == null) {
throw new GraphConversionException("wire: " + sourceVertex.getName()
+ " has no output gate with index " + indexOfOutputGate);
}
if (outputGate.getNumberOfOutputChannels() > 0) {
throw new GraphConversionException("wire: wire called on source " + sourceVertex.getName() + " (" + i
+ "), but number of output channels is " + outputGate.getNumberOfOutputChannels() + "!");
}
for (int j = 0; j < target.getCurrentNumberOfGroupMembers(); j++) {
final ExecutionVertex targetVertex = target.getGroupMember(j);
final InputGate<? extends Record> inputGate = targetVertex.getEnvironment().getInputGate(
indexOfInputGate);
if (inputGate == null) {
throw new GraphConversionException("wire: " + targetVertex.getName()
+ " has no input gate with index " + indexOfInputGate);
}
if (inputGate.getNumberOfInputChannels() > 0 && i == 0) {
throw new GraphConversionException("wire: wire called on target " + targetVertex.getName() + " ("
+ j + "), but number of input channels is " + inputGate.getNumberOfInputChannels() + "!");
}
// Check if a wire is supposed to be created
if (inputGate.getDistributionPattern().createWire(i, j, source.getCurrentNumberOfGroupMembers(),
target.getCurrentNumberOfGroupMembers())) {
createChannel(sourceVertex, outputGate, targetVertex, inputGate, channelType, compressionLevel);
}
}
}
}
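/**
* Creates a pair of connected output and input channels of the given type between the given vertices and registers
* the new channels with this execution graph.
*
* @param source
* the vertex the new output channel is created for
* @param outputGate
* the output gate the new output channel is added to
* @param target
* the vertex the new input channel is created for
* @param inputGate
* the input gate the new input channel is added to
* @param channelType
* the type of the channels to be created
* @param compressionLevel
* the compression level of the channels to be created
* @throws GraphConversionException
* thrown if the given channel type is unknown
*/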
private void createChannel(ExecutionVertex source, OutputGate<? extends Record> outputGate, ExecutionVertex target,
InputGate<? extends Record> inputGate, ChannelType channelType, CompressionLevel compressionLevel)
throws GraphConversionException {
AbstractOutputChannel<? extends Record> outputChannel;
AbstractInputChannel<? extends Record> inputChannel;
switch (channelType) {
case NETWORK:
outputChannel = outputGate.createNetworkOutputChannel(null, compressionLevel);
inputChannel = inputGate.createNetworkInputChannel(null, compressionLevel);
break;
case INMEMORY:
outputChannel = outputGate.createInMemoryOutputChannel(null, compressionLevel);
inputChannel = inputGate.createInMemoryInputChannel(null, compressionLevel);
break;
case FILE:
outputChannel = outputGate.createFileOutputChannel(null, compressionLevel);
inputChannel = inputGate.createFileInputChannel(null, compressionLevel);
break;
default:
throw new GraphConversionException("Cannot create channel: unknown type");
}
// Cross-register the IDs of the two connected channels
inputChannel.setConnectedChannelID(outputChannel.getID());
outputChannel.setConnectedChannelID(inputChannel.getID());
this.outputChannelMap.put(outputChannel.getID(), outputChannel);
this.inputChannelMap.put(inputChannel.getID(), inputChannel);
this.channelToVertexMap.put(outputChannel.getID(), source);
this.channelToVertexMap.put(inputChannel.getID(), target);
}
/**
* Creates an execution vertex from a job vertex.
*
* @param jobVertex
* the job vertex to create the execution vertex from
* @param instanceManager
* the instanceManager
* @param initialExecutionStage
* the initial execution stage all group vertices are added to
* @return the new execution vertex
* @throws GraphConversionException
* thrown if the job vertex is of an unknown subclass
*/
private ExecutionVertex createVertex(AbstractJobVertex jobVertex, InstanceManager instanceManager,
ExecutionStage initialExecutionStage) throws GraphConversionException {
// If the user has requested an instance type, check if the type is known to the current instance manager
InstanceType instanceType = null;
boolean userDefinedInstanceType = false;
if (jobVertex.getInstanceType() != null) {
userDefinedInstanceType = true;
instanceType = instanceManager.getInstanceTypeByName(jobVertex.getInstanceType());
if (instanceType == null) {
throw new GraphConversionException("Requested instance type " + jobVertex.getInstanceType()
+ " is not known to the instance manager");
}
}
if (instanceType == null) {
instanceType = instanceManager.getDefaultInstanceType();
}
// Calculate the cryptographic signature of this vertex
final ExecutionSignature signature = ExecutionSignature.createSignature(jobVertex.getInvokableClass(),
jobVertex.getJobGraph().getJobID());
// Create a group vertex for the job vertex
final ExecutionGroupVertex groupVertex = new ExecutionGroupVertex(jobVertex.getName(), jobVertex.getID(), this,
jobVertex.getNumberOfSubtasks(), instanceType, userDefinedInstanceType, jobVertex
.getNumberOfSubtasksPerInstance(), jobVertex.getVertexToShareInstancesWith() != null,
jobVertex.getConfiguration(), signature);
// Create an initial execution vertex for the job vertex
final Class<? extends AbstractInvokable> invokableClass = jobVertex.getInvokableClass();
if (invokableClass == null) {
throw new GraphConversionException("JobVertex " + jobVertex.getID() + " (" + jobVertex.getName()
+ ") does not specify a task");
}
// Add group vertex to initial execution stage
initialExecutionStage.addStageMember(groupVertex);
ExecutionVertex ev = null;
try {
ev = new ExecutionVertex(jobVertex.getJobGraph().getJobID(), invokableClass, this,
groupVertex);
} catch (Exception e) {
throw new GraphConversionException(StringUtils.stringifyException(e));
}
// Run the configuration check the user has provided for the vertex
try {
jobVertex.checkConfiguration(ev.getEnvironment().getInvokable());
} catch (IllegalConfigurationException e) {
throw new GraphConversionException(StringUtils.stringifyException(e));
}
// Check if the user's specifications for the number of subtasks are valid
final int minimumNumberOfSubtasks = jobVertex.getMinimumNumberOfSubtasks(ev.getEnvironment().getInvokable());
final int maximumNumberOfSubtasks = jobVertex.getMaximumNumberOfSubtasks(ev.getEnvironment().getInvokable());
if (jobVertex.getNumberOfSubtasks() != -1) {
if (jobVertex.getNumberOfSubtasks() < 1) {
throw new GraphConversionException("Cannot split task " + jobVertex.getName() + " into "
+ jobVertex.getNumberOfSubtasks() + " subtasks");
}
if (jobVertex.getNumberOfSubtasks() < minimumNumberOfSubtasks) {
throw new GraphConversionException("Number of subtasks must be at least " + minimumNumberOfSubtasks);
}
if (maximumNumberOfSubtasks != -1) {
if (jobVertex.getNumberOfSubtasks() > maximumNumberOfSubtasks) {
throw new GraphConversionException("Number of subtasks for vertex " + jobVertex.getName()
+ " can be at most " + maximumNumberOfSubtasks);
}
}
}
// Check number of subtasks per instance
if (jobVertex.getNumberOfSubtasksPerInstance() != -1 && jobVertex.getNumberOfSubtasksPerInstance() < 1) {
throw new GraphConversionException("Cannot set number of subtasks per instance to "
+ jobVertex.getNumberOfSubtasksPerInstance() + " for vertex " + jobVertex.getName());
}
// Assign min/max to the group vertex (settings are actually applied in applyUserDefinedSettings)
groupVertex.setMinMemberSize(minimumNumberOfSubtasks);
groupVertex.setMaxMemberSize(maximumNumberOfSubtasks);
// Assign initial instance to vertex (may be overwritten later on when user settings are applied)
ev.setAllocatedResource(new AllocatedResource(DummyInstance.createDummyInstance(instanceType), instanceType,
null));
// Register input and output vertices separately
if (jobVertex instanceof JobInputVertex) {
final InputSplit[] inputSplits;
// let the task code compute the input splits
if (ev.getEnvironment().getInvokable() instanceof AbstractInputTask) {
try {
inputSplits = ((AbstractInputTask<?>) ev.getEnvironment().getInvokable()).
computeInputSplits(jobVertex.getNumberOfSubtasks());
} catch (Exception e) {
throw new GraphConversionException("Cannot compute input splits for " + groupVertex.getName()
+ ": "
+ StringUtils.stringifyException(e));
}
} else {
throw new GraphConversionException(
"BUG: JobInputVertex contained a task class which was not an input task.");
}
// assign input splits
groupVertex.setInputSplits(inputSplits);
}
// TODO: This is a quick workaround, problem can be solved in a more generic way
if (jobVertex instanceof JobFileOutputVertex) {
final JobFileOutputVertex jbov = (JobFileOutputVertex) jobVertex;
jobVertex.getConfiguration().setString("outputPath", jbov.getFilePath().toString());
}
return ev;
}
/**
* Returns the number of input vertices registered with this execution graph.
*
* @return the number of input vertices registered with this execution graph
*/
public int getNumberOfInputVertices() {
return this.stages.get(0).getNumberOfInputExecutionVertices();
}
/**
* Returns the number of input vertices for the given stage.
*
* @param stage
* the index of the execution stage
* @return the number of input vertices for the given stage
*/
public int getNumberOfInputVertices(int stage) {
if (stage >= this.stages.size()) {
return 0;
}
return this.stages.get(stage).getNumberOfInputExecutionVertices();
}
/**
* Returns the number of output vertices registered with this execution graph.
*
* @return the number of output vertices registered with this execution graph
*/
public int getNumberOfOutputVertices() {
return this.stages.get(0).getNumberOfOutputExecutionVertices();
}
/**
* Returns the number of output vertices for the given stage.
*
* @param stage
* the index of the execution stage
* @return the number of output vertices for the given stage
*/
public int getNumberOfOutputVertices(int stage) {
if (stage >= this.stages.size()) {
return 0;
}
return this.stages.get(stage).getNumberOfOutputExecutionVertices();
}
/**
* Returns the input vertex with the specified index.
*
* @param index
* the index of the input vertex to return
* @return the input vertex with the specified index or <code>null</code> if no input vertex with such an index
* exists
*/
public ExecutionVertex getInputVertex(int index) {
return this.stages.get(0).getInputExecutionVertex(index);
}
/**
* Returns the output vertex with the specified index.
*
* @param index
* the index of the output vertex to return
* @return the output vertex with the specified index or <code>null</code> if no output vertex with such an index
* exists
*/
public ExecutionVertex getOutputVertex(int index) {
return this.stages.get(0).getOutputExecutionVertex(index);
}
/**
* Returns the input vertex with the specified index for the given stage.
*
* @param stage
* the index of the stage
* @param index
* the index of the input vertex to return
* @return the input vertex with the specified index or <code>null</code> if no input vertex with such an index
* exists in that stage
*/
public ExecutionVertex getInputVertex(int stage, int index) {
if (stage >= this.stages.size()) {
return null;
}
return this.stages.get(stage).getInputExecutionVertex(index);
}
/**
* Returns the output vertex with the specified index for the given stage.
*
* @param stage
* the index of the stage
* @param index
* the index of the output vertex to return
* @return the output vertex with the specified index or <code>null</code> if no output vertex with such an index
* exists in that stage
*/
public ExecutionVertex getOutputVertex(int stage, int index) {
if (stage >= this.stages.size()) {
return null;
}
return this.stages.get(stage).getOutputExecutionVertex(index);
}
/**
* Returns the execution stage with number <code>num</code>.
*
* @param num
* the number of the execution stage to be returned
* @return the execution stage with number <code>num</code> or <code>null</code> if no such execution stage exists
*/
public ExecutionStage getStage(int num) {
if (num < this.stages.size()) {
return this.stages.get(num);
}
return null;
}
/**
* Returns the number of execution stages in the execution graph.
*
* @return the number of execution stages in the execution graph
*/
public int getNumberOfStages() {
return this.stages.size();
}
/**
* Identifies an execution vertex by the specified channel ID and returns it.
*
* @param id
* the channel ID to identify the vertex with
* @return the execution vertex which has a channel with ID <code>id</code> or <code>null</code> if no such vertex
* exists in the execution graph
*/
public ExecutionVertex getVertexByChannelID(ChannelID id) {
if (!this.channelToVertexMap.containsKey(id)) {
return null;
}
return this.channelToVertexMap.get(id);
}
/**
* Finds an input channel by its ID and returns it.
*
* @param id
* the channel ID to identify the input channel
* @return the input channel whose ID matches <code>id</code> or <code>null</code> if no such channel is known
*/
public AbstractInputChannel<? extends Record> getInputChannelByID(ChannelID id) {
if (!this.inputChannelMap.containsKey(id)) {
return null;
}
return this.inputChannelMap.get(id);
}
/**
* Finds an output channel by its ID and returns it.
*
* @param id
* the channel ID to identify the output channel
* @return the output channel whose ID matches <code>id</code> or <code>null</code> if no such channel is known
*/
public AbstractOutputChannel<? extends Record> getOutputChannelByID(ChannelID id) {
if (!this.outputChannelMap.containsKey(id)) {
return null;
}
return this.outputChannelMap.get(id);
}
/**
* Returns a (possibly empty) list of execution vertices which are currently assigned to the
* given allocated resource. The vertices in that list may have an arbitrary execution state.
*
* @param allocatedResource
* the allocated resource to check the assignment for
* @return a (possibly empty) list of execution vertices which are currently assigned to the given instance
*/
public synchronized List<ExecutionVertex> getVerticesAssignedToResource(AllocatedResource allocatedResource) {
final List<ExecutionVertex> list = new ArrayList<ExecutionVertex>();
if (allocatedResource == null) {
return list;
}
final Iterator<ExecutionVertex> it = new ExecutionGraphIterator(this, true);
while (it.hasNext()) {
final ExecutionVertex vertex = it.next();
if (allocatedResource.equals(vertex.getAllocatedResource())) {
list.add(vertex);
}
}
return list;
}
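/**
* Returns the execution vertex with the given ID.
*
* @param id
* the ID of the execution vertex to be returned
* @return the execution vertex with the ID <code>id</code> or <code>null</code> if no such vertex exists in the
* execution graph
*/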
public ExecutionVertex getVertexByID(ExecutionVertexID id) {
if (id == null) {
return null;
}
final ExecutionGraphIterator it = new ExecutionGraphIterator(this, true);
while (it.hasNext()) {
final ExecutionVertex vertex = it.next();
if (vertex.getID().equals(id)) {
return vertex;
}
}
return null;
}
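/**
* Returns the execution vertex which runs with the given environment.
*
* @param environment
* the environment to identify the execution vertex by
* @return the execution vertex whose environment matches <code>environment</code> or <code>null</code> if no such
* vertex exists in the execution graph
*/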
public ExecutionVertex getVertexByEnvironment(Environment environment) {
if (environment == null) {
return null;
}
final ExecutionGraphIterator it = new ExecutionGraphIterator(this, true);
while (it.hasNext()) {
final ExecutionVertex vertex = it.next();
if (vertex.getEnvironment() == environment) {
return vertex;
}
}
return null;
}
/**
* Checks if the current execution stage has been successfully completed, i.e.
* all vertices in this stage have successfully finished their execution.
*
* @return <code>true</code> if stage is completed, <code>false</code> otherwise
*/
private boolean isCurrentStageCompleted() {
if (this.indexToCurrentExecutionStage >= this.stages.size()) {
return true;
}
final ExecutionGraphIterator it = new ExecutionGraphIterator(this, this.indexToCurrentExecutionStage, true,
true);
while (it.hasNext()) {
final ExecutionVertex vertex = it.next();
if (vertex.getExecutionState() != ExecutionState.FINISHED) {
return false;
}
}
return true;
}
/**
* Checks if the execution of execution graph is finished.
*
* @return <code>true</code> if the execution of the graph is finished, <code>false</code> otherwise
*/
public boolean isExecutionFinished() {
return (getJobStatus() == InternalJobStatus.FINISHED);
}
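/**
* Checks that all output channels of the given execution vertex are connected to known input channels and that the
* target vertex of every channel has a real (non-dummy) instance assigned to it.
*
* @param executionVertex
* the execution vertex whose channels shall be checked
* @throws ChannelSetupException
* thrown if an unconnected channel or a missing resource assignment is detected
*/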
public void prepareChannelsForExecution(ExecutionVertex executionVertex) throws ChannelSetupException {
// Prepare channels
for (int k = 0; k < executionVertex.getEnvironment().getNumberOfOutputGates(); k++) {
final OutputGate<? extends Record> outputGate = executionVertex.getEnvironment().getOutputGate(k);
for (int l = 0; l < outputGate.getNumberOfOutputChannels(); l++) {
final AbstractOutputChannel<? extends Record> outputChannel = outputGate.getOutputChannel(l);
final AbstractInputChannel<? extends Record> inputChannel = this.inputChannelMap.get(outputChannel
.getConnectedChannelID());
if (inputChannel == null) {
throw new ChannelSetupException("Cannot find input channel to output channel "
+ outputChannel.getID());
}
final ExecutionVertex targetVertex = this.channelToVertexMap.get(inputChannel.getID());
final AllocatedResource targetResources = targetVertex.getAllocatedResource();
if (targetResources == null) {
throw new ChannelSetupException("Cannot find allocated resources for target vertex "
+ targetVertex.getID() + " in instance map");
}
if (targetResources.getInstance() instanceof DummyInstance) {
throw new ChannelSetupException("Allocated instance for " + targetVertex.getID()
+ " is a dummy vertex!");
}
}
}
}
/**
* Returns the ID of the job this execution graph has been constructed for.
*
* @return the ID of the job this execution graph has been constructed for
*/
public JobID getJobID() {
return this.jobID;
}
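/**
* Replaces all network channels of the given stage whose source and target vertices are assigned to the same
* instance by in-memory channels.
*
* @param stageNumber
* the number of the stage whose network channels shall be checked
*/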
public void removeUnnecessaryNetworkChannels(int stageNumber) {
if (stageNumber >= this.stages.size()) {
throw new IllegalArgumentException("removeUnnecessaryNetworkChannels called on an illegal stage ("
+ stageNumber + ")");
}
final ExecutionStage executionStage = this.stages.get(stageNumber);
for (int i = 0; i < executionStage.getNumberOfStageMembers(); i++) {
final ExecutionGroupVertex groupVertex = executionStage.getStageMember(i);
for (int j = 0; j < groupVertex.getCurrentNumberOfGroupMembers(); j++) {
final ExecutionVertex sourceVertex = groupVertex.getGroupMember(j);
for (int k = 0; k < sourceVertex.getEnvironment().getNumberOfOutputGates(); k++) {
final OutputGate<? extends Record> outputGate = sourceVertex.getEnvironment().getOutputGate(k);
for (int l = 0; l < outputGate.getNumberOfOutputChannels(); l++) {
final AbstractOutputChannel<? extends Record> oldOutputChannel = outputGate.getOutputChannel(l);
// Skip if not a network channel
if (!(oldOutputChannel instanceof NetworkOutputChannel<?>)) {
continue;
}
// Get matching input channel
final ExecutionVertex targetVertex = this.channelToVertexMap.get(oldOutputChannel
.getConnectedChannelID());
if (targetVertex == null) {
throw new RuntimeException("Cannot find target vertex: Inconsistency...");
}
// Run on the same instance?
if (!targetVertex.getAllocatedResource().getInstance().equals(
sourceVertex.getAllocatedResource().getInstance())) {
continue;
}
final AbstractInputChannel<? extends Record> oldInputChannel = getInputChannelByID(oldOutputChannel
.getConnectedChannelID());
final InputGate<? extends Record> inputGate = oldInputChannel.getInputGate();
// Replace channels
final AbstractOutputChannel<? extends Record> newOutputChannel = outputGate.replaceChannel(
oldOutputChannel.getID(), ChannelType.INMEMORY);
final AbstractInputChannel<? extends Record> newInputChannel = inputGate.replaceChannel(
oldInputChannel.getID(), ChannelType.INMEMORY);
// The new channels reuse the IDs of the old channels, so only the channel maps must be updated
this.outputChannelMap.put(newOutputChannel.getID(), newOutputChannel);
this.inputChannelMap.put(newInputChannel.getID(), newInputChannel);
}
}
}
}
}
/**
* Returns the index of the current execution stage.
*
* @return the index of the current execution stage
*/
public int getIndexOfCurrentExecutionStage() {
return this.indexToCurrentExecutionStage;
}
/**
* Returns the stage which is currently executed.
*
* @return the currently executed stage or <code>null</code> if the job execution is already completed
*/
public ExecutionStage getCurrentExecutionStage() {
if (this.indexToCurrentExecutionStage >= this.stages.size()) {
return null;
}
return this.stages.get(this.indexToCurrentExecutionStage);
}
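/**
* Recomputes the assignment of group vertices to execution stages. Group vertices connected by a file channel are
* put into separate stages; all other vertices are allocated in a stage as high as possible.
*/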
public void repairStages() {
final Map<ExecutionGroupVertex, Integer> stageNumbers = new HashMap<ExecutionGroupVertex, Integer>();
ExecutionGroupVertexIterator it = new ExecutionGroupVertexIterator(this, true, -1);
while (it.hasNext()) {
final ExecutionGroupVertex groupVertex = it.next();
int precedingNumber = 0;
if (stageNumbers.containsKey(groupVertex)) {
precedingNumber = stageNumbers.get(groupVertex).intValue();
} else {
stageNumbers.put(groupVertex, Integer.valueOf(precedingNumber));
}
for (int i = 0; i < groupVertex.getNumberOfForwardLinks(); i++) {
final ExecutionGroupEdge edge = groupVertex.getForwardEdge(i);
if (!stageNumbers.containsKey(edge.getTargetVertex())) {
// Target vertex has not yet been discovered
if (edge.getChannelType() != ChannelType.FILE) {
// Same stage as preceding vertex
stageNumbers.put(edge.getTargetVertex(), Integer.valueOf(precedingNumber));
} else {
// File channel, increase stage of target vertex by one
stageNumbers.put(edge.getTargetVertex(), Integer.valueOf(precedingNumber + 1));
}
} else {
final int stageNumber = stageNumbers.get(edge.getTargetVertex()).intValue();
if (edge.getChannelType() != ChannelType.FILE) {
if (stageNumber != precedingNumber) {
stageNumbers.put(edge.getTargetVertex(), Integer.valueOf(Math.max(precedingNumber, stageNumber)));
}
} else {
// File channel, increase stage of target vertex by one
if (stageNumber != (precedingNumber + 1)) {
stageNumbers.put(edge.getTargetVertex(), Integer.valueOf(Math.max(precedingNumber + 1, stageNumber)));
}
}
}
}
}
// Traverse the graph backwards (starting from the output vertices) to make sure vertices are allocated in a
// stage as high as possible
it = new ExecutionGroupVertexIterator(this, false, -1);
while (it.hasNext()) {
final ExecutionGroupVertex groupVertex = it.next();
final int succeedingNumber = stageNumbers.get(groupVertex);
for (int i = 0; i < groupVertex.getNumberOfBackwardLinks(); i++) {
final ExecutionGroupEdge edge = groupVertex.getBackwardEdge(i);
final int stageNumber = stageNumbers.get(edge.getSourceVertex());
if (edge.getChannelType() == ChannelType.FILE) {
if (stageNumber < (succeedingNumber - 1)) {
stageNumbers.put(edge.getSourceVertex(), Integer.valueOf(succeedingNumber - 1));
}
} else {
if (stageNumber != succeedingNumber) {
LOG.error(edge.getSourceVertex() + " and " + edge.getTargetVertex()
+ " are assigned to different stages although not connected by a file channel");
}
}
}
}
// Finally, assign the new stage numbers
this.stages.clear();
final Iterator<Map.Entry<ExecutionGroupVertex, Integer>> it2 = stageNumbers.entrySet().iterator();
while (it2.hasNext()) {
final Map.Entry<ExecutionGroupVertex, Integer> entry = it2.next();
final ExecutionGroupVertex groupVertex = entry.getKey();
final int stageNumber = entry.getValue().intValue();
// Prevent out of bounds exceptions
while (this.stages.size() <= stageNumber) {
this.stages.add(null);
}
ExecutionStage executionStage = this.stages.get(stageNumber);
// If the stage does not yet exist, create it
if (executionStage == null) {
executionStage = new ExecutionStage(this, stageNumber);
this.stages.set(stageNumber, executionStage);
}
executionStage.addStageMember(groupVertex);
groupVertex.setExecutionStage(executionStage);
}
}
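/**
* Repairs the instance assignment of the graph, i.e. makes sure that all vertices connected by a file or an
* in-memory channel are assigned to the same instance.
*/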
public void repairInstanceAssignment() {
Iterator<ExecutionVertex> it = new ExecutionGraphIterator(this, true);
while (it.hasNext()) {
final ExecutionVertex sourceVertex = it.next();
for (int i = 0; i < sourceVertex.getEnvironment().getNumberOfOutputGates(); i++) {
final OutputGate<? extends Record> outputGate = sourceVertex.getEnvironment().getOutputGate(i);
for (int j = 0; j < outputGate.getNumberOfOutputChannels(); j++) {
final AbstractOutputChannel<? extends Record> outputChannel = outputGate.getOutputChannel(j);
final ChannelType channelType = outputChannel.getType();
if (channelType == ChannelType.FILE || channelType == ChannelType.INMEMORY) {
final ExecutionVertex targetVertex = getVertexByChannelID(outputChannel.getConnectedChannelID());
targetVertex.setAllocatedResource(sourceVertex.getAllocatedResource());
}
}
}
}
it = new ExecutionGraphIterator(this, false);
while (it.hasNext()) {
final ExecutionVertex targetVertex = it.next();
for (int i = 0; i < targetVertex.getEnvironment().getNumberOfInputGates(); i++) {
final InputGate<? extends Record> inputGate = targetVertex.getEnvironment().getInputGate(i);
for (int j = 0; j < inputGate.getNumberOfInputChannels(); j++) {
final AbstractInputChannel<? extends Record> inputChannel = inputGate.getInputChannel(j);
final ChannelType channelType = inputChannel.getType();
if (channelType == ChannelType.FILE || channelType == ChannelType.INMEMORY) {
final ExecutionVertex sourceVertex = getVertexByChannelID(inputChannel.getConnectedChannelID());
sourceVertex.setAllocatedResource(targetVertex.getAllocatedResource());
}
}
}
}
}
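/**
* Determines the type of the channel which connects the given source vertex with the given target vertex.
*
* @param sourceVertex
* the source vertex of the connection
* @param targetVertex
* the target vertex of the connection
* @return the type of the channel between the two vertices or <code>null</code> if the vertices are not connected
*/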
public ChannelType getChannelType(ExecutionVertex sourceVertex, ExecutionVertex targetVertex) {
final ExecutionGroupVertex sourceGroupVertex = sourceVertex.getGroupVertex();
final ExecutionGroupVertex targetGroupVertex = targetVertex.getGroupVertex();
final List<ExecutionGroupEdge> edges = sourceGroupVertex.getForwardEdges(targetGroupVertex);
if (edges.size() == 0) {
return null;
}
// On a task level, the two vertices are connected
final ExecutionGroupEdge edge = edges.get(0);
// Now let's see if these two concrete subtasks are connected
final OutputGate<? extends Record> outputGate = sourceVertex.getEnvironment().getOutputGate(
edge.getIndexOfOutputGate());
for (int i = 0; i < outputGate.getNumberOfOutputChannels(); i++) {
final AbstractOutputChannel<? extends Record> outputChannel = outputGate.getOutputChannel(i);
final ChannelID inputChannelID = outputChannel.getConnectedChannelID();
if (targetVertex == this.channelToVertexMap.get(inputChannelID)) {
return edge.getChannelType();
}
}
return null;
}
/**
* Returns the job configuration that was originally attached to the job graph.
*
* @return the job configuration that was originally attached to the job graph
*/
public Configuration getJobConfiguration() {
return this.jobConfiguration;
}
/**
* Checks whether the job represented by the execution graph has the status <code>FINISHED</code>.
*
* @return <code>true</code> if the job has the status <code>FINISHED</code>, <code>false</code> otherwise
*/
private boolean jobHasFinishedStatus() {
final Iterator<ExecutionVertex> it = new ExecutionGraphIterator(this, true);
while (it.hasNext()) {
if (it.next().getExecutionState() != ExecutionState.FINISHED) {
return false;
}
}
return true;
}
/**
* Checks whether the job represented by the execution graph has the status <code>SCHEDULED</code>.
*
* @return <code>true</code> if the job has the status <code>SCHEDULED</code>, <code>false</code> otherwise
*/
private boolean jobHasScheduledStatus() {
final Iterator<ExecutionVertex> it = new ExecutionGraphIterator(this, true);
while (it.hasNext()) {
final ExecutionState s = it.next().getExecutionState();
if (s != ExecutionState.CREATED && s != ExecutionState.SCHEDULED && s != ExecutionState.ASSIGNING
&& s != ExecutionState.ASSIGNED && s != ExecutionState.READY) {
return false;
}
}
return true;
}
/**
* Checks whether the job represented by the execution graph has the status <code>CANCELED</code> or
* <code>FAILED</code>.
*
* @return <code>true</code> if the job has the status <code>CANCELED</code> or <code>FAILED</code>,
* <code>false</code> otherwise
*/
private boolean jobHasFailedOrCanceledStatus() {
final Iterator<ExecutionVertex> it = new ExecutionGraphIterator(this, true);
while (it.hasNext()) {
final ExecutionState state = it.next().getExecutionState();
if (state != ExecutionState.CANCELED && state != ExecutionState.FAILED && state != ExecutionState.FINISHED) {
return false;
}
}
return true;
}
/**
* Checks and updates the current execution status of the
* job which is represented by this execution graph.
*
* @param latestStateChange
* the latest execution state change which occurred
*/
public synchronized void checkAndUpdateJobStatus(final ExecutionState latestStateChange) {
switch (this.jobStatus) {
case CREATED:
if (jobHasScheduledStatus()) {
this.jobStatus = InternalJobStatus.SCHEDULED;
} else if (latestStateChange == ExecutionState.CANCELED) {
if (jobHasFailedOrCanceledStatus()) {
this.jobStatus = InternalJobStatus.CANCELED;
}
}
break;
case SCHEDULED:
if (latestStateChange == ExecutionState.RUNNING) {
this.jobStatus = InternalJobStatus.RUNNING;
return;
} else if (latestStateChange == ExecutionState.CANCELED) {
if (jobHasFailedOrCanceledStatus()) {
this.jobStatus = InternalJobStatus.CANCELED;
}
}
break;
case RUNNING:
if (latestStateChange == ExecutionState.CANCELING || latestStateChange == ExecutionState.CANCELED) {
this.jobStatus = InternalJobStatus.CANCELING;
return;
}
if (latestStateChange == ExecutionState.FAILED) {
final Iterator<ExecutionVertex> it = new ExecutionGraphIterator(this, true);
while (it.hasNext()) {
final ExecutionVertex vertex = it.next();
if (vertex.getExecutionState() == ExecutionState.FAILED && !vertex.hasRetriesLeft()) {
this.jobStatus = InternalJobStatus.FAILING;
return;
}
}
}
if (jobHasFinishedStatus()) {
this.jobStatus = InternalJobStatus.FINISHED;
}
break;
case FAILING:
if (jobHasFailedOrCanceledStatus()) {
this.jobStatus = InternalJobStatus.FAILED;
}
break;
case FAILED:
LOG.error("Received update of execute state in job status FAILED");
break;
case CANCELING:
if (jobHasFailedOrCanceledStatus()) {
this.jobStatus = InternalJobStatus.CANCELED;
}
break;
case CANCELED:
LOG.error("Received update of execute state in job status CANCELED");
break;
case FINISHED:
LOG.error("Received update of execute state in job status FINISHED");
break;
}
}
/**
* Returns the current status of the job
* represented by this execution graph.
*
* @return the current status of the job
*/
public synchronized InternalJobStatus getJobStatus() {
return this.jobStatus;
}
/**
* {@inheritDoc}
*/
@Override
public synchronized void executionStateChanged(Environment ee, ExecutionState newExecutionState,
String optionalMessage) {
final InternalJobStatus oldStatus = this.jobStatus;
checkAndUpdateJobStatus(newExecutionState);
if (newExecutionState == ExecutionState.FINISHED) {
// It is worth checking if the current stage has been completed
if (this.isCurrentStageCompleted()) {
// Increase current execution stage
++this.indexToCurrentExecutionStage;
if (this.indexToCurrentExecutionStage < this.stages.size()) {
final Iterator<ExecutionStageListener> it = this.executionStageListeners.iterator();
final ExecutionStage nextExecutionStage = getCurrentExecutionStage();
while (it.hasNext()) {
it.next().nextExecutionStageEntered(ee.getJobID(), nextExecutionStage);
}
}
}
}
if (this.jobStatus != oldStatus) {
// The task caused the entire job to fail; save the error description
if (this.jobStatus == InternalJobStatus.FAILING) {
this.errorDescription = optionalMessage;
}
// If this is the final failure state change, reuse the saved error description
if (this.jobStatus == InternalJobStatus.FAILED) {
optionalMessage = this.errorDescription;
}
final Iterator<JobStatusListener> it = this.jobStatusListeners.iterator();
while (it.hasNext()) {
it.next().jobStatusHasChanged(this, this.jobStatus, optionalMessage);
}
}
}
/**
* Registers a new {@link JobStatusListener} object with this execution graph.
* After being registered the object will receive notifications about changes
* of the job status. It is not possible to register the same listener object
* twice.
*
* @param jobStatusListener
* the listener object to register
*/
public synchronized void registerJobStatusListener(final JobStatusListener jobStatusListener) {
if (jobStatusListener == null) {
return;
}
if (!this.jobStatusListeners.contains(jobStatusListener)) {
this.jobStatusListeners.add(jobStatusListener);
}
}
/**
* Unregisters the given {@link JobStatusListener} object. After having called this
* method, the object will no longer receive notifications about changes of the job
* status.
*
* @param jobStatusListener
* the listener object to unregister
*/
public synchronized void unregisterJobStatusListener(final JobStatusListener jobStatusListener) {
if (jobStatusListener == null) {
return;
}
this.jobStatusListeners.remove(jobStatusListener);
}
/**
* Registers a new {@link ExecutionStageListener} object with this execution graph. After being registered the
* object will receive a notification whenever the job has entered its next execution stage. Note that a
* notification is not sent when the job has entered its initial execution stage.
*
* @param executionStageListener
* the listener object to register
*/
public synchronized void registerExecutionStageListener(final ExecutionStageListener executionStageListener) {
if (executionStageListener == null) {
return;
}
if (!this.executionStageListeners.contains(executionStageListener)) {
this.executionStageListeners.add(executionStageListener);
}
}
/**
* Unregisters the given {@link ExecutionStageListener} object. After having called this method, the object will no
* longer receive notifications about the execution stage progress.
*
* @param executionStageListener
* the listener object to unregister
*/
public synchronized void unregisterExecutionStageListener(final ExecutionStageListener executionStageListener) {
if (executionStageListener == null) {
return;
}
this.executionStageListeners.remove(executionStageListener);
}
/**
* Returns the name of the original job graph.
*
* @return the name of the original job graph, possibly <code>null</code>
*/
public String getJobName() {
return this.jobName;
}
/**
* {@inheritDoc}
*/
@Override
public void userThreadFinished(Environment ee, Thread userThread) {
// Nothing to do here
}
/**
* {@inheritDoc}
*/
@Override
public void userThreadStarted(Environment ee, Thread userThread) {
// Nothing to do here
}
/**
* Returns a list of vertices which are contained in this execution graph and have a finished checkpoint.
*
* @return list of vertices which are contained in this execution graph and have a finished checkpoint
*/
public List<ExecutionVertex> getVerticesWithCheckpoints() {
final List<ExecutionVertex> list = new ArrayList<ExecutionVertex>();
final Iterator<ExecutionGroupVertex> it = new ExecutionGroupVertexIterator(this, true, -1);
// In the current implementation we just look for vertices which have outgoing file channels
while (it.hasNext()) {
final ExecutionGroupVertex groupVertex = it.next();
for (int i = 0; i < groupVertex.getNumberOfForwardLinks(); i++) {
if (groupVertex.getForwardEdge(i).getChannelType() == ChannelType.FILE) {
for (int j = 0; j < groupVertex.getCurrentNumberOfGroupMembers(); j++) {
list.add(groupVertex.getGroupMember(j));
}
break;
}
}
}
return list;
}
}
| nephele/nephele-server/src/main/java/eu/stratosphere/nephele/executiongraph/ExecutionGraph.java | /***********************************************************************************************************************
*
* Copyright (C) 2010 by the Stratosphere project (http://stratosphere.eu)
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*
**********************************************************************************************************************/
package eu.stratosphere.nephele.executiongraph;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import eu.stratosphere.nephele.configuration.Configuration;
import eu.stratosphere.nephele.execution.Environment;
import eu.stratosphere.nephele.execution.ExecutionListener;
import eu.stratosphere.nephele.execution.ExecutionSignature;
import eu.stratosphere.nephele.execution.ExecutionState;
import eu.stratosphere.nephele.instance.AllocatedResource;
import eu.stratosphere.nephele.instance.DummyInstance;
import eu.stratosphere.nephele.instance.InstanceManager;
import eu.stratosphere.nephele.instance.InstanceType;
import eu.stratosphere.nephele.io.InputGate;
import eu.stratosphere.nephele.io.OutputGate;
import eu.stratosphere.nephele.io.channels.AbstractInputChannel;
import eu.stratosphere.nephele.io.channels.AbstractOutputChannel;
import eu.stratosphere.nephele.io.channels.ChannelID;
import eu.stratosphere.nephele.io.channels.ChannelSetupException;
import eu.stratosphere.nephele.io.channels.ChannelType;
import eu.stratosphere.nephele.io.channels.bytebuffered.NetworkOutputChannel;
import eu.stratosphere.nephele.io.compression.CompressionLevel;
import eu.stratosphere.nephele.jobgraph.AbstractJobVertex;
import eu.stratosphere.nephele.jobgraph.JobEdge;
import eu.stratosphere.nephele.jobgraph.JobFileOutputVertex;
import eu.stratosphere.nephele.jobgraph.JobGraph;
import eu.stratosphere.nephele.jobgraph.JobID;
import eu.stratosphere.nephele.jobgraph.JobInputVertex;
import eu.stratosphere.nephele.template.AbstractInputTask;
import eu.stratosphere.nephele.template.AbstractInvokable;
import eu.stratosphere.nephele.template.IllegalConfigurationException;
import eu.stratosphere.nephele.template.InputSplit;
import eu.stratosphere.nephele.types.Record;
import eu.stratosphere.nephele.util.StringUtils;
/**
* In Nephele an execution graph is the main data structure for scheduling, executing and
* observing a job. An execution graph is created from a job graph. In contrast to a job graph
* it can contain communication edges of specific types, sub groups of vertices and information on
* when and where (on which instance) to run particular tasks.
*
* @author warneke
*/
public class ExecutionGraph implements ExecutionListener {
/**
* The log object used for debugging.
*/
private static final Log LOG = LogFactory.getLog(ExecutionGraph.class);
/**
* The ID of the job this graph has been built for.
*/
private final JobID jobID;
/**
* The name of the original job graph.
*/
private final String jobName;
/**
* Mapping of channel IDs to execution vertices.
*/
private final Map<ChannelID, ExecutionVertex> channelToVertexMap = new HashMap<ChannelID, ExecutionVertex>();
/**
* Mapping of channel IDs to input channels.
*/
private final Map<ChannelID, AbstractInputChannel<? extends Record>> inputChannelMap = new HashMap<ChannelID, AbstractInputChannel<? extends Record>>();
/**
* Mapping of channel IDs to output channels.
*/
private final Map<ChannelID, AbstractOutputChannel<? extends Record>> outputChannelMap = new HashMap<ChannelID, AbstractOutputChannel<? extends Record>>();
/**
* List of stages in the graph.
*/
private final List<ExecutionStage> stages = new ArrayList<ExecutionStage>();
/**
* Index to the current execution stage.
*/
private int indexToCurrentExecutionStage = 0;
/**
* The job configuration that was originally attached to the JobGraph.
*/
private Configuration jobConfiguration;
/**
* The current status of the job which is represented by this execution graph.
*/
private InternalJobStatus jobStatus = InternalJobStatus.CREATED;
/**
* The error description of the first task which causes this job to fail.
*/
private volatile String errorDescription = null;
/**
* List of listeners which are notified in case the status of this job has changed.
*/
private List<JobStatusListener> jobStatusListeners = new ArrayList<JobStatusListener>();
/**
* List of listeners which are notified in case the execution stage of a job has changed.
*/
private List<ExecutionStageListener> executionStageListeners = new ArrayList<ExecutionStageListener>();
/**
* Private constructor used for duplicating execution vertices.
*
* @param jobID
* the ID of the duplicated execution graph
* @param jobName
* the name of the original job graph
*/
private ExecutionGraph(JobID jobID, String jobName) {
this.jobID = jobID;
this.jobName = jobName;
}
/**
* Creates a new execution graph from a job graph.
*
* @param job
* the user's job graph
* @param instanceManager
* the instance manager
* @throws GraphConversionException
* thrown if the job graph is not valid and no execution graph can be constructed from it
*/
public ExecutionGraph(JobGraph job, InstanceManager instanceManager) throws GraphConversionException {
this(job.getJobID(), job.getName());
// Start constructing the new execution graph from given job graph
try {
constructExecutionGraph(job, instanceManager);
} catch (Exception e) {
throw new GraphConversionException(StringUtils.stringifyException(e));
}
}
/**
* Applies the user defined settings to the execution graph.
*
* @param temporaryGroupVertexMap
* mapping between job vertices and the corresponding group vertices.
* @throws GraphConversionException
* thrown if an error occurs while applying the user settings.
*/
private void applyUserDefinedSettings(HashMap<AbstractJobVertex, ExecutionGroupVertex> temporaryGroupVertexMap)
throws GraphConversionException {
// Cycles in the dependency chain for instance sharing have already been checked in the
// <code>submitJob</code> method of the job manager
// If there is no cycle, apply the settings to the corresponding group vertices
final Iterator<Map.Entry<AbstractJobVertex, ExecutionGroupVertex>> it = temporaryGroupVertexMap.entrySet()
.iterator();
while (it.hasNext()) {
final Map.Entry<AbstractJobVertex, ExecutionGroupVertex> entry = it.next();
final AbstractJobVertex jobVertex = entry.getKey();
if (jobVertex.getVertexToShareInstancesWith() != null) {
final AbstractJobVertex vertexToShareInstancesWith = jobVertex.getVertexToShareInstancesWith();
final ExecutionGroupVertex groupVertex = entry.getValue();
final ExecutionGroupVertex groupVertexToShareInstancesWith = temporaryGroupVertexMap
.get(vertexToShareInstancesWith);
groupVertex.shareInstancesWith(groupVertexToShareInstancesWith);
}
}
// Second, we set the number of members each group vertex is supposed to have
Iterator<ExecutionGroupVertex> it2 = new ExecutionGroupVertexIterator(this, true, -1);
while (it2.hasNext()) {
final ExecutionGroupVertex groupVertex = it2.next();
if (groupVertex.isNumberOfMembersUserDefined()) {
groupVertex.changeNumberOfGroupMembers(groupVertex.getUserDefinedNumberOfMembers());
}
}
repairInstanceAssignment();
// Finally, apply the channel settings
it2 = new ExecutionGroupVertexIterator(this, true, -1);
while (it2.hasNext()) {
final ExecutionGroupVertex groupVertex = it2.next();
for (int i = 0; i < groupVertex.getNumberOfForwardLinks(); i++) {
final ExecutionGroupEdge edge = groupVertex.getForwardEdge(i);
if (edge.isChannelTypeUserDefined()) {
edge.changeChannelType(edge.getChannelType());
}
if (edge.isCompressionLevelUserDefined()) {
edge.changeCompressionLevel(edge.getCompressionLevel());
}
}
}
// TODO: Check if calling this is really necessary, if not set visibility of reassignInstances back to protected
it2 = new ExecutionGroupVertexIterator(this, true, -1);
while (it2.hasNext()) {
final ExecutionGroupVertex groupVertex = it2.next();
if (groupVertex.getVertexToShareInstancesWith() == null) {
groupVertex.reassignInstances();
this.repairInstanceAssignment();
}
}
}
/**
* Sets up an execution graph from a job graph.
*
* @param jobGraph
* the job graph to create the execution graph from
* @param instanceManager
* the instance manager
* @throws GraphConversionException
* thrown if the job graph is not valid and no execution graph can be constructed from it
*/
private void constructExecutionGraph(JobGraph jobGraph, InstanceManager instanceManager)
throws GraphConversionException {
// Create temporary data structures used during the conversion
final HashMap<AbstractJobVertex, ExecutionVertex> temporaryVertexMap = new HashMap<AbstractJobVertex, ExecutionVertex>();
final HashMap<AbstractJobVertex, ExecutionGroupVertex> temporaryGroupVertexMap = new HashMap<AbstractJobVertex, ExecutionGroupVertex>();
// First, store job configuration
this.jobConfiguration = jobGraph.getJobConfiguration();
// Initially, create only one execution stage that contains all group vertices
final ExecutionStage initialExecutionStage = new ExecutionStage(this, 0);
this.stages.add(initialExecutionStage);
// Convert job vertices to execution vertices and initialize them
final AbstractJobVertex[] all = jobGraph.getAllJobVertices();
for (int i = 0; i < all.length; i++) {
final ExecutionVertex createdVertex = createVertex(all[i], instanceManager, initialExecutionStage);
temporaryVertexMap.put(all[i], createdVertex);
temporaryGroupVertexMap.put(all[i], createdVertex.getGroupVertex());
}
// Create initial network channel for every vertex
for (int i = 0; i < all.length; i++) {
createInitialChannels(all[i], temporaryVertexMap);
}
// Now that an initial graph is built, apply the user settings
applyUserDefinedSettings(temporaryGroupVertexMap);
}
/**
* Creates the initial channels between all connected job vertices.
*
* @param jobVertex
* the job vertex from which the wiring is determined
* @param vertexMap
* a temporary vertex map
* @throws GraphConversionException
* if the initial wiring cannot be created
*/
private void createInitialChannels(AbstractJobVertex jobVertex,
HashMap<AbstractJobVertex, ExecutionVertex> vertexMap) throws GraphConversionException {
ExecutionVertex ev;
if (!vertexMap.containsKey(jobVertex)) {
throw new GraphConversionException("Cannot find mapping for vertex " + jobVertex.getName());
}
ev = vertexMap.get(jobVertex);
// First compare number of output gates
if (jobVertex.getNumberOfForwardConnections() != ev.getEnvironment().getNumberOfOutputGates()) {
throw new GraphConversionException("Job and execution vertex " + jobVertex.getName()
+ " have different number of outputs");
}
if (jobVertex.getNumberOfBackwardConnections() != ev.getEnvironment().getNumberOfInputGates()) {
throw new GraphConversionException("Job and execution vertex " + jobVertex.getName()
+ " have different number of inputs");
}
// Now assign identifiers to gates and check type
for (int j = 0; j < jobVertex.getNumberOfForwardConnections(); j++) {
final JobEdge edge = jobVertex.getForwardConnection(j);
final AbstractJobVertex target = edge.getConnectedVertex();
// find output gate of execution vertex
final OutputGate<? extends Record> eog = ev.getEnvironment().getOutputGate(j);
if (eog == null) {
throw new GraphConversionException("Cannot retrieve output gate " + j + " from vertex "
+ jobVertex.getName());
}
final ExecutionVertex executionTarget = vertexMap.get(target);
if (executionTarget == null) {
throw new GraphConversionException("Cannot find mapping for vertex " + target.getName());
}
final InputGate<? extends Record> eig = executionTarget.getEnvironment().getInputGate(
edge.getIndexOfInputGate());
if (eig == null) {
throw new GraphConversionException("Cannot retrieve input gate " + edge.getIndexOfInputGate()
+ " from vertex " + target.getName());
}
ChannelType channelType = ChannelType.NETWORK;
CompressionLevel compressionLevel = CompressionLevel.NO_COMPRESSION;
boolean userDefinedChannelType = false;
boolean userDefinedCompressionLevel = false;
// Create a network channel with no compression by default, user settings will be applied later on
createChannel(ev, eog, executionTarget, eig, channelType, compressionLevel);
if (edge.getChannelType() != null) {
channelType = edge.getChannelType();
userDefinedChannelType = true;
}
if (edge.getCompressionLevel() != null) {
compressionLevel = edge.getCompressionLevel();
userDefinedCompressionLevel = true;
}
// Connect the corresponding group vertices and copy the user settings from the job edge
ev.getGroupVertex().wireTo(executionTarget.getGroupVertex(), edge.getIndexOfInputGate(), j, channelType,
userDefinedChannelType, compressionLevel, userDefinedCompressionLevel);
}
}
/**
* Destroys all the channels originating from the source vertex at the given output gate and arriving at the target
* vertex at the given input gate. All destroyed channels are completely unregistered with the {@link ExecutionGraph}.
*
* @param source
* the source vertex the channels to be removed originate from
* @param indexOfOutputGate
* the index of the output gate the channels to be removed are assigned to
* @param target
* the target vertex the channels to be removed arrive at
* @param indexOfInputGate
* the index of the input gate the channels to be removed are assigned to
* @throws GraphConversionException
* thrown if an inconsistency during the unwiring process occurs
*/
public void unwire(ExecutionGroupVertex source, int indexOfOutputGate, ExecutionGroupVertex target,
int indexOfInputGate) throws GraphConversionException {
// Unwire the respective gate of the source vertices
for (int i = 0; i < source.getCurrentNumberOfGroupMembers(); i++) {
final ExecutionVertex sourceVertex = source.getGroupMember(i);
final OutputGate<? extends Record> outputGate = sourceVertex.getEnvironment().getOutputGate(
indexOfOutputGate);
if (outputGate == null) {
throw new GraphConversionException("unwire: " + sourceVertex.getName()
+ " has no output gate with index " + indexOfOutputGate);
}
for (int j = 0; j < outputGate.getNumberOfOutputChannels(); j++) {
final AbstractOutputChannel<? extends Record> outputChannel = outputGate.getOutputChannel(j);
this.outputChannelMap.remove(outputChannel.getID());
this.channelToVertexMap.remove(outputChannel.getID());
}
outputGate.removeAllOutputChannels();
}
// Unwire the respective gate of the target vertices
for (int i = 0; i < target.getCurrentNumberOfGroupMembers(); i++) {
final ExecutionVertex targetVertex = target.getGroupMember(i);
final InputGate<? extends Record> inputGate = targetVertex.getEnvironment().getInputGate(indexOfInputGate);
if (inputGate == null) {
throw new GraphConversionException("unwire: " + targetVertex.getName()
+ " has no input gate with index " + indexOfInputGate);
}
for (int j = 0; j < inputGate.getNumberOfInputChannels(); j++) {
final AbstractInputChannel<? extends Record> inputChannel = inputGate.getInputChannel(j);
this.inputChannelMap.remove(inputChannel.getID());
this.channelToVertexMap.remove(inputChannel.getID());
}
inputGate.removeAllInputChannels();
}
}
public void wire(ExecutionGroupVertex source, int indexOfOutputGate, ExecutionGroupVertex target,
int indexOfInputGate, ChannelType channelType, CompressionLevel compressionLevel)
throws GraphConversionException {
// Wire the respective gate of the source vertices
for (int i = 0; i < source.getCurrentNumberOfGroupMembers(); i++) {
final ExecutionVertex sourceVertex = source.getGroupMember(i);
final OutputGate<? extends Record> outputGate = sourceVertex.getEnvironment().getOutputGate(
indexOfOutputGate);
if (outputGate == null) {
throw new GraphConversionException("wire: " + sourceVertex.getName()
+ " has no output gate with index " + indexOfOutputGate);
}
if (outputGate.getNumberOfOutputChannels() > 0) {
throw new GraphConversionException("wire: wire called on source " + sourceVertex.getName() + " (" + i
+ "), but number of output channels is " + outputGate.getNumberOfOutputChannels() + "!");
}
for (int j = 0; j < target.getCurrentNumberOfGroupMembers(); j++) {
final ExecutionVertex targetVertex = target.getGroupMember(j);
final InputGate<? extends Record> inputGate = targetVertex.getEnvironment().getInputGate(
indexOfInputGate);
if (inputGate == null) {
throw new GraphConversionException("wire: " + targetVertex.getName()
+ " has no input gate with index " + indexOfInputGate);
}
if (inputGate.getNumberOfInputChannels() > 0 && i == 0) {
throw new GraphConversionException("wire: wire called on target " + targetVertex.getName() + " ("
+ j + "), but number of input channels is " + inputGate.getNumberOfInputChannels() + "!");
}
// Check if a wire is supposed to be created
if (inputGate.getDistributionPattern().createWire(i, j, source.getCurrentNumberOfGroupMembers(),
target.getCurrentNumberOfGroupMembers())) {
createChannel(sourceVertex, outputGate, targetVertex, inputGate, channelType, compressionLevel);
}
}
}
}
private void createChannel(ExecutionVertex source, OutputGate<? extends Record> outputGate, ExecutionVertex target,
InputGate<? extends Record> inputGate, ChannelType channelType, CompressionLevel compressionLevel)
throws GraphConversionException {
AbstractOutputChannel<? extends Record> outputChannel;
AbstractInputChannel<? extends Record> inputChannel;
switch (channelType) {
case NETWORK:
outputChannel = outputGate.createNetworkOutputChannel(null, compressionLevel);
inputChannel = inputGate.createNetworkInputChannel(null, compressionLevel);
break;
case INMEMORY:
outputChannel = outputGate.createInMemoryOutputChannel(null, compressionLevel);
inputChannel = inputGate.createInMemoryInputChannel(null, compressionLevel);
break;
case FILE:
outputChannel = outputGate.createFileOutputChannel(null, compressionLevel);
inputChannel = inputGate.createFileInputChannel(null, compressionLevel);
break;
default:
throw new GraphConversionException("Cannot create channel: unknown type");
}
// Cross-register the IDs of the two connected channels
inputChannel.setConnectedChannelID(outputChannel.getID());
outputChannel.setConnectedChannelID(inputChannel.getID());
this.outputChannelMap.put(outputChannel.getID(), outputChannel);
this.inputChannelMap.put(inputChannel.getID(), inputChannel);
this.channelToVertexMap.put(outputChannel.getID(), source);
this.channelToVertexMap.put(inputChannel.getID(), target);
}
/**
* Creates an execution vertex from a job vertex.
*
* @param jobVertex
* the job vertex to create the execution vertex from
* @param instanceManager
* the instanceManager
* @param initialExecutionStage
* the initial execution stage all group vertices are added to
* @return the new execution vertex
* @throws GraphConversionException
* thrown if the job vertex is of an unknown subclass
*/
private ExecutionVertex createVertex(AbstractJobVertex jobVertex, InstanceManager instanceManager,
ExecutionStage initialExecutionStage) throws GraphConversionException {
// If the user has requested an instance type, check if the type is known to the current instance manager
InstanceType instanceType = null;
boolean userDefinedInstanceType = false;
if (jobVertex.getInstanceType() != null) {
userDefinedInstanceType = true;
instanceType = instanceManager.getInstanceTypeByName(jobVertex.getInstanceType());
if (instanceType == null) {
throw new GraphConversionException("Requested instance type " + jobVertex.getInstanceType()
+ " is not known to the instance manager");
}
}
if (instanceType == null) {
instanceType = instanceManager.getDefaultInstanceType();
}
// Calculate the cryptographic signature of this vertex
final ExecutionSignature signature = ExecutionSignature.createSignature(jobVertex.getInvokableClass(),
jobVertex.getJobGraph().getJobID());
// Create a group vertex for the job vertex
final ExecutionGroupVertex groupVertex = new ExecutionGroupVertex(jobVertex.getName(), jobVertex.getID(), this,
jobVertex.getNumberOfSubtasks(), instanceType, userDefinedInstanceType, jobVertex
.getNumberOfSubtasksPerInstance(), jobVertex.getVertexToShareInstancesWith() != null,
jobVertex.getConfiguration(), signature);
// Create an initial execution vertex for the job vertex
final Class<? extends AbstractInvokable> invokableClass = jobVertex.getInvokableClass();
if (invokableClass == null) {
throw new GraphConversionException("JobVertex " + jobVertex.getID() + " (" + jobVertex.getName()
+ ") does not specify a task");
}
// Add group vertex to initial execution stage
initialExecutionStage.addStageMember(groupVertex);
ExecutionVertex ev = null;
try {
ev = new ExecutionVertex(jobVertex.getJobGraph().getJobID(), invokableClass, this,
groupVertex);
} catch (Exception e) {
throw new GraphConversionException(StringUtils.stringifyException(e));
}
// Run the configuration check the user has provided for the vertex
try {
jobVertex.checkConfiguration(ev.getEnvironment().getInvokable());
} catch (IllegalConfigurationException e) {
throw new GraphConversionException(StringUtils.stringifyException(e));
}
// Check if the user's specifications for the number of subtasks are valid
final int minimumNumberOfSubtasks = jobVertex.getMinimumNumberOfSubtasks(ev.getEnvironment().getInvokable());
final int maximumNumberOfSubtasks = jobVertex.getMaximumNumberOfSubtasks(ev.getEnvironment().getInvokable());
if (jobVertex.getNumberOfSubtasks() != -1) {
if (jobVertex.getNumberOfSubtasks() < 1) {
throw new GraphConversionException("Cannot split task " + jobVertex.getName() + " into "
+ jobVertex.getNumberOfSubtasks() + " subtasks");
}
if (jobVertex.getNumberOfSubtasks() < minimumNumberOfSubtasks) {
throw new GraphConversionException("Number of subtasks must be at least " + minimumNumberOfSubtasks);
}
if (maximumNumberOfSubtasks != -1) {
if (jobVertex.getNumberOfSubtasks() > maximumNumberOfSubtasks) {
throw new GraphConversionException("Number of subtasks for vertex " + jobVertex.getName()
+ " can be at most " + maximumNumberOfSubtasks);
}
}
}
// Check number of subtasks per instance
if (jobVertex.getNumberOfSubtasksPerInstance() != -1 && jobVertex.getNumberOfSubtasksPerInstance() < 1) {
throw new GraphConversionException("Cannot set number of subtasks per instance to "
+ jobVertex.getNumberOfSubtasksPerInstance() + " for vertex " + jobVertex.getName());
}
// Assign min/max to the group vertex (settings are actually applied in applyUserDefinedSettings)
groupVertex.setMinMemberSize(minimumNumberOfSubtasks);
groupVertex.setMaxMemberSize(maximumNumberOfSubtasks);
// Assign initial instance to vertex (may be overwritten later on when user settings are applied)
ev.setAllocatedResource(new AllocatedResource(DummyInstance.createDummyInstance(instanceType), instanceType,
null));
// Register input and output vertices separately
if (jobVertex instanceof JobInputVertex) {
final InputSplit[] inputSplits;
// let the task code compute the input splits
if (ev.getEnvironment().getInvokable() instanceof AbstractInputTask) {
try {
inputSplits = ((AbstractInputTask<?>) ev.getEnvironment().getInvokable()).
computeInputSplits(jobVertex.getNumberOfSubtasks());
}
catch (Exception e) {
throw new GraphConversionException("Cannot compute input splits for " + groupVertex.getName() + ": "
+ StringUtils.stringifyException(e));
}
}
else {
throw new GraphConversionException(
"BUG: JobInputVertex contained a task class which was not an input task.");
}
// assign input splits
groupVertex.setInputSplits(inputSplits);
}
// TODO: This is a quick workaround, problem can be solved in a more generic way
if (jobVertex instanceof JobFileOutputVertex) {
final JobFileOutputVertex jbov = (JobFileOutputVertex) jobVertex;
jobVertex.getConfiguration().setString("outputPath", jbov.getFilePath().toString());
}
return ev;
}
/**
* Returns the number of input vertices registered with this execution graph.
*
* @return the number of input vertices registered with this execution graph
*/
public int getNumberOfInputVertices() {
return this.stages.get(0).getNumberOfInputExecutionVertices();
}
/**
* Returns the number of input vertices for the given stage.
*
* @param stage
* the index of the execution stage
* @return the number of input vertices for the given stage
*/
public int getNumberOfInputVertices(int stage) {
if (stage >= this.stages.size()) {
return 0;
}
return this.stages.get(stage).getNumberOfInputExecutionVertices();
}
/**
* Returns the number of output vertices registered with this execution graph.
*
* @return the number of output vertices registered with this execution graph
*/
public int getNumberOfOutputVertices() {
return this.stages.get(0).getNumberOfOutputExecutionVertices();
}
/**
* Returns the number of output vertices for the given stage.
*
* @param stage
* the index of the execution stage
* @return the number of output vertices for the given stage
*/
public int getNumberOfOutputVertices(int stage) {
if (stage >= this.stages.size()) {
return 0;
}
return this.stages.get(stage).getNumberOfOutputExecutionVertices();
}
/**
* Returns the input vertex with the specified index.
*
* @param index
* the index of the input vertex to return
* @return the input vertex with the specified index or <code>null</code> if no input vertex with such an index
* exists
*/
public ExecutionVertex getInputVertex(int index) {
return this.stages.get(0).getInputExecutionVertex(index);
}
/**
* Returns the output vertex with the specified index.
*
* @param index
* the index of the output vertex to return
* @return the output vertex with the specified index or <code>null</code> if no output vertex with such an index
* exists
*/
public ExecutionVertex getOutputVertex(int index) {
return this.stages.get(0).getOutputExecutionVertex(index);
}
/**
* Returns the input vertex with the specified index for the given stage.
*
* @param stage
* the index of the stage
* @param index
* the index of the input vertex to return
* @return the input vertex with the specified index or <code>null</code> if no input vertex with such an index
* exists in that stage
*/
public ExecutionVertex getInputVertex(int stage, int index) {
if (stage >= this.stages.size()) {
return null;
}
return this.stages.get(stage).getInputExecutionVertex(index);
}
/**
* Returns the output vertex with the specified index for the given stage.
*
* @param stage
* the index of the stage
* @param index
* the index of the output vertex to return
* @return the output vertex with the specified index or <code>null</code> if no output vertex with such an index
* exists in that stage
*/
public ExecutionVertex getOutputVertex(int stage, int index) {
if (stage >= this.stages.size()) {
return null;
}
return this.stages.get(stage).getOutputExecutionVertex(index);
}
/**
* Returns the execution stage with number <code>num</code>.
*
* @param num
* the number of the execution stage to be returned
* @return the execution stage with number <code>num</code> or <code>null</code> if no such execution stage exists
*/
public ExecutionStage getStage(int num) {
if (num < this.stages.size()) {
return this.stages.get(num);
}
return null;
}
/**
* Returns the number of execution stages in the execution graph.
*
* @return the number of execution stages in the execution graph
*/
public int getNumberOfStages() {
return this.stages.size();
}
/**
* Identifies an execution vertex by the specified channel ID and returns it.
*
* @param id
* the channel ID to identify the vertex with
* @return the execution vertex which has a channel with ID <code>id</code> or <code>null</code> if no such vertex
* exists in the execution graph
*/
public ExecutionVertex getVertexByChannelID(ChannelID id) {
if (!this.channelToVertexMap.containsKey(id)) {
return null;
}
return this.channelToVertexMap.get(id);
}
/**
* Finds an input channel by its ID and returns it.
*
* @param id
* the channel ID to identify the input channel
* @return the input channel whose ID matches <code>id</code> or <code>null</code> if no such channel is known
*/
public AbstractInputChannel<? extends Record> getInputChannelByID(ChannelID id) {
if (!this.inputChannelMap.containsKey(id)) {
return null;
}
return this.inputChannelMap.get(id);
}
/**
* Finds an output channel by its ID and returns it.
*
* @param id
* the channel ID to identify the output channel
* @return the output channel whose ID matches <code>id</code> or <code>null</code> if no such channel is known
*/
public AbstractOutputChannel<? extends Record> getOutputChannelByID(ChannelID id) {
if (!this.outputChannelMap.containsKey(id)) {
return null;
}
return this.outputChannelMap.get(id);
}
/**
* Returns a (possibly empty) list of execution vertices which are currently assigned to the
* given allocated resource. The vertices in that list may have an arbitrary execution state.
*
* @param allocatedResource
* the allocated resource to check the assignment for
* @return a (possibly empty) list of execution vertices which are currently assigned to the given instance
*/
public synchronized List<ExecutionVertex> getVerticesAssignedToResource(AllocatedResource allocatedResource) {
final List<ExecutionVertex> list = new ArrayList<ExecutionVertex>();
if (allocatedResource == null) {
return list;
}
final Iterator<ExecutionVertex> it = new ExecutionGraphIterator(this, true);
while (it.hasNext()) {
final ExecutionVertex vertex = it.next();
if (allocatedResource.equals(vertex.getAllocatedResource())) {
list.add(vertex);
}
}
return list;
}
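/**
* Returns the execution vertex with the given ID.
*
* @param id
* the ID of the vertex to be returned
* @return the execution vertex with ID <code>id</code> or <code>null</code> if no such vertex exists in the
* execution graph
*/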
public ExecutionVertex getVertexByID(ExecutionVertexID id) {
if (id == null) {
return null;
}
final ExecutionGraphIterator it = new ExecutionGraphIterator(this, true);
while (it.hasNext()) {
final ExecutionVertex vertex = it.next();
if (vertex.getID().equals(id)) {
return vertex;
}
}
return null;
}
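/**
* Returns the execution vertex whose environment is identical to the given one.
*
* @param environment
* the environment to identify the vertex by
* @return the execution vertex with the environment <code>environment</code> or <code>null</code> if no such
* vertex exists in the execution graph
*/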
public ExecutionVertex getVertexByEnvironment(Environment environment) {
if (environment == null) {
return null;
}
final ExecutionGraphIterator it = new ExecutionGraphIterator(this, true);
while (it.hasNext()) {
final ExecutionVertex vertex = it.next();
if (vertex.getEnvironment() == environment) {
return vertex;
}
}
return null;
}
/**
* Checks if the current execution stage has been successfully completed, i.e.
* all vertices in this stage have successfully finished their execution.
*
* @return <code>true</code> if stage is completed, <code>false</code> otherwise
*/
private boolean isCurrentStageCompleted() {
if (this.indexToCurrentExecutionStage >= this.stages.size()) {
return true;
}
final ExecutionGraphIterator it = new ExecutionGraphIterator(this, this.indexToCurrentExecutionStage, true,
true);
while (it.hasNext()) {
final ExecutionVertex vertex = it.next();
if (vertex.getExecutionState() != ExecutionState.FINISHED) {
return false;
}
}
return true;
}
/**
* Checks if the execution of execution graph is finished.
*
* @return <code>true</code> if the execution of the graph is finished, <code>false</code> otherwise
*/
public boolean isExecutionFinished() {
return (getJobStatus() == InternalJobStatus.FINISHED);
}
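/**
* Checks that every output channel of the given vertex is connected to a known input channel and that the
* vertex owning that input channel has a real (non-dummy) instance allocated to it.
*
* @param executionVertex
* the vertex whose channels are to be checked
* @throws ChannelSetupException
* thrown if a connected input channel cannot be found or the target vertex has no usable resources
*/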
public void prepareChannelsForExecution(ExecutionVertex executionVertex) throws ChannelSetupException {
// Prepare channels
for (int k = 0; k < executionVertex.getEnvironment().getNumberOfOutputGates(); k++) {
final OutputGate<? extends Record> outputGate = executionVertex.getEnvironment().getOutputGate(k);
for (int l = 0; l < outputGate.getNumberOfOutputChannels(); l++) {
final AbstractOutputChannel<? extends Record> outputChannel = outputGate.getOutputChannel(l);
final AbstractInputChannel<? extends Record> inputChannel = this.inputChannelMap.get(outputChannel
.getConnectedChannelID());
if (inputChannel == null) {
throw new ChannelSetupException("Cannot find input channel to output channel "
+ outputChannel.getID());
}
final ExecutionVertex targetVertex = this.channelToVertexMap.get(inputChannel.getID());
final AllocatedResource targetResources = targetVertex.getAllocatedResource();
if (targetResources == null) {
throw new ChannelSetupException("Cannot find allocated resources for target vertex "
+ targetVertex.getID() + " in instance map");
}
if (targetResources.getInstance() instanceof DummyInstance) {
throw new ChannelSetupException("Allocated instance for " + targetVertex.getID()
+ " is a dummy vertex!");
}
}
}
}
/**
* Returns the job ID of the job configuration this execution graph was originally constructed from.
*
* @return the job ID of the job configuration this execution graph was originally constructed from
*/
public JobID getJobID() {
return this.jobID;
}
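/**
* Replaces all network channels between vertices of the given stage which run on the same instance by in-memory
* channels.
*
* @param stageNumber
* the number of the stage whose network channels are to be checked
*/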
public void removeUnnecessaryNetworkChannels(int stageNumber) {
if (stageNumber >= this.stages.size()) {
throw new IllegalArgumentException("removeUnnecessaryNetworkChannels called on an illegal stage ("
+ stageNumber + ")");
}
final ExecutionStage executionStage = this.stages.get(stageNumber);
for (int i = 0; i < executionStage.getNumberOfStageMembers(); i++) {
final ExecutionGroupVertex groupVertex = executionStage.getStageMember(i);
for (int j = 0; j < groupVertex.getCurrentNumberOfGroupMembers(); j++) {
final ExecutionVertex sourceVertex = groupVertex.getGroupMember(j);
for (int k = 0; k < sourceVertex.getEnvironment().getNumberOfOutputGates(); k++) {
final OutputGate<? extends Record> outputGate = sourceVertex.getEnvironment().getOutputGate(k);
for (int l = 0; l < outputGate.getNumberOfOutputChannels(); l++) {
final AbstractOutputChannel<? extends Record> oldOutputChannel = outputGate.getOutputChannel(l);
// Skip if not a network channel
if (!(oldOutputChannel instanceof NetworkOutputChannel<?>)) {
continue;
}
// Get matching input channel
final ExecutionVertex targetVertex = this.channelToVertexMap.get(oldOutputChannel
.getConnectedChannelID());
if (targetVertex == null) {
throw new RuntimeException("Cannot find target vertex: Inconsistency...");
}
// Run on the same instance?
if (!targetVertex.getAllocatedResource().getInstance().equals(
sourceVertex.getAllocatedResource().getInstance())) {
continue;
}
final AbstractInputChannel<? extends Record> oldInputChannel = getInputChannelByID(oldOutputChannel
.getConnectedChannelID());
final InputGate<? extends Record> inputGate = oldInputChannel.getInputGate();
// Replace channels
final AbstractOutputChannel<? extends Record> newOutputChannel = outputGate.replaceChannel(
oldOutputChannel.getID(), ChannelType.INMEMORY);
final AbstractInputChannel<? extends Record> newInputChannel = inputGate.replaceChannel(
oldInputChannel.getID(), ChannelType.INMEMORY);
// The new channels reuse the IDs of the old channels, so only the channel maps must be updated
this.outputChannelMap.put(newOutputChannel.getID(), newOutputChannel);
this.inputChannelMap.put(newInputChannel.getID(), newInputChannel);
}
}
}
}
}
/**
* Returns the index of the current execution stage.
*
* @return the index of the current execution stage
*/
public int getIndexOfCurrentExecutionStage() {
return this.indexToCurrentExecutionStage;
}
/**
* Returns the stage which is currently executed.
*
* @return the currently executed stage or <code>null</code> if the job execution is already completed
*/
public ExecutionStage getCurrentExecutionStage() {
if (this.indexToCurrentExecutionStage >= this.stages.size()) {
return null;
}
return this.stages.get(this.indexToCurrentExecutionStage);
}
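/**
* Recomputes the stage numbers of all group vertices and rebuilds the list of execution stages accordingly.
* Vertices connected by a file channel are placed in different stages, vertices connected by any other channel
* type must share the same stage.
*/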
public void repairStages() {
final Map<ExecutionGroupVertex, Integer> stageNumbers = new HashMap<ExecutionGroupVertex, Integer>();
ExecutionGroupVertexIterator it = new ExecutionGroupVertexIterator(this, true, -1);
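// Traverse the graph from the input vertices and propagate the stage numbers along the forward edges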
while (it.hasNext()) {
final ExecutionGroupVertex groupVertex = it.next();
int precedingNumber = 0;
if (stageNumbers.containsKey(groupVertex)) {
precedingNumber = stageNumbers.get(groupVertex).intValue();
} else {
stageNumbers.put(groupVertex, Integer.valueOf(precedingNumber));
}
for (int i = 0; i < groupVertex.getNumberOfForwardLinks(); i++) {
final ExecutionGroupEdge edge = groupVertex.getForwardEdge(i);
if (!stageNumbers.containsKey(edge.getTargetVertex())) {
// Target vertex has not yet been discovered
if (edge.getChannelType() != ChannelType.FILE) {
// Same stage as preceding vertex
stageNumbers.put(edge.getTargetVertex(), Integer.valueOf(precedingNumber));
} else {
// File channel, increase stage of target vertex by one
stageNumbers.put(edge.getTargetVertex(), Integer.valueOf(precedingNumber + 1));
}
} else {
final int stageNumber = stageNumbers.get(edge.getTargetVertex()).intValue();
if (edge.getChannelType() != ChannelType.FILE) {
if (stageNumber != precedingNumber) {
stageNumbers.put(edge.getTargetVertex(), (int) Math.max(precedingNumber, stageNumber));
}
} else {
// File channel, increase stage of target vertex by one
if (stageNumber != (precedingNumber + 1)) {
stageNumbers.put(edge.getTargetVertex(), (int) Math.max(precedingNumber + 1, stageNumber));
}
}
}
}
}
// Traverse the graph backwards (starting from the output vertices) to make sure vertices are allocated in a
// stage as high as possible
it = new ExecutionGroupVertexIterator(this, false, -1);
while (it.hasNext()) {
final ExecutionGroupVertex groupVertex = it.next();
final int succeedingNumber = stageNumbers.get(groupVertex);
for (int i = 0; i < groupVertex.getNumberOfBackwardLinks(); i++) {
final ExecutionGroupEdge edge = groupVertex.getBackwardEdge(i);
final int stageNumber = stageNumbers.get(edge.getSourceVertex());
if (edge.getChannelType() == ChannelType.FILE) {
if (stageNumber < (succeedingNumber - 1)) {
stageNumbers.put(edge.getSourceVertex(), Integer.valueOf(succeedingNumber - 1));
}
} else {
if (stageNumber != succeedingNumber) {
LOG.error(edge.getSourceVertex() + " and " + edge.getTargetVertex()
+ " are assigned to different stages although not connected by a file channel");
}
}
}
}
// Finally, assign the new stage numbers
this.stages.clear();
final Iterator<Map.Entry<ExecutionGroupVertex, Integer>> it2 = stageNumbers.entrySet().iterator();
while (it2.hasNext()) {
final Map.Entry<ExecutionGroupVertex, Integer> entry = it2.next();
final ExecutionGroupVertex groupVertex = entry.getKey();
final int stageNumber = entry.getValue().intValue();
// Prevent out of bounds exceptions
while (this.stages.size() <= stageNumber) {
this.stages.add(null);
}
ExecutionStage executionStage = this.stages.get(stageNumber);
// If the stage does not exist yet, create it
if (executionStage == null) {
executionStage = new ExecutionStage(this, stageNumber);
this.stages.set(stageNumber, executionStage);
}
executionStage.addStageMember(groupVertex);
groupVertex.setExecutionStage(executionStage);
}
}
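/**
* Makes sure that vertices connected by file or in-memory channels are assigned to the same allocated resource,
* first propagating the assignment along the outgoing and then along the incoming channels.
*/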
public void repairInstanceAssignment() {
Iterator<ExecutionVertex> it = new ExecutionGraphIterator(this, true);
while (it.hasNext()) {
final ExecutionVertex sourceVertex = it.next();
for (int i = 0; i < sourceVertex.getEnvironment().getNumberOfOutputGates(); i++) {
final OutputGate<? extends Record> outputGate = sourceVertex.getEnvironment().getOutputGate(i);
for (int j = 0; j < outputGate.getNumberOfOutputChannels(); j++) {
final AbstractOutputChannel<? extends Record> outputChannel = outputGate.getOutputChannel(j);
final ChannelType channelType = outputChannel.getType();
if (channelType == ChannelType.FILE || channelType == ChannelType.INMEMORY) {
final ExecutionVertex targetVertex = getVertexByChannelID(outputChannel.getConnectedChannelID());
targetVertex.setAllocatedResource(sourceVertex.getAllocatedResource());
}
}
}
}
it = new ExecutionGraphIterator(this, false);
while (it.hasNext()) {
final ExecutionVertex targetVertex = it.next();
for (int i = 0; i < targetVertex.getEnvironment().getNumberOfInputGates(); i++) {
final InputGate<? extends Record> inputGate = targetVertex.getEnvironment().getInputGate(i);
for (int j = 0; j < inputGate.getNumberOfInputChannels(); j++) {
final AbstractInputChannel<? extends Record> inputChannel = inputGate.getInputChannel(j);
final ChannelType channelType = inputChannel.getType();
if (channelType == ChannelType.FILE || channelType == ChannelType.INMEMORY) {
final ExecutionVertex sourceVertex = getVertexByChannelID(inputChannel.getConnectedChannelID());
sourceVertex.setAllocatedResource(targetVertex.getAllocatedResource());
}
}
}
}
}
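/**
* Determines the type of the channel which connects the two given execution vertices.
*
* @param sourceVertex
* the source vertex of the connection
* @param targetVertex
* the target vertex of the connection
* @return the type of the channel connecting the two vertices or <code>null</code> if the vertices are not
* connected
*/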
public ChannelType getChannelType(ExecutionVertex sourceVertex, ExecutionVertex targetVertex) {
final ExecutionGroupVertex sourceGroupVertex = sourceVertex.getGroupVertex();
final ExecutionGroupVertex targetGroupVertex = targetVertex.getGroupVertex();
final List<ExecutionGroupEdge> edges = sourceGroupVertex.getForwardEdges(targetGroupVertex);
if (edges.size() == 0) {
return null;
}
// On a task level, the two vertices are connected
final ExecutionGroupEdge edge = edges.get(0);
// Now let's see if these two concrete subtasks are connected
final OutputGate<? extends Record> outputGate = sourceVertex.getEnvironment().getOutputGate(
edge.getIndexOfOutputGate());
for (int i = 0; i < outputGate.getNumberOfOutputChannels(); i++) {
final AbstractOutputChannel<? extends Record> outputChannel = outputGate.getOutputChannel(i);
final ChannelID inputChannelID = outputChannel.getConnectedChannelID();
if (targetVertex == this.channelToVertexMap.get(inputChannelID)) {
return edge.getChannelType();
}
}
return null;
}
/**
* Returns the job configuration that was originally attached to the job graph.
*
* @return the job configuration that was originally attached to the job graph
*/
public Configuration getJobConfiguration() {
return this.jobConfiguration;
}
/**
* Checks whether the job represented by the execution graph has the status <code>FINISHED</code>.
*
* @return <code>true</code> if the job has the status <code>FINISHED</code>, <code>false</code> otherwise
*/
private boolean jobHasFinishedStatus() {
final Iterator<ExecutionVertex> it = new ExecutionGraphIterator(this, true);
while (it.hasNext()) {
if (it.next().getExecutionState() != ExecutionState.FINISHED) {
return false;
}
}
return true;
}
/**
* Checks whether the job represented by the execution graph has the status <code>SCHEDULED</code>.
*
* @return <code>true</code> if the job has the status <code>SCHEDULED</code>, <code>false</code> otherwise
*/
private boolean jobHasScheduledStatus() {
final Iterator<ExecutionVertex> it = new ExecutionGraphIterator(this, true);
while (it.hasNext()) {
final ExecutionState s = it.next().getExecutionState();
if (s != ExecutionState.CREATED && s != ExecutionState.SCHEDULED && s != ExecutionState.ASSIGNING
&& s != ExecutionState.ASSIGNED && s != ExecutionState.READY) {
return false;
}
}
return true;
}
/**
* Checks whether the job represented by the execution graph has the status <code>CANCELED</code> or
* <code>FAILED</code>.
*
* @return <code>true</code> if the job has the status <code>CANCELED</code> or <code>FAILED</code>,
* <code>false</code> otherwise
*/
private boolean jobHasFailedOrCanceledStatus() {
final Iterator<ExecutionVertex> it = new ExecutionGraphIterator(this, true);
while (it.hasNext()) {
final ExecutionState state = it.next().getExecutionState();
if (state != ExecutionState.CANCELED && state != ExecutionState.FAILED && state != ExecutionState.FINISHED) {
return false;
}
}
return true;
}
/**
* Checks and updates the current execution status of the
* job which is represented by this execution graph.
*
* @param latestStateChange
* the latest execution state change which occurred
*/
public synchronized void checkAndUpdateJobStatus(final ExecutionState latestStateChange) {
switch (this.jobStatus) {
case CREATED:
if (jobHasScheduledStatus()) {
this.jobStatus = InternalJobStatus.SCHEDULED;
} else if (latestStateChange == ExecutionState.CANCELED) {
if (jobHasFailedOrCanceledStatus()) {
this.jobStatus = InternalJobStatus.CANCELED;
}
}
break;
case SCHEDULED:
if (latestStateChange == ExecutionState.RUNNING) {
this.jobStatus = InternalJobStatus.RUNNING;
return;
} else if (latestStateChange == ExecutionState.CANCELED) {
if (jobHasFailedOrCanceledStatus()) {
this.jobStatus = InternalJobStatus.CANCELED;
}
}
break;
case RUNNING:
if (latestStateChange == ExecutionState.CANCELING || latestStateChange == ExecutionState.CANCELED) {
this.jobStatus = InternalJobStatus.CANCELING;
return;
}
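// A vertex that failed with no retries left causes the entire job to start failing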
if (latestStateChange == ExecutionState.FAILED) {
final Iterator<ExecutionVertex> it = new ExecutionGraphIterator(this, true);
while (it.hasNext()) {
final ExecutionVertex vertex = it.next();
if (vertex.getExecutionState() == ExecutionState.FAILED && !vertex.hasRetriesLeft()) {
this.jobStatus = InternalJobStatus.FAILING;
return;
}
}
}
if (jobHasFinishedStatus()) {
this.jobStatus = InternalJobStatus.FINISHED;
}
break;
case FAILING:
if (jobHasFailedOrCanceledStatus()) {
this.jobStatus = InternalJobStatus.FAILED;
}
break;
case FAILED:
LOG.error("Received update of execute state in job status FAILED");
break;
case CANCELING:
if (jobHasFailedOrCanceledStatus()) {
this.jobStatus = InternalJobStatus.CANCELED;
}
break;
case CANCELED:
LOG.error("Received update of execute state in job status CANCELED");
break;
case FINISHED:
LOG.error("Received update of execute state in job status FINISHED");
break;
}
}
/**
* Returns the current status of the job
* represented by this execution graph.
*
* @return the current status of the job
*/
public synchronized InternalJobStatus getJobStatus() {
return this.jobStatus;
}
/**
* {@inheritDoc}
*/
@Override
public synchronized void executionStateChanged(Environment ee, ExecutionState newExecutionState,
String optionalMessage) {
final InternalJobStatus oldStatus = this.jobStatus;
checkAndUpdateJobStatus(newExecutionState);
if (newExecutionState == ExecutionState.FINISHED) {
// It is worth checking if the current stage has completed
if (this.isCurrentStageCompleted()) {
// Increase current execution stage
++this.indexToCurrentExecutionStage;
if (this.indexToCurrentExecutionStage < this.stages.size()) {
final Iterator<ExecutionStageListener> it = this.executionStageListeners.iterator();
final ExecutionStage nextExecutionStage = getCurrentExecutionStage();
while (it.hasNext()) {
it.next().nextExecutionStageEntered(ee.getJobID(), nextExecutionStage);
}
}
}
}
if (this.jobStatus != oldStatus) {
// The task caused the entire job to fail, save the error description
if (this.jobStatus == InternalJobStatus.FAILING) {
this.errorDescription = optionalMessage;
}
// If this is the final failure state change, reuse the saved error description
if (this.jobStatus == InternalJobStatus.FAILED) {
optionalMessage = this.errorDescription;
}
final Iterator<JobStatusListener> it = this.jobStatusListeners.iterator();
while (it.hasNext()) {
it.next().jobStatusHasChanged(this, this.jobStatus, optionalMessage);
}
}
}
/**
* Registers a new {@link JobStatusListener} object with this execution graph.
* After being registered the object will receive notifications about changes
* of the job status. It is not possible to register the same listener object
* twice.
*
* @param jobStatusListener
* the listener object to register
*/
public synchronized void registerJobStatusListener(final JobStatusListener jobStatusListener) {
if (jobStatusListener == null) {
return;
}
if (!this.jobStatusListeners.contains(jobStatusListener)) {
this.jobStatusListeners.add(jobStatusListener);
}
}
/**
* Unregisters the given {@link JobStatusListener} object. After having called this
* method, the object will no longer receive notifications about changes of the job
* status.
*
* @param jobStatusListener
* the listener object to unregister
*/
public synchronized void unregisterJobStatusListener(final JobStatusListener jobStatusListener) {
if (jobStatusListener == null) {
return;
}
this.jobStatusListeners.remove(jobStatusListener);
}
/**
* Registers a new {@link ExecutionStageListener} object with this execution graph. After being registered the
* object will receive a notification whenever the job has entered its next execution stage. Note that a
* notification is not sent when the job has entered its initial execution stage.
*
* @param executionStageListener
* the listener object to register
*/
public synchronized void registerExecutionStageListener(final ExecutionStageListener executionStageListener) {
if (executionStageListener == null) {
return;
}
if (!this.executionStageListeners.contains(executionStageListener)) {
this.executionStageListeners.add(executionStageListener);
}
}
/**
* Unregisters the given {@link ExecutionStageListener} object. After having called this method, the object will no
* longer receive notifications about the execution stage progress.
*
* @param executionStageListener
* the listener object to unregister
*/
public synchronized void unregisterExecutionStageListener(final ExecutionStageListener executionStageListener) {
if (executionStageListener == null) {
return;
}
this.executionStageListeners.remove(executionStageListener);
}
/**
* Returns the name of the original job graph.
*
* @return the name of the original job graph, possibly <code>null</code>
*/
public String getJobName() {
return this.jobName;
}
/**
* {@inheritDoc}
*/
@Override
public void userThreadFinished(Environment ee, Thread userThread) {
// Nothing to do here
}
/**
* {@inheritDoc}
*/
@Override
public void userThreadStarted(Environment ee, Thread userThread) {
// Nothing to do here
}
/**
* Returns a list of vertices which are contained in this execution graph and have a finished checkpoint.
*
* @return list of vertices which are contained in this execution graph and have a finished checkpoint
*/
public List<ExecutionVertex> getVerticesWithCheckpoints() {
final List<ExecutionVertex> list = new ArrayList<ExecutionVertex>();
final Iterator<ExecutionGroupVertex> it = new ExecutionGroupVertexIterator(this, true, -1);
// In the current implementation we just look for vertices which have outgoing file channels
while (it.hasNext()) {
final ExecutionGroupVertex groupVertex = it.next();
for (int i = 0; i < groupVertex.getNumberOfForwardLinks(); i++) {
if (groupVertex.getForwardEdge(i).getChannelType() == ChannelType.FILE) {
for (int j = 0; j < groupVertex.getCurrentNumberOfGroupMembers(); j++) {
list.add(groupVertex.getGroupMember(j));
}
break;
}
}
}
return list;
}
}
| Reformatting ExecutionGraph
| nephele/nephele-server/src/main/java/eu/stratosphere/nephele/executiongraph/ExecutionGraph.java | Reformatting ExecutionGraph | <ide><path>ephele/nephele-server/src/main/java/eu/stratosphere/nephele/executiongraph/ExecutionGraph.java
<ide> // Register input and output vertices separately
<ide> if (jobVertex instanceof JobInputVertex) {
<ide> final InputSplit[] inputSplits;
<del>
<add>
<ide> // let the task code compute the input splits
<ide> if (ev.getEnvironment().getInvokable() instanceof AbstractInputTask) {
<ide> try {
<ide> inputSplits = ((AbstractInputTask<?>) ev.getEnvironment().getInvokable()).
<ide> computeInputSplits(jobVertex.getNumberOfSubtasks());
<del> }
<del> catch (Exception e) {
<del> throw new GraphConversionException("Cannot compute input splits for " + groupVertex.getName() + ": "
<add> } catch (Exception e) {
<add> throw new GraphConversionException("Cannot compute input splits for " + groupVertex.getName()
<add> + ": "
<ide> + StringUtils.stringifyException(e));
<ide> }
<del> }
<del> else {
<add> } else {
<ide> throw new GraphConversionException(
<ide> "BUG: JobInputVertex contained a task class which was not an input task.");
<ide> }
<del>
<add>
<ide> // assign input splits
<ide> groupVertex.setInputSplits(inputSplits);
<ide> } |
|
JavaScript | mit | 759b7a3507dab0a3ee44de2450ec44e578c4db3f | 0 | freder/nodegit,StephanieMak/nodegit,Gum-Joe/nodegit,jmurzy/nodegit,IonicaBizauKitchen/nodegit,cbargren/nodegit,KenanSulayman/nodegit,cbargren/nodegit,jdgarcia/nodegit,dkoontz/nodegit,chasingmaxwell/nodegit,tannewt/nodegit,StephanieMak/nodegit,eunomie/nodegit,nodegit/nodegit,kenprice/nodegit,jdgarcia/nodegit,eunomie/nodegit,tannewt/nodegit,taylorzane/nodegit,Naituw/nodegit,chasingmaxwell/nodegit,heavyk/nodegit,cbargren/nodegit,StephanieMak/nodegit,KenanSulayman/nodegit,taylorzane/nodegit,vladikoff/nodegit,chasingmaxwell/nodegit,nodegit/nodegit,KenanSulayman/nodegit,dkoontz/nodegit,danawoodman/nodegit,IonicaBizauKitchen/nodegit,dkoontz/nodegit,jdgarcia/nodegit,cbargren/nodegit,dkoontz/nodegit,nodegit/test2,cwahbong/nodegit,srajko/nodegit,nodegit/test,kenprice/nodegit,srajko/nodegit,nodegit/test2,StephanieMak/nodegit,Gum-Joe/nodegit,kenprice/nodegit,Gum-Joe/nodegit,danawoodman/nodegit,freder/nodegit,jfremy/nodegit,eunomie/nodegit,saper/nodegit,jmurzy/nodegit,heavyk/nodegit,Gum-Joe/nodegit,jfremy/nodegit,danawoodman/nodegit,dancali/nodegit,jfremy/nodegit,cwahbong/nodegit,bengl/nodegit,cwahbong/nodegit,chasingmaxwell/nodegit,tannewt/nodegit,saper/nodegit,srajko/nodegit,jfremy/nodegit,Naituw/nodegit,freder/nodegit,bengl/nodegit,jdgarcia/nodegit,Naituw/nodegit,nodegit/nodegit,implausible/nodegit,nodegit/test2,IonicaBizauKitchen/nodegit,cbargren/nodegit,vladikoff/nodegit,heavyk/nodegit,jmurzy/nodegit,dancali/nodegit,vladikoff/nodegit,KenanSulayman/nodegit,nodegit/test,saper/nodegit,dkoontz/nodegit,implausible/nodegit,heavyk/nodegit,Naituw/nodegit,eunomie/nodegit,saper/nodegit,jmurzy/nodegit,implausible/nodegit,srajko/nodegit,implausible/nodegit,nodegit/nodegit,srajko/nodegit,bengl/nodegit,dancali/nodegit,kenprice/nodegit,bengl/nodegit,nodegit/nodegit,jdgarcia/nodegit,vladikoff/nodegit,kenprice/nodegit,IonicaBizauKitchen/nodegit,freder/nodegit,danawoodman/nodegit,taylorzane/nodegit,jmurzy/nodegit,nodegit/test2,tannewt/nodegit,taylorzane/nodegit,dancali/nodegit,nodegit/test,cwahbong/nodegit | var promisify = require("promisify-node");
var fs = promisify("fs");
// Have to wrap exec, since it has a weird callback signature.
var exec = promisify(function(command, callback) {
return require("child_process").exec(command, callback);
});
before(function(done) {
this.timeout(15000);
var url = "https://github.com/nodegit/nodegit";
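// Pin the callback's first argument to null so mocha's done() never receives the
// promise's resolution value, which it would otherwise treat as an error.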
var done = done.bind(null, null);
fs.exists("test/repos").then(function() {
return fs.mkdir("test/repos").then(function() {
return exec("git init test/repos/empty");
}).then(function() {
return exec("git clone " + url + " test/repos/workdir");
}).then(function() {
var nonrepo = "test/repos/nonrepo";
return fs.mkdir(nonrepo).then(function() {
return fs.writeFile(nonrepo + "/file.txt", "This is a bogus file");
});
})
}).then(done, done);
});
| test/runner.js | var promisify = require("promisify-node");
var fs = promisify("fs");
// Have to wrap exec, since it has a weird callback signature.
var exec = promisify(function(command, callback) {
return require("child_process").exec(command, callback);
});
before(function(done) {
this.timeout(5000);
var url = "https://github.com/nodegit/nodegit";
var done = done.bind(null, null);
fs.exists("test/repos").then(function() {
return fs.mkdir("test/repos").then(function() {
return exec("git init test/repos/empty");
}).then(function() {
return exec("git clone " + url + " test/repos/workdir");
}).then(function() {
var nonrepo = "test/repos/nonrepo";
return fs.mkdir(nonrepo).then(function() {
return fs.writeFile(nonrepo + "/file.txt", "This is a bogus file");
});
})
}).then(done, done);
});
| Increase timeout for tests
| test/runner.js | Increase timeout for tests | <ide><path>est/runner.js
<ide> });
<ide>
<ide> before(function(done) {
<del> this.timeout(5000);
<add> this.timeout(15000);
<ide>
<ide> var url = "https://github.com/nodegit/nodegit";
<ide> var done = done.bind(null, null); |
|
Java | apache-2.0 | f34680e0a300b3b9d81aba8ca3c05bb5fdd05af1 | 0 | marclaporte/jitsi,cobratbq/jitsi,459below/jitsi,HelioGuilherme66/jitsi,mckayclarey/jitsi,procandi/jitsi,iant-gmbh/jitsi,damencho/jitsi,ringdna/jitsi,damencho/jitsi,Metaswitch/jitsi,procandi/jitsi,ringdna/jitsi,Metaswitch/jitsi,jitsi/jitsi,procandi/jitsi,jitsi/jitsi,bebo/jitsi,gpolitis/jitsi,bebo/jitsi,damencho/jitsi,pplatek/jitsi,tuijldert/jitsi,level7systems/jitsi,dkcreinoso/jitsi,ringdna/jitsi,ringdna/jitsi,cobratbq/jitsi,level7systems/jitsi,ibauersachs/jitsi,ibauersachs/jitsi,martin7890/jitsi,iant-gmbh/jitsi,jibaro/jitsi,bebo/jitsi,marclaporte/jitsi,martin7890/jitsi,gpolitis/jitsi,laborautonomo/jitsi,laborautonomo/jitsi,tuijldert/jitsi,mckayclarey/jitsi,gpolitis/jitsi,dkcreinoso/jitsi,jitsi/jitsi,level7systems/jitsi,459below/jitsi,jibaro/jitsi,tuijldert/jitsi,459below/jitsi,gpolitis/jitsi,cobratbq/jitsi,mckayclarey/jitsi,level7systems/jitsi,459below/jitsi,cobratbq/jitsi,bebo/jitsi,martin7890/jitsi,marclaporte/jitsi,Metaswitch/jitsi,pplatek/jitsi,jibaro/jitsi,laborautonomo/jitsi,pplatek/jitsi,martin7890/jitsi,HelioGuilherme66/jitsi,bhatvv/jitsi,Metaswitch/jitsi,tuijldert/jitsi,damencho/jitsi,martin7890/jitsi,mckayclarey/jitsi,HelioGuilherme66/jitsi,marclaporte/jitsi,mckayclarey/jitsi,dkcreinoso/jitsi,dkcreinoso/jitsi,dkcreinoso/jitsi,bhatvv/jitsi,jibaro/jitsi,laborautonomo/jitsi,bhatvv/jitsi,bhatvv/jitsi,jibaro/jitsi,iant-gmbh/jitsi,iant-gmbh/jitsi,ibauersachs/jitsi,bebo/jitsi,pplatek/jitsi,procandi/jitsi,HelioGuilherme66/jitsi,jitsi/jitsi,cobratbq/jitsi,tuijldert/jitsi,bhatvv/jitsi,ibauersachs/jitsi,jitsi/jitsi,459below/jitsi,marclaporte/jitsi,ibauersachs/jitsi,gpolitis/jitsi,procandi/jitsi,laborautonomo/jitsi,iant-gmbh/jitsi,HelioGuilherme66/jitsi,pplatek/jitsi,ringdna/jitsi,damencho/jitsi,level7systems/jitsi | /*
* SIP Communicator, the OpenSource Java VoIP and Instant Messaging client.
*
* Distributable under LGPL license.
* See terms of license at gnu.org.
*/
package net.java.sip.communicator.impl.protocol.sip;
import java.net.*;
import java.text.*;
import java.util.*;
import javax.sip.*;
import javax.sip.address.*;
import javax.sip.header.*;
import javax.sip.message.*;
import net.java.sip.communicator.service.protocol.*;
import net.java.sip.communicator.service.protocol.event.*;
import net.java.sip.communicator.util.*;
import gov.nist.javax.sip.message.*;
import gov.nist.javax.sip.stack.*;
/**
* Implements all call management logic and exports basic telephony support by
* implementing OperationSetBasicTelephony.
*
* @author Emil Ivov
*/
public class OperationSetBasicTelephonySipImpl
implements OperationSetBasicTelephony
, SipListener
{
private static final Logger logger
= Logger.getLogger(OperationSetBasicTelephonySipImpl.class);
/**
* A reference to the <tt>ProtocolProviderServiceSipImpl</tt> instance
* that created us.
*/
private ProtocolProviderServiceSipImpl protocolProvider = null;
/**
* A list of listeners registered for call events.
*/
private Vector callListeners = new Vector();
/**
* Contains references for all currently active (non ended) calls.
*/
private ActiveCallsRepository activeCallsRepository
= new ActiveCallsRepository(this);
/**
* The name of the boolean property that the user could use to specify
* whether incoming calls should be rejected if the user name in the
* destination (to) address does not match the one that we have in our
* sip address.
*/
private static final String FAIL_CALLS_ON_DEST_USER_MISMATCH
= "net.java.sip.communicator.impl.protocol.sip."
+"FAIL_CALLS_ON_DEST_USER_MISMATCH";
/**
* Creates a new instance and adds itself as an <tt>INVITE</tt> method
* handler in the creating protocolProvider.
*
* @param protocolProvider a reference to the
* <tt>ProtocolProviderServiceSipImpl</tt> instance that created us.
*/
public OperationSetBasicTelephonySipImpl(
ProtocolProviderServiceSipImpl protocolProvider)
{
this.protocolProvider = protocolProvider;
protocolProvider.registerMethodProcessor(Request.INVITE, this);
protocolProvider.registerMethodProcessor(Request.CANCEL, this);
protocolProvider.registerMethodProcessor(Request.ACK, this);
protocolProvider.registerMethodProcessor(Request.BYE, this);
}
/**
* Registers <tt>listener</tt> with this provider so that it
* could be notified when incoming calls are received.
*
* @param listener the listener to register with this provider.
*/
public void addCallListener(CallListener listener)
{
synchronized(callListeners)
{
if (!callListeners.contains(listener))
callListeners.add(listener);
}
}
/**
* Create a new call and invite the specified CallParticipant to it.
*
* @param callee the sip address of the callee that we should invite to a
* new call.
* @return CallParticipant the CallParticipant that will be represented by
* the specified uri. All following state change events will be
* delivered through that call participant. The Call that this
* participant is a member of could be retrieved from the
* CallParticipant instance with the use of the corresponding method.
* @throws OperationFailedException with the corresponding code if we fail
* to create the call.
* @throws ParseException if <tt>callee</tt> is not a valid sip address
* string.
*/
public Call createCall(String callee)
throws OperationFailedException, ParseException
{
Address toAddress = parseAddressStr(callee);
return createOutgoingCall(toAddress);
}
/**
* Create a new call and invite the specified CallParticipant to it.
*
* @param callee the address of the callee that we should invite to a
* new call.
* @return CallParticipant the CallParticipant that will be represented by
* the specified uri. All following state change events will be
* delivered through that call participant. The Call that this
* participant is a member of could be retrieved from the
* CallParticipant instance with the use of the corresponding method.
* @throws OperationFailedException with the corresponding code if we fail
* to create the call.
*/
public Call createCall(Contact callee)
throws OperationFailedException
{
Address toAddress = null;
try
{
toAddress = parseAddressStr(callee.getAddress());
}
catch (ParseException ex)
{
//couldn't happen
logger.error(ex.getMessage(), ex);
throw new IllegalArgumentException(ex.getMessage());
}
return createOutgoingCall(toAddress);
}
/**
* Init and establish the specified call.
*
* @param calleeAddress the address of the callee that we'd like to connect
* with.
*
* @return CallParticipant the CallParticipant that will be represented by
* the specified uri. All following state change events will be
* delivered through that call participant. The Call that this
* participant is a member of could be retrieved from the
* CallParticipant instance with the use of the corresponding method.
*
* @throws OperationFailedException with the corresponding code if we fail
* to create the call.
*/
private CallSipImpl createOutgoingCall(Address calleeAddress)
throws OperationFailedException
{
//create the invite request
Request invite = createInviteRequest(calleeAddress);
//Content
ContentTypeHeader contentTypeHeader = null;
try
{
//content type should be application/sdp (not applications)
//reported by Oleg Shevchenko (Miratech)
contentTypeHeader =
protocolProvider.getHeaderFactory().createContentTypeHeader(
"application", "sdp");
}
catch (ParseException ex)
{
//Shouldn't happen
logger.error(
"Failed to create a content type header for the INVITE "
+ "request"
, ex);
throw new OperationFailedException(
"Failed to create a content type header for the INVITE "
+ "request"
, OperationFailedException.INTERNAL_ERROR
, ex);
}
//Transaction
ClientTransaction inviteTransaction;
SipProvider jainSipProvider
= protocolProvider.getDefaultJainSipProvider();
try
{
inviteTransaction = jainSipProvider.getNewClientTransaction(invite);
}
catch (TransactionUnavailableException ex)
{
logger.error(
"Failed to create inviteTransaction.\n"
+ "This is most probably a network connection error."
, ex);
throw new OperationFailedException(
"Failed to create inviteTransaction.\n"
+ "This is most probably a network connection error."
, OperationFailedException.INTERNAL_ERROR
, ex);
}
//create the call participant
CallParticipantSipImpl callParticipant
= createCallParticipantFor(inviteTransaction, jainSipProvider);
//invite content
try
{
invite.setContent(
SipActivator.getMediaService().generateSdpOffer(callParticipant)
, contentTypeHeader);
}
catch (ParseException ex)
{
logger.error(
"Failed to parse sdp data while creating invite request!"
, ex);
throw new OperationFailedException(
"Failed to parse sdp data while creating invite request!"
, OperationFailedException.INTERNAL_ERROR
, ex);
}
try
{
inviteTransaction.sendRequest();
if (logger.isDebugEnabled())
logger.debug("sent request: " + invite);
}
catch (SipException ex)
{
logger.error(
"An error occurred while sending invite request", ex);
throw new OperationFailedException(
"An error occurred while sending invite request"
, OperationFailedException.NETWORK_FAILURE
, ex);
}
return (CallSipImpl)callParticipant.getCall();
}
/**
* Creates and dispatches a <tt>CallEvent</tt> notifying registered
* listeners that an event with id <tt>eventID</tt> has occurred on
* <tt>sourceCall</tt>.
*
* @param eventID the ID of the event to dispatch
* @param sourceCall the call on which the event has occurred.
*/
protected void fireCallEvent( int eventID,
CallSipImpl sourceCall)
{
CallEvent cEvent = new CallEvent(sourceCall, eventID);
logger.debug("Dispatching a CallEvent to "
+ callListeners.size()
+" listeners. event is: " + cEvent.toString());
Iterator listeners = null;
synchronized(callListeners)
{
listeners = new ArrayList(callListeners).iterator();
}
while(listeners.hasNext())
{
CallListener listener = (CallListener)listeners.next();
if(eventID == CallEvent.CALL_INITIATED)
listener.outgoingCallCreated(cEvent);
else if(eventID == CallEvent.CALL_RECEIVED)
listener.incomingCallReceived(cEvent);
else if(eventID == CallEvent.CALL_ENDED)
listener.callEnded(cEvent);
}
}
/**
* Returns an iterator over all currently active calls.
*
* @return an iterator over all currently active calls.
*/
public Iterator getActiveCalls()
{
return activeCallsRepository.getActiveCalls();
}
/**
* Resumes communication with a call participant previously put on hold.
*
* @param participant the call participant to put on hold.
*/
public void putOffHold(CallParticipant participant)
{
/** @todo implement putOffHold() */
}
/**
* Puts the specified CallParticipant "on hold".
*
* @param participant the participant that we'd like to put on hold.
*/
public void putOnHold(CallParticipant participant)
{
/** @todo implement putOnHold() */
}
/**
* Removes the <tt>listener</tt> from the list of call listeners.
*
* @param listener the listener to unregister.
*/
public void removeCallListener(CallListener listener)
{
synchronized(callListeners)
{
callListeners.remove(listener);
}
}
/**
* Processes a Request received on a SipProvider upon which this SipListener
* is registered.
* <p>
*
* @param requestEvent requestEvent fired from the SipProvider to the
* <tt>SipListener</tt> representing a Request received from the network.
*/
public void processRequest(RequestEvent requestEvent)
{
ServerTransaction serverTransaction = requestEvent
.getServerTransaction();
SipProvider jainSipProvider = (SipProvider)requestEvent.getSource();
Request request = requestEvent.getRequest();
if (serverTransaction == null)
{
try
{
serverTransaction = jainSipProvider.getNewServerTransaction(
request);
}
catch (TransactionAlreadyExistsException ex)
{
//let's not scare the user and only log a message
logger.error("Failed to create a new server"
+ "transaction for an incoming request\n"
+ "(Next message contains the request)"
, ex);
return;
}
catch (TransactionUnavailableException ex)
{
//let's not scare the user and only log a message
logger.error("Failed to create a new server"
+ "transaction for an incoming request\n"
+ "(Next message contains the request)"
, ex);
return;
}
}
//INVITE
if (request.getMethod().equals(Request.INVITE))
{
logger.debug("received INVITE");
if (serverTransaction.getDialog().getState() == null)
{
if (logger.isDebugEnabled())
logger.debug("request is an INVITE. Dialog state="
+ serverTransaction.getDialog().getState());
processInvite(jainSipProvider, serverTransaction, request);
}
else
{
logger.error("reINVITE-s are not currently supported.");
}
}
//ACK
else if (request.getMethod().equals(Request.ACK))
{
processAck(serverTransaction, request);
}
//BYE
else if (request.getMethod().equals(Request.BYE))
{
processBye(serverTransaction, request);
}
//CANCEL
else if (request.getMethod().equals(Request.CANCEL))
{
processCancel(serverTransaction, request);
}
}
/**
* Process an asynchronously reported TransactionTerminatedEvent.
*
* @param transactionTerminatedEvent -- an event that indicates that the
* transaction has transitioned into the terminated state.
*/
public void processTransactionTerminated(
TransactionTerminatedEvent transactionTerminatedEvent)
{
//nothing to do here.
}
/**
* Analyzes the incoming <tt>responseEvent</tt> and then forwards it to the
* proper event handler.
*
* @param responseEvent the responseEvent that we received from the
* ProtocolProviderService.
*/
public void processResponse(ResponseEvent responseEvent)
{
ClientTransaction clientTransaction = responseEvent
.getClientTransaction();
Response response = responseEvent.getResponse();
CSeqHeader cseq = ((CSeqHeader)response.getHeader(CSeqHeader.NAME));
if (cseq == null)
{
logger.error("An incoming response did not contain a CSeq header");
return;
}
String method = cseq.getMethod();
SipProvider sourceProvider = (SipProvider)responseEvent.getSource();
//OK
if (response.getStatusCode() == Response.OK)
{
if(method.equals(Request.INVITE))
{
processInviteOK(clientTransaction, response);
}
else if (method.equals(Request.BYE))
{
//ignore
}
}
//Ringing
else if (response.getStatusCode() == Response.RINGING)
{
processRinging(clientTransaction, response);
}
//Trying
else if (response.getStatusCode() == Response.TRYING)
{
processTrying(clientTransaction, response);
}
//Busy here.
else if (response.getStatusCode() == Response.BUSY_HERE)
{
processBusyHere(clientTransaction, response);
}
//401 UNAUTHORIZED
else if (response.getStatusCode() == Response.UNAUTHORIZED
|| response.getStatusCode()
== Response.PROXY_AUTHENTICATION_REQUIRED)
{
processAuthenticationChallenge(clientTransaction
, response
, sourceProvider);
}
//errors
else if ( response.getStatusCode() / 100 == 4 )
{
CallParticipantSipImpl callParticipant = activeCallsRepository
.findCallParticipant(clientTransaction.getDialog());
logger.error("Received error: " +response.getStatusCode()
+" "+ response.getReasonPhrase());
if(callParticipant != null)
callParticipant.setState(CallParticipantState.FAILED);
}
//ignore everything else.
}
/**
* Updates the call state of the corresponding call participant.
*
* @param clientTransaction the transaction in which the response was
* received.
* @param response the trying response.
*/
private void processTrying(ClientTransaction clientTransaction,
Response response)
{
Dialog dialog = clientTransaction.getDialog();
//find the call participant
CallParticipantSipImpl callParticipant
= activeCallsRepository.findCallParticipant(dialog);
if (callParticipant == null)
{
logger.debug("Received a stray trying response.");
return;
}
//change status
callParticipant.setState(CallParticipantState.CONNECTING);
}
/**
* Updates the call state of the corresponding call participant. We'll
* also try to extract any details here that might be of use for call
* participant presentation and that we didn't have when establishing the
* call.
*
* @param clientTransaction the transaction in which the response was
* received.
* @param response the ringing response.
*/
private void processRinging(ClientTransaction clientTransaction,
Response response)
{
Dialog dialog = clientTransaction.getDialog();
//find the call participant
CallParticipantSipImpl callParticipant
= activeCallsRepository.findCallParticipant(dialog);
if (callParticipant == null)
{
logger.debug("Received a stray trying response.");
return;
}
//try to update the display name.
ContactHeader remotePartyContactHeader
= (ContactHeader)response.getHeader(ContactHeader.NAME);
if(remotePartyContactHeader != null)
{
Address remotePartyAddress = remotePartyContactHeader.getAddress();
String displayName = remotePartyAddress.getDisplayName();
if(displayName != null && displayName.trim().length() > 0)
{
callParticipant.setDisplayName(displayName);
}
}
//change status.
callParticipant.setState(CallParticipantState.ALERTING_REMOTE_SIDE);
}
/**
* Sets to CONNECTED that state of the corresponding call participant and
* sends an ACK.
* @param clientTransaction the <tt>ClientTransaction</tt> that the response
* arrived in.
* @param ok the OK <tt>Response</tt> to process
*/
private void processInviteOK(ClientTransaction clientTransaction,
Response ok)
{
Dialog dialog = clientTransaction.getDialog();
//find the call
CallParticipantSipImpl callParticipant
= activeCallsRepository.findCallParticipant(dialog);
if (callParticipant == null)
{
logger.debug("Received a stray ok response.");
return;
}
//Send ACK
try
{
//Need to use dialog generated ACKs so that the remote UA core
//sees them - Fixed by M.Ranganathan
Request ack = clientTransaction.getDialog()
.createRequest(Request.ACK);
clientTransaction.getDialog().sendAck(ack);
}
catch (SipException ex)
{
logger.error("Failed to acknowledge call!", ex);
callParticipant.setState(CallParticipantState.FAILED);
return;
}
// !!! set sdp content before setting call state as that is where
//listeners get alerted and they need the sdp
callParticipant.setSdpDescription(new String(ok.getRawContent()));
//change status
callParticipant.setState(CallParticipantState.CONNECTED);
}
/**
* Sets corresponding state to the call participant associated with this
* transaction.
* @param clientTransaction the transaction in which
* @param busyHere the busy here Response
*/
private void processBusyHere(ClientTransaction clientTransaction,
Response busyHere)
{
Dialog dialog = clientTransaction.getDialog();
//find the call
CallParticipantSipImpl callParticipant
= activeCallsRepository.findCallParticipant(dialog);
if (callParticipant == null)
{
logger.debug("Received a stray busyHere response.");
return;
}
//change status
callParticipant.setState(CallParticipantState.BUSY);
}
/**
* Attempts to re-generate the corresponding request with the proper
* credentials and terminates the call if it fails.
*
* @param clientTransaction the corresponding transaction
* @param response the challenge
* @param jainSipProvider the provider that received the challenge
*/
private void processAuthenticationChallenge(
ClientTransaction clientTransaction,
Response response,
SipProvider jainSipProvider)
{
//First find the call and the call participant that this authentication
//request concerns.
CallParticipantSipImpl callParticipant = activeCallsRepository
.findCallParticipant(clientTransaction.getDialog());
if (callParticipant == null) {
logger.debug("Received an authorization challenge for no "
+"participant. authorizing anyway.");
}
try
{
logger.debug("Authenticating an INVITE request.");
ClientTransaction retryTran
= protocolProvider.getSipSecurityManager().handleChallenge(
response
, clientTransaction
, jainSipProvider);
//There is a new dialog that will be started with this request. Get
//that dialog and record it into the Call objet for later use (by
//Bye-s for example).
//if the request was BYE then we need to authorize it anyway even
//if the call and the call participant are no longer there
if(callParticipant !=null)
{
callParticipant.setDialog(retryTran.getDialog());
callParticipant.setFirstTransaction(retryTran);
callParticipant.setJainSipProvider(jainSipProvider);
}
retryTran.sendRequest();
}
catch (Exception exc)
{
logger.error("We failed to authenticate an INVITE request.", exc);
//tell the others we couldn't register
callParticipant.setState(CallParticipantState.FAILED);
}
}
/**
* Processes a retransmit or expiration Timeout of an underlying
* {@link Transaction}handled by this SipListener. This Event notifies the
* application that a retransmission or transaction Timer expired in the
* SipProvider's transaction state machine. The TimeoutEvent encapsulates
* the specific timeout type and the transaction identifier either client or
* server upon which the timeout occurred. The type of Timeout can be
* determined by:
* <code>timeoutType = timeoutEvent.getTimeout().getValue();</code>
*
* @param timeoutEvent the timeoutEvent received indicating either the
* message retransmit or transaction timed out.
*/
public void processTimeout(TimeoutEvent timeoutEvent)
{
Transaction transaction;
if (timeoutEvent.isServerTransaction()) {
transaction = timeoutEvent.getServerTransaction();
}
else {
transaction = timeoutEvent.getClientTransaction();
}
CallParticipantSipImpl callParticipant = activeCallsRepository
.findCallParticipant(transaction.getDialog());
if (callParticipant == null) {
logger.debug("Got a headless timeout event." + timeoutEvent);
return;
}
//change status
callParticipant.setState(CallParticipantState.FAILED
, "The remote party has not replied!"
+ "The call will be disconnected");
}
/**
* Process an asynchronously reported IO Exception. Asynchronous IO
* Exceptions may occur as a result of errors during retransmission of
* requests. The transaction state machine requires to report IO Exceptions
* to the application immediately (according to RFC 3261). This method
* enables an implementation to propagate the asynchronous handling of IO
* Exceptions to the application.
*
* @param exceptionEvent The Exception event that is reported to the
* application.
*/
public void processIOException(IOExceptionEvent exceptionEvent)
{
logger.error("Got an asynchronous exception event. host="
+ exceptionEvent.getHost() + " port=" + exceptionEvent.getPort());
}
/**
* Process an asynchronously reported DialogTerminatedEvent.
*
* @param dialogTerminatedEvent -- an event that indicates that the
* dialog has transitioned into the terminated state.
*/
public void processDialogTerminated(DialogTerminatedEvent
dialogTerminatedEvent)
{
CallParticipantSipImpl callParticipant = activeCallsRepository
.findCallParticipant(dialogTerminatedEvent.getDialog());
if (callParticipant == null)
{
return;
}
//change status
callParticipant.setState(CallParticipantState.DISCONNECTED);
}
/**
* Parses the <tt>uriStr</tt> string and returns a JAIN SIP URI.
*
* @param uriStr a <tt>String</tt> containing the uri to parse.
*
* @return a URI object corresponding to the <tt>uriStr</tt> string.
* @throws ParseException if uriStr is not properly formatted.
*/
private Address parseAddressStr(String uriStr)
throws ParseException
{
uriStr = uriStr.trim();
//Handle default domain name (i.e. transform 1234 -> [email protected])
//assuming that if no domain name is specified then it should be the
//same as ours.
if (uriStr.indexOf('@') == -1
&& !uriStr.trim().startsWith("tel:"))
{
uriStr = uriStr + "@"
+ ((SipURI)protocolProvider.getOurSipAddress().getURI())
.getHost();
}
//Let's be uri fault tolerant and add the sip: scheme if there is none.
if (uriStr.toLowerCase().indexOf("sip:") == -1 //no sip scheme
&& uriStr.indexOf('@') != -1) //most probably a sip uri
{
uriStr = "sip:" + uriStr;
}
//Request URI
Address uri
= protocolProvider.getAddressFactory().createAddress(uriStr);
return uri;
}
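    /*
     * A quick sketch of the normalization performed by parseAddressStr()
     * above, assuming (hypothetically) that our own account is
     * sip:[email protected]:
     *
     *   "1234"              ->  "sip:[email protected]"
     *   "[email protected]"  ->  "sip:[email protected]"
     *   "tel:+12025550123"  ->  left untouched as a tel: URI
     */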
/**
* Creates an invite request destined for <tt>callee</tt>.
*
* @param toAddress the sip address of the callee that the request is meant
* for.
* @return a newly created sip <tt>Request</tt> destined for
* <tt>callee</tt>.
     * @throws OperationFailedException with the corresponding code if creating
* the request fails.
*/
private Request createInviteRequest(Address toAddress)
throws OperationFailedException
{
InetAddress destinationInetAddress = null;
try
{
destinationInetAddress = InetAddress.getByName(
( (SipURI) toAddress.getURI()).getHost());
}
catch (UnknownHostException ex)
{
throw new IllegalArgumentException(
( (SipURI) toAddress.getURI()).getHost()
+ " is not a valid internet address " + ex.getMessage());
}
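        //we resolve the destination host early so that the Via headers
        //created further below can be generated for the right local interface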
//Call ID
CallIdHeader callIdHeader = protocolProvider
.getDefaultJainSipProvider().getNewCallId();
//CSeq
CSeqHeader cSeqHeader = null;
try
{
cSeqHeader = protocolProvider.getHeaderFactory()
.createCSeqHeader(1l, Request.INVITE);
}
catch (InvalidArgumentException ex)
{
//Shouldn't happen
logger.error(
"An unexpected erro occurred while"
+ "constructing the CSeqHeadder", ex);
throw new OperationFailedException(
"An unexpected erro occurred while"
+ "constructing the CSeqHeadder"
, OperationFailedException.INTERNAL_ERROR
, ex);
}
catch(ParseException exc)
{
//shouldn't happen
logger.error(
"An unexpected erro occurred while"
+ "constructing the CSeqHeadder", exc);
throw new OperationFailedException(
"An unexpected erro occurred while"
+ "constructing the CSeqHeadder"
, OperationFailedException.INTERNAL_ERROR
, exc);
}
//FromHeader
String localTag = protocolProvider.generateLocalTag();
FromHeader fromHeader = null;
ToHeader toHeader = null;
try
{
//FromHeader
fromHeader = protocolProvider.getHeaderFactory()
.createFromHeader(protocolProvider.getOurSipAddress()
, localTag);
//ToHeader
toHeader = protocolProvider.getHeaderFactory()
.createToHeader(toAddress, null);
}
catch (ParseException ex)
{
//these two should never happen.
logger.error(
"An unexpected erro occurred while"
+ "constructing the ToHeader", ex);
throw new OperationFailedException(
"An unexpected erro occurred while"
+ "constructing the ToHeader"
, OperationFailedException.INTERNAL_ERROR
, ex);
}
//ViaHeaders
ArrayList viaHeaders = protocolProvider.getLocalViaHeaders(
destinationInetAddress
, protocolProvider.getDefaultListeningPoint());
//MaxForwards
MaxForwardsHeader maxForwards = protocolProvider
.getMaxForwardsHeader();
//Contact
ContactHeader contactHeader = protocolProvider.getContactHeader();
Request invite = null;
try
{
invite = protocolProvider.getMessageFactory().createRequest(
toHeader.getAddress().getURI()
, Request.INVITE
, callIdHeader
, cSeqHeader
, fromHeader
, toHeader
, viaHeaders
, maxForwards);
}
catch (ParseException ex)
{
//shouldn't happen
logger.error(
"Failed to create invite Request!", ex);
throw new OperationFailedException(
"Failed to create invite Request!"
, OperationFailedException.INTERNAL_ERROR
, ex);
}
//User Agent
UserAgentHeader userAgentHeader
= protocolProvider.getSipCommUserAgentHeader();
if(userAgentHeader != null)
invite.addHeader(userAgentHeader);
//add the contact header.
invite.addHeader(contactHeader);
return invite;
}
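    /*
     * A rough sketch (not a verbatim trace) of the INVITE assembled by
     * createInviteRequest() above; the callee address is hypothetical:
     *
     *   INVITE sip:[email protected] SIP/2.0
     *   Call-ID:      <a newly generated call id>
     *   CSeq:         1 INVITE
     *   From:         <our address>;tag=<newly generated local tag>
     *   To:           <callee address>
     *   Via:          <our local via headers>
     *   Max-Forwards: <provider default>
     *   Contact:      <our contact address>
     *   User-Agent:   <sip-communicator header, when available>
     */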
/**
* Creates a new call and sends a RINGING response.
*
     * @param sourceProvider the provider containing <tt>sourceTransaction</tt>.
* @param serverTransaction the transaction containing the received request.
* @param invite the Request that we've just received.
*/
private void processInvite( SipProvider sourceProvider,
ServerTransaction serverTransaction,
Request invite)
{
logger.trace("Creating call participant.");
Dialog dialog = serverTransaction.getDialog();
CallParticipantSipImpl callParticipant
= createCallParticipantFor(serverTransaction, sourceProvider);
logger.trace("call participant created = " + callParticipant);
//sdp description may be in acks - bug report Laurent Michel
ContentLengthHeader cl = invite.getContentLength();
if (cl != null
&& cl.getContentLength() > 0)
{
callParticipant.setSdpDescription(
new String(invite.getRawContent()));
}
logger.trace("Will verify whether INVITE is properly addressed.");
//Are we the one they are looking for?
javax.sip.address.URI calleeURI = dialog.getLocalParty().getURI();
if (calleeURI.isSipURI())
{
boolean assertUserMatch = Boolean.valueOf(
SipActivator.getConfigurationService().getString(
FAIL_CALLS_ON_DEST_USER_MISMATCH)).booleanValue();
if (assertUserMatch)
{
//user info is case sensitive according to rfc3261
String calleeUser = ( (SipURI) calleeURI).getUser();
String localUser = ((SipURI)protocolProvider.getOurSipAddress()
.getURI()).getUser();
if (calleeUser != null && !calleeUser.equals(localUser))
{
callParticipant.setState(
CallParticipantState.FAILED
, "A call was received here while it appeared "
+"destined to someone else. The call was rejected.");
Response notFound = null;
try {
notFound = protocolProvider.getMessageFactory()
.createResponse( Response.NOT_FOUND, invite);
//attach a to tag
protocolProvider.attachToTag(notFound, dialog);
notFound.setHeader(
protocolProvider.getSipCommUserAgentHeader());
}
catch (ParseException ex) {
logger.error("Error while trying to create a response"
, ex);
callParticipant.setState(
CallParticipantState.FAILED
, "InernalError: " +ex.getMessage());
return;
}
try {
serverTransaction.sendResponse(notFound);
logger.debug("sent a not found response: " + notFound);
}
catch (Exception ex) {
logger.error("Error while trying to send a response"
, ex);
callParticipant.setState(
CallParticipantState.FAILED
, "Internal Error: " + ex.getMessage());
return;
}
return;
}
}
}
//Send RINGING
logger.debug("Invite seems ok, we'll say RINGING.");
Response ringing = null;
try {
ringing = protocolProvider.getMessageFactory().createResponse(
Response.RINGING, invite);
protocolProvider.attachToTag(ringing, dialog);
ringing.setHeader(protocolProvider.getSipCommUserAgentHeader());
//set our display name
((ToHeader)ringing.getHeader(ToHeader.NAME))
.getAddress().setDisplayName(protocolProvider
.getOurDisplayName());
ringing.addHeader(protocolProvider.getContactHeader());
}
catch (ParseException ex) {
logger.error("Error while trying to send a request"
, ex);
callParticipant.setState(CallParticipantState.FAILED
, "Internal Error: " + ex.getMessage());
return;
}
try {
logger.trace("will send ringing response: ");
serverTransaction.sendResponse(ringing);
logger.debug("sent a ringing response: " + ringing);
}
catch (Exception ex) {
logger.error("Error while trying to send a request"
, ex);
callParticipant.setState(
CallParticipantState.FAILED
, "Internal Error: " + ex.getMessage());
return;
}
}
/**
* Sets the state of the corresponding call participant to DISCONNECTED
* and sends an OK response.
*
     * @param serverTransaction the ServerTransaction that the BYE request
* arrived in.
* @param byeRequest the BYE request to process
*/
private void processBye(ServerTransaction serverTransaction,
Request byeRequest)
{
//find the call
CallParticipantSipImpl callParticipant = activeCallsRepository
.findCallParticipant( serverTransaction.getDialog());
if (callParticipant == null) {
logger.debug("Received a stray bye request.");
return;
}
//Send OK
Response ok = null;
try {
ok = protocolProvider.getMessageFactory()
.createResponse(Response.OK, byeRequest);
protocolProvider.attachToTag(ok, serverTransaction.getDialog());
ok.setHeader(protocolProvider.getSipCommUserAgentHeader());
}
catch (ParseException ex) {
logger.error("Error while trying to send a response to a bye", ex);
//no need to let the user know about the error since it doesn't
//affect them
return;
}
try {
serverTransaction.sendResponse(ok);
logger.debug("sent response " + ok);
}
catch (Exception ex) {
//This is not really a problem according to the RFC
            //so we simply log the failure should someone be interested
            logger.error("Failed to send an OK response to a BYE request, "
                + "exception was:\n",
ex);
}
//change status
callParticipant.setState(CallParticipantState.DISCONNECTED);
}
/**
     * Updates the session description and sets the state of the corresponding
     * call participant to CONNECTED.
*
* @param serverTransaction the transaction that the Ack was received in.
* @param ackRequest Request
*/
void processAck(ServerTransaction serverTransaction,
Request ackRequest)
{
//find the call
CallParticipantSipImpl callParticipant = activeCallsRepository
.findCallParticipant(serverTransaction.getDialog());
if (callParticipant == null) {
//this is most probably the ack for a killed call - don't signal it
logger.debug("didn't find an ack's call, returning");
return;
}
ContentLengthHeader cl = ackRequest.getContentLength();
if (cl != null
&& cl.getContentLength() > 0)
{
callParticipant.setSdpDescription(
new String(ackRequest.getRawContent()));
}
//change status
callParticipant.setState(CallParticipantState.CONNECTED);
}
/**
     * Sets the state of the specified call participant to DISCONNECTED.
*
* @param serverTransaction the transaction that the cancel was received in.
* @param cancelRequest the Request that we've just received.
*/
void processCancel(ServerTransaction serverTransaction,
Request cancelRequest)
{
//find the call
CallParticipantSipImpl callParticipant = activeCallsRepository
.findCallParticipant( serverTransaction.getDialog() );
if (callParticipant == null) {
logger.debug("received a stray CANCEL req. ignoring");
return;
}
// Cancels should be OK-ed and the initial transaction - terminated
// (report and fix by Ranga)
try {
Response ok = protocolProvider.getMessageFactory().createResponse(
Response.OK, cancelRequest);
protocolProvider.attachToTag(ok, serverTransaction.getDialog());
ok.setHeader(protocolProvider.getSipCommUserAgentHeader());
serverTransaction.sendResponse(ok);
logger.debug("sent an ok response to a CANCEL request:\n" + ok);
}
catch (ParseException ex) {
logger.error(
"Failed to create an OK Response to an CANCEL request.", ex);
callParticipant.setState(CallParticipantState.FAILED
,"Failed to create an OK Response to an CANCEL request.");
}
catch (Exception ex) {
logger.error(
"Failed to send an OK Response to an CANCEL request.", ex);
callParticipant.setState(CallParticipantState.FAILED
,"Failed to send an OK Response to an CANCEL request.");
}
try {
//stop the invite transaction as well
Transaction tran = callParticipant.getFirstTransaction();
//should be server transaction and misplaced cancels should be
//filtered by the stack but it doesn't hurt checking anyway
if (! (tran instanceof ServerTransaction)) {
logger.error("Received a misplaced CANCEL request!");
return;
}
ServerTransaction inviteTran = (ServerTransaction) tran;
Request invite = callParticipant.getFirstTransaction().getRequest();
Response requestTerminated =
protocolProvider.getMessageFactory()
.createResponse(Response.REQUEST_TERMINATED, invite);
requestTerminated.setHeader(
protocolProvider.getSipCommUserAgentHeader());
protocolProvider.attachToTag(requestTerminated
, callParticipant.getDialog());
inviteTran.sendResponse(requestTerminated);
if( logger.isDebugEnabled() )
logger.debug("sent request terminated response:\n"
+ requestTerminated);
}
catch (ParseException ex)
{
logger.error("Failed to create a REQUEST_TERMINATED Response to "
+ "an INVITE request."
, ex);
}
catch (Exception ex)
{
logger.error("Failed to send an REQUEST_TERMINATED Response to "
+ "an INVITE request."
, ex);
}
//change status
callParticipant.setState(CallParticipantState.DISCONNECTED);
}
/**
* Indicates a user request to end a call with the specified call
     * participant. Depending on the state of the call the method would send a
* CANCEL, BYE, or BUSY_HERE and set the new state to DISCONNECTED.
*
* @param participant the participant that we'd like to hang up on.
* @throws ClassCastException if participant is not an instance of this
* CallParticipantSipImpl.
* @throws OperationFailedException if we fail to terminate the call.
*/
public void hangupCallParticipant(CallParticipant participant)
throws ClassCastException, OperationFailedException
{
//do nothing if the call is already ended
if (participant.getState().equals(CallParticipantState.DISCONNECTED))
{
logger.debug("Ignoring a request to hangup a call participant "
+"that is already DISCONNECTED");
return;
}
CallParticipantSipImpl callParticipant
= (CallParticipantSipImpl)participant;
Dialog dialog = callParticipant.getDialog();
if (callParticipant.getState().equals(CallParticipantState.CONNECTED))
{
sayBye(callParticipant);
callParticipant.setState(CallParticipantState.DISCONNECTED);
}
else if (callParticipant.getState()
.equals(CallParticipantState.CONNECTING)
|| callParticipant.getState()
.equals(CallParticipantState.ALERTING_REMOTE_SIDE))
{
if (callParticipant.getFirstTransaction() != null)
{
//Someone knows about us. Let's be polite and say we are
//leaving
sayCancel(callParticipant);
}
callParticipant.setState(CallParticipantState.DISCONNECTED);
}
else if (callParticipant.getState()
.equals(CallParticipantState.INCOMING_CALL))
{
callParticipant.setState(CallParticipantState.DISCONNECTED);
sayBusyHere(callParticipant);
}
        //For FAILED and BUSY we only need to update the call state
else if (callParticipant.getState().equals(CallParticipantState.BUSY))
{
callParticipant.setState(CallParticipantState.DISCONNECTED);
}
else if (callParticipant.getState().equals(CallParticipantState.FAILED))
{
callParticipant.setState(CallParticipantState.DISCONNECTED);
}
else
{
callParticipant.setState(CallParticipantState.DISCONNECTED);
logger.error("Could not determine call participant state!");
}
} //end call
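    /*
     * A summary of how hangupCallParticipant() above maps participant states
     * to signaling:
     *
     *   CONNECTED                        -> send BYE
     *   CONNECTING/ALERTING_REMOTE_SIDE  -> send CANCEL (when a transaction
     *                                       already exists)
     *   INCOMING_CALL                    -> send BUSY_HERE
     *   BUSY/FAILED                      -> no signaling, state update only
     *
     * and in every branch the participant ends up DISCONNECTED.
     */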
/**
* Sends a BYE request to <tt>callParticipant</tt>.
*
* @param callParticipant the call participant that we need to say bye to.
     *
     * @throws OperationFailedException if we fail to construct or send the
     * BYE request.
*/
private void sayBye(CallParticipantSipImpl callParticipant)
throws OperationFailedException
{
Request request = callParticipant.getFirstTransaction().getRequest();
Request bye = null;
try
{
bye = callParticipant.getDialog().createRequest(Request.BYE);
}
catch (SipException ex)
{
logger.error("Failed to create bye request!", ex);
throw new OperationFailedException(
"Failed to create bye request!"
, OperationFailedException.INTERNAL_ERROR
, ex);
}
ClientTransaction clientTransaction = null;
try
{
clientTransaction = callParticipant.getJainSipProvider()
.getNewClientTransaction(bye);
}
catch (TransactionUnavailableException ex)
{
logger.error(
"Failed to construct a client transaction from the BYE request"
, ex);
throw new OperationFailedException(
"Failed to construct a client transaction from the BYE request"
, OperationFailedException.INTERNAL_ERROR
,ex);
}
try
{
callParticipant.getDialog().sendRequest(clientTransaction);
logger.debug("sent request:\n" + bye);
}
catch (SipException ex)
{
throw new OperationFailedException(
"Failed to send the BYE request"
, OperationFailedException.NETWORK_FAILURE
, ex);
}
} //bye
/**
* Sends a Cancel request to <tt>callParticipant</tt>.
*
* @param callParticipant the call participant that we need to cancel.
*
     * @throws OperationFailedException if we fail to construct or send the
     * CANCEL request.
*/
private void sayCancel(CallParticipantSipImpl callParticipant)
throws OperationFailedException
{
Request request = callParticipant.getFirstTransaction().getRequest();
if (callParticipant.getDialog().isServer())
{
logger.error("Cannot cancel a server transaction");
throw new OperationFailedException(
"Cannot cancel a server transaction"
, OperationFailedException.INTERNAL_ERROR);
}
ClientTransaction clientTransaction =
(ClientTransaction) callParticipant.getFirstTransaction();
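        //the CANCEL is created from the client transaction that carried the
        //original INVITE, but it travels in a brand new client transaction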
try
{
Request cancel = clientTransaction.createCancel();
ClientTransaction cancelTransaction = callParticipant
.getJainSipProvider().getNewClientTransaction(cancel);
cancelTransaction.sendRequest();
logger.debug("sent request:\n" + cancel);
}
catch (SipException ex) {
logger.error("Failed to send the CANCEL request", ex);
throw new OperationFailedException(
"Failed to send the CANCEL request"
, OperationFailedException.NETWORK_FAILURE
, ex);
}
} //cancel
/**
* Sends a BUSY_HERE response to <tt>callParticipant</tt>.
*
* @param callParticipant the call participant that we need to send busy
* tone to.
* @throws OperationFailedException if we fail to create or send the
* response
*/
private void sayBusyHere(CallParticipantSipImpl callParticipant)
throws OperationFailedException
{
Request request = callParticipant.getFirstTransaction().getRequest();
Response busyHere = null;
try
{
busyHere = protocolProvider.getMessageFactory()
.createResponse(Response.BUSY_HERE, request);
busyHere.setHeader(
protocolProvider.getSipCommUserAgentHeader());
protocolProvider.attachToTag(busyHere
, callParticipant.getDialog());
}
catch (ParseException ex)
{
logger.error("Failed to create the BUSY_HERE response!", ex);
throw new OperationFailedException(
"Failed to create the BUSY_HERE response!"
, OperationFailedException.INTERNAL_ERROR
, ex);
}
if (!callParticipant.getDialog().isServer())
{
logger.error("Cannot send BUSY_HERE in a client transaction");
throw new OperationFailedException(
"Cannot send BUSY_HERE in a client transaction"
, OperationFailedException.INTERNAL_ERROR);
}
ServerTransaction serverTransaction
= (ServerTransaction) callParticipant.getFirstTransaction();
try
{
serverTransaction.sendResponse(busyHere);
logger.debug("sent response:\n" + busyHere);
}
catch (Exception ex)
{
logger.error("Failed to send the BUSY_HERE response", ex);
throw new OperationFailedException(
"Failed to send the BUSY_HERE response"
, OperationFailedException.NETWORK_FAILURE
, ex);
}
} //busy here
/**
     * Indicates a user request to answer an incoming call from the specified
* CallParticipant.
*
* Sends an OK response to <tt>callParticipant</tt>. Make sure that the call
* participant contains an sdp description when you call this method.
*
* @param participant the call participant that we need to send the ok
* to.
* @throws OperationFailedException if we fail to create or send the
* response.
*/
public void answerCallParticipant(CallParticipant participant)
throws OperationFailedException
{
CallParticipantSipImpl callParticipant
= (CallParticipantSipImpl)participant;
Transaction transaction = callParticipant.getFirstTransaction();
Dialog dialog = callParticipant.getDialog();
if (transaction == null || !dialog.isServer())
{
callParticipant.setState(CallParticipantState.DISCONNECTED);
throw new OperationFailedException(
"Failed to extract a ServerTransaction "
+ "from the call's associated dialog!"
, OperationFailedException.INTERNAL_ERROR);
}
if(participant.getState().equals(CallParticipantState.CONNECTED))
{
logger.info("Ignoring user request to answer a CallParticipant "
+ "that is already connected. CP:" + participant);
return;
}
ServerTransaction serverTransaction = (ServerTransaction) transaction;
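        //from here on we build a 200 OK carrying our SDP answer and send it
        //through the INVITE's server transaction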
Response ok = null;
try {
ok = protocolProvider.getMessageFactory().createResponse(
Response.OK
,callParticipant.getFirstTransaction().getRequest());
ok.setHeader(protocolProvider.getSipCommUserAgentHeader());
protocolProvider.attachToTag(ok, dialog);
}
catch (ParseException ex) {
callParticipant.setState(CallParticipantState.DISCONNECTED);
logger.error(
"Failed to construct an OK response to an INVITE request"
, ex);
throw new OperationFailedException(
"Failed to construct an OK response to an INVITE request"
, OperationFailedException.INTERNAL_ERROR
, ex);
}
//Content
ContentTypeHeader contentTypeHeader = null;
try
{
//content type should be application/sdp (not applications)
//reported by Oleg Shevchenko (Miratech)
contentTypeHeader =
protocolProvider.getHeaderFactory().createContentTypeHeader(
"application", "sdp");
}
catch (ParseException ex)
{
//Shouldn't happen
callParticipant.setState(CallParticipantState.DISCONNECTED);
logger.error(
"Failed to create a content type header for the OK request"
, ex);
throw new OperationFailedException(
"Failed to create a content type header for the OK request"
, OperationFailedException.INTERNAL_ERROR
, ex);
}
try
{
ok.setContent( SipActivator.getMediaService()
.generateSdpAnswer(callParticipant)
, contentTypeHeader);
}
catch (NullPointerException ex)
{
callParticipant.setState(CallParticipantState.DISCONNECTED);
logger.error( "No sdp data was provided for the ok response to "
+ "an INVITE request!"
, ex);
throw new OperationFailedException(
"No sdp data was provided for the ok response "
+ "to an INVITE request!"
, OperationFailedException.INTERNAL_ERROR
, ex);
}
catch (ParseException ex)
{
callParticipant.setState(CallParticipantState.DISCONNECTED);
logger.error(
"Failed to parse sdp data while creating invite request!", ex);
throw new OperationFailedException(
"Failed to parse sdp data while creating invite request!"
, OperationFailedException.INTERNAL_ERROR
, ex);
}
ContactHeader contactHeader = protocolProvider.getContactHeader();
ok.addHeader(contactHeader);
try {
serverTransaction.sendResponse(ok);
if( logger.isDebugEnabled() )
logger.debug("sent response\n" + ok);
}
catch (Exception ex) {
callParticipant.setState(CallParticipantState.DISCONNECTED);
logger.error(
"Failed to send an OK response to an INVITE request"
,ex);
throw new OperationFailedException(
"Failed to send an OK response to an INVITE request"
, OperationFailedException.NETWORK_FAILURE
, ex);
}
} //answer call
/**
* Creates a new call and call participant associated with
* <tt>containingTransaction</tt>
*
* @param containingTransaction the transaction that created the call.
* @param sourceProvider the provider that the containingTransaction
* belongs to.
*
* @return a new instance of a <tt>CallParticipantSipImpl</tt>
* corresponding to the <tt>containingTransaction</tt>.
*/
private CallParticipantSipImpl createCallParticipantFor(
Transaction containingTransaction,
SipProvider sourceProvider)
{
CallSipImpl call = new CallSipImpl(protocolProvider);
CallParticipantSipImpl callParticipant = new CallParticipantSipImpl(
containingTransaction.getDialog().getRemoteParty(), call);
if(containingTransaction instanceof ServerTransaction)
callParticipant.setState(CallParticipantState.INCOMING_CALL);
else
callParticipant.setState(CallParticipantState.INITIATING_CALL);
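        //remember the dialog, transaction and provider so that subsequent
        //requests (ACK, BYE, CANCEL) can be sent through them later on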
callParticipant.setDialog(containingTransaction.getDialog());
callParticipant.setFirstTransaction(containingTransaction);
callParticipant.setJainSipProvider(sourceProvider);
activeCallsRepository.addCall(call);
//notify everyone
if(containingTransaction instanceof ServerTransaction)
fireCallEvent(CallEvent.CALL_RECEIVED, call);
else
fireCallEvent(CallEvent.CALL_INITIATED, call);
return callParticipant;
}
/**
* Returns a string representation of this OperationSetBasicTelephony
* instance including information that would permit to distinguish it among
* other instances when reading a log file.
* <p>
* @return a string representation of this operation set.
*/
public String toString()
{
String className = getClass().getName();
try
{
className = className.substring(className.lastIndexOf('.') + 1);
}
catch (Exception ex)
{
// we don't want to fail in this method because we've messed up
//something with indexes, so just ignore.
}
return className + "-[dn=" + protocolProvider.getOurDisplayName()
+" addr="+protocolProvider.getOurSipAddress() + "]";
}
}
| src/net/java/sip/communicator/impl/protocol/sip/OperationSetBasicTelephonySipImpl.java | /*
* SIP Communicator, the OpenSource Java VoIP and Instant Messaging client.
*
* Distributable under LGPL license.
* See terms of license at gnu.org.
*/
package net.java.sip.communicator.impl.protocol.sip;
import java.net.*;
import java.text.*;
import java.util.*;
import javax.sip.*;
import javax.sip.address.*;
import javax.sip.header.*;
import javax.sip.message.*;
import net.java.sip.communicator.service.protocol.*;
import net.java.sip.communicator.service.protocol.event.*;
import net.java.sip.communicator.util.*;
import gov.nist.javax.sip.message.*;
import gov.nist.javax.sip.stack.*;
/**
* Implements all call management logic and exports basic telephony support by
* implementing OperationSetBasicTelephony.
*
* @author Emil Ivov
*/
public class OperationSetBasicTelephonySipImpl
implements OperationSetBasicTelephony
, SipListener
{
private static final Logger logger
= Logger.getLogger(OperationSetBasicTelephonySipImpl.class);
/**
* A reference to the <tt>ProtocolProviderServiceSipImpl</tt> instance
* that created us.
*/
private ProtocolProviderServiceSipImpl protocolProvider = null;
/**
     * A list of listeners registered for call events.
*/
private Vector callListeners = new Vector();
/**
* Contains references for all currently active (non ended) calls.
*/
private ActiveCallsRepository activeCallsRepository
= new ActiveCallsRepository(this);
/**
* The name of the boolean property that the user could use to specify
* whether incoming calls should be rejected if the user name in the
* destination (to) address does not match the one that we have in our
* sip address.
*/
private static final String FAIL_CALLS_ON_DEST_USER_MISMATCH
= "net.java.sip.communicator.impl.protocol.sip."
+"FAIL_CALLS_ON_DEST_USER_MISMATCH";
/**
* Creates a new instance and adds itself as an <tt>INVITE</tt> method
* handler in the creating protocolProvider.
*
* @param protocolProvider a reference to the
* <tt>ProtocolProviderServiceSipImpl</tt> instance that created us.
*/
public OperationSetBasicTelephonySipImpl(
ProtocolProviderServiceSipImpl protocolProvider)
{
this.protocolProvider = protocolProvider;
protocolProvider.registerMethodProcessor(Request.INVITE, this);
protocolProvider.registerMethodProcessor(Request.CANCEL, this);
protocolProvider.registerMethodProcessor(Request.ACK, this);
protocolProvider.registerMethodProcessor(Request.BYE, this);
}
/**
* Registers <tt>listener</tt> with this provider so that it
* could be notified when incoming calls are received.
*
* @param listener the listener to register with this provider.
*/
public void addCallListener(CallListener listener)
{
synchronized(callListeners)
{
if (!callListeners.contains(listener))
callListeners.add(listener);
}
}
/**
* Create a new call and invite the specified CallParticipant to it.
*
* @param callee the sip address of the callee that we should invite to a
* new call.
     * @return CallParticipant the CallParticipant that will be represented by
     * the specified uri. All following state change events will be
     * delivered through that call participant. The Call that this
     * participant is a member of could be retrieved from the
     * CallParticipant instance with the use of the corresponding method.
* @throws OperationFailedException with the corresponding code if we fail
* to create the call.
* @throws ParseException if <tt>callee</tt> is not a valid sip address
* string.
*/
public Call createCall(String callee)
throws OperationFailedException, ParseException
{
Address toAddress = parseAddressStr(callee);
return createOutgoingCall(toAddress);
}
/**
* Create a new call and invite the specified CallParticipant to it.
*
* @param callee the address of the callee that we should invite to a
* new call.
     * @return CallParticipant the CallParticipant that will be represented by
     * the specified uri. All following state change events will be
     * delivered through that call participant. The Call that this
     * participant is a member of could be retrieved from the
     * CallParticipant instance with the use of the corresponding method.
* @throws OperationFailedException with the corresponding code if we fail
* to create the call.
*/
public Call createCall(Contact callee)
throws OperationFailedException
{
Address toAddress = null;
try
{
toAddress = parseAddressStr(callee.getAddress());
}
catch (ParseException ex)
{
//couldn't happen
logger.error(ex.getMessage(), ex);
throw new IllegalArgumentException(ex.getMessage());
}
return createOutgoingCall(toAddress);
}
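    /*
     * A minimal usage sketch of the two createCall() variants above; the
     * address is hypothetical and error handling is elided:
     *
     *   OperationSetBasicTelephony telephony = ...; //obtained from provider
     *   Call call = telephony.createCall("sip:[email protected]");
     */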
/**
* Init and establish the specified call.
*
* @param calleeAddress the address of the callee that we'd like to connect
* with.
*
     * @return CallParticipant the CallParticipant that will be represented by
     * the specified uri. All following state change events will be
     * delivered through that call participant. The Call that this
     * participant is a member of could be retrieved from the
     * CallParticipant instance with the use of the corresponding method.
*
* @throws OperationFailedException with the corresponding code if we fail
* to create the call.
*/
private CallSipImpl createOutgoingCall(Address calleeAddress)
throws OperationFailedException
{
//create the invite request
Request invite = createInviteRequest(calleeAddress);
//Content
ContentTypeHeader contentTypeHeader = null;
try
{
//content type should be application/sdp (not applications)
//reported by Oleg Shevchenko (Miratech)
contentTypeHeader =
protocolProvider.getHeaderFactory().createContentTypeHeader(
"application", "sdp");
}
catch (ParseException ex)
{
//Shouldn't happen
logger.error(
"Failed to create a content type header for the INVITE "
+ "request"
, ex);
throw new OperationFailedException(
"Failed to create a content type header for the INVITE "
+ "request"
, OperationFailedException.INTERNAL_ERROR
, ex);
}
//Transaction
ClientTransaction inviteTransaction;
SipProvider jainSipProvider
= protocolProvider.getDefaultJainSipProvider();
try
{
inviteTransaction = jainSipProvider.getNewClientTransaction(invite);
}
catch (TransactionUnavailableException ex)
{
logger.error(
"Failed to create inviteTransaction.\n"
+ "This is most probably a network connection error."
, ex);
throw new OperationFailedException(
"Failed to create inviteTransaction.\n"
+ "This is most probably a network connection error."
, OperationFailedException.INTERNAL_ERROR
, ex);
}
//create the call participant
CallParticipantSipImpl callParticipant
= createCallParticipantFor(inviteTransaction, jainSipProvider);
//invite content
try
{
invite.setContent(
SipActivator.getMediaService().generateSdpOffer(callParticipant)
, contentTypeHeader);
}
catch (ParseException ex)
{
logger.error(
"Failed to parse sdp data while creating invite request!"
, ex);
throw new OperationFailedException(
"Failed to parse sdp data while creating invite request!"
, OperationFailedException.INTERNAL_ERROR
, ex);
}
try
{
inviteTransaction.sendRequest();
if (logger.isDebugEnabled())
logger.debug("sent request: " + invite);
}
catch (SipException ex)
{
logger.error(
"An error occurred while sending invite request", ex);
throw new OperationFailedException(
"An error occurred while sending invite request"
, OperationFailedException.NETWORK_FAILURE
, ex);
}
return (CallSipImpl)callParticipant.getCall();
}
/**
* Creates and dispatches a <tt>CallEvent</tt> notifying registered
* listeners that an event with id <tt>eventID</tt> has occurred on
* <tt>sourceCall</tt>.
*
* @param eventID the ID of the event to dispatch
* @param sourceCall the call on which the event has occurred.
*/
protected void fireCallEvent( int eventID,
CallSipImpl sourceCall)
{
CallEvent cEvent = new CallEvent(sourceCall, eventID);
logger.debug("Dispatching a CallEvent to "
+ callListeners.size()
+" listeners. event is: " + cEvent.toString());
Iterator listeners = null;
synchronized(callListeners)
{
listeners = new ArrayList(callListeners).iterator();
}
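        //we iterate over a copy of the listener list so that listeners may
        //safely register or unregister while the event is being dispatched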
while(listeners.hasNext())
{
CallListener listener = (CallListener)listeners.next();
if(eventID == CallEvent.CALL_INITIATED)
listener.outgoingCallCreated(cEvent);
else if(eventID == CallEvent.CALL_RECEIVED)
listener.incomingCallReceived(cEvent);
else if(eventID == CallEvent.CALL_ENDED)
listener.callEnded(cEvent);
}
}
/**
* Returns an iterator over all currently active calls.
*
* @return an iterator over all currently active calls.
*/
public Iterator getActiveCalls()
{
return activeCallsRepository.getActiveCalls();
}
/**
* Resumes communication with a call participant previously put on hold.
*
* @param participant the call participant to put on hold.
*/
public void putOffHold(CallParticipant participant)
{
/** @todo implement putOffHold() */
}
/**
* Puts the specified CallParticipant "on hold".
*
* @param participant the participant that we'd like to put on hold.
*/
public void putOnHold(CallParticipant participant)
{
/** @todo implement putOnHold() */
}
/**
* Removes the <tt>listener</tt> from the list of call listeners.
*
* @param listener the listener to unregister.
*/
public void removeCallListener(CallListener listener)
{
synchronized(callListeners)
{
callListeners.remove(listener);
}
}
/**
* Processes a Request received on a SipProvider upon which this SipListener
* is registered.
* <p>
*
* @param requestEvent requestEvent fired from the SipProvider to the
* <tt>SipListener</tt> representing a Request received from the network.
*/
public void processRequest(RequestEvent requestEvent)
{
ServerTransaction serverTransaction = requestEvent
.getServerTransaction();
SipProvider jainSipProvider = (SipProvider)requestEvent.getSource();
Request request = requestEvent.getRequest();
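        //requests arriving outside of an existing server transaction (e.g.
        //an initial INVITE) have none attached yet, so we create one first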
if (serverTransaction == null)
{
try
{
serverTransaction = jainSipProvider.getNewServerTransaction(
request);
}
catch (TransactionAlreadyExistsException ex)
{
//let's not scare the user and only log a message
logger.error("Failed to create a new server"
+ "transaction for an incoming request\n"
+ "(Next message contains the request)"
, ex);
return;
}
catch (TransactionUnavailableException ex)
{
//let's not scare the user and only log a message
logger.error("Failed to create a new server"
+ "transaction for an incoming request\n"
+ "(Next message contains the request)"
, ex);
return;
}
}
//INVITE
if (request.getMethod().equals(Request.INVITE))
{
logger.debug("received INVITE");
if (serverTransaction.getDialog().getState() == null)
{
if (logger.isDebugEnabled())
logger.debug("request is an INVITE. Dialog state="
+ serverTransaction.getDialog().getState());
processInvite(jainSipProvider, serverTransaction, request);
}
else
{
logger.error("reINVITE-s are not currently supported.");
}
}
//ACK
else if (request.getMethod().equals(Request.ACK))
{
processAck(serverTransaction, request);
}
//BYE
else if (request.getMethod().equals(Request.BYE))
{
processBye(serverTransaction, request);
}
//CANCEL
else if (request.getMethod().equals(Request.CANCEL))
{
processCancel(serverTransaction, request);
}
}
/**
* Process an asynchronously reported TransactionTerminatedEvent.
*
* @param transactionTerminatedEvent -- an event that indicates that the
* transaction has transitioned into the terminated state.
*/
public void processTransactionTerminated(
TransactionTerminatedEvent transactionTerminatedEvent)
{
//nothing to do here.
}
/**
* Analyzes the incoming <tt>responseEvent</tt> and then forwards it to the
* proper event handler.
*
* @param responseEvent the responseEvent that we received
* ProtocolProviderService.
*/
public void processResponse(ResponseEvent responseEvent)
{
ClientTransaction clientTransaction = responseEvent
.getClientTransaction();
Response response = responseEvent.getResponse();
CSeqHeader cseq = ((CSeqHeader)response.getHeader(CSeqHeader.NAME));
if (cseq == null)
{
logger.error("An incoming response did not contain a CSeq header");
}
String method = cseq.getMethod();
SipProvider sourceProvider = (SipProvider)responseEvent.getSource();
//OK
if (response.getStatusCode() == Response.OK)
{
if(method.equals(Request.INVITE))
{
processInviteOK(clientTransaction, response);
}
else if (method.equals(Request.BYE))
{
//ignore
}
}
//Ringing
else if (response.getStatusCode() == Response.RINGING)
{
processRinging(clientTransaction, response);
}
//Trying
else if (response.getStatusCode() == Response.TRYING)
{
processTrying(clientTransaction, response);
}
//Busy here.
else if (response.getStatusCode() == Response.BUSY_HERE)
{
processBusyHere(clientTransaction, response);
}
//401 UNAUTHORIZED
else if (response.getStatusCode() == Response.UNAUTHORIZED
|| response.getStatusCode()
== Response.PROXY_AUTHENTICATION_REQUIRED)
{
processAuthenticationChallenge(clientTransaction
, response
, sourceProvider);
}
//errors
else if ( response.getStatusCode() / 100 == 4 )
{
CallParticipantSipImpl callParticipant = activeCallsRepository
.findCallParticipant(clientTransaction.getDialog());
logger.error("Received error: " +response.getStatusCode()
+" "+ response.getReasonPhrase());
callParticipant.setState(CallParticipantState.FAILED);
}
//ignore everything else.
}
/**
* Updates the call state of the corresponding call participant.
*
* @param clientTransaction the transaction in which the response was
* received.
     * @param response the Trying response.
*/
private void processTrying(ClientTransaction clientTransaction,
Response response)
{
Dialog dialog = clientTransaction.getDialog();
//find the call participant
CallParticipantSipImpl callParticipant
= activeCallsRepository.findCallParticipant(dialog);
if (callParticipant == null)
{
logger.debug("Received a stray trying response.");
return;
}
//change status
callParticipant.setState(CallParticipantState.CONNECTING);
}
/**
* Updates the call state of the corresponding call participant. We'll
* also try to extract any details here that might be of use for call
* participant presentation and that we didn't have when establishing the
* call.
*
* @param clientTransaction the transaction in which the response was
* received.
     * @param response the Ringing response.
*/
private void processRinging(ClientTransaction clientTransaction,
Response response)
{
Dialog dialog = clientTransaction.getDialog();
//find the call participant
CallParticipantSipImpl callParticipant
= activeCallsRepository.findCallParticipant(dialog);
if (callParticipant == null)
{
logger.debug("Received a stray trying response.");
return;
}
//try to update the display name.
ContactHeader remotePartyContactHeader
= (ContactHeader)response.getHeader(ContactHeader.NAME);
if(remotePartyContactHeader != null)
{
Address remotePartyAddress = remotePartyContactHeader.getAddress();
String displayName = remotePartyAddress.getDisplayName();
if(displayName != null && displayName.trim().length() > 0)
{
callParticipant.setDisplayName(displayName);
}
}
//change status.
callParticipant.setState(CallParticipantState.ALERTING_REMOTE_SIDE);
}
/**
     * Sets the state of the corresponding call participant to CONNECTED and
* sends an ACK.
* @param clientTransaction the <tt>ClientTransaction</tt> that the response
* arrived in.
* @param ok the OK <tt>Response</tt> to process
*/
private void processInviteOK(ClientTransaction clientTransaction,
Response ok)
{
Dialog dialog = clientTransaction.getDialog();
//find the call
CallParticipantSipImpl callParticipant
= activeCallsRepository.findCallParticipant(dialog);
if (callParticipant == null)
{
logger.debug("Received a stray ok response.");
return;
}
//Send ACK
try
{
//Need to use dialog generated ACKs so that the remote UA core
//sees them - Fixed by M.Ranganathan
Request ack = clientTransaction.getDialog()
.createRequest(Request.ACK);
clientTransaction.getDialog().sendAck(ack);
}
catch (SipException ex)
{
logger.error("Failed to acknowledge call!", ex);
callParticipant.setState(CallParticipantState.FAILED);
return;
}
// !!! set sdp content before setting call state as that is where
//listeners get alerted and they need the sdp
callParticipant.setSdpDescription(new String(ok.getRawContent()));
//change status
callParticipant.setState(CallParticipantState.CONNECTED);
}
/**
* Sets corresponding state to the call participant associated with this
* transaction.
* @param clientTransaction the transaction in which
* @param busyHere the busy here Response
*/
private void processBusyHere(ClientTransaction clientTransaction,
Response busyHere)
{
Dialog dialog = clientTransaction.getDialog();
//find the call
CallParticipantSipImpl callParticipant
= activeCallsRepository.findCallParticipant(dialog);
if (callParticipant == null)
{
logger.debug("Received a stray busyHere response.");
return;
}
//change status
callParticipant.setState(CallParticipantState.BUSY);
}
/**
     * Attempts to regenerate the corresponding request with the proper
* credentials and terminates the call if it fails.
*
* @param clientTransaction the corresponding transaction
* @param response the challenge
     * @param jainSipProvider the provider that received the challenge
*/
private void processAuthenticationChallenge(
ClientTransaction clientTransaction,
Response response,
SipProvider jainSipProvider)
{
//First find the call and the call participant that this authentication
//request concerns.
CallParticipantSipImpl callParticipant = activeCallsRepository
.findCallParticipant(clientTransaction.getDialog());
if (callParticipant == null) {
logger.debug("Received an authorization challenge for no "
+"participant. authorizing anyway.");
}
try
{
logger.debug("Authenticating an INVITE request.");
ClientTransaction retryTran
= protocolProvider.getSipSecurityManager().handleChallenge(
response
, clientTransaction
, jainSipProvider);
//There is a new dialog that will be started with this request. Get
//that dialog and record it into the Call objet for later use (by
//Bye-s for example).
//if the request was BYE then we need to authorize it anyway even
//if the call and the call participant are no longer there
if(callParticipant !=null)
{
callParticipant.setDialog(retryTran.getDialog());
callParticipant.setFirstTransaction(retryTran);
callParticipant.setJainSipProvider(jainSipProvider);
}
retryTran.sendRequest();
}
catch (Exception exc)
{
logger.error("We failed to authenticate an INVITE request.", exc);
            //tell the others we couldn't authenticate
            if (callParticipant != null)
                callParticipant.setState(CallParticipantState.FAILED);
}
}
/**
* Processes a retransmit or expiration Timeout of an underlying
* {@link Transaction}handled by this SipListener. This Event notifies the
* application that a retransmission or transaction Timer expired in the
* SipProvider's transaction state machine. The TimeoutEvent encapsulates
* the specific timeout type and the transaction identifier either client or
     * server upon which the timeout occurred. The type of Timeout can be
* determined by:
* <code>timeoutType = timeoutEvent.getTimeout().getValue();</code>
*
* @param timeoutEvent the timeoutEvent received indicating either the
* message retransmit or transaction timed out.
*/
public void processTimeout(TimeoutEvent timeoutEvent)
{
Transaction transaction;
if (timeoutEvent.isServerTransaction()) {
transaction = timeoutEvent.getServerTransaction();
}
else {
transaction = timeoutEvent.getClientTransaction();
}
CallParticipantSipImpl callParticipant = activeCallsRepository
.findCallParticipant(transaction.getDialog());
if (callParticipant == null) {
logger.debug("Got a headless timeout event." + timeoutEvent);
return;
}
//change status
callParticipant.setState(CallParticipantState.FAILED
, "The remote party has not replied!"
+ "The call will be disconnected");
}
/**
* Process an asynchronously reported IO Exception. Asynchronous IO
* Exceptions may occur as a result of errors during retransmission of
     * requests. The transaction state machine requires IO Exceptions to be
     * reported to the application immediately (according to RFC 3261). This method
* enables an implementation to propagate the asynchronous handling of IO
* Exceptions to the application.
*
* @param exceptionEvent The Exception event that is reported to the
* application.
*/
public void processIOException(IOExceptionEvent exceptionEvent)
{
logger.error("Got an asynchronous exception event. host="
+ exceptionEvent.getHost() + " port=" + exceptionEvent.getPort());
}
/**
* Process an asynchronously reported DialogTerminatedEvent.
*
* @param dialogTerminatedEvent -- an event that indicates that the
* dialog has transitioned into the terminated state.
*/
public void processDialogTerminated(DialogTerminatedEvent
dialogTerminatedEvent)
{
CallParticipantSipImpl callParticipant = activeCallsRepository
.findCallParticipant(dialogTerminatedEvent.getDialog());
if (callParticipant == null)
{
return;
}
//change status
callParticipant.setState(CallParticipantState.DISCONNECTED);
}
/**
     * Parses the <tt>uriStr</tt> string and returns a JAIN SIP URI.
*
* @param uriStr a <tt>String</tt> containing the uri to parse.
*
* @return a URI object corresponding to the <tt>uriStr</tt> string.
* @throws ParseException if uriStr is not properly formatted.
*/
private Address parseAddressStr(String uriStr)
throws ParseException
{
uriStr = uriStr.trim();
//Handle default domain name (i.e. transform 1234 -> [email protected])
//assuming that if no domain name is specified then it should be the
//same as ours.
if (uriStr.indexOf('@') == -1
&& !uriStr.trim().startsWith("tel:"))
{
uriStr = uriStr + "@"
+ ((SipURI)protocolProvider.getOurSipAddress().getURI())
.getHost();
}
//Let's be uri fault tolerant and add the sip: scheme if there is none.
if (uriStr.toLowerCase().indexOf("sip:") == -1 //no sip scheme
&& uriStr.indexOf('@') != -1) //most probably a sip uri
{
uriStr = "sip:" + uriStr;
}
//Request URI
Address uri
= protocolProvider.getAddressFactory().createAddress(uriStr);
return uri;
}
/**
* Creates an invite request destined for <tt>callee</tt>.
*
* @param toAddress the sip address of the callee that the request is meant
* for.
* @return a newly created sip <tt>Request</tt> destined for
* <tt>callee</tt>.
     * @throws OperationFailedException with the corresponding code if creating
* the request fails.
*/
private Request createInviteRequest(Address toAddress)
throws OperationFailedException
{
InetAddress destinationInetAddress = null;
try
{
destinationInetAddress = InetAddress.getByName(
( (SipURI) toAddress.getURI()).getHost());
}
catch (UnknownHostException ex)
{
throw new IllegalArgumentException(
( (SipURI) toAddress.getURI()).getHost()
+ " is not a valid internet address " + ex.getMessage());
}
//Call ID
CallIdHeader callIdHeader = protocolProvider
.getDefaultJainSipProvider().getNewCallId();
//CSeq
CSeqHeader cSeqHeader = null;
try
{
cSeqHeader = protocolProvider.getHeaderFactory()
.createCSeqHeader(1l, Request.INVITE);
}
catch (InvalidArgumentException ex)
{
//Shouldn't happen
logger.error(
"An unexpected erro occurred while"
+ "constructing the CSeqHeadder", ex);
throw new OperationFailedException(
"An unexpected erro occurred while"
+ "constructing the CSeqHeadder"
, OperationFailedException.INTERNAL_ERROR
, ex);
}
catch(ParseException exc)
{
//shouldn't happen
logger.error(
"An unexpected erro occurred while"
+ "constructing the CSeqHeadder", exc);
throw new OperationFailedException(
"An unexpected erro occurred while"
+ "constructing the CSeqHeadder"
, OperationFailedException.INTERNAL_ERROR
, exc);
}
//FromHeader
String localTag = protocolProvider.generateLocalTag();
FromHeader fromHeader = null;
ToHeader toHeader = null;
try
{
//FromHeader
fromHeader = protocolProvider.getHeaderFactory()
.createFromHeader(protocolProvider.getOurSipAddress()
, localTag);
//ToHeader
toHeader = protocolProvider.getHeaderFactory()
.createToHeader(toAddress, null);
}
catch (ParseException ex)
{
//these two should never happen.
logger.error(
"An unexpected erro occurred while"
+ "constructing the ToHeader", ex);
throw new OperationFailedException(
"An unexpected erro occurred while"
+ "constructing the ToHeader"
, OperationFailedException.INTERNAL_ERROR
, ex);
}
//ViaHeaders
ArrayList viaHeaders = protocolProvider.getLocalViaHeaders(
destinationInetAddress
, protocolProvider.getDefaultListeningPoint());
//MaxForwards
MaxForwardsHeader maxForwards = protocolProvider
.getMaxForwardsHeader();
//Contact
ContactHeader contactHeader = protocolProvider.getContactHeader();
Request invite = null;
try
{
invite = protocolProvider.getMessageFactory().createRequest(
toHeader.getAddress().getURI()
, Request.INVITE
, callIdHeader
, cSeqHeader
, fromHeader
, toHeader
, viaHeaders
, maxForwards);
}
catch (ParseException ex)
{
//shouldn't happen
logger.error(
"Failed to create invite Request!", ex);
throw new OperationFailedException(
"Failed to create invite Request!"
, OperationFailedException.INTERNAL_ERROR
, ex);
}
//User Agent
UserAgentHeader userAgentHeader
= protocolProvider.getSipCommUserAgentHeader();
if(userAgentHeader != null)
invite.addHeader(userAgentHeader);
//add the contact header.
invite.addHeader(contactHeader);
return invite;
}
/**
* Creates a new call and sends a RINGING response.
*
     * @param sourceProvider the provider containing <tt>sourceTransaction</tt>.
* @param serverTransaction the transaction containing the received request.
* @param invite the Request that we've just received.
*/
private void processInvite( SipProvider sourceProvider,
ServerTransaction serverTransaction,
Request invite)
{
logger.trace("Creating call participant.");
Dialog dialog = serverTransaction.getDialog();
CallParticipantSipImpl callParticipant
= createCallParticipantFor(serverTransaction, sourceProvider);
logger.trace("call participant created = " + callParticipant);
//sdp description may be in acks - bug report Laurent Michel
ContentLengthHeader cl = invite.getContentLength();
if (cl != null
&& cl.getContentLength() > 0)
{
callParticipant.setSdpDescription(
new String(invite.getRawContent()));
}
logger.trace("Will verify whether INVITE is properly addressed.");
//Are we the one they are looking for?
javax.sip.address.URI calleeURI = dialog.getLocalParty().getURI();
if (calleeURI.isSipURI())
{
boolean assertUserMatch = Boolean.valueOf(
SipActivator.getConfigurationService().getString(
FAIL_CALLS_ON_DEST_USER_MISMATCH)).booleanValue();
if (assertUserMatch)
{
//user info is case sensitive according to rfc3261
String calleeUser = ( (SipURI) calleeURI).getUser();
String localUser = ((SipURI)protocolProvider.getOurSipAddress()
.getURI()).getUser();
if (calleeUser != null && !calleeUser.equals(localUser))
{
callParticipant.setState(
CallParticipantState.FAILED
, "A call was received here while it appeared "
+"destined to someone else. The call was rejected.");
Response notFound = null;
try {
notFound = protocolProvider.getMessageFactory()
.createResponse( Response.NOT_FOUND, invite);
//attach a to tag
protocolProvider.attachToTag(notFound, dialog);
notFound.setHeader(
protocolProvider.getSipCommUserAgentHeader());
}
catch (ParseException ex) {
logger.error("Error while trying to create a response"
, ex);
callParticipant.setState(
CallParticipantState.FAILED
, "InernalError: " +ex.getMessage());
return;
}
try {
serverTransaction.sendResponse(notFound);
logger.debug("sent a not found response: " + notFound);
}
catch (Exception ex) {
logger.error("Error while trying to send a response"
, ex);
callParticipant.setState(
CallParticipantState.FAILED
, "Internal Error: " + ex.getMessage());
return;
}
return;
}
}
}
//Send RINGING
logger.debug("Invite seems ok, we'll say RINGING.");
Response ringing = null;
try {
ringing = protocolProvider.getMessageFactory().createResponse(
Response.RINGING, invite);
protocolProvider.attachToTag(ringing, dialog);
ringing.setHeader(protocolProvider.getSipCommUserAgentHeader());
//set our display name
((ToHeader)ringing.getHeader(ToHeader.NAME))
.getAddress().setDisplayName(protocolProvider
.getOurDisplayName());
ringing.addHeader(protocolProvider.getContactHeader());
}
catch (ParseException ex) {
logger.error("Error while trying to send a request"
, ex);
callParticipant.setState(CallParticipantState.FAILED
, "Internal Error: " + ex.getMessage());
return;
}
try {
logger.trace("will send ringing response: ");
serverTransaction.sendResponse(ringing);
logger.debug("sent a ringing response: " + ringing);
}
catch (Exception ex) {
logger.error("Error while trying to send a request"
, ex);
callParticipant.setState(
CallParticipantState.FAILED
, "Internal Error: " + ex.getMessage());
return;
}
}
/**
* Sets the state of the corresponding call participant to DISCONNECTED
* and sends an OK response.
*
     * @param serverTransaction the ServerTransaction that the BYE request
* arrived in.
* @param byeRequest the BYE request to process
*/
private void processBye(ServerTransaction serverTransaction,
Request byeRequest)
{
//find the call
CallParticipantSipImpl callParticipant = activeCallsRepository
.findCallParticipant( serverTransaction.getDialog());
if (callParticipant == null) {
logger.debug("Received a stray bye request.");
return;
}
//Send OK
Response ok = null;
try {
ok = protocolProvider.getMessageFactory()
.createResponse(Response.OK, byeRequest);
protocolProvider.attachToTag(ok, serverTransaction.getDialog());
ok.setHeader(protocolProvider.getSipCommUserAgentHeader());
}
catch (ParseException ex) {
logger.error("Error while trying to send a response to a bye", ex);
//no need to let the user know about the error since it doesn't
//affect them
return;
}
try {
serverTransaction.sendResponse(ok);
logger.debug("sent response " + ok);
}
catch (Exception ex) {
//This is not really a problem according to the RFC
            //so we simply log the failure should someone be interested
            logger.error("Failed to send an OK response to a BYE request, "
                + "exception was:\n",
ex);
}
//change status
callParticipant.setState(CallParticipantState.DISCONNECTED);
}
/**
     * Updates the session description and sets the state of the corresponding
     * call participant to CONNECTED.
*
* @param serverTransaction the transaction that the Ack was received in.
* @param ackRequest Request
*/
void processAck(ServerTransaction serverTransaction,
Request ackRequest)
{
//find the call
CallParticipantSipImpl callParticipant = activeCallsRepository
.findCallParticipant(serverTransaction.getDialog());
if (callParticipant == null) {
//this is most probably the ack for a killed call - don't signal it
logger.debug("didn't find an ack's call, returning");
return;
}
ContentLengthHeader cl = ackRequest.getContentLength();
if (cl != null
&& cl.getContentLength() > 0)
{
callParticipant.setSdpDescription(
new String(ackRequest.getRawContent()));
}
//change status
callParticipant.setState(CallParticipantState.CONNECTED);
}
/**
     * Sets the state of the specified call participant to DISCONNECTED.
*
* @param serverTransaction the transaction that the cancel was received in.
* @param cancelRequest the Request that we've just received.
*/
void processCancel(ServerTransaction serverTransaction,
Request cancelRequest)
{
//find the call
CallParticipantSipImpl callParticipant = activeCallsRepository
.findCallParticipant( serverTransaction.getDialog() );
if (callParticipant == null) {
logger.debug("received a stray CANCEL req. ignoring");
return;
}
// Cancels should be OK-ed and the initial transaction - terminated
// (report and fix by Ranga)
try {
Response ok = protocolProvider.getMessageFactory().createResponse(
Response.OK, cancelRequest);
protocolProvider.attachToTag(ok, serverTransaction.getDialog());
ok.setHeader(protocolProvider.getSipCommUserAgentHeader());
serverTransaction.sendResponse(ok);
logger.debug("sent an ok response to a CANCEL request:\n" + ok);
}
catch (ParseException ex) {
logger.error(
"Failed to create an OK Response to an CANCEL request.", ex);
callParticipant.setState(CallParticipantState.FAILED
,"Failed to create an OK Response to an CANCEL request.");
}
catch (Exception ex) {
logger.error(
"Failed to send an OK Response to an CANCEL request.", ex);
callParticipant.setState(CallParticipantState.FAILED
,"Failed to send an OK Response to an CANCEL request.");
}
try {
//stop the invite transaction as well
Transaction tran = callParticipant.getFirstTransaction();
//should be server transaction and misplaced cancels should be
//filtered by the stack but it doesn't hurt checking anyway
if (! (tran instanceof ServerTransaction)) {
logger.error("Received a misplaced CANCEL request!");
return;
}
ServerTransaction inviteTran = (ServerTransaction) tran;
Request invite = callParticipant.getFirstTransaction().getRequest();
Response requestTerminated =
protocolProvider.getMessageFactory()
.createResponse(Response.REQUEST_TERMINATED, invite);
requestTerminated.setHeader(
protocolProvider.getSipCommUserAgentHeader());
protocolProvider.attachToTag(requestTerminated
, callParticipant.getDialog());
inviteTran.sendResponse(requestTerminated);
if( logger.isDebugEnabled() )
logger.debug("sent request terminated response:\n"
+ requestTerminated);
}
catch (ParseException ex)
{
logger.error("Failed to create a REQUEST_TERMINATED Response to "
+ "an INVITE request."
, ex);
}
catch (Exception ex)
{
logger.error("Failed to send an REQUEST_TERMINATED Response to "
+ "an INVITE request."
, ex);
}
//change status
callParticipant.setState(CallParticipantState.DISCONNECTED);
}
/**
* Indicates a user request to end a call with the specified call
     * participant. Depending on the state of the call the method would send a
* CANCEL, BYE, or BUSY_HERE and set the new state to DISCONNECTED.
*
* @param participant the participant that we'd like to hang up on.
* @throws ClassCastException if participant is not an instance of this
* CallParticipantSipImpl.
* @throws OperationFailedException if we fail to terminate the call.
*/
public void hangupCallParticipant(CallParticipant participant)
throws ClassCastException, OperationFailedException
{
//do nothing if the call is already ended
if (participant.getState().equals(CallParticipantState.DISCONNECTED))
{
            logger.debug("Ignoring a request to hang up a call participant "
+"that is already DISCONNECTED");
return;
}
CallParticipantSipImpl callParticipant
= (CallParticipantSipImpl)participant;
Dialog dialog = callParticipant.getDialog();
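        //send the SIP message that matches how far the call has progressed:
        //BYE for connected calls, CANCEL for calls still being set up and
        //BUSY_HERE for unanswered incoming calls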
if (callParticipant.getState().equals(CallParticipantState.CONNECTED))
{
sayBye(callParticipant);
callParticipant.setState(CallParticipantState.DISCONNECTED);
}
else if (callParticipant.getState()
.equals(CallParticipantState.CONNECTING)
|| callParticipant.getState()
.equals(CallParticipantState.ALERTING_REMOTE_SIDE))
{
if (callParticipant.getFirstTransaction() != null)
{
//Someone knows about us. Let's be polite and say we are
//leaving
sayCancel(callParticipant);
}
callParticipant.setState(CallParticipantState.DISCONNECTED);
}
else if (callParticipant.getState()
.equals(CallParticipantState.INCOMING_CALL))
{
callParticipant.setState(CallParticipantState.DISCONNECTED);
sayBusyHere(callParticipant);
}
        //For FAILED and BUSY we only need to update CALL_STATUS
else if (callParticipant.getState().equals(CallParticipantState.BUSY))
{
callParticipant.setState(CallParticipantState.DISCONNECTED);
}
else if (callParticipant.getState().equals(CallParticipantState.FAILED))
{
callParticipant.setState(CallParticipantState.DISCONNECTED);
}
else
{
callParticipant.setState(CallParticipantState.DISCONNECTED);
logger.error("Could not determine call participant state!");
}
} //end call
/**
* Sends a BYE request to <tt>callParticipant</tt>.
*
* @param callParticipant the call participant that we need to say bye to.
     *
     * @throws OperationFailedException if we fail to construct or send the
     * BYE request.
*/
private void sayBye(CallParticipantSipImpl callParticipant)
throws OperationFailedException
{
Request request = callParticipant.getFirstTransaction().getRequest();
Request bye = null;
try
{
bye = callParticipant.getDialog().createRequest(Request.BYE);
}
catch (SipException ex)
{
logger.error("Failed to create bye request!", ex);
throw new OperationFailedException(
"Failed to create bye request!"
, OperationFailedException.INTERNAL_ERROR
, ex);
}
ClientTransaction clientTransaction = null;
try
{
clientTransaction = callParticipant.getJainSipProvider()
.getNewClientTransaction(bye);
}
catch (TransactionUnavailableException ex)
{
logger.error(
"Failed to construct a client transaction from the BYE request"
, ex);
throw new OperationFailedException(
"Failed to construct a client transaction from the BYE request"
, OperationFailedException.INTERNAL_ERROR
,ex);
}
try
{
callParticipant.getDialog().sendRequest(clientTransaction);
logger.debug("sent request:\n" + bye);
}
catch (SipException ex)
{
throw new OperationFailedException(
"Failed to send the BYE request"
, OperationFailedException.NETWORK_FAILURE
, ex);
}
} //bye
/**
* Sends a Cancel request to <tt>callParticipant</tt>.
*
* @param callParticipant the call participant that we need to cancel.
*
     * @throws OperationFailedException if we fail to construct or send the
* CANCEL request.
*/
private void sayCancel(CallParticipantSipImpl callParticipant)
throws OperationFailedException
{
Request request = callParticipant.getFirstTransaction().getRequest();
if (callParticipant.getDialog().isServer())
{
logger.error("Cannot cancel a server transaction");
throw new OperationFailedException(
"Cannot cancel a server transaction"
, OperationFailedException.INTERNAL_ERROR);
}
ClientTransaction clientTransaction =
(ClientTransaction) callParticipant.getFirstTransaction();
try
{
Request cancel = clientTransaction.createCancel();
ClientTransaction cancelTransaction = callParticipant
.getJainSipProvider().getNewClientTransaction(cancel);
cancelTransaction.sendRequest();
logger.debug("sent request:\n" + cancel);
}
catch (SipException ex) {
logger.error("Failed to send the CANCEL request", ex);
throw new OperationFailedException(
"Failed to send the CANCEL request"
, OperationFailedException.NETWORK_FAILURE
, ex);
}
} //cancel
/**
* Sends a BUSY_HERE response to <tt>callParticipant</tt>.
*
* @param callParticipant the call participant that we need to send busy
* tone to.
* @throws OperationFailedException if we fail to create or send the
* response
*/
private void sayBusyHere(CallParticipantSipImpl callParticipant)
throws OperationFailedException
{
Request request = callParticipant.getFirstTransaction().getRequest();
Response busyHere = null;
try
{
busyHere = protocolProvider.getMessageFactory()
.createResponse(Response.BUSY_HERE, request);
busyHere.setHeader(
protocolProvider.getSipCommUserAgentHeader());
protocolProvider.attachToTag(busyHere
, callParticipant.getDialog());
}
catch (ParseException ex)
{
logger.error("Failed to create the BUSY_HERE response!", ex);
throw new OperationFailedException(
"Failed to create the BUSY_HERE response!"
, OperationFailedException.INTERNAL_ERROR
, ex);
}
if (!callParticipant.getDialog().isServer())
{
logger.error("Cannot send BUSY_HERE in a client transaction");
throw new OperationFailedException(
"Cannot send BUSY_HERE in a client transaction"
, OperationFailedException.INTERNAL_ERROR);
}
ServerTransaction serverTransaction
= (ServerTransaction) callParticipant.getFirstTransaction();
try
{
serverTransaction.sendResponse(busyHere);
logger.debug("sent response:\n" + busyHere);
}
catch (Exception ex)
{
logger.error("Failed to send the BUSY_HERE response", ex);
throw new OperationFailedException(
"Failed to send the BUSY_HERE response"
, OperationFailedException.NETWORK_FAILURE
, ex);
}
} //busy here
/**
     * Indicates a user request to answer an incoming call from the specified
* CallParticipant.
*
* Sends an OK response to <tt>callParticipant</tt>. Make sure that the call
* participant contains an sdp description when you call this method.
*
* @param participant the call participant that we need to send the ok
* to.
* @throws OperationFailedException if we fail to create or send the
* response.
*/
public void answerCallParticipant(CallParticipant participant)
throws OperationFailedException
{
CallParticipantSipImpl callParticipant
= (CallParticipantSipImpl)participant;
Transaction transaction = callParticipant.getFirstTransaction();
Dialog dialog = callParticipant.getDialog();
if (transaction == null || !dialog.isServer())
{
callParticipant.setState(CallParticipantState.DISCONNECTED);
throw new OperationFailedException(
"Failed to extract a ServerTransaction "
+ "from the call's associated dialog!"
, OperationFailedException.INTERNAL_ERROR);
}
if(participant.getState().equals(CallParticipantState.CONNECTED))
{
logger.info("Ignoring user request to answer a CallParticipant "
+ "that is already connected. CP:" + participant);
return;
}
ServerTransaction serverTransaction = (ServerTransaction) transaction;
Response ok = null;
try {
ok = protocolProvider.getMessageFactory().createResponse(
Response.OK
,callParticipant.getFirstTransaction().getRequest());
ok.setHeader(protocolProvider.getSipCommUserAgentHeader());
protocolProvider.attachToTag(ok, dialog);
}
catch (ParseException ex) {
callParticipant.setState(CallParticipantState.DISCONNECTED);
logger.error(
"Failed to construct an OK response to an INVITE request"
, ex);
throw new OperationFailedException(
"Failed to construct an OK response to an INVITE request"
, OperationFailedException.INTERNAL_ERROR
, ex);
}
//Content
ContentTypeHeader contentTypeHeader = null;
try
{
//content type should be application/sdp (not applications)
//reported by Oleg Shevchenko (Miratech)
contentTypeHeader =
protocolProvider.getHeaderFactory().createContentTypeHeader(
"application", "sdp");
}
catch (ParseException ex)
{
//Shouldn't happen
callParticipant.setState(CallParticipantState.DISCONNECTED);
logger.error(
"Failed to create a content type header for the OK request"
, ex);
throw new OperationFailedException(
"Failed to create a content type header for the OK request"
, OperationFailedException.INTERNAL_ERROR
, ex);
}
try
{
ok.setContent(callParticipant.getSdpDescription()
, contentTypeHeader);
}
catch (NullPointerException ex)
{
callParticipant.setState(CallParticipantState.DISCONNECTED);
logger.error( "No sdp data was provided for the ok response to "
+ "an INVITE request!"
, ex);
throw new OperationFailedException(
"No sdp data was provided for the ok response "
+ "to an INVITE request!"
, OperationFailedException.INTERNAL_ERROR
, ex);
}
catch (ParseException ex)
{
callParticipant.setState(CallParticipantState.DISCONNECTED);
            logger.error(
                "Failed to parse sdp data while creating the OK response!", ex);
            throw new OperationFailedException(
                "Failed to parse sdp data while creating the OK response!"
, OperationFailedException.INTERNAL_ERROR
, ex);
}
ContactHeader contactHeader = protocolProvider.getContactHeader();
ok.addHeader(contactHeader);
try {
serverTransaction.sendResponse(ok);
if( logger.isDebugEnabled() )
logger.debug("sent response\n" + ok);
}
catch (Exception ex) {
callParticipant.setState(CallParticipantState.DISCONNECTED);
logger.error(
"Failed to send an OK response to an INVITE request"
,ex);
throw new OperationFailedException(
"Failed to send an OK response to an INVITE request"
, OperationFailedException.NETWORK_FAILURE
, ex);
}
} //answer call
/**
* Creates a new call and call participant associated with
* <tt>containingTransaction</tt>
*
* @param containingTransaction the transaction that created the call.
* @param sourceProvider the provider that the containingTransaction
* belongs to.
*
* @return a new instance of a <tt>CallParticipantSipImpl</tt>
* corresponding to the <tt>containingTransaction</tt>.
*/
private CallParticipantSipImpl createCallParticipantFor(
Transaction containingTransaction,
SipProvider sourceProvider)
{
CallSipImpl call = new CallSipImpl(protocolProvider);
CallParticipantSipImpl callParticipant = new CallParticipantSipImpl(
containingTransaction.getDialog().getRemoteParty(), call);
if(containingTransaction instanceof ServerTransaction)
callParticipant.setState(CallParticipantState.INCOMING_CALL);
else
callParticipant.setState(CallParticipantState.INITIATING_CALL);
callParticipant.setDialog(containingTransaction.getDialog());
callParticipant.setFirstTransaction(containingTransaction);
callParticipant.setJainSipProvider(sourceProvider);
activeCallsRepository.addCall(call);
//notify everyone
if(containingTransaction instanceof ServerTransaction)
fireCallEvent(CallEvent.CALL_RECEIVED, call);
else
fireCallEvent(CallEvent.CALL_INITIATED, call);
return callParticipant;
}
/**
* Returns a string representation of this OperationSetBasicTelephony
* instance including information that would permit to distinguish it among
* other instances when reading a log file.
* <p>
* @return a string representation of this operation set.
*/
public String toString()
{
String className = getClass().getName();
try
{
className = className.substring(className.lastIndexOf('.') + 1);
}
catch (Exception ex)
{
// we don't want to fail in this method because we've messed up
//something with indexes, so just ignore.
}
return className + "-[dn=" + protocolProvider.getOurDisplayName()
+" addr="+protocolProvider.getOurSipAddress() + "]";
}
}
| Started working on media and fixed meta contact list contact merge and contact remove
Fixed a null pointer
| src/net/java/sip/communicator/impl/protocol/sip/OperationSetBasicTelephonySipImpl.java | Started working on media and fixed meta contact list contact merge and contact remove | <ide><path>rc/net/java/sip/communicator/impl/protocol/sip/OperationSetBasicTelephonySipImpl.java
<ide> logger.error("Received error: " +response.getStatusCode()
<ide> +" "+ response.getReasonPhrase());
<ide>
<del> callParticipant.setState(CallParticipantState.FAILED);
<add> if(callParticipant != null)
<add> callParticipant.setState(CallParticipantState.FAILED);
<ide> }
<ide> //ignore everything else.
<ide> }
<ide>
<ide> try
<ide> {
<del> ok.setContent(callParticipant.getSdpDescription()
<add> ok.setContent( SipActivator.getMediaService()
<add> .generateSdpAnswer(callParticipant)
<ide> , contentTypeHeader);
<ide> }
<ide> catch (NullPointerException ex) |
|
JavaScript | mit | 0da5a2e1ce7fce05f0563d8df786f7593c264592 | 0 | jonschlinkert/gh-clone | #!/usr/bin/env node
var chalk = require('chalk');
var symbol = require('log-symbols');
var relative = require('relative');
var argv = require('minimist')(process.argv.slice(2));
var cmd = require('spawn-commands');
// args
var repo = argv._[0] || argv.r || argv.repo;
var dest = argv._[1] || argv.d || argv.dest;
var branch = argv._[2] || argv.b || argv.branch;
// Empty line
console.log();
if (!repo) {
  console.error(chalk.red('Please provide a repo, either as a first argument or with \'-r\''));
  process.exit(1);
}
inform('cloned', repo);
if (!dest) {
dest = relative(process.cwd(), repo.split('/')[1]);
}
if (branch) {
inform('branch', branch);
}
inform('to', dest + '/', '\t\t');
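// Build the spawn-commands spec for `git clone`, adding the branch flag
// and destination directory when they were supplied.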
function launch(opts) {
opts = opts || {};
var res = {cmd: 'git', args: ['clone']};
if (opts.branch) {
res.args.push('-b');
res.args.push(opts.branch);
}
res.args.push('https://github.com/' + opts.repo + '.git');
if (opts.dest) {
res.args.push(opts.dest);
}
return [res];
}
// Empty line
console.log();
console.log(chalk.green('Cloning.'));
console.log();
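// Run the clone and exit with a non-zero status code if it fails.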
cmd(launch({repo: repo, dest: dest, branch: branch }), function (err) {
console.log();
if (typeof err === 'number') {
console.log(chalk.red('Cloning was unsuccessful.'));
console.log();
process.exit(1);
} else {
console.log(chalk.green('Done.'));
console.log();
process.exit(0);
}
});
/**
* Formatting
*/
function format(msg) {
return chalk.gray('gh-clone ') + msg;
}
function inform(type, msg, pad) {
var prefix = format('[' + type + ']' + (pad || '\t') + '· ');
return console.log(prefix + chalk.bold(msg) + ' ' + symbol.success);
}
| cli.js | #!/usr/bin/env node
var chalk = require('chalk');
var symbol = require('log-symbols');
var relative = require('relative');
var argv = require('minimist')(process.argv.slice(2));
var cmd = require('spawn-commands');
function format(msg) {
return chalk.gray('gh-clone ') + msg;
}
function inform(type, msg, pad) {
var prefix = format('[' + type + ']' + (pad || '\t') + '· ');
return console.log(prefix + chalk.bold(msg) + ' ' + symbol.success);
}
// args
var repo = argv._[0] || argv.r || argv.repo;
var dest = argv._[1] || argv.d || argv.dest;
var branch = argv._[2] || argv.b || argv.branch;
// Empty line
console.log();
if (!repo) {
  console.error(chalk.red('Please provide a repo, either as a first argument or with \'-r\''));
  process.exit(1);
}
inform('cloned', repo);
if (!dest) {
dest = relative(process.cwd(), repo.split('/')[1]);
}
if (branch) {
inform('branch', branch);
}
inform('to', dest + '/', '\t\t');
function launch(opts) {
opts = opts || {};
var res = {cmd: 'git', args: ['clone']};
if (opts.branch) {
res.args.push('-b');
res.args.push(opts.branch);
}
res.args.push('https://github.com/' + opts.repo + '.git');
if (opts.dest) {
res.args.push(opts.dest);
}
return [res];
}
// Empty line
console.log();
console.log(chalk.green('Cloning.'));
console.log();
cmd(launch({repo: repo, dest: dest, branch: branch }), function (err) {
console.log();
if (typeof err === 'number') {
console.log(chalk.red('Cloning was unsuccessful.'));
console.log();
process.exit(1);
} else {
console.log(chalk.green('Done.'));
console.log();
process.exit(0);
}
});
| clean up
| cli.js | clean up | <ide><path>li.js
<ide> var relative = require('relative');
<ide> var argv = require('minimist')(process.argv.slice(2));
<ide> var cmd = require('spawn-commands');
<del>
<del>function format(msg) {
<del> return chalk.gray('gh-clone ') + msg;
<del>}
<del>
<del>function inform(type, msg, pad) {
<del> var prefix = format('[' + type + ']' + (pad || '\t') + '· ');
<del> return console.log(prefix + chalk.bold(msg) + ' ' + symbol.success);
<del>}
<ide>
<ide> // args
<ide> var repo = argv._[0] || argv.r || argv.repo;
<ide> }
<ide> });
<ide>
<add>/**
<add> * Formatting
<add> */
<add>
<add>function format(msg) {
<add> return chalk.gray('gh-clone ') + msg;
<add>}
<add>
<add>function inform(type, msg, pad) {
<add> var prefix = format('[' + type + ']' + (pad || '\t') + '· ');
<add> return console.log(prefix + chalk.bold(msg) + ' ' + symbol.success);
<add>} |
|
Java | lgpl-2.1 | de42f172cd68c0997cf25632bd8557831c6a98e1 | 0 | CloverETL/CloverETL-Engine,CloverETL/CloverETL-Engine,CloverETL/CloverETL-Engine,CloverETL/CloverETL-Engine |
import java.io.ByteArrayInputStream;
import org.jetel.component.DataRecordTransform;
import org.jetel.component.XmlXPathReader;
import org.jetel.data.DataRecord;
import org.jetel.data.parser.XPathParser;
import org.jetel.exception.ComponentNotReadyException;
import org.jetel.exception.TransformException;
import org.jetel.util.string.StringUtils;
import org.w3c.dom.Document;
public class ReformatOrders extends DataRecordTransform{
int counter=0;
private XPathParser parser;
private boolean skipOnError = false;
public boolean init() throws ComponentNotReadyException {
//create and init XPathParser
String mapping =
" <Context xpath=\"/records/customer\" outPort=\"0\"> " +
" <Context xpath=\"/records/customer/order\" outPort=\"1\" /> " +
" </Context> ";
try {
Document doc = XmlXPathReader.createDocumentFromString(mapping);
parser = new XPathParser(doc);
parser.init(targetMetadata[0]);
} catch (Exception e) {
throw new ComponentNotReadyException(getNode(),e);
}
//get property from component custom properties
skipOnError = Boolean.parseBoolean(parameters.getProperty("skip_on_error", "false"));
return true;
}
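	/**
	 * Parses the XML document held in the first field of the input record and
	 * routes customer records to output port 0 and order records to port 1.
	 */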
public int transform(DataRecord[] source, DataRecord[] target) throws TransformException{
System.out.println("============== XPath transform ==============");
for (int i=0; i<target.length; i++){
System.out.println("assign port:"+i);
parser.assignRecord(target[i], i);
}// for
String s = source[0].getField(0).toString();
System.out.println("source XML:"+s);
try {
parser.setDataSource( new ByteArrayInputStream(StringUtils.stringToSpecChar(s).getBytes("UTF-8")) );
boolean[] flags = new boolean[target.length];
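			//keep only the first record parsed for each output port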
DataRecord dr = null;
while ((dr = parser.getNext()) != null){
int port = parser.getActualPort();
if (flags[port])
continue;
flags[port] = true;
target[port] = dr.duplicate();
System.out.println("OUTport:"+port+" data record:"+target[port]);
} // while
} catch (Exception e) {
			throw new TransformException("Transformation failed", e, counter, 0);
}
counter++;
return ALL;
}
@Override
public int transformOnError(Exception exception, DataRecord[] sources,
DataRecord[] target) throws TransformException {
if (skipOnError) {
System.err.println("Skipping invalid record. Error: " + exception.getCause().getMessage());
return SKIP;//ignore invalid records
}
return super.transformOnError(exception, sources, target);//throw exception
}
}
| cloveretl.examples/ExtExamples/trans/ReformatOrders.java |
import java.io.ByteArrayInputStream;
import org.jetel.component.DataRecordTransform;
import org.jetel.component.XmlXPathReader;
import org.jetel.data.DataRecord;
import org.jetel.data.parser.XPathParser;
import org.jetel.exception.ComponentNotReadyException;
import org.jetel.exception.TransformException;
import org.jetel.util.string.StringUtils;
import org.w3c.dom.Document;
public class ReformatOrders extends DataRecordTransform{
int counter=0;
private XPathParser parser;
private boolean skipOnError = false;
public boolean init() throws ComponentNotReadyException {
//create and init XPathParser
String mapping =
" <Context xpath=\"/records/customer\" outPort=\"0\"> " +
" <Context xpath=\"/records/customer/order\" outPort=\"1\" /> " +
" </Context> ";
try {
Document doc = XmlXPathReader.createDocumentFromString(mapping);
parser = new XPathParser(doc);
parser.init(targetMetadata[0]);
} catch (Exception e) {
throw new ComponentNotReadyException(getNode(),e);
}
//get property from component custom properties
skipOnError = Boolean.parseBoolean(parameters.getProperty("skip_on_error", "false"));
return true;
}
public int transform(DataRecord[] source, DataRecord[] target) throws TransformException{
System.out.println("============== XPath transform ==============");
for (int i=0; i<target.length; i++){
System.out.println("assign port:"+i);
parser.assignRecord(target[i], i);
}// for
String s = source[0].getField(0).toString();
System.out.println("source XML:"+s);
try {
parser.setDataSource( new ByteArrayInputStream(StringUtils.stringToSpecChar(s).getBytes("UTF-8")) );
boolean[] flags = new boolean[target.length];
DataRecord dr = null;
while ((dr = parser.getNext()) != null){
int port = parser.getActualPort();
if (flags[port])
continue;
flags[port] = true;
target[port] = dr.duplicate();
System.out.println("OUTport:"+port+" data record:"+target[port]);
} // while
} catch (Exception e) {
			throw new TransformException("Transformation failed", e, counter, 0);
}
counter++;
return ALL;
}
@Override
public int transformOnError(Exception exception, DataRecord[] sources,
DataRecord[] target) throws TransformException {
if (skipOnError) return SKIP;//ignore invalid records
return super.transformOnError(exception, sources, target);//throw exception
}
}
| MINOR:more logging
git-svn-id: 7003860f782148507aa0d02fa3b12992383fb6a5@9077 a09ad3ba-1a0f-0410-b1b9-c67202f10d70
| cloveretl.examples/ExtExamples/trans/ReformatOrders.java | MINOR:more logging | <ide><path>loveretl.examples/ExtExamples/trans/ReformatOrders.java
<ide> @Override
<ide> public int transformOnError(Exception exception, DataRecord[] sources,
<ide> DataRecord[] target) throws TransformException {
<del> if (skipOnError) return SKIP;//ignore invalid records
<add> if (skipOnError) {
<add> System.err.println("Skipping invalid record. Error: " + exception.getCause().getMessage());
<add> return SKIP;//ignore invalid records
<add> }
<ide> return super.transformOnError(exception, sources, target);//throw exception
<ide> }
<ide> } |