<reponame>JoshVarty/HaliteIO<filename>game_engine/replay/Replay.hpp #ifndef HALITE_REPLAY_HPP #define HALITE_REPLAY_HPP #include <fstream> #include <iostream> #include <string> #include "Statistics.hpp" #include "Command.hpp" #include "GameEvent.hpp" #include "../version.hpp" #include "Constants.hpp" #include "Cell.hpp" #include "Enumerated.hpp" #include "Store.hpp" namespace hlt { /** * Data struct to store information about cells changed in a turn */ struct CellInfo { dimension_type x; /**< x position of the cell. */ dimension_type y; /**< y position of the cell. */ energy_type production; /**< New production value of cell. */ CellInfo(Location location, Cell cell) : x(location.x), y(location.y), production(cell.energy) {} }; struct EntityInfo { dimension_type x; dimension_type y; energy_type energy; bool is_inspired; /** * Construct entity info from location and entity * @param location Location of entity * @param entity Entity we are interested in */ EntityInfo(Location location, const Entity &entity) : x(location.x), y(location.y), energy(entity.energy), is_inspired(entity.is_inspired) {} }; struct Turn { using Entities = id_map<Entity, EntityInfo>; /** Mapping from player id to the commands they issued this turn */ ordered_id_map<Player, std::vector<std::unique_ptr<Command>>> moves; id_map<Player, energy_type> energy; /**< Mapping from player id to the energy they ended the turn with */ id_map<Player, energy_type> deposited; /**< Mapping from player id to the total energy they deposited by the end of turn */ std::vector<GameEvent> events; /**< Events occurring this turn (spawns, deaths, etc) for replay */ std::vector<CellInfo> cells; /**< Cells that changed on this turn */ id_map<Player, Entities> entities{}; /**< Current entities and their information. */ /** * Given the game store, reformat and store entity state at start of turn in replay * param store The game store at the start of the turn */ void add_entities(Store &store); /** * Add cells changed on this turn to the replay file * @param map The game map (to access cell energy) * @param cells The locations of changed cells */ void add_cells(Map &map, std::unordered_set<Location> changed_cells); /** * Given the game store, add all state from end of turn in replay * param store The game store at the end of the turn */ void add_end_state(Store &store); /** * Move constructor * * Prevents copy construtor from being defined/used, which fixes * compilation errors due to unique_ptr in MSVC when we try to add * another Turn to full_frames. * * https://stackoverflow.com/questions/26115452 */ Turn(Turn&&) = default; Turn() = default; }; struct Replay { GameStatistics &game_statistics; /**< Statistics for the game (inlcudes number of turns) */ const Constants &GAME_CONSTANTS = Constants::get(); /**< Constants used in this game */ static constexpr unsigned long REPLAY_FILE_VERSION = 3; /**< Replay file version (updated as this struct or serialization changes) */ static constexpr auto ENGINE_VERSION = HALITE_VERSION; /**< Version of the game engine */ size_t number_of_players; /**< Number of players in this game */ ordered_id_map<Player, Player> players{}; /**< List of players at start of game, including factory location and initial entities */ std::vector<hlt::Turn> full_frames{}; /**< Turn information: first element = first frame/turn. 
Length is game_statistics.number_turns */ unsigned int map_generator_seed; /**< Seed used in random number generator for map */ const Map production_map; /**< Map of cells game was played on, including factory and other cells. Struct incldues name of map generator */ /** * Output replay into file. Replay will be in json format and may be compressed * * @param filename File to put replay into * @param enable_compression Switch to decide whether or not to compress replay file */ void output(std::string filename, bool enable_compression); /** Create Replay from Game statistics, number of players, initial list of players, seed for the map generation, and the generated map * * @param game_statistics Reference access to game_statistics struct that will be updated during game play * @param number_of_players Number of players in this game * @param players Initial player list -> should be initialized with factory location and any starting entities * @param seed Seed for random number generator for map * @param production_map Initialized map for game play */ Replay(GameStatistics &game_statistics, size_t number_of_players, unsigned int seed, const Map production_map) : game_statistics(game_statistics), number_of_players(number_of_players), map_generator_seed(seed), production_map(production_map) {} /** * Default destructor for class */ ~Replay() = default; }; } #endif //HALITE_REPLAY_HPP
<reponame>JamFF/HttpProcessor<filename>app/src/main/java/com/jamff/http/processor/RetrofitProcessor.java
package com.jamff.http.processor;

import android.util.Log;

import com.jamff.http.MainActivity;
import com.jamff.http.processor.retrofit.ApiService;

import java.util.Map;

import retrofit2.Call;
import retrofit2.Callback;
import retrofit2.Response;
import retrofit2.Retrofit;
import retrofit2.converter.scalars.ScalarsConverterFactory;

/**
 * Description: Retrofit delegate class implementing the network abstraction layer interface
 * Author: JamFF
 * Created: 2018/6/1 11:13
 */
public class RetrofitProcessor implements IHttpProcessor {

    private static final String BASE_URL = "http://v.juhe.cn/";

    private ApiService mApiService;

    public RetrofitProcessor() {
        Retrofit retrofit = new Retrofit.Builder()
                .baseUrl(BASE_URL)
                .addConverterFactory(ScalarsConverterFactory.create())
                .build();
        mApiService = retrofit.create(ApiService.class);
    }

    @Override
    public void post(String url, Map<String, Object> params, final ICallback callback) {
        Call<String> call = mApiService.postWeather(url, params);
        // Call<String> call = mApiService.postWeather2(params);
        call.enqueue(new Callback<String>() {
            @Override
            public void onResponse(Call<String> call, final Response<String> response) {
                Log.d(MainActivity.TAG, "Retrofit post onResponse");
                if (response.isSuccessful()) {
                    if (response.body() != null) {
                        // Response code is in the 200-300 range
                        callback.onSuccess(response.body());
                    } else {
                        callback.onSuccess(response.toString());
                    }
                } else {
                    callback.onFailure(response.toString());
                }
            }

            @Override
            public void onFailure(Call<String> call, final Throwable t) {
                // Network not connected
                Log.e(MainActivity.TAG, "Retrofit post onFailure");
                callback.onFailure(t.toString());
            }
        });
    }

    @Override
    public void get(String url, Map<String, Object> params, final ICallback callback) {
        Call<String> call = mApiService.getWeather(url);
        // Call<String> call = mApiService.getWeather2(params);
        call.enqueue(new Callback<String>() {
            @Override
            public void onResponse(Call<String> call, final Response<String> response) {
                Log.d(MainActivity.TAG, "Retrofit get onResponse");
                if (response.isSuccessful()) {
                    if (response.body() != null) {
                        // Response code is in the 200-300 range
                        callback.onSuccess(response.body());
                    } else {
                        callback.onSuccess(response.toString());
                    }
                } else {
                    callback.onFailure(response.toString());
                }
            }

            @Override
            public void onFailure(Call<String> call, final Throwable t) {
                // Network not connected
                Log.e(MainActivity.TAG, "Retrofit get onFailure");
                callback.onFailure(t.toString());
            }
        });
    }
}
/** * Service class around authentication, session cookie and session cache handling. It works together with the * {@link Authentication} controller and the @Authenticated annotation defined in {@link AuthenticationAction}. * * If a user is authenticated (same password as stored in the database) a user session ID is generated and stored in * Play's session cookie and in the the cache. With each subsequent request this session is checked in the * AuthenticationAction. * * @author Kristian Lange */ @Singleton public class AuthenticationService { private static final ALogger LOGGER = Logger.of(AuthenticationService.class); /** * Parameter name in Play's session cookie: It contains the username of the logged in user */ public static final String SESSION_ID = "sessionID"; /** * Parameter name in Play's session cookie: It contains the username of the logged in user */ public static final String SESSION_USERNAME = "username"; /** * Parameter name in Play's session cookie: It contains the timestamp of the login time */ public static final String SESSION_LOGIN_TIME = "loginTime"; /** * Parameter name in Play's session cookie: It contains a timestamp of the * time of the last HTTP request done by the browser with this cookie */ public static final String SESSION_LAST_ACTIVITY_TIME = "lastActivityTime"; /** * Key name used in RequestScope to store the logged-in User */ public static final String LOGGED_IN_USER = "loggedInUser"; private static final SecureRandom random = new SecureRandom(); private final UserDao userDao; private final UserSessionCacheAccessor userSessionCacheAccessor; @Inject AuthenticationService(UserDao userDao, UserSessionCacheAccessor userSessionCacheAccessor) { this.userDao = userDao; this.userSessionCacheAccessor = userSessionCacheAccessor; } /** * Authenticates the user specified by the username with the given password. */ public boolean authenticate(String normalizedUsername, String password) throws NamingException { if (password == null) return false; User user = userDao.findByUsername(normalizedUsername); if (user == null) return false; switch (user.getAuthMethod()) { case LDAP: return authenticateViaLdap(normalizedUsername, password); case DB: return authenticateViaDb(normalizedUsername, password); default: throw new UnsupportedOperationException("Unsupported auth method " + user.getAuthMethod().name()); } } private boolean authenticateViaDb(String normalizedUsername, String password) { String passwordHash = HashUtils.getHashMD5(password); return userDao.authenticate(normalizedUsername, passwordHash); } /** * Authenticated via an external LDAP server and throws an NamingException if the LDAP server can't be reached or * the the LDAP URL or Base DN is wrong. 
*/ private boolean authenticateViaLdap(String normalizedUsername, String password) throws NamingException { Hashtable<String, String> props = new Hashtable<>(); props.put(Context.INITIAL_CONTEXT_FACTORY, "com.sun.jndi.ldap.LdapCtxFactory"); props.put(Context.SECURITY_AUTHENTICATION, "simple"); props.put(Context.SECURITY_PRINCIPAL, "uid=" + normalizedUsername + "," + Common.getLdapBasedn()); props.put(Context.SECURITY_CREDENTIALS, password); props.put(Context.PROVIDER_URL, Common.getLdapUrl()); props.put("com.sun.jndi.ldap.read.timeout", String.valueOf(Common.getLdapTimeout())); props.put("com.sun.jndi.ldap.connect.timeout", String.valueOf(Common.getLdapTimeout())); DirContext context; try { context = new InitialDirContext(props); context.close(); return true; } catch (AuthenticationException e) { return false; } } /** * Verifies and fetches an ID token from Google OAuth by sending an HTTP POST to Google. The actual authentication * happens in the frontend with Google's gapi library. */ public GoogleIdToken fetchOAuthGoogleIdToken(String idTokenString) throws GeneralSecurityException, IOException { HttpTransport transport = new NetHttpTransport(); JacksonFactory jacksonFactory = new JacksonFactory(); GoogleIdTokenVerifier verifier = new GoogleIdTokenVerifier.Builder(transport, jacksonFactory) .setAudience(Collections.singletonList(Common.getOauthGoogleClientId())).build(); return verifier.verify(idTokenString); } /** * Checks the user session cache whether this user tries to login repeatedly */ public boolean isRepeatedLoginAttempt(String normalizedUsername) { userSessionCacheAccessor.addLoginAttempt(normalizedUsername); return userSessionCacheAccessor.isRepeatedLoginAttempt(normalizedUsername); } /** * Retrieves the logged-in user from Play's session. If a user is logged-in their username is stored in Play's * session cookie. With the username a user can be retrieved from the database. Returns null if the session doesn't * contains an username or if the user doesn't exists in the database. * <p> * In most cases getLoggedInUser() is faster since it doesn't has to query the database. */ public User getLoggedInUserBySessionCookie(Http.Session session) { String normalizedUsername = session.get(AuthenticationService.SESSION_USERNAME); User loggedInUser = null; if (normalizedUsername != null) { loggedInUser = userDao.findByUsername(normalizedUsername); } return loggedInUser; } /** * Gets the logged-in user from the RequestScope. It was put into the * RequestScope by the AuthenticationAction. Therefore this method works * only if you use the @Authenticated annotation at your action. */ public User getLoggedInUser() { return (User) RequestScope.get(LOGGED_IN_USER); } /** * Prepares Play's session cookie and the user session cache for the user * with the given username to be logged-in. Does not authenticate the user (use * authenticate() for this). 
*/ public void writeSessionCookieAndSessionCache(Http.Session session, String normalizedUsername, String remoteAddress) { String sessionId = generateSessionId(); userSessionCacheAccessor.setUserSessionId(normalizedUsername, remoteAddress, sessionId); session.put(SESSION_ID, sessionId); session.put(SESSION_USERNAME, normalizedUsername); session.put(SESSION_LOGIN_TIME, String.valueOf(Instant.now().toEpochMilli())); session.put(SESSION_LAST_ACTIVITY_TIME, String.valueOf(Instant.now().toEpochMilli())); } /** * Used StackOverflow for this session ID generator: "This works by choosing * 130 bits from a cryptographically secure random bit generator, and * encoding them in base-32" (http://stackoverflow.com/questions/41107) */ private String generateSessionId() { return new BigInteger(130, random).toString(32); } /** * Refreshes the last activity timestamp in Play's session cookie. This is * usually done with each HTTP call of the user. */ public void refreshSessionCookie(Http.Session session) { session.put(SESSION_LAST_ACTIVITY_TIME, String.valueOf(Instant.now().toEpochMilli())); } /** * Deletes the session cookie. This is usual done during a user logout. */ public void clearSessionCookie(Http.Session session) { session.clear(); } /** * Deletes the session cookie and removes the cache entry. This is usual * done during a user logout. */ public void clearSessionCookieAndSessionCache(Http.Session session, String normalizedUsername, String remoteAddress) { userSessionCacheAccessor.removeUserSessionId(normalizedUsername, remoteAddress); session.clear(); } /** * Checks the session ID stored in Play's session cookie whether it is the * same as stored in the cache during the last login. */ public boolean isValidSessionId(Http.Session session, String normalizedUsername, String remoteAddress) { String cookieSessionId = session.get(SESSION_ID); String cachedSessionId = userSessionCacheAccessor.getUserSessionId(normalizedUsername, remoteAddress); return cookieSessionId != null && cookieSessionId.equals(cachedSessionId); } /** * Returns true if the session login time as saved in Play's session cookie * is older than allowed. */ public boolean isSessionTimeout(Http.Session session) { try { Instant loginTime = Instant.ofEpochMilli(Long.parseLong(session.get(SESSION_LOGIN_TIME))); Instant now = Instant.now(); Instant allowedUntil = loginTime.plus(Common.getUserSessionTimeout(), ChronoUnit.MINUTES); return allowedUntil.isBefore(now); } catch (Exception e) { LOGGER.error(e.getMessage()); // In case of any exception: timeout return true; } } /** * Returns true if the session inactivity time as saved in Play's session * cookie is older than allowed. */ public boolean isInactivityTimeout(Http.Session session) { try { Instant lastActivityTime = Instant.ofEpochMilli(Long.parseLong(session.get(SESSION_LAST_ACTIVITY_TIME))); Instant now = Instant.now(); Instant allowedUntil = lastActivityTime.plus(Common.getUserSessionInactivity(), ChronoUnit.MINUTES); return allowedUntil.isBefore(now); } catch (Exception e) { LOGGER.error(e.getMessage()); // In case of any exception: timeout return true; } } /** * Gets the time of the last activity of the given user */ public Instant getLastSeen(String normalizedUsername) { return userSessionCacheAccessor.getLastSeen(normalizedUsername); } /** * Sets the time of the last activity of the given user */ public void setLastSeen(String normalizedUsername) { userSessionCacheAccessor.setLastSeen(normalizedUsername); } }
def AddBlob(self, blob_hash, length, chunk_number):
    offset = chunk_number * self.index.chunksize
    self.index.Seek(offset)
    if not self.index.ChunkExists(chunk_number):
        self.size += length
    self.index.Seek(offset)
    self.index.Write(blob_hash)
    self._dirty = True
Today, we are absolutely delighted to announce that volare has concluded months of negotiations to sign an exclusive translation licensing, ebook/digital publishing, and future cooperation agreement with iReader! For those who might be unfamiliar with iReader, they are a technology market leader specializing in wireless entertainment, with a focus on ebooks, manhua, magazines, and other content. This means that volare’s works are taking a step out from our site and into the greater world through apps and other platforms, reaching out to readers who are as yet unaware of us! (What! Craziness! I know right? XD I kid, I kid.) iReader products currently reach 500 million users worldwide, so here’s to even more readers discovering the joy that stems from Chinese novels! (And more competition for the F5 sect?) In coordination with our partnership, volare will be launching three novels from iReader alone this month. The first is being launched today, called “Light” and translated by Selutu! In keeping with our quirky and alternative roots, this is a sci-fi short that was a winner in one of iReader’s writing competitions. It’s incredibly poignant, and I leave you with screenshots from Deyna, who edited the work. *evil cackle* We’re going to go in a completely different direction on Sunday, with the launch of “Red Packet Server”, also translated by Selutu. Online personas meet the Heavenly Court meet fighting for… red packets?! I’ll have to let Selutu do full justice to the story on Sunday, so stay tuned! And finally, next week we’ll meet a volare group novel! Many of the ladies banded together to do “Doomed to be Cannon Fodder”, with master TLCer/translator etvo, fluff minion translators Ruyi, timebun, Grenn, Grace, and adoptive girl Mehh. XD Plus whoever we’ll continue to rope into the project as time goes on. More on that next week! Happy weekend! (Psst, we have an ever-expanding library of pre-authorized works at volare, so come let me know if you want to translate!) <3 etvo
The Army says the size and scope of Jade Helm 15, a Special Operations exercise that begins in July, set it apart from other training exercises. Also setting it apart: The widespread conspiracy theories that the U.S. is preparing to hatch martial law. The Post's Dan Lamothe explains. (Tom LeGro/The Washington Post) The mission is vast both geographically and strategically: Elite service members from four branches of the U.S. military will launch an operation this summer in which they will operate covertly among the U.S. public and travel from state to state in military aircraft. Texas, Utah and a section of southern California are labeled as hostile territory, and New Mexico isn’t much friendlier. That’s the scheme for Jade Helm 15, a new Special Operations exercise that runs from July 15 to Sept. 15. Army Special Operations Command announced it last week, saying the size and scope of the mission sets it apart from many other training exercises. [Texas governor orders State Guard to monitor Jade Helm, citing citizen concerns] “The nature of warfare is always changing and U.S. Army Special Operations Command’s mission is to make certain the Army’s various Special Operations Forces are trained, equipped and organized to successfully conduct worldwide special operations in support of our nation’s interests,” Army Lt. Col. Mark Lastoria, a command spokesman, said in an e-mail. “Training exercise Jade Helm is going to assist our Special Operations Soldiers and leadership in refining the skills needed against an ever changing foreign threat.” The exercise has prompted widespread conspiracy theories that the United States is preparing to hatch martial law. Some examples: If Operation Jade Helm were happening in any other country, it would be immediately labeled for martial law…… http://t.co/D80mMMwc3B — Belinski-Bible&Truth (@realbelinski) March 30, 2015 Beyond Denial: Preparations for Martial Law in America http://t.co/d59Fbq0Q7t — Joe Biggs (@Rambobiggs) March 27, 2015 BREAKING!! Jade Helm 15 Is Martial Law in The USA, Russia To Arm Insurgents In The USA! https://t.co/bg7DjttCRW — PredictionMan (@PredictionMan) March 30, 2015 In particular, some have expressed alarm about this map, which outlines events for the exercise in unclassified documents posted online last week. The Washington Post verified them to be legitimate by speaking to Army sources. They appear to have been prepared for local authorities. This map shows the military’s plan during the exercise Jade Helm 15, which begins in July. (U.S. Army Special Operations Command map) Several media outlets have noted that the Army has pushed back on the outcry, including Stars & Stripes, Army Times and the Houston Chronicle. But it’s also worth noting that the military has routinely launched exercises in the past in which regions of the United States are identified as hostile for the purpose of training. Consider Bold Alligator, a naval exercise in which thousands of Marines and sailors have been involved in the past. The most recent version was launched last fall, and included amphibious landings to prevent insurgent groups in the fictional country known as Garnet — Georgia and part of Florida in real life — from launching attacks. The map looked like this in Navy documents: The Treasure Coast Region, as seen in documents outlining the military exercise Bold Alligator 14. (Image from Navy documents) In another example, U.S. Special Forces support fictional guerrilla forces in numerous counties across North Carolina in the exercise Robin Sage. 
Green Beret soldiers work to liberate the country of Pineland, and operate in close proximity to civilians, who are warned that they may hear blank gunfire. Marine Special Operations troops also have an exercise that is similar in some ways, called Derna Bridge. It spans several counties in western South Carolina, and includes some activities in Sumter National Forest.
def ordering(self, v1, v2):
    if self.isMin:
        return v1 < v2
    return v1 > v2
class MutationMixin:
    """
    All mutations should extend this class
    """

    success = graphene.Boolean(default_value=True)
    errors = graphene.Field(OutputErrorType)

    @classmethod
    def mutate(cls, root, info, **input):
        return cls.resolve_mutation(root, info, **input)

    @classmethod
    def parent_resolve(cls, root, info, **kwargs):
        return super().mutate(root, info, **kwargs)
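A minimal usage sketch of the mixin above, assuming `graphene` and `OutputErrorType` are available as in the sample. The `RenameWidget` mutation, its arguments, and the body of `resolve_mutation` are hypothetical; they only illustrate how a subclass plugs into the mixin's `mutate` hook.

```python
# Hypothetical subclass built on MutationMixin (names and fields are invented).
import graphene


class RenameWidget(MutationMixin, graphene.Mutation):
    class Arguments:
        widget_id = graphene.ID(required=True)
        new_name = graphene.String(required=True)

    # Extra output field; `success` and `errors` are inherited from MutationMixin.
    name = graphene.String()

    @classmethod
    def resolve_mutation(cls, root, info, widget_id, new_name):
        # Real code would load and persist the widget here; this sketch just
        # echoes the new name back so the mixin's mutate() has something to return.
        return cls(name=new_name)
```

The mixin's `mutate` is what graphene actually invokes, so every subclass only has to supply `resolve_mutation`, keeping the `success`/`errors` envelope consistent across mutations.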
#include<stdio.h>
int main()
{
    //2<=n<=30;
    int h[31]={0},g[31]={0};int n=0;
    int sum=0;
    int i=0;int j=0;
    scanf("%d",&n);
    for(i=1;i<=n;i++)scanf("%d %d",&h[i],&g[i]);
    for(i=1;i<=n;i++)//host team
    {
        for(j=1;j<=n;j++)//guest team
        {
            if(h[i]==g[j])sum++;
        }
    }
    printf("%d\n",sum);
    return 0;
}
<gh_stars>1-10 package polygon.services; import org.hibernate.Hibernate; import org.hibernate.proxy.HibernateProxy; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.cache.annotation.Cacheable; import org.springframework.stereotype.Service; import org.springframework.transaction.annotation.Transactional; import polygon.models.Building; import polygon.models.City; import polygon.repos.BuildingRepository; import polygon.services.interfaces.BuildingService; import java.util.List; @Service public class BuildingServiceImpl implements BuildingService { @Autowired private BuildingRepository buildingRepository; @Override @Cacheable @Transactional public List<Building> allBuildings() { List<Building> buildings = buildingRepository.findAll(); for (Building b : buildings) { City city = b.getCity(); Hibernate.initialize(city); if (city instanceof HibernateProxy) { city = (City) ((HibernateProxy) city).getHibernateLazyInitializer() .getImplementation(); } } return buildings; } @Override @Transactional public List<Building> allByCity(City city) { List<Building> buildings = buildingRepository.findByCity(city); for (Building b : buildings) { City cityf = b.getCity(); Hibernate.initialize(cityf); if (cityf instanceof HibernateProxy) { cityf = (City) ((HibernateProxy) cityf).getHibernateLazyInitializer() .getImplementation(); } } return buildings; } @Override public Building findById(int id) { return buildingRepository.findById(id).orElse(null); } @Override public void save(Building building) { buildingRepository.saveAndFlush(building); } @Override public boolean safeDelete(int id) { try { Building building = buildingRepository.findById(id).orElse(null); if(building != null && building.getRooms().size() == 0) { buildingRepository.deleteById(id); return true; } } catch (Exception e) { System.out.println(e.toString()); return false; } return false; } }
An Argument for Indexical Representations in Temporal Reasoning
This paper discusses the need for indexicals in a representation language. It has been claimed that the cost of updating a knowledge base containing indexicals would be prohibitive and thus that a robot should use its internal clock to eliminate indexicals from its representations. We criticize this view and give an example of a commonplace temporal reasoning/planning problem that can only be solved in a representation formalism that includes both indexical and absolute terms and supports reasoning using both. We show that the example can be formalized within our theory of knowledge and action. We argue that rather than trying to find restricted settings where indexical knowledge can be reduced to objective knowledge, one should investigate when and how planning and temporal knowledge base update can be performed efficiently in the presence of indexicals.
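The abstract's contrast between indexical terms (like "now") and absolute terms (fixed clock times) can be made concrete with a small sketch. This example is not from the paper; the deadline-checking scenario, the names, and the representation are illustrative assumptions only.

```python
# Illustrative sketch (not from the paper): a query mixes an indexical term
# ("now") with an absolute deadline and reasons with both. All names and the
# scenario are hypothetical.
from dataclasses import dataclass
from datetime import datetime, timedelta


@dataclass
class Deadline:
    label: str
    at: datetime  # absolute term: a fixed clock time


def must_act(now: datetime, deadline: Deadline, travel_time: timedelta) -> bool:
    """Indexical query: relative to *now*, is it time to start acting?

    The same absolute deadline yields different answers as "now" advances,
    which is what a purely objective (indexical-free) encoding would have to
    re-derive with every knowledge-base update.
    """
    return now + travel_time >= deadline.at


flight = Deadline("board flight", datetime(2024, 5, 1, 14, 0))
print(must_act(datetime(2024, 5, 1, 12, 0), flight, timedelta(hours=1)))   # False
print(must_act(datetime(2024, 5, 1, 13, 30), flight, timedelta(hours=1)))  # True
```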
// Since one BufferManager can be shared by multiple decoders, ContextState is
// passed in each time and not just passed in during initialization.
Buffer* BufferManager::GetBufferInfoForTarget(
    ContextState* state, GLenum target) const {
  switch (target) {
    case GL_ARRAY_BUFFER:
      return state->bound_array_buffer.get();
    case GL_ELEMENT_ARRAY_BUFFER:
      return state->vertex_attrib_manager->element_array_buffer();
    case GL_COPY_READ_BUFFER:
      return state->bound_copy_read_buffer.get();
    case GL_COPY_WRITE_BUFFER:
      return state->bound_copy_write_buffer.get();
    case GL_PIXEL_PACK_BUFFER:
      return state->bound_pixel_pack_buffer.get();
    case GL_PIXEL_UNPACK_BUFFER:
      return state->bound_pixel_unpack_buffer.get();
    case GL_TRANSFORM_FEEDBACK_BUFFER:
      return state->bound_transform_feedback_buffer.get();
    case GL_UNIFORM_BUFFER:
      return state->bound_uniform_buffer.get();
    default:
      NOTREACHED();
      return nullptr;
  }
}
<filename>src/n0220_contains_duplicate_iii.rs
/**
 * [220] Contains Duplicate III
 *
 * Given an array of integers, find out whether there are two distinct indices i and j in the
 * array such that the absolute difference between nums[i] and nums[j] is at most t and the
 * absolute difference between i and j is at most k.
 *
 * Example 1:
 *
 * Input: nums = [1,2,3,1], k = 3, t = 0
 * Output: true
 *
 * Example 2:
 *
 * Input: nums = [1,0,1,1], k = 1, t = 2
 * Output: true
 *
 * Example 3:
 *
 * Input: nums = [1,5,9,1,5,9], k = 2, t = 3
 * Output: false
 *
 */
pub struct Solution {}

// submission codes start here

use std::collections::HashMap;

impl Solution {
    pub fn contains_nearby_almost_duplicate(nums: Vec<i32>, k: i32, t: i32) -> bool {
        if k < 1 || t < 0 {
            return false;
        }
        let mut map = HashMap::new();
        for i in 0..nums.len() {
            let remap = nums[i] as i64 - i32::min_value() as i64;
            let bucket = remap / (t as i64 + 1);
            println!("{} {}", remap, bucket);
            if map.contains_key(&bucket)
                || map.get(&(bucket - 1)).map_or(false, |v| remap - v <= t as i64)
                || map.get(&(bucket + 1)).map_or(false, |v| v - remap <= t as i64)
            {
                return true;
            }
            if i >= k as usize {
                let last_bucket =
                    (nums[i - k as usize] as i64 - i32::min_value() as i64) / (t as i64 + 1);
                map.remove(&last_bucket);
            }
            map.insert(bucket, remap);
        }
        false
    }
}

// submission codes end

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_220() {
        // assert_eq!(Solution::contains_nearby_almost_duplicate(vec![1,5,9,1,5,9], 2, 3), false);
        // assert_eq!(Solution::contains_nearby_almost_duplicate(vec![1,2,3,1], 3, 0), true);
        assert_eq!(Solution::contains_nearby_almost_duplicate(vec![-1, 2147483647], 1, 2147483647), false);
    }
}
On Thursday, NBC News sources confirmed that Chuck Todd will bump David Gregory from the Meet the Press host chair. And while Todd isn’t as liberal as his predecessor, he has played defense for the Barack Obama administration on a number of fronts. Most recently he dismissed the IRS-Tea Party scandal by asking “are there any actual real victims?” and diminished Benghazi hearings as “partisan.” The NBC News chief White House correspondent also claimed Benghazi figure Susan Rice was simply a “victim” of conservative media but in the past compared Vice Presidential nominee Sarah Palin to a “car-wreck.” (videos after the jump) The following is a list of the Top 10 most liberal Chuck Todd quotes from the MRC’s archives: 10. Dumb Conservatives Actually Believe Liberal Bias “Myth” “There are fewer and fewer Republicans who will go on non-Fox shows....I think that the mythology of the big, bad non-conservative media has gotten into some offices...and then these guys, they actually believe the spin that’s out there, ‘Oh, my God, that’s what the mainstream media does, they do anything to disrupt the conservative agenda.’ And so literally, you only — Lindsey [Graham], John McCain, you know, that’s been my — I just fear that it’s sort of like this whole — there’s this whole sort of this, the mythology of this, the media’s out to get conservatives is believed among more and more actual staffers.” — NBC political director Chuck Todd on MSNBC’s Morning Joe, February 19, 2013 talking about why so few conservative members of Congress are willing to go on liberal talk shows. 9. Fox News Not Staffed With Real Pros - Like Us at MSNBC “When a news executive goes out there and states a crazy accusation like that, that it ends up, it only ends up probably hurting what they're trying to do, but it creates, it only denigrates all of us....And, I'm sorry, there are certain news organizations out here whose agenda is to undermine the 90 percent of journalists who are just simply trying to cover stories out there.” — NBC White House correspondent and MSNBC daytime host Chuck Todd on MSNBC’s Morning Joe, February 8, 2010, reacting to Fox News Washington managing editor Bill Sammon’s statement the previous day that “the mainstream media hates the Tea Party movement almost as much as it hates Sarah Palin.” 8. Why No Public Love for Obama’s “Enormous Amount of Legislative Victories?” “You’ve had an enormous amount of legislative victories - it’s comparable to any President in history. It has not translated into political capital with the public. Honestly, are you frustrated by that?” — NBC’s Chuck Todd to President Obama in an interview excerpt shown on MSNBC’s Hardball, July 16, 2010. 7. Getting Ready to Bash GOP for “Beating Up Two Women” Anchor Chuck Todd: “Any danger that the Samantha Power nomination [to become U.N. Ambassador] becomes sort of a proxy that some Senate Republicans try, because that is a Senate confirmation appointment. And that some Republicans try to somehow go after her because they can’t go after Susan Rice?...Beating up on two women, I think, would be something that the Republican Party brand doesn’t need.” Politico’s Lois Romano: “They don’t need it, but they — they haven’t had much, you know, problems beating up on Susan Rice.” Todd: “True, before. That’s for sure.” — MSNBC’s The Daily Rundown, June 5, 2013. 6. 
Distorting Facts to Declare Obama’s IRS Scandal Over “The IRS ‘scandal’ [makes air quote marks] looks like it’s a bureaucratic scandal, not the political scandal that Republicans were wishing that they had come up with....This sort of brings me to [House Chairman] Darrell Issa at this point. I mean, this is the guy who is — I mean, he is living the fable of the boy who cried wolf, at this point.” — Anchor Chuck Todd setting up a panel discussion on MSNBC’s The Daily Rundown, June 25, 2013. 5. Palin Will Still Draw Political “Car-Wreck Watchers” “Look, she may spend the next year campaigning for Republicans all across the country. She’s probably going to be the person that can attract the largest crowds, some of it is car-wreck watchers - you know, they just are coming, kind of curiosity-seekers.” — NBC White House correspondent Chuck Todd talking about Palin’s resignation, July 3, 2009 Nightly News. 4. “Clean White Suit” Obama a Victim of His Own Virtue “The President is coming under some criticism for tonight’s big-ticket events because they are taking place just a stone’s throw away from Wall Street....One Democratic strategist said that part of the President’s problem is simply his own expectations. Some of the rhetoric he said on the campaign trail made it seem as if he was coming into office in a clean white suit. So now any speck of mud - like raising money from Wall Street donors - shows up a lot clearer than if he came in just as another politician wearing just another gray suit.” — NBC White House correspondent Chuck Todd on Nightly News, October 20, 2009. 3. Susan Rice: “Victim” of Conservative Media “It [criticism of Susan Rice] was all driven, in many cases, by some conservative outlets who were making her the center of the Benghazi story....She sort of became a victim of this....You can become collateral damage in a hurry in just the way you can get piled on, whether it’s Twitter, whether it’s advocacy journalism, whatever you want to describe it, talk radio. And that’s what she was. She became political collateral damage.” — NBC News political director Chuck Todd during live MSNBC coverage on December 13, 2012 after Rice withdrew her name as a potential Secretary of State. 2. No Such Thing as “Unanswered Question” About Benghazi “It certainly looks more partisan than it looks like a serious inquiry....You know, I’ll hear from Republicans that say, ‘But there are unanswered questions!’ Well, no, all the questions have been answered. There’s just some people that don’t like the answers, that wish the answers were somehow more conspiratorial, I guess....To sit here and investigate talking points seems to be totally missing the larger point here. It’s like investigating who cut down a tree, one tree, in a forest that’s been burned down.” — NBC political director and chief White House correspondent Chuck Todd on MSNBC’s Morning Joe, May 13, 2014. 1. Mocking IRS’s Targeting of Tea Party: “Are There Any Actual Real Victims?” “Here is the story many are missing: Why should primarily political organizations get a taxpayer exemption, basically get a handout from the tax code?...So while the IRS is certainly not a good guy here — they have been terrible about being forthcoming — are there any actual real victims?...We know what really is working here for Republicans. Beating up the IRS, good for the base. Good politics there makes for great fundraising e-mails. 
But let’s remember what the controversy itself is about.” — NBC News political director and chief White House correspondent Chuck Todd’s “Takeaway” on his daily 9am ET MSNBC show The Daily Rundown, June 25, 2014.
/// Translate an HSL colour point into an RGB colour point.
///
/// Based wholly on https://en.wikipedia.org/wiki/HSL_and_HSV#From_HSL.
///
/// # Examples
///
/// ```
/// # use mandalas::util::hsl_to_rgb;
/// assert_eq!(hsl_to_rgb([1f64, 1f64, 1f64]), [1f64, 1f64, 1f64]);
/// assert_eq!(hsl_to_rgb([0f64, 0f64, 0f64]), [0f64, 0f64, 0f64]);
/// ```
pub fn hsl_to_rgb(hsl: [f64; 3]) -> [f64; 3] {
    let h_deg = hsl[0] * 360f64;
    let h_prim = h_deg / 60f64;
    // Chroma per the referenced formula: C = (1 - |2L - 1|) * S.
    let chroma = (1f64 - (2f64 * hsl[2] - 1f64).abs()) * hsl[1];
    let x = chroma * (1f64 - ((h_prim % 2f64) - 1f64).abs());
    let rgb1 = if 0f64 <= h_prim && h_prim <= 1f64 {
        [chroma, x, 0f64]
    } else if 1f64 <= h_prim && h_prim <= 2f64 {
        [x, chroma, 0f64]
    } else if 2f64 <= h_prim && h_prim <= 3f64 {
        [0f64, chroma, x]
    } else if 3f64 <= h_prim && h_prim <= 4f64 {
        [0f64, x, chroma]
    } else if 4f64 <= h_prim && h_prim <= 5f64 {
        [x, 0f64, chroma]
    } else if 5f64 <= h_prim && h_prim < 6f64 {
        [chroma, 0f64, x]
    } else {
        [0f64, 0f64, 0f64]
    };
    let m = hsl[2] - (chroma / 2f64);
    [rgb1[0] + m, rgb1[1] + m, rgb1[2] + m]
}
// token_test.go // // To the extent possible under law, <NAME> has waived all copyright // and related or neighboring rights to token, using the Creative // Commons "CC0" public domain dedication. See LICENSE or // <http://creativecommons.org/publicdomain/zero/1.0/> for full details. package token import ( "crypto/rand" "io" "log" "reflect" "testing" "time" "github.com/nogoegst/locker" ) func TestSymmetricToken(t *testing.T) { testToken(t, locker.Symmetric) } func TestScrambleSignedToken(t *testing.T) { testToken(t, locker.ScrambleSigned) } func testToken(t *testing.T, lr locker.Locker) { pk, sk, err := lr.GenerateKey(rand.Reader) if err != nil { t.Fatal(err) } payload := make([]byte, 32) _, err = io.ReadFull(rand.Reader, payload) if err != nil { t.Fatal(err) } adata := make([]byte, 32) _, err = io.ReadFull(rand.Reader, adata) if err != nil { t.Fatal(err) } tok, err := NewWithDuration(lr, sk, 100*time.Millisecond, payload, adata) if err != nil { t.Fatal(err) } log.Printf("%x", tok) time.Sleep(50 * time.Millisecond) tt, err := Verify(lr, pk, tok, adata) if err != nil { t.Fatal(err) } if !reflect.DeepEqual(payload, tt.Payload) { t.Fatalf("wrong additional data: want %x, got %x", payload, tt.Payload) } badpk, _, err := lr.GenerateKey(rand.Reader) if err != nil { t.Fatal(err) } _, err = Verify(lr, badpk, tok, adata) if err == nil { t.Fatal(err) } time.Sleep(150 * time.Millisecond) tt, err = Verify(lr, pk, tok, adata) if err == nil { log.Printf("%v", tt.ExpirationTime()) t.Fatal(err) } if !reflect.DeepEqual(payload, tt.Payload) { t.Fatalf("wrong additional data: want %x, got %x", payload, tt.Payload) } }
/** Add a source file to this compound stream. The file path is used as the
 * ID by which the sub-stream will be known in the compound stream, so the
 * caller must ensure it is unique. Files can no longer be added once merge
 * has been called. */
public void addFile(String file) {
    if (merged)
        throw new IllegalStateException(
            "Can't add extensions after merge has been called");
    if (file == null)
        throw new IllegalArgumentException(
            "Missing source file");
    if (! ids.add(file))
        throw new IllegalArgumentException(
            "File " + file + " already added");
    FileEntry entry = new FileEntry();
    entry.file = file;
    entries.add(entry);
}
def format_jacobian(jacobian):
    output_dim = jacobian.shape[0]
    return jacobian.view(output_dim, -1).t()
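A brief usage sketch for the helper above. The `.view(...).t()` calls suggest a PyTorch tensor, though the snippet itself does not say so; the concrete shapes below are made-up illustration values.

```python
# Hedged usage example: assumes `jacobian` is a PyTorch tensor of shape
# (output_dim, *input_shape), e.g. gradients of a 3-dimensional output with
# respect to a (4, 5) input. Shapes are illustrative only.
import torch

jacobian = torch.randn(3, 4, 5)       # (output_dim, *input_shape)
flat = format_jacobian(jacobian)      # flatten the input dims, then transpose
print(flat.shape)                     # torch.Size([20, 3])
```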
<reponame>nihilapp/nihilapp<filename>src/data/index.ts export * from './config.data'; export * from './size.data';
<reponame>mavit/mayhem-pi #include "collision.h" #include "platform_data.h" #include "player_view.h" #include "utils.h" #include "allegro_compatibility.h" bool test_collision(struct player_view *pv, struct level_data *currentlevel) { struct vaisseau_data *vaisseau = pv->player->ship; struct platform_data *plt; bool test_it = true; for (int i = 0; i < currentlevel->nbplatforms; i++) { plt = &currentlevel->platformdata[i]; int xmin = plt->xmin - 9; // centrage int xmax = plt->xmax - 23; // centrage int yflat = plt->yflat - 29; if ((xmin <= vaisseau->xpos) && (vaisseau->xpos <= xmax) && ((vaisseau->ypos == yflat) || ((vaisseau->ypos - 1) == yflat) || ((vaisseau->ypos - 2) == yflat) || ((vaisseau->ypos - 3) == yflat)) && ((vaisseau->angle & 0xff) <= 12 || (vaisseau->angle & 0xff) >= 243)) test_it = false; if (vaisseau->shield && (xmin <= vaisseau->xpos) && (vaisseau->xpos <= xmax) && ((vaisseau->ypos == yflat) || ((vaisseau->ypos - 1) == yflat) || ((vaisseau->ypos - 2) == yflat) || ((vaisseau->ypos - 3) == yflat) || ((vaisseau->ypos + 1) == yflat)) && ((vaisseau->angle & 0xff) <= 12 || (vaisseau->angle & 0xff) >= 243)) test_it = false; if ((vaisseau->thrust) && (xmin <= vaisseau->xpos) && (vaisseau->xpos <= xmax) && ((vaisseau->ypos == yflat) || ((vaisseau->ypos - 1) == yflat) || ((vaisseau->ypos + 1) == yflat))) test_it = false; } if (!test_it) return test_it; constexpr int size = 31; int num_frames = vaisseau->coll_map.num_frames; double frame = vaisseau->angle / vaisseau->anglestep; assert(frame >= 0 && frame < num_frames); int y_max = currentlevel->coll_map.height; int x_max = currentlevel->coll_map.width; for (int y = 0; y < size; y++) { for (int x = 0; x < size; x++) { int x_pos = vaisseau->xpos + x; int y_pos = vaisseau->ypos + y; bool inside = x_pos >= 0 && x_pos < x_max && y_pos >= 0 && y_pos < y_max; if (inside && currentlevel->coll_map.is_collide_pixel(x_pos, y_pos, 0) && vaisseau->coll_map.is_collide_pixel(x, y, frame)) { if (currentlevel->wall_collision) { return true; } else { bounce_vaisseau(vaisseau); return false; } } } } return false; } void bounce_vaisseau(struct vaisseau_data *vaisseau) { vaisseau->ax = (0); vaisseau->ay = (0); vaisseau->vx = -vaisseau->vx / 2; vaisseau->vy = -vaisseau->vy / 2; } bool pixel_collision_detect_inbox(struct vaisseau_data *vaisseau1, int xl1, int yt1, struct vaisseau_data *vaisseau2, int xl2, int yt2, int w, int h) { int x, y; int frame0 = vaisseau1->angle / vaisseau1->anglestep; int frame1 = vaisseau2->angle / vaisseau2->anglestep; for (x = 0; x < w; x++) { for (y = 0; y < h; y++) { if (vaisseau1->coll_map.is_collide_pixel(xl1 + x, yt1 + y, frame0) && vaisseau2->coll_map.is_collide_pixel(xl2 + x, yt2 + y, frame1)) { return true; } } } return false; } // // Implementation de la detection des collisions entre vaisseaux //�@First we look for a bounding box, if none, sure, there are no collision // If there is, we find the limit of the bounding box and // we pixel iterate through it... 
// bool test_collision_ship2ship(struct vaisseau_data *vaisseau1, struct vaisseau_data *vaisseau2) { // first find the bouding box int xl1, xl2, yt1, yt2; int w, h; if (vaisseau1->xpos + 32 >= vaisseau2->xpos && vaisseau1->xpos <= vaisseau2->xpos) { xl2 = 0; xl1 = vaisseau2->xpos - vaisseau1->xpos; w = vaisseau1->xpos + 32 - vaisseau2->xpos; } else if (vaisseau2->xpos + 32 >= vaisseau1->xpos && vaisseau2->xpos <= vaisseau1->xpos) { xl1 = 0; xl2 = vaisseau1->xpos - vaisseau2->xpos; w = vaisseau2->xpos + 32 - vaisseau1->xpos; } else return false; if (vaisseau1->ypos + 32 >= vaisseau2->ypos && vaisseau1->ypos <= vaisseau2->ypos) { yt2 = 0; yt1 = vaisseau2->ypos - vaisseau1->ypos; h = vaisseau1->ypos + 32 - vaisseau2->ypos; } else if (vaisseau2->ypos + 32 >= vaisseau1->ypos && vaisseau2->ypos <= vaisseau1->ypos) { yt1 = 0; yt2 = vaisseau1->ypos - vaisseau2->ypos; h = vaisseau2->ypos + 32 - vaisseau1->ypos; } else return false; // if we arrive here we might have a bounding box // with collision return pixel_collision_detect_inbox(vaisseau1, xl1, yt1, vaisseau2, xl2, yt2, w, h); } bool testcollision_bullet4pix(collision_map &coll_map, int x, int y, int w, int h) { if (x < 0 || (x + 1) >= w || (y - 1) < 0 || y >= h) return true; else { unsigned long address_bmp; // pour le sprite unsigned char pixelcolor; int j; for (j = y - 1; j <= y; j++) { if (coll_map.is_collide_pixel(x, j, 0) || coll_map.is_collide_pixel(x + 1, j, 0)) { return true; } } } return false; } bool testcollision_bullet1pix(vaisseau_data *v, int x, int y) { return v->coll_map.is_collide_pixel(x, y, v->angle / v->anglestep); } bool collision_tir_ship(struct vaisseau_data *v, struct vaisseau_data *allv, int nombre_vaisseau) { struct tir_data *shoot; while (nombre_vaisseau--) { for (int i = 0; i < MAX_TIR; i++) { shoot = &allv->tir[i]; if (shoot->free) continue; // only iterate through the non free shoot int xtrans = shoot->x - v->xpos; int ytrans = shoot->y - v->ypos; // if outside of the bounding box if (xtrans < 0 || xtrans >= 32 || ytrans < 0 || ytrans >= 32) continue; bool b = testcollision_bullet1pix(v, xtrans, ytrans); if (xtrans + 1 < 32) { if (ytrans - 1 >= 0) { b |= testcollision_bullet1pix(v, xtrans + 1, ytrans - 1); b |= testcollision_bullet1pix(v, xtrans, ytrans - 1); } b |= testcollision_bullet1pix(v, xtrans + 1, ytrans); } else if (ytrans - 1 >= 0) { b |= testcollision_bullet1pix(v, xtrans, ytrans - 1); } if (b) { shoot->free = true; bool isProtecting = (v->shield && v->shield_force > 0); if (isProtecting && !v->landed) { v->impactx = shoot->dx; v->impacty = shoot->dy; } return !isProtecting; } } allv++; } return false; } bool collision_backtir_ship(struct vaisseau_data *v, struct vaisseau_data *allv, int nombre_vaisseau) { struct tir_data *backshoot; while (nombre_vaisseau--) { for (int i = 0; i < MAX_TIR; i++) { backshoot = &allv->backtir[i]; if (backshoot->free) continue; // only iterate through the non free shoot int xtrans = backshoot->x - v->xpos; int ytrans = backshoot->y - v->ypos; // if outside of the bounding box if (xtrans < 0 || xtrans >= 32 || ytrans < 0 || ytrans >= 32) continue; bool b = testcollision_bullet1pix(v, xtrans, ytrans); if (xtrans + 1 < 32) { if (ytrans - 1 >= 0) { b |= testcollision_bullet1pix(v, xtrans + 1, ytrans - 1); b |= testcollision_bullet1pix(v, xtrans, ytrans - 1); } b |= testcollision_bullet1pix(v, xtrans + 1, ytrans); } else if (ytrans - 1 >= 0) { b |= testcollision_bullet1pix(v, xtrans, ytrans - 1); } if (b) { backshoot->free = true; bool isProtecting = (v->shield && 
v->shield_force > 0); if (isProtecting && !v->landed) { v->impactx = backshoot->dx; v->impacty = backshoot->dy; } return !isProtecting; } } allv++; } return false; } bool collision_debris_ship(struct vaisseau_data *v, struct vaisseau_data *allv, int nombre_vaisseau) { struct debris_data *debris; while (nombre_vaisseau--) { for (int i = 0; i < 8; i++) { debris = &allv->debris[i]; if (!debris->active) continue; int xtrans = debris->x - v->xpos; int ytrans = debris->y - v->ypos; // if outside of the bounding box if (xtrans < 0 || xtrans >= 32 || ytrans < 0 || ytrans >= 32) continue; bool b = testcollision_bullet1pix(v, xtrans, ytrans); if (xtrans + 1 < 32) { if (ytrans - 1 >= 0) { b |= testcollision_bullet1pix(v, xtrans + 1, ytrans - 1); b |= testcollision_bullet1pix(v, xtrans, ytrans - 1); } b |= testcollision_bullet1pix(v, xtrans + 1, ytrans); } else if (ytrans - 1 >= 0) { b |= testcollision_bullet1pix(v, xtrans, ytrans - 1); } if (b) { debris->active = false; bool isProtecting = (v->shield && v->shield_force > 0); if (isProtecting && !v->landed) { v->impactx = (debris->vx); v->impacty = (debris->vy); } return !isProtecting; } } allv++; } return false; } bool collision_dca_ship(struct vaisseau_data *v, struct dca_data *alldca, int nb_dca) { struct tir_data *dca_tir; while (nb_dca--) { for (int i = 0; i < MAX_DCA_TIR; i++) { dca_tir = &alldca->dca_tir[i]; if (dca_tir->free) continue; int xtrans = dca_tir->x - v->xpos; int ytrans = dca_tir->y - v->ypos; // if outside of the bounding box if (xtrans < 0 || xtrans >= 32 || ytrans < 0 || ytrans >= 32) continue; bool b = testcollision_bullet1pix(v, xtrans, ytrans); if (xtrans + 1 < 32) { if (ytrans - 1 >= 0) { b |= testcollision_bullet1pix(v, xtrans + 1, ytrans - 1); b |= testcollision_bullet1pix(v, xtrans, ytrans - 1); } b |= testcollision_bullet1pix(v, xtrans + 1, ytrans); } else if (ytrans - 1 >= 0) { b |= testcollision_bullet1pix(v, xtrans, ytrans - 1); } if (b) { dca_tir->free = true; bool isProtecting = (v->shield && v->shield_force > 0); if (isProtecting && !v->landed) { v->impactx = (dca_tir->dx); v->impacty = (dca_tir->dy); } return !isProtecting; } } alldca++; } return false; }
import hash from 'object-hash' // v('div', { id: 'foo' }, 'Hello!') /** * <div id="foo">Hello!</div> */ export interface VNode { nodeName: string, attributes: object | null, children: Array<VNode | string>, hash?: string, // a mapping of the children, making the hash the key childMap?: any } export const v = function( tag: string, attr: object | null, children: Array<VNode | string> ): VNode { const node = { nodeName: tag, attributes: attr, children: children } if (children && children.length) { node["childMap"] = children.reduce((obj, child, idx) => { child["index"] = idx obj[child["hash"]] = child return obj }, {}) } // Hash the node object using SHA1 string node["hash"] = hash(node) return node } export const r = (vnode) => { let el = document.createElement(vnode.nodeName) for (let attr in vnode.attributes) { el.setAttribute(attr, vnode.attributes[attr]) } vnode.children.forEach(c => { el.appendChild(document.createTextNode(c)) }) return el }
def tag_dataset(self, dataset):
    docid_to_mentions = {}
    for context in dataset.contexts:
        docid = str(context.uri)
        docid_to_mentions[docid] = self.create_mentions(context.mention)
    return docid_to_mentions
// Copyright 2015 The LUCI Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package coordinator import ( logdog "go.chromium.org/luci/logdog/api/endpoints/coordinator/logs/v1" "go.chromium.org/luci/logdog/common/types" ) // getParamsInst is an internal struct that accumulates a Get request and // associated instructions from a series of iterative GetParam applications. type getParamsInst struct { // r is the Get request to populate. r logdog.GetRequest // stateP is the stream state pointer. It is set by State, and, if supplied, // will cause the log request to return stream state. stateP *LogStream } // tailParamsInst is an internal struct that accumulates a Tail request and // associated instructions from a series of iterative TailParam applications. type tailParamsInst struct { // r is the Tail request to populate. r logdog.TailRequest // stateP is the stream state pointer. It is set by State, and, if supplied, // will cause the log request to return stream state. stateP *LogStream // complete instructs the tail call to fetch a complete entry, instead of just // the last log record. complete bool } // GetParam is a condition or parameter to apply to a Get request. type GetParam interface { applyGet(p *getParamsInst) } // TailParam is a condition or parameter to apply to a Tail request. type TailParam interface { applyTail(p *tailParamsInst) } type loadStateParam struct { stateP *LogStream } // WithState returns a Get/Tail parameter that loads the log stream's state into // the supplied LogState pointer. func WithState(stateP *LogStream) interface { GetParam TailParam } { return &loadStateParam{stateP} } func (p *loadStateParam) applyGet(param *getParamsInst) { param.stateP = p.stateP param.r.State = (p.stateP != nil) } func (p *loadStateParam) applyTail(param *tailParamsInst) { param.stateP = p.stateP param.r.State = (p.stateP != nil) } type indexGetParam struct { index types.MessageIndex } // Index returns a stream Get parameter that causes the Get request to // retrieve logs starting at the requested stream index instead of the default, // zero. func Index(i types.MessageIndex) GetParam { return &indexGetParam{i} } func (p *indexGetParam) applyGet(param *getParamsInst) { param.r.Index = int64(p.index) } type limitBytesGetParam struct { limit int } // LimitBytes applies a byte constraint to the returned logs. If the supplied // limit is <= 0, then no byte constraint will be applied and the server will // choose how many logs to return. func LimitBytes(limit int) GetParam { if limit < 0 { limit = 0 } return &limitBytesGetParam{limit} } func (p *limitBytesGetParam) applyGet(param *getParamsInst) { param.r.ByteCount = int32(p.limit) } type limitCountGetParam struct { limit int } // LimitCount applies a count constraint to the returned logs. If the supplied // limit is <= 0, then no count constraint will be applied and the server will // choose how many logs to return. 
func LimitCount(limit int) GetParam { if limit < 0 { limit = 0 } return &limitCountGetParam{limit} } func (p *limitCountGetParam) applyGet(param *getParamsInst) { param.r.LogCount = int32(p.limit) } type nonContiguousGetParam struct{} // NonContiguous returns a stream Get parameter that causes the Get request // to allow non-contiguous records to be returned. By default, only contiguous // records starting from the specific Index will be returned. // // By default, a log stream will return only contiguous records starting at the // requested index. For example, if a stream had: {0, 1, 2, 4, 5} and a request // was made for index 0, Get will return {0, 1, 2}, for index 3 {}, and for // index 4 {4, 5}. // // If NonContiguous is true, a request for 0 will return {0, 1, 2, 4, 5} and so // on. // // Log entries generally should not be missing, but may be if either the logs // are still streaming (since they can be ingested out of order) or if a data // loss or corruption occurs. func NonContiguous() GetParam { return nonContiguousGetParam{} } func (nonContiguousGetParam) applyGet(param *getParamsInst) { param.r.NonContiguous = true } type completeTailParam struct{} // Complete instructs the Tail call to retrieve a complete record. // // If frgmented, the resulting record will be manufactured from its composite // parts, and will not actually represent any single record in the log stream. // The time offset, prefix and stream indices, sequence number, and content will // be derived from the initial log entry in the composite set. // // If the log stream is a TEXT or BINARY stream, no behavior change will // occur, and the last log record will be returned. // // If the log stream is a DATAGRAM stream and the Tail record is parked partial, // additional log entries will be fetched via Get and the full log stream will // be assembled. If the partial datagram entry is the "last" in its sequence, // the full datagram ending with it will be returned. If it's partial in the // middle of a sequence, the previous complete datagram will be returned. func Complete() TailParam { return completeTailParam{} } func (completeTailParam) applyTail(param *tailParamsInst) { param.complete = true }
def replaceAll(original, pattern, format):
    matches_and_replacements = []

    def _replaceWithFormat(match):
        formatted_match = None
        if match.groupdict():
            try:
                formatted_match = format % match.groupdict()
            except TypeError:
                pass
        if (not formatted_match) and match.groups():
            try:
                formatted_match = format % match.groups()
            except TypeError:
                pass
        if (not formatted_match):
            try:
                formatted_match = format % match.group()
            except TypeError:
                formatted_match = format
        matches_and_replacements.append(match.group())
        matches_and_replacements.append(formatted_match)
        return formatted_match

    replaced = compileRegex(pattern).sub(_replaceWithFormat, original)
    return replaced, matches_and_replacements
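A short usage sketch for `replaceAll`. It assumes `compileRegex(pattern)` behaves like `re.compile(pattern)` (perhaps with caching), which the snippet does not show; the date-reordering pattern, format string, and input text are invented for illustration.

```python
# Hypothetical usage: reorder "YYYY-MM" date prefixes using named groups.
# Assumes compileRegex(pattern) is equivalent to re.compile(pattern);
# the pattern, format string, and input are illustrative only.
text = "released 2023-01-05 and 2024-11-20"
replaced, log = replaceAll(text, r"(?P<y>\d{4})-(?P<m>\d{2})", "%(m)s/%(y)s")

print(replaced)  # "released 01/2023-05 and 11/2024-20"
print(log)       # ['2023-01', '01/2023', '2024-11', '11/2024']
```

Because the format string is applied with `%` against the named-group dict first, positional groups second, and the whole match last, the same helper also works with unnamed groups or a constant replacement string.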
/************************************************************** * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. * *************************************************************/ // MARKER(update_precomp.py): autogen include statement, do not remove #include "precompiled_filter.hxx" #include "eschesdo.hxx" #include <svx/svdxcgv.hxx> #include <svx/svdomedia.hxx> #include <svx/xflftrit.hxx> #include <filter/msfilter/escherex.hxx> #include <svx/unoapi.hxx> #include <svx/svdobj.hxx> #include <svx/svdoashp.hxx> #include <svx/svdoole2.hxx> #include <svx/svdmodel.hxx> #include <editeng/outlobj.hxx> #include <vcl/gradient.hxx> #include <vcl/graph.hxx> #include <vcl/cvtgrf.hxx> #include <vcl/svapp.hxx> #include <vcl/wrkwin.hxx> #include <tools/stream.hxx> #include <tools/zcodec.hxx> #include <svx/svdopath.hxx> #include <stdlib.h> #include <svtools/filter.hxx> #include "svx/EnhancedCustomShapeTypeNames.hxx" #include "svx/EnhancedCustomShapeGeometry.hxx" #include <svx/EnhancedCustomShapeFunctionParser.hxx> #include "svx/EnhancedCustomShape2d.hxx" #include <com/sun/star/beans/PropertyValues.hpp> #include <com/sun/star/beans/XPropertyState.hpp> #include <com/sun/star/awt/GradientStyle.hpp> #include <com/sun/star/awt/RasterOperation.hpp> #include <com/sun/star/awt/Gradient.hpp> #include <com/sun/star/drawing/LineStyle.hpp> #include <com/sun/star/drawing/LineJoint.hpp> #include <com/sun/star/drawing/LineCap.hpp> #include <com/sun/star/drawing/FillStyle.hpp> #include <com/sun/star/drawing/LineDash.hpp> #include <com/sun/star/drawing/BezierPoint.hpp> #include <com/sun/star/drawing/PolyPolygonBezierCoords.hpp> #include <com/sun/star/drawing/ConnectorType.hpp> #include <com/sun/star/drawing/ConnectionType.hpp> #include <com/sun/star/drawing/CircleKind.hpp> #include <com/sun/star/drawing/PointSequence.hpp> #include <com/sun/star/drawing/FlagSequence.hpp> #include <com/sun/star/drawing/PolygonFlags.hpp> #include <com/sun/star/text/WritingMode.hpp> #include <com/sun/star/drawing/TextVerticalAdjust.hpp> #include <com/sun/star/drawing/TextHorizontalAdjust.hpp> #include <com/sun/star/drawing/EnhancedCustomShapeSegment.hpp> #include <com/sun/star/drawing/EnhancedCustomShapeParameterType.hpp> #include <com/sun/star/drawing/EnhancedCustomShapeGluePointType.hpp> #include <com/sun/star/drawing/EnhancedCustomShapeSegmentCommand.hpp> #include <com/sun/star/drawing/EnhancedCustomShapeTextFrame.hpp> #include <com/sun/star/drawing/EnhancedCustomShapeAdjustmentValue.hpp> #include <com/sun/star/drawing/EnhancedCustomShapeTextPathMode.hpp> #include <com/sun/star/drawing/ProjectionMode.hpp> #include <com/sun/star/text/XSimpleText.hpp> #include <com/sun/star/drawing/ShadeMode.hpp> #include <com/sun/star/drawing/TextFitToSizeType.hpp> #include <vcl/hatch.hxx> #include <com/sun/star/awt/XGraphics.hpp> #include 
<com/sun/star/awt/FontSlant.hpp> #include <com/sun/star/awt/FontWeight.hpp> #include <com/sun/star/drawing/ColorMode.hpp> #include <com/sun/star/drawing/Position3D.hpp> #include <com/sun/star/drawing/Direction3D.hpp> #include <com/sun/star/text/GraphicCrop.hpp> #include <unotools/ucbstreamhelper.hxx> #include <unotools/localfilehelper.hxx> #include <comphelper/extract.hxx> #include <toolkit/helper/vclunohelper.hxx> #include <vcl/virdev.hxx> #include <rtl/crc.h> #include <vos/xception.hxx> using namespace vos; using namespace ::rtl; using namespace ::com::sun::star; // --------------------------------------------------------------------------------------------- EscherExContainer::EscherExContainer( SvStream& rSt, const sal_uInt16 nRecType, const sal_uInt16 nInstance ) : rStrm ( rSt ) { rStrm << (sal_uInt32)( ( 0xf | ( nInstance << 4 ) ) | ( nRecType << 16 ) ) << (sal_uInt32)0; nContPos = rStrm.Tell(); } EscherExContainer::~EscherExContainer() { sal_uInt32 nPos = rStrm.Tell(); sal_uInt32 nSize= nPos - nContPos; if ( nSize ) { rStrm.Seek( nContPos - 4 ); rStrm << nSize; rStrm.Seek( nPos ); } } EscherExAtom::EscherExAtom( SvStream& rSt, const sal_uInt16 nRecType, const sal_uInt16 nInstance, const sal_uInt8 nVersion ) : rStrm ( rSt ) { rStrm << (sal_uInt32)( ( nVersion | ( nInstance << 4 ) ) | ( nRecType << 16 ) ) << (sal_uInt32)0; nContPos = rStrm.Tell(); } EscherExAtom::~EscherExAtom() { sal_uInt32 nPos = rStrm.Tell(); sal_uInt32 nSize= nPos - nContPos; if ( nSize ) { rStrm.Seek( nContPos - 4 ); rStrm << nSize; rStrm.Seek( nPos ); } } // --------------------------------------------------------------------------------------------- EscherExClientRecord_Base::~EscherExClientRecord_Base() { } EscherExClientAnchor_Base::~EscherExClientAnchor_Base() { } // --------------------------------------------------------------------------------------------- void EscherPropertyContainer::ImplInit() { nSortCount = 0; nCountCount = 0; nCountSize = 0; nSortBufSize = 64; bHasComplexData = sal_False; pSortStruct = new EscherPropSortStruct[ nSortBufSize ]; } EscherPropertyContainer::EscherPropertyContainer() : pGraphicProvider ( NULL ), pPicOutStrm ( NULL ) { ImplInit(); }; EscherPropertyContainer::EscherPropertyContainer( EscherGraphicProvider& rGraphProv, SvStream* pPiOutStrm, Rectangle& rBoundRect ) : pGraphicProvider ( &rGraphProv ), pPicOutStrm ( pPiOutStrm ), pShapeBoundRect ( &rBoundRect ) { ImplInit(); } EscherPropertyContainer::~EscherPropertyContainer() { if ( bHasComplexData ) { while ( nSortCount-- ) delete[] pSortStruct[ nSortCount ].pBuf; } delete[] pSortStruct; }; void EscherPropertyContainer::AddOpt( sal_uInt16 nPropID, sal_uInt32 nPropValue, sal_Bool bBlib ) { AddOpt( nPropID, bBlib, nPropValue, NULL, 0 ); } void EscherPropertyContainer::AddOpt( sal_uInt16 nPropID, const rtl::OUString& rString ) { sal_Int32 j, i, nLen = rString.getLength() * 2 + 2; sal_uInt8* pBuf = new sal_uInt8[ nLen ]; for ( j = i = 0; i < rString.getLength(); i++ ) { sal_uInt16 nChar = (sal_uInt16)rString[ i ]; pBuf[ j++ ] = (sal_uInt8)nChar; pBuf[ j++ ] = (sal_uInt8)( nChar >> 8 ); } pBuf[ j++ ] = 0; pBuf[ j++ ] = 0; AddOpt( nPropID, sal_True, nLen, pBuf, nLen ); } void EscherPropertyContainer::AddOpt( sal_uInt16 nPropID, sal_Bool bBlib, sal_uInt32 nPropValue, sal_uInt8* pProp, sal_uInt32 nPropSize ) { if ( bBlib ) // bBlib is only valid when fComplex = 0 nPropID |= 0x4000; if ( pProp ) nPropID |= 0x8000; // fComplex = sal_True; sal_uInt32 i; for( i = 0; i < nSortCount; i++ ) { if ( ( pSortStruct[ i ].nPropId &~0xc000 ) == ( 
nPropID &~0xc000 ) ) // pruefen, ob Property nur ersetzt wird { pSortStruct[ i ].nPropId = nPropID; if ( pSortStruct[ i ].pBuf ) { nCountSize -= pSortStruct[ i ].nPropSize; delete[] pSortStruct[ i ].pBuf; } pSortStruct[ i ].pBuf = pProp; pSortStruct[ i ].nPropSize = nPropSize; pSortStruct[ i ].nPropValue = nPropValue; if ( pProp ) nCountSize += nPropSize; return; } } nCountCount++; nCountSize += 6; if ( nSortCount == nSortBufSize ) // buffer vergroessern { nSortBufSize <<= 1; EscherPropSortStruct* pTemp = new EscherPropSortStruct[ nSortBufSize ]; for( i = 0; i < nSortCount; i++ ) { pTemp[ i ] = pSortStruct[ i ]; } delete pSortStruct; pSortStruct = pTemp; } pSortStruct[ nSortCount ].nPropId = nPropID; // property einfuegen pSortStruct[ nSortCount ].pBuf = pProp; pSortStruct[ nSortCount ].nPropSize = nPropSize; pSortStruct[ nSortCount++ ].nPropValue = nPropValue; if ( pProp ) { nCountSize += nPropSize; bHasComplexData = sal_True; } } sal_Bool EscherPropertyContainer::GetOpt( sal_uInt16 nPropId, sal_uInt32& rPropValue ) const { EscherPropSortStruct aPropStruct; if ( GetOpt( nPropId, aPropStruct ) ) { rPropValue = aPropStruct.nPropValue; return sal_True; } return sal_False; } sal_Bool EscherPropertyContainer::GetOpt( sal_uInt16 nPropId, EscherPropSortStruct& rPropValue ) const { for( sal_uInt32 i = 0; i < nSortCount; i++ ) { if ( ( pSortStruct[ i ].nPropId &~0xc000 ) == ( nPropId &~0xc000 ) ) { rPropValue = pSortStruct[ i ]; return sal_True; } } return sal_False; } EscherProperties EscherPropertyContainer::GetOpts() const { EscherProperties aVector; for ( sal_uInt32 i = 0; i < nSortCount; ++i ) aVector.push_back( pSortStruct[ i ] ); return aVector; } extern "C" int __LOADONCALLAPI EscherPropSortFunc( const void* p1, const void* p2 ) { sal_Int16 nID1 = ((EscherPropSortStruct*)p1)->nPropId &~0xc000; sal_Int16 nID2 = ((EscherPropSortStruct*)p2)->nPropId &~0xc000; if( nID1 < nID2 ) return -1; else if( nID1 > nID2 ) return 1; else return 0; } void EscherPropertyContainer::Commit( SvStream& rSt, sal_uInt16 nVersion, sal_uInt16 nRecType ) { rSt << (sal_uInt16)( ( nCountCount << 4 ) | ( nVersion & 0xf ) ) << nRecType << nCountSize; if ( nSortCount ) { qsort( pSortStruct, nSortCount, sizeof( EscherPropSortStruct ), EscherPropSortFunc ); sal_uInt32 i; for ( i = 0; i < nSortCount; i++ ) { sal_uInt32 nPropValue = pSortStruct[ i ].nPropValue; sal_uInt16 nPropId = pSortStruct[ i ].nPropId; rSt << nPropId << nPropValue; } if ( bHasComplexData ) { for ( i = 0; i < nSortCount; i++ ) { if ( pSortStruct[ i ].pBuf ) rSt.Write( pSortStruct[ i ].pBuf, pSortStruct[ i ].nPropSize ); } } } } sal_Bool EscherPropertyContainer::IsFontWork() const { sal_uInt32 nTextPathFlags = 0; GetOpt( DFF_Prop_gtextFStrikethrough, nTextPathFlags ); return ( nTextPathFlags & 0x4000 ) != 0; } sal_uInt32 EscherPropertyContainer::ImplGetColor( const sal_uInt32 nSOColor, sal_Bool bSwap ) { if ( bSwap ) { sal_uInt32 nColor = nSOColor & 0xff00; // GRUEN nColor |= (sal_uInt8)( nSOColor ) << 16; // ROT nColor |= (sal_uInt8)( nSOColor >> 16 ); // BLAU return nColor; } else return nSOColor & 0xffffff; } sal_uInt32 EscherPropertyContainer::GetGradientColor( const ::com::sun::star::awt::Gradient* pGradient, sal_uInt32 nStartColor ) { sal_uInt32 nIntensity = 100; Color aColor; if ( pGradient ) { if ( nStartColor & 1 ) { nIntensity = pGradient->StartIntensity; aColor = pGradient->StartColor; } else { nIntensity = pGradient->EndIntensity; aColor = pGradient->EndColor; } } sal_uInt32 nRed = ( ( aColor.GetRed() * nIntensity ) / 100 ); sal_uInt32 nGreen 
= ( ( aColor.GetGreen() * nIntensity ) / 100 ) << 8; sal_uInt32 nBlue = ( ( aColor.GetBlue() * nIntensity ) / 100 ) << 16; return nRed | nGreen | nBlue; } void EscherPropertyContainer::CreateGradientProperties( const ::com::sun::star::awt::Gradient & rGradient ) { sal_uInt32 nFillType = ESCHER_FillShadeScale; sal_uInt32 nAngle = 0; sal_uInt32 nFillFocus = 0; sal_uInt32 nFillLR = 0; sal_uInt32 nFillTB = 0; sal_uInt32 nFirstColor = 0; bool bWriteFillTo = false; switch ( rGradient.Style ) { case ::com::sun::star::awt::GradientStyle_LINEAR : case ::com::sun::star::awt::GradientStyle_AXIAL : { nFillType = ESCHER_FillShadeScale; nAngle = (rGradient.Angle * 0x10000) / 10; nFillFocus = (sal::static_int_cast<int>(rGradient.Style) == sal::static_int_cast<int>(GradientStyle_LINEAR)) ? 0 : 50; } break; case ::com::sun::star::awt::GradientStyle_RADIAL : case ::com::sun::star::awt::GradientStyle_ELLIPTICAL : case ::com::sun::star::awt::GradientStyle_SQUARE : case ::com::sun::star::awt::GradientStyle_RECT : { nFillLR = (rGradient.XOffset * 0x10000) / 100; nFillTB = (rGradient.YOffset * 0x10000) / 100; if ( ((nFillLR > 0) && (nFillLR < 0x10000)) || ((nFillTB > 0) && (nFillTB < 0x10000)) ) nFillType = ESCHER_FillShadeShape; else nFillType = ESCHER_FillShadeCenter; nFirstColor = 1; bWriteFillTo = true; } break; case ::com::sun::star::awt::GradientStyle_MAKE_FIXED_SIZE : break; } AddOpt( ESCHER_Prop_fillType, nFillType ); AddOpt( ESCHER_Prop_fillAngle, nAngle ); AddOpt( ESCHER_Prop_fillColor, GetGradientColor( &rGradient, nFirstColor ) ); AddOpt( ESCHER_Prop_fillBackColor, GetGradientColor( &rGradient, nFirstColor ^ 1 ) ); AddOpt( ESCHER_Prop_fillFocus, nFillFocus ); if ( bWriteFillTo ) { AddOpt( ESCHER_Prop_fillToLeft, nFillLR ); AddOpt( ESCHER_Prop_fillToTop, nFillTB ); AddOpt( ESCHER_Prop_fillToRight, nFillLR ); AddOpt( ESCHER_Prop_fillToBottom, nFillTB ); } } void EscherPropertyContainer::CreateGradientProperties( const ::com::sun::star::uno::Reference< ::com::sun::star::beans::XPropertySet > & rXPropSet , sal_Bool bTransparentGradient) { ::com::sun::star::uno::Any aAny; ::com::sun::star::awt::Gradient* pGradient = NULL; sal_uInt32 nFillType = ESCHER_FillShadeScale; sal_Int32 nAngle = 0; sal_uInt32 nFillFocus = 0; sal_uInt32 nFillLR = 0; sal_uInt32 nFillTB = 0; sal_uInt32 nFirstColor = 0;//like the control var nChgColors in import logic bool bWriteFillTo = false; //Transparency gradient: Means the third setting in transparency page is set if (bTransparentGradient && EscherPropertyValueHelper::GetPropertyValue( aAny, rXPropSet, String( RTL_CONSTASCII_USTRINGPARAM( "FillTransparenceGradient" ) ), sal_False ) ) { pGradient = (::com::sun::star::awt::Gradient*)aAny.getValue(); ::com::sun::star::uno::Any aAnyTemp; const rtl::OUString aPropName( String( RTL_CONSTASCII_USTRINGPARAM( "FillStyle" ) ) ); if ( EscherPropertyValueHelper::GetPropertyValue( aAnyTemp, rXPropSet, aPropName, sal_False ) ) { ::com::sun::star::drawing::FillStyle eFS; if ( ! ( aAnyTemp >>= eFS ) ) eFS = ::com::sun::star::drawing::FillStyle_SOLID; //solid and transparency if ( eFS == ::com::sun::star::drawing::FillStyle_SOLID) { if ( EscherPropertyValueHelper::GetPropertyValue( aAnyTemp, rXPropSet, String( RTL_CONSTASCII_USTRINGPARAM( "FillColor" ) ), sal_False ) ) { pGradient->StartColor = ImplGetColor( *((sal_uInt32*)aAnyTemp.getValue()), sal_False ); pGradient->EndColor = ImplGetColor( *((sal_uInt32*)aAnyTemp.getValue()), sal_False ); } } //gradient and transparency. 
else if( eFS == ::com::sun::star::drawing::FillStyle_GRADIENT ) { if ( EscherPropertyValueHelper::GetPropertyValue( aAny, rXPropSet, String( RTL_CONSTASCII_USTRINGPARAM( "FillGradient" ) ), sal_False ) ) pGradient = (::com::sun::star::awt::Gradient*)aAny.getValue(); } } } //Not transparency gradient else if ( EscherPropertyValueHelper::GetPropertyValue( aAny, rXPropSet, String( RTL_CONSTASCII_USTRINGPARAM( "FillGradient" ) ), sal_False ) ) { pGradient = (::com::sun::star::awt::Gradient*)aAny.getValue(); } if ( pGradient ) { switch ( pGradient->Style ) { case ::com::sun::star::awt::GradientStyle_LINEAR : case ::com::sun::star::awt::GradientStyle_AXIAL : { nFillType = ESCHER_FillShadeScale; nAngle = pGradient->Angle; while ( nAngle > 0 ) nAngle -= 3600; while ( nAngle <= -3600 ) nAngle += 3600; //Value of the real number = Integral + (Fractional / 65536.0) nAngle = ( nAngle * 0x10000) / 10; nFillFocus = (pGradient->Style == ::com::sun::star::awt::GradientStyle_LINEAR) ? ( pGradient->XOffset + pGradient->YOffset )/2 : -50; if( !nFillFocus ) nFirstColor=nFirstColor ^ 1; if ( !nAngle ) nFirstColor=nFirstColor ^ 1; } break; case ::com::sun::star::awt::GradientStyle_RADIAL : case ::com::sun::star::awt::GradientStyle_ELLIPTICAL : case ::com::sun::star::awt::GradientStyle_SQUARE : case ::com::sun::star::awt::GradientStyle_RECT : { //according to the import logic and rect type fill** value nFillLR = (pGradient->XOffset * 0x10000) / 100; nFillTB = (pGradient->YOffset * 0x10000) / 100; if ( ((nFillLR > 0) && (nFillLR < 0x10000)) || ((nFillTB > 0) && (nFillTB < 0x10000)) ) nFillType = ESCHER_FillShadeShape; else nFillType = ESCHER_FillShadeCenter; nFirstColor = 1; bWriteFillTo = true; } break; default: break; } } AddOpt( ESCHER_Prop_fillType, nFillType ); AddOpt( ESCHER_Prop_fillAngle, nAngle ); AddOpt( ESCHER_Prop_fillColor, GetGradientColor( pGradient, nFirstColor ) ); AddOpt( ESCHER_Prop_fillBackColor, GetGradientColor( pGradient, nFirstColor ^ 1 ) ); AddOpt( ESCHER_Prop_fillFocus, nFillFocus ); if ( bWriteFillTo ) { //according to rect type fillTo** value if(nFillLR) { AddOpt( ESCHER_Prop_fillToLeft, nFillLR ); AddOpt( ESCHER_Prop_fillToRight, nFillLR ); } if(nFillTB) { AddOpt( ESCHER_Prop_fillToTop, nFillTB ); AddOpt( ESCHER_Prop_fillToBottom, nFillTB ); } } //Transparency gradient if (bTransparentGradient && EscherPropertyValueHelper::GetPropertyValue( aAny, rXPropSet, String( RTL_CONSTASCII_USTRINGPARAM( "FillTransparenceGradient" ) ), sal_False ) ) { pGradient = (::com::sun::star::awt::Gradient*)aAny.getValue(); if ( pGradient ) { sal_uInt32 nBlue = GetGradientColor( pGradient, nFirstColor ) >> 16; AddOpt( ESCHER_Prop_fillOpacity,( ( 100 - ( nBlue * 100 / 255 ) ) << 16 ) / 100 ); nBlue = GetGradientColor( pGradient, nFirstColor ^ 1 ) >>16 ; AddOpt( ESCHER_Prop_fillBackOpacity,( ( 100 - ( nBlue * 100 / 255 ) ) << 16 )/ 100 ); } } } void EscherPropertyContainer::CreateFillProperties( const ::com::sun::star::uno::Reference< ::com::sun::star::beans::XPropertySet > & rXPropSet, sal_Bool bEdge , const ::com::sun::star::uno::Reference< ::com::sun::star::drawing::XShape > & rXShape ) { if ( rXShape.is() ) { SdrObject* pObj = GetSdrObjectFromXShape( rXShape ); if ( pObj ) { SfxItemSet aAttr( pObj->GetMergedItemSet() ); //tranparency with gradient. 
Means the third setting in transparency page is set sal_Bool bTransparentGradient = ( aAttr.GetItemState( XATTR_FILLFLOATTRANSPARENCE ) == SFX_ITEM_SET ) && ( (const XFillFloatTransparenceItem&) aAttr.Get( XATTR_FILLFLOATTRANSPARENCE ) ).IsEnabled(); CreateFillProperties( rXPropSet, bEdge, bTransparentGradient ); } } } void EscherPropertyContainer::CreateFillProperties( const ::com::sun::star::uno::Reference< ::com::sun::star::beans::XPropertySet > & rXPropSet, sal_Bool bEdge , sal_Bool bTransparentGradient) { ::com::sun::star::uno::Any aAny; AddOpt( ESCHER_Prop_WrapText, ESCHER_WrapNone ); AddOpt( ESCHER_Prop_AnchorText, ESCHER_AnchorMiddle ); sal_uInt32 nFillBackColor = 0; const rtl::OUString aPropName( String( RTL_CONSTASCII_USTRINGPARAM( "FillStyle" ) ) ); if ( EscherPropertyValueHelper::GetPropertyValue( aAny, rXPropSet, aPropName, sal_False ) ) { ::com::sun::star::drawing::FillStyle eFS; if ( ! ( aAny >>= eFS ) ) eFS = ::com::sun::star::drawing::FillStyle_SOLID; switch( eFS ) { case ::com::sun::star::drawing::FillStyle_GRADIENT : { CreateGradientProperties( rXPropSet , bTransparentGradient ); AddOpt( ESCHER_Prop_fNoFillHitTest, 0x140014 ); } break; case ::com::sun::star::drawing::FillStyle_BITMAP : { CreateGraphicProperties( rXPropSet, String( RTL_CONSTASCII_USTRINGPARAM( "FillBitmapURL" ) ), sal_True ); AddOpt( ESCHER_Prop_fNoFillHitTest, 0x140014 ); AddOpt( ESCHER_Prop_fillBackColor, nFillBackColor ); } break; case ::com::sun::star::drawing::FillStyle_HATCH : { CreateGraphicProperties( rXPropSet, String( RTL_CONSTASCII_USTRINGPARAM( "FillHatch" ) ), sal_True ); } break; case ::com::sun::star::drawing::FillStyle_SOLID : default: { if ( bTransparentGradient ) CreateGradientProperties( rXPropSet , bTransparentGradient ); else { ::com::sun::star::beans::PropertyState ePropState = EscherPropertyValueHelper::GetPropertyState( rXPropSet, aPropName ); if ( ePropState == ::com::sun::star::beans::PropertyState_DIRECT_VALUE ) AddOpt( ESCHER_Prop_fillType, ESCHER_FillSolid ); if ( EscherPropertyValueHelper::GetPropertyValue( aAny, rXPropSet, String( RTL_CONSTASCII_USTRINGPARAM( "FillColor" ) ), sal_False ) ) { sal_uInt32 nFillColor = ImplGetColor( *((sal_uInt32*)aAny.getValue()) ); nFillBackColor = nFillColor ^ 0xffffff; AddOpt( ESCHER_Prop_fillColor, nFillColor ); } AddOpt( ESCHER_Prop_fNoFillHitTest, 0x100010 ); AddOpt( ESCHER_Prop_fillBackColor, nFillBackColor ); } break; } case ::com::sun::star::drawing::FillStyle_NONE : AddOpt( ESCHER_Prop_fNoFillHitTest, 0x100000 ); break; } if ( eFS != ::com::sun::star::drawing::FillStyle_NONE ) { sal_uInt16 nTransparency = ( EscherPropertyValueHelper::GetPropertyValue( aAny, rXPropSet, String( RTL_CONSTASCII_USTRINGPARAM( "FillTransparence" ) ), sal_True ) ) ? 
*((sal_Int16*)aAny.getValue() ) : 0; if ( nTransparency ) AddOpt( ESCHER_Prop_fillOpacity, ( ( 100 - nTransparency ) << 16 ) / 100 ); } } CreateLineProperties( rXPropSet, bEdge ); } void EscherPropertyContainer::CreateTextProperties( const uno::Reference< beans::XPropertySet > & rXPropSet, sal_uInt32 nTextId, const sal_Bool bIsCustomShape, const sal_Bool bIsTextFrame ) { uno::Any aAny; text::WritingMode eWM( text::WritingMode_LR_TB ); drawing::TextVerticalAdjust eVA( drawing::TextVerticalAdjust_TOP ); drawing::TextHorizontalAdjust eHA( drawing::TextHorizontalAdjust_LEFT ); sal_Int32 nLeft ( 0 ); sal_Int32 nTop ( 0 ); sal_Int32 nRight ( 0 ); sal_Int32 nBottom ( 0 ); // used with normal shapes: sal_Bool bAutoGrowWidth ( sal_False ); sal_Bool bAutoGrowHeight ( sal_False ); // used with ashapes: sal_Bool bWordWrap ( sal_False ); sal_Bool bAutoGrowSize ( sal_False ); if ( EscherPropertyValueHelper::GetPropertyValue( aAny, rXPropSet, String( RTL_CONSTASCII_USTRINGPARAM( "TextWritingMode" ) ), sal_True ) ) aAny >>= eWM; if ( EscherPropertyValueHelper::GetPropertyValue( aAny, rXPropSet, String( RTL_CONSTASCII_USTRINGPARAM( "TextVerticalAdjust" ) ), sal_True ) ) aAny >>= eVA; if ( EscherPropertyValueHelper::GetPropertyValue( aAny, rXPropSet, String( RTL_CONSTASCII_USTRINGPARAM( "TextHorizontalAdjust" ) ), sal_True ) ) aAny >>= eHA; if ( bIsCustomShape ) { if ( EscherPropertyValueHelper::GetPropertyValue( aAny, rXPropSet, String( RTL_CONSTASCII_USTRINGPARAM( "TextWordWrap" ) ), sal_False ) ) aAny >>= bWordWrap; if ( EscherPropertyValueHelper::GetPropertyValue( aAny, rXPropSet, String( RTL_CONSTASCII_USTRINGPARAM( "TextAutoGrowHeight" ) ), sal_True ) ) aAny >>= bAutoGrowSize; } else if ( bIsTextFrame ) { if ( EscherPropertyValueHelper::GetPropertyValue( aAny, rXPropSet, String( RTL_CONSTASCII_USTRINGPARAM( "TextAutoGrowWidth" ) ), sal_True ) ) aAny >>= bAutoGrowWidth; // i63936 not setting autogrowheight, because otherwise // the minframeheight of the text will be ignored // // if ( EscherPropertyValueHelper::GetPropertyValue( aAny, rXPropSet, String( RTL_CONSTASCII_USTRINGPARAM( "TextAutoGrowHeight" ) ), sal_True ) ) // aAny >>= bAutoGrowHeight; } if ( EscherPropertyValueHelper::GetPropertyValue( aAny, rXPropSet, String( RTL_CONSTASCII_USTRINGPARAM( "TextLeftDistance" ) ) ) ) aAny >>= nLeft; if ( EscherPropertyValueHelper::GetPropertyValue( aAny, rXPropSet, String( RTL_CONSTASCII_USTRINGPARAM( "TextUpperDistance" ) ) ) ) aAny >>= nTop; if ( EscherPropertyValueHelper::GetPropertyValue( aAny, rXPropSet, String( RTL_CONSTASCII_USTRINGPARAM( "TextRightDistance" ) ) ) ) aAny >>= nRight; if ( EscherPropertyValueHelper::GetPropertyValue( aAny, rXPropSet, String( RTL_CONSTASCII_USTRINGPARAM( "TextLowerDistance" ) ) ) ) aAny >>= nBottom; /* if ( rObj.ImplGetPropertyValue( ::rtl::OUString::createFromAscii("TextWritingMode") ) ) { ::com::sun::star::text::WritingMode eMode; rObj.GetUsrAny() >>= eMode; switch (eMode) { case ::com::sun::star::text::WritingMode_TB_RL: //Well if it so happens that we are fliped 180 we can use //this instead. 
if (rObj.GetAngle() == 18000) eFlow = ESCHER_txflBtoT; else eFlow = ESCHER_txflTtoBA; break; case ::com::sun::star::text::WritingMode_RL_TB: eDir = ESCHER_txdirRTL; break; } } */ ESCHER_AnchorText eAnchor = ESCHER_AnchorTop; ESCHER_WrapMode eWrapMode = ESCHER_WrapSquare; sal_uInt32 nTextAttr = 0x40004; // rotate text with shape if ( eWM == text::WritingMode_TB_RL ) { // verical writing switch ( eHA ) { case drawing::TextHorizontalAdjust_LEFT : eAnchor = ESCHER_AnchorBottom; break; case drawing::TextHorizontalAdjust_CENTER : eAnchor = ESCHER_AnchorMiddle; break; default : case drawing::TextHorizontalAdjust_BLOCK : case drawing::TextHorizontalAdjust_RIGHT : eAnchor = ESCHER_AnchorTop; break; } if ( eVA == drawing::TextVerticalAdjust_CENTER ) { switch ( eAnchor ) { case ESCHER_AnchorMiddle : eAnchor = ESCHER_AnchorMiddleCentered; break; case ESCHER_AnchorBottom : eAnchor = ESCHER_AnchorBottomCentered; break; default : case ESCHER_AnchorTop : eAnchor = ESCHER_AnchorTopCentered; break; } } if ( bIsCustomShape ) { if ( bWordWrap ) eWrapMode = ESCHER_WrapSquare; else eWrapMode = ESCHER_WrapNone; if ( bAutoGrowSize ) nTextAttr |= 0x20002; } else { if ( bAutoGrowHeight ) eWrapMode = ESCHER_WrapNone; if ( bAutoGrowWidth ) nTextAttr |= 0x20002; } AddOpt( ESCHER_Prop_txflTextFlow, ESCHER_txflTtoBA ); // rotate text within shape by 90 } else { // normal from left to right switch ( eVA ) { case drawing::TextVerticalAdjust_CENTER : eAnchor = ESCHER_AnchorMiddle; break; case drawing::TextVerticalAdjust_BOTTOM : eAnchor = ESCHER_AnchorBottom; break; default : case drawing::TextVerticalAdjust_TOP : eAnchor = ESCHER_AnchorTop; break; } if ( eHA == drawing::TextHorizontalAdjust_CENTER ) { switch( eAnchor ) { case ESCHER_AnchorMiddle : eAnchor = ESCHER_AnchorMiddleCentered; break; case ESCHER_AnchorBottom : eAnchor = ESCHER_AnchorBottomCentered; break; case ESCHER_AnchorTop : eAnchor = ESCHER_AnchorTopCentered; break; default: break; } } if ( bIsCustomShape ) { if ( bWordWrap ) eWrapMode = ESCHER_WrapSquare; else eWrapMode = ESCHER_WrapNone; if ( bAutoGrowSize ) nTextAttr |= 0x20002; } else { if ( bAutoGrowWidth ) eWrapMode = ESCHER_WrapNone; if ( bAutoGrowHeight ) nTextAttr |= 0x20002; } } AddOpt( ESCHER_Prop_dxTextLeft, nLeft * 360 ); AddOpt( ESCHER_Prop_dxTextRight, nRight * 360 ); AddOpt( ESCHER_Prop_dyTextTop, nTop * 360 ); AddOpt( ESCHER_Prop_dyTextBottom, nBottom * 360 ); AddOpt( ESCHER_Prop_WrapText, eWrapMode ); AddOpt( ESCHER_Prop_AnchorText, eAnchor ); AddOpt( ESCHER_Prop_FitTextToShape, nTextAttr ); if ( nTextId ) AddOpt( ESCHER_Prop_lTxid, nTextId ); } sal_Bool EscherPropertyContainer::GetLineArrow( const sal_Bool bLineStart, const ::com::sun::star::uno::Reference< ::com::sun::star::beans::XPropertySet > & rXPropSet, ESCHER_LineEnd& reLineEnd, sal_Int32& rnArrowLength, sal_Int32& rnArrowWidth ) { static String sLineStart ( RTL_CONSTASCII_USTRINGPARAM( "LineStart" ) ); static String sLineStartName( RTL_CONSTASCII_USTRINGPARAM( "LineStartName" ) ); static String sLineEnd ( RTL_CONSTASCII_USTRINGPARAM( "LineEnd" ) ); static String sLineEndName ( RTL_CONSTASCII_USTRINGPARAM( "LineEndName" ) ); const String sLine ( bLineStart ? sLineStart : sLineEnd ); const String sLineName ( bLineStart ? 
sLineStartName : sLineEndName ); sal_Bool bIsArrow = sal_False; ::com::sun::star::uno::Any aAny; if ( EscherPropertyValueHelper::GetPropertyValue( aAny, rXPropSet, sLine, sal_False ) ) { PolyPolygon aPolyPoly( EscherPropertyContainer::GetPolyPolygon( aAny ) ); if ( aPolyPoly.Count() && aPolyPoly[ 0 ].GetSize() ) { bIsArrow = sal_True; reLineEnd = ESCHER_LineArrowEnd; rnArrowLength = 1; rnArrowWidth = 1; if ( EscherPropertyValueHelper::GetPropertyValue( aAny, rXPropSet, sLineName, sal_False ) ) { String aArrowStartName = *(::rtl::OUString*)aAny.getValue(); rtl::OUString aApiName; sal_Int16 nWhich = bLineStart ? XATTR_LINESTART : XATTR_LINEEND; SvxUnogetApiNameForItem( nWhich, aArrowStartName, aApiName ); sal_Bool bIsMapped = sal_True; if ( aApiName.getLength() ) { /* todo: calculate the best option for ArrowLenght and ArrowWidth */ if ( aApiName.equalsAscii( "Arrow concave" ) ) reLineEnd = ESCHER_LineArrowStealthEnd; else if ( aApiName.equalsAscii( "Square 45" ) ) reLineEnd = ESCHER_LineArrowDiamondEnd; else if ( aApiName.equalsAscii( "Small Arrow" ) ) reLineEnd = ESCHER_LineArrowEnd; else if ( aApiName.equalsAscii( "Dimension Lines" ) ) { rnArrowLength = 0; rnArrowWidth = 2; reLineEnd = ESCHER_LineArrowOvalEnd; } else if ( aApiName.equalsAscii( "Double Arrow" ) ) reLineEnd = ESCHER_LineArrowEnd; else if ( aApiName.equalsAscii( "Rounded short Arrow" ) ) reLineEnd = ESCHER_LineArrowEnd; else if ( aApiName.equalsAscii( "Symmetric Arrow" ) ) reLineEnd = ESCHER_LineArrowEnd; else if ( aApiName.equalsAscii( "Line Arrow" ) ) reLineEnd = ESCHER_LineArrowOpenEnd; else if ( aApiName.equalsAscii( "Rounded large Arrow" ) ) reLineEnd = ESCHER_LineArrowEnd; else if ( aApiName.equalsAscii( "Circle" ) ) reLineEnd = ESCHER_LineArrowOvalEnd; else if ( aApiName.equalsAscii( "Square" ) ) reLineEnd = ESCHER_LineArrowDiamondEnd; else if ( aApiName.equalsAscii( "Arrow" ) ) reLineEnd = ESCHER_LineArrowEnd; else bIsMapped = sal_False; } if ( !bIsMapped && aArrowStartName.GetTokenCount( ' ' ) == 2 ) { sal_Bool b = sal_True; String aArrowName( aArrowStartName.GetToken( 0, ' ' ) ); if ( aArrowName.EqualsAscii( "msArrowEnd" ) ) reLineEnd = ESCHER_LineArrowEnd; else if ( aArrowName.EqualsAscii( "msArrowOpenEnd" ) ) reLineEnd = ESCHER_LineArrowOpenEnd; else if ( aArrowName.EqualsAscii( "msArrowStealthEnd" ) ) reLineEnd = ESCHER_LineArrowStealthEnd; else if ( aArrowName.EqualsAscii( "msArrowDiamondEnd" ) ) reLineEnd = ESCHER_LineArrowDiamondEnd; else if ( aArrowName.EqualsAscii( "msArrowOvalEnd" ) ) reLineEnd = ESCHER_LineArrowOvalEnd; else b = sal_False; // now we have the arrow, and try to determine the arrow size; if ( b ) { String aArrowSize( aArrowStartName.GetToken( 1, ' ' ) ); sal_Int32 nArrowSize = aArrowSize.ToInt32(); rnArrowWidth = ( nArrowSize - 1 ) / 3; rnArrowLength = nArrowSize - ( rnArrowWidth * 3 ) - 1; } } } } } return bIsArrow; } void EscherPropertyContainer::CreateLineProperties( const ::com::sun::star::uno::Reference< ::com::sun::star::beans::XPropertySet > & rXPropSet, sal_Bool bEdge ) { ::com::sun::star::uno::Any aAny; sal_uInt32 nLineFlags = 0x80008; ESCHER_LineEnd eLineEnd; sal_Int32 nArrowLength; sal_Int32 nArrowWidth; sal_Bool bSwapLineEnds = sal_False; if ( EscherPropertyValueHelper::GetPropertyValue( aAny, rXPropSet, String( RTL_CONSTASCII_USTRINGPARAM( "CircleKind" ) ), sal_True ) ) { ::com::sun::star::drawing::CircleKind eCircleKind; if ( aAny >>= eCircleKind ) { if ( eCircleKind == ::com::sun::star::drawing::CircleKind_ARC ) bSwapLineEnds = sal_True; } } if ( GetLineArrow( bSwapLineEnds ? 
sal_False : sal_True, rXPropSet, eLineEnd, nArrowLength, nArrowWidth ) ) { AddOpt( ESCHER_Prop_lineStartArrowLength, nArrowLength ); AddOpt( ESCHER_Prop_lineStartArrowWidth, nArrowWidth ); AddOpt( ESCHER_Prop_lineStartArrowhead, eLineEnd ); nLineFlags |= 0x100010; } if ( GetLineArrow( bSwapLineEnds ? sal_True : sal_False, rXPropSet, eLineEnd, nArrowLength, nArrowWidth ) ) { AddOpt( ESCHER_Prop_lineEndArrowLength, nArrowLength ); AddOpt( ESCHER_Prop_lineEndArrowWidth, nArrowWidth ); AddOpt( ESCHER_Prop_lineEndArrowhead, eLineEnd ); nLineFlags |= 0x100010; } // support LineCaps if(EscherPropertyValueHelper::GetPropertyValue(aAny, rXPropSet, String(RTL_CONSTASCII_USTRINGPARAM("LineCap")), sal_False)) { ::com::sun::star::drawing::LineCap aLineCap(com::sun::star::drawing::LineCap_BUTT); if(aAny >>= aLineCap) { switch (aLineCap) { default: /* com::sun::star::drawing::LineCap_BUTT */ { AddOpt(ESCHER_Prop_lineEndCapStyle, ESCHER_LineEndCapFlat); break; } case com::sun::star::drawing::LineCap_ROUND: { AddOpt(ESCHER_Prop_lineEndCapStyle, ESCHER_LineEndCapRound); break; } case com::sun::star::drawing::LineCap_SQUARE: { AddOpt(ESCHER_Prop_lineEndCapStyle, ESCHER_LineEndCapSquare); break; } } } } if ( EscherPropertyValueHelper::GetPropertyValue( aAny, rXPropSet, String( RTL_CONSTASCII_USTRINGPARAM( "LineStyle" ) ), sal_False ) ) { ::com::sun::star::drawing::LineStyle eLS; if ( aAny >>= eLS ) { switch ( eLS ) { case ::com::sun::star::drawing::LineStyle_NONE : AddOpt( ESCHER_Prop_fNoLineDrawDash, 0x90000 ); // 80000 break; case ::com::sun::star::drawing::LineStyle_DASH : { if ( EscherPropertyValueHelper::GetPropertyValue( aAny, rXPropSet, String( RTL_CONSTASCII_USTRINGPARAM( "LineDash" ) ), sal_False ) ) { ESCHER_LineDashing eDash = ESCHER_LineSolid; ::com::sun::star::drawing::LineDash* pLineDash = (::com::sun::star::drawing::LineDash*)aAny.getValue(); sal_Int32 nDistance = pLineDash->Distance << 1; switch ( pLineDash->Style ) { case ::com::sun::star::drawing::DashStyle_ROUND : case ::com::sun::star::drawing::DashStyle_ROUNDRELATIVE : AddOpt( ESCHER_Prop_lineEndCapStyle, 0 ); // Style Round setzen break; default : break; } if ( ((!(pLineDash->Dots )) || (!(pLineDash->Dashes )) ) || ( pLineDash->DotLen == pLineDash->DashLen ) ) { sal_Int32 nLen = pLineDash->DotLen; if ( pLineDash->Dashes ) nLen = pLineDash->DashLen; if ( nLen >= nDistance ) eDash = ESCHER_LineLongDashGEL; else if ( pLineDash->Dots ) eDash = ESCHER_LineDotSys; else eDash = ESCHER_LineDashGEL; } else // X Y { if ( pLineDash->Dots != pLineDash->Dashes ) { if ( ( pLineDash->DashLen > nDistance ) || ( pLineDash->DotLen > nDistance ) ) eDash = ESCHER_LineLongDashDotDotGEL; else eDash = ESCHER_LineDashDotDotSys; } else // X Y Y { if ( ( pLineDash->DashLen > nDistance ) || ( pLineDash->DotLen > nDistance ) ) eDash = ESCHER_LineLongDashDotGEL; else eDash = ESCHER_LineDashDotGEL; } } AddOpt( ESCHER_Prop_lineDashing, eDash ); } } case ::com::sun::star::drawing::LineStyle_SOLID : default: { AddOpt( ESCHER_Prop_fNoLineDrawDash, nLineFlags ); } break; } } if ( EscherPropertyValueHelper::GetPropertyValue( aAny, rXPropSet, String( RTL_CONSTASCII_USTRINGPARAM( "LineColor" ) ), sal_False ) ) { sal_uInt32 nLineColor = ImplGetColor( *((sal_uInt32*)aAny.getValue()) ); AddOpt( ESCHER_Prop_lineColor, nLineColor ); AddOpt( ESCHER_Prop_lineBackColor, nLineColor ^ 0xffffff ); } } sal_uInt32 nLineSize = ( EscherPropertyValueHelper::GetPropertyValue( aAny, rXPropSet, String( RTL_CONSTASCII_USTRINGPARAM( "LineWidth" ) ), sal_False ) ) ? 
*((sal_uInt32*)aAny.getValue()) : 0; if ( nLineSize > 1 ) AddOpt( ESCHER_Prop_lineWidth, nLineSize * 360 ); // 100TH MM -> PT , 1PT = 12700 EMU ESCHER_LineJoin eLineJoin = ESCHER_LineJoinMiter; if ( EscherPropertyValueHelper::GetPropertyValue( aAny, rXPropSet, String( RTL_CONSTASCII_USTRINGPARAM( "LineJoint" ) ), sal_True ) ) { ::com::sun::star::drawing::LineJoint eLJ; if ( aAny >>= eLJ ) { switch ( eLJ ) { case com::sun::star::drawing::LineJoint_NONE : case com::sun::star::drawing::LineJoint_MIDDLE : case com::sun::star::drawing::LineJoint_BEVEL : eLineJoin = ESCHER_LineJoinBevel; break; default: case com::sun::star::drawing::LineJoint_MITER : eLineJoin = ESCHER_LineJoinMiter; break; case com::sun::star::drawing::LineJoint_ROUND : eLineJoin = ESCHER_LineJoinRound; break; } } } AddOpt( ESCHER_Prop_lineJoinStyle, eLineJoin ); if ( EscherPropertyValueHelper::GetPropertyValue( aAny, rXPropSet, String( RTL_CONSTASCII_USTRINGPARAM( "LineTransparence" ) ), sal_True ) ) { sal_Int16 nTransparency = 0; if ( aAny >>= nTransparency ) AddOpt( ESCHER_Prop_lineOpacity, ( ( 100 - nTransparency ) << 16 ) / 100 ); } if ( bEdge == sal_False ) { AddOpt( ESCHER_Prop_fFillOK, 0x1001 ); AddOpt( ESCHER_Prop_fNoFillHitTest, 0x100000 ); } } static Size lcl_SizeToEmu(Size aPrefSize, MapMode aPrefMapMode) { Size aRetSize; if (aPrefMapMode == MAP_PIXEL) aRetSize = Application::GetDefaultDevice()->PixelToLogic( aPrefSize, MAP_100TH_MM ); else aRetSize = Application::GetDefaultDevice()->LogicToLogic( aPrefSize, aPrefMapMode, MAP_100TH_MM ); return aRetSize; } void EscherPropertyContainer::ImplCreateGraphicAttributes( const ::com::sun::star::uno::Reference< ::com::sun::star::beans::XPropertySet > & rXPropSet, sal_uInt32 nBlibId, sal_Bool bCreateCroppingAttributes ) { ::com::sun::star::uno::Any aAny; sal_uInt32 nPicFlags = 0; ::com::sun::star::drawing::ColorMode eColorMode( ::com::sun::star::drawing::ColorMode_STANDARD ); sal_Int16 nLuminance = 0; sal_Int32 nContrast = 0; if ( EscherPropertyValueHelper::GetPropertyValue( aAny, rXPropSet, String( RTL_CONSTASCII_USTRINGPARAM( "GraphicColorMode" ) ) ) ) aAny >>= eColorMode; if ( EscherPropertyValueHelper::GetPropertyValue( aAny, rXPropSet, String( RTL_CONSTASCII_USTRINGPARAM( "AdjustLuminance" ) ) ) ) aAny >>= nLuminance; if ( EscherPropertyValueHelper::GetPropertyValue( aAny, rXPropSet, String( RTL_CONSTASCII_USTRINGPARAM( "AdjustContrast" ) ) ) ) { sal_Int16 nC = sal_Int16(); aAny >>= nC; nContrast = nC; } if ( eColorMode == ::com::sun::star::drawing::ColorMode_WATERMARK ) { eColorMode = ::com::sun::star::drawing::ColorMode_STANDARD; nLuminance += 70; if ( nLuminance > 100 ) nLuminance = 100; nContrast -= 70; if ( nContrast < -100 ) nContrast = -100; } if ( eColorMode == ::com::sun::star::drawing::ColorMode_GREYS ) nPicFlags |= 0x40004; else if ( eColorMode == ::com::sun::star::drawing::ColorMode_MONO ) nPicFlags |= 0x60006; if ( nContrast ) { nContrast += 100; if ( nContrast == 100) nContrast = 0x10000; else if ( nContrast < 100 ) { nContrast *= 0x10000; nContrast /= 100; } else if ( nContrast < 200 ) nContrast = ( 100 * 0x10000 ) / ( 200 - nContrast ); else nContrast = 0x7fffffff; AddOpt( ESCHER_Prop_pictureContrast, nContrast ); } if ( nLuminance ) AddOpt( ESCHER_Prop_pictureBrightness, nLuminance * 327 ); if ( nPicFlags ) AddOpt( ESCHER_Prop_pictureActive, nPicFlags ); if ( bCreateCroppingAttributes && pGraphicProvider ) { Size aPrefSize; MapMode aPrefMapMode; if ( pGraphicProvider->GetPrefSize( nBlibId, aPrefSize, aPrefMapMode ) ) { Size 
aCropSize(lcl_SizeToEmu(aPrefSize, aPrefMapMode)); if ( aCropSize.Width() && aCropSize.Height() ) { if ( EscherPropertyValueHelper::GetPropertyValue( aAny, rXPropSet, String( RTL_CONSTASCII_USTRINGPARAM( "GraphicCrop" ) ) ) ) { ::com::sun::star::text::GraphicCrop aGraphCrop; if ( aAny >>= aGraphCrop ) { if ( aGraphCrop.Left ) { sal_uInt32 nLeft = ( aGraphCrop.Left * 65536 ) / aCropSize.Width(); AddOpt( ESCHER_Prop_cropFromLeft, nLeft ); } if ( aGraphCrop.Top ) { sal_uInt32 nTop = ( aGraphCrop.Top * 65536 ) / aCropSize.Height(); AddOpt( ESCHER_Prop_cropFromTop, nTop ); } if ( aGraphCrop.Right ) { sal_uInt32 nRight = ( aGraphCrop.Right * 65536 ) / aCropSize.Width(); AddOpt( ESCHER_Prop_cropFromRight, nRight ); } if ( aGraphCrop.Bottom ) { sal_uInt32 nBottom = ( aGraphCrop.Bottom * 65536 ) / aCropSize.Height(); AddOpt( ESCHER_Prop_cropFromBottom, nBottom ); } } } } } } } sal_Bool EscherPropertyContainer::CreateShapeProperties( const ::com::sun::star::uno::Reference< ::com::sun::star::drawing::XShape > & rXShape ) { uno::Reference< beans::XPropertySet > aXPropSet( rXShape, uno::UNO_QUERY ); if ( aXPropSet.is() ) { sal_Bool bVal = false; ::com::sun::star::uno::Any aAny; sal_uInt32 nShapeAttr = 0; EscherPropertyValueHelper::GetPropertyValue( aAny, aXPropSet, String( RTL_CONSTASCII_USTRINGPARAM( "Visible" ) ), sal_True ); if ( aAny >>= bVal ) { if ( !bVal ) nShapeAttr |= 0x20002; // set fHidden = true } EscherPropertyValueHelper::GetPropertyValue( aAny, aXPropSet, String( RTL_CONSTASCII_USTRINGPARAM( "Printable" ) ), sal_True ); if ( aAny >>= bVal ) { if ( !bVal ) nShapeAttr |= 0x10000; // set fPrint = false; } if ( nShapeAttr ) AddOpt( ESCHER_Prop_fPrint, nShapeAttr ); } return sal_True; } sal_Bool EscherPropertyContainer::CreateOLEGraphicProperties( const ::com::sun::star::uno::Reference< ::com::sun::star::drawing::XShape > & rXShape ) { sal_Bool bRetValue = sal_False; if ( rXShape.is() ) { SdrObject* pSdrOLE2( GetSdrObjectFromXShape( rXShape ) ); // SJ: leaving unoapi, because currently there is if ( pSdrOLE2 && pSdrOLE2->ISA( SdrOle2Obj ) ) // no access to the native graphic object { Graphic* pGraphic = ((SdrOle2Obj*)pSdrOLE2)->GetGraphic(); if ( pGraphic ) { GraphicObject aGraphicObject( *pGraphic ); bRetValue = CreateGraphicProperties( rXShape,aGraphicObject ); // End } } } return bRetValue; } sal_Bool EscherPropertyContainer::CreateGraphicProperties( const ::com::sun::star::uno::Reference< ::com::sun::star::drawing::XShape > & rXShape ,const GraphicObject& rGraphicObj ) { sal_Bool bRetValue = sal_False; ByteString aUniqueId( rGraphicObj.GetUniqueID() ); if ( aUniqueId.Len() ) { AddOpt( ESCHER_Prop_fillType, ESCHER_FillPicture ); uno::Reference< beans::XPropertySet > aXPropSet( rXShape, uno::UNO_QUERY ); if ( pGraphicProvider && pPicOutStrm && pShapeBoundRect && aXPropSet.is() ) { ::com::sun::star::uno::Any aAny; ::com::sun::star::awt::Rectangle* pVisArea = NULL; if ( EscherPropertyValueHelper::GetPropertyValue( aAny, aXPropSet, String( RTL_CONSTASCII_USTRINGPARAM( "VisibleArea" ) ) ) ) { pVisArea = new ::com::sun::star::awt::Rectangle; aAny >>= (*pVisArea); } Rectangle aRect( Point( 0, 0 ), pShapeBoundRect->GetSize() ); sal_uInt32 nBlibId = pGraphicProvider->GetBlibID( *pPicOutStrm, aUniqueId, aRect, pVisArea, NULL ); if ( nBlibId ) { AddOpt( ESCHER_Prop_pib, nBlibId, sal_True ); ImplCreateGraphicAttributes( aXPropSet, nBlibId, sal_False ); bRetValue = sal_True; } delete pVisArea; } } return bRetValue; } sal_Bool EscherPropertyContainer::CreateMediaGraphicProperties( const 
::com::sun::star::uno::Reference< ::com::sun::star::drawing::XShape > & rXShape ) { sal_Bool bRetValue = sal_False; if ( rXShape.is() ) { SdrObject* pSdrMedia( GetSdrObjectFromXShape( rXShape ) ); // SJ: leaving unoapi, because currently there is if ( pSdrMedia && pSdrMedia->ISA( SdrMediaObj ) ) // no access to the native graphic object { GraphicObject aGraphicObject( ((SdrMediaObj*)pSdrMedia)->getGraphic() ); bRetValue = CreateGraphicProperties( rXShape, aGraphicObject ); } } return bRetValue; } sal_Bool EscherPropertyContainer::ImplCreateEmbeddedBmp( const ByteString& rUniqueId ) { if( rUniqueId.Len() > 0 ) { EscherGraphicProvider aProvider; SvMemoryStream aMemStrm; Rectangle aRect; if ( aProvider.GetBlibID( aMemStrm, rUniqueId, aRect ) ) { // grab BLIP from stream and insert directly as complex property // ownership of stream memory goes to complex property aMemStrm.ObjectOwnsMemory( sal_False ); sal_uInt8* pBuf = (sal_uInt8*) aMemStrm.GetData(); sal_uInt32 nSize = aMemStrm.Seek( STREAM_SEEK_TO_END ); AddOpt( ESCHER_Prop_fillBlip, sal_True, nSize, pBuf, nSize ); return sal_True; } } return sal_False; } sal_Bool EscherPropertyContainer::CreateEmbeddedBitmapProperties( const ::rtl::OUString& rBitmapUrl, ::com::sun::star::drawing::BitmapMode eBitmapMode ) { sal_Bool bRetValue = sal_False; String aVndUrl( RTL_CONSTASCII_USTRINGPARAM( "vnd.sun.star.GraphicObject:" ) ); String aBmpUrl( rBitmapUrl ); xub_StrLen nIndex = aBmpUrl.Search( aVndUrl, 0 ); if( nIndex != STRING_NOTFOUND ) { // note: += ist not defined for xub_StrLen -> conversion to int and back to xub_StrLen nIndex = nIndex + aVndUrl.Len(); if( aBmpUrl.Len() > nIndex ) { ByteString aUniqueId( aBmpUrl, nIndex, aBmpUrl.Len() - nIndex, RTL_TEXTENCODING_UTF8 ); bRetValue = ImplCreateEmbeddedBmp( aUniqueId ); if( bRetValue ) { // bitmap mode property bool bRepeat = eBitmapMode == ::com::sun::star::drawing::BitmapMode_REPEAT; AddOpt( ESCHER_Prop_fillType, bRepeat ? ESCHER_FillTexture : ESCHER_FillPicture ); } } } return bRetValue; } namespace { GraphicObject lclDrawHatch( const ::com::sun::star::drawing::Hatch& rHatch, const Color& rBackColor, bool bFillBackground, const Rectangle& rRect ) { // #121183# For hatch, do no longer create a bitmap with the fixed size of 28x28 pixels. Also // do not create a bitmap in page size, that would explode file sizes (and have no good quality). // Better use a MetaFile graphic in page size; thus we have good quality due to vector format and // no bit file sizes. VirtualDevice aOut; GDIMetaFile aMtf; aOut.SetOutputSizePixel(Size(2, 2)); aOut.EnableOutput(false); aOut.SetMapMode(MapMode(MAP_100TH_MM)); aMtf.Clear(); aMtf.Record(&aOut); aOut.SetLineColor(); aOut.SetFillColor(bFillBackground ? rBackColor : Color(COL_TRANSPARENT)); aOut.DrawRect(rRect); aOut.DrawHatch(PolyPolygon(rRect), Hatch((HatchStyle)rHatch.Style, Color(rHatch.Color), rHatch.Distance, (sal_uInt16)rHatch.Angle)); aMtf.Stop(); aMtf.WindStart(); aMtf.SetPrefMapMode(MapMode(MAP_100TH_MM)); aMtf.SetPrefSize(rRect.GetSize()); return GraphicObject(Graphic(aMtf)); } } // namespace sal_Bool EscherPropertyContainer::CreateEmbeddedHatchProperties( const ::com::sun::star::drawing::Hatch& rHatch, const Color& rBackColor, bool bFillBackground ) { const Rectangle aRect(pShapeBoundRect ? 
*pShapeBoundRect : Rectangle(Point(0,0), Size(28000, 21000))); GraphicObject aGraphicObject = lclDrawHatch( rHatch, rBackColor, bFillBackground, aRect ); ByteString aUniqueId = aGraphicObject.GetUniqueID(); sal_Bool bRetValue = ImplCreateEmbeddedBmp( aUniqueId ); if ( bRetValue ) AddOpt( ESCHER_Prop_fillType, ESCHER_FillTexture ); return bRetValue; } sal_Bool EscherPropertyContainer::CreateGraphicProperties( const ::com::sun::star::uno::Reference< ::com::sun::star::beans::XPropertySet > & rXPropSet, const String& rSource, const sal_Bool bCreateFillBitmap, const sal_Bool bCreateCroppingAttributes, const sal_Bool bFillBitmapModeAllowed ) { sal_Bool bRetValue = sal_False; sal_Bool bCreateFillStyles = sal_False; sal_Bool bMirrored = sal_False; sal_Bool bRotate = sal_True; sal_uInt16 nAngle = 0; GraphicAttr* pGraphicAttr = NULL; GraphicObject aGraphicObject; String aGraphicUrl; ByteString aUniqueId; bool bIsGraphicMtf(false); // #121074# sal_Int16 nTransparency(0); sal_Int16 nRed(0); sal_Int16 nGreen(0); sal_Int16 nBlue(0); double fGamma(1.0); ::com::sun::star::drawing::BitmapMode eBitmapMode( ::com::sun::star::drawing::BitmapMode_NO_REPEAT ); ::com::sun::star::uno::Any aAny; if ( EscherPropertyValueHelper::GetPropertyValue( aAny, rXPropSet, rSource ) ) { if ( rSource == String( RTL_CONSTASCII_USTRINGPARAM( "MetaFile" ) ) ) { ::com::sun::star::uno::Sequence<sal_uInt8> aSeq = *(::com::sun::star::uno::Sequence<sal_uInt8>*)aAny.getValue(); const sal_uInt8* pAry = aSeq.getArray(); sal_uInt32 nAryLen = aSeq.getLength(); // the metafile is already rotated bRotate = sal_False; if ( pAry && nAryLen ) { Graphic aGraphic; SvMemoryStream aTemp( (void*)pAry, nAryLen, STREAM_READ ); sal_uInt32 nErrCode = GraphicConverter::Import( aTemp, aGraphic, CVT_WMF ); if ( nErrCode == ERRCODE_NONE ) { aGraphicObject = aGraphic; aUniqueId = aGraphicObject.GetUniqueID(); bIsGraphicMtf = aGraphicObject.GetType() == GRAPHIC_GDIMETAFILE; } } } else if ( rSource == String( RTL_CONSTASCII_USTRINGPARAM( "Bitmap" ) ) ) { ::com::sun::star::uno::Reference< ::com::sun::star::awt::XBitmap >xBitmap; if ( ::cppu::extractInterface( xBitmap, aAny ) ) { ::com::sun::star::uno::Reference< ::com::sun::star::awt::XBitmap > xBmp; if ( aAny >>= xBmp ) { BitmapEx aBitmapEx( VCLUnoHelper::GetBitmap( xBmp ) ); Graphic aGraphic( aBitmapEx ); aGraphicObject = aGraphic; aUniqueId = aGraphicObject.GetUniqueID(); bIsGraphicMtf = aGraphicObject.GetType() == GRAPHIC_GDIMETAFILE; } } } else if ( rSource == String( RTL_CONSTASCII_USTRINGPARAM( "FillBitmapURL" ) ) ) { aGraphicUrl = *(::rtl::OUString*)aAny.getValue(); } else if ( rSource == String( RTL_CONSTASCII_USTRINGPARAM( "GraphicURL" ) ) ) { aGraphicUrl = *(::rtl::OUString*)aAny.getValue(); bCreateFillStyles = sal_True; } else if ( rSource == String( RTL_CONSTASCII_USTRINGPARAM( "FillHatch" ) ) ) { ::com::sun::star::drawing::Hatch aHatch; if ( aAny >>= aHatch ) { Color aBackColor; if ( EscherPropertyValueHelper::GetPropertyValue( aAny, rXPropSet, String( RTL_CONSTASCII_USTRINGPARAM( "FillColor" ) ), sal_False ) ) { aBackColor = ImplGetColor( *((sal_uInt32*)aAny.getValue()), sal_False ); } bool bFillBackground = false; if ( EscherPropertyValueHelper::GetPropertyValue( aAny, rXPropSet, String( RTL_CONSTASCII_USTRINGPARAM( "FillBackground" ) ), sal_True ) ) { aAny >>= bFillBackground; } const Rectangle aRect(Point(0, 0), pShapeBoundRect ? 
pShapeBoundRect->GetSize() : Size(28000, 21000)); aGraphicObject = lclDrawHatch( aHatch, aBackColor, bFillBackground, aRect ); aUniqueId = aGraphicObject.GetUniqueID(); eBitmapMode = ::com::sun::star::drawing::BitmapMode_REPEAT; bIsGraphicMtf = aGraphicObject.GetType() == GRAPHIC_GDIMETAFILE; } } if ( EscherPropertyValueHelper::GetPropertyValue( aAny, rXPropSet, String( RTL_CONSTASCII_USTRINGPARAM( "IsMirrored" ) ), sal_True ) ) aAny >>= bMirrored; // #121074# transparency of graphic is not supported in MS formats, get and apply it // in the GetTransformedGraphic call in GetBlibID if(EscherPropertyValueHelper::GetPropertyValue(aAny, rXPropSet, String(RTL_CONSTASCII_USTRINGPARAM("Transparency")))) { aAny >>= nTransparency; } if ( EscherPropertyValueHelper::GetPropertyValue( aAny, rXPropSet, String( RTL_CONSTASCII_USTRINGPARAM( "AdjustRed" ) ) ) ) { aAny >>= nRed; } if ( EscherPropertyValueHelper::GetPropertyValue( aAny, rXPropSet, String( RTL_CONSTASCII_USTRINGPARAM( "AdjustGreen" ) ) ) ) { aAny >>= nGreen; } if ( EscherPropertyValueHelper::GetPropertyValue( aAny, rXPropSet, String( RTL_CONSTASCII_USTRINGPARAM( "AdjustBlue" ) ) ) ) { aAny >>= nBlue; } if ( EscherPropertyValueHelper::GetPropertyValue( aAny, rXPropSet, String( RTL_CONSTASCII_USTRINGPARAM( "Gamma" ) ) ) ) { aAny >>= fGamma; } if ( bCreateFillBitmap && bFillBitmapModeAllowed ) { if ( EscherPropertyValueHelper::GetPropertyValue( aAny, rXPropSet, String( RTL_CONSTASCII_USTRINGPARAM( "FillBitmapMode" ) ), sal_True ) ) aAny >>= eBitmapMode; } else { nAngle = bRotate && EscherPropertyValueHelper::GetPropertyValue( aAny, rXPropSet, String( RTL_CONSTASCII_USTRINGPARAM( "RotateAngle" ) ), sal_True ) ? (sal_uInt16)( ( *((sal_Int32*)aAny.getValue() ) ) + 5 ) / 10 : 0; } if ( aGraphicUrl.Len() ) { String aVndUrl( RTL_CONSTASCII_USTRINGPARAM( "vnd.sun.star.GraphicObject:" ) ); xub_StrLen nIndex = aGraphicUrl.Search( aVndUrl, 0 ); if ( nIndex != STRING_NOTFOUND ) { nIndex = nIndex + aVndUrl.Len(); if ( aGraphicUrl.Len() > nIndex ) aUniqueId = ByteString( aGraphicUrl, nIndex, aGraphicUrl.Len() - nIndex, RTL_TEXTENCODING_UTF8 ); } else { // externally, linked graphic? convert to embedded // one, if transformations are needed. this is because // everything < msoxp cannot even handle rotated // bitmaps. // And check whether the graphic link target is // actually supported by mso. INetURLObject aTmp( aGraphicUrl ); GraphicDescriptor aDescriptor(aTmp); aDescriptor.Detect(); const sal_uInt16 nFormat = aDescriptor.GetFileFormat(); // can MSO handle it? if ( bMirrored || nAngle || nTransparency || nRed || nGreen || nBlue || (1.0 != fGamma) || (nFormat != GFF_BMP && nFormat != GFF_GIF && nFormat != GFF_JPG && nFormat != GFF_PNG && nFormat != GFF_TIF && nFormat != GFF_PCT && nFormat != GFF_WMF && nFormat != GFF_EMF) ) { SvStream* pIn = ::utl::UcbStreamHelper::CreateStream( aTmp.GetMainURL( INetURLObject::NO_DECODE ), STREAM_READ ); if ( pIn ) { Graphic aGraphic; sal_uInt32 nErrCode = GraphicConverter::Import( *pIn, aGraphic ); if ( nErrCode == ERRCODE_NONE ) { // no. 
aGraphicObject = aGraphic; aUniqueId = aGraphicObject.GetUniqueID(); } // else: simply keep the graphic link delete pIn; } } if ( !aUniqueId.Len() ) { if ( pGraphicProvider ) { const rtl::OUString& rBaseURI( pGraphicProvider->GetBaseURI() ); INetURLObject aBaseURI( rBaseURI ); if( aBaseURI.GetProtocol() == aTmp.GetProtocol() ) { rtl::OUString aRelUrl( INetURLObject::GetRelURL( rBaseURI, aGraphicUrl, INetURLObject::WAS_ENCODED, INetURLObject::DECODE_TO_IURI, RTL_TEXTENCODING_UTF8, INetURLObject::FSYS_DETECT ) ); if ( aRelUrl.getLength() ) aGraphicUrl = aRelUrl; } } } } } if ( aGraphicUrl.Len() || aUniqueId.Len() ) { if(bMirrored || nTransparency || nRed || nGreen || nBlue || (1.0 != fGamma)) { pGraphicAttr = new GraphicAttr; if(bMirrored) { pGraphicAttr->SetMirrorFlags(BMP_MIRROR_HORZ); } // #121074# if(nTransparency) { pGraphicAttr->SetTransparency((nTransparency * 255) / 100); } if(nRed) { pGraphicAttr->SetChannelR(nRed); } if(nGreen) { pGraphicAttr->SetChannelG(nGreen); } if(nBlue) { pGraphicAttr->SetChannelB(nBlue); } if(1.0 != fGamma) { pGraphicAttr->SetGamma(fGamma); } } if(nAngle && bIsGraphicMtf) { AddOpt( ESCHER_Prop_Rotation, ( ( ((sal_Int32)nAngle << 16 ) / 10 ) + 0x8000 ) &~ 0xffff ); } if ( eBitmapMode == ::com::sun::star::drawing::BitmapMode_REPEAT ) { sal_Int32 nSizeX = 0,nSizeY = 0,nOffsetX = 0,nOffsetY = 0,nPosOffsetX = 0,nPosOffsetY = 0; if ( EscherPropertyValueHelper::GetPropertyValue( aAny, rXPropSet, String( RTL_CONSTASCII_USTRINGPARAM( "FillBitmapSizeX" ) ), sal_True ) ) { aAny >>= nSizeX; } if ( EscherPropertyValueHelper::GetPropertyValue( aAny, rXPropSet, String( RTL_CONSTASCII_USTRINGPARAM( "FillBitmapSizeY" ) ), sal_True ) ) { aAny >>= nSizeY; } if ( EscherPropertyValueHelper::GetPropertyValue( aAny, rXPropSet, String( RTL_CONSTASCII_USTRINGPARAM( "FillBitmapOffsetX" ) ), sal_True ) ) { aAny >>= nOffsetX; } if ( EscherPropertyValueHelper::GetPropertyValue( aAny, rXPropSet, String( RTL_CONSTASCII_USTRINGPARAM( "FillBitmapOffsetY" ) ), sal_True ) ) { aAny >>= nOffsetY; } if ( EscherPropertyValueHelper::GetPropertyValue( aAny, rXPropSet, String( RTL_CONSTASCII_USTRINGPARAM( "FillBitmapPositionOffsetX" ) ), sal_True ) ) { aAny >>= nPosOffsetX; } if ( EscherPropertyValueHelper::GetPropertyValue( aAny, rXPropSet, String( RTL_CONSTASCII_USTRINGPARAM( "FillBitmapPositionOffsetY" ) ), sal_True ) ) { aAny >>= nPosOffsetY; } if(nSizeX == -100 && nSizeY == -100 && nOffsetX == 0 && nOffsetY == 0 && nPosOffsetX == 0 && nPosOffsetY == 0) AddOpt( ESCHER_Prop_fillType, ESCHER_FillPicture ); else AddOpt( ESCHER_Prop_fillType, ESCHER_FillTexture ); } else AddOpt( ESCHER_Prop_fillType, ESCHER_FillPicture ); if ( aUniqueId.Len() ) { // write out embedded graphic if ( pGraphicProvider && pPicOutStrm && pShapeBoundRect ) { Rectangle aRect( Point( 0, 0 ), pShapeBoundRect->GetSize() ); const sal_uInt32 nBlibId(pGraphicProvider->GetBlibID(*pPicOutStrm, aUniqueId, aRect, NULL, pGraphicAttr)); if(nBlibId) { if(bCreateFillBitmap) { AddOpt(ESCHER_Prop_fillBlip, nBlibId, sal_True); } else { AddOpt( ESCHER_Prop_pib, nBlibId, sal_True ); ImplCreateGraphicAttributes( rXPropSet, nBlibId, bCreateCroppingAttributes ); } bRetValue = sal_True; } } else { EscherGraphicProvider aProvider; SvMemoryStream aMemStrm; Rectangle aRect; if ( aProvider.GetBlibID( aMemStrm, aUniqueId, aRect, NULL, pGraphicAttr ) ) { // grab BLIP from stream and insert directly as complex property // ownership of stream memory goes to complex property aMemStrm.ObjectOwnsMemory( sal_False ); sal_uInt8* pBuf = (sal_uInt8*) 
aMemStrm.GetData(); sal_uInt32 nSize = aMemStrm.Seek( STREAM_SEEK_TO_END ); AddOpt( ESCHER_Prop_fillBlip, sal_True, nSize, pBuf, nSize ); bRetValue = sal_True; } } } // write out link to graphic else { OSL_ASSERT(aGraphicUrl.Len()); AddOpt( ESCHER_Prop_pibName, aGraphicUrl ); sal_uInt32 nPibFlags=0; GetOpt( ESCHER_Prop_pibFlags, nPibFlags ); AddOpt( ESCHER_Prop_pibFlags, ESCHER_BlipFlagLinkToFile|ESCHER_BlipFlagFile|ESCHER_BlipFlagDoNotSave | nPibFlags ); } } } delete pGraphicAttr; if ( bCreateFillStyles ) CreateFillProperties( rXPropSet, sal_True ); return bRetValue; } PolyPolygon EscherPropertyContainer::GetPolyPolygon( const ::com::sun::star::uno::Reference< ::com::sun::star::drawing::XShape > & rXShape ) { PolyPolygon aRetPolyPoly; ::com::sun::star::uno::Reference< ::com::sun::star::beans::XPropertySet > aXPropSet; ::com::sun::star::uno::Any aAny( rXShape->queryInterface( ::getCppuType( (const ::com::sun::star::uno::Reference< ::com::sun::star::beans::XPropertySet >*) 0 ) )); String sPolyPolygonBezier( RTL_CONSTASCII_USTRINGPARAM( "PolyPolygonBezier" ) ); String sPolyPolygon ( RTL_CONSTASCII_USTRINGPARAM( "PolyPolygon" ) ); String sPolygon ( RTL_CONSTASCII_USTRINGPARAM( "Polygon" ) ); if ( aAny >>= aXPropSet ) { sal_Bool bHasProperty = EscherPropertyValueHelper::GetPropertyValue( aAny, aXPropSet, sPolyPolygonBezier, sal_True ); if ( !bHasProperty ) bHasProperty = EscherPropertyValueHelper::GetPropertyValue( aAny, aXPropSet, sPolyPolygon, sal_True ); if ( !bHasProperty ) bHasProperty = EscherPropertyValueHelper::GetPropertyValue( aAny, aXPropSet, sPolygon, sal_True ); if ( bHasProperty ) aRetPolyPoly = GetPolyPolygon( aAny ); } return aRetPolyPoly; } PolyPolygon EscherPropertyContainer::GetPolyPolygon( const ::com::sun::star::uno::Any& rAny ) { sal_Bool bNoError = sal_True; Polygon aPolygon; PolyPolygon aPolyPolygon; if ( rAny.getValueType() == ::getCppuType( ( const ::com::sun::star::drawing::PolyPolygonBezierCoords* ) 0 ) ) { ::com::sun::star::drawing::PolyPolygonBezierCoords* pSourcePolyPolygon = (::com::sun::star::drawing::PolyPolygonBezierCoords*)rAny.getValue(); sal_uInt16 nOuterSequenceCount = (sal_uInt16)pSourcePolyPolygon->Coordinates.getLength(); // Zeiger auf innere sequences holen ::com::sun::star::drawing::PointSequence* pOuterSequence = pSourcePolyPolygon->Coordinates.getArray(); ::com::sun::star::drawing::FlagSequence* pOuterFlags = pSourcePolyPolygon->Flags.getArray(); bNoError = pOuterSequence && pOuterFlags; if ( bNoError ) { sal_uInt16 a, b, nInnerSequenceCount; ::com::sun::star::awt::Point* pArray; // dies wird ein Polygon set for ( a = 0; a < nOuterSequenceCount; a++ ) { ::com::sun::star::drawing::PointSequence* pInnerSequence = pOuterSequence++; ::com::sun::star::drawing::FlagSequence* pInnerFlags = pOuterFlags++; bNoError = pInnerSequence && pInnerFlags; if ( bNoError ) { // Zeiger auf Arrays holen pArray = pInnerSequence->getArray(); ::com::sun::star::drawing::PolygonFlags* pFlags = pInnerFlags->getArray(); if ( pArray && pFlags ) { nInnerSequenceCount = (sal_uInt16)pInnerSequence->getLength(); aPolygon = Polygon( nInnerSequenceCount ); for( b = 0; b < nInnerSequenceCount; b++) { PolyFlags ePolyFlags( *( (PolyFlags*)pFlags++ ) ); ::com::sun::star::awt::Point aPoint( (::com::sun::star::awt::Point)*(pArray++) ); aPolygon[ b ] = Point( aPoint.X, aPoint.Y ); aPolygon.SetFlags( b, ePolyFlags ); if ( ePolyFlags == POLY_CONTROL ) continue; } aPolyPolygon.Insert( aPolygon, POLYPOLY_APPEND ); } } } } } else if ( rAny.getValueType() == ::getCppuType( ( const 
::com::sun::star::drawing::PointSequenceSequence* ) 0 ) ) { ::com::sun::star::drawing::PointSequenceSequence* pSourcePolyPolygon = (::com::sun::star::drawing::PointSequenceSequence*)rAny.getValue(); sal_uInt16 nOuterSequenceCount = (sal_uInt16)pSourcePolyPolygon->getLength(); // Zeiger auf innere sequences holen ::com::sun::star::drawing::PointSequence* pOuterSequence = pSourcePolyPolygon->getArray(); bNoError = pOuterSequence != NULL; if ( bNoError ) { sal_uInt16 a, b, nInnerSequenceCount; // dies wird ein Polygon set for( a = 0; a < nOuterSequenceCount; a++ ) { ::com::sun::star::drawing::PointSequence* pInnerSequence = pOuterSequence++; bNoError = pInnerSequence != NULL; if ( bNoError ) { // Zeiger auf Arrays holen ::com::sun::star::awt::Point* pArray = pInnerSequence->getArray(); if ( pArray != NULL ) { nInnerSequenceCount = (sal_uInt16)pInnerSequence->getLength(); aPolygon = Polygon( nInnerSequenceCount ); for( b = 0; b < nInnerSequenceCount; b++) { aPolygon[ b ] = Point( pArray->X, pArray->Y ); pArray++; } aPolyPolygon.Insert( aPolygon, POLYPOLY_APPEND ); } } } } } else if ( rAny.getValueType() == ::getCppuType( ( const ::com::sun::star::drawing::PointSequence* ) 0 ) ) { ::com::sun::star::drawing::PointSequence* pInnerSequence = (::com::sun::star::drawing::PointSequence*)rAny.getValue(); bNoError = pInnerSequence != NULL; if ( bNoError ) { sal_uInt16 a, nInnerSequenceCount; // Zeiger auf Arrays holen ::com::sun::star::awt::Point* pArray = pInnerSequence->getArray(); if ( pArray != NULL ) { nInnerSequenceCount = (sal_uInt16)pInnerSequence->getLength(); aPolygon = Polygon( nInnerSequenceCount ); for( a = 0; a < nInnerSequenceCount; a++) { aPolygon[ a ] = Point( pArray->X, pArray->Y ); pArray++; } aPolyPolygon.Insert( aPolygon, POLYPOLY_APPEND ); } } } return aPolyPolygon; } sal_Bool EscherPropertyContainer::CreatePolygonProperties( const ::com::sun::star::uno::Reference< ::com::sun::star::beans::XPropertySet > & rXPropSet, sal_uInt32 nFlags, sal_Bool bBezier, ::com::sun::star::awt::Rectangle& rGeoRect, Polygon* pPolygon ) { static String sPolyPolygonBezier( RTL_CONSTASCII_USTRINGPARAM( "PolyPolygonBezier" ) ); static String sPolyPolygon ( RTL_CONSTASCII_USTRINGPARAM( "PolyPolygon" ) ); sal_Bool bRetValue = sal_True; sal_Bool bLine = ( nFlags & ESCHER_CREATEPOLYGON_LINE ) != 0; PolyPolygon aPolyPolygon; if ( pPolygon ) aPolyPolygon.Insert( *pPolygon, POLYPOLY_APPEND ); else { ::com::sun::star::uno::Any aAny; bRetValue = EscherPropertyValueHelper::GetPropertyValue( aAny, rXPropSet, ( bBezier ) ? 
sPolyPolygonBezier : sPolyPolygon, sal_True ); if ( bRetValue ) { aPolyPolygon = GetPolyPolygon( aAny ); bRetValue = aPolyPolygon.Count() != 0; } } if ( bRetValue ) { if ( bLine ) { if ( ( aPolyPolygon.Count() == 1 ) && ( aPolyPolygon[ 0 ].GetSize() == 2 ) ) { const Polygon& rPoly = aPolyPolygon[ 0 ]; rGeoRect = ::com::sun::star::awt::Rectangle( rPoly[ 0 ].X(), rPoly[ 0 ].Y(), rPoly[ 1 ].X() - rPoly[ 0 ].X(), rPoly[ 1 ].Y() - rPoly[ 0 ].Y() ); } else bRetValue = sal_False; } else { Polygon aPolygon; sal_uInt16 i, j, k, nPoints, nBezPoints, nPolyCount = aPolyPolygon.Count(); Rectangle aRect( aPolyPolygon.GetBoundRect() ); rGeoRect = ::com::sun::star::awt::Rectangle( aRect.Left(), aRect.Top(), aRect.GetWidth(), aRect.GetHeight() ); for ( nBezPoints = nPoints = i = 0; i < nPolyCount; i++ ) { k = aPolyPolygon[ i ].GetSize(); nPoints = nPoints + k; for ( j = 0; j < k; j++ ) { if ( aPolyPolygon[ i ].GetFlags( j ) != POLY_CONTROL ) nBezPoints++; } } sal_uInt32 nVerticesBufSize = ( nPoints << 2 ) + 6; sal_uInt8* pVerticesBuf = new sal_uInt8[ nVerticesBufSize ]; sal_uInt32 nSegmentBufSize = ( ( nBezPoints << 2 ) + 8 ); if ( nPolyCount > 1 ) nSegmentBufSize += ( nPolyCount << 1 ); sal_uInt8* pSegmentBuf = new sal_uInt8[ nSegmentBufSize ]; sal_uInt8* pPtr = pVerticesBuf; *pPtr++ = (sal_uInt8)( nPoints ); // Little endian *pPtr++ = (sal_uInt8)( nPoints >> 8 ); *pPtr++ = (sal_uInt8)( nPoints ); *pPtr++ = (sal_uInt8)( nPoints >> 8 ); *pPtr++ = (sal_uInt8)0xf0; *pPtr++ = (sal_uInt8)0xff; for ( j = 0; j < nPolyCount; j++ ) { aPolygon = aPolyPolygon[ j ]; nPoints = aPolygon.GetSize(); for ( i = 0; i < nPoints; i++ ) // Punkte aus Polygon in Buffer schreiben { Point aPoint = aPolygon[ i ]; aPoint.X() -= rGeoRect.X; aPoint.Y() -= rGeoRect.Y; *pPtr++ = (sal_uInt8)( aPoint.X() ); *pPtr++ = (sal_uInt8)( aPoint.X() >> 8 ); *pPtr++ = (sal_uInt8)( aPoint.Y() ); *pPtr++ = (sal_uInt8)( aPoint.Y() >> 8 ); } } pPtr = pSegmentBuf; *pPtr++ = (sal_uInt8)( ( nSegmentBufSize - 6 ) >> 1 ); *pPtr++ = (sal_uInt8)( ( nSegmentBufSize - 6 ) >> 9 ); *pPtr++ = (sal_uInt8)( ( nSegmentBufSize - 6 ) >> 1 ); *pPtr++ = (sal_uInt8)( ( nSegmentBufSize - 6 ) >> 9 ); *pPtr++ = (sal_uInt8)2; *pPtr++ = (sal_uInt8)0; for ( j = 0; j < nPolyCount; j++ ) { *pPtr++ = 0x0; // Polygon start *pPtr++ = 0x40; aPolygon = aPolyPolygon[ j ]; nPoints = aPolygon.GetSize(); for ( i = 0; i < nPoints; i++ ) // Polyflags in Buffer schreiben { *pPtr++ = 0; if ( bBezier ) *pPtr++ = 0xb3; else *pPtr++ = 0xac; if ( ( i + 1 ) != nPoints ) { *pPtr++ = 1; if ( aPolygon.GetFlags( i + 1 ) == POLY_CONTROL ) { *pPtr++ = 0x20; i += 2; } else *pPtr++ = 0; } } if ( nPolyCount > 1 ) { *pPtr++ = 1; // end of polygon *pPtr++ = 0x60; } } *pPtr++ = 0; *pPtr++ = 0x80; AddOpt( ESCHER_Prop_geoRight, rGeoRect.Width ); AddOpt( ESCHER_Prop_geoBottom, rGeoRect.Height ); AddOpt( ESCHER_Prop_shapePath, ESCHER_ShapeComplex ); AddOpt( ESCHER_Prop_pVertices, sal_True, nVerticesBufSize - 6, (sal_uInt8*)pVerticesBuf, nVerticesBufSize ); AddOpt( ESCHER_Prop_pSegmentInfo, sal_True, nSegmentBufSize, (sal_uInt8*)pSegmentBuf, nSegmentBufSize ); } } return bRetValue; } /* in MS,the connector including 9 types : "straightConnector1", "bentConnector2","bentConnector3","bentConnector4","bentConnector5" "curvedConnector2","curvedConnector3","curvedConnector4","curvedConnector5" in AOO,including 4 types:"standard","lines","line","curve" when save as MS file, the connector must be convert to corresponding type. 
"line" and "lines" <-> "straightConnector1" "standard" <-> "bentConnector2-5" "curve" <-> "curvedConnector2-5" */ sal_Int32 lcl_GetAdjustValueCount( const XPolygon& rPoly ) { int nRet = 0; switch ( rPoly.GetSize() ) { case 2 : case 3: nRet = 0; break; case 4: nRet = 1; break; case 5: nRet = 2; break; default: if ( rPoly.GetSize()>=6 ) nRet = 3; break; } return nRet; } /* Adjust value decide the position which connector should turn a corner */ sal_Int32 lcl_GetConnectorAdjustValue ( const XPolygon& rPoly, sal_uInt16 nIndex ) { sal_uInt16 k = rPoly.GetSize(); OSL_ASSERT ( k >= ( 3 + nIndex ) ); Point aPt; Point aStart = rPoly[0]; Point aEnd = rPoly[k-1]; if ( aEnd.Y() == aStart.Y() ) aEnd.Y() = aStart.Y() +4; if ( aEnd.X() == aStart.X() ) aEnd.X() = aStart.X() +4; sal_Bool bVertical = ( rPoly[1].X()-aStart.X() ) == 0 ; //vertical and horizon alternate if ( nIndex%2 == 1 ) bVertical = !bVertical; aPt = rPoly[ nIndex + 1]; sal_Int32 nAdjustValue; if ( bVertical ) nAdjustValue = ( aPt.Y()-aStart.Y())* 21600 /(aEnd.Y()-aStart.Y()); else nAdjustValue = ( aPt.X()-aStart.X() )* 21600 /(aEnd.X()-aStart.X()); return nAdjustValue; } void lcl_Rotate(sal_Int32 nAngle, Point center, Point& pt) { while ( nAngle<0) nAngle +=36000; while (nAngle>=36000) nAngle -=36000; int cs, sn; switch (nAngle) { case 0: cs =1; sn =0; break; case 9000: cs =0; sn =1; break; case 18000: cs = -1; sn = 0; break; case 27000: cs = 0; sn = -1; break; default: return; break; } sal_Int32 x0 =pt.X()-center.X(); sal_Int32 y0 =pt.Y()-center.Y(); pt.X()=center.X()+ x0*cs-y0*sn; pt.Y()=center.Y()+ y0*cs+x0*sn; } /* FlipV defines that the shape will be flipped vertically about the center of its bounding box. Generally, draw the connector from top to bottom, from left to right when meet the adjust value, but when (X1>X2 or Y1>Y2),the draw director must be reverse, FlipV or FlipH should be set to true. */ sal_Bool lcl_GetAngle(Polygon &rPoly,sal_uInt16& rShapeFlags,sal_Int32& nAngle ) { Point aStart = rPoly[0]; Point aEnd = rPoly[rPoly.GetSize()-1]; nAngle = ( rPoly[1].X() == aStart.X() ) ? 
9000: 0 ; Point p1(aStart.X(),aStart.Y()); Point p2(aEnd.X(),aEnd.Y()); if ( nAngle ) { Point center((aEnd.X()+aStart.X())>>1,(aEnd.Y()+aStart.Y())>>1); lcl_Rotate(-nAngle, center,p1); lcl_Rotate(-nAngle, center,p2); } if ( p1.X() > p2.X() ) { if ( nAngle ) rShapeFlags |= SHAPEFLAG_FLIPV; else rShapeFlags |= SHAPEFLAG_FLIPH; } if ( p1.Y() > p2.Y() ) { if ( nAngle ) rShapeFlags |= SHAPEFLAG_FLIPH; else rShapeFlags |= SHAPEFLAG_FLIPV; } if ( (rShapeFlags&SHAPEFLAG_FLIPH) && (rShapeFlags&SHAPEFLAG_FLIPV) ) { rShapeFlags &= ~( SHAPEFLAG_FLIPH | SHAPEFLAG_FLIPV ); nAngle +=18000; } if ( nAngle ) { // Set angle properties nAngle *= 655; nAngle += 0x8000; nAngle &=~0xffff; // nAngle auf volle Gradzahl runden return sal_True; } return sal_False; } sal_Bool EscherPropertyContainer::CreateConnectorProperties( const ::com::sun::star::uno::Reference< ::com::sun::star::drawing::XShape > & rXShape, EscherSolverContainer& rSolverContainer, ::com::sun::star::awt::Rectangle& rGeoRect, sal_uInt16& rShapeType, sal_uInt16& rShapeFlags ) { static String sEdgeKind ( RTL_CONSTASCII_USTRINGPARAM( "EdgeKind" ) ); static String sEdgeStartPoint ( RTL_CONSTASCII_USTRINGPARAM( "EdgeStartPoint" ) ); static String sEdgeEndPoint ( RTL_CONSTASCII_USTRINGPARAM( "EdgeEndPoint" ) ); static String sEdgeStartConnection ( RTL_CONSTASCII_USTRINGPARAM( "EdgeStartConnection" ) ); static String sEdgeEndConnection ( RTL_CONSTASCII_USTRINGPARAM( "EdgeEndConnection" ) ); static String sEdgePath ( RTL_CONSTASCII_USTRINGPARAM( "PolyPolygonBezier") ); sal_Bool bRetValue = sal_False; rShapeType = rShapeFlags = 0; if ( rXShape.is() ) { ::com::sun::star::awt::Point aStartPoint, aEndPoint; ::com::sun::star::uno::Reference< ::com::sun::star::beans::XPropertySet > aXPropSet; ::com::sun::star::uno::Reference< ::com::sun::star::drawing::XShape > aShapeA, aShapeB; ::com::sun::star::uno::Any aAny( rXShape->queryInterface( ::getCppuType( (const ::com::sun::star::uno::Reference< ::com::sun::star::beans::XPropertySet >*) 0 ) )); if ( aAny >>= aXPropSet ) { if ( EscherPropertyValueHelper::GetPropertyValue( aAny, aXPropSet, sEdgeKind, sal_True ) ) { ::com::sun::star::drawing::ConnectorType eCt; aAny >>= eCt; if ( EscherPropertyValueHelper::GetPropertyValue( aAny, aXPropSet, sEdgeStartPoint ) ) { aStartPoint = *(::com::sun::star::awt::Point*)aAny.getValue(); if ( EscherPropertyValueHelper::GetPropertyValue( aAny, aXPropSet, sEdgeEndPoint ) ) { aEndPoint = *(::com::sun::star::awt::Point*)aAny.getValue(); rShapeFlags = SHAPEFLAG_HAVEANCHOR | SHAPEFLAG_HAVESPT | SHAPEFLAG_CONNECTOR; rGeoRect = ::com::sun::star::awt::Rectangle( aStartPoint.X, aStartPoint.Y, ( aEndPoint.X - aStartPoint.X ) + 1, ( aEndPoint.Y - aStartPoint.Y ) + 1 ); //set standard's FLIP in below code if ( eCt != ::com::sun::star::drawing::ConnectorType_STANDARD) { if ( rGeoRect.Height < 0 ) // justify { rShapeFlags |= SHAPEFLAG_FLIPV; rGeoRect.Y = aEndPoint.Y; rGeoRect.Height = -rGeoRect.Height; } if ( rGeoRect.Width < 0 ) { rShapeFlags |= SHAPEFLAG_FLIPH; rGeoRect.X = aEndPoint.X; rGeoRect.Width = -rGeoRect.Width; } } sal_uInt32 nAdjustValue1, nAdjustValue2, nAdjustValue3; nAdjustValue1 = nAdjustValue2 = nAdjustValue3 = 0x2a30; if ( EscherPropertyValueHelper::GetPropertyValue( aAny, aXPropSet, sEdgeStartConnection ) ) aAny >>= aShapeA; if ( EscherPropertyValueHelper::GetPropertyValue( aAny, aXPropSet, sEdgeEndConnection ) ) aAny >>= aShapeB; /* if ( ImplGetPropertyValue( String( RTL_CONSTASCII_USTRINGPARAM( "EdgeLine1Delta" ) ) ) ) { } if ( ImplGetPropertyValue( String( 
RTL_CONSTASCII_USTRINGPARAM( "EdgeLine2Delta" ) ) ) ) { } if ( ImplGetPropertyValue( String( RTL_CONSTASCII_USTRINGPARAM( "EdgeLine3Delta" ) ) ) ) { } if ( ImplGetPropertyValue( String( RTL_CONSTASCII_USTRINGPARAM( "EdgeNode1HorzDist" ) ) ) ) { } if ( ImplGetPropertyValue( String( RTL_CONSTASCII_USTRINGPARAM( "EdgeNode1VertDist" ) ) ) ) { } if ( ImplGetPropertyValue( String( RTL_CONSTASCII_USTRINGPARAM( "EdgeNode2HorzDist" ) ) ) ) { } if ( ImplGetPropertyValue( String( RTL_CONSTASCII_USTRINGPARAM( "EdgeNode2VertDist" ) ) ) ) { } */ rSolverContainer.AddConnector( rXShape, aStartPoint, aShapeA, aEndPoint, aShapeB ); switch ( eCt ) { case ::com::sun::star::drawing::ConnectorType_CURVE : { rShapeType = ESCHER_ShpInst_CurvedConnector3; AddOpt( ESCHER_Prop_cxstyle, ESCHER_cxstyleCurved ); AddOpt( ESCHER_Prop_adjustValue, nAdjustValue1 ); AddOpt( ESCHER_Prop_adjust2Value, -(sal_Int32)nAdjustValue2 ); } break; case ::com::sun::star::drawing::ConnectorType_STANDARD :// Connector 2->5 { if ( EscherPropertyValueHelper::GetPropertyValue( aAny, aXPropSet, sEdgePath ) ) { PolyPolygon aPolyPolygon = GetPolyPolygon( aAny ); Polygon aPoly; if ( aPolyPolygon.Count() > 0 ) { AddOpt( ESCHER_Prop_cxstyle, ESCHER_cxstyleBent ); aPoly = aPolyPolygon[ 0 ]; sal_Int32 nAdjCount = lcl_GetAdjustValueCount( aPoly ); rShapeType = ( sal_uInt16 )( ESCHER_ShpInst_BentConnector2 + nAdjCount); for ( sal_Int32 i = 0 ; i < nAdjCount; ++ i) AddOpt( (sal_uInt16) ( ESCHER_Prop_adjustValue+i) , lcl_GetConnectorAdjustValue( aPoly, i ) ); bRetValue = sal_True; } sal_Int32 nAngle=0; if (lcl_GetAngle(aPoly,rShapeFlags,nAngle )) { AddOpt( ESCHER_Prop_Rotation, nAngle ); } } else { rShapeType = ESCHER_ShpInst_BentConnector3; AddOpt( ESCHER_Prop_cxstyle, ESCHER_cxstyleBent ); } } break; default: case ::com::sun::star::drawing::ConnectorType_LINE : case ::com::sun::star::drawing::ConnectorType_LINES : // Connector 2->5 { rShapeType = ESCHER_ShpInst_StraightConnector1; AddOpt( ESCHER_Prop_cxstyle, ESCHER_cxstyleStraight ); } break; } CreateLineProperties( aXPropSet, sal_False ); bRetValue = sal_True; } } } } } return bRetValue; } sal_Bool EscherPropertyContainer::CreateShadowProperties( const ::com::sun::star::uno::Reference< ::com::sun::star::beans::XPropertySet > & rXPropSet ) { ::com::sun::star::uno::Any aAny; sal_Bool bHasShadow = sal_False; // shadow is possible only if at least a fillcolor, linecolor or graphic is set sal_uInt32 nLineFlags = 0; // default : shape has no line sal_uInt32 nFillFlags = 0x10; // shape is filled GetOpt( ESCHER_Prop_fNoLineDrawDash, nLineFlags ); GetOpt( ESCHER_Prop_fNoFillHitTest, nFillFlags ); sal_uInt32 nDummy; sal_Bool bGraphic = GetOpt( DFF_Prop_pib, nDummy ) || GetOpt( DFF_Prop_pibName, nDummy ) || GetOpt( DFF_Prop_pibFlags, nDummy ); sal_uInt32 nShadowFlags = 0x20000; if ( ( nLineFlags & 8 ) || ( nFillFlags & 0x10 ) || bGraphic ) { if ( EscherPropertyValueHelper::GetPropertyValue( aAny, rXPropSet, String( RTL_CONSTASCII_USTRINGPARAM( "Shadow" ) ), sal_True ) ) { if ( aAny >>= bHasShadow ) { if ( bHasShadow ) { nShadowFlags |= 2; if ( EscherPropertyValueHelper::GetPropertyValue( aAny, rXPropSet, String( RTL_CONSTASCII_USTRINGPARAM( "ShadowColor" ) ), sal_False ) ) AddOpt( ESCHER_Prop_shadowColor, ImplGetColor( *((sal_uInt32*)aAny.getValue()) ) ); if ( EscherPropertyValueHelper::GetPropertyValue( aAny, rXPropSet, String( RTL_CONSTASCII_USTRINGPARAM( "ShadowXDistance" ) ), sal_False ) ) AddOpt( ESCHER_Prop_shadowOffsetX, *((sal_Int32*)aAny.getValue()) * 360 ); if ( 
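/* Worked example (values assumed) for the shadow conversions above:
   - "ShadowXDistance"/"ShadowYDistance" arrive in 1/100 mm and are multiplied by 360,
     so a distance of 100 (1 mm) is written as 36000 -- presumably EMUs, only the factor
     is taken from the code.
   - "ShadowTransparence" arrives as a percentage; the opacity written is
     0x10000 - nTransparence * 655, so 30 percent transparency becomes
     65536 - 30 * 655 = 45886, roughly 0.70 in 16.16 fixed point.
*/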
EscherPropertyValueHelper::GetPropertyValue( aAny, rXPropSet, String( RTL_CONSTASCII_USTRINGPARAM( "ShadowYDistance" ) ), sal_False ) ) AddOpt( ESCHER_Prop_shadowOffsetY, *((sal_Int32*)aAny.getValue()) * 360 ); if ( EscherPropertyValueHelper::GetPropertyValue( aAny, rXPropSet, String( RTL_CONSTASCII_USTRINGPARAM( "ShadowTransparence" ) ), sal_False ) ) AddOpt( ESCHER_Prop_shadowOpacity, 0x10000 - (((sal_uInt32)*((sal_uInt16*)aAny.getValue())) * 655 ) ); } } } } AddOpt( ESCHER_Prop_fshadowObscured, nShadowFlags ); return bHasShadow; } // --------------------------------------------------------------------------------------------- sal_Int32 EscherPropertyContainer::GetValueForEnhancedCustomShapeParameter( const ::com::sun::star::drawing::EnhancedCustomShapeParameter& rParameter, const std::vector< sal_Int32 >& rEquationOrder, sal_Bool bAdjustTrans ) { sal_Int32 nValue = 0; if ( rParameter.Value.getValueTypeClass() == uno::TypeClass_DOUBLE ) { double fValue; if ( rParameter.Value >>= fValue ) nValue = (sal_Int32)fValue; } else rParameter.Value >>= nValue; switch( rParameter.Type ) { case com::sun::star::drawing::EnhancedCustomShapeParameterType::EQUATION : { OSL_ASSERT((sal_uInt32)nValue < rEquationOrder.size()); if ( (sal_uInt32)nValue < rEquationOrder.size() ) { nValue = (sal_uInt16)rEquationOrder[ nValue ]; nValue |= (sal_uInt32)0x80000000; } } break; case com::sun::star::drawing::EnhancedCustomShapeParameterType::ADJUSTMENT: { if(bAdjustTrans) { sal_uInt32 nAdjustValue = 0; sal_Bool bGot = GetOpt((sal_uInt16)( DFF_Prop_adjustValue + nValue ), nAdjustValue); if(bGot) nValue = (sal_Int32)nAdjustValue; } } break; case com::sun::star::drawing::EnhancedCustomShapeParameterType::NORMAL : default: break; /* not sure if it is allowed to set following values (but they are not yet used) case com::sun::star::drawing::EnhancedCustomShapeParameterType::BOTTOM : case com::sun::star::drawing::EnhancedCustomShapeParameterType::RIGHT : case com::sun::star::drawing::EnhancedCustomShapeParameterType::TOP : case com::sun::star::drawing::EnhancedCustomShapeParameterType::LEFT : */ } return nValue; } sal_Bool GetValueForEnhancedCustomShapeHandleParameter( sal_Int32& nRetValue, const com::sun::star::drawing::EnhancedCustomShapeParameter& rParameter ) { sal_Bool bSpecial = sal_False; nRetValue = 0; if ( rParameter.Value.getValueTypeClass() == uno::TypeClass_DOUBLE ) { double fValue; if ( rParameter.Value >>= fValue ) nRetValue = (sal_Int32)fValue; } else rParameter.Value >>= nRetValue; switch( rParameter.Type ) { case com::sun::star::drawing::EnhancedCustomShapeParameterType::EQUATION : { nRetValue += 3; bSpecial = sal_True; } break; case com::sun::star::drawing::EnhancedCustomShapeParameterType::ADJUSTMENT : { nRetValue += 0x100; bSpecial = sal_True; } break; case com::sun::star::drawing::EnhancedCustomShapeParameterType::TOP : case com::sun::star::drawing::EnhancedCustomShapeParameterType::LEFT : { nRetValue = 0; bSpecial = sal_True; } break; case com::sun::star::drawing::EnhancedCustomShapeParameterType::RIGHT : case com::sun::star::drawing::EnhancedCustomShapeParameterType::BOTTOM : { nRetValue = 1; bSpecial = sal_True; } break; case com::sun::star::drawing::EnhancedCustomShapeParameterType::NORMAL : { } break; } return bSpecial; } void ConvertEnhancedCustomShapeEquation( SdrObjCustomShape* pCustoShape, std::vector< EnhancedCustomShapeEquation >& rEquations, std::vector< sal_Int32 >& rEquationOrder ) { if ( pCustoShape ) { uno::Sequence< rtl::OUString > sEquationSource; const rtl::OUString sEquations( 
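/* Summary of the parameter encodings implemented above (no new behaviour, just the
   resulting values):
   - GetValueForEnhancedCustomShapeParameter:
       EQUATION n   ->  rEquationOrder[ n ] | 0x80000000
       ADJUSTMENT n ->  the current DFF_Prop_adjustValue+n option (when bAdjustTrans)
       NORMAL       ->  the plain value
   - GetValueForEnhancedCustomShapeHandleParameter (bSpecial == sal_True except NORMAL):
       EQUATION n   ->  n + 3
       ADJUSTMENT n ->  n + 0x100
       TOP/LEFT     ->  0
       RIGHT/BOTTOM ->  1
*/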
RTL_CONSTASCII_USTRINGPARAM( "Equations" ) ); SdrCustomShapeGeometryItem& rGeometryItem = (SdrCustomShapeGeometryItem&)(const SdrCustomShapeGeometryItem&) pCustoShape->GetMergedItem( SDRATTR_CUSTOMSHAPE_GEOMETRY ); const uno::Any* pAny = ((SdrCustomShapeGeometryItem&)rGeometryItem).GetPropertyValueByName( sEquations ); if ( pAny ) *pAny >>= sEquationSource; sal_Int32 nEquationSourceCount = sEquationSource.getLength(); if ( nEquationSourceCount && (nEquationSourceCount <= 128) ) { sal_Int32 i; for ( i = 0; i < nEquationSourceCount; i++ ) { EnhancedCustomShape2d aCustoShape2d( pCustoShape ); try { ::boost::shared_ptr< EnhancedCustomShape::ExpressionNode > aExpressNode( EnhancedCustomShape::FunctionParser::parseFunction( sEquationSource[ i ], aCustoShape2d ) ); com::sun::star::drawing::EnhancedCustomShapeParameter aPara( aExpressNode->fillNode( rEquations, NULL, 0 ) ); if ( aPara.Type != com::sun::star::drawing::EnhancedCustomShapeParameterType::EQUATION ) { EnhancedCustomShapeEquation aEquation; aEquation.nOperation = 0; EnhancedCustomShape::FillEquationParameter( aPara, 0, aEquation ); rEquations.push_back( aEquation ); } } catch ( EnhancedCustomShape::ParseError& ) { EnhancedCustomShapeEquation aEquation; // ups, we should not be here, aEquation.nOperation = 0; // creating a default equation with value 1 aEquation.nPara[ 0 ] = 1; // hoping that this will not break anything rEquations.push_back( aEquation ); } catch ( ... ) { EnhancedCustomShapeEquation aEquation; // #i112309# EnhancedCustomShape::Parse error aEquation.nOperation = 0; // not catched on linux platform aEquation.nPara[ 0 ] = 1; rEquations.push_back( aEquation ); } rEquationOrder.push_back( rEquations.size() - 1 ); } // now updating our old equation indices, they are marked with a bit in the hiword of nOperation std::vector< EnhancedCustomShapeEquation >::iterator aIter( rEquations.begin() ); std::vector< EnhancedCustomShapeEquation >::iterator aEnd ( rEquations.end() ); while( aIter != aEnd ) { sal_Int32 nMask = 0x20000000; for( i = 0; i < 3; i++ ) { if ( aIter->nOperation & nMask ) { aIter->nOperation ^= nMask; const sal_Int32 nIndex(aIter->nPara[ i ] & 0x3ff); // #124661# check index access, there are cases where this is out of bound leading // to errors up to crashes when executed if(nIndex < rEquationOrder.size()) { aIter->nPara[ i ] = rEquationOrder[ nIndex ] | 0x400; } else { OSL_ENSURE(false, "Attempted out of bound access to rEquationOrder of CustomShape (!)"); } } nMask <<= 1; } aIter++; } } } } sal_Bool EscherPropertyContainer::IsDefaultObject( SdrObjCustomShape* pCustoShape , const MSO_SPT eShapeType ) { sal_Bool bIsDefaultObject = sal_False; switch(eShapeType) { //if the custom shape is not default shape of ppt, return sal_Fasle; case mso_sptTearDrop: return bIsDefaultObject; default: break; } if ( pCustoShape ) { if ( pCustoShape->IsDefaultGeometry( SdrObjCustomShape::DEFAULT_EQUATIONS ) && pCustoShape->IsDefaultGeometry( SdrObjCustomShape::DEFAULT_VIEWBOX ) && pCustoShape->IsDefaultGeometry( SdrObjCustomShape::DEFAULT_PATH ) && pCustoShape->IsDefaultGeometry( SdrObjCustomShape::DEFAULT_GLUEPOINTS ) && pCustoShape->IsDefaultGeometry( SdrObjCustomShape::DEFAULT_SEGMENTS ) && pCustoShape->IsDefaultGeometry( SdrObjCustomShape::DEFAULT_STRETCHX ) && pCustoShape->IsDefaultGeometry( SdrObjCustomShape::DEFAULT_STRETCHY ) // && pCustoShape->IsDefaultGeometry( SdrObjCustomShape::DEFAULT_HANDLES ) && pCustoShape->IsDefaultGeometry( SdrObjCustomShape::DEFAULT_TEXTFRAMES ) ) bIsDefaultObject = sal_True; } return 
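/* Illustrative example (indices assumed) for the re-indexing loop above: a source
   equation may expand into several binary EnhancedCustomShapeEquation records, so
   rEquationOrder maps each source index to the index of the last record produced for it.
   If a later record refers to source equation 2 and rEquationOrder[ 2 ] == 4, the marker
   bit (0x20000000 << i) in nOperation is cleared and the parameter is rewritten to
   4 | 0x400 == 0x404, apparently meaning "value of equation 4".
*/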
bIsDefaultObject; } void EscherPropertyContainer::LookForPolarHandles( const MSO_SPT eShapeType, sal_Int32& nAdjustmentsWhichNeedsToBeConverted ) { const mso_CustomShape* pDefCustomShape = GetCustomShapeContent( eShapeType ); if ( pDefCustomShape && pDefCustomShape->nHandles && pDefCustomShape->pHandles ) { sal_Int32 k, nkCount = pDefCustomShape->nHandles; const SvxMSDffHandle* pData = pDefCustomShape->pHandles; for ( k = 0; k < nkCount; k++, pData++ ) { if ( pData->nFlags & MSDFF_HANDLE_FLAGS_POLAR ) { if ( ( pData->nPositionY >= 0x256 ) || ( pData->nPositionY <= 0x107 ) ) nAdjustmentsWhichNeedsToBeConverted |= ( 1 << k ); } } } } sal_Bool EscherPropertyContainer::GetAdjustmentValue( const com::sun::star::drawing::EnhancedCustomShapeAdjustmentValue & rkProp, sal_Int32 nIndex, sal_Int32 nAdjustmentsWhichNeedsToBeConverted, sal_Int32& nValue ) { if ( rkProp.State != beans::PropertyState_DIRECT_VALUE ) return sal_False; sal_Bool bUseFixedFloat = ( nAdjustmentsWhichNeedsToBeConverted & ( 1 << nIndex ) ) != 0; if ( rkProp.Value.getValueTypeClass() == uno::TypeClass_DOUBLE ) { double fValue(0.0); rkProp.Value >>= fValue; if ( bUseFixedFloat ) fValue *= 65536.0; nValue = (sal_Int32)fValue; } else { rkProp.Value >>= nValue; if ( bUseFixedFloat ) nValue <<= 16; } return sal_True; } void EscherPropertyContainer::CreateCustomShapeProperties( const MSO_SPT eShapeType, const uno::Reference< drawing::XShape > & rXShape ) { uno::Reference< beans::XPropertySet > aXPropSet( rXShape, uno::UNO_QUERY ); if ( aXPropSet.is() ) { SdrObjCustomShape* pCustoShape = (SdrObjCustomShape*)GetSdrObjectFromXShape( rXShape ); if ( !pCustoShape ) return; const rtl::OUString sCustomShapeGeometry( RTL_CONSTASCII_USTRINGPARAM( "CustomShapeGeometry" ) ); uno::Any aGeoPropSet = aXPropSet->getPropertyValue( sCustomShapeGeometry ); uno::Sequence< beans::PropertyValue > aGeoPropSeq; if ( aGeoPropSet >>= aGeoPropSeq ) { const rtl::OUString sViewBox ( RTL_CONSTASCII_USTRINGPARAM( "ViewBox" ) ); const rtl::OUString sTextRotateAngle ( RTL_CONSTASCII_USTRINGPARAM( "TextRotateAngle" ) ); const rtl::OUString sExtrusion ( RTL_CONSTASCII_USTRINGPARAM( "Extrusion" ) ); const rtl::OUString sEquations ( RTL_CONSTASCII_USTRINGPARAM( "Equations" ) ); const rtl::OUString sPath ( RTL_CONSTASCII_USTRINGPARAM( "Path" ) ); const rtl::OUString sTextPath ( RTL_CONSTASCII_USTRINGPARAM( "TextPath" ) ); const rtl::OUString sHandles ( RTL_CONSTASCII_USTRINGPARAM( "Handles" ) ); const rtl::OUString sAdjustmentValues ( RTL_CONSTASCII_USTRINGPARAM( "AdjustmentValues" ) ); bool bHasAdjustmentValuesProp = false; uno::Any aAdjustmentValuesProp; bool bHasPathCoordinatesProp = false; uno::Any aPathCoordinatesProp; sal_Int32 nAdjustmentsWhichNeedsToBeConverted = 0; uno::Sequence< beans::PropertyValues > aHandlesPropSeq; sal_Bool bPredefinedHandlesUsed = sal_True; sal_Bool bIsDefaultObject = IsDefaultObject( pCustoShape , eShapeType); // convert property "Equations" into std::vector< EnhancedCustomShapeEquationEquation > std::vector< EnhancedCustomShapeEquation > aEquations; std::vector< sal_Int32 > aEquationOrder; ConvertEnhancedCustomShapeEquation( pCustoShape, aEquations, aEquationOrder ); sal_Int32 i, nCount = aGeoPropSeq.getLength(); for ( i = 0; i < nCount; i++ ) { const beans::PropertyValue& rProp = aGeoPropSeq[ i ]; if ( rProp.Name.equals( sViewBox ) ) { if ( !bIsDefaultObject ) { awt::Rectangle aViewBox; if ( rProp.Value >>= aViewBox ) { AddOpt( DFF_Prop_geoLeft, aViewBox.X ); AddOpt( DFF_Prop_geoTop, aViewBox.Y ); AddOpt( DFF_Prop_geoRight, aViewBox.X + 
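/* Worked example (values assumed) for GetAdjustmentValue below: adjustment values that
   drive a polar handle (their bit is set in nAdjustmentsWhichNeedsToBeConverted) are
   written as 16.16 fixed point, the others are passed through unchanged:

       double 90.0, polar      ->  90 * 65536  = 5898240
       sal_Int32 50, polar     ->  50 << 16    = 3276800
       sal_Int32 50, not polar ->  50
*/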
aViewBox.Width ); AddOpt( DFF_Prop_geoBottom,aViewBox.Y + aViewBox.Height ); } } } else if ( rProp.Name.equals( sTextRotateAngle ) ) { double f = 0, fTextRotateAngle; if ( rProp.Value >>= f ) { fTextRotateAngle = fmod( f, 360.0 ); if ( fTextRotateAngle < 0 ) fTextRotateAngle = 360 + fTextRotateAngle; if ( ( fTextRotateAngle < 271.0 ) && ( fTextRotateAngle > 269.0 ) ) AddOpt( DFF_Prop_cdirFont, mso_cdir90 ); else if ( ( fTextRotateAngle < 181.0 ) && ( fTextRotateAngle > 179.0 ) ) AddOpt( DFF_Prop_cdirFont, mso_cdir180 ); else if ( ( fTextRotateAngle < 91.0 ) && ( fTextRotateAngle > 79.0 ) ) AddOpt( DFF_Prop_cdirFont, mso_cdir270 ); } } else if ( rProp.Name.equals( sExtrusion ) ) { uno::Sequence< beans::PropertyValue > aExtrusionPropSeq; if ( rProp.Value >>= aExtrusionPropSeq ) { sal_uInt32 nLightFaceFlagsOrg, nLightFaceFlags; sal_uInt32 nFillHarshFlagsOrg, nFillHarshFlags; nLightFaceFlagsOrg = nLightFaceFlags = 0x000001; nFillHarshFlagsOrg = nFillHarshFlags = 0x00001e; if ( GetOpt( DFF_Prop_fc3DLightFace, nLightFaceFlags ) ) nLightFaceFlagsOrg = nLightFaceFlags; if ( GetOpt( DFF_Prop_fc3DFillHarsh, nFillHarshFlags ) ) nFillHarshFlagsOrg = nFillHarshFlags; sal_Int32 r, nrCount = aExtrusionPropSeq.getLength(); for ( r = 0; r < nrCount; r++ ) { const beans::PropertyValue& rrProp = aExtrusionPropSeq[ r ]; const rtl::OUString sExtrusionBrightness ( RTL_CONSTASCII_USTRINGPARAM( "Brightness" ) ); const rtl::OUString sExtrusionDepth ( RTL_CONSTASCII_USTRINGPARAM( "Depth" ) ); const rtl::OUString sExtrusionDiffusion ( RTL_CONSTASCII_USTRINGPARAM( "Diffusion" ) ); const rtl::OUString sExtrusionNumberOfLineSegments ( RTL_CONSTASCII_USTRINGPARAM( "NumberOfLineSegments" ) ); const rtl::OUString sExtrusionLightFace ( RTL_CONSTASCII_USTRINGPARAM( "LightFace" ) ); const rtl::OUString sExtrusionFirstLightHarsh ( RTL_CONSTASCII_USTRINGPARAM( "FirstLightHarsh" ) ); const rtl::OUString sExtrusionSecondLightHarsh ( RTL_CONSTASCII_USTRINGPARAM( "SecondLightHarsh" ) ); const rtl::OUString sExtrusionFirstLightLevel ( RTL_CONSTASCII_USTRINGPARAM( "FirstLightLevel" ) ); const rtl::OUString sExtrusionSecondLightLevel ( RTL_CONSTASCII_USTRINGPARAM( "SecondLightLevel" ) ); const rtl::OUString sExtrusionFirstLightDirection ( RTL_CONSTASCII_USTRINGPARAM( "FirstLightDirection" ) ); const rtl::OUString sExtrusionSecondLightDirection ( RTL_CONSTASCII_USTRINGPARAM( "SecondLightDirection" ) ); const rtl::OUString sExtrusionMetal ( RTL_CONSTASCII_USTRINGPARAM( "Metal" ) ); const rtl::OUString sExtrusionShadeMode ( RTL_CONSTASCII_USTRINGPARAM( "ShadeMode" ) ); const rtl::OUString sExtrusionRotateAngle ( RTL_CONSTASCII_USTRINGPARAM( "RotateAngle" ) ); const rtl::OUString sExtrusionRotationCenter ( RTL_CONSTASCII_USTRINGPARAM( "RotationCenter" ) ); const rtl::OUString sExtrusionShininess ( RTL_CONSTASCII_USTRINGPARAM( "Shininess" ) ); const rtl::OUString sExtrusionSkew ( RTL_CONSTASCII_USTRINGPARAM( "Skew" ) ); const rtl::OUString sExtrusionSpecularity ( RTL_CONSTASCII_USTRINGPARAM( "Specularity" ) ); const rtl::OUString sExtrusionProjectionMode ( RTL_CONSTASCII_USTRINGPARAM( "ProjectionMode" ) ); const rtl::OUString sExtrusionViewPoint ( RTL_CONSTASCII_USTRINGPARAM( "ViewPoint" ) ); const rtl::OUString sExtrusionOrigin ( RTL_CONSTASCII_USTRINGPARAM( "Origin" ) ); const rtl::OUString sExtrusionColor ( RTL_CONSTASCII_USTRINGPARAM( "Color" ) ); if ( rrProp.Name.equals( sExtrusion ) ) { sal_Bool bExtrusionOn = sal_Bool(); if ( rrProp.Value >>= bExtrusionOn ) { nLightFaceFlags |= 0x80000; if ( bExtrusionOn ) nLightFaceFlags |= 8; 
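/* Note on the flag words used in this Extrusion block (derived from the code, not from
   the file format documentation): DFF_Prop_fc3DLightFace and DFF_Prop_fc3DFillHarsh are
   bit sets whose low word carries the values and whose high word marks which bits were
   explicitly set, e.g. "Extrusion" on -> nLightFaceFlags |= 0x80000 (bit 8 is valid)
   and |= 8 (extrusion enabled).  The options are only written back at the end of the
   block when the resulting word differs from its original/default value.
*/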
else nLightFaceFlags &=~8; } } else if ( rrProp.Name.equals( sExtrusionBrightness ) ) { double fExtrusionBrightness = 0; if ( rrProp.Value >>= fExtrusionBrightness ) AddOpt( DFF_Prop_c3DAmbientIntensity, (sal_Int32)( fExtrusionBrightness * 655.36 ) ); } else if ( rrProp.Name.equals( sExtrusionDepth ) ) { double fDepth = 0; double fFraction = 0; com::sun::star::drawing::EnhancedCustomShapeParameterPair aDepthParaPair; if ( ( rrProp.Value >>= aDepthParaPair ) && ( aDepthParaPair.First.Value >>= fDepth ) && ( aDepthParaPair.Second.Value >>= fFraction ) ) { double fForeDepth = fDepth * fFraction; double fBackDepth = fDepth - fForeDepth; fBackDepth *= 360.0; AddOpt( DFF_Prop_c3DExtrudeBackward, (sal_Int32)fBackDepth ); if ( fForeDepth != 0.0 ) { fForeDepth *= 360.0; AddOpt( DFF_Prop_c3DExtrudeForward, (sal_Int32)fForeDepth ); } } } else if ( rrProp.Name.equals( sExtrusionDiffusion ) ) { double fExtrusionDiffusion = 0; if ( rrProp.Value >>= fExtrusionDiffusion ) AddOpt( DFF_Prop_c3DDiffuseAmt, (sal_Int32)( fExtrusionDiffusion * 655.36 ) ); } else if ( rrProp.Name.equals( sExtrusionNumberOfLineSegments ) ) { sal_Int32 nExtrusionNumberOfLineSegments = 0; if ( rrProp.Value >>= nExtrusionNumberOfLineSegments ) AddOpt( DFF_Prop_c3DTolerance, nExtrusionNumberOfLineSegments ); } else if ( rrProp.Name.equals( sExtrusionLightFace ) ) { sal_Bool bExtrusionLightFace = sal_Bool(); if ( rrProp.Value >>= bExtrusionLightFace ) { nLightFaceFlags |= 0x10000; if ( bExtrusionLightFace ) nLightFaceFlags |= 1; else nLightFaceFlags &=~1; } } else if ( rrProp.Name.equals( sExtrusionFirstLightHarsh ) ) { sal_Bool bExtrusionFirstLightHarsh = sal_Bool(); if ( rrProp.Value >>= bExtrusionFirstLightHarsh ) { nFillHarshFlags |= 0x20000; if ( bExtrusionFirstLightHarsh ) nFillHarshFlags |= 2; else nFillHarshFlags &=~2; } } else if ( rrProp.Name.equals( sExtrusionSecondLightHarsh ) ) { sal_Bool bExtrusionSecondLightHarsh = sal_Bool(); if ( rrProp.Value >>= bExtrusionSecondLightHarsh ) { nFillHarshFlags |= 0x10000; if ( bExtrusionSecondLightHarsh ) nFillHarshFlags |= 1; else nFillHarshFlags &=~1; } } else if ( rrProp.Name.equals( sExtrusionFirstLightLevel ) ) { double fExtrusionFirstLightLevel = 0; if ( rrProp.Value >>= fExtrusionFirstLightLevel ) AddOpt( DFF_Prop_c3DKeyIntensity, (sal_Int32)( fExtrusionFirstLightLevel * 655.36 ) ); } else if ( rrProp.Name.equals( sExtrusionSecondLightLevel ) ) { double fExtrusionSecondLightLevel = 0; if ( rrProp.Value >>= fExtrusionSecondLightLevel ) AddOpt( DFF_Prop_c3DFillIntensity, (sal_Int32)( fExtrusionSecondLightLevel * 655.36 ) ); } else if ( rrProp.Name.equals( sExtrusionFirstLightDirection ) ) { drawing::Direction3D aExtrusionFirstLightDirection; if ( rrProp.Value >>= aExtrusionFirstLightDirection ) { AddOpt( DFF_Prop_c3DKeyX, (sal_Int32)aExtrusionFirstLightDirection.DirectionX ); AddOpt( DFF_Prop_c3DKeyY, (sal_Int32)aExtrusionFirstLightDirection.DirectionY ); AddOpt( DFF_Prop_c3DKeyZ, (sal_Int32)aExtrusionFirstLightDirection.DirectionZ ); } } else if ( rrProp.Name.equals( sExtrusionSecondLightDirection ) ) { drawing::Direction3D aExtrusionSecondLightPosition; if ( rrProp.Value >>= aExtrusionSecondLightPosition ) { AddOpt( DFF_Prop_c3DFillX, (sal_Int32)aExtrusionSecondLightPosition.DirectionX ); AddOpt( DFF_Prop_c3DFillY, (sal_Int32)aExtrusionSecondLightPosition.DirectionY ); AddOpt( DFF_Prop_c3DFillZ, (sal_Int32)aExtrusionSecondLightPosition.DirectionZ ); } } else if ( rrProp.Name.equals( sExtrusionMetal ) ) { sal_Bool bExtrusionMetal = sal_Bool(); if ( rrProp.Value >>= 
bExtrusionMetal ) { nLightFaceFlags |= 0x40000; if ( bExtrusionMetal ) nLightFaceFlags |= 4; else nLightFaceFlags &=~4; } } else if ( rrProp.Name.equals( sExtrusionShadeMode ) ) { drawing::ShadeMode eExtrusionShadeMode; if ( rrProp.Value >>= eExtrusionShadeMode ) { sal_uInt32 nRenderMode; switch( eExtrusionShadeMode ) { default: case drawing::ShadeMode_FLAT : case drawing::ShadeMode_PHONG : case drawing::ShadeMode_SMOOTH : nRenderMode = mso_FullRender; break; case drawing::ShadeMode_DRAFT : { nRenderMode = mso_Wireframe; } break; } AddOpt( DFF_Prop_c3DRenderMode, nRenderMode ); } } else if ( rrProp.Name.equals( sExtrusionRotateAngle ) ) { double fExtrusionAngleX = 0; double fExtrusionAngleY = 0; com::sun::star::drawing::EnhancedCustomShapeParameterPair aRotateAnglePair; if ( ( rrProp.Value >>= aRotateAnglePair ) && ( aRotateAnglePair.First.Value >>= fExtrusionAngleX ) && ( aRotateAnglePair.Second.Value >>= fExtrusionAngleY ) ) { fExtrusionAngleX *= 65536; fExtrusionAngleY *= 65536; AddOpt( DFF_Prop_c3DXRotationAngle, (sal_Int32)fExtrusionAngleX ); AddOpt( DFF_Prop_c3DYRotationAngle, (sal_Int32)fExtrusionAngleY ); } } else if ( rrProp.Name.equals( sExtrusionRotationCenter ) ) { drawing::Direction3D aExtrusionRotationCenter; if ( rrProp.Value >>= aExtrusionRotationCenter ) { AddOpt( DFF_Prop_c3DRotationCenterX, (sal_Int32)( aExtrusionRotationCenter.DirectionX * 360.0 ) ); AddOpt( DFF_Prop_c3DRotationCenterY, (sal_Int32)( aExtrusionRotationCenter.DirectionY * 360.0 ) ); AddOpt( DFF_Prop_c3DRotationCenterZ, (sal_Int32)( aExtrusionRotationCenter.DirectionZ * 360.0 ) ); nFillHarshFlags &=~8; // don't use AutoRotationCenter; } } else if ( rrProp.Name.equals( sExtrusionShininess ) ) { double fExtrusionShininess = 0; if ( rrProp.Value >>= fExtrusionShininess ) AddOpt( DFF_Prop_c3DShininess, (sal_Int32)( fExtrusionShininess * 655.36 ) ); } else if ( rrProp.Name.equals( sExtrusionSkew ) ) { double fSkewAmount = 0; double fSkewAngle = 0; com::sun::star::drawing::EnhancedCustomShapeParameterPair aSkewParaPair; if ( ( rrProp.Value >>= aSkewParaPair ) && ( aSkewParaPair.First.Value >>= fSkewAmount ) && ( aSkewParaPair.Second.Value >>= fSkewAngle ) ) { AddOpt( DFF_Prop_c3DSkewAmount, (sal_Int32)fSkewAmount ); AddOpt( DFF_Prop_c3DSkewAngle, (sal_Int32)( fSkewAngle * 65536 ) ); } } else if ( rrProp.Name.equals( sExtrusionSpecularity ) ) { double fExtrusionSpecularity = 0; if ( rrProp.Value >>= fExtrusionSpecularity ) AddOpt( DFF_Prop_c3DSpecularAmt, (sal_Int32)( fExtrusionSpecularity * 1333 ) ); } else if ( rrProp.Name.equals( sExtrusionProjectionMode ) ) { drawing::ProjectionMode eExtrusionProjectionMode; if ( rrProp.Value >>= eExtrusionProjectionMode ) { nFillHarshFlags |= 0x40000; if ( eExtrusionProjectionMode == drawing::ProjectionMode_PARALLEL ) nFillHarshFlags |= 4; else nFillHarshFlags &=~4; } } else if ( rrProp.Name.equals( sExtrusionViewPoint ) ) { drawing::Position3D aExtrusionViewPoint; if ( rrProp.Value >>= aExtrusionViewPoint ) { aExtrusionViewPoint.PositionX *= 360.0; aExtrusionViewPoint.PositionY *= 360.0; aExtrusionViewPoint.PositionZ *= 360.0; AddOpt( DFF_Prop_c3DXViewpoint, (sal_Int32)aExtrusionViewPoint.PositionX ); AddOpt( DFF_Prop_c3DYViewpoint, (sal_Int32)aExtrusionViewPoint.PositionY ); AddOpt( DFF_Prop_c3DZViewpoint, (sal_Int32)aExtrusionViewPoint.PositionZ ); } } else if ( rrProp.Name.equals( sExtrusionOrigin ) ) { double fExtrusionOriginX = 0; double fExtrusionOriginY = 0; com::sun::star::drawing::EnhancedCustomShapeParameterPair aOriginPair; if ( ( rrProp.Value >>= aOriginPair ) 
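/* Unit conversions used in this block, with assumed sample values:
   - percentages (Brightness, Diffusion, light levels, Shininess) are scaled by 655.36,
     so 100 percent becomes 65536 (16.16 fixed point).
   - RotateAngle and the Skew angle are multiplied by 65536 (degrees in 16.16 fixed
     point), e.g. 45 degrees -> 2949120.
   - Depth, RotationCenter and ViewPoint are multiplied by 360, i.e. 1/100 mm -> EMU
     (assumed interpretation; only the factor is taken from the code).
   - Origin is multiplied by 65536 (16.16 fixed point).
*/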
&& ( aOriginPair.First.Value >>= fExtrusionOriginX ) && ( aOriginPair.Second.Value >>= fExtrusionOriginY ) ) { AddOpt( DFF_Prop_c3DOriginX, (sal_Int32)( fExtrusionOriginX * 65536 ) ); AddOpt( DFF_Prop_c3DOriginY, (sal_Int32)( fExtrusionOriginY * 65536 ) ); } } else if ( rrProp.Name.equals( sExtrusionColor ) ) { sal_Bool bExtrusionColor = sal_Bool(); if ( rrProp.Value >>= bExtrusionColor ) { nLightFaceFlags |= 0x20000; if ( bExtrusionColor ) { nLightFaceFlags |= 2; uno::Any aFillColor2; if ( EscherPropertyValueHelper::GetPropertyValue( aFillColor2, aXPropSet, String( RTL_CONSTASCII_USTRINGPARAM( "FillColor2" ) ), sal_True ) ) { sal_uInt32 nFillColor = ImplGetColor( *((sal_uInt32*)aFillColor2.getValue()) ); AddOpt( DFF_Prop_c3DExtrusionColor, nFillColor ); } } else nLightFaceFlags &=~2; } } } if ( nLightFaceFlags != nLightFaceFlagsOrg ) AddOpt( DFF_Prop_fc3DLightFace, nLightFaceFlags ); if ( nFillHarshFlags != nFillHarshFlagsOrg ) AddOpt( DFF_Prop_fc3DFillHarsh, nFillHarshFlags ); } } else if ( rProp.Name.equals( sEquations ) ) { if ( !bIsDefaultObject ) { sal_uInt16 nElements = (sal_uInt16)aEquations.size(); if ( nElements ) { sal_uInt16 nElementSize = 8; sal_uInt32 nStreamSize = nElementSize * nElements + 6; SvMemoryStream aOut( nStreamSize ); aOut << nElements << nElements << nElementSize; std::vector< EnhancedCustomShapeEquation >::const_iterator aIter( aEquations.begin() ); std::vector< EnhancedCustomShapeEquation >::const_iterator aEnd ( aEquations.end() ); while( aIter != aEnd ) { aOut << (sal_uInt16)aIter->nOperation << (sal_Int16)aIter->nPara[ 0 ] << (sal_Int16)aIter->nPara[ 1 ] << (sal_Int16)aIter->nPara[ 2 ]; aIter++; } sal_uInt8* pBuf = new sal_uInt8[ nStreamSize ]; memcpy( pBuf, aOut.GetData(), nStreamSize ); AddOpt( DFF_Prop_pFormulas, sal_True, nStreamSize - 6, pBuf, nStreamSize ); } else { sal_uInt8* pBuf = new sal_uInt8[ 1 ]; AddOpt( DFF_Prop_pFormulas, sal_True, 0, pBuf, 0 ); } } } else if ( rProp.Name.equals( sPath ) ) { uno::Sequence< beans::PropertyValue > aPathPropSeq; if ( rProp.Value >>= aPathPropSeq ) { sal_uInt32 nPathFlags, nPathFlagsOrg; nPathFlagsOrg = nPathFlags = 0x39; if ( GetOpt( DFF_Prop_fFillOK, nPathFlags ) ) nPathFlagsOrg = nPathFlags; sal_Int32 r, nrCount = aPathPropSeq.getLength(); for ( r = 0; r < nrCount; r++ ) { const beans::PropertyValue& rrProp = aPathPropSeq[ r ]; const rtl::OUString sPathExtrusionAllowed ( RTL_CONSTASCII_USTRINGPARAM( "ExtrusionAllowed" ) ); const rtl::OUString sPathConcentricGradientFillAllowed ( RTL_CONSTASCII_USTRINGPARAM( "ConcentricGradientFillAllowed" ) ); const rtl::OUString sPathTextPathAllowed ( RTL_CONSTASCII_USTRINGPARAM( "TextPathAllowed" ) ); const rtl::OUString sPathCoordinates ( RTL_CONSTASCII_USTRINGPARAM( "Coordinates" ) ); const rtl::OUString sPathGluePoints ( RTL_CONSTASCII_USTRINGPARAM( "GluePoints" ) ); const rtl::OUString sPathGluePointType ( RTL_CONSTASCII_USTRINGPARAM( "GluePointType" ) ); const rtl::OUString sPathSegments ( RTL_CONSTASCII_USTRINGPARAM( "Segments" ) ); const rtl::OUString sPathStretchX ( RTL_CONSTASCII_USTRINGPARAM( "StretchX" ) ); const rtl::OUString sPathStretchY ( RTL_CONSTASCII_USTRINGPARAM( "StretchY" ) ); const rtl::OUString sPathTextFrames ( RTL_CONSTASCII_USTRINGPARAM( "TextFrames" ) ); if ( rrProp.Name.equals( sPathExtrusionAllowed ) ) { sal_Bool bExtrusionAllowed = sal_Bool(); if ( rrProp.Value >>= bExtrusionAllowed ) { nPathFlags |= 0x100000; if ( bExtrusionAllowed ) nPathFlags |= 16; else nPathFlags &=~16; } } else if ( rrProp.Name.equals( sPathConcentricGradientFillAllowed ) ) 
{ sal_Bool bConcentricGradientFillAllowed = sal_Bool(); if ( rrProp.Value >>= bConcentricGradientFillAllowed ) { nPathFlags |= 0x20000; if ( bConcentricGradientFillAllowed ) nPathFlags |= 2; else nPathFlags &=~2; } } else if ( rrProp.Name.equals( sPathTextPathAllowed ) ) { sal_Bool bTextPathAllowed = sal_Bool(); if ( rrProp.Value >>= bTextPathAllowed ) { nPathFlags |= 0x40000; if ( bTextPathAllowed ) nPathFlags |= 4; else nPathFlags &=~4; } } else if ( rrProp.Name.equals( sPathCoordinates ) ) { if ( !bIsDefaultObject ) { aPathCoordinatesProp = rrProp.Value; bHasPathCoordinatesProp = true; } } else if ( rrProp.Name.equals( sPathGluePoints ) ) { if ( !bIsDefaultObject ) { com::sun::star::uno::Sequence< com::sun::star::drawing::EnhancedCustomShapeParameterPair> aGluePoints; if ( rrProp.Value >>= aGluePoints ) { // creating the vertices sal_uInt16 nElements = (sal_uInt16)aGluePoints.getLength(); if ( nElements ) { sal_uInt16 j, nElementSize = 8; sal_uInt32 nStreamSize = nElementSize * nElements + 6; SvMemoryStream aOut( nStreamSize ); aOut << nElements << nElements << nElementSize; for( j = 0; j < nElements; j++ ) { sal_Int32 X = GetValueForEnhancedCustomShapeParameter( aGluePoints[ j ].First, aEquationOrder ); sal_Int32 Y = GetValueForEnhancedCustomShapeParameter( aGluePoints[ j ].Second, aEquationOrder ); aOut << X << Y; } sal_uInt8* pBuf = new sal_uInt8[ nStreamSize ]; memcpy( pBuf, aOut.GetData(), nStreamSize ); AddOpt( DFF_Prop_connectorPoints, sal_True, nStreamSize - 6, pBuf, nStreamSize ); // -6 } else { sal_uInt8* pBuf = new sal_uInt8[ 1 ]; AddOpt( DFF_Prop_connectorPoints, sal_True, 0, pBuf, 0 ); } } } } else if ( rrProp.Name.equals( sPathGluePointType ) ) { sal_Int16 nGluePointType = sal_Int16(); if ( rrProp.Value >>= nGluePointType ) AddOpt( DFF_Prop_connectorType, (sal_uInt16)nGluePointType ); } else if ( rrProp.Name.equals( sPathSegments ) ) { if ( !bIsDefaultObject ) { com::sun::star::uno::Sequence< com::sun::star::drawing::EnhancedCustomShapeSegment > aSegments; if ( rrProp.Value >>= aSegments ) { // creating seginfo if ( (sal_uInt16)aSegments.getLength() ) { sal_uInt16 j, nElements = (sal_uInt16)aSegments.getLength(); sal_uInt16 nElementSize = 2; sal_uInt32 nStreamSize = nElementSize * nElements + 6; SvMemoryStream aOut( nStreamSize ); aOut << nElements << nElements << nElementSize; for ( j = 0; j < nElements; j++ ) { sal_uInt16 nVal = (sal_uInt16)aSegments[ j ].Count; switch( aSegments[ j ].Command ) { case com::sun::star::drawing::EnhancedCustomShapeSegmentCommand::UNKNOWN : case com::sun::star::drawing::EnhancedCustomShapeSegmentCommand::LINETO : break; case com::sun::star::drawing::EnhancedCustomShapeSegmentCommand::MOVETO : { nVal = 0x4000; } break; case com::sun::star::drawing::EnhancedCustomShapeSegmentCommand::CURVETO : { nVal |= 0x2000; } break; case com::sun::star::drawing::EnhancedCustomShapeSegmentCommand::CLOSESUBPATH : { nVal = 0x6001; } break; case com::sun::star::drawing::EnhancedCustomShapeSegmentCommand::ENDSUBPATH : { nVal = 0x8000; } break; case com::sun::star::drawing::EnhancedCustomShapeSegmentCommand::NOFILL : { nVal = 0xaa00; } break; case com::sun::star::drawing::EnhancedCustomShapeSegmentCommand::NOSTROKE : { nVal = 0xab00; } break; case com::sun::star::drawing::EnhancedCustomShapeSegmentCommand::ANGLEELLIPSETO : { nVal *= 3; nVal |= 0xa100; } break; case com::sun::star::drawing::EnhancedCustomShapeSegmentCommand::ANGLEELLIPSE : { nVal *= 3; nVal |= 0xa200; } break; case com::sun::star::drawing::EnhancedCustomShapeSegmentCommand::ARCTO : { nVal <<= 2; 
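/* Worked example (an assumed path, not from the original sources) for the segment
   encoding done in this switch: a sub-path consisting of a moveto, two straight lines,
   one bezier segment and a close is written as the sal_uInt16 values

       MOVETO        -> 0x4000
       LINETO (2)    -> 0x0002
       CURVETO (1)   -> 0x2001
       CLOSESUBPATH  -> 0x6001
       ENDSUBPATH    -> 0x8000
*/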
nVal |= 0xa300; } break; case com::sun::star::drawing::EnhancedCustomShapeSegmentCommand::ARC : { nVal <<= 2; nVal |= 0xa400; } break; case com::sun::star::drawing::EnhancedCustomShapeSegmentCommand::CLOCKWISEARCTO : { nVal <<= 2; nVal |= 0xa500; } break; case com::sun::star::drawing::EnhancedCustomShapeSegmentCommand::CLOCKWISEARC : { nVal <<= 2; nVal |= 0xa600; } break; case com::sun::star::drawing::EnhancedCustomShapeSegmentCommand::ELLIPTICALQUADRANTX : { nVal |= 0xa700; } break; case com::sun::star::drawing::EnhancedCustomShapeSegmentCommand::ELLIPTICALQUADRANTY : { nVal |= 0xa800; } break; } aOut << nVal; } sal_uInt8* pBuf = new sal_uInt8[ nStreamSize ]; memcpy( pBuf, aOut.GetData(), nStreamSize ); AddOpt( DFF_Prop_pSegmentInfo, sal_False, nStreamSize - 6, pBuf, nStreamSize ); } else { sal_uInt8* pBuf = new sal_uInt8[ 1 ]; AddOpt( DFF_Prop_pSegmentInfo, sal_True, 0, pBuf, 0 ); } } } } else if ( rrProp.Name.equals( sPathStretchX ) ) { if ( !bIsDefaultObject ) { sal_Int32 nStretchX = 0; if ( rrProp.Value >>= nStretchX ) AddOpt( DFF_Prop_stretchPointX, nStretchX ); } } else if ( rrProp.Name.equals( sPathStretchY ) ) { if ( !bIsDefaultObject ) { sal_Int32 nStretchY = 0; if ( rrProp.Value >>= nStretchY ) AddOpt( DFF_Prop_stretchPointY, nStretchY ); } } else if ( rrProp.Name.equals( sPathTextFrames ) ) { if ( !bIsDefaultObject ) { com::sun::star::uno::Sequence< com::sun::star::drawing::EnhancedCustomShapeTextFrame > aPathTextFrames; if ( rrProp.Value >>= aPathTextFrames ) { if ( (sal_uInt16)aPathTextFrames.getLength() ) { sal_uInt16 j, nElements = (sal_uInt16)aPathTextFrames.getLength(); sal_uInt16 nElementSize = 16; sal_uInt32 nStreamSize = nElementSize * nElements + 6; SvMemoryStream aOut( nStreamSize ); aOut << nElements << nElements << nElementSize; for ( j = 0; j < nElements; j++ ) { sal_Int32 nLeft = GetValueForEnhancedCustomShapeParameter( aPathTextFrames[ j ].TopLeft.First, aEquationOrder ); sal_Int32 nTop = GetValueForEnhancedCustomShapeParameter( aPathTextFrames[ j ].TopLeft.Second, aEquationOrder ); sal_Int32 nRight = GetValueForEnhancedCustomShapeParameter( aPathTextFrames[ j ].BottomRight.First, aEquationOrder ); sal_Int32 nBottom = GetValueForEnhancedCustomShapeParameter( aPathTextFrames[ j ].BottomRight.Second, aEquationOrder ); aOut << nLeft << nTop << nRight << nBottom; } sal_uInt8* pBuf = new sal_uInt8[ nStreamSize ]; memcpy( pBuf, aOut.GetData(), nStreamSize ); AddOpt( DFF_Prop_textRectangles, sal_True, nStreamSize - 6, pBuf, nStreamSize ); } else { sal_uInt8* pBuf = new sal_uInt8[ 1 ]; AddOpt( DFF_Prop_textRectangles, sal_True, 0, pBuf, 0 ); } } } } } if ( nPathFlags != nPathFlagsOrg ) AddOpt( DFF_Prop_fFillOK, nPathFlags ); } } else if ( rProp.Name.equals( sTextPath ) ) { uno::Sequence< beans::PropertyValue > aTextPathPropSeq; if ( rProp.Value >>= aTextPathPropSeq ) { sal_uInt32 nTextPathFlagsOrg, nTextPathFlags; nTextPathFlagsOrg = nTextPathFlags = 0xffff1000; // default if ( GetOpt( DFF_Prop_gtextFStrikethrough, nTextPathFlags ) ) nTextPathFlagsOrg = nTextPathFlags; sal_Int32 r, nrCount = aTextPathPropSeq.getLength(); for ( r = 0; r < nrCount; r++ ) { const beans::PropertyValue& rrProp = aTextPathPropSeq[ r ]; const rtl::OUString sTextPathMode ( RTL_CONSTASCII_USTRINGPARAM( "TextPathMode" ) ); const rtl::OUString sTextPathScaleX ( RTL_CONSTASCII_USTRINGPARAM( "ScaleX" ) ); const rtl::OUString sSameLetterHeights ( RTL_CONSTASCII_USTRINGPARAM( "SameLetterHeights" ) ); if ( rrProp.Name.equals( sTextPath ) ) { sal_Bool bTextPathOn = sal_Bool(); if ( rrProp.Value >>= 
bTextPathOn ) { nTextPathFlags |= 0x40000000; if ( bTextPathOn ) { nTextPathFlags |= 0x4000; sal_uInt32 nPathFlags = 0x39; GetOpt( DFF_Prop_fFillOK, nPathFlags ); // SJ: can be removed if we are supporting the TextPathAllowed property in XML nPathFlags |= 0x40004; AddOpt( DFF_Prop_fFillOK, nPathFlags ); } else nTextPathFlags &=~0x4000; } } else if ( rrProp.Name.equals( sTextPathMode ) ) { com::sun::star::drawing::EnhancedCustomShapeTextPathMode eTextPathMode; if ( rrProp.Value >>= eTextPathMode ) { nTextPathFlags |= 0x05000000; nTextPathFlags &=~0x500; // TextPathMode_NORMAL if ( eTextPathMode == com::sun::star::drawing::EnhancedCustomShapeTextPathMode_PATH ) nTextPathFlags |= 0x100; else if ( eTextPathMode == com::sun::star::drawing::EnhancedCustomShapeTextPathMode_SHAPE ) nTextPathFlags |= 0x500; } } else if ( rrProp.Name.equals( sTextPathScaleX ) ) { sal_Bool bTextPathScaleX = sal_Bool(); if ( rrProp.Value >>= bTextPathScaleX ) { nTextPathFlags |= 0x00400000; if ( bTextPathScaleX ) nTextPathFlags |= 0x40; else nTextPathFlags &=~0x40; } } else if ( rrProp.Name.equals( sSameLetterHeights ) ) { sal_Bool bSameLetterHeights = sal_Bool(); if ( rrProp.Value >>= bSameLetterHeights ) { nTextPathFlags |= 0x00800000; if ( bSameLetterHeights ) nTextPathFlags |= 0x80; else nTextPathFlags &=~0x80; } } } if ( nTextPathFlags & 0x4000 ) // Is FontWork ? { // FontWork Text rtl::OUString aText; uno::Reference< text::XSimpleText > xText( rXShape, uno::UNO_QUERY ); if ( xText.is() ) aText = xText->getString(); if ( !aText.getLength() ) aText = ::rtl::OUString::createFromAscii( "your text" ); // todo: moving into a resource AddOpt( DFF_Prop_gtextUNICODE, aText ); // FontWork Font rtl::OUString aFontName; const rtl::OUString sCharFontName ( RTL_CONSTASCII_USTRINGPARAM( "CharFontName" ) ); uno::Any aAny = aXPropSet->getPropertyValue( sCharFontName ); aAny >>= aFontName; if ( !aFontName.getLength() ) aFontName = ::rtl::OUString::createFromAscii( "Arial Black" ); AddOpt( DFF_Prop_gtextFont, aFontName ); sal_Int16 nCharScaleWidth = 100; if ( EscherPropertyValueHelper::GetPropertyValue( aAny, aXPropSet, String( RTL_CONSTASCII_USTRINGPARAM( "CharScaleWidth" ) ), sal_True ) ) { if ( aAny >>= nCharScaleWidth ) { if ( nCharScaleWidth != 100 ) { sal_Int32 nVal = nCharScaleWidth * 655; AddOpt( DFF_Prop_gtextSpacing, nVal ); } } } if ( EscherPropertyValueHelper::GetPropertyValue( aAny, aXPropSet, String( RTL_CONSTASCII_USTRINGPARAM( "CharHeight" ) ), sal_True ) ) { float fCharHeight = 0.0; if ( aAny >>= fCharHeight ) { sal_Int32 nTextSize = static_cast< sal_Int32 > ( fCharHeight * 65536 ); AddOpt(ESCHER_Prop_gtextSize, nTextSize); } } if ( EscherPropertyValueHelper::GetPropertyValue( aAny, aXPropSet, String( RTL_CONSTASCII_USTRINGPARAM( "CharKerning" ) ), sal_True ) ) { sal_Int16 nCharKerning = sal_Int16(); if ( aAny >>= nCharKerning ) { nTextPathFlags |= 0x10000000; if ( nCharKerning ) nTextPathFlags |= 0x1000; else nTextPathFlags &=~0x1000; } } if ( EscherPropertyValueHelper::GetPropertyValue( aAny, aXPropSet, String( RTL_CONSTASCII_USTRINGPARAM( "CharPosture" ) ), sal_True ) ) { awt::FontSlant eFontSlant; if ( aAny >>= eFontSlant ) { nTextPathFlags |= 0x100010; if ( eFontSlant != awt::FontSlant_NONE ) nTextPathFlags |= 0x10; else nTextPathFlags &=~0x10; } } if ( EscherPropertyValueHelper::GetPropertyValue( aAny, aXPropSet, String( RTL_CONSTASCII_USTRINGPARAM( "CharWeight" ) ), sal_True ) ) { float fFontWidth = 0; if ( aAny >>= fFontWidth ) { nTextPathFlags |= 0x200020; if ( fFontWidth > awt::FontWeight::NORMAL ) 
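/* Conversions used for the FontWork (gtext) properties in this block, sample values
   assumed: the on/off switches all end up in DFF_Prop_gtextFStrikethrough, again with
   value bits in the low word and "use" bits in the high word (0x4000 = FontWork on,
   0x10 = italic, 0x20 = bold, 0x1000 = kerning, 0x2000 = vertical text).  Sizes are
   16.16 fixed point:

       CharScaleWidth 150 (percent) -> gtextSpacing 150 * 655   = 98250
       CharHeight     36  (points)  -> gtextSize    36 * 65536  = 2359296
*/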
nTextPathFlags |= 0x20; else nTextPathFlags &=~0x20; } } //export gTextAlign attr if ( EscherPropertyValueHelper::GetPropertyValue( aAny, aXPropSet, String( RTL_CONSTASCII_USTRINGPARAM( "TextHorizontalAdjust" ) ), sal_True ) ) { MSO_GeoTextAlign gTextAlign = mso_alignTextCenter; // SdrFitToSizeType eFTS( ((SdrTextFitToSizeTypeItem&)pCustoShape->GetMergedItem( SDRATTR_TEXT_FITTOSIZE )).GetValue() ); drawing::TextHorizontalAdjust eHA( drawing::TextHorizontalAdjust_LEFT ); aAny >>= eHA; switch( eHA ) { case drawing::TextHorizontalAdjust_LEFT : gTextAlign = mso_alignTextLeft; break; case drawing::TextHorizontalAdjust_CENTER: gTextAlign = mso_alignTextCenter; break; case drawing::TextHorizontalAdjust_RIGHT: gTextAlign = mso_alignTextRight; break; case drawing::TextHorizontalAdjust_BLOCK: { SdrFitToSizeType eFTS( ((SdrTextFitToSizeTypeItem&)pCustoShape->GetMergedItem( SDRATTR_TEXT_FITTOSIZE )).GetValue() ); if ( eFTS == SDRTEXTFIT_ALLLINES) { gTextAlign = mso_alignTextStretch; } else { gTextAlign = mso_alignTextWordJust; } break; } default: break; } AddOpt(DFF_Prop_gtextAlign,gTextAlign); } } if((nTextPathFlags & 0x4000) != 0) //Is Font work { OutlinerParaObject* pOutlinerParaObject = pCustoShape->GetOutlinerParaObject(); if ( pOutlinerParaObject && pOutlinerParaObject->IsVertical() ) nTextPathFlags |= 0x2000; } if ( nTextPathFlags != nTextPathFlagsOrg ) AddOpt( DFF_Prop_gtextFStrikethrough, nTextPathFlags ); } } else if ( rProp.Name.equals( sHandles ) ) { if ( !bIsDefaultObject ) { bPredefinedHandlesUsed = sal_False; if ( rProp.Value >>= aHandlesPropSeq ) { sal_uInt16 nElements = (sal_uInt16)aHandlesPropSeq.getLength(); if ( nElements ) { const rtl::OUString sHandle ( RTL_CONSTASCII_USTRINGPARAM( "Handle" ) ); sal_uInt16 k, j, nElementSize = 36; sal_uInt32 nStreamSize = nElementSize * nElements + 6; SvMemoryStream aOut( nStreamSize ); aOut << nElements << nElements << nElementSize; for ( k = 0; k < nElements; k++ ) { sal_uInt32 nFlags = 0; sal_Int32 nXPosition = 0; sal_Int32 nYPosition = 0; sal_Int32 nXMap = 0; sal_Int32 nYMap = 0; sal_Int32 nXRangeMin = 0x80000000; sal_Int32 nXRangeMax = 0x7fffffff; sal_Int32 nYRangeMin = 0x80000000; sal_Int32 nYRangeMax = 0x7fffffff; const uno::Sequence< beans::PropertyValue >& rPropSeq = aHandlesPropSeq[ k ]; for ( j = 0; j < rPropSeq.getLength(); j++ ) { const beans::PropertyValue& rPropVal = rPropSeq[ j ]; const rtl::OUString sPosition ( RTL_CONSTASCII_USTRINGPARAM( "Position" ) ); const rtl::OUString sMirroredX ( RTL_CONSTASCII_USTRINGPARAM( "MirroredX" ) ); const rtl::OUString sMirroredY ( RTL_CONSTASCII_USTRINGPARAM( "MirroredY" ) ); const rtl::OUString sSwitched ( RTL_CONSTASCII_USTRINGPARAM( "Switched" ) ); const rtl::OUString sPolar ( RTL_CONSTASCII_USTRINGPARAM( "Polar" ) ); // const rtl::OUString sMap ( RTL_CONSTASCII_USTRINGPARAM( "Map" ) ); const rtl::OUString sRadiusRangeMinimum ( RTL_CONSTASCII_USTRINGPARAM( "RadiusRangeMinimum" ) ); const rtl::OUString sRadiusRangeMaximum ( RTL_CONSTASCII_USTRINGPARAM( "RadiusRangeMaximum" ) ); const rtl::OUString sRangeXMinimum ( RTL_CONSTASCII_USTRINGPARAM( "RangeXMinimum" ) ); const rtl::OUString sRangeXMaximum ( RTL_CONSTASCII_USTRINGPARAM( "RangeXMaximum" ) ); const rtl::OUString sRangeYMinimum ( RTL_CONSTASCII_USTRINGPARAM( "RangeYMinimum" ) ); const rtl::OUString sRangeYMaximum ( RTL_CONSTASCII_USTRINGPARAM( "RangeYMaximum" ) ); if ( rPropVal.Name.equals( sPosition ) ) { com::sun::star::drawing::EnhancedCustomShapeParameterPair aPosition; if ( rPropVal.Value >>= aPosition ) { 
GetValueForEnhancedCustomShapeHandleParameter( nXPosition, aPosition.First ); GetValueForEnhancedCustomShapeHandleParameter( nYPosition, aPosition.Second ); } } else if ( rPropVal.Name.equals( sMirroredX ) ) { sal_Bool bMirroredX = sal_Bool(); if ( rPropVal.Value >>= bMirroredX ) { if ( bMirroredX ) nFlags |= 1; } } else if ( rPropVal.Name.equals( sMirroredY ) ) { sal_Bool bMirroredY = sal_Bool(); if ( rPropVal.Value >>= bMirroredY ) { if ( bMirroredY ) nFlags |= 2; } } else if ( rPropVal.Name.equals( sSwitched ) ) { sal_Bool bSwitched = sal_Bool(); if ( rPropVal.Value >>= bSwitched ) { if ( bSwitched ) nFlags |= 4; } } else if ( rPropVal.Name.equals( sPolar ) ) { com::sun::star::drawing::EnhancedCustomShapeParameterPair aPolar; if ( rPropVal.Value >>= aPolar ) { if ( GetValueForEnhancedCustomShapeHandleParameter( nXMap, aPolar.First ) ) nFlags |= 0x800; if ( GetValueForEnhancedCustomShapeHandleParameter( nYMap, aPolar.Second ) ) nFlags |= 0x1000; nFlags |= 8; } } /* seems not to be used. else if ( rPropVal.Name.equals( sMap ) ) { com::sun::star::drawing::EnhancedCustomShapeParameterPair aMap; if ( rPropVal.Value >>= aMap ) { if ( GetValueForEnhancedCustomShapeHandleParameter( nXMap, aMap.First ) ) nFlags |= 0x800; if ( GetValueForEnhancedCustomShapeHandleParameter( nYMap, aMap.Second ) ) nFlags |= 0x1000; nFlags |= 0x10; } } */ else if ( rPropVal.Name.equals( sRadiusRangeMinimum ) ) { nYRangeMin = (sal_Int32)0xff4c0000; // the range of angles seems to be a not nYRangeMax = (sal_Int32)0x00b40000; // used feature, so we are defaulting this com::sun::star::drawing::EnhancedCustomShapeParameter aRadiusRangeMinimum; if ( rPropVal.Value >>= aRadiusRangeMinimum ) { if ( GetValueForEnhancedCustomShapeHandleParameter( nXRangeMin, aRadiusRangeMinimum ) ) nFlags |= 0x80; nFlags |= 0x2000; } } else if ( rPropVal.Name.equals( sRadiusRangeMaximum ) ) { nYRangeMin = (sal_Int32)0xff4c0000; // the range of angles seems to be a not nYRangeMax = (sal_Int32)0x00b40000; // used feature, so we are defaulting this com::sun::star::drawing::EnhancedCustomShapeParameter aRadiusRangeMaximum; if ( rPropVal.Value >>= aRadiusRangeMaximum ) { if ( GetValueForEnhancedCustomShapeHandleParameter( nXRangeMax, aRadiusRangeMaximum ) ) nFlags |= 0x100; nFlags |= 0x2000; } } else if ( rPropVal.Name.equals( sRangeXMinimum ) ) { com::sun::star::drawing::EnhancedCustomShapeParameter aXRangeMinimum; if ( rPropVal.Value >>= aXRangeMinimum ) { if ( GetValueForEnhancedCustomShapeHandleParameter( nXRangeMin, aXRangeMinimum ) ) nFlags |= 0x80; nFlags |= 0x20; } } else if ( rPropVal.Name.equals( sRangeXMaximum ) ) { com::sun::star::drawing::EnhancedCustomShapeParameter aXRangeMaximum; if ( rPropVal.Value >>= aXRangeMaximum ) { if ( GetValueForEnhancedCustomShapeHandleParameter( nXRangeMax, aXRangeMaximum ) ) nFlags |= 0x100; nFlags |= 0x20; } } else if ( rPropVal.Name.equals( sRangeYMinimum ) ) { com::sun::star::drawing::EnhancedCustomShapeParameter aYRangeMinimum; if ( rPropVal.Value >>= aYRangeMinimum ) { if ( GetValueForEnhancedCustomShapeHandleParameter( nYRangeMin, aYRangeMinimum ) ) nFlags |= 0x200; nFlags |= 0x20; } } else if ( rPropVal.Name.equals( sRangeYMaximum ) ) { com::sun::star::drawing::EnhancedCustomShapeParameter aYRangeMaximum; if ( rPropVal.Value >>= aYRangeMaximum ) { if ( GetValueForEnhancedCustomShapeHandleParameter( nYRangeMax, aYRangeMaximum ) ) nFlags |= 0x400; nFlags |= 0x20; } } } aOut << nFlags << nXPosition << nYPosition << nXMap << nYMap << nXRangeMin << nXRangeMax << nYRangeMin << nYRangeMax; if ( nFlags & 
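/* Layout of one 36 byte handle record as streamed just above (interpretation of the
   individual flag bits as assigned in the code of this loop):
       sal_uInt32 nFlags       1 = MirroredX, 2 = MirroredY, 4 = Switched, 8 = Polar,
                               0x20 = has range, 0x80/0x100/0x200/0x400 = explicit
                               x-min/x-max/y-min/y-max, 0x800/0x1000 = map values given,
                               0x2000 = radius range
       sal_Int32  nXPosition, nYPosition   (0x100 + n refers to adjustment value n)
       sal_Int32  nXMap, nYMap
       sal_Int32  nXRangeMin, nXRangeMax, nYRangeMin, nYRangeMax
*/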
8 ) nAdjustmentsWhichNeedsToBeConverted |= ( 1 << ( nYPosition - 0x100 ) ); } sal_uInt8* pBuf = new sal_uInt8[ nStreamSize ]; memcpy( pBuf, aOut.GetData(), nStreamSize ); AddOpt( DFF_Prop_Handles, sal_True, nStreamSize - 6, pBuf, nStreamSize ); } else { sal_uInt8* pBuf = new sal_uInt8[ 1 ]; AddOpt( DFF_Prop_Handles, sal_True, 0, pBuf, 0 ); } } } } else if ( rProp.Name.equals( sAdjustmentValues ) ) { // it is required, that the information which handle is polar has already be read, // so we are able to change the polar value to a fixed float aAdjustmentValuesProp = rProp.Value; bHasAdjustmentValuesProp = true; } } if ( bHasAdjustmentValuesProp ) { uno::Sequence< com::sun::star::drawing::EnhancedCustomShapeAdjustmentValue > aAdjustmentSeq; if ( aAdjustmentValuesProp >>= aAdjustmentSeq ) { if ( bPredefinedHandlesUsed ) LookForPolarHandles( eShapeType, nAdjustmentsWhichNeedsToBeConverted ); sal_Int32 k, nValue = 0, nAdjustmentValues = aAdjustmentSeq.getLength(); for ( k = 0; k < nAdjustmentValues; k++ ) if( GetAdjustmentValue( aAdjustmentSeq[ k ], k, nAdjustmentsWhichNeedsToBeConverted, nValue ) ) AddOpt( (sal_uInt16)( DFF_Prop_adjustValue + k ), (sal_uInt32)nValue ); } } if( bHasPathCoordinatesProp ) { com::sun::star::uno::Sequence< com::sun::star::drawing::EnhancedCustomShapeParameterPair > aCoordinates; if ( aPathCoordinatesProp >>= aCoordinates ) { // creating the vertices if ( (sal_uInt16)aCoordinates.getLength() ) { sal_uInt16 j, nElements = (sal_uInt16)aCoordinates.getLength(); sal_uInt16 nElementSize = 8; sal_uInt32 nStreamSize = nElementSize * nElements + 6; SvMemoryStream aOut( nStreamSize ); aOut << nElements << nElements << nElementSize; for( j = 0; j < nElements; j++ ) { sal_Int32 X = GetValueForEnhancedCustomShapeParameter( aCoordinates[ j ].First, aEquationOrder, sal_True ); sal_Int32 Y = GetValueForEnhancedCustomShapeParameter( aCoordinates[ j ].Second, aEquationOrder, sal_True ); aOut << X << Y; } sal_uInt8* pBuf = new sal_uInt8[ nStreamSize ]; memcpy( pBuf, aOut.GetData(), nStreamSize ); AddOpt( DFF_Prop_pVertices, sal_True, nStreamSize - 6, pBuf, nStreamSize ); // -6 } else { sal_uInt8* pBuf = new sal_uInt8[ 1 ]; AddOpt( DFF_Prop_pVertices, sal_True, 0, pBuf, 0 ); } } } } } } // --------------------------------------------------------------------------------------------- MSO_SPT EscherPropertyContainer::GetCustomShapeType( const uno::Reference< drawing::XShape > & rXShape, sal_uInt32& nMirrorFlags, rtl::OUString& rShapeType ) { MSO_SPT eShapeType = mso_sptNil; nMirrorFlags = 0; uno::Reference< beans::XPropertySet > aXPropSet( rXShape, uno::UNO_QUERY ); if ( aXPropSet.is() ) { try { const OUString sCustomShapeGeometry( RTL_CONSTASCII_USTRINGPARAM ( "CustomShapeGeometry" ) ); uno::Any aGeoPropSet = aXPropSet->getPropertyValue( sCustomShapeGeometry ); uno::Sequence< beans::PropertyValue > aGeoPropSeq; if ( aGeoPropSet >>= aGeoPropSeq ) { sal_Int32 i, nCount = aGeoPropSeq.getLength(); for ( i = 0; i < nCount; i++ ) { const beans::PropertyValue& rProp = aGeoPropSeq[ i ]; if ( rProp.Name.equalsAscii( "Type" ) ) { if ( rProp.Value >>= rShapeType ) eShapeType = EnhancedCustomShapeTypeNames::Get( rShapeType ); } else if ( rProp.Name.equalsAscii( "MirroredX" ) ) { sal_Bool bMirroredX = sal_Bool(); if ( ( rProp.Value >>= bMirroredX ) && bMirroredX ) nMirrorFlags |= SHAPEFLAG_FLIPH; } else if ( rProp.Name.equalsAscii( "MirroredY" ) ) { sal_Bool bMirroredY = sal_Bool(); if ( ( rProp.Value >>= bMirroredY ) && bMirroredY ) nMirrorFlags |= SHAPEFLAG_FLIPV; } } } } catch( 
::com::sun::star::uno::Exception& ) { } } return eShapeType; } MSO_SPT EscherPropertyContainer::GetCustomShapeType( const uno::Reference< drawing::XShape > & rXShape, sal_uInt32& nMirrorFlags ) { rtl::OUString aShapeType; return GetCustomShapeType( rXShape, nMirrorFlags, aShapeType ); } // --------------------------------------------------------------------------------------------- //Implement for form control export sal_Bool EscherPropertyContainer::CreateBlipPropertiesforOLEControl(const ::com::sun::star::uno::Reference< ::com::sun::star::beans::XPropertySet > & rXPropSet, const ::com::sun::star::uno::Reference< ::com::sun::star::drawing::XShape > & rXShape) { SdrObject* pShape = GetSdrObjectFromXShape( rXShape ); if ( pShape ) { SdrModel* pMod = pShape->GetModel(); Graphic aGraphic(SdrExchangeView::GetObjGraphic( pMod, pShape)); GraphicObject aGraphicObject = aGraphic; ByteString aUniqueId = aGraphicObject.GetUniqueID(); if ( aUniqueId.Len() ) { if ( pGraphicProvider && pPicOutStrm && pShapeBoundRect ) { Rectangle aRect( Point( 0, 0 ), pShapeBoundRect->GetSize() ); sal_uInt32 nBlibId = pGraphicProvider->GetBlibID( *pPicOutStrm, aUniqueId, aRect, NULL ); if ( nBlibId ) { AddOpt( ESCHER_Prop_pib, nBlibId, sal_True ); ImplCreateGraphicAttributes( rXPropSet, nBlibId, sal_False ); return sal_True; } } } } return sal_False; } EscherPersistTable::EscherPersistTable() { } // --------------------------------------------------------------------------------------------- EscherPersistTable::~EscherPersistTable() { for ( void* pPtr = maPersistTable.First(); pPtr; pPtr = maPersistTable.Next() ) delete (EscherPersistEntry*)pPtr; } // --------------------------------------------------------------------------------------------- sal_Bool EscherPersistTable::PtIsID( sal_uInt32 nID ) { for ( void* pPtr = maPersistTable.First(); pPtr; pPtr = maPersistTable.Next() ) { if ( ((EscherPersistEntry*)pPtr)->mnID == nID ) return sal_True; } return sal_False; } // --------------------------------------------------------------------------------------------- void EscherPersistTable::PtInsert( sal_uInt32 nID, sal_uInt32 nOfs ) { maPersistTable.Insert( new EscherPersistEntry( nID, nOfs ) ); } // --------------------------------------------------------------------------------------------- sal_uInt32 EscherPersistTable::PtDelete( sal_uInt32 nID ) { for ( void* pPtr = maPersistTable.First(); pPtr; pPtr = maPersistTable.Next() ) { if ( ((EscherPersistEntry*)pPtr)->mnID == nID ) { // sal_uInt32 nRetValue = ((EscherPersistEntry*)pPtr)->mnOffset; delete (EscherPersistEntry*) maPersistTable.Remove(); } } return 0; } // --------------------------------------------------------------------------------------------- sal_uInt32 EscherPersistTable::PtGetOffsetByID( sal_uInt32 nID ) { for ( void* pPtr = maPersistTable.First(); pPtr; pPtr = maPersistTable.Next() ) { if ( ((EscherPersistEntry*)pPtr)->mnID == nID ) return ((EscherPersistEntry*)pPtr)->mnOffset; } return 0; }; // --------------------------------------------------------------------------------------------- sal_uInt32 EscherPersistTable::PtReplace( sal_uInt32 nID, sal_uInt32 nOfs ) { for ( void* pPtr = maPersistTable.First(); pPtr; pPtr = maPersistTable.Next() ) { if ( ((EscherPersistEntry*)pPtr)->mnID == nID ) { sal_uInt32 nRetValue = ((EscherPersistEntry*)pPtr)->mnOffset; ((EscherPersistEntry*)pPtr)->mnOffset = nOfs; return nRetValue; } } return 0; } // --------------------------------------------------------------------------------------------- sal_uInt32 
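/* Minimal usage sketch for EscherPersistTable (illustrative only; ids and offsets are
   made up):

       EscherPersistTable aTable;
       aTable.PtInsert( 1, 0x100 );                     // id 1 lives at offset 0x100
       sal_uInt32 nOfs = aTable.PtGetOffsetByID( 1 );   // -> 0x100
       aTable.PtReplaceOrInsert( 1, 0x200 );            // update the stored offset
       aTable.PtReplaceOrInsert( 2, 0x300 );            // unknown id -> plain insert
*/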
EscherPersistTable::PtReplaceOrInsert( sal_uInt32 nID, sal_uInt32 nOfs ) { for ( void* pPtr = maPersistTable.First(); pPtr; pPtr = maPersistTable.Next() ) { if ( ((EscherPersistEntry*)pPtr)->mnID == nID ) { sal_uInt32 nRetValue = ((EscherPersistEntry*)pPtr)->mnOffset; ((EscherPersistEntry*)pPtr)->mnOffset = nOfs; return nRetValue; } } PtInsert( nID, nOfs ); return 0; } sal_Bool EscherPropertyValueHelper::GetPropertyValue( ::com::sun::star::uno::Any& rAny, const ::com::sun::star::uno::Reference< ::com::sun::star::beans::XPropertySet > & rXPropSet, const String& rString, sal_Bool bTestPropertyAvailability ) { sal_Bool bRetValue = sal_True; if ( bTestPropertyAvailability ) { bRetValue = sal_False; try { ::com::sun::star::uno::Reference< ::com::sun::star::beans::XPropertySetInfo > aXPropSetInfo( rXPropSet->getPropertySetInfo() ); if ( aXPropSetInfo.is() ) bRetValue = aXPropSetInfo->hasPropertyByName( rString ); } catch( ::com::sun::star::uno::Exception& ) { bRetValue = sal_False; } } if ( bRetValue ) { try { rAny = rXPropSet->getPropertyValue( rString ); if ( !rAny.hasValue() ) bRetValue = sal_False; } catch( ::com::sun::star::uno::Exception& ) { bRetValue = sal_False; } } return bRetValue; } // --------------------------------------------------------------------------------------------- ::com::sun::star::beans::PropertyState EscherPropertyValueHelper::GetPropertyState( const ::com::sun::star::uno::Reference< ::com::sun::star::beans::XPropertySet > & rXPropSet, const String& rPropertyName ) { ::com::sun::star::beans::PropertyState eRetValue = ::com::sun::star::beans::PropertyState_AMBIGUOUS_VALUE; try { ::com::sun::star::uno::Reference< ::com::sun::star::beans::XPropertyState > aXPropState ( rXPropSet, ::com::sun::star::uno::UNO_QUERY ); if ( aXPropState.is() ) eRetValue = aXPropState->getPropertyState( rPropertyName ); } catch( ::com::sun::star::uno::Exception& ) { //... 
} return eRetValue; } // --------------------------------------------------------------------------------------------- // --------------------------------------------------------------------------------------------- // --------------------------------------------------------------------------------------------- EscherBlibEntry::EscherBlibEntry( sal_uInt32 nPictureOffset, const GraphicObject& rObject, const ByteString& rId, const GraphicAttr* pGraphicAttr ) : mnPictureOffset ( nPictureOffset ), mnRefCount ( 1 ), mnSizeExtra ( 0 ), maPrefSize ( rObject.GetPrefSize() ), maPrefMapMode ( rObject.GetPrefMapMode() ), mbIsEmpty ( sal_True ) { mbIsNativeGraphicPossible = ( pGraphicAttr == NULL ); meBlibType = UNKNOWN; mnSize = 0; sal_uInt32 nLen = rId.Len(); const sal_Char* pData = rId.GetBuffer(); GraphicType eType( rObject.GetType() ); if ( nLen && pData && ( eType != GRAPHIC_NONE ) ) { mnIdentifier[ 0 ] = rtl_crc32( 0,pData, nLen ); mnIdentifier[ 1 ] = 0; if ( pGraphicAttr ) { if ( pGraphicAttr->IsSpecialDrawMode() || pGraphicAttr->IsMirrored() || pGraphicAttr->IsCropped() || pGraphicAttr->IsRotated() || pGraphicAttr->IsTransparent() || pGraphicAttr->IsAdjusted() ) { SvMemoryStream aSt( sizeof( GraphicAttr ) ); aSt << static_cast<sal_uInt16>(pGraphicAttr->GetDrawMode()) << static_cast<sal_uInt32>(pGraphicAttr->GetMirrorFlags()) << pGraphicAttr->GetLeftCrop() << pGraphicAttr->GetTopCrop() << pGraphicAttr->GetRightCrop() << pGraphicAttr->GetBottomCrop() << pGraphicAttr->GetRotation() << pGraphicAttr->GetLuminance() << pGraphicAttr->GetContrast() << pGraphicAttr->GetChannelR() << pGraphicAttr->GetChannelG() << pGraphicAttr->GetChannelB() << pGraphicAttr->GetGamma() << (sal_Bool)( pGraphicAttr->IsInvert() == sal_True ) << pGraphicAttr->GetTransparency(); mnIdentifier[ 1 ] = rtl_crc32( 0, aSt.GetData(), aSt.Tell() ); } else mbIsNativeGraphicPossible = sal_True; } sal_uInt32 i, nTmp, n1, n2; n1 = n2 = 0; for ( i = 0; i < nLen; i++ ) { nTmp = n2 >> 28; // rotating 4 bit n2 <<= 4; n2 |= n1 >> 28; n1 <<= 4; n1 |= nTmp; n1 ^= *pData++ - '0'; } mnIdentifier[ 2 ] = n1; mnIdentifier[ 3 ] = n2; mbIsEmpty = sal_False; } }; // --------------------------------------------------------------------------------------------- void EscherBlibEntry::WriteBlibEntry( SvStream& rSt, sal_Bool bWritePictureOffset, sal_uInt32 nResize ) { sal_uInt32 nPictureOffset = ( bWritePictureOffset ) ? 
mnPictureOffset : 0; rSt << (sal_uInt32)( ( ESCHER_BSE << 16 ) | ( ( (sal_uInt16)meBlibType << 4 ) | 2 ) ) << (sal_uInt32)( 36 + nResize ) << (sal_uInt8)meBlibType; switch ( meBlibType ) { case EMF : case WMF : // EMF/WMF auf OS2 zu Pict Konvertieren rSt << (sal_uInt8)PICT; break; default: rSt << (sal_uInt8)meBlibType; }; rSt.Write( &mnIdentifier[ 0 ], 16 ); rSt << (sal_uInt16)0 << (sal_uInt32)( mnSize + mnSizeExtra ) << mnRefCount << nPictureOffset << (sal_uInt32)0; } // --------------------------------------------------------------------------------------------- EscherBlibEntry::~EscherBlibEntry() { }; // --------------------------------------------------------------------------------------------- sal_Bool EscherBlibEntry::operator==( const EscherBlibEntry& rEscherBlibEntry ) const { for ( int i = 0; i < 3; i++ ) { if ( mnIdentifier[ i ] != rEscherBlibEntry.mnIdentifier[ i ] ) return sal_False; } return sal_True; } // --------------------------------------------------------------------------------------------- // --------------------------------------------------------------------------------------------- // --------------------------------------------------------------------------------------------- EscherGraphicProvider::EscherGraphicProvider( sal_uInt32 nFlags ) : mnFlags ( nFlags ), mpBlibEntrys ( NULL ), mnBlibBufSize ( 0 ), mnBlibEntrys ( 0 ) { } EscherGraphicProvider::~EscherGraphicProvider() { for ( sal_uInt32 i = 0; i < mnBlibEntrys; delete mpBlibEntrys[ i++ ] ) ; delete[] mpBlibEntrys; } void EscherGraphicProvider::SetNewBlipStreamOffset( sal_Int32 nOffset ) { for( sal_uInt32 i = 0; i < mnBlibEntrys; i++ ) { EscherBlibEntry* pBlibEntry = mpBlibEntrys[ i ]; pBlibEntry->mnPictureOffset += nOffset; } } sal_uInt32 EscherGraphicProvider::ImplInsertBlib( EscherBlibEntry* p_EscherBlibEntry ) { if ( mnBlibBufSize == mnBlibEntrys ) { mnBlibBufSize += 64; EscherBlibEntry** pTemp = new EscherBlibEntry*[ mnBlibBufSize ]; for ( sal_uInt32 i = 0; i < mnBlibEntrys; i++ ) { pTemp[ i ] = mpBlibEntrys[ i ]; } delete[] mpBlibEntrys; mpBlibEntrys = pTemp; } mpBlibEntrys[ mnBlibEntrys++ ] = p_EscherBlibEntry; return mnBlibEntrys; } sal_uInt32 EscherGraphicProvider::GetBlibStoreContainerSize( SvStream* pMergePicStreamBSE ) const { sal_uInt32 nSize = 44 * mnBlibEntrys + 8; if ( pMergePicStreamBSE ) { for ( sal_uInt32 i = 0; i < mnBlibEntrys; i++ ) nSize += mpBlibEntrys[ i ]->mnSize + mpBlibEntrys[ i ]->mnSizeExtra; } return nSize; } sal_Bool EscherGraphicProvider::WriteBlibStoreEntry(SvStream& rSt, sal_uInt32 nBlipId, sal_Bool bWritePictureOffSet, sal_uInt32 nResize) { if (nBlipId > mnBlibEntrys || nBlipId == 0) return sal_False; mpBlibEntrys[nBlipId-1]->WriteBlibEntry(rSt, bWritePictureOffSet, nResize); return sal_True; } void EscherGraphicProvider::WriteBlibStoreContainer( SvStream& rSt, SvStream* pMergePicStreamBSE ) { sal_uInt32 nSize = GetBlibStoreContainerSize( pMergePicStreamBSE ); if ( nSize ) { rSt << (sal_uInt32)( ( ESCHER_BstoreContainer << 16 ) | 0x1f ) << (sal_uInt32)( nSize - 8 ); if ( pMergePicStreamBSE ) { sal_uInt32 i, nBlipSize, nOldPos = pMergePicStreamBSE->Tell(); const sal_uInt32 nBuf = 0x40000; // 256KB buffer sal_uInt8* pBuf = new sal_uInt8[ nBuf ]; for ( i = 0; i < mnBlibEntrys; i++ ) { EscherBlibEntry* pBlibEntry = mpBlibEntrys[ i ]; ESCHER_BlibType nBlibType = pBlibEntry->meBlibType; nBlipSize = pBlibEntry->mnSize + pBlibEntry->mnSizeExtra; pBlibEntry->WriteBlibEntry( rSt, sal_False, nBlipSize ); // BLIP pMergePicStreamBSE->Seek( pBlibEntry->mnPictureOffset ); sal_uInt16 
n16; // record version and instance *pMergePicStreamBSE >> n16; rSt << n16; // record type *pMergePicStreamBSE >> n16; rSt << sal_uInt16( ESCHER_BlipFirst + nBlibType ); DBG_ASSERT( n16 == ESCHER_BlipFirst + nBlibType , "EscherGraphicProvider::WriteBlibStoreContainer: BLIP record types differ" ); sal_uInt32 n32; // record size *pMergePicStreamBSE >> n32; nBlipSize -= 8; rSt << nBlipSize; DBG_ASSERT( nBlipSize == n32, "EscherGraphicProvider::WriteBlibStoreContainer: BLIP sizes differ" ); // record while ( nBlipSize ) { sal_uInt32 nBytes = ( nBlipSize > nBuf ? nBuf : nBlipSize ); pMergePicStreamBSE->Read( pBuf, nBytes ); rSt.Write( pBuf, nBytes ); nBlipSize -= nBytes; } } delete[] pBuf; pMergePicStreamBSE->Seek( nOldPos ); } else { for ( sal_uInt32 i = 0; i < mnBlibEntrys; i++ ) mpBlibEntrys[ i ]->WriteBlibEntry( rSt, sal_True ); } } } sal_Bool EscherGraphicProvider::GetPrefSize( const sal_uInt32 nBlibId, Size& rPrefSize, MapMode& rPrefMapMode ) { sal_Bool bInRange = nBlibId && ( ( nBlibId - 1 ) < mnBlibEntrys ); if ( bInRange ) { EscherBlibEntry* pEntry = mpBlibEntrys[ nBlibId - 1 ]; rPrefSize = pEntry->maPrefSize; rPrefMapMode = pEntry->maPrefMapMode; } return bInRange; } sal_uInt32 EscherGraphicProvider::GetBlibID( SvStream& rPicOutStrm, const ByteString& rId, const Rectangle& /* rBoundRect */, const com::sun::star::awt::Rectangle* pVisArea, const GraphicAttr* pGraphicAttr ) { sal_uInt32 nBlibId = 0; GraphicObject aGraphicObject( rId ); EscherBlibEntry* p_EscherBlibEntry = new EscherBlibEntry( rPicOutStrm.Tell(), aGraphicObject, rId, pGraphicAttr ); if ( !p_EscherBlibEntry->IsEmpty() ) { for ( sal_uInt32 i = 0; i < mnBlibEntrys; i++ ) { if ( *( mpBlibEntrys[ i ] ) == *p_EscherBlibEntry ) { mpBlibEntrys[ i ]->mnRefCount++; delete p_EscherBlibEntry; return i + 1; } } sal_Bool bUseNativeGraphic( sal_False ); Graphic aGraphic( aGraphicObject.GetTransformedGraphic( pGraphicAttr ) ); GfxLink aGraphicLink; SvMemoryStream aStream; const sal_uInt8* pGraphicAry = NULL; if ( p_EscherBlibEntry->mbIsNativeGraphicPossible && aGraphic.IsLink() ) { aGraphicLink = aGraphic.GetLink(); p_EscherBlibEntry->mnSize = aGraphicLink.GetDataSize(); pGraphicAry = aGraphicLink.GetData(); if ( p_EscherBlibEntry->mnSize && pGraphicAry ) { switch ( aGraphicLink.GetType() ) { case GFX_LINK_TYPE_NATIVE_JPG : p_EscherBlibEntry->meBlibType = PEG; break; case GFX_LINK_TYPE_NATIVE_PNG : p_EscherBlibEntry->meBlibType = PNG; break; // #15508# added BMP type for better exports; need to check this // checked - does not work that way, so keep out for now. 
It may // work somehow with direct DIB data, but that would need to be checked // carefully // for more comments please check RtfAttributeOutput::FlyFrameGraphic // // case GFX_LINK_TYPE_NATIVE_BMP : p_EscherBlibEntry->meBlibType = DIB; break; case GFX_LINK_TYPE_NATIVE_WMF : { if ( pGraphicAry && ( p_EscherBlibEntry->mnSize > 0x2c ) ) { if ( ( pGraphicAry[ 0x28 ] == 0x20 ) && ( pGraphicAry[ 0x29 ] == 0x45 ) // check the magic && ( pGraphicAry[ 0x2a ] == 0x4d ) && ( pGraphicAry[ 0x2b ] == 0x46 ) ) // number ( emf detection ) { p_EscherBlibEntry->meBlibType = EMF; } else { p_EscherBlibEntry->meBlibType = WMF; if ( ( pGraphicAry[ 0 ] == 0xd7 ) && ( pGraphicAry[ 1 ] == 0xcd ) && ( pGraphicAry[ 2 ] == 0xc6 ) && ( pGraphicAry[ 3 ] == 0x9a ) ) { // we have to get rid of the metafileheader pGraphicAry += 22; p_EscherBlibEntry->mnSize -= 22; } } } } break; default: break; } if ( p_EscherBlibEntry->meBlibType != UNKNOWN ) bUseNativeGraphic = sal_True; } } if ( !bUseNativeGraphic ) { GraphicType eGraphicType = aGraphic.GetType(); if ( ( eGraphicType == GRAPHIC_BITMAP ) || ( eGraphicType == GRAPHIC_GDIMETAFILE ) ) { sal_uInt32 nErrCode; if ( !aGraphic.IsAnimated() ) // !EMF nErrCode = GraphicConverter::Export( aStream, aGraphic, ( eGraphicType == GRAPHIC_BITMAP ) ? CVT_PNG : CVT_WMF ); nErrCode = GraphicConverter::Export( aStream, aGraphic, ( eGraphicType == GRAPHIC_BITMAP ) ? CVT_PNG : CVT_EMF ); else { // to store a animation, a gif has to be included into the msOG chunk of a png #I5583# GraphicFilter* pFilter = GraphicFilter::GetGraphicFilter(); SvMemoryStream aGIFStream; ByteString aVersion( "MSOFFICE9.0" ); aGIFStream.Write( aVersion.GetBuffer(), aVersion.Len() ); nErrCode = pFilter->ExportGraphic( aGraphic, String(), aGIFStream, pFilter->GetExportFormatNumberForShortName( String( RTL_CONSTASCII_USTRINGPARAM( "GIF" ) ) ), NULL ); com::sun::star::uno::Sequence< com::sun::star::beans::PropertyValue > aFilterData( 1 ); com::sun::star::uno::Sequence< com::sun::star::beans::PropertyValue > aAdditionalChunkSequence( 1 ); sal_uInt32 nGIFSreamLen = aGIFStream.Tell(); com::sun::star::uno::Sequence< sal_Int8 > aGIFSeq( nGIFSreamLen ); sal_Int8* pSeq = aGIFSeq.getArray(); aGIFStream.Seek( STREAM_SEEK_TO_BEGIN ); aGIFStream.Read( pSeq, nGIFSreamLen ); com::sun::star::beans::PropertyValue aChunkProp, aFilterProp; aChunkProp.Name = String( RTL_CONSTASCII_USTRINGPARAM( "msOG" ) ); aChunkProp.Value <<= aGIFSeq; aAdditionalChunkSequence[ 0 ] = aChunkProp; aFilterProp.Name = String( RTL_CONSTASCII_USTRINGPARAM( "AdditionalChunks" ) ); aFilterProp.Value <<= aAdditionalChunkSequence; aFilterData[ 0 ] = aFilterProp; nErrCode = pFilter->ExportGraphic( aGraphic, String(), aStream, pFilter->GetExportFormatNumberForShortName( String( RTL_CONSTASCII_USTRINGPARAM( "PNG" ) ) ), &aFilterData ); } if ( nErrCode == ERRCODE_NONE ) { // !EMF p_EscherBlibEntry->meBlibType = ( eGraphicType == GRAPHIC_BITMAP ) ? PNG : WMF; p_EscherBlibEntry->meBlibType = ( eGraphicType == GRAPHIC_BITMAP ) ? 
PNG : EMF; aStream.Seek( STREAM_SEEK_TO_END ); p_EscherBlibEntry->mnSize = aStream.Tell(); pGraphicAry = (sal_uInt8*)aStream.GetData(); if ( p_EscherBlibEntry->meBlibType == WMF ) // the fileheader is not used { p_EscherBlibEntry->mnSize -= 22; pGraphicAry += 22; } } } } ESCHER_BlibType eBlibType = p_EscherBlibEntry->meBlibType; if ( p_EscherBlibEntry->mnSize && pGraphicAry && ( eBlibType != UNKNOWN ) ) { sal_uInt32 nExtra, nAtomSize = 0; sal_uInt32 nInstance, nUncompressedSize = p_EscherBlibEntry->mnSize; if ( mnFlags & _E_GRAPH_PROV_USE_INSTANCES ) { rPicOutStrm << (sal_uInt32)( 0x7f90000 | (sal_uInt16)( mnBlibEntrys << 4 ) ) << (sal_uInt32)0; nAtomSize = rPicOutStrm.Tell(); if ( eBlibType == PNG ) rPicOutStrm << (sal_uInt16)0x0606; else if ( eBlibType == WMF ) rPicOutStrm << (sal_uInt16)0x0403; else if ( eBlibType == EMF ) rPicOutStrm << (sal_uInt16)0x0402; else if ( eBlibType == PEG ) rPicOutStrm << (sal_uInt16)0x0505; } if ( ( eBlibType == PEG ) || ( eBlibType == PNG ) ) // || ( eBlibType == DIB )) // #15508# { nExtra = 17; p_EscherBlibEntry->mnSizeExtra = nExtra + 8; // #15508# type see SvxMSDffManager::GetBLIPDirect (checked, does not work this way) // see RtfAttributeOutput::FlyFrameGraphic for more comments // maybe it would work with direct DIB data, but that would need thorough testing if( eBlibType == PNG ) { nInstance = 0xf01e6e00; } else // if( eBlibType == PEG ) { nInstance = 0xf01d46a0; } //else // eBlibType == DIB //{ // nInstance = 0xf01d7A80; //} // #15508# //nInstance = ( eBlibType == PNG ) ? 0xf01e6e00 : 0xf01d46a0; rPicOutStrm << nInstance << (sal_uInt32)( p_EscherBlibEntry->mnSize + nExtra ); rPicOutStrm.Write( p_EscherBlibEntry->mnIdentifier, 16 ); rPicOutStrm << (sal_uInt8)0xff; rPicOutStrm.Write( pGraphicAry, p_EscherBlibEntry->mnSize ); } else { ZCodec aZCodec( 0x8000, 0x8000 ); aZCodec.BeginCompression(); SvMemoryStream aDestStrm; aZCodec.Write( aDestStrm, pGraphicAry, p_EscherBlibEntry->mnSize ); aZCodec.EndCompression(); aDestStrm.Seek( STREAM_SEEK_TO_END ); p_EscherBlibEntry->mnSize = aDestStrm.Tell(); pGraphicAry = (sal_uInt8*)aDestStrm.GetData(); if ( p_EscherBlibEntry->mnSize && pGraphicAry ) { nExtra = eBlibType == WMF ? 0x42 : 0x32; // !EMF -> no change p_EscherBlibEntry->mnSizeExtra = nExtra + 8; nInstance = ( eBlibType == WMF ) ? 
0xf01b2170 : 0xf01a3d40; // !EMF -> no change rPicOutStrm << nInstance << (sal_uInt32)( p_EscherBlibEntry->mnSize + nExtra ); if ( eBlibType == WMF ) // !EMF -> no change rPicOutStrm.Write( p_EscherBlibEntry->mnIdentifier, 16 ); rPicOutStrm.Write( p_EscherBlibEntry->mnIdentifier, 16 ); /* ##913## For Word the stored size of the graphic is critical the metafile boundaries must match the actual graphics boundaries, and the width and height must be in EMU's If you don't do it this way then objects edited in the msoffice app may show strange behaviour as the size jumps around, and the original size and scaling factor in word will be a very strange figure */ sal_uInt32 nPrefWidth = p_EscherBlibEntry->maPrefSize.Width(); sal_uInt32 nPrefHeight = p_EscherBlibEntry->maPrefSize.Height(); sal_uInt32 nWidth, nHeight; if ( pVisArea ) { nWidth = pVisArea->Width * 360; nHeight = pVisArea->Height * 360; } else { Size aPrefSize(lcl_SizeToEmu(p_EscherBlibEntry->maPrefSize, p_EscherBlibEntry->maPrefMapMode)); nWidth = aPrefSize.Width() * 360; nHeight = aPrefSize.Height() * 360; } rPicOutStrm << nUncompressedSize // WMFSize without FileHeader << (sal_Int32)0 // da die Originalgroesse des WMF's (ohne FileHeader) << (sal_Int32)0 // nicht mehr feststellbar ist, schreiben wir 10cm / x << nPrefWidth << nPrefHeight << nWidth << nHeight << p_EscherBlibEntry->mnSize << (sal_uInt16)0xfe00; // compression Flags rPicOutStrm.Write( pGraphicAry, p_EscherBlibEntry->mnSize ); } } if ( nAtomSize ) { sal_uInt32 nPos = rPicOutStrm.Tell(); rPicOutStrm.Seek( nAtomSize - 4 ); rPicOutStrm << (sal_uInt32)( nPos - nAtomSize ); rPicOutStrm.Seek( nPos ); } nBlibId = ImplInsertBlib( p_EscherBlibEntry ), p_EscherBlibEntry = NULL; } } if ( p_EscherBlibEntry ) delete p_EscherBlibEntry; return nBlibId; } // --------------------------------------------------------------------------------------------- // --------------------------------------------------------------------------------------------- // --------------------------------------------------------------------------------------------- struct EscherConnectorRule { sal_uInt32 nRuleId; sal_uInt32 nShapeA; // SPID of shape A sal_uInt32 nShapeB; // SPID of shape B sal_uInt32 nShapeC; // SPID of connector shape sal_uInt32 ncptiA; // Connection site Index of shape A sal_uInt32 ncptiB; // Connection site Index of shape B }; struct EscherShapeListEntry { ::com::sun::star::uno::Reference< ::com::sun::star::drawing::XShape > aXShape; sal_uInt32 n_EscherId; EscherShapeListEntry( const ::com::sun::star::uno::Reference < ::com::sun::star::drawing::XShape > & rShape, sal_uInt32 nId ) : aXShape ( rShape ), n_EscherId ( nId ) {} }; sal_uInt32 EscherConnectorListEntry::GetClosestPoint( const Polygon& rPoly, const ::com::sun::star::awt::Point& rPoint ) { sal_uInt16 nCount = rPoly.GetSize(); sal_uInt16 nClosest = nCount; double fDist = (sal_uInt32)0xffffffff; while( nCount-- ) { double fDistance = hypot( rPoint.X - rPoly[ nCount ].X(), rPoint.Y - rPoly[ nCount ].Y() ); if ( fDistance < fDist ) { nClosest = nCount; fDist = fDistance; } } return nClosest; }; // --------------------------------------------------------------------------------------------- // bei Rechtecken bei Ellipsen bei Polygonen // // nRule = 0 ->Top 0 ->Top nRule = Index auf ein (Poly)Polygon Punkt // 1 ->Left 2 ->Left // 2 ->Bottom 4 ->Bottom // 3 ->Right 6 ->Right sal_uInt32 EscherConnectorListEntry::GetConnectorRule( sal_Bool bFirst ) { sal_uInt32 nRule = 0; ::com::sun::star::uno::Any aAny; ::com::sun::star::awt::Point aRefPoint( ( 
bFirst ) ? maPointA : maPointB ); ::com::sun::star::uno::Reference< ::com::sun::star::drawing::XShape > aXShape( ( bFirst ) ? mXConnectToA : mXConnectToB ); String aString( (::rtl::OUString)aXShape->getShapeType() ); ByteString aType( aString, RTL_TEXTENCODING_UTF8 ); aType.Erase( 0, 13 ); // removing "com.sun.star." sal_uInt16 nPos = aType.Search( "Shape" ); aType.Erase( nPos, 5 ); ::com::sun::star::uno::Reference< ::com::sun::star::beans::XPropertySet > aPropertySet( aXShape, ::com::sun::star::uno::UNO_QUERY ); if ( aType == "drawing.PolyPolygon" || aType == "drawing.PolyLine" ) { if ( aPropertySet.is() ) { if ( EscherPropertyValueHelper::GetPropertyValue( aAny, aPropertySet, String( RTL_CONSTASCII_USTRINGPARAM( "PolyPolygon" ) ) ) ) { ::com::sun::star::drawing::PointSequenceSequence* pSourcePolyPolygon = (::com::sun::star::drawing::PointSequenceSequence*)aAny.getValue(); sal_Int32 nOuterSequenceCount = pSourcePolyPolygon->getLength(); ::com::sun::star::drawing::PointSequence* pOuterSequence = pSourcePolyPolygon->getArray(); if ( pOuterSequence ) { sal_Int32 a, b, nIndex = 0; sal_uInt32 nDistance = 0xffffffff; for( a = 0; a < nOuterSequenceCount; a++ ) { ::com::sun::star::drawing::PointSequence* pInnerSequence = pOuterSequence++; if ( pInnerSequence ) { ::com::sun::star::awt::Point* pArray = pInnerSequence->getArray(); if ( pArray ) { for ( b = 0; b < pInnerSequence->getLength(); b++, nIndex++, pArray++ ) { sal_uInt32 nDist = (sal_uInt32)hypot( aRefPoint.X - pArray->X, aRefPoint.Y - pArray->Y ); if ( nDist < nDistance ) { nRule = nIndex; nDistance = nDist; } } } } } } } } } else if ( ( aType == "drawing.OpenBezier" ) || ( aType == "drawing.OpenFreeHand" ) || ( aType == "drawing.PolyLinePath" ) || ( aType == "drawing.ClosedBezier" ) || ( aType == "drawing.ClosedFreeHand" ) || ( aType == "drawing.PolyPolygonPath" ) ) { ::com::sun::star::uno::Reference< ::com::sun::star::beans::XPropertySet > aPropertySet2( aXShape, ::com::sun::star::uno::UNO_QUERY ); if ( aPropertySet2.is() ) { if ( EscherPropertyValueHelper::GetPropertyValue( aAny, aPropertySet2, String( RTL_CONSTASCII_USTRINGPARAM( "PolyPolygonBezier" ) ) ) ) { ::com::sun::star::drawing::PolyPolygonBezierCoords* pSourcePolyPolygon = (::com::sun::star::drawing::PolyPolygonBezierCoords*)aAny.getValue(); sal_Int32 nOuterSequenceCount = pSourcePolyPolygon->Coordinates.getLength(); // Zeiger auf innere sequences holen ::com::sun::star::drawing::PointSequence* pOuterSequence = pSourcePolyPolygon->Coordinates.getArray(); ::com::sun::star::drawing::FlagSequence* pOuterFlags = pSourcePolyPolygon->Flags.getArray(); if ( pOuterSequence && pOuterFlags ) { sal_Int32 a, b, nIndex = 0; sal_uInt32 nDistance = 0xffffffff; for ( a = 0; a < nOuterSequenceCount; a++ ) { ::com::sun::star::drawing::PointSequence* pInnerSequence = pOuterSequence++; ::com::sun::star::drawing::FlagSequence* pInnerFlags = pOuterFlags++; if ( pInnerSequence && pInnerFlags ) { ::com::sun::star::awt::Point* pArray = pInnerSequence->getArray(); ::com::sun::star::drawing::PolygonFlags* pFlags = pInnerFlags->getArray(); if ( pArray && pFlags ) { for ( b = 0; b < pInnerSequence->getLength(); b++, pArray++ ) { PolyFlags ePolyFlags = *( (PolyFlags*)pFlags++ ); if ( ePolyFlags == POLY_CONTROL ) continue; sal_uInt32 nDist = (sal_uInt32)hypot( aRefPoint.X - pArray->X, aRefPoint.Y - pArray->Y ); if ( nDist < nDistance ) { nRule = nIndex; nDistance = nDist; } nIndex++; } } } } } } } } else { bool bRectangularConnection = true; if ( aType == "drawing.Custom" ) { SdrObject* pCustoShape( 
GetSdrObjectFromXShape( aXShape ) ); if ( pCustoShape && pCustoShape->ISA( SdrObjCustomShape ) ) { SdrCustomShapeGeometryItem& rGeometryItem = (SdrCustomShapeGeometryItem&)(const SdrCustomShapeGeometryItem&) pCustoShape->GetMergedItem( SDRATTR_CUSTOMSHAPE_GEOMETRY ); const rtl::OUString sPath( RTL_CONSTASCII_USTRINGPARAM( "Path" ) ); const rtl::OUString sType( RTL_CONSTASCII_USTRINGPARAM ( "Type" ) ); const rtl::OUString sGluePointType( RTL_CONSTASCII_USTRINGPARAM( "GluePointType" ) ); rtl::OUString sShapeType; uno::Any* pType = rGeometryItem.GetPropertyValueByName( sType ); if ( pType ) *pType >>= sShapeType; MSO_SPT eSpType = EnhancedCustomShapeTypeNames::Get( sShapeType ); uno::Any* pGluePointType = ((SdrCustomShapeGeometryItem&)rGeometryItem).GetPropertyValueByName( sPath, sGluePointType ); sal_Int16 nGluePointType = sal_Int16(); if ( !( pGluePointType && ( *pGluePointType >>= nGluePointType ) ) ) nGluePointType = GetCustomShapeConnectionTypeDefault( eSpType ); if ( nGluePointType == com::sun::star::drawing::EnhancedCustomShapeGluePointType::CUSTOM ) { const SdrGluePointList* pList = pCustoShape->GetGluePointList(); if ( pList ) { Polygon aPoly; sal_uInt16 nNum, nAnz = pList->GetCount(); if ( nAnz ) { for ( nNum = 0; nNum < nAnz; nNum++ ) { const SdrGluePoint& rGP = (*pList)[ nNum ]; Point aPt( rGP.GetAbsolutePos( *pCustoShape ) ); aPoly.Insert( POLY_APPEND, aPt ); } nRule = GetClosestPoint( aPoly, aRefPoint ); bRectangularConnection = false; } } } else if ( nGluePointType == com::sun::star::drawing::EnhancedCustomShapeGluePointType::SEGMENTS ) { SdrObject* pPoly = pCustoShape->DoConvertToPolyObj( sal_True, true ); if ( pPoly && pPoly->ISA( SdrPathObj ) ) { sal_Int16 a, b, nIndex = 0; sal_uInt32 nDistance = 0xffffffff; // #i74631# use explicit constructor here. Also XPolyPolygon is not necessary, // reducing to PolyPolygon const PolyPolygon aPolyPoly(((SdrPathObj*)pPoly)->GetPathPoly()); for ( a = 0; a < aPolyPoly.Count(); a++ ) { const Polygon& rPoly = aPolyPoly.GetObject( a ); for ( b = 0; b < rPoly.GetSize(); b++ ) { if ( rPoly.GetFlags( b ) != POLY_NORMAL ) continue; const Point& rPt = rPoly[ b ]; sal_uInt32 nDist = (sal_uInt32)hypot( aRefPoint.X - rPt.X(), aRefPoint.Y - rPt.Y() ); if ( nDist < nDistance ) { nRule = nIndex; nDistance = nDist; } nIndex++; } } if ( nDistance != 0xffffffff ) bRectangularConnection = false; } } } } if ( bRectangularConnection ) { ::com::sun::star::awt::Point aPoint( aXShape->getPosition() ); ::com::sun::star::awt::Size aSize( aXShape->getSize() ); Rectangle aRect( Point( aPoint.X, aPoint.Y ), Size( aSize.Width, aSize.Height ) ); Point aCenter( aRect.Center() ); Polygon aPoly( 4 ); aPoly[ 0 ] = Point( aCenter.X(), aRect.Top() ); aPoly[ 1 ] = Point( aRect.Left(), aCenter.Y() ); aPoly[ 2 ] = Point( aCenter.X(), aRect.Bottom() ); aPoly[ 3 ] = Point( aRect.Right(), aCenter.Y() ); sal_Int32 nAngle = ( EscherPropertyValueHelper::GetPropertyValue( aAny, aPropertySet, String( RTL_CONSTASCII_USTRINGPARAM( "RotateAngle" ) ), sal_True ) ) ? 
*((sal_Int32*)aAny.getValue() ) : 0; if ( nAngle ) aPoly.Rotate( aRect.TopLeft(), (sal_uInt16)( ( nAngle + 5 ) / 10 ) ); nRule = GetClosestPoint( aPoly, aRefPoint ); if ( aType == "drawing.Ellipse" ) nRule <<= 1; // In PPT hat eine Ellipse 8 M?glichkeiten sich zu connecten } } return nRule; } EscherSolverContainer::~EscherSolverContainer() { void* pP; for( pP = maShapeList.First(); pP; pP = maShapeList.Next() ) delete (EscherShapeListEntry*)pP; for( pP = maConnectorList.First(); pP; pP = maConnectorList.Next() ) delete (EscherConnectorListEntry*)pP; } void EscherSolverContainer::AddShape( const ::com::sun::star::uno::Reference< ::com::sun::star::drawing::XShape > & rXShape, sal_uInt32 nId ) { maShapeList.Insert( new EscherShapeListEntry( rXShape, nId ), LIST_APPEND ); } void EscherSolverContainer::AddConnector( const ::com::sun::star::uno::Reference< ::com::sun::star::drawing::XShape > & rConnector, const ::com::sun::star::awt::Point& rPA, ::com::sun::star::uno::Reference< ::com::sun::star::drawing::XShape > & rConA, const ::com::sun::star::awt::Point& rPB, ::com::sun::star::uno::Reference< ::com::sun::star::drawing::XShape > & rConB ) { maConnectorList.Insert( new EscherConnectorListEntry( rConnector, rPA, rConA, rPB, rConB ), LIST_APPEND ); } sal_uInt32 EscherSolverContainer::GetShapeId( const ::com::sun::star::uno::Reference< ::com::sun::star::drawing::XShape > & rXShape ) const { for ( EscherShapeListEntry* pPtr = (EscherShapeListEntry*)((List&)maShapeList).First(); pPtr; pPtr = (EscherShapeListEntry*)((List&)maShapeList).Next() ) { if ( rXShape == pPtr->aXShape ) return ( pPtr->n_EscherId ); } return 0; } void EscherSolverContainer::WriteSolver( SvStream& rStrm ) { sal_uInt32 nCount = maConnectorList.Count(); if ( nCount ) { sal_uInt32 nRecHdPos, nCurrentPos, nSize; rStrm << (sal_uInt16)( ( nCount << 4 ) | 0xf ) // open an ESCHER_SolverContainer << (sal_uInt16)ESCHER_SolverContainer // << (sal_uInt32)0; // nRecHdPos = rStrm.Tell() - 4; EscherConnectorRule aConnectorRule; aConnectorRule.nRuleId = 2; for ( EscherConnectorListEntry* pPtr = (EscherConnectorListEntry*)maConnectorList.First(); pPtr; pPtr = (EscherConnectorListEntry*)maConnectorList.Next() ) { aConnectorRule.ncptiA = aConnectorRule.ncptiB = 0xffffffff; aConnectorRule.nShapeC = GetShapeId( pPtr->mXConnector ); aConnectorRule.nShapeA = GetShapeId( pPtr->mXConnectToA ); aConnectorRule.nShapeB = GetShapeId( pPtr->mXConnectToB ); if ( aConnectorRule.nShapeC ) { if ( aConnectorRule.nShapeA ) aConnectorRule.ncptiA = pPtr->GetConnectorRule( sal_True ); if ( aConnectorRule.nShapeB ) aConnectorRule.ncptiB = pPtr->GetConnectorRule( sal_False ); } rStrm << (sal_uInt32)( ( ESCHER_ConnectorRule << 16 ) | 1 ) // atom hd << (sal_uInt32)24 // << aConnectorRule.nRuleId << aConnectorRule.nShapeA << aConnectorRule.nShapeB << aConnectorRule.nShapeC << aConnectorRule.ncptiA << aConnectorRule.ncptiB; aConnectorRule.nRuleId += 2; } nCurrentPos = rStrm.Tell(); // close the ESCHER_SolverContainer nSize = ( nCurrentPos - nRecHdPos ) - 4;// rStrm.Seek( nRecHdPos ); // rStrm << nSize; // rStrm.Seek( nCurrentPos ); // } } // --------------------------------------------------------------------------------------------- EscherExGlobal::EscherExGlobal( sal_uInt32 nGraphicProvFlags ) : EscherGraphicProvider( nGraphicProvFlags ), mpPicStrm( 0 ), mbHasDggCont( false ), mbPicStrmQueried( false ) { } EscherExGlobal::~EscherExGlobal() { } sal_uInt32 EscherExGlobal::GenerateDrawingId() { // new drawing starts a new cluster in the cluster table (cluster identifiers 
are one-based) sal_uInt32 nClusterId = static_cast< sal_uInt32 >( maClusterTable.size() + 1 ); // drawing identifiers are one-based sal_uInt32 nDrawingId = static_cast< sal_uInt32 >( maDrawingInfos.size() + 1 ); // prepare new entries in the tables maClusterTable.push_back( ClusterEntry( nDrawingId ) ); maDrawingInfos.push_back( DrawingInfo( nClusterId ) ); // return the new drawing identifier return nDrawingId; } sal_uInt32 EscherExGlobal::GenerateShapeId( sal_uInt32 nDrawingId, bool bIsInSpgr ) { // drawing identifier is one-based size_t nDrawingIdx = nDrawingId - 1; OSL_ENSURE( nDrawingIdx < maDrawingInfos.size(), "EscherExGlobal::GenerateShapeId - invalid drawing ID" ); if( nDrawingIdx >= maDrawingInfos.size() ) return 0; DrawingInfo& rDrawingInfo = maDrawingInfos[ nDrawingIdx ]; // cluster identifier in drawing info struct is one-based ClusterEntry* pClusterEntry = &maClusterTable[ rDrawingInfo.mnClusterId - 1 ]; // check cluster overflow, create new cluster entry if( pClusterEntry->mnNextShapeId == DFF_DGG_CLUSTER_SIZE ) { // start a new cluster in the cluster table maClusterTable.push_back( ClusterEntry( nDrawingId ) ); pClusterEntry = &maClusterTable.back(); // new size of maClusterTable is equal to one-based identifier of the new cluster rDrawingInfo.mnClusterId = static_cast< sal_uInt32 >( maClusterTable.size() ); } // build shape identifier from cluster identifier and next free cluster shape identifier rDrawingInfo.mnLastShapeId = static_cast< sal_uInt32 >( rDrawingInfo.mnClusterId * DFF_DGG_CLUSTER_SIZE + pClusterEntry->mnNextShapeId ); // update free shape identifier in cluster entry ++pClusterEntry->mnNextShapeId; /* Old code has counted the shapes only, if we are in a SPGRCONTAINER. Is this really intended? Maybe it's always true... */ if( bIsInSpgr ) ++rDrawingInfo.mnShapeCount; // return the new shape identifier return rDrawingInfo.mnLastShapeId; } sal_uInt32 EscherExGlobal::GetDrawingShapeCount( sal_uInt32 nDrawingId ) const { size_t nDrawingIdx = nDrawingId - 1; OSL_ENSURE( nDrawingIdx < maDrawingInfos.size(), "EscherExGlobal::GetDrawingShapeCount - invalid drawing ID" ); return (nDrawingIdx < maDrawingInfos.size()) ? maDrawingInfos[ nDrawingIdx ].mnShapeCount : 0; } sal_uInt32 EscherExGlobal::GetLastShapeId( sal_uInt32 nDrawingId ) const { size_t nDrawingIdx = nDrawingId - 1; OSL_ENSURE( nDrawingIdx < maDrawingInfos.size(), "EscherExGlobal::GetLastShapeId - invalid drawing ID" ); return (nDrawingIdx < maDrawingInfos.size()) ? 
maDrawingInfos[ nDrawingIdx ].mnLastShapeId : 0; } sal_uInt32 EscherExGlobal::GetDggAtomSize() const { // 8 bytes header, 16 bytes fixed DGG data, 8 bytes for each cluster return static_cast< sal_uInt32 >( 24 + 8 * maClusterTable.size() ); } void EscherExGlobal::WriteDggAtom( SvStream& rStrm ) const { sal_uInt32 nDggSize = GetDggAtomSize(); // write the DGG record header (do not include the 8 bytes of the header in the data size) rStrm << static_cast< sal_uInt32 >( ESCHER_Dgg << 16 ) << static_cast< sal_uInt32 >( nDggSize - 8 ); // claculate and write the fixed DGG data sal_uInt32 nShapeCount = 0; sal_uInt32 nLastShapeId = 0; for( DrawingInfoVector::const_iterator aIt = maDrawingInfos.begin(), aEnd = maDrawingInfos.end(); aIt != aEnd; ++aIt ) { nShapeCount += aIt->mnShapeCount; nLastShapeId = ::std::max( nLastShapeId, aIt->mnLastShapeId ); } // the non-existing cluster with index #0 is counted too sal_uInt32 nClusterCount = static_cast< sal_uInt32 >( maClusterTable.size() + 1 ); sal_uInt32 nDrawingCount = static_cast< sal_uInt32 >( maDrawingInfos.size() ); rStrm << nLastShapeId << nClusterCount << nShapeCount << nDrawingCount; // write the cluster table for( ClusterTable::const_iterator aIt = maClusterTable.begin(), aEnd = maClusterTable.end(); aIt != aEnd; ++aIt ) rStrm << aIt->mnDrawingId << aIt->mnNextShapeId; } SvStream* EscherExGlobal::QueryPictureStream() { if( !mbPicStrmQueried ) { mpPicStrm = ImplQueryPictureStream(); mbPicStrmQueried = true; } return mpPicStrm; } SvStream* EscherExGlobal::ImplQueryPictureStream() { return 0; } // --------------------------------------------------------------------------------------------- // --------------------------------------------------------------------------------------------- // --------------------------------------------------------------------------------------------- EscherEx::EscherEx( const EscherExGlobalRef& rxGlobal, SvStream& rOutStrm ) : mxGlobal ( rxGlobal ), mpOutStrm ( &rOutStrm ), mnGroupLevel ( 0 ), mnHellLayerId ( USHRT_MAX ), mbEscherSpgr ( sal_False ), mbEscherDg ( sal_False ) { mnStrmStartOfs = mpOutStrm->Tell(); mpImplEscherExSdr.reset( new ImplEscherExSdr( *this ) ); } EscherEx::~EscherEx() { } // --------------------------------------------------------------------------------------------- void EscherEx::Flush( SvStream* pPicStreamMergeBSE /* = NULL */ ) { if ( mxGlobal->HasDggContainer() ) { // store the current stream position at ESCHER_Persist_CurrentPosition key PtReplaceOrInsert( ESCHER_Persist_CurrentPosition, mpOutStrm->Tell() ); if ( DoSeek( ESCHER_Persist_Dgg ) ) { /* The DGG record is still not written. ESCHER_Persist_Dgg seeks to the place where the complete record has to be inserted. */ InsertAtCurrentPos( mxGlobal->GetDggAtomSize(), false ); mxGlobal->WriteDggAtom( *mpOutStrm ); if ( mxGlobal->HasGraphics() ) { /* Calculate the total size of the BSTORECONTAINER including all BSE records containing the picture data contained in the passed in pPicStreamMergeBSE. */ sal_uInt32 nBSCSize = mxGlobal->GetBlibStoreContainerSize( pPicStreamMergeBSE ); if ( nBSCSize > 0 ) { InsertAtCurrentPos( nBSCSize, false ); mxGlobal->WriteBlibStoreContainer( *mpOutStrm, pPicStreamMergeBSE ); } } /* Forget the stream position stored for the DGG which is invalid after the call to InsertAtCurrentPos() anyway. 
*/ PtDelete( ESCHER_Persist_Dgg ); } // seek to initial position (may be different due to inserted DGG and BLIPs) mpOutStrm->Seek( PtGetOffsetByID( ESCHER_Persist_CurrentPosition ) ); } } // --------------------------------------------------------------------------------------------- void EscherEx::InsertAtCurrentPos( sal_uInt32 nBytes, bool bExpandEndOfAtom ) { sal_uInt32 nSize, nType, nSource, nBufSize, nToCopy, nCurPos = mpOutStrm->Tell(); sal_uInt8* pBuf; // Persist table anpassen for ( void* pPtr = maPersistTable.First(); pPtr; pPtr = maPersistTable.Next() ) { sal_uInt32 nOfs = ((EscherPersistEntry*)pPtr)->mnOffset; if ( nOfs >= nCurPos ) ((EscherPersistEntry*)pPtr)->mnOffset += nBytes; } // container und atom sizes anpassen mpOutStrm->Seek( mnStrmStartOfs ); while ( mpOutStrm->Tell() < nCurPos ) { *mpOutStrm >> nType >> nSize; sal_uInt32 nEndOfRecord = mpOutStrm->Tell() + nSize; bool bContainer = (nType & 0x0F) == 0x0F; /* Expand the record, if the insertion position is inside, or if the position is at the end of a container (expands always), or at the end of an atom and bExpandEndOfAtom is set. */ if ( (nCurPos < nEndOfRecord) || ((nCurPos == nEndOfRecord) && (bContainer || bExpandEndOfAtom)) ) { mpOutStrm->SeekRel( -4 ); *mpOutStrm << (sal_uInt32)( nSize + nBytes ); if ( !bContainer ) mpOutStrm->SeekRel( nSize ); } else mpOutStrm->SeekRel( nSize ); } std::vector< sal_uInt32 >::iterator aIter( mOffsets.begin() ); std::vector< sal_uInt32 >::iterator aEnd( mOffsets.end() ); while( aIter != aEnd ) { if ( *aIter > nCurPos ) *aIter += nBytes; aIter++; } mpOutStrm->Seek( STREAM_SEEK_TO_END ); nSource = mpOutStrm->Tell(); nToCopy = nSource - nCurPos; // Stream um nBytes vergroessern pBuf = new sal_uInt8[ 0x40000 ]; // 256KB Buffer while ( nToCopy ) { nBufSize = ( nToCopy >= 0x40000 ) ? 0x40000 : nToCopy; nToCopy -= nBufSize; nSource -= nBufSize; mpOutStrm->Seek( nSource ); mpOutStrm->Read( pBuf, nBufSize ); mpOutStrm->Seek( nSource + nBytes ); mpOutStrm->Write( pBuf, nBufSize ); } delete[] pBuf; mpOutStrm->Seek( nCurPos ); } // --------------------------------------------------------------------------------------------- sal_Bool EscherEx::SeekBehindRecHeader( sal_uInt16 nRecType ) { sal_uInt32 nOldPos, nStreamEnd, nType, nSize; nOldPos = mpOutStrm->Tell(); nStreamEnd = mpOutStrm->Seek( STREAM_SEEK_TO_END ); mpOutStrm->Seek( nOldPos ); while ( mpOutStrm->Tell() < nStreamEnd ) { *mpOutStrm >> nType >> nSize; if ( ( nType >> 16 ) == nRecType ) return sal_True; if ( ( nType & 0xf ) != 0xf ) mpOutStrm->SeekRel( nSize ); } mpOutStrm->Seek( nOldPos ); return sal_False; } // --------------------------------------------------------------------------------------------- void EscherEx::InsertPersistOffset( sal_uInt32 nKey, sal_uInt32 nOffset ) { PtInsert( ESCHER_Persist_PrivateEntry | nKey, nOffset ); } void EscherEx::ReplacePersistOffset( sal_uInt32 nKey, sal_uInt32 nOffset ) { PtReplace( ESCHER_Persist_PrivateEntry | nKey, nOffset ); } sal_uInt32 EscherEx::GetPersistOffset( sal_uInt32 nKey ) { return PtGetOffsetByID( ESCHER_Persist_PrivateEntry | nKey ); } // --------------------------------------------------------------------------------------------- sal_Bool EscherEx::DoSeek( sal_uInt32 nKey ) { sal_uInt32 nPos = PtGetOffsetByID( nKey ); if ( nPos ) mpOutStrm->Seek( nPos ); else { if (! 
PtIsID( nKey ) ) return sal_False; mpOutStrm->Seek( 0 ); } return sal_True; } // --------------------------------------------------------------------------------------------- sal_Bool EscherEx::SeekToPersistOffset( sal_uInt32 nKey ) { return DoSeek( ESCHER_Persist_PrivateEntry | nKey ); } // --------------------------------------------------------------------------------------------- sal_Bool EscherEx::InsertAtPersistOffset( sal_uInt32 nKey, sal_uInt32 nValue ) { sal_uInt32 nOldPos = mpOutStrm->Tell(); sal_Bool bRetValue = SeekToPersistOffset( nKey ); if ( bRetValue ) { *mpOutStrm << nValue; mpOutStrm->Seek( nOldPos ); } return bRetValue; } // --------------------------------------------------------------------------------------------- void EscherEx::OpenContainer( sal_uInt16 nEscherContainer, int nRecInstance ) { *mpOutStrm << (sal_uInt16)( ( nRecInstance << 4 ) | 0xf ) << nEscherContainer << (sal_uInt32)0; mOffsets.push_back( mpOutStrm->Tell() - 4 ); mRecTypes.push_back( nEscherContainer ); switch( nEscherContainer ) { case ESCHER_DggContainer : { mxGlobal->SetDggContainer(); mnCurrentDg = 0; /* Remember the current position as start position of the DGG record and BSTORECONTAINER, but do not write them actually. This will be done later in Flush() when the number of drawings, the size and contents of the FIDCL cluster table, and the size of the BLIP container are known. */ PtReplaceOrInsert( ESCHER_Persist_Dgg, mpOutStrm->Tell() ); } break; case ESCHER_DgContainer : { if ( mxGlobal->HasDggContainer() ) { if ( !mbEscherDg ) { mbEscherDg = sal_True; mnCurrentDg = mxGlobal->GenerateDrawingId(); AddAtom( 8, ESCHER_Dg, 0, mnCurrentDg ); PtReplaceOrInsert( ESCHER_Persist_Dg | mnCurrentDg, mpOutStrm->Tell() ); *mpOutStrm << (sal_uInt32)0 // The number of shapes in this drawing << (sal_uInt32)0; // The last MSOSPID given to an SP in this DG } } } break; case ESCHER_SpgrContainer : { if ( mbEscherDg ) { mbEscherSpgr = sal_True; } } break; case ESCHER_SpContainer : { } break; default: break; } } // --------------------------------------------------------------------------------------------- void EscherEx::CloseContainer() { sal_uInt32 nSize, nPos = mpOutStrm->Tell(); nSize = ( nPos - mOffsets.back() ) - 4; mpOutStrm->Seek( mOffsets.back() ); *mpOutStrm << nSize; switch( mRecTypes.back() ) { case ESCHER_DgContainer : { if ( mbEscherDg ) { mbEscherDg = sal_False; if ( DoSeek( ESCHER_Persist_Dg | mnCurrentDg ) ) *mpOutStrm << mxGlobal->GetDrawingShapeCount( mnCurrentDg ) << mxGlobal->GetLastShapeId( mnCurrentDg ); } } break; case ESCHER_SpgrContainer : { if ( mbEscherSpgr ) { mbEscherSpgr = sal_False; } } break; default: break; } mOffsets.pop_back(); mRecTypes.pop_back(); mpOutStrm->Seek( nPos ); } // --------------------------------------------------------------------------------------------- void EscherEx::BeginAtom() { mnCountOfs = mpOutStrm->Tell(); *mpOutStrm << (sal_uInt32)0 << (sal_uInt32)0; // record header wird spaeter geschrieben } // --------------------------------------------------------------------------------------------- void EscherEx::EndAtom( sal_uInt16 nRecType, int nRecVersion, int nRecInstance ) { sal_uInt32 nOldPos = mpOutStrm->Tell(); mpOutStrm->Seek( mnCountOfs ); sal_uInt32 nSize = nOldPos - mnCountOfs; *mpOutStrm << (sal_uInt16)( ( nRecInstance << 4 ) | ( nRecVersion & 0xf ) ) << nRecType << (sal_uInt32)( nSize - 8 ); mpOutStrm->Seek( nOldPos ); } // --------------------------------------------------------------------------------------------- void EscherEx::AddAtom( 
sal_uInt32 nAtomSize, sal_uInt16 nRecType, int nRecVersion, int nRecInstance ) { *mpOutStrm << (sal_uInt16)( ( nRecInstance << 4 ) | ( nRecVersion & 0xf ) ) << nRecType << nAtomSize; } // --------------------------------------------------------------------------------------------- void EscherEx::AddChildAnchor( const Rectangle& rRect ) { AddAtom( 16, ESCHER_ChildAnchor ); *mpOutStrm << (sal_Int32)rRect.Left() << (sal_Int32)rRect.Top() << (sal_Int32)rRect.Right() << (sal_Int32)rRect.Bottom(); } // --------------------------------------------------------------------------------------------- void EscherEx::AddClientAnchor( const Rectangle& rRect ) { AddAtom( 8, ESCHER_ClientAnchor ); *mpOutStrm << (sal_Int16)rRect.Top() << (sal_Int16)rRect.Left() << (sal_Int16)( rRect.GetWidth() + rRect.Left() ) << (sal_Int16)( rRect.GetHeight() + rRect.Top() ); } // --------------------------------------------------------------------------------------------- EscherExHostAppData* EscherEx::EnterAdditionalTextGroup() { return NULL; } // --------------------------------------------------------------------------------------------- sal_uInt32 EscherEx::EnterGroup( const String& rShapeName, const Rectangle* pBoundRect ) { Rectangle aRect; if( pBoundRect ) aRect = *pBoundRect; OpenContainer( ESCHER_SpgrContainer ); OpenContainer( ESCHER_SpContainer ); AddAtom( 16, ESCHER_Spgr, 1 ); PtReplaceOrInsert( ESCHER_Persist_Grouping_Snap | mnGroupLevel, mpOutStrm->Tell() ); *mpOutStrm << (sal_Int32)aRect.Left() // Bounding box fuer die Gruppierten shapes an die sie attached werden << (sal_Int32)aRect.Top() << (sal_Int32)aRect.Right() << (sal_Int32)aRect.Bottom(); sal_uInt32 nShapeId = GenerateShapeId(); if ( !mnGroupLevel ) AddShape( ESCHER_ShpInst_Min, 5, nShapeId ); // Flags: Group | Patriarch else { AddShape( ESCHER_ShpInst_Min, 0x201, nShapeId ); // Flags: Group | HaveAnchor EscherPropertyContainer aPropOpt; aPropOpt.AddOpt( ESCHER_Prop_LockAgainstGrouping, 0x00040004 ); aPropOpt.AddOpt( ESCHER_Prop_dxWrapDistLeft, 0 ); aPropOpt.AddOpt( ESCHER_Prop_dxWrapDistRight, 0 ); // #i51348# shape name if( rShapeName.Len() > 0 ) aPropOpt.AddOpt( ESCHER_Prop_wzName, rShapeName ); Commit( aPropOpt, aRect ); if ( mnGroupLevel > 1 ) AddChildAnchor( aRect ); EscherExHostAppData* pAppData = mpImplEscherExSdr->ImplGetHostData(); if( pAppData ) { if ( mnGroupLevel <= 1 ) pAppData->WriteClientAnchor( *this, aRect ); pAppData->WriteClientData( *this ); } } CloseContainer(); // ESCHER_SpContainer mnGroupLevel++; return nShapeId; } sal_uInt32 EscherEx::EnterGroup( const Rectangle* pBoundRect ) { return EnterGroup( String::EmptyString(), pBoundRect ); } // --------------------------------------------------------------------------------------------- sal_Bool EscherEx::SetGroupSnapRect( sal_uInt32 nGroupLevel, const Rectangle& rRect ) { sal_Bool bRetValue = sal_False; if ( nGroupLevel ) { sal_uInt32 nCurrentPos = mpOutStrm->Tell(); if ( DoSeek( ESCHER_Persist_Grouping_Snap | ( nGroupLevel - 1 ) ) ) { *mpOutStrm << (sal_Int32)rRect.Left() // Bounding box fuer die Gruppierten shapes an die sie attached werden << (sal_Int32)rRect.Top() << (sal_Int32)rRect.Right() << (sal_Int32)rRect.Bottom(); mpOutStrm->Seek( nCurrentPos ); } } return bRetValue; } // --------------------------------------------------------------------------------------------- sal_Bool EscherEx::SetGroupLogicRect( sal_uInt32 nGroupLevel, const Rectangle& rRect ) { sal_Bool bRetValue = sal_False; if ( nGroupLevel ) { sal_uInt32 nCurrentPos = mpOutStrm->Tell(); if ( DoSeek( 
ESCHER_Persist_Grouping_Logic | ( nGroupLevel - 1 ) ) ) { *mpOutStrm << (sal_Int16)rRect.Top() << (sal_Int16)rRect.Left() << (sal_Int16)rRect.Right() << (sal_Int16)rRect.Bottom(); mpOutStrm->Seek( nCurrentPos ); } } return bRetValue; } // --------------------------------------------------------------------------------------------- void EscherEx::LeaveGroup() { --mnGroupLevel; PtDelete( ESCHER_Persist_Grouping_Snap | mnGroupLevel ); PtDelete( ESCHER_Persist_Grouping_Logic | mnGroupLevel ); CloseContainer(); } // --------------------------------------------------------------------------------------------- void EscherEx::AddShape( sal_uInt32 nShpInstance, sal_uInt32 nFlags, sal_uInt32 nShapeID ) { AddAtom( 8, ESCHER_Sp, 2, nShpInstance ); if ( !nShapeID ) nShapeID = GenerateShapeId(); if ( nFlags ^ 1 ) // is this a group shape ? { // if not if ( mnGroupLevel > 1 ) nFlags |= 2; // this not a topmost shape } *mpOutStrm << nShapeID << nFlags; } // --------------------------------------------------------------------------------------------- void EscherEx::Commit( EscherPropertyContainer& rProps, const Rectangle& ) { rProps.Commit( GetStream() ); } // --------------------------------------------------------------------------------------------- sal_uInt32 EscherEx::GetColor( const sal_uInt32 nSOColor, sal_Bool bSwap ) { if ( bSwap ) { sal_uInt32 nColor = nSOColor & 0xff00; // GRUEN nColor |= (sal_uInt8)( nSOColor ) << 16; // ROT nColor |= (sal_uInt8)( nSOColor >> 16 ); // BLAU return nColor; } else return nSOColor & 0xffffff; } // --------------------------------------------------------------------------------------------- sal_uInt32 EscherEx::GetColor( const Color& rSOColor, sal_Bool bSwap ) { sal_uInt32 nColor = ( rSOColor.GetRed() << 16 ); nColor |= ( rSOColor.GetGreen() << 8 ); nColor |= rSOColor.GetBlue(); if ( !bSwap ) nColor = GetColor( nColor, sal_True ); return nColor; } // ---------------------------------------------------------------------------------------------
package test53.visitor;

import java.util.Collection;
import java.util.Iterator;

/**
 * Created with IntelliJ IDEA.
 * User: chin
 * Date: 8/14/18
 * Time: 10:45 AM
 * To change this template use File | Settings | File Templates.
 * Description:
 */
public class Starter {

    /**
     * Introductory example:
     * print1() prints the elements of a collection,
     * print2() also handles elements that are themselves collections,
     * print3() additionally wraps strings in quotes and appends D to doubles and F to floats.
     */
    public void print1(Collection collection) {
        Iterator it = collection.iterator();
        while (it.hasNext()) {
            System.out.println(it.next().toString());
        }
    }

    // If the collection contains nested collection objects, the
    // it.next().toString() call above is no longer meaningful.
    public void print2(Collection collection) {
        Iterator it = collection.iterator();
        while (it.hasNext()) {
            Object o = it.next();
            if (o instanceof Collection) {
                print2((Collection) o);
            } else {
                System.out.println(o.toString());
            }
        }
    }

    // Strings should be printed with single quotes, doubles with a trailing D,
    // and floats with a trailing F.
    public void print3(Collection clt) {
        Iterator it = clt.iterator();
        while (it.hasNext()) {
            Object o = it.next();
            if (o instanceof Collection) {
                print3((Collection) o);
            } else if (o instanceof String) {
                System.out.println("'" + o.toString() + "'");
            } else if (o instanceof Double) {
                System.out.println(o.toString() + "D");
            } else if (o instanceof Float) {
                System.out.println(o.toString() + "F");
            } else {
                System.out.println(o.toString());
            }
        }
    }

    // The more cases there are, the longer these branches grow and the harder
    // the code becomes to maintain; consider the visitor pattern instead.
}
package subscr

import (
    "encoding/base64"
    "log"
)

// DecodeUrlBase64 decodes a URL-safe base64 string, appending the missing '=' padding if needed.
func DecodeUrlBase64(str string) string {
    l := len(str)
    if l%4 != 0 {
        for i := 0; i < 4-l%4; i++ {
            str += "="
        }
    }
    deStr, err := base64.URLEncoding.DecodeString(str)
    if err != nil {
        log.Println(err)
    }
    return string(deStr)
}

// DecodeBase64 decodes a standard base64 string, appending the missing '=' padding if needed.
func DecodeBase64(str string) string {
    l := len(str)
    if l%4 != 0 {
        for i := 0; i < 4-l%4; i++ {
            str += "="
        }
    }
    deStr, err := base64.StdEncoding.DecodeString(str)
    if err != nil {
        log.Println(err)
    }
    return string(deStr)
}

// DecodeBytesBase64 decodes a base64 byte slice, appending the missing '=' padding if needed.
func DecodeBytesBase64(str []byte) ([]byte, error) {
    l := len(str)
    if l%4 != 0 {
        for i := 0; i < 4-l%4; i++ {
            str = append(str, '=')
        }
    }
    deStr := make([]byte, base64.StdEncoding.DecodedLen(len(str)))
    // Trim to the number of bytes actually written so callers do not see
    // trailing zero bytes when the input carried padding.
    n, err := base64.StdEncoding.Decode(deStr, str)
    return deStr[:n], err
}
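// A minimal usage sketch for the helpers above; it is not part of the original
// repository. It is written as a Go example test in the same package so that no
// module import path has to be guessed. "aGVsbG8td29ybGQ" is "hello-world"
// base64-encoded with its trailing '=' stripped, so each helper restores the
// missing padding before decoding.
package subscr

import "fmt"

func ExampleDecodeBase64() {
    fmt.Println(DecodeBase64("aGVsbG8td29ybGQ"))
    fmt.Println(DecodeUrlBase64("aGVsbG8td29ybGQ")) // URL-safe variant; only differs for input containing '-' or '_'
    raw, _ := DecodeBytesBase64([]byte("aGVsbG8td29ybGQ"))
    fmt.Println(string(raw))
    // Output:
    // hello-world
    // hello-world
    // hello-world
}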
// Handle an IGMP-record from an IGMP-packet (called by igmp_receive)
static ssize_t igmp_handle_record(struct groups *groups, const uint8_t *data, size_t len)
{
    struct igmpv3_grec *r = (struct igmpv3_grec*)data;
    if (len < sizeof(*r))
        return -1;

    size_t nsrc = ntohs(r->grec_nsrcs);
    size_t read = sizeof(*r) + nsrc * sizeof(struct in_addr) + r->grec_auxwords * 4;
    if (len < read)
        return -1;

    if (r->grec_type >= UPDATE_IS_INCLUDE && r->grec_type <= UPDATE_BLOCK &&
            igmp_is_valid_group(r->grec_mca)) {
        struct in6_addr addr, sources[nsrc];
        querier_map(&addr, r->grec_mca);
        for (size_t i = 0; i < nsrc; ++i)
            querier_map(&sources[i], r->grec_src[i]);

        groups_update_state(groups, &addr, sources, nsrc, r->grec_type);
    }
    return read;
}
#ifndef _IMAGE_h
#define _IMAGE_h

#include <map>
#include <mutex>
#include <queue>
#include <string>
#include <utility>
#include <vector>

#include <Types.h>
#include <Dispatcher.h>

#include "base64.h"
#include "JPEG.h"
#include "RegistrarArbitration.h"

using namespace RRAD;

/*
    Wrapper for the JSON containing the user image; it is embedded in the
    comment section of the default JPEG format:
    {
        ownerID: ..,
        views: #,
        thumb: base64_img_data,
        data: base64_img_data,
        access: { }
    }
*/
class Image : public RemoteObject {
    JSON id;
    JSON img_json;

    // RPC methods
    void recordView(std::string viewer);
    void addRequest(std::string requester);

    static void destroyImage(RegistrarArbitration* ra, Image* image);

public:
    void recordAccessChange(std::string targetUser, int views);
    std::vector<uint8> getSteganogram();
    std::string getSteganogramBase64();

    // : RemoteObject
    virtual std::string getClassName() override { return "Image"; }
    virtual JSON executeRPC(std::string name, JSON arguments) override;
    virtual JSON getID() override { return id; }
    std::string getThumbnail() { return img_json["thumb"]; }

    // Constructors
    Image(std::string title, std::string base64, std::string thumbBase64);
    Image(JSON id, bool owned, JSON img_json); // changed order to avoid ambiguity, thanks c++

    // Local per-image helpers
    void setAccess(RegistrarArbitration* ra, std::string targetUser, int views);
    void requestAccess(RegistrarArbitration* ra);
    JSON getMetadata();
    void destroy(RegistrarArbitration* ra);

    // Static helpers
    static std::queue< std::pair<Image*, std::string> > requests;
    static std::mutex psuedoDownloadedMutex;
    static std::map<std::string, Image*> pseudoDownloaded;
    static Image* imageFromSteganogram(JSON id, std::vector<uint8> steganogram);
    static JPEG generateSteganogramJPEG();
    static JSON getList(RegistrarArbitration* ra, std::string user);
    static std::string getImageData(RegistrarArbitration* ra, JSON id);
};

#endif //_IMAGE_h
#include "leads/Leads.hpp"
#include "wrappers.hpp"
using namespace cpb;

void wrap_leads(py::module& m) {
    py::class_<leads::Spec>(m, "LeadSpec")
        .def_readonly("axis", &leads::Spec::axis)
        .def_readonly("sign", &leads::Spec::sign)
        .def_readonly("shape", &leads::Spec::shape)
    ;

    py::class_<Lead>(m, "Lead")
        .def_property_readonly("spec", &Lead::spec)
        .def_property_readonly("indices", [](Lead const& l) { return arrayref(l.indices()); })
        .def_property_readonly("system", &Lead::system)
        .def_property_readonly("h0", [](Lead const& l) { return l.h0().csrref(); })
        .def_property_readonly("h1", [](Lead const& l) { return l.h1().csrref(); })
    ;

    py::class_<Leads>(m, "Leads")
        .def("__len__", &Leads::size)
        .def("__getitem__", &Leads::operator[])
    ;
}
/* Insert an MIR instruction to the end of a basic block */
void dvmCompilerAppendMIR(BasicBlock *bb, MIR *mir)
{
    if (bb->firstMIRInsn == NULL) {
        /* An empty block must have no tail pointer either */
        assert(bb->lastMIRInsn == NULL);
        bb->lastMIRInsn = bb->firstMIRInsn = mir;
        mir->prev = mir->next = NULL;
    } else {
        bb->lastMIRInsn->next = mir;
        mir->prev = bb->lastMIRInsn;
        mir->next = NULL;
        bb->lastMIRInsn = mir;
    }
}
package defaults

import (
    "testing"

    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
)

func TestSet(t *testing.T) {
    t.Run("should fail on nil pointer", func(t *testing.T) {
        err := Set(nil)
        require.Error(t, err)
    })

    t.Run("should not fail on nil value", func(t *testing.T) {
        type st struct {
            String string `default:"text"`
        }
        var s st
        err := Set(&s)
        require.NoError(t, err)
        assert.Equal(t, "text", s.String)
    })

    t.Run("should fail on non-struct value", func(t *testing.T) {
        v := 1
        err := Set(&v)
        require.Error(t, err)
    })

    t.Run("should not fail on empty struct", func(t *testing.T) {
        err := Set(&struct{}{})
        require.NoError(t, err)
    })

    t.Run("should be able to set default values", func(t *testing.T) {
        cfg := struct {
            String        string  `default:"text"`
            StringNumeric string  `default:"123"`
            Integer       int     `default:"2"`
            Float64       float64 `default:"0.2"`
            Bool          bool    `default:"true"`
            AnotherStruct []struct {
                String string `default:"text"`
            } `default_size:"3"`
            YetAnotherStruct struct {
                String string `default:"text_other"`
            }
        }{}

        err := Set(&cfg)
        require.NoError(t, err)
        assert.Equal(t, "text", cfg.String)
        assert.Equal(t, "123", cfg.StringNumeric)
        assert.Equal(t, 2, cfg.Integer)
        assert.Equal(t, 0.2, cfg.Float64)
        assert.Equal(t, true, cfg.Bool)
        assert.Equal(t, "text", cfg.AnotherStruct[2].String)
        assert.Equal(t, "text_other", cfg.YetAnotherStruct.String)
    })

    t.Run("should fail on wrong default types", func(t *testing.T) {
        cfg := struct {
            Number int `default:"test"`
        }{}
        err := Set(&cfg)
        require.Error(t, err)
    })

    t.Run("should not fail for private fields", func(t *testing.T) {
        cfg := struct {
            integer int `default:"2"`
        }{}
        err := Set(&cfg)
        require.NoError(t, err)
        assert.Equal(t, 0, cfg.integer)
    })

    t.Run("should be able to set chan and map data types", func(t *testing.T) {
        cfg := struct {
            Map  map[int]string `default_size:"0"`
            Chan chan bool      `default_size:"3"`
        }{}
        err := Set(&cfg)
        require.NoError(t, err)
        assert.NotNil(t, cfg.Map)
        assert.NotNil(t, cfg.Chan)
        assert.Equal(t, 3, cap(cfg.Chan))
    })
}
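// The tests above document the contract of Set. What follows is a minimal,
// illustrative sketch of such a function and is NOT the package's real
// implementation: it only covers exported string/int/float64/bool fields with
// a `default` tag plus recursion into nested structs; the `default_size`
// handling for slices, maps and channels exercised above is omitted.
package defaults

import (
    "errors"
    "reflect"
    "strconv"
)

// Set fills tagged fields of the struct pointed to by v with their defaults.
func Set(v interface{}) error {
    rv := reflect.ValueOf(v)
    if rv.Kind() != reflect.Ptr || rv.IsNil() {
        return errors.New("defaults: expected a non-nil pointer to a struct")
    }
    rv = rv.Elem()
    if rv.Kind() != reflect.Struct {
        return errors.New("defaults: expected a pointer to a struct")
    }
    t := rv.Type()
    for i := 0; i < rv.NumField(); i++ {
        field := rv.Field(i)
        if !field.CanSet() { // unexported fields are left untouched
            continue
        }
        if field.Kind() == reflect.Struct {
            if err := Set(field.Addr().Interface()); err != nil {
                return err
            }
            continue
        }
        tag, ok := t.Field(i).Tag.Lookup("default")
        if !ok {
            continue
        }
        switch field.Kind() {
        case reflect.String:
            field.SetString(tag)
        case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
            n, err := strconv.ParseInt(tag, 10, 64)
            if err != nil {
                return err
            }
            field.SetInt(n)
        case reflect.Float32, reflect.Float64:
            f, err := strconv.ParseFloat(tag, 64)
            if err != nil {
                return err
            }
            field.SetFloat(f)
        case reflect.Bool:
            b, err := strconv.ParseBool(tag)
            if err != nil {
                return err
            }
            field.SetBool(b)
        }
    }
    return nil
}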
def patch_port(self, port, patches, ironic=None):
    """Apply JSON patches to a port belonging to this node."""
    ironic = ironic or self.ironic
    ports = self.ports()

    if isinstance(port, six.string_types):
        port = ports[port]

    LOG.debug('Updating port %(mac)s with patches %(patches)s',
              {'mac': port.address, 'patches': patches},
              node_info=self)
    new_port = ironic.port.update(port.uuid, patches)
    ports[port.address] = new_port
import numpy as np import keras from keras.datasets import mnist from keras.layers import Input, Dense, Dropout, Conv2D, Flatten, MaxPooling2D from keras.models import Model from keras import backend as K from keras.preprocessing.image import ImageDataGenerator import time batch_size = 128 num_classes = 10 epochs1 = 1 epochs2 = 1 epochs3 = 98 train_begin_time = time.time() best_score = 0 # input image dimensions img_rows, img_cols = 28, 28 # the data, shuffled and split between train and test sets (x_train, y_train), (x_test, y_test) = mnist.load_data() if K.image_data_format() == 'channels_first': x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols) x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols) input_shape = (1, img_rows, img_cols) else: x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1) x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1) input_shape = (img_rows, img_cols, 1) x_train = x_train.astype('float32') x_test = x_test.astype('float32') x_train /= 255 x_test /= 255 print('x_train shape:', x_train.shape) print(x_train.shape[0], 'train samples') print(x_test.shape[0], 'test samples') # convert class vectors to binary class matrices y_train = keras.utils.to_categorical(y_train, num_classes) y_test = keras.utils.to_categorical(y_test, num_classes) def save_weights(model, filename, layer): conv1 = model.get_layer('conv{0}'.format(layer)).get_weights() fc1 = model.get_layer('fc1').get_weights() fc2 = model.get_layer('fc2').get_weights() np.savez(filename, W_conv=conv1[0], b_conv=conv1[1], W_fc1=fc1[0], b_fc1=fc1[1], W_fc2=fc2[0], b_fc2=fc2[1]) class TimeHistory(keras.callbacks.Callback): def on_train_begin(self, logs={}): self.times = [] self.epoch_times = [] self.best_weights = None def on_epoch_begin(self, epoch, logs={}): self.t0 = time.time() def on_epoch_end(self, epoch, logs={}): global best_score self.times.append(time.time() - train_begin_time) self.epoch_times.append(time.time() - self.t0) if logs.get('val_acc') > best_score: try: best_score = logs.get('val_acc') self.best_weights = save_weights(self.model, 'weights_layer3.npz', 3) except Exception: pass def layer1(): main_input = Input(shape=input_shape, name='main_input') conv1 = Conv2D(256,(3,3), activation='relu', padding='same', name='conv1')(main_input) conv1 = MaxPooling2D(pool_size=(2,2))(conv1) conv1_drop = Dropout(.3)(conv1) conv1_flat = Flatten()(conv1_drop) fc1 = Dense(150, activation='relu', name='fc1')(conv1_flat) fc1_drop = Dropout(.5)(fc1) main_output = Dense(10, activation='softmax', name='fc2')(fc1_drop) model = Model(inputs=[main_input], outputs=[main_output]) model.compile(optimizer=keras.optimizers.Adam(lr=0.005), loss='categorical_crossentropy', metrics=['accuracy']) print('Using real-time data augmentation.') # This will do preprocessing and realtime data augmentation: datagen = ImageDataGenerator( rotation_range=7, # randomly rotate images in the range (degrees, 0 to 180) width_shift_range=0.05, # randomly shift images horizontally (fraction of total width) height_shift_range=0.05, # randomly shift images vertically (fraction of total height) zoom_range=.1) # Compute quantities required for feature-wise normalization # (std, mean, and principal components if ZCA whitening is applied). datagen.fit(x_train) time_history = TimeHistory() history = keras.callbacks.History() # Fit the model on the batches generated by datagen.flow(). 
model.fit_generator(datagen.flow(x_train, y_train, batch_size=batch_size), steps_per_epoch=x_train.shape[0] // batch_size, epochs=epochs1, callbacks=[history, time_history], validation_data=(x_test, y_test)) np.savez('layer1_mnist_results.npz', acc=history.history['acc'], loss=history.history['loss'], val_acc=history.history['val_acc'], val_loss=history.history['val_loss'], times=time_history.times, epoch_times=time_history.epoch_times) conv1_weights = model.get_layer('conv1').get_weights() save_weights(model, "weights_layer1.npz", 1) return conv1_weights def layer2(conv1_weights): main_input = Input(shape=input_shape, name='main_input') conv1 = Conv2D(256, (3,3), activation='relu', padding='same', trainable=False, name='conv1')(main_input) conv2 = Conv2D(256, (3,3), activation='relu', padding='same', name='conv2')(conv1) conv2 = MaxPooling2D(pool_size = (2,2))(conv2) conv2_drop = Dropout(.3)(conv2) conv2_flat = Flatten()(conv2_drop) fc1 = Dense(150, activation='relu', name='fc1')(conv2_flat) fc1_drop = Dropout(.5)(fc1) main_output = Dense(10, activation='softmax', name='fc2')(fc1_drop) model = Model(inputs=[main_input], outputs=[main_output]) model.compile(optimizer=keras.optimizers.Adam(lr=0.005), loss='categorical_crossentropy', metrics=['accuracy']) model.get_layer('conv1').set_weights(conv1_weights) time_history = TimeHistory() history = keras.callbacks.History() print('Using real-time data augmentation.') # This will do preprocessing and realtime data augmentation: datagen = ImageDataGenerator( rotation_range=7, # randomly rotate images in the range (degrees, 0 to 180) width_shift_range=0.05, # randomly shift images horizontally (fraction of total width) height_shift_range=0.05, # randomly shift images vertically (fraction of total height) zoom_range=.1) # Compute quantities required for feature-wise normalization datagen.fit(x_train) time_history = TimeHistory() history = keras.callbacks.History() # Fit the model on the batches generated by datagen.flow(). 
model.fit_generator(datagen.flow(x_train, y_train, batch_size=batch_size), steps_per_epoch=x_train.shape[0] // batch_size, epochs=epochs2, callbacks=[history, time_history], validation_data=(x_test, y_test)) np.savez('layer2_mnist_results.npz', acc=history.history['acc'], loss=history.history['loss'], val_acc=history.history['val_acc'], val_loss=history.history['val_loss'], times=time_history.times, epoch_times=time_history.epoch_times) conv2_weights = model.get_layer('conv2').get_weights() save_weights(model, "weights_layer2.npz", 2) return conv2_weights def layer3(conv1_weights, conv2_weights): main_input = Input(shape=input_shape, name='main_input') conv1 = Conv2D(256, (3,3), activation='relu', padding='same', trainable=False, name='conv1')(main_input) conv2 = Conv2D(256, (3,3), activation='relu', padding='same', trainable=False, name='conv2')(conv1) conv2 = MaxPooling2D(pool_size = (2,2))(conv2) conv3 = Conv2D(128, (3,3), activation='relu', padding='same', name='conv3')(conv2) conv3_drop = Dropout(.3)(conv3) conv3_flat = Flatten()(conv3_drop) fc1 = Dense(150, activation='relu', name='fc1')(conv3_flat) fc1_drop = Dropout(.5)(fc1) main_output = Dense(10, activation='softmax', name='fc2')(fc1_drop) model = Model(inputs=[main_input], outputs=[main_output]) model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy']) model.get_layer('conv1').set_weights(conv1_weights) model.get_layer('conv2').set_weights(conv2_weights) time_history = TimeHistory() history = keras.callbacks.History() def schedule(epoch): if epoch < 2: return 0.005 elif epoch < 10: return 0.002 elif epoch < 40: return 0.001 elif epoch < 60: return 0.0005 elif epoch < 80: return 0.0001 else: return 0.00005 rate_schedule = keras.callbacks.LearningRateScheduler(schedule) print('Using real-time data augmentation.') # This will do preprocessing and realtime data augmentation: datagen = ImageDataGenerator( rotation_range=7, # randomly rotate images in the range (degrees, 0 to 180) width_shift_range=0.05, # randomly shift images horizontally (fraction of total width) height_shift_range=0.05, # randomly shift images vertically (fraction of total height) zoom_range=.1) # randomly flip images # Compute quantities required for feature-wise normalization datagen.fit(x_train) time_history = TimeHistory() history = keras.callbacks.History() # Fit the model on the batches generated by datagen.flow(). model.fit_generator(datagen.flow(x_train, y_train, batch_size=batch_size), steps_per_epoch=x_train.shape[0] // batch_size, epochs=epochs3, callbacks=[history, time_history, rate_schedule], validation_data=(x_test, y_test)) np.savez('layer3_mnist_results.npz', acc=history.history['acc'], loss=history.history['loss'], val_acc=history.history['val_acc'], val_loss=history.history['val_loss'], times=time_history.times, epoch_times=time_history.epoch_times) conv3_weights = model.get_layer('conv3').get_weights() return conv3_weights if __name__ == "__main__": conv1_weights = layer1() conv2_weights = layer2(conv1_weights) conv3_weights = layer3(conv1_weights,conv2_weights)
// Custom mod handler for item type func itemExternalModFunc(d *schema.ResourceData, m interface{}, item *zabbix.Item) { item.Type = zabbix.ExternalCheck item.InterfaceID = d.Get("interfaceid").(string) item.Delay = d.Get("delay").(string) }
//'paramList' is the List of Params of the API that the resulting list of Frames refers to. public ArrayList<Frame> generateFrames(HTTPMethod method, String endpoint, ArrayList<Param> paramList, Double probSelection, Double probFailure, Double probCriticalFailure, Double trueProbSelection, Double trueProbFailure, Double trueProbCriticalFailure) { ArrayList<Frame> framesList = new ArrayList<Frame>(); ArrayList<TypeParam> types = new ArrayList<TypeParam>(); for(int k = 0; k<6; k++){ TypeParam t = null; if(k<paramList.size()){ t = paramList.get(k).getTypeParam(); } types.add(t); } List<List<String>> classesCombinations = EquivalenceClass.cartesianProduct(types.get(0), types.get(1), types.get(2), types.get(3), types.get(4), types.get(5)); for(int i = 0; i<classesCombinations.size(); i++){ Frame frame = new Frame(new DiscourseSpecifics()); ArrayList<Param> frameParamList = new ArrayList<Param>(); frame.setMethod(method); frame.setEndpoint(endpoint); frame.setProbSelection(probSelection); frame.setProbFailure(probFailure); frame.setProbCriticalFailure(probCriticalFailure); frame.setTrueProbSelection(trueProbSelection); frame.setTrueProbFailure(trueProbFailure); frame.setTrueProbCriticalFailure(trueProbCriticalFailure); for(int j = 0; j<paramList.size(); j++){ Param p1 = new Param(paramList.get(j)); p1.setClassParam(DiscourseEquivalenceClass.valueOf(classesCombinations.get(i).get(j))); frameParamList.add(p1); } frame.setParamList(frameParamList); framesList.add(frame); } return framesList; }
/* Adjusts the register min/max values in the case that the dst_reg is the
 * variable register that we are working on, and src_reg is a constant or we're
 * simply doing a BPF_K check.
 */
static void reg_set_min_max(struct bpf_reg_state *true_reg,
			    struct bpf_reg_state *false_reg, u64 val,
			    u8 opcode)
{
	switch (opcode) {
	case BPF_JEQ:
		/* On the true branch the register is known to be exactly val. */
		true_reg->max_value = true_reg->min_value = val;
		break;
	case BPF_JNE:
		/* On the false branch (the values are equal) the register is
		 * known to be exactly val.
		 */
		false_reg->max_value = false_reg->min_value = val;
		break;
	case BPF_JGT:
		/* Unsigned comparison: the false branch additionally gets a
		 * non-negative lower bound, then shares the bounds of the
		 * signed case below.
		 */
		false_reg->min_value = 0;
		/* fall through */
	case BPF_JSGT:
		false_reg->max_value = val;
		true_reg->min_value = val + 1;
		break;
	case BPF_JGE:
		false_reg->min_value = 0;
		/* fall through */
	case BPF_JSGE:
		false_reg->max_value = val - 1;
		true_reg->min_value = val;
		break;
	default:
		break;
	}

	check_reg_overflow(false_reg);
	check_reg_overflow(true_reg);
}
def compute_task_params(self, observation: Dict):
    dst_idx = None
    if self.command is None:
        # Scripted task: source / destination indexes come from the options.
        if self.opt.obs_mode == "gt":
            if self.task == "place":
                src_idx = self.opt.scene_place_src_idx
            elif self.task == "stack":
                src_idx = self.opt.scene_stack_src_idx
                dst_idx = self.opt.scene_stack_dst_idx
        elif self.opt.obs_mode == "vision":
            gt2pred_idxs = match_objects(
                src_odicts=self.scene, dst_odicts=observation
            )
            if self.task == "place":
                src_idx = gt2pred_idxs[self.opt.scene_place_src_idx]
            elif self.task == "stack":
                src_idx = gt2pred_idxs[self.opt.scene_stack_src_idx]
                dst_idx = gt2pred_idxs[self.opt.scene_stack_dst_idx]
        else:
            raise ValueError(f"Invalid observation mode: {self.opt.obs_mode}.")
        if self.task == "place":
            assert self.place_dst_xy is not None
            dst_x, dst_y = self.place_dst_xy
        elif self.task == "stack":
            dst_x, dst_y, _ = observation[dst_idx]["position"]
    else:
        # Language command: let the NLP module pick the source and destination.
        language_input = copy.deepcopy(observation)
        for idx, odict in enumerate(language_input):
            language_input[idx]["position"] = odict["position"] + [0.0]
        src_idx, (dst_x, dst_y), dst_idx = NLPmod(
            sentence=self.command[self.command_pointer],
            vision_output=language_input,
        )
        self.task = "place" if dst_idx is None else "stack"
        self.place_dst_xy = [dst_x, dst_y]
        self.command_pointer += 1

    is_sphere = observation[src_idx]["shape"] == "sphere"
    if dst_idx is None:
        z = 0.0
        policy_z = z
    else:
        z = observation[dst_idx]["height"]
        policy_z = z
        if not self.policy_opt.use_height:
            policy_z = utils.H_MAX
    dst_xyz = [dst_x, dst_y, z]
    plan_xyz = [dst_x, dst_y, z + utils.PLACE_START_CLEARANCE]
    policy_dst_xyz = [dst_x, dst_y, policy_z]
    return src_idx, dst_idx, dst_xyz, plan_xyz, policy_dst_xyz, is_sphere
With two minutes left in the first half, the Bengals led the Seahawks, 10-3, on Sunday. Cincinnati called a play-action pass, but the pocket collapsed on the rookie quarterback Andy Dalton as he began to execute the fake handoff. With multiple Seahawks players crashing toward him, Dalton stepped up and fired a bomb into the end zone. At the same time, the rookie receiver A.J. Green had gained a step on Seattle safety Earl Thomas. As Green reached the end zone, Dalton’s pass hit him perfectly in stride for a 46-yard touchdown. It was the fifth touchdown reception for Green this season, and the fourth time he’s connected with Dalton for a score (Bruce Gradkowski was responsible for Green’s first touchdown catch as a pro). If Green and Dalton can connect six more times over the next nine weeks, they will become the first pair of rookie teammates to combine for 10 passing touchdowns. A.J. Green’s success has been a matter of when, not if; Sports Illustrated profiled him after his sophomore season in high school and correctly tabbed that he’d star in the N.F.L. by 2011. As a true freshman, Green led the Southeastern Conference in receiving yards and touchdowns. Against eventual national champion Auburn, Green caught 9 passes for 164 yards and 2 touchdowns. To no one’s surprise, he was the first receiver off the board, at No. 4 in the 2011 draft. Andy Dalton’s collegiate career was even more successful. Dalton started an incredible 50 games for Texas Christian University. He helped turn the Horned Frogs into a national power, winning 26 of his last 27 games. Dalton was named the Mountain West Conference’s offensive player of the year in both 2009 and 2010, and his signature moment came when he guided T.C.U. to a Rose Bowl victory over Wisconsin. But N.F.L. scouts harbored some skepticism, for the usual reasons: Dalton lacks ideal size and arm strength. His success was partly written off as a product of playing in a spread offense against a lower level of competition; the win-loss record was mostly attributed to the most dominant defense in the country. The Bengals did not hide their interest in Dalton, but were not enamored enough with him to trade up into the end of the first round to select him. After drafting Green, Cincinnati sat idly by as Dalton fell past suitors in in Seattle and Buffalo until the Bengals could draft him with the 35th overall selection. Together, Dalton and Green are breaking the rules when it comes to rookie quarterback-receiver combos. They’re almost certainly going to be the most productive such pair in at least the past 40 years. Let’s take a look at some of the most notable rookie quarterback-receiver combinations in league history. The Last 20 Years The 2000s had a lot of great rookie performances by both quarterbacks and receivers, but they never came by players on the same team. Rookie receivers weren’t key targets for Matt Ryan or Ben Roethlisberger, or for Byron Leftwich, Vince Young, Joe Flacco, Sam Bradford and Mark Sanchez. Not too surprisingly, Anquan Boldin, Michael Clayton, Eddie Royal, Marques Colston, Mike Williams, Andre Johnson and other top rookie receivers of the past decade weren’t catching passes from rookie quarterbacks. By any measure, the most recent rookie quarterback/receiver combo to have any sort of success was Tim Couch and Kevin Johnson, who were serviceable as rookies for the expansion Cleveland Browns in 1999. 
Johnson caught 66 passes for 986 yards and 9 touchdowns, while Couch averaged 6.1 yards per pass attempt and threw for more touchdowns than interceptions. There is only one other such duo of note in the past 20 seasons. In 1995, St. Louis’s Tony Banks led the league in yards per completion and finished above league average in yards per pass attempt. The man on the receiving end of his long passes was the rookie Eddie Kennison, who scored 9 touchdowns, gained 924 yards, and finished 9th in the league in yards per catch. Banks and Kennison and Couch and Johnson are remembered as disappointments in St. Louis and Cleveland, but they were the most productive rookie quarterback-receiver combinations in the ’90s. If A.J. Green can finish with 1,000 yards and 10 touchdowns while Dalton produces league-average efficiency numbers, then they’ll go down as the best rookie quarterback-receiver battery in at least the last 20 seasons. Historical View The standard for rookie passer-receiver productivity was set in Cleveland in 1946. Otto Graham, fresh off his service with the Coast Guard, was a rookie for the expansion Browns in the inaugural season of the All-America Football Conference. Dante Lavelli, an Army man whose path had crossed with the Cleveland Browns founder Paul Brown, returned from World War II and was recruited to play for the Browns. That move worked out pretty well. Lavelli led the AAFC in receptions and receiving yards, Graham led the league in touchdown passes, and the Browns went 12-2 and won the AAFC championship. Graham and Lavelli played in Cleveland for a decade, winning titles in the AAFC and the N.F.L.; both players have been inducted into the Pro Football Hall of Fame. Charlie Conerly was drafted by the Redskins in 1945, while he was still in college. He was named the Southeastern Conference Player of the Year in 1947, and was traded by the Redskins to the Giants before the start of his rookie season. Conerly was an immediate success in New York: in 1948, he finished second in the league in both passing yards and passing touchdowns. His top target was the rookie Bill Swiacki, who caught 10 touchdowns in 12 games (nine from Conerly) and finished fourth in the league in receptions. Four years later, Babe Parilli and Billy Howton brought optimism to a Packers team that had struggled offensively after Don Hutson retired in 1945. Howton led the league in receiving yards and caught 13 touchdowns (5 from Parilli) during the 12-game season. Parilli served as the team’s punter and shared time at quarterback with veteran Tobin Rote; together, the Packers led the league in passing touchdowns in ’52. Thanks in large part to Howton, Parilli averaged 18.4 yards per completion and finished second in the league in yards per attempt. In 1956, Johnny Unitas and running back/receiver/utility man Lenny Moore were rookies in Baltimore. Both had Hall of Fame careers, but Moore was mostly used as a running back in his first season with the Colts. The berth of the American Football League enabled many players just out of college to play immediately. Houston Oiler Bill Groman caught 72 passes for 1,473 yards and 12 touchdowns in his — and the league’s — rookie season of 1960. Most of his touchdowns came from George Blanda, but 21-year-old quarterback Jacky Lee played a part in Groman’s big year. Six years later, the league’s Miami Dolphins had a pair of impressive rookies. 
Bob Griese made the Pro Bowl in the first year of his Hall of Fame career; his favorite target was fellow rookie and Big 10 alumnus Jack Clancy, whose 67 catches ranked third in the league. In the league’s final season, Greg Cook produced one of the best seasons in rookie quarterback history. One of his top targets was the spectacularly named Speedy Thomas. The last great rookie QB-WR combo came 40 years ago. In the final week of January 1971, the Patriots selected Stanford quarterback Jim Plunkett with the first pick in the draft. In the 17th and final round, the Rams selected Plunkett’s Cardinal teammate, Randy Vataha. Four weeks earlier, the two had teamed up to help Stanford win its first Rose Bowl in 30 years, with Vataha catching the decisive touchdown to beat Ohio State. When Vataha didn’t make it out of training camp with the Rams, he joined the Patriots and his former teammate, Plunkett. The two combined for some rare quarterback-receiver magic. Vataha took to the N.F.L. in seamless fashion: in ’70 at Stanford, he caught 54 passes for 895 yards and 7 touchdowns; as a rookie in New England in ’71, he caught 51 passes for 872 yards and 9 touchdowns. Vataha finished second in the league in touchdown catches, fifth in receptions and sixth in yards, while Plunkett finished second in the league with 19 touchdown passes (after throwing 18 his senior year at Stanford). Five years later, the expansion Seahawks led the league in pass attempts while finishing last in rushing. The passing attack relied on a pair of rookies, including the eventual Hall of Famer Steve Largent, who finished sixth in the league in receptions. Quarterback Jim Zorn ranked in the top 10 in passing yards and passing touchdowns, while leading the league in attempts (and interceptions). The most productive pair of rookies at quarterback and receiver in the ’80s played in Washington. Gary Clark caught 72 passes for 926 yards as a rookie in 1985, before making the Pro Bowl in each of his next two seasons. After Lawrence Taylor ended Joe Theismann’s career, Jay Schroeder came in and led the Redskins to a 4-1 record while finishing with better than average efficiency numbers. Since then, N.F.L. teams have been reluctant to start rookie quarterbacks, and even less willing to pair them with equally inexperienced receivers. When teams do, the results usually end up looking like what we saw in Carolina last season with Jimmy Clausen and David Gettis. Banks and Kennison and Couch and Johnson, as lackluster as they may sound, were the best-case pairings for teams that elected to go with rookies at quarterback and wide receiver over the past couple of decades. Before Green and Dalton this season, those were the only two pairs to record even five touchdown passes together as rookies in the last 35 years. What would have been more likely success stories? The 2002 Texans drafted David Carr with the first overall pick and teamed him up with Jabar Gaffney at the top of the second round. In 1998, the Colts selected Peyton Manning with the first pick…. and Jerome Pathon in the second round to develop alongside him. The Dolphins took Ted Ginn and John Beck in the first two rounds of 2007, and the Lions followed up their Calvin Johnson pick with Drew Stanton. Buffalo took Lee Evans and J.P. Losman in the first round of the 2004 draft. As you can see, it’s not unusual for a team to try to solve passing-game woes with two big swings in the draft, as the Bengals did last April. 
The unusual part is to see the results be so positive and immediate. Chase Stuart contributes to Pro-Football-Reference.com and to Footballguys.com.
<reponame>tosyama/palan /// Array item model class definition. /// /// PlnArrayItem returns item of array /// that indicated by index. /// e.g.) a[3] /// /// @file PlnArrayItem.cpp /// @copyright 2017-2019 <NAME> #include "boost/assert.hpp" #include "../../PlnConstants.h" #include "PlnArrayItem.h" #include "PlnMulOperation.h" #include "PlnAddOperation.h" #include "../PlnVariable.h" #include "../PlnType.h" #include "../types/PlnFixedArrayType.h" #include "../../PlnDataAllocator.h" #include "../../PlnGenerator.h" #include "../../PlnConstants.h" #include "../../PlnMessage.h" #include "../../PlnException.h" static PlnExpression* getIndexExpression( int index, int offset, vector<PlnExpression*> &item_ind, vector<int> &arr_sizes) { PlnExpression *ex = item_ind[index]; if (offset > 1) { auto oex = new PlnExpression(PlnValue((int64_t)offset)); ex = PlnMulOperation::create(ex, oex); } if (index) { int next_offset = offset * arr_sizes[index]; auto base_ex = getIndexExpression(index-1, next_offset, item_ind, arr_sizes); ex = PlnAddOperation::create(base_ex, ex); } return ex; } PlnArrayItem::PlnArrayItem(PlnExpression *array_ex, const vector<PlnExpression*>& item_ind) : PlnArrayItem(array_ex, item_ind, array_ex->values[0].inf.var->var_type) { } static PlnVariable* getArrayVar(PlnVarType *item_type, PlnExpression* array_ex) { BOOST_ASSERT(array_ex->values[0].type == VL_VAR); auto var = new PlnVariable(); auto array_var = array_ex->values[0].inf.var; var->name = array_var->name + "[]"; var->var_type = item_type; var->is_indirect = true; if (array_var->container) var->container = array_var->container; else var->container = array_var; var->is_tmpvar = var->container->is_tmpvar; return var; } // Can be any array type. PlnArrayItem::PlnArrayItem(PlnExpression *array_ex, vector<PlnExpression*> item_ind, PlnVarType* arr_type) : PlnExpression(ET_ARRAYITEM), array_ex(array_ex) { BOOST_ASSERT(item_ind.size()); BOOST_ASSERT(array_ex->values[0].type == VL_VAR); auto array_var = array_ex->values[0].inf.var; if (arr_type->typeinf->type != TP_FIXED_ARRAY) { PlnCompileError err(E_CantUseIndexHere, array_var->name); throw err; } PlnFixedArrayType *farr_type = static_cast<PlnFixedArrayType*>(arr_type->typeinf); values.push_back(getArrayVar(farr_type->item_type, array_ex)); auto& arr_sizes = farr_type->sizes; BOOST_ASSERT(arr_sizes.size() == item_ind.size()); index_ex = getIndexExpression(item_ind.size()-1, 1, item_ind, arr_sizes); if (index_ex->type == ET_VALUE) { if (index_ex->values[0].type == VL_LIT_UINT8) { int64_t i = index_ex->values[0].inf.uintValue; index_ex->values[0] = PlnValue(i); } else if (index_ex->values[0].type == VL_LIT_FLO8) { int64_t i = index_ex->values[0].inf.floValue; index_ex->values[0] = PlnValue(i); } } } PlnArrayItem::PlnArrayItem(PlnExpression *array_ex, PlnExpression* index_ex, PlnVarType* item_type) : PlnExpression(ET_ARRAYITEM), array_ex(array_ex), index_ex(index_ex) { values.push_back(getArrayVar(item_type, array_ex)); } PlnArrayItem::~PlnArrayItem() { if (values.size()) delete values[0].inf.var; delete array_ex; delete index_ex; } void PlnArrayItem::finish(PlnDataAllocator& da, PlnScopeInfo& si) { auto item_var = values[0].inf.var; // PlnValue::getDataPlace may alloc dp. 
if (!item_var->place) { item_var->place = new PlnDataPlace(item_var->var_type->size(), item_var->var_type->data_type()); item_var->place->comment = &item_var->name; } auto base_dp = da.prepareObjBasePtr(); array_ex->data_places.push_back(base_dp); array_ex->finish(da, si); PlnDataPlace *index_dp; if (index_ex->type == ET_VALUE && (index_ex->values[0].type == VL_LIT_INT8 || index_ex->values[0].type == VL_LIT_UINT8)) { index_dp = da.prepareObjIndexPtr(index_ex->values[0].inf.intValue); delete index_ex; index_ex = NULL; } else { index_dp = da.prepareObjIndexPtr(); index_ex->data_places.push_back(index_dp); index_ex->finish(da, si); } auto item_dp = item_var->place; da.setIndirectObjDp(item_dp, base_dp, index_dp, 0); da.popSrc(base_dp); if (index_ex) da.popSrc(index_dp); if (data_places.size()) { da.pushSrc(data_places[0], item_dp); } } void PlnArrayItem::gen(PlnGenerator& g) { // for lval & rval PlnDataPlace* base_dp = array_ex->data_places[0]; array_ex->gen(g); PlnDataPlace* index_dp = NULL; if (index_ex) { index_ex->gen(g); index_dp = index_ex->data_places[0]; } g.genLoadDp(base_dp, false); if (index_dp) g.genLoadDp(index_dp, false); g.genSaveDp(base_dp); if (index_dp) g.genSaveDp(index_dp); // rval if (data_places.size()) { g.genSaveSrc(data_places[0]); } } vector<PlnExpression*> PlnArrayItem::getAllArrayItems(PlnVariable* var) { BOOST_ASSERT(var->var_type->typeinf->type == TP_FIXED_ARRAY); vector<PlnExpression*> items; PlnFixedArrayType *atype = static_cast<PlnFixedArrayType*>(var->var_type->typeinf); vector<int> &sizes = atype->sizes; int totalsize = 1; for (int i=0; i<sizes.size(); i++) { totalsize *= sizes[i]; } for (uint64_t i=0; i<totalsize; i++) { PlnExpression *item_ex, *array_ex, *index_ex; array_ex = new PlnExpression(var); index_ex = new PlnExpression(i); item_ex = new PlnArrayItem(array_ex, index_ex, atype->item_type); items.push_back(item_ex); } return items; }
/* * Displays a minimap * * Copyright (c) 2013 <NAME>, (<NAME>) * * This file is part of the Simutrans project under the artistic licence. * (see licence.txt) */ #ifndef gui_gui_map_preview_h #define gui_gui_map_preview_h #include "gui_komponente.h" #include "../../simcolor.h" #include "../../display/simgraph.h" #include "../../tpl/array2d_tpl.h" #define MAP_PREVIEW_SIZE_X ((scr_coord_val)(64)) #define MAP_PREVIEW_SIZE_Y ((scr_coord_val)(64)) /** * A map preview component * * @author <NAME> * @date 2013-06-02 * */ class gui_map_preview_t : public gui_component_t { private: array2d_tpl<PIXVAL> *map_data; scr_size map_size; public: gui_map_preview_t(void); void init( scr_coord pos, scr_size size = scr_size(MAP_PREVIEW_SIZE_X,MAP_PREVIEW_SIZE_Y) ) { set_pos( pos ); set_size( size ); } void set_map_data(array2d_tpl<PIXVAL> *map_data_par, scr_size max_size_par) { map_data = map_data_par; map_size = max_size_par; } /** * Draws the component. * @author <NAME>, (<NAME>) */ virtual void draw(scr_coord offset) { display_ddd_box_clip_rgb(pos.x + offset.x, pos.y + offset.y, size.w, size.w, color_idx_to_rgb(MN_GREY0), color_idx_to_rgb(MN_GREY4)); if(map_data) { display_array_wh(pos.x + offset.x + 1, pos.y + offset.y + 1, map_size.w, map_size.h, map_data->to_array()); } } }; #endif
export * from './add-warehouse-products-table.component';
#Author: <NAME> 2019 #merge tables import pandas as pd import click # options @click.command() @click.option('--counts', 'counts_file', required=True, type=click.Path(exists=True, readable=True), help='Barcodes (DNA or RNA).') @click.option('--assignment', 'assignment_file', required=True, type=click.Path(exists=True, readable=True), help='Assignment tsv file.') @click.option('--name', 'name', required=True, type=str, help='Name the statistic should be written with.') @click.option('--output', 'output_file', required=True, type=click.Path(writable=True), help='Output file.') @click.option('--statistic', 'statistic_file', required=True, type=click.Path(writable=True), help='Statistic output file.') def cli(counts_file, assignment_file, output_file, statistic_file, name): # statistic statistic = pd.DataFrame(data={'Experiment' : [name], 'Barcodes': [0], 'Counts' : [0], 'Average counts' : [0], 'Assigned barcodes' : [0], 'Assigned counts' : [0], 'Average assigned counts' : [0], 'Fraction assigned barcodes' : [0], 'Fraction assigned counts' : [0]}) # Association file click.echo("Read assignment file...") assoc_barcodes_oligo=pd.read_csv(assignment_file, header=None, usecols=[0,1], sep="\t", names=['Barcode','Oligo']) assoc_barcodes = set(assoc_barcodes_oligo.Barcode) #get count df click.echo("Read count file...") counts=pd.read_csv(counts_file, header=None, sep="\t", names=['Barcode','Counts']) statistic['Barcodes'] = counts.shape[0] statistic['Counts'] = sum(counts.Counts) statistic['Average counts'] = statistic['Counts']/statistic['Barcodes'] #fill in labels from dictionary click.echo("Collecting rows to remove...") remove_idx=[] for i,row in counts.iterrows(): if row.Barcode not in assoc_barcodes: remove_idx.append(i) click.echo("Remove not assigned...") counts.drop(remove_idx,inplace=True) statistic['Assigned barcodes'] = counts.shape[0] statistic['Assigned counts'] = sum(counts.Counts) statistic['Average assigned counts'] = statistic['Assigned counts']/statistic['Assigned barcodes'] statistic['Fraction assigned barcodes'] = statistic['Assigned barcodes']/statistic['Barcodes'] statistic['Fraction assigned counts'] = statistic['Assigned counts']/statistic['Counts'] click.echo("Write files...") counts.to_csv(output_file, index=False, sep='\t', header=False, compression='gzip') statistic.to_csv(statistic_file, index=False,sep='\t', compression='gzip') if __name__ == '__main__': cli()
// Log calls Log() on the wrapped logger appending the Context's values func (l *Context) Log(keyvals ...interface{}) { if l == nil || IsDisabled(l.Logger) { return } l.Logger.Log(copyIfDynamic(addArrays(l.KeyVals, keyvals))...) }
package main import ( "bufio" "fmt" "log" "os" "crypto/sha512" "encoding/json" "github.com/it-chain/bifrost" "github.com/it-chain/bifrost/conn" "github.com/it-chain/bifrost/mux" "github.com/it-chain/bifrost/pb" "github.com/it-chain/heimdall/auth" "github.com/it-chain/heimdall/key" ) func CreateHost(ip string, mux *mux.Mux, pub key.PubKey, pri key.PriKey) *bifrost.BifrostHost { myconnectionInfo := bifrost.NewHostInfo(conn.Address{IP: ip}, pub, pri) var OnConnectionHandler = func(connection conn.Connection) { log.Printf("New connections are connected [%s]", connection) } return bifrost.New(myconnectionInfo, mux, OnConnectionHandler) } func ReadFromConsole() string { reader := bufio.NewReader(os.Stdin) fmt.Println("Enter text: ") text, _ := reader.ReadString('\n') return text } func BuildEnvelope(protocol mux.Protocol, data interface{}) *pb.Envelope { payload, _ := json.Marshal(data) envelope := &pb.Envelope{} envelope.Protocol = string(protocol) envelope.Payload = payload return envelope } func Sign(envelope *pb.Envelope, priKey key.PriKey) *pb.Envelope { au, _ := auth.NewAuth() hash := sha512.New() hash.Write(envelope.Payload) digest := hash.Sum(nil) sig, _ := au.Sign(priKey, digest, auth.EQUAL_SHA512.SignerOptsToPSSOptions()) envelope.Signature = sig return envelope } func main() { km, err := key.NewKeyManager("~/key") if err != nil { log.Fatalln(err.Error()) } defer os.RemoveAll("~/key") priv, pub, err := km.GenerateKey(key.RSA4096) var protocol mux.Protocol protocol = "/echo/1.0" mux := mux.NewMux() mux.Handle(protocol, func(message conn.OutterMessage) { //log.Printf("Echoed [%s]", string(message.Data)) fmt.Println(fmt.Sprintf("%s", message.Data[:])) }) address := "127.0.0.1:7777" host := CreateHost(address, mux, pub, priv) conn, err := host.ConnectToPeer(bifrost.NewAddress("127.0.0.1:8888")) defer conn.Close() if err != nil { log.Fatalln(err.Error()) } for { input := ReadFromConsole() envelope := Sign(BuildEnvelope(protocol, input), priv) conn.Send(envelope, nil, nil) } }
Calcium-dependent arrhythmias in transgenic mice with heart failure. Transgenic mice overexpressing the inflammatory cytokine tumor necrosis factor (TNF)-alpha in the heart (TNF-alpha mice) develop a progressive heart failure syndrome characterized by biventricular dilatation, decreased ejection fraction, atrial and ventricular arrhythmias on ambulatory telemetry monitoring, and decreased survival compared with nontransgenic littermates. Programmed stimulation in vitro with single extra beats elicits reentrant ventricular arrhythmias in TNF-alpha hearts (12 of 13 hearts) but not in control hearts. We performed optical mapping of voltage and Ca(2+) in isolated perfused ventricles of TNF-alpha mice to study the mechanisms that lead to the initiation and maintenance of the arrhythmias. When compared with controls, hearts from TNF-alpha mice have prolonged action potential durations (action potential duration at 90% repolarization: 23 +/- 2 ms, n = 7, vs. 18 +/- 1 ms, n = 5; P < 0.05), no increased dispersion of refractoriness between apex and base, elevated diastolic and depressed systolic [Ca(2+)](i), and prolonged Ca(2+) transients (72 +/- 6 ms, n = 10, vs. 54 +/- 5 ms, n = 8; P < 0.01). Premature beats have diminished action potential amplitudes and conduct in a slow, heterogeneous manner. Lowering extracellular [Ca(2+)] normalizes conduction and prevents inducible arrhythmias. Thus both action potential prolongation and abnormal Ca(2+) handling may contribute to the initiation of reentrant arrhythmias in this heart failure model by mechanisms distinct from enhanced dispersion of refractoriness or triggered activity.
package maze import ( "fmt" "math/rand" "time" "zura.org/vector" ) const ( Road int = iota Wall ) const ( axisX int = iota axisY ) type Maze struct { Map [][]int Width int Height int } func New(w, h int) *Maze { rand.Seed(time.Now().UnixNano()) m := new(Maze) m.setSize(w, h) m.allocMap() return m } func (m *Maze) allocMap() { m.Map = make([][]int, m.Width) for x := 0; x < m.Width; x += 1 { m.Map[x] = make([]int, m.Height) } } func (m *Maze) setSize(w, h int) { if w < 5 { m.Width = 5 } else { m.Width = w } if h < 5 { m.Height = 5 } else { m.Height = h } } func (m *Maze) Clear() { for y := 0; y < m.Height; y += 1 { for x := 0; x < m.Width; x += 1 { m.Map[x][y] = Wall } } } func (m *Maze) Print() { var c string for y := 0; y < m.Height; y += 1 { for x := 0; x < m.Width; x += 1 { if m.Map[x][y] == Wall { c = "#" } else { c = " " } time.Sleep(time.Millisecond * 20) fmt.Printf("%s", c) } fmt.Println("") } } func (m *Maze) Make() { x := randOdd(m.Width - 2) y := randOdd(m.Height - 2) fmt.Printf("(%d, %d)\n", x, y) m.makeMaze(x, y) } func randOdd(mod int) (r int) { r = 1 + rand.Intn(mod) if r%2 == 0 { r += 1 } if r > mod { r -= 2 } return } func (m *Maze) makeMaze(x, y int) { v := vector.New() for { vx, vy := v.GetVector() px := x + vx*2 py := y + vy*2 if px < 0 || px >= m.Width || py < 0 || py >= m.Height || m.Map[px][py] != Wall { if v.Rotate() { return } continue } m.Map[x+vx][y+vy] = Road m.Map[px][py] = Road m.makeMaze(px, py) v.Reset() } }
def is_cfcli_mta_available(self):
    is_available = False
    version = None
    result, error = self.call_subprocess(['cf', 'deploy'])
    if error:
        is_available = False
    elif 'not a registered command' in result:
        is_available = False
    elif 'Missing positional argument' in result:
        # 'cf deploy' without arguments complains about a missing positional
        # argument only when the MTA plugin is installed.
        is_available = True
        version = 'Unknown'
    return is_available, version
/** * {@link OperatesOnDeploymentAwareProvider} implementation to * provide {@link CamelContextRegistry} injection to {@link ArquillianResource}- * annotated fields. * * @author [email protected] * @since 19-May-2013 */ public class CamelContextRegistryProvider implements ResourceProvider { @Inject @SuiteScoped private InstanceProducer<CamelContextRegistry> serviceProducer; @Inject private Instance<CamelContextRegistry> serviceInstance; @Override public boolean canProvide(final Class<?> type) { return CamelContextRegistry.class.isAssignableFrom(type); } @Override public Object lookup(ArquillianResource resource, Annotation... qualifiers) { if (serviceInstance.get() == null) { CamelContextRegistry service; try { InitialContext initialContext = new InitialContext(); service = (CamelContextRegistry) initialContext.lookup(CamelConstants.CAMEL_CONTEXT_REGISTRY_BINDING_NAME); } catch (NamingException ex) { throw new IllegalStateException(ex); } if (service != null) { serviceProducer.set(service); } } return serviceInstance.get(); } }
package cmd import ( "os" "testing" "github.com/stretchr/testify/assert" ) func TestGetShipmentEnvironment(t *testing.T) { if !*integrationTest || *usernameTest == "" || *passwordTest == "" { t.SkipNow() } os.Unsetenv("HC_CONFIG") //login token, err := harborLogin(*usernameTest, *passwordTest) assert.NotEmpty(t, token) assert.Nil(t, err) //todo: consider creating a shipment here //test shipment := "mss-shipit-api" env := "dev" shipmentEnv := GetShipmentEnvironment(*usernameTest, token, shipment, env) //assertions assert.NotNil(t, shipmentEnv) assert.Equal(t, shipmentEnv.ParentShipment.Name, shipment) assert.Equal(t, shipmentEnv.Name, env) //logout success, err := harborLogout(*usernameTest, token) assert.Nil(t, err) assert.True(t, success) }
def save(self, to_dir, compressionlevel=9): if os.path.exists(to_dir) and not os.path.isdir(to_dir): raise Exception(f'Not a directory : {to_dir}') elif not os.path.exists(to_dir): os.makedirs(to_dir, mode=int('0755', 8)) _save(os.path.join(to_dir, FILE_USER_FST_DATA), self.compiledFST[0], compressionlevel) _save(os.path.join(to_dir, FILE_USER_ENTRIES_DATA), pickle.dumps(self.entries), compressionlevel)
/** * A transition is a pair from one state to another. * * @param <State> The state enum type. */ private static final class Transition<State> { /** The state to transition from. */ private final State startState; /** The state to transition to. */ private final State endState; /** * Create a transition. * * @param startState The state to transition from. * @param endState The state to transition to. */ public Transition(final State startState, final State endState) { this.startState = startState; this.endState = endState; } @Override public int hashCode() { return Objects.hash(startState, endState); } @SuppressWarnings("unchecked") @Override public boolean equals(final Object obj) { if (obj == null) { return false; } if (!(obj instanceof Transition)) { return false; } final Transition<State> t = (Transition<State>) obj; return t.startState.equals(startState) && t.endState.equals(endState); } }
def WriteSerialized(cls, attribute_container): json_dict = cls.WriteSerializedDict(attribute_container) return json.dumps(json_dict)
//Spawn the animated-element func (spawner *StaticSpawner) Spawn(animatedelementID string, moveDirection state.Direction) { timer := time.NewTimer(spawner.timeMs) animatedElement := spawner.players[animatedelementID] delete(spawner.players, animatedelementID) spawner.playersWaitingForSpawn[animatedelementID] = animatedElement go func() { select { case <-timer.C: animatedElementState := animatedElement.State() animatedElementState.Position = &math.Point2D{X: 5, Y: 5} animatedElementState.Angle = 0.0 animatedElementState.MoveDirection = moveDirection animatedElementState.RotateDirection = state.None delete(spawner.playersWaitingForSpawn, animatedelementID) spawner.players[animatedelementID] = animatedElement spawner.PublishEvent( event.Event{ Action: "spawn", PlayerID: animatedelementID, State: animatedElementState, }, ) } }() }
package io.basc.start.bytedance.poi;

import io.swagger.v3.oas.annotations.media.Schema;

import java.io.Serializable;
import java.util.List;

public class PoiExtHotelSkuRequest implements Serializable {
	private static final long serialVersionUID = 1L;

	@Schema(description = "接入方SPU ID 列表", example = "[y0001 y0002]", required = true)
	private List<String> spu_ext_id;

	@Schema(description = "拉取价格时间区间[start_date, end_date)", example = "20191001", required = true)
	private String start_date;

	@Schema(description = "拉取价格时间区间[start_date, end_date)", example = "20191007", required = true)
	private String end_date;

	public List<String> getSpu_ext_id() {
		return spu_ext_id;
	}

	public void setSpu_ext_id(List<String> spu_ext_id) {
		this.spu_ext_id = spu_ext_id;
	}

	public String getStart_date() {
		return start_date;
	}

	public void setStart_date(String start_date) {
		this.start_date = start_date;
	}

	public String getEnd_date() {
		return end_date;
	}

	public void setEnd_date(String end_date) {
		this.end_date = end_date;
	}
}
package com.annexcloud.dao.impl;

import com.annexcloud.dao.AnnexCloudSegmentDao;
import de.hybris.platform.acceleratorservices.model.email.EmailAddressModel;
import de.hybris.platform.basecommerce.model.site.BaseSiteModel;
import de.hybris.platform.personalizationservices.model.CxSegmentModel;
import de.hybris.platform.personalizationservices.model.CxUserToSegmentModel;
import de.hybris.platform.servicelayer.internal.dao.DefaultGenericDao;
import de.hybris.platform.servicelayer.search.SearchResult;
import org.apache.commons.collections.CollectionUtils;
import org.apache.log4j.Logger;

import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class DefaultAnnexCloudSegmentDao extends DefaultGenericDao<CxUserToSegmentModel> implements AnnexCloudSegmentDao
{
    private static final Logger LOG = Logger.getLogger(DefaultAnnexCloudSegmentDao.class);

    public DefaultAnnexCloudSegmentDao()
    {
        super(CxUserToSegmentModel._TYPECODE);
    }

    /* final StringBuilder queryWithSegment = new StringBuilder("SELECT {").append(CxUserToSegmentModel.PK).append("} FROM {")
            .append(CxUserToSegmentModel._TYPECODE).append("} WHERE {").append(CxUserToSegmentModel.BASESITE)
            .append("} = ?").append(CxUserToSegmentModel.BASESITE);*/

    @Override
    public List<CxSegmentModel> getUserToSegment(BaseSiteModel site)
    {
        final Map<String, Object> params = new HashMap<String, Object>();
        params.put(CxUserToSegmentModel.BASESITE, site);

        final StringBuilder query = new StringBuilder("SELECT {").append(CxUserToSegmentModel.SEGMENT).append("} FROM {")
                .append(CxUserToSegmentModel._TYPECODE).append("} WHERE {").append(CxUserToSegmentModel.BASESITE)
                .append("} = ?").append(CxUserToSegmentModel.BASESITE).append(" group by {").append(CxUserToSegmentModel.SEGMENT)
                .append("}");

        final SearchResult<CxSegmentModel> results = getFlexibleSearchService().<CxSegmentModel> search(query.toString(), params);
        if (LOG.isDebugEnabled())
        {
            LOG.debug("Results: " + (results == null ? "null" : String.valueOf(results.getCount())));
        }
        return CollectionUtils.isEmpty(results.getResult()) ? null : results.getResult(); // NOSONAR
    }
}
def reset(serial, yes): local_print("Reset is only possible 10secs after plugging in the device.", "Please (re-)plug in your Nitrokey FIDO2 now!") if yes or AskUser.yes_no("Warning: Your credentials will be lost!!! continue?"): local_print("Press key to confirm -- again, your credentials will be lost!!!") try: nkfido2.find(serial).reset() except CtapError as e: local_critical(f"Reset failed ({str(e)})", "Did you confirm with a key-press 10secs after plugging in?", "Please re-try...") local_print("....aaaand they're gone")
/** * Performs increments of visit count of the given IP. * * @param ip The IP to increment * @param amount The amount to increment */ @WriteOnly public void increment(String ip, long amount) { keyValueTable.increment(Bytes.toBytes(ip), amount); }
The latest version of Google's self-driving car - a pod-like two-seater that needs no gas pedal or steering wheel - has made its debut on the roads around Mountain View. This prototype is the first vehicle built from scratch for the purpose of self-driving, Google says. The technology giant's mission is to have driverless cars available to consumers in the next five years. Scroll down for video The latest version of Google's self-driving car - a pod-like two-seater that needs no gas pedal or steering wheel - has made its debut on the roads around Mountain View. GOOGLE'S POD CAR The new pod isn't designed for a long trip, or a joyride. It lacks air bags and other federally required safety features, so it can't go more than 25 miles per hour. It's electric, and has to be recharged after 80 miles. And the pod can only drive in areas that have been thoroughly mapped by Google. At first, it will likely even have a steering wheel and gas pedal - current California regulations require them. 'These prototype vehicles are designed from the ground up to be fully self-driving,' Google said. 'They’re ultimately designed to work without a steering wheel or pedals, but during this phase of our project we’ll have safety drivers aboard with a removable steering wheel, accelerator pedal, and brake pedal that allow them to take over driving if needed.' The prototypes’ speed is capped at a neighborhood-friendly 25mph, and they’ll drive using the same software that our existing Lexus vehicles use—the same fleet that has self-driven over 1 million miles since we started the project. The new pod isn't designed for a long trip, or a joyride. It lacks air bags and other federally required safety features, so it can't go more than 25 miles per hour. It's electric, and has to be recharged after 80 miles. And the pod can only drive in areas that have been thoroughly mapped by Google. At first, it will likely even have a steering wheel and gas pedal - current California regulations require them. Those regulations also require a driver to be able to take back control of the car at any time. But Google is lobbying for more flexible regulations. Google will initially build and test 25 pods, mostly in neighborhoods surrounding its Mountain View headquarters. It will eventually build between 50 and 100, and will broaden testing to sites that are hillier and rainier. The firm also said it holding a contest with local artists to design doors for the cars The ultimate goal, says Google co-founder Sergey Brin, is computer-controlled cars that can eliminate human error, which is a factor in an estimated 90 percent of the 1.2 million road deaths that occur worldwide each year. Self-driving cars could also improve traffic congestion and transport the elderly and disabled. Google shocked the auto industry in 2010 with its announcement that it was working on a driverless car. Brin insists Google doesn't aspire to be a car company, but wants its technology to be adopted by automakers. 'We want to partner to bring self-driving to all the vehicles in the world,' Brin told a group of journalists and community members gathered earlier this week to take rides in the prototype. For now the traditional automakers are pursuing their own self-driving technology, but with less ambitious timeline of 10 to 15 years for a truly driverless car. 
The ultimate goal, says Google co-founder Sergey Brin, is computer-controlled cars that can eliminate human error, which is a factor in an estimated 90 percent of the 1.2 million road deaths that occur worldwide each year. Chris Urmson, who directs Google's self-driving car project, says the slow-moving, friendly looking prototype - his young son thinks it looks like a koala because of the nose-like black laser on the front - is a good bridge between the company's current test fleet of 20 specially outfitted Lexus SUVs and the more advanced, higher-speed driverless cars of its future, which might not even look like anything on the road today. 'This vehicle is really all about us learning. 'This vehicle could go on a freeway, but when we think about introducing the technology, we want to do that very thoughtfully and very safely,' Urmson says. The latest version of Google's self-driving car - a pod-like two-seater that needs no gas pedal or steering wheel. The new 'pod' can't go more than 25 miles per hour, is electric, and has to be recharged after 80 miles. Convincing drivers that driverless technology is safe is one of the hurdles the company must overcome. Earlier this week, in response to questions from The Associated Press, Google acknowledged 11 minor accidents in the six years it has been testing autonomous cars. Urmson says the company is proud of that record, and notes that Google's vehicles have completed more than 1.7 million miles of testing. He says all but one of the accidents were caused by drivers in other cars; in the only incident caused by a Google car, a staffer was driving in manual mode. HOW DOES GOOGLE'S SELF-DRIVING CAR WORK? Google's prototype two-seater 'bubble' cars have buttons to begin and end the drive, but no other controls. An on-board computer uses data from sensors, including radar, a laser and cameras, to make turns and negotiate its way around pedestrians and other vehicles. Under the vision unveiled by Google, passengers might set their destination by typing it into a map or using commands. The cars are also expected to be electric, capable of going 100 miles (160km) before needing to be recharging. The front of the vehicle has a soft foam-like material where a traditional bumper would be and a more flexible windscreen, in an attempt to be safer for pedestrians. Last week, a Google patent revealed that the firm's self-driving cars will be able to detect and respond to a cyclists' hand signals. Its computers compare the distance between the cyclist's hand and head to decide whether a cyclist is turning or stopping, the patent said The prototypes are restricted to speeds of 25mph (40 km/h) and the ability to self-drive will depend on specifically designed Google road maps tested on the company's current fleet of vehicles. But ultimately the vehicles will be faster and will be able to use Google's extended maps service, using GPS technology to locate the vehicle's exact position on an electronic map. A combination of radar, lasers and cameras sitting on top of the roof give the car a 360-degree 'view', with sensors linked to computer software able to 'see' and identify people, cars, road signs and markings and traffic lights. Consumers question whether they can trust self-driving cars to work all the time, who will be liable if there's an accident and how self-driving cars will interact with regular cars, says the consulting firm J.D. Power and Associates. In a 2013 survey of U.S. drivers, J.D. 
Power found only one in five was interested in a fully autonomous car. Urmson says Google needs to do a better job of educating people about self-driving technology and updating them on Google's progress. It's building a Web site to teach people about the technology, and the site will feature a monthly report that will include details of any accidents involving Google cars. The site will also have a section where people can send feedback when they interact with the cars. The prototype cars - assembled in suburban Detroit by Roush Industries - have the same array of radars, lasers and cameras as Google's fleet of Lexus SUVs, which allows them to share data. If one car's camera spots orange cones and construction signs, for example, it will alert all the others to slow down in that area or reroute around a lane closure. Dmitri Dolgov, the head of software for the self-driving car project, says Google's software has gotten much better over the last year at classifying objects, like trees and mailboxes, and predicting behavior of pedestrians and other cars. For example, Google's cars will slow down if they sense that a car in the next lane is speeding up to cut in front of them. The prototype cars - assembled in suburban Detroit by Roush Industries - have the same array of radars, lasers and cameras as Google's fleet of Lexus SUVs And in one recent test, a Google car paused when a cyclist ran a red light. Another car, driven by a human, went ahead and nearly hit the cyclist. The system isn't perfect. On a test drive, one of Google's Lexus SUVs seemed momentarily confused when a mail truck partially blocked its path. Later, during a demonstration drive in Google's parking lot, the prototype - without a wheel or pedal - braked when it spotted a row of folding chairs. It had to figure out that the chairs wouldn't move before it proceeded. Dolgov says it's impossible to predict everything its test cars might see, so they're programmed to act in the most conservative way when they confront something unusual, like the time a Google SUV stopped and waited while a woman in a wheelchair chased a duck with a broom. Google isn't alone in developing self-driving cars. Mercedes-Benz, Infiniti and other brands already have advanced driver assistance systems, like lane keeping and adaptive cruise control, that can pilot the car on the highway with minimal input from the driver. Unlike Google, automakers think self-driving cars will arrive feature-by-feature instead of all at once, giving people plenty of time to adapt to autonomous driving. But Urmson says that approach is 'fundamentally wrong.' 'We believe that's like saying, `If I work really hard at jumping, one day I'll just be able to fly,'' he said. Egil Juliussen, the principal analyst of infotainment and advanced driver assist systems for the consulting firm IHS Automotive, says Google's 'moon shot' strategy is difficult and riskier than just adding features to existing cars. But he thinks it could ultimately be successful. Google could make self-driving urban pods for universities or urban centers, for example, or sell its technology to automakers. Brin says the company is still refining its plans for self-driving cars, but he's excited about their potential.
Implant site preparation using a single bur versus multiple drilling steps: 4-month post-loading results of a multicenter randomised controlled trial. PURPOSE To compare the clinical outcome of implants inserted in sites prepared with a simplified protocol consisting of a single drill versus a conventional protocol with multiple drilling steps. MATERIALS AND METHODS In two private clinics, 40 patients requiring one single implant and having a residual bone height of at least 10 mm and a thickness of at least 5 mm, measured on computerised tomography (CT) scans, were randomised after flap elevation to have the implant site prepared using a single drilling step with a newly designed tapered-cylinder drill (1-drill group) or a conventional procedure with multiple drills (multiple-drill group). Implants were left to heal non-submerged for 3 months and were then loaded with a final metal-ceramic crown. Outcome measures were: implant failure; any complications; peri-implant marginal bone level changes assessed by a blinded outcome assessor; operation time; operator preference; and post-surgical pain, swelling and analgesic consumption. All patients were followed up to 4 months after implant loading. RESULTS Twenty patients were randomised to the 1-drill group and 20 patients to the multiple-drill group. No implant failed and no complications occurred. Four months after loading, implants in the 1-drill group had lost 0.54 mm of peri-implant bone versus 0.41 mm for the implants in the multiple-drill group. There were no statistically significant differences in marginal bone level changes between the two groups (difference 0.13 mm, 95% CI -0.21; 0.47, P = 0.108). Significantly less time was required to place the implant with the single bur (difference 3.66 minutes, 95% CI 2.69; 4.63, P < 0.0001). Both operators always preferred the single bur technique. Postoperatively, patients in the 1-drill group vs patients in the multiple-drill group reported statistically significant differences in pain level (difference 27.5, 95% CI 3.3; 51.7, P < 0.0001), in the number of days in which the swelling persisted (difference 3.4, 95% CI 2.4; 4.4, P < 0.0001) and in the number of analgesic drugs taken (difference 2.8, 95% CI 1.4; 4.2, P < 0.0001). CONCLUSIONS: Within the limits of this trial, both drilling techniques produced successful results over a 4-month post-loading follow-up period, but the single bur procedure required less surgical time and led to less postoperative morbidity.
<gh_stars>10-100 # Copyright (c) Barefoot Networks, Inc. # Licensed under the Apache License, Version 2.0 (the "License") from p4_hlir.hlir import p4_counter, P4_DIRECT, P4_COUNTER_BYTES from programSerializer import ProgramSerializer from compilationException import * import ebpfTable import ebpfProgram class EbpfCounter(object): # noinspection PyUnresolvedReferences def __init__(self, hlircounter, program): assert isinstance(hlircounter, p4_counter) assert isinstance(program, ebpfProgram.EbpfProgram) self.name = hlircounter.name self.hlircounter = hlircounter width = hlircounter.min_width # ebpf counters only work on 64-bits if width <= 64: self.valueTypeName = program.config.uprefix + "64" else: raise NotSupportedException( "{0}: Counters with {1} bits", hlircounter, width) self.dataMapName = self.name if ((hlircounter.binding is None) or (hlircounter.binding[0] != P4_DIRECT)): raise NotSupportedException( "{0}: counter which is not direct", hlircounter) self.autoIncrement = (hlircounter.binding != None and hlircounter.binding[0] == P4_DIRECT) if hlircounter.type is P4_COUNTER_BYTES: self.increment = "{0}->len".format(program.packetName) else: self.increment = "1" def getSize(self, program): if self.hlircounter.instance_count is not None: return self.hlircounter.instance_count if self.autoIncrement: return self.getTable(program).size program.emitWarning( "{0} does not specify a max_size; using 1024", self.hlircounter) return 1024 def getTable(self, program): table = program.getTable(self.hlircounter.binding[1].name) assert isinstance(table, ebpfTable.EbpfTable) return table def serialize(self, serializer, program): assert isinstance(serializer, ProgramSerializer) # Direct counters have the same key as the associated table # Static counters have integer keys if self.autoIncrement: keyTypeName = "struct " + self.getTable(program).keyTypeName else: keyTypeName = program.config.uprefix + "32" program.config.serializeTableDeclaration( serializer, self.dataMapName, True, keyTypeName, self.valueTypeName, self.getSize(program)) def serializeCode(self, keyname, serializer, program): assert isinstance(serializer, ProgramSerializer) assert isinstance(program, ebpfProgram.EbpfProgram) serializer.emitIndent() serializer.appendFormat("/* Update counter {0} */", self.name) serializer.newline() valueName = "ctrvalue" initValuename = "init_val" serializer.emitIndent() serializer.appendFormat("{0} *{1};", self.valueTypeName, valueName) serializer.newline() serializer.emitIndent() serializer.appendFormat("{0} {1};", self.valueTypeName, initValuename) serializer.newline() serializer.emitIndent() serializer.appendLine("/* perform lookup */") serializer.emitIndent() program.config.serializeLookup( serializer, self.dataMapName, keyname, valueName) serializer.newline() serializer.emitIndent() serializer.appendFormat("if ({0} != NULL) ", valueName) serializer.newline() serializer.increaseIndent() serializer.emitIndent() serializer.appendFormat("__sync_fetch_and_add({0}, {1});", valueName, self.increment) serializer.newline() serializer.decreaseIndent() serializer.emitIndent() serializer.append("else ") serializer.blockStart() serializer.emitIndent() serializer.appendFormat("{0} = {1};", initValuename, self.increment) serializer.newline() serializer.emitIndent() program.config.serializeUpdate( serializer, self.dataMapName, keyname, initValuename) serializer.newline() serializer.blockEnd(True)
On the Distance Between the Rumor Source and Its Optimal Estimate in a Regular Tree This paper addresses the rumor source identification problem, where the goal is to find the origin node of a rumor in a network among a given set of nodes with the rumor. In this paper, we focus on a network represented by a regular tree which does not have any cycle and in which all nodes have the same number of edges connected to a node. For this network, we clarify that, with quite high probability, the origin node is within the distance 3 from the node selected by the optimal estimator, where the distance is the number of edges of the unique path connecting two nodes. This is clarified by the probability distribution of the distance between the origin and the selected node. I. INTRODUCTION In social networks, a rumor spreads like an infectious disease. In fact, it can be modeled as an infectious disease , . The most common theme of studies about a rumor (or infectious disease) is to analyze mechanisms of a spreading behavior of a rumor in a given network , . Unlike this type of studies, we address the rumor source identification problem introduced by Shah and Zaman . The goal of this problem is to find the origin node of a rumor (rumor source) in a network among a given set of nodes with the rumor. If the rumor source can be detected, it is available to find a weak node which spreads a computer virus, to give ranking to websites for a search engine, etc. For this problem, Shah and Zaman introduced the optimal estimator and analyzed the correct detection probability of it for some types of networks. This probability asymptotically goes to one for a very special network called geometric tree (see ). However, they analytically or experimentally showed that the probability is asymptotically not high or goes to zero for many other networks such as regular trees, small-world networks, and scale-free networks, where a regular tree is a network which does not have any cycle and in which all nodes have the same degree, i.e, the number of edges connected to a node. Although the optimal estimator may not find the rumor source, it actually selects a node near the rumor source. This fact is known experimentally (cf. and ) and is not known analytically to the best of our knowledge. In this paper, we focus on this fact and clarify it analytically. Especially, we focus on regular trees and clarify that, with quite high probability, the rumor source is within the distance "3" from the node selected by the optimal estimator, where the An earlier version was presented at SITA2014 . In this paper, we improved notations, added Corollary 1, revised proofs, and corrected the bound of Theorem 3 and many errors. distance is the number of edges of the unique path connecting two nodes. This is clarified by the probability distribution of the distance between the rumor source and the selected node. II. RUMOR SOURCE IDENTIFICATION PROBLEM In this section, we introduce the rumor source identification problem and show some known results of this problem. Let G be an undirected and connected graph. Let V(G) denote the set of nodes and E(G) denote the set of edges of the graph G. We denote the edge connecting two nodes i, j ∈ V(G) by the set of nodes {i, j} ∈ E(G). In this paper, we consider the case where G is a regular tree, that is, the graph does not have any cycle, and all nodes have the same degree † δ ≥ 3. We assume that the number of nodes is countably infinite in order to avoid boundary effects. 
A rumor spreads in a given regular tree G. Initially, the only one node v 1 ∈ V(G) (the rumor source) possesses a rumor. The node possessing the rumor infects it to connected adjacent nodes, and these nodes keep it forever. For {i, j} ∈ E(G), let τ ij ∈ R be a real-valued random variable (RV) that represents the rumor spreading time from the node i to the node j after i gets the rumor. In this model, spreading times {τ ij : {i, j} ∈ E(G)} are independent and drawn according to the exponential distribution with the unit mean. Thus, the cumulative distribution function F of τ ij is represented as F (x) = 1 − e −x if x ≥ 0, and F (x) = 0 if x ≤ 0. This spreading model is sometimes called the susceptible-infected (SI) model . Suppose that we observe a network consisted of n infected nodes in the graph G at some time. Since the rumor spreads to the connected adjacent nodes, this network is a connected subgraph of G. We denote the RV of this network by G n and its realization as G n . We only know an observed network and do not know the realization of spreading times on edges. Then, the goal of the rumor source identification problem is to find the rumor source v 1 among V(G n ) given G n . For this problem, the optimal estimator is the maximum likelihood (ML) estimator ϕ ML (G n ) (cf. ) defined as where ties broken uniformly at random and Pr{G n |v} is the probability observing G n under the SI model assuming v is the rumor source. For this optimal estimator, let C n be the correct detection probability when a graph of n infected nodes is observed, i.e., C n = Pr{ϕ ML (G n ) = v 1 }. Shah and Zaman showed the asymptotic behavior of C n as the next theorem. Theorem 1 ( ): For a regular tree with degree δ ≥ 3, it holds that where I x (a, b) is the regularized incomplete beta function defined as I x (a, b) According to this theorem, when δ = 3, lim n→∞ C n = 0.25. Moreover, it rapidly converges to 1 − ln(2) ≈ 0.307 as δ goes to infinity (cf. ). This means that, unfortunately, the correct detection probability is not very high for regular trees. III. MAIN RESULTS In this section, we show that the ML estimator can select a node near the rumor source with high probability. To this end, we clarify the probability distribution of the distance d (≥ 1) between the rumor source and the node selected by the ML estimator. We denote this probability by D n (d) and define it as whereV n = ϕ ML (G n ) and d G (v, w) denotes the distance between nodes v and w in the graph G. Note that D n (0) = C n . When δ = 3, we can clarify a closed-form expression of the asymptotic behavior of D n (d) as the next theorem. Theorem 2: Let δ = 3. Then, for any d ≥ 1, we have We denote the rising factorial x(x+ 1)(x+ 2) · · · (x+ k − 1) by x k . The next theorem gives tight upper and lower bounds of lim n→∞ D n (d) for more general degrees. These theorems imply that the ML estimator can select a node near the rumor source with high probability. This is clear from the next corollary and its numerical results (Fig. 1). , the corollary is immediately obtained by Theorems 1-3. Since ǫ 40 ≈ 10 −7 , Fig. 1 gives almost exact numerical results of lim n→∞ Pr{d G (V n , v 1 ) ≤ d}. We note that numerical results for other degrees δ are almost the same (see Fig. 2). Thus, these results show that the rumor source is within the distance 3 from the node selected by the ML estimator with quite high probability. We note that Khim and Loh gave another lower bound of lim n→∞ Pr{d G (V n , v 1 ) ≤ d}. 
However, it is quite looser than our bound and is zero at least values of parameters d and δ are within the rage in Fig. 1 and Fig. 2. IV. PROOFS OF THEOREMS In this section, we prove our main theorems. We will denote n-length sequences of RVs (X 1 , X 2 , · · · , X n ) and its realizations (x 1 , x 2 , · · · , x n ) by X n and x n , respectively. For the sake of brevity, we denote V(G) by V and V(G n ) by V n . For any node v ∈ V in a regular tree with degree δ ≥ 3, there are δ neighbors. Thus, there are δ subtrees rooted at these δ neighbors with the parent node v. In other words, the regular tree is divided into these δ subtrees and the node v. Let X j (v) be the number of infected nodes in the jth subtree among those subtrees (j = 1, 2 · · · , δ). When v is not the rumor source, let δth subtree contain the rumor source v 1 . Note that, if v is an infected node, we have δ j=1 X j (v) = n−1. The next lemma is a key lemma to prove our main theorems. We denote the set of nodes with distance d (≥ 1) from the rumor source by V (d) . Note that the number of elements of V (d) is δ(δ − 1) d−1 . Then, D n (d) can be represented as where the last equality comes from Lemma 1. On the other hand, let {V i } ∞ i=1 be the sequence of RVs each representing ith infected node, where V 1 = v 1 with probability 1. Then, we have V n = {V 1 , V 2 , · · · , V n }. This implies that the where X δ (v) = (X 1 (v), X 2 (v), · · · , X δ (v)). We also have Thus, we need to obtain closed-form expressions of Since {τ ij } are independent and these have the memoryless property, an infecting node is uniformly selected from boundary nodes at each step. Hence, we have for any v n−1 ∈ S n−1 and v n ∈ B({v 1 , · · · , v n−1 }), . . Then, for d ≥ 1 and k ≥ d + 1, the kth infected node is v (d) if and only if the following event occurs for some j 1 , j 2 , · · · , j d such that 2 ≤ j 1 < j 2 < · · · < j d−1 < j d = k: where (a) comes from the chain rule of the probability, and (b) comes from Appendix A. The remaining case is that d = 1 and k ≥ d + 1 (= 2). In this case, we have where (a) comes from Appendix A. Thus, by recalling that (7) also holds in this case. B. Closed-Form Expression of Suppose that the kth infected node is v k . Since we consider a regular tree, v k has δ neighboring nodes {v k,1 , · · · , v k,δ }. Let Y j (v k ) be the number of infected nodes of the subtree rooted at v k,j with the parent node v k after v k is infected. Let the subtree rooted at v k,δ contain the rumor source. Thus, at the time that v k is infected, it holds that X δ (v k ) = k − 1. From then on, an infecting node is uniformly selected from boundary nodes at each step. We note that Then, numbers {Y j (v k )} are drawn according to the Pólya's urn model with δ colors balls (cf. and ): Initially, b j balls of color C j (j = 1, 2, · · · , δ) are in the urn, At each step, a single ball is uniformly drawn form the urn. Then, the drawn ball is returned with additional m = δ − 2 balls of the same color. Repeat this drawing process. Y j (v k ) corresponds to the number of times that the balls of color C j are drawn. According to , when the total number of drawing balls is n − k, the joint distribution where b = δ j=1 b j and δ j=1 y j = n − k. We note that the above probability only depends on n, k and δ. Now, by definition, we have When n is odd, we have Pr{v (d) ∈ V n , X(v) = n/2} = 0. Thus, we only consider the first term of (3). According to (7), (9) and (10), (4) can be represented as where the equality follows since Thus, we have . 
In a similar way, we have D n (d) for even n as follows: . This is because where (a) follows since x i =n−1, max 1≤j≤3 {x j }<n/2 x i =n−1, max 1≤j≤3 {x j }=n/2 (1) for any d ≥ 1 and k ≥ d (see Appendix D), we have for any d ≥ 2 and k ≥ d + 1, where ζ 0 k−1 (0) = 1. Note that this holds even if d = 1 and k ≥ d + 1. Since it holds , that for any l ≥ 1 and k ≥ l, we have for any d ≥ 1 and k ≥ d+1, where is the unsigned Stirling numbers of the first kind and s(k, l) is the signed Stirling numbers of the first kind defined as s(k, l) (−1) k−l k l . Thus, we have for odd n ≥ 3, and for even n ≥ 2, Now, the well-known Lebesgue's dominated convergence theorem and the fact that Thus, we can evaluate the probability as follows: and (b) comes from the symmetric property of where (a) comes from the fact that events By using the same way as in (see also ), we have (see a precise derivation in Appendix F) According to these equalities, (16), (17), and the dominated convergence theorem, we have (see a precise derivation in Appendix G) where g(δ, d, m) is a partial sum of (20), and the inequality comes from the fact that (according to (17), (18), and (19)) On the other hand, we have where (a) comes from the fact that This completes the proof of Theorem 3. APPENDIX B Let f (u, z) be a double series defined as where we assume that s(−1, l) = 0. First of all, we show that f (u, z) is absolutely convergent. If we assume that −1 where (a) comes from (24), (b) follows since ∞ l=0 s(k, l)u l = u(u−1) · · · (u−k+1), (c) comes from the fact that u+1 k = 1 if k = 0, (d) comes from Maclaurin series with respect to z which are convergent if |z| < 1, and (e) comes from Maclaurin series with respect to u which are convergent if |u| < 1. Thus, for any z, u ∈ R such that |z| < 1 and |u| < 1, we have Since two power series are convergent in a neighborhood of 0, all coefficients are equal (see ). This means that f l (z) = z (ln(1 + z)) l l! APPENDIX C In this appendix, we prove Lemma 1. First of all, we introduce some notations. Let R(v, G n ) be the rumor centrality of a node v in G n , T v w be the subtree of G n rooted at the node w with the ancestor node v, and |T v w | be the number of nodes in T v w . Here, we assume that T v w = ∅ and |T v w | = 0 if w / ∈ V(G n ). We note that the ML estimator becomes (see. ) then v is called the local rumor center w.r.t. N l (v). For the local rumor center, we know the following properties (see. ): {T v w \ {w}}. • A node v ∈ V(G n ) is a local rumor center w.r.t. N l (v) ⇒ there exists at most a node w ∈ N l (v) such that where the equality holds if and only if According to these properties, for a node v ∈ V(G n ), if it holds that |T v w | ≤ n 2 for all w ∈ N (v), the node v is a (local) rumor center w.r.t. N (v). Then, there exists at most a node w ∈ N (v) such that where the equality holds if and only if Hence, for a node v ∈ V(G n ), if X(v) < n/2, i.e., max{|T v w |, w ∈ N (v)} < n/2, we have Thus, the MAP estimator outputs v, and hence For a node v ∈ V(G n ), if X(v) = n 2 , i.e., there exists a node w ∈ N (v) such that |T v w | = n 2 and |T v w ′ | < n 2 for any other w ′ ∈ N (v), we have Thus, the MAP estimator outputs v with probability 1/2, and hence Then, the MAP estimator does not output v, and hence This completes the proof. APPENDIX D We note that Thus, for any d ≥ 1 and k ≥ d, we have where ζ 0 k (1) = 1. APPENDIX E In order to show the equation (15), we use the next lemma (cf. e.g. ). 
Lemma 2 (Dominated convergence theorem): Let f 1 , f 2 , · · · : N → R be a sequence of real-valued functions on positive integers N such that f n (k) converges as n → ∞, ∀k ∈ N. Suppose that there is g : Then, we have We note that where (a) follows since Pr{V k = v (d) } = 0 for any k ≤ d, and (b) comes from the fact that if v (d) is the kth infected node (k ≥ ⌈n/2⌉ + 1), it must hold that X(v (d) ) ≥ n/2. We also note that where (a) comes from the fact that if v (d) is the kth infected node (k ≥ ⌊n/2⌋+ 2), it must hold that X(v (d) ) > n/2. Thus, we have By noticing that Pr{V k = v (d) } does not depend on n (see (7)), we can set f n (k) = Pr{V k = v (d) , X(v (d) ) < n/2} Then, we have |f n (k)| = Pr{V k = v (d) , X(v (d) ) < n/2} We also have On the other hand, according to (13) and (14), we have for any k ≥ d + 1 and odd n ≥ 3, and for any k ≥ d + 1 and even n ≥ 2, s(k, l). By noticing that we have for any k ≥ d + 1, We note that for any k ≤ d, Thus, according to Lemma 2, we have By noticing that |V (d) | = δ(δ −1) d−1 , we have (15) from (25). APPENDIX F We consider the Pólya's urn model with 2 colors balls: Initially, b j balls of color C j (j = 1, 2) are in the urn. At each step, a single ball is uniformly drawn form the urn. Then, the drawn ball is returned with additional m balls of the same color. Repeat this drawing process n times. LetỸ j denote the number of balls of the color C j in the urn at the end of time n. Let Y j denote the number of times that the balls of color C j are drawn after n draws. According to , we have the next theorem. Theorem 4:Ỹ where b 1 + b 2 + n · m is the total number of balls in the urn at the end of time n, and Y is a Beta random variable with parameters b 1 /m and b 2 /m. That is for x ∈ , We immediately have the next corollary. Corollary 2: where Y is the same Beta random variable as that of Theorem 4. Proof: Y 1 can be written as Thus, we have Since b1+b2+n·m m·n → 1 and b1 m·n → 0 as n → ∞, we have where almost sure convergence comes from Theorem 4. This completes the proof. After can be regarded as the Pólya's urn model with the following settings: Here, we assume that the total number of drawing balls is n − k. Then, according to Corollary 2, we have where Y is a Beta random variable with parameters 1/(δ − 2) and k − 1 + (δ − 1)/(δ − 2). Thus, we have Similarly, we have Due to (26) and (27), we have (18). On the other hand, after v (d) is infected kth, X δ (v (d) ) can be regarded as the Pólya's urn model with the following settings: Here, we assume that the total number of drawing balls is n − k. Then, according to Corollary 2, we have where Y is a Beta random variable with parameters k − 1 + 1/(δ − 2) and (δ − 1)/(δ − 2). Thus, we have Similarly, we have Due to (28) and (29), we have (19).
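Sections II-IV describe the SI spreading model on an infinite delta-regular tree and the ML estimator, which on regular trees coincides with the rumor center (Appendix C). The sketch below is a minimal Monte-Carlo check of the quantity studied in the paper, the distribution of the distance d_G(V_n, v_1): it simulates the spread (using the fact, noted in Section IV, that memoryless exponential delays make the next infected node uniform over the boundary), picks the infected node maximizing rumor centrality by brute force, and tallies its distance to the true source. The parameter values (delta = 3, n = 100, 200 trials) and the O(n^2) brute-force centrality computation are illustrative choices, not taken from the paper.

import math
import random
from collections import defaultdict

def simulate_si(delta, n, rng):
    """Infect n nodes of an infinite delta-regular tree, starting from node 0."""
    parent = {0: None}
    adj = defaultdict(list)
    infected = {0}
    boundary = []
    next_id = 1

    def grow(v, count):
        nonlocal next_id
        for _ in range(count):
            u = next_id
            next_id += 1
            parent[u] = v
            adj[v].append(u)
            adj[u].append(v)
            boundary.append(u)

    grow(0, delta)                      # the source has delta susceptible neighbours
    while len(infected) < n:
        # memoryless exponential delays => next infected node is uniform over the boundary
        v = boundary.pop(rng.randrange(len(boundary)))
        infected.add(v)
        grow(v, delta - 1)              # its remaining, not-yet-generated neighbours
    return infected, parent, adj

def neg_log_product_of_subtree_sizes(v, infected, adj):
    """-sum_u log t_u^v over the infected subgraph; maximizing this maximizes
    rumor centrality, since R(v, G_n) = n! / prod_u t_u^v."""
    order = []
    stack = [(v, None)]
    while stack:
        u, par = stack.pop()
        order.append((u, par))
        for w in adj[u]:
            if w in infected and w != par:
                stack.append((w, u))
    size = {}
    total = 0.0
    for u, par in reversed(order):      # children are processed before their parents
        size[u] = 1 + sum(size[w] for w in adj[u] if w in infected and w != par)
        total += math.log(size[u])
    return -total

def rumor_center(infected, adj):
    # ties are broken arbitrarily here; the paper breaks them uniformly at random
    return max(infected, key=lambda v: neg_log_product_of_subtree_sizes(v, infected, adj))

def depth(v, parent):
    d = 0
    while parent[v] is not None:
        v = parent[v]
        d += 1
    return d

if __name__ == "__main__":
    rng = random.Random(0)
    delta, n, trials = 3, 100, 200
    hist = defaultdict(int)
    for _ in range(trials):
        infected, parent, adj = simulate_si(delta, n, rng)
        hist[depth(rumor_center(infected, adj), parent)] += 1
    for d in sorted(hist):              # empirical estimate of D_n(d)
        print(f"d = {d}: {hist[d] / trials:.2f}")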
def add_commit_push(show=True):
    """Stage all changes, commit them with an auto-generated message listing
    the modified files and the current branch, then push.

    `ewm` (workflow helpers) and `bh` (shell-command runner) are assumed to be
    imported at module level.
    """
    branch = ewm.get_branch_name()
    # keep everything after the first space of each status line (the file path)
    files = [x.split(' ', 1)[-1] for x in ewm.get_status()]
    message = 'Changed ' + ', '.join(files) + ' in branch ' + branch
    return bh.run(
        'git add .; git commit -m "{}"; git push'.format(message),
        show=show
    )
//=--------------------------------------------------------------------------=
// ExistImplementedCategories [RegisterUnknownObject Helper]
//=--------------------------------------------------------------------------=
// Checks for the Implemented Categories key under a given object's CLSID key
//
// Parameters:
//    riid            - [in] CLSID of object to be examined
//
// Output:
//    BOOL            - Returns TRUE if Implemented Categories exists
//                      Returns FALSE if Implemented Categories doesn't exist
//
BOOL ExistImplementedCategories(REFCLSID riid)
{
    char szGuidStr[MAX_PATH];
    char szScratch[MAX_PATH];
    long l;
    HKEY hkCLSID, hkImplementedCategories;

    if (!StringFromGuidA(riid, szGuidStr))
        return FALSE;

    wsprintf(szScratch, "CLSID\\%s", szGuidStr);

    // Open (don't create) the CLSID key read-only: this routine only checks
    // for existence and must not add registry entries as a side effect.
    l = RegOpenKeyEx(HKEY_CLASSES_ROOT, szScratch, 0, KEY_READ, &hkCLSID);
    if (l != ERROR_SUCCESS)
        return FALSE;

    l = RegOpenKeyEx(hkCLSID, "Implemented Categories", 0, KEY_READ,
                     &hkImplementedCategories);
    RegCloseKey(hkCLSID);
    if (l != ERROR_SUCCESS)
        return FALSE;

    RegCloseKey(hkImplementedCategories);
    return TRUE;
}
import { BadRequestException, Injectable, Logger, NotFoundException, } from '@nestjs/common'; import { ConfigService } from '@nestjs/config'; import { UserRepository } from 'modules/user/repositories/user/user.repository'; import { CONFIG } from 'config/config-keys'; import { UserDocument } from 'modules/user/entities/user.entity'; import { CreateUserDto, UserDto } from 'modules/user/dtos/user.dto'; import { LoginDto } from 'modules/auth/dtos/login.dto'; import { MailerService } from '../../../../mailer/mailer.service'; import { ForgotPasswordDto, RecoveryPasswordDto, } from 'modules/auth/dtos/recovery-password.dto'; import * as bcrypt from 'bcrypt'; import { AuthJwtService } from 'modules/auth/services/auth-jwt/auth-jwt.service'; @Injectable() export class AuthService { private readonly logger = new Logger(AuthService.name); constructor( private config: ConfigService, private userRepository: UserRepository, private authJwt: AuthJwtService, private mailerService: MailerService, ) {} /** * Register new user * @param user */ async register(user: CreateUserDto): Promise<{ success: boolean }> { if (await this.userRepository.findByEmail(user.email)) { // Log error this.logger.error('User are registered: ' + user.email); // 400 Http response throw new BadRequestException('The given email is already registered'); } const password = await bcrypt.hash( user.password, +this.config.get(CONFIG.BCRYPT_SALT_ROUNDS), ); // const refreshSecret = await this.cypher.encrypt(uid(64)); await this.userRepository.create({ ...user, password, }); // Log this.logger.log('User registered successfully: ' + user.email); return { success: true, }; } /** * Called by controller after passport local strategy validation * @param user created in local strategy * @param onlyJwt */ async generateJwt(user: UserDto, onlyJwt = false): Promise<LoginDto> { const response: LoginDto = {}; // Generate access jwt response.accessToken = await this.authJwt.signAccess(user).catch((e) => { this.logger.log(e.message); throw new BadRequestException(e.message); }); this.logger.log('Credentials generated successfully'); // return accessToken if (onlyJwt) { return response; } // generate refresh jwt response.refreshToken = await this.authJwt.signRefresh(user).catch((e) => { this.logger.log(e.message); throw new BadRequestException(e.message); }); // populate with user profile response.user = user; // Return Login response return response; } /** * Search user and send an email with a token to recovery account * @param user */ async forgotPassword(user: ForgotPasswordDto) { if (!(await this.userRepository.findByEmail(user.email))) { this.logger.error('User not registered: ' + user.email); throw new NotFoundException('The given email is not registered'); } const payload: RecoveryPasswordDto = { user: user.email, }; const jwtDto = await this.authJwt.signRecovery(payload).catch((e) => { this.logger.log(e.message); throw new BadRequestException(e.message); }); const mailerConfig = { to: user.email, from: '<EMAIL>', subject: 'Recovery password request NEST CHAT ✔', text: 'http://localhost:4200/reset-password?token=' + jwtDto.token, }; // Send recovery password mail await this.mailerService.sendMail(mailerConfig); this.logger.log('Recovery password mail was sent to ' + user.email); return { success: true, }; } /** * Validate recovery token sent to user email * and save new password * @param data * @param password */ async resetPassword( data: RecoveryPasswordDto, password: string, ): Promise<{ success: boolean }> { const user: UserDocument = await 
this.userRepository.findByEmail(data.user);

    if (!user) {
      // log the email from the request payload; `user` is null at this point
      this.logger.error('User not found: ' + data.user);
      throw new NotFoundException('User not found');
    }

    user.password = await bcrypt.hash(
      password,
      +this.config.get(CONFIG.BCRYPT_SALT_ROUNDS),
    );

    await user.save();

    this.logger.log('Password updated successfully for user ' + user.email);

    return {
      success: true,
    };
  }
}
/**
 * Uses the specified producer to send the specified message.
 *
 * @param producer the producer
 * @param message the message
 * @param retryTimes the number of times to retry
 * @return a {@code SendResult}
 * @throws TooManyRetryException if all retry attempts fail
 */
public static final SendResult send(Producer producer, Message message, int retryTimes) throws TooManyRetryException {
    try {
        return producer.send(message);
    } catch (Throwable e) {
        logger.warn("Send message failed, start retry stage: {}", message, e);
        List<Throwable> causes = new ArrayList<>();
        for (var remainingTimes = Math.max(0, retryTimes); remainingTimes > 0; remainingTimes--) {
            try {
                return producer.send(message);
            } catch (Throwable cause) {
                causes.add(cause);
            }
        }
        throw new TooManyRetryException("send message failed", e, causes);
    }
}
/** * Factory to create objects that are stored in the InMemory store. * */ public class StoreManagerImpl implements StoreManager { private static final String UNKNOWN_REPOSITORY = "Unknown repository "; private static final String CMIS_READ = "cmis:read"; private static final String CMIS_WRITE = "cmis:write"; private static final String CMIS_ALL = "cmis:all"; private final BindingsObjectFactory fObjectFactory; private final TypeDefinitionFactory typeFactory = TypeDefinitionFactory.newInstance(); private static final String OPENCMIS_VERSION; private static final String OPENCMIS_SERVER; static { Package p = Package.getPackage("org.apache.chemistry.opencmis.inmemory"); if (p == null) { OPENCMIS_VERSION = "?"; OPENCMIS_SERVER = "Apache-Chemistry-OpenCMIS-InMemory"; } else { String ver = p.getImplementationVersion(); OPENCMIS_VERSION = (null == ver ? "?" : ver); OPENCMIS_SERVER = "Apache-Chemistry-OpenCMIS-InMemory/" + OPENCMIS_VERSION; } } /** * Map from repository id to a type manager. */ private final Map<String, TypeManagerImpl> fMapRepositoryToTypeManager = new HashMap<String, TypeManagerImpl>(); /** * Map from repository id to a object store. */ private final Map<String, ObjectStore> fMapRepositoryToObjectStore = new HashMap<String, ObjectStore>(); public ObjectStoreImpl getStore(String repositoryId) { return (ObjectStoreImpl) fMapRepositoryToObjectStore.get(repositoryId); } public StoreManagerImpl() { fObjectFactory = new BindingsObjectFactoryImpl(); } @Override public List<String> getAllRepositoryIds() { Set<String> repIds = fMapRepositoryToObjectStore.keySet(); List<String> result = new ArrayList<String>(); result.addAll(repIds); return result; } @Override public void initRepository(String repositoryId) { fMapRepositoryToObjectStore.put(repositoryId, new ObjectStoreImpl(repositoryId)); fMapRepositoryToTypeManager.put(repositoryId, new TypeManagerImpl()); } @Override public void createAndInitRepository(String repositoryId, String typeCreatorClassName) { if (fMapRepositoryToObjectStore.containsKey(repositoryId) || fMapRepositoryToTypeManager.containsKey(repositoryId)) { throw new CmisInvalidArgumentException("Cannot add repository, repository " + repositoryId + " already exists."); } fMapRepositoryToObjectStore.put(repositoryId, new ObjectStoreImpl(repositoryId)); fMapRepositoryToTypeManager.put(repositoryId, new TypeManagerImpl()); // initialize the type system: initTypeSystem(repositoryId, typeCreatorClassName); } @Override public ObjectStore getObjectStore(String repositoryId) { return fMapRepositoryToObjectStore.get(repositoryId); } @Override public CmisServiceValidator getServiceValidator() { return new InMemoryServiceValidatorImpl(this); } @Override public BindingsObjectFactory getObjectFactory() { return fObjectFactory; } @Override public TypeDefinitionContainer getTypeById(String repositoryId, String typeId) { TypeManager typeManager = fMapRepositoryToTypeManager.get(repositoryId); if (null == typeManager) { throw new CmisObjectNotFoundException(UNKNOWN_REPOSITORY + repositoryId); } boolean cmis11 = InMemoryServiceContext.getCallContext().getCmisVersion() != CmisVersion.CMIS_1_0; TypeDefinitionContainer tdc = typeManager.getTypeById(typeId); if (null != tdc && !cmis11) { TypeDefinition td = tdc.getTypeDefinition(); if (td.getBaseTypeId() == BaseTypeId.CMIS_ITEM || td.getBaseTypeId() == BaseTypeId.CMIS_SECONDARY || td.getId().equals(BaseTypeId.CMIS_ITEM.value()) || td.getId().equals(BaseTypeId.CMIS_SECONDARY.value())) { tdc = null; // filter new types for CMIS 1.0 } else { // 
remove type mutability information: MutableTypeDefinition tdm = typeFactory.copy(td, true); tdm.setTypeMutability(null); tdc = new TypeDefinitionContainerImpl(tdm); } } return tdc; } @Override public TypeDefinitionContainer getTypeById(String repositoryId, String typeId, boolean includePropertyDefinitions, int depthParam) { int depth = depthParam; TypeManager typeManager = fMapRepositoryToTypeManager.get(repositoryId); if (null == typeManager) { throw new CmisInvalidArgumentException(UNKNOWN_REPOSITORY + repositoryId); } TypeDefinitionContainer tc = typeManager.getTypeById(typeId); boolean cmis11 = InMemoryServiceContext.getCallContext().getCmisVersion() != CmisVersion.CMIS_1_0; if (tc != null) { if (depth == -1) { if (cmis11 && includePropertyDefinitions) { return tc; } else { depth = Integer.MAX_VALUE; } } else if (depth == 0 || depth < -1) { throw new CmisInvalidArgumentException("illegal depth value: " + depth); } return cloneTypeList(depth, includePropertyDefinitions, tc, null, cmis11); } else { return null; } } @Override public Collection<TypeDefinitionContainer> getTypeDefinitionList(String repositoryId, boolean includePropertyDefinitions) { TypeManager typeManager = fMapRepositoryToTypeManager.get(repositoryId); if (null == typeManager) { throw new CmisInvalidArgumentException(UNKNOWN_REPOSITORY + repositoryId); } Collection<TypeDefinitionContainer> typeColl = getRootTypes(repositoryId, includePropertyDefinitions); return typeColl; } @Override public List<TypeDefinitionContainer> getRootTypes(String repositoryId, boolean includePropertyDefinitions) { List<TypeDefinitionContainer> result; TypeManager typeManager = fMapRepositoryToTypeManager.get(repositoryId); if (null == typeManager) { throw new CmisInvalidArgumentException(UNKNOWN_REPOSITORY + repositoryId); } List<TypeDefinitionContainer> rootTypes = typeManager.getRootTypes(); // remove cmis:item and cmis:secondary for CMIS 1.0 boolean cmis11 = InMemoryServiceContext.getCallContext().getCmisVersion() != CmisVersion.CMIS_1_0; if (!cmis11) { rootTypes = new ArrayList<TypeDefinitionContainer>(rootTypes); TypeDefinitionContainer tcItem = null, tcSecondary = null; for (TypeDefinitionContainer tc : rootTypes) { if (tc.getTypeDefinition().getId().equals(BaseTypeId.CMIS_ITEM.value())) { tcItem = tc; } if (tc.getTypeDefinition().getId().equals(BaseTypeId.CMIS_SECONDARY.value())) { tcSecondary = tc; } } if (tcItem != null) { rootTypes.remove(tcItem); } if (tcSecondary != null) { rootTypes.remove(tcSecondary); } } if (cmis11 && includePropertyDefinitions) { result = rootTypes; } else { result = cloneTypeDefinitionTree(rootTypes, includePropertyDefinitions, cmis11); } return result; } private List<TypeDefinitionContainer> cloneTypeDefinitionTree (List<TypeDefinitionContainer> tdcList, boolean includePropertyDefinitions, boolean cmis11) { List<TypeDefinitionContainer> result = new ArrayList<TypeDefinitionContainer>(tdcList.size()); for (TypeDefinitionContainer c : tdcList) { MutableTypeDefinition td = typeFactory.copy(c.getTypeDefinition(), includePropertyDefinitions); if (!cmis11) { td.setTypeMutability(null); } TypeDefinitionContainerImpl tdc = new TypeDefinitionContainerImpl(td); tdc.setChildren(cloneTypeDefinitionTree(c.getChildren(), includePropertyDefinitions, cmis11)); result.add(tdc); } return result; } @Override public RepositoryInfo getRepositoryInfo(String repositoryId) { ObjectStore sm = fMapRepositoryToObjectStore.get(repositoryId); if (null == sm) { return null; } RepositoryInfo repoInfo = createRepositoryInfo(repositoryId); 
return repoInfo; } public void clearTypeSystem(String repositoryId) { TypeManagerImpl typeManager = fMapRepositoryToTypeManager.get(repositoryId); if (null == typeManager) { throw new CmisInvalidArgumentException(UNKNOWN_REPOSITORY + repositoryId); } typeManager.clearTypeSystem(); } public static List<TypeDefinition> initTypeSystem(String typeCreatorClassName) { List<TypeDefinition> typesList = null; if (typeCreatorClassName != null) { Object obj = null; TypeCreator typeCreator = null; final String message = "Illegal class to create type system, must implement TypeCreator interface."; try { obj = Class.forName(typeCreatorClassName).newInstance(); } catch (InstantiationException e) { throw new CmisRuntimeException(message, e); } catch (IllegalAccessException e) { throw new CmisRuntimeException(message, e); } catch (ClassNotFoundException e) { throw new CmisRuntimeException(message, e); } if (obj instanceof TypeCreator) { typeCreator = (TypeCreator) obj; } else { throw new CmisRuntimeException(message); } // retrieve the list of available types from the configured class. // test typesList = typeCreator.createTypesList(); } return typesList; } private void initTypeSystem(String repositoryId, String typeCreatorClassName) { List<TypeDefinition> typeDefs = null; TypeManagerImpl typeManager = fMapRepositoryToTypeManager.get(repositoryId); if (null == typeManager) { throw new CmisObjectNotFoundException(UNKNOWN_REPOSITORY + repositoryId); } if (null != typeCreatorClassName) { typeDefs = initTypeSystem(typeCreatorClassName); } typeManager.initTypeSystem(typeDefs, true); } @SuppressWarnings("serial") private RepositoryInfo createRepositoryInfo(String repositoryId) { boolean cmis11 = InMemoryServiceContext.getCallContext().getCmisVersion() != CmisVersion.CMIS_1_0; ObjectStore objStore = getObjectStore(repositoryId); String rootFolderId = objStore.getRootFolder().getId(); // repository info RepositoryInfoImpl repoInfo; repoInfo = new RepositoryInfoImpl(); repoInfo.setId(repositoryId == null ? 
"inMem" : repositoryId); repoInfo.setName("Apache Chemistry OpenCMIS InMemory Repository"); repoInfo.setDescription("Apache Chemistry OpenCMIS InMemory Repository (Version: " + OPENCMIS_VERSION + ")"); repoInfo.setRootFolder(rootFolderId); repoInfo.setPrincipalAnonymous(InMemoryAce.getAnonymousUser()); repoInfo.setPrincipalAnyone(InMemoryAce.getAnyoneUser()); repoInfo.setThinClientUri(""); repoInfo.setChangesIncomplete(Boolean.TRUE); repoInfo.setLatestChangeLogToken("token-24"); repoInfo.setVendorName("Apache Chemistry"); repoInfo.setProductName(OPENCMIS_SERVER); repoInfo.setProductVersion(OPENCMIS_VERSION); // set capabilities RepositoryCapabilitiesImpl caps = new RepositoryCapabilitiesImpl(); caps.setAllVersionsSearchable(false); caps.setCapabilityAcl(CapabilityAcl.MANAGE); caps.setCapabilityChanges(CapabilityChanges.OBJECTIDSONLY); caps.setCapabilityContentStreamUpdates(CapabilityContentStreamUpdates.ANYTIME); caps.setCapabilityJoin(CapabilityJoin.NONE); caps.setCapabilityQuery(CapabilityQuery.BOTHCOMBINED); caps.setCapabilityRendition(CapabilityRenditions.READ); caps.setIsPwcSearchable(false); caps.setIsPwcUpdatable(true); caps.setSupportsGetDescendants(true); caps.setSupportsGetFolderTree(true); caps.setSupportsMultifiling(true); caps.setSupportsUnfiling(true); caps.setSupportsVersionSpecificFiling(false); caps.setCapabilityAcl(CapabilityAcl.MANAGE); AclCapabilitiesDataImpl aclCaps = new AclCapabilitiesDataImpl(); aclCaps.setAclPropagation(AclPropagation.OBJECTONLY); aclCaps.setSupportedPermissions(SupportedPermissions.BASIC); // permissions List<PermissionDefinition> permissions = new ArrayList<PermissionDefinition>(); permissions.add(createPermission(CMIS_READ, "Read")); permissions.add(createPermission(CMIS_WRITE, "Write")); permissions.add(createPermission(CMIS_ALL, "All")); if (cmis11) { NewTypeSettableAttributesImpl typeAttrs = new NewTypeSettableAttributesImpl(); typeAttrs.setCanSetControllableAcl(false); typeAttrs.setCanSetControllablePolicy(false); typeAttrs.setCanSetCreatable(true); typeAttrs.setCanSetDescription(true); typeAttrs.setCanSetDisplayName(true); typeAttrs.setCanSetFileable(false); typeAttrs.setCanSetFulltextIndexed(false); typeAttrs.setCanSetId(true); typeAttrs.setCanSetIncludedInSupertypeQuery(false); typeAttrs.setCanSetLocalName(true); typeAttrs.setCanSetLocalNamespace(true); typeAttrs.setCanSetQueryable(false); typeAttrs.setCanSetQueryName(true); caps.setNewTypeSettableAttributes(typeAttrs); } aclCaps.setPermissionDefinitionData(permissions); // mapping List<PermissionMapping> list = new ArrayList<PermissionMapping>(); list.add(createMapping(PermissionMapping.CAN_GET_DESCENDENTS_FOLDER, CMIS_READ)); list.add(createMapping(PermissionMapping.CAN_GET_CHILDREN_FOLDER, CMIS_READ)); list.add(createMapping(PermissionMapping.CAN_GET_PARENTS_FOLDER, CMIS_READ)); list.add(createMapping(PermissionMapping.CAN_GET_FOLDER_PARENT_OBJECT, CMIS_READ)); list.add(createMapping(PermissionMapping.CAN_CREATE_DOCUMENT_FOLDER, CMIS_WRITE)); list.add(createMapping(PermissionMapping.CAN_CREATE_FOLDER_FOLDER, CMIS_WRITE)); list.add(createMapping(PermissionMapping.CAN_CREATE_RELATIONSHIP_SOURCE, CMIS_READ)); list.add(createMapping(PermissionMapping.CAN_CREATE_RELATIONSHIP_TARGET, CMIS_READ)); list.add(createMapping(PermissionMapping.CAN_GET_PROPERTIES_OBJECT, CMIS_READ)); list.add(createMapping(PermissionMapping.CAN_VIEW_CONTENT_OBJECT, CMIS_READ)); list.add(createMapping(PermissionMapping.CAN_UPDATE_PROPERTIES_OBJECT, CMIS_WRITE)); 
list.add(createMapping(PermissionMapping.CAN_MOVE_OBJECT, CMIS_WRITE)); list.add(createMapping(PermissionMapping.CAN_MOVE_TARGET, CMIS_WRITE)); list.add(createMapping(PermissionMapping.CAN_MOVE_SOURCE, CMIS_WRITE)); list.add(createMapping(PermissionMapping.CAN_DELETE_OBJECT, CMIS_WRITE)); list.add(createMapping(PermissionMapping.CAN_DELETE_TREE_FOLDER, CMIS_WRITE)); list.add(createMapping(PermissionMapping.CAN_SET_CONTENT_DOCUMENT, CMIS_WRITE)); list.add(createMapping(PermissionMapping.CAN_DELETE_CONTENT_DOCUMENT, CMIS_WRITE)); list.add(createMapping(PermissionMapping.CAN_ADD_TO_FOLDER_OBJECT, CMIS_WRITE)); list.add(createMapping(PermissionMapping.CAN_REMOVE_FROM_FOLDER_OBJECT, CMIS_WRITE)); list.add(createMapping(PermissionMapping.CAN_CHECKOUT_DOCUMENT, CMIS_WRITE)); list.add(createMapping(PermissionMapping.CAN_CANCEL_CHECKOUT_DOCUMENT, CMIS_WRITE)); list.add(createMapping(PermissionMapping.CAN_CHECKIN_DOCUMENT, CMIS_WRITE)); list.add(createMapping(PermissionMapping.CAN_GET_ALL_VERSIONS_VERSION_SERIES, CMIS_READ)); list.add(createMapping(PermissionMapping.CAN_GET_OBJECT_RELATIONSHIPS_OBJECT, CMIS_READ)); list.add(createMapping(PermissionMapping.CAN_ADD_POLICY_OBJECT, CMIS_WRITE)); list.add(createMapping(PermissionMapping.CAN_REMOVE_POLICY_OBJECT, CMIS_WRITE)); list.add(createMapping(PermissionMapping.CAN_GET_APPLIED_POLICIES_OBJECT, CMIS_READ)); list.add(createMapping(PermissionMapping.CAN_GET_ACL_OBJECT, CMIS_READ)); list.add(createMapping(PermissionMapping.CAN_APPLY_ACL_OBJECT, CMIS_ALL)); Map<String, PermissionMapping> map = new LinkedHashMap<String, PermissionMapping>(); for (PermissionMapping pm : list) { map.put(pm.getKey(), pm); } List<BaseTypeId> changesOnType; // CMIS 1.1 extensions if (cmis11) { repoInfo.setCmisVersionSupported(CmisVersion.CMIS_1_1.value()); repoInfo.setCmisVersion(CmisVersion.CMIS_1_1); changesOnType = new ArrayList<BaseTypeId>() { { add(BaseTypeId.CMIS_DOCUMENT); add(BaseTypeId.CMIS_FOLDER); add(BaseTypeId.CMIS_ITEM); } }; Set<PropertyType> propertyTypeSet = new HashSet<PropertyType>() { { add(PropertyType.BOOLEAN); add(PropertyType.DATETIME); add(PropertyType.DECIMAL); add(PropertyType.HTML); add(PropertyType.ID); add(PropertyType.INTEGER); add(PropertyType.STRING); add(PropertyType.URI); } }; CreatablePropertyTypesImpl creatablePropertyTypes = new CreatablePropertyTypesImpl(); creatablePropertyTypes.setCanCreate(propertyTypeSet); caps.setCreatablePropertyTypes(creatablePropertyTypes); caps.setCapabilityOrderBy(CapabilityOrderBy.COMMON); } else { repoInfo.setCmisVersionSupported(CmisVersion.CMIS_1_0.value()); repoInfo.setCmisVersion(CmisVersion.CMIS_1_0); changesOnType = new ArrayList<BaseTypeId>() { { add(BaseTypeId.CMIS_DOCUMENT); add(BaseTypeId.CMIS_FOLDER); } }; } repoInfo.setChangesOnType(changesOnType); aclCaps.setPermissionMappingData(map); repoInfo.setAclCapabilities(aclCaps); repoInfo.setCapabilities(caps); return repoInfo; } private static PermissionDefinition createPermission(String permission, String description) { PermissionDefinitionDataImpl pd = new PermissionDefinitionDataImpl(); pd.setId(permission); pd.setDescription(description); return pd; } private static PermissionMapping createMapping(String key, String permission) { PermissionMappingDataImpl pm = new PermissionMappingDataImpl(); pm.setKey(key); pm.setPermissions(Collections.singletonList(permission)); return pm; } /** * traverse tree and replace each need node with a clone. remove properties * on clone if requested, cut children of clone if depth is exceeded. 
* * @param depth * levels of children to copy * @param includePropertyDefinitions * indicates with or without property definitions * @param tdc * type definition to clone * @param parent * parent container where to add clone as child * @return cloned type definition */ public static TypeDefinitionContainer cloneTypeList(int depth, boolean includePropertyDefinitions, TypeDefinitionContainer tdc, TypeDefinitionContainer parent, boolean cmis11) { final TypeDefinitionFactory typeFactory = TypeDefinitionFactory.newInstance(); MutableTypeDefinition tdClone = typeFactory.copy(tdc.getTypeDefinition(), includePropertyDefinitions); if (!cmis11) { tdClone.setTypeMutability(null); } TypeDefinitionContainerImpl tdcClone = new TypeDefinitionContainerImpl(tdClone); if (null != parent) { parent.getChildren().add(tdcClone); } if (depth > 0) { List<TypeDefinitionContainer> children = tdc.getChildren(); for (TypeDefinitionContainer child : children) { cloneTypeList(depth - 1, includePropertyDefinitions, child, tdcClone, cmis11); } } return tdcClone; } @Override public TypeManager getTypeManager(String repositoryId) { TypeManager typeManager = fMapRepositoryToTypeManager.get(repositoryId); return typeManager; } @Override public boolean supportsSingleFiling(String repositoryId) { return false; } @Override public boolean supportsMultiFilings(String repositoryId) { return true; } @Override public ObjectList query(String user, String repositoryId, String statement, Boolean searchAllVersions, Boolean includeAllowableActions, IncludeRelationships includeRelationships, String renditionFilter, BigInteger maxItems, BigInteger skipCount) { TypeManager tm = getTypeManager(repositoryId); ObjectStore objectStore = getObjectStore(repositoryId); InMemoryQueryProcessor queryProcessor = new InMemoryQueryProcessor(getStore(repositoryId)); ObjectList objList = queryProcessor.query(tm, objectStore, user, repositoryId, statement, searchAllVersions, includeAllowableActions, includeRelationships, renditionFilter, maxItems, skipCount); return objList; } }
Microsoft plans to give $75 million to nonprofits that can spread computer science education throughout the world, CEO Satya Nadella said on Wednesday during Salesforce’s annual Dreamforce conference in San Francisco. The investment is part of the company’s YouthSpark initiative to promote computer science education, which it originally launched back in 2012. Microsoft will distribute the money over a three-year period to select programs, including the company’s Technology Education and Literacy in Schools program, in which technology workers partner with high schools to teach computer science to their students. The goal of the investment is to make computer science as important a subject as math or physics, which have long been core subjects in schools. “Computational thinking will be in every aspect of our economy,” Nadella said.
<filename>winter-framework/src/main/java/de/uni_mannheim/informatik/dws/winter/webtables/app/ShowTableData.java<gh_stars>10-100 /* * Copyright (c) 2017 Data and Web Science Group, University of Mannheim, Germany (http://dws.informatik.uni-mannheim.de/) * * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and limitations under the License. */ package de.uni_mannheim.informatik.dws.winter.webtables.app; import java.io.File; import java.io.IOException; import java.io.OutputStreamWriter; import java.util.Collection; import java.util.HashSet; import java.util.Set; import org.apache.commons.lang.StringUtils; import org.slf4j.Logger; import com.beust.jcommander.Parameter; import au.com.bytecode.opencsv.CSVWriter; import de.uni_mannheim.informatik.dws.winter.utils.Executable; import de.uni_mannheim.informatik.dws.winter.utils.ProgressReporter; import de.uni_mannheim.informatik.dws.winter.utils.WinterLogManager; import de.uni_mannheim.informatik.dws.winter.utils.query.Func; import de.uni_mannheim.informatik.dws.winter.utils.query.Q; import de.uni_mannheim.informatik.dws.winter.webtables.Table; import de.uni_mannheim.informatik.dws.winter.webtables.TableColumn; import de.uni_mannheim.informatik.dws.winter.webtables.TableContext; import de.uni_mannheim.informatik.dws.winter.webtables.TableRow; import de.uni_mannheim.informatik.dws.winter.webtables.parsers.CsvTableParser; import de.uni_mannheim.informatik.dws.winter.webtables.parsers.JsonTableParser; import de.uni_mannheim.informatik.dws.winter.webtables.preprocessing.TableDisambiguationExtractor; import de.uni_mannheim.informatik.dws.winter.webtables.preprocessing.TableNumberingExtractor; import de.uni_mannheim.informatik.dws.winter.webtables.writers.JsonTableWriter; /** * @author <NAME> (<EMAIL>) * */ public class ShowTableData extends Executable { @Parameter(names = "-d") private boolean showData = false; @Parameter(names = "-w") private int columnWidth = 20; @Parameter(names = "-keyColumnIndex") private Integer keyColumnIndex = null; @Parameter(names = "-convertValues") private boolean convertValues = false; @Parameter(names = "-update") private boolean update = false; @Parameter(names = "-detectKey") private boolean detectKey = false; @Parameter(names = "-listColumnIds") private boolean listColumnIds; @Parameter(names = "-header") private boolean showHeader = false; @Parameter(names = "-rows") private int numRows = 0; @Parameter(names = "-csv") private boolean createCSV = false; @Parameter(names = "-dep") private boolean showDependencyInfo = false; @Parameter(names = "-prov") private boolean showProvenanceInfo = false; @Parameter(names = "-pre") private boolean applyPreprocessing = false; private static final Logger logger = WinterLogManager.getLogger(); public static void main(String[] args) throws IOException { ShowTableData s = new ShowTableData(); if(s.parseCommandLine(ShowTableData.class, args) && s.getParams()!=null) { s.run(); } } public void run() throws IOException { JsonTableParser p = new JsonTableParser(); JsonTableWriter w = new JsonTableWriter(); // p.setConvertValues(convertValues | detectKey); 
CsvTableParser csvP = new CsvTableParser(); // csvP.setConvertValues(convertValues | detectKey); String[] files = getParams().toArray(new String[getParams().size()]); File dir = null; if(files.length==1) { dir = new File(files[0]); if(dir.isDirectory()) { files = dir.list(); } else { dir = null; } } ProgressReporter prg = new ProgressReporter(files.length, "Processing Tables"); CSVWriter csvW = null; if(createCSV) { csvW = new CSVWriter(new OutputStreamWriter(System.out)); } for(String s : files) { Table t = null; File f = new File(s); if(dir!=null) { f = new File(dir,s); } try { if(s.endsWith("json")) { t = p.parseTable(f); } else if(s.endsWith("csv")) { t = csvP.parseTable(f); } else { logger.error(String.format("Unknown table format '%s' (must be .json or .csv)", f.getName())); continue; } if(applyPreprocessing) { new TableDisambiguationExtractor().extractDisambiguations(Q.toList(t)); new TableNumberingExtractor().extractNumbering(Q.toList(t)); } if(convertValues) { t.convertValues(); } // update the table if requested if(detectKey) { t.identifySubjectColumn(0.3,true); logger.error(String.format("* Detected Entity-Label Column: %s", t.getSubjectColumn()==null ? "?" : t.getSubjectColumn().getHeader())); } if(keyColumnIndex!=null) { logger.error(String.format("* Setting Entity-Label Column: %s", t.getSchema().get(keyColumnIndex))); t.setSubjectColumnIndex(keyColumnIndex); } if(update) { w.write(t, f); } if(createCSV) { // create a csv file with the table meta data csvW.writeNext(new String[] { s, Integer.toString(t.getRows().size()), Integer.toString(t.getColumns().size()), t.getContext()==null ? "" : t.getContext().getUrl(), t.getContext()==null ? "" : t.getContext().getPageTitle(), t.getContext()==null ? "" : t.getContext().getTableTitle(), Integer.toString(getOriginalTables(t).size()), t.getSubjectColumn()==null ? "" : Integer.toString(t.getSubjectColumn().getColumnIndex()) }); } else if(listColumnIds) { // list the columns in the table for(TableColumn c : t.getColumns()) { if(!showHeader) { System.out.println(c.getIdentifier()); } else { System.out.println(c.toString()); } } } else { // print the table meta data in human readable format TableContext ctx = t.getContext(); System.out.println(String.format("*** Table %s ***", s)); if(ctx!=null) { System.out.println(String.format("* URL: %s", ctx.getUrl())); System.out.println(String.format("* Title: %s", ctx.getPageTitle())); System.out.println(String.format("* Heading: %s", ctx.getTableTitle())); } System.out.println(String.format("* # Columns: %d", t.getColumns().size())); System.out.println(String.format("* # Rows: %d", t.getRows().size())); System.out.println(String.format("* Created from %d original tables", getOriginalTables(t).size())); System.out.println(String.format("* Entity-Label Column: %s", t.getSubjectColumn()==null ? "?" 
: t.getSubjectColumn().getHeader())); if(showProvenanceInfo) { // collect all provenance data Set<String> provenance = getOriginalTables(t); if(provenance.size()>0) { System.out.println("Provenance:"); System.out.println(String.format("\t%s", StringUtils.join(Q.sort(provenance), ",") )); } else { System.out.println("Table has no provenance data attached."); } } if(showDependencyInfo) { if(t.getSchema().getFunctionalDependencies()!=null && t.getSchema().getFunctionalDependencies().size()>0) { System.out.println("*** Functional Dependencies ***"); for(Collection<TableColumn> det : t.getSchema().getFunctionalDependencies().keySet()) { Collection<TableColumn> dep = t.getSchema().getFunctionalDependencies().get(det); System.out.println( String.format( "{%s}->{%s}", StringUtils.join(Q.project(det, new TableColumn.ColumnHeaderProjection()), ","), StringUtils.join(Q.project(dep, new TableColumn.ColumnHeaderProjection()), ","))); } } if(t.getSchema().getCandidateKeys()!=null && t.getSchema().getCandidateKeys().size()>0) { System.out.println("*** Candidate Keys ***"); for(Collection<TableColumn> candidateKey : t.getSchema().getCandidateKeys()) { System.out.println( String.format("{%s}", StringUtils.join(Q.project(candidateKey, new TableColumn.ColumnHeaderProjection()), ","))); } } } if(showData) { System.out.println(t.getSchema().format(columnWidth)); System.out.println(t.getSchema().formatDataTypes(columnWidth)); int maxRows = Math.min(numRows, t.getRows().size()); if(maxRows==0) { maxRows = t.getRows().size(); } for(int i = 0; i < maxRows; i++) { TableRow r = t.getRows().get(i); if(showProvenanceInfo) { System.out.println(StringUtils.join(r.getProvenance(), " / ")); } System.out.println(r.format(columnWidth)); } } else { System.out.println(StringUtils.join(Q.project(t.getColumns(), new Func<String, TableColumn>() { @Override public String invoke(TableColumn in) { return String.format("%s (%s)", in.getHeader(), in.getDataType()); }} ), ", ")); } prg.incrementProgress(); prg.report(); } } catch(Exception e) { System.err.println(String.format("Cannot process table '%s'!",f)); e.printStackTrace(); } } if(createCSV) { csvW.close(); } } private Set<String> getOriginalTables(Table t) { Set<String> tbls = new HashSet<>(); for(TableColumn c : t.getColumns()) { for(String prov : c.getProvenance()) { tbls.add(prov.split("~")[0]); } } return tbls; } }
<filename>api/schema/student/__init__.py from .schema import StudentProfileMutation, RegisterStudentInput, StudentQuery, StudentInput
// Copyright (c) HashiCorp, Inc. // SPDX-License-Identifier: MPL-2.0 package plugin import ( "context" "testing" "google.golang.org/grpc" "github.com/hashicorp/go-plugin" "github.com/hashicorp/vault/sdk/logical" "github.com/hashicorp/vault/sdk/plugin/pb" ) func TestStorage_GRPC_ReturnsErrIfStorageNil(t *testing.T) { _, err := new(GRPCStorageServer).Get(context.Background(), nil) if err == nil { t.Error("Expected error when using server with no impl") } } func TestStorage_impl(t *testing.T) { var _ logical.Storage = new(GRPCStorageClient) } func TestStorage_GRPC(t *testing.T) { storage := &logical.InmemStorage{} client, _ := plugin.TestGRPCConn(t, func(s *grpc.Server) { pb.RegisterStorageServer(s, &GRPCStorageServer{ impl: storage, }) }) defer client.Close() testStorage := &GRPCStorageClient{client: pb.NewStorageClient(client)} logical.TestStorage(t, testStorage) }
A “two-eyed seeing” approach to Indigenizing nursing curricula Educational institutions, including schools of nursing, find themselves in significant times, as they work to Indigenize programs, and strive to repair and heal relationships with Indigenous peoples as recommended in the Truth and Reconciliation Commission of Canada (2015). Educators question where to begin the process, how such Indigenization should occur, and what the curricular end result should look like. In response, the authors considered many aspects from the literature, specific to nursing programs. The following themes were explored: partnering with community, cultural relevance, and faculty development. Through the utilization of a “two-eyed seeing” approach, institutional administrators need to partner with Indigenous Elders and community members to facilitate relationships required to provide the knowledge necessary to bring about change within educational programs. It is through such an approach that nursing curricula can be designed to be culturally safe and relevant for both Indigenous and non-Indigenous learners, and faculty can be supported in their growth and development in Indigenous knowledge. The authors propose that through “two-eyed seeing” and the integration of the Aboriginal Nurses Association of Canada (2009) core competencies, Indigenization of nursing curricula may ultimately move forward in a culturally reciprocal and respectful way. INTRODUCTION Universities in many countries are working towards the Indigenization of educational programs. In Canada, Indigenous peoples make up 4.9% of the population (totaling more than 1.6 million people), with the Indigenous population increasing four times faster than that of non-Indigenous people. However, few healthcare workers in Canada identify themselves as Indigenous, and this shortage has negatively affected health outcomes of Indigenous peoples. While there continues to be an underrepresentation of Indigenous students in nursing schools recruitment efforts are underway to promote these numbers. There continues to be a need to understand the factors affecting the retention of Indigenous students, and nurse educators and universities need to explore ways to foster success among Indigenous nursing students. High attrition rates, together with the shortage of Indigenous nurses, demand changes to nursing education curricula. Such changes must begin with a consideration of embedding Indigenous content into nursing programs. The process of Indigenization of curricula can be difficult, and the authors begin with a discussion surrounding the critiques and challenges of this work. When changing curricula, "two-eyed seeing" is required to bring together and honour differing views. It is through "two-eyed seeing", that the process of Indigenization can occur from a culturally safe approach. In reviewing the literature, several themes were identified in nursing curricula at the global level: the importance of partnering with community, faculty development, and cultural relevance. This narrative review of the literature was conducted through a constructionist framework. Constructionism, as defined by Papert, et al., views learning as building knowledge structures in a context whereby the learner is consciously engaged and considers that concepts can be understood by constructing them. From such an approach, the authors were able to examine the Indigenization of nursing curricula, and devise a series of recommendations towards this continued work in the future. 
DEFINITION OF INDIGENIZATION, AND WHY IS IT SO DIFFICULT? What does it mean to Indigenize curriculum, and how does it look within education programs? These questions have been asked, and grappled with, by many post-secondary institutions. Indigenizing program materials is more than understanding geographical, historical, economic, and sociopolitical contexts. To offer decolonized nursing education means engaging and questioning processes of colonialism that have occurred and are still occurring. As stated by Pete (2016), the process of Indigenization is more than simply adding content to educational programs: it is a transformation by decolonizing our collective work, and is a shared responsibility of Indigenous and non-Indigenous peoples. Bopp, Brown, and Robb (2016) further explain Indigenization as a process whereby knowledge is produced within the institution from an Indigenous perspective. Such definitions appear daunting, but through the use of documents such as the Truth and Reconciliation Commission of Canada (TRC) (2015), educational institutions are striving to move forward to create supportive spaces in which Indigenous people can succeed. While the steps required to move forward may eventually become clearer, challenges remain, and a few are outlined here. There is a dearth of literature available on the process of Indigenization, and no one really knows what an Indigenized educational institution might look like. In essence, "we don't know what we don't know". Furthermore, it is well known that Indigenous identity does not mean the same to all people, nor is it consistent between communities. The danger in making curricular changes when very few Indigenous people have been consulted is that it is easy for non-Indigenous peoples to generalize characteristics about the former, a practice known as pan-Indigenizing. It is important for the creation of ethical time and space to develop thorough, careful, and reflexive relationships prior to making changes within programs. Without such a cautious approach, the process of Indigenizing could evolve into ongoing colonialism (including power differentials), despite best intentions to the contrary. Because such ethical time and space is sometimes difficult to find in some academic programs (due to all hours being filled with curricular-required content), educators often do not have the time to devote to learning about Indigenization and how to go about making changes to their courses. For this reason, Indigenization may occur hurriedly, or without the mandatory careful consideration. The challenges of Indigenization lie in the method and spirit of integration of Indigenous knowledge into program curricula. One approach to address these difficulties is through "two-eyed seeing", which is described next. "TWO-EYED SEEING" Although Western and Indigenous knowledge systems differ in epistemological and ontological philosophies, blending principles from both can be beneficial to educate student nurses to provide holistic and culturally safe care. Indigenizing nursing curricula requires inclusion of Aboriginal perspectives and the valuing of Indigenous knowledge and epistemology. The authors recommend using a "two-eyed seeing" approach in curricula to heighten student nurses' knowledge relating to Indigenous people and to clarify understandings about the realities and diversities among Indigenous people to provide individualized healthcare.
"Two-eyed seeing" (Sesatu'k Etuaptmunk), a concept first used by Elder Albert Marshall of Eskasoni First Nation, Nova Scotia, Canada, means that Indigenous knowledge and Western science are considered complementary knowledge forms; when integrated, these knowledge forms can advance the health and well-being of Indigenous people. Marshall asserts that "two-eyed seeing" is the need to learn (from one eye) the strengths of Indigenous traditional knowledge, and from the other eye, the strengths of Western scientific knowledge. Learning to use both eyes together will benefit both Western and Indigenous peoples' health and lives. A "two-eyed seeing" approach integrated in nursing curricula will assist in educating students about Indigenous people. It will further support the consideration of historical, economic, and socio-political contexts that shape Indigenous healthcare, access to healthcare, and healthcare deficits. To Indigenize best practice, nursing faculty employing a "two-eye seeing" approach could facilitate the implementation of program changes with respectful approaches that are culturally safe for both Indigenous and non-Indigenous students. Such approaches must be emulated from nursing professors and staff preceptoring students. Indigenous knowledge and clinical examples may be embedded in simulation experiences, case studies, and presentations in curricula. However, it is important that Indigenous people not be essentialized in a "one-size-fits-all" health care approach. There is diversity amongst Indigenous communities and Nations. Essentialism ignores diversities and the unique experiences within groups, thus reinforcing the notion that only cultural characteristics shape socioeconomic, political, and health disadvantages. Essentialism overlooks the social determinates of Indigenous health that considers contexts of poverty, history (i.e., residential school, colonialism, assimilation), community infrastructure, resources, health behaviors, physical, and social environments that impact the health and health outcomes for Indigenous peoples. Thus, poor health and illnesses for Indigenous peoples are rooted in "deeper social structures and processes" (p. 1), and those may exist at the local community, national, or global levels. According to Browne (2007), lifestyle and personal choice of Indigenous people are often positioned as issues without examination of broader sociopolitical factors. Western healthcare discourses can also marginalize and stigmatize Indigenous people. It is, therefore, imperative that nursing curricula imbues appropriate discourses related to Indigenous people and contextualizes health status for Indigenous peoples regarding sociopolitical, historical, environmental, and economic factors. Doing so assists in decreasing labelling, laying blame for diseases rates, or for accessing healthcare late in disease trajectories. The authors recommend using a "two-eyed seeing" approach as a lens to Indigenizing nursing curricula, and the Aboriginal Nurses Association of Canada (ANAC) (2009) core competencies as foundational in nursing curricula. BACKGROUND LITERATURE Upon review of the literature, a few themes were identified in regards to the Indigenization of nursing curricula at the global level: the importance of partnering with community, the need for faculty development, and the significance of culturally relevant curriculum. Each of these aspects will be discussed in the following section. 
Importance of partnering with community

Although the notion of an emic versus etic perspective has long been associated with the research realm, it has a direct relation to universities that have programs geared toward Indigenous content. Universities need to position themselves toward successful delivery of Indigenous content. However, this cannot be achieved without understanding Indigenous history. Furthermore, fostering relationships with communities and establishing ethical spaces for open dialogue is essential for the exchange of knowledge and learning the best way to provide healthcare to Indigenous people. Paramount to the success of Indigenized curricula is the partnership of community stakeholders who have firsthand knowledge of the history related to Indigenous peoples. In order to achieve this, support must come from relevant sources: this could be Elders in the community or on reserve land, or Indigenous persons in government positions. Nursing faculty must learn from Indigenous people, the experts who can teach about their own culture, social determinants of health, and healthcare needs and outcomes. Mahara, Duncan, Whyte, and Browne (2011) conducted a study in which they strategized how to build a framework that included cultural competency and cultural safety in nursing education. Mahara et al. (2011) found that it was vital to enable Elders from the Indigenous community to work as partners in nursing programs. This was seen as a way forward in learning the "competencies related to Indigenous knowledge" (p. 5). Bopp et al. (2016) further discussed issues related to Indigenization within academic institutions and predicted that unless institutions partner with local Indigenous peoples and include them in decision-making processes, attempts to Indigenize education will fall short. Educational institutions must strive to maintain partnerships with key Indigenous stakeholders, and these can only be achieved if they are rooted within the whole of the institution. Once those partnerships exist, then individual programs (e.g., nursing programs) can move towards successful pathways for both Indigenous and non-Indigenous students alike.

Need for faculty development

All too often, faculty are asked to design courses in which they are required to embed Indigenous knowledge and healthcare practice within the nursing program; they often fall short because they do not possess enough knowledge to impart this information to students. Faculty are pivotal to providing a culturally safe environment for students as they navigate their way through nursing programs, but this cannot be achieved if faculty are not knowledgeable in how to do so. Moreover, it is of equal importance that faculty be afforded opportunities for professional development training to achieve this kind of knowledge. Indigenizing curricula results in student nurses being knowledgeable about Indigenous peoples' contexts, health, wellness, illness, and access to healthcare services; these components are also essential in implementing cultural safety in nursing practice. However, most faculty who teach within nursing programs do not have knowledge of Indigenous ways of knowing and being, cultural background, and traditions, which are seen as necessary to create a culturally safe environment. Faculty development should include support from the Indigenous community in order to validate, create accountability for, and protect the situatedness of this knowledge.
Professional development courses may be used toward the achievement of cultural competence, but equally important are the environment and the people within it who validate the very essence of the Indigenous story. Faculty development should include the skills necessary to enable faculty to relate to the soul of what it means to be Indigenous. This can be done with the help of pedagogy that involves narrative inquiry practices as a way forward to providing rich discussion in classrooms. In order to achieve this, faculty must have the opportunity to learn about Indigenous history and customs related to practices. This could be done by enabling faculty to visit traditional or reserve lands and listen to the narratives of Indigenous persons. Faculty should also have an opportunity to immerse themselves in the Indigenous culture that they will pass along to students in the future. In some cases, Indigenous students' learning can be promoted through storytelling; thus, faculty development should include the skills needed to incorporate storytelling into classroom teaching. Recognizing that not everyone is equipped with enough knowledge and skills to tell stories, perhaps this can be done with the support of community Elders. Overall, success in student attainment of Indigenous knowledge comes from the faculty members who convey it, and this can only be done if learning opportunities are available and faculty take advantage of them.

Significance of culturally relevant curricula

Indigenization of the nursing curricula is culturally relevant to both Indigenous and non-Indigenous learners. The content and delivery of program materials need to align with how students will process and apply theories to become competent practitioners. As mentioned earlier, current educational systems are predominantly posited in a Eurocentric (Western) worldview. Knowledge of colonial practices, treaties, acts, residential schools, power imbalances, and intergenerational trauma has not been included in mainstream learning or dialogues. As Indigenization strategies are newly developing in educational institutions, those teaching in and attending nursing programs may lack awareness and understanding of the context of historical atrocities and ongoing legacies experienced by Indigenous peoples. Setting students up for success, through Indigenization, is complex as institutions embark on a journey toward a shift from current epistemological and ontological institutional practices. Each learner is challenged in their own way, and attempts are made to bridge the knowledge gaps for those not informed of Indigenous ways of knowing and being. Similarly, sensitivity to the acculturation of Indigenous students must occur, and care must be taken to prevent assumptions that all Aboriginal peoples are aware of their ancestors' history, while still eliminating barriers to holistically support Indigenous learners. The TRC (2015) addresses disparities in health outcomes, recommending that society ". . . close the gaps in health outcomes between aboriginal and nonaboriginal peoples" (action 19, pp. 2-3). Understanding individual biases, assumptions, and beliefs brings awareness of the influence of cultural bias on practitioners' care for Indigenous peoples and of how providers have an impact on a person's experience with health systems. Implementing learning to support culturally safe care is commonly addressed in the literature and is a further call to action.
As current practices and learning applications in the literature are explored, it becomes more obvious that nursing programs need to be tailored to be culturally responsive to students, faculty, and the Indigenous communities within which they work and serve.

CONCLUSION

Embedding Indigenous content in a meaningful way into nursing curriculum is multifaceted. The authors discovered many aspects to consider in this process, and the themes of partnering with community, faculty development, and cultural relevance were explored in this article. During this historical time of repair and healing, we are called to action by the TRC (2015) to eradicate health care disparities for Indigenous peoples. Nursing educators are to provide non-Indigenous faculty and learners with the supports needed to be culturally safe. As well, Indigenous nursing students need to be supported in their learning, thus promoting increased enrolment in nursing education. As we move away from colonized epistemological and ontological philosophies to create spaces for Indigenous and non-Indigenous learners, we rely on our Indigenous Elders to guide and inform healing practices that will enhance nursing education and empower nurses to be responsive to the diversity of health care needs. Similarly, academic institutions offering nursing education need to become more attuned to the Indigenous communities residing in the ancestral lands where they are situated, and explore how to foster relationships in order to recruit and retain Indigenous students into nursing programs. "Two-eyed seeing" embraces a strength-based approach which capitalizes on the best of Indigenous and Western epistemologies with mutual reciprocity and respect, and it is through this approach that the authors believe Indigenization of nursing curricula may be realized.

Recommendations

Recommendations for Indigenizing nursing curricula include:
• Implement a "two-eyed seeing" approach
• Involve Elders and Indigenous community members in curricula development, implementation, and evaluation
• Use the ANAC (2009) core competencies in curricula development
• Use the Truth and Reconciliation Commission of Canada: Calls to Action (2015) report in curricula development
• Nursing professors, clinical educators, and staff preceptoring nursing students must respectfully educate student nurses about Indigenous peoples' health using culturally safe examples in class and practice
• Have Indigenous nurses, Elders, and community members as guest speakers in classes
• Implement assignments in nursing curricula that embody Indigenous knowledge and teachings in case studies, presentations, and simulation
import GravidKravState from './GravidKravState'; interface GravidKravProps { state?: GravidKravState; } export default GravidKravProps;
package spring.corp.framework.configuracao;

import java.io.BufferedReader;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.util.Properties;

import spring.corp.framework.log.ManagerLog;
import spring.corp.framework.utils.StringUtils;

public class ManagerSetting {

	private static Properties p = new Properties();

	static {
		try {
			// Load the bundled defaults first, then optionally override them
			// with the file pointed to by the "configuracao" system property.
			load(ManagerSetting.class.getResourceAsStream("/configuracao.properties"));
			String file = System.getProperty("configuracao");
			if (file != null) {
				load(new FileInputStream(file));
			}
		} catch (IOException e) {
			throw new RuntimeException("ARQUIVO DE CONFIGURA\u00c7\u00c3O N\u00c3O ENCONTRADO.");
		} finally {
		}
	}

	public static void load(InputStream fis) {
		synchronized (p) {
			try {
				BufferedReader br = new BufferedReader(new InputStreamReader(fis));
				String line = null;
				while ((line = br.readLine()) != null) {
					if (!StringUtils.isBlank(line) && !line.startsWith("#")) {
						String[] values = line.split("[=]");
						if (ManagerLog.isDebug(ManagerSetting.class)) {
							try {
								ManagerLog.debug(ManagerSetting.class, "Carregando a seguinte chave: [" + values[0] + "] e Valor: [" + values[1] + "]");
							} catch (IndexOutOfBoundsException e) {
								ManagerLog.debug(ManagerSetting.class, "Erro ao ler o array. Tamanho: [" + values.length + "] Esperado: 2");
								if (values.length == 1) {
									ManagerLog.error(ManagerSetting.class, e, "Carregando a seguinte chave: [" + values[0] + "]");
								}
							}
						}
						// Do not call this class's put() method here, otherwise it would deadlock.
						p.put(values[0].trim(), values[1].trim());
					}
				}
			} catch (IOException e) {
				throw new RuntimeException("ERRO NA LEITURA DO ARQUIVO DE CONFIGURA\u00c7\u00c3O.");
			} finally {
			}
		}
	}

	public static void put(String key, String value) {
		synchronized (p) {
			p.put(key.trim(), value.trim());
		}
	}

	/**
	 * Retrieve a configuration value
	 * @param key (String) key of the configuration entry
	 * @return (String) configuration value
	 */
	public static String getSetting(String key) {
		synchronized (p) {
			return (String) p.get(key);
		}
	}
}
The multifarious oxytocin: a review

Oxytocin over the centuries has always been regarded as a drug of paramount importance during childbirth. Oxytocin, a peptide hormone, facilitates parturition and breastfeeding. This nine-amino-acid peptide has now been found to be associated with a wide variety of pathophysiological functions linked to social behaviours. It has been recently recognised as an important modulator of human social behaviour. Its correlation as a common factor in various neuropsychiatric disorders such as schizophrenia, personality disorders, autism, and mood and anxiety disorders has been highlighted. The anticipated role of oxytocin in osteoporosis, diabetes, and cancer has been prompting researchers to develop new therapeutic modalities. Over the course of the past 100 years, oxytocin has come a long way from being an insipid agent used as an aid in labour and delivery to a drug for neuropsychiatric conditions. This review article summarises the varied functions of oxytocin, its apt dosing when used therapeutically, and the development of new lines of treatment involving the use of oxytocin and its antagonists for multiple human disorders.

INTRODUCTION

Oxytocin is a naturally occurring peptide hormone synthesized in the supraoptic and paraventricular nuclei of the hypothalamus. Part of it is transported as secretory granules to the posterior pituitary gland for subsequent release into the bloodstream to reach the peripheral targets, and the rest is directly spread into other brain areas as a neurotransmitter. 1 Human studies confirm the role of oxytocin as a social hormone in addition to its importance in parturition, milk let-down, and maternal bonding. 2,3 Oxytocin decreases anxiety and cortisol release in response to social stress, and reduces amygdala activity to frightening and threatening visual images or emotional faces. 4,5 Oxytocin exhibits very weak antidiuretic properties, which mandates its infusion in an isotonic solution. 6 The pharmacokinetics of oxytocin reveals its inactivation by the enzyme chymotrypsin in the gastrointestinal tract, which necessitates its administration via the intravenous, intramuscular, or transmucosal route (nasal spray). The onset of action of intravenous oxytocin is 1-2 minutes, with a half-life of approximately 15 minutes. 7 When given intramuscularly, onset is 2-4 minutes and the action lasts for 30-60 minutes. 8 The intranasal route of administration enables frequent dosing and a chance of crossing the blood-brain barrier. 9 In this review article, the authors follow the whole course of the long journey of oxytocin from its origin to its present-day status and take a cursory look into its unfolding prospects.

Oxytocin in augmentation of labour

Oxytocin is the most extensively used uterotonic drug for augmenting labour or maintaining uterine contractility during labour. 10 The process of stimulating the uterus to increase the frequency, duration, and intensity of contractions after the onset of spontaneous labour is known as augmentation of labour. Oxytocin has been commonly used as a method to augment delayed labour when poor uterine contractions are assessed to be the underlying cause. Uterine motility depends on the formation of the contractile protein actomyosin in the presence of myosin light chain kinase, the Ca2+-dependent phosphorylating enzyme. Oxytocin, by binding with the oxytocin receptor, promotes contractions by increasing intracellular Ca2+, which in turn activates myosin light chain kinase.
Oxytocin acts on specific receptors in the muscle lining of the uterus, and the concentration of these receptors increases tremendously during pregnancy, reaching a peak in early labour at term. 11 Studies have shown that obese women are more likely to have prolonged labour, resulting in larger, more frequent applications of both synthetic oxytocin and cervical ripening methods. 12 The effects of oxytocin on labour induction also appear to be blunted by obesity. In a study conducted by Carlson NS et al., obese parturients with a higher BMI were shown to have significantly higher mean oxytocin infusion rates than obese women with a lower BMI, although both groups comprised spontaneously labouring, healthy, nulliparous women without pre-labour rupture of membranes. 13 Around 70% of obese parturients exhibit metabolic dysregulation, with changes in circulating hormones from adipose tissues such as leptin, apelin, visfatin, ghrelin, adiponectin, and free fatty acids. Obesity therefore results in altered physiology, which in turn alters oxytocin regulation and response. All these factors may affect myometrial contractility, as may the variation in expression and function of the oxytocin receptors present in the human myometrium caused by increased BMI. 14,15 Selin L et al. found no advantage for the routine use of high-dose (>6 mIU/min) oxytocin in the augmentation of labour. 16 A low-dose (3.3 mIU/min) oxytocin regimen is recommended to avoid unnecessary tachysystole and fetal distress. To induce labour, oxytocin doses in the range of 1-6 mIU/min are widely used; the upper end of this range is equivalent to approximately 0.3 IU per hour.

Oxytocin in third stage of labour in normal vaginal delivery

Post-partum haemorrhage (PPH) still remains a major global cause of maternal morbidity and mortality. Since PPH can also occur suddenly in low-risk pregnancies, both prophylactic and therapeutic approaches are essential for minimizing blood loss and preventing PPH in all parturients at delivery. 17 The active management of the third stage of labour (AMTSL) comprises prophylactic administration of a uterotonic agent prior to placental separation, early cord clamping and traction, and uterine massage. Since uterine atony is responsible for more than 80% of PPH cases, the administration of a uterotonic agent seems to be a mandatory component of AMTSL to prevent PPH. 17 The third stage of labour is facilitated by the use of oxytocin, which thus decreases the risk of post-partum haemorrhage. Guidelines recommend administration of intramuscular oxytocin 10 IU to the mother and controlled cord traction after vaginal delivery during the third stage of labour. This has reduced the incidence of PPH significantly. 18 Through the intravenous route, oxytocin 5 IU diluted to 5 ml in normal saline can be given slowly over 1-2 minutes to achieve the uterotonic effect. Nipple stimulation or breastfeeding as early as possible has been postulated to be a stimulus for the release of oxytocin and consequent uterine contractions. The uterine contractions reduce bleeding during the third stage of labour. 19 The same has been recommended by FOGSI (The Federation of Obstetric and Gynaecological Societies of India) as a possible adjuvant method for physiological prevention of PPH. However, more studies with adequate sample sizes are required to assess the impact of nipple stimulation in comparison to uterotonic agents like oxytocin.
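The unit arithmetic behind the infusion rates quoted above can be made explicit with a short sketch. This illustration is not part of the original review; it relies only on the standard unit definitions 1 IU = 1000 mIU and 1 hour = 60 minutes.

def miu_per_min_to_iu_per_hour(rate_miu_per_min):
    # 1 IU = 1000 mIU and 1 hour = 60 minutes, so multiply by 60 and divide by 1000.
    return rate_miu_per_min * 60.0 / 1000.0

for rate in (1, 3.3, 5, 6):
    print(rate, "mIU/min =", round(miu_per_min_to_iu_per_hour(rate), 2), "IU/hour")
# 1 mIU/min = 0.06 IU/hour, 3.3 mIU/min = 0.2 IU/hour, 5 mIU/min = 0.3 IU/hour, 6 mIU/min = 0.36 IU/hour,
# which is where the "approximately 0.3 IU per hour" figure for the upper induction range comes from.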
Thus, routine prophylactic administration of oxytocin as a component of AMTSL reduces blood loss and the incidence of PPH without much increase in the incidence of adverse effects. 20

Oxytocin during caesarean section

Oxytocin is administered to the parturient intravenously during caesarean section to significantly reduce the incidence of post-partum haemorrhage. Two categories of parturients undergo caesarean section. The first category comprises elective caesarean sections, in which the patient is not in labour and has not received any prior oxytocin. The second category comprises labouring patients taken up for caesarean section. In labouring patients who have already received oxytocin, there is down-regulation of receptors, and the resulting decreased responsiveness of uterine oxytocin receptors leads to a higher oxytocin requirement. 21 While evaluating the risk/benefit of prophylactic oxytocin, one must be aware of its adverse effects, which could include myocardial ischaemia, hypotension, increased cardiac output, tachycardia, flushing, nausea, vomiting, and a mild antidiuretic effect. The haemodynamic effects of oxytocin can be minimized by slow intravenous injection. Fast injection of oxytocin results in a greater increase in heart rate and decrease in mean arterial pressure, with no difference in blood loss, at a dose of 5 IU. 22 Recent trials have shown that doses of oxytocin below the commonly used 5 IU are equally effective and associated with fewer complications. A dose of 0.3-1 IU oxytocin given slowly over 1 minute, followed by an infusion of 5-10 IU/hour for 4 hours, represents an evidence-based approach for women at relatively lower risk of post-partum haemorrhage at elective caesarean section. 23 In the case of caesarean section in a labouring parturient, a slow 3 IU bolus of oxytocin followed by an infusion of 5-10 IU/hour for 4 hours is supported by limited evidence. 23 Many anaesthesiologists avoid repeated bolus administration of oxytocin due to oxytocin receptor desensitization in labouring parturients, thus preferring the use of second-line uterotonic protocols. 24

Oxytocin in breastfeeding

As soon as the baby sucks at the breast, sensory impulses are transmitted from the nipple to the mother's brain. As a result, the anterior and posterior lobes of the pituitary secrete prolactin and oxytocin respectively. Prolactin is responsible for milk secretion by the cells of the alveoli. The milk collected in the alveoli then flows into the ducts. 25 At times milk is ejected in fine streams. This is known as the "let-down reflex", the "milk ejection reflex", or the oxytocin reflex. The oxytocin reflex becomes conditioned to the mother's emotions and sensations such as the sight, smell, or touch of her baby. The baby should therefore be kept in skin-to-skin contact with the mother. Oxytocin induces a state of calmness and reduces stress. 26 Skin-to-skin contact helps both breastfeeding and emotional bonding. 27 Many women stop expressing milk early in the post-natal period because they get disheartened by their apparently poor milk production. This may correlate more to inadequate production of oxytocin than of prolactin. Oxytocin secretion, being sensitive to psychological stimuli, is easily inhibited by stress. Oxytocin production is stimulated by various sensory stimuli from the infant, which are missing in the case of a mother expressing milk in a neonatal unit for her preterm neonate. A study of nasal oxytocin in such mothers reported a dramatic effect of oxytocin on milk production in primigravid mothers.
28 Intranasal oxytocin has been shown to benefit women with quadriplegia who have lost the neuronal connection between the hypothalamus and the nipple. 29 Numerous studies suggest that oxytocin given during labour has a negative effect on breastfeeding, possibly by reducing sucking behaviour in newborns in a dose-dependent manner. 30

Oxytocin as the love hormone

Reproductive endocrinologists have found that oxytocin is not just the hormone of labour; it is the love hormone too. Oxytocin's role in social recognition, bonding, and orgasm has also been investigated. Social bonding is responsible for the survival of species, as it favours reproduction, imparts safety against predators and environmental variations, and enhances further brain development. 31 Lack of socialization results in various physical and mental disorders. Oxytocin and its receptors are important for "happiness" and building trust. It is an important brain substance for building trust, developing emotional relationships, and thus social bonding. Moreover, oxytocin has been shown to be involved in a plethora of social and affective disorders and physiological and pathophysiological behaviours, ranging from attachment security, paternal behaviour, mating, and motherhood to autism and obsessive-compulsive disorder. 32 Plasma oxytocin levels have been found to be higher amongst individuals who admit being in love. Oxytocin increases sexual receptivity, can counteract impotence, and can be expected to have an important role in the treatment of male infertility in days to come. The erectile tissues, i.e. the corpus cavernosum and corpus spongiosum, are one of the main peripheral targets of oxytocin. It is thought to be associated with ejaculation through the contraction of ejaculatory tissues, namely the bladder neck, prostatic urethra, and ejaculatory duct. 33 Studies have found increases in plasma oxytocin at orgasm in both males and females. 34 Maternal behaviour is specifically attributed to oxytocin. Virgin female sheep infused with oxytocin in the cerebrospinal fluid show maternal behaviour towards foreign lambs. 35

Oxytocin and autism spectrum disorders (ASD)

Autism spectrum disorder (ASD) is a neurodevelopmental disorder whose primary symptoms include deficits in social interaction and communication along with restricted and repetitive behaviours. The prevalence of ASD is currently 1 in 100, yet no medication has been established for the treatment of its symptoms. Modahl C et al. found that children with autism have lower plasma oxytocin levels in comparison to healthy controls of the same age group. 36 A defect in peptide processing of oxytocin was shown in a follow-up study of autistic individuals, with decreased plasma oxytocin associated with increased extended-peptide inactive forms of oxytocin derived from the same prohormone. 37 Therefore, exogenous oxytocin administration has been suggested to be effective in reversing social and communicative dysfunction in individuals with ASD. Guastella A et al. carried out a study which found that typically developed male adults who were administered 24 IU of oxytocin gazed more frequently and for longer at the eye region. 38 Oxytocin might facilitate interpersonal communication by improving eye contact, as eye contact is critical for such communication.
Various behavioural studies undertaken suggest that oxytocin improves a wide variety of social behaviours, including facial or vocal recognition of emotion, gazing at the eyes, and trust in another person; this facilitates socially acceptable behaviour while reducing repetitive behaviour. 39 Various neuroimaging studies taken up to elucidate the mechanism of the benefits of oxytocin administration in ASD individuals found significant changes in brain activation following oxytocin administration, resulting in behavioural improvement. Domes G et al. studied the effect of 24 IU oxytocin on the neural response in house- and face-matching tasks, followed by magnetic resonance scanning. 40 They found that amygdala activation was significantly increased in ASD individuals. However, in typically developed individuals, oxytocin administration reduced activation of the amygdala. Based on these findings, it was concluded that oxytocin reduced the responsiveness of the amygdala, suppressing fear and stress, in normal individuals, while increasing its responsiveness for face/place recognition in individuals with ASD. Oxytocin seems to be a promising agent that needs to be explored to detect changes in well-validated measures of social perception, social cognition, and repetitive behaviours.

Oxytocin and schizophrenia

Schizophrenia is a heterogeneous, debilitating, neuropsychiatric disorder characterized by positive and negative symptoms and cognitive deficits. The currently available antipsychotic drugs provide significant relief from the positive symptoms, such as auditory and visual hallucinations, delusions, and disorganized behaviour or speech, with little therapeutic effect on the negative symptoms and cognitive deficits, resulting in poor prognosis. Recently, studies have shown that stimulation of the oxytocin system might produce therapeutic effects on all the symptom domains of schizophrenia. Many studies have found an inverse relationship between the degree of negative symptoms (avolition, anhedonia, asociality, alogia) and levels of plasma oxytocin and oxytocin levels in cerebrospinal fluid. 41 Studies have shown that the ability of oxytocin to enhance trust towards strangers provides a possible mechanism for its therapeutic role in paranoid delusions (positive symptoms). Cacciotti-Saija C et al. reported that the addition of twice-daily intranasal oxytocin administration to six weeks of social cognitive training in patients with early psychosis showed a positive correlation with the reduction of negative symptoms (Scale for the Assessment of Negative Symptoms) and cognitive deficits. 42 The therapeutic effects of oxytocin can provide a ray of hope to patients with schizophrenia and their families that oxytocin may provide relief, especially from the debilitating negative symptoms and cognitive deficits. This may, in the near future, allow people with schizophrenia to lead more fulfilling lives.

Oxytocin and affective disorders

Among affective psychiatric disorders, substantial comorbidity exists between major depressive disorder (MDD) and anxiety. Major depressive disorder is characterized by depressed mood, anhedonia (loss of interest or pleasure in previously rewarding stimuli), sleep disturbances, anxiety, and sexual dysfunction. Oxytocin has now been implicated in a plethora of behaviours and neurochemical processes. The actions of oxytocin are mediated through the oxytocin receptor, which is a G protein-coupled receptor coupled to phospholipase C. This receptor is extensively distributed in the central nervous system.
43 Recent evidence has shown a role for oxytocin in complex behaviours, particularly anxiolysis. 44 Oxytocin may be of benefit in patients with MDD with comorbid anxiety or those with anxiety only. The anxiolytic action of oxytocin may be mediated through 5HT receptor activation. Early life stress has been found to result in increased anxiety- and depression-related behaviours and to increase the severity of the response to stress exposure in adulthood. Adverse early life experiences result in altered activity of the brain oxytocin system, as well as of its receptors, in adulthood, and thus increase the possibility of developing mental disorders later in life. In support of this, oxytocin has been shown to increase the sense of attachment security in adult males who suffer from insecure attachment patterns, which are usually the result of adverse early life experiences and can also result in the development of MDD. 45 Oxytocin knockout mice have altered social interactions, which can be reversed by intra-medial amygdala oxytocin infusion. 46 These findings suggested that oxytocin may be important in the development of social withdrawal/anxiety in MDD, and that exogenous oxytocin may be of therapeutic benefit in MDD patients with low attachment security. A possible mechanism of action of oxytocin in the treatment of MDD is its interaction with the serotonergic system; 5HT1A- and 2A-specific agonists have been shown to dose-dependently increase plasma oxytocin levels. SSRI treatment leads to loss of libido and anorgasmia, resulting in poor compliance. Sexual stimulation in males and females causes an increase in plasma oxytocin levels. Thus, combining oxytocin with SSRI treatment may help to reduce the concurrence of sexual dysfunction caused by the former and therefore improve compliance with antidepressant treatment. The potential of its synergistic actions and multiple interactions with other neurotransmitter and neuropeptide systems determines the importance of oxytocin for the fine-tuned balance of emotionality, stress coping, and complex social interactions that shape our personality and mental wellbeing.

Oxytocin and osteoporosis

Oxytocin has a peripheral, direct, and significant action on the skeleton through its stimulation of osteoblast formation and modulation of osteoclast formation. 47 The last phase of pregnancy and lactation corresponds to most of the fetal and postnatal bone growth, during which the mother is likely to lose ~120 g of calcium from her own skeleton. Hormonal adaptations comprising low estrogen and elevated parathormone levels facilitate maternal hyper-resorption of bone and inter-generational calcium transfer. 48 However, shortly after this profound bone loss, the mother's skeleton is rapidly repleted; otherwise, pregnancy- and lactation-related osteoporosis would occur. Oxytocin has been shown to maintain increased cell activity in bone, stimulating the proliferation of both bone-forming and bone-resorbing cells while keeping the amount of bone resorption well controlled. Complementary genetic and pharmacologic approaches reveal oxytocin as a new anabolic regulator of bone mass that might have utility in the treatment of human osteoporosis. 49 Bone loss due to bone resorption is accompanied by increased bone marrow adiposity, since osteoblasts and adipocytes share the same precursor cells and an inverse relationship has been shown to exist between the two lineages.
Both oxytocin and carbetocin (an oxytocin analogue) negatively modulate adipogenesis while promoting osteogenesis in both human multipotent adipose-derived stem (hMADS) cells and human bone marrow mesenchymal stromal cells. Clinically, lower plasma oxytocin levels were seen in postmenopausal women with osteoporosis than in their healthy counterparts. 50 Oxytocin administration therefore holds promise as a potential therapy for this disease. Oxytocin analogues could emerge as anabolic stimuli to restore the skeletal loss occurring after pregnancy and lactation or in postmenopausal women.

Oxytocin in diabetes and obesity

Oxytocin has emerged as a modality for treating diabetes and obesity. Oxytocin treatment lessens the cardiomyocyte death induced by ischemia-reperfusion by triggering pro-survival pathways within injured cardiomyocytes. Oxytocin treatment decreases cardiac apoptosis, fibrosis, and hypertrophy. In addition, oxytocin stimulates glucose uptake in both cardiac stem cells and cardiomyocytes and increases cell resistance to diabetic conditions. A role for oxytocin in lowering body weight, by mechanisms involving increased energy expenditure and reduced adiposity and food intake, has been shown. Reductions in body weight and changes in body composition can be obtained by central, peripheral, and intranasal oxytocin administration. In addition, the effect of oxytocin as a prosocial hormone may provide additional benefit in the treatment of complex diseases such as diabetes and metabolic syndrome. Oxytocin-mediated cardioprotection includes activation of the natriuretic peptides and nitric oxide, both of which increase the formation of cGMP in the heart, activation of cAMP-activated protein kinase, and inhibition of the excess reactive oxygen species produced as a consequence of ischemia. Considering the efficacy of intranasal oxytocin delivery in stimulating the synthesis of central and peripheral oxytocin, and in reducing obesity and hedonic eating habits, investigation into the role of combined intranasal oxytocin treatment and exercise training is warranted. Consequently, treatment with oxytocin might potentially improve cardiovascular outcomes in patients at risk for heart failure, especially in association with obesity and diabetes. 51

Oxytocin and cancer

Recently, research has focused on unravelling the involvement of oxytocin in cancer and its potential role as a cancer biomarker. Oxytocin effects may depend on cell type, the concentration of the hormone, its interactions with other hormones in the microenvironment, and the precise localization of its receptor on the cell membrane. Future research is needed to further elucidate the involvement of oxytocin in cancer and whether it could be a clinical cancer biomarker or therapeutic target. 52

CONCLUSION

In the current review, the authors have detailed the tale of the multifarious oxytocin, beginning right before pregnancy, continuing during birth and later, and travelling from the brain to the heart and throughout the body, modulating a wide range of physiological functions and emotions such as love, affection, attraction, happiness, and hatred after stress. The nonapeptide appears to play a pivotal role in modulating social behaviour, and the evidence for its role in a broad range of neuropsychiatric disorders is accumulating. Biochemical, pathophysiological, and psychological studies are expected to reinforce the development of new drugs, comprising oxytocin agonists and antagonists, for the treatment of various disorders such as osteoporosis, diabetes, and cancer.
{-# LANGUAGE OverloadedStrings #-} import Control.Applicative import Control.Monad import Data.List import Data.Maybe process [] = [] process [a] = [a] process (a:b:c) | a=='R' && b=='U' = 'D' : (process c) | a=='U' && b=='R' = 'D' :(process c) | otherwise = a: (process (b:c)) main = do getLine a<-getLine print $ length $ process a
Functional Magnetic Resonance Imaging Study of Thalamus Activation Induced by Different Intensity Stimulation

Functional magnetic resonance imaging (fMRI) was used to observe the activation response of the thalamus under different intensities of stimulation, and to explore the role of the thalamus in the pain regulation network. Ten normal subjects were selected, and contact heat stimulation (CHS) at 41 °C and 51 °C was applied to the dorsal side of the right forearm of all subjects. Task-mode CHS fMRI scans (the 41 °C CHS-fMRI group and the 51 °C CHS-fMRI group) were pre-processed using SPM8. A one-sample t-test was used for the intragroup analysis of the 41 °C CHS-fMRI group and the 51 °C CHS-fMRI group, to observe the activation of brain regions under the two different temperature CHS conditions and to record the activation intensity of the activated regions. A paired t-test was used to explore the difference in activation between the two temperature conditions, and brain regions with statistically significant differences were analyzed. The activation intensity of the activated brain regions was recorded, and the cause of the difference was analyzed. At the end of the trial, the visual analogue scale (VAS) was used to score the pain experienced by the subject at each stimulation temperature, and the subjects then underwent quantitative sensory testing (QST), including the measurement of warm sensation (WS) and heat pain (HP) thresholds. Under the same intensity of stimulation, the activation signal of the contralateral thalamus was stronger than that of the ipsilateral thalamus, showing a contralateral advantage, while the bilateral medial thalamus lacked this manifestation. The thalamus should be an important component of the pain-regulating network. This region exhibits a pattern of segregated activation, and each sub-region has its own unique stimulus-response characteristics, which helps in understanding the role of the thalamus in the processing of pain information.
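The statistical comparison described in this abstract (a one-sample t-test within each temperature condition and a paired t-test between conditions for the same ten subjects) can be illustrated with a brief sketch. The arrays below are hypothetical placeholders showing only the shape of such an analysis, not the study's data, and scipy.stats is assumed as the analysis library.

# Hypothetical sketch of the statistical comparison described above.
import numpy as np
from scipy import stats

# Hypothetical per-subject activation intensities (e.g., contralateral thalamus, arbitrary units)
act_41c = np.array([0.8, 1.1, 0.9, 1.3, 1.0, 0.7, 1.2, 0.9, 1.1, 1.0])
act_51c = np.array([1.4, 1.6, 1.2, 1.9, 1.5, 1.1, 1.8, 1.3, 1.7, 1.5])

# Within-condition: is the mean activation different from zero (i.e., is the region activated)?
t41, p41 = stats.ttest_1samp(act_41c, popmean=0.0)
t51, p51 = stats.ttest_1samp(act_51c, popmean=0.0)

# Between conditions: paired test, since the same ten subjects underwent both temperatures.
t_pair, p_pair = stats.ttest_rel(act_51c, act_41c)
print(t41, p41, t51, p51, t_pair, p_pair)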
/* * This is where you parse your lower-case arguments. * the format was defined in the lm_optstr assignment * in benchmark_init */ int benchmark_optswitch(int opt, char *optarg) { debug("benchmark_optswitch\n"); switch (opt) { case 'm': optm = atoi(optarg); break; case 's': opts = atoi(optarg); break; case 'w': optw = atoi(optarg); break; default: return (-1); } return (0); }
#ifndef ICOLLIDER_HPP
#define ICOLLIDER_HPP

#include <Core\Vector.h>
#include <Core\Rect.hpp>

template <class T>
class QuadTreeNode;

template<class T>
class ICollider {
public:
	QuadTreeNode<T>* node;
	bool update;
	Core::Rect<float&> rect;

	// The rect stores references to externally owned floats, so the constructor
	// must be given real lvalues; the previous default arguments of 0 could not
	// bind to non-const references and did not compile.
	inline ICollider(float& x, float& y, float& w, float& h)
		: node(nullptr), update(false), rect(x, y, w, h)
	{ }

	inline ICollider(const ICollider& copy)
		: node(copy.node), update(copy.update), rect(copy.rect)
	{ }

	inline ICollider& operator=(const ICollider& left) = delete;

	inline bool collides(const Core::FloatRect& other) const {
		float x = other.x, y = other.y, w = other.w, h = other.h;
		return rect.intersects(Core::Rect<float&>(x, y, w, h));
	}

	inline bool collides(const Core::Rect<float&>& other) const {
		return rect.intersects(other);
	}

	inline bool collides(const Core::Vector2& other) const {
		// Treat the point as a 1x1 rect; local copies are needed because
		// Rect<float&> cannot reference the const members or literals directly.
		float x = other.x, y = other.y, w = 1.f, h = 1.f;
		return rect.intersects(Core::Rect<float&>(x, y, w, h));
	}

	inline bool collides(const ICollider* other) const {
		return rect.intersects(other->rect);
	}

	inline bool needUpdate() const {
		return update;
	}

	virtual void Update() { }
};

#endif
/** * Merge two arrays into a sorted array * @param a the first array * @param b the second array */ private void mergeArray(int[]a,int[]b) { int[]c = new int[a.length+b.length]; int s = 0; for(int i = 0; i < c.length; i++) { if(i<a.length) { c[i]=a[i]; } else { c[i]=b[s]; s++; } } sort(c); print(c); }
Mitochondrial DNA: Epigenetics and environment Maintenance of the mitochondrial genome is essential for proper cellular function. For this purpose, mitochondrial DNA (mtDNA) needs to be faithfully replicated, transcribed, translated, and repaired in the face of constant onslaught from endogenous and environmental agents. Although only 13 polypeptides are encoded within mtDNA, the mitochondrial proteome comprises over 1500 proteins that are encoded by nuclear genes and translocated to the mitochondria for the purpose of maintaining mitochondrial function. Regulation of mtDNA and mitochondrial proteins by epigenetic changes and post‐translational modifications facilitate crosstalk between the nucleus and the mitochondria and ultimately lead to the maintenance of cellular health and homeostasis. DNA methyl transferases have been identified in the mitochondria implicating that methylation occurs within this organelle; however, the extent to which mtDNA is methylated has been debated for many years. Mechanisms of demethylation within this organelle have also been postulated, but the exact mechanisms and their outcomes is still an active area of research. Mitochondrial dysfunction in the form of altered gene expression and ATP production, resulting from epigenetic changes, can lead to various conditions including aging‐related neurodegenerative disorders, altered metabolism, changes in circadian rhythm, and cancer. Here, we provide an overview of the epigenetic regulation of mtDNA via methylation, long and short noncoding RNAs, and post‐translational modifications of nucleoid proteins (as mitochondria lack histones). We also highlight the influence of xenobiotics such as airborne environmental pollutants, contamination from heavy metals, and therapeutic drugs on mtDNA methylation. Environ. Mol. Mutagen., 60:668–682, 2019. © 2019 Wiley Periodicals, Inc.
Combined Partial Test Vector Reuse and FDR Coding for Two Dimensional SoC Test Compression

This paper proposes a novel approach to core-based SoC test compression. Research shows that almost all test vectors have parts in common. Therefore, there exists a vector from which parts of each test vector from the different test sets can be extracted. Based on this, we first attempt to find a vector, named the overlapped vector, which contains parts of each test vector and whose length is shorter than the sum of the individual test vector lengths. Second, the overlapped test vectors are further compressed utilizing frequency-directed run-length (FDR) coding. Because the test application time is proportional to the length of the test vectors, the proposal achieves as short a test time as possible. Experimental results demonstrate that the proposed method obtains reduced test application time and a significant test data compression rate.
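To illustrate the FDR coding step referred to in the abstract, the following sketch encodes runs of 0s terminated by a 1 using the frequency-directed run-length code as commonly described by Chandra and Chakrabarty. It is a simplified illustration under that assumption, not the authors' implementation, and it omits the overlapped-vector construction.

# Simplified sketch of frequency-directed run-length (FDR) encoding:
# each run of 0s terminated by a 1 maps to a variable-length codeword made of
# a group prefix and an equal-length tail. Group k covers run lengths
# 2**k - 2 .. 2**(k+1) - 3, with prefix (k-1)*'1' + '0' and a k-bit tail.

def fdr_codeword(run_length):
    k = 1
    while run_length > 2 ** (k + 1) - 3:   # find the group containing this run length
        k += 1
    prefix = '1' * (k - 1) + '0'
    offset = run_length - (2 ** k - 2)     # position of the run length within its group
    tail = format(offset, 'b').zfill(k)
    return prefix + tail

def fdr_encode(bits):
    # Assumes the vector ends with a 1; trailing 0s would need an explicit padding rule.
    out, run = [], 0
    for b in bits:
        if b == '0':
            run += 1
        else:
            out.append(fdr_codeword(run))
            run = 0
    return ''.join(out)

# Example: runs of length 2 and 6 -> "1000" followed by "110000"
print(fdr_encode("0010000001"))   # prints "1000110000"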
#include <cctbx/boost_python/flex_fwd.h>
#include <cctbx/sgtbx/space_group_type.h>
#include <cctbx/sgtbx/space_group_hash.h>
#include <boost/python/tuple.hpp>
#include <boost/python/class.hpp>
#include <boost/python/args.hpp>
#include <boost/python/return_arg.hpp>
#include <scitbx/boost_python/utils.h>
#include <boost_adaptbx/hash.h>

namespace cctbx { namespace sgtbx { namespace boost_python {

namespace {

  struct space_group_wrappers : boost::python::pickle_suite, boost_adaptbx::py_hashable<space_group>
  {
    typedef space_group w_t;

    static rt_mx
    getitem(w_t const& o, std::size_t i_op)
    {
      if (i_op >= o.order_z()) scitbx::boost_python::raise_index_error();
      return o(i_op);
    }

    static rt_mx
    call_3(w_t const& o, std::size_t i_ltr, std::size_t i_inv, std::size_t i_smx)
    {
      return o(i_ltr, i_inv, i_smx);
    }

    static boost::python::tuple
    getinitargs(w_t const& o)
    {
      return boost::python::make_tuple(o.type().hall_symbol());
    }

    /* Must tidy the spacegroup order of operations because
       asu_mappings.find_i_sym(i_seq, rt_mx) rely on the order being retained.
       We do this in a setstate method that ignores its arguments and only
       calls make_tidy() on itself. Likewise the corresponding getstate() is
       a dummy returning an empty tuple.
    */
    static boost::python::tuple
    getstate(w_t const& o)
    {
      return boost::python::tuple();
    }

    static void
    setstate(w_t& o, boost::python::tuple state)
    {
      o.make_tidy();
    }

    static void
    wrap()
    {
      using namespace boost::python;
      class_<w_t>("space_group")
        .def(init<parse_string&, optional<bool, bool, bool, int> >((
          arg("hall_symbol"),
          arg("pedantic")=false,
          arg("no_centring_type_symbol")=false,
          arg("no_expand")=false,
          arg("t_den")=sg_t_den)))
        .def(init<std::string const&, optional<bool, bool, bool, int> >((
          arg("hall_symbol"),
          arg("pedantic")=false,
          arg("no_centring_type_symbol")=false,
          arg("no_expand")=false,
          arg("t_den")=sg_t_den)))
        .def(init<space_group_symbols const&, optional<int> >((
          arg("space_group_symbols"),
          arg("t_den")=sg_t_den)))
        .def(init<space_group const&>((arg("other"))))
        .def("reset", &w_t::reset, (arg("t_den")=sg_t_den))
        .def("expand_ltr", &w_t::expand_ltr, return_self<>(), (arg("new_t")))
        .def("expand_inv", &w_t::expand_inv, return_self<>(), (arg("new_inv_t")))
        .def("expand_smx", (space_group&(w_t::*)(rt_mx const&)) &w_t::expand_smx, return_self<>(), (arg("new_smx")))
        .def("expand_smx", (space_group&(w_t::*)(std::string const&)) &w_t::expand_smx, return_self<>(), (arg("smx_symbol")))
        .def("expand_conventional_centring_type", &w_t::expand_conventional_centring_type, (arg("symbol")))
        .def("parse_hall_symbol", &w_t::parse_hall_symbol, (
          arg("hall_symbol"),
          arg("pedantic")=false,
          arg("no_centring_type_symbol")=false))
        .def("change_basis", &w_t::change_basis, (arg("cb_op")))
        .def("change_of_origin_realising_origin_centricity", &w_t::change_of_origin_realising_origin_centricity)
        .def("r_den", &w_t::r_den)
        .def("t_den", &w_t::t_den)
        .def("order_p", &w_t::order_p)
        .def("order_z", &w_t::order_z)
        .def("__len__", &w_t::order_z)
        .def("n_equivalent_positions", &w_t::n_equivalent_positions)
        .def("n_ltr", &w_t::n_ltr)
        .def("is_centric", (bool(w_t::*)() const) &w_t::is_centric)
        .def("is_origin_centric", &w_t::is_origin_centric)
        .def("f_inv", &w_t::f_inv)
        .def("n_smx", &w_t::n_smx)
        .def("__call__", call_3, (
          arg("i_ltr"),
          arg("i_inv"),
          arg("i_smx")))
        .def("__call__", getitem, (arg("i_op")))
        .def("__getitem__", getitem)
        .def("make_tidy", &w_t::make_tidy, return_self<>())
        .def("is_tidy", &w_t::is_tidy)
        .def("contains", &w_t::contains, (arg("smx")))
        .def("__contains__", &w_t::contains)
.def("__eq__", &w_t::operator==) .def("__ne__", &w_t::operator!=) .def("__hash__", py_hash) .def("conventional_centring_type_symbol", &w_t::conventional_centring_type_symbol) .def("z2p_op", &w_t::z2p_op, ( arg("r_den")=cb_r_den, arg("t_den")=cb_t_den)) .def("construct_z2p_op", &w_t::construct_z2p_op, ( arg("r_den")=cb_r_den, arg("t_den")=cb_t_den)) .def("is_chiral", &w_t::is_chiral) .def("is_sys_absent", (bool(w_t::*)(miller::index<> const&) const) &w_t::is_sys_absent, (arg("miller_index"))) .def("is_sys_absent", (af::shared<bool>(w_t::*) (af::const_ref<miller::index<> > const&) const) &w_t::is_sys_absent, (arg("miller_indices"))) .def("is_centric", (bool(w_t::*)(miller::index<> const&) const) &w_t::is_centric, (arg("miller_index"))) .def("is_centric", (af::shared<bool>(w_t::*) (af::const_ref<miller::index<> > const&) const) &w_t::is_centric, (arg("miller_indices"))) .def("phase_restriction", &w_t::phase_restriction, ( arg("miller_index"))) .def("is_valid_phase", &w_t::is_valid_phase, ( arg("miller_index"), arg("phi"), arg("deg")=false, arg("tolerance")=1e-5)) .def("nearest_valid_phases", &w_t::nearest_valid_phases, ( arg("miller_indices"), arg("phases"), arg("deg")=false)) .def("multiplicity", (int(w_t::*)(miller::index<> const&, bool) const) &w_t::multiplicity, (arg("miller_index"))) .def("multiplicity", (af::shared<int>(w_t::*) (af::const_ref<miller::index<> > const&, bool) const) &w_t::multiplicity, (arg("miller_indices"))) .def("epsilon", (int(w_t::*)(miller::index<> const&) const) &w_t::epsilon, (arg("miller_index"))) .def("epsilon", (af::shared<int>(w_t::*) (af::const_ref<miller::index<> > const&) const) &w_t::epsilon, (arg("miller_indices"))) .def("multiplicity", (int(w_t::*)(vec3_rat const&) const) &w_t::multiplicity, (arg("site"))) .def("average_unit_cell", &w_t::average_unit_cell, ( arg("unit_cell"))) .def("is_compatible_unit_cell", &w_t::is_compatible_unit_cell, ( arg("unit_cell"), arg("relative_length_tolerance")=0.01, arg("absolute_angle_tolerance")=1.)) .def("average_u_star", (scitbx::sym_mat3<double>(w_t::*)( scitbx::sym_mat3<double> const&) const) &w_t::average_u_star, ( arg("u_star"))) .def("build_derived_acentric_group", &w_t::build_derived_acentric_group) .def("build_derived_group", &w_t::build_derived_group) .def("build_derived_reflection_intensity_group", &w_t::build_derived_reflection_intensity_group, ( arg("anomalous_flag"))) .def("build_derived_patterson_group", &w_t::build_derived_patterson_group) .def("build_derived_point_group", &w_t::build_derived_point_group) .def("build_derived_laue_group", &w_t::build_derived_laue_group) .def("point_group_type", &w_t::point_group_type) .def("laue_group_type", &w_t::laue_group_type) .def("crystal_system", &w_t::crystal_system) .def("match_tabulated_settings", &w_t::match_tabulated_settings) .def("gridding", &w_t::gridding) .def("refine_gridding", (sg_vec3(w_t::*)(sg_vec3 const&) const) &w_t::refine_gridding, (arg("grid"))) .def("all_ops", &w_t::all_ops, (arg("mod")=0, arg("cancel")=false)) .def("unique", &w_t::unique, (arg("special_op"))) .def("type", &w_t::type) .def_pickle(space_group_wrappers()) ; } }; } // namespace <anoymous> void wrap_space_group() { space_group_wrappers::wrap(); } }}} // namespace cctbx::sgtbx::boost_python
    def __mlx_irq_to_queue_idx(self, irq):
        # Map a Mellanox fast-path IRQ description to its completion-queue index;
        # return sys.maxsize when the IRQ is not a mlx4/mlx5 fast-path IRQ.
        mlx5_fp_irq_re = re.compile(r"mlx5_comp(\d+)")
        mlx4_fp_irq_re = re.compile(r"mlx4\-(\d+)")

        m5 = mlx5_fp_irq_re.search(self.__irqs2procline[irq])
        if m5:
            return int(m5.group(1))
        else:
            m4 = mlx4_fp_irq_re.search(self.__irqs2procline[irq])
            if m4:
                return int(m4.group(1))

        return sys.maxsize
def zero_cross(sig): return len(np.where(np.diff(np.sign(sig)))[0])
def mp3gen(): global music_listing if music_listing is not None: return if sys.platform != 'darwin' \ and sys.platform != 'win32' \ and not sys.platform.startswith('linux'): print "Music only enabled on darwin, win32, and linux." return music_listing = [] for root, dirs, files in os.walk(profile.data['music_path']): for filename in files: if os.path.splitext(filename)[1] == ".mp3": if sys.platform == 'darwin': music_listing.append([ 'afplay', os.path.join(root, filename.lower())]) else: music_listing.append([ 'mpg123', os.path.join(root, filename.lower())]) elif os.path.splitext(filename)[1] in sox_file_types: if sys.platform != 'darwin': music_listing.append([ 'play', os.path.join(root, filename.lower())])
/*****************************************************************************/ /** Loads the core skeleton. * * This function loads the core skeleton from a file. * * @param strFilename The file from which the core skeleton should be loaded * from. * * @return One of the following values: * \li \b true if successful * \li \b false if an error happend *****************************************************************************/ bool CalCoreModel::loadCoreSkeleton(const std::string& strFilename) { m_pCoreSkeleton = CalLoader::loadCoreSkeleton(strFilename); return bool(m_pCoreSkeleton); }
# MC621 - Desafios de Programacao II - 2s2020
# Contest: 20/11/2020
# Problem F: Buy the String

# read the number of test cases
t = int(input())

# process each test case
for _ in range(t):
    # test case description: length n, cost of a '0', cost of a '1', cost to flip a character
    n, c0, c1, h = map(int, input().split())
    # read the binary string in question
    s = list(input())
    # compute the minimum cost to buy the string
    if( h + c1 <= c0):
        n = len(s)*c1 + s.count('0')*h
    elif( h + c0 <= c1):
        n = len(s)*c0 + s.count('1')*h
    else:
        n = s.count('0')*c0 + s.count('1')*c1
    # print the result
    print(n)
n = int(input())
x = list(map(int, input().split()))

# Try every candidate value p and keep the minimum total squared distance to the points in x.
pdlist = []
for p in range(max(x) + 1):
    pdlist.append(sum((xi - p) ** 2 for xi in x))

print(min(pdlist))
#include "Classes/ObjectPlacement/ObjectPlacementModule.h" #include "Classes/ObjectPlacement/Private/ObjectPlacementData.h" #include "Classes/ObjectPlacement/Private/ObjectPlacementSystem.h" #include "Classes/Qt/Scene/SceneEditor2.h" #include "Classes/Qt/Scene/System/ModifSystem.h" #include "Classes/SceneManager/SceneData.h" #include "Classes/Selection/Selection.h" #include <TArc/Utils/ModuleCollection.h> #include <TArc/WindowSubSystem/ActionUtils.h> #include <TArc/WindowSubSystem/QtAction.h> #include <Entity/ComponentUtils.h> #include <Scene3D/Components/ComponentHelpers.h> void ObjectPlacementModule::OnContextCreated(DAVA::TArc::DataContext* context) { SceneData* sceneData = context->GetData<SceneData>(); SceneEditor2* scene = sceneData->GetScene().Get(); std::unique_ptr<ObjectPlacementData> objectPlacementData = std::make_unique<ObjectPlacementData>(); objectPlacementData->objectPlacementSystem.reset(new ObjectPlacementSystem(scene)); scene->AddSystem(objectPlacementData->objectPlacementSystem.get(), DAVA::ComponentUtils::MakeMask<DAVA::RenderComponent>(), DAVA::Scene::SCENE_SYSTEM_REQUIRE_PROCESS); context->CreateData(std::move(objectPlacementData)); } void ObjectPlacementModule::OnContextDeleted(DAVA::TArc::DataContext* context) { SceneData* sceneData = context->GetData<SceneData>(); SceneEditor2* scene = sceneData->GetScene().Get(); ObjectPlacementData* objectPlacementData = context->GetData<ObjectPlacementData>(); scene->RemoveSystem(objectPlacementData->objectPlacementSystem.get()); } void ObjectPlacementModule::PostInit() { const QString toolBarName("Main Toolbar"); const QString editMenuName("Edit"); const QString centerPivotPointName("actionCenterPivotPoint"); using namespace DAVA::TArc; ContextAccessor* accessor = GetAccessor(); UI* ui = GetUI(); FieldDescriptor sceneDataFieldDescr; sceneDataFieldDescr.fieldName = DAVA::FastName(SceneData::scenePropertyName); sceneDataFieldDescr.type = DAVA::ReflectedTypeDB::Get<SceneData>(); FieldDescriptor landscapeSnapFieldDescr; landscapeSnapFieldDescr.fieldName = DAVA::FastName(ObjectPlacementData::snapToLandscapePropertyName); landscapeSnapFieldDescr.type = DAVA::ReflectedTypeDB::Get<ObjectPlacementData>(); // Place on landscape { QtAction* action = new QtAction(accessor, QIcon(":/QtIcons/modify_placeonland.png"), QString("Place on landscape")); { // enable/disable action->SetStateUpdationFunction(QtAction::Enabled, sceneDataFieldDescr, [](const DAVA::Any& value) -> DAVA::Any { return value.CanCast<SceneData::TSceneType>() && value.Cast<SceneData::TSceneType>().Get() != nullptr; }); } action->setShortcuts({ QKeySequence(Qt::Key_P) }); action->setShortcutContext(Qt::WindowShortcut); connections.AddConnection(action, &QAction::triggered, DAVA::MakeFunction(this, &ObjectPlacementModule::OnPlaceOnLandscape)); ActionPlacementInfo placementInfo; placementInfo.AddPlacementPoint(CreateMenuPoint(MenuItems::menuEdit, { InsertionParams::eInsertionMethod::AfterItem, centerPivotPointName })); placementInfo.AddPlacementPoint(CreateToolbarPoint(toolBarName)); ui->AddAction(mainWindowKey, placementInfo, action); } // Snap to landscape { QtAction* action = new QtAction(accessor, QIcon(":/QtIcons/modify_snaptoland.png"), "Enable snap to landscape"); { // check/uncheck action->SetStateUpdationFunction(QtAction::Checked, landscapeSnapFieldDescr, [](const DAVA::Any& value) -> DAVA::Any { return value.Get<bool>(false); }); } { // enable/disable action->SetStateUpdationFunction(QtAction::Enabled, sceneDataFieldDescr, [](const DAVA::Any& value) -> DAVA::Any { 
return value.CanCast<SceneData::TSceneType>() && value.Cast<SceneData::TSceneType>().Get() != nullptr; }); } { // tooltip text action->SetStateUpdationFunction(QtAction::Text, landscapeSnapFieldDescr, [](const DAVA::Any& value) -> DAVA::Any { bool checked = value.Get<bool>(false); if (checked) return DAVA::String("Disable snap to landscape"); return DAVA::String("Enable snap to landscape"); }); } connections.AddConnection(action, &QAction::triggered, DAVA::MakeFunction(this, &ObjectPlacementModule::OnSnapToLandscape)); ActionPlacementInfo placementInfo; placementInfo.AddPlacementPoint(CreateMenuPoint(MenuItems::menuEdit, { InsertionParams::eInsertionMethod::AfterItem, "Place on landscape" })); placementInfo.AddPlacementPoint(CreateToolbarPoint(toolBarName)); ui->AddAction(mainWindowKey, placementInfo, action); } // Place and align { QtAction* action = new QtAction(accessor, QIcon(":/QtIcons/modify_placeonobj.png"), QString("Place and align")); { // enable/disable action->SetStateUpdationFunction(QtAction::Enabled, sceneDataFieldDescr, [](const DAVA::Any& value) -> DAVA::Any { return value.CanCast<SceneData::TSceneType>() && value.Cast<SceneData::TSceneType>().Get() != nullptr; }); } action->setShortcuts({ QKeySequence(Qt::CTRL + Qt::Key_P) }); action->setShortcutContext(Qt::WindowShortcut); connections.AddConnection(action, &QAction::triggered, DAVA::MakeFunction(this, &ObjectPlacementModule::OnPlaceAndAlign)); ActionPlacementInfo placementInfo; placementInfo.AddPlacementPoint(CreateMenuPoint(MenuItems::menuEdit, { InsertionParams::eInsertionMethod::AfterItem, "Place on landscape" })); placementInfo.AddPlacementPoint(CreateToolbarPoint(toolBarName)); ui->AddAction(mainWindowKey, placementInfo, action); } } void ObjectPlacementModule::OnPlaceOnLandscape() { DAVA::TArc::DataContext* context = GetAccessor()->GetActiveContext(); ObjectPlacementData* data = context->GetData<ObjectPlacementData>(); data->objectPlacementSystem->PlaceOnLandscape(); } void ObjectPlacementModule::OnSnapToLandscape() { DAVA::TArc::DataContext* context = GetAccessor()->GetActiveContext(); ObjectPlacementData* data = context->GetData<ObjectPlacementData>(); bool snapToLandscapeEnabled = data->GetSnapToLandscape(); data->SetSnapToLandscape(!snapToLandscapeEnabled); } void ObjectPlacementModule::OnPlaceAndAlign() { DAVA::TArc::DataContext* context = GetAccessor()->GetActiveContext(); ObjectPlacementData* data = context->GetData<ObjectPlacementData>(); data->objectPlacementSystem->PlaceAndAlign(); } DAVA_VIRTUAL_REFLECTION_IMPL(ObjectPlacementModule) { DAVA::ReflectionRegistrator<ObjectPlacementModule>::Begin() .ConstructorByPointer() .End(); } DECL_GUI_MODULE(ObjectPlacementModule);
#include <stdio.h>
#include <string.h>
#include <stdlib.h>

struct range {
    int start;
    int end;
};

#define max(a,b) ((a) > (b) ? (a) : (b))
#define min(a,b) ((a) < (b) ? (a) : (b))

#define BUF_SIZE 20

int main(void)
{
    struct range range0, range1;
    char line[BUF_SIZE];
    FILE* fp = stdin;

    if (!fgets(line, BUF_SIZE, fp))
        return 0;
    sscanf(line, "%d %d %d %d", &range0.start, &range0.end, &range1.start, &range1.end);

    int s = max(range0.start, range1.start);
    int g = min(range0.end, range1.end);
    printf("%d\n", (g - s > 0) * (g - s));

    return 0;
}
a, b, c, d = map(int, input().split(' '))
if c * 2 < d:
    print(-1)
    exit()
elif (d * 2 >= a * 2) or (d * 2 >= b * 2):
    print(-1)
    exit()
else:
    res = 0
    for i in range(2 * a):
        if (i >= c) and (i <= 2 * c) and (i >= d) and (i <= 2 * d):
            res = i
            break
    if res != 0:
        print(2 * a, 2 * b, i, sep='\n', end='')
    else:
        print(-1)
/*****************************************************************************
* CSRDlg::CreateRecoContext *
*---------------------------*
*   Description:
*       This creates a new instance of the recognizer with whatever is the
*       current defaults for the recognizer.
*       The "fInitialize" argument is FALSE by default.  If set, it does
*       NOT attempt to set the m_pCurUserToken reco profile and instead
*       just picks up whatever CoCreateInstance() on the shared recognizer
*       gave it.
*       NOTE:   The caller is responsible for displaying error messages to
*       the user when this fails.
*   Return:
*       S_OK
*       Failed HRESULT from recognizer/recocontext initialization functions
****************************************************************** BECKYW ***/
HRESULT CSRDlg::CreateRecoContext(BOOL *pfContextInitialized, BOOL fInitialize, ULONG ulFlags)
{
    if ( m_cpRecoCtxt )
    {
        m_cpRecoCtxt->SetNotifySink( NULL );
    }
    m_cpRecoCtxt.Release();

    HRESULT hr;

    if ( m_cpRecoEngine )
    {
        m_cpRecoEngine->SetRecoState( SPRST_INACTIVE );
    }

    if ( m_cpRecoEngine )
    {
        SPRECOSTATE recostate;
        hr = m_cpRecoEngine->GetRecoState( &recostate );
        if ( SUCCEEDED( hr ) && (SPRST_ACTIVE_ALWAYS == recostate) )
        {
            hr = m_cpRecoEngine->SetRecoState( SPRST_INACTIVE );
        }

        if ( SUCCEEDED( hr ) && (ulFlags & SRDLGF_RECOGNIZER) )
        {
            hr = m_cpRecoEngine->SetRecognizer( NULL );
        }

        if ( SUCCEEDED( hr ) && (ulFlags & SRDLGF_AUDIOINPUT) )
        {
            hr = m_cpRecoEngine->SetInput( NULL, TRUE );
        }

        if ( (SPRST_ACTIVE_ALWAYS == recostate) )
        {
            HRESULT hrRecoState = m_cpRecoEngine->SetRecoState( recostate );
            if ( FAILED( hrRecoState ) )
            {
                hr = hrRecoState;
            }
        }
    }
    else
    {
        hr = m_cpRecoEngine.CoCreateInstance( CLSID_SpSharedRecognizer );
    }

    if ( !fInitialize && SUCCEEDED( hr ) )
    {
        hr = m_cpRecoEngine->SetRecoProfile(m_pCurUserToken);
    }

    if ( SUCCEEDED( hr ) )
    {
        hr = m_cpRecoEngine->CreateRecoContext(&m_cpRecoCtxt);
    }

    if ( SUCCEEDED( hr ) )
    {
        hr = m_cpRecoCtxt->SetNotifyWindowMessage(m_hDlg, WM_RECOEVENT, 0, 0);
    }

    if ( SUCCEEDED( hr ) )
    {
        const ULONGLONG ullInterest = SPFEI(SPEI_SR_AUDIO_LEVEL);
        hr = m_cpRecoCtxt->SetInterest(ullInterest, ullInterest);
    }

    if ( pfContextInitialized )
    {
        *pfContextInitialized = SUCCEEDED( hr );
    }

    if ( FAILED( hr ) )
    {
        m_cpRecoCtxt.Release();
        m_cpRecoEngine.Release();
        return hr;
    }

#ifdef _DEBUG
    CComPtr<ISpObjectToken> cpCurDefaultToken;
    SpGetDefaultTokenFromCategoryId(SPCAT_RECOGNIZERS, &cpCurDefaultToken);

    CComPtr<ISpObjectToken> cpRecognizerToken;
    m_cpRecoEngine->GetRecognizer( &cpRecognizerToken );

    if ( cpRecognizerToken )
    {
        CSpDynamicString dstrCurDefaultToken;
        cpCurDefaultToken->GetId( &dstrCurDefaultToken );

        CSpDynamicString dstrRecognizerToken;
        cpRecognizerToken->GetId( &dstrRecognizerToken );

        if ( 0 != wcsicmp( dstrCurDefaultToken, dstrRecognizerToken ) )
        {
            OutputDebugString( L"Warning: We just created a recognizer that isn't the default!\n" );
        }
    }
#endif

    hr = m_cpRecoEngine->SetRecoState( SPRST_ACTIVE_ALWAYS );

    return (hr);
}
// NewClient creates a pubsubhubbub client
func NewClient(hubURL string, callbackURL string, from string) *Client {
	return &Client{
		hubURL:      hubURL,
		callbackURL: callbackURL,
		from:        from,
		httpClient:  &http.Client{},
	}
}
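A minimal usage sketch for the constructor above, written as an example test that would sit in the same package as the Client struct. The package name, file name, and sample URLs are assumptions; only NewClient and the fields it populates come from the source.

// pubsubhubbub_example_test.go (hypothetical file and package name)
package pubsubhubbub

import "fmt"

func ExampleNewClient() {
	c := NewClient(
		"https://pubsubhubbub.appspot.com/", // hub endpoint (illustrative value)
		"https://example.com/push/callback", // URL the hub will POST updates to
		"subscriber@example.com",            // "from" identity string
	)
	// The constructor wires up a default http.Client, so the value is ready to use.
	fmt.Println(c.hubURL != "" && c.callbackURL != "" && c.httpClient != nil)
	// Output: true
}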
import * as cdk from '@aws-cdk/core';
import * as lambda from '@aws-cdk/aws-lambda';
import * as apigateway from '@aws-cdk/aws-apigateway';
import * as iam from '@aws-cdk/aws-iam';

export class HtmlToPdfPuppeteerLambdaStack extends cdk.Stack {
  constructor(scope: cdk.Construct, id: string, props?: cdk.StackProps) {
    super(scope, id, props);

    const htmlToPdfLambda = new lambda.Function(this, 'HtmlToPdfLambda', {
      runtime: lambda.Runtime.NODEJS_12_X,
      code: lambda.Code.fromAsset('./lambda/dist'),
      layers: [
        // https://github.com/shelfio/chrome-aws-lambda-layer
        lambda.LayerVersion.fromLayerVersionArn(this, 'ChromeLayer',
          'arn:aws:lambda:eu-west-1:764866452798:layer:chrome-aws-lambda:19'),
      ],
      handler: 'index.handler',
      timeout: cdk.Duration.seconds(30),
      memorySize: 1600,
    });

    const api = new apigateway.RestApi(this, "HtmlToPdfApi", {
      restApiName: "HtmlToPdfApi",
      description: "HTML to PDF api",
      defaultMethodOptions: {
        authorizationType: apigateway.AuthorizationType.IAM,
      },
      policy: new iam.PolicyDocument({
        statements: [
          new iam.PolicyStatement({
            effect: iam.Effect.ALLOW,
            principals: [new iam.AccountPrincipal(this.account)],
            actions: ["execute-api:Invoke"],
            resources: ["execute-api:/*"],
          }),
        ],
      }),
    });

    const resource = api.root.addResource('html-to-pdf');
    const integration = new apigateway.LambdaIntegration(htmlToPdfLambda);
    resource.addMethod('POST', integration);
  }
}
#include <stdio.h>
#include <stdlib.h>

int main()
{
    int a;
    long int x, y, y1, y2;
    scanf("%d%ld%ld", &a, &x, &y);
    y1 = y / a;
    y2 = y % a;
    if (y2 != 0 && (y1 == 0 || y1 % 2 == 1) && (2 * x < a && 2 * x > -1 * a))
    {
        if (y1 == 0)
            printf("1\n");
        else
            printf("%ld\n", ((y1 - 1) / 2) * 3 + 2);
    }
    else if (y2 != 0 && y1 != 0 && y1 % 2 == 0 && x != 0 && (x < a && x > -1 * a))
    {
        if (x < a && x > 0)
            printf("%ld\n", (y1 / 2) * 3 + 1);
        else if (x > -1 * a && x < 0)
            printf("%ld\n", (y1 / 2) * 3);
    }
    else
        printf("-1\n");
    return 0;
}
package main

import (
	"fmt"
	"math/big"
)

var m, n, z big.Int

func init() {
	m.SetString("2562047788015215500854906332309589561", 10)
	n.SetString("6795454494268282920431565661684282819", 10)
}

func main() {
	fmt.Println(z.Mul(z.Div(&m, z.GCD(nil, nil, &m, &n)), &n))
}
//\Least-common-multiple\least-common-multiple.go
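The program above relies on the identity lcm(m, n) = m / gcd(m, n) * n, dividing by the gcd first so the intermediate product stays small. A self-contained sketch of the same identity on machine-sized integers, with illustrative names not taken from the Rosetta task:

package main

import "fmt"

// gcd computes the greatest common divisor via the Euclidean algorithm.
func gcd(a, b int64) int64 {
	for b != 0 {
		a, b = b, a%b
	}
	return a
}

// lcm mirrors the big.Int version above: divide by the gcd before multiplying.
func lcm(a, b int64) int64 {
	return a / gcd(a, b) * b
}

func main() {
	fmt.Println(lcm(12, 18)) // 36
	fmt.Println(lcm(4, 6))   // 12
}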
import api
from functools import *

import requests

r = requests.get(url=api.GET_QUESTION_STATUS, auth=(api.USER, api.PWD),
                 headers=api.headers, timeout=10, verify=False)

with open('weburl.txt', 'w') as f:
    f.write(reduce(lambda a, b: a + "\n" + b,
                   [i['attack']['web_ip'] for i in r.json()['AiChallenge']]))

with open('serverurl.txt', 'w') as f:
    f.write(reduce(lambda a, b: a + "\n" + b,
                   [str(i['defense']) for i in r.json()['AiChallenge']]))
package rbac

const identityHeader = `X-RH-Identity`

// PaginatedBody represents the response body format from the RBAC service
type PaginatedBody struct {
	Meta  PaginationMeta  `json:"meta"`
	Links PaginationLinks `json:"links"`
	Data  interface{}     `json:"data"`
}

// PaginationMeta contains metadata for pagination
type PaginationMeta struct {
	Count  int `json:"count"`
	Limit  int `json:"limit"`
	Offset int `json:"offset"`
}

// PaginationLinks provides links to additional pages of response data
type PaginationLinks struct {
	First    string `json:"first"`
	Next     string `json:"next"`
	Previous string `json:"previous"`
	Last     string `json:"last"`
}
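A sketch of how these types might be decoded from an RBAC-style paginated response, written as an example test alongside the definitions above. The sample JSON payload and the paths in it are invented for illustration and are not part of the package.

// rbac_types_example_test.go (hypothetical file name)
package rbac

import (
	"encoding/json"
	"fmt"
)

func ExamplePaginatedBody() {
	payload := []byte(`{
		"meta":  {"count": 30, "limit": 10, "offset": 0},
		"links": {"first": "/api/rbac/v1/roles/?offset=0", "next": "/api/rbac/v1/roles/?offset=10", "previous": "", "last": "/api/rbac/v1/roles/?offset=20"},
		"data":  [{"name": "role-a"}, {"name": "role-b"}]
	}`)

	var body PaginatedBody
	if err := json.Unmarshal(payload, &body); err != nil {
		panic(err)
	}

	// Data is left as interface{}, so callers re-marshal or type-assert it as needed.
	fmt.Println(body.Meta.Count, body.Links.Next)
	// Output: 30 /api/rbac/v1/roles/?offset=10
}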
/**
 * Modified to add a "completed" flag, which lets the caller know if the case instance
 * has run to completion without encountering a wait state or experiencing an error/exception.
 *
 * @author Tijs Rademakers
 */
public class CaseInstanceResponse {

    protected String id;
    protected String name;
    protected String url;
    protected String businessKey;
    protected boolean ended;
    protected String caseDefinitionId;
    protected String caseDefinitionUrl;
    protected List<RestVariable> variables = new ArrayList<>();
    protected String tenantId;
    protected boolean completed;

    @ApiModelProperty(example = "187")
    public String getId() {
        return id;
    }

    public void setId(String id) {
        this.id = id;
    }

    @ApiModelProperty(example = "processName")
    public String getName() {
        return name;
    }

    public void setName(String name) {
        this.name = name;
    }

    @ApiModelProperty(example = "http://localhost:8182/cmmn-repository/case-definitions/caseOne%3A1%3A4")
    public String getUrl() {
        return url;
    }

    public void setUrl(String url) {
        this.url = url;
    }

    @ApiModelProperty(example = "myBusinessKey")
    public String getBusinessKey() {
        return businessKey;
    }

    public void setBusinessKey(String businessKey) {
        this.businessKey = businessKey;
    }

    public boolean isEnded() {
        return ended;
    }

    public void setEnded(boolean ended) {
        this.ended = ended;
    }

    @ApiModelProperty(example = "oneTaskCase:1:158")
    public String getCaseDefinitionId() {
        return caseDefinitionId;
    }

    public void setCaseDefinitionId(String caseDefinitionId) {
        this.caseDefinitionId = caseDefinitionId;
    }

    @ApiModelProperty(example = "http://localhost:8182/cmmn-repository/case-definitions/caseOne%3A1%3A4")
    public String getCaseDefinitionUrl() {
        return caseDefinitionUrl;
    }

    public void setCaseDefinitionUrl(String caseDefinitionUrl) {
        this.caseDefinitionUrl = caseDefinitionUrl;
    }

    public List<RestVariable> getVariables() {
        return variables;
    }

    public void setVariables(List<RestVariable> variables) {
        this.variables = variables;
    }

    public void addVariable(RestVariable variable) {
        variables.add(variable);
    }

    public void setTenantId(String tenantId) {
        this.tenantId = tenantId;
    }

    @ApiModelProperty(example = "null")
    public String getTenantId() {
        return tenantId;
    }

    public boolean isCompleted() {
        return completed;
    }

    public void setCompleted(boolean completed) {
        this.completed = completed;
    }
}