patch (string, 17-31.2k chars) | y (int64) | oldf (string, 0-2.21M chars) | idx (int64) | id (int64, 4.29k-68.4k) | msg (string, 8-843 chars) | proj (212 classes) | lang (9 classes)
---|---|---|---|---|---|---|---
@@ -38,9 +38,10 @@ import org.apache.lucene.search.MaxScoreAccumulator.DocAndScore;
*/
public abstract class TopScoreDocCollector extends TopDocsCollector<ScoreDoc> {
- abstract static class ScorerLeafCollector implements LeafCollector {
+ /** Scorable leaf collector */
+ public abstract static class ScorerLeafCollector implements LeafCollector {
- Scorable scorer;
+ protected Scorable scorer;
@Override
public void setScorer(Scorable scorer) throws IOException { | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.lucene.search;
import java.io.IOException;
import java.util.Collection;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.search.MaxScoreAccumulator.DocAndScore;
/**
* A {@link Collector} implementation that collects the top-scoring hits,
* returning them as a {@link TopDocs}. This is used by {@link IndexSearcher} to
* implement {@link TopDocs}-based search. Hits are sorted by score descending
* and then (when the scores are tied) docID ascending. When you create an
* instance of this collector you should know in advance whether documents are
* going to be collected in doc Id order or not.
*
* <p><b>NOTE</b>: The values {@link Float#NaN} and
* {@link Float#NEGATIVE_INFINITY} are not valid scores. This
* collector will not properly collect hits with such
* scores.
*/
public abstract class TopScoreDocCollector extends TopDocsCollector<ScoreDoc> {
abstract static class ScorerLeafCollector implements LeafCollector {
Scorable scorer;
@Override
public void setScorer(Scorable scorer) throws IOException {
this.scorer = scorer;
}
}
private static class SimpleTopScoreDocCollector extends TopScoreDocCollector {
SimpleTopScoreDocCollector(int numHits, HitsThresholdChecker hitsThresholdChecker,
MaxScoreAccumulator minScoreAcc) {
super(numHits, hitsThresholdChecker, minScoreAcc);
}
@Override
public LeafCollector getLeafCollector(LeafReaderContext context) throws IOException {
// reset the minimum competitive score
docBase = context.docBase;
return new ScorerLeafCollector() {
@Override
public void setScorer(Scorable scorer) throws IOException {
super.setScorer(scorer);
minCompetitiveScore = 0f;
updateMinCompetitiveScore(scorer);
if (minScoreAcc != null) {
updateGlobalMinCompetitiveScore(scorer);
}
}
@Override
public void collect(int doc) throws IOException {
float score = scorer.score();
// This collector relies on the fact that scorers produce positive values:
assert score >= 0; // NOTE: false for NaN
totalHits++;
hitsThresholdChecker.incrementHitCount();
if (minScoreAcc != null && (totalHits & minScoreAcc.modInterval) == 0) {
updateGlobalMinCompetitiveScore(scorer);
}
if (score <= pqTop.score) {
if (totalHitsRelation == TotalHits.Relation.EQUAL_TO) {
// we just reached totalHitsThreshold, we can start setting the min
// competitive score now
updateMinCompetitiveScore(scorer);
}
// Since docs are returned in-order (i.e., increasing doc Id), a document
// with equal score to pqTop.score cannot compete since HitQueue favors
// documents with lower doc Ids. Therefore reject those docs too.
return;
}
pqTop.doc = doc + docBase;
pqTop.score = score;
pqTop = pq.updateTop();
updateMinCompetitiveScore(scorer);
}
};
}
}
private static class PagingTopScoreDocCollector extends TopScoreDocCollector {
private final ScoreDoc after;
private int collectedHits;
PagingTopScoreDocCollector(int numHits, ScoreDoc after, HitsThresholdChecker hitsThresholdChecker,
MaxScoreAccumulator minScoreAcc) {
super(numHits, hitsThresholdChecker, minScoreAcc);
this.after = after;
this.collectedHits = 0;
}
@Override
protected int topDocsSize() {
return collectedHits < pq.size() ? collectedHits : pq.size();
}
@Override
protected TopDocs newTopDocs(ScoreDoc[] results, int start) {
return results == null
? new TopDocs(new TotalHits(totalHits, totalHitsRelation), new ScoreDoc[0])
: new TopDocs(new TotalHits(totalHits, totalHitsRelation), results);
}
@Override
public LeafCollector getLeafCollector(LeafReaderContext context) throws IOException {
docBase = context.docBase;
final int afterDoc = after.doc - context.docBase;
return new ScorerLeafCollector() {
@Override
public void collect(int doc) throws IOException {
float score = scorer.score();
// This collector relies on the fact that scorers produce positive values:
assert score >= 0; // NOTE: false for NaN
totalHits++;
hitsThresholdChecker.incrementHitCount();
if (minScoreAcc != null && (totalHits & minScoreAcc.modInterval) == 0) {
updateGlobalMinCompetitiveScore(scorer);
}
if (score > after.score || (score == after.score && doc <= afterDoc)) {
// hit was collected on a previous page
if (totalHitsRelation == TotalHits.Relation.EQUAL_TO) {
// we just reached totalHitsThreshold, we can start setting the min
// competitive score now
updateMinCompetitiveScore(scorer);
}
return;
}
if (score <= pqTop.score) {
if (totalHitsRelation == TotalHits.Relation.EQUAL_TO) {
// we just reached totalHitsThreshold, we can start setting the min
// competitive score now
updateMinCompetitiveScore(scorer);
}
// Since docs are returned in-order (i.e., increasing doc Id), a document
// with equal score to pqTop.score cannot compete since HitQueue favors
// documents with lower doc Ids. Therefore reject those docs too.
return;
}
collectedHits++;
pqTop.doc = doc + docBase;
pqTop.score = score;
pqTop = pq.updateTop();
updateMinCompetitiveScore(scorer);
}
};
}
}
/**
* Creates a new {@link TopScoreDocCollector} given the number of hits to
* collect and the number of hits to count accurately.
*
* <p><b>NOTE</b>: If the total hit count of the top docs is less than or exactly
* {@code totalHitsThreshold} then this value is accurate. On the other hand,
* if the {@link TopDocs#totalHits} value is greater than {@code totalHitsThreshold}
* then its value is a lower bound of the hit count. A value of {@link Integer#MAX_VALUE}
* will make the hit count accurate but will also likely make query processing slower.
* <p><b>NOTE</b>: The instances returned by this method
* pre-allocate a full array of length
* <code>numHits</code>, and fill the array with sentinel
* objects.
*/
public static TopScoreDocCollector create(int numHits, int totalHitsThreshold) {
return create(numHits, null, totalHitsThreshold);
}
/**
* Creates a new {@link TopScoreDocCollector} given the number of hits to
* collect, the bottom of the previous page, and the number of hits to count
* accurately.
*
* <p><b>NOTE</b>: If the total hit count of the top docs is less than or exactly
* {@code totalHitsThreshold} then this value is accurate. On the other hand,
* if the {@link TopDocs#totalHits} value is greater than {@code totalHitsThreshold}
* then its value is a lower bound of the hit count. A value of {@link Integer#MAX_VALUE}
* will make the hit count accurate but will also likely make query processing slower.
* <p><b>NOTE</b>: The instances returned by this method
* pre-allocate a full array of length
* <code>numHits</code>, and fill the array with sentinel
* objects.
*/
public static TopScoreDocCollector create(int numHits, ScoreDoc after, int totalHitsThreshold) {
return create(numHits, after, HitsThresholdChecker.create(Math.max(totalHitsThreshold, numHits)), null);
}
static TopScoreDocCollector create(int numHits, ScoreDoc after, HitsThresholdChecker hitsThresholdChecker,
MaxScoreAccumulator minScoreAcc) {
if (numHits <= 0) {
throw new IllegalArgumentException("numHits must be > 0; please use TotalHitCountCollector if you just need the total hit count");
}
if (hitsThresholdChecker == null) {
throw new IllegalArgumentException("hitsThresholdChecker must be non null");
}
if (after == null) {
return new SimpleTopScoreDocCollector(numHits, hitsThresholdChecker, minScoreAcc);
} else {
return new PagingTopScoreDocCollector(numHits, after, hitsThresholdChecker, minScoreAcc);
}
}
/**
* Create a CollectorManager which uses a shared hit counter to maintain the number of hits
* and a shared {@link MaxScoreAccumulator} to propagate the minimum score across segments
*/
public static CollectorManager<TopScoreDocCollector, TopDocs> createSharedManager(int numHits, FieldDoc after,
int totalHitsThreshold) {
return new CollectorManager<>() {
private final HitsThresholdChecker hitsThresholdChecker = HitsThresholdChecker.createShared(Math.max(totalHitsThreshold, numHits));
private final MaxScoreAccumulator minScoreAcc = new MaxScoreAccumulator();
@Override
public TopScoreDocCollector newCollector() throws IOException {
return TopScoreDocCollector.create(numHits, after, hitsThresholdChecker, minScoreAcc);
}
@Override
public TopDocs reduce(Collection<TopScoreDocCollector> collectors) throws IOException {
final TopDocs[] topDocs = new TopDocs[collectors.size()];
int i = 0;
for (TopScoreDocCollector collector : collectors) {
topDocs[i++] = collector.topDocs();
}
return TopDocs.merge(0, numHits, topDocs);
}
};
}
int docBase;
ScoreDoc pqTop;
final HitsThresholdChecker hitsThresholdChecker;
final MaxScoreAccumulator minScoreAcc;
float minCompetitiveScore;
// prevents instantiation
TopScoreDocCollector(int numHits, HitsThresholdChecker hitsThresholdChecker,
MaxScoreAccumulator minScoreAcc) {
super(new HitQueue(numHits, true));
assert hitsThresholdChecker != null;
// HitQueue implements getSentinelObject to return a ScoreDoc, so we know
// that at this point top() is already initialized.
pqTop = pq.top();
this.hitsThresholdChecker = hitsThresholdChecker;
this.minScoreAcc = minScoreAcc;
}
@Override
protected TopDocs newTopDocs(ScoreDoc[] results, int start) {
if (results == null) {
return EMPTY_TOPDOCS;
}
return new TopDocs(new TotalHits(totalHits, totalHitsRelation), results);
}
@Override
public ScoreMode scoreMode() {
return hitsThresholdChecker.scoreMode();
}
protected void updateGlobalMinCompetitiveScore(Scorable scorer) throws IOException {
assert minScoreAcc != null;
DocAndScore maxMinScore = minScoreAcc.get();
if (maxMinScore != null) {
// since we tie-break on doc id and collect in doc id order we can require
// the next float if the global minimum score is set on a document id that is
// smaller than the ids in the current leaf
float score = docBase > maxMinScore.docID ? Math.nextUp(maxMinScore.score) : maxMinScore.score;
if (score > minCompetitiveScore) {
assert hitsThresholdChecker.isThresholdReached();
scorer.setMinCompetitiveScore(score);
minCompetitiveScore = score;
totalHitsRelation = TotalHits.Relation.GREATER_THAN_OR_EQUAL_TO;
}
}
}
protected void updateMinCompetitiveScore(Scorable scorer) throws IOException {
if (hitsThresholdChecker.isThresholdReached()
&& pqTop != null
&& pqTop.score != Float.NEGATIVE_INFINITY) { // -Infinity is the score of sentinels
// since we tie-break on doc id and collect in doc id order, we can require
// the next float
float localMinScore = Math.nextUp(pqTop.score);
if (localMinScore > minCompetitiveScore) {
scorer.setMinCompetitiveScore(localMinScore);
totalHitsRelation = TotalHits.Relation.GREATER_THAN_OR_EQUAL_TO;
minCompetitiveScore = localMinScore;
if (minScoreAcc != null) {
// we don't use the next float but we register the document
// id so that other leaves can require it if they are after
// the current maximum
minScoreAcc.accumulate(pqTop.doc, pqTop.score);
}
}
}
}
}
| 1 | 38,050 | This is used in o.a.l.sandbox.search.LargeNumHitsTopDocsCollector. | apache-lucene-solr | java |
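To illustrate the review comment above: the patch makes `ScorerLeafCollector` public and its `scorer` field protected so that a collector outside `org.apache.lucene.search` (such as the sandbox `LargeNumHitsTopDocsCollector` the reviewer mentions) can reuse its `setScorer` plumbing. The sketch below is hypothetical: the class name, constructor, and hit list are invented for illustration, not taken from the sandbox collector, and it assumes a Lucene build that already contains this patch.

```java
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TopScoreDocCollector;

// Hypothetical leaf collector in another package that records every hit it sees.
class EveryHitLeafCollector extends TopScoreDocCollector.ScorerLeafCollector {
  final List<ScoreDoc> hits = new ArrayList<>();
  private final int docBase;

  EveryHitLeafCollector(int docBase) {
    this.docBase = docBase;
  }

  @Override
  public void collect(int doc) throws IOException {
    // `scorer` is the protected field inherited from ScorerLeafCollector;
    // its setScorer(Scorable) implementation stored the current scorer for us.
    hits.add(new ScoreDoc(docBase + doc, scorer.score()));
  }
}
```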
@@ -1,11 +1,11 @@
# optimizer
optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
-optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
+optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
- warmup_ratio=1.0 / 3,
+ warmup_ratio=1.0 / 1000,
step=[8, 11])
total_epochs = 12 | 1 | # optimizer
optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=1.0 / 3,
step=[8, 11])
total_epochs = 12
| 1 | 19,019 | We may also do the same for `schedule_2x.py` and `schedule_20e.py`. | open-mmlab-mmdetection | py |
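Following the reviewer's suggestion, the same two edits would carry over to the other schedule configs. The sketch below shows what `schedule_2x.py` might look like after the change; the `step` milestones and `total_epochs` are the conventional 2x values and are assumed here, not taken from this record.

```python
# optimizer
optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=None)   # was: dict(grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
lr_config = dict(
    policy='step',
    warmup='linear',
    warmup_iters=500,
    warmup_ratio=1.0 / 1000,              # was: 1.0 / 3
    step=[16, 22])                        # assumed 2x milestones
total_epochs = 24                         # assumed 2x epoch count
```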
@@ -4,6 +4,7 @@ class User < ActiveRecord::Base
has_many :traces, -> { where(:visible => true) }
has_many :diary_entries, -> { order(:created_at => :desc) }
has_many :diary_comments, -> { order(:created_at => :desc) }
+ has_and_belongs_to_many :diary_entries_subscriptions, :class_name => "DiaryEntry", :join_table => "diary_entries_subscribers", :foreign_key => "subscriber_id"
has_many :messages, -> { where(:to_user_visible => true).order(:sent_on => :desc).preload(:sender, :recipient) }, :foreign_key => :to_user_id
has_many :new_messages, -> { where(:to_user_visible => true, :message_read => false).order(:sent_on => :desc) }, :class_name => "Message", :foreign_key => :to_user_id
has_many :sent_messages, -> { where(:from_user_visible => true).order(:sent_on => :desc).preload(:sender, :recipient) }, :class_name => "Message", :foreign_key => :from_user_id | 1 | class User < ActiveRecord::Base
require "xml/libxml"
has_many :traces, -> { where(:visible => true) }
has_many :diary_entries, -> { order(:created_at => :desc) }
has_many :diary_comments, -> { order(:created_at => :desc) }
has_many :messages, -> { where(:to_user_visible => true).order(:sent_on => :desc).preload(:sender, :recipient) }, :foreign_key => :to_user_id
has_many :new_messages, -> { where(:to_user_visible => true, :message_read => false).order(:sent_on => :desc) }, :class_name => "Message", :foreign_key => :to_user_id
has_many :sent_messages, -> { where(:from_user_visible => true).order(:sent_on => :desc).preload(:sender, :recipient) }, :class_name => "Message", :foreign_key => :from_user_id
has_many :friends, -> { joins(:befriendee).where(:users => { :status => %w(active confirmed) }) }
has_many :friend_users, :through => :friends, :source => :befriendee
has_many :tokens, :class_name => "UserToken"
has_many :preferences, :class_name => "UserPreference"
has_many :changesets, -> { order(:created_at => :desc) }
has_many :changeset_comments, :foreign_key => :author_id
has_and_belongs_to_many :changeset_subscriptions, :class_name => "Changeset", :join_table => "changesets_subscribers", :foreign_key => "subscriber_id"
has_many :note_comments, :foreign_key => :author_id
has_many :notes, :through => :note_comments
has_many :client_applications
has_many :oauth_tokens, -> { order(:authorized_at => :desc).preload(:client_application) }, :class_name => "OauthToken"
has_many :blocks, :class_name => "UserBlock"
has_many :blocks_created, :class_name => "UserBlock", :foreign_key => :creator_id
has_many :blocks_revoked, :class_name => "UserBlock", :foreign_key => :revoker_id
has_many :roles, :class_name => "UserRole"
scope :visible, -> { where(:status => %w(pending active confirmed)) }
scope :active, -> { where(:status => %w(active confirmed)) }
scope :identifiable, -> { where(:data_public => true) }
has_attached_file :image,
:default_url => "/assets/:class/:attachment/:style.png",
:styles => { :large => "100x100>", :small => "50x50>" }
validates :display_name, :presence => true, :allow_nil => true, :length => 3..255,
:exclusion => %w(new terms save confirm confirm-email go_public reset-password forgot-password suspended)
validates :display_name, :if => proc { |u| u.display_name_changed? },
:uniqueness => { :case_sensitive => false }
validates :display_name, :if => proc { |u| u.display_name_changed? },
:format => { :with => %r{\A[^\x00-\x1f\x7f\ufffe\uffff/;.,?%#]*\z} }
validates :display_name, :if => proc { |u| u.display_name_changed? },
:format => { :with => /\A\S/, :message => "has leading whitespace" }
validates :display_name, :if => proc { |u| u.display_name_changed? },
:format => { :with => /\S\z/, :message => "has trailing whitespace" }
validates :email, :presence => true, :confirmation => true
validates :email, :if => proc { |u| u.email_changed? },
:uniqueness => { :case_sensitive => false }
validates :pass_crypt, :confirmation => true, :length => 8..255
validates :home_lat, :home_lon, :allow_nil => true, :numericality => true
validates :home_zoom, :allow_nil => true, :numericality => { :only_integer => true }
validates :preferred_editor, :inclusion => Editors::ALL_EDITORS, :allow_nil => true
validates :image, :attachment_content_type => { :content_type => %r{\Aimage/.*\Z} }
validates :auth_uid, :unless => proc { |u| u.auth_provider.nil? },
:uniqueness => { :scope => :auth_provider }
validates_email_format_of :email, :if => proc { |u| u.email_changed? }
validates_email_format_of :new_email, :allow_blank => true, :if => proc { |u| u.new_email_changed? }
after_initialize :set_defaults
before_save :encrypt_password
after_save :spam_check
def self.authenticate(options)
if options[:username] && options[:password]
user = find_by("email = ? OR display_name = ?", options[:username], options[:username])
if user.nil?
users = where("LOWER(email) = LOWER(?) OR LOWER(display_name) = LOWER(?)", options[:username], options[:username])
user = users.first if users.count == 1
end
if user && PasswordHash.check(user.pass_crypt, user.pass_salt, options[:password])
if PasswordHash.upgrade?(user.pass_crypt, user.pass_salt)
user.pass_crypt, user.pass_salt = PasswordHash.create(options[:password])
user.save
end
else
user = nil
end
elsif options[:token]
token = UserToken.find_by_token(options[:token])
user = token.user if token
end
if user &&
(user.status == "deleted" ||
(user.status == "pending" && !options[:pending]) ||
(user.status == "suspended" && !options[:suspended]))
user = nil
end
token.update_column(:expiry, 1.week.from_now) if token && user
user
end
def to_xml
doc = OSM::API.new.get_xml_doc
doc.root << to_xml_node
doc
end
def to_xml_node
el1 = XML::Node.new "user"
el1["display_name"] = display_name.to_s
el1["account_created"] = creation_time.xmlschema
if home_lat && home_lon
home = XML::Node.new "home"
home["lat"] = home_lat.to_s
home["lon"] = home_lon.to_s
home["zoom"] = home_zoom.to_s
el1 << home
end
el1
end
def description
RichText.new(self[:description_format], self[:description])
end
def languages
attribute_present?(:languages) ? self[:languages].split(/ *[, ] */) : []
end
def languages=(languages)
self[:languages] = languages.join(",")
end
def preferred_language
languages.find { |l| Language.exists?(:code => l) }
end
def preferred_languages
@locales ||= Locale.list(languages)
end
def nearby(radius = NEARBY_RADIUS, num = NEARBY_USERS)
if home_lon && home_lat
gc = OSM::GreatCircle.new(home_lat, home_lon)
sql_for_distance = gc.sql_for_distance("home_lat", "home_lon")
nearby = User.where("id != ? AND status IN (\'active\', \'confirmed\') AND data_public = ? AND #{sql_for_distance} <= ?", id, true, radius).order(sql_for_distance).limit(num)
else
nearby = []
end
nearby
end
def distance(nearby_user)
OSM::GreatCircle.new(home_lat, home_lon).distance(nearby_user.home_lat, nearby_user.home_lon)
end
def is_friends_with?(new_friend)
friends.where(:friend_user_id => new_friend.id).exists?
end
##
# returns true if a user is visible
def visible?
%w(pending active confirmed).include? status
end
##
# returns true if a user is active
def active?
%w(active confirmed).include? status
end
##
# returns true if the user has the moderator role, false otherwise
def moderator?
has_role? "moderator"
end
##
# returns true if the user has the administrator role, false otherwise
def administrator?
has_role? "administrator"
end
##
# returns true if the user has the requested role
def has_role?(role)
roles.any? { |r| r.role == role }
end
##
# returns the first active block which would require users to view
# a message, or nil if there are none.
def blocked_on_view
blocks.active.detect(&:needs_view?)
end
##
# delete a user - leave the account but purge most personal data
def delete
self.display_name = "user_#{id}"
self.description = ""
self.home_lat = nil
self.home_lon = nil
self.image = nil
self.email_valid = false
self.new_email = nil
self.auth_provider = nil
self.auth_uid = nil
self.status = "deleted"
save
end
##
# return a spam score for a user
def spam_score
changeset_score = changesets.size * 50
trace_score = traces.size * 50
diary_entry_score = diary_entries.inject(0) { |a, e| a + e.body.spam_score }
diary_comment_score = diary_comments.inject(0) { |a, e| a + e.body.spam_score }
score = description.spam_score / 4.0
score += diary_entries.where("created_at > ?", 1.day.ago).count * 10
score += diary_entry_score / diary_entries.length unless diary_entries.empty?
score += diary_comment_score / diary_comments.length unless diary_comments.empty?
score -= changeset_score
score -= trace_score
score.to_i
end
##
# perform a spam check on a user
def spam_check
if status == "active" && spam_score > SPAM_THRESHOLD
update_column(:status, "suspended")
end
end
##
# return an oauth access token for a specified application
def access_token(application_key)
ClientApplication.find_by_key(application_key).access_token_for_user(self)
end
private
def set_defaults
self.creation_time = Time.now.getutc unless attribute_present?(:creation_time)
end
def encrypt_password
if pass_crypt_confirmation
self.pass_crypt, self.pass_salt = PasswordHash.create(pass_crypt)
self.pass_crypt_confirmation = nil
end
end
end
| 1 | 10,178 | Should the foreign key here be something like `diary_entry_id`? Or should it be `diary_entry_id` above, in `diary_entry.rb`? | openstreetmap-openstreetmap-website | rb |
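On the review question above, a sketch of the Rails option semantics (not the project's actual resolution): in a `has_and_belongs_to_many`, `:foreign_key` names the join-table column that points back at the declaring model, while `:association_foreign_key` names the column that points at the associated model. Declared from `user.rb`, that would look roughly like this:

```ruby
has_and_belongs_to_many :diary_entries_subscriptions,
                        :class_name => "DiaryEntry",
                        :join_table => "diary_entries_subscribers",
                        :foreign_key => "subscriber_id",              # join-table column referencing users.id
                        :association_foreign_key => "diary_entry_id"  # join-table column referencing diary_entries.id
```

A mirrored declaration in `diary_entry.rb` would swap the two options.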
@@ -54,12 +54,13 @@ type (
namespaceEntry *cache.NamespaceCacheEntry
// internal state
- hasBufferedEvents bool
- failWorkflowTaskInfo *failWorkflowTaskInfo
- activityNotStartedCancelled bool
- continueAsNewBuilder mutableState
- stopProcessing bool // should stop processing any more commands
- mutableState mutableState
+ hasBufferedEvents bool
+ failWorkflowTaskInfo *failWorkflowTaskInfo
+ activityNotStartedCancelled bool
+ continueAsNewBuilder mutableState
+ stopProcessing bool // should stop processing any more commands
+ mutableState mutableState
+ initiatedChildExecutionsInSession map[string]struct{} // Set of initiated child executions in the workflow task
// validation
attrValidator *commandAttrValidator | 1 | // The MIT License
//
// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved.
//
// Copyright (c) 2020 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package history
import (
"fmt"
"github.com/pborman/uuid"
commandpb "go.temporal.io/api/command/v1"
commonpb "go.temporal.io/api/common/v1"
enumspb "go.temporal.io/api/enums/v1"
failurepb "go.temporal.io/api/failure/v1"
historypb "go.temporal.io/api/history/v1"
"go.temporal.io/api/serviceerror"
"go.temporal.io/server/common"
"go.temporal.io/server/common/backoff"
"go.temporal.io/server/common/cache"
"go.temporal.io/server/common/enums"
"go.temporal.io/server/common/log"
"go.temporal.io/server/common/log/tag"
"go.temporal.io/server/common/metrics"
"go.temporal.io/server/common/payloads"
)
type (
commandAttrValidationFn func() error
workflowTaskHandlerImpl struct {
identity string
workflowTaskCompletedID int64
namespaceEntry *cache.NamespaceCacheEntry
// internal state
hasBufferedEvents bool
failWorkflowTaskInfo *failWorkflowTaskInfo
activityNotStartedCancelled bool
continueAsNewBuilder mutableState
stopProcessing bool // should stop processing any more commands
mutableState mutableState
// validation
attrValidator *commandAttrValidator
sizeLimitChecker *workflowSizeChecker
logger log.Logger
namespaceCache cache.NamespaceCache
metricsClient metrics.Client
config *Config
}
failWorkflowTaskInfo struct {
cause enumspb.WorkflowTaskFailedCause
message string
}
)
func newWorkflowTaskHandler(
identity string,
workflowTaskCompletedID int64,
namespaceEntry *cache.NamespaceCacheEntry,
mutableState mutableState,
attrValidator *commandAttrValidator,
sizeLimitChecker *workflowSizeChecker,
logger log.Logger,
namespaceCache cache.NamespaceCache,
metricsClient metrics.Client,
config *Config,
) *workflowTaskHandlerImpl {
return &workflowTaskHandlerImpl{
identity: identity,
workflowTaskCompletedID: workflowTaskCompletedID,
namespaceEntry: namespaceEntry,
// internal state
hasBufferedEvents: mutableState.HasBufferedEvents(),
failWorkflowTaskInfo: nil,
activityNotStartedCancelled: false,
continueAsNewBuilder: nil,
stopProcessing: false,
mutableState: mutableState,
// validation
attrValidator: attrValidator,
sizeLimitChecker: sizeLimitChecker,
logger: logger,
namespaceCache: namespaceCache,
metricsClient: metricsClient,
config: config,
}
}
func (handler *workflowTaskHandlerImpl) handleCommands(
commands []*commandpb.Command,
) error {
// overall workflow size / count check
failWorkflow, err := handler.sizeLimitChecker.failWorkflowSizeExceedsLimit()
if err != nil || failWorkflow {
return err
}
for _, command := range commands {
err = handler.handleCommand(command)
if err != nil || handler.stopProcessing {
return err
}
}
return nil
}
func (handler *workflowTaskHandlerImpl) handleCommand(command *commandpb.Command) error {
switch command.GetCommandType() {
case enumspb.COMMAND_TYPE_SCHEDULE_ACTIVITY_TASK:
return handler.handleCommandScheduleActivity(command.GetScheduleActivityTaskCommandAttributes())
case enumspb.COMMAND_TYPE_COMPLETE_WORKFLOW_EXECUTION:
return handler.handleCommandCompleteWorkflow(command.GetCompleteWorkflowExecutionCommandAttributes())
case enumspb.COMMAND_TYPE_FAIL_WORKFLOW_EXECUTION:
return handler.handleCommandFailWorkflow(command.GetFailWorkflowExecutionCommandAttributes())
case enumspb.COMMAND_TYPE_CANCEL_WORKFLOW_EXECUTION:
return handler.handleCommandCancelWorkflow(command.GetCancelWorkflowExecutionCommandAttributes())
case enumspb.COMMAND_TYPE_START_TIMER:
return handler.handleCommandStartTimer(command.GetStartTimerCommandAttributes())
case enumspb.COMMAND_TYPE_REQUEST_CANCEL_ACTIVITY_TASK:
return handler.handleCommandRequestCancelActivity(command.GetRequestCancelActivityTaskCommandAttributes())
case enumspb.COMMAND_TYPE_CANCEL_TIMER:
return handler.handleCommandCancelTimer(command.GetCancelTimerCommandAttributes())
case enumspb.COMMAND_TYPE_RECORD_MARKER:
return handler.handleCommandRecordMarker(command.GetRecordMarkerCommandAttributes())
case enumspb.COMMAND_TYPE_REQUEST_CANCEL_EXTERNAL_WORKFLOW_EXECUTION:
return handler.handleCommandRequestCancelExternalWorkflow(command.GetRequestCancelExternalWorkflowExecutionCommandAttributes())
case enumspb.COMMAND_TYPE_SIGNAL_EXTERNAL_WORKFLOW_EXECUTION:
return handler.handleCommandSignalExternalWorkflow(command.GetSignalExternalWorkflowExecutionCommandAttributes())
case enumspb.COMMAND_TYPE_CONTINUE_AS_NEW_WORKFLOW_EXECUTION:
return handler.handleCommandContinueAsNewWorkflow(command.GetContinueAsNewWorkflowExecutionCommandAttributes())
case enumspb.COMMAND_TYPE_START_CHILD_WORKFLOW_EXECUTION:
return handler.handleCommandStartChildWorkflow(command.GetStartChildWorkflowExecutionCommandAttributes())
case enumspb.COMMAND_TYPE_UPSERT_WORKFLOW_SEARCH_ATTRIBUTES:
return handler.handleCommandUpsertWorkflowSearchAttributes(command.GetUpsertWorkflowSearchAttributesCommandAttributes())
default:
return serviceerror.NewInvalidArgument(fmt.Sprintf("Unknown command type: %v", command.GetCommandType()))
}
}
func (handler *workflowTaskHandlerImpl) handleCommandScheduleActivity(
attr *commandpb.ScheduleActivityTaskCommandAttributes,
) error {
handler.metricsClient.IncCounter(
metrics.HistoryRespondWorkflowTaskCompletedScope,
metrics.CommandTypeScheduleActivityCounter,
)
executionInfo := handler.mutableState.GetExecutionInfo()
namespaceID := executionInfo.NamespaceID
targetNamespaceID := namespaceID
if attr.GetNamespace() != "" {
targetNamespaceEntry, err := handler.namespaceCache.GetNamespace(attr.GetNamespace())
if err != nil {
return serviceerror.NewInternal(fmt.Sprintf("Unable to schedule activity across namespace %v.", attr.GetNamespace()))
}
targetNamespaceID = targetNamespaceEntry.GetInfo().Id
}
if err := handler.validateCommandAttr(
func() error {
return handler.attrValidator.validateActivityScheduleAttributes(
namespaceID,
targetNamespaceID,
attr,
executionInfo.WorkflowRunTimeout,
)
},
enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_SCHEDULE_ACTIVITY_ATTRIBUTES,
); err != nil || handler.stopProcessing {
return err
}
failWorkflow, err := handler.sizeLimitChecker.failWorkflowIfPayloadSizeExceedsLimit(
metrics.CommandTypeTag(enumspb.COMMAND_TYPE_SCHEDULE_ACTIVITY_TASK.String()),
attr.GetInput().Size(),
"ScheduleActivityTaskCommandAttributes.Input exceeds size limit.",
)
if err != nil || failWorkflow {
handler.stopProcessing = true
return err
}
enums.SetDefaultTaskQueueKind(&attr.GetTaskQueue().Kind)
_, _, err = handler.mutableState.AddActivityTaskScheduledEvent(handler.workflowTaskCompletedID, attr)
switch err.(type) {
case nil:
return nil
case *serviceerror.InvalidArgument:
return handler.handlerFailCommand(
enumspb.WORKFLOW_TASK_FAILED_CAUSE_SCHEDULE_ACTIVITY_DUPLICATE_ID, "",
)
default:
return err
}
}
func (handler *workflowTaskHandlerImpl) handleCommandRequestCancelActivity(
attr *commandpb.RequestCancelActivityTaskCommandAttributes,
) error {
handler.metricsClient.IncCounter(
metrics.HistoryRespondWorkflowTaskCompletedScope,
metrics.CommandTypeCancelActivityCounter,
)
if err := handler.validateCommandAttr(
func() error {
return handler.attrValidator.validateActivityCancelAttributes(attr)
},
enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_REQUEST_CANCEL_ACTIVITY_ATTRIBUTES,
); err != nil || handler.stopProcessing {
return err
}
scheduleID := attr.GetScheduledEventId()
actCancelReqEvent, ai, err := handler.mutableState.AddActivityTaskCancelRequestedEvent(
handler.workflowTaskCompletedID,
scheduleID,
handler.identity,
)
switch err.(type) {
case nil:
if ai.StartedID == common.EmptyEventID {
// We haven't started the activity yet, we can cancel the activity right away and
// schedule a workflow task to ensure the workflow makes progress.
_, err = handler.mutableState.AddActivityTaskCanceledEvent(
ai.ScheduleID,
ai.StartedID,
actCancelReqEvent.GetEventId(),
payloads.EncodeString(activityCancellationMsgActivityNotStarted),
handler.identity,
)
if err != nil {
return err
}
handler.activityNotStartedCancelled = true
}
return nil
case *serviceerror.InvalidArgument:
return handler.handlerFailCommand(
enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_REQUEST_CANCEL_ACTIVITY_ATTRIBUTES, "",
)
default:
return err
}
}
func (handler *workflowTaskHandlerImpl) handleCommandStartTimer(
attr *commandpb.StartTimerCommandAttributes,
) error {
handler.metricsClient.IncCounter(
metrics.HistoryRespondWorkflowTaskCompletedScope,
metrics.CommandTypeStartTimerCounter,
)
if err := handler.validateCommandAttr(
func() error {
return handler.attrValidator.validateTimerScheduleAttributes(attr)
},
enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_START_TIMER_ATTRIBUTES,
); err != nil || handler.stopProcessing {
return err
}
_, _, err := handler.mutableState.AddTimerStartedEvent(handler.workflowTaskCompletedID, attr)
switch err.(type) {
case nil:
return nil
case *serviceerror.InvalidArgument:
return handler.handlerFailCommand(
enumspb.WORKFLOW_TASK_FAILED_CAUSE_START_TIMER_DUPLICATE_ID, "",
)
default:
return err
}
}
func (handler *workflowTaskHandlerImpl) handleCommandCompleteWorkflow(
attr *commandpb.CompleteWorkflowExecutionCommandAttributes,
) error {
handler.metricsClient.IncCounter(
metrics.HistoryRespondWorkflowTaskCompletedScope,
metrics.CommandTypeCompleteWorkflowCounter,
)
if handler.hasBufferedEvents {
return handler.handlerFailCommand(enumspb.WORKFLOW_TASK_FAILED_CAUSE_UNHANDLED_COMMAND, "")
}
if err := handler.validateCommandAttr(
func() error {
return handler.attrValidator.validateCompleteWorkflowExecutionAttributes(attr)
},
enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_COMPLETE_WORKFLOW_EXECUTION_ATTRIBUTES,
); err != nil || handler.stopProcessing {
return err
}
failWorkflow, err := handler.sizeLimitChecker.failWorkflowIfPayloadSizeExceedsLimit(
metrics.CommandTypeTag(enumspb.COMMAND_TYPE_COMPLETE_WORKFLOW_EXECUTION.String()),
attr.GetResult().Size(),
"CompleteWorkflowExecutionCommandAttributes.Result exceeds size limit.",
)
if err != nil || failWorkflow {
handler.stopProcessing = true
return err
}
// If the workflow task has more than one completion event, then just pick the first one
if !handler.mutableState.IsWorkflowExecutionRunning() {
handler.metricsClient.IncCounter(
metrics.HistoryRespondWorkflowTaskCompletedScope,
metrics.MultipleCompletionCommandsCounter,
)
handler.logger.Warn(
"Multiple completion commands",
tag.WorkflowCommandType(enumspb.COMMAND_TYPE_COMPLETE_WORKFLOW_EXECUTION),
tag.ErrorTypeMultipleCompletionCommands,
)
return nil
}
// check if this is a cron workflow
cronBackoff, err := handler.mutableState.GetCronBackoffDuration()
if err != nil {
handler.stopProcessing = true
return err
}
if cronBackoff == backoff.NoBackoff {
// not cron, so complete this workflow execution
if _, err := handler.mutableState.AddCompletedWorkflowEvent(handler.workflowTaskCompletedID, attr); err != nil {
return serviceerror.NewInternal("Unable to add complete workflow event.")
}
return nil
}
// this is a cron workflow
startEvent, err := handler.mutableState.GetStartEvent()
if err != nil {
return err
}
startAttributes := startEvent.GetWorkflowExecutionStartedEventAttributes()
return handler.retryCronContinueAsNew(
startAttributes,
int32(cronBackoff.Seconds()),
enumspb.CONTINUE_AS_NEW_INITIATOR_CRON_SCHEDULE,
nil,
attr.Result,
)
}
func (handler *workflowTaskHandlerImpl) handleCommandFailWorkflow(
attr *commandpb.FailWorkflowExecutionCommandAttributes,
) error {
handler.metricsClient.IncCounter(
metrics.HistoryRespondWorkflowTaskCompletedScope,
metrics.CommandTypeFailWorkflowCounter,
)
if handler.hasBufferedEvents {
return handler.handlerFailCommand(enumspb.WORKFLOW_TASK_FAILED_CAUSE_UNHANDLED_COMMAND, "")
}
if err := handler.validateCommandAttr(
func() error {
return handler.attrValidator.validateFailWorkflowExecutionAttributes(attr)
},
enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_FAIL_WORKFLOW_EXECUTION_ATTRIBUTES,
); err != nil || handler.stopProcessing {
return err
}
failWorkflow, err := handler.sizeLimitChecker.failWorkflowIfPayloadSizeExceedsLimit(
metrics.CommandTypeTag(enumspb.COMMAND_TYPE_FAIL_WORKFLOW_EXECUTION.String()),
attr.GetFailure().Size(),
"FailWorkflowExecutionCommandAttributes.Failure exceeds size limit.",
)
if err != nil || failWorkflow {
handler.stopProcessing = true
return err
}
// If the workflow task has more than one completion event, then just pick the first one
if !handler.mutableState.IsWorkflowExecutionRunning() {
handler.metricsClient.IncCounter(
metrics.HistoryRespondWorkflowTaskCompletedScope,
metrics.MultipleCompletionCommandsCounter,
)
handler.logger.Warn(
"Multiple completion commands",
tag.WorkflowCommandType(enumspb.COMMAND_TYPE_FAIL_WORKFLOW_EXECUTION),
tag.ErrorTypeMultipleCompletionCommands,
)
return nil
}
// below will check whether to do continue as new based on backoff & backoff or cron
backoffInterval, retryState := handler.mutableState.GetRetryBackoffDuration(attr.GetFailure())
continueAsNewInitiator := enumspb.CONTINUE_AS_NEW_INITIATOR_RETRY
// first check the backoff retry
if backoffInterval == backoff.NoBackoff {
// if no backoff retry, set the backoffInterval using cron schedule
backoffInterval, err = handler.mutableState.GetCronBackoffDuration()
if err != nil {
handler.stopProcessing = true
return err
}
continueAsNewInitiator = enumspb.CONTINUE_AS_NEW_INITIATOR_CRON_SCHEDULE
}
// second check the backoff / cron schedule
if backoffInterval == backoff.NoBackoff {
// no retry or cron
if _, err := handler.mutableState.AddFailWorkflowEvent(handler.workflowTaskCompletedID, retryState, attr); err != nil {
return err
}
return nil
}
// this is a cron / backoff workflow
startEvent, err := handler.mutableState.GetStartEvent()
if err != nil {
return err
}
startAttributes := startEvent.GetWorkflowExecutionStartedEventAttributes()
return handler.retryCronContinueAsNew(
startAttributes,
int32(backoffInterval.Seconds()),
continueAsNewInitiator,
attr.GetFailure(),
startAttributes.LastCompletionResult,
)
}
func (handler *workflowTaskHandlerImpl) handleCommandCancelTimer(
attr *commandpb.CancelTimerCommandAttributes,
) error {
handler.metricsClient.IncCounter(
metrics.HistoryRespondWorkflowTaskCompletedScope,
metrics.CommandTypeCancelTimerCounter,
)
if err := handler.validateCommandAttr(
func() error {
return handler.attrValidator.validateTimerCancelAttributes(attr)
},
enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_CANCEL_TIMER_ATTRIBUTES,
); err != nil || handler.stopProcessing {
return err
}
_, err := handler.mutableState.AddTimerCanceledEvent(
handler.workflowTaskCompletedID,
attr,
handler.identity)
switch err.(type) {
case nil:
// timer deletion is a success, we may have deleted a fired timer in
// which case we should reset hasBufferedEvents
// TODO: refreshing hasBufferedEvents after deleting a fired timer event
// is not entirely correct, since new events may appear while these commands are being processed
handler.hasBufferedEvents = handler.mutableState.HasBufferedEvents()
return nil
case *serviceerror.InvalidArgument:
return handler.handlerFailCommand(enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_CANCEL_TIMER_ATTRIBUTES,
err.Error())
default:
return err
}
}
func (handler *workflowTaskHandlerImpl) handleCommandCancelWorkflow(
attr *commandpb.CancelWorkflowExecutionCommandAttributes,
) error {
handler.metricsClient.IncCounter(metrics.HistoryRespondWorkflowTaskCompletedScope,
metrics.CommandTypeCancelWorkflowCounter)
if handler.hasBufferedEvents {
return handler.handlerFailCommand(enumspb.WORKFLOW_TASK_FAILED_CAUSE_UNHANDLED_COMMAND, "")
}
if err := handler.validateCommandAttr(
func() error {
return handler.attrValidator.validateCancelWorkflowExecutionAttributes(attr)
},
enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_CANCEL_WORKFLOW_EXECUTION_ATTRIBUTES,
); err != nil || handler.stopProcessing {
return err
}
// If the workflow task has more than one completion event, then just pick the first one
if !handler.mutableState.IsWorkflowExecutionRunning() {
handler.metricsClient.IncCounter(
metrics.HistoryRespondWorkflowTaskCompletedScope,
metrics.MultipleCompletionCommandsCounter,
)
handler.logger.Warn(
"Multiple completion commands",
tag.WorkflowCommandType(enumspb.COMMAND_TYPE_CANCEL_WORKFLOW_EXECUTION),
tag.ErrorTypeMultipleCompletionCommands,
)
return nil
}
_, err := handler.mutableState.AddWorkflowExecutionCanceledEvent(handler.workflowTaskCompletedID, attr)
return err
}
func (handler *workflowTaskHandlerImpl) handleCommandRequestCancelExternalWorkflow(
attr *commandpb.RequestCancelExternalWorkflowExecutionCommandAttributes,
) error {
handler.metricsClient.IncCounter(
metrics.HistoryRespondWorkflowTaskCompletedScope,
metrics.CommandTypeCancelExternalWorkflowCounter,
)
executionInfo := handler.mutableState.GetExecutionInfo()
namespaceID := executionInfo.NamespaceID
targetNamespaceID := namespaceID
if attr.GetNamespace() != "" {
targetNamespaceEntry, err := handler.namespaceCache.GetNamespace(attr.GetNamespace())
if err != nil {
return serviceerror.NewInternal(fmt.Sprintf("Unable to cancel workflow across namespace: %v.", attr.GetNamespace()))
}
targetNamespaceID = targetNamespaceEntry.GetInfo().Id
}
if err := handler.validateCommandAttr(
func() error {
return handler.attrValidator.validateCancelExternalWorkflowExecutionAttributes(
namespaceID,
targetNamespaceID,
attr,
)
},
enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_REQUEST_CANCEL_EXTERNAL_WORKFLOW_EXECUTION_ATTRIBUTES,
); err != nil || handler.stopProcessing {
return err
}
cancelRequestID := uuid.New()
_, _, err := handler.mutableState.AddRequestCancelExternalWorkflowExecutionInitiatedEvent(
handler.workflowTaskCompletedID, cancelRequestID, attr,
)
return err
}
func (handler *workflowTaskHandlerImpl) handleCommandRecordMarker(
attr *commandpb.RecordMarkerCommandAttributes,
) error {
handler.metricsClient.IncCounter(
metrics.HistoryRespondWorkflowTaskCompletedScope,
metrics.CommandTypeRecordMarkerCounter,
)
if err := handler.validateCommandAttr(
func() error {
return handler.attrValidator.validateRecordMarkerAttributes(attr)
},
enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_RECORD_MARKER_ATTRIBUTES,
); err != nil || handler.stopProcessing {
return err
}
failWorkflow, err := handler.sizeLimitChecker.failWorkflowIfPayloadSizeExceedsLimit(
metrics.CommandTypeTag(enumspb.COMMAND_TYPE_RECORD_MARKER.String()),
common.GetPayloadsMapSize(attr.GetDetails()),
"RecordMarkerCommandAttributes.Details exceeds size limit.",
)
if err != nil || failWorkflow {
handler.stopProcessing = true
return err
}
_, err = handler.mutableState.AddRecordMarkerEvent(handler.workflowTaskCompletedID, attr)
return err
}
func (handler *workflowTaskHandlerImpl) handleCommandContinueAsNewWorkflow(
attr *commandpb.ContinueAsNewWorkflowExecutionCommandAttributes,
) error {
handler.metricsClient.IncCounter(
metrics.HistoryRespondWorkflowTaskCompletedScope,
metrics.CommandTypeContinueAsNewCounter,
)
if handler.hasBufferedEvents {
return handler.handlerFailCommand(enumspb.WORKFLOW_TASK_FAILED_CAUSE_UNHANDLED_COMMAND, "")
}
executionInfo := handler.mutableState.GetExecutionInfo()
if err := handler.validateCommandAttr(
func() error {
return handler.attrValidator.validateContinueAsNewWorkflowExecutionAttributes(
attr,
executionInfo,
)
},
enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_CONTINUE_AS_NEW_ATTRIBUTES,
); err != nil || handler.stopProcessing {
return err
}
failWorkflow, err := handler.sizeLimitChecker.failWorkflowIfPayloadSizeExceedsLimit(
metrics.CommandTypeTag(enumspb.COMMAND_TYPE_CONTINUE_AS_NEW_WORKFLOW_EXECUTION.String()),
attr.GetInput().Size(),
"ContinueAsNewWorkflowExecutionCommandAttributes. Input exceeds size limit.",
)
if err != nil || failWorkflow {
handler.stopProcessing = true
return err
}
if attr.WorkflowRunTimeoutSeconds <= 0 {
// TODO(maxim): is workflowTaskCompletedID the correct id?
// TODO(maxim): should we introduce new TimeoutTypes (Workflow, Run) for workflows?
handler.stopProcessing = true
_, err := handler.mutableState.AddTimeoutWorkflowEvent(handler.workflowTaskCompletedID, enumspb.RETRY_STATE_TIMEOUT)
return err
}
handler.logger.Debug("!!!! Continued as new without timeout",
tag.WorkflowRunID(executionInfo.RunID))
// If the workflow task has more than one completion event, then just pick the first one
if !handler.mutableState.IsWorkflowExecutionRunning() {
handler.metricsClient.IncCounter(
metrics.HistoryRespondWorkflowTaskCompletedScope,
metrics.MultipleCompletionCommandsCounter,
)
handler.logger.Warn(
"Multiple completion commands",
tag.WorkflowCommandType(enumspb.COMMAND_TYPE_CONTINUE_AS_NEW_WORKFLOW_EXECUTION),
tag.ErrorTypeMultipleCompletionCommands,
)
return nil
}
// Extract parentNamespace so it can be passed down to next run of workflow execution
var parentNamespace string
if handler.mutableState.HasParentExecution() {
parentNamespaceID := executionInfo.ParentNamespaceID
parentNamespaceEntry, err := handler.namespaceCache.GetNamespaceByID(parentNamespaceID)
if err != nil {
return err
}
parentNamespace = parentNamespaceEntry.GetInfo().Name
}
_, newStateBuilder, err := handler.mutableState.AddContinueAsNewEvent(
handler.workflowTaskCompletedID,
handler.workflowTaskCompletedID,
parentNamespace,
attr,
)
if err != nil {
return err
}
handler.continueAsNewBuilder = newStateBuilder
return nil
}
func (handler *workflowTaskHandlerImpl) handleCommandStartChildWorkflow(
attr *commandpb.StartChildWorkflowExecutionCommandAttributes,
) error {
handler.metricsClient.IncCounter(
metrics.HistoryRespondWorkflowTaskCompletedScope,
metrics.CommandTypeChildWorkflowCounter,
)
executionInfo := handler.mutableState.GetExecutionInfo()
namespaceID := executionInfo.NamespaceID
parentNamespace := handler.namespaceEntry.GetInfo().GetName()
targetNamespaceID := namespaceID
targetNamespace := parentNamespace
if attr.GetNamespace() != "" {
targetNamespaceEntry, err := handler.namespaceCache.GetNamespace(attr.GetNamespace())
if err != nil {
return serviceerror.NewInternal(fmt.Sprintf("Unable to schedule child execution across namespace %v.", attr.GetNamespace()))
}
targetNamespace = targetNamespaceEntry.GetInfo().Name
targetNamespaceID = targetNamespaceEntry.GetInfo().Id
}
if err := handler.validateCommandAttr(
func() error {
return handler.attrValidator.validateStartChildExecutionAttributes(
namespaceID,
targetNamespaceID,
targetNamespace,
attr,
executionInfo,
)
},
enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_START_CHILD_EXECUTION_ATTRIBUTES,
); err != nil || handler.stopProcessing {
return err
}
failWorkflow, err := handler.sizeLimitChecker.failWorkflowIfPayloadSizeExceedsLimit(
metrics.CommandTypeTag(enumspb.COMMAND_TYPE_START_CHILD_WORKFLOW_EXECUTION.String()),
attr.GetInput().Size(),
"StartChildWorkflowExecutionCommandAttributes.Input exceeds size limit.",
)
if err != nil || failWorkflow {
handler.stopProcessing = true
return err
}
enabled := handler.config.EnableParentClosePolicy(handler.namespaceEntry.GetInfo().Name)
if enabled {
enums.SetDefaultParentClosePolicy(&attr.ParentClosePolicy)
} else {
attr.ParentClosePolicy = enumspb.PARENT_CLOSE_POLICY_ABANDON
}
enums.SetDefaultWorkflowIdReusePolicy(&attr.WorkflowIdReusePolicy)
requestID := uuid.New()
_, _, err = handler.mutableState.AddStartChildWorkflowExecutionInitiatedEvent(
handler.workflowTaskCompletedID, requestID, attr,
)
return err
}
func (handler *workflowTaskHandlerImpl) handleCommandSignalExternalWorkflow(
attr *commandpb.SignalExternalWorkflowExecutionCommandAttributes,
) error {
handler.metricsClient.IncCounter(
metrics.HistoryRespondWorkflowTaskCompletedScope,
metrics.CommandTypeSignalExternalWorkflowCounter,
)
executionInfo := handler.mutableState.GetExecutionInfo()
namespaceID := executionInfo.NamespaceID
targetNamespaceID := namespaceID
if attr.GetNamespace() != "" {
targetNamespaceEntry, err := handler.namespaceCache.GetNamespace(attr.GetNamespace())
if err != nil {
return serviceerror.NewInternal(fmt.Sprintf("Unable to signal workflow across namespace: %v.", attr.GetNamespace()))
}
targetNamespaceID = targetNamespaceEntry.GetInfo().Id
}
if err := handler.validateCommandAttr(
func() error {
return handler.attrValidator.validateSignalExternalWorkflowExecutionAttributes(
namespaceID,
targetNamespaceID,
attr,
)
},
enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_SIGNAL_WORKFLOW_EXECUTION_ATTRIBUTES,
); err != nil || handler.stopProcessing {
return err
}
failWorkflow, err := handler.sizeLimitChecker.failWorkflowIfPayloadSizeExceedsLimit(
metrics.CommandTypeTag(enumspb.COMMAND_TYPE_SIGNAL_EXTERNAL_WORKFLOW_EXECUTION.String()),
attr.GetInput().Size(),
"SignalExternalWorkflowExecutionCommandAttributes.Input exceeds size limit.",
)
if err != nil || failWorkflow {
handler.stopProcessing = true
return err
}
signalRequestID := uuid.New() // for deduplication
_, _, err = handler.mutableState.AddSignalExternalWorkflowExecutionInitiatedEvent(
handler.workflowTaskCompletedID, signalRequestID, attr,
)
return err
}
func (handler *workflowTaskHandlerImpl) handleCommandUpsertWorkflowSearchAttributes(
attr *commandpb.UpsertWorkflowSearchAttributesCommandAttributes,
) error {
handler.metricsClient.IncCounter(
metrics.HistoryRespondWorkflowTaskCompletedScope,
metrics.CommandTypeUpsertWorkflowSearchAttributesCounter,
)
// get namespace name
executionInfo := handler.mutableState.GetExecutionInfo()
namespaceID := executionInfo.NamespaceID
namespaceEntry, err := handler.namespaceCache.GetNamespaceByID(namespaceID)
if err != nil {
return serviceerror.NewInternal(fmt.Sprintf("Unable to get namespace for namespaceID: %v.", namespaceID))
}
namespace := namespaceEntry.GetInfo().Name
// valid search attributes for upsert
if err := handler.validateCommandAttr(
func() error {
return handler.attrValidator.validateUpsertWorkflowSearchAttributes(
namespace,
attr,
)
},
enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_SEARCH_ATTRIBUTES,
); err != nil || handler.stopProcessing {
return err
}
// blob size limit check
failWorkflow, err := handler.sizeLimitChecker.failWorkflowIfPayloadSizeExceedsLimit(
metrics.CommandTypeTag(enumspb.COMMAND_TYPE_UPSERT_WORKFLOW_SEARCH_ATTRIBUTES.String()),
searchAttributesSize(attr.GetSearchAttributes().GetIndexedFields()),
"UpsertWorkflowSearchAttributesCommandAttributes exceeds size limit.",
)
if err != nil || failWorkflow {
handler.stopProcessing = true
return err
}
_, err = handler.mutableState.AddUpsertWorkflowSearchAttributesEvent(
handler.workflowTaskCompletedID, attr,
)
return err
}
func searchAttributesSize(fields map[string]*commonpb.Payload) int {
result := 0
for k, v := range fields {
result += len(k)
result += len(v.GetData())
}
return result
}
func (handler *workflowTaskHandlerImpl) retryCronContinueAsNew(
attr *historypb.WorkflowExecutionStartedEventAttributes,
backoffInterval int32,
continueAsNewInitiator enumspb.ContinueAsNewInitiator,
failure *failurepb.Failure,
lastCompletionResult *commonpb.Payloads,
) error {
continueAsNewAttributes := &commandpb.ContinueAsNewWorkflowExecutionCommandAttributes{
WorkflowType: attr.WorkflowType,
TaskQueue: attr.TaskQueue,
RetryPolicy: attr.RetryPolicy,
Input: attr.Input,
WorkflowRunTimeoutSeconds: attr.WorkflowRunTimeoutSeconds,
WorkflowTaskTimeoutSeconds: attr.WorkflowTaskTimeoutSeconds,
CronSchedule: attr.CronSchedule,
BackoffStartIntervalInSeconds: backoffInterval,
Initiator: continueAsNewInitiator,
Failure: failure,
LastCompletionResult: lastCompletionResult,
Header: attr.Header,
Memo: attr.Memo,
SearchAttributes: attr.SearchAttributes,
}
_, newStateBuilder, err := handler.mutableState.AddContinueAsNewEvent(
handler.workflowTaskCompletedID,
handler.workflowTaskCompletedID,
attr.GetParentWorkflowNamespace(),
continueAsNewAttributes,
)
if err != nil {
return err
}
handler.continueAsNewBuilder = newStateBuilder
return nil
}
func (handler *workflowTaskHandlerImpl) validateCommandAttr(
validationFn commandAttrValidationFn,
failedCause enumspb.WorkflowTaskFailedCause,
) error {
if err := validationFn(); err != nil {
if _, ok := err.(*serviceerror.InvalidArgument); ok {
return handler.handlerFailCommand(failedCause, err.Error())
}
return err
}
return nil
}
func (handler *workflowTaskHandlerImpl) handlerFailCommand(
failedCause enumspb.WorkflowTaskFailedCause,
failMessage string,
) error {
handler.failWorkflowTaskInfo = &failWorkflowTaskInfo{
cause: failedCause,
message: failMessage,
}
handler.stopProcessing = true
return nil
}
| 1 | 9,936 | "session" is a new term. Should we call it something more like `currentCommandBatch`, or something else? | temporalio-temporal | go |
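Whatever the field ends up being called, the new `initiatedChildExecutionsInSession` field in the patch above is a plain `map[string]struct{}` used as a set. The self-contained sketch below illustrates that idiom; the workflow IDs and the duplicate check are invented for illustration and are not taken from the handler code above.

```go
package main

import "fmt"

func main() {
	// a set of child workflow IDs initiated while processing one workflow task
	initiated := make(map[string]struct{})

	// record a child workflow the first time a start command for it is handled
	initiated["child-wf-1"] = struct{}{}

	// a later command naming the same child within the same task is a duplicate
	if _, ok := initiated["child-wf-1"]; ok {
		fmt.Println("child-wf-1 was already initiated in this workflow task")
	}
}
```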
@@ -514,7 +514,7 @@ func (s *namespaceHandlerGlobalNamespaceEnabledNotMasterClusterSuite) TestUpdate
_, err := s.MetadataManager.CreateNamespace(&persistence.CreateNamespaceRequest{
Namespace: &persistenceblobs.NamespaceDetail{
Info: &persistenceblobs.NamespaceInfo{
- Id: uuid.NewRandom(),
+ Id: uuid.New(),
Name: namespace,
Status: namespacepb.NamespaceStatus_Registered,
Description: description, | 1 | // The MIT License
//
// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved.
//
// Copyright (c) 2020 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package namespace
import (
"context"
"log"
"os"
"testing"
"github.com/gogo/protobuf/types"
"github.com/pborman/uuid"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/suite"
namespacepb "go.temporal.io/temporal-proto/namespace"
replicationpb "go.temporal.io/temporal-proto/replication"
"go.temporal.io/temporal-proto/serviceerror"
"go.temporal.io/temporal-proto/workflowservice"
"github.com/temporalio/temporal/.gen/proto/persistenceblobs"
"github.com/temporalio/temporal/common"
"github.com/temporalio/temporal/common/archiver"
"github.com/temporalio/temporal/common/archiver/provider"
"github.com/temporalio/temporal/common/cluster"
"github.com/temporalio/temporal/common/log/loggerimpl"
"github.com/temporalio/temporal/common/mocks"
"github.com/temporalio/temporal/common/persistence"
persistencetests "github.com/temporalio/temporal/common/persistence/persistence-tests"
"github.com/temporalio/temporal/common/service/config"
dc "github.com/temporalio/temporal/common/service/dynamicconfig"
)
type (
namespaceHandlerGlobalNamespaceEnabledNotMasterClusterSuite struct {
suite.Suite
persistencetests.TestBase
minRetentionDays int
maxBadBinaryCount int
metadataMgr persistence.MetadataManager
mockProducer *mocks.KafkaProducer
mockNamespaceReplicator Replicator
archivalMetadata archiver.ArchivalMetadata
mockArchiverProvider *provider.MockArchiverProvider
handler *HandlerImpl
}
)
func TestNamespaceHandlerGlobalNamespaceEnabledNotMasterClusterSuite(t *testing.T) {
s := new(namespaceHandlerGlobalNamespaceEnabledNotMasterClusterSuite)
suite.Run(t, s)
}
func (s *namespaceHandlerGlobalNamespaceEnabledNotMasterClusterSuite) SetupSuite() {
if testing.Verbose() {
log.SetOutput(os.Stdout)
}
s.TestBase = persistencetests.NewTestBaseWithCassandra(&persistencetests.TestBaseOptions{
ClusterMetadata: cluster.GetTestClusterMetadata(true, false),
})
s.TestBase.Setup()
}
func (s *namespaceHandlerGlobalNamespaceEnabledNotMasterClusterSuite) TearDownSuite() {
s.TestBase.TearDownWorkflowStore()
}
func (s *namespaceHandlerGlobalNamespaceEnabledNotMasterClusterSuite) SetupTest() {
logger := loggerimpl.NewNopLogger()
dcCollection := dc.NewCollection(dc.NewNopClient(), logger)
s.minRetentionDays = 1
s.maxBadBinaryCount = 10
s.metadataMgr = s.TestBase.MetadataManager
s.mockProducer = &mocks.KafkaProducer{}
s.mockNamespaceReplicator = NewNamespaceReplicator(s.mockProducer, logger)
s.archivalMetadata = archiver.NewArchivalMetadata(
dcCollection,
"",
false,
"",
false,
&config.ArchivalNamespaceDefaults{},
)
s.mockArchiverProvider = &provider.MockArchiverProvider{}
s.handler = NewHandler(
s.minRetentionDays,
dc.GetIntPropertyFilteredByNamespace(s.maxBadBinaryCount),
logger,
s.metadataMgr,
s.ClusterMetadata,
s.mockNamespaceReplicator,
s.archivalMetadata,
s.mockArchiverProvider,
)
}
func (s *namespaceHandlerGlobalNamespaceEnabledNotMasterClusterSuite) TearDownTest() {
s.mockProducer.AssertExpectations(s.T())
s.mockArchiverProvider.AssertExpectations(s.T())
}
func (s *namespaceHandlerGlobalNamespaceEnabledNotMasterClusterSuite) TestRegisterGetNamespace_LocalNamespace_AllDefault() {
namespace := s.getRandomNamespace()
isGlobalNamespace := false
var clusters []*replicationpb.ClusterReplicationConfiguration
for _, name := range persistence.GetOrUseDefaultClusters(s.ClusterMetadata.GetCurrentClusterName(), nil) {
clusters = append(clusters, &replicationpb.ClusterReplicationConfiguration{
ClusterName: name,
})
}
retention := int32(1)
registerResp, err := s.handler.RegisterNamespace(context.Background(), &workflowservice.RegisterNamespaceRequest{
Name: namespace,
IsGlobalNamespace: isGlobalNamespace,
WorkflowExecutionRetentionPeriodInDays: retention,
})
s.NoError(err)
s.Nil(registerResp)
resp, err := s.handler.DescribeNamespace(context.Background(), &workflowservice.DescribeNamespaceRequest{
Name: namespace,
})
s.NoError(err)
s.NotEmpty(resp.NamespaceInfo.GetId())
resp.NamespaceInfo.Id = ""
s.Equal(&namespacepb.NamespaceInfo{
Name: namespace,
Status: namespacepb.NamespaceStatus_Registered,
Description: "",
OwnerEmail: "",
Data: map[string]string{},
Id: "",
}, resp.NamespaceInfo)
s.Equal(&namespacepb.NamespaceConfiguration{
WorkflowExecutionRetentionPeriodInDays: retention,
EmitMetric: &types.BoolValue{Value: false},
HistoryArchivalStatus: namespacepb.ArchivalStatus_Disabled,
HistoryArchivalURI: "",
VisibilityArchivalStatus: namespacepb.ArchivalStatus_Disabled,
VisibilityArchivalURI: "",
BadBinaries: &namespacepb.BadBinaries{Binaries: map[string]*namespacepb.BadBinaryInfo{}},
}, resp.Configuration)
s.Equal(&replicationpb.NamespaceReplicationConfiguration{
ActiveClusterName: s.ClusterMetadata.GetCurrentClusterName(),
Clusters: clusters,
}, resp.ReplicationConfiguration)
s.Equal(common.EmptyVersion, resp.GetFailoverVersion())
s.Equal(isGlobalNamespace, resp.GetIsGlobalNamespace())
}
func (s *namespaceHandlerGlobalNamespaceEnabledNotMasterClusterSuite) TestRegisterGetNamespace_LocalNamespace_NoDefault() {
namespace := s.getRandomNamespace()
description := "some random description"
email := "some random email"
retention := int32(7)
emitMetric := true
activeClusterName := cluster.TestCurrentClusterName
clusters := []*replicationpb.ClusterReplicationConfiguration{
&replicationpb.ClusterReplicationConfiguration{
ClusterName: activeClusterName,
},
}
data := map[string]string{"some random key": "some random value"}
isGlobalNamespace := false
var expectedClusters []*replicationpb.ClusterReplicationConfiguration
for _, name := range persistence.GetOrUseDefaultClusters(s.ClusterMetadata.GetCurrentClusterName(), nil) {
expectedClusters = append(expectedClusters, &replicationpb.ClusterReplicationConfiguration{
ClusterName: name,
})
}
registerResp, err := s.handler.RegisterNamespace(context.Background(), &workflowservice.RegisterNamespaceRequest{
Name: namespace,
Description: description,
OwnerEmail: email,
WorkflowExecutionRetentionPeriodInDays: retention,
EmitMetric: emitMetric,
Clusters: clusters,
ActiveClusterName: activeClusterName,
Data: data,
IsGlobalNamespace: isGlobalNamespace,
})
s.NoError(err)
s.Nil(registerResp)
resp, err := s.handler.DescribeNamespace(context.Background(), &workflowservice.DescribeNamespaceRequest{
Name: namespace,
})
s.NoError(err)
s.NotEmpty(resp.NamespaceInfo.GetId())
resp.NamespaceInfo.Id = ""
s.Equal(&namespacepb.NamespaceInfo{
Name: namespace,
Status: namespacepb.NamespaceStatus_Registered,
Description: description,
OwnerEmail: email,
Data: data,
Id: "",
}, resp.NamespaceInfo)
s.Equal(&namespacepb.NamespaceConfiguration{
WorkflowExecutionRetentionPeriodInDays: retention,
EmitMetric: &types.BoolValue{Value: emitMetric},
HistoryArchivalStatus: namespacepb.ArchivalStatus_Disabled,
HistoryArchivalURI: "",
VisibilityArchivalStatus: namespacepb.ArchivalStatus_Disabled,
VisibilityArchivalURI: "",
BadBinaries: &namespacepb.BadBinaries{Binaries: map[string]*namespacepb.BadBinaryInfo{}},
}, resp.Configuration)
s.Equal(&replicationpb.NamespaceReplicationConfiguration{
ActiveClusterName: s.ClusterMetadata.GetCurrentClusterName(),
Clusters: expectedClusters,
}, resp.ReplicationConfiguration)
s.Equal(common.EmptyVersion, resp.GetFailoverVersion())
s.Equal(isGlobalNamespace, resp.GetIsGlobalNamespace())
}
func (s *namespaceHandlerGlobalNamespaceEnabledNotMasterClusterSuite) TestUpdateGetNamespace_LocalNamespace_NoAttrSet() {
namespace := s.getRandomNamespace()
description := "some random description"
email := "some random email"
retention := int32(7)
emitMetric := true
data := map[string]string{"some random key": "some random value"}
var clusters []*replicationpb.ClusterReplicationConfiguration
for _, name := range persistence.GetOrUseDefaultClusters(s.ClusterMetadata.GetCurrentClusterName(), nil) {
clusters = append(clusters, &replicationpb.ClusterReplicationConfiguration{
ClusterName: name,
})
}
isGlobalNamespace := false
registerResp, err := s.handler.RegisterNamespace(context.Background(), &workflowservice.RegisterNamespaceRequest{
Name: namespace,
Description: description,
OwnerEmail: email,
WorkflowExecutionRetentionPeriodInDays: retention,
EmitMetric: emitMetric,
Clusters: clusters,
ActiveClusterName: s.ClusterMetadata.GetCurrentClusterName(),
Data: data,
IsGlobalNamespace: isGlobalNamespace,
})
s.NoError(err)
s.Nil(registerResp)
fnTest := func(info *namespacepb.NamespaceInfo, config *namespacepb.NamespaceConfiguration,
replicationConfig *replicationpb.NamespaceReplicationConfiguration, isGlobalNamespace bool, failoverVersion int64) {
s.NotEmpty(info.GetId())
info.Id = ""
s.Equal(&namespacepb.NamespaceInfo{
Name: namespace,
Status: namespacepb.NamespaceStatus_Registered,
Description: description,
OwnerEmail: email,
Data: data,
Id: "",
}, info)
s.Equal(&namespacepb.NamespaceConfiguration{
WorkflowExecutionRetentionPeriodInDays: retention,
EmitMetric: &types.BoolValue{Value: emitMetric},
HistoryArchivalStatus: namespacepb.ArchivalStatus_Disabled,
HistoryArchivalURI: "",
VisibilityArchivalStatus: namespacepb.ArchivalStatus_Disabled,
VisibilityArchivalURI: "",
BadBinaries: &namespacepb.BadBinaries{Binaries: map[string]*namespacepb.BadBinaryInfo{}},
}, config)
s.Equal(&replicationpb.NamespaceReplicationConfiguration{
ActiveClusterName: s.ClusterMetadata.GetCurrentClusterName(),
Clusters: clusters,
}, replicationConfig)
s.Equal(common.EmptyVersion, failoverVersion)
s.Equal(isGlobalNamespace, isGlobalNamespace)
}
updateResp, err := s.handler.UpdateNamespace(context.Background(), &workflowservice.UpdateNamespaceRequest{
Name: namespace,
})
s.NoError(err)
fnTest(
updateResp.NamespaceInfo,
updateResp.Configuration,
updateResp.ReplicationConfiguration,
updateResp.GetIsGlobalNamespace(),
updateResp.GetFailoverVersion(),
)
getResp, err := s.handler.DescribeNamespace(context.Background(), &workflowservice.DescribeNamespaceRequest{
Name: namespace,
})
s.NoError(err)
fnTest(
getResp.NamespaceInfo,
getResp.Configuration,
getResp.ReplicationConfiguration,
getResp.GetIsGlobalNamespace(),
getResp.GetFailoverVersion(),
)
}
func (s *namespaceHandlerGlobalNamespaceEnabledNotMasterClusterSuite) TestUpdateGetNamespace_LocalNamespace_AllAttrSet() {
namespace := s.getRandomNamespace()
isGlobalNamespace := false
registerResp, err := s.handler.RegisterNamespace(context.Background(), &workflowservice.RegisterNamespaceRequest{
Name: namespace,
IsGlobalNamespace: isGlobalNamespace,
WorkflowExecutionRetentionPeriodInDays: 1,
})
s.NoError(err)
s.Nil(registerResp)
description := "some random description"
email := "some random email"
retention := int32(7)
emitMetric := true
data := map[string]string{"some random key": "some random value"}
var clusters []*replicationpb.ClusterReplicationConfiguration
for _, name := range persistence.GetOrUseDefaultClusters(s.ClusterMetadata.GetCurrentClusterName(), nil) {
clusters = append(clusters, &replicationpb.ClusterReplicationConfiguration{
ClusterName: name,
})
}
fnTest := func(info *namespacepb.NamespaceInfo, config *namespacepb.NamespaceConfiguration,
replicationConfig *replicationpb.NamespaceReplicationConfiguration, isGlobalNamespace bool, failoverVersion int64) {
s.NotEmpty(info.GetId())
info.Id = ""
s.Equal(&namespacepb.NamespaceInfo{
Name: namespace,
Status: namespacepb.NamespaceStatus_Registered,
Description: description,
OwnerEmail: email,
Data: data,
Id: "",
}, info)
s.Equal(&namespacepb.NamespaceConfiguration{
WorkflowExecutionRetentionPeriodInDays: retention,
EmitMetric: &types.BoolValue{Value: emitMetric},
HistoryArchivalStatus: namespacepb.ArchivalStatus_Disabled,
HistoryArchivalURI: "",
VisibilityArchivalStatus: namespacepb.ArchivalStatus_Disabled,
VisibilityArchivalURI: "",
BadBinaries: &namespacepb.BadBinaries{Binaries: map[string]*namespacepb.BadBinaryInfo{}},
}, config)
s.Equal(&replicationpb.NamespaceReplicationConfiguration{
ActiveClusterName: s.ClusterMetadata.GetCurrentClusterName(),
Clusters: clusters,
}, replicationConfig)
s.Equal(common.EmptyVersion, failoverVersion)
s.Equal(isGlobalNamespace, isGlobalNamespace)
}
updateResp, err := s.handler.UpdateNamespace(context.Background(), &workflowservice.UpdateNamespaceRequest{
Name: namespace,
UpdatedInfo: &namespacepb.UpdateNamespaceInfo{
Description: description,
OwnerEmail: email,
Data: data,
},
Configuration: &namespacepb.NamespaceConfiguration{
WorkflowExecutionRetentionPeriodInDays: retention,
EmitMetric: &types.BoolValue{Value: emitMetric},
HistoryArchivalStatus: namespacepb.ArchivalStatus_Disabled,
HistoryArchivalURI: "",
VisibilityArchivalStatus: namespacepb.ArchivalStatus_Disabled,
VisibilityArchivalURI: "",
BadBinaries: &namespacepb.BadBinaries{Binaries: map[string]*namespacepb.BadBinaryInfo{}},
},
ReplicationConfiguration: &replicationpb.NamespaceReplicationConfiguration{
ActiveClusterName: s.ClusterMetadata.GetCurrentClusterName(),
Clusters: clusters,
},
})
s.NoError(err)
fnTest(
updateResp.NamespaceInfo,
updateResp.Configuration,
updateResp.ReplicationConfiguration,
updateResp.GetIsGlobalNamespace(),
updateResp.GetFailoverVersion(),
)
getResp, err := s.handler.DescribeNamespace(context.Background(), &workflowservice.DescribeNamespaceRequest{
Name: namespace,
})
s.NoError(err)
fnTest(
getResp.NamespaceInfo,
getResp.Configuration,
getResp.ReplicationConfiguration,
getResp.GetIsGlobalNamespace(),
getResp.GetFailoverVersion(),
)
}
func (s *namespaceHandlerGlobalNamespaceEnabledNotMasterClusterSuite) TestRegisterGetNamespace_GlobalNamespace_AllDefault() {
namespace := s.getRandomNamespace()
isGlobalNamespace := true
var clusters []*replicationpb.ClusterReplicationConfiguration
for _, name := range persistence.GetOrUseDefaultClusters(s.ClusterMetadata.GetCurrentClusterName(), nil) {
clusters = append(clusters, &replicationpb.ClusterReplicationConfiguration{
ClusterName: name,
})
}
s.Equal(1, len(clusters))
registerResp, err := s.handler.RegisterNamespace(context.Background(), &workflowservice.RegisterNamespaceRequest{
Name: namespace,
IsGlobalNamespace: isGlobalNamespace,
})
s.Error(err)
s.IsType(&serviceerror.InvalidArgument{}, err)
s.Nil(registerResp)
resp, err := s.handler.DescribeNamespace(context.Background(), &workflowservice.DescribeNamespaceRequest{
Name: namespace,
})
s.Error(err)
s.IsType(&serviceerror.NotFound{}, err)
s.Nil(resp)
}
func (s *namespaceHandlerGlobalNamespaceEnabledNotMasterClusterSuite) TestRegisterGetNamespace_GlobalNamespace_NoDefault() {
namespace := s.getRandomNamespace()
description := "some random description"
email := "some random email"
retention := int32(7)
emitMetric := true
activeClusterName := ""
clusters := []*replicationpb.ClusterReplicationConfiguration{}
for clusterName := range s.ClusterMetadata.GetAllClusterInfo() {
if clusterName != s.ClusterMetadata.GetCurrentClusterName() {
activeClusterName = clusterName
}
clusters = append(clusters, &replicationpb.ClusterReplicationConfiguration{
ClusterName: clusterName,
})
}
s.True(len(activeClusterName) > 0)
s.True(len(clusters) > 1)
data := map[string]string{"some random key": "some random value"}
isGlobalNamespace := true
registerResp, err := s.handler.RegisterNamespace(context.Background(), &workflowservice.RegisterNamespaceRequest{
Name: namespace,
Description: description,
OwnerEmail: email,
WorkflowExecutionRetentionPeriodInDays: retention,
EmitMetric: emitMetric,
Clusters: clusters,
ActiveClusterName: activeClusterName,
Data: data,
IsGlobalNamespace: isGlobalNamespace,
})
s.Error(err)
s.IsType(&serviceerror.InvalidArgument{}, err)
s.Nil(registerResp)
resp, err := s.handler.DescribeNamespace(context.Background(), &workflowservice.DescribeNamespaceRequest{
Name: namespace,
})
s.Error(err)
s.IsType(&serviceerror.NotFound{}, err)
s.Nil(resp)
}
func (s *namespaceHandlerGlobalNamespaceEnabledNotMasterClusterSuite) TestUpdateGetNamespace_GlobalNamespace_NoAttrSet() {
namespace := s.getRandomNamespace()
description := "some random description"
email := "some random email"
retention := int32(7)
emitMetric := true
activeClusterName := ""
clusters := []string{}
for clusterName := range s.ClusterMetadata.GetAllClusterInfo() {
if clusterName != s.ClusterMetadata.GetCurrentClusterName() {
activeClusterName = clusterName
}
clusters = append(clusters, clusterName)
}
s.True(len(activeClusterName) > 0)
s.True(len(clusters) > 1)
data := map[string]string{"some random key": "some random value"}
isGlobalNamespace := true
_, err := s.MetadataManager.CreateNamespace(&persistence.CreateNamespaceRequest{
Namespace: &persistenceblobs.NamespaceDetail{
Info: &persistenceblobs.NamespaceInfo{
Id: uuid.NewRandom(),
Name: namespace,
Status: namespacepb.NamespaceStatus_Registered,
Description: description,
Owner: email,
Data: data,
},
Config: &persistenceblobs.NamespaceConfig{
RetentionDays: retention,
EmitMetric: emitMetric,
HistoryArchivalStatus: namespacepb.ArchivalStatus_Disabled,
HistoryArchivalURI: "",
VisibilityArchivalStatus: namespacepb.ArchivalStatus_Disabled,
VisibilityArchivalURI: "",
},
ReplicationConfig: &persistenceblobs.NamespaceReplicationConfig{
ActiveClusterName: activeClusterName,
Clusters: clusters,
},
ConfigVersion: 0,
FailoverVersion: s.ClusterMetadata.GetNextFailoverVersion(activeClusterName, 0),
},
IsGlobalNamespace: isGlobalNamespace,
})
s.NoError(err)
resp, err := s.handler.UpdateNamespace(context.Background(), &workflowservice.UpdateNamespaceRequest{
Name: namespace,
})
s.Error(err)
s.IsType(&serviceerror.InvalidArgument{}, err)
s.Nil(resp)
}
func (s *namespaceHandlerGlobalNamespaceEnabledNotMasterClusterSuite) TestUpdateGetNamespace_GlobalNamespace_AllAttrSet() {
namespace := s.getRandomNamespace()
description := "some random description"
email := "some random email"
retention := int32(7)
emitMetric := true
activeClusterName := ""
clusters := []*replicationpb.ClusterReplicationConfiguration{}
clustersDB := []string{}
for clusterName := range s.ClusterMetadata.GetAllClusterInfo() {
if clusterName != s.ClusterMetadata.GetCurrentClusterName() {
activeClusterName = clusterName
}
clusters = append(clusters, &replicationpb.ClusterReplicationConfiguration{
ClusterName: clusterName,
})
clustersDB = append(clustersDB, clusterName)
}
s.True(len(activeClusterName) > 0)
s.True(len(clusters) > 1)
s.True(len(clustersDB) > 1)
data := map[string]string{"some random key": "some random value"}
isGlobalNamespace := true
_, err := s.MetadataManager.CreateNamespace(&persistence.CreateNamespaceRequest{
Namespace: &persistenceblobs.NamespaceDetail{
Info: &persistenceblobs.NamespaceInfo{
Id: uuid.NewRandom(),
Name: namespace,
Status: namespacepb.NamespaceStatus_Registered,
Description: "",
Owner: "",
Data: map[string]string{},
},
Config: &persistenceblobs.NamespaceConfig{
RetentionDays: 0,
EmitMetric: false,
HistoryArchivalStatus: namespacepb.ArchivalStatus_Disabled,
HistoryArchivalURI: "",
VisibilityArchivalStatus: namespacepb.ArchivalStatus_Disabled,
VisibilityArchivalURI: "",
},
ReplicationConfig: &persistenceblobs.NamespaceReplicationConfig{
ActiveClusterName: activeClusterName,
Clusters: clustersDB,
},
ConfigVersion: 0,
FailoverVersion: s.ClusterMetadata.GetNextFailoverVersion(activeClusterName, 0),
},
IsGlobalNamespace: isGlobalNamespace,
})
s.NoError(err)
updateResp, err := s.handler.UpdateNamespace(context.Background(), &workflowservice.UpdateNamespaceRequest{
Name: namespace,
UpdatedInfo: &namespacepb.UpdateNamespaceInfo{
Description: description,
OwnerEmail: email,
Data: data,
},
Configuration: &namespacepb.NamespaceConfiguration{
WorkflowExecutionRetentionPeriodInDays: retention,
EmitMetric: &types.BoolValue{Value: emitMetric},
HistoryArchivalStatus: namespacepb.ArchivalStatus_Disabled,
HistoryArchivalURI: "",
VisibilityArchivalStatus: namespacepb.ArchivalStatus_Disabled,
VisibilityArchivalURI: "",
BadBinaries: &namespacepb.BadBinaries{Binaries: map[string]*namespacepb.BadBinaryInfo{}},
},
ReplicationConfiguration: &replicationpb.NamespaceReplicationConfiguration{
ActiveClusterName: "",
Clusters: clusters,
},
})
s.Error(err)
s.IsType(&serviceerror.InvalidArgument{}, err)
s.Nil(updateResp)
}
func (s *namespaceHandlerGlobalNamespaceEnabledNotMasterClusterSuite) TestUpdateGetNamespace_GlobalNamespace_Failover() {
namespace := s.getRandomNamespace()
description := "some random description"
email := "some random email"
retention := int32(7)
emitMetric := true
prevActiveClusterName := ""
nextActiveClusterName := s.ClusterMetadata.GetCurrentClusterName()
clusters := []*replicationpb.ClusterReplicationConfiguration{}
clustersDB := []string{}
for clusterName := range s.ClusterMetadata.GetAllClusterInfo() {
if clusterName != s.ClusterMetadata.GetCurrentClusterName() {
prevActiveClusterName = clusterName
}
clusters = append(clusters, &replicationpb.ClusterReplicationConfiguration{
ClusterName: clusterName,
})
clustersDB = append(clustersDB, clusterName)
}
s.True(len(prevActiveClusterName) > 0)
s.True(len(clusters) > 1)
s.True(len(clustersDB) > 1)
data := map[string]string{"some random key": "some random value"}
isGlobalNamespace := true
_, err := s.MetadataManager.CreateNamespace(&persistence.CreateNamespaceRequest{
Namespace: &persistenceblobs.NamespaceDetail{
Info: &persistenceblobs.NamespaceInfo{
Id: uuid.NewRandom(),
Name: namespace,
Status: namespacepb.NamespaceStatus_Registered,
Description: description,
Owner: email,
Data: data,
},
Config: &persistenceblobs.NamespaceConfig{
RetentionDays: retention,
EmitMetric: emitMetric,
HistoryArchivalStatus: namespacepb.ArchivalStatus_Disabled,
HistoryArchivalURI: "",
VisibilityArchivalStatus: namespacepb.ArchivalStatus_Disabled,
VisibilityArchivalURI: "",
},
ReplicationConfig: &persistenceblobs.NamespaceReplicationConfig{
ActiveClusterName: prevActiveClusterName,
Clusters: clustersDB,
},
ConfigVersion: 0,
FailoverVersion: s.ClusterMetadata.GetNextFailoverVersion(prevActiveClusterName, 0),
},
IsGlobalNamespace: isGlobalNamespace,
})
s.NoError(err)
fnTest := func(info *namespacepb.NamespaceInfo, config *namespacepb.NamespaceConfiguration,
replicationConfig *replicationpb.NamespaceReplicationConfiguration, isGlobalNamespace bool, failoverVersion int64) {
s.NotEmpty(info.GetId())
info.Id = ""
s.Equal(&namespacepb.NamespaceInfo{
Name: namespace,
Status: namespacepb.NamespaceStatus_Registered,
Description: description,
OwnerEmail: email,
Data: data,
Id: "",
}, info)
s.Equal(&namespacepb.NamespaceConfiguration{
WorkflowExecutionRetentionPeriodInDays: retention,
EmitMetric: &types.BoolValue{Value: emitMetric},
HistoryArchivalStatus: namespacepb.ArchivalStatus_Disabled,
HistoryArchivalURI: "",
VisibilityArchivalStatus: namespacepb.ArchivalStatus_Disabled,
VisibilityArchivalURI: "",
BadBinaries: &namespacepb.BadBinaries{Binaries: map[string]*namespacepb.BadBinaryInfo{}},
}, config)
s.Equal(&replicationpb.NamespaceReplicationConfiguration{
ActiveClusterName: nextActiveClusterName,
Clusters: clusters,
}, replicationConfig)
s.Equal(s.ClusterMetadata.GetNextFailoverVersion(
nextActiveClusterName,
s.ClusterMetadata.GetNextFailoverVersion(prevActiveClusterName, 0),
), failoverVersion)
s.Equal(isGlobalNamespace, isGlobalNamespace)
}
s.mockProducer.On("Publish", mock.Anything).Return(nil).Once()
updateResp, err := s.handler.UpdateNamespace(context.Background(), &workflowservice.UpdateNamespaceRequest{
Name: namespace,
ReplicationConfiguration: &replicationpb.NamespaceReplicationConfiguration{
ActiveClusterName: s.ClusterMetadata.GetCurrentClusterName(),
},
})
s.NoError(err)
fnTest(
updateResp.NamespaceInfo,
updateResp.Configuration,
updateResp.ReplicationConfiguration,
updateResp.GetIsGlobalNamespace(),
updateResp.GetFailoverVersion(),
)
getResp, err := s.handler.DescribeNamespace(context.Background(), &workflowservice.DescribeNamespaceRequest{
Name: namespace,
})
s.NoError(err)
fnTest(
getResp.NamespaceInfo,
getResp.Configuration,
getResp.ReplicationConfiguration,
getResp.GetIsGlobalNamespace(),
getResp.GetFailoverVersion(),
)
}
func (s *namespaceHandlerGlobalNamespaceEnabledNotMasterClusterSuite) getRandomNamespace() string {
return "namespace" + uuid.New()
}
| 1 | 9,609 | Will go fmt before squash. | temporalio-temporal | go |
@@ -148,6 +148,7 @@ func DefaultConfig() Config {
DockerGraphPath: "/var/lib/docker",
ReservedMemory: 0,
AvailableLoggingDrivers: []dockerclient.LoggingDriver{dockerclient.JsonFileDriver},
+ PrivilegedCapable: true,
}
}
| 1 | // Copyright 2014-2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may
// not use this file except in compliance with the License. A copy of the
// License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed
// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
// express or implied. See the License for the specific language governing
// permissions and limitations under the License.
package config
import (
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"reflect"
"strconv"
"strings"
"github.com/aws/amazon-ecs-agent/agent/ec2"
"github.com/aws/amazon-ecs-agent/agent/engine/dockerclient"
"github.com/aws/amazon-ecs-agent/agent/logger"
"github.com/aws/amazon-ecs-agent/agent/utils"
)
var log = logger.ForModule("config")
const (
// http://www.iana.org/assignments/service-names-port-numbers/service-names-port-numbers.xhtml?search=docker
DOCKER_RESERVED_PORT = 2375
DOCKER_RESERVED_SSL_PORT = 2376
SSH_PORT = 22
AGENT_INTROSPECTION_PORT = 51678
DEFAULT_CLUSTER_NAME = "default"
)
// Merge merges two configs, preferring the values already set on the left.
// Any nil or zero values present on the left are overridden by the
// corresponding values from the right.
func (lhs *Config) Merge(rhs Config) *Config {
left := reflect.ValueOf(lhs).Elem()
right := reflect.ValueOf(&rhs).Elem()
for i := 0; i < left.NumField(); i++ {
leftField := left.Field(i)
if utils.ZeroOrNil(leftField.Interface()) {
leftField.Set(reflect.ValueOf(right.Field(i).Interface()))
}
}
return lhs //make it chainable
}
// complete returns true if all fields of the config are populated / nonzero
func (cfg *Config) complete() bool {
cfgElem := reflect.ValueOf(cfg).Elem()
for i := 0; i < cfgElem.NumField(); i++ {
if utils.ZeroOrNil(cfgElem.Field(i).Interface()) {
return false
}
}
return true
}
// checkMissingAndDeprecated checks all zero-valued fields for tags of the form
// missing:STRING and acts based on that string. Current options are: fatal,
// warn. Fatal will result in an error being returned, warn will result in a
// warning that the field is missing being logged.
func (cfg *Config) checkMissingAndDeprecated() error {
cfgElem := reflect.ValueOf(cfg).Elem()
cfgStructField := reflect.Indirect(reflect.ValueOf(cfg)).Type()
fatalFields := []string{}
for i := 0; i < cfgElem.NumField(); i++ {
cfgField := cfgElem.Field(i)
if utils.ZeroOrNil(cfgField.Interface()) {
missingTag := cfgStructField.Field(i).Tag.Get("missing")
if len(missingTag) == 0 {
continue
}
switch missingTag {
case "warn":
log.Warn("Configuration key not set", "key", cfgStructField.Field(i).Name)
case "fatal":
log.Crit("Configuration key not set", "key", cfgStructField.Field(i).Name)
fatalFields = append(fatalFields, cfgStructField.Field(i).Name)
default:
log.Warn("Unexpected `missing` tag value", "tag", missingTag)
}
} else {
// present
deprecatedTag := cfgStructField.Field(i).Tag.Get("deprecated")
if len(deprecatedTag) == 0 {
continue
}
log.Warn("Use of deprecated configuration key", "key", cfgStructField.Field(i).Name, "message", deprecatedTag)
}
}
if len(fatalFields) > 0 {
return errors.New("Missing required fields: " + strings.Join(fatalFields, ", "))
}
return nil
}
// trimWhitespace trims whitespace from all string config values with the
// `trim` tag
func (cfg *Config) trimWhitespace() {
cfgElem := reflect.ValueOf(cfg).Elem()
cfgStructField := reflect.Indirect(reflect.ValueOf(cfg)).Type()
for i := 0; i < cfgElem.NumField(); i++ {
cfgField := cfgElem.Field(i)
if !cfgField.CanInterface() {
continue
}
trimTag := cfgStructField.Field(i).Tag.Get("trim")
if len(trimTag) == 0 {
continue
}
if cfgField.Kind() != reflect.String {
log.Warn("Cannot trim non-string field", "type", cfgField.Kind().String(), "index", i)
continue
}
str := cfgField.Interface().(string)
cfgField.SetString(strings.TrimSpace(str))
}
}
func DefaultConfig() Config {
return Config{
DockerEndpoint: "unix:///var/run/docker.sock",
ReservedPorts: []uint16{SSH_PORT, DOCKER_RESERVED_PORT, DOCKER_RESERVED_SSL_PORT, AGENT_INTROSPECTION_PORT},
ReservedPortsUDP: []uint16{},
DataDir: "/data/",
DisableMetrics: false,
DockerGraphPath: "/var/lib/docker",
ReservedMemory: 0,
AvailableLoggingDrivers: []dockerclient.LoggingDriver{dockerclient.JsonFileDriver},
}
}
func FileConfig() Config {
config_file := utils.DefaultIfBlank(os.Getenv("ECS_AGENT_CONFIG_FILE_PATH"), "/etc/ecs_container_agent/config.json")
file, err := os.Open(config_file)
if err != nil {
return Config{}
}
data, err := ioutil.ReadAll(file)
if err != nil {
log.Error("Unable to read config file", "err", err)
return Config{}
}
if strings.TrimSpace(string(data)) == "" {
// empty file, not an error
return Config{}
}
config := Config{}
err = json.Unmarshal(data, &config)
if err != nil {
log.Error("Error reading config json data", "err", err)
}
// Handle any deprecated keys correctly here
if utils.ZeroOrNil(config.Cluster) && !utils.ZeroOrNil(config.ClusterArn) {
config.Cluster = config.ClusterArn
}
return config
}
// EnvironmentConfig reads the given configs from the environment and attempts
// to convert them to the given type
func EnvironmentConfig() Config {
endpoint := os.Getenv("ECS_BACKEND_HOST")
clusterRef := os.Getenv("ECS_CLUSTER")
awsRegion := os.Getenv("AWS_DEFAULT_REGION")
dockerEndpoint := os.Getenv("DOCKER_HOST")
engineAuthType := os.Getenv("ECS_ENGINE_AUTH_TYPE")
engineAuthData := os.Getenv("ECS_ENGINE_AUTH_DATA")
var checkpoint bool
dataDir := os.Getenv("ECS_DATADIR")
if dataDir != "" {
// if we have a directory to checkpoint to, default it to be on
checkpoint = utils.ParseBool(os.Getenv("ECS_CHECKPOINT"), true)
} else {
// if the directory is not set, default to checkpointing off for
// backwards compatibility
checkpoint = utils.ParseBool(os.Getenv("ECS_CHECKPOINT"), false)
}
// Format: json array, e.g. [1,2,3]
reservedPortEnv := os.Getenv("ECS_RESERVED_PORTS")
portDecoder := json.NewDecoder(strings.NewReader(reservedPortEnv))
var reservedPorts []uint16
err := portDecoder.Decode(&reservedPorts)
	// EOF means the string was blank as opposed to UnexpectedEOF, which means an
// invalid parse
// Blank is not a warning; we have sane defaults
if err != io.EOF && err != nil {
log.Warn("Invalid format for \"ECS_RESERVED_PORTS\" environment variable; expected a JSON array like [1,2,3].", "err", err)
}
reservedPortUDPEnv := os.Getenv("ECS_RESERVED_PORTS_UDP")
portDecoderUDP := json.NewDecoder(strings.NewReader(reservedPortUDPEnv))
var reservedPortsUDP []uint16
err = portDecoderUDP.Decode(&reservedPortsUDP)
	// EOF means the string was blank as opposed to UnexpectedEOF, which means an
// invalid parse
// Blank is not a warning; we have sane defaults
if err != io.EOF && err != nil {
log.Warn("Invalid format for \"ECS_RESERVED_PORTS_UDP\" environment variable; expected a JSON array like [1,2,3].", "err", err)
}
updateDownloadDir := os.Getenv("ECS_UPDATE_DOWNLOAD_DIR")
updatesEnabled := utils.ParseBool(os.Getenv("ECS_UPDATES_ENABLED"), false)
disableMetrics := utils.ParseBool(os.Getenv("ECS_DISABLE_METRICS"), false)
dockerGraphPath := os.Getenv("ECS_DOCKER_GRAPHPATH")
reservedMemoryEnv := os.Getenv("ECS_RESERVED_MEMORY")
var reservedMemory64 uint64
var reservedMemory uint16
if reservedMemoryEnv == "" {
reservedMemory = 0
} else {
reservedMemory64, err = strconv.ParseUint(reservedMemoryEnv, 10, 16)
if err != nil {
log.Warn("Invalid format for \"ECS_RESERVED_MEMORY\" environment variable; expected unsigned integer.", "err", err)
reservedMemory = 0
} else {
reservedMemory = uint16(reservedMemory64)
}
}
availableLoggingDriversEnv := os.Getenv("ECS_AVAILABLE_LOGGING_DRIVERS")
loggingDriverDecoder := json.NewDecoder(strings.NewReader(availableLoggingDriversEnv))
var availableLoggingDrivers []dockerclient.LoggingDriver
err = loggingDriverDecoder.Decode(&availableLoggingDrivers)
	// EOF means the string was blank as opposed to UnexpectedEOF, which means an
// invalid parse
// Blank is not a warning; we have sane defaults
if err != io.EOF && err != nil {
log.Warn("Invalid format for \"ECS_AVAILABLE_LOGGING_DRIVERS\" environment variable; expected a JSON array like [\"json-file\",\"syslog\"].", "err", err)
}
privilegedDisabled := utils.ParseBool(os.Getenv("ECS_DISABLE_PRIVILEGED"), false)
seLinuxCapable := utils.ParseBool(os.Getenv("ECS_SELINUX_CAPABLE"), false)
appArmorCapable := utils.ParseBool(os.Getenv("ECS_APPARMOR_CAPABLE"), false)
return Config{
Cluster: clusterRef,
APIEndpoint: endpoint,
AWSRegion: awsRegion,
DockerEndpoint: dockerEndpoint,
ReservedPorts: reservedPorts,
ReservedPortsUDP: reservedPortsUDP,
DataDir: dataDir,
Checkpoint: checkpoint,
EngineAuthType: engineAuthType,
EngineAuthData: NewSensitiveRawMessage([]byte(engineAuthData)),
UpdatesEnabled: updatesEnabled,
UpdateDownloadDir: updateDownloadDir,
DisableMetrics: disableMetrics,
DockerGraphPath: dockerGraphPath,
ReservedMemory: reservedMemory,
AvailableLoggingDrivers: availableLoggingDrivers,
PrivilegedDisabled: privilegedDisabled,
SELinuxCapable: seLinuxCapable,
AppArmorCapable: appArmorCapable,
}
}
var ec2MetadataClient = ec2.DefaultClient
func EC2MetadataConfig() Config {
iid, err := ec2MetadataClient.InstanceIdentityDocument()
if err != nil {
log.Crit("Unable to communicate with EC2 Metadata service to infer region: " + err.Error())
return Config{}
}
return Config{AWSRegion: iid.Region}
}
// NewConfig returns a config struct created by merging environment variables,
// a config file, and EC2 Metadata info.
// The 'config' struct it returns can be used, even if an error is returned. An
// error is returned, however, if the config is incomplete in some way that is
// considered fatal.
func NewConfig() (config *Config, err error) {
ctmp := EnvironmentConfig() //Environment overrides all else
config = &ctmp
defer func() {
config.trimWhitespace()
err = config.validate()
config.Merge(DefaultConfig())
}()
if config.complete() {
// No need to do file / network IO
return config, nil
}
config.Merge(FileConfig())
if config.AWSRegion == "" {
// Get it from metadata only if we need to (network io)
config.Merge(EC2MetadataConfig())
}
return config, err
}
// validate performs validation over members of the Config struct
func (config *Config) validate() error {
	err := config.checkMissingAndDeprecated()
if err != nil {
return err
}
var badDrivers []string
for _, driver := range config.AvailableLoggingDrivers {
_, ok := dockerclient.LoggingDriverMinimumVersion[driver]
if !ok {
badDrivers = append(badDrivers, string(driver))
}
}
if len(badDrivers) > 0 {
return errors.New("Invalid logging drivers: " + strings.Join(badDrivers, ", "))
}
return nil
}
// String returns a lossy string representation of the config suitable for human readable display.
// Consequently, it *should not* return any sensitive information.
func (config *Config) String() string {
return fmt.Sprintf("Cluster: %v, Region: %v, DataDir: %v, Checkpoint: %v, AuthType: %v, UpdatesEnabled: %v, DisableMetrics: %v, ReservedMem: %v", config.Cluster, config.AWSRegion, config.DataDir, config.Checkpoint, config.EngineAuthType, config.UpdatesEnabled, config.DisableMetrics, config.ReservedMemory)
}
| 1 | 13,631 | Setting this to `true` means that it will always get merged in. The way merging is done here is that if a value is its zero value (`false` for `bool`), the value is considered unchanged. In order for this to work, you'll need to change this to be a `*bool` type instead. | aws-amazon-ecs-agent | go |
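The review comment above relies on how `Config.Merge` in this file treats zero values: an explicit `false` on a plain `bool` field is indistinguishable from "unset", so a default of `true` would always replace it. The sketch below is illustrative only; the struct, the pointer field name, and the simplified `zeroOrNil`/`merge` helpers are stand-ins for the agent's real `Config`, `utils.ZeroOrNil` and `Config.Merge`, not actual agent code. It shows why switching the field to `*bool` lets an explicit `false` survive the merge.

```go
// Illustrative sketch only: simplified stand-ins for the agent's Config,
// utils.ZeroOrNil and Config.Merge, contrasting bool and *bool fields
// under zero-value based merging.
package main

import (
	"fmt"
	"reflect"
)

type cfg struct {
	PrivilegedCapable    bool  // false is the zero value, so "explicitly false" looks unset
	PrivilegedCapablePtr *bool // nil means unset; &false is an explicit value
}

// zeroOrNil mimics utils.ZeroOrNil: true when v equals the zero value of its type.
func zeroOrNil(v interface{}) bool {
	return v == nil || reflect.DeepEqual(v, reflect.Zero(reflect.TypeOf(v)).Interface())
}

// merge mimics Config.Merge: copy right's field into left wherever left is zero/nil.
func merge(left, right *cfg) {
	l := reflect.ValueOf(left).Elem()
	r := reflect.ValueOf(right).Elem()
	for i := 0; i < l.NumField(); i++ {
		if zeroOrNil(l.Field(i).Interface()) {
			l.Field(i).Set(r.Field(i))
		}
	}
}

func main() {
	f := false
	user := cfg{PrivilegedCapable: false, PrivilegedCapablePtr: &f} // user explicitly disables
	defaults := cfg{PrivilegedCapable: true, PrivilegedCapablePtr: nil}

	merge(&user, &defaults)

	// The plain bool is silently overridden by the default; the pointer field is not.
	fmt.Println(user.PrivilegedCapable, *user.PrivilegedCapablePtr) // prints: true false
}
```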
@@ -101,7 +101,6 @@ func newDefaultBootstrapConfig() *BootstrapConfig {
// MiningConfig holds all configuration options related to mining.
type MiningConfig struct {
MinerAddress address.Address `json:"minerAddress"`
- BlockSignerAddress address.Address `json:"blockSignerAddress"`
AutoSealIntervalSeconds uint `json:"autoSealIntervalSeconds"`
StoragePrice *types.AttoFIL `json:"storagePrice"`
} | 1 | package config
import (
"encoding/json"
"fmt"
"io/ioutil"
"os"
"reflect"
"regexp"
"strings"
"gx/ipfs/QmVmDhyTTUcQXFD1rRQ64fGLMSAoaQvNH3hwuaCFAPq2hy/errors"
"github.com/filecoin-project/go-filecoin/address"
"github.com/filecoin-project/go-filecoin/types"
)
// Config is an in memory representation of the filecoin configuration file
type Config struct {
API *APIConfig `json:"api"`
Bootstrap *BootstrapConfig `json:"bootstrap"`
Datastore *DatastoreConfig `json:"datastore"`
Swarm *SwarmConfig `json:"swarm"`
Mining *MiningConfig `json:"mining"`
Wallet *WalletConfig `json:"wallet"`
Heartbeat *HeartbeatConfig `json:"heartbeat"`
}
// APIConfig holds all configuration options related to the api.
type APIConfig struct {
Address string `json:"address"`
AccessControlAllowOrigin []string `json:"accessControlAllowOrigin"`
AccessControlAllowCredentials bool `json:"accessControlAllowCredentials"`
AccessControlAllowMethods []string `json:"accessControlAllowMethods"`
}
func newDefaultAPIConfig() *APIConfig {
return &APIConfig{
Address: "/ip4/127.0.0.1/tcp/3453",
AccessControlAllowOrigin: []string{
"http://localhost:8080",
"https://localhost:8080",
"http://127.0.0.1:8080",
"https://127.0.0.1:8080",
},
AccessControlAllowMethods: []string{"GET", "POST", "PUT"},
}
}
// DatastoreConfig holds all the configuration options for the datastore.
// TODO: use the advanced datastore configuration from ipfs
type DatastoreConfig struct {
Type string `json:"type"`
Path string `json:"path"`
}
// Validators hold the list of validation functions for each configuration
// property. Validators must take a key and json string respectively as
// arguments, and must return either an error or nil depending on whether or not
// the given key and value are valid. Validators will only be run if a property
// being set matches the name given in this map.
var Validators = map[string]func(string, string) error{
"heartbeat.nickname": validateLettersOnly,
}
func newDefaultDatastoreConfig() *DatastoreConfig {
return &DatastoreConfig{
Type: "badgerds",
Path: "badger",
}
}
// SwarmConfig holds all configuration options related to the swarm.
type SwarmConfig struct {
Address string `json:"address"`
PublicRelayAddress string `json:"public_relay_address,omitempty"`
}
func newDefaultSwarmConfig() *SwarmConfig {
return &SwarmConfig{
Address: "/ip4/0.0.0.0/tcp/6000",
}
}
// BootstrapConfig holds all configuration options related to bootstrap nodes
type BootstrapConfig struct {
Addresses []string `json:"addresses"`
MinPeerThreshold int `json:"minPeerThreshold"`
Period string `json:"period,omitempty"`
}
// TODO: provide bootstrap node addresses
func newDefaultBootstrapConfig() *BootstrapConfig {
return &BootstrapConfig{
Addresses: []string{},
		MinPeerThreshold: 0, // TODO: we don't actually have any bootstrap peers yet.
Period: "1m",
}
}
// MiningConfig holds all configuration options related to mining.
type MiningConfig struct {
MinerAddress address.Address `json:"minerAddress"`
BlockSignerAddress address.Address `json:"blockSignerAddress"`
AutoSealIntervalSeconds uint `json:"autoSealIntervalSeconds"`
StoragePrice *types.AttoFIL `json:"storagePrice"`
}
func newDefaultMiningConfig() *MiningConfig {
return &MiningConfig{
MinerAddress: address.Address{},
AutoSealIntervalSeconds: 120,
StoragePrice: types.NewZeroAttoFIL(),
}
}
// WalletConfig holds all configuration options related to the wallet.
type WalletConfig struct {
DefaultAddress address.Address `json:"defaultAddress,omitempty"`
}
func newDefaultWalletConfig() *WalletConfig {
return &WalletConfig{
DefaultAddress: address.Address{},
}
}
// HeartbeatConfig holds all configuration options related to node heartbeat.
type HeartbeatConfig struct {
// BeatTarget represents the address the filecoin node will send heartbeats to.
BeatTarget string `json:"beatTarget"`
// BeatPeriod represents how frequently heartbeats are sent.
// Golang duration units are accepted.
BeatPeriod string `json:"beatPeriod"`
// ReconnectPeriod represents how long the node waits before attempting to reconnect.
// Golang duration units are accepted.
ReconnectPeriod string `json:"reconnectPeriod"`
// Nickname represents the nickname of the filecoin node,
Nickname string `json:"nickname"`
}
func newDefaultHeartbeatConfig() *HeartbeatConfig {
return &HeartbeatConfig{
BeatTarget: "",
BeatPeriod: "3s",
ReconnectPeriod: "10s",
Nickname: "",
}
}
// NewDefaultConfig returns a config object with all the fields filled out to
// their default values
func NewDefaultConfig() *Config {
return &Config{
API: newDefaultAPIConfig(),
Bootstrap: newDefaultBootstrapConfig(),
Datastore: newDefaultDatastoreConfig(),
Swarm: newDefaultSwarmConfig(),
Mining: newDefaultMiningConfig(),
Wallet: newDefaultWalletConfig(),
Heartbeat: newDefaultHeartbeatConfig(),
}
}
// WriteFile writes the config to the given filepath.
func (cfg *Config) WriteFile(file string) error {
f, err := os.OpenFile(file, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644)
if err != nil {
return err
}
defer f.Close() // nolint: errcheck
configString, err := json.MarshalIndent(*cfg, "", "\t")
if err != nil {
return err
}
_, err = fmt.Fprint(f, string(configString))
return err
}
// ReadFile reads a config file from disk.
func ReadFile(file string) (*Config, error) {
f, err := os.Open(file)
if err != nil {
return nil, err
}
cfg := NewDefaultConfig()
rawConfig, err := ioutil.ReadAll(f)
if err != nil {
return nil, err
}
if len(rawConfig) == 0 {
return cfg, nil
}
err = json.Unmarshal(rawConfig, &cfg)
if err != nil {
return nil, err
}
return cfg, nil
}
// Set sets the config sub-struct referenced by `key`, e.g. 'api.address'
// or 'datastore' to the json key value pair encoded in jsonVal.
func (cfg *Config) Set(dottedKey string, jsonString string) error {
if !json.Valid([]byte(jsonString)) {
jsonBytes, _ := json.Marshal(jsonString)
jsonString = string(jsonBytes)
}
if err := validate(dottedKey, jsonString); err != nil {
return err
}
keys := strings.Split(dottedKey, ".")
for i := len(keys) - 1; i >= 0; i-- {
jsonString = fmt.Sprintf(`{ "%s": %s }`, keys[i], jsonString)
}
decoder := json.NewDecoder(strings.NewReader(jsonString))
decoder.DisallowUnknownFields()
return decoder.Decode(&cfg)
}
// Get gets the config sub-struct referenced by `key`, e.g. 'api.address'
func (cfg *Config) Get(key string) (interface{}, error) {
v := reflect.Indirect(reflect.ValueOf(cfg))
keyTags := strings.Split(key, ".")
OUTER:
for j, keyTag := range keyTags {
if v.Type().Kind() == reflect.Struct {
for i := 0; i < v.NumField(); i++ {
jsonTag := strings.Split(
v.Type().Field(i).Tag.Get("json"),
",")[0]
if jsonTag == keyTag {
v = v.Field(i)
if j == len(keyTags)-1 {
return v.Interface(), nil
}
v = reflect.Indirect(v) // only attempt one dereference
continue OUTER
}
}
}
return nil, fmt.Errorf("key: %s invalid for config", key)
}
// Cannot get here as len(strings.Split(s, sep)) >= 1 with non-empty sep
return nil, fmt.Errorf("empty key is invalid")
}
// validate runs validations on a given key and json string. validate uses the
// validators map defined at the top of this file to determine which validations
// to use for each key.
func validate(dottedKey string, jsonString string) error {
var obj interface{}
if err := json.Unmarshal([]byte(jsonString), &obj); err != nil {
return err
}
// recursively validate sub-keys by partially unmarshalling
if reflect.ValueOf(obj).Kind() == reflect.Map {
var obj map[string]json.RawMessage
if err := json.Unmarshal([]byte(jsonString), &obj); err != nil {
return err
}
for key := range obj {
if err := validate(dottedKey+"."+key, string(obj[key])); err != nil {
return err
}
}
return nil
}
if validationFunc, present := Validators[dottedKey]; present {
return validationFunc(dottedKey, jsonString)
}
return nil
}
// validateLettersOnly validates that a given value contains only letters. If it
// does not, an error is returned using the given key for the message.
func validateLettersOnly(key string, value string) error {
if match, _ := regexp.MatchString("^\"[a-zA-Z]+\"$", value); !match {
return errors.Errorf(`"%s" must only contain letters`, key)
}
return nil
}
| 1 | 17,328 | It was decided that blockSignerAddress is not only redundant (use the miner owner public key instead which is already stored), but does not belong in config. | filecoin-project-venus | go |
@@ -70,6 +70,7 @@ setup(
'tenacity>=5.1.1',
'tqdm>=4.32',
'requests_futures==1.0.0',
+ 'jsonschema==3.*',
],
extras_require={
'pyarrow': [ | 1 | import os
import sys
from pathlib import Path
from setuptools import find_packages, setup
from setuptools.command.install import install
VERSION = Path(Path(__file__).parent, "quilt3", "VERSION").read_text().strip()
def readme():
readme_short = """
Quilt manages data like code (with packages, repositories, browsing and
revision history) so that teams can experiment faster in machine learning,
biotech, and other data-driven domains.
The `quilt3` PyPi package allows you to build, push, and install data packages.
Visit the `documentation quickstart <https://docs.quiltdata.com/quickstart>`_
to learn more.
"""
return readme_short
class VerifyVersionCommand(install):
"""Custom command to verify that the git tag matches our version"""
description = 'verify that the git tag matches our version'
def run(self):
tag = os.getenv('CIRCLE_TAG')
if tag != VERSION:
info = "Git tag: {0} does not match the version of this app: {1}".format(
tag, VERSION
)
sys.exit(info)
setup(
name="quilt3",
version=VERSION,
packages=find_packages(),
description='Quilt: where data comes together',
long_description=readme(),
python_requires='>=3.6',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
],
author='quiltdata',
author_email='[email protected]',
license='Apache-2.0',
url='https://github.com/quiltdata/quilt',
keywords='',
install_requires=[
'appdirs>=1.4.0',
'aws-requests-auth>=0.4.2',
'boto3>=1.10.0',
'dnspython>=1.16.0',
'flask',
'flask_cors',
'flask_json',
'jsonlines==1.2.0',
'packaging>=16.8',
'PyYAML>=5.1',
'requests>=2.12.4',
'tenacity>=5.1.1',
'tqdm>=4.32',
'requests_futures==1.0.0',
],
extras_require={
'pyarrow': [
'numpy>=1.14.0', # required by pandas, but missing from its dependencies.
'pandas>=0.19.2',
'pyarrow>=0.14.1', # as of 7/5/19: linux/circleci bugs on 0.14.0
],
'tests': [
'numpy>=1.14.0', # required by pandas, but missing from its dependencies.
'pandas>=0.19.2',
'pyarrow>=0.14.1', # as of 7/5/19: linux/circleci bugs on 0.14.0
'pytest<5.1.0', # TODO: Fix pytest.ensuretemp in conftest.py
'pytest-cov',
'pytest-env',
'responses',
'tox',
'detox',
'tox-pytest-summary',
'git-pylint-commit-hook',
],
},
include_package_data=True,
entry_points={
'console_scripts': ['quilt3=quilt3.main:main'],
},
cmdclass={
'verify': VerifyVersionCommand,
}
)
| 1 | 19,614 | are we not asking for trouble here by not pinning this? or does 3.* imply all of the draft versions we'd try to validate? | quiltdata-quilt | py |
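On the question above: under PEP 440, `jsonschema==3.*` is a wildcard specifier that matches any 3.x release rather than pinning one exact version (in practice roughly `>=3,<4`); it constrains the library version only and says nothing by itself about which JSON Schema drafts the installed release validates. The snippet below is purely illustrative, not a proposed change to quilt3's setup.py; it just contrasts the specifier styles.

```python
# Illustrative comparison of PEP 440 specifier styles for the jsonschema
# dependency; not a change to quilt3's setup.py.
install_requires = [
    'jsonschema==3.*',     # wildcard: any 3.x release (roughly >=3,<4)
    # 'jsonschema>=3,<4',  # explicit range with the same practical effect
    # 'jsonschema==3.2.0', # exact pin to one release
]
```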
@@ -39,10 +39,8 @@ class TemporalMemoryCompatibilityTest(unittest.TestCase):
results1 = createAndRunNetwork(TPRegion,
"bottomUpOut",
checkpointMidway=False,
- temporalImp="tm_py")
+ temporalImp="tm_cpp")
- # temporalImp="tm_py" here is a temporary placeholder value until C++ TM is
- # finished, at which point it should be changed to "cpp"
results2 = createAndRunNetwork(TPRegion,
"bottomUpOut",
checkpointMidway=False, | 1 | #!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2016, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import json
import unittest
import numpy
from nupic.regions.TPRegion import TPRegion
from network_creation_common import createAndRunNetwork
class TemporalMemoryCompatibilityTest(unittest.TestCase):
def testTMPyCpp(self):
"""
    Test compatibility between the C++ and Python TM implementations.
"""
results1 = createAndRunNetwork(TPRegion,
"bottomUpOut",
checkpointMidway=False,
temporalImp="tm_py")
# temporalImp="tm_py" here is a temporary placeholder value until C++ TM is
# finished, at which point it should be changed to "cpp"
results2 = createAndRunNetwork(TPRegion,
"bottomUpOut",
checkpointMidway=False,
temporalImp="tm_py")
self.compareArrayResults(results1, results2)
def compareArrayResults(self, results1, results2):
self.assertEqual(len(results1), len(results2))
for i in xrange(len(results1)):
result1 = list(results1[i].nonzero()[0])
result2 = list(results2[i].nonzero()[0])
self.assertEqual(result1, result2,
"Row {0} not equal: {1} vs. {2}".format(i, result1, result2))
if __name__ == "__main__":
unittest.main()
| 1 | 20,585 | No, we want to compare `tm_py` and `tm_cpp` in this test. | numenta-nupic | py |
@@ -47,6 +47,9 @@ const (
// TODO(liu-cong) configurable timeout
decoupleSinkTimeout = 30 * time.Second
+ // Limit for request payload in bytes (100Mb)
+ maxRequestBodyBytes = 100000000
+
// EventArrivalTime is used to access the metadata stored on a
// CloudEvent to measure the time difference between when an event is
// received on a broker and before it is dispatched to the trigger function. | 1 | /*
Copyright 2020 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package ingress
import (
"context"
"errors"
nethttp "net/http"
"time"
cev2 "github.com/cloudevents/sdk-go/v2"
"github.com/cloudevents/sdk-go/v2/binding"
"github.com/cloudevents/sdk-go/v2/binding/transformer"
ceclient "github.com/cloudevents/sdk-go/v2/client"
"github.com/cloudevents/sdk-go/v2/protocol"
"github.com/cloudevents/sdk-go/v2/protocol/http"
"github.com/google/knative-gcp/pkg/logging"
"github.com/google/knative-gcp/pkg/metrics"
"github.com/google/knative-gcp/pkg/tracing"
"github.com/google/knative-gcp/pkg/utils/clients"
"github.com/google/wire"
"go.opencensus.io/trace"
"go.uber.org/zap"
"google.golang.org/api/support/bundler"
grpccode "google.golang.org/grpc/codes"
grpcstatus "google.golang.org/grpc/status"
"k8s.io/apimachinery/pkg/types"
"knative.dev/eventing/pkg/kncloudevents"
kntracing "knative.dev/eventing/pkg/tracing"
)
const (
// TODO(liu-cong) configurable timeout
decoupleSinkTimeout = 30 * time.Second
// EventArrivalTime is used to access the metadata stored on a
// CloudEvent to measure the time difference between when an event is
	// received on a broker and when it is dispatched to the trigger function.
// The format is an RFC3339 time in string format. For example: 2019-08-26T23:38:17.834384404Z.
EventArrivalTime = "knativearrivaltime"
// For probes.
	healthCheckPath = "/healthz"
// for permission denied error msg
// TODO(cathyzhyi) point to official doc rather than github doc
deniedErrMsg string = `Failed to publish to PubSub because permission denied.
Please refer to "Configure the Authentication Mechanism for GCP" at https://github.com/google/knative-gcp/blob/master/docs/install/install-gcp-broker.md`
)
// HandlerSet provides a handler with a real HTTPMessageReceiver and pubsub MultiTopicDecoupleSink.
var HandlerSet wire.ProviderSet = wire.NewSet(
NewHandler,
clients.NewHTTPMessageReceiver,
wire.Bind(new(HttpMessageReceiver), new(*kncloudevents.HttpMessageReceiver)),
NewMultiTopicDecoupleSink,
wire.Bind(new(DecoupleSink), new(*multiTopicDecoupleSink)),
clients.NewPubsubClient,
metrics.NewIngressReporter,
)
// DecoupleSink is an interface to send events to a decoupling sink (e.g., pubsub).
type DecoupleSink interface {
// Send sends the event from a broker to the corresponding decoupling sink.
Send(ctx context.Context, broker types.NamespacedName, event cev2.Event) protocol.Result
}
// HttpMessageReceiver is an interface to listen on http requests.
type HttpMessageReceiver interface {
StartListen(ctx context.Context, handler nethttp.Handler) error
}
// Handler receives events and persists them to storage (pubsub).
type Handler struct {
// httpReceiver is an HTTP server to receive events.
httpReceiver HttpMessageReceiver
// decouple is the client to send events to a decouple sink.
decouple DecoupleSink
logger *zap.Logger
reporter *metrics.IngressReporter
}
// NewHandler creates a new ingress handler.
func NewHandler(ctx context.Context, httpReceiver HttpMessageReceiver, decouple DecoupleSink, reporter *metrics.IngressReporter) *Handler {
return &Handler{
httpReceiver: httpReceiver,
decouple: decouple,
reporter: reporter,
logger: logging.FromContext(ctx),
}
}
// Start blocks to receive events over HTTP.
func (h *Handler) Start(ctx context.Context) error {
return h.httpReceiver.StartListen(ctx, h)
}
// ServeHTTP implements net/http Handler interface method.
// 1. Performs basic validation of the request.
// 2. Parse request URL to get namespace and broker.
// 3. Convert request to event.
// 4. Send event to decouple sink.
func (h *Handler) ServeHTTP(response nethttp.ResponseWriter, request *nethttp.Request) {
	if request.URL.Path == healthCheckPath {
response.WriteHeader(nethttp.StatusOK)
return
}
ctx := request.Context()
ctx = logging.WithLogger(ctx, h.logger)
ctx = tracing.WithLogging(ctx, trace.FromContext(ctx))
logging.FromContext(ctx).Debug("Serving http", zap.Any("headers", request.Header))
if request.Method != nethttp.MethodPost {
response.WriteHeader(nethttp.StatusMethodNotAllowed)
return
}
broker, err := ConvertPathToNamespacedName(request.URL.Path)
ctx = logging.With(ctx, zap.Stringer("broker", broker))
if err != nil {
logging.FromContext(ctx).Debug("Malformed request path", zap.String("path", request.URL.Path))
nethttp.Error(response, err.Error(), nethttp.StatusNotFound)
return
}
event, err := h.toEvent(ctx, request)
if err != nil {
nethttp.Error(response, err.Error(), nethttp.StatusBadRequest)
return
}
event.SetExtension(EventArrivalTime, cev2.Timestamp{Time: time.Now()})
span := trace.FromContext(ctx)
span.SetName(kntracing.BrokerMessagingDestination(broker))
if span.IsRecordingEvents() {
span.AddAttributes(
append(
ceclient.EventTraceAttributes(event),
kntracing.MessagingSystemAttribute,
tracing.PubSubProtocolAttribute,
kntracing.BrokerMessagingDestinationAttribute(broker),
kntracing.MessagingMessageIDAttribute(event.ID()),
)...,
)
}
// Optimistically set status code to StatusAccepted. It will be updated if there is an error.
// According to the data plane spec (https://github.com/knative/eventing/blob/master/docs/spec/data-plane.md), a
// non-callable SINK (which broker is) MUST respond with 202 Accepted if the request is accepted.
statusCode := nethttp.StatusAccepted
ctx, cancel := context.WithTimeout(ctx, decoupleSinkTimeout)
defer cancel()
defer func() { h.reportMetrics(request.Context(), broker, event, statusCode) }()
if res := h.decouple.Send(ctx, broker, *event); !cev2.IsACK(res) {
logging.FromContext(ctx).Error("Error publishing to PubSub", zap.Error(res))
statusCode = nethttp.StatusInternalServerError
switch {
case errors.Is(res, ErrNotFound):
statusCode = nethttp.StatusNotFound
case errors.Is(res, ErrNotReady):
statusCode = nethttp.StatusServiceUnavailable
case errors.Is(res, bundler.ErrOverflow):
statusCode = nethttp.StatusTooManyRequests
case grpcstatus.Code(res) == grpccode.PermissionDenied:
nethttp.Error(response, deniedErrMsg, statusCode)
return
}
nethttp.Error(response, "Failed to publish to PubSub", statusCode)
return
}
response.WriteHeader(statusCode)
}
// toEvent converts an http request to an event.
func (h *Handler) toEvent(ctx context.Context, request *nethttp.Request) (*cev2.Event, error) {
message := http.NewMessageFromHttpRequest(request)
defer func() {
if err := message.Finish(nil); err != nil {
logging.FromContext(ctx).Error("Failed to close message", zap.Any("message", message), zap.Error(err))
}
}()
// If encoding is unknown, the message is not an event.
if message.ReadEncoding() == binding.EncodingUnknown {
logging.FromContext(ctx).Debug("Unknown encoding", zap.Any("request", request))
return nil, errors.New("Unknown encoding. Not a cloud event?")
}
event, err := binding.ToEvent(request.Context(), message, transformer.AddTimeNow)
if err != nil {
logging.FromContext(ctx).Error("Failed to convert request to event", zap.Error(err))
return nil, err
}
return event, nil
}
func (h *Handler) reportMetrics(ctx context.Context, broker types.NamespacedName, event *cev2.Event, statusCode int) {
args := metrics.IngressReportArgs{
Namespace: broker.Namespace,
Broker: broker.Name,
EventType: event.Type(),
ResponseCode: statusCode,
}
if err := h.reporter.ReportEventCount(ctx, args); err != nil {
logging.FromContext(ctx).Warn("Failed to record metrics.", zap.Any("broker", broker.Name), zap.Error(err))
}
}
| 1 | 18,202 | Let me know if we'd rather have this as an env variable. | google-knative-gcp | go |
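The review message above suggests making the 100 MB body limit introduced by this patch (`maxRequestBodyBytes`) configurable instead of a constant. Below is a minimal sketch of one way to do that; the environment variable name, the helper function, and the fallback behaviour are assumptions, not knative-gcp code. The resulting value could then be applied in the handler, for example by wrapping the request body with net/http's `MaxBytesReader` before converting it to an event.

```go
// Hypothetical sketch: read the ingress request-body limit from an
// environment variable, falling back to the value proposed in the patch.
// The variable name and helper are assumptions, not knative-gcp code.
package main

import (
	"fmt"
	"os"
	"strconv"
)

// Default mirrors the constant proposed in the patch (100 MB).
const defaultMaxRequestBodyBytes int64 = 100000000

// maxRequestBodyBytesFromEnv returns the configured limit, or the default
// when the variable is unset or not a positive integer.
func maxRequestBodyBytesFromEnv() int64 {
	raw := os.Getenv("MAX_REQUEST_BODY_BYTES")
	if raw == "" {
		return defaultMaxRequestBodyBytes
	}
	n, err := strconv.ParseInt(raw, 10, 64)
	if err != nil || n <= 0 {
		return defaultMaxRequestBodyBytes
	}
	return n
}

func main() {
	// e.g. MAX_REQUEST_BODY_BYTES=10485760 would lower the limit to 10 MiB.
	fmt.Println(maxRequestBodyBytesFromEnv())
}
```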
@@ -80,10 +80,10 @@ func NewVaultServiceAccount(name string) *corev1.ServiceAccount {
}
}
-func NewVaultServiceAccountRole(namespace string) *rbacv1.ClusterRole {
+func NewVaultServiceAccountRole(namespace, serviceAccountName string) *rbacv1.ClusterRole {
return &rbacv1.ClusterRole{
ObjectMeta: metav1.ObjectMeta{
- Name: fmt.Sprintf("auth-delegator:%s:vault", namespace),
+ Name: fmt.Sprintf("auth-delegator:%s:%s", namespace, serviceAccountName),
},
Rules: []rbacv1.PolicyRule{
{ | 1 | /*
Copyright 2020 The cert-manager Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package vault
import (
"context"
"fmt"
"path"
vault "github.com/hashicorp/vault/api"
corev1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
)
const vaultToken = "vault-root-token"
// VaultInitializer holds the state of a configured Vault PKI. We use the same
// Vault server for all tests. PKIs are mounted and unmounted for each test
// scenario that uses them.
type VaultInitializer struct {
client *vault.Client
proxy *proxy
Details
RootMount string
IntermediateMount string
// Whether the intermediate CA should be configured with root CA
ConfigureWithRoot bool
Role string // AppRole auth Role
AppRoleAuthPath string // AppRole auth mount point in Vault
KubernetesAuthPath string // Kubernetes auth mount point in Vault
APIServerURL string // Kubernetes API Server URL
APIServerCA string // Kubernetes API Server CA certificate
}
func NewVaultTokenSecret(name string) *corev1.Secret {
return &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: name,
},
StringData: map[string]string{
"token": vaultToken,
},
}
}
func NewVaultAppRoleSecret(name, secretId string) *corev1.Secret {
return &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
GenerateName: name,
},
StringData: map[string]string{
"secretkey": secretId,
},
}
}
func NewVaultServiceAccount(name string) *corev1.ServiceAccount {
return &corev1.ServiceAccount{
ObjectMeta: metav1.ObjectMeta{
Name: name,
},
}
}
func NewVaultServiceAccountRole(namespace string) *rbacv1.ClusterRole {
return &rbacv1.ClusterRole{
ObjectMeta: metav1.ObjectMeta{
Name: fmt.Sprintf("auth-delegator:%s:vault", namespace),
},
Rules: []rbacv1.PolicyRule{
{
APIGroups: []string{"authentication.k8s.io"},
Resources: []string{"tokenreviews"},
Verbs: []string{"create"},
},
{
APIGroups: []string{"authorization.k8s.io"},
Resources: []string{"subjectaccessreviews"},
Verbs: []string{"create"},
},
},
}
}
func NewVaultServiceAccountClusterRoleBinding(roleName, namespace, subject string) *rbacv1.ClusterRoleBinding {
return &rbacv1.ClusterRoleBinding{
ObjectMeta: metav1.ObjectMeta{
Name: roleName,
},
RoleRef: rbacv1.RoleRef{
APIGroup: "rbac.authorization.k8s.io",
Kind: "ClusterRole",
Name: roleName,
},
Subjects: []rbacv1.Subject{
{
Name: subject,
Kind: "ServiceAccount",
Namespace: namespace,
},
},
}
}
func NewVaultKubernetesSecret(name string, serviceAccountName string) *corev1.Secret {
return &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Annotations: map[string]string{
"kubernetes.io/service-account.name": serviceAccountName,
},
},
Type: "kubernetes.io/service-account-token",
}
}
// Init sets up a new Vault client and a port-forward to the Vault instance.
func (v *VaultInitializer) Init() error {
if v.AppRoleAuthPath == "" {
v.AppRoleAuthPath = "approle"
}
if v.KubernetesAuthPath == "" {
v.KubernetesAuthPath = "kubernetes"
}
v.proxy = newProxy(v.Namespace, v.PodName, v.Kubectl, v.VaultCA)
client, err := v.proxy.init()
if err != nil {
return err
}
v.client = client
return nil
}
// Setup configures a Vault PKI for the tests: a root and an intermediate CA,
// a signing role, and the AppRole/Kubernetes auth backends.
func (v *VaultInitializer) Setup() error {
// Enable a new Vault secrets engine at v.RootMount
if err := v.mountPKI(v.RootMount, "87600h"); err != nil {
return err
}
// Generate a self-signed CA cert using the engine at v.RootMount
rootCa, err := v.generateRootCert()
if err != nil {
return err
}
// Configure issuing certificate endpoints and CRL distribution points to be
// set on certs issued by v.RootMount.
if err := v.configureCert(v.RootMount); err != nil {
return err
}
// Enable a new Vault secrets engine at v.IntermediateMount
if err := v.mountPKI(v.IntermediateMount, "43800h"); err != nil {
return err
}
// Generate a CSR for secrets engine at v.IntermediateMount
csr, err := v.generateIntermediateSigningReq()
if err != nil {
return err
}
// Issue a new intermediate CA from v.RootMount for the CSR created above.
intermediateCa, err := v.signCertificate(csr)
if err != nil {
return err
}
// Set the engine at v.IntermediateMount as an intermediateCA using the cert
// issued by v.RootMount, above and optionally the root CA cert.
caChain := intermediateCa
if v.ConfigureWithRoot {
caChain = fmt.Sprintf("%s\n%s", intermediateCa, rootCa)
}
if err := v.importSignIntermediate(caChain, v.IntermediateMount); err != nil {
return err
}
// Configure issuing certificate endpoints and CRL distribution points to be
// set on certs issued by v.IntermediateMount.
if err := v.configureCert(v.IntermediateMount); err != nil {
return err
}
if err := v.setupRole(); err != nil {
return err
}
if err := v.setupKubernetesBasedAuth(); err != nil {
return err
}
return nil
}
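// Clean unmounts the intermediate and root PKI secrets engines and stops the
// port-forward proxy.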
func (v *VaultInitializer) Clean() error {
if err := v.client.Sys().Unmount("/" + v.IntermediateMount); err != nil {
return fmt.Errorf("Unable to unmount %v: %v", v.IntermediateMount, err)
}
if err := v.client.Sys().Unmount("/" + v.RootMount); err != nil {
return fmt.Errorf("Unable to unmount %v: %v", v.RootMount, err)
}
v.proxy.clean()
return nil
}
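// CreateAppRole creates a policy that permits signing with the intermediate
// PKI role, creates an AppRole bound to that policy, and returns the role ID
// and secret ID.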
func (v *VaultInitializer) CreateAppRole() (string, string, error) {
// create policy
role_path := path.Join(v.IntermediateMount, "sign", v.Role)
policy := fmt.Sprintf("path \"%s\" { capabilities = [ \"create\", \"update\" ] }", role_path)
err := v.client.Sys().PutPolicy(v.Role, policy)
if err != nil {
return "", "", fmt.Errorf("Error creating policy: %s", err.Error())
}
// # create approle
params := map[string]string{
"period": "24h",
"policies": v.Role,
}
baseUrl := path.Join("/v1", "auth", v.AppRoleAuthPath, "role", v.Role)
_, err = v.proxy.callVault("POST", baseUrl, "", params)
if err != nil {
return "", "", fmt.Errorf("Error creating approle: %s", err.Error())
}
// # read the role-id
url := path.Join(baseUrl, "role-id")
roleId, err := v.proxy.callVault("GET", url, "role_id", map[string]string{})
if err != nil {
return "", "", fmt.Errorf("Error reading role_id: %s", err.Error())
}
// # read the secret-id
url = path.Join(baseUrl, "secret-id")
secretId, err := v.proxy.callVault("POST", url, "secret_id", map[string]string{})
if err != nil {
return "", "", fmt.Errorf("Error reading secret_id: %s", err.Error())
}
return roleId, secretId, nil
}
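// CleanAppRole deletes the AppRole and its associated policy.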
func (v *VaultInitializer) CleanAppRole() error {
url := path.Join("/v1", "auth", v.AppRoleAuthPath, "role", v.Role)
_, err := v.proxy.callVault("DELETE", url, "", map[string]string{})
if err != nil {
return fmt.Errorf("Error deleting AppRole: %s", err.Error())
}
err = v.client.Sys().DeletePolicy(v.Role)
if err != nil {
return fmt.Errorf("Error deleting policy: %s", err.Error())
}
return nil
}
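// mountPKI enables a PKI secrets engine at the given mount path. Note that
// the ttl argument is currently unused; the engine is mounted with a fixed
// 87600h max lease TTL.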
func (v *VaultInitializer) mountPKI(mount, ttl string) error {
opts := &vault.MountInput{
Type: "pki",
Config: vault.MountConfigInput{
MaxLeaseTTL: "87600h",
},
}
if err := v.client.Sys().Mount("/"+mount, opts); err != nil {
return fmt.Errorf("Error mounting %s: %s", mount, err.Error())
}
return nil
}
func (v *VaultInitializer) generateRootCert() (string, error) {
params := map[string]string{
"common_name": "Root CA",
"ttl": "87600h",
"exclude_cn_from_sans": "true",
"key_type": "ec",
"key_bits": "256",
}
url := path.Join("/v1", v.RootMount, "root", "generate", "internal")
cert, err := v.proxy.callVault("POST", url, "certificate", params)
if err != nil {
return "", fmt.Errorf("Error generating CA root certificate: %s", err.Error())
}
return cert, nil
}
func (v *VaultInitializer) generateIntermediateSigningReq() (string, error) {
params := map[string]string{
"common_name": "Intermediate CA",
"ttl": "43800h",
"exclude_cn_from_sans": "true",
"key_type": "ec",
"key_bits": "256",
}
url := path.Join("/v1", v.IntermediateMount, "intermediate", "generate", "internal")
csr, err := v.proxy.callVault("POST", url, "csr", params)
if err != nil {
return "", fmt.Errorf("Error generating CA intermediate certificate: %s", err.Error())
}
return csr, nil
}
func (v *VaultInitializer) signCertificate(csr string) (string, error) {
params := map[string]string{
"use_csr_values": "true",
"ttl": "43800h",
"exclude_cn_from_sans": "true",
"csr": csr,
}
url := path.Join("/v1", v.RootMount, "root", "sign-intermediate")
cert, err := v.proxy.callVault("POST", url, "certificate", params)
if err != nil {
return "", fmt.Errorf("Error signing intermediate Vault certificate: %s", err.Error())
}
return cert, nil
}
func (v *VaultInitializer) importSignIntermediate(caChain, intermediateMount string) error {
params := map[string]string{
"certificate": caChain,
}
url := path.Join("/v1", intermediateMount, "intermediate", "set-signed")
_, err := v.proxy.callVault("POST", url, "", params)
if err != nil {
return fmt.Errorf("Error importing intermediate Vault certificate: %s", err.Error())
}
return nil
}
func (v *VaultInitializer) configureCert(mount string) error {
params := map[string]string{
"issuing_certificates": fmt.Sprintf("https://vault.vault:8200/v1/%s/ca", mount),
"crl_distribution_points": fmt.Sprintf("https://vault.vault:8200/v1/%s/crl", mount),
}
url := path.Join("/v1", mount, "config", "urls")
_, err := v.proxy.callVault("POST", url, "", params)
if err != nil {
return fmt.Errorf("Error configuring Vault certificate: %s", err.Error())
}
return nil
}
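// setupRole enables the AppRole auth backend (if not already enabled) and
// creates a permissive signing role on the intermediate PKI mount.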
func (v *VaultInitializer) setupRole() error {
// vault auth-enable approle
auths, err := v.client.Sys().ListAuth()
if err != nil {
return fmt.Errorf("Error fetching auth mounts: %s", err.Error())
}
if _, ok := auths[v.AppRoleAuthPath]; !ok {
options := &vault.EnableAuthOptions{Type: "approle"}
if err := v.client.Sys().EnableAuthWithOptions(v.AppRoleAuthPath, options); err != nil {
return fmt.Errorf("Error enabling approle: %s", err.Error())
}
}
params := map[string]string{
"allow_any_name": "true",
"max_ttl": "2160h",
"key_type": "any",
"require_cn": "false",
"allowed_uri_sans": "spiffe://cluster.local/*",
}
url := path.Join("/v1", v.IntermediateMount, "roles", v.Role)
_, err = v.proxy.callVault("POST", url, "", params)
if err != nil {
return fmt.Errorf("Error creating role %s: %s", v.Role, err.Error())
}
return nil
}
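// setupKubernetesBasedAuth enables the Kubernetes auth backend and points it
// at the cluster API server. It is a no-op when no API server URL is set.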
func (v *VaultInitializer) setupKubernetesBasedAuth() error {
if len(v.APIServerURL) == 0 {
// skip initialization if not provided
return nil
}
// vault auth-enable kubernetes
auths, err := v.client.Sys().ListAuth()
if err != nil {
return fmt.Errorf("Error fetching auth mounts: %s", err.Error())
}
if _, ok := auths[v.KubernetesAuthPath]; !ok {
options := &vault.EnableAuthOptions{Type: "kubernetes"}
if err := v.client.Sys().EnableAuthWithOptions(v.KubernetesAuthPath, options); err != nil {
return fmt.Errorf("Error enabling kubernetes auth: %s", err.Error())
}
}
// vault write auth/kubernetes/config
params := map[string]string{
"kubernetes_host": v.APIServerURL,
"kubernetes_ca_cert": v.APIServerCA,
}
url := fmt.Sprintf("/v1/auth/%s/config", v.KubernetesAuthPath)
_, err = v.proxy.callVault("POST", url, "", params)
if err != nil {
return fmt.Errorf("error configuring kubernetes auth backend: %s", err.Error())
}
return nil
}
// CreateKubernetesRole creates a service account and ClusterRoleBinding for Kubernetes auth delegation
func (v *VaultInitializer) CreateKubernetesRole(client kubernetes.Interface, namespace, roleName, serviceAccountName string) error {
serviceAccount := NewVaultServiceAccount(serviceAccountName)
_, err := client.CoreV1().ServiceAccounts(namespace).Create(context.TODO(), serviceAccount, metav1.CreateOptions{})
if err != nil {
return fmt.Errorf("error creating ServiceAccount for Kubernetes auth: %s", err.Error())
}
role := NewVaultServiceAccountRole(namespace)
_, err = client.RbacV1().ClusterRoles().Create(context.TODO(), role, metav1.CreateOptions{})
if err != nil {
return fmt.Errorf("error creating Role for Kubernetes auth ServiceAccount: %s", err.Error())
}
roleBinding := NewVaultServiceAccountClusterRoleBinding(role.Name, namespace, serviceAccountName)
_, err = client.RbacV1().ClusterRoleBindings().Create(context.TODO(), roleBinding, metav1.CreateOptions{})
if err != nil {
return fmt.Errorf("error creating RoleBinding for Kubernetes auth ServiceAccount: %s", err.Error())
}
// vault write auth/kubernetes/role/<roleName>
roleParams := map[string]string{
"bound_service_account_names": serviceAccountName,
"bound_service_account_namespaces": namespace,
"policies": "[" + v.Role + "]",
}
url := path.Join(fmt.Sprintf("/v1/auth/%s/role", v.KubernetesAuthPath), roleName)
_, err = v.proxy.callVault("POST", url, "", roleParams)
if err != nil {
return fmt.Errorf("error configuring kubernetes auth role: %s", err.Error())
}
return nil
}
// CleanKubernetesRole cleans up the ClusterRoleBinding and ServiceAccount for Kubernetes auth delegation
func (v *VaultInitializer) CleanKubernetesRole(client kubernetes.Interface, namespace, roleName, serviceAccountName string) error {
if err := client.RbacV1().RoleBindings(namespace).Delete(context.TODO(), roleName, metav1.DeleteOptions{}); err != nil {
return err
}
if err := client.RbacV1().Roles(namespace).Delete(context.TODO(), roleName, metav1.DeleteOptions{}); err != nil {
return err
}
if err := client.CoreV1().ServiceAccounts(namespace).Delete(context.TODO(), serviceAccountName, metav1.DeleteOptions{}); err != nil {
return err
}
// vault delete auth/kubernetes/role/<roleName>
url := path.Join(fmt.Sprintf("/v1/auth/%s/role", v.KubernetesAuthPath), roleName)
_, err := v.proxy.callVault("DELETE", url, "", nil)
if err != nil {
return fmt.Errorf("error cleaning up kubernetes auth role: %s", err.Error())
}
return nil
}
| 1 | 27,462 | question: what is happening here? | jetstack-cert-manager | go |
@@ -14,6 +14,8 @@ public abstract class AbstractSetTest extends AbstractTraversableRangeTest {
@Override
abstract protected <T> Set<T> empty();
+ abstract protected <T> Set<T> emptyWithNull();
+
@Override
abstract protected <T> Set<T> of(T element);
| 1 | /* / \____ _ _ ____ ______ / \ ____ __ _______
* / / \/ \ / \/ \ / /\__\/ // \/ \ // /\__\ JΛVΛSLΛNG
* _/ / /\ \ \/ / /\ \\__\\ \ // /\ \ /\\/ \ /__\ \ Copyright 2014-2016 Javaslang, http://javaslang.io
* /___/\_/ \_/\____/\_/ \_/\__\/__/\__\_/ \_// \__/\_____/ Licensed under the Apache License, Version 2.0
*/
package javaslang.collection;
import org.junit.Test;
import java.math.BigDecimal;
public abstract class AbstractSetTest extends AbstractTraversableRangeTest {
@Override
abstract protected <T> Set<T> empty();
@Override
abstract protected <T> Set<T> of(T element);
@SuppressWarnings("unchecked")
@Override
abstract protected <T> Set<T> of(T... elements);
// -- static narrow
@Test
public void shouldNarrowSet() {
final Set<Double> doubles = of(1.0d);
final Set<Number> numbers = Set.narrow(doubles);
final int actual = numbers.add(new BigDecimal("2.0")).sum().intValue();
assertThat(actual).isEqualTo(3);
}
@Test
public void shouldAddAllOfIterable() {
assertThat(of(1, 2, 3).addAll(empty())).isEqualTo(of(1, 2, 3));
assertThat(empty().addAll(of(2, 3, 4))).isEqualTo(of(2, 3, 4));
assertThat(of(1, 2, 3).addAll(of(2, 3, 4))).isEqualTo(of(1, 2, 3, 4));
}
@Test
public void shouldCalculateDifference() {
assertThat(of(1, 2, 3).diff(of(2))).isEqualTo(of(1, 3));
assertThat(of(1, 2, 3).diff(of(5))).isEqualTo(of(1, 2, 3));
assertThat(of(1, 2, 3).diff(of(1, 2, 3))).isEqualTo(empty());
assertThat(empty().diff(of(1, 2))).isEqualTo(empty());
}
@Test
public void shouldCalculateIntersect() {
assertThat(of(1, 2, 3).intersect(of(2))).isEqualTo(of(2));
assertThat(of(1, 2, 3).intersect(of(5))).isEqualTo(empty());
assertThat(of(1, 2, 3).intersect(of(1, 2, 3))).isEqualTo(of(1, 2, 3));
assertThat(empty().intersect(of(1, 2))).isEqualTo(empty());
}
@Test
public void shouldCalculateUnion() {
assertThat(of(1, 2, 3).union(of(2))).isEqualTo(of(1, 2, 3));
assertThat(of(1, 2, 3).union(of(5))).isEqualTo(of(1, 2, 3, 5));
assertThat(of(1, 2, 3).union(of(1, 2, 3))).isEqualTo(of(1, 2, 3));
assertThat(empty().union(of(1, 2))).isEqualTo(of(1, 2));
}
@Test
public void shouldRemoveElement() {
assertThat(of(1, 2, 3).remove(2)).isEqualTo(of(1, 3));
assertThat(of(1, 2, 3).remove(5)).isEqualTo(of(1, 2, 3));
assertThat(empty().remove(5)).isEqualTo(empty());
}
@Test
public void shouldRemoveAllElements() {
assertThat(of(1, 2, 3).removeAll(empty())).isEqualTo(of(1, 2, 3));
assertThat(of(1, 2, 3).removeAll(of(2))).isEqualTo(of(1, 3));
assertThat(of(1, 2, 3).removeAll(of(5))).isEqualTo(of(1, 2, 3));
assertThat(empty().removeAll(of(5))).isEqualTo(empty());
}
@Test
public void shouldMapDistinctElementsToOneElement() {
assertThat(of(1, 2, 3).map(i -> 0)).isEqualTo(of(0));
}
@Override
@Test
public void shouldBeAwareOfExistingNonUniqueElement() {
// sets have only distinct elements
}
@Override
@Test
public void shouldReplaceFirstOccurrenceOfNonNilUsingCurrNewWhenMultipleOccurrencesExist() {
// sets have only distinct elements
}
}
| 1 | 10,884 | \[Checkstyle\] ERROR: 'protected' modifier out of order with the JLS suggestions\. | vavr-io-vavr | java |
@@ -4,6 +4,18 @@ const chai = require('chai');
const expect = chai.expect;
chai.use(require('chai-subset'));
+const EJSON = require('mongodb-extjson');
+const getKmsProviders = localKey => {
+ const result = EJSON.parse(process.env.CSFLE_KMS_PROVIDERS || 'NOT_PROVIDED');
+ if (localKey) {
+ result.local = { key: localKey };
+ }
+
+ return result;
+};
+
+const noop = () => {};
+
// Tests for the ClientEncryption type are not included as part of the YAML tests.
// In the prose tests LOCAL_MASTERKEY refers to the following base64: | 1 | 'use strict';
const BSON = require('bson');
const chai = require('chai');
const expect = chai.expect;
chai.use(require('chai-subset'));
// Tests for the ClientEncryption type are not included as part of the YAML tests.
// In the prose tests LOCAL_MASTERKEY refers to the following base64:
// .. code:: javascript
// Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk
describe('Client Side Encryption Prose Tests', function () {
const metadata = { requires: { clientSideEncryption: true, mongodb: '>=4.2.0' } };
const dataDbName = 'db';
const dataCollName = 'coll';
const dataNamespace = `${dataDbName}.${dataCollName}`;
const keyVaultDbName = 'keyvault';
const keyVaultCollName = 'datakeys';
const keyVaultNamespace = `${keyVaultDbName}.${keyVaultCollName}`;
const shared = require('../shared');
const dropCollection = shared.dropCollection;
const APMEventCollector = shared.APMEventCollector;
const localKey = Buffer.from(
'Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk',
'base64'
);
describe('Data key and double encryption', function () {
// Data key and double encryption
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// First, perform the setup.
beforeEach(function () {
const mongodbClientEncryption = this.configuration.mongodbClientEncryption;
// #. Create a MongoClient without encryption enabled (referred to as ``client``). Enable command monitoring to listen for command_started events.
this.client = this.configuration.newClient({}, { monitorCommands: true });
this.commandStartedEvents = new APMEventCollector(this.client, 'commandStarted', {
exclude: ['ismaster']
});
const schemaMap = {
[dataNamespace]: {
bsonType: 'object',
properties: {
encrypted_placeholder: {
encrypt: {
keyId: '/placeholder',
bsonType: 'string',
algorithm: 'AEAD_AES_256_CBC_HMAC_SHA_512-Random'
}
}
}
}
};
return (
Promise.resolve()
.then(() => this.client.connect())
// #. Using ``client``, drop the collections ``keyvault.datakeys`` and ``db.coll``.
.then(() => dropCollection(this.client.db(dataDbName), dataCollName))
.then(() => dropCollection(this.client.db(keyVaultDbName), keyVaultCollName))
// #. Create the following:
// - A MongoClient configured with auto encryption (referred to as ``client_encrypted``)
// - A ``ClientEncryption`` object (referred to as ``client_encryption``)
// Configure both objects with ``aws`` and the ``local`` KMS providers as follows:
// .. code:: javascript
// {
// "aws": { <AWS credentials> },
// "local": { "key": <base64 decoding of LOCAL_MASTERKEY> }
// }
// Configure both objects with ``keyVaultNamespace`` set to ``keyvault.datakeys``.
// Configure the ``MongoClient`` with the following ``schema_map``:
// .. code:: javascript
// {
// "db.coll": {
// "bsonType": "object",
// "properties": {
// "encrypted_placeholder": {
// "encrypt": {
// "keyId": "/placeholder",
// "bsonType": "string",
// "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random"
// }
// }
// }
// }
// }
// Configure ``client_encryption`` with the ``keyVaultClient`` of the previously created ``client``.
.then(() => {
this.clientEncryption = new mongodbClientEncryption.ClientEncryption(this.client, {
bson: BSON,
kmsProviders: this.configuration.kmsProviders(null, localKey),
keyVaultNamespace
});
})
.then(() => {
this.clientEncrypted = this.configuration.newClient(
{},
{
autoEncryption: {
keyVaultNamespace,
kmsProviders: this.configuration.kmsProviders(null, localKey),
schemaMap
}
}
);
return this.clientEncrypted.connect();
})
);
});
afterEach(function () {
if (this.commandStartedEvents) {
this.commandStartedEvents.teardown();
this.commandStartedEvents = undefined;
}
return Promise.resolve()
.then(() => this.clientEncrypted && this.clientEncrypted.close())
.then(() => this.client && this.client.close());
});
it('should work for local KMS provider', metadata, function () {
let localDatakeyId;
let localEncrypted;
return Promise.resolve()
.then(() => {
// #. Call ``client_encryption.createDataKey()`` with the ``local`` KMS provider and keyAltNames set to ``["local_altname"]``.
// - Expect a BSON binary with subtype 4 to be returned, referred to as ``local_datakey_id``.
// - Use ``client`` to run a ``find`` on ``keyvault.datakeys`` by querying with the ``_id`` set to the ``local_datakey_id``.
// - Expect that exactly one document is returned with the "masterKey.provider" equal to "local".
// - Check that ``client`` captured a command_started event for the ``insert`` command containing a majority writeConcern.
this.commandStartedEvents.clear();
return this.clientEncryption
.createDataKey('local', { keyAltNames: ['local_altname'] })
.then(result => {
localDatakeyId = result;
expect(localDatakeyId).to.have.property('sub_type', 4);
})
.then(() => {
return this.client
.db(keyVaultDbName)
.collection(keyVaultCollName)
.find({ _id: localDatakeyId })
.toArray();
})
.then(results => {
expect(results)
.to.have.a.lengthOf(1)
.and.to.have.nested.property('0.masterKey.provider', 'local');
expect(this.commandStartedEvents.events).to.containSubset([
{ commandName: 'insert', command: { writeConcern: { w: 'majority' } } }
]);
});
})
.then(() => {
// #. Call ``client_encryption.encrypt()`` with the value "hello local", the algorithm ``AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic``, and the ``key_id`` of ``local_datakey_id``.
// - Expect the return value to be a BSON binary subtype 6, referred to as ``local_encrypted``.
// - Use ``client_encrypted`` to insert ``{ _id: "local", "value": <local_encrypted> }`` into ``db.coll``.
// - Use ``client_encrypted`` to run a find querying with ``_id`` of "local" and expect ``value`` to be "hello local".
const coll = this.clientEncrypted.db(dataDbName).collection(dataCollName);
return this.clientEncryption
.encrypt('hello local', {
algorithm: 'AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic',
keyId: localDatakeyId
})
.then(value => {
localEncrypted = value;
expect(localEncrypted).to.have.property('sub_type', 6);
})
.then(() => coll.insertOne({ _id: 'local', value: localEncrypted }))
.then(() => coll.findOne({ _id: 'local' }))
.then(result => {
expect(result).to.have.property('value', 'hello local');
});
})
.then(() => {
// #. Call ``client_encryption.encrypt()`` with the value "hello local", the algorithm ``AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic``, and the ``key_alt_name`` of ``local_altname``.
// - Expect the return value to be a BSON binary subtype 6. Expect the value to exactly match the value of ``local_encrypted``.
return this.clientEncryption
.encrypt('hello local', {
algorithm: 'AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic',
keyId: localDatakeyId
})
.then(encrypted => {
expect(encrypted).to.deep.equal(localEncrypted);
});
});
});
it('should work for aws KMS provider', metadata, function () {
// Then, repeat the above tests with the ``aws`` KMS provider:
let awsDatakeyId;
let awsEncrypted;
return Promise.resolve()
.then(() => {
// #. Call ``client_encryption.createDataKey()`` with the ``aws`` KMS provider, keyAltNames set to ``["aws_altname"]``, and ``masterKey`` as follows:
// .. code:: javascript
// {
// region: "us-east-1",
// key: "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0"
// }
// - Expect a BSON binary with subtype 4 to be returned, referred to as ``aws_datakey_id``.
// - Use ``client`` to run a ``find`` on ``keyvault.datakeys`` by querying with the ``_id`` set to the ``aws_datakey_id``.
// - Expect that exactly one document is returned with the "masterKey.provider" equal to "aws".
// - Check that ``client`` captured a command_started event for the ``insert`` command containing a majority writeConcern.
this.commandStartedEvents.clear();
const masterKey = {
region: 'us-east-1',
key: 'arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0'
};
return this.clientEncryption
.createDataKey('aws', { masterKey, keyAltNames: ['aws_altname'] })
.then(result => {
awsDatakeyId = result;
expect(awsDatakeyId).to.have.property('sub_type', 4);
})
.then(() => {
return this.client
.db(keyVaultDbName)
.collection(keyVaultCollName)
.find({ _id: awsDatakeyId })
.toArray();
})
.then(results => {
expect(results)
.to.have.a.lengthOf(1)
.and.to.have.nested.property('0.masterKey.provider', 'aws');
expect(this.commandStartedEvents.events).to.containSubset([
{ commandName: 'insert', command: { writeConcern: { w: 'majority' } } }
]);
});
})
.then(() => {
// #. Call ``client_encryption.encrypt()`` with the value "hello aws", the algorithm ``AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic``, and the ``key_id`` of ``aws_datakey_id``.
// - Expect the return value to be a BSON binary subtype 6, referred to as ``aws_encrypted``.
// - Use ``client_encrypted`` to insert ``{ _id: "aws", "value": <aws_encrypted> }`` into ``db.coll``.
// - Use ``client_encrypted`` to run a find querying with ``_id`` of "aws" and expect ``value`` to be "hello aws".
const coll = this.clientEncrypted.db(dataDbName).collection(dataCollName);
return this.clientEncryption
.encrypt('hello aws', {
algorithm: 'AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic',
keyId: awsDatakeyId
})
.then(value => {
awsEncrypted = value;
expect(awsEncrypted).to.have.property('sub_type', 6);
})
.then(() => coll.insertOne({ _id: 'aws', value: awsEncrypted }))
.then(() => coll.findOne({ _id: 'aws' }))
.then(result => {
expect(result).to.have.property('value', 'hello aws');
});
})
.then(() => {
// #. Call ``client_encryption.encrypt()`` with the value "hello aws", the algorithm ``AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic``, and the ``key_alt_name`` of ``aws_altname``.
// - Expect the return value to be a BSON binary subtype 6. Expect the value to exactly match the value of ``aws_encrypted``.
return this.clientEncryption
.encrypt('hello aws', {
algorithm: 'AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic',
keyId: awsDatakeyId
})
.then(encrypted => {
expect(encrypted).to.deep.equal(awsEncrypted);
});
});
});
it('should error on an attempt to double-encrypt a value', metadata, function () {
// Then, run the following final tests:
// #. Test explicit encrypting an auto encrypted field.
// - Use ``client_encrypted`` to attempt to insert ``{ "encrypted_placeholder": (local_encrypted) }``
// - Expect an exception to be thrown, since this is an attempt to auto encrypt an already encrypted value.
return Promise.resolve()
.then(() => this.clientEncryption.createDataKey('local'))
.then(keyId =>
this.clientEncryption.encrypt('hello double', {
keyId,
algorithm: 'AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic'
})
)
.then(encrypted =>
this.clientEncrypted
.db(dataDbName)
.collection(dataCollName)
.insertOne({ encrypted_placeholder: encrypted })
.then(
() => {
throw new Error('Expected double-encryption to fail, but it has succeeded');
},
err => {
expect(err).to.be.an.instanceOf(Error);
}
)
);
});
});
describe('Custom Endpoint', function () {
// Data keys created with AWS KMS may specify a custom endpoint to contact (instead of the default endpoint derived from the AWS region).
beforeEach(function () {
// 1. Create a ``ClientEncryption`` object (referred to as ``client_encryption``)
// Configure with ``aws`` KMS providers as follows:
// .. code:: javascript
// {
// "aws": { <AWS credentials> }
// }
// Configure with ``keyVaultNamespace`` set to ``keyvault.datakeys``, and a default MongoClient as the ``keyVaultClient``.
this.client = this.configuration.newClient();
return this.client.connect().then(() => {
const mongodbClientEncryption = this.configuration.mongodbClientEncryption;
this.clientEncryption = new mongodbClientEncryption.ClientEncryption(this.client, {
bson: BSON,
keyVaultNamespace,
kmsProviders: this.configuration.kmsProviders('aws')
});
});
});
afterEach(function () {
return this.client && this.client.close();
});
const testCases = [
{
description: 'no custom endpoint',
masterKey: {
region: 'us-east-1',
key: 'arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0'
},
succeed: true
},
{
description: 'custom endpoint',
masterKey: {
region: 'us-east-1',
key: 'arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0',
endpoint: 'kms.us-east-1.amazonaws.com'
},
succeed: true
},
{
description: 'custom endpoint with port',
masterKey: {
region: 'us-east-1',
key: 'arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0',
endpoint: 'kms.us-east-1.amazonaws.com:443'
},
succeed: true
},
{
description: 'custom endpoint with bad url',
masterKey: {
region: 'us-east-1',
key: 'arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0',
endpoint: 'kms.us-east-1.amazonaws.com:12345'
},
succeed: false,
errorValidator: err => {
expect(err)
.to.be.an.instanceOf(Error)
.and.to.have.property('message')
.that.matches(/KMS request failed/);
}
},
{
description: 'custom endpoint that does not match region',
masterKey: {
region: 'us-east-1',
key: 'arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0',
endpoint: 'kms.us-east-2.amazonaws.com'
},
succeed: false,
errorValidator: err => {
// Expect this to fail with an exception with a message containing the string: "us-east-1"
expect(err)
.to.be.an.instanceOf(Error)
.and.to.have.property('message')
.that.matches(/us-east-1/);
}
},
{
description: 'custom endpoint with parse error',
masterKey: {
region: 'us-east-1',
key: 'arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0',
endpoint: 'example.com'
},
        succeed: false,
errorValidator: err => {
// Expect this to fail with an exception with a message containing the string: "parse error"
expect(err)
.to.be.an.instanceOf(Error)
.and.to.have.property('message')
.that.matches(/parse error/);
}
}
];
testCases.forEach(testCase => {
it(testCase.description, metadata, function () {
// 2. Call `client_encryption.createDataKey()` with "aws" as the provider and the following masterKey:
// .. code:: javascript
// {
// ...
// }
// Expect this to succeed. Use the returned UUID of the key to explicitly encrypt and decrypt the string "test" to validate it works.
const masterKey = testCase.masterKey;
return this.clientEncryption.createDataKey('aws', { masterKey }).then(
keyId => {
if (!testCase.succeed) {
throw new Error('Expected test case to fail to create data key, but it succeeded');
}
return this.clientEncryption
.encrypt('test', {
keyId,
algorithm: 'AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic'
})
.then(encrypted => this.clientEncryption.decrypt(encrypted))
.then(result => {
expect(result).to.equal('test');
});
},
err => {
if (testCase.succeed) {
throw err;
}
if (!testCase.errorValidator) {
throw new Error('Invalid Error validator');
}
testCase.errorValidator(err);
}
);
});
});
});
describe('BSON size limits and batch splitting', function () {
const fs = require('fs');
const path = require('path');
const { EJSON } = BSON;
function loadLimits(file) {
return EJSON.parse(
fs.readFileSync(path.resolve(__dirname, '../../spec/client-side-encryption/limits', file))
);
}
const limitsSchema = loadLimits('limits-schema.json');
const limitsKey = loadLimits('limits-key.json');
const limitsDoc = loadLimits('limits-doc.json');
before(function () {
// First, perform the setup.
// #. Create a MongoClient without encryption enabled (referred to as ``client``).
this.client = this.configuration.newClient();
return (
this.client
.connect()
// #. Using ``client``, drop and create the collection ``db.coll`` configured with the included JSON schema `limits/limits-schema.json <../limits/limits-schema.json>`_.
.then(() => dropCollection(this.client.db(dataDbName), dataCollName))
.then(() => {
return this.client.db(dataDbName).createCollection(dataCollName, {
validator: { $jsonSchema: limitsSchema }
});
})
// #. Using ``client``, drop the collection ``keyvault.datakeys``. Insert the document `limits/limits-key.json <../limits/limits-key.json>`_
.then(() => dropCollection(this.client.db(keyVaultDbName), keyVaultCollName))
.then(() => {
return this.client
.db(keyVaultDbName)
.collection(keyVaultCollName)
.insertOne(limitsKey, { writeConcern: { w: 'majority' } });
})
);
});
beforeEach(function () {
// #. Create a MongoClient configured with auto encryption (referred to as ``client_encrypted``)
// Configure with the ``local`` KMS provider as follows:
// .. code:: javascript
// { "local": { "key": <base64 decoding of LOCAL_MASTERKEY> } }
// Configure with the ``keyVaultNamespace`` set to ``keyvault.datakeys``.
this.clientEncrypted = this.configuration.newClient(
{},
{
monitorCommands: true,
autoEncryption: {
keyVaultNamespace,
kmsProviders: this.configuration.kmsProviders('local', localKey)
}
}
);
return this.clientEncrypted.connect().then(() => {
this.encryptedColl = this.clientEncrypted.db(dataDbName).collection(dataCollName);
this.commandStartedEvents = new APMEventCollector(this.clientEncrypted, 'commandStarted', {
include: ['insert']
});
});
});
afterEach(function () {
if (this.commandStartedEvents) {
this.commandStartedEvents.teardown();
this.commandStartedEvents = undefined;
}
if (this.clientEncrypted) {
return this.clientEncrypted.close();
}
});
after(function () {
return this.client && this.client.close();
});
// Using ``client_encrypted`` perform the following operations:
function repeatedChar(char, length) {
return Array.from({ length })
.map(() => char)
.join('');
}
const testCases = [
// #. Insert ``{ "_id": "over_2mib_under_16mib", "unencrypted": <the string "a" repeated 2097152 times> }``.
// Expect this to succeed since this is still under the ``maxBsonObjectSize`` limit.
{
description: 'should succeed for over_2mib_under_16mib',
docs: () => [{ _id: 'over_2mib_under_16mib', unencrypted: repeatedChar('a', 2097152) }],
expectedEvents: [{ commandName: 'insert' }]
},
// #. Insert the document `limits/limits-doc.json <../limits/limits-doc.json>`_ concatenated with ``{ "_id": "encryption_exceeds_2mib", "unencrypted": < the string "a" repeated (2097152 - 2000) times > }``
// Note: limits-doc.json is a 1005 byte BSON document that encrypts to a ~10,000 byte document.
// Expect this to succeed since after encryption this still is below the normal maximum BSON document size.
// Note, before auto encryption this document is under the 2 MiB limit. After encryption it exceeds the 2 MiB limit, but does NOT exceed the 16 MiB limit.
{
description: 'should succeed for encryption_exceeds_2mib',
docs: () => [
Object.assign({}, limitsDoc, {
_id: 'encryption_exceeds_2mib',
unencrypted: repeatedChar('a', 2097152 - 2000)
})
],
expectedEvents: [{ commandName: 'insert' }]
},
// #. Bulk insert the following:
// - ``{ "_id": "over_2mib_1", "unencrypted": <the string "a" repeated (2097152) times> }``
// - ``{ "_id": "over_2mib_2", "unencrypted": <the string "a" repeated (2097152) times> }``
// Expect the bulk write to succeed and split after first doc (i.e. two inserts occur). This may be verified using `command monitoring <https://github.com/mongodb/specifications/tree/master/source/command-monitoring/command-monitoring.rst>`_.
{
description: 'should succeed for bulk over_2mib',
docs: () => [
{ _id: 'over_2mib_1', unencrypted: repeatedChar('a', 2097152) },
{ _id: 'over_2mib_2', unencrypted: repeatedChar('a', 2097152) }
],
expectedEvents: [{ commandName: 'insert' }, { commandName: 'insert' }]
},
// #. Bulk insert the following:
// - The document `limits/limits-doc.json <../limits/limits-doc.json>`_ concatenated with ``{ "_id": "encryption_exceeds_2mib_1", "unencrypted": < the string "a" repeated (2097152 - 2000) times > }``
// - The document `limits/limits-doc.json <../limits/limits-doc.json>`_ concatenated with ``{ "_id": "encryption_exceeds_2mib_2", "unencrypted": < the string "a" repeated (2097152 - 2000) times > }``
// Expect the bulk write to succeed and split after first doc (i.e. two inserts occur). This may be verified using `command monitoring <https://github.com/mongodb/specifications/tree/master/source/command-monitoring/command-monitoring.rst>`_.
{
description: 'should succeed for bulk encryption_exceeds_2mib',
docs: () => [
Object.assign({}, limitsDoc, {
_id: 'encryption_exceeds_2mib_1',
unencrypted: repeatedChar('a', 2097152 - 2000)
}),
Object.assign({}, limitsDoc, {
_id: 'encryption_exceeds_2mib_2',
unencrypted: repeatedChar('a', 2097152 - 2000)
})
],
expectedEvents: [{ commandName: 'insert' }, { commandName: 'insert' }]
},
// #. Insert ``{ "_id": "under_16mib", "unencrypted": <the string "a" repeated 16777216 - 2000 times>``.
// Expect this to succeed since this is still (just) under the ``maxBsonObjectSize`` limit.
{
description: 'should succeed for under_16mib',
docs: () => [{ _id: 'under_16mib', unencrypted: repeatedChar('a', 16777216 - 2000) }],
expectedEvents: [{ commandName: 'insert' }]
},
// #. Insert the document `limits/limits-doc.json <../limits/limits-doc.json>`_ concatenated with ``{ "_id": "encryption_exceeds_16mib", "unencrypted": < the string "a" repeated (16777216 - 2000) times > }``
// Expect this to fail since encryption results in a document exceeding the ``maxBsonObjectSize`` limit.
{
description: 'should fail for encryption_exceeds_16mib',
docs: () => [
Object.assign({}, limitsDoc, {
_id: 'encryption_exceeds_16mib',
unencrypted: repeatedChar('a', 16777216 - 2000)
})
],
error: true
}
];
testCases.forEach(testCase => {
it(testCase.description, metadata, function () {
return this.encryptedColl.insertMany(testCase.docs()).then(
() => {
if (testCase.error) {
throw new Error('Expected this insert to fail, but it succeeded');
}
const expectedEvents = Array.from(testCase.expectedEvents);
const actualEvents = pruneEvents(this.commandStartedEvents.events);
expect(actualEvents)
.to.have.a.lengthOf(expectedEvents.length)
.and.to.containSubset(expectedEvents);
},
err => {
if (!testCase.error) {
throw err;
}
}
);
});
});
function pruneEvents(events) {
return events.map(event => {
// We are pruning out the bunch of repeating As, mostly
// b/c an error failure will try to print 2mb of 'a's
// and not have a good time.
event.command = Object.assign({}, event.command);
event.command.documents = event.command.documents.map(doc => {
doc = Object.assign({}, doc);
if (doc.unencrypted) {
doc.unencrypted = "Lots of repeating 'a's";
}
return doc;
});
return event;
});
}
});
describe('Views are prohibited', function () {
before(function () {
// First, perform the setup.
// #. Create a MongoClient without encryption enabled (referred to as ``client``).
this.client = this.configuration.newClient();
return this.client
.connect()
.then(() => dropCollection(this.client.db(dataDbName), dataCollName))
.then(() => {
return this.client.db(dataDbName).createCollection(dataCollName);
})
.then(() => {
return this.client
.db(dataDbName)
.createCollection('view', { viewOn: dataCollName, pipeline: [] });
});
});
after(function () {
return this.client && this.client.close();
});
beforeEach(function () {
this.clientEncrypted = this.configuration.newClient(
{},
{
autoEncryption: {
keyVaultNamespace,
kmsProviders: this.configuration.kmsProviders(null, localKey)
}
}
);
return this.clientEncrypted.connect();
});
afterEach(function () {
return this.clientEncrypted && this.clientEncrypted.close();
});
it('should error when inserting into a view with autoEncryption', metadata, function () {
return this.clientEncrypted
.db(dataDbName)
.collection('view')
.insertOne({ a: 1 })
.then(
() => {
throw new Error('Expected insert to fail, but it succeeded');
},
err => {
expect(err)
.to.have.property('message')
.that.matches(/cannot auto encrypt a view/);
}
);
});
});
// TODO: We cannot implement these tests according to spec b/c the tests require a
// connect-less client. So instead we are implementing the tests via APM,
// and confirming that the externalClient is firing off keyVault requests during
// encrypted operations
describe('External Key Vault', function () {
const fs = require('fs');
const path = require('path');
const { EJSON } = BSON;
function loadExternal(file) {
return EJSON.parse(
fs.readFileSync(path.resolve(__dirname, '../../spec/client-side-encryption/external', file))
);
}
const externalKey = loadExternal('external-key.json');
const externalSchema = loadExternal('external-schema.json');
beforeEach(function () {
this.client = this.configuration.newClient();
// #. Create a MongoClient without encryption enabled (referred to as ``client``).
return (
this.client
.connect()
// #. Using ``client``, drop the collections ``keyvault.datakeys`` and ``db.coll``.
// Insert the document `external/external-key.json <../external/external-key.json>`_ into ``keyvault.datakeys``.
.then(() => dropCollection(this.client.db(dataDbName), dataCollName))
.then(() => dropCollection(this.client.db(keyVaultDbName), keyVaultCollName))
.then(() => {
return this.client
.db(keyVaultDbName)
.collection(keyVaultCollName)
.insertOne(externalKey, { writeConcern: { w: 'majority' } });
})
);
});
afterEach(function () {
if (this.commandStartedEvents) {
this.commandStartedEvents.teardown();
this.commandStartedEvents = undefined;
}
return Promise.resolve()
.then(() => this.externalClient && this.externalClient.close())
.then(() => this.clientEncrypted && this.clientEncrypted.close())
.then(() => this.client && this.client.close());
});
function defineTest(withExternalKeyVault) {
it(
`should work ${withExternalKeyVault ? 'with' : 'without'} external key vault`,
metadata,
function () {
const ClientEncryption = this.configuration.mongodbClientEncryption.ClientEncryption;
return (
Promise.resolve()
.then(() => {
// If ``withExternalKeyVault == true``, configure both objects with an external key vault client. The external client MUST connect to the same
// MongoDB cluster that is being tested against, except it MUST use the username ``fake-user`` and password ``fake-pwd``.
this.externalClient = this.configuration.newClient(
// this.configuration.url('fake-user', 'fake-pwd'),
// TODO: Do this properly
{},
{ monitorCommands: true }
);
this.commandStartedEvents = new APMEventCollector(
this.externalClient,
'commandStarted',
{
include: ['find']
}
);
return this.externalClient.connect();
})
// #. Create the following:
// - A MongoClient configured with auto encryption (referred to as ``client_encrypted``)
// - A ``ClientEncryption`` object (referred to as ``client_encryption``)
// Configure both objects with the ``local`` KMS providers as follows:
// .. code:: javascript
// { "local": { "key": <base64 decoding of LOCAL_MASTERKEY> } }
// Configure both objects with ``keyVaultNamespace`` set to ``keyvault.datakeys``.
// Configure ``client_encrypted`` to use the schema `external/external-schema.json <../external/external-schema.json>`_ for ``db.coll`` by setting a schema map like: ``{ "db.coll": <contents of external-schema.json>}``
.then(() => {
const options = {
bson: BSON,
keyVaultNamespace,
kmsProviders: this.configuration.kmsProviders('local', localKey)
};
if (withExternalKeyVault) {
options.keyVaultClient = this.externalClient;
}
this.clientEncryption = new ClientEncryption(
this.client,
Object.assign({}, options)
);
this.clientEncrypted = this.configuration.newClient(
{},
{
autoEncryption: Object.assign({}, options, {
schemaMap: {
'db.coll': externalSchema
}
})
}
);
return this.clientEncrypted.connect();
})
.then(() => {
// #. Use ``client_encrypted`` to insert the document ``{"encrypted": "test"}`` into ``db.coll``.
// If ``withExternalKeyVault == true``, expect an authentication exception to be thrown. Otherwise, expect the insert to succeed.
this.commandStartedEvents.clear();
return this.clientEncrypted
.db(dataDbName)
.collection(dataCollName)
.insertOne({ encrypted: 'test' })
.then(() => {
if (withExternalKeyVault) {
expect(this.commandStartedEvents.events).to.containSubset([
{
commandName: 'find',
databaseName: keyVaultDbName,
command: { find: keyVaultCollName }
}
]);
} else {
expect(this.commandStartedEvents.events).to.not.containSubset([
{
commandName: 'find',
databaseName: keyVaultDbName,
command: { find: keyVaultCollName }
}
]);
}
});
// TODO: Do this in the spec-compliant way using bad auth credentials
// .then(
// () => {
// if (withExternalKeyVault) {
// throw new Error(
// 'expected insert to fail with authentication error, but it passed'
// );
// }
// },
// err => {
// if (!withExternalKeyVault) {
// throw err;
// }
// expect(err).to.be.an.instanceOf(Error);
// }
// );
})
.then(() => {
// #. Use ``client_encryption`` to explicitly encrypt the string ``"test"`` with key ID ``LOCALAAAAAAAAAAAAAAAAA==`` and deterministic algorithm.
// If ``withExternalKeyVault == true``, expect an authentication exception to be thrown. Otherwise, expect the insert to succeed.
this.commandStartedEvents.clear();
return this.clientEncryption
.encrypt('test', {
keyId: externalKey._id,
algorithm: 'AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic'
})
.then(() => {
if (withExternalKeyVault) {
expect(this.commandStartedEvents.events).to.containSubset([
{
commandName: 'find',
databaseName: keyVaultDbName,
command: { find: keyVaultCollName }
}
]);
} else {
expect(this.commandStartedEvents.events).to.not.containSubset([
{
commandName: 'find',
databaseName: keyVaultDbName,
command: { find: keyVaultCollName }
}
]);
}
});
// TODO: Do this in the spec-compliant way using bad auth credentials
// .then(
// () => {
// if (withExternalKeyVault) {
// throw new Error(
// 'expected insert to fail with authentication error, but it passed'
// );
// }
// },
// err => {
// if (!withExternalKeyVault) {
// throw err;
// }
// expect(err).to.be.an.instanceOf(Error);
// }
// );
})
);
}
);
}
// Run the following tests twice, parameterized by a boolean ``withExternalKeyVault``.
defineTest(true);
defineTest(false);
});
});
| 1 | 19,429 | Can we use EJSON from bson here? and in doing so avoid bringing in the deprecated `mongodb-extjson` lib `const { EJSON } = require('bson')` | mongodb-node-mongodb-native | js |
@@ -262,9 +262,6 @@ class GridPlot(CompositePlot):
show_legend = param.Boolean(default=False, doc="""
         Legends add too much clutter in a grid and are disabled by default.""")
- tick_format = param.String(default="%.2f", doc="""
- Formatting string for the GridPlot ticklabels.""")
-
xaxis = param.ObjectSelector(default='bottom',
objects=['bottom', 'top', None], doc="""
Whether and where to display the xaxis, supported options are | 1 | from __future__ import division
import numpy as np
import matplotlib as mpl
from mpl_toolkits.mplot3d import Axes3D # noqa (For 3D plots)
from matplotlib import pyplot as plt
from matplotlib import gridspec, animation
import param
from ...core import (OrderedDict, HoloMap, AdjointLayout, NdLayout,
GridSpace, Element, CompositeOverlay, Empty,
Collator, GridMatrix, Layout)
from ...core.options import Store, Compositor, SkipRendering
from ...core.util import int_to_roman, int_to_alpha, basestring
from ...core import traversal
from ..plot import DimensionedPlot, GenericLayoutPlot, GenericCompositePlot
from ..util import get_dynamic_mode, initialize_sampled
from .renderer import MPLRenderer
from .util import compute_ratios
class MPLPlot(DimensionedPlot):
"""
An MPLPlot object draws a matplotlib figure object when called or
indexed but can also return a matplotlib animation object as
appropriate. MPLPlots take element objects such as Image, Contours
or Points as inputs and plots them in the appropriate format using
matplotlib. As HoloMaps are supported, all plots support animation
via the anim() method.
"""
renderer = MPLRenderer
sideplots = {}
fig_alpha = param.Number(default=1.0, bounds=(0, 1), doc="""
Alpha of the overall figure background.""")
fig_bounds = param.NumericTuple(default=(0.15, 0.15, 0.85, 0.85),
doc="""
The bounds of the overall figure as a 4-tuple of the form
(left, bottom, right, top), defining the size of the border
around the subplots.""")
fig_inches = param.Parameter(default=4, doc="""
The overall matplotlib figure size in inches. May be set as
an integer in which case it will be used to autocompute a
size. Alternatively may be set with an explicit tuple or list,
in which case it will be applied directly after being scaled
by fig_size. If either the width or height is set to None,
it will be computed automatically.""")
fig_latex = param.Boolean(default=False, doc="""
Whether to use LaTeX text in the overall figure.""")
fig_rcparams = param.Dict(default={}, doc="""
matplotlib rc parameters to apply to the overall figure.""")
fig_size = param.Integer(default=100, bounds=(1, None), doc="""
Size relative to the supplied overall fig_inches in percent.""")
initial_hooks = param.HookList(default=[], doc="""
Optional list of hooks called before plotting the data onto
the axis. The hook is passed the plot object and the displayed
object, other plotting handles can be accessed via plot.handles.""")
final_hooks = param.HookList(default=[], doc="""
Optional list of hooks called when finalizing an axis.
The hook is passed the plot object and the displayed
object, other plotting handles can be accessed via plot.handles.""")
finalize_hooks = param.HookList(default=[], doc="""
Optional list of hooks called when finalizing an axis.
The hook is passed the plot object and the displayed
object, other plotting handles can be accessed via plot.handles.""")
sublabel_format = param.String(default=None, allow_None=True, doc="""
Allows labeling the subaxes in each plot with various formatters
including {Alpha}, {alpha}, {numeric} and {roman}.""")
sublabel_position = param.NumericTuple(default=(-0.35, 0.85), doc="""
Position relative to the plot for placing the optional subfigure label.""")
sublabel_size = param.Number(default=18, doc="""
Size of optional subfigure label.""")
projection = param.Parameter(default=None, doc="""
The projection of the plot axis, default of None is equivalent to
2D plot, '3d' and 'polar' are also supported by matplotlib by default.
May also supply a custom projection that is either a matplotlib
projection type or implements the `_as_mpl_axes` method.""")
show_frame = param.Boolean(default=True, doc="""
Whether or not to show a complete frame around the plot.""")
_close_figures = True
def __init__(self, fig=None, axis=None, **params):
self._create_fig = True
super(MPLPlot, self).__init__(**params)
# List of handles to matplotlib objects for animation update
scale = self.fig_size/100.
if isinstance(self.fig_inches, (tuple, list)):
self.fig_inches = [None if i is None else i*scale
for i in self.fig_inches]
else:
self.fig_inches *= scale
fig, axis = self._init_axis(fig, axis)
self.handles['fig'] = fig
self.handles['axis'] = axis
if self.final_hooks and self.finalize_hooks:
self.warning('Set either final_hooks or deprecated '
'finalize_hooks, not both.')
self.finalize_hooks = self.final_hooks
self.handles['bbox_extra_artists'] = []
def _init_axis(self, fig, axis):
"""
Return an axis which may need to be initialized from
a new figure.
"""
if not fig and self._create_fig:
rc_params = self.fig_rcparams
if self.fig_latex:
rc_params['text.usetex'] = True
with mpl.rc_context(rc=rc_params):
fig = plt.figure()
l, b, r, t = self.fig_bounds
inches = self.fig_inches
fig.subplots_adjust(left=l, bottom=b, right=r, top=t)
fig.patch.set_alpha(self.fig_alpha)
if isinstance(inches, (tuple, list)):
inches = list(inches)
if inches[0] is None:
inches[0] = inches[1]
elif inches[1] is None:
inches[1] = inches[0]
fig.set_size_inches(list(inches))
else:
fig.set_size_inches([inches, inches])
axis = fig.add_subplot(111, projection=self.projection)
axis.set_aspect('auto')
return fig, axis
def _subplot_label(self, axis):
layout_num = self.layout_num if self.subplot else 1
if self.sublabel_format and not self.adjoined and layout_num > 0:
from mpl_toolkits.axes_grid1.anchored_artists import AnchoredText
labels = {}
if '{Alpha}' in self.sublabel_format:
labels['Alpha'] = int_to_alpha(layout_num-1)
elif '{alpha}' in self.sublabel_format:
labels['alpha'] = int_to_alpha(layout_num-1, upper=False)
elif '{numeric}' in self.sublabel_format:
labels['numeric'] = self.layout_num
elif '{Roman}' in self.sublabel_format:
labels['Roman'] = int_to_roman(layout_num)
elif '{roman}' in self.sublabel_format:
labels['roman'] = int_to_roman(layout_num).lower()
at = AnchoredText(self.sublabel_format.format(**labels), loc=3,
bbox_to_anchor=self.sublabel_position, frameon=False,
prop=dict(size=self.sublabel_size, weight='bold'),
bbox_transform=axis.transAxes)
at.patch.set_visible(False)
axis.add_artist(at)
sublabel = at.txt.get_children()[0]
self.handles['sublabel'] = sublabel
self.handles['bbox_extra_artists'] += [sublabel]
def _finalize_axis(self, key):
"""
General method to finalize the axis and plot.
"""
if 'title' in self.handles:
self.handles['title'].set_visible(self.show_title)
self.drawn = True
if self.subplot:
return self.handles['axis']
else:
fig = self.handles['fig']
if not getattr(self, 'overlaid', False) and self._close_figures:
plt.close(fig)
return fig
@property
def state(self):
return self.handles['fig']
def anim(self, start=0, stop=None, fps=30):
"""
Method to return a matplotlib animation. The start and stop
frames may be specified as well as the fps.
"""
figure = self.initialize_plot()
anim = animation.FuncAnimation(figure, self.update_frame,
frames=self.keys,
interval = 1000.0/fps)
# Close the figure handle
if self._close_figures: plt.close(figure)
return anim
def update(self, key):
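        """
        Update the plot to the given key, applying the figure rc
        parameters and initializing the plot on the first call.
        """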
rc_params = self.fig_rcparams
if self.fig_latex:
rc_params['text.usetex'] = True
mpl.rcParams.update(rc_params)
if len(self) == 1 and key == 0 and not self.drawn:
return self.initialize_plot()
return self.__getitem__(key)
class CompositePlot(GenericCompositePlot, MPLPlot):
"""
CompositePlot provides a baseclass for plots coordinate multiple
subplots to form a Layout.
"""
def update_frame(self, key, ranges=None):
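        """
        Update all subplots to the given key and refresh the title.
        """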
ranges = self.compute_ranges(self.layout, key, ranges)
for subplot in self.subplots.values():
subplot.update_frame(key, ranges=ranges)
title = self._format_title(key) if self.show_title else ''
if 'title' in self.handles:
self.handles['title'].set_text(title)
else:
title = self.handles['axis'].set_title(title, **self._fontsize('title'))
self.handles['title'] = title
class GridPlot(CompositePlot):
"""
Plot a group of elements in a grid layout based on a GridSpace element
object.
"""
aspect = param.Parameter(default='equal', doc="""
Aspect ratios on GridPlot should be automatically determined.""")
padding = param.Number(default=0.1, doc="""
The amount of padding as a fraction of the total Grid size""")
shared_xaxis = param.Boolean(default=False, doc="""
If enabled the x-axes of the GridSpace will be drawn from the
objects inside the Grid rather than the GridSpace dimensions.""")
shared_yaxis = param.Boolean(default=False, doc="""
        If enabled the y-axes of the GridSpace will be drawn from the
        objects inside the Grid rather than the GridSpace dimensions.""")
show_frame = param.Boolean(default=False, doc="""
Whether to draw a frame around the Grid.""")
show_legend = param.Boolean(default=False, doc="""
        Legends add too much clutter in a grid and are disabled by default.""")
tick_format = param.String(default="%.2f", doc="""
Formatting string for the GridPlot ticklabels.""")
xaxis = param.ObjectSelector(default='bottom',
objects=['bottom', 'top', None], doc="""
Whether and where to display the xaxis, supported options are
'bottom', 'top' and None.""")
yaxis = param.ObjectSelector(default='left',
objects=['left', 'right', None], doc="""
Whether and where to display the yaxis, supported options are
'left', 'right' and None.""")
xrotation = param.Integer(default=0, bounds=(0, 360), doc="""
Rotation angle of the xticks.""")
yrotation = param.Integer(default=0, bounds=(0, 360), doc="""
Rotation angle of the yticks.""")
def __init__(self, layout, axis=None, create_axes=True, ranges=None,
keys=None, dimensions=None, layout_num=1, **params):
if not isinstance(layout, GridSpace):
raise Exception("GridPlot only accepts GridSpace.")
self.layout = layout
self.cols, self.rows = layout.shape
self.layout_num = layout_num
extra_opts = self.lookup_options(layout, 'plot').options
if not keys or not dimensions:
dimensions, keys = traversal.unique_dimkeys(layout)
if 'uniform' not in params:
params['uniform'] = traversal.uniform(layout)
dynamic, sampled = get_dynamic_mode(layout)
if sampled:
initialize_sampled(layout, dimensions, keys[0])
super(GridPlot, self).__init__(keys=keys, dimensions=dimensions,
dynamic=dynamic,
**dict(extra_opts, **params))
# Compute ranges layoutwise
grid_kwargs = {}
if axis is not None:
bbox = axis.get_position()
l, b, w, h = bbox.x0, bbox.y0, bbox.width, bbox.height
grid_kwargs = {'left': l, 'right': l+w, 'bottom': b, 'top': b+h}
self.position = (l, b, w, h)
self.fig_inches = self._get_size()
self._layoutspec = gridspec.GridSpec(self.rows, self.cols, **grid_kwargs)
self.subplots, self.subaxes, self.layout = self._create_subplots(layout, axis, ranges, create_axes)
def _get_size(self):
max_dim = max(self.layout.shape)
# Reduce plot size as GridSpace gets larger
shape_factor = 1. / max_dim
# Expand small grids to a sensible viewing size
expand_factor = 1 + (max_dim - 1) * 0.1
scale_factor = expand_factor * shape_factor
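        # For illustration: a 3x3 GridSpace gives shape_factor=1/3 and
        # expand_factor=1.2, i.e. scale_factor=0.4, so a scalar fig_inches of 4
        # yields a 4.8 x 4.8 inch figure below.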
cols, rows = self.layout.shape
if isinstance(self.fig_inches, (tuple, list)):
fig_inches = list(self.fig_inches)
if fig_inches[0] is None:
fig_inches[0] = fig_inches[1] * (cols/rows)
if fig_inches[1] is None:
fig_inches[1] = fig_inches[0] * (rows/cols)
return fig_inches
else:
fig_inches = (self.fig_inches,)*2
return (scale_factor * cols * fig_inches[0],
scale_factor * rows * fig_inches[1])
def _create_subplots(self, layout, axis, ranges, create_axes):
layout = layout.map(Compositor.collapse_element, [CompositeOverlay],
clone=False)
norm_opts = self._traverse_options(layout, 'norm', ['axiswise'], [Element])
axiswise = all(norm_opts['axiswise'])
if not ranges:
self.handles['fig'].set_size_inches(self.fig_inches)
subplots, subaxes = OrderedDict(), OrderedDict()
frame_ranges = self.compute_ranges(layout, None, ranges)
frame_ranges = OrderedDict([(key, self.compute_ranges(layout, key, frame_ranges))
for key in self.keys])
collapsed_layout = layout.clone(shared_data=False, id=layout.id)
r, c = (0, 0)
for coord in layout.keys(full_grid=True):
if not isinstance(coord, tuple): coord = (coord,)
view = layout.data.get(coord, None)
# Create subplot
if type(view) in (Layout, NdLayout):
raise SkipRendering("Cannot plot nested Layouts.")
if view is not None:
vtype = view.type if isinstance(view, HoloMap) else view.__class__
opts = self.lookup_options(view, 'plot').options
else:
vtype = None
# Create axes
kwargs = {}
if create_axes:
projection = self._get_projection(view) if vtype else None
subax = plt.subplot(self._layoutspec[r, c], projection=projection)
if not axiswise and self.shared_xaxis and self.xaxis is not None:
self.xaxis = 'top'
if not axiswise and self.shared_yaxis and self.yaxis is not None:
self.yaxis = 'right'
# Disable subplot axes depending on shared axis options
# and the position in the grid
if (self.shared_xaxis or self.shared_yaxis) and not axiswise:
if c == 0 and r != 0:
subax.xaxis.set_ticks_position('none')
kwargs['xaxis'] = 'bottom-bare'
if c != 0 and r == 0 and not layout.ndims == 1:
subax.yaxis.set_ticks_position('none')
kwargs['yaxis'] = 'left-bare'
if r != 0 and c != 0:
kwargs['xaxis'] = 'bottom-bare'
kwargs['yaxis'] = 'left-bare'
if not self.shared_xaxis:
kwargs['xaxis'] = 'bottom-bare'
if not self.shared_yaxis:
kwargs['yaxis'] = 'left-bare'
else:
kwargs['xaxis'] = 'bottom-bare'
kwargs['yaxis'] = 'left-bare'
subaxes[(r, c)] = subax
else:
subax = None
if vtype and issubclass(vtype, CompositeOverlay) and (c == self.cols - 1 and
r == self.rows//2):
kwargs['show_legend'] = self.show_legend
kwargs['legend_position'] = 'right'
if (not isinstance(self.layout, GridMatrix) and not
((c == self.cols//2 and r == 0) or
(c == 0 and r == self.rows//2))):
kwargs['labelled'] = []
# Create subplot
if view is not None:
params = dict(fig=self.handles['fig'], axis=subax,
dimensions=self.dimensions, show_title=False,
subplot=not create_axes, ranges=frame_ranges,
uniform=self.uniform, keys=self.keys,
show_legend=False)
plotting_class = Store.registry['matplotlib'][vtype]
subplot = plotting_class(view, **dict(opts, **dict(params, **kwargs)))
collapsed_layout[coord] = subplot.layout if isinstance(subplot, CompositePlot) else subplot.hmap
subplots[(r, c)] = subplot
elif subax is not None:
subax.set_visible(False)
if r != self.rows-1:
r += 1
else:
r = 0
c += 1
if create_axes:
self.handles['axis'] = self._layout_axis(layout, axis)
self._adjust_subplots(self.handles['axis'], subaxes)
return subplots, subaxes, collapsed_layout
def initialize_plot(self, ranges=None):
# Get the extent of the layout elements (not the whole layout)
key = self.keys[-1]
axis = self.handles['axis']
subplot_kwargs = dict()
ranges = self.compute_ranges(self.layout, key, ranges)
for subplot in self.subplots.values():
subplot.initialize_plot(ranges=ranges, **subplot_kwargs)
if self.show_title:
title = axis.set_title(self._format_title(key),
**self._fontsize('title'))
self.handles['title'] = title
self._readjust_axes(axis)
self.drawn = True
if self.subplot: return self.handles['axis']
if self._close_figures: plt.close(self.handles['fig'])
return self.handles['fig']
def _readjust_axes(self, axis):
if self.subplot:
axis.set_position(self.position)
if self.aspect == 'equal':
axis.set_aspect(float(self.rows)/self.cols)
self.handles['fig'].canvas.draw()
self._adjust_subplots(self.handles['axis'], self.subaxes)
def _layout_axis(self, layout, axis):
fig = self.handles['fig']
axkwargs = {'gid': str(self.position)} if axis else {}
layout_axis = fig.add_subplot(1,1,1, **axkwargs)
if axis:
axis.set_visible(False)
layout_axis.set_position(self.position)
layout_axis.patch.set_visible(False)
tick_fontsize = self._fontsize('ticks','labelsize',common=False)
if tick_fontsize: layout_axis.tick_params(**tick_fontsize)
# Set labels
layout_axis.set_xlabel(str(layout.kdims[0]),
**self._fontsize('xlabel'))
if layout.ndims == 2:
layout_axis.set_ylabel(str(layout.kdims[1]),
**self._fontsize('ylabel'))
# Compute and set x- and y-ticks
dims = layout.kdims
keys = layout.keys()
if layout.ndims == 1:
dim1_keys = keys
dim2_keys = [0]
layout_axis.get_yaxis().set_visible(False)
else:
dim1_keys, dim2_keys = zip(*keys)
layout_axis.set_ylabel(str(dims[1]))
layout_axis.set_aspect(float(self.rows)/self.cols)
# Process ticks
plot_width = (1.0 - self.padding) / self.cols
border_width = self.padding / (self.cols-1) if self.cols > 1 else 0
xticks = [(plot_width/2)+(r*(plot_width+border_width)) for r in range(self.cols)]
plot_height = (1.0 - self.padding) / self.rows
border_height = self.padding / (self.rows-1) if layout.ndims > 1 else 0
yticks = [(plot_height/2)+(r*(plot_height+border_height)) for r in range(self.rows)]
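        # e.g. with the default padding=0.1 and a 3-column grid: plot_width=0.3,
        # border_width=0.05, giving xticks [0.15, 0.5, 0.85] centred on each column.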
layout_axis.set_xticks(xticks)
layout_axis.set_xticklabels(self._process_ticklabels(sorted(set(dim1_keys)), dims[0]))
for tick in layout_axis.get_xticklabels():
tick.set_rotation(self.xrotation)
ydim = dims[1] if layout.ndims > 1 else None
layout_axis.set_yticks(yticks)
layout_axis.set_yticklabels(self._process_ticklabels(sorted(set(dim2_keys)), ydim))
for tick in layout_axis.get_yticklabels():
tick.set_rotation(self.yrotation)
if not self.show_frame:
layout_axis.spines['right' if self.yaxis == 'left' else 'left'].set_visible(False)
layout_axis.spines['bottom' if self.xaxis == 'top' else 'top'].set_visible(False)
axis = layout_axis
if self.xaxis is not None:
axis.xaxis.set_ticks_position(self.xaxis)
axis.xaxis.set_label_position(self.xaxis)
else:
axis.xaxis.set_visible(False)
if self.yaxis is not None:
axis.yaxis.set_ticks_position(self.yaxis)
axis.yaxis.set_label_position(self.yaxis)
else:
axis.yaxis.set_visible(False)
for pos in ['left', 'right', 'top', 'bottom']:
axis.spines[pos].set_visible(False)
return layout_axis
def _process_ticklabels(self, labels, dim):
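        # A dimension's value_format takes precedence; otherwise non-string keys
        # are run through tick_format (e.g. 0.5 -> '0.50' with the default '%.2f')
        # and None keys become empty strings.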
formatted_labels = []
for k in labels:
if dim and dim.value_format:
k = dim.value_format(k)
elif not isinstance(k, (str, type(None))):
k = self.tick_format % k
elif k is None:
k = ''
formatted_labels.append(k)
return formatted_labels
def _adjust_subplots(self, axis, subaxes):
bbox = axis.get_position()
l, b, w, h = bbox.x0, bbox.y0, bbox.width, bbox.height
if self.padding:
width_padding = w/(1./self.padding)
height_padding = h/(1./self.padding)
else:
width_padding, height_padding = 0, 0
if self.cols == 1:
b_w = 0
else:
b_w = width_padding / (self.cols - 1)
if self.rows == 1:
b_h = 0
else:
b_h = height_padding / (self.rows - 1)
ax_w = (w - (width_padding if self.cols > 1 else 0)) / self.cols
ax_h = (h - (height_padding if self.rows > 1 else 0)) / self.rows
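        # e.g. padding=0.1 on a 2x2 grid: each axis spans 0.45 of the layout
        # width/height, separated by a gap of 0.1 of the layout size.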
r, c = (0, 0)
for ax in subaxes.values():
xpos = l + (c*ax_w) + (c * b_w)
ypos = b + (r*ax_h) + (r * b_h)
if r != self.rows-1:
r += 1
else:
r = 0
c += 1
            if ax is not None:
ax.set_position([xpos, ypos, ax_w, ax_h])
class AdjointLayoutPlot(CompositePlot):
"""
LayoutPlot allows placing up to three Views in a number of
predefined and fixed layouts, which are defined by the layout_dict
class attribute. This allows placing subviews next to a main plot
in either a 'top' or 'right' position.
    Initially, a LayoutPlot computes an appropriate layout based on
the number of Views in the AdjointLayout object it has been given, but
when embedded in a NdLayout, it can recompute the layout to
match the number of rows and columns as part of a larger grid.
"""
layout_dict = {'Single': ['main'],
'Dual': ['main', 'right'],
'Triple': ['top', None, 'main', 'right'],
'Embedded Dual': [None, 'main']}
def __init__(self, layout, layout_type, subaxes, subplots, **params):
# The AdjointLayout ViewableElement object
self.layout = layout
        # Type may be set to 'Embedded Dual' by a call to grid_situate
self.layout_type = layout_type
self.view_positions = self.layout_dict[self.layout_type]
# The supplied (axes, view) objects as indexed by position
self.subaxes = {pos: ax for ax, pos in zip(subaxes, self.view_positions)}
super(AdjointLayoutPlot, self).__init__(subplots=subplots, **params)
def initialize_plot(self, ranges=None):
"""
Plot all the views contained in the AdjointLayout Object using axes
appropriate to the layout configuration. All the axes are
supplied by LayoutPlot - the purpose of the call is to
invoke subplots with correct options and styles and hide any
empty axes as necessary.
"""
for pos in self.view_positions:
# Pos will be one of 'main', 'top' or 'right' or None
view = self.layout.get(pos, None)
subplot = self.subplots.get(pos, None)
ax = self.subaxes.get(pos, None)
# If no view object or empty position, disable the axis
if None in [view, pos, subplot]:
ax.set_axis_off()
continue
subplot.initialize_plot(ranges=ranges)
self.adjust_positions()
self.drawn = True
def adjust_positions(self):
"""
Make adjustments to the positions of subplots (if available)
relative to the main plot axes as required.
This method is called by LayoutPlot after an initial pass
used to position all the Layouts together. This method allows
LayoutPlots to make final adjustments to the axis positions.
"""
checks = [self.view_positions, self.subaxes, self.subplots]
right = all('right' in check for check in checks)
top = all('top' in check for check in checks)
        if 'main' not in self.subplots or not (top or right):
return
self.handles['fig'].canvas.draw()
main_ax = self.subplots['main'].handles['axis']
bbox = main_ax.get_position()
if right:
ax = self.subaxes['right']
subplot = self.subplots['right']
if isinstance(subplot, AdjoinedPlot):
subplot_size = subplot.subplot_size
border_size = subplot.border_size
else:
subplot_size = 0.25
border_size = 0.25
ax.set_position([bbox.x1 + bbox.width * border_size,
bbox.y0,
bbox.width * subplot_size, bbox.height])
if isinstance(subplot, GridPlot):
ax.set_aspect('equal')
if top:
ax = self.subaxes['top']
subplot = self.subplots['top']
if isinstance(subplot, AdjoinedPlot):
subplot_size = subplot.subplot_size
border_size = subplot.border_size
else:
subplot_size = 0.25
border_size = 0.25
ax.set_position([bbox.x0,
bbox.y1 + bbox.height * border_size,
bbox.width, bbox.height * subplot_size])
if isinstance(subplot, GridPlot):
ax.set_aspect('equal')
def update_frame(self, key, ranges=None):
for pos in self.view_positions:
subplot = self.subplots.get(pos)
if subplot is not None:
subplot.update_frame(key, ranges)
def __len__(self):
return max([1 if self.keys is None else len(self.keys), 1])
class LayoutPlot(GenericLayoutPlot, CompositePlot):
"""
A LayoutPlot accepts either a Layout or a NdLayout and
displays the elements in a cartesian grid in scanline order.
"""
absolute_scaling = param.ObjectSelector(default=False, doc="""
If aspect_weight is enabled absolute_scaling determines whether
axes are scaled relative to the widest plot or whether the
aspect scales the axes in absolute terms.""")
aspect_weight = param.Number(default=0, doc="""
Weighting of the individual aspects when computing the Layout
grid aspects and overall figure size.""")
fig_bounds = param.NumericTuple(default=(0.05, 0.05, 0.95, 0.95), doc="""
The bounds of the figure as a 4-tuple of the form
(left, bottom, right, top), defining the size of the border
around the subplots.""")
tight = param.Boolean(default=False, doc="""
Tightly fit the axes in the layout within the fig_bounds
and tight_padding.""")
tight_padding = param.Parameter(default=3, doc="""
Integer or tuple specifying the padding in inches in a tight layout.""")
hspace = param.Number(default=0.5, doc="""
Specifies the space between horizontally adjacent elements in the grid.
Default value is set conservatively to avoid overlap of subplots.""")
vspace = param.Number(default=0.1, doc="""
Specifies the space between vertically adjacent elements in the grid.
Default value is set conservatively to avoid overlap of subplots.""")
fontsize = param.Parameter(default={'title':16}, allow_None=True)
def __init__(self, layout, **params):
super(LayoutPlot, self).__init__(layout=layout, **params)
self.subplots, self.subaxes, self.layout = self._compute_gridspec(layout)
def _compute_gridspec(self, layout):
"""
Computes the tallest and widest cell for each row and column
by examining the Layouts in the GridSpace. The GridSpec is then
instantiated and the LayoutPlots are configured with the
appropriate embedded layout_types. The first element of the
returned tuple is a dictionary of all the LayoutPlots indexed
by row and column. The second dictionary in the tuple supplies
    the grid indices needed to instantiate the axes for each
LayoutPlot.
"""
layout_items = layout.grid_items()
layout_dimensions = layout.kdims if isinstance(layout, NdLayout) else None
layouts = {}
col_widthratios, row_heightratios = {}, {}
for (r, c) in self.coords:
# Get view at layout position and wrap in AdjointLayout
_, view = layout_items.get((r, c), (None, None))
layout_view = view if isinstance(view, AdjointLayout) else AdjointLayout([view])
layouts[(r, c)] = layout_view
# Compute shape of AdjointLayout element
layout_lens = {1:'Single', 2:'Dual', 3:'Triple'}
layout_type = layout_lens[len(layout_view)]
# Get aspects
main = layout_view.main
main = main.last if isinstance(main, HoloMap) else main
main_options = self.lookup_options(main, 'plot').options if main else {}
if main and not isinstance(main_options.get('aspect', 1), basestring):
main_aspect = np.nan if isinstance(main, Empty) else main_options.get('aspect', 1)
main_aspect = self.aspect_weight*main_aspect + 1-self.aspect_weight
else:
main_aspect = np.nan
if layout_type in ['Dual', 'Triple']:
el = layout_view.get('right', None)
eltype = type(el)
if el and eltype in MPLPlot.sideplots:
plot_type = MPLPlot.sideplots[type(el)]
ratio = 0.6*(plot_type.subplot_size+plot_type.border_size)
width_ratios = [4, 4*ratio]
else:
width_ratios = [4, 1]
else:
width_ratios = [4]
inv_aspect = 1./main_aspect if main_aspect else np.NaN
if layout_type in ['Embedded Dual', 'Triple']:
el = layout_view.get('top', None)
eltype = type(el)
if el and eltype in MPLPlot.sideplots:
plot_type = MPLPlot.sideplots[type(el)]
ratio = 0.6*(plot_type.subplot_size+plot_type.border_size)
height_ratios = [4*ratio, 4]
else:
height_ratios = [1, 4]
else:
height_ratios = [4]
if not isinstance(main_aspect, (basestring, type(None))):
width_ratios = [wratio * main_aspect for wratio in width_ratios]
height_ratios = [hratio * inv_aspect for hratio in height_ratios]
layout_shape = (len(width_ratios), len(height_ratios))
# For each row and column record the width and height ratios
# of the LayoutPlot with the most horizontal or vertical splits
# and largest aspect
prev_heights = row_heightratios.get(r, (0, []))
if layout_shape[1] > prev_heights[0]:
row_heightratios[r] = [layout_shape[1], prev_heights[1]]
row_heightratios[r][1].append(height_ratios)
prev_widths = col_widthratios.get(c, (0, []))
if layout_shape[0] > prev_widths[0]:
col_widthratios[c] = (layout_shape[0], prev_widths[1])
col_widthratios[c][1].append(width_ratios)
col_splits = [v[0] for __, v in sorted(col_widthratios.items())]
row_splits = [v[0] for ___, v in sorted(row_heightratios.items())]
widths = np.array([r for col in col_widthratios.values()
for ratios in col[1] for r in ratios])/4
wr_unnormalized = compute_ratios(col_widthratios, False)
hr_list = compute_ratios(row_heightratios)
wr_list = compute_ratios(col_widthratios)
# Compute the number of rows and cols
cols, rows = len(wr_list), len(hr_list)
wr_list = [r if np.isfinite(r) else 1 for r in wr_list]
hr_list = [r if np.isfinite(r) else 1 for r in hr_list]
width = sum([r if np.isfinite(r) else 1 for r in wr_list])
yscale = width/sum([(1/v)*4 if np.isfinite(v) else 4 for v in wr_unnormalized])
if self.absolute_scaling:
width = width*np.nanmax(widths)
xinches, yinches = None, None
if not isinstance(self.fig_inches, (tuple, list)):
xinches = self.fig_inches * width
yinches = xinches/yscale
elif self.fig_inches[0] is None:
xinches = self.fig_inches[1] * yscale
yinches = self.fig_inches[1]
elif self.fig_inches[1] is None:
xinches = self.fig_inches[0]
yinches = self.fig_inches[0] / yscale
if xinches and yinches:
self.handles['fig'].set_size_inches([xinches, yinches])
self.gs = gridspec.GridSpec(rows, cols,
width_ratios=wr_list,
height_ratios=hr_list,
wspace=self.hspace,
hspace=self.vspace)
# Situate all the Layouts in the grid and compute the gridspec
# indices for all the axes required by each LayoutPlot.
gidx = 0
layout_count = 0
tight = self.tight
collapsed_layout = layout.clone(shared_data=False, id=layout.id)
frame_ranges = self.compute_ranges(layout, None, None)
frame_ranges = OrderedDict([(key, self.compute_ranges(layout, key, frame_ranges))
for key in self.keys])
layout_subplots, layout_axes = {}, {}
for r, c in self.coords:
# Compute the layout type from shape
wsplits = col_splits[c]
hsplits = row_splits[r]
if (wsplits, hsplits) == (1,1):
layout_type = 'Single'
elif (wsplits, hsplits) == (2,1):
layout_type = 'Dual'
elif (wsplits, hsplits) == (1,2):
layout_type = 'Embedded Dual'
elif (wsplits, hsplits) == (2,2):
layout_type = 'Triple'
            # Get the AdjointLayout at the specified coordinate
view = layouts[(r, c)]
positions = AdjointLayoutPlot.layout_dict[layout_type]
# Create temporary subplots to get projections types
# to create the correct subaxes for all plots in the layout
_, _, projs = self._create_subplots(layouts[(r, c)], positions,
None, frame_ranges, create=False)
gidx, gsinds = self.grid_situate(gidx, layout_type, cols)
layout_key, _ = layout_items.get((r, c), (None, None))
if isinstance(layout, NdLayout) and layout_key:
layout_dimensions = OrderedDict(zip(layout_dimensions, layout_key))
# Generate the axes and create the subplots with the appropriate
# axis objects, handling any Empty objects.
obj = layouts[(r, c)]
empty = isinstance(obj.main, Empty)
if empty:
obj = AdjointLayout([])
else:
layout_count += 1
subaxes = [plt.subplot(self.gs[ind], projection=proj)
for ind, proj in zip(gsinds, projs)]
subplot_data = self._create_subplots(obj, positions,
layout_dimensions, frame_ranges,
dict(zip(positions, subaxes)),
num=0 if empty else layout_count)
subplots, adjoint_layout, _ = subplot_data
layout_axes[(r, c)] = subaxes
# Generate the AdjointLayoutsPlot which will coordinate
# plotting of AdjointLayouts in the larger grid
plotopts = self.lookup_options(view, 'plot').options
layout_plot = AdjointLayoutPlot(adjoint_layout, layout_type, subaxes, subplots,
fig=self.handles['fig'], **plotopts)
layout_subplots[(r, c)] = layout_plot
tight = not any(type(p) is GridPlot for p in layout_plot.subplots.values()) and tight
if layout_key:
collapsed_layout[layout_key] = adjoint_layout
# Apply tight layout if enabled and incompatible
# GridPlot isn't present.
if tight:
if isinstance(self.tight_padding, (tuple, list)):
wpad, hpad = self.tight_padding
padding = dict(w_pad=wpad, h_pad=hpad)
else:
padding = dict(w_pad=self.tight_padding, h_pad=self.tight_padding)
self.gs.tight_layout(self.handles['fig'], rect=self.fig_bounds, **padding)
return layout_subplots, layout_axes, collapsed_layout
def grid_situate(self, current_idx, layout_type, subgrid_width):
"""
Situate the current AdjointLayoutPlot in a LayoutPlot. The
LayoutPlot specifies a layout_type into which the AdjointLayoutPlot
must be embedded. This enclosing layout is guaranteed to have
enough cells to display all the views.
Based on this enforced layout format, a starting index
supplied by LayoutPlot (indexing into a large gridspec
arrangement) is updated to the appropriate embedded value. It
will also return a list of gridspec indices associated with
        all the required layout axes.
"""
# Set the layout configuration as situated in a NdLayout
if layout_type == 'Single':
start, inds = current_idx+1, [current_idx]
elif layout_type == 'Dual':
start, inds = current_idx+2, [current_idx, current_idx+1]
bottom_idx = current_idx + subgrid_width
if layout_type == 'Embedded Dual':
bottom = ((current_idx+1) % subgrid_width) == 0
grid_idx = (bottom_idx if bottom else current_idx)+1
start, inds = grid_idx, [current_idx, bottom_idx]
elif layout_type == 'Triple':
bottom = ((current_idx+2) % subgrid_width) == 0
grid_idx = (bottom_idx if bottom else current_idx) + 2
start, inds = grid_idx, [current_idx, current_idx+1,
bottom_idx, bottom_idx+1]
return start, inds
def _create_subplots(self, layout, positions, layout_dimensions, ranges, axes={}, num=1, create=True):
"""
Plot all the views contained in the AdjointLayout Object using axes
appropriate to the layout configuration. All the axes are
supplied by LayoutPlot - the purpose of the call is to
invoke subplots with correct options and styles and hide any
empty axes as necessary.
"""
subplots = {}
projections = []
adjoint_clone = layout.clone(shared_data=False, id=layout.id)
subplot_opts = dict(show_title=False, adjoined=layout)
for pos in positions:
# Pos will be one of 'main', 'top' or 'right' or None
view = layout.get(pos, None)
ax = axes.get(pos, None)
if view is None:
projections.append(None)
continue
# Determine projection type for plot
projections.append(self._get_projection(view))
if not create:
continue
# Customize plotopts depending on position.
plotopts = self.lookup_options(view, 'plot').options
# Options common for any subplot
override_opts = {}
sublabel_opts = {}
if pos == 'main':
own_params = self.get_param_values(onlychanged=True)
sublabel_opts = {k: v for k, v in own_params
if 'sublabel_' in k}
if not isinstance(view, GridSpace):
override_opts = dict(aspect='square')
elif pos == 'right':
right_opts = dict(invert_axes=True,
xaxis=None)
override_opts = dict(subplot_opts, **right_opts)
elif pos == 'top':
top_opts = dict(yaxis=None)
override_opts = dict(subplot_opts, **top_opts)
# Override the plotopts as required
plotopts = dict(sublabel_opts, **plotopts)
plotopts.update(override_opts, fig=self.handles['fig'])
vtype = view.type if isinstance(view, HoloMap) else view.__class__
if isinstance(view, GridSpace):
plotopts['create_axes'] = ax is not None
plot_type = Store.registry['matplotlib'][vtype]
if pos != 'main' and vtype in MPLPlot.sideplots:
plot_type = MPLPlot.sideplots[vtype]
num = num if len(self.coords) > 1 else 0
subplots[pos] = plot_type(view, axis=ax, keys=self.keys,
dimensions=self.dimensions,
layout_dimensions=layout_dimensions,
ranges=ranges, subplot=True,
uniform=self.uniform, layout_num=num,
**plotopts)
if isinstance(view, (Element, HoloMap, Collator, CompositeOverlay)):
adjoint_clone[pos] = subplots[pos].hmap
else:
adjoint_clone[pos] = subplots[pos].layout
return subplots, adjoint_clone, projections
def initialize_plot(self):
key = self.keys[-1]
ranges = self.compute_ranges(self.layout, key, None)
for subplot in self.subplots.values():
subplot.initialize_plot(ranges=ranges)
# Create title handle
if self.show_title and len(self.coords) > 1:
title = self._format_title(key)
title = self.handles['fig'].suptitle(title, **self._fontsize('title'))
self.handles['title'] = title
self.handles['bbox_extra_artists'] += [title]
return self._finalize_axis(None)
class AdjoinedPlot(DimensionedPlot):
aspect = param.Parameter(default='auto', doc="""
Aspect ratios on SideHistogramPlot should be determined by the
AdjointLayoutPlot.""")
bgcolor = param.Parameter(default=(1, 1, 1, 0), doc="""
Make plot background invisible.""")
border_size = param.Number(default=0.25, doc="""
The size of the border expressed as a fraction of the main plot.""")
show_frame = param.Boolean(default=False)
show_title = param.Boolean(default=False, doc="""
Titles should be disabled on all SidePlots to avoid clutter.""")
subplot_size = param.Number(default=0.25, doc="""
        The size of subplots expressed as a fraction of the main plot.""")
show_xlabel = param.Boolean(default=False, doc="""
Whether to show the x-label of the plot. Disabled by default
because plots are often too cramped to fit the title correctly.""")
| 1 | 15,325 | So this parameter is now deprecated? | holoviz-holoviews | py |
@@ -269,6 +269,7 @@ rseq_shared_fragment_flushtime_update(dcontext_t *dcontext)
rseq_clear_tls_ptr(dcontext);
}
+#ifdef HAVE_RSEQ
bool
rseq_is_registered_for_current_thread(void)
{ | 1 | /* *******************************************************************************
* Copyright (c) 2019-2020 Google, Inc. All rights reserved.
* *******************************************************************************/
/*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* * Neither the name of Google, Inc. nor the names of its contributors may be
* used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL VMWARE, INC. OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*/
/**************************************************************************************
* Restartable sequence ("rseq") support (i#2350).
* This is a kernel feature which provides cpu-atomic regions: if a thread
* is pre-empted within an rseq region, an abort handler is invoked.
* The feature is difficult to handle under binary instrumentation.
* We rely on the app following certain conventions, including containing a
* section holding a table of all rseq sequences.
*/
#include "../globals.h"
#include "../module_shared.h"
#include "module_private.h"
#include "os_private.h"
#include "rseq_linux.h"
#include "../fragment.h"
#include "decode.h"
#ifdef CLIENT_INTERFACE
# include "instrument.h"
#endif
#include <stddef.h>
#ifdef HAVE_RSEQ
# include <linux/rseq.h>
#else
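/* Minimal local definitions mirroring linux/rseq.h, for toolchains where the
 * kernel header is not available.
 */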
struct rseq_cs {
uint version;
uint flags;
uint64 start_ip;
uint64 post_commit_offset;
uint64 abort_ip;
} __attribute__((aligned(4 * sizeof(uint64))));
struct rseq {
uint cpu_id_start;
uint cpu_id;
union {
uint64 ptr64;
} rseq_cs;
uint flags;
} __attribute__((aligned(4 * sizeof(uint64))));
# define RSEQ_FLAG_UNREGISTER 1
#endif
#include "include/syscall.h"
#include <errno.h>
vm_area_vector_t *d_r_rseq_areas;
DECLARE_CXTSWPROT_VAR(static mutex_t rseq_trigger_lock,
INIT_LOCK_FREE(rseq_trigger_lock));
static volatile bool rseq_enabled;
/* We require all threads to use the same TLS offset to point at struct rseq. */
static int rseq_tls_offset;
/* The signature is registered per thread, but we require all registrations
* to be the same.
*/
static int rseq_signature;
typedef struct _rseq_region_t {
app_pc start;
app_pc end;
app_pc handler;
/* We need to preserve input registers for targeting "start" instead of "handler"
* for our 2nd invocation, if they're written in the rseq region. We only support
* GPR inputs. We document that we do not support any other inputs (no flags, no
* SIMD registers).
*/
bool reg_written[DR_NUM_GPR_REGS];
} rseq_region_t;
/* We need to store a struct rseq_cs per fragment_t. To avoid the cost of adding a
* pointer field to every fragment_t, and the complexity of another subclass like
* trace_t, we store them externally in a hashtable. The FRAG_HAS_RSEQ_ENDPOINT flag
* avoids the hashtable lookup on every fragment.
*/
static generic_table_t *rseq_cs_table;
#define INIT_RSEQ_CS_TABLE_SIZE 5
/* vmvector callbacks */
static void
rseq_area_free(void *data)
{
HEAP_TYPE_FREE(GLOBAL_DCONTEXT, data, rseq_region_t, ACCT_VMAREAS, PROTECTED);
}
static void *
rseq_area_dup(void *data)
{
rseq_region_t *src = (rseq_region_t *)data;
rseq_region_t *dst =
HEAP_TYPE_ALLOC(GLOBAL_DCONTEXT, rseq_region_t, ACCT_VMAREAS, PROTECTED);
ASSERT(src != NULL);
*dst = *src;
return dst;
}
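/* Size of the per-fragment buffer that holds a struct rseq_cs; the extra
 * alignment bytes leave room for the ALIGN_FORWARD in rseq_get_rseq_cs_alloc().
 */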
static inline size_t
rseq_cs_alloc_size(void)
{
return sizeof(struct rseq) + __alignof(struct rseq_cs);
}
static void
rseq_cs_free(dcontext_t *dcontext, void *data)
{
global_heap_free(data, rseq_cs_alloc_size() HEAPACCT(ACCT_OTHER));
}
void
d_r_rseq_init(void)
{
VMVECTOR_ALLOC_VECTOR(d_r_rseq_areas, GLOBAL_DCONTEXT,
VECTOR_SHARED | VECTOR_NEVER_MERGE, rseq_areas);
vmvector_set_callbacks(d_r_rseq_areas, rseq_area_free, rseq_area_dup, NULL, NULL);
rseq_cs_table = generic_hash_create(GLOBAL_DCONTEXT, INIT_RSEQ_CS_TABLE_SIZE, 80,
HASHTABLE_SHARED | HASHTABLE_PERSISTENT,
rseq_cs_free _IF_DEBUG("rseq_cs table"));
/* Enable rseq pre-attach for things like dr_prepopulate_cache(). */
if (rseq_is_registered_for_current_thread())
rseq_locate_rseq_regions();
}
void
d_r_rseq_exit(void)
{
generic_hash_destroy(GLOBAL_DCONTEXT, rseq_cs_table);
vmvector_delete_vector(GLOBAL_DCONTEXT, d_r_rseq_areas);
DELETE_LOCK(rseq_trigger_lock);
}
void
rseq_thread_attach(dcontext_t *dcontext)
{
rseq_region_t *info;
if (!vmvector_lookup_data(d_r_rseq_areas, dcontext->next_tag, NULL, NULL,
(void **)&info))
return;
/* The thread missed the save of its state on rseq entry. We could try to save here
* so the restore on rseq exit won't read incorrect values, but it's simpler and
* less error-prone to send it to the abort handler, like we do on detach or other
* translation points.
*/
dcontext->next_tag = info->handler;
}
bool
rseq_get_region_info(app_pc pc, app_pc *start OUT, app_pc *end OUT, app_pc *handler OUT,
bool **reg_written OUT, int *reg_written_size OUT)
{
rseq_region_t *info;
if (!vmvector_lookup_data(d_r_rseq_areas, pc, start, end, (void **)&info))
return false;
if (handler != NULL)
*handler = info->handler;
if (reg_written != NULL)
*reg_written = info->reg_written;
if (reg_written_size != NULL)
*reg_written_size = sizeof(info->reg_written) / sizeof(info->reg_written[0]);
return true;
}
int
rseq_get_tls_ptr_offset(void)
{
/* This read is assumed to be atomic. */
ASSERT(rseq_tls_offset != 0);
return rseq_tls_offset + offsetof(struct rseq, rseq_cs);
}
static void
rseq_clear_tls_ptr(dcontext_t *dcontext)
{
ASSERT(rseq_tls_offset != 0);
byte *base = get_segment_base(LIB_SEG_TLS);
struct rseq *app_rseq = (struct rseq *)(base + rseq_tls_offset);
/* We're directly writing this in the cache, so we do not bother with safe_read
* or safe_write here either. We already cannot handle rseq adversarial cases.
*/
if (is_dynamo_address((byte *)(ptr_uint_t)app_rseq->rseq_cs.ptr64))
app_rseq->rseq_cs.ptr64 = 0;
}
int
rseq_get_signature(void)
{
/* This is only called after rseq is initialized and the signature determined. */
ASSERT(rseq_enabled);
return rseq_signature;
}
byte *
rseq_get_rseq_cs_alloc(byte **rseq_cs_aligned OUT)
{
byte *rseq_cs_alloc = global_heap_alloc(rseq_cs_alloc_size() HEAPACCT(ACCT_OTHER));
*rseq_cs_aligned = (byte *)ALIGN_FORWARD(rseq_cs_alloc, __alignof(struct rseq_cs));
return rseq_cs_alloc;
}
void
rseq_record_rseq_cs(byte *rseq_cs_alloc, fragment_t *f, cache_pc start, cache_pc end,
cache_pc abort)
{
struct rseq_cs *target =
(struct rseq_cs *)ALIGN_FORWARD(rseq_cs_alloc, __alignof(struct rseq_cs));
target->version = 0;
target->flags = 0;
target->start_ip = (ptr_uint_t)start;
target->post_commit_offset = (ptr_uint_t)(end - start);
target->abort_ip = (ptr_uint_t)abort;
TABLE_RWLOCK(rseq_cs_table, write, lock);
generic_hash_add(GLOBAL_DCONTEXT, rseq_cs_table, (ptr_uint_t)f, rseq_cs_alloc);
TABLE_RWLOCK(rseq_cs_table, write, unlock);
}
void
rseq_remove_fragment(dcontext_t *dcontext, fragment_t *f)
{
if (!rseq_enabled)
return;
/* Avoid freeing a live rseq_cs for a thread-private fragment deletion. */
rseq_clear_tls_ptr(dcontext);
TABLE_RWLOCK(rseq_cs_table, write, lock);
generic_hash_remove(GLOBAL_DCONTEXT, rseq_cs_table, (ptr_uint_t)f);
TABLE_RWLOCK(rseq_cs_table, write, unlock);
}
void
rseq_shared_fragment_flushtime_update(dcontext_t *dcontext)
{
if (!rseq_enabled)
return;
/* Avoid freeing a live rseq_cs for thread-shared fragment deletion.
* We clear the pointer on completion of the native rseq execution, but it's
* not easy to clear it on midpoint exits. We instead clear prior to
* rseq_cs being freed: for thread-private in rseq_remove_fragment() and for
* thread-shared each thread should come here prior to deletion.
*/
rseq_clear_tls_ptr(dcontext);
}
bool
rseq_is_registered_for_current_thread(void)
{
/* Unfortunately there's no way to query the current rseq struct.
* For 64-bit we can pass a kernel address and look for EFAULT
* vs EINVAL, but there is no kernel address for 32-bit.
* So we try to perform a legitimate registration.
*/
struct rseq test_rseq = {};
int res = dynamorio_syscall(SYS_rseq, 4, &test_rseq, sizeof(test_rseq), 0, 0);
if (res == -EINVAL) /* Our struct != registered struct. */
return true;
if (res == -ENOSYS)
return false;
/* If seccomp blocks SYS_rseq we'll get -EPERM. SYS_rseq also returns -EPERM
* if &test_rseq == the app's struct but the signature is different, but that
* seems so unlikely that we just assume -EPERM implies seccomp.
*/
if (res == -EPERM)
return false;
    ASSERT(res == 0); /* If not, the struct size or something else changed! */
if (dynamorio_syscall(SYS_rseq, 4, &test_rseq, sizeof(test_rseq),
RSEQ_FLAG_UNREGISTER, 0) != 0) {
ASSERT_NOT_REACHED();
}
return false;
}
static void
rseq_analyze_instructions(rseq_region_t *info)
{
/* We analyze the instructions inside [start,end) looking for register state that we
* need to preserve for our restart. We do not want to blindly spill and restore
* 16+ registers for every sequence (too much overhead).
*/
instr_t instr;
instr_init(GLOBAL_DCONTEXT, &instr);
app_pc pc = info->start;
int i;
bool reached_cti = false;
memset(info->reg_written, 0, sizeof(info->reg_written));
while (pc < info->end) {
instr_reset(GLOBAL_DCONTEXT, &instr);
app_pc next_pc = decode(GLOBAL_DCONTEXT, pc, &instr);
if (next_pc == NULL) {
REPORT_FATAL_ERROR_AND_EXIT(RSEQ_BEHAVIOR_UNSUPPORTED, 3,
get_application_name(), get_application_pid(),
"Rseq sequence contains invalid instructions");
ASSERT_NOT_REACHED();
}
if (instr_is_syscall(&instr)
/* Allow a syscall for our test in debug build. */
IF_DEBUG(
&&!check_filter("api.rseq;linux.rseq;linux.rseq_table;linux.rseq_noarray",
get_short_name(get_application_name())))) {
REPORT_FATAL_ERROR_AND_EXIT(RSEQ_BEHAVIOR_UNSUPPORTED, 3,
get_application_name(), get_application_pid(),
"Rseq sequence contains a system call");
ASSERT_NOT_REACHED();
}
if (instr_is_call(&instr)) {
REPORT_FATAL_ERROR_AND_EXIT(RSEQ_BEHAVIOR_UNSUPPORTED, 3,
get_application_name(), get_application_pid(),
"Rseq sequence contains a call");
ASSERT_NOT_REACHED();
}
if (instr_is_cti(&instr))
reached_cti = true;
/* We potentially need to preserve any register written anywhere inside
* the sequence. We can't limit ourselves to registers clearly live on
* input, since code *after* the sequence could read them. We do disallow
* callouts to helper functions to simplify our lives.
*
* We only preserve GPR's, for simplicity, and because they are far more likely
* as inputs than flags or SIMD registers. We'd like to verify that only GPR's
* are used, but A) we can't easily check values read *after* the sequence (the
* handler could set up state read afterward and sometimes clobbered inside), B)
* we do want to support SIMD and flags writes in the sequence, and C) even
* checking for values read in the sequence would want new interfaces like
* DR_REG_START_SIMD or register iterators for reasonable code.
*/
for (i = 0; i < DR_NUM_GPR_REGS; i++) {
if (info->reg_written[i])
continue;
reg_id_t reg = DR_REG_START_GPR + (reg_id_t)i;
if (instr_writes_to_reg(&instr, reg, DR_QUERY_DEFAULT)) {
LOG(GLOBAL, LOG_LOADER, 3,
"Rseq region @" PFX " writes register %s at " PFX "\n", info->start,
reg_names[reg], pc);
info->reg_written[i] = true;
}
}
pc = next_pc;
}
instr_free(GLOBAL_DCONTEXT, &instr);
}
static void
rseq_process_entry(struct rseq_cs *entry, ssize_t load_offs)
{
LOG(GLOBAL, LOG_LOADER, 2,
"Found rseq region: ver=%u; flags=%u; start=" PFX "; end=" PFX "; abort=" PFX
"\n",
entry->version, entry->flags, entry->start_ip + load_offs,
entry->start_ip + entry->post_commit_offset + load_offs,
entry->abort_ip + load_offs);
rseq_region_t *info =
HEAP_TYPE_ALLOC(GLOBAL_DCONTEXT, rseq_region_t, ACCT_VMAREAS, PROTECTED);
info->start = (app_pc)(ptr_uint_t)entry->start_ip + load_offs;
info->end = info->start + entry->post_commit_offset;
info->handler = (app_pc)(ptr_uint_t)entry->abort_ip + load_offs;
int signature;
if (!d_r_safe_read(info->handler - sizeof(signature), sizeof(signature),
&signature)) {
REPORT_FATAL_ERROR_AND_EXIT(RSEQ_BEHAVIOR_UNSUPPORTED, 3, get_application_name(),
get_application_pid(),
"Rseq signature is unreadable");
ASSERT_NOT_REACHED();
}
if (signature != rseq_signature) {
if (rseq_signature == 0) {
SELF_UNPROTECT_DATASEC(DATASEC_RARELY_PROT);
ATOMIC_4BYTE_WRITE(&rseq_signature, signature, false);
SELF_PROTECT_DATASEC(DATASEC_RARELY_PROT);
LOG(GLOBAL, LOG_LOADER, 2, "Rseq signature is 0x%08x\n", rseq_signature);
} else {
REPORT_FATAL_ERROR_AND_EXIT(RSEQ_BEHAVIOR_UNSUPPORTED, 3,
get_application_name(), get_application_pid(),
"Rseq signatures are not all identical");
ASSERT_NOT_REACHED();
}
}
rseq_analyze_instructions(info);
vmvector_add(d_r_rseq_areas, info->start, info->end, (void *)info);
RSTATS_INC(num_rseq_regions);
    /* Check the start pc. We don't go to the effort of checking for non-tags or
* interior pc's.
*/
if (fragment_lookup(GLOBAL_DCONTEXT, info->start) != NULL) {
/* We rely on the app not running rseq code for non-rseq purposes (since we
* can't easily tell the difference; plus we avoid a flush for lazy rseq
* activation).
*/
REPORT_FATAL_ERROR_AND_EXIT(
RSEQ_BEHAVIOR_UNSUPPORTED, 3, get_application_name(), get_application_pid(),
"Rseq sequences must not be used for non-rseq purposes");
ASSERT_NOT_REACHED();
}
}
static void
rseq_process_elf_sections(module_area_t *ma, bool at_map,
ELF_SECTION_HEADER_TYPE *sec_hdr_start, const char *strtab,
ssize_t load_offs)
{
bool found_array = false;
uint i;
ELF_HEADER_TYPE *elf_hdr = (ELF_HEADER_TYPE *)ma->start;
ELF_SECTION_HEADER_TYPE *sec_hdr = sec_hdr_start;
/* The section entries on disk need load_offs. The rseq entries in memory are
* relocated and only need the offset if relocations have not yet been applied.
*/
ssize_t entry_offs = 0;
if (at_map || (DYNAMO_OPTION(early_inject) && !dr_api_entry && !dynamo_started))
entry_offs = load_offs;
for (i = 0; i < elf_hdr->e_shnum; i++) {
#define RSEQ_PTR_ARRAY_SEC_NAME "__rseq_cs_ptr_array"
if (strcmp(strtab + sec_hdr->sh_name, RSEQ_PTR_ARRAY_SEC_NAME) == 0) {
found_array = true;
byte **ptrs = (byte **)(sec_hdr->sh_addr + load_offs);
int j;
for (j = 0; j < sec_hdr->sh_size / sizeof(ptrs); ++j) {
/* We require that the table is loaded. If not, bail, but unlike
* failing to find section headers, make this a fatal error: better
* to notify the user than try to run the rseq w/o proper handling.
*/
if (ptrs < (byte **)ma->start || ptrs > (byte **)ma->end) {
REPORT_FATAL_ERROR_AND_EXIT(
RSEQ_BEHAVIOR_UNSUPPORTED, 3, get_application_name(),
get_application_pid(),
RSEQ_PTR_ARRAY_SEC_NAME " is not in a loaded segment");
ASSERT_NOT_REACHED();
}
/* We assume this is a full mapping and it's safe to read the data
* (a partial map shouldn't make it to module list processing).
* We do perform a sanity check to handle unusual non-relocated
* cases (it's possible this array is not in a loaded segment?).
*/
byte *entry = *ptrs + entry_offs;
if (entry < ma->start || entry > ma->end) {
REPORT_FATAL_ERROR_AND_EXIT(
RSEQ_BEHAVIOR_UNSUPPORTED, 3, get_application_name(),
get_application_pid(),
RSEQ_PTR_ARRAY_SEC_NAME "'s entries are not in a loaded segment");
ASSERT_NOT_REACHED();
}
rseq_process_entry((struct rseq_cs *)entry, entry_offs);
++ptrs;
}
break;
}
++sec_hdr;
}
if (!found_array) {
sec_hdr = sec_hdr_start;
for (i = 0; i < elf_hdr->e_shnum; i++) {
#define RSEQ_SEC_NAME "__rseq_cs"
#define RSEQ_OLD_SEC_NAME "__rseq_table"
if (strcmp(strtab + sec_hdr->sh_name, RSEQ_SEC_NAME) == 0 ||
strcmp(strtab + sec_hdr->sh_name, RSEQ_OLD_SEC_NAME) == 0) {
/* There may be padding at the start of the section, so ensure we skip
* over it. We're reading the loaded data, not the file, so it will
* always be aligned.
*/
#define RSEQ_CS_ALIGNMENT (4 * sizeof(__u64))
struct rseq_cs *array = (struct rseq_cs *)ALIGN_FORWARD(
sec_hdr->sh_addr + load_offs, RSEQ_CS_ALIGNMENT);
int j;
for (j = 0; j < sec_hdr->sh_size / sizeof(*array); ++j) {
/* We require that the table is loaded. If not, bail. */
if (array < (struct rseq_cs *)ma->start ||
array > (struct rseq_cs *)ma->end) {
REPORT_FATAL_ERROR_AND_EXIT(
RSEQ_BEHAVIOR_UNSUPPORTED, 3, get_application_name(),
get_application_pid(),
RSEQ_SEC_NAME " is not in a loaded segment");
ASSERT_NOT_REACHED();
}
rseq_process_entry(array, entry_offs);
++array;
}
break;
}
++sec_hdr;
}
}
}
/* Returns whether successfully searched for rseq data (not whether found rseq data). */
static bool
rseq_process_module(module_area_t *ma, bool at_map)
{
bool res = false;
ASSERT(is_elf_so_header(ma->start, ma->end - ma->start));
ELF_HEADER_TYPE *elf_hdr = (ELF_HEADER_TYPE *)ma->start;
ASSERT(elf_hdr->e_shentsize == sizeof(ELF_SECTION_HEADER_TYPE));
int fd = INVALID_FILE;
byte *sec_map = NULL, *str_map = NULL;
size_t sec_size = 0, str_size = 0;
ELF_SECTION_HEADER_TYPE *sec_hdr = NULL;
char *strtab;
ssize_t load_offs = ma->start - ma->os_data.base_address;
if (at_map && elf_hdr->e_shoff + ma->start < ma->end) {
sec_map = elf_hdr->e_shoff + ma->start;
sec_hdr = (ELF_SECTION_HEADER_TYPE *)sec_map;
/* We assume strtab is there too. */
strtab = (char *)(ma->start + sec_hdr[elf_hdr->e_shstrndx].sh_offset);
if (strtab > (char *)ma->end)
goto rseq_process_module_cleanup;
} else {
/* The section headers are not mapped in. Unfortunately this is the common
* case: they are typically at the end of the file. For this reason, we delay
* calling this function until we see the app use rseq.
*/
if (ma->full_path == NULL)
goto rseq_process_module_cleanup;
fd = os_open(ma->full_path, OS_OPEN_READ);
if (fd == INVALID_FILE)
goto rseq_process_module_cleanup;
off_t offs = ALIGN_BACKWARD(elf_hdr->e_shoff, PAGE_SIZE);
sec_size =
ALIGN_FORWARD(elf_hdr->e_shoff + elf_hdr->e_shnum * elf_hdr->e_shentsize,
PAGE_SIZE) -
offs;
sec_map =
os_map_file(fd, &sec_size, offs, NULL, MEMPROT_READ, MAP_FILE_COPY_ON_WRITE);
if (sec_map == NULL)
goto rseq_process_module_cleanup;
sec_hdr = (ELF_SECTION_HEADER_TYPE *)(sec_map + elf_hdr->e_shoff - offs);
/* We also need the section header string table. */
offs = ALIGN_BACKWARD(sec_hdr[elf_hdr->e_shstrndx].sh_offset, PAGE_SIZE);
str_size = ALIGN_FORWARD(sec_hdr[elf_hdr->e_shstrndx].sh_offset +
sec_hdr[elf_hdr->e_shstrndx].sh_size,
PAGE_SIZE) -
offs;
str_map =
os_map_file(fd, &str_size, offs, NULL, MEMPROT_READ, MAP_FILE_COPY_ON_WRITE);
if (str_map == NULL)
goto rseq_process_module_cleanup;
strtab = (char *)(str_map + sec_hdr[elf_hdr->e_shstrndx].sh_offset - offs);
}
rseq_process_elf_sections(ma, at_map, sec_hdr, strtab, load_offs);
res = true;
rseq_process_module_cleanup:
if (str_size != 0)
os_unmap_file(str_map, str_size);
if (sec_size != 0)
os_unmap_file(sec_map, sec_size);
if (fd != INVALID_FILE)
os_close(fd);
DODEBUG({
if (!res) {
const char *name = GET_MODULE_NAME(&ma->names);
if (name == NULL)
name = "(null)";
LOG(GLOBAL, LOG_INTERP | LOG_VMAREAS, 2,
"%s: error looking for rseq table in %s\n", __FUNCTION__, name);
if (strstr(name, "linux-vdso.so") == NULL) {
SYSLOG_INTERNAL_WARNING_ONCE(
"Failed to identify whether a module has an rseq table");
}
}
});
return res;
}
static int
rseq_locate_tls_offset(void)
{
/* We assume (and document) that the loader's static TLS is used, so every thread
* has a consistent %fs:-offs address. Unfortunately, using a local copy of the
* rseq code for our non-instrumented execution requires us to locate the app's
* struct using heuristics, because the system call was poorly designed and will not
* let us replace the app's. Alternatives of no local copy have worse problems.
*/
/* Static TLS is at a negative offset from the app library segment base. We simply
* search all possible aligned slots. Typically there are <64 possible slots.
*/
int offset = 0;
byte *addr = get_app_segment_base(LIB_SEG_TLS);
byte *seg_bottom;
if (addr > 0 && get_memory_info(addr, &seg_bottom, NULL, NULL)) {
LOG(GLOBAL, LOG_LOADER, 3, "rseq within static TLS " PFX " - " PFX "\n",
seg_bottom, addr);
/* struct rseq_cs is aligned to 32. */
int alignment = __alignof(struct rseq_cs);
int i;
for (i = 0; addr - i * alignment >= seg_bottom; i++) {
byte *try_addr = addr - i * alignment;
ASSERT(try_addr >= seg_bottom); /* For loop guarantees this. */
/* Our strategy is to check all of the aligned static TLS addresses to
* find the registered one. Our caller is not supposed to call here
* until the app has registered the current thread.
*/
static const int RSEQ_RARE_SIGNATURE = 42;
int res = dynamorio_syscall(SYS_rseq, 4, try_addr, sizeof(struct rseq),
RSEQ_FLAG_UNREGISTER, RSEQ_RARE_SIGNATURE);
LOG(GLOBAL, LOG_LOADER, 3, "Tried rseq @ " PFX " => %d\n", try_addr, res);
if (res == -EINVAL) /* Our struct != registered struct. */
continue;
/* We expect -EPERM on a signature mismatch. On the small chance the app
* actually used 42 for its signature we'll have to re-register it.
*/
if (res == 0) {
int res = dynamorio_syscall(SYS_rseq, 4, try_addr, sizeof(struct rseq), 0,
RSEQ_RARE_SIGNATURE);
ASSERT(res == 0);
res = -EPERM;
}
if (res == -EPERM) {
/* Found it! */
LOG(GLOBAL, LOG_LOADER, 2,
"Found struct rseq @ " PFX " for thread => %s:-0x%x\n", try_addr,
get_register_name(LIB_SEG_TLS), i * alignment);
offset = -i * alignment;
}
break;
}
}
return offset;
}
void
rseq_process_syscall(dcontext_t *dcontext)
{
byte *seg_base = get_app_segment_base(LIB_SEG_TLS);
byte *app_addr = (byte *)dcontext->sys_param0;
bool constant_offset = false;
if (rseq_tls_offset == 0) {
SELF_UNPROTECT_DATASEC(DATASEC_RARELY_PROT);
int offset = app_addr - seg_base;
/* To handle races here, we use an atomic_exchange. */
int prior = atomic_exchange_int(&rseq_tls_offset, offset);
SELF_PROTECT_DATASEC(DATASEC_RARELY_PROT);
constant_offset = (prior == 0 || prior == offset);
LOG(GLOBAL, LOG_LOADER, 2,
"Observed struct rseq @ " PFX " for thread => %s:-0x%x\n", app_addr,
get_register_name(LIB_SEG_TLS), -rseq_tls_offset);
} else
constant_offset = (seg_base + rseq_tls_offset == app_addr);
if (!constant_offset) {
REPORT_FATAL_ERROR_AND_EXIT(
RSEQ_BEHAVIOR_UNSUPPORTED, 3, get_application_name(), get_application_pid(),
"struct rseq is not always in static thread-local storage");
ASSERT_NOT_REACHED();
}
}
/* Restartable sequence region identification.
*
* To avoid extra overhead going to disk to read section headers, we delay looking
* for rseq data until the app invokes an rseq syscall (or on attach we see a thread
* that has rseq set up). We document that we do not handle the app using rseq
* regions for non-rseq purposes, so we do not need to flush the cache here.
* Since we also identify the rseq_cs address here, this should be called *after*
* the app has registered the current thread for rseq.
*/
void
rseq_locate_rseq_regions(void)
{
if (rseq_enabled)
return;
/* This is a global operation, but the trigger could be hit by two threads at once,
* thus requiring synchronization.
*/
d_r_mutex_lock(&rseq_trigger_lock);
if (rseq_enabled) {
d_r_mutex_unlock(&rseq_trigger_lock);
return;
}
int offset = 0;
if (rseq_tls_offset == 0) {
/* Identify the TLS offset of this thread's struct rseq. */
offset = rseq_locate_tls_offset();
if (offset == 0) {
REPORT_FATAL_ERROR_AND_EXIT(
RSEQ_BEHAVIOR_UNSUPPORTED, 3, get_application_name(),
get_application_pid(),
"struct rseq is not in static thread-local storage");
ASSERT_NOT_REACHED();
}
}
SELF_UNPROTECT_DATASEC(DATASEC_RARELY_PROT);
bool new_value = true;
ATOMIC_1BYTE_WRITE(&rseq_enabled, new_value, false);
if (rseq_tls_offset == 0)
ATOMIC_4BYTE_WRITE(&rseq_tls_offset, offset, false);
SELF_PROTECT_DATASEC(DATASEC_RARELY_PROT);
module_iterator_t *iter = module_iterator_start();
while (module_iterator_hasnext(iter)) {
module_area_t *ma = module_iterator_next(iter);
rseq_process_module(ma, false /*!at_map*/);
}
module_iterator_stop(iter);
d_r_mutex_unlock(&rseq_trigger_lock);
}
void
rseq_module_init(module_area_t *ma, bool at_map)
{
if (rseq_enabled) {
rseq_process_module(ma, at_map);
}
}
void
rseq_process_native_abort(dcontext_t *dcontext)
{
#ifdef CLIENT_INTERFACE
/* Raise a transfer event. */
LOG(THREAD, LOG_INTERP | LOG_VMAREAS, 2, "Abort triggered in rseq native code\n");
get_mcontext(dcontext)->pc = dcontext->next_tag;
if (instrument_kernel_xfer(dcontext, DR_XFER_RSEQ_ABORT, osc_empty,
/* We do not know the source PC so we do not
* supply a source state.
*/
NULL, NULL, dcontext->next_tag,
get_mcontext(dcontext)->xsp, osc_empty,
get_mcontext(dcontext), 0)) {
dcontext->next_tag = canonicalize_pc_target(dcontext, get_mcontext(dcontext)->pc);
}
#endif
}
| 1 | 22,880 | Actually, it looks like HAVE_RSEQ is only used to determine whether the rseq.h header is around, which then only determines whether the regression test is built: it does not affect whether the core handles an application using rseq. The core rseq mangling code and all the code in rseq_linux.c is always enabled. I believe the idea is that we want DR built on one machine to work on as many others as possible, rather than requiring a custom build for every variant of kernel feature or whatnot. What is the top-level problem being solved here? If it requires disabling or enabling the core handling rseq, probably a separate option should be put in place and maybe HAVE_RSEQ renamed to make it clearer what its consequences are. | DynamoRIO-dynamorio | c |
@@ -3,8 +3,7 @@ package cache
import (
"sync"
- "github.com/gofrs/uuid"
- "github.com/spiffe/spire/pkg/common/selector"
+ "github.com/spiffe/spire/proto/spire/common"
)
type Subscriber interface { | 1 | package cache
import (
"sync"
"github.com/gofrs/uuid"
"github.com/spiffe/spire/pkg/common/selector"
)
type Subscriber interface {
Updates() <-chan *WorkloadUpdate
Finish()
}
type WorkloadUpdate struct {
Entries []*Entry
Bundle *Bundle
FederatedBundles map[string]*Bundle
}
type subscriber struct {
c chan *WorkloadUpdate
m sync.Mutex
sel Selectors
sid uuid.UUID
active bool
}
type subscribers struct {
selMap map[string][]uuid.UUID // map of selector to UID
sidMap map[uuid.UUID]*subscriber
m sync.Mutex
}
func NewSubscriber(selectors Selectors) (*subscriber, error) {
u, err := uuid.NewV4()
if err != nil {
return nil, err
}
return &subscriber{
c: make(chan *WorkloadUpdate, 1),
sel: selectors,
sid: u,
active: true,
}, nil
}
// Updates returns the channel where the updates are received.
func (sub *subscriber) Updates() <-chan *WorkloadUpdate {
return sub.c
}
// Finish ends the subscriber's update subscription. No more updates
// will be received on the Updates() channel.
func (sub *subscriber) Finish() {
sub.m.Lock()
defer sub.m.Unlock()
sub.active = false
close(sub.c)
}
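// add indexes the subscriber by every selector combination in the power set of
// its selectors, so a lookup with any matching combination can find it.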
func (s *subscribers) add(sub *subscriber) error {
s.m.Lock()
defer s.m.Unlock()
s.sidMap[sub.sid] = sub
selSet := selector.NewSetFromRaw(sub.sel)
selPSet := selSet.Power()
for sel := range selPSet {
selStr := sel.String()
s.selMap[selStr] = append(s.selMap[selStr], sub.sid)
}
return nil
}
func (s *subscribers) get(sels Selectors) (subs []*subscriber) {
s.m.Lock()
defer s.m.Unlock()
sids := s.getSubIds(sels)
for _, id := range sids {
subs = append(subs, s.sidMap[id])
}
return
}
func (s *subscribers) getAll() (subs []*subscriber) {
s.m.Lock()
defer s.m.Unlock()
for _, sub := range s.sidMap {
subs = append(subs, sub)
}
return
}
func (s *subscribers) remove(sub *subscriber) {
s.m.Lock()
defer s.m.Unlock()
delete(s.sidMap, sub.sid)
for sel, sids := range s.selMap {
for i, uid := range sids {
if uid == sub.sid {
s.selMap[sel] = append(sids[:i], sids[i+1:]...)
}
}
}
}
func (s *subscribers) getSubIds(sels Selectors) []uuid.UUID {
subIds := []uuid.UUID{}
selSet := selector.NewSetFromRaw(sels)
selPSet := selSet.Power()
for sel := range selPSet {
selStr := sel.String()
subIds = append(subIds, s.selMap[selStr]...)
}
subIds = dedupe(subIds)
return subIds
}
func NewSubscribers() *subscribers {
return &subscribers{
selMap: make(map[string][]uuid.UUID),
sidMap: make(map[uuid.UUID]*subscriber),
}
}
func dedupe(ids []uuid.UUID) (deduped []uuid.UUID) {
uniqueMap := map[uuid.UUID]bool{}
for i := range ids {
uniqueMap[ids[i]] = true
}
for key := range uniqueMap {
deduped = append(deduped, key)
}
return
}
| 1 | 11,008 | I don't see a corresponding call to freeSelectorSet... am I missing something? | spiffe-spire | go |
@@ -2,13 +2,13 @@ require "rails_helper"
describe AuthCallbacksController do
context '#create' do
- it 'redirects to the dashboard path without an auth origin' do
+ it 'redirects to the practice path without an auth origin' do
request.env['omniauth.auth'] = OmniAuth.config.mock_auth[:github]
request.env['omniauth.origin'] = nil
get :create, provider: 'github'
- should redirect_to(dashboard_url)
+ should redirect_to(practice_url)
end
end
end | 1 | require "rails_helper"
describe AuthCallbacksController do
context '#create' do
it 'redirects to the dashboard path without an auth origin' do
request.env['omniauth.auth'] = OmniAuth.config.mock_auth[:github]
request.env['omniauth.origin'] = nil
get :create, provider: 'github'
should redirect_to(dashboard_url)
end
end
end
| 1 | 12,830 | Prefer double-quoted strings unless you need single quotes to avoid extra backslashes for escaping. | thoughtbot-upcase | rb |
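A minimal Ruby illustration of that guideline (example strings are made up):
  greeting = "it's easy to include an apostrophe"   # double quotes: no escaping
  greeting = 'it\'s noisier with single quotes'     # single quotes: backslash needed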
@@ -40,9 +40,10 @@ namespace OpenTelemetry.Trace
public const string HttpRouteKey = "http.route";
public const string HttpFlavorKey = "http.flavor";
- public const string DatabaseTypeKey = "db.type";
- public const string DatabaseInstanceKey = "db.instance";
+ public const string DatabaseSystemKey = "db.system";
+ public const string DatabaseNameKey = "db.name";
public const string DatabaseStatementKey = "db.statement";
+ public const string DatabaseStatementTypeKey = "db.statement_type";
#pragma warning restore CS1591 // Missing XML comment for publicly visible type or member
}
} | 1 | // <copyright file="SpanAttributeConstants.cs" company="OpenTelemetry Authors">
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// </copyright>
namespace OpenTelemetry.Trace
{
/// <summary>
/// Defines well-known span attribute keys.
/// </summary>
public static class SpanAttributeConstants
{
#pragma warning disable CS1591 // Missing XML comment for publicly visible type or member
public const string ComponentKey = "component";
public const string PeerServiceKey = "peer.service";
public const string StatusCodeKey = "ot.status_code";
public const string StatusDescriptionKey = "ot.status_description";
public const string HttpMethodKey = "http.method";
public const string HttpSchemeKey = "http.scheme";
public const string HttpTargetKey = "http.target";
public const string HttpStatusCodeKey = "http.status_code";
public const string HttpStatusTextKey = "http.status_text";
public const string HttpUserAgentKey = "http.user_agent";
public const string HttpPathKey = "http.path";
public const string HttpHostKey = "http.host";
public const string HttpUrlKey = "http.url";
public const string HttpRouteKey = "http.route";
public const string HttpFlavorKey = "http.flavor";
public const string DatabaseTypeKey = "db.type";
public const string DatabaseInstanceKey = "db.instance";
public const string DatabaseStatementKey = "db.statement";
#pragma warning restore CS1591 // Missing XML comment for publicly visible type or member
}
}
| 1 | 14,480 | Is this part of spec? | open-telemetry-opentelemetry-dotnet | .cs |
@@ -3725,6 +3725,18 @@ client_thread_run(void)
byte *xsp;
GET_STACK_PTR(xsp);
void *crec = get_clone_record((reg_t)xsp);
+ /* i#2335: we support setup separate from start, and we want to allow a client
+ * to create a client thread during init, but we do not support that thread
+ * executing until the app has started (b/c we have no signal handlers in place).
+ */
+ /* i#3973: in addition to _executing_ a client thread before the
+ * app has started, if we even create the thread before
+ * dynamo_initialized is set, we will not copy tls blocks. By
+ * waiting for the app to be started before dynamo_thread_init is
+ * called, we ensure this race condition can never happen, since
+ * dynamo_initialized will always be set before the app is started.
+ */
+ wait_for_event(dr_app_started, 0);
IF_DEBUG(int rc =)
dynamo_thread_init(get_clone_record_dstack(crec), NULL, crec, true);
ASSERT(rc != -1); /* this better be a new thread */ | 1 | /* *******************************************************************************
* Copyright (c) 2010-2019 Google, Inc. All rights reserved.
* Copyright (c) 2011 Massachusetts Institute of Technology All rights reserved.
* Copyright (c) 2000-2010 VMware, Inc. All rights reserved.
* *******************************************************************************/
/*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* * Neither the name of VMware, Inc. nor the names of its contributors may be
* used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL VMWARE, INC. OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*/
/* Copyright (c) 2003-2007 Determina Corp. */
/* Copyright (c) 2001-2003 Massachusetts Institute of Technology */
/* Copyright (c) 2000-2001 Hewlett-Packard Company */
/*
* os.c - Linux specific routines
*/
/* Easiest to match kernel stat struct by using 64-bit.
* This limits us to 2.4+ kernel but that's ok.
* I don't really want to get into requiring kernel headers to build
* general release packages, though that would be fine for targeted builds.
* There are 3 different stat syscalls (SYS_oldstat, SYS_stat, and SYS_stat64)
* and using _LARGEFILE64_SOURCE with SYS_stat64 is the best match.
*/
#define _LARGEFILE64_SOURCE
/* for mmap-related #defines */
#include <sys/types.h>
#include <sys/mman.h>
/* in case MAP_32BIT is missing */
#ifndef MAP_32BIT
# define MAP_32BIT 0x40
#endif
#ifndef MAP_ANONYMOUS
# define MAP_ANONYMOUS MAP_ANON /* MAP_ANON on Mac */
#endif
/* for open */
#include <sys/stat.h>
#include <fcntl.h>
#include "../globals.h"
#include "../hashtable.h"
#include "../native_exec.h"
#include <unistd.h> /* for write and usleep and _exit */
#include <limits.h>
#ifdef MACOS
# include <sys/sysctl.h> /* for sysctl */
# ifndef SYS___sysctl
/* The name was changed on Yosemite */
# define SYS___sysctl SYS_sysctl
# endif
# include <mach/mach_traps.h> /* for swtch_pri */
# include "include/syscall_mach.h"
#endif
#ifdef LINUX
# include <sys/vfs.h> /* for statfs */
#elif defined(MACOS)
# include <sys/mount.h> /* for statfs */
# include <mach/mach.h>
# include <mach/task.h>
# include <mach/semaphore.h>
# include <mach/sync_policy.h>
#endif
#include <dirent.h>
/* for getrlimit */
#include <sys/time.h>
#include <sys/resource.h>
#ifndef X64
struct compat_rlimit {
uint rlim_cur;
uint rlim_max;
};
#endif
#ifdef MACOS
typedef struct rlimit rlimit64_t;
#else
typedef struct rlimit64 rlimit64_t;
#endif
#ifdef LINUX
/* For clone and its flags, the manpage says to include sched.h with _GNU_SOURCE
* defined. _GNU_SOURCE brings in unwanted extensions and causes name
* conflicts. Instead, we include unix/sched.h which comes from the Linux
* kernel headers.
*/
# include <linux/sched.h>
#endif
#include "module.h" /* elf */
#include "tls.h"
#if defined(X86) && defined(DEBUG)
# include "os_asm_defines.asm" /* for TLS_SELF_OFFSET_ASM */
#endif
#ifndef F_DUPFD_CLOEXEC /* in linux 2.6.24+ */
# define F_DUPFD_CLOEXEC 1030
#endif
/* This is not always sufficient to identify a syscall return value.
* For example, MacOS has some 32-bit syscalls that return 64-bit
* values in xdx:xax.
*/
#define MCXT_SYSCALL_RES(mc) ((mc)->IF_X86_ELSE(xax, r0))
#if defined(AARCH64)
# define ASM_R2 "x2"
# define ASM_R3 "x3"
# define READ_TP_TO_R3_DISP_IN_R2 \
"mrs " ASM_R3 ", tpidr_el0\n\t" \
"ldr " ASM_R3 ", [" ASM_R3 ", " ASM_R2 "] \n\t"
#elif defined(ARM)
# define ASM_R2 "r2"
# define ASM_R3 "r3"
# define READ_TP_TO_R3_DISP_IN_R2 \
"mrc p15, 0, " ASM_R3 \
", c13, c0, " STRINGIFY(USR_TLS_REG_OPCODE) " \n\t" \
"ldr " ASM_R3 ", [" ASM_R3 \
", " ASM_R2 "] \n\t"
#endif /* ARM */
/* Prototype for all functions in .init_array. */
typedef int (*init_fn_t)(int argc, char **argv, char **envp);
/* For STATIC_LIBRARY we do not cache environ so the app can change it. */
#ifndef STATIC_LIBRARY
/* i#46: Private __environ pointer. Points at the environment variable array
* on the stack, which is different from what libc __environ may point at. We
 * use the environment for following children and setting options, so it's OK
* that we don't see what libc says.
*/
char **our_environ;
#endif
#include <errno.h>
/* avoid problems with use of errno as var name in rest of file */
#if !defined(STANDALONE_UNIT_TEST) && !defined(MACOS)
# undef errno
#endif
/* we define __set_errno below */
/* must be prior to <link.h> => <elf.h> => INT*_{MIN,MAX} */
#include "instr.h" /* for get_app_segment_base() */
#include "decode_fast.h" /* decode_cti: maybe os_handle_mov_seg should be ifdef X86? */
#include <dlfcn.h>
#include <stdlib.h>
#include <stdio.h>
#include <signal.h>
#include <syslog.h> /* vsyslog */
#include "../vmareas.h"
#ifdef RCT_IND_BRANCH
# include "../rct.h"
#endif
#ifdef LINUX
# include "include/syscall.h" /* our own local copy */
#else
# include <sys/syscall.h>
#endif
#include "../module_shared.h"
#include "os_private.h"
#include "../synch.h"
#include "memquery.h"
#include "ksynch.h"
#ifndef HAVE_MEMINFO_QUERY
# include "memcache.h"
#endif
#ifdef CLIENT_INTERFACE
# include "instrument.h"
#endif
#ifdef LINUX
# include "rseq_linux.h"
#endif
#ifdef MACOS
# define SYSNUM_EXIT_PROCESS SYS_exit
# define SYSNUM_EXIT_THREAD SYS_bsdthread_terminate
#else
# define SYSNUM_EXIT_PROCESS SYS_exit_group
# define SYSNUM_EXIT_THREAD SYS_exit
#endif
#ifdef ANDROID
/* Custom prctl flags specific to Android (xref i#1861) */
# define PR_SET_VMA 0x53564d41
# define PR_SET_VMA_ANON_NAME 0
#endif
/* Guards data written by os_set_app_thread_area(). */
DECLARE_CXTSWPROT_VAR(static mutex_t set_thread_area_lock,
INIT_LOCK_FREE(set_thread_area_lock));
static bool first_thread_tls_initialized;
static bool last_thread_tls_exited;
tls_type_t tls_global_type;
#ifndef HAVE_TLS
/* We use a table lookup to find a thread's dcontext */
/* Our only current no-TLS target, VMKernel (VMX86_SERVER), doesn't have apps with
* tons of threads anyway
*/
# define MAX_THREADS 512
typedef struct _tls_slot_t {
thread_id_t tid;
dcontext_t *dcontext;
} tls_slot_t;
/* Stored in heap for self-prot */
static tls_slot_t *tls_table;
/* not static so deadlock_avoidance_unlock() can look for it */
DECLARE_CXTSWPROT_VAR(mutex_t tls_lock, INIT_LOCK_FREE(tls_lock));
#endif
#ifdef CLIENT_INTERFACE
/* Should we place this in a client header? Currently mentioned in
* dr_raw_tls_calloc() docs.
*/
static bool client_tls_allocated[MAX_NUM_CLIENT_TLS];
DECLARE_CXTSWPROT_VAR(static mutex_t client_tls_lock, INIT_LOCK_FREE(client_tls_lock));
#endif
#include <stddef.h> /* for offsetof */
#include <sys/utsname.h> /* for struct utsname */
/* forward decl */
static void
handle_execve_post(dcontext_t *dcontext);
static bool
os_switch_lib_tls(dcontext_t *dcontext, bool to_app);
static bool
os_switch_seg_to_context(dcontext_t *dcontext, reg_id_t seg, bool to_app);
#ifdef X86
static bool
os_set_dr_tls_base(dcontext_t *dcontext, os_local_state_t *tls, byte *base);
#endif
#ifdef LINUX
static bool
handle_app_mremap(dcontext_t *dcontext, byte *base, size_t size, byte *old_base,
size_t old_size, uint old_prot, uint old_type);
static void
handle_app_brk(dcontext_t *dcontext, byte *lowest_brk /*if known*/, byte *old_brk,
byte *new_brk);
#endif
/* full path to our own library, used for execve */
static char dynamorio_library_path[MAXIMUM_PATH]; /* just dir */
static char dynamorio_library_filepath[MAXIMUM_PATH];
/* Issue 20: path to other architecture */
static char dynamorio_alt_arch_path[MAXIMUM_PATH];
static char dynamorio_alt_arch_filepath[MAXIMUM_PATH]; /* just dir */
/* Makefile passes us LIBDIR_X{86,64} defines */
#define DR_LIBDIR_X86 STRINGIFY(LIBDIR_X86)
#define DR_LIBDIR_X64 STRINGIFY(LIBDIR_X64)
/* pc values delimiting dynamo dll image */
static app_pc dynamo_dll_start = NULL;
static app_pc dynamo_dll_end = NULL; /* open-ended */
/* pc values delimiting the app, equal to the "dll" bounds for static DR */
static app_pc executable_start = NULL;
static app_pc executable_end = NULL;
/* Used by get_application_name(). */
static char executable_path[MAXIMUM_PATH];
static char *executable_basename;
/* does the kernel provide tids that must be used to distinguish threads in a group? */
static bool kernel_thread_groups;
static bool kernel_64bit;
pid_t pid_cached;
static bool fault_handling_initialized;
#ifdef PROFILE_RDTSC
uint kilo_hertz; /* cpu clock speed */
#endif
/* Xref PR 258731, dup of STDOUT/STDERR in case app wants to close them. */
DR_API file_t our_stdout = STDOUT_FILENO;
DR_API file_t our_stderr = STDERR_FILENO;
DR_API file_t our_stdin = STDIN_FILENO;
/* we steal fds from the app */
static rlimit64_t app_rlimit_nofile; /* cur rlimit set by app */
static int min_dr_fd;
/* we store all DR files so we can prevent the app from changing them,
* and so we can close them in a child of fork.
* the table key is the fd and the payload is the set of DR_FILE_* flags.
*/
static generic_table_t *fd_table;
#define INIT_HTABLE_SIZE_FD 6 /* should remain small */
#ifdef DEBUG
static int num_fd_add_pre_heap;
#endif
#ifdef LINUX
/* i#1004: brk emulation */
static byte *app_brk_map;
static byte *app_brk_cur;
static byte *app_brk_end;
#endif
#ifdef MACOS
static int macos_version;
#endif
static bool
is_readable_without_exception_internal(const byte *pc, size_t size, bool query_os);
static bool
mmap_check_for_module_overlap(app_pc base, size_t size, bool readable, uint64 inode,
bool at_map);
#ifdef LINUX
static char *
read_proc_self_exe(bool ignore_cache);
#endif
/* Libc independent directory iterator, similar to readdir. If we ever need
* this on Windows we should generalize it and export it to clients.
*/
typedef struct _dir_iterator_t {
file_t fd;
int off;
int end;
const char *name; /* Name of the current entry. */
char buf[4 * MAXIMUM_PATH]; /* Expect stack alloc, so not too big. */
} dir_iterator_t;
static void
os_dir_iterator_start(dir_iterator_t *iter, file_t fd);
static bool
os_dir_iterator_next(dir_iterator_t *iter);
/* XXX: If we generalize to Windows, will we need os_dir_iterator_stop()? */
/* vsyscall page. hardcoded at 0xffffe000 in earlier kernels, but
* randomly placed since fedora2.
* marked rx then: FIXME: should disallow this guy when that's the case!
* random vsyscall page is identified in maps files as "[vdso]"
* (kernel-provided fake shared library or Virt Dyn Shared Object).
*/
/* i#1583: vdso is now 2 pages, yet we assume vsyscall is on 1st page. */
/* i#2945: vdso is now 3 pages and vsyscall is not on the 1st page. */
app_pc vsyscall_page_start = NULL;
/* pc of the end of the syscall instr itself */
app_pc vsyscall_syscall_end_pc = NULL;
/* pc where kernel returns control after sysenter vsyscall */
app_pc vsyscall_sysenter_return_pc = NULL;
/* pc where our hook-displaced code was copied */
app_pc vsyscall_sysenter_displaced_pc = NULL;
#define VSYSCALL_PAGE_START_HARDCODED ((app_pc)(ptr_uint_t)0xffffe000)
#ifdef X64
/* i#430, in Red Hat Enterprise Server 5.6, vsyscall region is marked
* not executable
* ffffffffff600000-ffffffffffe00000 ---p 00000000 00:00 0 [vsyscall]
*/
# define VSYSCALL_REGION_MAPS_NAME "[vsyscall]"
#endif
/* i#1908: vdso and vsyscall are now split */
app_pc vdso_page_start = NULL;
size_t vdso_size = 0;
#if !defined(STANDALONE_UNIT_TEST) && !defined(STATIC_LIBRARY)
/* The pthreads library keeps errno in its pthread_descr data structure,
* which it looks up by dispatching on the stack pointer. This doesn't work
* when within dynamo. Thus, we define our own __errno_location() for use both
* by us and the app, to prevent pthreads looking at the stack pointer when
* out of the code cache.
*/
/* FIXME: maybe we should create 1st dcontext earlier so we don't need init_errno?
* any problems with init_errno being set and then dcontext->errno being read?
* FIXME: if a thread issues a dr_app_stop, then we don't want to use
* this errno slot? But it may later do a start...probably ok to keep using
* the slot. But, when threads die, they'll all use the same init_errno!
*/
static int init_errno; /* errno until 1st dcontext created */
int *
__errno_location(void)
{
/* Each dynamo thread should have a separate errno */
dcontext_t *dcontext = get_thread_private_dcontext();
if (dcontext == NULL)
return &init_errno;
else {
/* WARNING: init_errno is in data segment so can be RO! */
return &(dcontext->upcontext_ptr->dr_errno);
}
}
#endif /* !STANDALONE_UNIT_TEST && !STATIC_LIBRARY */
#if defined(HAVE_TLS) && defined(CLIENT_INTERFACE)
/* i#598
* (gdb) x/20i (*(errno_loc_t)0xf721e413)
* 0xf721e413 <__errno_location>: push %ebp
* 0xf721e414 <__errno_location+1>: mov %esp,%ebp
* 0xf721e416 <__errno_location+3>: call <__x86.get_pc_thunk.cx>
* 0xf721e41b <__errno_location+8>: add $0x166bd9,%ecx
* 0xf721e421 <__errno_location+14>: mov -0x1c(%ecx),%eax
* 0xf721e427 <__errno_location+20>: add %gs:0x0,%eax
* 0xf721e42e <__errno_location+27>: pop %ebp
* 0xf721e42f <__errno_location+28>: ret
*
 * __errno_location calculates the errno location by adding
 * errno's offset in TLS to the TLS base.
* However, because the TLS has been switched in os_tls_init,
* the calculated address is wrong.
* We first get the errno offset in TLS at init time and
* calculate correct address by adding the app's tls base.
*/
/* __errno_location on ARM:
* 0xb6f0b290 <__errno_location>: ldr r3, [pc, #12]
* 0xb6f0b292 <__errno_location+2>: mrc 15, 0, r0, cr13, cr0, {3}
* 0xb6f0b296 <__errno_location+6>: add r3, pc
* 0xb6f0b298 <__errno_location+8>: ldr r3, [r3, #0]
* 0xb6f0b29a <__errno_location+10>: adds r0, r0, r3
* 0xb6f0b29c <__errno_location+12>: bx lr
* It uses the predefined offset to get errno location in TLS,
* and we should be able to reuse the code here.
*/
static int libc_errno_tls_offs;
static int *
our_libc_errno_loc(void)
{
void *app_tls = os_get_app_tls_base(NULL, TLS_REG_LIB);
if (app_tls == NULL)
return NULL;
return (int *)(app_tls + libc_errno_tls_offs);
}
#endif
/* i#238/PR 499179: libc errno preservation
*
* Errno location is per-thread so we store the
* function globally and call it each time. Note that pthreads seems
* to be the one who provides per-thread errno: using raw syscalls to
* create threads, we end up with a global errno:
*
* > for i in linux.thread.*0/log.*; do grep 'libc errno' $i | head -1; done
* libc errno loc: 0x00007f153de26698
* libc errno loc: 0x00007f153de26698
* > for i in pthreads.pthreads.*0/log.*; do grep 'libc errno' $i | head -1; done
* libc errno loc: 0x00007fc24d1ce698
* libc errno loc: 0x00007fc24d1cd8b8
* libc errno loc: 0x00007fc24c7cc8b8
*/
typedef int *(*errno_loc_t)(void);
static errno_loc_t
get_libc_errno_location(bool do_init)
{
static errno_loc_t libc_errno_loc;
if (do_init) {
module_iterator_t *mi = module_iterator_start();
while (module_iterator_hasnext(mi)) {
module_area_t *area = module_iterator_next(mi);
const char *modname = GET_MODULE_NAME(&area->names);
            /* We require the name to start with "libc.so" to avoid matching "libgolibc.so".
* GET_MODULE_NAME never includes the path: i#138 will add path.
*/
if (modname != NULL && strstr(modname, "libc.so") == modname) {
bool found = true;
/* called during init when .data is writable */
libc_errno_loc =
(errno_loc_t)d_r_get_proc_address(area->start, "__errno_location");
ASSERT(libc_errno_loc != NULL);
LOG(GLOBAL, LOG_THREADS, 2, "libc errno loc func: " PFX "\n",
libc_errno_loc);
#ifdef CLIENT_INTERFACE
                /* Currently, DR is loaded by the system loader and hooked up
                 * to the app's libc, so right now we still need this routine.
                 * We can remove this after libc independence and/or
                 * early injection.
*/
if (INTERNAL_OPTION(private_loader)) {
acquire_recursive_lock(&privload_lock);
if (privload_lookup_by_base(area->start) != NULL)
found = false;
release_recursive_lock(&privload_lock);
}
#endif
if (found)
break;
}
}
module_iterator_stop(mi);
#if defined(HAVE_TLS) && defined(CLIENT_INTERFACE)
/* i#598: init the libc errno's offset. If we didn't find libc above,
* then we don't need to do this.
*/
if (INTERNAL_OPTION(private_loader) && libc_errno_loc != NULL) {
void *priv_lib_tls_base = os_get_priv_tls_base(NULL, TLS_REG_LIB);
ASSERT(priv_lib_tls_base != NULL);
libc_errno_tls_offs = (void *)libc_errno_loc() - priv_lib_tls_base;
libc_errno_loc = &our_libc_errno_loc;
}
#endif
}
return libc_errno_loc;
}
/* i#238/PR 499179: our __errno_location isn't affecting libc so until
* we have libc independence or our own private isolated libc we need
* to preserve the app's libc's errno
*/
int
get_libc_errno(void)
{
#if defined(STANDALONE_UNIT_TEST) && (defined(MACOS) || defined(ANDROID))
return errno;
#else
# ifdef STANDALONE_UNIT_TEST
errno_loc_t func = __errno_location;
# else
errno_loc_t func = get_libc_errno_location(false);
# endif
if (func == NULL) {
/* libc hasn't been loaded yet or we're doing early injection. */
return 0;
} else {
int *loc = (*func)();
ASSERT(loc != NULL);
LOG(THREAD_GET, LOG_THREADS, 5, "libc errno loc: " PFX "\n", loc);
if (loc != NULL)
return *loc;
}
return 0;
#endif
}
/* N.B.: pthreads has two other locations it keeps on a per-thread basis:
* h_errno and res_state. See glibc-2.2.4/linuxthreads/errno.c.
* If dynamo ever modifies those we'll need to do to them what we now do to
* errno.
*/
/* The environment vars exhibit totally messed up behavior when someone
* does an execve of /bin/sh -- not sure what's going on, but using our
* own implementation of unsetenv fixes all our problems. If we use
* libc's, unsetenv either does nothing or ends up having getenv return
* NULL for other vars that are obviously set (by iterating through environ).
* FIXME: find out the real story here.
*/
int
our_unsetenv(const char *name)
{
/* FIXME: really we should have some kind of synchronization */
size_t name_len;
char **env = our_environ;
if (name == NULL || *name == '\0' || strchr(name, '=') != NULL) {
return -1;
}
ASSERT(our_environ != NULL);
if (our_environ == NULL)
return -1;
name_len = strlen(name);
while (*env != NULL) {
if (strncmp(*env, name, name_len) == 0 && (*env)[name_len] == '=') {
/* We have a match. Shift the subsequent entries. Keep going to
* handle later matches.
*/
char **e;
for (e = env; *e != NULL; e++)
*e = *(e + 1);
} else {
env++;
}
}
return 0;
}
/* Clobbers the name rather than shifting, to preserve auxv (xref i#909). */
bool
disable_env(const char *name)
{
size_t name_len;
char **env = our_environ;
if (name == NULL || *name == '\0' || strchr(name, '=') != NULL) {
return false;
}
ASSERT(our_environ != NULL);
if (our_environ == NULL)
return false;
name_len = strlen(name);
while (*env != NULL) {
if (strncmp(*env, name, name_len) == 0 && (*env)[name_len] == '=') {
/* We have a match. If we shift subsequent entries we'll mess
* up access to auxv, which is after the env block, so we instead
* disable the env var by changing its name.
* We keep going to handle later matches.
*/
snprintf(*env, name_len, "__disabled__");
}
env++;
}
return true;
}
/* i#46: Private getenv.
*/
char *
our_getenv(const char *name)
{
char **env = our_environ;
size_t i;
size_t name_len;
if (name == NULL || name[0] == '\0' || strchr(name, '=') != NULL) {
return NULL;
}
ASSERT_MESSAGE(CHKLVL_ASSERTS,
"our_environ is missing. _init() or "
"dynamorio_set_envp() were not called",
our_environ != NULL);
if (our_environ == NULL)
return NULL;
name_len = strlen(name);
for (i = 0; env[i] != NULL; i++) {
if (strncmp(env[i], name, name_len) == 0 && env[i][name_len] == '=') {
return env[i] + name_len + 1;
}
}
return NULL;
}
bool
is_our_environ_followed_by_auxv(void)
{
#ifdef STATIC_LIBRARY
/* Since we initialize late, our_environ is likely no longer pointed at
* the stack (i#2122).
*/
return false;
#else
return true;
#endif
}
/* Work around drpreload's _init going first. We can get envp in our own _init
* routine down below, but drpreload.so comes first and calls
* dynamorio_app_init before our own _init routine gets called. Apps using the
* app API are unaffected because our _init routine will have run by then. For
* STATIC_LIBRARY, we used to set our_environ in our_init(), but to support
* the app setting DYNAMORIO_OPTIONS after our_init() runs, we now just use environ.
*/
DYNAMORIO_EXPORT
void
dynamorio_set_envp(char **envp)
{
our_environ = envp;
}
/* shared library init */
static int
our_init(int argc, char **argv, char **envp)
{
/* If we do not want to use drpreload.so, we can take over here: but when using
* drpreload, this is called *after* we have already taken over.
*/
extern void dynamorio_app_take_over(void);
bool takeover = false;
#ifdef INIT_TAKE_OVER
takeover = true;
#endif
#ifdef VMX86_SERVER
/* PR 391765: take over here instead of using preload */
takeover = os_in_vmkernel_classic();
#endif
#ifndef STATIC_LIBRARY
if (our_environ != NULL) {
/* Set by dynamorio_set_envp above. These should agree. */
ASSERT(our_environ == envp);
} else {
our_environ = envp;
}
#endif
/* if using preload, no -early_inject */
#ifdef STATIC_LIBRARY
if (!takeover) {
const char *takeover_env = getenv("DYNAMORIO_TAKEOVER_IN_INIT");
if (takeover_env != NULL && strcmp(takeover_env, "1") == 0) {
takeover = true;
}
}
#endif
if (takeover) {
if (dynamorio_app_init() == 0 /* success */) {
dynamorio_app_take_over();
}
}
return 0;
}
#if defined(STATIC_LIBRARY) || defined(STANDALONE_UNIT_TEST)
/* If we're getting linked into a binary that already has an _init definition
* like the app's exe or unit_tests, we add a pointer to our_init() to the
* .init_array section. We can't use the constructor attribute because not all
* toolchains pass the args and environment to the constructor.
*/
static init_fn_t
# ifdef MACOS
__attribute__((section("__DATA,__mod_init_func"), aligned(sizeof(void *)), used))
# else
__attribute__((section(".init_array"), aligned(sizeof(void *)), used))
# endif
init_array[] = { our_init };
#else
/* If we're a normal shared object, then we override _init.
*/
int
_init(int argc, char **argv, char **envp)
{
# ifdef ANDROID
/* i#1862: the Android loader passes *nothing* to lib init routines. We
* rely on DR being listed before libc so we can read the TLS slot the
* kernel set up.
*/
if (!get_kernel_args(&argc, &argv, &envp)) {
/* XXX: scan the stack and look for known auxv patterns or sthg. */
argc = 0;
argv = NULL;
envp = NULL;
}
ASSERT_MESSAGE(CHKLVL_ASSERTS, "failed to find envp", envp != NULL);
# endif
return our_init(argc, argv, envp);
}
#endif
bool
kernel_is_64bit(void)
{
return kernel_64bit;
}
#ifdef MACOS
/* XXX: if we get enough of these, move to os_macos.c or sthg */
static bool
sysctl_query(int level0, int level1, void *buf, size_t bufsz)
{
int res;
int name[2];
size_t len = bufsz;
name[0] = level0;
name[1] = level1;
res = dynamorio_syscall(SYS___sysctl, 6, &name, 2, buf, &len, NULL, 0);
return (res >= 0);
}
int
os_get_version(void)
{
return macos_version;
}
#endif
static void
get_uname(void)
{
/* assumption: only called at init, so we don't need any synch
* or .data unprot
*/
static struct utsname uinfo; /* can be large, avoid stack overflow */
#ifdef MACOS
if (!sysctl_query(CTL_KERN, KERN_OSTYPE, &uinfo.sysname, sizeof(uinfo.sysname)) ||
!sysctl_query(CTL_KERN, KERN_HOSTNAME, &uinfo.nodename, sizeof(uinfo.nodename)) ||
!sysctl_query(CTL_KERN, KERN_OSRELEASE, &uinfo.release, sizeof(uinfo.release)) ||
!sysctl_query(CTL_KERN, KERN_VERSION, &uinfo.version, sizeof(uinfo.version)) ||
!sysctl_query(CTL_HW, HW_MACHINE, &uinfo.machine, sizeof(uinfo.machine))) {
ASSERT(false && "sysctl queries failed");
return;
}
#else
DEBUG_DECLARE(int res =)
dynamorio_syscall(SYS_uname, 1, (ptr_uint_t)&uinfo);
ASSERT(res >= 0);
#endif
LOG(GLOBAL, LOG_TOP, 1, "uname:\n\tsysname: %s\n", uinfo.sysname);
LOG(GLOBAL, LOG_TOP, 1, "\tnodename: %s\n", uinfo.nodename);
LOG(GLOBAL, LOG_TOP, 1, "\trelease: %s\n", uinfo.release);
LOG(GLOBAL, LOG_TOP, 1, "\tversion: %s\n", uinfo.version);
LOG(GLOBAL, LOG_TOP, 1, "\tmachine: %s\n", uinfo.machine);
if (strncmp(uinfo.machine, "x86_64", sizeof("x86_64")) == 0)
kernel_64bit = true;
#ifdef MACOS
/* XXX: I would skip these checks for standalone so we don't have to set env
* vars for frontends to see the options but I'm still afraid of some syscall
* crash with no output: I'd rather have two messages than silent crashing.
*/
if (DYNAMO_OPTION(max_supported_os_version) != 0) { /* 0 disables */
/* We only support OSX 10.7.5+. That means kernels 11.x+. */
# define MIN_DARWIN_VERSION_SUPPORTED 11
int kernel_major;
if (sscanf(uinfo.release, "%d", &kernel_major) != 1 ||
kernel_major > DYNAMO_OPTION(max_supported_os_version) ||
kernel_major < MIN_DARWIN_VERSION_SUPPORTED) {
/* We make this non-fatal as it's likely DR will work */
SYSLOG(SYSLOG_WARNING, UNSUPPORTED_OS_VERSION, 3, get_application_name(),
get_application_pid(), uinfo.release);
}
macos_version = kernel_major;
}
#endif
}
/* os-specific initializations */
void
d_r_os_init(void)
{
ksynch_init();
get_uname();
/* Populate global data caches. */
get_application_name();
get_application_base();
/* determine whether gettid is provided and needed for threads,
     * or whether getpid suffices. Even 2.4 kernels have gettid (which maps
     * to getpid); we don't have an old enough target to test this.
*/
#ifdef MACOS
kernel_thread_groups = (dynamorio_syscall(SYS_thread_selfid, 0) >= 0);
#else
kernel_thread_groups = (dynamorio_syscall(SYS_gettid, 0) >= 0);
#endif
LOG(GLOBAL, LOG_TOP | LOG_STATS, 1, "thread id is from %s\n",
kernel_thread_groups ? "gettid" : "getpid");
#ifdef MACOS
/* SYS_thread_selfid was added in 10.6. We have no simple way to get the
* thread id on 10.5, so we don't support it.
*/
if (!kernel_thread_groups) {
SYSLOG(SYSLOG_WARNING, UNSUPPORTED_OS_VERSION, 3, get_application_name(),
get_application_pid(), "Mac OSX 10.5 or earlier");
}
#else
ASSERT_CURIOSITY(kernel_thread_groups);
#endif
pid_cached = get_process_id();
#ifdef VMX86_SERVER
vmk_init();
#endif
d_r_signal_init();
/* We now set up an early fault handler for d_r_safe_read() (i#350) */
fault_handling_initialized = true;
memquery_init();
#ifdef PROFILE_RDTSC
if (dynamo_options.profile_times) {
ASSERT_NOT_TESTED();
kilo_hertz = get_timer_frequency();
LOG(GLOBAL, LOG_TOP | LOG_STATS, 1, "CPU MHz is %d\n", kilo_hertz / 1000);
}
#endif /* PROFILE_RDTSC */
/* Needs to be after heap_init */
IF_NO_MEMQUERY(memcache_init());
/* we didn't have heap in os_file_init() so create and add global logfile now */
fd_table = generic_hash_create(
GLOBAL_DCONTEXT, INIT_HTABLE_SIZE_FD, 80 /* load factor: not perf-critical */,
HASHTABLE_SHARED | HASHTABLE_PERSISTENT, NULL _IF_DEBUG("fd table"));
#ifdef DEBUG
if (GLOBAL != INVALID_FILE)
fd_table_add(GLOBAL, OS_OPEN_CLOSE_ON_FORK);
#endif
/* Ensure initialization */
get_dynamorio_dll_start();
#ifdef LINUX
if (DYNAMO_OPTION(emulate_brk))
init_emulated_brk(NULL);
#endif
#ifdef ANDROID
/* This must be set up earlier than privload_tls_init, and must be set up
* for non-client-interface as well, as this initializes DR_TLS_BASE_OFFSET
* (i#1931).
*/
init_android_version();
#endif
#ifdef LINUX
if (!standalone_library)
d_r_rseq_init();
#endif
#ifdef MACOS64
tls_process_init();
#endif
}
/* called before any logfiles are opened */
void
os_file_init(void)
{
/* We steal fds from the app for better transparency. We lower the max file
* descriptor limit as viewed by the app, and block SYS_dup{2,3} and
* SYS_fcntl(F_DUPFD*) from creating a file explicitly in our space. We do
* not try to stop incremental file opening from extending into our space:
* if the app really is running out of fds, we'll give it some of ours:
* after all we probably don't need all -steal_fds, and if we really need fds
* we typically open them at startup. We also don't bother watching all
* syscalls that take in fds from affecting our fds.
*/
if (DYNAMO_OPTION(steal_fds) > 0) {
struct rlimit rlimit_nofile;
/* SYS_getrlimit uses an old 32-bit-field struct so we want SYS_ugetrlimit */
if (dynamorio_syscall(
IF_MACOS_ELSE(SYS_getrlimit, IF_X64_ELSE(SYS_getrlimit, SYS_ugetrlimit)),
2, RLIMIT_NOFILE, &rlimit_nofile) != 0) {
/* linux default is 1024 */
SYSLOG_INTERNAL_WARNING("getrlimit RLIMIT_NOFILE failed"); /* can't LOG yet */
rlimit_nofile.rlim_cur = 1024;
rlimit_nofile.rlim_max = 1024;
}
/* pretend the limit is lower and reserve the top spots for us.
* for simplicity and to give as much room as possible to app,
* raise soft limit to equal hard limit.
* if an app really depends on a low soft limit, they can run
* with -steal_fds 0.
*/
if (rlimit_nofile.rlim_max > DYNAMO_OPTION(steal_fds)) {
int res;
min_dr_fd = rlimit_nofile.rlim_max - DYNAMO_OPTION(steal_fds);
app_rlimit_nofile.rlim_max = min_dr_fd;
app_rlimit_nofile.rlim_cur = app_rlimit_nofile.rlim_max;
rlimit_nofile.rlim_cur = rlimit_nofile.rlim_max;
res = dynamorio_syscall(SYS_setrlimit, 2, RLIMIT_NOFILE, &rlimit_nofile);
if (res != 0) {
SYSLOG_INTERNAL_WARNING("unable to raise RLIMIT_NOFILE soft limit: %d",
res);
}
} else /* not fatal: we'll just end up using fds in app space */
SYSLOG_INTERNAL_WARNING("unable to reserve fds");
}
/* we don't have heap set up yet so we init fd_table in os_init */
}
/* we need to re-cache after a fork */
static char *
get_application_pid_helper(bool ignore_cache)
{
static char pidstr[16];
if (!pidstr[0] || ignore_cache) {
int pid = get_process_id();
snprintf(pidstr, sizeof(pidstr) - 1, "%d", pid);
}
return pidstr;
}
/* get application pid (cached), used for event logging */
char *
get_application_pid()
{
return get_application_pid_helper(false);
}
/* i#907: Called during early injection before data section protection to avoid
* issues with /proc/self/exe.
*/
void
set_executable_path(const char *exe_path)
{
strncpy(executable_path, exe_path, BUFFER_SIZE_ELEMENTS(executable_path));
NULL_TERMINATE_BUFFER(executable_path);
}
/* The OSX kernel used to place the bare executable path above envp.
* On recent XNU versions, the kernel now prefixes the executable path
* with the string executable_path= so it can be parsed getenv style.
*/
#ifdef MACOS
# define EXECUTABLE_KEY "executable_path="
#endif
/* i#189: we need to re-cache after a fork */
static char *
get_application_name_helper(bool ignore_cache, bool full_path)
{
if (!executable_path[0] || ignore_cache) {
#ifdef VMX86_SERVER
if (os_in_vmkernel_userworld()) {
vmk_getnamefrompid(pid, executable_path, sizeof(executable_path));
} else
#endif
if (DYNAMO_OPTION(early_inject)) {
ASSERT(executable_path[0] != '\0' &&
"i#907: Can't read /proc/self/exe for early injection");
} else {
#ifdef LINUX
/* Populate cache from /proc/self/exe link. */
strncpy(executable_path, read_proc_self_exe(ignore_cache),
BUFFER_SIZE_ELEMENTS(executable_path));
#else
/* OSX kernel puts full app exec path above envp */
char *c, **env = our_environ;
do {
env++;
} while (*env != NULL);
env++; /* Skip the NULL separating the envp array from exec_path */
c = *env;
if (strncmp(EXECUTABLE_KEY, c, strlen(EXECUTABLE_KEY)) == 0) {
c += strlen(EXECUTABLE_KEY);
}
/* If our frontends always absolute-ize paths prior to exec,
* this should usually be absolute -- but we go ahead and
* handle relative just in case (and to handle child processes).
* We add the cur dir, but note that the resulting path can
* still contain . or .. so it's not normalized (but it is a
* correct absolute path). Xref i#1402, i#1406, i#1407.
*/
if (*c != '/') {
int len;
if (!os_get_current_dir(executable_path,
BUFFER_SIZE_ELEMENTS(executable_path)))
len = 0;
else
len = strlen(executable_path);
snprintf(executable_path + len,
BUFFER_SIZE_ELEMENTS(executable_path) - len, "%s%s",
len > 0 ? "/" : "", c);
} else
strncpy(executable_path, c, BUFFER_SIZE_ELEMENTS(executable_path));
#endif
NULL_TERMINATE_BUFFER(executable_path);
/* FIXME: Fall back on /proc/self/cmdline and maybe argv[0] from
* _init().
*/
ASSERT(strlen(executable_path) > 0 && "readlink /proc/self/exe failed");
}
}
/* Get basename. */
if (executable_basename == NULL || ignore_cache) {
executable_basename = strrchr(executable_path, '/');
executable_basename =
(executable_basename == NULL ? executable_path : executable_basename + 1);
}
return (full_path ? executable_path : executable_basename);
}
/* get application name (cached), used for event logging */
char *
get_application_name(void)
{
return get_application_name_helper(false, true /* full path */);
}
/* Note: this is exported so that libdrpreload.so (preload.c) can use it to
* get process names to do selective process following (PR 212034). The
 * alternative is to duplicate this code or compile it into libdrpreload.so,
 * which is messy. Besides, libdynamorio.so is already loaded into the process
 * and available, so it is cleaner to just use functions from it.
*/
DYNAMORIO_EXPORT const char *
get_application_short_name(void)
{
return get_application_name_helper(false, false /* short name */);
}
/* Processor information provided by kernel */
#define PROC_CPUINFO "/proc/cpuinfo"
#define CPUMHZ_LINE_LENGTH 64
#define CPUMHZ_LINE_FORMAT "cpu MHz\t\t: %lu.%03lu\n"
/* printed in /usr/src/linux-2.4/arch/i386/kernel/setup.c calibrated in time.c */
/* seq_printf(m, "cpu MHz\t\t: %lu.%03lu\n", cpu_khz / 1000, (cpu_khz % 1000)) */
/* e.g. cpu MHz : 1594.851 */
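/* Worked example: "cpu MHz : 1594.851" parses as cpu_mhz=1594 and cpu_khz=851, so
 * the routine below returns 1594*1000 + 851 = 1594851, i.e. the frequency in KHz
 * (matching the kilo_hertz global); the no-/proc fallback of 1000*1000 is 1 GHz.
 */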
static timestamp_t
get_timer_frequency_cpuinfo(void)
{
file_t cpuinfo;
ssize_t nread;
char *buf;
char *mhz_line;
ulong cpu_mhz = 1000;
ulong cpu_khz = 0;
cpuinfo = os_open(PROC_CPUINFO, OS_OPEN_READ);
/* This can happen in a chroot or if /proc is disabled. */
if (cpuinfo == INVALID_FILE)
return 1000 * 1000; /* 1 GHz */
/* cpu MHz is typically in the first 4096 bytes. If not, or we get a short
* or interrupted read, our timer frequency estimate will be off, but it's
* not the end of the world.
* FIXME: Factor a buffered file reader out of our maps iterator if we want
* to do this the right way.
*/
buf = global_heap_alloc(PAGE_SIZE HEAPACCT(ACCT_OTHER));
nread = os_read(cpuinfo, buf, PAGE_SIZE - 1);
if (nread > 0) {
buf[nread] = '\0';
mhz_line = strstr(buf, "cpu MHz\t\t:");
if (mhz_line != NULL &&
sscanf(mhz_line, CPUMHZ_LINE_FORMAT, &cpu_mhz, &cpu_khz) == 2) {
LOG(GLOBAL, LOG_ALL, 2, "Processor speed exactly %lu.%03luMHz\n", cpu_mhz,
cpu_khz);
}
}
global_heap_free(buf, PAGE_SIZE HEAPACCT(ACCT_OTHER));
os_close(cpuinfo);
return cpu_mhz * 1000 + cpu_khz;
}
timestamp_t
get_timer_frequency()
{
#ifdef VMX86_SERVER
if (os_in_vmkernel_userworld()) {
return vmk_get_timer_frequency();
}
#endif
return get_timer_frequency_cpuinfo();
}
/* DR has standardized on UTC time, which counts from Jan 1, 1601.
* That's the Windows standard. But Linux uses the Epoch of Jan 1, 1970.
*/
#define UTC_TO_EPOCH_SECONDS 11644473600
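/* Derivation of the constant: Jan 1, 1601 to Jan 1, 1970 spans 369 years with 89
 * leap days (92 multiples of 4 in 1604-1968, minus the non-leap centuries 1700,
 * 1800, 1900), i.e. 369*365 + 89 = 134774 days = 134774 * 86400 = 11644473600 s.
 */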
/* seconds since 1601 */
uint
query_time_seconds(void)
{
struct timeval current_time;
    uint64 val = dynamorio_syscall(SYS_gettimeofday, 2, &current_time, NULL);
#ifdef MACOS
/* MacOS before Sierra returns usecs:secs and does not set the timeval struct. */
if (macos_version < MACOS_VERSION_SIERRA) {
if ((int)val < 0)
return 0;
return (uint)val + UTC_TO_EPOCH_SECONDS;
}
#endif
if ((int)val >= 0) {
return current_time.tv_sec + UTC_TO_EPOCH_SECONDS;
} else {
ASSERT_NOT_REACHED();
return 0;
}
}
/* milliseconds since 1601 */
uint64
query_time_millis()
{
struct timeval current_time;
    uint64 val = dynamorio_syscall(SYS_gettimeofday, 2, &current_time, NULL);
#ifdef MACOS
/* MacOS before Sierra returns usecs:secs and does not set the timeval struct. */
if (macos_version < MACOS_VERSION_SIERRA) {
if ((int)val > 0) {
current_time.tv_sec = (uint)val;
current_time.tv_usec = (uint)(val >> 32);
}
}
#endif
if ((int)val >= 0) {
uint64 res =
(((uint64)current_time.tv_sec) * 1000) + (current_time.tv_usec / 1000);
res += UTC_TO_EPOCH_SECONDS * 1000;
return res;
} else {
ASSERT_NOT_REACHED();
return 0;
}
}
/* microseconds since 1601 */
uint64
query_time_micros()
{
struct timeval current_time;
    uint64 val = dynamorio_syscall(SYS_gettimeofday, 2, &current_time, NULL);
#ifdef MACOS
/* MacOS before Sierra returns usecs:secs and does not set the timeval struct. */
if (macos_version < MACOS_VERSION_SIERRA) {
if ((int)val > 0) {
current_time.tv_sec = (uint)val;
current_time.tv_usec = (uint)(val >> 32);
}
}
#endif
if ((int)val >= 0) {
uint64 res = (((uint64)current_time.tv_sec) * 1000000) + current_time.tv_usec;
res += UTC_TO_EPOCH_SECONDS * 1000000;
return res;
} else {
ASSERT_NOT_REACHED();
return 0;
}
}
#ifdef RETURN_AFTER_CALL
/* Finds the bottom of the call stack, presumably at program startup. */
/* This routine is a copycat of internal_dump_callstack and makes
   assumptions about program state, i.e. that frame pointers are valid;
   it should be used only at well-known points in release builds.
*/
static app_pc
find_stack_bottom()
{
app_pc retaddr = 0;
int depth = 0;
reg_t *fp;
/* from dump_dr_callstack() */
asm("mov %%" ASM_XBP ", %0" : "=m"(fp));
LOG(THREAD_GET, LOG_ALL, 3, "Find stack bottom:\n");
while (fp != NULL && is_readable_without_exception((byte *)fp, sizeof(reg_t) * 2)) {
retaddr = (app_pc) * (fp + 1); /* presumably also readable */
LOG(THREAD_GET, LOG_ALL, 3,
"\tframe ptr " PFX " => parent " PFX ", ret = " PFX "\n", fp, *fp, retaddr);
depth++;
/* yes I've seen weird recursive cases before */
if (fp == (reg_t *)*fp || depth > 100)
break;
fp = (reg_t *)*fp;
}
return retaddr;
}
#endif /* RETURN_AFTER_CALL */
/* os-specific atexit cleanup */
void
os_slow_exit(void)
{
#ifdef MACOS64
tls_process_exit();
#endif
#ifdef LINUX
if (!standalone_library)
d_r_rseq_exit();
#endif
d_r_signal_exit();
memquery_exit();
ksynch_exit();
generic_hash_destroy(GLOBAL_DCONTEXT, fd_table);
fd_table = NULL;
if (doing_detach) {
vsyscall_page_start = NULL;
IF_DEBUG(num_fd_add_pre_heap = 0;)
}
DELETE_LOCK(set_thread_area_lock);
#ifdef CLIENT_INTERFACE
DELETE_LOCK(client_tls_lock);
#endif
IF_NO_MEMQUERY(memcache_exit());
}
/* Helper function that calls cleanup_and_terminate after blocking most signals
 * (i#2921).
*/
void
block_cleanup_and_terminate(dcontext_t *dcontext, int sysnum, ptr_uint_t sys_arg1,
ptr_uint_t sys_arg2, bool exitproc,
/* these 2 args are only used for Mac thread exit */
ptr_uint_t sys_arg3, ptr_uint_t sys_arg4)
{
/* This thread is on its way to exit. We are blocking all signals since any
* signal that reaches us now can be delayed until after the exit is complete.
* We may still receive a suspend signal for synchronization that we may need
* to reply to (i#2921).
*/
if (sysnum == SYS_kill)
block_all_noncrash_signals_except(NULL, 2, dcontext->sys_param0, SUSPEND_SIGNAL);
else
block_all_noncrash_signals_except(NULL, 1, SUSPEND_SIGNAL);
cleanup_and_terminate(dcontext, sysnum, sys_arg1, sys_arg2, exitproc, sys_arg3,
sys_arg4);
}
/* os-specific atexit cleanup */
void
os_fast_exit(void)
{
/* nothing */
}
void
os_terminate_with_code(dcontext_t *dcontext, terminate_flags_t flags, int exit_code)
{
/* i#1319: we support a signal via 2nd byte */
bool use_signal = exit_code > 0x00ff;
/* XXX: TERMINATE_THREAD not supported */
ASSERT_NOT_IMPLEMENTED(TEST(TERMINATE_PROCESS, flags));
if (use_signal) {
int sig = (exit_code & 0xff00) >> 8;
os_terminate_via_signal(dcontext, flags, sig);
ASSERT_NOT_REACHED();
}
if (TEST(TERMINATE_CLEANUP, flags)) {
/* we enter from several different places, so rewind until top-level kstat */
KSTOP_REWIND_UNTIL(thread_measured);
block_cleanup_and_terminate(dcontext, SYSNUM_EXIT_PROCESS, exit_code, 0,
true /*whole process*/, 0, 0);
} else {
/* clean up may be impossible - just terminate */
d_r_config_exit(); /* delete .1config file */
exit_process_syscall(exit_code);
}
}
void
os_terminate(dcontext_t *dcontext, terminate_flags_t flags)
{
os_terminate_with_code(dcontext, flags, -1);
}
int
os_timeout(int time_in_milliseconds)
{
ASSERT_NOT_IMPLEMENTED(false);
return 0;
}
/************************************************************************
* SEGMENT STEALING
*
* Not easy to make truly transparent -- but the alternative of dispatch
* by thread id on global memory has performance implications.
* Pull the non-STEAL_SEGMENT code out of the cvs attic for a base if
* transparency becomes more of a problem.
*/
#define TLS_LOCAL_STATE_OFFSET (offsetof(os_local_state_t, state))
/* offset from top of page */
#define TLS_OS_LOCAL_STATE 0x00
#define TLS_SELF_OFFSET (TLS_OS_LOCAL_STATE + offsetof(os_local_state_t, self))
#define TLS_THREAD_ID_OFFSET (TLS_OS_LOCAL_STATE + offsetof(os_local_state_t, tid))
#define TLS_DCONTEXT_OFFSET (TLS_OS_LOCAL_STATE + TLS_DCONTEXT_SLOT)
#ifdef X86
# define TLS_MAGIC_OFFSET (TLS_OS_LOCAL_STATE + offsetof(os_local_state_t, magic))
#endif
/* They should be used with os_tls_offset, so we do not need to add TLS_OS_LOCAL_STATE here
*/
#define TLS_APP_LIB_TLS_BASE_OFFSET (offsetof(os_local_state_t, app_lib_tls_base))
#define TLS_APP_ALT_TLS_BASE_OFFSET (offsetof(os_local_state_t, app_alt_tls_base))
#define TLS_APP_LIB_TLS_REG_OFFSET (offsetof(os_local_state_t, app_lib_tls_reg))
#define TLS_APP_ALT_TLS_REG_OFFSET (offsetof(os_local_state_t, app_alt_tls_reg))
/* N.B.: imm and offs are ushorts!
* We use %c[0-9] to get gcc to emit an integer constant without a leading $ for
* the segment offset. See the documentation here:
* http://gcc.gnu.org/onlinedocs/gccint/Output-Template.html#Output-Template
* Also, var needs to match the pointer size, or else we'll get stack corruption.
 * XXX: This is marked volatile to prevent gcc from speculating this code before
* checks for is_thread_tls_initialized(), but if we could find a more
* precise constraint, then the compiler would be able to optimize better. See
* glibc comments on THREAD_SELF.
*/
#ifdef MACOS64
/* For now we have both a directly-addressable os_local_state_t and a pointer to
* it in slot 6. If we settle on always doing the full os_local_state_t in slots,
* we would probably get rid of the indirection here and directly access slot fields.
*/
# define WRITE_TLS_SLOT_IMM(imm, var) \
IF_NOT_HAVE_TLS(ASSERT_NOT_REACHED()); \
ASSERT(sizeof(var) == sizeof(void *)); \
__asm__ __volatile__( \
"mov %%gs:%1, %%" ASM_XAX " \n\t" \
"movq %0, %c2(%%" ASM_XAX ") \n\t" \
: \
: "r"(var), "m"(*(void **)(DR_TLS_BASE_SLOT * sizeof(void *))), "i"(imm) \
: "memory", ASM_XAX);
# define READ_TLS_SLOT_IMM(imm, var) \
IF_NOT_HAVE_TLS(ASSERT_NOT_REACHED()); \
ASSERT(sizeof(var) == sizeof(void *)); \
__asm__ __volatile__("mov %%gs:%1, %%" ASM_XAX " \n\t" \
"movq %c2(%%" ASM_XAX "), %0 \n\t" \
: "=r"(var) \
: "m"(*(void **)(DR_TLS_BASE_SLOT * sizeof(void *))), \
"i"(imm) \
: ASM_XAX);
# define WRITE_TLS_SLOT(offs, var) \
IF_NOT_HAVE_TLS(ASSERT_NOT_REACHED()); \
__asm__ __volatile__("mov %%gs:%0, %%" ASM_XAX " \n\t" \
"movzwq %1, %%" ASM_XDX " \n\t" \
"movq %2, (%%" ASM_XAX ", %%" ASM_XDX ") \n\t" \
: \
: "m"(*(void **)(DR_TLS_BASE_SLOT * sizeof(void *))), \
"m"(offs), "r"(var) \
: "memory", ASM_XAX, ASM_XDX);
# define READ_TLS_SLOT(offs, var) \
IF_NOT_HAVE_TLS(ASSERT_NOT_REACHED()); \
ASSERT(sizeof(var) == sizeof(void *)); \
__asm__ __volatile__("mov %%gs:%1, %%" ASM_XAX " \n\t" \
"movzwq %2, %%" ASM_XDX " \n\t" \
"movq (%%" ASM_XAX ", %%" ASM_XDX "), %0 \n\t" \
: "=r"(var) \
: "m"(*(void **)(DR_TLS_BASE_SLOT * sizeof(void *))), \
"m"(offs) \
: "memory", ASM_XAX, ASM_XDX);
#elif defined(X86)
# define WRITE_TLS_SLOT_IMM(imm, var) \
IF_NOT_HAVE_TLS(ASSERT_NOT_REACHED()); \
ASSERT(sizeof(var) == sizeof(void *)); \
asm volatile("mov %0, %" ASM_SEG ":%c1" : : "r"(var), "i"(imm));
# define READ_TLS_SLOT_IMM(imm, var) \
IF_NOT_HAVE_TLS(ASSERT_NOT_REACHED()); \
ASSERT(sizeof(var) == sizeof(void *)); \
asm volatile("mov %" ASM_SEG ":%c1, %0" : "=r"(var) : "i"(imm));
# define WRITE_TLS_INT_SLOT_IMM(imm, var) \
IF_NOT_HAVE_TLS(ASSERT_NOT_REACHED()); \
ASSERT(sizeof(var) == sizeof(int)); \
asm volatile("movl %0, %" ASM_SEG ":%c1" : : "r"(var), "i"(imm));
# define READ_TLS_INT_SLOT_IMM(imm, var) \
IF_NOT_HAVE_TLS(ASSERT_NOT_REACHED()); \
ASSERT(sizeof(var) == sizeof(int)); \
asm volatile("movl %" ASM_SEG ":%c1, %0" : "=r"(var) : "i"(imm));
/* FIXME: need dedicated-storage var for _TLS_SLOT macros, can't use expr */
# define WRITE_TLS_SLOT(offs, var) \
IF_NOT_HAVE_TLS(ASSERT_NOT_REACHED()); \
ASSERT(sizeof(var) == sizeof(void *)); \
ASSERT(sizeof(offs) == 2); \
asm("mov %0, %%" ASM_XAX : : "m"((var)) : ASM_XAX); \
asm("movzw" IF_X64_ELSE("q", "l") " %0, %%" ASM_XDX : : "m"((offs)) : ASM_XDX); \
asm("mov %%" ASM_XAX ", %" ASM_SEG ":(%%" ASM_XDX ")" : : : ASM_XAX, ASM_XDX);
# define READ_TLS_SLOT(offs, var) \
ASSERT(sizeof(var) == sizeof(void *)); \
ASSERT(sizeof(offs) == 2); \
asm("movzw" IF_X64_ELSE("q", "l") " %0, %%" ASM_XAX : : "m"((offs)) : ASM_XAX); \
asm("mov %" ASM_SEG ":(%%" ASM_XAX "), %%" ASM_XAX : : : ASM_XAX); \
asm("mov %%" ASM_XAX ", %0" : "=m"((var)) : : ASM_XAX);
#elif defined(AARCHXX)
/* Android needs indirection through a global. The Android toolchain has
* trouble with relocations if we use a global directly in asm, so we convert to
* a local variable in these macros. We pay the cost of the extra instructions
* for Linux ARM to share the code.
*/
# define WRITE_TLS_SLOT_IMM(imm, var) \
do { \
uint _base_offs = DR_TLS_BASE_OFFSET; \
__asm__ __volatile__("mov " ASM_R2 ", %0 \n\t" READ_TP_TO_R3_DISP_IN_R2 \
"str %1, [" ASM_R3 ", %2] \n\t" \
: \
: "r"(_base_offs), "r"(var), "i"(imm) \
: "memory", ASM_R2, ASM_R3); \
} while (0)
# define READ_TLS_SLOT_IMM(imm, var) \
do { \
uint _base_offs = DR_TLS_BASE_OFFSET; \
__asm__ __volatile__("mov " ASM_R2 ", %1 \n\t" READ_TP_TO_R3_DISP_IN_R2 \
"ldr %0, [" ASM_R3 ", %2] \n\t" \
: "=r"(var) \
: "r"(_base_offs), "i"(imm) \
: ASM_R2, ASM_R3); \
} while (0)
# define WRITE_TLS_INT_SLOT_IMM WRITE_TLS_SLOT_IMM /* b/c 32-bit */
# define READ_TLS_INT_SLOT_IMM READ_TLS_SLOT_IMM /* b/c 32-bit */
# define WRITE_TLS_SLOT(offs, var) \
do { \
uint _base_offs = DR_TLS_BASE_OFFSET; \
__asm__ __volatile__("mov " ASM_R2 ", %0 \n\t" READ_TP_TO_R3_DISP_IN_R2 \
"add " ASM_R3 ", " ASM_R3 ", %2 \n\t" \
"str %1, [" ASM_R3 "] \n\t" \
: \
: "r"(_base_offs), "r"(var), "r"(offs) \
: "memory", ASM_R2, ASM_R3); \
} while (0)
# define READ_TLS_SLOT(offs, var) \
do { \
uint _base_offs = DR_TLS_BASE_OFFSET; \
__asm__ __volatile__("mov " ASM_R2 ", %1 \n\t" READ_TP_TO_R3_DISP_IN_R2 \
"add " ASM_R3 ", " ASM_R3 ", %2 \n\t" \
"ldr %0, [" ASM_R3 "] \n\t" \
: "=r"(var) \
: "r"(_base_offs), "r"(offs) \
: ASM_R2, ASM_R3); \
} while (0)
#endif /* X86/ARM */
#ifdef X86
/* We use this at thread init and exit to make it easy to identify
* whether TLS is initialized (i#2089).
* We assume alignment does not matter.
*/
static os_local_state_t uninit_tls; /* has .magic == 0 */
#endif
static bool
is_thread_tls_initialized(void)
{
#ifdef MACOS64
/* For now we have both a directly-addressable os_local_state_t and a pointer to
* it in slot 6. If we settle on always doing the full os_local_state_t in slots,
* we would probably get rid of the indirection here and directly read the magic
* field from its slot.
*/
byte **tls_swap_slot;
tls_swap_slot = (byte **)get_app_tls_swap_slot_addr();
if (tls_swap_slot == NULL || *tls_swap_slot == NULL ||
*tls_swap_slot == TLS_SLOT_VAL_EXITED)
return false;
return true;
#elif defined(X86)
if (INTERNAL_OPTION(safe_read_tls_init)) {
/* Avoid faults during early init or during exit when we have no handler.
* It's not worth extending the handler as the faults are a perf hit anyway.
* For standalone_library, first_thread_tls_initialized will always be false,
* so we'll return false here and use our check in get_thread_private_dcontext().
*/
if (!first_thread_tls_initialized || last_thread_tls_exited)
return false;
/* To handle WSL (i#1986) where fs and gs start out equal to ss (0x2b),
* and when the MSR is used having a zero selector, and other complexities,
* we just do a blind safe read as the simplest solution once we're past
* initial init and have a fault handler.
*
* i#2089: to avoid the perf cost of syscalls to verify the tid, and to
* distinguish a fork child from a separate-group thread, we no longer read
* the tid field and check that the TLS belongs to this particular thread:
* instead we rely on clearing the .magic field for child threads and at
* thread exit (to avoid a fault) and we simply check the field here.
* A native app thread is very unlikely to match this.
*/
return safe_read_tls_magic() == TLS_MAGIC_VALID;
} else {
/* XXX i#2089: we're keeping this legacy code around until
* we're confident that the safe read code above is safer, more
* performant, and more robust.
*/
os_local_state_t *os_tls = NULL;
ptr_uint_t cur_seg = read_thread_register(SEG_TLS);
/* Handle WSL (i#1986) where fs and gs start out equal to ss (0x2b) */
if (cur_seg != 0 && cur_seg != read_thread_register(SEG_SS)) {
/* XXX: make this a safe read: but w/o dcontext we need special asm support */
READ_TLS_SLOT_IMM(TLS_SELF_OFFSET, os_tls);
}
# ifdef X64
if (os_tls == NULL && tls_dr_using_msr()) {
/* When the MSR is used, the selector in the register remains 0.
* We can't clear the MSR early in a new thread and then look for
* a zero base here b/c if kernel decides to use GDT that zeroing
* will set the selector, unless we want to assume we know when
* the kernel uses the GDT.
* Instead we make a syscall to get the tid. This should be ok
* perf-wise b/c the common case is the non-zero above.
*/
byte *base = tls_get_fs_gs_segment_base(SEG_TLS);
ASSERT(tls_global_type == TLS_TYPE_ARCH_PRCTL);
if (base != (byte *)POINTER_MAX && base != NULL) {
os_tls = (os_local_state_t *)base;
}
}
# endif
if (os_tls != NULL) {
return (os_tls->tid == get_sys_thread_id() ||
/* The child of a fork will initially come here */
os_tls->state.spill_space.dcontext->owning_process ==
get_parent_id());
} else
return false;
}
#elif defined(AARCHXX)
byte **dr_tls_base_addr;
if (tls_global_type == TLS_TYPE_NONE)
return false;
dr_tls_base_addr = (byte **)get_dr_tls_base_addr();
if (dr_tls_base_addr == NULL || *dr_tls_base_addr == NULL ||
/* We use the TLS slot's value to identify a now-exited thread (i#1578) */
*dr_tls_base_addr == TLS_SLOT_VAL_EXITED)
return false;
/* We would like to ASSERT is_dynamo_address(*tls_swap_slot) but that leads
* to infinite recursion for an address not in the vm_reserve area, as
     * dynamo_vm_areas_start_reading() ends up calling
* deadlock_avoidance_unlock() which calls get_thread_private_dcontext()
* which comes here.
*/
return true;
#endif
}
bool
is_DR_segment_reader_entry(app_pc pc)
{
/* This routine is used to avoid problems with dr_prepopulate_cache() building
* bbs for DR code that reads DR segments when DR is a static library.
* It's a little ugly but it's not clear there's a better solution.
* See the discussion in i#2463 c#2.
*/
#ifdef X86
if (INTERNAL_OPTION(safe_read_tls_init)) {
return pc == (app_pc)safe_read_tls_magic || pc == (app_pc)safe_read_tls_self;
}
#endif
/* XXX i#2463: for ARM and for -no_safe_read_tls_init it may be
* more complicated as the PC may not be a function entry but the
* start of a bb after a branch in our C code that uses inline asm
* to read the TLS.
*/
return false;
}
#if defined(X86) || defined(DEBUG)
static bool
is_thread_tls_allocated(void)
{
# if defined(X86) && !defined(MACOS64)
if (INTERNAL_OPTION(safe_read_tls_init)) {
/* We use this routine to allow currently-native threads, for which
* is_thread_tls_initialized() (and thus is_thread_initialized()) will
* return false.
* Caution: this will also return true on a fresh clone child.
*/
uint magic;
if (!first_thread_tls_initialized || last_thread_tls_exited)
return false;
magic = safe_read_tls_magic();
return magic == TLS_MAGIC_VALID || magic == TLS_MAGIC_INVALID;
}
# endif
return is_thread_tls_initialized();
}
#endif
/* converts a local_state_t offset to a segment offset */
ushort
os_tls_offset(ushort tls_offs)
{
/* no ushort truncation issues b/c TLS_LOCAL_STATE_OFFSET is 0 */
IF_NOT_HAVE_TLS(ASSERT_NOT_REACHED());
ASSERT(TLS_LOCAL_STATE_OFFSET == 0);
return (TLS_LOCAL_STATE_OFFSET + tls_offs IF_MACOS64(+tls_get_dr_offs()));
}
/* converts a segment offset to a local_state_t offset */
ushort
os_local_state_offset(ushort seg_offs)
{
/* no ushort truncation issues b/c TLS_LOCAL_STATE_OFFSET is 0 */
IF_NOT_HAVE_TLS(ASSERT_NOT_REACHED());
ASSERT(TLS_LOCAL_STATE_OFFSET == 0);
return (seg_offs - TLS_LOCAL_STATE_OFFSET IF_MACOS64(-tls_get_dr_offs()));
}
/* XXX: Will return NULL if called before os_thread_init(), which sets
* ostd->dr_fs/gs_base.
*/
void *
os_get_priv_tls_base(dcontext_t *dcontext, reg_id_t reg)
{
os_thread_data_t *ostd;
IF_NOT_HAVE_TLS(ASSERT_NOT_REACHED());
ASSERT(reg == TLS_REG_ALT || reg == TLS_REG_LIB);
if (dcontext == NULL)
dcontext = get_thread_private_dcontext();
if (dcontext == NULL)
return NULL;
ostd = (os_thread_data_t *)dcontext->os_field;
if (reg == TLS_REG_LIB)
return ostd->priv_lib_tls_base;
else if (reg == TLS_REG_ALT)
return ostd->priv_alt_tls_base;
ASSERT_NOT_REACHED();
return NULL;
}
os_local_state_t *
get_os_tls(void)
{
os_local_state_t *os_tls;
ASSERT(is_thread_tls_initialized());
READ_TLS_SLOT_IMM(TLS_SELF_OFFSET, os_tls);
return os_tls;
}
/* Obtain TLS from dcontext directly, which succeeds in pre-thread-init
* situations where get_os_tls() fails.
*/
static os_local_state_t *
get_os_tls_from_dc(dcontext_t *dcontext)
{
byte *local_state;
ASSERT(dcontext != NULL);
local_state = (byte *)dcontext->local_state;
if (local_state == NULL)
return NULL;
return (os_local_state_t *)(local_state - offsetof(os_local_state_t, state));
}
#ifdef AARCHXX
bool
os_set_app_tls_base(dcontext_t *dcontext, reg_id_t reg, void *base)
{
os_local_state_t *os_tls;
IF_NOT_HAVE_TLS(ASSERT_NOT_REACHED());
ASSERT(reg == TLS_REG_LIB || reg == TLS_REG_ALT);
if (dcontext == NULL)
dcontext = get_thread_private_dcontext();
/* we will be called only if TLS is initialized */
ASSERT(dcontext != NULL);
os_tls = get_os_tls_from_dc(dcontext);
if (reg == TLS_REG_LIB) {
os_tls->app_lib_tls_base = base;
LOG(THREAD, LOG_THREADS, 1, "TLS app lib base =" PFX "\n", base);
return true;
} else if (reg == TLS_REG_ALT) {
os_tls->app_alt_tls_base = base;
LOG(THREAD, LOG_THREADS, 1, "TLS app alt base =" PFX "\n", base);
return true;
}
ASSERT_NOT_REACHED();
return false;
}
#endif
void *
os_get_app_tls_base(dcontext_t *dcontext, reg_id_t reg)
{
os_local_state_t *os_tls;
IF_NOT_HAVE_TLS(ASSERT_NOT_REACHED());
ASSERT(reg == TLS_REG_LIB || reg == TLS_REG_ALT);
if (dcontext == NULL)
dcontext = get_thread_private_dcontext();
if (dcontext == NULL) {
/* No dcontext means we haven't initialized TLS, so we haven't replaced
* the app's segments. get_segment_base is expensive, but this should
* be rare. Re-examine if it pops up in a profile.
*/
return get_segment_base(reg);
}
os_tls = get_os_tls_from_dc(dcontext);
if (reg == TLS_REG_LIB)
return os_tls->app_lib_tls_base;
else if (reg == TLS_REG_ALT)
return os_tls->app_alt_tls_base;
ASSERT_NOT_REACHED();
return NULL;
}
ushort
os_get_app_tls_base_offset(reg_id_t reg)
{
IF_NOT_HAVE_TLS(ASSERT_NOT_REACHED());
ASSERT(TLS_LOCAL_STATE_OFFSET == 0);
if (reg == TLS_REG_LIB)
return TLS_APP_LIB_TLS_BASE_OFFSET;
else if (reg == TLS_REG_ALT)
return TLS_APP_ALT_TLS_BASE_OFFSET;
ASSERT_NOT_REACHED();
return 0;
}
#ifdef X86
ushort
os_get_app_tls_reg_offset(reg_id_t reg)
{
IF_NOT_HAVE_TLS(ASSERT_NOT_REACHED());
ASSERT(TLS_LOCAL_STATE_OFFSET == 0);
if (reg == TLS_REG_LIB)
return TLS_APP_LIB_TLS_REG_OFFSET;
else if (reg == TLS_REG_ALT)
return TLS_APP_ALT_TLS_REG_OFFSET;
ASSERT_NOT_REACHED();
return 0;
}
#endif
void *
d_r_get_tls(ushort tls_offs)
{
void *val;
READ_TLS_SLOT(tls_offs, val);
return val;
}
void
d_r_set_tls(ushort tls_offs, void *value)
{
WRITE_TLS_SLOT(tls_offs, value);
}
/* Returns POINTER_MAX on failure.
* Assumes that cs, ss, ds, and es are flat.
* Should we export this to clients? For now they can get
* this information via opnd_compute_address().
*/
byte *
get_segment_base(uint seg)
{
#ifdef MACOS64
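/* The thread register holds the pthread TLS pointer; we report the address of
* slot SEG_TLS_BASE_OFFSET within that slot array as the segment base.
*/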
ptr_uint_t *pthread_self = (ptr_uint_t *)read_thread_register(seg);
return (byte *)&pthread_self[SEG_TLS_BASE_OFFSET];
#elif defined(X86)
if (seg == SEG_CS || seg == SEG_SS || seg == SEG_DS || seg == SEG_ES)
return NULL;
# ifdef HAVE_TLS
return tls_get_fs_gs_segment_base(seg);
# else
return (byte *)POINTER_MAX;
# endif /* HAVE_TLS */
#elif defined(AARCHXX)
/* XXX i#1551: should we rename/refactor to avoid "segment"? */
return (byte *)read_thread_register(seg);
#endif
}
/* i#572: handle opnd_compute_address to return the application
* segment base value.
*/
byte *
get_app_segment_base(uint seg)
{
#ifdef X86
if (seg == SEG_CS || seg == SEG_SS || seg == SEG_DS || seg == SEG_ES)
return NULL;
#endif /* X86 */
if (IF_CLIENT_INTERFACE_ELSE(INTERNAL_OPTION(private_loader), false) &&
first_thread_tls_initialized && !last_thread_tls_exited) {
return d_r_get_tls(os_get_app_tls_base_offset(seg));
}
return get_segment_base(seg);
}
local_state_extended_t *
get_local_state_extended()
{
os_local_state_t *os_tls;
ASSERT(is_thread_tls_initialized());
READ_TLS_SLOT_IMM(TLS_SELF_OFFSET, os_tls);
return &(os_tls->state);
}
local_state_t *
get_local_state()
{
#ifdef HAVE_TLS
return (local_state_t *)get_local_state_extended();
#else
return NULL;
#endif
}
#ifdef DEBUG
void
os_enter_dynamorio(void)
{
# ifdef ARM
/* i#1578: check that app's tls value doesn't match our sentinel */
ASSERT(*(byte **)get_dr_tls_base_addr() != TLS_SLOT_VAL_EXITED);
# endif
}
#endif
/* i#107: handle segment register usage conflicts between app and dr:
* os_handle_mov_seg updates the app's tls selector maintained by DR.
* It is called before entering code cache in dispatch_enter_fcache.
*/
void
os_handle_mov_seg(dcontext_t *dcontext, byte *pc)
{
#ifdef X86
instr_t instr;
opnd_t opnd;
reg_id_t seg;
ushort sel = 0;
our_modify_ldt_t *desc;
int desc_idx;
os_local_state_t *os_tls;
os_thread_data_t *ostd;
instr_init(dcontext, &instr);
decode_cti(dcontext, pc, &instr);
/* the first instr must be mov seg */
ASSERT(instr_get_opcode(&instr) == OP_mov_seg);
opnd = instr_get_dst(&instr, 0);
ASSERT(opnd_is_reg(opnd));
seg = opnd_get_reg(opnd);
ASSERT(reg_is_segment(seg));
ostd = (os_thread_data_t *)dcontext->os_field;
desc = (our_modify_ldt_t *)ostd->app_thread_areas;
os_tls = get_os_tls();
/* get the selector value */
opnd = instr_get_src(&instr, 0);
if (opnd_is_reg(opnd)) {
sel = (ushort)reg_get_value_priv(opnd_get_reg(opnd), get_mcontext(dcontext));
} else {
void *ptr;
ptr = (ushort *)opnd_compute_address_priv(opnd, get_mcontext(dcontext));
ASSERT(ptr != NULL);
if (!d_r_safe_read(ptr, sizeof(sel), &sel)) {
/* FIXME: if invalid address, should deliver a signal to user. */
ASSERT_NOT_IMPLEMENTED(false);
}
}
/* calculate the entry_number */
desc_idx = SELECTOR_INDEX(sel) - tls_min_index();
if (seg == TLS_REG_LIB) {
os_tls->app_lib_tls_reg = sel;
os_tls->app_lib_tls_base = (void *)(ptr_uint_t)desc[desc_idx].base_addr;
} else {
os_tls->app_alt_tls_reg = sel;
os_tls->app_alt_tls_base = (void *)(ptr_uint_t)desc[desc_idx].base_addr;
}
instr_free(dcontext, &instr);
LOG(THREAD_GET, LOG_THREADS, 2,
"thread " TIDFMT " segment change %s to selector 0x%x => "
"app lib tls base: " PFX ", alt tls base: " PFX "\n",
d_r_get_thread_id(), reg_names[seg], sel, os_tls->app_lib_tls_base,
os_tls->app_alt_tls_base);
#elif defined(ARM)
/* FIXME i#1551: NYI on ARM */
ASSERT_NOT_REACHED();
#endif /* X86/ARM */
}
/* Initialization for TLS mangling (-mangle_app_seg on x86).
* Must be called before DR setup its own segment.
*/
static void
os_tls_app_seg_init(os_local_state_t *os_tls, void *segment)
{
app_pc app_lib_tls_base, app_alt_tls_base;
#if defined(X86) && !defined(MACOS64)
int i, index;
our_modify_ldt_t *desc;
os_tls->app_lib_tls_reg = read_thread_register(TLS_REG_LIB);
os_tls->app_alt_tls_reg = read_thread_register(TLS_REG_ALT);
#endif
app_lib_tls_base = get_segment_base(TLS_REG_LIB);
app_alt_tls_base = get_segment_base(TLS_REG_ALT);
/* If we're a non-initial thread, tls will be set to the parent's value,
* or to &uninit_tls (i#2089), both of which will be is_dynamo_address().
*/
os_tls->app_lib_tls_base =
is_dynamo_address(app_lib_tls_base) ? NULL : app_lib_tls_base;
os_tls->app_alt_tls_base =
is_dynamo_address(app_alt_tls_base) ? NULL : app_alt_tls_base;
#if defined(X86) && !defined(MACOS64)
/* get all TLS thread area values */
/* XXX: is get_thread_area supported in 64-bit kernel?
* It has syscall number 211.
* It works for a 32-bit application running in a 64-bit kernel.
* It returns error value -38 for a 64-bit app in a 64-bit kernel.
*/
desc = &os_tls->os_seg_info.app_thread_areas[0];
tls_initialize_indices(os_tls);
index = tls_min_index();
for (i = 0; i < GDT_NUM_TLS_SLOTS; i++) {
tls_get_descriptor(i + index, &desc[i]);
}
#endif /* X86 */
os_tls->os_seg_info.dr_tls_base = segment;
os_tls->os_seg_info.priv_alt_tls_base = IF_X86_ELSE(segment, NULL);
/* now allocate the tls segment for client libraries */
if (IF_CLIENT_INTERFACE_ELSE(INTERNAL_OPTION(private_loader), false)) {
os_tls->os_seg_info.priv_lib_tls_base = IF_UNIT_TEST_ELSE(
os_tls->app_lib_tls_base, privload_tls_init(os_tls->app_lib_tls_base));
}
#if defined(X86) && !defined(MACOS64)
LOG(THREAD_GET, LOG_THREADS, 1,
"thread " TIDFMT " app lib tls reg: 0x%x, alt tls reg: 0x%x\n",
d_r_get_thread_id(), os_tls->app_lib_tls_reg, os_tls->app_alt_tls_reg);
#endif
LOG(THREAD_GET, LOG_THREADS, 1,
"thread " TIDFMT " app lib tls base: " PFX ", alt tls base: " PFX "\n",
d_r_get_thread_id(), os_tls->app_lib_tls_base, os_tls->app_alt_tls_base);
LOG(THREAD_GET, LOG_THREADS, 1,
"thread " TIDFMT " priv lib tls base: " PFX ", alt tls base: " PFX ", "
"DR's tls base: " PFX "\n",
d_r_get_thread_id(), os_tls->os_seg_info.priv_lib_tls_base,
os_tls->os_seg_info.priv_alt_tls_base, os_tls->os_seg_info.dr_tls_base);
}
void
os_tls_init(void)
{
#ifdef X86
ASSERT(TLS_MAGIC_OFFSET_ASM == TLS_MAGIC_OFFSET);
ASSERT(TLS_SELF_OFFSET_ASM == TLS_SELF_OFFSET);
#endif
#ifdef HAVE_TLS
/* We create a 1-page segment with an LDT entry for each thread and load its
* selector into fs/gs.
* FIXME PR 205276: this whole scheme currently does not check if the app is using
* segments itself; we need to watch the modify_ldt syscall.
*/
# ifdef MACOS64
/* Today we're allocating enough contiguous TLS slots to hold os_local_state_t.
* We also store a pointer to it in TLS slot 6.
*/
byte *segment = tls_get_dr_addr();
# else
byte *segment = heap_mmap(PAGE_SIZE, MEMPROT_READ | MEMPROT_WRITE, VMM_SPECIAL_MMAP);
# endif
os_local_state_t *os_tls = (os_local_state_t *)segment;
LOG(GLOBAL, LOG_THREADS, 1, "os_tls_init for thread " TIDFMT "\n",
d_r_get_thread_id());
ASSERT(!is_thread_tls_initialized());
/* MUST zero out dcontext slot so uninit access gets NULL */
memset(segment, 0, PAGE_SIZE);
/* store key data in the tls itself */
os_tls->self = os_tls;
os_tls->tid = get_sys_thread_id();
os_tls->tls_type = TLS_TYPE_NONE;
# ifdef X86
os_tls->magic = TLS_MAGIC_VALID;
# endif
/* We save DR's TLS segment base here so that os_get_dr_tls_base() will work
* even when -no_mangle_app_seg is set. If -mangle_app_seg is set, this
* will be overwritten in os_tls_app_seg_init().
*/
os_tls->os_seg_info.dr_tls_base = segment;
ASSERT(proc_is_cache_aligned(os_tls->self + TLS_LOCAL_STATE_OFFSET));
/* Verify that local_state_extended_t should indeed be used. */
ASSERT(DYNAMO_OPTION(ibl_table_in_tls));
/* initialize DR TLS seg base before replacing app's TLS in tls_thread_init */
if (MACHINE_TLS_IS_DR_TLS)
os_tls_app_seg_init(os_tls, segment);
tls_thread_init(os_tls, segment);
ASSERT(os_tls->tls_type != TLS_TYPE_NONE);
/* store type in global var for convenience: should be same for all threads */
tls_global_type = os_tls->tls_type;
/* FIXME: this should be a SYSLOG fatal error? Should fall back on !HAVE_TLS?
* Should have create_ldt_entry() return failure instead of asserting, then.
*/
#else
tls_table = (tls_slot_t *)global_heap_alloc(MAX_THREADS *
sizeof(tls_slot_t) HEAPACCT(ACCT_OTHER));
memset(tls_table, 0, MAX_THREADS * sizeof(tls_slot_t));
#endif
if (!first_thread_tls_initialized) {
first_thread_tls_initialized = true;
if (last_thread_tls_exited) /* re-attach */
last_thread_tls_exited = false;
}
ASSERT(is_thread_tls_initialized());
}
static bool
should_zero_tls_at_thread_exit()
{
#ifdef X86
/* i#2089: For a thread w/o CLONE_SIGHAND we cannot handle a fault, so we want to
* leave &uninit_tls (which was put in place in os_thread_exit()) in place for as
* long as possible. For non-detach, that means until the exit.
*/
return !INTERNAL_OPTION(safe_read_tls_init) || doing_detach;
#else
return true;
#endif
}
/* TLS exit for the current thread who must own local_state. */
void
os_tls_thread_exit(local_state_t *local_state)
{
#ifdef HAVE_TLS
/* We assume (assert below) that local_state_t's start == local_state_extended_t */
os_local_state_t *os_tls =
(os_local_state_t *)(((byte *)local_state) - offsetof(os_local_state_t, state));
tls_type_t tls_type = os_tls->tls_type;
int index = os_tls->ldt_index;
ASSERT(offsetof(local_state_t, spill_space) ==
offsetof(local_state_extended_t, spill_space));
if (should_zero_tls_at_thread_exit()) {
tls_thread_free(tls_type, index);
# if defined(X86) && defined(X64) && !defined(MACOS)
if (tls_type == TLS_TYPE_ARCH_PRCTL) {
/* syscall re-sets gs register so re-clear it */
if (read_thread_register(SEG_TLS) != 0) {
static const ptr_uint_t zero = 0;
WRITE_DR_SEG(zero); /* macro needs lvalue! */
}
}
# endif
}
/* We already set TLS to &uninit_tls in os_thread_exit() */
/* Do not set last_thread_tls_exited if a client_thread is exiting.
* If set, get_thread_private_dcontext() returns NULL, which may cause
* another thread to fault when using its dcontext.
*/
if (dynamo_exited_all_other_threads && !last_thread_tls_exited) {
last_thread_tls_exited = true;
first_thread_tls_initialized = false; /* for possible re-attach */
}
#endif
}
/* Frees local_state. If the calling thread is exiting (i.e.,
* !other_thread) then also frees kernel resources for the calling
* thread; if other_thread then that may not be possible.
*/
void
os_tls_exit(local_state_t *local_state, bool other_thread)
{
#ifdef HAVE_TLS
# if defined(X86) && !defined(MACOS64)
static const ptr_uint_t zero = 0;
# endif /* X86 */
/* We can't read from fs: as we can be called from other threads */
# if defined(X86) && !defined(MACOS64)
/* If the MSR is in use, writing to the reg faults. We rely on it being 0
* to indicate that.
*/
if (!other_thread && read_thread_register(SEG_TLS) != 0 &&
should_zero_tls_at_thread_exit()) {
WRITE_DR_SEG(zero); /* macro needs lvalue! */
}
# endif /* X86 */
/* For another thread we can't really make these syscalls so we have to
* leave it un-cleaned-up. That's fine if the other thread is exiting:
* but for detach (i#95) we get the other thread to run this code.
*/
if (!other_thread)
os_tls_thread_exit(local_state);
# ifndef MACOS64
/* We can't free prior to tls_thread_free() in case that routine refs os_tls */
/* ASSUMPTION: local_state_t is laid out at same start as local_state_extended_t */
os_local_state_t *os_tls =
(os_local_state_t *)(((byte *)local_state) - offsetof(os_local_state_t, state));
heap_munmap(os_tls->self, PAGE_SIZE, VMM_SPECIAL_MMAP);
# endif
#else
global_heap_free(tls_table, MAX_THREADS * sizeof(tls_slot_t) HEAPACCT(ACCT_OTHER));
DELETE_LOCK(tls_lock);
#endif
}
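/* Returns the GDT index used by this thread's TLS, or -1 if its TLS is not
* GDT-based.
*/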
static int
os_tls_get_gdt_index(dcontext_t *dcontext)
{
os_local_state_t *os_tls = (os_local_state_t *)(((byte *)dcontext->local_state) -
offsetof(os_local_state_t, state));
if (os_tls->tls_type == TLS_TYPE_GDT)
return os_tls->ldt_index;
else
return -1;
}
void
os_tls_pre_init(int gdt_index)
{
#if defined(X86) && !defined(MACOS64)
/* Only set to above 0 for tls_type == TLS_TYPE_GDT */
if (gdt_index > 0) {
/* PR 458917: clear gdt slot to avoid leak across exec */
DEBUG_DECLARE(bool ok;)
static const ptr_uint_t zero = 0;
/* Be sure to clear the selector before anything that might
* call get_thread_private_dcontext()
*/
WRITE_DR_SEG(zero); /* macro needs lvalue! */
DEBUG_DECLARE(ok =)
tls_clear_descriptor(gdt_index);
ASSERT(ok);
}
#elif defined(ARM)
/* FIXME i#1551: NYI on ARM */
ASSERT_NOT_IMPLEMENTED(false);
#endif /* X86/ARM */
}
#ifdef CLIENT_INTERFACE
/* Allocates num_slots TLS slots aligned to the requested alignment */
bool
os_tls_calloc(OUT uint *offset, uint num_slots, uint alignment)
{
bool res = false;
uint i, count = 0;
int start = -1;
uint offs = offsetof(os_local_state_t, client_tls);
if (num_slots == 0 || num_slots > MAX_NUM_CLIENT_TLS)
return false;
d_r_mutex_lock(&client_tls_lock);
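/* Linear scan for num_slots contiguous free slots; each candidate slot must
* also satisfy the requested alignment (alignment == 0 means no constraint).
*/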
for (i = 0; i < MAX_NUM_CLIENT_TLS; i++) {
if (!client_tls_allocated[i] &&
/* ALIGNED doesn't work for 0 */
(alignment == 0 || ALIGNED(offs + i * sizeof(void *), alignment))) {
if (start == -1)
start = i;
count++;
if (count >= num_slots)
break;
} else {
start = -1;
count = 0;
}
}
if (count >= num_slots) {
for (i = 0; i < num_slots; i++)
client_tls_allocated[i + start] = true;
*offset = offs + start * sizeof(void *);
res = true;
}
d_r_mutex_unlock(&client_tls_lock);
return res;
}
bool
os_tls_cfree(uint offset, uint num_slots)
{
uint i;
uint offs = (offset - offsetof(os_local_state_t, client_tls)) / sizeof(void *);
bool ok = true;
d_r_mutex_lock(&client_tls_lock);
for (i = 0; i < num_slots; i++) {
if (!client_tls_allocated[i + offs])
ok = false;
client_tls_allocated[i + offs] = false;
}
d_r_mutex_unlock(&client_tls_lock);
return ok;
}
#endif
/* os_data is a clone_record_t for signal_thread_inherit */
void
os_thread_init(dcontext_t *dcontext, void *os_data)
{
os_local_state_t *os_tls = get_os_tls();
os_thread_data_t *ostd = (os_thread_data_t *)heap_alloc(
dcontext, sizeof(os_thread_data_t) HEAPACCT(ACCT_OTHER));
dcontext->os_field = (void *)ostd;
/* make sure stack fields, etc. are 0 now so they can be initialized on demand
* (don't have app esp register handy here to init now)
*/
memset(ostd, 0, sizeof(*ostd));
ksynch_init_var(&ostd->suspended);
ksynch_init_var(&ostd->wakeup);
ksynch_init_var(&ostd->resumed);
ksynch_init_var(&ostd->terminated);
ksynch_init_var(&ostd->detached);
#ifdef RETURN_AFTER_CALL
/* We only need the stack bottom for the initial thread, and due to thread
* init now preceding vm_areas_init(), we initialize in find_executable_vm_areas()
*/
ostd->stack_bottom_pc = NULL;
#endif
ASSIGN_INIT_LOCK_FREE(ostd->suspend_lock, suspend_lock);
signal_thread_init(dcontext, os_data);
/* i#107: initialize thread area information;
* the values were first retrieved in os_tls_init and stored in os_tls.
*/
ostd->priv_lib_tls_base = os_tls->os_seg_info.priv_lib_tls_base;
ostd->priv_alt_tls_base = os_tls->os_seg_info.priv_alt_tls_base;
ostd->dr_tls_base = os_tls->os_seg_info.dr_tls_base;
LOG(THREAD, LOG_THREADS, 1, "TLS app lib base =" PFX "\n", os_tls->app_lib_tls_base);
LOG(THREAD, LOG_THREADS, 1, "TLS app alt base =" PFX "\n", os_tls->app_alt_tls_base);
LOG(THREAD, LOG_THREADS, 1, "TLS priv lib base =" PFX "\n", ostd->priv_lib_tls_base);
LOG(THREAD, LOG_THREADS, 1, "TLS priv alt base =" PFX "\n", ostd->priv_alt_tls_base);
LOG(THREAD, LOG_THREADS, 1, "TLS DynamoRIO base=" PFX "\n", ostd->dr_tls_base);
#ifdef X86
if (INTERNAL_OPTION(mangle_app_seg)) {
ostd->app_thread_areas = heap_alloc(
dcontext, sizeof(our_modify_ldt_t) * GDT_NUM_TLS_SLOTS HEAPACCT(ACCT_OTHER));
memcpy(ostd->app_thread_areas, os_tls->os_seg_info.app_thread_areas,
sizeof(our_modify_ldt_t) * GDT_NUM_TLS_SLOTS);
}
#endif
LOG(THREAD, LOG_THREADS, 1, "post-TLS-setup, cur %s base is " PFX "\n",
IF_X86_ELSE("gs", "tpidruro"),
get_segment_base(IF_X86_ELSE(SEG_GS, DR_REG_TPIDRURO)));
LOG(THREAD, LOG_THREADS, 1, "post-TLS-setup, cur %s base is " PFX "\n",
IF_X86_ELSE("fs", "tpidrurw"),
get_segment_base(IF_X86_ELSE(SEG_FS, DR_REG_TPIDRURW)));
#ifdef MACOS
/* XXX: do we need to free/close dcontext->thread_port? I don't think so. */
dcontext->thread_port = dynamorio_mach_syscall(MACH_thread_self_trap, 0);
LOG(THREAD, LOG_ALL, 1, "Mach thread port: %d\n", dcontext->thread_port);
#endif
}
/* os_data is a clone_record_t for signal_thread_inherit */
void
os_thread_init_finalize(dcontext_t *dcontext, void *os_data)
{
/* We do not want to record pending signals until at least synch_thread_init()
* is finished so we delay until here: but we need this inside the
* thread_initexit_lock (i#2779).
*/
signal_thread_inherit(dcontext, os_data);
}
void
os_thread_exit(dcontext_t *dcontext, bool other_thread)
{
os_thread_data_t *ostd = (os_thread_data_t *)dcontext->os_field;
/* i#237/PR 498284: if we had a vfork child call execve we need to clean up
* the env vars.
*/
if (dcontext->thread_record->execve)
handle_execve_post(dcontext);
DELETE_LOCK(ostd->suspend_lock);
signal_thread_exit(dcontext, other_thread);
ksynch_free_var(&ostd->suspended);
ksynch_free_var(&ostd->wakeup);
ksynch_free_var(&ostd->resumed);
ksynch_free_var(&ostd->terminated);
ksynch_free_var(&ostd->detached);
#ifdef X86
if (ostd->clone_tls != NULL) {
if (!other_thread) {
/* Avoid faults in is_thread_tls_initialized() */
/* FIXME i#2088: we need to restore the app's aux seg, if any, instead. */
os_set_dr_tls_base(dcontext, NULL, (byte *)&uninit_tls);
}
DODEBUG({
HEAP_TYPE_FREE(dcontext, ostd->clone_tls, os_local_state_t, ACCT_THREAD_MGT,
UNPROTECTED);
});
}
#endif
/* for non-debug we do fast exit path and don't free local heap */
DODEBUG({
if (MACHINE_TLS_IS_DR_TLS) {
#ifdef X86
heap_free(dcontext, ostd->app_thread_areas,
sizeof(our_modify_ldt_t) * GDT_NUM_TLS_SLOTS HEAPACCT(ACCT_OTHER));
#endif
#ifdef CLIENT_INTERFACE
if (INTERNAL_OPTION(private_loader))
privload_tls_exit(IF_UNIT_TEST_ELSE(NULL, ostd->priv_lib_tls_base));
#endif
}
heap_free(dcontext, ostd, sizeof(os_thread_data_t) HEAPACCT(ACCT_OTHER));
});
}
/* Happens in the parent prior to fork. */
static void
os_fork_pre(dcontext_t *dcontext)
{
os_thread_data_t *ostd = (os_thread_data_t *)dcontext->os_field;
/* Otherwise a thread might wait for us. */
ASSERT_OWN_NO_LOCKS();
ASSERT(ostd->fork_threads == NULL && ostd->fork_num_threads == 0);
/* i#239: Synch with all other threads to ensure that they are holding no
* locks across the fork.
* FIXME i#26: Suspend signals received before initializing siginfo are
* squelched, so we won't be able to suspend threads that are initializing.
*/
LOG(GLOBAL, 2, LOG_SYSCALLS | LOG_THREADS,
"fork: synching with other threads to prevent deadlock in child\n");
if (!synch_with_all_threads(THREAD_SYNCH_SUSPENDED_VALID_MCONTEXT_OR_NO_XFER,
&ostd->fork_threads, &ostd->fork_num_threads,
THREAD_SYNCH_VALID_MCONTEXT,
/* If we fail to suspend a thread, there is a
* risk of deadlock in the child, so it's worth
* retrying on failure.
*/
THREAD_SYNCH_SUSPEND_FAILURE_RETRY)) {
/* If we failed to synch with all threads, we live with the possibility
* of deadlock and continue as normal.
*/
LOG(GLOBAL, 1, LOG_SYSCALLS | LOG_THREADS,
"fork: synch failed, possible deadlock in child\n");
ASSERT_CURIOSITY(false);
}
vmm_heap_fork_pre(dcontext);
/* We go back to the code cache to execute the syscall, so we can't hold
* locks. If the synch succeeded, no one else is running, so it should be
* safe to release these locks. However, if there are any rogue threads,
* then releasing these locks will allow them to synch and create threads.
* Such threads could be running due to synch failure or presence of
* non-suspendable client threads. We keep our data in ostd to prevent some
* conflicts, but there are some unhandled corner cases.
*/
d_r_mutex_unlock(&thread_initexit_lock);
d_r_mutex_unlock(&all_threads_synch_lock);
}
/* Happens after the fork in both the parent and child. */
static void
os_fork_post(dcontext_t *dcontext, bool parent)
{
os_thread_data_t *ostd = (os_thread_data_t *)dcontext->os_field;
/* Re-acquire the locks we released before the fork. */
d_r_mutex_lock(&all_threads_synch_lock);
d_r_mutex_lock(&thread_initexit_lock);
/* Resume the other threads that we suspended. */
if (parent) {
LOG(GLOBAL, 2, LOG_SYSCALLS | LOG_THREADS,
"fork: resuming other threads after fork\n");
}
end_synch_with_all_threads(ostd->fork_threads, ostd->fork_num_threads,
parent /*resume in parent, not in child*/);
ostd->fork_threads = NULL; /* Freed by end_synch_with_all_threads. */
ostd->fork_num_threads = 0;
vmm_heap_fork_post(dcontext, parent);
}
/* this one is called before child's new logfiles are set up */
void
os_fork_init(dcontext_t *dcontext)
{
int iter;
/* We use a larger data size than file_t to avoid clobbering our stack (i#991) */
ptr_uint_t fd;
ptr_uint_t flags;
/* Static assert would save debug build overhead: could use array bound trick */
ASSERT(sizeof(file_t) <= sizeof(ptr_uint_t));
/* i#239: If there were unsuspended threads across the fork, we could have
* forked while another thread held locks. We reset the locks and try to
* cope with any intermediate state left behind from the parent. If we
* encounter more deadlocks after fork, we can add more lock and data resets
* on a case by case basis.
*/
d_r_mutex_fork_reset(&all_threads_synch_lock);
d_r_mutex_fork_reset(&thread_initexit_lock);
os_fork_post(dcontext, false /*!parent*/);
/* re-populate cached data that contains pid */
pid_cached = get_process_id();
get_application_pid_helper(true);
get_application_name_helper(true, true /* not important */);
/* close all copies of parent files */
TABLE_RWLOCK(fd_table, write, lock);
iter = 0;
do {
iter = generic_hash_iterate_next(GLOBAL_DCONTEXT, fd_table, iter, &fd,
(void **)&flags);
if (iter < 0)
break;
if (TEST(OS_OPEN_CLOSE_ON_FORK, flags)) {
close_syscall((file_t)fd);
iter = generic_hash_iterate_remove(GLOBAL_DCONTEXT, fd_table, iter, fd);
}
} while (true);
TABLE_RWLOCK(fd_table, write, unlock);
}
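/* i#2089: swaps DR's own TLS segment between the thread's real TLS and a private
* copy used across clone, so that a not-yet-scheduled child never sees a valid
* magic. No-op unless -safe_read_tls_init (and on non-x86).
*/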
static void
os_swap_dr_tls(dcontext_t *dcontext, bool to_app)
{
#ifdef X86
/* If the option is off, we really should swap it (xref i#107/i#2088 comments
* in os_swap_context()) but there are few consequences of not doing it, and we
* have no code set up separate from the i#2089 scheme here.
*/
if (!INTERNAL_OPTION(safe_read_tls_init))
return;
if (to_app) {
/* i#2089: we want the child to inherit a TLS with invalid .magic, but we
* need our own syscall execution and post-syscall code to have valid scratch
* and dcontext values. We can't clear our own magic b/c we don't know when
* the child will be scheduled, so we use a copy of our TLS. We carefully
* never have a valid magic there in case a prior child is still unscheduled.
*
* We assume the child will not modify this TLS copy in any way.
* CLONE_SETTLS touches the other segment (we'll have to watch for
* addition of CLONE_SETTLS_AUX). The parent will use the scratch space
* returning from the syscall to d_r_dispatch, but we restore via os_clone_post()
* immediately before anybody calls get_thread_private_dcontext() or
* anything.
*/
/* FIXME i#2088: to preserve the app's aux seg, if any, we should pass it
* and the seg reg value via the clone record (like we do for ARM today).
*/
os_thread_data_t *ostd = (os_thread_data_t *)dcontext->os_field;
os_local_state_t *cur_tls = get_os_tls_from_dc(dcontext);
if (ostd->clone_tls == NULL) {
ostd->clone_tls = (os_local_state_t *)HEAP_TYPE_ALLOC(
dcontext, os_local_state_t, ACCT_THREAD_MGT, UNPROTECTED);
LOG(THREAD, LOG_THREADS, 2, "TLS copy is " PFX "\n", ostd->clone_tls);
}
/* Leave no window where a prior uninit child could read valid magic by
* invalidating prior to copying.
*/
cur_tls->magic = TLS_MAGIC_INVALID;
memcpy(ostd->clone_tls, cur_tls, sizeof(*ostd->clone_tls));
cur_tls->magic = TLS_MAGIC_VALID;
ostd->clone_tls->self = ostd->clone_tls;
os_set_dr_tls_base(dcontext, NULL, (byte *)ostd->clone_tls);
} else {
/* i#2089: restore the parent's DR TLS */
os_local_state_t *real_tls = get_os_tls_from_dc(dcontext);
/* For dr_app_start we can end up here with nothing to do, so we check. */
if (get_segment_base(SEG_TLS) != (byte *)real_tls) {
DEBUG_DECLARE(os_thread_data_t *ostd =
(os_thread_data_t *)dcontext->os_field);
ASSERT(get_segment_base(SEG_TLS) == (byte *)ostd->clone_tls);
/* We assume there's no need to copy the scratch slots back */
os_set_dr_tls_base(dcontext, real_tls, (byte *)real_tls);
}
}
#endif
}
static void
os_new_thread_pre(void)
{
/* We use a barrier on new threads to ensure we make progress when
* attaching to an app that is continually making threads.
* XXX i#1305: if we fully suspend all threads during attach we can
* get rid of this barrier.
*/
wait_for_event(dr_attach_finished, 0);
ATOMIC_INC(int, uninit_thread_count);
}
/* This is called from pre_system_call() and before cloning a client thread in
* dr_create_client_thread. Hence os_clone_pre is used for app threads as well
* as client threads. Do not add anything that we do not want to happen while
* in DR mode.
*/
static void
os_clone_pre(dcontext_t *dcontext)
{
/* We switch the lib tls segment back to app's segment.
* Please refer to comment on os_switch_lib_tls.
*/
if (IF_CLIENT_INTERFACE_ELSE(INTERNAL_OPTION(private_loader), false)) {
os_switch_lib_tls(dcontext, true /*to app*/);
}
os_swap_dr_tls(dcontext, true /*to app*/);
}
/* This is called from d_r_dispatch prior to post_system_call() and after
* cloning a client thread in dr_create_client_thread. Hence os_clone_post is
* used for app threads as well as client threads. Do not add anything that
* we do not want to happen while in DR mode.
*/
void
os_clone_post(dcontext_t *dcontext)
{
os_swap_dr_tls(dcontext, false /*to DR*/);
}
byte *
os_get_dr_tls_base(dcontext_t *dcontext)
{
os_thread_data_t *ostd = (os_thread_data_t *)dcontext->os_field;
return ostd->dr_tls_base;
}
/* We only bother swapping the library segment if we're using the private
* loader.
*/
bool
os_should_swap_state(void)
{
#ifdef X86
/* -private_loader currently implies -mangle_app_seg, but let's be safe. */
return (INTERNAL_OPTION(mangle_app_seg) &&
IF_CLIENT_INTERFACE_ELSE(INTERNAL_OPTION(private_loader), false));
#elif defined(AARCHXX)
/* FIXME i#1582: this should return true, but there is a lot of complexity
* getting os_switch_seg_to_context() to do the right thing when called
* at main thread init, secondary thread init, early and late injection,
* and thread exit, since it is fragile with its writes to app TLS.
*/
return false;
#endif
}
bool
os_using_app_state(dcontext_t *dcontext)
{
#ifdef X86
/* FIXME: This could be optimized to avoid the syscall by keeping state in
* the dcontext.
*/
if (INTERNAL_OPTION(mangle_app_seg)) {
return (get_segment_base(TLS_REG_LIB) ==
os_get_app_tls_base(dcontext, TLS_REG_LIB));
}
#endif
/* We're always in the app state if we're not mangling. */
return true;
}
/* Similar to PEB swapping on Windows, this call will switch between DR's
* private lib segment base and the app's segment base.
* i#107/i#2088: If the app wants to use SEG_TLS, we should also switch that back at
* this boundary, but there are many places where we simply assume it is always
* installed.
*/
void
os_swap_context(dcontext_t *dcontext, bool to_app, dr_state_flags_t flags)
{
if (os_should_swap_state())
os_switch_seg_to_context(dcontext, LIB_SEG_TLS, to_app);
if (TEST(DR_STATE_DR_TLS, flags))
os_swap_dr_tls(dcontext, to_app);
}
void
os_swap_context_go_native(dcontext_t *dcontext, dr_state_flags_t flags)
{
#ifdef AARCHXX
/* FIXME i#1582: remove this routine once os_should_swap_state()
* is not disabled and we can actually call
* os_swap_context_go_native() safely from multiple places.
*/
os_switch_seg_to_context(dcontext, LIB_SEG_TLS, true /*to app*/);
#else
os_swap_context(dcontext, true /*to app*/, flags);
#endif
}
void
os_thread_under_dynamo(dcontext_t *dcontext)
{
os_swap_context(dcontext, false /*to dr*/, DR_STATE_GO_NATIVE);
signal_swap_mask(dcontext, false /*to dr*/);
start_itimer(dcontext);
}
void
os_thread_not_under_dynamo(dcontext_t *dcontext)
{
stop_itimer(dcontext);
signal_swap_mask(dcontext, true /*to app*/);
os_swap_context(dcontext, true /*to app*/, DR_STATE_GO_NATIVE);
}
void
os_process_under_dynamorio_initiate(dcontext_t *dcontext)
{
LOG(GLOBAL, LOG_THREADS, 1, "process now under DR\n");
/* We only support regular process-wide signal handlers for delayed takeover. */
/* i#2161: we ignore alarm signals during the attach process to avoid races. */
signal_reinstate_handlers(dcontext, true /*ignore alarm*/);
/* XXX: there's a tradeoff here: we have a race when we remove the hook
* because dr_app_stop() has no barrier and a thread sent native might
* resume from vsyscall after we remove the hook. However, if we leave the
* hook, then the next takeover signal might hit a native thread that's
* inside DR just to go back native after having hit the hook. For now we
* remove the hook and rely on translate_from_synchall_to_dispatch() moving
* threads from vsyscall to our gencode and not relying on the hook being
* present to finish up their go-native code.
*/
hook_vsyscall(dcontext, false);
}
void
os_process_under_dynamorio_complete(dcontext_t *dcontext)
{
/* i#2161: only now do we un-ignore alarm signals. */
signal_reinstate_alarm_handlers(dcontext);
IF_NO_MEMQUERY({
/* Update the memory cache (i#2037) now that we've taken over all the
* threads, if there may have been a gap between setup and start.
*/
if (dr_api_entry)
memcache_update_all_from_os();
});
}
void
os_process_not_under_dynamorio(dcontext_t *dcontext)
{
/* We only support regular process-wide signal handlers for mixed-mode control. */
signal_remove_handlers(dcontext);
unhook_vsyscall();
LOG(GLOBAL, LOG_THREADS, 1, "process no longer under DR\n");
}
bool
detach_do_not_translate(thread_record_t *tr)
{
return false;
}
void
detach_finalize_translation(thread_record_t *tr, priv_mcontext_t *mc)
{
/* Nothing to do. */
}
void
detach_finalize_cleanup(void)
{
/* Nothing to do. */
}
static pid_t
get_process_group_id()
{
return dynamorio_syscall(SYS_getpgid, 0);
}
process_id_t
get_parent_id(void)
{
return dynamorio_syscall(SYS_getppid, 0);
}
thread_id_t
get_sys_thread_id(void)
{
#ifdef MACOS
if (kernel_thread_groups)
return dynamorio_syscall(SYS_thread_selfid, 0);
#else
if (kernel_thread_groups)
return dynamorio_syscall(SYS_gettid, 0);
#endif
return dynamorio_syscall(SYS_getpid, 0);
}
thread_id_t
d_r_get_thread_id(void)
{
/* i#228/PR 494330: making a syscall here is a perf bottleneck since we call
* this routine in read and recursive locks so use the TLS value instead
*/
thread_id_t id = get_tls_thread_id();
if (id != INVALID_THREAD_ID)
return id;
else
return get_sys_thread_id();
}
thread_id_t
get_tls_thread_id(void)
{
ptr_int_t tid; /* can't use thread_id_t since it's 32-bits */
if (!is_thread_tls_initialized())
return INVALID_THREAD_ID;
READ_TLS_SLOT_IMM(TLS_THREAD_ID_OFFSET, tid);
/* it reads 8 bytes of memory, which includes app_gs and app_fs:
* 0x000000007127357b <get_tls_thread_id+37>: mov %gs:(%rax),%rax
* 0x000000007127357f <get_tls_thread_id+41>: mov %rax,-0x8(%rbp)
* so we remove the TRUNCATE check and truncate it on return.
*/
return (thread_id_t)tid;
}
/* returns the thread-private dcontext pointer for the calling thread */
dcontext_t *
get_thread_private_dcontext(void)
{
#ifdef HAVE_TLS
dcontext_t *dcontext;
/* We have to check this b/c this is called from __errno_location prior
* to os_tls_init, as well as after os_tls_exit, and early in a new
* thread's initialization (see comments below on that).
*/
if (!is_thread_tls_initialized())
return (IF_CLIENT_INTERFACE(standalone_library ? GLOBAL_DCONTEXT :) NULL);
/* We used to check tid and return NULL to distinguish parent from child, but
* that was affecting performance (xref PR 207366: but I'm leaving the assert in
* for now so debug build will still incur it). So we fixed the cases that
* needed that:
*
* - dynamo_thread_init() calling is_thread_initialized() for a new thread
* created via clone or the start/stop interface: so we have
* is_thread_initialized() pay the d_r_get_thread_id() cost.
* - new_thread_setup()'s ENTER_DR_HOOK kstats, or a crash and the signal
* handler asking about dcontext: we have new_thread_dynamo_start()
* clear the segment register for us early on.
* - child of fork (ASSERT_OWN_NO_LOCKS, etc. on re-entering DR):
* here we just suppress the assert: we'll use this same dcontext.
* xref PR 209518 where w/o this fix we used to need an extra KSTOP.
*
* An alternative would be to have the parent thread clear the segment
* register, or even set up the child's TLS ahead of time ourselves
* (and special-case so that we know if at clone syscall the app state is not
* quite correct: but we're already stealing a register there: PR 286194).
* We could also have the kernel set up TLS for us (PR 285898).
*
* For hotp_only or non-full-control (native_exec, e.g.) (PR 212012), this
* routine is not the only issue: we have to catch all new threads since
* hotp_only gateways assume tls is set up.
* Xref PR 192231.
*/
/* PR 307698: this assert causes large slowdowns (also xref PR 207366) */
DOCHECK(CHKLVL_DEFAULT + 1, {
ASSERT(get_tls_thread_id() == get_sys_thread_id() ||
/* ok for fork as mentioned above */
pid_cached != get_process_id());
});
READ_TLS_SLOT_IMM(TLS_DCONTEXT_OFFSET, dcontext);
return dcontext;
#else
/* Assumption: no lock needed on a read => no race conditions between
* reading and writing same tid! Since both get and set are only for
* the current thread, they cannot both execute simultaneously for the
* same tid, right?
*/
thread_id_t tid = d_r_get_thread_id();
int i;
if (tls_table != NULL) {
for (i = 0; i < MAX_THREADS; i++) {
if (tls_table[i].tid == tid) {
return tls_table[i].dcontext;
}
}
}
return NULL;
#endif
}
/* sets the thread-private dcontext pointer for the calling thread */
void
set_thread_private_dcontext(dcontext_t *dcontext)
{
#ifdef HAVE_TLS
ASSERT(is_thread_tls_allocated());
WRITE_TLS_SLOT_IMM(TLS_DCONTEXT_OFFSET, dcontext);
#else
thread_id_t tid = d_r_get_thread_id();
int i;
bool found = false;
ASSERT(tls_table != NULL);
d_r_mutex_lock(&tls_lock);
for (i = 0; i < MAX_THREADS; i++) {
if (tls_table[i].tid == tid) {
if (dcontext == NULL) {
/* if setting to NULL, clear the entire slot for reuse */
tls_table[i].tid = 0;
}
tls_table[i].dcontext = dcontext;
found = true;
break;
}
}
if (!found) {
if (dcontext == NULL) {
/* don't do anything...but why would this happen? */
} else {
/* look for an empty slot */
for (i = 0; i < MAX_THREADS; i++) {
if (tls_table[i].tid == 0) {
tls_table[i].tid = tid;
tls_table[i].dcontext = dcontext;
found = true;
break;
}
}
}
}
d_r_mutex_unlock(&tls_lock);
ASSERT(found);
#endif
}
/* replaces old with new
* use for forking: child should replace parent's id with its own
*/
static void
replace_thread_id(thread_id_t old, thread_id_t new)
{
#ifdef HAVE_TLS
thread_id_t new_tid = new;
ASSERT(is_thread_tls_initialized());
DOCHECK(1, {
thread_id_t old_tid;
IF_LINUX_ELSE(READ_TLS_INT_SLOT_IMM(TLS_THREAD_ID_OFFSET, old_tid),
READ_TLS_SLOT_IMM(TLS_THREAD_ID_OFFSET, old_tid));
ASSERT(old_tid == old);
});
IF_LINUX_ELSE(WRITE_TLS_INT_SLOT_IMM(TLS_THREAD_ID_OFFSET, new_tid),
WRITE_TLS_SLOT_IMM(TLS_THREAD_ID_OFFSET, new_tid));
#else
int i;
d_r_mutex_lock(&tls_lock);
for (i = 0; i < MAX_THREADS; i++) {
if (tls_table[i].tid == old) {
tls_table[i].tid = new;
break;
}
}
d_r_mutex_unlock(&tls_lock);
#endif
}
/* translate native flags to platform independent protection bits */
static inline uint
osprot_to_memprot(uint prot)
{
uint mem_prot = 0;
if (TEST(PROT_EXEC, prot))
mem_prot |= MEMPROT_EXEC;
if (TEST(PROT_READ, prot))
mem_prot |= MEMPROT_READ;
if (TEST(PROT_WRITE, prot))
mem_prot |= MEMPROT_WRITE;
return mem_prot;
}
/* returns osprot flags preserving all native protection flags except
* for RWX, which are replaced according to memprot */
uint
osprot_replace_memprot(uint old_osprot, uint memprot)
{
/* Note only protection flags PROT_ are relevant to mprotect()
* and they are separate from any other MAP_ flags passed to mmap()
*/
uint new_osprot = memprot_to_osprot(memprot);
return new_osprot;
}
/* libc independence */
static inline long
mprotect_syscall(byte *p, size_t size, uint prot)
{
return dynamorio_syscall(SYS_mprotect, 3, p, size, prot);
}
/* free memory allocated from os_raw_mem_alloc */
bool
os_raw_mem_free(void *p, size_t size, uint flags, heap_error_code_t *error_code)
{
long rc;
ASSERT(error_code != NULL);
ASSERT(size > 0 && ALIGNED(size, PAGE_SIZE));
rc = munmap_syscall(p, size);
if (rc != 0) {
*error_code = -rc;
} else {
*error_code = HEAP_ERROR_SUCCESS;
}
return (rc == 0);
}
/* try to alloc memory at preferred from os directly,
* caller is required to handle thread synchronization and to update dynamo vm areas
*/
void *
os_raw_mem_alloc(void *preferred, size_t size, uint prot, uint flags,
heap_error_code_t *error_code)
{
byte *p;
uint os_prot = memprot_to_osprot(prot);
uint os_flags =
MAP_PRIVATE | MAP_ANONYMOUS | (TEST(RAW_ALLOC_32BIT, flags) ? MAP_32BIT : 0);
ASSERT(error_code != NULL);
/* should only be used on aligned pieces */
ASSERT(size > 0 && ALIGNED(size, PAGE_SIZE));
p = mmap_syscall(preferred, size, os_prot, os_flags, -1, 0);
if (!mmap_syscall_succeeded(p)) {
*error_code = -(heap_error_code_t)(ptr_int_t)p;
LOG(GLOBAL, LOG_HEAP, 3, "os_raw_mem_alloc %d bytes failed" PFX "\n", size, p);
return NULL;
}
if (preferred != NULL && p != preferred) {
*error_code = HEAP_ERROR_NOT_AT_PREFERRED;
os_raw_mem_free(p, size, flags, error_code);
LOG(GLOBAL, LOG_HEAP, 3, "os_raw_mem_alloc %d bytes failed" PFX "\n", size, p);
return NULL;
}
LOG(GLOBAL, LOG_HEAP, 2, "os_raw_mem_alloc: " SZFMT " bytes @ " PFX "\n", size, p);
return p;
}
#ifdef LINUX
void
init_emulated_brk(app_pc exe_end)
{
ASSERT(DYNAMO_OPTION(emulate_brk));
if (app_brk_map != NULL)
return;
/* i#1004: emulate brk via a separate mmap.
* The real brk starts out empty, but we need at least a page to have an
* mmap placeholder.
*/
app_brk_map = mmap_syscall(exe_end, PAGE_SIZE, PROT_READ | PROT_WRITE,
MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
ASSERT(mmap_syscall_succeeded(app_brk_map));
app_brk_cur = app_brk_map;
app_brk_end = app_brk_map + PAGE_SIZE;
}
static byte *
emulate_app_brk(dcontext_t *dcontext, byte *new_val)
{
byte *old_brk = app_brk_cur;
ASSERT(DYNAMO_OPTION(emulate_brk));
LOG(THREAD, LOG_HEAP, 2, "%s: cur=" PFX ", requested=" PFX "\n", __FUNCTION__,
app_brk_cur, new_val);
new_val = (byte *)ALIGN_FORWARD(new_val, PAGE_SIZE);
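/* Cases below: a NULL or unchanged request, or one below the original base =>
* return the current value; shrink => munmap the tail; still within the
* already-mapped region => just move the cursor; otherwise grow in place via
* mremap.
*/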
if (new_val == NULL || new_val == app_brk_cur ||
/* Not allowed to shrink below original base */
new_val < app_brk_map) {
/* Just return cur val */
} else if (new_val < app_brk_cur) {
/* Shrink */
if (munmap_syscall(new_val, app_brk_cur - new_val) == 0) {
app_brk_cur = new_val;
app_brk_end = new_val;
}
} else if (new_val < app_brk_end) {
/* We've already allocated the space */
app_brk_cur = new_val;
} else {
/* Expand */
byte *remap = (byte *)dynamorio_syscall(SYS_mremap, 4, app_brk_map,
app_brk_end - app_brk_map,
new_val - app_brk_map, 0 /*do not move*/);
if (mmap_syscall_succeeded(remap)) {
ASSERT(remap == app_brk_map);
app_brk_cur = new_val;
app_brk_end = new_val;
} else {
LOG(THREAD, LOG_HEAP, 1, "%s: mremap to " PFX " failed\n", __FUNCTION__,
new_val);
}
}
if (app_brk_cur != old_brk)
handle_app_brk(dcontext, app_brk_map, old_brk, app_brk_cur);
return app_brk_cur;
}
#endif /* LINUX */
#if defined(CLIENT_INTERFACE) && defined(LINUX)
DR_API
/* XXX: could add dr_raw_mem_realloc() instead of dr_raw_mremap() -- though there
* is no realloc for Windows: supposed to reserve yourself and then commit in
* pieces.
*/
void *
dr_raw_mremap(void *old_address, size_t old_size, size_t new_size, int flags,
void *new_address)
{
byte *res;
dr_mem_info_t info;
dcontext_t *dcontext = get_thread_private_dcontext();
/* i#173: we need prot + type from prior to mremap */
DEBUG_DECLARE(bool ok =)
query_memory_ex(old_address, &info);
/* XXX: this could be a large region w/ multiple protection regions
* inside. For now we assume our handling of it doesn't care.
*/
ASSERT(ok);
if (is_pretend_or_executable_writable(old_address))
info.prot |= DR_MEMPROT_WRITE;
/* we just unconditionally send the 5th param */
res = (byte *)dynamorio_syscall(SYS_mremap, 5, old_address, old_size, new_size, flags,
new_address);
handle_app_mremap(dcontext, res, new_size, old_address, old_size, info.prot,
info.size);
return res;
}
DR_API
void *
dr_raw_brk(void *new_address)
{
dcontext_t *dcontext = get_thread_private_dcontext();
if (DYNAMO_OPTION(emulate_brk)) {
/* i#1004: emulate brk via a separate mmap */
return (void *)emulate_app_brk(dcontext, (byte *)new_address);
} else {
/* We pay the cost of 2 syscalls. This should be infrequent enough that
* it doesn't matter.
*/
if (new_address == NULL) {
/* Just a query */
return (void *)dynamorio_syscall(SYS_brk, 1, new_address);
} else {
byte *old_brk = (byte *)dynamorio_syscall(SYS_brk, 1, 0);
byte *res = (byte *)dynamorio_syscall(SYS_brk, 1, new_address);
handle_app_brk(dcontext, NULL, old_brk, res);
return res;
}
}
}
#endif /* CLIENT_INTERFACE && LINUX */
/* caller is required to handle thread synchronization and to update dynamo vm areas */
void
os_heap_free(void *p, size_t size, heap_error_code_t *error_code)
{
long rc;
ASSERT(error_code != NULL);
if (!dynamo_exited)
LOG(GLOBAL, LOG_HEAP, 4, "os_heap_free: %d bytes @ " PFX "\n", size, p);
rc = munmap_syscall(p, size);
if (rc != 0) {
*error_code = -rc;
} else {
*error_code = HEAP_ERROR_SUCCESS;
}
ASSERT(rc == 0);
}
/* reserve virtual address space without committing swap space for it,
and of course no physical pages since it will never be touched */
/* to be transparent, we do not use sbrk, and are
* instead using mmap, and asserting that all os_heap requests are for
* reasonably large pieces of memory */
void *
os_heap_reserve(void *preferred, size_t size, heap_error_code_t *error_code,
bool executable)
{
void *p;
uint prot = PROT_NONE;
#ifdef VMX86_SERVER
/* PR 365331: we need to be in the mmap_text region for code cache and
* gencode (PROT_EXEC).
*/
ASSERT(!os_in_vmkernel_userworld() || !executable || preferred == NULL ||
((byte *)preferred >= os_vmk_mmap_text_start() &&
((byte *)preferred) + size <= os_vmk_mmap_text_end()));
/* Note that a preferred address overrides PROT_EXEC and a mmap_data
* address will be honored, even though any execution there will fault.
*/
/* FIXME: note that PROT_EXEC => read access, so our guard pages and other
* non-committed memory, while not writable, is readable.
* Plus, we can't later clear all prot bits for userworld mmap due to PR 107872
* (PR 365748 covers fixing this for us).
* But in most uses we should get our preferred vmheap and shouldn't run
* out of vmheap, so this should be a corner-case issue.
*/
if (executable)
prot = PROT_EXEC;
#endif
/* should only be used on aligned pieces */
ASSERT(size > 0 && ALIGNED(size, PAGE_SIZE));
ASSERT(error_code != NULL);
/* FIXME: note that this memory is in fact still committed - see man mmap */
/* FIXME: case 2347 on Linux or -vm_reserve should be set to false */
/* FIXME: Need to actually get a mmap-ing with |MAP_NORESERVE */
p = mmap_syscall(
preferred, size, prot,
MAP_PRIVATE |
MAP_ANONYMOUS IF_X64(| (DYNAMO_OPTION(heap_in_lower_4GB) ? MAP_32BIT : 0)),
-1, 0);
if (!mmap_syscall_succeeded(p)) {
*error_code = -(heap_error_code_t)(ptr_int_t)p;
LOG(GLOBAL, LOG_HEAP, 4, "os_heap_reserve %d bytes failed " PFX "\n", size, p);
return NULL;
} else if (preferred != NULL && p != preferred) {
/* We didn't get the preferred address. To harmonize with windows behavior and
* give greater control we fail the reservation. */
heap_error_code_t dummy;
*error_code = HEAP_ERROR_NOT_AT_PREFERRED;
os_heap_free(p, size, &dummy);
ASSERT(dummy == HEAP_ERROR_SUCCESS);
LOG(GLOBAL, LOG_HEAP, 4,
"os_heap_reserve %d bytes at " PFX " not preferred " PFX "\n", size,
preferred, p);
return NULL;
} else {
*error_code = HEAP_ERROR_SUCCESS;
}
LOG(GLOBAL, LOG_HEAP, 2, "os_heap_reserve: %d bytes @ " PFX "\n", size, p);
#ifdef VMX86_SERVER
/* PR 365331: ensure our memory is all in the mmap_text region */
ASSERT(!os_in_vmkernel_userworld() || !executable ||
((byte *)p >= os_vmk_mmap_text_start() &&
((byte *)p) + size <= os_vmk_mmap_text_end()));
#endif
#if defined(ANDROID) && defined(DEBUG)
/* We don't label in release to be more transparent */
dynamorio_syscall(SYS_prctl, 5, PR_SET_VMA, PR_SET_VMA_ANON_NAME, p, size,
"DynamoRIO-internal");
#endif
return p;
}
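/* Walks the memory map looking for an unmapped gap of at least size bytes inside
* [start, end); on success returns the gap's bounds in found_start/found_end.
*/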
static bool
find_free_memory_in_region(byte *start, byte *end, size_t size, byte **found_start OUT,
byte **found_end OUT)
{
memquery_iter_t iter;
/* XXX: despite /proc/sys/vm/mmap_min_addr == PAGE_SIZE, mmap won't
* give me that address if I use it as a hint.
*/
app_pc last_end = (app_pc)(PAGE_SIZE * 16);
bool found = false;
memquery_iterator_start(&iter, NULL, false /*won't alloc*/);
while (memquery_iterator_next(&iter)) {
if (iter.vm_start >= start &&
MIN(iter.vm_start, end) - MAX(last_end, start) >= size) {
if (found_start != NULL)
*found_start = MAX(last_end, start);
if (found_end != NULL)
*found_end = MIN(iter.vm_start, end);
found = true;
break;
}
if (iter.vm_end >= end)
break;
last_end = iter.vm_end;
}
memquery_iterator_stop(&iter);
return found;
}
void *
os_heap_reserve_in_region(void *start, void *end, size_t size,
heap_error_code_t *error_code, bool executable)
{
byte *p = NULL;
byte *try_start = NULL, *try_end = NULL;
uint iters = 0;
ASSERT(ALIGNED(start, PAGE_SIZE) && ALIGNED(end, PAGE_SIZE));
ASSERT(ALIGNED(size, PAGE_SIZE));
LOG(GLOBAL, LOG_HEAP, 3,
"os_heap_reserve_in_region: " SZFMT " bytes in " PFX "-" PFX "\n", size, start,
end);
/* if no restriction on location use regular os_heap_reserve() */
if (start == (void *)PTR_UINT_0 && end == (void *)POINTER_MAX)
return os_heap_reserve(NULL, size, error_code, executable);
/* loop to handle races */
#define RESERVE_IN_REGION_MAX_ITERS 128
while (find_free_memory_in_region(start, end, size, &try_start, &try_end)) {
/* If there's space we'd prefer the end, to avoid the common case of
* a large binary + heap at attach where we're likely to reserve
* right at the start of the brk: we'd prefer to leave more brk space.
*/
p = os_heap_reserve(try_end - size, size, error_code, executable);
if (p != NULL) {
ASSERT(*error_code == HEAP_ERROR_SUCCESS);
ASSERT(p >= (byte *)start && p + size <= (byte *)end);
break;
}
if (++iters > RESERVE_IN_REGION_MAX_ITERS) {
ASSERT_NOT_REACHED();
break;
}
}
if (p == NULL)
*error_code = HEAP_ERROR_CANT_RESERVE_IN_REGION;
else
*error_code = HEAP_ERROR_SUCCESS;
LOG(GLOBAL, LOG_HEAP, 2,
"os_heap_reserve_in_region: reserved " SZFMT " bytes @ " PFX " in " PFX "-" PFX
"\n",
size, p, start, end);
return p;
}
/* commit previously reserved with os_heap_reserve pages */
/* returns false when out of memory */
/* A replacement of os_heap_alloc can be constructed by using os_heap_reserve
and os_heap_commit on a subset of the reserved pages. */
/* caller is required to handle thread synchronization */
bool
os_heap_commit(void *p, size_t size, uint prot, heap_error_code_t *error_code)
{
uint os_prot = memprot_to_osprot(prot);
long res;
/* should only be used on aligned pieces */
ASSERT(size > 0 && ALIGNED(size, PAGE_SIZE));
ASSERT(p);
ASSERT(error_code != NULL);
/* FIXME: note that this mprotect would not truly commit the memory if the
* original mmap had used MAP_NORESERVE; today os_heap_reserve does not use
* MAP_NORESERVE, so the memory is in fact already committed there.
*/
res = mprotect_syscall(p, size, os_prot);
if (res != 0) {
*error_code = -res;
return false;
} else {
*error_code = HEAP_ERROR_SUCCESS;
}
LOG(GLOBAL, LOG_HEAP, 2, "os_heap_commit: %d bytes @ " PFX "\n", size, p);
return true;
}
/* caller is required to handle thread synchronization and to update dynamo vm areas */
void
os_heap_decommit(void *p, size_t size, heap_error_code_t *error_code)
{
int rc;
ASSERT(error_code != NULL);
if (!dynamo_exited)
LOG(GLOBAL, LOG_HEAP, 4, "os_heap_decommit: %d bytes @ " PFX "\n", size, p);
*error_code = HEAP_ERROR_SUCCESS;
/* FIXME: for now do nothing since os_heap_reserve has in fact committed the memory */
rc = 0;
/* TODO:
p = mmap_syscall(p, size, PROT_NONE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
we should either do a mremap()
or we can do a munmap() followed 'quickly' by a mmap() -
also see above the comment that os_heap_reserve() in fact is not so lightweight
*/
ASSERT(rc == 0);
}
bool
os_heap_systemwide_overcommit(heap_error_code_t last_error_code)
{
/* FIXME: conservative answer yes */
return true;
}
bool
os_heap_get_commit_limit(size_t *commit_used, size_t *commit_limit)
{
/* FIXME - NYI */
return false;
}
/* yield the current thread */
void
os_thread_yield()
{
#ifdef MACOS
/* XXX i#1291: use raw syscall instead */
swtch_pri(0);
#else
dynamorio_syscall(SYS_sched_yield, 0);
#endif
}
bool
thread_signal(process_id_t pid, thread_id_t tid, int signum)
{
#ifdef MACOS
/* FIXME i#58: this takes in a thread port. Need to map thread id to port.
* Need to figure out whether we support raw Mach threads w/o pthread on top.
*/
ASSERT_NOT_IMPLEMENTED(false);
return false;
#else
/* FIXME: for non-NPTL use SYS_kill */
/* Note that the pid is equivalent to the thread group id.
* However, we can have threads sharing address space but not pid
* (if created via CLONE_VM but not CLONE_THREAD), so make sure to
* use the pid of the target thread, not our pid.
*/
return (dynamorio_syscall(SYS_tgkill, 3, pid, tid, signum) == 0);
#endif
}
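/* Sends signum to a thread we are tracking: on MacOS via the saved Mach thread
* port; elsewhere by falling through to thread_signal().
*/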
static bool
known_thread_signal(thread_record_t *tr, int signum)
{
#ifdef MACOS
ptr_int_t res;
if (tr->dcontext == NULL)
return FALSE;
res = dynamorio_syscall(SYS___pthread_kill, 2, tr->dcontext->thread_port, signum);
LOG(THREAD_GET, LOG_ALL, 3, "%s: signal %d to port %d => %ld\n", __FUNCTION__, signum,
tr->dcontext->thread_port, res);
return res == 0;
#else
return thread_signal(tr->pid, tr->id, signum);
#endif
}
void
os_thread_sleep(uint64 milliseconds)
{
#ifdef MACOS
semaphore_t sem = MACH_PORT_NULL;
int res;
#else
struct timespec remain;
int count = 0;
#endif
struct timespec req;
req.tv_sec = (milliseconds / 1000);
/* docs say can go up to 1000000000, but doesn't work on FC9 */
req.tv_nsec = (milliseconds % 1000) * 1000000;
#ifdef MACOS
if (sem == MACH_PORT_NULL) {
DEBUG_DECLARE(kern_return_t res =)
semaphore_create(mach_task_self(), &sem, SYNC_POLICY_FIFO, 0);
ASSERT(res == KERN_SUCCESS);
}
res =
dynamorio_syscall(SYSNUM_NO_CANCEL(SYS___semwait_signal), 6, sem, MACH_PORT_NULL,
1, 1, (int64_t)req.tv_sec, (int32_t)req.tv_nsec);
if (res == -EINTR) {
/* FIXME i#58: figure out how much time elapsed and re-wait */
}
#else
/* FIXME: if we need accurate sleeps in presence of itimers we should
* be using SYS_clock_nanosleep w/ an absolute time instead of relative
*/
while (dynamorio_syscall(SYS_nanosleep, 2, &req, &remain) == -EINTR) {
/* interrupted by signal or something: finish the interval */
ASSERT_CURIOSITY_ONCE(remain.tv_sec <= req.tv_sec &&
(remain.tv_sec < req.tv_sec ||
/* there seems to be some rounding, and sometimes
* remain nsec > req nsec (I've seen 40K diff)
*/
req.tv_nsec - remain.tv_nsec < 100000 ||
req.tv_nsec - remain.tv_nsec > -100000));
/* not unusual for client threads to use itimers and have their run
* routine sleep forever
*/
if (count++ > 3 && !IS_CLIENT_THREAD(get_thread_private_dcontext())) {
ASSERT_NOT_REACHED();
break; /* paranoid */
}
req = remain;
}
#endif
}
bool
os_thread_suspend(thread_record_t *tr)
{
os_thread_data_t *ostd = (os_thread_data_t *)tr->dcontext->os_field;
ASSERT(ostd != NULL);
/* See synch comments in os_thread_resume: the mutex held there
* prevents prematurely sending a re-suspend signal.
*/
d_r_mutex_lock(&ostd->suspend_lock);
ostd->suspend_count++;
ASSERT(ostd->suspend_count > 0);
/* If already suspended, do not send another signal. However, we do
* need to ensure the target is suspended in case of a race, so we can't
* just return.
*/
if (ostd->suspend_count == 1) {
/* PR 212090: we use a custom signal handler to suspend. We wait
* here until the target reaches the suspend point, and leave it
* up to the caller to check whether it is a safe suspend point,
* to match Windows behavior.
*/
ASSERT(ksynch_get_value(&ostd->suspended) == 0);
if (!known_thread_signal(tr, SUSPEND_SIGNAL)) {
ostd->suspend_count--;
d_r_mutex_unlock(&ostd->suspend_lock);
return false;
}
}
/* we can unlock before the wait loop b/c we're using a separate "resumed"
* int and os_thread_resume holds the lock across its wait. this way a resume
* can proceed as soon as the suspended thread is suspended, before the
* suspending thread gets scheduled again.
*/
d_r_mutex_unlock(&ostd->suspend_lock);
while (ksynch_get_value(&ostd->suspended) == 0) {
/* For Linux, waits only if the suspended flag is not set as 1. Return value
* doesn't matter because the flag will be re-checked.
*/
/* We time out and assert in debug build to provide better diagnostics than a
* silent hang. We can't safely return false b/c the synch model here
* assumes there will not be a retry until the target reaches the suspend
* point. Xref i#2779.
*/
#define SUSPEND_DEBUG_TIMEOUT_MS 5000
if (ksynch_wait(&ostd->suspended, 0, SUSPEND_DEBUG_TIMEOUT_MS) == -ETIMEDOUT) {
ASSERT_CURIOSITY(false && "failed to suspend thread in 5s");
}
if (ksynch_get_value(&ostd->suspended) == 0) {
/* If it still has to wait, give up the cpu. */
os_thread_yield();
}
}
return true;
}
bool
os_thread_resume(thread_record_t *tr)
{
os_thread_data_t *ostd = (os_thread_data_t *)tr->dcontext->os_field;
ASSERT(ostd != NULL);
/* This mutex prevents sending a re-suspend signal before the target
* reaches a safe post-resume point from a first suspend signal.
* Given that race, we can't just use atomic_add_exchange_int +
* atomic_dec_becomes_zero on suspend_count.
*/
d_r_mutex_lock(&ostd->suspend_lock);
ASSERT(ostd->suspend_count > 0);
/* PR 479750: if do get here and target is not suspended then abort
* to avoid possible deadlocks
*/
if (ostd->suspend_count == 0) {
d_r_mutex_unlock(&ostd->suspend_lock);
return true; /* the thread is "resumed", so success status */
}
ostd->suspend_count--;
if (ostd->suspend_count > 0) {
d_r_mutex_unlock(&ostd->suspend_lock);
return true; /* still suspended */
}
ksynch_set_value(&ostd->wakeup, 1);
ksynch_wake(&ostd->wakeup);
while (ksynch_get_value(&ostd->resumed) == 0) {
/* For Linux, waits only if the resumed flag is not set as 1. Return value
* doesn't matter because the flag will be re-checked.
*/
ksynch_wait(&ostd->resumed, 0, 0);
if (ksynch_get_value(&ostd->resumed) == 0) {
/* If it still has to wait, give up the cpu. */
os_thread_yield();
}
}
ksynch_set_value(&ostd->wakeup, 0);
ksynch_set_value(&ostd->resumed, 0);
d_r_mutex_unlock(&ostd->suspend_lock);
return true;
}
bool
os_thread_terminate(thread_record_t *tr)
{
/* PR 297902: for NPTL sending SIGKILL will take down the whole group:
* so instead we send SIGUSR2 and have a flag set telling
* target thread to execute SYS_exit
*/
os_thread_data_t *ostd = (os_thread_data_t *)tr->dcontext->os_field;
ASSERT(ostd != NULL);
ostd->terminate = true;
/* Even if the thread is currently suspended, it's simpler to send it
* another signal than to resume it.
*/
return known_thread_signal(tr, SUSPEND_SIGNAL);
}
bool
is_thread_terminated(dcontext_t *dcontext)
{
os_thread_data_t *ostd = (os_thread_data_t *)dcontext->os_field;
ASSERT(ostd != NULL);
return (ksynch_get_value(&ostd->terminated) == 1);
}
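/* Blocks until *var becomes nonzero, yielding the cpu between futex waits. */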
static void
os_wait_thread_futex(KSYNCH_TYPE *var)
{
while (ksynch_get_value(var) == 0) {
/* On Linux, waits only if var is not set as 1. Return value
* doesn't matter because var will be re-checked.
*/
ksynch_wait(var, 0, 0);
if (ksynch_get_value(var) == 0) {
/* If it still has to wait, give up the cpu. */
os_thread_yield();
}
}
}
void
os_wait_thread_terminated(dcontext_t *dcontext)
{
os_thread_data_t *ostd = (os_thread_data_t *)dcontext->os_field;
ASSERT(ostd != NULL);
os_wait_thread_futex(&ostd->terminated);
}
void
os_wait_thread_detached(dcontext_t *dcontext)
{
os_thread_data_t *ostd = (os_thread_data_t *)dcontext->os_field;
ASSERT(ostd != NULL);
os_wait_thread_futex(&ostd->detached);
}
void
os_signal_thread_detach(dcontext_t *dcontext)
{
os_thread_data_t *ostd = (os_thread_data_t *)dcontext->os_field;
ASSERT(ostd != NULL);
ostd->do_detach = true;
}
bool
thread_get_mcontext(thread_record_t *tr, priv_mcontext_t *mc)
{
/* PR 212090: only works when target is suspended by us, and
* we then take the signal context
*/
os_thread_data_t *ostd = (os_thread_data_t *)tr->dcontext->os_field;
ASSERT(ostd != NULL);
ASSERT(ostd->suspend_count > 0);
if (ostd->suspend_count == 0)
return false;
ASSERT(ostd->suspended_sigcxt != NULL);
sigcontext_to_mcontext(mc, ostd->suspended_sigcxt, DR_MC_ALL);
return true;
}
bool
thread_set_mcontext(thread_record_t *tr, priv_mcontext_t *mc)
{
/* PR 212090: only works when target is suspended by us, and
* we then replace the signal context
*/
os_thread_data_t *ostd = (os_thread_data_t *)tr->dcontext->os_field;
ASSERT(ostd != NULL);
ASSERT(ostd->suspend_count > 0);
if (ostd->suspend_count == 0)
return false;
ASSERT(ostd->suspended_sigcxt != NULL);
mcontext_to_sigcontext(ostd->suspended_sigcxt, mc, DR_MC_ALL);
return true;
}
/* Only one of mc and dmc can be non-NULL. */
bool
os_context_to_mcontext(dr_mcontext_t *dmc, priv_mcontext_t *mc, os_cxt_ptr_t osc)
{
if (dmc != NULL)
sigcontext_to_mcontext(dr_mcontext_as_priv_mcontext(dmc), &osc, dmc->flags);
else if (mc != NULL)
sigcontext_to_mcontext(mc, &osc, DR_MC_ALL);
else
return false;
return true;
}
/* Only one of mc and dmc can be non-NULL. */
bool
mcontext_to_os_context(os_cxt_ptr_t osc, dr_mcontext_t *dmc, priv_mcontext_t *mc)
{
if (dmc != NULL)
mcontext_to_sigcontext(&osc, dr_mcontext_as_priv_mcontext(dmc), dmc->flags);
else if (mc != NULL)
mcontext_to_sigcontext(&osc, mc, DR_MC_ALL);
else
return false;
return true;
}
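/* A thread is considered native if it is not under DR control, or if it has
 * been stopped via the start/stop interface (which leaves under_dynamo_control
 * unchanged and uses its own currently_stopped field).
 */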
bool
is_thread_currently_native(thread_record_t *tr)
{
return (!tr->under_dynamo_control ||
/* start/stop doesn't change under_dynamo_control and has its own field */
(tr->dcontext != NULL && tr->dcontext->currently_stopped));
}
#ifdef CLIENT_SIDELINE /* PR 222812: tied to sideline usage */
# ifdef LINUX /* XXX i#58: just until we have Mac support */
static void
client_thread_run(void)
{
void (*func)(void *param);
dcontext_t *dcontext;
byte *xsp;
GET_STACK_PTR(xsp);
void *crec = get_clone_record((reg_t)xsp);
IF_DEBUG(int rc =)
dynamo_thread_init(get_clone_record_dstack(crec), NULL, crec, true);
ASSERT(rc != -1); /* this better be a new thread */
dcontext = get_thread_private_dcontext();
ASSERT(dcontext != NULL);
LOG(THREAD, LOG_ALL, 1, "\n***** CLIENT THREAD %d *****\n\n", d_r_get_thread_id());
/* We stored the func and args in particular clone record fields */
func = (void (*)(void *param))dcontext->next_tag;
/* Reset any inherited mask (i#2337). */
signal_swap_mask(dcontext, false /*to DR*/);
void *arg = (void *)get_clone_record_app_xsp(crec);
LOG(THREAD, LOG_ALL, 1, "func=" PFX ", arg=" PFX "\n", func, arg);
/* i#2335: we support setup separate from start, and we want to allow a client
* to create a client thread during init, but we do not support that thread
* executing until the app has started (b/c we have no signal handlers in place).
*/
wait_for_event(dr_app_started, 0);
(*func)(arg);
LOG(THREAD, LOG_ALL, 1, "\n***** CLIENT THREAD %d EXITING *****\n\n",
d_r_get_thread_id());
block_cleanup_and_terminate(dcontext, SYS_exit, 0, 0, false /*just thread*/,
IF_MACOS_ELSE(dcontext->thread_port, 0), 0);
}
# endif
/* i#41/PR 222812: client threads
* * thread must have dcontext since many API routines require one and we
* don't expose GLOBAL_DCONTEXT (xref PR 243008, PR 216936, PR 536058)
* * reversed the old design of not using dstack (partly b/c want dcontext)
* and I'm using the same parent-creates-dstack and clone_record_t design
* to create linux threads: dstack should be big enough for client threads
* (xref PR 202669)
* * reversed the old design of explicit dr_terminate_client_thread(): now
* the thread is auto-terminated and stack cleaned up on return from run
* function
*/
DR_API bool
dr_create_client_thread(void (*func)(void *param), void *arg)
{
# ifdef LINUX
dcontext_t *dcontext = get_thread_private_dcontext();
byte *xsp;
/* We do not pass SIGCHLD since don't want signal to parent and don't support
* waiting on child.
* We do not pass CLONE_THREAD so that the new thread is in its own thread
* group, allowing it to have private itimers and not receive any signals
* sent to the app's thread groups. It also makes the thread not show up in
* the thread list for the app, making it more invisible.
*/
uint flags = CLONE_VM | CLONE_FS | CLONE_FILES |
CLONE_SIGHAND
        /* For VMkernel userworlds CLONE_THREAD is required; signals and itimers
         * are private there anyway. */
IF_VMX86(| (os_in_vmkernel_userworld() ? CLONE_THREAD : 0));
pre_second_thread();
/* need to share signal handler table, prior to creating clone record */
handle_clone(dcontext, flags);
ATOMIC_INC(int, uninit_thread_count);
void *crec = create_clone_record(dcontext, (reg_t *)&xsp);
/* make sure client_thread_run can get the func and arg, and that
* signal_thread_inherit gets the right syscall info
*/
set_clone_record_fields(crec, (reg_t)arg, (app_pc)func, SYS_clone, flags);
LOG(THREAD, LOG_ALL, 1, "dr_create_client_thread xsp=" PFX " dstack=" PFX "\n", xsp,
get_clone_record_dstack(crec));
/* i#501 switch to app's tls before creating client thread.
* i#3526 switch DR's tls to an invalid one before cloning, and switch lib_tls
* to the app's.
*/
os_clone_pre(dcontext);
thread_id_t newpid = dynamorio_clone(flags, xsp, NULL, NULL, NULL, client_thread_run);
/* i#3526 switch DR's tls back to the original one before cloning. */
os_clone_post(dcontext);
/* i#501 the app's tls was switched in os_clone_pre. */
if (IF_CLIENT_INTERFACE_ELSE(INTERNAL_OPTION(private_loader), false))
os_switch_lib_tls(dcontext, false /*to dr*/);
if (newpid < 0) {
LOG(THREAD, LOG_ALL, 1, "client thread creation failed: %d\n", newpid);
return false;
} else if (newpid == 0) {
/* dynamorio_clone() should have called client_thread_run directly */
ASSERT_NOT_REACHED();
return false;
}
return true;
# else
ASSERT_NOT_IMPLEMENTED(false); /* FIXME i#58: implement on Mac */
return false;
# endif
}
#endif /* CLIENT_SIDELINE PR 222812: tied to sideline usage */
int
get_num_processors(void)
{
static uint num_cpu = 0; /* cached value */
if (!num_cpu) {
#ifdef MACOS
DEBUG_DECLARE(bool ok =)
sysctl_query(CTL_HW, HW_NCPU, &num_cpu, sizeof(num_cpu));
ASSERT(ok);
#else
/* We used to use get_nprocs_conf, but that's in libc, so now we just
* look at the /sys filesystem ourselves, which is what glibc does.
*/
uint local_num_cpus = 0;
file_t cpu_dir = os_open_directory("/sys/devices/system/cpu", OS_OPEN_READ);
dir_iterator_t iter;
ASSERT(cpu_dir != INVALID_FILE &&
"/sys must be mounted: mount -t sysfs sysfs /sys");
os_dir_iterator_start(&iter, cpu_dir);
while (os_dir_iterator_next(&iter)) {
int dummy_num;
if (sscanf(iter.name, "cpu%d", &dummy_num) == 1)
local_num_cpus++;
}
os_close(cpu_dir);
num_cpu = local_num_cpus;
#endif
ASSERT(num_cpu);
}
return num_cpu;
}
/* i#46: To support -no_private_loader, we have to call the dlfcn family of
* routines in libdl.so. When we do early injection, there is no loader to
* resolve these imports, so they will crash. Early injection is incompatible
* with -no_private_loader, so this should never happen.
*/
#if defined(CLIENT_INTERFACE) || defined(HOT_PATCHING_INTERFACE)
shlib_handle_t
load_shared_library(const char *name, bool reachable)
{
# ifdef STATIC_LIBRARY
if (os_files_same(name, get_application_name())) {
/* The private loader falls back to dlsym() and friends for modules it
* doesn't recognize, so this works without disabling the private loader.
*/
return dlopen(NULL, RTLD_LAZY); /* Gets a handle to the exe. */
}
# endif
/* We call locate_and_load_private_library() to support searching for
* a pathless name.
*/
if (IF_CLIENT_INTERFACE_ELSE(INTERNAL_OPTION(private_loader), false))
return (shlib_handle_t)locate_and_load_private_library(name, reachable);
# if defined(STATIC_LIBRARY) || defined(MACOS)
ASSERT(!DYNAMO_OPTION(early_inject));
return dlopen(name, RTLD_LAZY);
# else
/* -no_private_loader is no longer supported in our default builds.
* If we want it for hybrid mode we should add a new build param and include
* the libdl calls here under that param.
*/
ASSERT_NOT_REACHED();
return NULL;
# endif
}
#endif
#if defined(CLIENT_INTERFACE)
shlib_routine_ptr_t
lookup_library_routine(shlib_handle_t lib, const char *name)
{
if (IF_CLIENT_INTERFACE_ELSE(INTERNAL_OPTION(private_loader), false)) {
return (shlib_routine_ptr_t)get_private_library_address((app_pc)lib, name);
}
# if defined(STATIC_LIBRARY) || defined(MACOS)
ASSERT(!DYNAMO_OPTION(early_inject));
return dlsym(lib, name);
# else
ASSERT_NOT_REACHED(); /* -no_private_loader is no longer supported: see above */
return NULL;
# endif
}
void
unload_shared_library(shlib_handle_t lib)
{
if (IF_CLIENT_INTERFACE_ELSE(INTERNAL_OPTION(private_loader), false)) {
unload_private_library(lib);
} else {
# if defined(STATIC_LIBRARY) || defined(MACOS)
ASSERT(!DYNAMO_OPTION(early_inject));
if (!DYNAMO_OPTION(avoid_dlclose)) {
dlclose(lib);
}
# else
ASSERT_NOT_REACHED(); /* -no_private_loader is no longer supported: see above */
# endif
}
}
void
shared_library_error(char *buf, int maxlen)
{
const char *err;
if (IF_CLIENT_INTERFACE_ELSE(INTERNAL_OPTION(private_loader), false)) {
err = "error in private loader";
} else {
# if defined(STATIC_LIBRARY) || defined(MACOS)
ASSERT(!DYNAMO_OPTION(early_inject));
err = dlerror();
if (err == NULL) {
err = "dlerror returned NULL";
}
# else
ASSERT_NOT_REACHED(); /* -no_private_loader is no longer supported */
err = "unknown error";
# endif
}
strncpy(buf, err, maxlen - 1);
buf[maxlen - 1] = '\0'; /* strncpy won't put on trailing null if maxes out */
}
/* addr is any pointer known to lie within the library.
* for linux, one of addr or name is needed; for windows, neither is needed.
*/
bool
shared_library_bounds(IN shlib_handle_t lib, IN byte *addr, IN const char *name,
OUT byte **start, OUT byte **end)
{
ASSERT(start != NULL && end != NULL);
/* PR 366195: dlopen() handle truly is opaque, so we have to use either
* addr or name
*/
ASSERT(addr != NULL || name != NULL);
*start = addr;
if (IF_CLIENT_INTERFACE_ELSE(INTERNAL_OPTION(private_loader), false)) {
privmod_t *mod;
/* look for private library first */
acquire_recursive_lock(&privload_lock);
mod = privload_lookup_by_base((app_pc)lib);
if (name != NULL && mod == NULL)
mod = privload_lookup(name);
if (mod != NULL && !mod->externally_loaded) {
*start = mod->base;
if (end != NULL)
*end = mod->base + mod->size;
release_recursive_lock(&privload_lock);
return true;
}
release_recursive_lock(&privload_lock);
}
return (memquery_library_bounds(name, start, end, NULL, 0, NULL, 0) > 0);
}
#endif /* defined(CLIENT_INTERFACE) */
static int
fcntl_syscall(int fd, int cmd, long arg)
{
return dynamorio_syscall(SYSNUM_NO_CANCEL(SYS_fcntl), 3, fd, cmd, arg);
}
/* dups curfd to a private fd.
* returns -1 if unsuccessful.
*/
file_t
fd_priv_dup(file_t curfd)
{
file_t newfd = -1;
if (DYNAMO_OPTION(steal_fds) > 0) {
/* RLIMIT_NOFILES is 1 greater than max and F_DUPFD starts at given value */
/* XXX: if > linux 2.6.24, can use F_DUPFD_CLOEXEC to avoid later call:
* so how do we tell if the flag is supported? try calling once at init?
*/
newfd = fcntl_syscall(curfd, F_DUPFD, min_dr_fd);
if (newfd < 0) {
/* We probably ran out of fds, esp if debug build and there are
* lots of threads. Should we track how many we've given out to
* avoid a failed syscall every time after?
*/
SYSLOG_INTERNAL_WARNING_ONCE("ran out of stolen fd space");
/* Try again but this time in the app space, somewhere high up
* to avoid issues like tcsh assuming it can own fds 3-5 for
* piping std{in,out,err} (xref the old -open_tcsh_fds option).
*/
newfd = fcntl_syscall(curfd, F_DUPFD, min_dr_fd / 2);
}
}
return newfd;
}
bool
fd_mark_close_on_exec(file_t fd)
{
/* we assume FD_CLOEXEC is the only flag and don't bother w/ F_GETFD */
if (fcntl_syscall(fd, F_SETFD, FD_CLOEXEC) != 0) {
SYSLOG_INTERNAL_WARNING("unable to mark file %d as close-on-exec", fd);
return false;
}
return true;
}
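/* Records fd and its open flags in DR's private fd table so that later
 * fd-related checks can recognize DR-owned descriptors. Before the table
 * exists, only the main logfile is expected to come through here.
 */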
void
fd_table_add(file_t fd, uint flags)
{
if (fd_table != NULL) {
TABLE_RWLOCK(fd_table, write, lock);
DODEBUG({
/* i#1010: If the fd is already in the table, chances are it's a
* stale logfile fd left behind by a vforked or cloned child that
* called execve. Avoid an assert if that happens.
*/
bool present = generic_hash_remove(GLOBAL_DCONTEXT, fd_table, (ptr_uint_t)fd);
ASSERT_CURIOSITY_ONCE(!present && "stale fd not cleaned up");
});
generic_hash_add(GLOBAL_DCONTEXT, fd_table, (ptr_uint_t)fd,
/* store the flags, w/ a set bit to ensure not 0 */
(void *)(ptr_uint_t)(flags | OS_OPEN_RESERVED));
TABLE_RWLOCK(fd_table, write, unlock);
} else {
#ifdef DEBUG
num_fd_add_pre_heap++;
/* we add main_logfile in d_r_os_init() */
ASSERT(num_fd_add_pre_heap == 1 && "only main_logfile should come here");
#endif
}
}
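/* Returns whether fd was registered in fd_table, i.e., it is a DR-private descriptor. */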
static bool
fd_is_dr_owned(file_t fd)
{
ptr_uint_t flags;
ASSERT(fd_table != NULL);
TABLE_RWLOCK(fd_table, read, lock);
flags = (ptr_uint_t)generic_hash_lookup(GLOBAL_DCONTEXT, fd_table, (ptr_uint_t)fd);
TABLE_RWLOCK(fd_table, read, unlock);
return (flags != 0);
}
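/* Returns whether fd lies in the numeric range DR reserves for its own
 * descriptors when -steal_fds is enabled.
 */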
static bool
fd_is_in_private_range(file_t fd)
{
return (DYNAMO_OPTION(steal_fds) > 0 && min_dr_fd > 0 && fd >= min_dr_fd);
}
file_t
os_open_protected(const char *fname, int os_open_flags)
{
file_t dup;
file_t res = os_open(fname, os_open_flags);
if (res < 0)
return res;
/* we could have os_open() always switch to a private fd but it's probably
* not worth the extra syscall for temporary open/close sequences so we
* only use it for persistent files
*/
dup = fd_priv_dup(res);
if (dup >= 0) {
close_syscall(res);
res = dup;
fd_mark_close_on_exec(res);
} /* else just keep original */
/* ditto here, plus for things like config.c opening files we can't handle
* grabbing locks and often don't have heap available so no fd_table
*/
fd_table_add(res, os_open_flags);
return res;
}
void
os_close_protected(file_t f)
{
ASSERT(fd_table != NULL || dynamo_exited);
if (fd_table != NULL) {
TABLE_RWLOCK(fd_table, write, lock);
generic_hash_remove(GLOBAL_DCONTEXT, fd_table, (ptr_uint_t)f);
TABLE_RWLOCK(fd_table, write, unlock);
}
os_close(f);
}
bool
os_get_current_dir(char *buf, size_t bufsz)
{
#ifdef MACOS
static char noheap_buf[MAXPATHLEN];
bool res = false;
file_t fd = os_open(".", OS_OPEN_READ);
int len;
/* F_GETPATH assumes a buffer of size MAXPATHLEN */
char *fcntl_buf;
if (dynamo_heap_initialized)
fcntl_buf = global_heap_alloc(MAXPATHLEN HEAPACCT(ACCT_OTHER));
else
fcntl_buf = noheap_buf;
if (fd == INVALID_FILE)
goto cwd_error;
if (fcntl_syscall(fd, F_GETPATH, (long)fcntl_buf) != 0)
goto cwd_error;
    len = snprintf(buf, bufsz, "%s", fcntl_buf);
    buf[bufsz - 1] = '\0';
    res = (len > 0 && (size_t)len < bufsz);
cwd_error:
    /* Free the temporary buffer and close the fd on both success and failure. */
    if (dynamo_heap_initialized)
        global_heap_free(fcntl_buf, MAXPATHLEN HEAPACCT(ACCT_OTHER));
    if (fd != INVALID_FILE)
        os_close(fd);
    return res;
#else
return (dynamorio_syscall(SYS_getcwd, 2, buf, bufsz) > 0);
#endif
}
ssize_t
os_write(file_t f, const void *buf, size_t count)
{
return write_syscall(f, buf, count);
}
/* There are enough differences vs the shared drlibc_os.c version that we override
* it here. We use a loop to ensure reachability for the core.
*/
byte *
os_map_file(file_t f, size_t *size INOUT, uint64 offs, app_pc addr, uint prot,
map_flags_t map_flags)
{
int flags;
byte *map = NULL;
#if defined(X64)
bool loop = false;
uint iters = 0;
# define MAX_MMAP_LOOP_ITERS 100
byte *region_start = NULL, *region_end = NULL;
#else
uint pg_offs;
ASSERT_TRUNCATE(pg_offs, uint, offs / PAGE_SIZE);
pg_offs = (uint)(offs / PAGE_SIZE);
#endif
#ifdef VMX86_SERVER
flags = MAP_PRIVATE; /* MAP_SHARED not supported yet */
#else
flags = TEST(MAP_FILE_COPY_ON_WRITE, map_flags) ? MAP_PRIVATE : MAP_SHARED;
#endif
#if defined(X64)
/* Allocate memory from reachable range for image: or anything (pcache
* in particular): for low 4GB, easiest to just pass MAP_32BIT (which is
* low 2GB, but good enough).
*/
if (DYNAMO_OPTION(heap_in_lower_4GB) && !TEST(MAP_FILE_FIXED, map_flags))
flags |= MAP_32BIT;
#endif
/* Allows memory request instead of mapping a file,
* so we can request memory from a particular address with fixed argument */
if (f == -1)
flags |= MAP_ANONYMOUS;
if (TEST(MAP_FILE_FIXED, map_flags))
flags |= MAP_FIXED;
#if defined(X64)
if (!TEST(MAP_32BIT, flags) && TEST(MAP_FILE_REACHABLE, map_flags)) {
vmcode_get_reachable_region(®ion_start, ®ion_end);
/* addr need not be NULL: we'll use it if it's in the region */
ASSERT(!TEST(MAP_FILE_FIXED, map_flags));
/* Loop to handle races */
loop = true;
}
if ((!TEST(MAP_32BIT, flags) && TEST(MAP_FILE_REACHABLE, map_flags) &&
(is_vmm_reserved_address(addr, *size, NULL, NULL) ||
/* Try to honor a library's preferred address. This does open up a race
* window during attach where another thread could take this spot,
* and with this current code we'll never go back and try to get VMM
* memory. We live with that as being rare rather than complicate the code.
*/
!rel32_reachable_from_current_vmcode(addr))) ||
(TEST(MAP_FILE_FIXED, map_flags) && !TEST(MAP_FILE_VMM_COMMIT, map_flags) &&
is_vmm_reserved_address(addr, *size, NULL, NULL))) {
if (DYNAMO_OPTION(vm_reserve)) {
/* Try to get space inside the vmcode reservation. */
map = heap_reserve_for_external_mapping(addr, *size,
VMM_SPECIAL_MMAP | VMM_REACHABLE);
if (map != NULL) {
addr = map;
flags |= MAP_FIXED;
}
}
}
while (!loop ||
(addr != NULL && addr >= region_start && addr + *size <= region_end) ||
find_free_memory_in_region(region_start, region_end, *size, &addr, NULL)) {
#endif
map = mmap_syscall(addr, *size, memprot_to_osprot(prot), flags, f,
/* x86 Linux mmap uses offset in pages */
IF_LINUX_ELSE(IF_X64_ELSE(offs, pg_offs), offs));
if (!mmap_syscall_succeeded(map)) {
LOG(THREAD_GET, LOG_SYSCALLS, 2, "%s failed: " PIFX "\n", __func__, map);
map = NULL;
}
#if defined(X64)
else if (loop && (map < region_start || map + *size > region_end)) {
/* Try again: probably a race. Hopefully our notion of "there's a free
* region big enough" matches the kernel's, else we'll loop forever
* (which we try to catch w/ a max iters count).
*/
munmap_syscall(map, *size);
map = NULL;
} else
break;
if (!loop)
break;
if (++iters > MAX_MMAP_LOOP_ITERS) {
ASSERT_NOT_REACHED();
map = NULL;
break;
}
addr = NULL; /* pick a new one */
}
#endif
return map;
}
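/* Unmaps a file mapping. If the region lies inside the VMM reservation we
 * instead re-reserve it as PROT_NONE and return it to the VMM bookkeeping;
 * otherwise we simply munmap it.
 */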
bool
os_unmap_file(byte *map, size_t size)
{
if (DYNAMO_OPTION(vm_reserve) && is_vmm_reserved_address(map, size, NULL, NULL)) {
/* XXX i#3570: We'd prefer to have the VMM perform this to ensure it matches
* how it originally reserved the memory. To do that would we expose a way
* to ask for MAP_FIXED in os_heap_reserve*()?
*/
byte *addr = mmap_syscall(map, size, PROT_NONE,
MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
if (!mmap_syscall_succeeded(addr))
return false;
return heap_unreserve_for_external_mapping(map, size,
VMM_SPECIAL_MMAP | VMM_REACHABLE);
}
long res = munmap_syscall(map, size);
return (res == 0);
}
#ifdef LINUX
static void
os_get_memory_file_shm_path(const char *name, OUT char *buf, size_t bufsz)
{
snprintf(buf, bufsz, "/dev/shm/%s.%d", name, get_process_id());
buf[bufsz - 1] = '\0';
}
#endif
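/* Creates an anonymous in-memory file of the requested size (via memfd_create,
 * falling back to /dev/shm) and returns a DR-private, close-on-exec fd for it,
 * or INVALID_FILE on failure.
 */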
file_t
os_create_memory_file(const char *name, size_t size)
{
#ifdef LINUX
char path[MAXIMUM_PATH];
file_t fd;
/* We need an in-memory file. We prefer the new memfd_create over /dev/shm (it
* has no name conflict issues, stale files left around on a crash, or
* reliance on tmpfs).
*/
# ifdef SYS_memfd_create
snprintf(path, BUFFER_SIZE_ELEMENTS(path), "/%s.%d", name, get_process_id());
NULL_TERMINATE_BUFFER(path);
fd = dynamorio_syscall(SYS_memfd_create, 2, path, 0);
# else
fd = -ENOSYS;
# endif
if (fd == -ENOSYS) {
/* Fall back on /dev/shm. */
os_get_memory_file_shm_path(name, path, BUFFER_SIZE_ELEMENTS(path));
NULL_TERMINATE_BUFFER(path);
fd = open_syscall(path, O_CREAT | O_EXCL | O_RDWR, S_IRUSR | S_IWUSR);
if (fd == -EEXIST) {
/* We assume a stale file from some prior crash. */
SYSLOG_INTERNAL_WARNING("Removing presumed-stale %s", path);
os_delete_file(path);
fd = open_syscall(path, O_CREAT | O_EXCL | O_RDWR, S_IRUSR | S_IWUSR);
}
}
if (fd < 0)
return INVALID_FILE;
/* Work around an IMA (kernel optional feature "Integrity Measurement
* Architecture") slowdown where the first executable mmap causes a hash
* to be computed of the entire file size, which can take 5 or 10
* *seconds* for gigabyte files. This is only done once, so if we
* trigger it while the file is tiny, we can avoid the delay later.
*/
byte *temp_map = mmap_syscall(0, PAGE_SIZE, PROT_READ | PROT_EXEC, MAP_SHARED, fd, 0);
if (mmap_syscall_succeeded(temp_map))
munmap_syscall(temp_map, PAGE_SIZE);
/* Else, not fatal: this may not be destined for a later executable mapping anyway. */
if (dynamorio_syscall(SYS_ftruncate, 2, fd, size) < 0) {
close_syscall(fd);
return INVALID_FILE;
}
file_t priv_fd = fd_priv_dup(fd);
close_syscall(fd); /* Close the old descriptor on success *and* error. */
if (priv_fd < 0) {
return INVALID_FILE;
}
fd = priv_fd;
fd_mark_close_on_exec(fd); /* We could use MFD_CLOEXEC for memfd_create. */
return fd;
#else
ASSERT_NOT_IMPLEMENTED(false && "i#3556 NYI for Mac");
return INVALID_FILE;
#endif
}
void
os_delete_memory_file(const char *name, file_t fd)
{
#ifdef LINUX
/* There is no need to delete a memfd_create path, but if we used shm we need
* to clean it up. We blindly do this rather than trying to record whether
* we created this file.
*/
char path[MAXIMUM_PATH];
os_get_memory_file_shm_path(name, path, BUFFER_SIZE_ELEMENTS(path));
NULL_TERMINATE_BUFFER(path);
os_delete_file(path);
close_syscall(fd);
#else
ASSERT_NOT_IMPLEMENTED(false && "i#3556 NYI for Mac");
#endif
}
bool
os_get_disk_free_space(/*IN*/ file_t file_handle,
/*OUT*/ uint64 *AvailableQuotaBytes /*OPTIONAL*/,
/*OUT*/ uint64 *TotalQuotaBytes /*OPTIONAL*/,
/*OUT*/ uint64 *TotalVolumeBytes /*OPTIONAL*/)
{
/* libc struct seems to match kernel's */
struct statfs stat;
ptr_int_t res = dynamorio_syscall(SYS_fstatfs, 2, file_handle, &stat);
if (res != 0) {
LOG(THREAD_GET, LOG_SYSCALLS, 2, "%s failed: " PIFX "\n", __func__, res);
return false;
}
LOG(GLOBAL, LOG_STATS, 3,
"os_get_disk_free_space: avail=" SZFMT ", free=" SZFMT ", bsize=" SZFMT "\n",
stat.f_bavail, stat.f_bfree, stat.f_bsize);
if (AvailableQuotaBytes != NULL)
*AvailableQuotaBytes = ((uint64)stat.f_bavail * stat.f_bsize);
/* no support for quotas */
if (TotalQuotaBytes != NULL)
*TotalQuotaBytes = ((uint64)stat.f_bavail * stat.f_bsize);
if (TotalVolumeBytes != NULL) /* despite name this is how much is free */
*TotalVolumeBytes = ((uint64)stat.f_bfree * stat.f_bsize);
return true;
}
#ifdef LINUX
static bool
symlink_is_self_exe(const char *path)
{
/* Look for "/proc/%d/exe" where %d exists in /proc/self/task/%d,
* or "/proc/self/exe". Rule out the exe link for another process
     * (though it could also be under DR, in which case we have no simple way
     * to obtain its actual app path).
*/
# define SELF_LEN_LEADER 6 /* "/proc/" */
# define SELF_LEN_TRAILER 4 /* "/exe" */
# define SELF_LEN_MAX 18
size_t len = strlen(path);
if (strcmp(path, "/proc/self/exe") == 0)
return true;
if (len < SELF_LEN_MAX && /* /proc/nnnnnn/exe */
strncmp(path, "/proc/", SELF_LEN_LEADER) == 0 &&
strncmp(path + len - SELF_LEN_TRAILER, "/exe", SELF_LEN_TRAILER) == 0) {
int pid;
if (sscanf(path + SELF_LEN_LEADER, "%d", &pid) == 1) {
char task[32];
snprintf(task, BUFFER_SIZE_ELEMENTS(task), "/proc/self/task/%d", pid);
NULL_TERMINATE_BUFFER(task);
return os_file_exists(task, true /*dir*/);
}
}
return false;
}
#endif
void
exit_process_syscall(long status)
{
    /* We now assume SYS_exit_group is defined: we no longer build on old machines,
     * though we may still execute there. We try exit_group and if it fails we use exit.
*
* FIXME: if no exit_group, kill all other threads (==processes in same addr
* space) manually? Presumably we got here b/c at an unsafe point to do
* full exit? Or is that not true: what about dr_abort()?
*/
dynamorio_syscall(SYSNUM_EXIT_PROCESS, 1, status);
/* would assert that result is -ENOSYS but assert likely calls us => infinite loop */
exit_thread_syscall(status);
ASSERT_NOT_REACHED();
}
void
exit_thread_syscall(long status)
{
#ifdef MACOS
mach_port_t thread_port = dynamorio_mach_syscall(MACH_thread_self_trap, 0);
/* FIXME i#1403: on MacOS we fail to free the app's stack: we need to pass it to
* bsdthread_terminate.
*/
dynamorio_syscall(SYSNUM_EXIT_THREAD, 4, 0, 0, thread_port, 0);
#else
dynamorio_syscall(SYSNUM_EXIT_THREAD, 1, status);
#endif
}
/* FIXME: this one will not be easily internationalizable,
 * yet it is easier to have a syslog-based Unix implementation with real strings.
 */
void
os_syslog(syslog_event_type_t priority, uint message_id, uint substitutions_num,
va_list args)
{
int native_priority;
switch (priority) {
case SYSLOG_INFORMATION: native_priority = LOG_INFO; break;
case SYSLOG_WARNING: native_priority = LOG_WARNING; break;
case SYSLOG_CRITICAL: native_priority = LOG_CRIT; break;
case SYSLOG_ERROR: native_priority = LOG_ERR; break;
default: ASSERT_NOT_REACHED();
}
/* can amount to passing a format string (careful here) to vsyslog */
/* Never let user controlled data in the format string! */
ASSERT_NOT_IMPLEMENTED(false);
}
/* This is subject to races, but should only happen at init/attach when
* there should only be one live thread.
*/
static bool
safe_read_via_query(const void *base, size_t size, void *out_buf, size_t *bytes_read)
{
bool res = false;
size_t num_read = 0;
ASSERT(!fault_handling_initialized);
/* XXX: in today's init ordering, allmem will never be initialized when we come
* here, but we check it nevertheless to be general in case this routine is
* ever called at some later time
*/
if (IF_MEMQUERY_ELSE(false, memcache_initialized()))
res = is_readable_without_exception_internal(base, size, false /*use allmem*/);
else
res = is_readable_without_exception_query_os((void *)base, size);
if (res) {
memcpy(out_buf, base, size);
num_read = size;
}
if (bytes_read != NULL)
*bytes_read = num_read;
return res;
}
bool
safe_read_ex(const void *base, size_t size, void *out_buf, size_t *bytes_read)
{
STATS_INC(num_safe_reads);
/* XXX i#350: we'd like to always use safe_read_fast() and remove this extra
* call layer, but safe_read_fast() requires fault handling to be set up.
* We do set up an early signal handler in d_r_os_init(),
     * but there is still a window prior to that with no handler.
*/
if (!fault_handling_initialized) {
return safe_read_via_query(base, size, out_buf, bytes_read);
} else {
return safe_read_fast(base, size, out_buf, bytes_read);
}
}
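/* Like d_r_safe_read(), except that before fault handling is initialized it
 * copies directly with no readability check; callers must know the source is
 * readable during that window.
 */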
bool
safe_read_if_fast(const void *base, size_t size, void *out_buf)
{
if (!fault_handling_initialized) {
memcpy(out_buf, base, size);
return true;
} else {
return safe_read_ex(base, size, out_buf, NULL);
}
}
/* FIXME - fold this together with safe_read_ex() (is a lot of places to update) */
bool
d_r_safe_read(const void *base, size_t size, void *out_buf)
{
return safe_read_ex(base, size, out_buf, NULL);
}
bool
safe_write_ex(void *base, size_t size, const void *in_buf, size_t *bytes_written)
{
return safe_write_try_except(base, size, in_buf, bytes_written);
}
/* is_readable_without_exception checks to see that all bytes with addresses
* from pc to pc+size-1 are readable and that reading from there won't
 * generate an exception. if 'query_os' is true, check what the os thinks
* the prot bits are instead of using the all memory list.
*/
static bool
is_readable_without_exception_internal(const byte *pc, size_t size, bool query_os)
{
uint prot = MEMPROT_NONE;
byte *check_pc = (byte *)ALIGN_BACKWARD(pc, PAGE_SIZE);
if (size > ((byte *)POINTER_MAX - pc))
size = (byte *)POINTER_MAX - pc;
do {
bool rc = query_os ? get_memory_info_from_os(check_pc, NULL, NULL, &prot)
: get_memory_info(check_pc, NULL, NULL, &prot);
if (!rc || !TESTANY(MEMPROT_READ | MEMPROT_EXEC, prot))
return false;
if (POINTER_OVERFLOW_ON_ADD(check_pc, PAGE_SIZE))
break;
check_pc += PAGE_SIZE;
} while (check_pc < pc + size);
return true;
}
bool
is_readable_without_exception(const byte *pc, size_t size)
{
/* case 9745 / i#853: We've had problems with all_memory_areas not being
* accurate in the past. Parsing proc maps is too slow for some apps, so we
* use a runtime option.
*/
bool query_os = IF_MEMQUERY_ELSE(true, !DYNAMO_OPTION(use_all_memory_areas));
return is_readable_without_exception_internal(pc, size, query_os);
}
/* Identical to is_readable_without_exception except that the os is queried
* for info on the indicated region */
bool
is_readable_without_exception_query_os(byte *pc, size_t size)
{
return is_readable_without_exception_internal(pc, size, true);
}
bool
is_readable_without_exception_query_os_noblock(byte *pc, size_t size)
{
if (memquery_from_os_will_block())
return false;
return is_readable_without_exception_internal(pc, size, true);
}
bool
is_user_address(byte *pc)
{
/* FIXME: NYI */
/* note returning true will always skip the case 9022 logic on Linux */
return true;
}
/* change protections on memory region starting at pc of length length
* this does not update the all memory area info
*/
bool
os_set_protection(byte *pc, size_t length, uint prot /*MEMPROT_*/)
{
app_pc start_page = (app_pc)PAGE_START(pc);
uint num_bytes = ALIGN_FORWARD(length + (pc - start_page), PAGE_SIZE);
long res = 0;
uint flags = memprot_to_osprot(prot);
DOSTATS({
/* once on each side of prot, to get on right side of writability */
if (!TEST(PROT_WRITE, flags)) {
STATS_INC(protection_change_calls);
STATS_ADD(protection_change_pages, num_bytes / PAGE_SIZE);
}
});
res = mprotect_syscall((void *)start_page, num_bytes, flags);
if (res != 0)
return false;
LOG(THREAD_GET, LOG_VMAREAS, 3,
"change_prot(" PFX ", " PIFX ", %s) => "
"mprotect(" PFX ", " PIFX ", %d)==%d pages\n",
pc, length, memprot_string(prot), start_page, num_bytes, flags,
num_bytes / PAGE_SIZE);
DOSTATS({
/* once on each side of prot, to get on right side of writability */
if (TEST(PROT_WRITE, flags)) {
STATS_INC(protection_change_calls);
STATS_ADD(protection_change_pages, num_bytes / PAGE_SIZE);
}
});
return true;
}
/* change protections on memory region starting at pc of length length */
bool
set_protection(byte *pc, size_t length, uint prot /*MEMPROT_*/)
{
if (os_set_protection(pc, length, prot) == false)
return false;
#ifndef HAVE_MEMINFO_QUERY
else {
app_pc start_page = (app_pc)PAGE_START(pc);
uint num_bytes = ALIGN_FORWARD(length + (pc - start_page), PAGE_SIZE);
memcache_update_locked(start_page, start_page + num_bytes, prot,
-1 /*type unchanged*/, true /*exists*/);
}
#endif
return true;
}
/* change protections on memory region starting at pc of length length */
bool
change_protection(byte *pc, size_t length, bool writable)
{
if (writable)
return make_writable(pc, length);
else
make_unwritable(pc, length);
return true;
}
/* make pc's page writable */
bool
make_writable(byte *pc, size_t size)
{
long res;
app_pc start_page = (app_pc)PAGE_START(pc);
size_t prot_size = (size == 0) ? PAGE_SIZE : size;
uint prot = PROT_EXEC | PROT_READ | PROT_WRITE;
    /* if we can get the current protection then keep the old read/exec flags.
* this is crucial on modern linux kernels which refuse to mark stack +x.
*/
if (!is_in_dynamo_dll(pc) /*avoid allmem assert*/ &&
#ifdef STATIC_LIBRARY
/* FIXME i#975: is_in_dynamo_dll() is always false for STATIC_LIBRARY,
* but we can't call get_memory_info() until allmem is initialized. Our
* uses before then are for patching x86.asm, which is OK.
*/
IF_NO_MEMQUERY(memcache_initialized() &&)
#endif
get_memory_info(pc, NULL, NULL, &prot))
prot |= PROT_WRITE;
ASSERT(start_page == pc && ALIGN_FORWARD(size, PAGE_SIZE) == size);
res = mprotect_syscall((void *)start_page, prot_size, prot);
LOG(THREAD_GET, LOG_VMAREAS, 3, "make_writable: pc " PFX " -> " PFX "-" PFX " %d\n",
pc, start_page, start_page + prot_size, res);
ASSERT(res == 0);
if (res != 0)
return false;
STATS_INC(protection_change_calls);
STATS_ADD(protection_change_pages, size / PAGE_SIZE);
#ifndef HAVE_MEMINFO_QUERY
/* update all_memory_areas list with the protection change */
if (memcache_initialized()) {
memcache_update_locked(start_page, start_page + prot_size,
osprot_to_memprot(prot), -1 /*type unchanged*/,
true /*exists*/);
}
#endif
return true;
}
/* like make_writable but adds COW */
bool
make_copy_on_writable(byte *pc, size_t size)
{
/* FIXME: for current usage this should be fine */
return make_writable(pc, size);
}
/* make pc's page unwritable */
void
make_unwritable(byte *pc, size_t size)
{
long res;
app_pc start_page = (app_pc)PAGE_START(pc);
size_t prot_size = (size == 0) ? PAGE_SIZE : size;
uint prot = PROT_EXEC | PROT_READ;
    /* if we can get the current protection then keep the old read/exec flags.
* this is crucial on modern linux kernels which refuse to mark stack +x.
*/
if (!is_in_dynamo_dll(pc) /*avoid allmem assert*/ &&
#ifdef STATIC_LIBRARY
/* FIXME i#975: is_in_dynamo_dll() is always false for STATIC_LIBRARY,
* but we can't call get_memory_info() until allmem is initialized. Our
* uses before then are for patching x86.asm, which is OK.
*/
IF_NO_MEMQUERY(memcache_initialized() &&)
#endif
get_memory_info(pc, NULL, NULL, &prot))
prot &= ~PROT_WRITE;
ASSERT(start_page == pc && ALIGN_FORWARD(size, PAGE_SIZE) == size);
/* inc stats before making unwritable, in case messing w/ data segment */
STATS_INC(protection_change_calls);
STATS_ADD(protection_change_pages, size / PAGE_SIZE);
res = mprotect_syscall((void *)start_page, prot_size, prot);
LOG(THREAD_GET, LOG_VMAREAS, 3, "make_unwritable: pc " PFX " -> " PFX "-" PFX "\n",
pc, start_page, start_page + prot_size);
ASSERT(res == 0);
#ifndef HAVE_MEMINFO_QUERY
/* update all_memory_areas list with the protection change */
if (memcache_initialized()) {
memcache_update_locked(start_page, start_page + prot_size,
osprot_to_memprot(prot), -1 /*type unchanged*/,
false /*!exists*/);
}
#endif
}
/****************************************************************************/
/* SYSTEM CALLS */
/* SYS_ defines are in /usr/include/bits/syscall.h
* numbers used by libc are in /usr/include/asm/unistd.h
* kernel defines are in /usr/src/linux-2.4/include/asm-i386/unistd.h
* kernel function names are in /usr/src/linux/arch/i386/kernel/entry.S
*
* For now, we've copied the SYS/NR defines from syscall.h and unistd.h
* and put them in our own local syscall.h.
*/
/* num_raw should be the xax register value.
* For a live system call, dcontext_live should be passed (for examining
* the dcontext->last_exit and exit_reason flags); otherwise, gateway should
* be passed.
*/
int
os_normalized_sysnum(int num_raw, instr_t *gateway, dcontext_t *dcontext)
{
#ifdef MACOS
/* The x64 encoding indicates the syscall type in the top 8 bits.
* We drop the 0x2000000 for BSD so we can use the SYS_ enum constants.
* That leaves 0x1000000 for Mach and 0x3000000 for Machdep.
* On 32-bit, a different encoding is used: we transform that
* to the x64 encoding minus BSD.
*/
int interrupt = 0;
int num = 0;
if (gateway != NULL) {
if (instr_is_interrupt(gateway))
interrupt = instr_get_interrupt_number(gateway);
} else {
ASSERT(dcontext != NULL);
if (TEST(LINK_SPECIAL_EXIT, dcontext->last_exit->flags)) {
if (dcontext->upcontext.upcontext.exit_reason ==
EXIT_REASON_NI_SYSCALL_INT_0x81)
interrupt = 0x81;
else {
ASSERT(dcontext->upcontext.upcontext.exit_reason ==
EXIT_REASON_NI_SYSCALL_INT_0x82);
interrupt = 0x82;
}
}
}
# ifdef X64
if (num_raw >> 24 == 0x2)
return (int)(num_raw & 0xffffff); /* Drop BSD bit */
else
num = (int)num_raw; /* Keep Mach and Machdep bits */
# else
if ((ptr_int_t)num_raw < 0) /* Mach syscall */
return (SYSCALL_NUM_MARKER_MACH | -(int)num_raw);
else {
/* Bottom 16 bits are the number, top are arg size. */
num = (int)(num_raw & 0xffff);
}
# endif
if (interrupt == 0x81)
num |= SYSCALL_NUM_MARKER_MACH;
else if (interrupt == 0x82)
num |= SYSCALL_NUM_MARKER_MACHDEP;
return num;
#else
return num_raw;
#endif
}
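/* Returns false for syscalls DR must intercept (process/thread lifecycle,
 * memory mappings, signals, fds, segment/TLS state, etc.); returns true for
 * syscalls that can execute without DR-side handling.
 */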
static bool
ignorable_system_call_normalized(int num)
{
switch (num) {
#if defined(SYS_exit_group)
case SYS_exit_group:
#endif
case SYS_exit:
#ifdef MACOS
case SYS_bsdthread_terminate:
#endif
#ifdef LINUX
case SYS_brk:
# ifdef SYS_uselib
case SYS_uselib:
# endif
#endif
#if defined(X64) || !defined(ARM)
case SYS_mmap:
#endif
#if !defined(X64) && !defined(MACOS)
case SYS_mmap2:
#endif
case SYS_munmap:
#ifdef LINUX
case SYS_mremap:
#endif
case SYS_mprotect:
#ifdef ANDROID
case SYS_prctl:
#endif
case SYS_execve:
#ifdef LINUX
case SYS_clone:
#elif defined(MACOS)
case SYS_bsdthread_create:
case SYS_posix_spawn:
#endif
#ifdef SYS_fork
case SYS_fork:
#endif
#ifdef SYS_vfork
case SYS_vfork:
#endif
case SYS_kill:
#if defined(SYS_tkill)
case SYS_tkill:
#endif
#if defined(SYS_tgkill)
case SYS_tgkill:
#endif
#if defined(LINUX) && !defined(X64) && !defined(ARM)
case SYS_signal:
#endif
#ifdef MACOS
case SYS_sigsuspend_nocancel:
#endif
#if !defined(X64) || defined(MACOS)
case SYS_sigaction:
case SYS_sigsuspend:
case SYS_sigpending:
case SYS_sigreturn:
case SYS_sigprocmask:
#endif
#ifdef LINUX
case SYS_rt_sigreturn:
case SYS_rt_sigaction:
case SYS_rt_sigprocmask:
case SYS_rt_sigpending:
case SYS_rt_sigtimedwait:
case SYS_rt_sigqueueinfo:
case SYS_rt_sigsuspend:
# ifdef SYS_signalfd
case SYS_signalfd:
# endif
case SYS_signalfd4:
#endif
case SYS_sigaltstack:
#if defined(LINUX) && !defined(X64) && !defined(ARM)
case SYS_sgetmask:
case SYS_ssetmask:
#endif
case SYS_setitimer:
case SYS_getitimer:
#ifdef MACOS
case SYS_close_nocancel:
#endif
case SYS_close:
#ifdef SYS_dup2
case SYS_dup2:
#endif
#ifdef LINUX
case SYS_dup3:
#endif
#ifdef MACOS
case SYS_fcntl_nocancel:
#endif
case SYS_fcntl:
#if defined(X64) || !defined(ARM)
case SYS_getrlimit:
#endif
#if defined(LINUX) && !defined(X64)
case SYS_ugetrlimit:
#endif
case SYS_setrlimit:
#ifdef LINUX
case SYS_prlimit64:
#endif
#if defined(LINUX) && defined(X86)
/* i#784: app may have behavior relying on SIGALRM */
case SYS_alarm:
#endif
    /* i#107: these syscalls might change/query the app's segment memory;
     * we need to stop the app from clobbering our GDT slot.
*/
#if defined(LINUX) && defined(X86) && defined(X64)
case SYS_arch_prctl:
#endif
#if defined(LINUX) && defined(X86)
case SYS_set_thread_area:
case SYS_get_thread_area:
/* FIXME: we might add SYS_modify_ldt later. */
#endif
#if defined(LINUX) && defined(ARM)
/* syscall changes app's thread register */
case SYS_set_tls:
case SYS_cacheflush:
#endif
#if defined(LINUX)
/* syscalls change procsigmask */
case SYS_pselect6:
case SYS_ppoll:
case SYS_epoll_pwait:
/* Used as a lazy trigger. */
case SYS_rseq:
#endif
return false;
#ifdef LINUX
# ifdef SYS_readlink
case SYS_readlink:
# endif
case SYS_readlinkat: return !DYNAMO_OPTION(early_inject);
#endif
default:
#ifdef VMX86_SERVER
if (is_vmkuw_sysnum(num))
return vmkuw_ignorable_system_call(num);
#endif
return true;
}
}
bool
ignorable_system_call(int num_raw, instr_t *gateway, dcontext_t *dcontext_live)
{
return ignorable_system_call_normalized(
os_normalized_sysnum(num_raw, gateway, dcontext_live));
}
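/* Argument block for the legacy 32-bit SYS_mmap interface, which passes all
 * six mmap arguments through a single user-space struct.
 */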
typedef struct {
unsigned long addr;
unsigned long len;
unsigned long prot;
unsigned long flags;
unsigned long fd;
unsigned long offset;
} mmap_arg_struct_t;
static inline reg_t *
sys_param_addr(dcontext_t *dcontext, int num)
{
/* we force-inline get_mcontext() and so don't take it as a param */
priv_mcontext_t *mc = get_mcontext(dcontext);
#if defined(X86) && defined(X64)
switch (num) {
case 0: return &mc->xdi;
case 1: return &mc->xsi;
case 2: return &mc->xdx;
case 3: return &mc->r10; /* since rcx holds retaddr for syscall instr */
case 4: return &mc->r8;
case 5: return &mc->r9;
default: CLIENT_ASSERT(false, "invalid system call parameter number");
}
#else
# ifdef MACOS
/* XXX: if we don't end up using dcontext->sys_was_int here, we could
* make that field Linux-only.
*/
/* For 32-bit, the args are passed on the stack, above a retaddr slot
* (regardless of whether using a sysenter or int gateway).
*/
return ((reg_t *)mc->esp) + 1 /*retaddr*/ + num;
# endif
/* even for vsyscall where ecx (syscall) or esp (sysenter) are saved into
* ebp, the original parameter registers are not yet changed pre-syscall,
* except for ebp, which is pushed on the stack:
* 0xffffe400 55 push %ebp %esp -> %esp (%esp)
* 0xffffe401 89 cd mov %ecx -> %ebp
* 0xffffe403 0f 05 syscall -> %ecx
*
* 0xffffe400 51 push %ecx %esp -> %esp (%esp)
* 0xffffe401 52 push %edx %esp -> %esp (%esp)
* 0xffffe402 55 push %ebp %esp -> %esp (%esp)
* 0xffffe403 89 e5 mov %esp -> %ebp
* 0xffffe405 0f 34 sysenter -> %esp
*/
switch (num) {
case 0: return &mc->IF_X86_ELSE(xbx, r0);
case 1: return &mc->IF_X86_ELSE(xcx, r1);
case 2: return &mc->IF_X86_ELSE(xdx, r2);
case 3: return &mc->IF_X86_ELSE(xsi, r3);
case 4: return &mc->IF_X86_ELSE(xdi, r4);
/* FIXME: do a safe_read: but what about performance?
* See the #if 0 below, as well. */
case 5:
return IF_X86_ELSE((dcontext->sys_was_int ? &mc->xbp : ((reg_t *)mc->xsp)),
&mc->r5);
# ifdef ARM
/* AArch32 supposedly has 7 args in some cases. */
case 6: return &mc->r6;
# endif
default: CLIENT_ASSERT(false, "invalid system call parameter number");
}
#endif
return 0;
}
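/* Convenience wrapper returning the value of syscall parameter 'num' from the
 * saved machine context.
 */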
static inline reg_t
sys_param(dcontext_t *dcontext, int num)
{
return *sys_param_addr(dcontext, num);
}
void
set_syscall_param(dcontext_t *dcontext, int param_num, reg_t new_value)
{
*sys_param_addr(dcontext, param_num) = new_value;
}
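/* Determines whether the just-completed syscall succeeded: mmap-family results
 * are validated as addresses, others as non-negative values (on MacOS, CF
 * indicates failure for BSD syscalls).
 */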
static inline bool
syscall_successful(priv_mcontext_t *mc, int normalized_sysnum)
{
#ifdef MACOS
if (TEST(SYSCALL_NUM_MARKER_MACH, normalized_sysnum)) {
/* XXX: Mach syscalls vary (for some KERN_SUCCESS=0 is success,
* for others that return mach_port_t 0 is failure (I think?).
* We defer to drsyscall.
*/
return ((ptr_int_t)MCXT_SYSCALL_RES(mc) >= 0);
} else
return !TEST(EFLAGS_CF, mc->xflags);
#else
if (normalized_sysnum == IF_X64_ELSE(SYS_mmap, SYS_mmap2) ||
# if !defined(ARM) && !defined(X64)
normalized_sysnum == SYS_mmap ||
# endif
normalized_sysnum == SYS_mremap)
return mmap_syscall_succeeded((byte *)MCXT_SYSCALL_RES(mc));
return ((ptr_int_t)MCXT_SYSCALL_RES(mc) >= 0);
#endif
}
/* For non-Mac, this does nothing to indicate "success": you can pass -errno.
* For Mac, this clears CF and just sets xax. To return a 64-bit value in
* 32-bit mode, the caller must explicitly set xdx as well (we don't always
* do so b/c syscalls that just return 32-bit values do not touch xdx).
*/
static inline void
set_success_return_val(dcontext_t *dcontext, reg_t val)
{
/* since always coming from d_r_dispatch now, only need to set mcontext */
priv_mcontext_t *mc = get_mcontext(dcontext);
#ifdef MACOS
/* On MacOS, success is determined by CF, except for Mach syscalls, but
* there it doesn't hurt to set CF.
*/
mc->xflags &= ~(EFLAGS_CF);
#endif
MCXT_SYSCALL_RES(mc) = val;
}
/* Always pass a positive value for errno */
static inline void
set_failure_return_val(dcontext_t *dcontext, uint errno_val)
{
priv_mcontext_t *mc = get_mcontext(dcontext);
#ifdef MACOS
/* On MacOS, success is determined by CF, and errno is positive */
mc->xflags |= EFLAGS_CF;
MCXT_SYSCALL_RES(mc) = errno_val;
#else
MCXT_SYSCALL_RES(mc) = -(int)errno_val;
#endif
}
#ifdef CLIENT_INTERFACE
DR_API
reg_t
dr_syscall_get_param(void *drcontext, int param_num)
{
dcontext_t *dcontext = (dcontext_t *)drcontext;
CLIENT_ASSERT(dcontext->client_data->in_pre_syscall,
"dr_syscall_get_param() can only be called from pre-syscall event");
return sys_param(dcontext, param_num);
}
DR_API
void
dr_syscall_set_param(void *drcontext, int param_num, reg_t new_value)
{
dcontext_t *dcontext = (dcontext_t *)drcontext;
CLIENT_ASSERT(dcontext->client_data->in_pre_syscall ||
dcontext->client_data->in_post_syscall,
"dr_syscall_set_param() can only be called from a syscall event");
*sys_param_addr(dcontext, param_num) = new_value;
}
DR_API
reg_t
dr_syscall_get_result(void *drcontext)
{
dcontext_t *dcontext = (dcontext_t *)drcontext;
CLIENT_ASSERT(dcontext->client_data->in_post_syscall,
"dr_syscall_get_param() can only be called from post-syscall event");
return MCXT_SYSCALL_RES(get_mcontext(dcontext));
}
DR_API
bool
dr_syscall_get_result_ex(void *drcontext, dr_syscall_result_info_t *info INOUT)
{
dcontext_t *dcontext = (dcontext_t *)drcontext;
priv_mcontext_t *mc = get_mcontext(dcontext);
CLIENT_ASSERT(dcontext->client_data->in_post_syscall,
"only call dr_syscall_get_param_ex() from post-syscall event");
CLIENT_ASSERT(info != NULL, "invalid parameter");
CLIENT_ASSERT(info->size == sizeof(*info), "invalid dr_syscall_result_info_t size");
if (info->size != sizeof(*info))
return false;
info->value = MCXT_SYSCALL_RES(mc);
info->succeeded = syscall_successful(mc, dcontext->sys_num);
if (info->use_high) {
/* MacOS has some 32-bit syscalls that return 64-bit values in
* xdx:xax, but the other syscalls don't clear xdx, so we can't easily
* return a 64-bit value all the time.
*/
IF_X86_ELSE({ info->high = mc->xdx; }, { ASSERT_NOT_REACHED(); });
}
if (info->use_errno) {
if (info->succeeded)
info->errno_value = 0;
else {
info->errno_value = (uint)IF_LINUX(-(int)) MCXT_SYSCALL_RES(mc);
}
}
return true;
}
DR_API
void
dr_syscall_set_result(void *drcontext, reg_t value)
{
dcontext_t *dcontext = (dcontext_t *)drcontext;
CLIENT_ASSERT(dcontext->client_data->in_pre_syscall ||
dcontext->client_data->in_post_syscall,
"dr_syscall_set_result() can only be called from a syscall event");
/* For non-Mac, the caller can still pass -errno and this will work */
set_success_return_val(dcontext, value);
}
DR_API
bool
dr_syscall_set_result_ex(void *drcontext, dr_syscall_result_info_t *info)
{
dcontext_t *dcontext = (dcontext_t *)drcontext;
priv_mcontext_t *mc = get_mcontext(dcontext);
CLIENT_ASSERT(dcontext->client_data->in_pre_syscall ||
dcontext->client_data->in_post_syscall,
"dr_syscall_set_result() can only be called from a syscall event");
CLIENT_ASSERT(info->size == sizeof(*info), "invalid dr_syscall_result_info_t size");
if (info->size != sizeof(*info))
return false;
if (info->use_errno) {
if (info->succeeded) {
/* a weird case but we let the user combine these */
set_success_return_val(dcontext, info->errno_value);
} else
set_failure_return_val(dcontext, info->errno_value);
} else {
if (info->succeeded)
set_success_return_val(dcontext, info->value);
else {
/* use this to set CF, even though it might negate the value */
set_failure_return_val(dcontext, (uint)info->value);
/* now set the value, overriding set_failure_return_val() */
MCXT_SYSCALL_RES(mc) = info->value;
}
if (info->use_high) {
/* MacOS has some 32-bit syscalls that return 64-bit values in
* xdx:xax.
*/
IF_X86_ELSE({ mc->xdx = info->high; }, { ASSERT_NOT_REACHED(); });
}
}
return true;
}
DR_API
void
dr_syscall_set_sysnum(void *drcontext, int new_num)
{
dcontext_t *dcontext = (dcontext_t *)drcontext;
priv_mcontext_t *mc = get_mcontext(dcontext);
CLIENT_ASSERT(dcontext->client_data->in_pre_syscall ||
dcontext->client_data->in_post_syscall,
"dr_syscall_set_sysnum() can only be called from a syscall event");
MCXT_SYSNUM_REG(mc) = new_num;
}
DR_API
void
dr_syscall_invoke_another(void *drcontext)
{
dcontext_t *dcontext = (dcontext_t *)drcontext;
CLIENT_ASSERT(dcontext->client_data->in_post_syscall,
"dr_syscall_invoke_another() can only be called from post-syscall "
"event");
LOG(THREAD, LOG_SYSCALLS, 2, "invoking additional syscall on client request\n");
dcontext->client_data->invoke_another_syscall = true;
# ifdef X86
if (get_syscall_method() == SYSCALL_METHOD_SYSENTER) {
priv_mcontext_t *mc = get_mcontext(dcontext);
/* restore xbp to xsp */
mc->xbp = mc->xsp;
}
# endif /* X86 */
/* for x64 we don't need to copy xcx into r10 b/c we use r10 as our param */
}
#endif /* CLIENT_INTERFACE */
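/* Returns whether the syscall creates a new thread of control sharing this
 * address space: clone with CLONE_VM, vfork, or bsdthread_create on Mac.
 */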
static inline bool
is_thread_create_syscall_helper(ptr_uint_t sysnum, ptr_uint_t flags)
{
#ifdef MACOS
/* XXX i#1403: we need earlier injection to intercept
* bsdthread_register in order to capture workqueue threads.
*/
return (sysnum == SYS_bsdthread_create || sysnum == SYS_vfork);
#else
# ifdef SYS_vfork
if (sysnum == SYS_vfork)
return true;
# endif
# ifdef LINUX
if (sysnum == SYS_clone && TEST(CLONE_VM, flags))
return true;
# endif
return false;
#endif
}
bool
is_thread_create_syscall(dcontext_t *dcontext)
{
priv_mcontext_t *mc = get_mcontext(dcontext);
return is_thread_create_syscall_helper(MCXT_SYSNUM_REG(mc), sys_param(dcontext, 0));
}
bool
was_thread_create_syscall(dcontext_t *dcontext)
{
return is_thread_create_syscall_helper(dcontext->sys_num,
/* flags in param0 */
dcontext->sys_param0);
}
bool
is_sigreturn_syscall_number(int sysnum)
{
#ifdef MACOS
return sysnum == SYS_sigreturn;
#else
return (IF_NOT_X64(sysnum == SYS_sigreturn ||) sysnum == SYS_rt_sigreturn);
#endif
}
bool
is_sigreturn_syscall(dcontext_t *dcontext)
{
priv_mcontext_t *mc = get_mcontext(dcontext);
return is_sigreturn_syscall_number(MCXT_SYSNUM_REG(mc));
}
bool
was_sigreturn_syscall(dcontext_t *dcontext)
{
return is_sigreturn_syscall_number(dcontext->sys_num);
}
/* process a signal this process/thread is sending to itself */
static void
handle_self_signal(dcontext_t *dcontext, uint sig)
{
/* FIXME PR 297903: watch for all DEFAULT_TERMINATE signals,
* and for any thread in the group, not just self.
*
* FIXME PR 297033: watch for SIGSTOP and SIGCONT.
*
* With -intercept_all_signals, we only need to watch for SIGKILL
* and SIGSTOP here, and we avoid the FIXMEs below. If it's fine
* for DR not to clean up on a SIGKILL, then SIGSTOP is all that's
* left (at least once we have PR 297033 and are intercepting the
* various STOP variations and CONT).
*/
if (sig == SIGABRT && !DYNAMO_OPTION(intercept_all_signals)) {
LOG(GLOBAL, LOG_TOP | LOG_SYSCALLS, 1,
"thread " TIDFMT " sending itself a SIGABRT\n", d_r_get_thread_id());
KSTOP(num_exits_dir_syscall);
/* FIXME: need to check whether app has a handler for SIGABRT! */
/* FIXME PR 211180/6723: this will do SYS_exit rather than the SIGABRT.
* Should do set_default_signal_action(SIGABRT) (and set a flag so
* no races w/ another thread re-installing?) and then SYS_kill.
*/
block_cleanup_and_terminate(dcontext, SYSNUM_EXIT_THREAD, -1, 0,
(is_last_app_thread() && !dynamo_exited),
IF_MACOS_ELSE(dcontext->thread_port, 0), 0);
ASSERT_NOT_REACHED();
}
}
/***************************************************************************
* EXECVE
*/
/* when adding here, also add to the switch in handle_execve if necessary */
enum {
ENV_PROP_RUNUNDER,
ENV_PROP_OPTIONS,
ENV_PROP_EXECVE_LOGDIR,
ENV_PROP_EXE_PATH,
ENV_PROP_CONFIGDIR,
};
static const char *const env_to_propagate[] = {
/* these must line up with the enum */
DYNAMORIO_VAR_RUNUNDER,
DYNAMORIO_VAR_OPTIONS,
/* DYNAMORIO_VAR_EXECVE_LOGDIR is different from DYNAMORIO_VAR_LOGDIR:
* - DYNAMORIO_VAR_LOGDIR: a parent dir inside which a new dir will be created;
* - DYNAMORIO_VAR_EXECVE_LOGDIR: the same subdir with the pre-execve process.
* Xref comment in create_log_dir about their precedence.
*/
DYNAMORIO_VAR_EXECVE_LOGDIR,
/* i#909: needed for early injection */
DYNAMORIO_VAR_EXE_PATH,
/* these will only be propagated if they exist */
DYNAMORIO_VAR_CONFIGDIR,
};
#define NUM_ENV_TO_PROPAGATE (sizeof(env_to_propagate) / sizeof(env_to_propagate[0]))
/* Called at pre-SYS_execve to append DR vars in the target process env vars list.
 * For late injection via libdrpreload, we call this for *all* children, because
 * even if -no_follow_children is specified, a whitelist will still ask for takeover
 * and it is libdrpreload that checks the whitelist.
* For -early, however, we check the config ahead of time and only call this routine
* if we in fact want to inject.
* XXX i#1679: these parent vs child differences bring up corner cases of which
* config dir takes precedence (if the child clears the HOME env var, e.g.).
*/
static void
add_dr_env_vars(dcontext_t *dcontext, char *inject_library_path, const char *app_path)
{
char **envp = (char **)sys_param(dcontext, 2);
int idx, j, preload = -1, ldpath = -1;
int num_old, num_new, sz;
bool need_var[NUM_ENV_TO_PROPAGATE];
int prop_idx[NUM_ENV_TO_PROPAGATE];
bool ldpath_us = false, preload_us = false;
char **new_envp, *var, *old;
/* check if any var needs to be propagated */
for (j = 0; j < NUM_ENV_TO_PROPAGATE; j++) {
prop_idx[j] = -1;
if (get_config_val(env_to_propagate[j]) == NULL)
need_var[j] = false;
else
need_var[j] = true;
}
/* Special handling for DYNAMORIO_VAR_EXECVE_LOGDIR:
* we only need it if follow_children is true and PROCESS_DIR exists.
*/
if (DYNAMO_OPTION(follow_children) && get_log_dir(PROCESS_DIR, NULL, NULL))
need_var[ENV_PROP_EXECVE_LOGDIR] = true;
else
need_var[ENV_PROP_EXECVE_LOGDIR] = false;
if (DYNAMO_OPTION(early_inject))
need_var[ENV_PROP_EXE_PATH] = true;
/* iterate the env in target process */
if (envp == NULL) {
LOG(THREAD, LOG_SYSCALLS, 3, "\tenv is NULL\n");
idx = 0;
} else {
for (idx = 0; envp[idx] != NULL; idx++) {
/* execve env vars should never be set here */
ASSERT(strstr(envp[idx], DYNAMORIO_VAR_EXECVE) != envp[idx]);
for (j = 0; j < NUM_ENV_TO_PROPAGATE; j++) {
if (strstr(envp[idx], env_to_propagate[j]) == envp[idx]) {
/* If conflict between env and cfg, we assume those env vars
* are for DR usage only, and replace them with cfg value.
*/
prop_idx[j] = idx; /* remember the index for replacing later */
break;
}
}
if (!DYNAMO_OPTION(early_inject) &&
strstr(envp[idx], "LD_LIBRARY_PATH=") == envp[idx]) {
ldpath = idx;
if (strstr(envp[idx], inject_library_path) != NULL)
ldpath_us = true;
}
if (!DYNAMO_OPTION(early_inject) &&
strstr(envp[idx], "LD_PRELOAD=") == envp[idx]) {
preload = idx;
if (strstr(envp[idx], DYNAMORIO_PRELOAD_NAME) != NULL &&
strstr(envp[idx], get_dynamorio_library_path()) != NULL) {
preload_us = true;
}
}
LOG(THREAD, LOG_SYSCALLS, 3, "\tenv %d: %s\n", idx, envp[idx]);
}
}
/* We want to add new env vars, so we create a new envp
* array. We have to deallocate them and restore the old
* envp if execve fails; if execve succeeds, the address
* space is reset so we don't need to do anything.
*/
num_old = idx;
/* how many new env vars we need add */
num_new = 2 + /* execve indicator var plus final NULL */
(DYNAMO_OPTION(early_inject)
? 0
: (((preload < 0) ? 1 : 0) + ((ldpath < 0) ? 1 : 0)));
for (j = 0; j < NUM_ENV_TO_PROPAGATE; j++) {
if ((DYNAMO_OPTION(follow_children) || j == ENV_PROP_EXE_PATH) && need_var[j] &&
prop_idx[j] < 0)
num_new++;
}
/* setup new envp */
new_envp =
heap_alloc(dcontext, sizeof(char *) * (num_old + num_new) HEAPACCT(ACCT_OTHER));
/* copy old envp */
memcpy(new_envp, envp, sizeof(char *) * num_old);
/* change/add preload and ldpath if necessary */
if (!DYNAMO_OPTION(early_inject) && !preload_us) {
int idx_preload;
LOG(THREAD, LOG_SYSCALLS, 1,
"WARNING: execve env does NOT preload DynamoRIO, forcing it!\n");
if (preload >= 0) {
/* replace the existing preload */
const char *dr_lib_path = get_dynamorio_library_path();
sz = strlen(envp[preload]) + strlen(DYNAMORIO_PRELOAD_NAME) +
strlen(dr_lib_path) + 3;
var = heap_alloc(dcontext, sizeof(char) * sz HEAPACCT(ACCT_OTHER));
old = envp[preload] + strlen("LD_PRELOAD=");
snprintf(var, sz, "LD_PRELOAD=%s %s %s", DYNAMORIO_PRELOAD_NAME, dr_lib_path,
old);
idx_preload = preload;
} else {
/* add new preload */
const char *dr_lib_path = get_dynamorio_library_path();
sz = strlen("LD_PRELOAD=") + strlen(DYNAMORIO_PRELOAD_NAME) +
strlen(dr_lib_path) + 2;
var = heap_alloc(dcontext, sizeof(char) * sz HEAPACCT(ACCT_OTHER));
snprintf(var, sz, "LD_PRELOAD=%s %s", DYNAMORIO_PRELOAD_NAME, dr_lib_path);
idx_preload = idx++;
}
*(var + sz - 1) = '\0'; /* null terminate */
new_envp[idx_preload] = var;
LOG(THREAD, LOG_SYSCALLS, 2, "\tnew env %d: %s\n", idx_preload,
new_envp[idx_preload]);
}
if (!DYNAMO_OPTION(early_inject) && !ldpath_us) {
int idx_ldpath;
if (ldpath >= 0) {
sz = strlen(envp[ldpath]) + strlen(inject_library_path) + 2;
var = heap_alloc(dcontext, sizeof(char) * sz HEAPACCT(ACCT_OTHER));
old = envp[ldpath] + strlen("LD_LIBRARY_PATH=");
snprintf(var, sz, "LD_LIBRARY_PATH=%s:%s", inject_library_path, old);
idx_ldpath = ldpath;
} else {
sz = strlen("LD_LIBRARY_PATH=") + strlen(inject_library_path) + 1;
var = heap_alloc(dcontext, sizeof(char) * sz HEAPACCT(ACCT_OTHER));
snprintf(var, sz, "LD_LIBRARY_PATH=%s", inject_library_path);
idx_ldpath = idx++;
}
*(var + sz - 1) = '\0'; /* null terminate */
new_envp[idx_ldpath] = var;
LOG(THREAD, LOG_SYSCALLS, 2, "\tnew env %d: %s\n", idx_ldpath,
new_envp[idx_ldpath]);
}
/* propagating DR env vars */
for (j = 0; j < NUM_ENV_TO_PROPAGATE; j++) {
const char *val = "";
if (!need_var[j])
continue;
if (!DYNAMO_OPTION(follow_children) && j != ENV_PROP_EXE_PATH)
continue;
switch (j) {
case ENV_PROP_RUNUNDER:
ASSERT(strcmp(env_to_propagate[j], DYNAMORIO_VAR_RUNUNDER) == 0);
            /* Must pass RUNUNDER_ALL to get the child injected if it has no app config.
             * If the rununder var is already set we assume it is set to 1.
*/
ASSERT((RUNUNDER_ON | RUNUNDER_ALL) == 0x3); /* else, update "3" */
val = "3";
break;
case ENV_PROP_OPTIONS:
ASSERT(strcmp(env_to_propagate[j], DYNAMORIO_VAR_OPTIONS) == 0);
val = d_r_option_string;
break;
case ENV_PROP_EXECVE_LOGDIR:
/* we use PROCESS_DIR for DYNAMORIO_VAR_EXECVE_LOGDIR */
ASSERT(strcmp(env_to_propagate[j], DYNAMORIO_VAR_EXECVE_LOGDIR) == 0);
ASSERT(get_log_dir(PROCESS_DIR, NULL, NULL));
break;
case ENV_PROP_EXE_PATH:
ASSERT(strcmp(env_to_propagate[j], DYNAMORIO_VAR_EXE_PATH) == 0);
val = app_path;
break;
default:
val = getenv(env_to_propagate[j]);
if (val == NULL)
val = "";
break;
}
if (j == ENV_PROP_EXECVE_LOGDIR) {
uint logdir_length;
get_log_dir(PROCESS_DIR, NULL, &logdir_length);
/* logdir_length includes the terminating NULL */
sz = strlen(DYNAMORIO_VAR_EXECVE_LOGDIR) + logdir_length + 1 /* '=' */;
var = heap_alloc(dcontext, sizeof(char) * sz HEAPACCT(ACCT_OTHER));
snprintf(var, sz, "%s=", DYNAMORIO_VAR_EXECVE_LOGDIR);
get_log_dir(PROCESS_DIR, var + strlen(var), &logdir_length);
} else {
sz = strlen(env_to_propagate[j]) + strlen(val) + 2 /* '=' + null */;
var = heap_alloc(dcontext, sizeof(char) * sz HEAPACCT(ACCT_OTHER));
snprintf(var, sz, "%s=%s", env_to_propagate[j], val);
}
*(var + sz - 1) = '\0'; /* null terminate */
prop_idx[j] = (prop_idx[j] >= 0) ? prop_idx[j] : idx++;
new_envp[prop_idx[j]] = var;
LOG(THREAD, LOG_SYSCALLS, 2, "\tnew env %d: %s\n", prop_idx[j],
new_envp[prop_idx[j]]);
}
if (!DYNAMO_OPTION(follow_children) && !DYNAMO_OPTION(early_inject)) {
if (prop_idx[ENV_PROP_RUNUNDER] >= 0) {
            /* disable auto-following of this execve, yet still allow preload
             * on the other side to inject if a config file exists.
             * kind of a hacky mangle here:
*/
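            /* Mangling the leading 'D' to 'X' changes the var's name so it no
             * longer matches the rununder var name and is ignored in the child.
             */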
ASSERT(!need_var[ENV_PROP_RUNUNDER]);
ASSERT(new_envp[prop_idx[ENV_PROP_RUNUNDER]][0] == 'D');
new_envp[prop_idx[ENV_PROP_RUNUNDER]][0] = 'X';
}
}
sz = strlen(DYNAMORIO_VAR_EXECVE) + 4;
/* we always pass this var to indicate "post-execve" */
var = heap_alloc(dcontext, sizeof(char) * sz HEAPACCT(ACCT_OTHER));
/* PR 458917: we overload this to also pass our gdt index */
ASSERT(os_tls_get_gdt_index(dcontext) < 100 &&
os_tls_get_gdt_index(dcontext) >= -1); /* only 2 chars allocated */
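    /* With "%02d", a gdt index of -1 prints as "-1", which still fits in the two
     * characters allotted here.
     */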
snprintf(var, sz, "%s=%02d", DYNAMORIO_VAR_EXECVE, os_tls_get_gdt_index(dcontext));
*(var + sz - 1) = '\0'; /* null terminate */
new_envp[idx++] = var;
LOG(THREAD, LOG_SYSCALLS, 2, "\tnew env %d: %s\n", idx - 1, new_envp[idx - 1]);
/* must end with NULL */
new_envp[idx++] = NULL;
ASSERT((num_new + num_old) == idx);
/* update syscall param */
*sys_param_addr(dcontext, 2) = (reg_t)new_envp; /* OUT */
/* store for reset in case execve fails, and for cleanup if
* this is a vfork thread
*/
dcontext->sys_param0 = (reg_t)envp;
dcontext->sys_param1 = (reg_t)new_envp;
}
static ssize_t
script_file_reader(const char *pathname, void *buf, size_t count)
{
/* FIXME i#2090: Check file is executable. */
file_t file = os_open(pathname, OS_OPEN_READ);
size_t len;
if (file == INVALID_FILE)
return -1;
len = os_read(file, buf, count);
os_close(file);
return len;
}
/* For early injection, recognise when the executable is a script ("#!") and
* modify the syscall parameters to invoke a script interpreter instead. In
* this case we will have allocated memory here but we expect the caller to
* do a non-failing execve of libdynamorio.so and therefore not to have to
* free the memory. That is one reason for checking that the (final) script
* interpreter really is an executable binary.
* We recognise one error case here and return the non-zero error code (ELOOP)
* but in other cases we leave it up to the caller to detect the error, which
* it may do by attempting to exec the path natively, expecting this to fail,
* though there is the obvious danger that the file might have been modified
* just before the exec.
* We do not, and cannot easily, handle a file that is executable but not
* readable. Currently such files will be executed without DynamoRIO though
* in some situations it would be more helpful to stop with an error.
*
 * XXX: There is a minor transparency bug with malformed binaries. For example,
* execve can return EINVAL if the ELF executable has more than one PT_INTERP
* segment but we do not check this and so under DynamoRIO the error would be
* detected only after the exec, if we are following the child.
*
* FIXME i#2091: There is a memory leak if a script is recognised, and it is
* later decided not to inject (see where should_inject is set), and the exec
* fails, because in this case there is no mechanism for freeing the memory
* allocated in this function. This function should return sufficient information
 * for the caller to free the memory, which it could then do before the exec if it
 * reverts to the original syscall arguments and execs the script.
*/
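/* Illustrative example (paths hypothetical): if "/usr/bin/foo" starts with
 * "#!/bin/sh -e" and the app calls execve("/usr/bin/foo", {"foo", "arg1", NULL},
 * envp), the syscall is rewritten to effectively become
 * execve("/bin/sh", {"/bin/sh", "-e", "/usr/bin/foo", "arg1", NULL}, envp).
 */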
static int
handle_execve_script(dcontext_t *dcontext)
{
char *fname = (char *)sys_param(dcontext, 0);
char **orig_argv = (char **)sys_param(dcontext, 1);
script_interpreter_t *script;
int ret = 0;
script = global_heap_alloc(sizeof(*script) HEAPACCT(ACCT_OTHER));
if (!find_script_interpreter(script, fname, script_file_reader))
goto free_and_return;
if (script->argc == 0) {
ret = ELOOP;
goto free_and_return;
}
/* Check that the final interpreter is an executable binary. */
{
file_t file = os_open(script->argv[0], OS_OPEN_READ);
bool is64;
if (file == INVALID_FILE)
goto free_and_return;
if (!module_file_is_module64(file, &is64, NULL)) {
os_close(file);
goto free_and_return;
}
}
{
size_t i, orig_argc = 0;
char **new_argv;
/* Concatenate new arguments and original arguments. */
while (orig_argv[orig_argc] != NULL)
++orig_argc;
if (orig_argc == 0)
orig_argc = 1;
new_argv = global_heap_alloc((script->argc + orig_argc + 1) *
sizeof(char *) HEAPACCT(ACCT_OTHER));
for (i = 0; i < script->argc; i++)
new_argv[i] = script->argv[i];
new_argv[script->argc] = fname; /* replaces orig_argv[0] */
for (i = 1; i < orig_argc; i++)
new_argv[script->argc + i] = orig_argv[i];
new_argv[script->argc + orig_argc] = NULL;
/* Modify syscall parameters. */
*sys_param_addr(dcontext, 0) = (reg_t)new_argv[0];
*sys_param_addr(dcontext, 1) = (reg_t)new_argv;
}
return 0;
free_and_return:
global_heap_free(script, sizeof(*script) HEAPACCT(ACCT_OTHER));
return ret;
}
static int
handle_execve(dcontext_t *dcontext)
{
/* in /usr/src/linux/arch/i386/kernel/process.c:
* asmlinkage int sys_execve(struct pt_regs regs) { ...
* error = do_execve(filename, (char **) regs.xcx, (char **) regs.xdx, ®s);
* in fs/exec.c:
* int do_execve(char * filename, char ** argv, char ** envp, struct pt_regs * regs)
*/
/* We need to make sure we get injected into the new image:
* we simply make sure LD_PRELOAD contains us, and that our directory
* is on LD_LIBRARY_PATH (seems not to work to put absolute paths in
* LD_PRELOAD).
* FIXME: this doesn't work for setuid programs
*
* For -follow_children we also pass the current DYNAMORIO_RUNUNDER and
* DYNAMORIO_OPTIONS and logdir to the new image to support a simple
* run-all-children model without bothering w/ setting up config files for
* children, and to support injecting across execve that does not
* preserve $HOME.
* FIXME i#287/PR 546544: we'll need to propagate DYNAMORIO_AUTOINJECT too
* once we use it in preload
*/
/* FIXME i#191: supposed to preserve things like pending signal
* set across execve: going to ignore for now
*/
char *fname;
bool x64 = IF_X64_ELSE(true, false);
bool expect_to_fail = false;
bool should_inject;
file_t file;
char *inject_library_path;
char rununder_buf[16]; /* just an integer printed in ascii */
bool app_specific, from_env, rununder_on;
#if defined(LINUX) || defined(DEBUG)
const char **argv;
#endif
if (DYNAMO_OPTION(follow_children) && DYNAMO_OPTION(early_inject)) {
int ret = handle_execve_script(dcontext);
if (ret != 0)
return ret;
}
fname = (char *)sys_param(dcontext, 0);
#if defined(LINUX) || defined(DEBUG)
argv = (const char **)sys_param(dcontext, 1);
#endif
#ifdef LINUX
if (DYNAMO_OPTION(early_inject) && symlink_is_self_exe(fname)) {
/* i#907: /proc/self/exe points at libdynamorio.so. Make sure we run
* the right thing here.
*/
fname = get_application_name();
}
#endif
LOG(GLOBAL, LOG_ALL, 1,
"\n---------------------------------------------------------------------------"
"\n");
LOG(THREAD, LOG_ALL, 1,
"\n---------------------------------------------------------------------------"
"\n");
DODEBUG({
int i;
SYSLOG_INTERNAL_INFO("-- execve %s --", fname);
LOG(THREAD, LOG_SYSCALLS, 1, "syscall: execve %s\n", fname);
LOG(GLOBAL, LOG_TOP | LOG_SYSCALLS, 1, "execve %s\n", fname);
if (d_r_stats->loglevel >= 3) {
if (argv == NULL) {
LOG(THREAD, LOG_SYSCALLS, 3, "\targs are NULL\n");
} else {
for (i = 0; argv[i] != NULL; i++) {
LOG(THREAD, LOG_SYSCALLS, 2, "\targ %d: len=%d\n", i,
strlen(argv[i]));
LOG(THREAD, LOG_SYSCALLS, 3, "\targ %d: %s\n", i, argv[i]);
}
}
}
});
/* i#237/PR 498284: if we're a vfork "thread" we're really in a different
* process and if we exec then the parent process will still be alive. We
* can't easily clean our own state (dcontext, dstack, etc.) up in our
* parent process: we need it to invoke the syscall and the syscall might
* fail. We could expand cleanup_and_terminate to also be able to invoke
* SYS_execve: but execve seems more likely to fail than termination
* syscalls. Our solution is to mark this thread as "execve" and hide it
* from regular thread queries; we clean it up in the process-exiting
* synch_with_thread(), or if the same parent thread performs another vfork
* (to prevent heap accumulation from repeated vfork+execve). Since vfork
* on linux suspends the parent, there cannot be any races with the execve
* syscall completing: there can't even be peer vfork threads, so we could
* set a flag and clean up in d_r_dispatch, but that seems overkill. (If vfork
* didn't suspend the parent we'd need to touch a marker file or something
* to know the execve was finished.)
*/
mark_thread_execve(dcontext->thread_record, true);
#ifdef STATIC_LIBRARY
/* no way we can inject, we just lose control */
SYSLOG_INTERNAL_WARNING("WARNING: static DynamoRIO library, losing control on "
"execve");
return 0;
#endif
/* Issue 20: handle cross-architecture execve */
file = os_open(fname, OS_OPEN_READ);
if (file != INVALID_FILE) {
if (!module_file_is_module64(file, &x64,
NULL /*only care about primary==execve*/))
expect_to_fail = true;
os_close(file);
} else
expect_to_fail = true;
inject_library_path =
IF_X64_ELSE(x64, !x64) ? dynamorio_library_path : dynamorio_alt_arch_path;
should_inject = DYNAMO_OPTION(follow_children);
if (get_config_val_other_app(get_short_name(fname), get_process_id(),
x64 ? DR_PLATFORM_64BIT : DR_PLATFORM_32BIT,
DYNAMORIO_VAR_RUNUNDER, rununder_buf,
BUFFER_SIZE_ELEMENTS(rununder_buf), &app_specific,
&from_env, NULL /* 1config is ok */)) {
if (should_inject_from_rununder(rununder_buf, app_specific, from_env,
&rununder_on))
should_inject = rununder_on;
}
if (should_inject)
add_dr_env_vars(dcontext, inject_library_path, fname);
else {
dcontext->sys_param0 = 0;
dcontext->sys_param1 = 0;
}
#ifdef LINUX
/* We have to be accurate with expect_to_fail as we cannot come back
* and fail the syscall once the kernel execs DR!
*/
if (should_inject && DYNAMO_OPTION(early_inject) && !expect_to_fail) {
/* i#909: change the target image to libdynamorio.so */
const char *drpath = IF_X64_ELSE(x64, !x64) ? dynamorio_library_filepath
: dynamorio_alt_arch_filepath;
TRY_EXCEPT(dcontext, /* try */
{
if (symlink_is_self_exe(argv[0])) {
/* we're out of sys_param entries so we assume argv[0] == fname
*/
dcontext->sys_param3 = (reg_t)argv;
argv[0] = fname; /* XXX: handle readable but not writable! */
} else
dcontext->sys_param3 = 0; /* no restore in post */
dcontext->sys_param4 =
(reg_t)fname; /* store for restore in post */
*sys_param_addr(dcontext, 0) = (reg_t)drpath;
LOG(THREAD, LOG_SYSCALLS, 2, "actual execve on: %s\n",
(char *)sys_param(dcontext, 0));
},
/* except */
{
dcontext->sys_param3 = 0; /* no restore in post */
dcontext->sys_param4 = 0; /* no restore in post */
LOG(THREAD, LOG_SYSCALLS, 2,
"argv is unreadable, expect execve to fail\n");
});
} else {
dcontext->sys_param3 = 0; /* no restore in post */
dcontext->sys_param4 = 0; /* no restore in post */
}
#endif
/* we need to clean up the .1config file here. if the execve fails,
* we'll just live w/o dynamic option re-read.
*/
d_r_config_exit();
return 0;
}
static void
handle_execve_post(dcontext_t *dcontext)
{
/* if we get here it means execve failed (doesn't return on success),
* or we did an execve from a vfork and its memory changes are visible
* in the parent process.
* we have to restore env to how it was and free the allocated heap.
*/
char **old_envp = (char **)dcontext->sys_param0;
char **new_envp = (char **)dcontext->sys_param1;
#ifdef STATIC_LIBRARY
/* nothing to clean up */
return;
#endif
#ifdef LINUX
if (dcontext->sys_param4 != 0) {
/* restore original /proc/.../exe */
*sys_param_addr(dcontext, 0) = dcontext->sys_param4;
if (dcontext->sys_param3 != 0) {
/* restore original argv[0] */
const char **argv = (const char **)dcontext->sys_param3;
argv[0] = (const char *)dcontext->sys_param4;
}
}
#endif
if (new_envp != NULL) {
int i;
LOG(THREAD, LOG_SYSCALLS, 2, "\tcleaning up our env vars\n");
/* we replaced existing ones and/or added new ones.
* we can't compare to old_envp b/c it may have changed by now.
*/
for (i = 0; new_envp[i] != NULL; i++) {
if (is_dynamo_address((byte *)new_envp[i])) {
heap_free(dcontext, new_envp[i],
sizeof(char) * (strlen(new_envp[i]) + 1) HEAPACCT(ACCT_OTHER));
}
}
i++; /* need to de-allocate final null slot too */
heap_free(dcontext, new_envp, sizeof(char *) * i HEAPACCT(ACCT_OTHER));
/* restore prev envp if we're post-syscall */
if (!dcontext->thread_record->execve)
*sys_param_addr(dcontext, 2) = (reg_t)old_envp;
}
}
/* i#237/PR 498284: to avoid accumulation of thread state we clean up a vfork
* child who invoked execve here so we have at most one outstanding thread. we
* also clean up at process exit and before thread creation. we could do this
* in d_r_dispatch but too rare to be worth a flag check there.
*/
static void
cleanup_after_vfork_execve(dcontext_t *dcontext)
{
thread_record_t **threads;
int num_threads, i;
if (num_execve_threads == 0)
return;
d_r_mutex_lock(&thread_initexit_lock);
get_list_of_threads_ex(&threads, &num_threads, true /*include execve*/);
for (i = 0; i < num_threads; i++) {
if (threads[i]->execve) {
LOG(THREAD, LOG_SYSCALLS, 2, "cleaning up earlier vfork thread " TIDFMT "\n",
threads[i]->id);
dynamo_other_thread_exit(threads[i]);
}
}
d_r_mutex_unlock(&thread_initexit_lock);
global_heap_free(threads,
num_threads * sizeof(thread_record_t *) HEAPACCT(ACCT_THREAD_MGT));
}
static void
set_stdfile_fileno(stdfile_t **stdfile, file_t file_no)
{
#ifdef STDFILE_FILENO
(*stdfile)->STDFILE_FILENO = file_no;
#else
# warning stdfile_t is opaque; DynamoRIO will not set fds of libc FILEs.
/* i#1973: musl libc support (and potentially other non-glibcs) */
/* only called by handle_close_pre(), so warning is specific to that. */
SYSLOG_INTERNAL_WARNING_ONCE(
"DynamoRIO cannot set the file descriptors of private libc FILEs on "
"this platform. Client usage of stdio.h stdin, stdout, or stderr may "
"no longer work as expected, because the app is closing the UNIX fds "
"backing these.");
#endif
}
/* returns whether to execute syscall */
static bool
handle_close_pre(dcontext_t *dcontext)
{
/* in fs/open.c: asmlinkage long sys_close(unsigned int fd) */
uint fd = (uint)sys_param(dcontext, 0);
LOG(THREAD, LOG_SYSCALLS, 3, "syscall: close fd %d\n", fd);
/* prevent app from closing our files */
if (fd_is_dr_owned(fd)) {
SYSLOG_INTERNAL_WARNING_ONCE("app trying to close DR file(s)");
LOG(THREAD, LOG_TOP | LOG_SYSCALLS, 1,
"WARNING: app trying to close DR file %d! Not allowing it.\n", fd);
if (DYNAMO_OPTION(fail_on_stolen_fds)) {
set_failure_return_val(dcontext, EBADF);
DODEBUG({ dcontext->expect_last_syscall_to_fail = true; });
} else
set_success_return_val(dcontext, 0);
return false; /* do not execute syscall */
}
/* Xref PR 258731 - duplicate STDOUT/STDERR when app closes them so we (or
* a client) can continue to use them for logging. */
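    /* The same duplicate-and-retarget sequence below is applied in turn to
     * stdout, stderr, and stdin.
     */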
if (DYNAMO_OPTION(dup_stdout_on_close) && fd == STDOUT) {
our_stdout = fd_priv_dup(fd);
if (our_stdout < 0) /* no private fd available */
our_stdout = dup_syscall(fd);
if (our_stdout >= 0)
fd_mark_close_on_exec(our_stdout);
fd_table_add(our_stdout, 0);
LOG(THREAD, LOG_TOP | LOG_SYSCALLS, 1,
"WARNING: app is closing stdout=%d - duplicating descriptor for "
"DynamoRIO usage got %d.\n",
fd, our_stdout);
if (privmod_stdout != NULL &&
IF_CLIENT_INTERFACE_ELSE(INTERNAL_OPTION(private_loader), false)) {
/* update the privately loaded libc's stdout _fileno. */
set_stdfile_fileno(privmod_stdout, our_stdout);
}
}
if (DYNAMO_OPTION(dup_stderr_on_close) && fd == STDERR) {
our_stderr = fd_priv_dup(fd);
if (our_stderr < 0) /* no private fd available */
our_stderr = dup_syscall(fd);
if (our_stderr >= 0)
fd_mark_close_on_exec(our_stderr);
fd_table_add(our_stderr, 0);
LOG(THREAD, LOG_TOP | LOG_SYSCALLS, 1,
"WARNING: app is closing stderr=%d - duplicating descriptor for "
"DynamoRIO usage got %d.\n",
fd, our_stderr);
if (privmod_stderr != NULL &&
IF_CLIENT_INTERFACE_ELSE(INTERNAL_OPTION(private_loader), false)) {
/* update the privately loaded libc's stderr _fileno. */
set_stdfile_fileno(privmod_stderr, our_stderr);
}
}
if (DYNAMO_OPTION(dup_stdin_on_close) && fd == STDIN) {
our_stdin = fd_priv_dup(fd);
if (our_stdin < 0) /* no private fd available */
our_stdin = dup_syscall(fd);
if (our_stdin >= 0)
fd_mark_close_on_exec(our_stdin);
fd_table_add(our_stdin, 0);
LOG(THREAD, LOG_TOP | LOG_SYSCALLS, 1,
"WARNING: app is closing stdin=%d - duplicating descriptor for "
"DynamoRIO usage got %d.\n",
fd, our_stdin);
if (privmod_stdin != NULL &&
IF_CLIENT_INTERFACE_ELSE(INTERNAL_OPTION(private_loader), false)) {
            /* update the privately loaded libc's stdin _fileno. */
set_stdfile_fileno(privmod_stdin, our_stdin);
}
}
return true;
}
/***************************************************************************/
/* Used to obtain the pc of the syscall instr itself when the dcontext dc
* is currently in a syscall handler.
* Alternatively for sysenter we could set app_sysenter_instr_addr for Linux.
*/
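/* For int/syscall we back up by the instruction length from the post-syscall pc;
 * for sysenter we rely on the recorded end of the vsyscall sysenter sequence.
 */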
#define SYSCALL_PC(dc) \
((get_syscall_method() == SYSCALL_METHOD_INT || \
get_syscall_method() == SYSCALL_METHOD_SYSCALL) \
? (ASSERT(SYSCALL_LENGTH == INT_LENGTH), POST_SYSCALL_PC(dc) - INT_LENGTH) \
: (vsyscall_syscall_end_pc - SYSENTER_LENGTH))
static void
handle_exit(dcontext_t *dcontext)
{
priv_mcontext_t *mc = get_mcontext(dcontext);
bool exit_process = false;
if (dcontext->sys_num == SYSNUM_EXIT_PROCESS) {
/* We can have multiple thread groups within the same address space.
* We need to know whether this is the only group left.
* FIXME: we can have races where new threads are created after our
* check: we'll live with that for now, but the right approach is to
* suspend all threads via synch_with_all_threads(), do the check,
* and if exit_process then exit w/o resuming: though have to
* coordinate lock access w/ cleanup_and_terminate.
* Xref i#94. Xref PR 541760.
*/
process_id_t mypid = get_process_id();
thread_record_t **threads;
int num_threads, i;
exit_process = true;
d_r_mutex_lock(&thread_initexit_lock);
get_list_of_threads(&threads, &num_threads);
for (i = 0; i < num_threads; i++) {
if (threads[i]->pid != mypid && !IS_CLIENT_THREAD(threads[i]->dcontext)) {
exit_process = false;
break;
}
}
if (!exit_process) {
/* We need to clean up the other threads in our group here. */
thread_id_t myid = d_r_get_thread_id();
priv_mcontext_t mcontext;
DEBUG_DECLARE(thread_synch_result_t synch_res;)
LOG(THREAD, LOG_TOP | LOG_SYSCALLS, 1,
"SYS_exit_group %d not final group: %d cleaning up just "
"threads in group\n",
get_process_id(), d_r_get_thread_id());
/* Set where we are to handle reciprocal syncs */
copy_mcontext(mc, &mcontext);
mc->pc = SYSCALL_PC(dcontext);
for (i = 0; i < num_threads; i++) {
if (threads[i]->id != myid && threads[i]->pid == mypid) {
/* See comments in dynamo_process_exit_cleanup(): we terminate
* to make cleanup easier, but may want to switch to shifting
* the target thread to a stack-free loop.
*/
DEBUG_DECLARE(synch_res =)
synch_with_thread(
threads[i]->id, true /*block*/, true /*have initexit lock*/,
THREAD_SYNCH_VALID_MCONTEXT, THREAD_SYNCH_TERMINATED_AND_CLEANED,
THREAD_SYNCH_SUSPEND_FAILURE_IGNORE);
/* initexit lock may be released and re-acquired in course of
* doing the synch so we may have races where the thread
* exits on its own (or new threads appear): we'll live
* with those for now.
*/
ASSERT(synch_res == THREAD_SYNCH_RESULT_SUCCESS);
}
}
copy_mcontext(&mcontext, mc);
}
d_r_mutex_unlock(&thread_initexit_lock);
global_heap_free(
threads, num_threads * sizeof(thread_record_t *) HEAPACCT(ACCT_THREAD_MGT));
}
if (is_last_app_thread() && !dynamo_exited) {
LOG(THREAD, LOG_TOP | LOG_SYSCALLS, 1,
"SYS_exit%s(%d) in final thread " TIDFMT " of " PIDFMT
" => exiting DynamoRIO\n",
(dcontext->sys_num == SYSNUM_EXIT_PROCESS) ? "_group" : "",
MCXT_SYSNUM_REG(mc), d_r_get_thread_id(), get_process_id());
/* we want to clean up even if not automatic startup! */
automatic_startup = true;
exit_process = true;
} else {
LOG(THREAD, LOG_TOP | LOG_THREADS | LOG_SYSCALLS, 1,
"SYS_exit%s(%d) in thread " TIDFMT " of " PIDFMT " => cleaning up %s\n",
(dcontext->sys_num == SYSNUM_EXIT_PROCESS) ? "_group" : "",
MCXT_SYSNUM_REG(mc), d_r_get_thread_id(), get_process_id(),
exit_process ? "process" : "thread");
}
KSTOP(num_exits_dir_syscall);
block_cleanup_and_terminate(dcontext, MCXT_SYSNUM_REG(mc), sys_param(dcontext, 0),
sys_param(dcontext, 1), exit_process,
/* SYS_bsdthread_terminate has 2 more args */
sys_param(dcontext, 2), sys_param(dcontext, 3));
}
#if defined(LINUX) && defined(X86) /* XXX i#58: just until we have Mac support \
*/
static bool
os_set_app_thread_area(dcontext_t *dcontext, our_modify_ldt_t *user_desc)
{
# ifdef X86
int i;
    os_thread_data_t *ostd = (os_thread_data_t *)dcontext->os_field;
our_modify_ldt_t *desc = (our_modify_ldt_t *)ostd->app_thread_areas;
if (user_desc->seg_not_present == 1) {
/* find an empty one to update */
for (i = 0; i < GDT_NUM_TLS_SLOTS; i++) {
if (desc[i].seg_not_present == 1)
break;
}
if (i < GDT_NUM_TLS_SLOTS) {
user_desc->entry_number = GDT_SELECTOR(i + tls_min_index());
memcpy(&desc[i], user_desc, sizeof(*user_desc));
} else
return false;
} else {
/* If we used early injection, this might be ld.so trying to set up TLS. We
* direct the app to use the GDT entry we already set up for our private
* libraries, but only the first time it requests TLS.
*/
if (user_desc->entry_number == -1 && return_stolen_lib_tls_gdt) {
d_r_mutex_lock(&set_thread_area_lock);
if (return_stolen_lib_tls_gdt) {
uint selector = read_thread_register(LIB_SEG_TLS);
uint index = SELECTOR_INDEX(selector);
SELF_UNPROTECT_DATASEC(DATASEC_RARELY_PROT);
return_stolen_lib_tls_gdt = false;
SELF_PROTECT_DATASEC(DATASEC_RARELY_PROT);
user_desc->entry_number = index;
LOG(GLOBAL, LOG_THREADS, 2,
"%s: directing app to use "
"selector 0x%x for first call to set_thread_area\n",
__FUNCTION__, selector);
}
d_r_mutex_unlock(&set_thread_area_lock);
}
/* update the specific one */
i = user_desc->entry_number - tls_min_index();
if (i < 0 || i >= GDT_NUM_TLS_SLOTS)
return false;
LOG(GLOBAL, LOG_THREADS, 2,
"%s: change selector 0x%x base from " PFX " to " PFX "\n", __FUNCTION__,
GDT_SELECTOR(user_desc->entry_number), desc[i].base_addr,
user_desc->base_addr);
memcpy(&desc[i], user_desc, sizeof(*user_desc));
}
    /* if it does not conflict with DR's TLS, perform the syscall */
if (IF_CLIENT_INTERFACE_ELSE(!INTERNAL_OPTION(private_loader), true) &&
GDT_SELECTOR(user_desc->entry_number) != read_thread_register(SEG_TLS) &&
GDT_SELECTOR(user_desc->entry_number) != read_thread_register(LIB_SEG_TLS))
return false;
# elif defined(ARM)
/* FIXME i#1551: NYI on ARM */
ASSERT_NOT_IMPLEMENTED(false);
# endif /* X86/ARM */
return true;
}
static bool
os_get_app_thread_area(dcontext_t *dcontext, our_modify_ldt_t *user_desc)
{
# ifdef X86
os_thread_data_t *ostd = (os_thread_data_t *)dcontext->os_field;
our_modify_ldt_t *desc = (our_modify_ldt_t *)ostd->app_thread_areas;
int i = user_desc->entry_number - tls_min_index();
if (i < 0 || i >= GDT_NUM_TLS_SLOTS)
return false;
if (desc[i].seg_not_present == 1)
return false;
# elif defined(ARM)
/* FIXME i#1551: NYI on ARM */
ASSERT_NOT_IMPLEMENTED(false);
# endif /* X86/ARM */
return true;
}
#endif
/* This function is used to switch the lib TLS segment when creating a thread.
 * We switch to the app's lib TLS seg before a thread-creation system call, i.e.,
 * clone and vfork, and switch back to DR's lib TLS seg after the system call.
 * It is only called on the parent thread, not the child thread.
 * The child thread's TLS is set up in os_tls_app_seg_init.
*/
/* XXX: It looks like the Linux kernel has some dependency on the segment
 * descriptor. If we use DR's segment descriptor, the created thread gets an
 * access violation because its TLS is not set up. However, it works fine if we
 * switch to the app's segment descriptor before creating the thread.
 * We should be able to remove this function later once we find the root cause.
*/
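/* A typical call sequence (sketch): os_switch_lib_tls(dcontext, true) just before
 * the clone/vfork syscall in the parent, and os_switch_lib_tls(dcontext, false)
 * once the syscall returns in the parent.
 */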
static bool
os_switch_lib_tls(dcontext_t *dcontext, bool to_app)
{
return os_switch_seg_to_context(dcontext, LIB_SEG_TLS, to_app);
}
#ifdef X86
/* dcontext can be NULL if !to_app */
static bool
os_switch_seg_to_base(dcontext_t *dcontext, os_local_state_t *os_tls, reg_id_t seg,
bool to_app, app_pc base)
{
bool res = false;
ASSERT(dcontext != NULL);
ASSERT(IF_X86_ELSE((seg == SEG_FS || seg == SEG_GS),
                       (seg == DR_REG_TPIDRURW || seg == DR_REG_TPIDRURO)));
switch (os_tls->tls_type) {
# if defined(X64) && !defined(MACOS)
case TLS_TYPE_ARCH_PRCTL: {
res = tls_set_fs_gs_segment_base(os_tls->tls_type, seg, base, NULL);
ASSERT(res);
LOG(GLOBAL, LOG_THREADS, 2,
"%s %s: arch_prctl successful for thread " TIDFMT " base " PFX "\n",
__FUNCTION__, to_app ? "to app" : "to DR", d_r_get_thread_id(), base);
if (seg == SEG_TLS && base == NULL) {
/* Set the selector to 0 so we don't think TLS is available. */
/* FIXME i#107: Still assumes app isn't using SEG_TLS. */
reg_t zero = 0;
WRITE_DR_SEG(zero);
}
break;
}
# endif
case TLS_TYPE_GDT: {
our_modify_ldt_t desc;
uint index;
uint selector;
if (to_app) {
selector = os_tls->app_lib_tls_reg;
index = SELECTOR_INDEX(selector);
} else {
index = (seg == LIB_SEG_TLS ? tls_priv_lib_index() : tls_dr_index());
ASSERT(index != -1 && "TLS indices not initialized");
selector = GDT_SELECTOR(index);
}
if (selector != 0) {
if (to_app) {
our_modify_ldt_t *areas =
((os_thread_data_t *)dcontext->os_field)->app_thread_areas;
ASSERT((index >= tls_min_index()) &&
((index - tls_min_index()) <= GDT_NUM_TLS_SLOTS));
desc = areas[index - tls_min_index()];
} else {
tls_init_descriptor(&desc, base, GDT_NO_SIZE_LIMIT, index);
}
res = tls_set_fs_gs_segment_base(os_tls->tls_type, seg, NULL, &desc);
ASSERT(res);
} else {
/* For a selector of zero, we just reset the segment to zero. We
* don't need to call set_thread_area.
*/
res = true; /* Indicate success. */
}
/* XXX i#2098: it's unsafe to call LOG here in between GDT and register changes */
        /* i#558: update the lib seg reg to enforce the segment changes */
if (seg == SEG_TLS)
WRITE_DR_SEG(selector);
else
WRITE_LIB_SEG(selector);
LOG(THREAD, LOG_LOADER, 2, "%s: switching to %s, setting %s to 0x%x\n",
__FUNCTION__, (to_app ? "app" : "dr"), reg_names[seg], selector);
LOG(THREAD, LOG_LOADER, 2,
"%s %s: set_thread_area successful for thread " TIDFMT " base " PFX "\n",
__FUNCTION__, to_app ? "to app" : "to DR", d_r_get_thread_id(), base);
break;
}
case TLS_TYPE_LDT: {
uint index;
uint selector;
if (to_app) {
selector = os_tls->app_lib_tls_reg;
index = SELECTOR_INDEX(selector);
} else {
index = (seg == LIB_SEG_TLS ? tls_priv_lib_index() : tls_dr_index());
ASSERT(index != -1 && "TLS indices not initialized");
selector = LDT_SELECTOR(index);
}
LOG(THREAD, LOG_LOADER, 2, "%s: switching to %s, setting %s to 0x%x\n",
__FUNCTION__, (to_app ? "app" : "dr"), reg_names[seg], selector);
if (seg == SEG_TLS)
WRITE_DR_SEG(selector);
else
WRITE_LIB_SEG(selector);
LOG(THREAD, LOG_LOADER, 2,
"%s %s: ldt selector swap successful for thread " TIDFMT "\n", __FUNCTION__,
to_app ? "to app" : "to DR", d_r_get_thread_id());
break;
}
default: ASSERT_NOT_REACHED(); return false;
}
ASSERT((!to_app && seg == SEG_TLS) ||
BOOLS_MATCH(to_app, os_using_app_state(dcontext)));
return res;
}
static bool
os_set_dr_tls_base(dcontext_t *dcontext, os_local_state_t *tls, byte *base)
{
if (tls == NULL) {
ASSERT(dcontext != NULL);
tls = get_os_tls_from_dc(dcontext);
}
return os_switch_seg_to_base(dcontext, tls, SEG_TLS, false, base);
}
#endif /* X86 */
static bool
os_switch_seg_to_context(dcontext_t *dcontext, reg_id_t seg, bool to_app)
{
os_local_state_t *os_tls = get_os_tls_from_dc(dcontext);
#ifdef X86
app_pc base;
/* we can only update the executing thread's segment (i#920) */
ASSERT_MESSAGE(CHKLVL_ASSERTS + 1 /*expensive*/, "can only act on executing thread",
/* i#2089: a clone syscall, or when native, temporarily puts in
* invalid TLS, so we don't check get_thread_private_dcontext().
*/
is_thread_tls_allocated() &&
dcontext->owning_thread == get_sys_thread_id());
if (to_app) {
base = os_get_app_tls_base(dcontext, seg);
} else {
base = os_get_priv_tls_base(dcontext, seg);
}
return os_switch_seg_to_base(dcontext, os_tls, seg, to_app, base);
#elif defined(AARCHXX)
bool res = false;
os_thread_data_t *ostd = (os_thread_data_t *)dcontext->os_field;
ASSERT(INTERNAL_OPTION(private_loader));
if (to_app) {
        /* On switching to the app's TLS, we need to put DR's TLS base into the
         * app's TLS at the same offset so it can be loaded on entering the code
         * cache. Otherwise, the context switch code on entering the fcache will
         * fault on accessing DR's TLS.
         * The app's TLS slot value is stored into privlib's TLS slot for
         * later restore on switching back to privlib's TLS.
*/
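        /* In effect the app slot and the privlib slot at DR_TLS_BASE_OFFSET swap
         * contents, so a load from that fixed offset yields DR's TLS base no
         * matter which segment base is currently installed.
         */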
byte **priv_lib_tls_swap_slot =
(byte **)(ostd->priv_lib_tls_base + DR_TLS_BASE_OFFSET);
byte **app_lib_tls_swap_slot =
(byte **)(os_tls->app_lib_tls_base + DR_TLS_BASE_OFFSET);
LOG(THREAD, LOG_LOADER, 3,
"%s: switching to app: app slot=&" PFX " *" PFX ", priv slot=&" PFX " *" PFX
"\n",
__FUNCTION__, app_lib_tls_swap_slot, *app_lib_tls_swap_slot,
priv_lib_tls_swap_slot, *priv_lib_tls_swap_slot);
byte *dr_tls_base = *priv_lib_tls_swap_slot;
*priv_lib_tls_swap_slot = *app_lib_tls_swap_slot;
*app_lib_tls_swap_slot = dr_tls_base;
LOG(THREAD, LOG_LOADER, 2, "%s: switching to %s, setting coproc reg to 0x%x\n",
__FUNCTION__, (to_app ? "app" : "dr"), os_tls->app_lib_tls_base);
res = write_thread_register(os_tls->app_lib_tls_base);
} else {
/* Restore the app's TLS slot that we used for storing DR's TLS base,
* and put DR's TLS base back to privlib's TLS slot.
*/
byte **priv_lib_tls_swap_slot =
(byte **)(ostd->priv_lib_tls_base + DR_TLS_BASE_OFFSET);
byte **app_lib_tls_swap_slot =
(byte **)(os_tls->app_lib_tls_base + DR_TLS_BASE_OFFSET);
byte *dr_tls_base = *app_lib_tls_swap_slot;
LOG(THREAD, LOG_LOADER, 3,
"%s: switching to DR: app slot=&" PFX " *" PFX ", priv slot=&" PFX " *" PFX
"\n",
__FUNCTION__, app_lib_tls_swap_slot, *app_lib_tls_swap_slot,
priv_lib_tls_swap_slot, *priv_lib_tls_swap_slot);
*app_lib_tls_swap_slot = *priv_lib_tls_swap_slot;
*priv_lib_tls_swap_slot = dr_tls_base;
LOG(THREAD, LOG_LOADER, 2, "%s: switching to %s, setting coproc reg to 0x%x\n",
__FUNCTION__, (to_app ? "app" : "dr"), ostd->priv_lib_tls_base);
res = write_thread_register(ostd->priv_lib_tls_base);
}
LOG(THREAD, LOG_LOADER, 2, "%s %s: set_tls swap success=%d for thread " TIDFMT "\n",
__FUNCTION__, to_app ? "to app" : "to DR", res, d_r_get_thread_id());
return res;
#elif defined(AARCH64)
(void)os_tls;
ASSERT_NOT_IMPLEMENTED(false); /* FIXME i#1569 */
return false;
#endif /* X86/ARM/AARCH64 */
}
/* System call interception: put any special handling here
* Arguments come from the pusha right before the call
*/
/* WARNING: flush_fragments_and_remove_region assumes that pre and post system
* call handlers do not examine or modify fcache or its fragments in any
* way except for calling flush_fragments_and_remove_region!
*/
/* WARNING: All registers are IN values, but NOT OUT values --
* must set mcontext's register for that.
*/
/* Returns false if system call should NOT be executed (in which case,
* post_system_call() will *not* be called!).
* Returns true if system call should go ahead
*/
/* XXX: split out specific handlers into separate routines
*/
bool
pre_system_call(dcontext_t *dcontext)
{
priv_mcontext_t *mc = get_mcontext(dcontext);
bool execute_syscall = true;
dr_where_am_i_t old_whereami = dcontext->whereami;
dcontext->whereami = DR_WHERE_SYSCALL_HANDLER;
/* FIXME We haven't yet done the work to detect which syscalls we
* can determine a priori will fail. Once we do, we will set the
* expect_last_syscall_to_fail to true for those case, and can
* confirm in post_system_call() that the syscall failed as
* expected.
*/
DODEBUG(dcontext->expect_last_syscall_to_fail = false;);
/* save key register values for post_system_call (they get clobbered
* in syscall itself)
*/
dcontext->sys_num = os_normalized_sysnum((int)MCXT_SYSNUM_REG(mc), NULL, dcontext);
RSTATS_INC(pre_syscall);
DOSTATS({
if (ignorable_system_call_normalized(dcontext->sys_num))
STATS_INC(pre_syscall_ignorable);
});
LOG(THREAD, LOG_SYSCALLS, 2, "system call %d\n", dcontext->sys_num);
#if defined(LINUX) && defined(X86)
/* PR 313715: If we fail to hook the vsyscall page (xref PR 212570, PR 288330)
* we fall back on int, but we have to tweak syscall param #5 (ebp)
* Once we have PR 288330 we can remove this.
*/
if (should_syscall_method_be_sysenter() && !dcontext->sys_was_int) {
dcontext->sys_xbp = mc->xbp;
/* not using SAFE_READ due to performance concerns (we do this for
* every single system call on systems where we can't hook vsyscall!)
*/
TRY_EXCEPT(dcontext, /* try */ { mc->xbp = *(reg_t *)mc->xsp; }, /* except */
{
ASSERT_NOT_REACHED();
mc->xbp = 0;
});
}
#endif
switch (dcontext->sys_num) {
case SYSNUM_EXIT_PROCESS:
#if defined(LINUX) && VMX86_SERVER
if (os_in_vmkernel_32bit()) {
/* on esx 3.5 => ENOSYS, so wait for SYS_exit */
LOG(THREAD, LOG_SYSCALLS, 2, "on esx35 => ignoring exitgroup\n");
DODEBUG({ dcontext->expect_last_syscall_to_fail = true; });
break;
}
#endif
/* fall-through */
case SYSNUM_EXIT_THREAD: {
handle_exit(dcontext);
break;
}
/****************************************************************************/
/* MEMORY REGIONS */
#if defined(LINUX) && !defined(X64) && !defined(ARM)
case SYS_mmap: {
/* in /usr/src/linux/arch/i386/kernel/sys_i386.c:
asmlinkage int old_mmap(struct mmap_arg_struct_t *arg)
*/
mmap_arg_struct_t *arg = (mmap_arg_struct_t *)sys_param(dcontext, 0);
mmap_arg_struct_t arg_buf;
if (d_r_safe_read(arg, sizeof(mmap_arg_struct_t), &arg_buf)) {
void *addr = (void *)arg->addr;
size_t len = (size_t)arg->len;
uint prot = (uint)arg->prot;
LOG(THREAD, LOG_SYSCALLS, 2,
"syscall: mmap addr=" PFX " size=" PIFX " prot=0x%x"
" flags=" PIFX " offset=" PIFX " fd=%d\n",
addr, len, prot, arg->flags, arg->offset, arg->fd);
/* Check for overlap with existing code or patch-proof regions */
if (addr != NULL &&
!app_memory_pre_alloc(dcontext, addr, len, osprot_to_memprot(prot),
!TEST(MAP_FIXED, arg->flags),
false /*we'll update in post*/,
false /*unknown*/)) {
/* Rather than failing or skipping the syscall we'd like to just
* remove the hint -- but we don't want to write to app memory, so
* we do fail. We could set up our own mmap_arg_struct_t but
             * we'd need dedicated per-thread storage, and SYS_mmap is obsolete.
*/
execute_syscall = false;
set_failure_return_val(dcontext, ENOMEM);
DODEBUG({ dcontext->expect_last_syscall_to_fail = true; });
break;
}
}
/* post_system_call does the work */
dcontext->sys_param0 = (reg_t)arg;
break;
}
#endif
case IF_MACOS_ELSE(SYS_mmap, IF_X64_ELSE(SYS_mmap, SYS_mmap2)): {
/* in /usr/src/linux/arch/i386/kernel/sys_i386.c:
asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
unsigned long prot, unsigned long flags,
unsigned long fd, unsigned long pgoff)
*/
void *addr = (void *)sys_param(dcontext, 0);
size_t len = (size_t)sys_param(dcontext, 1);
uint prot = (uint)sys_param(dcontext, 2);
uint flags = (uint)sys_param(dcontext, 3);
LOG(THREAD, LOG_SYSCALLS, 2,
"syscall: mmap2 addr=" PFX " size=" PIFX " prot=0x%x"
" flags=" PIFX " offset=" PIFX " fd=%d\n",
addr, len, prot, flags, sys_param(dcontext, 5), sys_param(dcontext, 4));
/* Check for overlap with existing code or patch-proof regions */
/* Try to see whether it's an image, though we can't tell for addr==NULL
* (typical for 1st mmap).
*/
bool image = addr != NULL && !TEST(MAP_ANONYMOUS, flags) &&
mmap_check_for_module_overlap(addr, len, TEST(PROT_READ, prot), 0, true);
if (addr != NULL &&
!app_memory_pre_alloc(dcontext, addr, len, osprot_to_memprot(prot),
!TEST(MAP_FIXED, flags), false /*we'll update in post*/,
image /*best estimate*/)) {
if (!TEST(MAP_FIXED, flags)) {
/* Rather than failing or skipping the syscall we just remove
* the hint which should eliminate any overlap.
*/
*sys_param_addr(dcontext, 0) = 0;
} else {
execute_syscall = false;
set_failure_return_val(dcontext, ENOMEM);
DODEBUG({ dcontext->expect_last_syscall_to_fail = true; });
break;
}
}
/* post_system_call does the work */
dcontext->sys_param0 = (reg_t)addr;
dcontext->sys_param1 = len;
dcontext->sys_param2 = prot;
dcontext->sys_param3 = flags;
break;
}
/* must flush stale fragments when we see munmap/mremap */
case SYS_munmap: {
/* in /usr/src/linux/mm/mmap.c:
asmlinkage long sys_munmap(unsigned long addr, uint len)
*/
app_pc addr = (void *)sys_param(dcontext, 0);
size_t len = (size_t)sys_param(dcontext, 1);
LOG(THREAD, LOG_SYSCALLS, 2, "syscall: munmap addr=" PFX " size=" PFX "\n", addr,
len);
RSTATS_INC(num_app_munmaps);
/* FIXME addr is supposed to be on a page boundary so we
* could detect that condition here and set
* expect_last_syscall_to_fail.
*/
/* save params in case an undo is needed in post_system_call */
dcontext->sys_param0 = (reg_t)addr;
dcontext->sys_param1 = len;
/* We assume that the unmap will succeed and so are conservative
* and remove the region from exec areas and flush all fragments
* prior to issuing the syscall. If the unmap fails, we try to
* recover in post_system_call() by re-adding the region. This
* approach has its shortcomings -- see comments below in
* post_system_call().
*/
/* Check for unmapping a module. */
os_get_module_info_lock();
if (module_overlaps(addr, len)) {
            /* FIXME - handle unmapping more than one module at once, or unmapping only
             * part of a module (in which case should we adjust the view size, or treat it
             * as a full unmap?). Theoretical for now as we haven't seen this. */
module_area_t *ma = module_pc_lookup(addr);
ASSERT_CURIOSITY(ma != NULL);
ASSERT_CURIOSITY(addr == ma->start);
/* XREF 307599 on rounding module end to the next PAGE boundary */
ASSERT_CURIOSITY((app_pc)ALIGN_FORWARD(addr + len, PAGE_SIZE) == ma->end);
os_get_module_info_unlock();
/* i#210:
* we only think a module is removed if its first memory region
* is unloaded (unmapped).
* XREF i#160 to fix the real problem of handling module splitting.
*/
if (ma != NULL && ma->start == addr)
module_list_remove(addr, ALIGN_FORWARD(len, PAGE_SIZE));
} else
os_get_module_info_unlock();
app_memory_deallocation(dcontext, (app_pc)addr, len,
false /* don't own thread_initexit_lock */,
true /* image, FIXME: though not necessarily */);
/* FIXME: case 4983 use is_elf_so_header() */
#ifndef HAVE_MEMINFO_QUERY
memcache_lock();
memcache_remove(addr, addr + len);
memcache_unlock();
#endif
break;
}
#ifdef LINUX
case SYS_mremap: {
/* in /usr/src/linux/mm/mmap.c:
asmlinkage unsigned long sys_mremap(unsigned long addr,
unsigned long old_len, unsigned long new_len,
unsigned long flags, unsigned long new_addr)
*/
dr_mem_info_t info;
app_pc addr = (void *)sys_param(dcontext, 0);
size_t old_len = (size_t)sys_param(dcontext, 1);
size_t new_len = (size_t)sys_param(dcontext, 2);
DEBUG_DECLARE(bool ok;)
LOG(THREAD, LOG_SYSCALLS, 2, "syscall: mremap addr=" PFX " size=" PFX "\n", addr,
old_len);
/* post_system_call does the work */
dcontext->sys_param0 = (reg_t)addr;
dcontext->sys_param1 = old_len;
dcontext->sys_param2 = new_len;
/* i#173
* we need memory type and prot to set the
* new memory region in the post_system_call
*/
DEBUG_DECLARE(ok =)
query_memory_ex(addr, &info);
ASSERT(ok);
dcontext->sys_param3 = info.prot;
dcontext->sys_param4 = info.type;
DOCHECK(1, {
/* we don't expect to see remappings of modules */
os_get_module_info_lock();
ASSERT_CURIOSITY(!module_overlaps(addr, old_len));
os_get_module_info_unlock();
});
break;
}
#endif
case SYS_mprotect: {
/* in /usr/src/linux/mm/mprotect.c:
asmlinkage long sys_mprotect(unsigned long start, uint len,
unsigned long prot)
*/
uint res;
DEBUG_DECLARE(size_t size;)
app_pc addr = (void *)sys_param(dcontext, 0);
size_t len = (size_t)sys_param(dcontext, 1);
uint prot = (uint)sys_param(dcontext, 2);
uint old_memprot = MEMPROT_NONE, new_memprot;
bool exists = true;
/* save params in case an undo is needed in post_system_call */
dcontext->sys_param0 = (reg_t)addr;
dcontext->sys_param1 = len;
dcontext->sys_param2 = prot;
LOG(THREAD, LOG_SYSCALLS, 2,
"syscall: mprotect addr=" PFX " size=" PFX " prot=%s\n", addr, len,
memprot_string(osprot_to_memprot(prot)));
if (!get_memory_info(addr, NULL, IF_DEBUG_ELSE(&size, NULL), &old_memprot)) {
exists = false;
/* Xref PR 413109, PR 410921: if the start, or any page, is not mapped,
* this should fail with ENOMEM. We used to force-fail it to avoid
* asserts in our own allmem update code, but there are cases where a
* seemingly unmapped page succeeds (i#1912: next page of grows-down
* initial stack). Thus we let it go through.
*/
LOG(THREAD, LOG_SYSCALLS, 2,
"\t" PFX " isn't mapped: probably mprotect will fail\n", addr);
} else {
/* If mprotect region spans beyond the end of the vmarea then it
* spans 2 or more vmareas with dissimilar protection (xref
* PR 410921) or has unallocated regions in between (PR 413109).
*/
DOCHECK(1, dcontext->mprot_multi_areas = len > size ? true : false;);
}
new_memprot = osprot_to_memprot(prot) |
/* mprotect won't change meta flags */
(old_memprot & MEMPROT_META_FLAGS);
res = app_memory_protection_change(dcontext, addr, len, new_memprot, &new_memprot,
NULL, false /*!image*/);
if (res != DO_APP_MEM_PROT_CHANGE) {
if (res == FAIL_APP_MEM_PROT_CHANGE) {
ASSERT_NOT_IMPLEMENTED(false); /* return code? */
} else {
ASSERT_NOT_IMPLEMENTED(res != SUBSET_APP_MEM_PROT_CHANGE);
ASSERT_NOT_REACHED();
}
execute_syscall = false;
} else {
/* FIXME Store state for undo if the syscall fails. */
IF_NO_MEMQUERY(memcache_update_locked(addr, addr + len, new_memprot,
-1 /*type unchanged*/, exists));
}
break;
}
#ifdef ANDROID
case SYS_prctl:
dcontext->sys_param0 = sys_param(dcontext, 0);
dcontext->sys_param1 = sys_param(dcontext, 1);
dcontext->sys_param2 = sys_param(dcontext, 2);
dcontext->sys_param3 = sys_param(dcontext, 3);
dcontext->sys_param4 = sys_param(dcontext, 4);
break;
#endif
#ifdef LINUX
case SYS_brk: {
if (DYNAMO_OPTION(emulate_brk)) {
/* i#1004: emulate brk via a separate mmap */
byte *new_val = (byte *)sys_param(dcontext, 0);
byte *res = emulate_app_brk(dcontext, new_val);
execute_syscall = false;
/* SYS_brk returns old brk on failure */
set_success_return_val(dcontext, (reg_t)res);
} else {
/* i#91/PR 396352: need to watch SYS_brk to maintain all_memory_areas.
* We store the old break in the param1 slot.
*/
DODEBUG(dcontext->sys_param0 = (reg_t)sys_param(dcontext, 0););
dcontext->sys_param1 = dynamorio_syscall(SYS_brk, 1, 0);
}
break;
}
# ifdef SYS_uselib
case SYS_uselib: {
        /* Used to get the kernel to load a shared library (legacy system call).
         * Was primarily used when statically linking to dynamically loaded shared
         * libraries that were loaded at known locations. Shouldn't be used by
         * applications using the dynamic loader (ld), which is currently the only
         * way we can inject, so we don't expect to see this. PR 307621. */
ASSERT_NOT_IMPLEMENTED(false);
break;
}
# endif
#endif
/****************************************************************************/
/* SPAWNING */
#ifdef LINUX
case SYS_clone: {
/* in /usr/src/linux/arch/i386/kernel/process.c
* 32-bit params: flags, newsp, ptid, tls, ctid
* 64-bit params: should be the same yet tls (for ARCH_SET_FS) is in r8?!?
* I don't see how sys_clone gets its special args: shouldn't it
* just get pt_regs as a "special system call"?
* sys_clone(unsigned long clone_flags, unsigned long newsp,
* void __user *parent_tid, void __user *child_tid, struct pt_regs *regs)
*/
uint flags = (uint)sys_param(dcontext, 0);
LOG(THREAD, LOG_SYSCALLS, 2, "syscall: clone with flags = " PFX "\n", flags);
LOG(THREAD, LOG_SYSCALLS, 2,
"args: " PFX ", " PFX ", " PFX ", " PFX ", " PFX "\n", sys_param(dcontext, 0),
sys_param(dcontext, 1), sys_param(dcontext, 2), sys_param(dcontext, 3),
sys_param(dcontext, 4));
handle_clone(dcontext, flags);
if ((flags & CLONE_VM) == 0) {
LOG(THREAD, LOG_SYSCALLS, 1, "\tWARNING: CLONE_VM not set!\n");
}
/* save for post_system_call */
dcontext->sys_param0 = (reg_t)flags;
/* i#1010: If we have private fds open (usually logfiles), we should
* clean those up before they get reused by a new thread.
* XXX: Ideally we'd do this in fd_table_add(), but we can't acquire
* thread_initexit_lock there.
*/
cleanup_after_vfork_execve(dcontext);
/* For thread creation clone syscalls a clone_record_t structure
* containing the pc after the app's syscall instr and other data
* (see i#27) is placed at the bottom of the dstack (which is allocated
* by create_clone_record() - it also saves app stack and switches
* to dstack). xref i#149/PR 403015.
* Note: This must be done after sys_param0 is set.
*/
if (is_thread_create_syscall(dcontext)) {
create_clone_record(dcontext,
sys_param_addr(dcontext, SYSCALL_PARAM_CLONE_STACK));
os_clone_pre(dcontext);
os_new_thread_pre();
} else /* This is really a fork. */
os_fork_pre(dcontext);
break;
}
#elif defined(MACOS)
case SYS_bsdthread_create: {
/* XXX i#1403: we need earlier injection to intercept
* bsdthread_register in order to capture workqueue threads.
* For now we settle for intercepting bsd threads at the user thread func.
* We miss a little user-mode code but this is enough to get started.
*/
app_pc func = (app_pc)sys_param(dcontext, 0);
void *func_arg = (void *)sys_param(dcontext, 1);
void *clone_rec;
LOG(THREAD, LOG_SYSCALLS, 1,
"bsdthread_create: thread func " PFX ", arg " PFX "\n", func, func_arg);
handle_clone(dcontext, CLONE_THREAD | CLONE_VM | CLONE_SIGHAND | SIGCHLD);
clone_rec = create_clone_record(dcontext, NULL, func, func_arg);
dcontext->sys_param0 = (reg_t)func;
dcontext->sys_param1 = (reg_t)func_arg;
*sys_param_addr(dcontext, 0) = (reg_t)new_bsdthread_intercept;
*sys_param_addr(dcontext, 1) = (reg_t)clone_rec;
os_new_thread_pre();
break;
}
case SYS_posix_spawn: {
/* FIXME i#1644: monitor this call which can be fork or exec */
ASSERT_NOT_IMPLEMENTED(false);
break;
}
#endif
#ifdef SYS_vfork
case SYS_vfork: {
/* treat as if sys_clone with flags just as sys_vfork does */
/* in /usr/src/linux/arch/i386/kernel/process.c */
uint flags = CLONE_VFORK | CLONE_VM | SIGCHLD;
LOG(THREAD, LOG_SYSCALLS, 2, "syscall: vfork\n");
handle_clone(dcontext, flags);
cleanup_after_vfork_execve(dcontext);
/* save for post_system_call, treated as if SYS_clone */
dcontext->sys_param0 = (reg_t)flags;
/* vfork has the same needs as clone. Pass info via a clone_record_t
* structure to child. See SYS_clone for info about i#149/PR 403015.
*/
IF_LINUX(ASSERT(is_thread_create_syscall(dcontext)));
dcontext->sys_param1 = mc->xsp; /* for restoring in parent */
# ifdef MACOS
create_clone_record(dcontext, (reg_t *)&mc->xsp, NULL, NULL);
# else
create_clone_record(dcontext, (reg_t *)&mc->xsp /*child uses parent sp*/);
# endif
os_clone_pre(dcontext);
os_new_thread_pre();
break;
}
#endif
#ifdef SYS_fork
case SYS_fork: {
LOG(THREAD, LOG_SYSCALLS, 2, "syscall: fork\n");
os_fork_pre(dcontext);
break;
}
#endif
case SYS_execve: {
int ret = handle_execve(dcontext);
if (ret != 0) {
execute_syscall = false;
set_failure_return_val(dcontext, ret);
}
break;
}
/****************************************************************************/
/* SIGNALS */
case IF_MACOS_ELSE(SYS_sigaction, SYS_rt_sigaction): { /* 174 */
/* in /usr/src/linux/kernel/signal.c:
asmlinkage long
sys_rt_sigaction(int sig, const struct sigaction *act,
struct sigaction *oact, size_t sigsetsize)
*/
int sig = (int)sys_param(dcontext, 0);
const kernel_sigaction_t *act =
(const kernel_sigaction_t *)sys_param(dcontext, 1);
prev_sigaction_t *oact = (prev_sigaction_t *)sys_param(dcontext, 2);
size_t sigsetsize = (size_t)
/* On Mac there is no size arg (but it doesn't use old sigaction, so
* closer to rt_ than non-rt_ below).
*/
IF_MACOS_ELSE(sizeof(kernel_sigset_t), sys_param(dcontext, 3));
uint res;
LOG(THREAD, LOG_SYSCALLS, 2, "syscall: %ssigaction %d " PFX " " PFX " %d\n",
IF_MACOS_ELSE("", "rt_"), sig, act, oact, sigsetsize);
/* post_syscall does some work as well */
dcontext->sys_param0 = (reg_t)sig;
dcontext->sys_param1 = (reg_t)act;
dcontext->sys_param2 = (reg_t)oact;
dcontext->sys_param3 = (reg_t)sigsetsize;
execute_syscall = handle_sigaction(dcontext, sig, act, oact, sigsetsize, &res);
if (!execute_syscall) {
LOG(THREAD, LOG_SYSCALLS, 2, "sigaction emulation => %d\n", -res);
if (res == 0)
set_success_return_val(dcontext, 0);
else
set_failure_return_val(dcontext, res);
}
break;
}
#if defined(LINUX) && !defined(X64)
case SYS_sigaction: { /* 67 */
/* sys_sigaction(int sig, const struct old_sigaction *act,
* struct old_sigaction *oact)
*/
int sig = (int)sys_param(dcontext, 0);
const old_sigaction_t *act = (const old_sigaction_t *)sys_param(dcontext, 1);
old_sigaction_t *oact = (old_sigaction_t *)sys_param(dcontext, 2);
uint res;
LOG(THREAD, LOG_SYSCALLS, 2, "syscall: sigaction %d " PFX " " PFX "\n", sig, act,
oact);
dcontext->sys_param0 = (reg_t)sig;
dcontext->sys_param1 = (reg_t)act;
dcontext->sys_param2 = (reg_t)oact;
execute_syscall = handle_old_sigaction(dcontext, sig, act, oact, &res);
if (!execute_syscall) {
LOG(THREAD, LOG_SYSCALLS, 2, "sigaction emulation => %d\n", -res);
if (res == 0)
set_success_return_val(dcontext, 0);
else
set_failure_return_val(dcontext, res);
}
break;
}
#endif
#if defined(LINUX) && !defined(X64)
case SYS_sigreturn: { /* 119 */
/* in /usr/src/linux/arch/i386/kernel/signal.c:
asmlinkage int sys_sigreturn(unsigned long __unused)
*/
execute_syscall = handle_sigreturn(dcontext, false);
/* app will not expect syscall to return, so when handle_sigreturn
* returns false it always redirects the context, and thus no
* need to set return val here.
*/
break;
}
#endif
#ifdef LINUX
case SYS_rt_sigreturn: { /* 173 */
/* in /usr/src/linux/arch/i386/kernel/signal.c:
asmlinkage int sys_rt_sigreturn(unsigned long __unused)
*/
execute_syscall = handle_sigreturn(dcontext, true);
/* see comment for SYS_sigreturn on return val */
break;
}
#endif
#ifdef MACOS
case SYS_sigreturn: {
/* int sigreturn(struct ucontext *uctx, int infostyle) */
execute_syscall = handle_sigreturn(dcontext, (void *)sys_param(dcontext, 0),
(int)sys_param(dcontext, 1));
/* see comment for SYS_sigreturn on return val */
break;
}
#endif
case SYS_sigaltstack: { /* 186 */
/* in /usr/src/linux/arch/i386/kernel/signal.c:
asmlinkage int
sys_sigaltstack(const stack_t *uss, stack_t *uoss)
*/
const stack_t *uss = (const stack_t *)sys_param(dcontext, 0);
stack_t *uoss = (stack_t *)sys_param(dcontext, 1);
uint res;
LOG(THREAD, LOG_SYSCALLS, 2, "syscall: sigaltstack " PFX " " PFX "\n", uss, uoss);
execute_syscall =
handle_sigaltstack(dcontext, uss, uoss, get_mcontext(dcontext)->xsp, &res);
if (!execute_syscall) {
LOG(THREAD, LOG_SYSCALLS, 2, "sigaltstack emulation => %d\n", -res);
if (res == 0)
set_success_return_val(dcontext, res);
else
set_failure_return_val(dcontext, res);
}
break;
}
case IF_MACOS_ELSE(SYS_sigprocmask, SYS_rt_sigprocmask): { /* 175 */
/* in /usr/src/linux/kernel/signal.c:
asmlinkage long
sys_rt_sigprocmask(int how, sigset_t *set, sigset_t *oset,
size_t sigsetsize)
*/
/* we also need access to the params in post_system_call */
dcontext->sys_param0 = sys_param(dcontext, 0);
dcontext->sys_param1 = sys_param(dcontext, 1);
dcontext->sys_param2 = sys_param(dcontext, 2);
dcontext->sys_param3 = sys_param(dcontext, 3);
execute_syscall = handle_sigprocmask(dcontext, (int)sys_param(dcontext, 0),
(kernel_sigset_t *)sys_param(dcontext, 1),
(kernel_sigset_t *)sys_param(dcontext, 2),
(size_t)sys_param(dcontext, 3));
if (!execute_syscall)
set_success_return_val(dcontext, 0);
break;
}
#ifdef MACOS
case SYS_sigsuspend_nocancel:
#endif
case IF_MACOS_ELSE(SYS_sigsuspend, SYS_rt_sigsuspend): { /* 179 */
/* in /usr/src/linux/kernel/signal.c:
asmlinkage int
sys_rt_sigsuspend(sigset_t *unewset, size_t sigsetsize)
*/
handle_sigsuspend(dcontext, (kernel_sigset_t *)sys_param(dcontext, 0),
(size_t)sys_param(dcontext, 1));
break;
}
#ifdef LINUX
# ifdef SYS_signalfd
case SYS_signalfd: /* 282/321 */
# endif
case SYS_signalfd4: { /* 289 */
/* int signalfd (int fd, const sigset_t *mask, size_t sizemask) */
/* int signalfd4(int fd, const sigset_t *mask, size_t sizemask, int flags) */
ptr_int_t new_result;
dcontext->sys_param0 = sys_param(dcontext, 0);
dcontext->sys_param1 = sys_param(dcontext, 1);
dcontext->sys_param2 = sys_param(dcontext, 2);
# ifdef SYS_signalfd
if (dcontext->sys_num == SYS_signalfd)
dcontext->sys_param3 = 0;
else
# endif
dcontext->sys_param3 = sys_param(dcontext, 3);
new_result = handle_pre_signalfd(
dcontext, (int)dcontext->sys_param0, (kernel_sigset_t *)dcontext->sys_param1,
(size_t)dcontext->sys_param2, (int)dcontext->sys_param3);
execute_syscall = false;
/* since non-Mac, we can use this even if the call failed */
set_success_return_val(dcontext, new_result);
break;
}
#endif
case SYS_kill: { /* 37 */
/* in /usr/src/linux/kernel/signal.c:
* asmlinkage long sys_kill(int pid, int sig)
*/
pid_t pid = (pid_t)sys_param(dcontext, 0);
uint sig = (uint)sys_param(dcontext, 1);
LOG(GLOBAL, LOG_TOP | LOG_SYSCALLS, 2,
"thread " TIDFMT " sending signal %d to pid " PIDFMT "\n",
d_r_get_thread_id(), sig, pid);
/* We check whether targeting this process or this process group */
if (pid == get_process_id() || pid == 0 || pid == -get_process_group_id()) {
handle_self_signal(dcontext, sig);
}
break;
}
#if defined(SYS_tkill)
case SYS_tkill: { /* 238 */
/* in /usr/src/linux/kernel/signal.c:
* asmlinkage long sys_tkill(int pid, int sig)
*/
pid_t tid = (pid_t)sys_param(dcontext, 0);
uint sig = (uint)sys_param(dcontext, 1);
LOG(GLOBAL, LOG_TOP | LOG_SYSCALLS, 2,
"thread " TIDFMT " sending signal %d to tid %d\n", d_r_get_thread_id(), sig,
tid);
if (tid == d_r_get_thread_id()) {
handle_self_signal(dcontext, sig);
}
break;
}
#endif
#if defined(SYS_tgkill)
case SYS_tgkill: { /* 270 */
/* in /usr/src/linux/kernel/signal.c:
* asmlinkage long sys_tgkill(int tgid, int pid, int sig)
*/
pid_t tgid = (pid_t)sys_param(dcontext, 0);
pid_t tid = (pid_t)sys_param(dcontext, 1);
uint sig = (uint)sys_param(dcontext, 2);
LOG(GLOBAL, LOG_TOP | LOG_SYSCALLS, 2,
"thread " TIDFMT " sending signal %d to tid %d tgid %d\n",
d_r_get_thread_id(), sig, tid, tgid);
/* some kernels support -1 values:
         *   tgkill(-1, tid, sig) == tkill(tid, sig)
* tgkill(tgid, -1, sig) == kill(tgid, sig)
* the 2nd was proposed but is not in 2.6.20 so I'm ignoring it, since
* I don't want to kill the thread when the signal is never sent!
* FIXME: the 1st is in my tkill manpage, but not my 2.6.20 kernel sources!
*/
if ((tgid == -1 || tgid == get_process_id()) && tid == d_r_get_thread_id()) {
handle_self_signal(dcontext, sig);
}
break;
}
#endif
case SYS_setitimer: /* 104 */
dcontext->sys_param0 = sys_param(dcontext, 0);
dcontext->sys_param1 = sys_param(dcontext, 1);
dcontext->sys_param2 = sys_param(dcontext, 2);
handle_pre_setitimer(dcontext, (int)sys_param(dcontext, 0),
(const struct itimerval *)sys_param(dcontext, 1),
(struct itimerval *)sys_param(dcontext, 2));
break;
case SYS_getitimer: /* 105 */
dcontext->sys_param0 = sys_param(dcontext, 0);
dcontext->sys_param1 = sys_param(dcontext, 1);
break;
#if defined(LINUX) && defined(X86)
case SYS_alarm: /* 27 on x86 and 37 on x64 */
dcontext->sys_param0 = sys_param(dcontext, 0);
handle_pre_alarm(dcontext, (unsigned int)dcontext->sys_param0);
break;
#endif
#if 0
# ifndef X64
case SYS_signal: { /* 48 */
/* in /usr/src/linux/kernel/signal.c:
asmlinkage unsigned long
sys_signal(int sig, __sighandler_t handler)
*/
break;
}
case SYS_sigsuspend: { /* 72 */
/* in /usr/src/linux/arch/i386/kernel/signal.c:
asmlinkage int
sys_sigsuspend(int history0, int history1, old_sigset_t mask)
*/
break;
}
case SYS_sigprocmask: { /* 126 */
/* in /usr/src/linux/kernel/signal.c:
asmlinkage long
sys_sigprocmask(int how, old_sigset_t *set, old_sigset_t *oset)
*/
break;
}
# endif
#else
/* until we've implemented them, keep down here to get warning: */
# if defined(LINUX) && !defined(X64)
# ifndef ARM
case SYS_signal:
# endif
case SYS_sigsuspend:
case SYS_sigprocmask:
# endif
#endif
#if defined(LINUX) && !defined(X64)
case SYS_sigpending: /* 73 */
# ifndef ARM
case SYS_sgetmask: /* 68 */
case SYS_ssetmask: /* 69 */
# endif
#endif
#ifdef LINUX
case SYS_rt_sigtimedwait: /* 177 */
case SYS_rt_sigqueueinfo: /* 178 */
#endif
case IF_MACOS_ELSE(SYS_sigpending, SYS_rt_sigpending): { /* 176 */
/* FIXME i#92: handle all of these syscalls! */
LOG(THREAD, LOG_ASYNCH | LOG_SYSCALLS, 1,
"WARNING: unhandled signal system call %d\n", dcontext->sys_num);
SYSLOG_INTERNAL_WARNING_ONCE("unhandled signal system call %d",
dcontext->sys_num);
break;
}
#ifdef LINUX
case SYS_ppoll: {
kernel_sigset_t *sigmask = (kernel_sigset_t *)sys_param(dcontext, 3);
dcontext->sys_param3 = (reg_t)sigmask;
if (sigmask == NULL)
break;
size_t sizemask = (size_t)sys_param(dcontext, 4);
        /* The original app's sigmask parameter is now NULL, effectively making the
         * syscall a non-p* version; the mask's semantics are emulated by DR instead.
         */
set_syscall_param(dcontext, 3, (reg_t)NULL);
bool sig_pending = false;
if (!handle_pre_extended_syscall_sigmasks(dcontext, sigmask, sizemask,
&sig_pending)) {
/* In old kernels with sizeof(kernel_sigset_t) != sizemask, we're forcing
* failure. We're already violating app transparency in other places in DR.
*/
set_failure_return_val(dcontext, EINVAL);
DODEBUG({ dcontext->expect_last_syscall_to_fail = true; });
execute_syscall = false;
}
if (sig_pending) {
/* If there had been pending signals, we revert re-writing the app's
* parameter, but we leave the modified signal mask.
*/
set_syscall_param(dcontext, 3, dcontext->sys_param3);
set_failure_return_val(dcontext, EINTR);
DODEBUG({ dcontext->expect_last_syscall_to_fail = true; });
execute_syscall = false;
}
break;
}
case SYS_pselect6: {
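        /* Unlike ppoll, pselect6 does not take the sigmask pointer directly: its 6th
         * argument points to a { sigmask pointer, mask size } pair, so we must
         * safe_read that struct before we can inspect or clear the app's mask.
         */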
typedef struct {
kernel_sigset_t *sigmask;
size_t sizemask;
} data_t;
dcontext->sys_param3 = sys_param(dcontext, 5);
data_t *data_param = (data_t *)dcontext->sys_param3;
data_t data;
if (data_param == NULL) {
/* The kernel does not consider a NULL 6th+7th-args struct to be an error but
* just a NULL sigmask.
*/
dcontext->sys_param4 = (reg_t)NULL;
break;
}
/* Refer to comments in SYS_ppoll above. Taking extra steps here due to struct
* argument in pselect6.
*/
if (!d_r_safe_read(data_param, sizeof(data), &data)) {
LOG(THREAD, LOG_SYSCALLS, 2, "\treturning EFAULT to app for pselect6\n");
set_failure_return_val(dcontext, EFAULT);
DODEBUG({ dcontext->expect_last_syscall_to_fail = true; });
execute_syscall = false;
break;
}
dcontext->sys_param4 = (reg_t)data.sigmask;
if (data.sigmask == NULL)
break;
kernel_sigset_t *nullsigmaskptr = NULL;
if (!safe_write_ex((void *)&data_param->sigmask, sizeof(data_param->sigmask),
&nullsigmaskptr, NULL)) {
set_failure_return_val(dcontext, EFAULT);
DODEBUG({ dcontext->expect_last_syscall_to_fail = true; });
execute_syscall = false;
break;
}
bool sig_pending = false;
if (!handle_pre_extended_syscall_sigmasks(dcontext, data.sigmask, data.sizemask,
&sig_pending)) {
set_failure_return_val(dcontext, EINVAL);
DODEBUG({ dcontext->expect_last_syscall_to_fail = true; });
execute_syscall = false;
}
if (sig_pending) {
if (!safe_write_ex((void *)&data_param->sigmask, sizeof(data_param->sigmask),
&dcontext->sys_param4, NULL)) {
set_failure_return_val(dcontext, EFAULT);
DODEBUG({ dcontext->expect_last_syscall_to_fail = true; });
execute_syscall = false;
break;
}
set_failure_return_val(dcontext, EINTR);
DODEBUG({ dcontext->expect_last_syscall_to_fail = true; });
execute_syscall = false;
}
break;
}
case SYS_epoll_pwait: {
kernel_sigset_t *sigmask = (kernel_sigset_t *)sys_param(dcontext, 4);
dcontext->sys_param4 = (reg_t)sigmask;
if (sigmask == NULL)
break;
size_t sizemask = (size_t)sys_param(dcontext, 5);
/* Refer to comments in SYS_ppoll above. */
set_syscall_param(dcontext, 4, (reg_t)NULL);
bool sig_pending = false;
if (!handle_pre_extended_syscall_sigmasks(dcontext, sigmask, sizemask,
&sig_pending)) {
set_failure_return_val(dcontext, EINVAL);
DODEBUG({ dcontext->expect_last_syscall_to_fail = true; });
execute_syscall = false;
}
if (sig_pending) {
set_syscall_param(dcontext, 4, dcontext->sys_param4);
set_failure_return_val(dcontext, EINTR);
DODEBUG({ dcontext->expect_last_syscall_to_fail = true; });
execute_syscall = false;
}
break;
}
#endif
/****************************************************************************/
/* FILES */
    /* Prevent the app from closing our files or opening a new file in our fd space.
     * It's not worth monitoring every syscall that takes in an fd just to keep it
     * from affecting ours.
     */
#ifdef MACOS
case SYS_close_nocancel:
#endif
case SYS_close: {
execute_syscall = handle_close_pre(dcontext);
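        /* handle_close_pre() decides whether to let the close through (it refuses to
         * close DR-owned fds); if it does go through, let the signal code update any
         * bookkeeping it has tied to that fd (e.g., a signalfd).
         */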
#ifdef LINUX
if (execute_syscall)
signal_handle_close(dcontext, (file_t)sys_param(dcontext, 0));
#endif
break;
}
#ifdef SYS_dup2
case SYS_dup2:
IF_LINUX(case SYS_dup3:)
{
file_t newfd = (file_t)sys_param(dcontext, 1);
if (fd_is_dr_owned(newfd) || fd_is_in_private_range(newfd)) {
SYSLOG_INTERNAL_WARNING_ONCE("app trying to dup-close DR file(s)");
LOG(THREAD, LOG_TOP | LOG_SYSCALLS, 1,
"WARNING: app trying to dup2/dup3 to %d. Disallowing.\n", newfd);
if (DYNAMO_OPTION(fail_on_stolen_fds)) {
set_failure_return_val(dcontext, EBADF);
DODEBUG({ dcontext->expect_last_syscall_to_fail = true; });
} else
set_success_return_val(dcontext, 0);
execute_syscall = false;
}
break;
}
#endif
#ifdef MACOS
case SYS_fcntl_nocancel:
#endif
case SYS_fcntl: {
int cmd = (int)sys_param(dcontext, 1);
long arg = (long)sys_param(dcontext, 2);
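        /* F_DUPFD (and F_DUPFD_CLOEXEC) returns the lowest available fd >= arg, so a
         * request with arg inside our private range could hand the app one of the
         * fds we have reserved for DR's own files.
         */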
        /* We only check the case where the requested minimum is in our private
         * space: we do not handle a min below it where the actual fd handed out
         * ends up above, inside our space (see notes in os_file_init()).
         */
if ((cmd == F_DUPFD || cmd == F_DUPFD_CLOEXEC) && fd_is_in_private_range(arg)) {
SYSLOG_INTERNAL_WARNING_ONCE("app trying to open private fd(s)");
LOG(THREAD, LOG_TOP | LOG_SYSCALLS, 1,
"WARNING: app trying to dup to >= %d. Disallowing.\n", arg);
set_failure_return_val(dcontext, EINVAL);
DODEBUG({ dcontext->expect_last_syscall_to_fail = true; });
execute_syscall = false;
} else {
dcontext->sys_param0 = sys_param(dcontext, 0);
dcontext->sys_param1 = cmd;
}
break;
}
#if defined(X64) || !defined(ARM) || defined(MACOS)
case SYS_getrlimit:
#endif
#if defined(LINUX) && !defined(X64)
case SYS_ugetrlimit:
#endif
/* save for post */
dcontext->sys_param0 = sys_param(dcontext, 0); /* resource */
dcontext->sys_param1 = sys_param(dcontext, 1); /* rlimit */
break;
case SYS_setrlimit: {
int resource = (int)sys_param(dcontext, 0);
if (resource == RLIMIT_NOFILE && DYNAMO_OPTION(steal_fds) > 0) {
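            /* With steal_fds enabled, DR reserves fds at the top of the range, so we
             * emulate RLIMIT_NOFILE changes rather than passing them through: a lower
             * limit is recorded in app_rlimit_nofile and reported as success, while
             * raising the limit (which would expose our reserved fds) is refused with
             * EPERM.
             */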
#if !defined(ARM) && !defined(X64) && !defined(MACOS)
struct compat_rlimit rlim;
#else
struct rlimit rlim;
#endif
if (!d_r_safe_read((void *)sys_param(dcontext, 1), sizeof(rlim), &rlim)) {
                LOG(THREAD, LOG_SYSCALLS, 2, "\treturning EFAULT to app for setrlimit\n");
set_failure_return_val(dcontext, EFAULT);
DODEBUG({ dcontext->expect_last_syscall_to_fail = true; });
} else if (rlim.rlim_cur > rlim.rlim_max) {
                LOG(THREAD, LOG_SYSCALLS, 2, "\treturning EINVAL for setrlimit\n");
set_failure_return_val(dcontext, EINVAL);
DODEBUG({ dcontext->expect_last_syscall_to_fail = true; });
} else if (rlim.rlim_max <= min_dr_fd &&
/* Can't raise hard unless have CAP_SYS_RESOURCE capability.
* XXX i#2980: should query for that capability.
*/
rlim.rlim_max <= app_rlimit_nofile.rlim_max) {
/* if the new rlimit is lower, pretend succeed */
app_rlimit_nofile.rlim_cur = rlim.rlim_cur;
app_rlimit_nofile.rlim_max = rlim.rlim_max;
set_success_return_val(dcontext, 0);
} else {
LOG(THREAD, LOG_SYSCALLS, 2, "\treturning EPERM to app for setrlimit\n");
/* don't let app raise limits as that would mess up our fd space */
set_failure_return_val(dcontext, EPERM);
DODEBUG({ dcontext->expect_last_syscall_to_fail = true; });
}
execute_syscall = false;
}
break;
}
#ifdef LINUX
case SYS_prlimit64:
/* save for post */
dcontext->sys_param0 = sys_param(dcontext, 0); /* pid */
dcontext->sys_param1 = sys_param(dcontext, 1); /* resource */
dcontext->sys_param2 = sys_param(dcontext, 2); /* new rlimit */
dcontext->sys_param3 = sys_param(dcontext, 3); /* old rlimit */
if (/* XXX: how do we handle the case of setting rlimit.nofile on another
* process that is running with DynamoRIO?
*/
            /* XXX: CLONE_FILES allows different processes to share the same file
             * descriptor table, and lets different threads of the same process have
             * separate file descriptor tables.  POSIX specifies that rlimits are
             * per-process, not per-thread, and Linux follows suit, so threads with
             * different descriptor tables do not matter, while pids sharing a
             * descriptor table turn into the hard-to-solve IPC problem.
             */
(dcontext->sys_param0 == 0 || dcontext->sys_param0 == get_process_id()) &&
dcontext->sys_param1 == RLIMIT_NOFILE &&
dcontext->sys_param2 != (reg_t)NULL && DYNAMO_OPTION(steal_fds) > 0) {
rlimit64_t rlim;
if (!d_r_safe_read((void *)(dcontext->sys_param2), sizeof(rlim), &rlim)) {
LOG(THREAD, LOG_SYSCALLS, 2, "\treturning EFAULT to app for prlimit64\n");
set_failure_return_val(dcontext, EFAULT);
DODEBUG({ dcontext->expect_last_syscall_to_fail = true; });
} else {
LOG(THREAD, LOG_SYSCALLS, 2,
"syscall: prlimit64 soft=" INT64_FORMAT_STRING
" hard=" INT64_FORMAT_STRING " vs DR %d\n",
rlim.rlim_cur, rlim.rlim_max, min_dr_fd);
if (rlim.rlim_cur > rlim.rlim_max) {
LOG(THREAD, LOG_SYSCALLS, 2, "\treturning EINVAL for prlimit64\n");
set_failure_return_val(dcontext, EINVAL);
DODEBUG({ dcontext->expect_last_syscall_to_fail = true; });
} else if (rlim.rlim_max <= min_dr_fd &&
/* Can't raise hard unless have CAP_SYS_RESOURCE capability.
* XXX i#2980: should query for that capability.
*/
rlim.rlim_max <= app_rlimit_nofile.rlim_max) {
/* if the new rlimit is lower, pretend succeed */
app_rlimit_nofile.rlim_cur = rlim.rlim_cur;
app_rlimit_nofile.rlim_max = rlim.rlim_max;
set_success_return_val(dcontext, 0);
/* set old rlimit if necessary */
if (dcontext->sys_param3 != (reg_t)NULL) {
safe_write_ex((void *)(dcontext->sys_param3), sizeof(rlim),
&app_rlimit_nofile, NULL);
}
} else {
/* don't let app raise limits as that would mess up our fd space */
LOG(THREAD, LOG_SYSCALLS, 2,
"\treturning EPERM to app for prlimit64\n");
set_failure_return_val(dcontext, EPERM);
DODEBUG({ dcontext->expect_last_syscall_to_fail = true; });
}
}
execute_syscall = false;
}
break;
#endif
#ifdef LINUX
# ifdef SYS_readlink
case SYS_readlink:
# endif
case SYS_readlinkat:
if (DYNAMO_OPTION(early_inject)) {
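            /* With early injection /proc/self/exe points at libdynamorio.so rather
             * than the app, so we save the params here and rewrite the result in
             * post_system_call (see the i#907 handling there).
             */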
dcontext->sys_param0 = sys_param(dcontext, 0);
dcontext->sys_param1 = sys_param(dcontext, 1);
dcontext->sys_param2 = sys_param(dcontext, 2);
if (dcontext->sys_num == SYS_readlinkat)
dcontext->sys_param3 = sys_param(dcontext, 3);
}
break;
/* i#107 syscalls that might change/query app's segment */
# if defined(X86) && defined(X64)
case SYS_arch_prctl: {
/* we handle arch_prctl in post_syscall */
dcontext->sys_param0 = sys_param(dcontext, 0);
dcontext->sys_param1 = sys_param(dcontext, 1);
break;
}
# endif
# ifdef X86
case SYS_set_thread_area: {
our_modify_ldt_t desc;
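        /* When mangle_app_seg is on, DR services set_thread_area itself: record the
         * descriptor as the app's TLS entry, write the (possibly updated) descriptor
         * back to the app's buffer, and suppress the real syscall with a success
         * return value.
         */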
if (INTERNAL_OPTION(mangle_app_seg) &&
d_r_safe_read((void *)sys_param(dcontext, 0), sizeof(desc), &desc)) {
if (os_set_app_thread_area(dcontext, &desc) &&
safe_write_ex((void *)sys_param(dcontext, 0), sizeof(desc), &desc,
NULL)) {
/* check if the range is unlimited */
ASSERT_CURIOSITY(desc.limit == 0xfffff);
execute_syscall = false;
set_success_return_val(dcontext, 0);
}
}
break;
}
case SYS_get_thread_area: {
our_modify_ldt_t desc;
if (INTERNAL_OPTION(mangle_app_seg) &&
d_r_safe_read((const void *)sys_param(dcontext, 0), sizeof(desc), &desc)) {
if (os_get_app_thread_area(dcontext, &desc) &&
safe_write_ex((void *)sys_param(dcontext, 0), sizeof(desc), &desc,
NULL)) {
execute_syscall = false;
set_success_return_val(dcontext, 0);
}
}
break;
}
# endif /* X86 */
# ifdef ARM
case SYS_set_tls: {
LOG(THREAD, LOG_VMAREAS | LOG_SYSCALLS, 2, "syscall: set_tls " PFX "\n",
sys_param(dcontext, 0));
if (os_set_app_tls_base(dcontext, TLS_REG_LIB, (void *)sys_param(dcontext, 0))) {
execute_syscall = false;
set_success_return_val(dcontext, 0);
} else {
ASSERT_NOT_REACHED();
}
break;
}
case SYS_cacheflush: {
/* We assume we don't want to change the executable_areas list or change
* the selfmod status of this region: else we should call something
* that invokes handle_modified_code() in a way that handles a bigger
* region than a single write.
*/
app_pc start = (app_pc)sys_param(dcontext, 0);
app_pc end = (app_pc)sys_param(dcontext, 1);
LOG(THREAD, LOG_VMAREAS | LOG_SYSCALLS, 2,
"syscall: cacheflush " PFX "-" PFX "\n", start, end);
flush_fragments_from_region(dcontext, start, end - start,
/* An unlink flush should be fine: the app must
* use synch to ensure other threads see the
* new code.
*/
false /*don't force synchall*/);
break;
}
# endif /* ARM */
#elif defined(MACOS)
/* FIXME i#58: handle i386_{get,set}_ldt and thread_fast_set_cthread_self64 */
#endif
#ifdef DEBUG
# ifdef MACOS
case SYS_open_nocancel:
# endif
# ifdef SYS_open
case SYS_open: {
dcontext->sys_param0 = sys_param(dcontext, 0);
break;
}
# endif
#endif
#ifdef LINUX
case SYS_rseq:
LOG(THREAD, LOG_VMAREAS | LOG_SYSCALLS, 2, "syscall: rseq " PFX " %d %d %d\n",
sys_param(dcontext, 0), sys_param(dcontext, 1), sys_param(dcontext, 2),
sys_param(dcontext, 3));
if (DYNAMO_OPTION(disable_rseq)) {
set_failure_return_val(dcontext, ENOSYS);
DODEBUG({ dcontext->expect_last_syscall_to_fail = true; });
execute_syscall = false;
} else {
dcontext->sys_param0 = sys_param(dcontext, 0);
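            /* Save the app's rseq parameter for the lazy rseq handling in
             * post_system_call.
             */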
}
break;
#endif
default: {
#ifdef VMX86_SERVER
if (is_vmkuw_sysnum(dcontext->sys_num)) {
execute_syscall = vmkuw_pre_system_call(dcontext);
break;
}
#endif
break;
}
} /* end switch */
dcontext->whereami = old_whereami;
return execute_syscall;
}
void
all_memory_areas_lock(void)
{
IF_NO_MEMQUERY(memcache_lock());
}
void
all_memory_areas_unlock(void)
{
IF_NO_MEMQUERY(memcache_unlock());
}
void
update_all_memory_areas(app_pc start, app_pc end, uint prot, int type)
{
IF_NO_MEMQUERY(memcache_update(start, end, prot, type));
}
bool
remove_from_all_memory_areas(app_pc start, app_pc end)
{
IF_NO_MEMQUERY(return memcache_remove(start, end));
return true;
}
/* We consider a module load to happen at the first mmap, so we check on later
* overmaps to ensure things look consistent. */
static bool
mmap_check_for_module_overlap(app_pc base, size_t size, bool readable, uint64 inode,
bool at_map)
{
module_area_t *ma;
os_get_module_info_lock();
ma = module_pc_lookup(base);
if (ma != NULL) {
        /* FIXME - how can we distinguish between the loader mapping the segments
         * over the initial map and someone just mapping over part of a module? If
         * it is the latter case, we need to adjust the view size or remove from
         * the module list. */
LOG(GLOBAL, LOG_VMAREAS, 2,
"%s mmap overlapping module area : \n"
"\tmap : base=" PFX " base+size=" PFX " inode=" UINT64_FORMAT_STRING "\n"
"\tmod : start=" PFX " end=" PFX " inode=" UINT64_FORMAT_STRING "\n",
at_map ? "new" : "existing", base, base + size, inode, ma->start, ma->end,
ma->names.inode);
ASSERT_CURIOSITY(base >= ma->start);
if (at_map) {
ASSERT_CURIOSITY(base + size <= ma->end);
} else {
/* FIXME - I'm having problems with this check for existing maps. I
* haven't been able to get gdb to break in early enough to really get a good
* look at the early loader behavior. Two issues: One case is with our .so
* for which the anonymous .bss mapping is one page larger than expected
* (which might be some loader bug in the size calculation? or something? if
* so should see it trigger the at_map curiosity on some dll and can address
* then) and the other is that for a few executables the .bss mapping is much
             * larger (~0x20000 larger) than expected when running under DR (but not
* running natively where it is instead the expected size). Both could just
* be the loader merging adjacent identically protected regions though I
* can't explain the discrepancy between DR and native given that our vmmheap
* is elsewhere in the address space (so who and how allocated that adjacent
* memory). I've yet to see any issue with dynamically loaded modules so
* it's probably the loader merging regions. Still worth investigating. */
ASSERT_CURIOSITY(inode == 0 /*see above comment*/ ||
module_contains_addr(ma, base + size - 1));
}
/* Handle cases like transparent huge pages where there are anon regions on top
* of the file mapping (i#2566).
*/
if (ma->names.inode == 0)
ma->names.inode = inode;
ASSERT_CURIOSITY(ma->names.inode == inode || inode == 0 /* for .bss */);
DOCHECK(1, {
if (readable && module_is_header(base, size)) {
/* Case 8879: For really small modules, to save disk space, the same
* disk page could hold both RO and .data, occupying just 1 page of
* disk space, e.g. /usr/lib/httpd/modules/mod_auth_anon.so. When
* such a module is mapped in, the os maps the same disk page twice,
* one readonly and one copy-on-write (see pg. 96, Sec 4.4 from
* Linkers and Loaders by John R. Levine). This makes the data
* section also satisfy the elf_header check above. So, if the new
* mmap overlaps an elf_area and it is also a header, then make sure
                 * the previous page (correcting for alignment) is also an elf_header.
* Note, if it is a header of a different module, then we'll not have
* an overlap, so we will not hit this case.
*/
ASSERT_CURIOSITY(
ma->start + ma->os_data.alignment ==
base
/* On Mac we walk the dyld module list before the
* address space, so we often hit modules we already
* know about. */
IF_MACOS(|| !dynamo_initialized && ma->start == base));
}
});
}
os_get_module_info_unlock();
#ifdef ANDROID
/* i#1860: we need to keep looking for the segment with .dynamic as Android's
* loader does not map the whole file up front.
*/
if (ma != NULL && at_map && readable)
os_module_update_dynamic_info(base, size, at_map);
#endif
return ma != NULL;
}
static void
os_add_new_app_module(dcontext_t *dcontext, bool at_map, app_pc base, size_t size,
uint memprot)
{
memquery_iter_t iter;
bool found_map = false;
uint64 inode = 0;
const char *filename = "";
size_t mod_size = size;
if (!at_map) {
/* the size is the first seg size, get the whole module size instead */
app_pc first_seg_base = NULL;
app_pc first_seg_end = NULL;
app_pc last_seg_end = NULL;
if (module_walk_program_headers(base, size, at_map, false, &first_seg_base,
&first_seg_end, &last_seg_end, NULL, NULL)) {
ASSERT_CURIOSITY(size ==
(ALIGN_FORWARD(first_seg_end, PAGE_SIZE) -
(ptr_uint_t)first_seg_base) ||
base == vdso_page_start || base == vsyscall_page_start);
mod_size =
ALIGN_FORWARD(last_seg_end, PAGE_SIZE) - (ptr_uint_t)first_seg_base;
}
}
LOG(THREAD, LOG_SYSCALLS | LOG_VMAREAS, 2, "dlopen " PFX "-" PFX "%s\n", base,
base + mod_size, TEST(MEMPROT_EXEC, memprot) ? " +x" : "");
/* Mapping in a new module. From what we've observed of the loader's
* behavior, it first maps the file in with size equal to the final
* memory image size (I'm not sure how it gets that size without reading
* in the elf header and then walking through all the program headers to
* get the largest virtual offset). This is necessary to reserve all the
* space that will be needed. It then walks through the program headers
     * mapping over the previously mapped space with the appropriate
* permissions and offsets. Note that the .bss portion is mapped over
* as anonymous. It may also, depending on the program headers, make some
* areas read-only after fixing up their relocations etc. NOTE - at
* no point are the section headers guaranteed to be mapped in so we can't
* reliably walk sections (only segments) without looking to disk.
*/
/* FIXME - when should we add the module to our list? At the first map
* seems to be the best choice as we know the bounds and it's difficult to
* tell when the loader is finished. The downside is that at the initial map
* the memory layout isn't finalized (memory beyond the first segment will
* be shifted for page alignment reasons), so we have to be careful and
* make adjustments to read anything beyond the first segment until the
* loader finishes. This goes for the client too as it gets notified when we
* add to the list. FIXME we could try to track the expected segment overmaps
* and only notify the client after the last one (though that's still before
* linking and relocation, but that's true on Windows too). */
/* Get filename & inode for the list. */
memquery_iterator_start(&iter, base, true /* plan to alloc a module_area_t */);
while (memquery_iterator_next(&iter)) {
if (iter.vm_start == base) {
ASSERT_CURIOSITY(iter.inode != 0 || base == vdso_page_start ||
base == vsyscall_page_start);
ASSERT_CURIOSITY(iter.offset == 0); /* first map shouldn't have offset */
/* XREF 307599 on rounding module end to the next PAGE boundary */
ASSERT_CURIOSITY(
(iter.vm_end - iter.vm_start == ALIGN_FORWARD(size, PAGE_SIZE)));
inode = iter.inode;
filename = dr_strdup(iter.comment HEAPACCT(ACCT_OTHER));
found_map = true;
break;
}
}
memquery_iterator_stop(&iter);
#ifdef HAVE_MEMINFO
    /* Barring weird races we should find this map. */
ASSERT_CURIOSITY(found_map);
#else /* HAVE_MEMINFO */
/* Without /proc/maps or other memory querying interface available at
* library map time, there is no way to find out the name of the file
* that was mapped, thus its inode isn't available either.
*
* Just module_list_add with no filename will still result in
* library name being extracted from the .dynamic section and added
* to the module list. However, this name may not always exist, thus
* we might have a library with no file name available at all!
*
     * Note: visor implements vsi mem maps that give file info, but no
     * path; that should be ok.  xref PR 401580.
*
* Once PR 235433 is implemented in visor then fix memquery_iterator*() to
* use vsi to find out page protection info, file name & inode.
*/
#endif /* HAVE_MEMINFO */
/* XREF 307599 on rounding module end to the next PAGE boundary */
if (found_map) {
module_list_add(base, ALIGN_FORWARD(mod_size, PAGE_SIZE), at_map, filename,
inode);
dr_strfree(filename HEAPACCT(ACCT_OTHER));
}
}
void
os_check_new_app_module(dcontext_t *dcontext, app_pc pc)
{
module_area_t *ma;
os_get_module_info_lock();
ma = module_pc_lookup(pc);
/* ma might be NULL due to dynamic generated code or custom loaded modules */
if (ma == NULL) {
dr_mem_info_t info;
/* i#1760: an app module loaded by custom loader (e.g., bionic libc)
* might not be detected by DynamoRIO in process_mmap.
*/
if (query_memory_ex_from_os(pc, &info) && info.type == DR_MEMTYPE_IMAGE) {
/* add the missing module */
os_get_module_info_unlock();
os_add_new_app_module(get_thread_private_dcontext(), false /*!at_map*/,
info.base_pc, info.size, info.prot);
os_get_module_info_lock();
}
}
os_get_module_info_unlock();
}
/* All processing for mmap and mmap2. */
static void
process_mmap(dcontext_t *dcontext, app_pc base, size_t size, uint prot,
uint flags _IF_DEBUG(const char *map_type))
{
bool image = false;
uint memprot = osprot_to_memprot(prot);
#ifdef ANDROID
/* i#1861: avoid merging file-backed w/ anon regions */
if (!TEST(MAP_ANONYMOUS, flags))
memprot |= MEMPROT_HAS_COMMENT;
#endif
LOG(THREAD, LOG_SYSCALLS, 4, "process_mmap(" PFX "," PFX ",0x%x,%s,%s)\n", base, size,
flags, memprot_string(memprot), map_type);
/* Notes on how ELF SOs are mapped in.
*
* o The initial mmap for an ELF file specifies enough space for
* all segments (and their constituent sections) in the file.
* The protection bits for that section are used for the entire
* region, and subsequent mmaps for subsequent segments within
* the region modify their portion's protection bits as needed.
* So if the prot bits for the first segment are +x, the entire
* region is +x. ** Note that our primary concern is adjusting
* exec areas to reflect the prot bits of subsequent
* segments. ** The region is added to the all-memory areas
* and also to exec areas (as determined by app_memory_allocation()).
*
* o Any subsequent segment sub-mappings specify their own protection
* bits and therefore are added to the exec areas via normal
* processing. They are also "naturally" added to the all-mems list.
* We do a little extra processing when mapping into a previously
* mapped region and the prot bits mismatch; if the new mapping is
* not +x, flushing needs to occur.
*/
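    /* A dlopen() of an ELF SO thus typically shows up as one large initial file map
     * covering the whole image (with the first segment's protection), followed by
     * smaller fixed maps for the remaining segments (and an anonymous map for .bss)
     * over parts of that region.
     */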
/* process_mmap can be called with PROT_NONE, so we need to check if we
     * can read the memory to see if it is an elf_header
*/
/* XXX: get inode for check */
if (TEST(MAP_ANONYMOUS, flags)) {
/* not an ELF mmap */
LOG(THREAD, LOG_SYSCALLS, 4, "mmap " PFX ": anon\n", base);
} else if (mmap_check_for_module_overlap(base, size, TEST(MEMPROT_READ, memprot), 0,
true)) {
        /* FIXME - how can we distinguish between the loader mapping the segments
         * over the initial map and someone just mapping over part of a module? If
         * it is the latter case, we need to adjust the view size or remove from
         * the module list. */
image = true;
DODEBUG({ map_type = "ELF SO"; });
LOG(THREAD, LOG_SYSCALLS, 4, "mmap " PFX ": overlaps image\n", base);
} else if (TEST(MEMPROT_READ, memprot) &&
/* i#727: We can still get SIGBUS on mmap'ed files that can't be
* read, so pass size=0 to use a safe_read.
*/
module_is_header(base, 0)) {
#ifdef ANDROID
/* The Android loader's initial all-segment-covering mmap is anonymous */
dr_mem_info_t info;
if (query_memory_ex_from_os((byte *)ALIGN_FORWARD(base + size, PAGE_SIZE),
&info) &&
info.prot == MEMPROT_NONE && info.type == DR_MEMTYPE_DATA) {
LOG(THREAD, LOG_SYSCALLS, 4, "mmap " PFX ": Android elf\n", base);
image = true;
DODEBUG({ map_type = "ELF SO"; });
os_add_new_app_module(dcontext, true /*at_map*/, base,
/* pass segment size, not whole module size */
size, memprot);
} else
#endif
if (module_is_partial_map(base, size, memprot)) {
/* i#1240: App might read first page of ELF header using mmap, which
* might accidentally be treated as a module load. Heuristically
* distinguish this by saying that if this is the first mmap for an ELF
* (i.e., it doesn't overlap with a previous map), and if it's small,
* then don't treat it as a module load.
*/
LOG(THREAD, LOG_SYSCALLS, 4, "mmap " PFX ": partial\n", base);
} else {
LOG(THREAD, LOG_SYSCALLS, 4, "mmap " PFX ": elf header\n", base);
image = true;
DODEBUG({ map_type = "ELF SO"; });
os_add_new_app_module(dcontext, true /*at_map*/, base, size, memprot);
}
}
LOG(THREAD, LOG_SYSCALLS, 4, "\t try app_mem_alloc\n");
IF_NO_MEMQUERY(memcache_handle_mmap(dcontext, base, size, memprot, image));
if (app_memory_allocation(dcontext, base, size, memprot, image _IF_DEBUG(map_type)))
STATS_INC(num_app_code_modules);
LOG(THREAD, LOG_SYSCALLS, 4, "\t app_mem_alloc -- DONE\n");
}
#ifdef LINUX
/* Call right after the system call.
* i#173: old_prot and old_type should be from before the system call
*/
static bool
handle_app_mremap(dcontext_t *dcontext, byte *base, size_t size, byte *old_base,
size_t old_size, uint old_prot, uint old_type)
{
if (!mmap_syscall_succeeded(base))
return false;
if (base != old_base || size < old_size) { /* take action only if
* there was a change */
DEBUG_DECLARE(bool ok;)
/* fragments were shifted...don't try to fix them, just flush */
app_memory_deallocation(dcontext, (app_pc)old_base, old_size,
false /* don't own thread_initexit_lock */,
false /* not image, FIXME: somewhat arbitrary */);
DOCHECK(1, {
/* we don't expect to see remappings of modules */
os_get_module_info_lock();
ASSERT_CURIOSITY(!module_overlaps(base, size));
os_get_module_info_unlock();
});
/* Verify that the current prot on the new region (according to
* the os) is the same as what the prot used to be for the old
* region.
*/
DOCHECK(1, {
uint memprot;
ok = get_memory_info_from_os(base, NULL, NULL, &memprot);
/* allow maps to have +x,
* +x may be caused by READ_IMPLIES_EXEC set in personality flag (i#262)
*/
ASSERT(ok &&
(memprot == old_prot || (memprot & (~MEMPROT_EXEC)) == old_prot));
});
app_memory_allocation(dcontext, base, size, old_prot,
old_type == DR_MEMTYPE_IMAGE _IF_DEBUG("mremap"));
IF_NO_MEMQUERY(memcache_handle_mremap(dcontext, base, size, old_base, old_size,
old_prot, old_type));
}
return true;
}
static void
handle_app_brk(dcontext_t *dcontext, byte *lowest_brk /*if known*/, byte *old_brk,
byte *new_brk)
{
/* i#851: the brk might not be page aligned */
old_brk = (app_pc)ALIGN_FORWARD(old_brk, PAGE_SIZE);
new_brk = (app_pc)ALIGN_FORWARD(new_brk, PAGE_SIZE);
if (new_brk < old_brk) {
/* Usually the heap is writable, so we don't really need to call
* this here: but seems safest to do so, esp if someone made part of
* the heap read-only and then put code there.
*/
app_memory_deallocation(dcontext, new_brk, old_brk - new_brk,
false /* don't own thread_initexit_lock */,
false /* not image */);
} else if (new_brk > old_brk) {
/* No need to call app_memory_allocation() as doesn't interact
* w/ security policies.
*/
}
IF_NO_MEMQUERY(memcache_handle_app_brk(lowest_brk, old_brk, new_brk));
}
#endif
/* This routine is *not* called if pre_system_call() returns false to skip
* the syscall.
*/
/* XXX: split out specific handlers into separate routines
*/
void
post_system_call(dcontext_t *dcontext)
{
priv_mcontext_t *mc = get_mcontext(dcontext);
/* registers have been clobbered, so sysnum is kept in dcontext */
int sysnum = dcontext->sys_num;
/* We expect most syscall failures to return < 0, so >= 0 is success.
* Some syscall return addresses that have the sign bit set and so
* appear to be failures but are not. They are handled on a
* case-by-case basis in the switch statement below.
*/
ptr_int_t result = (ptr_int_t)MCXT_SYSCALL_RES(mc); /* signed */
bool success = syscall_successful(mc, sysnum);
app_pc base;
size_t size;
uint prot;
dr_where_am_i_t old_whereami;
DEBUG_DECLARE(bool ok;)
RSTATS_INC(post_syscall);
old_whereami = dcontext->whereami;
dcontext->whereami = DR_WHERE_SYSCALL_HANDLER;
#if defined(LINUX) && defined(X86)
/* PR 313715: restore xbp since for some vsyscall sequences that use
* the syscall instruction its value is needed:
* 0xffffe400 <__kernel_vsyscall+0>: push %ebp
* 0xffffe401 <__kernel_vsyscall+1>: mov %ecx,%ebp
* 0xffffe403 <__kernel_vsyscall+3>: syscall
* 0xffffe405 <__kernel_vsyscall+5>: mov $0x2b,%ecx
* 0xffffe40a <__kernel_vsyscall+10>: movl %ecx,%ss
* 0xffffe40c <__kernel_vsyscall+12>: mov %ebp,%ecx
* 0xffffe40e <__kernel_vsyscall+14>: pop %ebp
* 0xffffe40f <__kernel_vsyscall+15>: ret
*/
if (should_syscall_method_be_sysenter() && !dcontext->sys_was_int) {
mc->xbp = dcontext->sys_xbp;
}
#endif
/* handle fork, try to do it early before too much logging occurs */
if (false
#ifdef SYS_fork
        || sysnum == SYS_fork
#endif
        IF_LINUX(|| (sysnum == SYS_clone && !TEST(CLONE_VM, dcontext->sys_param0)))) {
if (result == 0) {
/* we're the child */
thread_id_t child = get_sys_thread_id();
#ifdef DEBUG
thread_id_t parent = get_parent_id();
SYSLOG_INTERNAL_INFO("-- parent %d forked child %d --", parent, child);
#endif
/* first, fix TLS of dcontext */
ASSERT(parent != 0);
/* change parent pid to our pid */
replace_thread_id(dcontext->owning_thread, child);
dcontext->owning_thread = child;
dcontext->owning_process = get_process_id();
            /* Now let dynamo initialize new shared memory, logfiles, etc.; that
             * needs access to static vars in dynamo.c, which is why we don't do it
             * here. */
            /* FIXME - xref PR 246902 - d_r_dispatch runs a lot of code before
             * getting to post_system_call(): is any of that going to be messed up
             * by waiting till here to fix up the child logfolder/file and tid?
             */
dynamorio_fork_init(dcontext);
LOG(THREAD, LOG_SYSCALLS, 1,
"after fork-like syscall: parent is %d, child is %d\n", parent, child);
} else {
/* we're the parent */
os_fork_post(dcontext, true /*parent*/);
}
}
LOG(THREAD, LOG_SYSCALLS, 2, "post syscall: sysnum=" PFX ", result=" PFX " (%d)\n",
sysnum, MCXT_SYSCALL_RES(mc), (int)MCXT_SYSCALL_RES(mc));
switch (sysnum) {
/****************************************************************************/
/* MEMORY REGIONS */
#ifdef DEBUG
# ifdef MACOS
case SYS_open_nocancel:
# endif
# ifdef SYS_open
case SYS_open: {
if (success) {
/* useful for figuring out what module was loaded that then triggers
* module.c elf curiosities
*/
LOG(THREAD, LOG_SYSCALLS, 2, "SYS_open %s => %d\n", dcontext->sys_param0,
(int)result);
}
break;
}
# endif
#endif
#if defined(LINUX) && !defined(X64) && !defined(ARM)
case SYS_mmap:
#endif
case IF_MACOS_ELSE(SYS_mmap, IF_X64_ELSE(SYS_mmap, SYS_mmap2)): {
uint flags;
DEBUG_DECLARE(const char *map_type;)
RSTATS_INC(num_app_mmaps);
base = (app_pc)MCXT_SYSCALL_RES(mc); /* For mmap, it's NOT arg->addr! */
/* mmap isn't simply a user-space wrapper for mmap2. It's called
* directly when dynamically loading an SO, i.e., dlopen(). */
#ifdef LINUX /* MacOS success is in CF */
success = mmap_syscall_succeeded((app_pc)result);
/* The syscall either failed OR the retcode is less than the
* largest uint value of any errno and the addr returned is
* page-aligned.
*/
ASSERT_CURIOSITY(
!success ||
((app_pc)result < (app_pc)(ptr_int_t)-0x1000 && ALIGNED(base, PAGE_SIZE)));
#else
ASSERT_CURIOSITY(!success || ALIGNED(base, PAGE_SIZE));
#endif
if (!success)
goto exit_post_system_call;
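        /* The legacy SYS_mmap packs its six arguments into a struct in app memory
         * (whose pointer we saved as sys_param0 pre-syscall), while mmap2 passes
         * them in registers; hence the two extraction paths below.
         */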
#if defined(LINUX) && !defined(X64) && !defined(ARM)
if (sysnum == SYS_mmap) {
/* The syscall succeeded so the read of 'arg' should be
* safe. */
mmap_arg_struct_t *arg = (mmap_arg_struct_t *)dcontext->sys_param0;
size = (size_t)arg->len;
prot = (uint)arg->prot;
flags = (uint)arg->flags;
DEBUG_DECLARE(map_type = "mmap";)
} else {
#endif
size = (size_t)dcontext->sys_param1;
prot = (uint)dcontext->sys_param2;
flags = (uint)dcontext->sys_param3;
DEBUG_DECLARE(map_type = IF_X64_ELSE("mmap2", "mmap");)
#if defined(LINUX) && !defined(X64) && !defined(ARM)
}
#endif
process_mmap(dcontext, base, size, prot, flags _IF_DEBUG(map_type));
break;
}
case SYS_munmap: {
app_pc addr = (app_pc)dcontext->sys_param0;
size_t len = (size_t)dcontext->sys_param1;
/* We assumed in pre_system_call() that the unmap would succeed
* and flushed fragments and removed the region from exec areas.
* If the unmap failed, we re-add the region to exec areas.
*
* The same logic can be used on Windows (but isn't yet).
*/
/* FIXME There are shortcomings to the approach. If another thread
* executes in the region after our pre_system_call processing
* but before the re-add below, it will get a security violation.
* That's less than ideal but at least isn't a security hole.
* The overall shortcoming is that we lose the state from our
* stateful security policies -- future exec list, tables used
* for RCT (.C/.E/.F) -- which can't be easily restored. Also,
* the re-add could add a region that wasn't on the exec list
* previously.
*
* See case 7559 for a better approach.
*/
if (!success) {
dr_mem_info_t info;
/* must go to os to get real memory since we already removed */
DEBUG_DECLARE(ok =)
query_memory_ex_from_os(addr, &info);
ASSERT(ok);
app_memory_allocation(dcontext, addr, len, info.prot,
info.type ==
DR_MEMTYPE_IMAGE _IF_DEBUG("failed munmap"));
IF_NO_MEMQUERY(
memcache_update_locked((app_pc)ALIGN_BACKWARD(addr, PAGE_SIZE),
(app_pc)ALIGN_FORWARD(addr + len, PAGE_SIZE),
info.prot, info.type, false /*add back*/));
}
break;
}
#ifdef LINUX
case SYS_mremap: {
app_pc old_base = (app_pc)dcontext->sys_param0;
size_t old_size = (size_t)dcontext->sys_param1;
base = (app_pc)MCXT_SYSCALL_RES(mc);
size = (size_t)dcontext->sys_param2;
/* even if no shift, count as munmap plus mmap */
RSTATS_INC(num_app_munmaps);
RSTATS_INC(num_app_mmaps);
success =
handle_app_mremap(dcontext, base, size, old_base, old_size,
/* i#173: use memory prot and type
* obtained from pre_system_call
*/
(uint)dcontext->sys_param3, (uint)dcontext->sys_param4);
/* The syscall either failed OR the retcode is less than the
* largest uint value of any errno and the addr returned is
         * page-aligned.
*/
ASSERT_CURIOSITY(
!success ||
((app_pc)result < (app_pc)(ptr_int_t)-0x1000 && ALIGNED(base, PAGE_SIZE)));
if (!success)
goto exit_post_system_call;
break;
}
#endif
case SYS_mprotect: {
base = (app_pc)dcontext->sys_param0;
size = dcontext->sys_param1;
prot = dcontext->sys_param2;
#ifdef VMX86_SERVER
/* PR 475111: workaround for PR 107872 */
if (os_in_vmkernel_userworld() && result == -EBUSY && prot == PROT_NONE) {
result = mprotect_syscall(base, size, PROT_READ);
/* since non-Mac, we can use this even if the call failed */
set_success_return_val(dcontext, result);
success = (result >= 0);
LOG(THREAD, LOG_VMAREAS, 1,
"re-doing mprotect -EBUSY for " PFX "-" PFX " => %d\n", base, base + size,
(int)result);
SYSLOG_INTERNAL_WARNING_ONCE("re-doing mprotect for PR 475111, PR 107872");
}
#endif
/* FIXME i#143: we need to tweak the returned oldprot for
* writable areas we've made read-only
*/
if (!success) {
uint memprot = 0;
/* Revert the prot bits if needed. */
if (!get_memory_info_from_os(base, NULL, NULL, &memprot))
memprot = PROT_NONE;
LOG(THREAD, LOG_SYSCALLS, 3,
"syscall: mprotect failed: " PFX "-" PFX " prot->%d\n", base, base + size,
osprot_to_memprot(prot));
LOG(THREAD, LOG_SYSCALLS, 3, "\told prot->%d\n", memprot);
if (prot != memprot_to_osprot(memprot)) {
/* We're trying to reverse the prot change, assuming that
* this action doesn't have any unexpected side effects
* when doing so (such as not reversing some bit of internal
* state).
*/
uint new_memprot;
DEBUG_DECLARE(uint res =)
app_memory_protection_change(dcontext, base, size,
osprot_to_memprot(prot), &new_memprot, NULL,
false /*!image*/);
ASSERT_NOT_IMPLEMENTED(res != SUBSET_APP_MEM_PROT_CHANGE);
ASSERT(res == DO_APP_MEM_PROT_CHANGE ||
res == PRETEND_APP_MEM_PROT_CHANGE);
/* PR 410921 - Revert the changes to all-mems list.
* FIXME: This fix assumes the whole region had the prot &
* type, which is true in the cases we have seen so far, but
* theoretically may not be true. If it isn't true, multiple
* memory areas with different types/protections might have
* been changed in pre_system_call(), so will have to keep a
* list of all vmareas changed. This might be expensive for
* each mprotect syscall to guard against a rare theoretical bug.
*/
ASSERT_CURIOSITY(!dcontext->mprot_multi_areas);
IF_NO_MEMQUERY(memcache_update_locked(
base, base + size, memprot, -1 /*type unchanged*/, true /*exists*/));
}
}
break;
}
#ifdef ANDROID
case SYS_prctl: {
int code = (int)dcontext->sys_param0;
int subcode = (ulong)dcontext->sys_param1;
if (success && code == PR_SET_VMA && subcode == PR_SET_VMA_ANON_NAME) {
byte *addr = (byte *)dcontext->sys_param2;
size_t len = (size_t)dcontext->sys_param3;
IF_DEBUG(const char *comment = (const char *)dcontext->sys_param4;)
uint memprot = 0;
if (!get_memory_info_from_os(addr, NULL, NULL, &memprot))
memprot = MEMPROT_NONE;
/* We're post-syscall so from_os should match the prctl */
ASSERT((comment == NULL && !TEST(MEMPROT_HAS_COMMENT, memprot)) ||
(comment != NULL && TEST(MEMPROT_HAS_COMMENT, memprot)));
LOG(THREAD, LOG_SYSCALLS, 2,
"syscall: prctl PR_SET_VMA_ANON_NAME base=" PFX " size=" PFX
" comment=%s\n",
addr, len, comment == NULL ? "<null>" : comment);
IF_NO_MEMQUERY(memcache_update_locked(
addr, addr + len, memprot, -1 /*type unchanged*/, true /*exists*/));
}
break;
}
#endif
#ifdef LINUX
case SYS_brk: {
/* i#91/PR 396352: need to watch SYS_brk to maintain all_memory_areas.
* This code should work regardless of whether syscall failed
* (if it failed, the old break will be returned). We stored
* the old break in sys_param1 in pre-syscall.
*/
app_pc old_brk = (app_pc)dcontext->sys_param1;
app_pc new_brk = (app_pc)result;
DEBUG_DECLARE(app_pc req_brk = (app_pc)dcontext->sys_param0;);
ASSERT(!DYNAMO_OPTION(emulate_brk)); /* shouldn't get here */
# ifdef DEBUG
if (DYNAMO_OPTION(early_inject) &&
req_brk != NULL /* Ignore calls that don't increase brk. */) {
DO_ONCE({
ASSERT_CURIOSITY(new_brk > old_brk &&
"i#1004: first brk() "
"allocation failed with -early_inject");
});
}
# endif
handle_app_brk(dcontext, NULL, old_brk, new_brk);
break;
}
#endif
/****************************************************************************/
/* SPAWNING -- fork mostly handled above */
#ifdef LINUX
case SYS_clone: {
/* in /usr/src/linux/arch/i386/kernel/process.c */
LOG(THREAD, LOG_SYSCALLS, 2, "syscall: clone returned " PFX "\n",
MCXT_SYSCALL_RES(mc));
/* We switch the lib tls segment back to dr's privlib segment.
* Please refer to comment on os_switch_lib_tls.
* It is only called in parent thread.
* The child thread's tls setup is done in os_tls_app_seg_init.
*/
if (was_thread_create_syscall(dcontext)) {
if (IF_CLIENT_INTERFACE_ELSE(INTERNAL_OPTION(private_loader), false))
os_switch_lib_tls(dcontext, false /*to dr*/);
/* i#2089: we already restored the DR tls in os_clone_post() */
}
break;
}
#elif defined(MACOS) && !defined(X64)
case SYS_bsdthread_create: {
/* restore stack values we clobbered */
ASSERT(*sys_param_addr(dcontext, 0) == (reg_t)new_bsdthread_intercept);
*sys_param_addr(dcontext, 0) = dcontext->sys_param0;
*sys_param_addr(dcontext, 1) = dcontext->sys_param1;
break;
}
#endif
#ifdef SYS_fork
case SYS_fork: {
LOG(THREAD, LOG_SYSCALLS, 2, "syscall: fork returned " PFX "\n",
MCXT_SYSCALL_RES(mc));
break;
}
#endif
#ifdef SYS_vfork
case SYS_vfork: {
LOG(THREAD, LOG_SYSCALLS, 2, "syscall: vfork returned " PFX "\n",
MCXT_SYSCALL_RES(mc));
IF_LINUX(ASSERT(was_thread_create_syscall(dcontext)));
/* restore xsp in parent */
LOG(THREAD, LOG_SYSCALLS, 2, "vfork: restoring xsp from " PFX " to " PFX "\n",
mc->xsp, dcontext->sys_param1);
mc->xsp = dcontext->sys_param1;
if (MCXT_SYSCALL_RES(mc) != 0) {
/* We switch the lib tls segment back to dr's segment.
* Please refer to comment on os_switch_lib_tls.
* It is only called in parent thread.
* The child thread's tls setup is done in os_tls_app_seg_init.
*/
if (IF_CLIENT_INTERFACE_ELSE(INTERNAL_OPTION(private_loader), false)) {
os_switch_lib_tls(dcontext, false /*to dr*/);
}
/* i#2089: we already restored the DR tls in os_clone_post() */
}
break;
}
#endif
case SYS_execve: {
/* if we get here it means execve failed (doesn't return on success) */
success = false;
mark_thread_execve(dcontext->thread_record, false);
ASSERT(result < 0);
LOG(THREAD, LOG_SYSCALLS, 2, "syscall: execve failed\n");
handle_execve_post(dcontext);
/* Don't 'break' as we have an ASSERT(success) just below
* the switch(). */
goto exit_post_system_call;
break; /* unnecessary but good form so keep it */
}
/****************************************************************************/
/* SIGNALS */
case IF_MACOS_ELSE(SYS_sigaction, SYS_rt_sigaction): { /* 174 */
/* in /usr/src/linux/kernel/signal.c:
asmlinkage long
sys_rt_sigaction(int sig, const struct sigaction *act,
struct sigaction *oact, size_t sigsetsize)
*/
/* FIXME i#148: Handle syscall failure. */
int sig = (int)dcontext->sys_param0;
const kernel_sigaction_t *act = (const kernel_sigaction_t *)dcontext->sys_param1;
prev_sigaction_t *oact = (prev_sigaction_t *)dcontext->sys_param2;
size_t sigsetsize = (size_t)dcontext->sys_param3;
uint res;
res = handle_post_sigaction(dcontext, success, sig, act, oact, sigsetsize);
LOG(THREAD, LOG_SYSCALLS, 2, "syscall: %ssigaction => %d\n",
IF_MACOS_ELSE("", "rt_"), -res);
if (res != 0)
set_failure_return_val(dcontext, res);
if (!success || res != 0)
goto exit_post_system_call;
break;
}
#if defined(LINUX) && !defined(X64)
case SYS_sigaction: { /* 67 */
int sig = (int)dcontext->sys_param0;
const old_sigaction_t *act = (const old_sigaction_t *)dcontext->sys_param1;
old_sigaction_t *oact = (old_sigaction_t *)dcontext->sys_param2;
uint res = handle_post_old_sigaction(dcontext, success, sig, act, oact);
LOG(THREAD, LOG_SYSCALLS, 2, "syscall: sigaction => %d\n", -res);
if (res != 0)
set_failure_return_val(dcontext, res);
if (!success || res != 0)
goto exit_post_system_call;
break;
}
#endif
case IF_MACOS_ELSE(SYS_sigprocmask, SYS_rt_sigprocmask): { /* 175 */
/* in /usr/src/linux/kernel/signal.c:
asmlinkage long
sys_rt_sigprocmask(int how, sigset_t *set, sigset_t *oset,
size_t sigsetsize)
*/
/* FIXME i#148: Handle syscall failure. */
handle_post_sigprocmask(
dcontext, (int)dcontext->sys_param0, (kernel_sigset_t *)dcontext->sys_param1,
(kernel_sigset_t *)dcontext->sys_param2, (size_t)dcontext->sys_param3);
break;
}
#if defined(LINUX) && !defined(X64)
case SYS_sigreturn: /* 119 */
#endif
case IF_MACOS_ELSE(SYS_sigreturn, SYS_rt_sigreturn): /* 173 */
/* there is no return value: it's just the value of eax, so avoid
* assert below
*/
success = true;
break;
case SYS_setitimer: /* 104 */
handle_post_setitimer(dcontext, success, (int)dcontext->sys_param0,
(const struct itimerval *)dcontext->sys_param1,
(struct itimerval *)dcontext->sys_param2);
break;
case SYS_getitimer: /* 105 */
handle_post_getitimer(dcontext, success, (int)dcontext->sys_param0,
(struct itimerval *)dcontext->sys_param1);
break;
#if defined(LINUX) && defined(X86)
case SYS_alarm: /* 27 on x86 and 37 on x64 */
handle_post_alarm(dcontext, success, (unsigned int)dcontext->sys_param0);
break;
#endif
#if defined(LINUX) && defined(X86) && defined(X64)
case SYS_arch_prctl: {
if (success && INTERNAL_OPTION(mangle_app_seg)) {
tls_handle_post_arch_prctl(dcontext, dcontext->sys_param0,
dcontext->sys_param1);
}
break;
}
#endif
#ifdef LINUX
case SYS_ppoll: {
if (dcontext->sys_param3 == (reg_t)NULL)
break;
handle_post_extended_syscall_sigmasks(dcontext, success);
set_syscall_param(dcontext, 3, dcontext->sys_param3);
break;
}
case SYS_pselect6: {
if (dcontext->sys_param4 == (reg_t)NULL)
break;
typedef struct {
kernel_sigset_t *sigmask;
size_t sizemask;
} data_t;
data_t *data_param = (data_t *)dcontext->sys_param3;
handle_post_extended_syscall_sigmasks(dcontext, success);
if (!safe_write_ex((void *)&data_param->sigmask, sizeof(data_param->sigmask),
&dcontext->sys_param4, NULL)) {
LOG(THREAD, LOG_SYSCALLS, 2, "\tEFAULT for pselect6 post syscall\n");
}
break;
}
case SYS_epoll_pwait: {
if (dcontext->sys_param4 == (reg_t)NULL)
break;
handle_post_extended_syscall_sigmasks(dcontext, success);
set_syscall_param(dcontext, 4, dcontext->sys_param4);
break;
}
#endif
/****************************************************************************/
/* FILES */
#ifdef SYS_dup2
case SYS_dup2: IF_LINUX(case SYS_dup3:) {
# ifdef LINUX
if (success) {
signal_handle_dup(dcontext, (file_t)sys_param(dcontext, 1),
(file_t)result);
}
# endif
break;
}
#endif
#ifdef MACOS
case SYS_fcntl_nocancel:
#endif
case SYS_fcntl: {
#ifdef LINUX /* Linux-only since only for signalfd */
if (success) {
file_t fd = (long)dcontext->sys_param0;
int cmd = (int)dcontext->sys_param1;
if ((cmd == F_DUPFD || cmd == F_DUPFD_CLOEXEC))
signal_handle_dup(dcontext, fd, (file_t)result);
}
break;
#endif
}
case IF_MACOS_ELSE(SYS_getrlimit, IF_X64_ELSE(SYS_getrlimit, SYS_ugetrlimit)): {
int resource = dcontext->sys_param0;
if (success && resource == RLIMIT_NOFILE) {
/* we stole some space: hide it from app */
struct rlimit *rlim = (struct rlimit *)dcontext->sys_param1;
safe_write_ex(&rlim->rlim_cur, sizeof(rlim->rlim_cur),
&app_rlimit_nofile.rlim_cur, NULL);
safe_write_ex(&rlim->rlim_max, sizeof(rlim->rlim_max),
&app_rlimit_nofile.rlim_max, NULL);
}
break;
}
#if !defined(ARM) && !defined(X64) && !defined(MACOS)
/* Old struct w/ smaller fields */
case SYS_getrlimit: {
int resource = dcontext->sys_param0;
if (success && resource == RLIMIT_NOFILE) {
struct compat_rlimit *rlim = (struct compat_rlimit *)dcontext->sys_param1;
safe_write_ex(&rlim->rlim_cur, sizeof(rlim->rlim_cur),
&app_rlimit_nofile.rlim_cur, NULL);
safe_write_ex(&rlim->rlim_max, sizeof(rlim->rlim_max),
&app_rlimit_nofile.rlim_max, NULL);
}
break;
}
#endif
#ifdef LINUX
case SYS_prlimit64: {
int resource = dcontext->sys_param1;
rlimit64_t *rlim = (rlimit64_t *)dcontext->sys_param3;
if (success && resource == RLIMIT_NOFILE && rlim != NULL &&
/* XXX: xref pid discussion in pre_system_call SYS_prlimit64 */
(dcontext->sys_param0 == 0 || dcontext->sys_param0 == get_process_id())) {
safe_write_ex(rlim, sizeof(*rlim), &app_rlimit_nofile, NULL);
}
break;
}
#endif
#ifdef LINUX
# ifdef SYS_readlink
case SYS_readlink:
# endif
case SYS_readlinkat:
if (success && DYNAMO_OPTION(early_inject)) {
bool is_at = (sysnum == SYS_readlinkat);
/* i#907: /proc/self/exe is a symlink to libdynamorio.so. We need
* to fix it up if the app queries. Any thread id can be passed to
* /proc/%d/exe, so we have to check. We could instead look for
* libdynamorio.so in the result but we've tweaked our injector
* in the past to exec different binaries so this seems more robust.
*/
if (symlink_is_self_exe((const char *)(is_at ? dcontext->sys_param1
: dcontext->sys_param0))) {
char *tgt = (char *)(is_at ? dcontext->sys_param2 : dcontext->sys_param1);
size_t tgt_sz =
(size_t)(is_at ? dcontext->sys_param3 : dcontext->sys_param2);
int len = snprintf(tgt, tgt_sz, "%s", get_application_name());
if (len > 0)
set_success_return_val(dcontext, len);
else {
set_failure_return_val(dcontext, EINVAL);
DODEBUG({ dcontext->expect_last_syscall_to_fail = true; });
}
}
}
break;
case SYS_rseq:
/* Lazy rseq handling. */
if (success) {
rseq_process_syscall(dcontext);
rseq_locate_rseq_regions();
}
break;
#endif
default:
#ifdef VMX86_SERVER
if (is_vmkuw_sysnum(sysnum)) {
vmkuw_post_system_call(dcontext);
break;
}
#endif
break;
} /* switch */
DODEBUG({
if (ignorable_system_call_normalized(sysnum)) {
STATS_INC(post_syscall_ignorable);
} else {
/* Many syscalls can fail though they aren't ignored. However, they
* shouldn't happen without us knowing about them. See PR 402769
* for SYS_close case.
*/
if (!(success || sysnum == SYS_close ||
IF_MACOS(sysnum == SYS_close_nocancel ||)
dcontext->expect_last_syscall_to_fail)) {
LOG(THREAD, LOG_SYSCALLS, 1,
"Unexpected failure of non-ignorable syscall %d\n", sysnum);
}
}
});
exit_post_system_call:
#ifdef CLIENT_INTERFACE
/* The instrument_post_syscall should be called after DR finishes all
* its operations, since DR needs to know the real syscall results,
* and any changes made by the client are simply to fool the app.
* Also, dr_syscall_invoke_another() needs to set eax, which shouldn't
* affect the result of the 1st syscall. Xref i#1.
*/
/* after restore of xbp so client sees it as though was sysenter */
instrument_post_syscall(dcontext, sysnum);
#endif
dcontext->whereami = old_whereami;
}
#ifdef LINUX
# ifdef STATIC_LIBRARY
/* Static libraries may optionally define two linker variables
* (dynamorio_so_start and dynamorio_so_end) to help mitigate
 * edge cases in detecting DR's library bounds.
*
* If not specified, the variables' location will default to
* weak_dynamorio_so_bounds_filler and they will not be used.
* Note that referencing the value of these symbols will crash:
* always use the address only.
*/
extern int dynamorio_so_start WEAK
__attribute__((alias("weak_dynamorio_so_bounds_filler")));
extern int dynamorio_so_end WEAK
__attribute__((alias("weak_dynamorio_so_bounds_filler")));
static int weak_dynamorio_so_bounds_filler;
# else /* !STATIC_LIBRARY */
/* For non-static linux we always get our bounds from linker-provided symbols.
* Note that referencing the value of these symbols will crash: always use the
* address only.
*/
extern int dynamorio_so_start, dynamorio_so_end;
# endif /* STATIC_LIBRARY */
#endif /* LINUX */
/* get_dynamo_library_bounds initializes dynamorio library bounds, using a
* release-time assert if there is a problem doing so. It does not use any
* heap, and we assume it is called prior to find_executable_vm_areas in a
* single thread.
*/
static void
get_dynamo_library_bounds(void)
{
/* Note that we're not counting DYNAMORIO_PRELOAD_NAME as a DR area, to match
* Windows, so we should unload it like we do there. The other reason not to
* count it is so is_in_dynamo_dll() can be the only exception to the
     * never-execute-from-DR-areas list rule.
*/
int res;
app_pc check_start, check_end;
char *libdir;
const char *dynamorio_libname = NULL;
bool do_memquery = true;
#ifdef STATIC_LIBRARY
# ifdef LINUX
/* For static+linux, we might have linker vars to help us and we definitely
* know our "library name" since we are in the app. When we have both we
* don't need to do a memquery.
*/
if (&dynamorio_so_start != &weak_dynamorio_so_bounds_filler &&
&dynamorio_so_end != &weak_dynamorio_so_bounds_filler) {
do_memquery = false;
dynamo_dll_start = (app_pc)&dynamorio_so_start;
dynamo_dll_end = (app_pc)ALIGN_FORWARD(&dynamorio_so_end, PAGE_SIZE);
LOG(GLOBAL, LOG_VMAREAS, 2,
"Using dynamorio_so_start and dynamorio_so_end for library bounds"
"\n");
const char *dr_path = get_application_name();
strncpy(dynamorio_library_filepath, dr_path,
BUFFER_SIZE_ELEMENTS(dynamorio_library_filepath));
NULL_TERMINATE_BUFFER(dynamorio_library_filepath);
const char *slash = strrchr(dr_path, '/');
ASSERT(slash != NULL);
/* Include the slash in the library path */
size_t copy_chars = 1 + slash - dr_path;
ASSERT(copy_chars < BUFFER_SIZE_ELEMENTS(dynamorio_library_path));
strncpy(dynamorio_library_path, dr_path, copy_chars);
dynamorio_library_path[copy_chars] = '\0';
}
# endif
if (do_memquery) {
        /* No linker vars, so we need to find bounds using an internal PC */
check_start = (app_pc)&get_dynamo_library_bounds;
}
#else /* !STATIC_LIBRARY */
# ifdef LINUX
/* PR 361594: we get our bounds from linker-provided symbols.
* Note that referencing the value of these symbols will crash:
* always use the address only.
*/
extern int dynamorio_so_start, dynamorio_so_end;
dynamo_dll_start = (app_pc)&dynamorio_so_start;
dynamo_dll_end = (app_pc)ALIGN_FORWARD(&dynamorio_so_end, PAGE_SIZE);
# elif defined(MACOS)
dynamo_dll_start = module_dynamorio_lib_base();
# endif
check_start = dynamo_dll_start;
#endif /* STATIC_LIBRARY */
if (do_memquery) {
static char dynamorio_libname_buf[MAXIMUM_PATH];
res = memquery_library_bounds(
NULL, &check_start, &check_end, dynamorio_library_path,
BUFFER_SIZE_ELEMENTS(dynamorio_library_path), dynamorio_libname_buf,
BUFFER_SIZE_ELEMENTS(dynamorio_libname_buf));
ASSERT(res > 0);
#ifndef STATIC_LIBRARY
dynamorio_libname = IF_UNIT_TEST_ELSE(UNIT_TEST_EXE_NAME, dynamorio_libname_buf);
#endif /* STATIC_LIBRARY */
snprintf(dynamorio_library_filepath,
BUFFER_SIZE_ELEMENTS(dynamorio_library_filepath), "%s%s",
dynamorio_library_path, dynamorio_libname);
NULL_TERMINATE_BUFFER(dynamorio_library_filepath);
#if !defined(STATIC_LIBRARY) && defined(LINUX)
ASSERT(check_start == dynamo_dll_start && check_end == dynamo_dll_end);
#elif defined(MACOS)
ASSERT(check_start == dynamo_dll_start);
dynamo_dll_end = check_end;
#else
dynamo_dll_start = check_start;
dynamo_dll_end = check_end;
#endif
}
LOG(GLOBAL, LOG_VMAREAS, 1, PRODUCT_NAME " library path: %s\n",
dynamorio_library_path);
LOG(GLOBAL, LOG_VMAREAS, 1, PRODUCT_NAME " library file path: %s\n",
dynamorio_library_filepath);
LOG(GLOBAL, LOG_VMAREAS, 1, "DR library bounds: " PFX " to " PFX "\n",
dynamo_dll_start, dynamo_dll_end);
/* Issue 20: we need the path to the alt arch */
strncpy(dynamorio_alt_arch_path, dynamorio_library_path,
BUFFER_SIZE_ELEMENTS(dynamorio_alt_arch_path));
/* Assumption: libdir name is not repeated elsewhere in path */
libdir = strstr(dynamorio_alt_arch_path, IF_X64_ELSE(DR_LIBDIR_X64, DR_LIBDIR_X86));
if (libdir != NULL) {
const char *newdir = IF_X64_ELSE(DR_LIBDIR_X86, DR_LIBDIR_X64);
/* do NOT place the NULL */
strncpy(libdir, newdir, strlen(newdir));
} else {
SYSLOG_INTERNAL_WARNING("unable to determine lib path for cross-arch execve");
}
NULL_TERMINATE_BUFFER(dynamorio_alt_arch_path);
LOG(GLOBAL, LOG_VMAREAS, 1, PRODUCT_NAME " alt arch path: %s\n",
dynamorio_alt_arch_path);
snprintf(dynamorio_alt_arch_filepath,
BUFFER_SIZE_ELEMENTS(dynamorio_alt_arch_filepath), "%s%s",
dynamorio_alt_arch_path, dynamorio_libname);
NULL_TERMINATE_BUFFER(dynamorio_alt_arch_filepath);
if (dynamo_dll_start == NULL || dynamo_dll_end == NULL) {
REPORT_FATAL_ERROR_AND_EXIT(FAILED_TO_FIND_DR_BOUNDS, 2, get_application_name(),
get_application_pid());
}
}
/* Get the full path to our own library (cached); used for forking and the message
 * file name.
 */
char *
get_dynamorio_library_path(void)
{
if (!dynamorio_library_filepath[0]) { /* not cached */
get_dynamo_library_bounds();
}
return dynamorio_library_filepath;
}
#ifdef LINUX
/* Get full path+name of executable file from /proc/self/exe. Returns an empty
* string on error.
* FIXME i#47: This will return DR's path when using early injection.
*/
static char *
read_proc_self_exe(bool ignore_cache)
{
static char exepath[MAXIMUM_PATH];
static bool tried = false;
# ifdef MACOS
ASSERT_NOT_IMPLEMENTED(false);
# endif
if (!tried || ignore_cache) {
tried = true;
/* assume we have /proc/self/exe symlink: could add HAVE_PROC_EXE
* but we have no alternative solution except assuming the first
* /proc/self/maps entry is the executable
*/
ssize_t res;
DEBUG_DECLARE(int len =)
snprintf(exepath, BUFFER_SIZE_ELEMENTS(exepath), "/proc/%d/exe",
get_process_id());
ASSERT(len > 0);
NULL_TERMINATE_BUFFER(exepath);
/* i#960: readlink does not null terminate, so we do it. */
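        /* We deliberately reuse exepath as both the link path and the output buffer:
         * the kernel copies the path string in before filling the output buffer, so
         * this is safe.
         */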
# ifdef SYS_readlink
res = dynamorio_syscall(SYS_readlink, 3, exepath, exepath,
BUFFER_SIZE_ELEMENTS(exepath) - 1);
# else
res = dynamorio_syscall(SYS_readlinkat, 4, AT_FDCWD, exepath, exepath,
BUFFER_SIZE_ELEMENTS(exepath) - 1);
# endif
ASSERT(res < BUFFER_SIZE_ELEMENTS(exepath));
exepath[MAX(res, 0)] = '\0';
NULL_TERMINATE_BUFFER(exepath);
}
return exepath;
}
#endif /* LINUX */
app_pc
get_application_base(void)
{
if (executable_start == NULL) {
#if defined(STATIC_LIBRARY)
/* When compiled statically, the app and the DR's "library" are the same. */
executable_start = get_dynamorio_dll_start();
executable_end = get_dynamorio_dll_end();
#elif defined(HAVE_MEMINFO)
/* Haven't done find_executable_vm_areas() yet so walk maps ourselves */
const char *name = get_application_name();
if (name != NULL && name[0] != '\0') {
DEBUG_DECLARE(int count =)
memquery_library_bounds(name, &executable_start, &executable_end, NULL, 0,
NULL, 0);
ASSERT(count > 0 && executable_start != NULL);
}
#else
/* We have to fail. Should we dl_iterate this early? */
#endif
}
return executable_start;
}
app_pc
get_application_end(void)
{
if (executable_end == NULL)
get_application_base();
return executable_end;
}
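/* Returns the entry point recorded in the module list for the application
 * image, cached after the first successful lookup.
 */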
app_pc
get_image_entry()
{
static app_pc image_entry_point = NULL;
if (image_entry_point == NULL && executable_start != NULL) {
module_area_t *ma;
os_get_module_info_lock();
ma = module_pc_lookup(executable_start);
ASSERT(ma != NULL);
if (ma != NULL) {
ASSERT(executable_start == ma->start);
SELF_UNPROTECT_DATASEC(DATASEC_RARELY_PROT);
image_entry_point = ma->entry_point;
SELF_PROTECT_DATASEC(DATASEC_RARELY_PROT);
}
os_get_module_info_unlock();
}
return image_entry_point;
}
#ifdef DEBUG
void
mem_stats_snapshot()
{
/* FIXME: NYI */
}
#endif
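/* Returns whether pc lies within the DR library itself (and, on VMX86_SERVER,
 * within vmklib, which we treat as part of DR).
 */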
bool
is_in_dynamo_dll(app_pc pc)
{
ASSERT(dynamo_dll_start != NULL);
#ifdef VMX86_SERVER
/* We want to consider vmklib as part of the DR lib for allowing
* execution (_init calls os_in_vmkernel_classic()) and for
* reporting crashes as our fault
*/
if (vmk_in_vmklib(pc))
return true;
#endif
return (pc >= dynamo_dll_start && pc < dynamo_dll_end);
}
app_pc
get_dynamorio_dll_start()
{
if (dynamo_dll_start == NULL)
get_dynamo_library_bounds();
ASSERT(dynamo_dll_start != NULL);
return dynamo_dll_start;
}
app_pc
get_dynamorio_dll_end()
{
if (dynamo_dll_end == NULL)
get_dynamo_library_bounds();
ASSERT(dynamo_dll_end != NULL);
return dynamo_dll_end;
}
app_pc
get_dynamorio_dll_preferred_base()
{
/* On Linux there is no preferred base when we are PIC, so this is always
 * equal to dynamo_dll_start. */
return get_dynamorio_dll_start();
}
static void
found_vsyscall_page(memquery_iter_t *iter _IF_DEBUG(OUT const char **map_type))
{
#ifndef X64
/* We assume no vsyscall page for x64; thus, checking the
* hardcoded address shouldn't have any false positives.
*/
ASSERT(iter->vm_end - iter->vm_start == PAGE_SIZE ||
/* i#1583: recent kernels have 2-page vdso */
iter->vm_end - iter->vm_start == 2 * PAGE_SIZE);
ASSERT(!dynamo_initialized); /* .data should be +w */
/* we're not considering as "image" even if part of ld.so (xref i#89) and
* thus we aren't adjusting our code origins policies to remove the
* vsyscall page exemption.
*/
DODEBUG({ *map_type = "VDSO"; });
/* On re-attach, the vdso can be split into two entries (from DR's hook),
* so take just the first one as the start (xref i#2157).
*/
if (vdso_page_start == NULL) {
vdso_page_start = iter->vm_start;
vdso_size = iter->vm_end - iter->vm_start;
}
/* The vsyscall page can be on the 2nd page inside the vdso, but until we
* see a syscall we don't know and we point it at the vdso start.
*/
if (vsyscall_page_start == NULL)
vsyscall_page_start = iter->vm_start;
LOG(GLOBAL, LOG_VMAREAS, 1, "found vdso/vsyscall pages @ " PFX " %s\n",
vsyscall_page_start, iter->comment);
#else
/* i#172
 * Work around kernels where the vdso page is marked unreadable in the maps
 * file as below, even though it is in fact readable:
 * ffffffffff600000-ffffffffffe00000 ---p 00000000 00:00 0 [vdso]
 */
/* i#430
 * Likewise work around kernels where the vsyscall region is marked unreadable
 * even though it is in fact readable:
 * ffffffffff600000-ffffffffffe00000 ---p 00000000 00:00 0 [vsyscall]
 */
if (!TESTALL((PROT_READ | PROT_EXEC), iter->prot))
iter->prot |= (PROT_READ | PROT_EXEC);
/* i#1908: vdso and vsyscall pages are now split */
if (strncmp(iter->comment, VSYSCALL_PAGE_MAPS_NAME,
strlen(VSYSCALL_PAGE_MAPS_NAME)) == 0)
vdso_page_start = iter->vm_start;
else if (strncmp(iter->comment, VSYSCALL_REGION_MAPS_NAME,
strlen(VSYSCALL_REGION_MAPS_NAME)) == 0)
vsyscall_page_start = iter->vm_start;
#endif
}
#ifndef HAVE_MEMINFO_QUERY
static void
add_to_memcache(byte *region_start, byte *region_end, void *user_data)
{
memcache_update_locked(region_start, region_end, MEMPROT_NONE, DR_MEMTYPE_DATA,
false /*!exists*/);
}
#endif
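/* Walks the address space using the memquery iterator (or probing when no maps
 * info is available), identifying the vdso/vsyscall pages, module images, and
 * plain mappings. When add_modules is true, discovered modules are added to the
 * module list and regions to the app memory areas. Returns a count of the app
 * memory areas added.
 */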
int
os_walk_address_space(memquery_iter_t *iter, bool add_modules)
{
int count = 0;
#ifdef MACOS
app_pc shared_start, shared_end;
bool have_shared = module_dyld_shared_region(&shared_start, &shared_end);
#endif
#ifdef RETURN_AFTER_CALL
dcontext_t *dcontext = get_thread_private_dcontext();
os_thread_data_t *ostd = (os_thread_data_t *)dcontext->os_field;
#endif
#ifndef HAVE_MEMINFO_QUERY
/* We avoid tracking the innards of vmheap for all_memory_areas by
* adding a single no-access region for the whole vmheap.
* Queries from heap routines use _from_os.
* Queries in check_thread_vm_area are fine getting "noaccess": wants
* any DR memory not on exec areas list to be noaccess.
* Queries from clients: should be ok to hide innards. Marking noaccess
* should be safer than marking free, as unruly client might try to mmap
* something in the free space: better to have it think it's reserved but
* not yet used memory. FIXME: we're not marking beyond-vmheap DR regions
* as noaccess!
*/
iterate_vmm_regions(add_to_memcache, NULL);
#endif
#ifndef HAVE_MEMINFO
count = find_vm_areas_via_probe();
#else
while (memquery_iterator_next(iter)) {
bool image = false;
size_t size = iter->vm_end - iter->vm_start;
/* i#479, hide private module and match Windows's behavior */
bool skip = dynamo_vm_area_overlap(iter->vm_start, iter->vm_end) &&
!is_in_dynamo_dll(iter->vm_start) /* our own text section is ok */
/* client lib text section is ok (xref i#487) */
IF_CLIENT_INTERFACE(&&!is_in_client_lib(iter->vm_start));
DEBUG_DECLARE(const char *map_type = "Private");
/* we can't really tell what's a stack and what's not, but we rely on
* our passing NULL preventing rwx regions from being added to executable
* or future list, even w/ -executable_if_alloc
*/
LOG(GLOBAL, LOG_VMAREAS, 2, "start=" PFX " end=" PFX " prot=%x comment=%s\n",
iter->vm_start, iter->vm_end, iter->prot, iter->comment);
/* Issue 89: the vdso might be loaded inside ld.so as below,
* which causes ASSERT_CURIOSITY in mmap_check_for_module_overlap fail.
* b7fa3000-b7fbd000 r-xp 00000000 08:01 108679 /lib/ld-2.8.90.so
* b7fbd000-b7fbe000 r-xp b7fbd000 00:00 0 [vdso]
* b7fbe000-b7fbf000 r--p 0001a000 08:01 108679 /lib/ld-2.8.90.so
* b7fbf000-b7fc0000 rw-p 0001b000 08:01 108679 /lib/ld-2.8.90.so
* So we always first check if it is a vdso page before calling
* mmap_check_for_module_overlap.
* Update: with i#160/PR 562667 handling non-contiguous modules like
* ld.so we now gracefully handle other objects like vdso in gaps in
* module, but it's simpler to leave this ordering here.
*/
if (skip) {
/* i#479, hide private module and match Windows's behavior */
LOG(GLOBAL, LOG_VMAREAS, 2, PFX "-" PFX " skipping: internal DR region\n",
iter->vm_start, iter->vm_end);
# ifdef MACOS
} else if (have_shared && iter->vm_start >= shared_start &&
iter->vm_start < shared_end) {
/* Skip modules we happen to find inside the dyld shared cache,
* as we'll fail to identify the library. We add them
* in module_walk_dyld_list instead.
*/
image = true;
# endif
} else if (strncmp(iter->comment, VSYSCALL_PAGE_MAPS_NAME,
strlen(VSYSCALL_PAGE_MAPS_NAME)) == 0 ||
IF_X64_ELSE(strncmp(iter->comment, VSYSCALL_REGION_MAPS_NAME,
strlen(VSYSCALL_REGION_MAPS_NAME)) == 0,
/* Older kernels do not label it as "[vdso]", but it is
* hardcoded there.
*/
/* 32-bit */
iter->vm_start == VSYSCALL_PAGE_START_HARDCODED)) {
if (add_modules) {
found_vsyscall_page(iter _IF_DEBUG(&map_type));
/* We'd like to add vsyscall to the module list too but when it's
* separate from vdso it has no ELF header which is too complex
* to force into the module list.
*/
if (module_is_header(iter->vm_start, iter->vm_end - iter->vm_start)) {
module_list_add(iter->vm_start, iter->vm_end - iter->vm_start, false,
iter->comment, iter->inode);
}
}
} else if (add_modules &&
mmap_check_for_module_overlap(iter->vm_start, size,
TEST(MEMPROT_READ, iter->prot),
iter->inode, false)) {
/* we already added the whole image region when we hit the first map for it */
image = true;
DODEBUG({ map_type = "ELF SO"; });
} else if (TEST(MEMPROT_READ, iter->prot) &&
module_is_header(iter->vm_start, size)) {
size_t image_size = size;
app_pc mod_base, mod_first_end, mod_max_end;
char *exec_match;
bool found_exec = false;
image = true;
DODEBUG({ map_type = "ELF SO"; });
LOG(GLOBAL, LOG_VMAREAS, 2,
"Found already mapped module first segment :\n"
"\t" PFX "-" PFX "%s inode=" UINT64_FORMAT_STRING " name=%s\n",
iter->vm_start, iter->vm_end, TEST(MEMPROT_EXEC, iter->prot) ? " +x" : "",
iter->inode, iter->comment);
# ifdef LINUX
/* Mapped images should have inodes, except for cases where an anon
* map is placed on top (i#2566)
*/
ASSERT_CURIOSITY(iter->inode != 0 || iter->comment[0] == '\0');
# endif
ASSERT_CURIOSITY(iter->offset == 0); /* first map shouldn't have offset */
/* Get size by walking the program headers. This includes .bss. */
if (module_walk_program_headers(iter->vm_start, size, false,
true, /* i#1589: ld.so relocated .dynamic */
&mod_base, &mod_first_end, &mod_max_end, NULL,
NULL)) {
image_size = mod_max_end - mod_base;
} else {
ASSERT_NOT_REACHED();
}
LOG(GLOBAL, LOG_VMAREAS, 2,
"Found already mapped module total module :\n"
"\t" PFX "-" PFX " inode=" UINT64_FORMAT_STRING " name=%s\n",
iter->vm_start, iter->vm_start + image_size, iter->inode, iter->comment);
if (add_modules) {
const char *modpath = iter->comment;
/* look for executable */
# ifdef LINUX
exec_match = get_application_name();
if (exec_match != NULL && exec_match[0] != '\0')
found_exec = (strcmp(iter->comment, exec_match) == 0);
/* Handle an anon region for the header (i#2566) */
if (!found_exec && executable_start != NULL &&
executable_start == iter->vm_start) {
found_exec = true;
/* The maps file's first entry may not have the path, in the
* presence of mremapping for hugepages (i#2566; i#3387) (this
* could happen for libraries too, but we don't have alternatives
* there). Or, it may have an incorrect path. Prefer the path
* we recorded in early injection or obtained from
* /proc/self/exe.
*/
modpath = get_application_name();
}
# else
/* We don't have a nice normalized name: it can have ./ or ../ inside
* it. But, we can distinguish an exe from a lib here, even for PIE,
* so we go with that plus a basename comparison.
*/
exec_match = (char *)get_application_short_name();
if (module_is_executable(iter->vm_start) && exec_match != NULL &&
exec_match[0] != '\0') {
const char *iter_basename = strrchr(iter->comment, '/');
if (iter_basename == NULL)
iter_basename = iter->comment;
else
iter_basename++;
found_exec = (strcmp(iter_basename, exec_match) == 0);
}
# endif
if (found_exec) {
if (executable_start == NULL)
executable_start = iter->vm_start;
else
ASSERT(iter->vm_start == executable_start);
LOG(GLOBAL, LOG_VMAREAS, 2,
"Found executable %s @" PFX "-" PFX " %s\n",
get_application_name(), iter->vm_start,
iter->vm_start + image_size, iter->comment);
}
/* We don't yet know whether contiguous so we have to settle for the
* first segment's size. We'll update it in module_list_add().
*/
module_list_add(iter->vm_start, mod_first_end - mod_base, false, modpath,
iter->inode);
# ifdef MACOS
/* look for dyld */
if (strcmp(iter->comment, "/usr/lib/dyld") == 0)
module_walk_dyld_list(iter->vm_start);
# endif
}
} else if (iter->inode != 0) {
DODEBUG({ map_type = "Mapped File"; });
}
/* add all regions (incl. dynamo_areas and stack) to all_memory_areas */
# ifndef HAVE_MEMINFO_QUERY
/* Don't add if we're using one single vmheap entry. */
if (!is_vmm_reserved_address(iter->vm_start, iter->vm_end - iter->vm_start, NULL,
NULL)) {
LOG(GLOBAL, LOG_VMAREAS, 4,
"os_walk_address_space: adding: " PFX "-" PFX " prot=%d\n",
iter->vm_start, iter->vm_end, iter->prot);
memcache_update_locked(iter->vm_start, iter->vm_end, iter->prot,
image ? DR_MEMTYPE_IMAGE : DR_MEMTYPE_DATA,
false /*!exists*/);
}
# endif
/* FIXME: best if we could pass every region to vmareas, but
* it has no way of determining if this is a stack b/c we don't have
* a dcontext at this point -- so we just don't pass the stack
*/
if (!skip /* i#479, hide private module and match Windows's behavior */ &&
add_modules &&
app_memory_allocation(NULL, iter->vm_start, (iter->vm_end - iter->vm_start),
iter->prot, image _IF_DEBUG(map_type))) {
count++;
}
}
#endif /* !HAVE_MEMINFO */
#ifndef HAVE_MEMINFO_QUERY
DOLOG(4, LOG_VMAREAS, memcache_print(GLOBAL, "init: all memory areas:\n"););
#endif
#ifdef RETURN_AFTER_CALL
/* Find the bottom of the stack of the initial (native) entry */
ostd->stack_bottom_pc = find_stack_bottom();
LOG(THREAD, LOG_ALL, 1, "Stack bottom pc = " PFX "\n", ostd->stack_bottom_pc);
#endif
/* now that we've walked memory print all modules */
LOG(GLOBAL, LOG_VMAREAS, 2, "Module list after memory walk\n");
DOLOG(1, LOG_VMAREAS, {
if (add_modules)
print_modules(GLOBAL, DUMP_NOT_XML);
});
return count;
}
/* assumed to be called after find_dynamo_library_vm_areas() */
int
find_executable_vm_areas(void)
{
int count;
memquery_iter_t iter;
memquery_iterator_start(&iter, NULL, true /*may alloc*/);
count = os_walk_address_space(&iter, true);
memquery_iterator_stop(&iter);
STATS_ADD(num_app_code_modules, count);
/* now that we have the modules set up, query libc */
get_libc_errno_location(true /*force init*/);
return count;
}
/* initializes dynamorio library bounds.
* does not use any heap.
* assumed to be called prior to find_executable_vm_areas.
*/
int
find_dynamo_library_vm_areas(void)
{
#ifndef STATIC_LIBRARY
/* We didn't add inside get_dynamo_library_bounds b/c it was called pre-alloc.
* We don't bother to break down the sub-regions.
* Assumption: we don't need to have the protection flags for DR sub-regions.
* For static library builds, DR's code is in the exe and isn't considered
* to be a DR area.
*/
add_dynamo_vm_area(get_dynamorio_dll_start(), get_dynamorio_dll_end(),
MEMPROT_READ | MEMPROT_WRITE | MEMPROT_EXEC,
true /* from image */ _IF_DEBUG(dynamorio_library_filepath));
#endif
#ifdef VMX86_SERVER
if (os_in_vmkernel_userworld())
vmk_add_vmklib_to_dynamo_areas();
#endif
return 1;
}
bool
get_stack_bounds(dcontext_t *dcontext, byte **base, byte **top)
{
os_thread_data_t *ostd = (os_thread_data_t *)dcontext->os_field;
if (ostd->stack_base == NULL) {
/* Initialize on-demand since we don't have the app esp handy in os_thread_init().
 * FIXME: if we hit cases confirming the stack can be fragmented, the right
 * thing will be to merge adjacent rwx regions and assume their union is the
 * stack -- otherwise we would need a special stack-init routine called from
 * x86.asm new_thread_dynamo_start and internal_dynamo_start, and the latter
 * is not a do-once...
 */
size_t size = 0;
bool ok;
/* store stack info at thread startup, since stack can get fragmented in
* /proc/self/maps w/ later mprotects and it can be hard to piece together later
*/
if (IF_MEMQUERY_ELSE(false, DYNAMO_OPTION(use_all_memory_areas))) {
ok = get_memory_info((app_pc)get_mcontext(dcontext)->xsp, &ostd->stack_base,
&size, NULL);
} else {
ok = get_memory_info_from_os((app_pc)get_mcontext(dcontext)->xsp,
&ostd->stack_base, &size, NULL);
}
if (!ok) {
/* This can happen with dr_prepopulate_cache() before we start running
* the app.
*/
ASSERT(!dynamo_started);
return false;
}
ostd->stack_top = ostd->stack_base + size;
LOG(THREAD, LOG_THREADS, 1, "App stack is " PFX "-" PFX "\n", ostd->stack_base,
ostd->stack_top);
}
if (base != NULL)
*base = ostd->stack_base;
if (top != NULL)
*top = ostd->stack_top;
return true;
}
#ifdef RETURN_AFTER_CALL
initial_call_stack_status_t
at_initial_stack_bottom(dcontext_t *dcontext, app_pc target_pc)
{
/* We can't rely exclusively on finding the true stack bottom
* b/c we can't always walk the call stack (PR 608990) so we
* use the image entry as our primary trigger
*/
if (executable_start != NULL /*defensive*/ && reached_image_entry_yet()) {
return INITIAL_STACK_EMPTY;
} else {
/* If our stack walk ends early we could have false positives, but
* that's better than false negatives if we miss the image entry
* or we were unable to find the executable_start
*/
os_thread_data_t *ostd = (os_thread_data_t *)dcontext->os_field;
if (target_pc == ostd->stack_bottom_pc) {
return INITIAL_STACK_BOTTOM_REACHED;
} else {
return INITIAL_STACK_BOTTOM_NOT_REACHED;
}
}
}
#endif /* RETURN_AFTER_CALL */
/* Uses our cached data structures (if in use, else raw query) to retrieve memory info */
bool
query_memory_ex(const byte *pc, OUT dr_mem_info_t *out_info)
{
#ifdef HAVE_MEMINFO_QUERY
return query_memory_ex_from_os(pc, out_info);
#else
return memcache_query_memory(pc, out_info);
#endif
}
bool
query_memory_cur_base(const byte *pc, OUT dr_mem_info_t *info)
{
return query_memory_ex(pc, info);
}
/* Use our cached data structures (if in use, else raw query) to retrieve memory info */
bool
get_memory_info(const byte *pc, byte **base_pc, size_t *size,
uint *prot /* OUT optional, returns MEMPROT_* value */)
{
dr_mem_info_t info;
if (is_vmm_reserved_address((byte *)pc, 1, NULL, NULL)) {
if (!query_memory_ex_from_os(pc, &info) || info.type == DR_MEMTYPE_FREE)
return false;
} else {
if (!query_memory_ex(pc, &info) || info.type == DR_MEMTYPE_FREE)
return false;
}
if (base_pc != NULL)
*base_pc = info.base_pc;
if (size != NULL)
*size = info.size;
if (prot != NULL)
*prot = info.prot;
return true;
}
/* We assume that this routine might be called instead of query_memory_ex()
* b/c the caller is in a fragile location and cannot acquire locks, so
* we try to do the same here.
*/
bool
query_memory_ex_from_os(const byte *pc, OUT dr_mem_info_t *info)
{
bool have_type = false;
bool res = memquery_from_os(pc, info, &have_type);
if (!res) {
/* No other failure types for now */
info->type = DR_MEMTYPE_ERROR;
} else if (res && !have_type) {
/* We pass 0 instead of info->size b/c even if marked as +r we can still
* get SIGBUS if beyond end of mmapped file: not uncommon if querying
* in middle of library load before .bss fully set up (PR 528744).
* However, if there is no fault handler, is_elf_so_header's safe_read will
* recurse to here, so in that case we use info->size but we assume
* it's only at init or exit and so not in the middle of a load
* and less likely to be querying a random mmapped file.
* The cleaner fix is to allow safe_read to work w/o a dcontext or
* fault handling: i#350/PR 529066.
*/
if (TEST(MEMPROT_READ, info->prot) &&
module_is_header(info->base_pc, fault_handling_initialized ? 0 : info->size))
info->type = DR_MEMTYPE_IMAGE;
else {
/* FIXME: won't quite match find_executable_vm_areas marking as
* image: can be doubly-mapped so; don't want to count vdso; etc.
*/
info->type = DR_MEMTYPE_DATA;
}
}
return res;
}
bool
get_memory_info_from_os(const byte *pc, byte **base_pc, size_t *size,
uint *prot /* OUT optional, returns MEMPROT_* value */)
{
dr_mem_info_t info;
if (!query_memory_ex_from_os(pc, &info) || info.type == DR_MEMTYPE_FREE)
return false;
if (base_pc != NULL)
*base_pc = info.base_pc;
if (size != NULL)
*size = info.size;
if (prot != NULL)
*prot = info.prot;
return true;
}
/* in utils.c, exported only for our hack! */
extern void
deadlock_avoidance_unlock(mutex_t *lock, bool ownable);
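/* Contended-acquire path for DR mutexes: blocks until the lock is acquired,
 * using a futex-style wait on lock_requests when kernel synch support is
 * available, and falling back to a trylock + yield spin loop otherwise.
 */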
void
mutex_wait_contended_lock(mutex_t *lock _IF_CLIENT_INTERFACE(priv_mcontext_t *mc))
{
#ifdef CLIENT_INTERFACE
dcontext_t *dcontext = get_thread_private_dcontext();
bool set_client_safe_for_synch =
((dcontext != NULL) && IS_CLIENT_THREAD(dcontext) &&
((mutex_t *)dcontext->client_data->client_grab_mutex == lock));
if (mc != NULL) {
ASSERT(dcontext != NULL);
/* set_safe_for_sync can't be true at the same time as passing
* an mcontext to return into: nothing would be able to reset the
* client_thread_safe_for_sync flag.
*/
ASSERT(!set_client_safe_for_synch);
*get_mcontext(dcontext) = *mc;
}
#endif
/* i#96/PR 295561: use futex(2) if available */
if (ksynch_kernel_support()) {
/* Try to get the lock. If already held, it's fine to store any value
* > LOCK_SET_STATE (we don't rely on paired incs/decs) so that
* the next unlocker will call mutex_notify_released_lock().
*/
ptr_int_t res;
#ifndef LINUX /* we actually don't use this for Linux: see below */
KSYNCH_TYPE *event = mutex_get_contended_event(lock);
ASSERT(event != NULL && ksynch_var_initialized(event));
#endif
while (atomic_exchange_int(&lock->lock_requests, LOCK_CONTENDED_STATE) !=
LOCK_FREE_STATE) {
#ifdef CLIENT_INTERFACE
if (set_client_safe_for_synch)
dcontext->client_data->client_thread_safe_for_synch = true;
if (mc != NULL)
set_synch_state(dcontext, THREAD_SYNCH_VALID_MCONTEXT);
#endif
/* Unfortunately the synch semantics are different for Linux vs Mac.
* We have to use lock_requests as the futex to avoid waiting if
* lock_requests changes, while on Mac the underlying synch prevents
* a wait there.
*/
#ifdef LINUX
/* We'll abort the wait if lock_requests has changed at all.
* We can't have a series of changes that result in no apparent
* change w/o someone acquiring the lock, b/c
* mutex_notify_released_lock() sets lock_requests to LOCK_FREE_STATE.
*/
res = ksynch_wait(&lock->lock_requests, LOCK_CONTENDED_STATE, 0);
#else
res = ksynch_wait(event, 0, 0);
#endif
if (res != 0 && res != -EWOULDBLOCK)
os_thread_yield();
#ifdef CLIENT_INTERFACE
if (set_client_safe_for_synch)
dcontext->client_data->client_thread_safe_for_synch = false;
if (mc != NULL)
set_synch_state(dcontext, THREAD_SYNCH_NONE);
#endif
/* we don't care whether properly woken (res==0), var mismatch
* (res==-EWOULDBLOCK), or error: regardless, someone else
* could have acquired the lock, so we try again
*/
}
} else {
/* we now have to undo our earlier request */
atomic_dec_and_test(&lock->lock_requests);
while (!d_r_mutex_trylock(lock)) {
#ifdef CLIENT_INTERFACE
if (set_client_safe_for_synch)
dcontext->client_data->client_thread_safe_for_synch = true;
if (mc != NULL)
set_synch_state(dcontext, THREAD_SYNCH_VALID_MCONTEXT);
#endif
os_thread_yield();
#ifdef CLIENT_INTERFACE
if (set_client_safe_for_synch)
dcontext->client_data->client_thread_safe_for_synch = false;
if (mc != NULL)
set_synch_state(dcontext, THREAD_SYNCH_NONE);
#endif
}
#ifdef DEADLOCK_AVOIDANCE
/* HACK: trylock's success causes it to do DEADLOCK_AVOIDANCE_LOCK, so to
* avoid two in a row (causes assertion on owner) we unlock here
* In the future we will remove the trylock here and this will go away.
*/
deadlock_avoidance_unlock(lock, true);
#endif
}
return;
}
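/* Release-side pairing for mutex_wait_contended_lock(): marks the lock free
 * and wakes a single waiter when kernel synch support is available.
 */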
void
mutex_notify_released_lock(mutex_t *lock)
{
/* i#96/PR 295561: use futex(2) if available. */
if (ksynch_kernel_support()) {
/* Set to LOCK_FREE_STATE to avoid concurrent lock attempts from
* resulting in a futex_wait value match w/o anyone owning the lock
*/
lock->lock_requests = LOCK_FREE_STATE;
/* No reason to wake multiple threads: just one */
#ifdef LINUX
ksynch_wake(&lock->lock_requests);
#else
ksynch_wake(&lock->contended_event);
#endif
} /* else nothing to do */
}
/* read_write_lock_t implementation doesn't expect the contention path
helpers to guarantee the lock is held (unlike mutexes) so simple
yields are still acceptable.
*/
void
rwlock_wait_contended_writer(read_write_lock_t *rwlock)
{
os_thread_yield();
}
void
rwlock_notify_writer(read_write_lock_t *rwlock)
{
/* nothing to do here */
}
void
rwlock_wait_contended_reader(read_write_lock_t *rwlock)
{
os_thread_yield();
}
void
rwlock_notify_readers(read_write_lock_t *rwlock)
{
/* nothing to do here */
}
/***************************************************************************/
/* events are un-signaled when successfully waited upon. */
typedef struct linux_event_t {
/* Any function that sets this flag must also notify possibly waiting
* thread(s). See i#96/PR 295561.
*/
KSYNCH_TYPE signaled;
mutex_t lock;
bool broadcast;
} linux_event_t;
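/* Typical usage: one thread blocks in wait_for_event(e, timeout_ms) while
 * another calls signal_event(e). For non-broadcast events the woken waiter
 * consumes (resets) the signal, so each signal releases at most one waiter.
 */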
/* FIXME: this routine will need to have a macro wrapper to let us
* assign different ranks to all events for DEADLOCK_AVOIDANCE.
* Currently a single rank seems to work.
*/
event_t
create_event(void)
{
event_t e = (event_t)global_heap_alloc(sizeof(linux_event_t) HEAPACCT(ACCT_OTHER));
ksynch_init_var(&e->signaled);
ASSIGN_INIT_LOCK_FREE(e->lock, event_lock); /* FIXME: pass the event name here */
e->broadcast = false;
return e;
}
event_t
create_broadcast_event(void)
{
event_t e = create_event();
e->broadcast = true;
return e;
}
void
destroy_event(event_t e)
{
DELETE_LOCK(e->lock);
ksynch_free_var(&e->signaled);
global_heap_free(e, sizeof(linux_event_t) HEAPACCT(ACCT_OTHER));
}
void
signal_event(event_t e)
{
d_r_mutex_lock(&e->lock);
ksynch_set_value(&e->signaled, 1);
if (e->broadcast)
ksynch_wake_all(&e->signaled);
else
ksynch_wake(&e->signaled);
LOG(THREAD_GET, LOG_THREADS, 3, "thread " TIDFMT " signalling event " PFX "\n",
d_r_get_thread_id(), e);
d_r_mutex_unlock(&e->lock);
}
void
reset_event(event_t e)
{
d_r_mutex_lock(&e->lock);
ksynch_set_value(&e->signaled, 0);
LOG(THREAD_GET, LOG_THREADS, 3, "thread " TIDFMT " resetting event " PFX "\n",
d_r_get_thread_id(), e);
d_r_mutex_unlock(&e->lock);
}
bool
wait_for_event(event_t e, int timeout_ms)
{
#ifdef DEBUG
dcontext_t *dcontext = get_thread_private_dcontext();
#endif
uint64 start_time, cur_time;
if (timeout_ms > 0)
start_time = query_time_millis();
/* Use a user-space event on Linux, a kernel event on Windows. */
LOG(THREAD, LOG_THREADS, 3, "thread " TIDFMT " waiting for event " PFX "\n",
d_r_get_thread_id(), e);
do {
if (ksynch_get_value(&e->signaled) == 1) {
d_r_mutex_lock(&e->lock);
if (ksynch_get_value(&e->signaled) == 0) {
/* some other thread beat us to it */
LOG(THREAD, LOG_THREADS, 3,
"thread " TIDFMT " was beaten to event " PFX "\n",
d_r_get_thread_id(), e);
d_r_mutex_unlock(&e->lock);
} else {
if (!e->broadcast) {
/* reset the event */
ksynch_set_value(&e->signaled, 0);
}
d_r_mutex_unlock(&e->lock);
LOG(THREAD, LOG_THREADS, 3,
"thread " TIDFMT " finished waiting for event " PFX "\n",
d_r_get_thread_id(), e);
return true;
}
} else {
/* Wait only if the signaled flag is not set to 1. The return value
 * doesn't matter because the flag will be re-checked.
 */
ksynch_wait(&e->signaled, 0, timeout_ms);
}
if (ksynch_get_value(&e->signaled) == 0) {
/* If it still has to wait, give up the cpu. */
os_thread_yield();
}
if (timeout_ms > 0)
cur_time = query_time_millis();
} while (timeout_ms <= 0 || cur_time - start_time < timeout_ms);
return false;
}
/***************************************************************************
* DIRECTORY ITERATOR
*/
/* These structs are written to the buf that we pass to getdents. We can
* iterate them by adding d_reclen to the current buffer offset and interpreting
* that as the next entry.
*/
struct linux_dirent {
#ifdef SYS_getdents
/* Adapted from struct old_linux_dirent in linux/fs/readdir.c: */
unsigned long d_ino;
unsigned long d_off;
unsigned short d_reclen;
char d_name[];
#else
/* Adapted from struct linux_dirent64 in linux/include/linux/dirent.h: */
uint64 d_ino;
int64 d_off;
unsigned short d_reclen;
unsigned char d_type;
char d_name[];
#endif
};
#define CURRENT_DIRENT(iter) ((struct linux_dirent *)(&iter->buf[iter->off]))
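/* Example iteration pattern (see os_list_threads() below):
 *   dir_iterator_t iter;
 *   os_dir_iterator_start(&iter, fd);
 *   while (os_dir_iterator_next(&iter)) {
 *       ... use iter.name ...
 *   }
 */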
static void
os_dir_iterator_start(dir_iterator_t *iter, file_t fd)
{
iter->fd = fd;
iter->off = 0;
iter->end = 0;
}
static bool
os_dir_iterator_next(dir_iterator_t *iter)
{
#ifdef MACOS
/* We can use SYS_getdirentries, but do we even need a dir iterator?
* On Linux it's only used to enumerate /proc/pid/task.
*/
ASSERT_NOT_IMPLEMENTED(false);
return false;
#else
if (iter->off < iter->end) {
/* Have existing dents, get the next offset. */
iter->off += CURRENT_DIRENT(iter)->d_reclen;
ASSERT(iter->off <= iter->end);
}
if (iter->off == iter->end) {
/* Do a getdents syscall. Unlike when reading a file, the kernel will
* not read a partial linux_dirent struct, so we don't need to shift the
* left over bytes to the buffer start. See the getdents manpage for
* the example code that this is based on.
*/
iter->off = 0;
# ifdef SYS_getdents
iter->end =
dynamorio_syscall(SYS_getdents, 3, iter->fd, iter->buf, sizeof(iter->buf));
# else
iter->end =
dynamorio_syscall(SYS_getdents64, 3, iter->fd, iter->buf, sizeof(iter->buf));
# endif
ASSERT(iter->end <= sizeof(iter->buf));
if (iter->end <= 0) { /* No more dents, or error. */
iter->name = NULL;
if (iter->end < 0) {
LOG(GLOBAL, LOG_SYSCALLS, 1, "getdents syscall failed with errno %d\n",
-iter->end);
}
return false;
}
}
iter->name = CURRENT_DIRENT(iter)->d_name;
return true;
#endif
}
/***************************************************************************
* THREAD TAKEOVER
*/
/* Record used to synchronize thread takeover. */
typedef struct _takeover_record_t {
thread_id_t tid;
event_t event;
} takeover_record_t;
/* When attempting thread takeover, we store an array of thread id and event
* pairs here. Each thread we signal is supposed to enter DR control and signal
* this event after it has added itself to all_threads.
*
* XXX: What we really want is to be able to use SYS_rt_tgsigqueueinfo (Linux >=
* 2.6.31) to pass the event_t to each thread directly, rather than using this
* side data structure.
*/
static takeover_record_t *thread_takeover_records;
static uint num_thread_takeover_records;
/* This is the dcontext of the thread that initiated the takeover. We read the
* owning_thread and signal_field threads from it in the signaled threads to
* set up siginfo sharing.
*/
static dcontext_t *takeover_dcontext;
/* Lists active threads in the process.
* XXX: The /proc man page says /proc/pid/task is only available if the main
* thread is still alive, but experiments on 2.6.38 show otherwise.
*/
static thread_id_t *
os_list_threads(dcontext_t *dcontext, uint *num_threads_out)
{
dir_iterator_t iter;
file_t task_dir;
uint tids_alloced = 10;
uint num_threads = 0;
thread_id_t *new_tids;
thread_id_t *tids;
ASSERT(num_threads_out != NULL);
#ifdef MACOS
/* XXX i#58: NYI.
* We may want SYS_proc_info with PROC_INFO_PID_INFO and PROC_PIDLISTTHREADS,
* or is that just BSD threads and instead we want process_set_tasks()
* and task_info() as in 7.3.1.3 in Singh's OSX book?
*/
*num_threads_out = 0;
return NULL;
#endif
tids =
HEAP_ARRAY_ALLOC(dcontext, thread_id_t, tids_alloced, ACCT_THREAD_MGT, PROTECTED);
task_dir = os_open_directory("/proc/self/task", OS_OPEN_READ);
ASSERT(task_dir != INVALID_FILE);
os_dir_iterator_start(&iter, task_dir);
while (os_dir_iterator_next(&iter)) {
thread_id_t tid;
DEBUG_DECLARE(int r;)
if (strcmp(iter.name, ".") == 0 || strcmp(iter.name, "..") == 0)
continue;
IF_DEBUG(r =)
sscanf(iter.name, "%u", &tid);
ASSERT_MESSAGE(CHKLVL_ASSERTS, "failed to parse /proc/pid/task entry", r == 1);
if (tid <= 0)
continue;
if (num_threads == tids_alloced) {
/* realloc, essentially. Less expensive than counting first. */
new_tids = HEAP_ARRAY_ALLOC(dcontext, thread_id_t, tids_alloced * 2,
ACCT_THREAD_MGT, PROTECTED);
memcpy(new_tids, tids, sizeof(thread_id_t) * tids_alloced);
HEAP_ARRAY_FREE(dcontext, tids, thread_id_t, tids_alloced, ACCT_THREAD_MGT,
PROTECTED);
tids = new_tids;
tids_alloced *= 2;
}
tids[num_threads++] = tid;
}
ASSERT(iter.end == 0); /* No reading errors. */
os_close(task_dir);
/* realloc back down to num_threads for caller simplicity. */
new_tids =
HEAP_ARRAY_ALLOC(dcontext, thread_id_t, num_threads, ACCT_THREAD_MGT, PROTECTED);
memcpy(new_tids, tids, sizeof(thread_id_t) * num_threads);
HEAP_ARRAY_FREE(dcontext, tids, thread_id_t, tids_alloced, ACCT_THREAD_MGT,
PROTECTED);
tids = new_tids;
*num_threads_out = num_threads;
return tids;
}
/* List the /proc/self/task directory and add all unknown thread ids to the
* all_threads hashtable in dynamo.c. Returns true if we found any unknown
* threads and false otherwise. We assume that since we don't know about them
* they are not under DR and have no dcontexts.
*/
bool
os_take_over_all_unknown_threads(dcontext_t *dcontext)
{
uint i;
uint num_threads;
thread_id_t *tids;
uint threads_to_signal = 0;
/* We do not want to re-takeover a thread that's in between notifying us on
* the last call to this routine and getting onto the all_threads list as
* we'll self-interpret our own code leading to a lot of problems.
* XXX: should we use an event to avoid this inefficient loop? We expect
* this to only happen in rare cases during attach when threads are in flux.
*/
while (uninit_thread_count > 0) /* relying on volatile */
os_thread_yield();
/* This can only happen if we had already taken over a thread, because there is
* full synchronization at detach. The same thread may now already be on its way
* to exit, and its thread record might be gone already and make it look like a
* new native thread below. If we rely on the thread to self-detect that it was
* interrupted at a DR address we may run into a deadlock (i#2694). In order to
* avoid this, we wait here. This is expected to be uncommon, and can only happen
* with very short-lived threads.
* XXX: if this loop turns out to be too inefficient, we could support detecting
* the lock function's address bounds along w/ is_dynamo_address.
*/
while (exiting_thread_count > 0)
os_thread_yield();
d_r_mutex_lock(&thread_initexit_lock);
CLIENT_ASSERT(thread_takeover_records == NULL,
"Only one thread should attempt app take over!");
#ifdef LINUX
/* Check this thread for rseq in between setup and start. */
if (rseq_is_registered_for_current_thread())
rseq_locate_rseq_regions();
#endif
/* Find tids for which we have no thread record, meaning they are not under
* our control. Shift them to the beginning of the tids array.
*/
tids = os_list_threads(dcontext, &num_threads);
if (tids == NULL) {
d_r_mutex_unlock(&thread_initexit_lock);
return false; /* have to assume no unknown */
}
for (i = 0; i < num_threads; i++) {
thread_record_t *tr = thread_lookup(tids[i]);
if (tr == NULL ||
/* Re-takeover known threads that are currently native as well.
* XXX i#95: we need a synchall-style loop for known threads as
* they can be in DR for syscall hook handling.
* Update: we now remove the hook for start/stop: but native_exec
* or other individual threads going native could still hit this.
*/
(is_thread_currently_native(tr)
IF_CLIENT_INTERFACE(&&!IS_CLIENT_THREAD(tr->dcontext))))
tids[threads_to_signal++] = tids[i];
}
LOG(GLOBAL, LOG_THREADS, 1, "TAKEOVER: %d threads to take over\n", threads_to_signal);
if (threads_to_signal > 0) {
takeover_record_t *records;
/* Assuming pthreads, prepare signal_field for sharing. */
handle_clone(dcontext, PTHREAD_CLONE_FLAGS);
/* Create records with events for all the threads we want to signal. */
LOG(GLOBAL, LOG_THREADS, 1, "TAKEOVER: publishing takeover records\n");
records = HEAP_ARRAY_ALLOC(dcontext, takeover_record_t, threads_to_signal,
ACCT_THREAD_MGT, PROTECTED);
for (i = 0; i < threads_to_signal; i++) {
LOG(GLOBAL, LOG_THREADS, 1, "TAKEOVER: will signal thread " TIDFMT "\n",
tids[i]);
records[i].tid = tids[i];
records[i].event = create_event();
}
/* Publish the records and the initial take over dcontext. */
thread_takeover_records = records;
num_thread_takeover_records = threads_to_signal;
takeover_dcontext = dcontext;
/* Signal the other threads. */
for (i = 0; i < threads_to_signal; i++) {
thread_signal(get_process_id(), records[i].tid, SUSPEND_SIGNAL);
}
d_r_mutex_unlock(&thread_initexit_lock);
/* Wait for all the threads we signaled. */
ASSERT_OWN_NO_LOCKS();
for (i = 0; i < threads_to_signal; i++) {
static const int progress_period = 50;
if (i % progress_period == 0) {
char buf[16];
/* +1 to include the attach request thread to match the final msg. */
snprintf(buf, BUFFER_SIZE_ELEMENTS(buf), "%d/%d", i + 1,
threads_to_signal + 1);
NULL_TERMINATE_BUFFER(buf);
SYSLOG(SYSLOG_VERBOSE, INFO_ATTACHED, 3, buf, get_application_name(),
get_application_pid());
}
static const int wait_ms = 25;
while (!wait_for_event(records[i].event, wait_ms)) {
/* The thread may have exited (i#2601). We assume no tid re-use. */
char task[64];
snprintf(task, BUFFER_SIZE_ELEMENTS(task), "/proc/self/task/%d", tids[i]);
NULL_TERMINATE_BUFFER(task);
if (!os_file_exists(task, false /*!is dir*/)) {
SYSLOG_INTERNAL_WARNING_ONCE("thread exited while attaching");
break;
}
/* Else try again. */
}
}
/* Now that we've taken over the other threads, we can safely free the
* records and reset the shared globals.
*/
d_r_mutex_lock(&thread_initexit_lock);
LOG(GLOBAL, LOG_THREADS, 1,
"TAKEOVER: takeover complete, unpublishing records\n");
thread_takeover_records = NULL;
num_thread_takeover_records = 0;
takeover_dcontext = NULL;
for (i = 0; i < threads_to_signal; i++) {
destroy_event(records[i].event);
}
HEAP_ARRAY_FREE(dcontext, records, takeover_record_t, threads_to_signal,
ACCT_THREAD_MGT, PROTECTED);
}
d_r_mutex_unlock(&thread_initexit_lock);
HEAP_ARRAY_FREE(dcontext, tids, thread_id_t, num_threads, ACCT_THREAD_MGT, PROTECTED);
return threads_to_signal > 0;
}
bool
os_thread_re_take_over(void)
{
#ifdef X86
/* i#2089: is_thread_initialized() will fail for a currently-native app.
* We bypass the magic field checks here of is_thread_tls_initialized().
* XXX: should this be inside is_thread_initialized()? But that may mislead
* other callers: the caller has to restore the TLS. Some old code also
* used get_thread_private_dcontext() being NULL to indicate an unknown thread:
* that should also call here.
*/
if (!is_thread_initialized() && is_thread_tls_allocated()) {
/* It's safe to call thread_lookup() for ourself. */
thread_record_t *tr = thread_lookup(get_sys_thread_id());
if (tr != NULL) {
ASSERT(is_thread_currently_native(tr));
LOG(GLOBAL, LOG_THREADS, 1, "\tretakeover for cur-native thread " TIDFMT "\n",
get_sys_thread_id());
LOG(tr->dcontext->logfile, LOG_THREADS, 1,
"\nretakeover for cur-native thread " TIDFMT "\n", get_sys_thread_id());
os_swap_dr_tls(tr->dcontext, false /*to dr*/);
ASSERT(is_thread_initialized());
return true;
}
}
#endif
return false;
}
static void
os_thread_signal_taken_over(void)
{
thread_id_t mytid;
event_t event = NULL;
uint i;
/* Wake up the thread that initiated the take over. */
mytid = d_r_get_thread_id();
ASSERT(thread_takeover_records != NULL);
for (i = 0; i < num_thread_takeover_records; i++) {
if (thread_takeover_records[i].tid == mytid) {
event = thread_takeover_records[i].event;
break;
}
}
ASSERT_MESSAGE(CHKLVL_ASSERTS, "mytid not present in takeover records!",
event != NULL);
signal_event(event);
}
/* Takes over the current thread from the signal handler. We notify the thread
* that signaled us by signalling our event in thread_takeover_records.
* If it returns, it returns false, and the thread should be let go.
*/
bool
os_thread_take_over(priv_mcontext_t *mc, kernel_sigset_t *sigset)
{
dcontext_t *dcontext;
priv_mcontext_t *dc_mc;
LOG(GLOBAL, LOG_THREADS, 1, "TAKEOVER: received signal in thread " TIDFMT "\n",
get_sys_thread_id());
/* Do standard DR thread initialization. Mirrors code in
* create_clone_record and new_thread_setup, except we're not putting a
* clone record on the dstack.
*/
os_thread_re_take_over();
if (!is_thread_initialized()) {
/* If this is a thread on its way to init, don't self-interp (i#2688). */
if (is_dynamo_address(mc->pc)) {
os_thread_signal_taken_over();
return false;
}
dcontext = init_thread_with_shared_siginfo(mc, takeover_dcontext);
ASSERT(dcontext != NULL);
} else {
/* Re-takeover a thread that we let go native */
dcontext = get_thread_private_dcontext();
ASSERT(dcontext != NULL);
}
signal_set_mask(dcontext, sigset);
signal_swap_mask(dcontext, true /*to app*/);
dynamo_thread_under_dynamo(dcontext);
dc_mc = get_mcontext(dcontext);
*dc_mc = *mc;
dcontext->whereami = DR_WHERE_APP;
dcontext->next_tag = mc->pc;
os_thread_signal_taken_over();
DOLOG(2, LOG_TOP, {
byte *cur_esp;
GET_STACK_PTR(cur_esp);
LOG(THREAD, LOG_TOP, 2,
"%s: next_tag=" PFX ", cur xsp=" PFX ", mc->xsp=" PFX "\n", __FUNCTION__,
dcontext->next_tag, cur_esp, mc->xsp);
});
#ifdef LINUX
/* See whether we should initiate lazy rseq handling, and avoid treating
* regions as rseq when the rseq syscall is never set up.
*/
if (rseq_is_registered_for_current_thread()) {
rseq_locate_rseq_regions();
rseq_thread_attach(dcontext);
}
#endif
/* Start interpreting from the signal context. */
call_switch_stack(dcontext, dcontext->dstack, (void (*)(void *))d_r_dispatch,
NULL /*not on d_r_initstack*/, false /*shouldn't return*/);
ASSERT_NOT_REACHED();
return true; /* make compiler happy */
}
bool
os_thread_take_over_suspended_native(dcontext_t *dcontext)
{
os_thread_data_t *ostd = (os_thread_data_t *)dcontext->os_field;
if (!is_thread_currently_native(dcontext->thread_record) ||
ksynch_get_value(&ostd->suspended) < 0)
return false;
/* Thread is sitting in suspend signal loop so we just set a flag
* for when it resumes:
*/
/* XXX: there's no event for a client to trigger this on so not yet
* tested. i#721 may help.
*/
ASSERT_NOT_TESTED();
ostd->retakeover = true;
return true;
}
/* Called for os-specific takeover of a secondary thread from the one
* that called dr_app_setup().
*/
dcontext_t *
os_thread_take_over_secondary(priv_mcontext_t *mc)
{
thread_record_t **list;
int num_threads;
int i;
dcontext_t *dcontext;
/* We want to share with the thread that called dr_app_setup. */
d_r_mutex_lock(&thread_initexit_lock);
get_list_of_threads(&list, &num_threads);
ASSERT(num_threads >= 1);
for (i = 0; i < num_threads; i++) {
/* Find a thread that's already set up */
if (is_thread_signal_info_initialized(list[i]->dcontext))
break;
}
ASSERT(i < num_threads);
ASSERT(list[i]->id != get_sys_thread_id());
/* Assuming pthreads, prepare signal_field for sharing. */
handle_clone(list[i]->dcontext, PTHREAD_CLONE_FLAGS);
dcontext = init_thread_with_shared_siginfo(mc, list[i]->dcontext);
d_r_mutex_unlock(&thread_initexit_lock);
global_heap_free(list,
num_threads * sizeof(thread_record_t *) HEAPACCT(ACCT_THREAD_MGT));
return dcontext;
}
/***************************************************************************/
uint
os_random_seed(void)
{
uint seed;
/* read from /dev/urandom for a non-blocking source of randomness */
int urand = os_open("/dev/urandom", OS_OPEN_READ);
DEBUG_DECLARE(int read =) os_read(urand, &seed, sizeof(seed));
ASSERT(read == sizeof(seed));
os_close(urand);
return seed;
}
#ifdef RCT_IND_BRANCH
/* Analyze a range in a possibly new module
* return false if not a code section in a module
* otherwise returns true and adds all valid targets for rct_ind_branch_check
*/
bool
rct_analyze_module_at_violation(dcontext_t *dcontext, app_pc target_pc)
{
/* FIXME: note that this will NOT find the data section corresponding to the given PC
* we don't yet have a corresponding get_allocation_size or an ELF header walk routine
* on linux
*/
app_pc code_start;
size_t code_size;
uint prot;
if (!get_memory_info(target_pc, &code_start, &code_size, &prot))
return false;
/* TODO: in almost all cases we expect the region at module_base+module_size to
 * be the corresponding data section.
 * Writable yet initialized data does need to be processed.
 */
if (code_size > 0) {
app_pc code_end = code_start + code_size;
app_pc data_start;
size_t data_size;
ASSERT(TESTALL(MEMPROT_READ | MEMPROT_EXEC, prot)); /* code */
if (!get_memory_info(code_end, &data_start, &data_size, &prot))
return false;
ASSERT(data_start == code_end);
ASSERT(TESTALL(MEMPROT_READ | MEMPROT_WRITE, prot)); /* data */
app_pc text_start = code_start;
app_pc text_end = data_start + data_size;
/* TODO: performance: do this only in case relocation info is not present */
DEBUG_DECLARE(uint found =)
find_address_references(dcontext, text_start, text_end, code_start, code_end);
LOG(GLOBAL, LOG_RCT, 2, PFX "-" PFX " : %d ind targets of %d code size",
text_start, text_end, found, code_size);
return true;
}
return false;
}
# ifdef X64
bool
rct_add_rip_rel_addr(dcontext_t *dcontext, app_pc tgt _IF_DEBUG(app_pc src))
{
/* FIXME PR 276762: not implemented */
return false;
}
# endif
#endif /* RCT_IND_BRANCH */
#ifdef HOT_PATCHING_INTERFACE
void *
get_drmarker_hotp_policy_status_table()
{
ASSERT_NOT_IMPLEMENTED(false);
return NULL;
}
void
set_drmarker_hotp_policy_status_table(void *new_table)
{
ASSERT_NOT_IMPLEMENTED(false);
}
byte *
hook_text(byte *hook_code_buf, const app_pc image_addr, intercept_function_t hook_func,
const void *callee_arg, const after_intercept_action_t action_after,
const bool abort_if_hooked, const bool ignore_cti, byte **app_code_copy_p,
byte **alt_exit_tgt_p)
{
ASSERT_NOT_IMPLEMENTED(false);
return NULL;
}
void
unhook_text(byte *hook_code_buf, app_pc image_addr)
{
ASSERT_NOT_IMPLEMENTED(false);
}
void
insert_jmp_at_tramp_entry(dcontext_t *dcontext, byte *trampoline, byte *target)
{
ASSERT_NOT_IMPLEMENTED(false);
}
#endif /* HOT_PATCHING_INTERFACE */
bool
aslr_is_possible_attack(app_pc target)
{
/* FIXME: ASLR not implemented */
return false;
}
app_pc
aslr_possible_preferred_address(app_pc target_addr)
{
/* FIXME: ASLR not implemented */
return NULL;
}
void
take_over_primary_thread()
{
/* nothing to do here */
}
bool
os_current_user_directory(char *directory_prefix /* INOUT */, uint directory_len,
bool create)
{
/* XXX: could share some of this code w/ corresponding windows routine */
uid_t uid = dynamorio_syscall(SYS_getuid, 0);
char *directory = directory_prefix;
char *dirend = directory_prefix + strlen(directory_prefix);
snprintf(dirend, directory_len - (dirend - directory_prefix), "%cdpc-%d", DIRSEP,
uid);
directory_prefix[directory_len - 1] = '\0';
if (!os_file_exists(directory, true /*is dir*/) && create) {
/* XXX: we should ensure we do not follow symlinks */
/* XXX: should add support for CREATE_DIR_FORCE_OWNER */
if (!os_create_dir(directory, CREATE_DIR_REQUIRE_NEW)) {
LOG(GLOBAL, LOG_CACHE, 2, "\terror creating per-user dir %s\n", directory);
return false;
} else {
LOG(GLOBAL, LOG_CACHE, 2, "\tcreated per-user dir %s\n", directory);
}
}
return true;
}
bool
os_validate_user_owned(file_t file_or_directory_handle)
{
/* note on Linux this scheme should never be used */
ASSERT(false && "chown Alice evilfile");
return false;
}
bool
os_check_option_compatibility(void)
{
/* no options are Linux OS version dependent */
return false;
}
#ifdef X86_32
/* Emulate uint64 modulo and division by uint32 on ia32.
* XXX: Does *not* handle 64-bit divisors!
*/
static uint64
uint64_divmod(uint64 dividend, uint64 divisor64, uint32 *remainder)
{
/* Assumes little endian, which x86 is. */
union {
uint64 v64;
struct {
uint32 lo;
uint32 hi;
};
} res;
uint32 upper;
uint32 divisor = (uint32)divisor64;
/* Our uses don't use large divisors. */
ASSERT(divisor64 <= UINT_MAX && "divisor is larger than uint32 can hold");
/* Divide out the high bits first. */
res.v64 = dividend;
upper = res.hi;
res.hi = upper / divisor;
upper %= divisor;
/* Use the unsigned div instruction, which uses EDX:EAX to form a 64-bit
* dividend. We only get a 32-bit quotient out, which is why we divide out
* the high bits first. The quotient will fit in EAX.
*
* DIV r/m32 F7 /6 Unsigned divide EDX:EAX by r/m32, with result stored
* in EAX <- Quotient, EDX <- Remainder.
* inputs:
* EAX = res.lo
* EDX = upper
* rm = divisor
* outputs:
* res.lo = EAX
* *remainder = EDX
* The outputs precede the inputs in gcc inline asm syntax, and so to put
* inputs in EAX and EDX we use "0" and "1".
*/
asm("divl %2"
: "=a"(res.lo), "=d"(*remainder)
: "rm"(divisor), "0"(res.lo), "1"(upper));
return res.v64;
}
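/* Example: uint64_divmod(10, 3, &rem) returns 3 with rem == 1; the unit test
 * below exercises dividends above 2^32 as well.
 */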
/* Match libgcc's prototype. */
uint64
__udivdi3(uint64 dividend, uint64 divisor)
{
uint32 remainder;
return uint64_divmod(dividend, divisor, &remainder);
}
/* Match libgcc's prototype. */
uint64
__umoddi3(uint64 dividend, uint64 divisor)
{
uint32 remainder;
uint64_divmod(dividend, divisor, &remainder);
return (uint64)remainder;
}
/* Same thing for signed. */
static int64
int64_divmod(int64 dividend, int64 divisor64, int *remainder)
{
union {
int64 v64;
struct {
int lo;
int hi;
};
} res;
int upper;
int divisor = (int)divisor64;
/* Our uses don't use large divisors. */
ASSERT(divisor64 <= INT_MAX && divisor64 >= INT_MIN && "divisor too large for int");
/* Divide out the high bits first. */
res.v64 = dividend;
upper = res.hi;
res.hi = upper / divisor;
upper %= divisor;
/* Like above but with the signed div instruction, which does a signed divide
* on edx:eax by r/m32 => quotient in eax, remainder in edx.
*/
asm("idivl %2"
: "=a"(res.lo), "=d"(*remainder)
: "rm"(divisor), "0"(res.lo), "1"(upper));
return res.v64;
}
/* Match libgcc's prototype. */
int64
__divdi3(int64 dividend, int64 divisor)
{
int remainder;
return int64_divmod(dividend, divisor, &remainder);
}
/* __moddi3 comes from third_party/libgcc for x86 as well as arm. */
#elif defined(ARM)
/* i#1566: for ARM, __aeabi versions are used instead of udivdi3 and umoddi3.
* We link with __aeabi routines from libgcc via third_party/libgcc.
*/
#endif /* X86_32 */
/****************************************************************************
* Tests
*/
#if defined(STANDALONE_UNIT_TEST)
void
test_uint64_divmod(void)
{
# ifdef X86_32
uint64 quotient;
uint32 remainder;
/* Simple division below 2^32. */
quotient = uint64_divmod(9, 3, &remainder);
EXPECT(quotient == 3, true);
EXPECT(remainder == 0, true);
quotient = uint64_divmod(10, 3, &remainder);
EXPECT(quotient == 3, true);
EXPECT(remainder == 1, true);
/* Division when upper bits are less than the divisor. */
quotient = uint64_divmod(45ULL << 31, 1U << 31, &remainder);
EXPECT(quotient == 45, true);
EXPECT(remainder == 0, true);
/* Division when upper bits are greater than the divisor. */
quotient = uint64_divmod(45ULL << 32, 15, &remainder);
EXPECT(quotient == 3ULL << 32, true);
EXPECT(remainder == 0, true);
quotient = uint64_divmod((45ULL << 32) + 13, 15, &remainder);
EXPECT(quotient == 3ULL << 32, true);
EXPECT(remainder == 13, true);
/* Try calling the intrinsics. Don't divide by powers of two, gcc will
* lower that to a shift.
*/
quotient = (45ULL << 32);
quotient /= 15;
EXPECT(quotient == (3ULL << 32), true);
quotient = (45ULL << 32) + 13;
remainder = quotient % 15;
EXPECT(remainder == 13, true);
# endif /* X86_32 */
}
void
unit_test_os(void)
{
test_uint64_divmod();
}
#endif /* STANDALONE_UNIT_TEST */
| 1 | 19,692 | Hmm, do we need to do this for Windows too? It won't have this private loader TLS issue but it will run DR code in a separate thread before DR init is fully done which I think violates some assumptions. For Windows we would move this wait from win32/os.c to win32/callback.s intercept_new_thread where it checks whether it's a client thread right before calling dynamo_thread_init. If you do not want to deal with Windows you could bail on my suggestion and leave the issue open (or maybe file a new issue) for Windows, or if you want to put the suggested core/win32/ change in place and your new test passes on Appveyor we could then claim to have fully solved this related issue too for all platforms. | DynamoRIO-dynamorio | c |
@@ -840,3 +840,8 @@ def kinesis_get_latest_records(stream_name, shard_id, count=10, env=None):
while len(result) > count:
result.pop(0)
return result
+
+
+def get_lambda_name_from_arn(arn):
+ attributes = arn.split(':')
+ return attributes[-1] | 1 | import os
import re
import json
import time
import boto3
import base64
import logging
import six
from six.moves.urllib.parse import quote_plus, unquote_plus
from localstack import config
from localstack.constants import (
REGION_LOCAL, LOCALHOST, MOTO_ACCOUNT_ID, ENV_DEV, APPLICATION_AMZ_JSON_1_1,
APPLICATION_AMZ_JSON_1_0, APPLICATION_X_WWW_FORM_URLENCODED, TEST_AWS_ACCOUNT_ID)
from localstack.utils.common import (
run_safe, to_str, is_string, is_string_or_bytes, make_http_request, is_port_open, get_service_protocol)
from localstack.utils.aws.aws_models import KinesisStream
# AWS environment variable names
ENV_ACCESS_KEY = 'AWS_ACCESS_KEY_ID'
ENV_SECRET_KEY = 'AWS_SECRET_ACCESS_KEY'
ENV_SESSION_TOKEN = 'AWS_SESSION_TOKEN'
# set up logger
LOG = logging.getLogger(__name__)
# cache local region
LOCAL_REGION = None
# Use this field if you want to provide a custom boto3 session.
# This field takes priority over CREATE_NEW_SESSION_PER_BOTO3_CONNECTION
CUSTOM_BOTO3_SESSION = None
# Use this flag to enable creation of a new session for each boto3 connection.
# This flag will be ignored if CUSTOM_BOTO3_SESSION is specified
CREATE_NEW_SESSION_PER_BOTO3_CONNECTION = False
# Used in AWS assume role function
INITIAL_BOTO3_SESSION = None
# Boto clients cache
BOTO_CLIENTS_CACHE = {}
# Assume role loop seconds
DEFAULT_TIMER_LOOP_SECONDS = 60 * 50
# maps SQS queue ARNs to queue URLs
SQS_ARN_TO_URL_CACHE = {}
class Environment(object):
def __init__(self, region=None, prefix=None):
# target is the runtime environment to use, e.g.,
# 'local' for local mode
self.region = region or get_local_region()
# prefix can be 'prod', 'stg', 'uat-1', etc.
self.prefix = prefix
def apply_json(self, j):
if isinstance(j, str):
j = json.loads(j)
self.__dict__.update(j)
@staticmethod
def from_string(s):
parts = s.split(':')
if len(parts) == 1:
if s in PREDEFINED_ENVIRONMENTS:
return PREDEFINED_ENVIRONMENTS[s]
parts = [get_local_region(), s]
if len(parts) > 2:
raise Exception('Invalid environment string "%s"' % s)
region = parts[0]
prefix = parts[1]
return Environment(region=region, prefix=prefix)
@staticmethod
def from_json(j):
if not isinstance(j, dict):
j = j.to_dict()
result = Environment()
result.apply_json(j)
return result
def __str__(self):
return '%s:%s' % (self.region, self.prefix)
PREDEFINED_ENVIRONMENTS = {
ENV_DEV: Environment(region=REGION_LOCAL, prefix=ENV_DEV)
}
def get_environment(env=None, region_name=None):
"""
Return an Environment object based on the input arguments.
Parameter `env` can be either of:
* None (or empty), in which case the rules below are applied to (env = os.environ['ENV'] or ENV_DEV)
* an Environment object (then this object is returned)
* a string '<region>:<name>', which corresponds to Environment(region='<region>', prefix='<prefix>')
* the predefined string 'dev' (ENV_DEV), which implies Environment(region='local', prefix='dev')
* a string '<name>', which implies Environment(region=DEFAULT_REGION, prefix='<name>')
Additionally, parameter `region_name` can be used to override DEFAULT_REGION.
"""
if not env:
if 'ENV' in os.environ:
env = os.environ['ENV']
else:
env = ENV_DEV
elif not is_string(env) and not isinstance(env, Environment):
raise Exception('Invalid environment: %s' % env)
if is_string(env):
env = Environment.from_string(env)
if region_name:
env.region = region_name
if not env.region:
raise Exception('Invalid region in environment: "%s"' % env)
return env
def is_local_env(env):
return not env or env.region == REGION_LOCAL or env.prefix == ENV_DEV
class Boto3Session(boto3.session.Session):
""" Custom boto3 session that points to local endpoint URLs. """
def resource(self, service, *args, **kwargs):
self._fix_endpoint(kwargs)
return connect_to_resource(service, *args, **kwargs)
def client(self, service, *args, **kwargs):
self._fix_endpoint(kwargs)
return connect_to_service(service, *args, **kwargs)
def _fix_endpoint(self, kwargs):
if 'amazonaws.com' in kwargs.get('endpoint_url', ''):
kwargs.pop('endpoint_url')
def get_boto3_credentials():
global INITIAL_BOTO3_SESSION
if CUSTOM_BOTO3_SESSION:
return CUSTOM_BOTO3_SESSION.get_credentials()
if not INITIAL_BOTO3_SESSION:
INITIAL_BOTO3_SESSION = boto3.session.Session()
return INITIAL_BOTO3_SESSION.get_credentials()
def get_boto3_session():
if CUSTOM_BOTO3_SESSION:
return CUSTOM_BOTO3_SESSION
if CREATE_NEW_SESSION_PER_BOTO3_CONNECTION:
return boto3.session.Session()
# return default session
return boto3
def get_region():
# TODO look up region from context
return get_local_region()
def get_local_region():
global LOCAL_REGION
if LOCAL_REGION is None:
session = boto3.session.Session()
LOCAL_REGION = session.region_name or ''
return LOCAL_REGION or config.DEFAULT_REGION
def get_local_service_url(service_name_or_port):
""" Return the local service URL for the given service name or port. """
if isinstance(service_name_or_port, int):
return '%s://%s:%s' % (get_service_protocol(), LOCALHOST, service_name_or_port)
service_name = service_name_or_port
if service_name == 's3api':
service_name = 's3'
elif service_name == 'runtime.sagemaker':
service_name = 'sagemaker-runtime'
service_name_upper = service_name.upper().replace('-', '_').replace('.', '_')
return os.environ['TEST_%s_URL' % service_name_upper]
def is_service_enabled(service_name):
""" Return whether the service with the given name (e.g., "lambda") is available. """
try:
url = get_local_service_url(service_name)
assert url
return is_port_open(url, http_path='/', expect_success=False)
except Exception:
return False
def connect_to_resource(service_name, env=None, region_name=None, endpoint_url=None, *args, **kwargs):
"""
Generic method to obtain an AWS service resource using boto3, based on environment, region, or custom endpoint_url.
"""
return connect_to_service(service_name, client=False, env=env, region_name=region_name, endpoint_url=endpoint_url)
def connect_to_service(service_name, client=True, env=None, region_name=None, endpoint_url=None,
config=None, *args, **kwargs):
"""
Generic method to obtain an AWS service client using boto3, based on environment, region, or custom endpoint_url.
"""
region_name = region_name or get_region()
env = get_environment(env, region_name=region_name)
region = env.region if env.region != REGION_LOCAL else region_name
key_elements = [service_name, client, env, region, endpoint_url, config]
cache_key = '/'.join([str(k) for k in key_elements])
if cache_key not in BOTO_CLIENTS_CACHE:
# Cache clients, as this is a relatively expensive operation
my_session = get_boto3_session()
method = my_session.client if client else my_session.resource
verify = True
if not endpoint_url:
if is_local_env(env):
endpoint_url = get_local_service_url(service_name)
verify = False
BOTO_CLIENTS_CACHE[cache_key] = method(service_name, region_name=region,
endpoint_url=endpoint_url, verify=verify, config=config)
return BOTO_CLIENTS_CACHE[cache_key]
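# Usage sketch (editor's addition; assumes a running LocalStack instance and a
# hypothetical queue name):
#   sqs = connect_to_service('sqs')               # cached boto3 client against the local endpoint
#   sqs.create_queue(QueueName='example-queue')
#   s3 = connect_to_resource('s3')                # boto3 resource instead of a client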
class VelocityInput:
"""Simple class to mimick the behavior of variable '$input' in AWS API Gateway integration velocity templates.
See: http://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-mapping-template-reference.html"""
def __init__(self, value):
self.value = value
def path(self, path):
from jsonpath_rw import parse
value = self.value if isinstance(self.value, dict) else json.loads(self.value)
jsonpath_expr = parse(path)
result = [match.value for match in jsonpath_expr.find(value)]
result = result[0] if len(result) == 1 else result
return result
def json(self, path):
return json.dumps(self.path(path))
def __repr__(self):
return '$input'
class VelocityUtil:
"""Simple class to mimick the behavior of variable '$util' in AWS API Gateway integration velocity templates.
See: http://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-mapping-template-reference.html"""
def base64Encode(self, s):
if not isinstance(s, str):
s = json.dumps(s)
encoded_str = s.encode(config.DEFAULT_ENCODING)
encoded_b64_str = base64.b64encode(encoded_str)
return encoded_b64_str.decode(config.DEFAULT_ENCODING)
def base64Decode(self, s):
if not isinstance(s, str):
s = json.dumps(s)
return base64.b64decode(s)
def toJson(self, obj):
return obj and json.dumps(obj)
def urlEncode(self, s):
return quote_plus(s)
def urlDecode(self, s):
return unquote_plus(s)
def escapeJavaScript(self, s):
return str(s).replace("'", r"\'")
def render_velocity_template(template, context, variables={}, as_json=False):
import airspeed
# run a few fixes to properly prepare the template
    template = re.sub(r'(^|\n)#\s+set(.*)', r'\1#set\2', template, flags=re.MULTILINE)
t = airspeed.Template(template)
var_map = {
'input': VelocityInput(context),
'util': VelocityUtil()
}
var_map.update(variables or {})
replaced = t.merge(var_map)
if as_json:
replaced = json.loads(replaced)
return replaced
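# Example rendering (editor's sketch; the template and payload are hypothetical):
#   template = '{"user": $input.json(\'$.user\')}'
#   render_velocity_template(template, {'user': {'name': 'alice'}}, as_json=True)
#   # -> {'user': {'name': 'alice'}}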
def check_valid_region(headers):
""" Check whether a valid region is provided, and if not then raise an Exception. """
auth_header = headers.get('Authorization')
if not auth_header:
raise Exception('Unable to find "Authorization" header in request')
replaced = re.sub(r'.*Credential=([^,]+),.*', r'\1', auth_header)
if auth_header == replaced:
raise Exception('Unable to find "Credential" section in "Authorization" header')
# Format is: <your-access-key-id>/<date>/<aws-region>/<aws-service>/aws4_request
# See https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-auth-using-authorization-header.html
parts = replaced.split('/')
region = parts[2]
if region not in config.VALID_REGIONS:
raise Exception('Invalid region specified in "Authorization" header: "%s"' % region)
def set_default_region_in_headers(headers):
auth_header = headers.get('Authorization')
if not auth_header:
return
replaced = re.sub(r'(.*Credential=[^/]+/[^/]+/)([^/])+/', r'\1%s/' % get_region(), auth_header)
headers['Authorization'] = replaced
def fix_account_id_in_arns(response, colon_delimiter=':', existing=None, replace=None):
""" Fix the account ID in the ARNs returned in the given Flask response or string """
existing = existing or ['123456789', '1234567890', MOTO_ACCOUNT_ID]
existing = existing if isinstance(existing, list) else [existing]
replace = replace or TEST_AWS_ACCOUNT_ID
is_str_obj = is_string_or_bytes(response)
content = to_str(response if is_str_obj else response._content)
replace = r'arn{col}aws{col}\1{col}\2{col}{acc}{col}'.format(col=colon_delimiter, acc=replace)
for acc_id in existing:
regex = r'arn{col}aws{col}([^:%]+){col}([^:%]*){col}{acc}{col}'.format(col=colon_delimiter, acc=acc_id)
content = re.sub(regex, replace, content)
if not is_str_obj:
response._content = content
response.headers['content-length'] = len(response._content)
return response
return content
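# Example (editor's sketch; the input ARN is hypothetical):
#   fix_account_id_in_arns('arn:aws:sns:us-east-1:123456789:my-topic')
#   # -> the same ARN with the account ID replaced by TEST_AWS_ACCOUNT_ID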
def get_s3_client():
return boto3.resource('s3',
endpoint_url=config.TEST_S3_URL,
config=boto3.session.Config(s3={'addressing_style': 'path'}),
verify=False)
def sqs_queue_url_for_arn(queue_arn):
if '://' in queue_arn:
return queue_arn
if queue_arn in SQS_ARN_TO_URL_CACHE:
return SQS_ARN_TO_URL_CACHE[queue_arn]
sqs_client = connect_to_service('sqs')
parts = queue_arn.split(':')
result = sqs_client.get_queue_url(QueueName=parts[5], QueueOwnerAWSAccountId=parts[4])['QueueUrl']
SQS_ARN_TO_URL_CACHE[queue_arn] = result
return result
def extract_region_from_auth_header(headers):
auth = headers.get('Authorization') or ''
region = re.sub(r'.*Credential=[^/]+/[^/]+/([^/]+)/.*', r'\1', auth)
region = region or get_region()
return region
def extract_region_from_arn(arn):
parts = arn.split(':')
return parts[3] if len(parts) > 1 else None
def get_account_id(account_id=None, env=None):
if account_id:
return account_id
env = get_environment(env)
if is_local_env(env):
return os.environ['TEST_AWS_ACCOUNT_ID']
raise Exception('Unable to determine AWS account ID (%s, %s)' % (account_id, env))
def role_arn(role_name, account_id=None, env=None):
if not role_name:
return role_name
if role_name.startswith('arn:aws:iam::'):
return role_name
env = get_environment(env)
account_id = get_account_id(account_id, env=env)
return 'arn:aws:iam::%s:role/%s' % (account_id, role_name)
def iam_resource_arn(resource, role=None, env=None):
env = get_environment(env)
if not role:
role = get_iam_role(resource, env=env)
return role_arn(role_name=role, account_id=get_account_id())
def get_iam_role(resource, env=None):
env = get_environment(env)
return 'role-%s' % resource
def secretsmanager_secret_arn(secret_name, account_id=None, region_name=None):
pattern = 'arn:aws:secretsmanager:%s:%s:secret:%s'
return _resource_arn(secret_name, pattern, account_id=account_id, region_name=region_name)
def cloudformation_stack_arn(stack_name, account_id=None, region_name=None):
pattern = 'arn:aws:cloudformation:%s:%s:stack/%s/id-1234'
return _resource_arn(stack_name, pattern, account_id=account_id, region_name=region_name)
def dynamodb_table_arn(table_name, account_id=None, region_name=None):
pattern = 'arn:aws:dynamodb:%s:%s:table/%s'
return _resource_arn(table_name, pattern, account_id=account_id, region_name=region_name)
def dynamodb_stream_arn(table_name, latest_stream_label, account_id=None):
account_id = get_account_id(account_id)
return ('arn:aws:dynamodb:%s:%s:table/%s/stream/%s' %
(get_region(), account_id, table_name, latest_stream_label))
def log_group_arn(group_name, account_id=None, region_name=None):
pattern = 'arn:aws:logs:%s:%s:log-group:%s'
return _resource_arn(group_name, pattern, account_id=account_id, region_name=region_name)
def events_rule_arn(rule_name, account_id=None, region_name=None):
pattern = 'arn:aws:events:%s:%s:rule/%s'
return _resource_arn(rule_name, pattern, account_id=account_id, region_name=region_name)
def lambda_function_arn(function_name, account_id=None, region_name=None):
return lambda_function_or_layer_arn('function', function_name, account_id=account_id, region_name=region_name)
def lambda_layer_arn(layer_name, version=None, account_id=None):
    return lambda_function_or_layer_arn('layer', layer_name, version=version, account_id=account_id)
def lambda_function_or_layer_arn(type, entity_name, version=None, account_id=None, region_name=None):
pattern = 'arn:aws:lambda:.*:.*:(function|layer):.*'
if re.match(pattern, entity_name):
return entity_name
if ':' in entity_name:
raise Exception('Lambda %s name should not contain a colon ":": %s' % (type, entity_name))
account_id = get_account_id(account_id)
region_name = region_name or get_region()
pattern = re.sub(r'\([^\|]+\|.+\)', type, pattern)
result = pattern.replace('.*', '%s') % (region_name, account_id, entity_name)
if version:
result = '%s:%s' % (result, version)
return result
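# Example (editor's sketch; the function name, account ID and region are hypothetical):
#   lambda_function_arn('my-func', account_id='000000000000', region_name='us-east-1')
#   # -> 'arn:aws:lambda:us-east-1:000000000000:function:my-func'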
def lambda_function_name(name_or_arn):
if ':' not in name_or_arn:
return name_or_arn
parts = name_or_arn.split(':')
# name is index #6 in pattern: arn:aws:lambda:.*:.*:function:.*
return parts[6]
def state_machine_arn(name, account_id=None, region_name=None):
pattern = 'arn:aws:states:%s:%s:stateMachine:%s'
return _resource_arn(name, pattern, account_id=account_id, region_name=region_name)
def stepfunctions_activity_arn(name, account_id=None, region_name=None):
pattern = 'arn:aws:states:%s:%s:activity:%s'
return _resource_arn(name, pattern, account_id=account_id, region_name=region_name)
def fix_arn(arn):
""" Function that attempts to "canonicalize" the given ARN. This includes converting
resource names to ARNs, replacing incorrect regions, account IDs, etc. """
if arn.startswith('arn:aws:lambda'):
parts = arn.split(':')
region = parts[3] if parts[3] in config.VALID_REGIONS else get_region()
return lambda_function_arn(lambda_function_name(arn), region_name=region)
LOG.warning('Unable to fix/canonicalize ARN: %s' % arn)
return arn
def cognito_user_pool_arn(user_pool_id, account_id=None, region_name=None):
pattern = 'arn:aws:cognito-idp:%s:%s:userpool/%s'
return _resource_arn(user_pool_id, pattern, account_id=account_id, region_name=region_name)
def kinesis_stream_arn(stream_name, account_id=None, region_name=None):
pattern = 'arn:aws:kinesis:%s:%s:stream/%s'
return _resource_arn(stream_name, pattern, account_id=account_id, region_name=region_name)
def firehose_stream_arn(stream_name, account_id=None, region_name=None):
pattern = 'arn:aws:firehose:%s:%s:deliverystream/%s'
return _resource_arn(stream_name, pattern, account_id=account_id, region_name=region_name)
def es_domain_arn(domain_name, account_id=None, region_name=None):
pattern = 'arn:aws:es:%s:%s:domain/%s'
return _resource_arn(domain_name, pattern, account_id=account_id, region_name=region_name)
def s3_bucket_arn(bucket_name, account_id=None):
return 'arn:aws:s3:::%s' % (bucket_name)
def _resource_arn(name, pattern, account_id=None, region_name=None):
if ':' in name:
return name
account_id = get_account_id(account_id)
region_name = region_name or get_region()
return pattern % (region_name, account_id, name)
def create_sqs_queue(queue_name, env=None):
env = get_environment(env)
# queue
conn = connect_to_service('sqs', env=env)
return conn.create_queue(QueueName=queue_name)
def sqs_queue_arn(queue_name, account_id=None, region_name=None):
account_id = get_account_id(account_id)
region_name = region_name or get_region()
return ('arn:aws:sqs:%s:%s:%s' % (region_name, account_id, queue_name))
def apigateway_restapi_arn(api_id, account_id=None, region_name=None):
account_id = get_account_id(account_id)
region_name = region_name or get_region()
return ('arn:aws:apigateway:%s:%s:/restapis/%s' % (region_name, account_id, api_id))
def sqs_queue_name(queue_arn):
parts = queue_arn.split(':')
return queue_arn if len(parts) == 1 else parts[5]
def sns_topic_arn(topic_name, account_id=None):
account_id = get_account_id(account_id)
return ('arn:aws:sns:%s:%s:%s' % (get_region(), account_id, topic_name))
def get_sqs_queue_url(queue_arn):
region_name = extract_region_from_arn(queue_arn)
queue_name = sqs_queue_name(queue_arn)
client = connect_to_service('sqs', region_name=region_name)
response = client.get_queue_url(QueueName=queue_name)
return response['QueueUrl']
def sqs_receive_message(queue_arn):
region_name = extract_region_from_arn(queue_arn)
client = connect_to_service('sqs', region_name=region_name)
queue_url = get_sqs_queue_url(queue_arn)
response = client.receive_message(QueueUrl=queue_url)
return response
def mock_aws_request_headers(service='dynamodb', region_name=None):
ctype = APPLICATION_AMZ_JSON_1_0
if service == 'kinesis':
ctype = APPLICATION_AMZ_JSON_1_1
elif service == 'sqs':
ctype = APPLICATION_X_WWW_FORM_URLENCODED
access_key = get_boto3_credentials().access_key
region_name = region_name or get_region()
headers = {
'Content-Type': ctype,
'Accept-Encoding': 'identity',
'X-Amz-Date': '20160623T103251Z',
'Authorization': ('AWS4-HMAC-SHA256 ' +
'Credential=%s/20160623/%s/%s/aws4_request, ' +
'SignedHeaders=content-type;host;x-amz-date;x-amz-target, Signature=1234') % (
access_key, region_name, service)
}
return headers
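# Note (editor's addition): the generated 'Authorization' header follows the SigV4 shape
#   'AWS4-HMAC-SHA256 Credential=<access-key>/20160623/<region>/<service>/aws4_request, ...'
# which is exactly what check_valid_region() and extract_region_from_auth_header() parse.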
def dynamodb_get_item_raw(request):
headers = mock_aws_request_headers()
headers['X-Amz-Target'] = 'DynamoDB_20120810.GetItem'
new_item = make_http_request(url=config.TEST_DYNAMODB_URL,
method='POST', data=json.dumps(request), headers=headers)
new_item = json.loads(new_item.text)
return new_item
def create_dynamodb_table(table_name, partition_key, env=None, stream_view_type=None):
"""Utility method to create a DynamoDB table"""
dynamodb = connect_to_service('dynamodb', env=env, client=True)
stream_spec = {'StreamEnabled': False}
key_schema = [{
'AttributeName': partition_key,
'KeyType': 'HASH'
}]
attr_defs = [{
'AttributeName': partition_key,
'AttributeType': 'S'
}]
if stream_view_type is not None:
stream_spec = {
'StreamEnabled': True,
'StreamViewType': stream_view_type
}
table = None
try:
table = dynamodb.create_table(TableName=table_name, KeySchema=key_schema,
AttributeDefinitions=attr_defs, ProvisionedThroughput={
'ReadCapacityUnits': 10, 'WriteCapacityUnits': 10
},
StreamSpecification=stream_spec
)
except Exception as e:
if 'ResourceInUseException' in str(e):
# Table already exists -> return table reference
return connect_to_resource('dynamodb', env=env).Table(table_name)
time.sleep(2)
return table
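# Usage sketch (editor's addition; assumes a running LocalStack DynamoDB instance and a
# hypothetical table name):
#   create_dynamodb_table('example-table', partition_key='id', stream_view_type='NEW_AND_OLD_IMAGES')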
def get_apigateway_integration(api_id, method, path, env=None):
apigateway = connect_to_service(service_name='apigateway', client=True, env=env)
resources = apigateway.get_resources(restApiId=api_id, limit=100)
resource_id = None
for r in resources['items']:
if r['path'] == path:
resource_id = r['id']
if not resource_id:
raise Exception('Unable to find apigateway integration for path "%s"' % path)
integration = apigateway.get_integration(
restApiId=api_id, resourceId=resource_id, httpMethod=method
)
return integration
def get_apigateway_resource_for_path(api_id, path, parent=None, resources=None):
if resources is None:
apigateway = connect_to_service(service_name='apigateway')
resources = apigateway.get_resources(restApiId=api_id, limit=100)
if not isinstance(path, list):
path = path.split('/')
if not path:
return parent
for resource in resources:
if resource['pathPart'] == path[0] and (not parent or parent['id'] == resource['parentId']):
return get_apigateway_resource_for_path(api_id, path[1:], parent=resource, resources=resources)
return None
def get_apigateway_path_for_resource(api_id, resource_id, path_suffix='', resources=None, region_name=None):
if resources is None:
apigateway = connect_to_service(service_name='apigateway', region_name=region_name)
resources = apigateway.get_resources(restApiId=api_id, limit=100)['items']
target_resource = list(filter(lambda res: res['id'] == resource_id, resources))[0]
path_part = target_resource.get('pathPart', '')
if path_suffix:
if path_part:
path_suffix = '%s/%s' % (path_part, path_suffix)
else:
path_suffix = path_part
parent_id = target_resource.get('parentId')
if not parent_id:
return '/%s' % path_suffix
return get_apigateway_path_for_resource(api_id, parent_id,
path_suffix=path_suffix, resources=resources, region_name=region_name)
def create_api_gateway(name, description=None, resources=None, stage_name=None,
enabled_api_keys=[], env=None, usage_plan_name=None, region_name=None):
client = connect_to_service('apigateway', env=env, region_name=region_name)
if not resources:
resources = []
if not stage_name:
stage_name = 'testing'
if not usage_plan_name:
usage_plan_name = 'Basic Usage'
if not description:
description = 'Test description for API "%s"' % name
LOG.info('Creating API resources under API Gateway "%s".' % name)
api = client.create_rest_api(name=name, description=description)
# list resources
api_id = api['id']
resources_list = client.get_resources(restApiId=api_id)
root_res_id = resources_list['items'][0]['id']
# add API resources and methods
for path, methods in six.iteritems(resources):
# create resources recursively
parent_id = root_res_id
for path_part in path.split('/'):
api_resource = client.create_resource(restApiId=api_id, parentId=parent_id, pathPart=path_part)
parent_id = api_resource['id']
# add methods to the API resource
for method in methods:
client.put_method(
restApiId=api_id,
resourceId=api_resource['id'],
httpMethod=method['httpMethod'],
authorizationType=method.get('authorizationType') or 'NONE',
apiKeyRequired=method.get('apiKeyRequired') or False
)
# create integrations for this API resource/method
integrations = method['integrations']
create_api_gateway_integrations(api_id, api_resource['id'], method,
integrations, env=env, region_name=region_name)
# deploy the API gateway
client.create_deployment(restApiId=api_id, stageName=stage_name)
return api
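# Expected shape of the `resources` argument (editor's sketch; all values are hypothetical):
#   resources = {
#       'orders': [{
#           'httpMethod': 'POST',
#           'integrations': [{'type': 'AWS_PROXY', 'uri': '<lambda invocation ARN>'}]
#       }]
#   }
#   create_api_gateway('example-api', resources=resources)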
def create_api_gateway_integrations(api_id, resource_id, method,
integrations=[], env=None, region_name=None):
client = connect_to_service('apigateway', env=env, region_name=region_name)
for integration in integrations:
req_templates = integration.get('requestTemplates') or {}
res_templates = integration.get('responseTemplates') or {}
success_code = integration.get('successCode') or '200'
client_error_code = integration.get('clientErrorCode') or '400'
server_error_code = integration.get('serverErrorCode') or '500'
# create integration
client.put_integration(
restApiId=api_id,
resourceId=resource_id,
httpMethod=method['httpMethod'],
integrationHttpMethod=method.get('integrationHttpMethod') or method['httpMethod'],
type=integration['type'],
uri=integration['uri'],
requestTemplates=req_templates
)
response_configs = [
{'pattern': '^2.*', 'code': success_code, 'res_templates': res_templates},
{'pattern': '^4.*', 'code': client_error_code, 'res_templates': {}},
{'pattern': '^5.*', 'code': server_error_code, 'res_templates': {}}
]
# create response configs
for response_config in response_configs:
# create integration response
client.put_integration_response(
restApiId=api_id,
resourceId=resource_id,
httpMethod=method['httpMethod'],
statusCode=response_config['code'],
responseTemplates=response_config['res_templates'],
selectionPattern=response_config['pattern']
)
# create method response
client.put_method_response(
restApiId=api_id,
resourceId=resource_id,
httpMethod=method['httpMethod'],
statusCode=response_config['code']
)
def apigateway_invocations_arn(lambda_uri):
return ('arn:aws:apigateway:%s:lambda:path/2015-03-31/functions/%s/invocations' %
(get_region(), lambda_uri))
def get_elasticsearch_endpoint(domain=None, region_name=None):
env = get_environment(region_name=region_name)
if is_local_env(env):
return os.environ['TEST_ELASTICSEARCH_URL']
# get endpoint from API
es_client = connect_to_service(service_name='es', region_name=env.region)
info = es_client.describe_elasticsearch_domain(DomainName=domain)
endpoint = 'https://%s' % info['DomainStatus']['Endpoint']
return endpoint
def connect_elasticsearch(endpoint=None, domain=None, region_name=None, env=None):
from elasticsearch import Elasticsearch, RequestsHttpConnection
from requests_aws4auth import AWS4Auth
env = get_environment(env, region_name=region_name)
verify_certs = False
use_ssl = False
if not endpoint and is_local_env(env):
endpoint = os.environ['TEST_ELASTICSEARCH_URL']
if not endpoint and not is_local_env(env) and domain:
endpoint = get_elasticsearch_endpoint(domain=domain, region_name=env.region)
# use ssl?
if 'https://' in endpoint:
use_ssl = True
if not is_local_env(env):
verify_certs = True
if CUSTOM_BOTO3_SESSION or (ENV_ACCESS_KEY in os.environ and ENV_SECRET_KEY in os.environ):
access_key = os.environ.get(ENV_ACCESS_KEY)
secret_key = os.environ.get(ENV_SECRET_KEY)
session_token = os.environ.get(ENV_SESSION_TOKEN)
if CUSTOM_BOTO3_SESSION:
credentials = CUSTOM_BOTO3_SESSION.get_credentials()
access_key = credentials.access_key
secret_key = credentials.secret_key
session_token = credentials.token
awsauth = AWS4Auth(access_key, secret_key, env.region, 'es', session_token=session_token)
connection_class = RequestsHttpConnection
return Elasticsearch(hosts=[endpoint], verify_certs=verify_certs, use_ssl=use_ssl,
connection_class=connection_class, http_auth=awsauth)
return Elasticsearch(hosts=[endpoint], verify_certs=verify_certs, use_ssl=use_ssl)
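# Usage sketch (editor's addition; assumes TEST_ELASTICSEARCH_URL points to a running
# local Elasticsearch, and a hypothetical index name):
#   es = connect_elasticsearch()
#   es.indices.create(index='example-index')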
def create_kinesis_stream(stream_name, shards=1, env=None, delete=False):
env = get_environment(env)
# stream
stream = KinesisStream(id=stream_name, num_shards=shards)
conn = connect_to_service('kinesis', env=env)
stream.connect(conn)
if delete:
run_safe(lambda: stream.destroy(), print_error=False)
stream.create()
stream.wait_for()
return stream
def kinesis_get_latest_records(stream_name, shard_id, count=10, env=None):
kinesis = connect_to_service('kinesis', env=env)
result = []
response = kinesis.get_shard_iterator(StreamName=stream_name, ShardId=shard_id,
ShardIteratorType='TRIM_HORIZON')
shard_iterator = response['ShardIterator']
while shard_iterator:
records_response = kinesis.get_records(ShardIterator=shard_iterator)
records = records_response['Records']
for record in records:
try:
record['Data'] = to_str(record['Data'])
except Exception:
pass
result.extend(records)
shard_iterator = records_response['NextShardIterator'] if records else False
while len(result) > count:
result.pop(0)
return result
| 1 | 10,997 | We can remove this function and use `lambda_function_name(..)` in this file instead. | localstack-localstack | py |
@@ -17,10 +17,12 @@
package metrics
-import "time"
+import (
+ "time"
+)
-// CreateSender creates metrics sender with appropriate transport
-func CreateSender(disableMetrics bool, metricsAddress string) *Sender {
+// NewSender creates metrics sender with appropriate transport
+func NewSender(disableMetrics bool, metricsAddress, applicationVersion string) *Sender {
var transport Transport
if disableMetrics {
transport = NewNoopTransport()
 | 1 | /*
* Copyright (C) 2019 The "MysteriumNetwork/node" Authors.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package metrics
import "time"
// CreateSender creates metrics sender with appropriate transport
func CreateSender(disableMetrics bool, metricsAddress string) *Sender {
var transport Transport
if disableMetrics {
transport = NewNoopTransport()
} else {
transport = NewElasticSearchTransport(metricsAddress, 10*time.Second)
}
return &Sender{Transport: transport}
}
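// Illustrative usage (editor's sketch, not part of the original file; the address is hypothetical):
//
//	sender := CreateSender(false, "http://localhost:9200")
//	_ = sender // sender.Transport is the ElasticSearch transport with a 10-second timeout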
| 1 | 13,719 | nitpick: `ApplicationVersion` could be `AppVersion`. It's smaller but gives the same understanding for purposes of the field. | mysteriumnetwork-node | go |
@@ -537,6 +537,9 @@ func (handler *DCRedirectionHandlerImpl) PollWorkflowTaskQueue(
return err
})
+ if resp == nil && err == nil {
+ return &workflowservice.PollWorkflowTaskQueueResponse{}, nil
+ }
return resp, err
}
| 1 | // The MIT License
//
// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved.
//
// Copyright (c) 2020 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package frontend
import (
"context"
"time"
"go.temporal.io/api/workflowservice/v1"
healthpb "google.golang.org/grpc/health/grpc_health_v1"
"go.temporal.io/server/common"
"go.temporal.io/server/common/log"
"go.temporal.io/server/common/metrics"
"go.temporal.io/server/common/resource"
"go.temporal.io/server/common/service/config"
)
var _ Handler = (*DCRedirectionHandlerImpl)(nil)
type (
	// DCRedirectionHandlerImpl is a simple wrapper over the frontend service, doing redirection based on policy
DCRedirectionHandlerImpl struct {
resource.Resource
currentClusterName string
config *Config
redirectionPolicy DCRedirectionPolicy
tokenSerializer common.TaskTokenSerializer
frontendHandler Handler
}
)
// NewDCRedirectionHandler creates a DC redirection handler for the Temporal frontend service
func NewDCRedirectionHandler(
wfHandler Handler,
policy config.DCRedirectionPolicy,
) *DCRedirectionHandlerImpl {
resource := wfHandler.GetResource()
dcRedirectionPolicy := RedirectionPolicyGenerator(
resource.GetClusterMetadata(),
wfHandler.GetConfig(),
resource.GetNamespaceCache(),
policy,
)
return &DCRedirectionHandlerImpl{
Resource: resource,
currentClusterName: resource.GetClusterMetadata().GetCurrentClusterName(),
config: wfHandler.GetConfig(),
redirectionPolicy: dcRedirectionPolicy,
tokenSerializer: common.NewProtoTaskTokenSerializer(),
frontendHandler: wfHandler,
}
}
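// Illustrative wiring (editor's sketch; `dcRedirectionPolicyConfig` is a hypothetical
// config.DCRedirectionPolicy value taken from the frontend service configuration):
//
//	redirectionHandler := NewDCRedirectionHandler(wfHandler, dcRedirectionPolicyConfig)
//	redirectionHandler.Start()
//	defer redirectionHandler.Stop()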
// Start starts the handler
func (handler *DCRedirectionHandlerImpl) Start() {
handler.frontendHandler.Start()
}
// Stop stops the handler
func (handler *DCRedirectionHandlerImpl) Stop() {
handler.frontendHandler.Stop()
}
// GetResource returns the resource
func (handler *DCRedirectionHandlerImpl) GetResource() resource.Resource {
return handler.Resource
}
// GetConfig returns the config
func (handler *DCRedirectionHandlerImpl) GetConfig() *Config {
return handler.frontendHandler.GetConfig()
}
// UpdateHealthStatus sets the health status for this rpc handler.
// This health status will be used within the rpc health check handler
func (handler *DCRedirectionHandlerImpl) UpdateHealthStatus(status HealthStatus) {
handler.frontendHandler.UpdateHealthStatus(status)
}
// Check is for health check
func (handler *DCRedirectionHandlerImpl) Check(ctx context.Context, request *healthpb.HealthCheckRequest) (*healthpb.HealthCheckResponse, error) {
return handler.frontendHandler.Check(ctx, request)
}
func (handler *DCRedirectionHandlerImpl) Watch(request *healthpb.HealthCheckRequest, server healthpb.Health_WatchServer) error {
return handler.frontendHandler.Watch(request, server)
}
// Namespace APIs do not require redirection
// DeprecateNamespace API call
func (handler *DCRedirectionHandlerImpl) DeprecateNamespace(
ctx context.Context,
request *workflowservice.DeprecateNamespaceRequest,
) (resp *workflowservice.DeprecateNamespaceResponse, retError error) {
var cluster = handler.currentClusterName
scope, startTime := handler.beforeCall(metrics.DCRedirectionDeprecateNamespaceScope)
defer func() {
handler.afterCall(scope, startTime, cluster, &retError)
}()
return handler.frontendHandler.DeprecateNamespace(ctx, request)
}
// DescribeNamespace API call
func (handler *DCRedirectionHandlerImpl) DescribeNamespace(
ctx context.Context,
request *workflowservice.DescribeNamespaceRequest,
) (resp *workflowservice.DescribeNamespaceResponse, retError error) {
var cluster = handler.currentClusterName
scope, startTime := handler.beforeCall(metrics.DCRedirectionDescribeNamespaceScope)
defer func() {
handler.afterCall(scope, startTime, cluster, &retError)
}()
return handler.frontendHandler.DescribeNamespace(ctx, request)
}
// ListNamespaces API call
func (handler *DCRedirectionHandlerImpl) ListNamespaces(
ctx context.Context,
request *workflowservice.ListNamespacesRequest,
) (resp *workflowservice.ListNamespacesResponse, retError error) {
var cluster = handler.currentClusterName
scope, startTime := handler.beforeCall(metrics.DCRedirectionListNamespacesScope)
defer func() {
handler.afterCall(scope, startTime, cluster, &retError)
}()
return handler.frontendHandler.ListNamespaces(ctx, request)
}
// RegisterNamespace API call
func (handler *DCRedirectionHandlerImpl) RegisterNamespace(
ctx context.Context,
request *workflowservice.RegisterNamespaceRequest,
) (resp *workflowservice.RegisterNamespaceResponse, retError error) {
var cluster = handler.currentClusterName
scope, startTime := handler.beforeCall(metrics.DCRedirectionRegisterNamespaceScope)
defer func() {
handler.afterCall(scope, startTime, cluster, &retError)
}()
return handler.frontendHandler.RegisterNamespace(ctx, request)
}
// UpdateNamespace API call
func (handler *DCRedirectionHandlerImpl) UpdateNamespace(
ctx context.Context,
request *workflowservice.UpdateNamespaceRequest,
) (resp *workflowservice.UpdateNamespaceResponse, retError error) {
var cluster = handler.currentClusterName
scope, startTime := handler.beforeCall(metrics.DCRedirectionUpdateNamespaceScope)
defer func() {
handler.afterCall(scope, startTime, cluster, &retError)
}()
return handler.frontendHandler.UpdateNamespace(ctx, request)
}
// Other APIs
// DescribeTaskQueue API call
func (handler *DCRedirectionHandlerImpl) DescribeTaskQueue(
ctx context.Context,
request *workflowservice.DescribeTaskQueueRequest,
) (resp *workflowservice.DescribeTaskQueueResponse, retError error) {
var apiName = "DescribeTaskQueue"
var err error
var cluster string
scope, startTime := handler.beforeCall(metrics.DCRedirectionDescribeTaskQueueScope)
defer func() {
handler.afterCall(scope, startTime, cluster, &retError)
}()
err = handler.redirectionPolicy.WithNamespaceRedirect(ctx, request.GetNamespace(), apiName, func(targetDC string) error {
cluster = targetDC
switch {
case targetDC == handler.currentClusterName:
resp, err = handler.frontendHandler.DescribeTaskQueue(ctx, request)
default:
remoteClient := handler.GetRemoteFrontendClient(targetDC)
resp, err = remoteClient.DescribeTaskQueue(ctx, request)
}
return err
})
return resp, err
}
// DescribeWorkflowExecution API call
func (handler *DCRedirectionHandlerImpl) DescribeWorkflowExecution(
ctx context.Context,
request *workflowservice.DescribeWorkflowExecutionRequest,
) (resp *workflowservice.DescribeWorkflowExecutionResponse, retError error) {
var apiName = "DescribeWorkflowExecution"
var err error
var cluster string
scope, startTime := handler.beforeCall(metrics.DCRedirectionDescribeWorkflowExecutionScope)
defer func() {
handler.afterCall(scope, startTime, cluster, &retError)
}()
err = handler.redirectionPolicy.WithNamespaceRedirect(ctx, request.GetNamespace(), apiName, func(targetDC string) error {
cluster = targetDC
switch {
case targetDC == handler.currentClusterName:
resp, err = handler.frontendHandler.DescribeWorkflowExecution(ctx, request)
default:
remoteClient := handler.GetRemoteFrontendClient(targetDC)
resp, err = remoteClient.DescribeWorkflowExecution(ctx, request)
}
return err
})
return resp, err
}
// GetWorkflowExecutionHistory API call
func (handler *DCRedirectionHandlerImpl) GetWorkflowExecutionHistory(
ctx context.Context,
request *workflowservice.GetWorkflowExecutionHistoryRequest,
) (resp *workflowservice.GetWorkflowExecutionHistoryResponse, retError error) {
var apiName = "GetWorkflowExecutionHistory"
var err error
var cluster string
scope, startTime := handler.beforeCall(metrics.DCRedirectionGetWorkflowExecutionHistoryScope)
defer func() {
handler.afterCall(scope, startTime, cluster, &retError)
}()
err = handler.redirectionPolicy.WithNamespaceRedirect(ctx, request.GetNamespace(), apiName, func(targetDC string) error {
cluster = targetDC
switch {
case targetDC == handler.currentClusterName:
resp, err = handler.frontendHandler.GetWorkflowExecutionHistory(ctx, request)
default:
remoteClient := handler.GetRemoteFrontendClient(targetDC)
resp, err = remoteClient.GetWorkflowExecutionHistory(ctx, request)
}
return err
})
return resp, err
}
// ListArchivedWorkflowExecutions API call
func (handler *DCRedirectionHandlerImpl) ListArchivedWorkflowExecutions(
ctx context.Context,
request *workflowservice.ListArchivedWorkflowExecutionsRequest,
) (resp *workflowservice.ListArchivedWorkflowExecutionsResponse, retError error) {
var apiName = "ListArchivedWorkflowExecutions"
var err error
var cluster string
scope, startTime := handler.beforeCall(metrics.DCRedirectionListArchivedWorkflowExecutionsScope)
defer func() {
handler.afterCall(scope, startTime, cluster, &retError)
}()
err = handler.redirectionPolicy.WithNamespaceRedirect(ctx, request.GetNamespace(), apiName, func(targetDC string) error {
cluster = targetDC
switch {
case targetDC == handler.currentClusterName:
resp, err = handler.frontendHandler.ListArchivedWorkflowExecutions(ctx, request)
default:
remoteClient := handler.GetRemoteFrontendClient(targetDC)
resp, err = remoteClient.ListArchivedWorkflowExecutions(ctx, request)
}
return err
})
return resp, err
}
// ListClosedWorkflowExecutions API call
func (handler *DCRedirectionHandlerImpl) ListClosedWorkflowExecutions(
ctx context.Context,
request *workflowservice.ListClosedWorkflowExecutionsRequest,
) (resp *workflowservice.ListClosedWorkflowExecutionsResponse, retError error) {
var apiName = "ListClosedWorkflowExecutions"
var err error
var cluster string
scope, startTime := handler.beforeCall(metrics.DCRedirectionListClosedWorkflowExecutionsScope)
defer func() {
handler.afterCall(scope, startTime, cluster, &retError)
}()
err = handler.redirectionPolicy.WithNamespaceRedirect(ctx, request.GetNamespace(), apiName, func(targetDC string) error {
cluster = targetDC
switch {
case targetDC == handler.currentClusterName:
resp, err = handler.frontendHandler.ListClosedWorkflowExecutions(ctx, request)
default:
remoteClient := handler.GetRemoteFrontendClient(targetDC)
resp, err = remoteClient.ListClosedWorkflowExecutions(ctx, request)
}
return err
})
return resp, err
}
// ListOpenWorkflowExecutions API call
func (handler *DCRedirectionHandlerImpl) ListOpenWorkflowExecutions(
ctx context.Context,
request *workflowservice.ListOpenWorkflowExecutionsRequest,
) (resp *workflowservice.ListOpenWorkflowExecutionsResponse, retError error) {
var apiName = "ListOpenWorkflowExecutions"
var err error
var cluster string
scope, startTime := handler.beforeCall(metrics.DCRedirectionListOpenWorkflowExecutionsScope)
defer func() {
handler.afterCall(scope, startTime, cluster, &retError)
}()
err = handler.redirectionPolicy.WithNamespaceRedirect(ctx, request.GetNamespace(), apiName, func(targetDC string) error {
cluster = targetDC
switch {
case targetDC == handler.currentClusterName:
resp, err = handler.frontendHandler.ListOpenWorkflowExecutions(ctx, request)
default:
remoteClient := handler.GetRemoteFrontendClient(targetDC)
resp, err = remoteClient.ListOpenWorkflowExecutions(ctx, request)
}
return err
})
return resp, err
}
// ListWorkflowExecutions API call
func (handler *DCRedirectionHandlerImpl) ListWorkflowExecutions(
ctx context.Context,
request *workflowservice.ListWorkflowExecutionsRequest,
) (resp *workflowservice.ListWorkflowExecutionsResponse, retError error) {
var apiName = "ListWorkflowExecutions"
var err error
var cluster string
scope, startTime := handler.beforeCall(metrics.DCRedirectionListWorkflowExecutionsScope)
defer func() {
handler.afterCall(scope, startTime, cluster, &retError)
}()
err = handler.redirectionPolicy.WithNamespaceRedirect(ctx, request.GetNamespace(), apiName, func(targetDC string) error {
cluster = targetDC
switch {
case targetDC == handler.currentClusterName:
resp, err = handler.frontendHandler.ListWorkflowExecutions(ctx, request)
default:
remoteClient := handler.GetRemoteFrontendClient(targetDC)
resp, err = remoteClient.ListWorkflowExecutions(ctx, request)
}
return err
})
return resp, err
}
// ScanWorkflowExecutions API call
func (handler *DCRedirectionHandlerImpl) ScanWorkflowExecutions(
ctx context.Context,
request *workflowservice.ScanWorkflowExecutionsRequest,
) (resp *workflowservice.ScanWorkflowExecutionsResponse, retError error) {
var apiName = "ScanWorkflowExecutions"
var err error
var cluster string
scope, startTime := handler.beforeCall(metrics.DCRedirectionScanWorkflowExecutionsScope)
defer func() {
handler.afterCall(scope, startTime, cluster, &retError)
}()
err = handler.redirectionPolicy.WithNamespaceRedirect(ctx, request.GetNamespace(), apiName, func(targetDC string) error {
cluster = targetDC
switch {
case targetDC == handler.currentClusterName:
resp, err = handler.frontendHandler.ScanWorkflowExecutions(ctx, request)
default:
remoteClient := handler.GetRemoteFrontendClient(targetDC)
resp, err = remoteClient.ScanWorkflowExecutions(ctx, request)
}
return err
})
return resp, err
}
// CountWorkflowExecutions API call
func (handler *DCRedirectionHandlerImpl) CountWorkflowExecutions(
ctx context.Context,
request *workflowservice.CountWorkflowExecutionsRequest,
) (resp *workflowservice.CountWorkflowExecutionsResponse, retError error) {
var apiName = "CountWorkflowExecutions"
var err error
var cluster string
scope, startTime := handler.beforeCall(metrics.DCRedirectionCountWorkflowExecutionsScope)
defer func() {
handler.afterCall(scope, startTime, cluster, &retError)
}()
err = handler.redirectionPolicy.WithNamespaceRedirect(ctx, request.GetNamespace(), apiName, func(targetDC string) error {
cluster = targetDC
switch {
case targetDC == handler.currentClusterName:
resp, err = handler.frontendHandler.CountWorkflowExecutions(ctx, request)
default:
remoteClient := handler.GetRemoteFrontendClient(targetDC)
resp, err = remoteClient.CountWorkflowExecutions(ctx, request)
}
return err
})
return resp, err
}
// GetSearchAttributes API call
func (handler *DCRedirectionHandlerImpl) GetSearchAttributes(
ctx context.Context,
request *workflowservice.GetSearchAttributesRequest,
) (resp *workflowservice.GetSearchAttributesResponse, retError error) {
var cluster = handler.currentClusterName
scope, startTime := handler.beforeCall(metrics.DCRedirectionGetSearchAttributesScope)
defer func() {
handler.afterCall(scope, startTime, cluster, &retError)
}()
return handler.frontendHandler.GetSearchAttributes(ctx, request)
}
// PollActivityTaskQueue API call
func (handler *DCRedirectionHandlerImpl) PollActivityTaskQueue(
ctx context.Context,
request *workflowservice.PollActivityTaskQueueRequest,
) (resp *workflowservice.PollActivityTaskQueueResponse, retError error) {
var apiName = "PollActivityTaskQueue"
var err error
var cluster string
scope, startTime := handler.beforeCall(metrics.DCRedirectionPollActivityTaskQueueScope)
defer func() {
handler.afterCall(scope, startTime, cluster, &retError)
}()
err = handler.redirectionPolicy.WithNamespaceRedirect(ctx, request.GetNamespace(), apiName, func(targetDC string) error {
cluster = targetDC
switch {
case targetDC == handler.currentClusterName:
resp, err = handler.frontendHandler.PollActivityTaskQueue(ctx, request)
default:
remoteClient := handler.GetRemoteFrontendClient(targetDC)
resp, err = remoteClient.PollActivityTaskQueue(ctx, request)
}
return err
})
return resp, err
}
// PollWorkflowTaskQueue API call
func (handler *DCRedirectionHandlerImpl) PollWorkflowTaskQueue(
ctx context.Context,
request *workflowservice.PollWorkflowTaskQueueRequest,
) (resp *workflowservice.PollWorkflowTaskQueueResponse, retError error) {
var apiName = "PollWorkflowTaskQueue"
var err error
var cluster string
scope, startTime := handler.beforeCall(metrics.DCRedirectionPollWorkflowTaskQueueScope)
defer func() {
handler.afterCall(scope, startTime, cluster, &retError)
}()
err = handler.redirectionPolicy.WithNamespaceRedirect(ctx, request.GetNamespace(), apiName, func(targetDC string) error {
cluster = targetDC
switch {
case targetDC == handler.currentClusterName:
resp, err = handler.frontendHandler.PollWorkflowTaskQueue(ctx, request)
default:
remoteClient := handler.GetRemoteFrontendClient(targetDC)
resp, err = remoteClient.PollWorkflowTaskQueue(ctx, request)
}
return err
})
return resp, err
}
// QueryWorkflow API call
func (handler *DCRedirectionHandlerImpl) QueryWorkflow(
ctx context.Context,
request *workflowservice.QueryWorkflowRequest,
) (resp *workflowservice.QueryWorkflowResponse, retError error) {
var apiName = "QueryWorkflow"
var err error
var cluster string
scope, startTime := handler.beforeCall(metrics.DCRedirectionQueryWorkflowScope)
defer func() {
handler.afterCall(scope, startTime, cluster, &retError)
}()
err = handler.redirectionPolicy.WithNamespaceRedirect(ctx, request.GetNamespace(), apiName, func(targetDC string) error {
cluster = targetDC
switch {
case targetDC == handler.currentClusterName:
resp, err = handler.frontendHandler.QueryWorkflow(ctx, request)
default:
remoteClient := handler.GetRemoteFrontendClient(targetDC)
resp, err = remoteClient.QueryWorkflow(ctx, request)
}
return err
})
return resp, err
}
// RecordActivityTaskHeartbeat API call
func (handler *DCRedirectionHandlerImpl) RecordActivityTaskHeartbeat(
ctx context.Context,
request *workflowservice.RecordActivityTaskHeartbeatRequest,
) (resp *workflowservice.RecordActivityTaskHeartbeatResponse, retError error) {
var apiName = "RecordActivityTaskHeartbeat"
var err error
var cluster string
scope, startTime := handler.beforeCall(metrics.DCRedirectionRecordActivityTaskHeartbeatScope)
defer func() {
handler.afterCall(scope, startTime, cluster, &retError)
}()
token, err := handler.tokenSerializer.Deserialize(request.TaskToken)
if err != nil {
return nil, err
}
err = handler.redirectionPolicy.WithNamespaceIDRedirect(ctx, token.GetNamespaceId(), apiName, func(targetDC string) error {
cluster = targetDC
switch {
case targetDC == handler.currentClusterName:
resp, err = handler.frontendHandler.RecordActivityTaskHeartbeat(ctx, request)
default:
remoteClient := handler.GetRemoteFrontendClient(targetDC)
resp, err = remoteClient.RecordActivityTaskHeartbeat(ctx, request)
}
return err
})
return resp, err
}
// RecordActivityTaskHeartbeatById API call
func (handler *DCRedirectionHandlerImpl) RecordActivityTaskHeartbeatById(
ctx context.Context,
request *workflowservice.RecordActivityTaskHeartbeatByIdRequest,
) (resp *workflowservice.RecordActivityTaskHeartbeatByIdResponse, retError error) {
var apiName = "RecordActivityTaskHeartbeatById"
var err error
var cluster string
scope, startTime := handler.beforeCall(metrics.DCRedirectionRecordActivityTaskHeartbeatByIdScope)
defer func() {
handler.afterCall(scope, startTime, cluster, &retError)
}()
err = handler.redirectionPolicy.WithNamespaceRedirect(ctx, request.GetNamespace(), apiName, func(targetDC string) error {
cluster = targetDC
switch {
case targetDC == handler.currentClusterName:
resp, err = handler.frontendHandler.RecordActivityTaskHeartbeatById(ctx, request)
default:
remoteClient := handler.GetRemoteFrontendClient(targetDC)
resp, err = remoteClient.RecordActivityTaskHeartbeatById(ctx, request)
}
return err
})
return resp, err
}
// RequestCancelWorkflowExecution API call
func (handler *DCRedirectionHandlerImpl) RequestCancelWorkflowExecution(
ctx context.Context,
request *workflowservice.RequestCancelWorkflowExecutionRequest,
) (resp *workflowservice.RequestCancelWorkflowExecutionResponse, retError error) {
var apiName = "RequestCancelWorkflowExecution"
var err error
var cluster string
scope, startTime := handler.beforeCall(metrics.DCRedirectionRequestCancelWorkflowExecutionScope)
defer func() {
handler.afterCall(scope, startTime, cluster, &retError)
}()
err = handler.redirectionPolicy.WithNamespaceRedirect(ctx, request.GetNamespace(), apiName, func(targetDC string) error {
cluster = targetDC
switch {
case targetDC == handler.currentClusterName:
resp, err = handler.frontendHandler.RequestCancelWorkflowExecution(ctx, request)
default:
remoteClient := handler.GetRemoteFrontendClient(targetDC)
resp, err = remoteClient.RequestCancelWorkflowExecution(ctx, request)
}
return err
})
return resp, err
}
// ResetStickyTaskQueue API call
func (handler *DCRedirectionHandlerImpl) ResetStickyTaskQueue(
ctx context.Context,
request *workflowservice.ResetStickyTaskQueueRequest,
) (resp *workflowservice.ResetStickyTaskQueueResponse, retError error) {
var apiName = "ResetStickyTaskQueue"
var err error
var cluster string
scope, startTime := handler.beforeCall(metrics.DCRedirectionResetStickyTaskQueueScope)
defer func() {
handler.afterCall(scope, startTime, cluster, &retError)
}()
err = handler.redirectionPolicy.WithNamespaceRedirect(ctx, request.GetNamespace(), apiName, func(targetDC string) error {
cluster = targetDC
switch {
case targetDC == handler.currentClusterName:
resp, err = handler.frontendHandler.ResetStickyTaskQueue(ctx, request)
default:
remoteClient := handler.GetRemoteFrontendClient(targetDC)
resp, err = remoteClient.ResetStickyTaskQueue(ctx, request)
}
return err
})
return resp, err
}
// ResetWorkflowExecution API call
func (handler *DCRedirectionHandlerImpl) ResetWorkflowExecution(
ctx context.Context,
request *workflowservice.ResetWorkflowExecutionRequest,
) (resp *workflowservice.ResetWorkflowExecutionResponse, retError error) {
var apiName = "ResetWorkflowExecution"
var err error
var cluster string
scope, startTime := handler.beforeCall(metrics.DCRedirectionResetWorkflowExecutionScope)
defer func() {
handler.afterCall(scope, startTime, cluster, &retError)
}()
err = handler.redirectionPolicy.WithNamespaceRedirect(ctx, request.GetNamespace(), apiName, func(targetDC string) error {
cluster = targetDC
switch {
case targetDC == handler.currentClusterName:
resp, err = handler.frontendHandler.ResetWorkflowExecution(ctx, request)
default:
remoteClient := handler.GetRemoteFrontendClient(targetDC)
resp, err = remoteClient.ResetWorkflowExecution(ctx, request)
}
return err
})
return resp, err
}
// RespondActivityTaskCanceled API call
func (handler *DCRedirectionHandlerImpl) RespondActivityTaskCanceled(
ctx context.Context,
request *workflowservice.RespondActivityTaskCanceledRequest,
) (resp *workflowservice.RespondActivityTaskCanceledResponse, retError error) {
var apiName = "RespondActivityTaskCanceled"
var err error
var cluster string
scope, startTime := handler.beforeCall(metrics.DCRedirectionRespondActivityTaskCanceledScope)
defer func() {
handler.afterCall(scope, startTime, cluster, &retError)
}()
token, err := handler.tokenSerializer.Deserialize(request.TaskToken)
if err != nil {
return resp, err
}
err = handler.redirectionPolicy.WithNamespaceIDRedirect(ctx, token.GetNamespaceId(), apiName, func(targetDC string) error {
cluster = targetDC
switch {
case targetDC == handler.currentClusterName:
resp, err = handler.frontendHandler.RespondActivityTaskCanceled(ctx, request)
default:
remoteClient := handler.GetRemoteFrontendClient(targetDC)
resp, err = remoteClient.RespondActivityTaskCanceled(ctx, request)
}
return err
})
return resp, err
}
// RespondActivityTaskCanceledById API call
func (handler *DCRedirectionHandlerImpl) RespondActivityTaskCanceledById(
ctx context.Context,
request *workflowservice.RespondActivityTaskCanceledByIdRequest,
) (resp *workflowservice.RespondActivityTaskCanceledByIdResponse, retError error) {
var apiName = "RespondActivityTaskCanceledById"
var err error
var cluster string
scope, startTime := handler.beforeCall(metrics.DCRedirectionRespondActivityTaskCanceledByIdScope)
defer func() {
handler.afterCall(scope, startTime, cluster, &retError)
}()
err = handler.redirectionPolicy.WithNamespaceRedirect(ctx, request.GetNamespace(), apiName, func(targetDC string) error {
cluster = targetDC
switch {
case targetDC == handler.currentClusterName:
resp, err = handler.frontendHandler.RespondActivityTaskCanceledById(ctx, request)
default:
remoteClient := handler.GetRemoteFrontendClient(targetDC)
resp, err = remoteClient.RespondActivityTaskCanceledById(ctx, request)
}
return err
})
return resp, err
}
// RespondActivityTaskCompleted API call
func (handler *DCRedirectionHandlerImpl) RespondActivityTaskCompleted(
ctx context.Context,
request *workflowservice.RespondActivityTaskCompletedRequest,
) (resp *workflowservice.RespondActivityTaskCompletedResponse, retError error) {
var apiName = "RespondActivityTaskCompleted"
var err error
var cluster string
scope, startTime := handler.beforeCall(metrics.DCRedirectionRespondActivityTaskCompletedScope)
defer func() {
handler.afterCall(scope, startTime, cluster, &retError)
}()
token, err := handler.tokenSerializer.Deserialize(request.TaskToken)
if err != nil {
return resp, err
}
err = handler.redirectionPolicy.WithNamespaceIDRedirect(ctx, token.GetNamespaceId(), apiName, func(targetDC string) error {
cluster = targetDC
switch {
case targetDC == handler.currentClusterName:
resp, err = handler.frontendHandler.RespondActivityTaskCompleted(ctx, request)
default:
remoteClient := handler.GetRemoteFrontendClient(targetDC)
resp, err = remoteClient.RespondActivityTaskCompleted(ctx, request)
}
return err
})
return resp, err
}
// RespondActivityTaskCompletedById API call
func (handler *DCRedirectionHandlerImpl) RespondActivityTaskCompletedById(
ctx context.Context,
request *workflowservice.RespondActivityTaskCompletedByIdRequest,
) (resp *workflowservice.RespondActivityTaskCompletedByIdResponse, retError error) {
var apiName = "RespondActivityTaskCompletedById"
var err error
var cluster string
scope, startTime := handler.beforeCall(metrics.DCRedirectionRespondActivityTaskCompletedByIdScope)
defer func() {
handler.afterCall(scope, startTime, cluster, &retError)
}()
err = handler.redirectionPolicy.WithNamespaceRedirect(ctx, request.GetNamespace(), apiName, func(targetDC string) error {
cluster = targetDC
switch {
case targetDC == handler.currentClusterName:
resp, err = handler.frontendHandler.RespondActivityTaskCompletedById(ctx, request)
default:
remoteClient := handler.GetRemoteFrontendClient(targetDC)
resp, err = remoteClient.RespondActivityTaskCompletedById(ctx, request)
}
return err
})
return resp, err
}
// RespondActivityTaskFailed API call
func (handler *DCRedirectionHandlerImpl) RespondActivityTaskFailed(
ctx context.Context,
request *workflowservice.RespondActivityTaskFailedRequest,
) (resp *workflowservice.RespondActivityTaskFailedResponse, retError error) {
var apiName = "RespondActivityTaskFailed"
var err error
var cluster string
scope, startTime := handler.beforeCall(metrics.DCRedirectionRespondActivityTaskFailedScope)
defer func() {
handler.afterCall(scope, startTime, cluster, &retError)
}()
token, err := handler.tokenSerializer.Deserialize(request.TaskToken)
if err != nil {
return resp, err
}
err = handler.redirectionPolicy.WithNamespaceIDRedirect(ctx, token.GetNamespaceId(), apiName, func(targetDC string) error {
cluster = targetDC
switch {
case targetDC == handler.currentClusterName:
resp, err = handler.frontendHandler.RespondActivityTaskFailed(ctx, request)
default:
remoteClient := handler.GetRemoteFrontendClient(targetDC)
resp, err = remoteClient.RespondActivityTaskFailed(ctx, request)
}
return err
})
return resp, err
}
// RespondActivityTaskFailedById API call
func (handler *DCRedirectionHandlerImpl) RespondActivityTaskFailedById(
ctx context.Context,
request *workflowservice.RespondActivityTaskFailedByIdRequest,
) (resp *workflowservice.RespondActivityTaskFailedByIdResponse, retError error) {
var apiName = "RespondActivityTaskFailedById"
var err error
var cluster string
scope, startTime := handler.beforeCall(metrics.DCRedirectionRespondActivityTaskFailedByIdScope)
defer func() {
handler.afterCall(scope, startTime, cluster, &retError)
}()
err = handler.redirectionPolicy.WithNamespaceRedirect(ctx, request.GetNamespace(), apiName, func(targetDC string) error {
cluster = targetDC
switch {
case targetDC == handler.currentClusterName:
resp, err = handler.frontendHandler.RespondActivityTaskFailedById(ctx, request)
default:
remoteClient := handler.GetRemoteFrontendClient(targetDC)
resp, err = remoteClient.RespondActivityTaskFailedById(ctx, request)
}
return err
})
return resp, err
}
// RespondWorkflowTaskCompleted API call
func (handler *DCRedirectionHandlerImpl) RespondWorkflowTaskCompleted(
ctx context.Context,
request *workflowservice.RespondWorkflowTaskCompletedRequest,
) (resp *workflowservice.RespondWorkflowTaskCompletedResponse, retError error) {
var apiName = "RespondWorkflowTaskCompleted"
var err error
var cluster string
scope, startTime := handler.beforeCall(metrics.DCRedirectionRespondWorkflowTaskCompletedScope)
defer func() {
handler.afterCall(scope, startTime, cluster, &retError)
}()
token, err := handler.tokenSerializer.Deserialize(request.TaskToken)
if err != nil {
return nil, err
}
err = handler.redirectionPolicy.WithNamespaceIDRedirect(ctx, token.GetNamespaceId(), apiName, func(targetDC string) error {
cluster = targetDC
switch {
case targetDC == handler.currentClusterName:
resp, err = handler.frontendHandler.RespondWorkflowTaskCompleted(ctx, request)
default:
remoteClient := handler.GetRemoteFrontendClient(targetDC)
resp, err = remoteClient.RespondWorkflowTaskCompleted(ctx, request)
}
return err
})
return resp, err
}
// RespondWorkflowTaskFailed API call
func (handler *DCRedirectionHandlerImpl) RespondWorkflowTaskFailed(
ctx context.Context,
request *workflowservice.RespondWorkflowTaskFailedRequest,
) (resp *workflowservice.RespondWorkflowTaskFailedResponse, retError error) {
var apiName = "RespondWorkflowTaskFailed"
var err error
var cluster string
scope, startTime := handler.beforeCall(metrics.DCRedirectionRespondWorkflowTaskFailedScope)
defer func() {
handler.afterCall(scope, startTime, cluster, &retError)
}()
token, err := handler.tokenSerializer.Deserialize(request.TaskToken)
if err != nil {
return resp, err
}
err = handler.redirectionPolicy.WithNamespaceIDRedirect(ctx, token.GetNamespaceId(), apiName, func(targetDC string) error {
cluster = targetDC
switch {
case targetDC == handler.currentClusterName:
resp, err = handler.frontendHandler.RespondWorkflowTaskFailed(ctx, request)
default:
remoteClient := handler.GetRemoteFrontendClient(targetDC)
resp, err = remoteClient.RespondWorkflowTaskFailed(ctx, request)
}
return err
})
return resp, err
}
// RespondQueryTaskCompleted API call
func (handler *DCRedirectionHandlerImpl) RespondQueryTaskCompleted(
ctx context.Context,
request *workflowservice.RespondQueryTaskCompletedRequest,
) (resp *workflowservice.RespondQueryTaskCompletedResponse, retError error) {
var apiName = "RespondQueryTaskCompleted"
var err error
var cluster string
scope, startTime := handler.beforeCall(metrics.DCRedirectionRespondQueryTaskCompletedScope)
defer func() {
handler.afterCall(scope, startTime, cluster, &retError)
}()
token, err := handler.tokenSerializer.DeserializeQueryTaskToken(request.TaskToken)
if err != nil {
return resp, err
}
err = handler.redirectionPolicy.WithNamespaceIDRedirect(ctx, token.GetNamespaceId(), apiName, func(targetDC string) error {
cluster = targetDC
switch {
case targetDC == handler.currentClusterName:
resp, err = handler.frontendHandler.RespondQueryTaskCompleted(ctx, request)
default:
remoteClient := handler.GetRemoteFrontendClient(targetDC)
resp, err = remoteClient.RespondQueryTaskCompleted(ctx, request)
}
return err
})
return resp, err
}
// SignalWithStartWorkflowExecution API call
func (handler *DCRedirectionHandlerImpl) SignalWithStartWorkflowExecution(
ctx context.Context,
request *workflowservice.SignalWithStartWorkflowExecutionRequest,
) (resp *workflowservice.SignalWithStartWorkflowExecutionResponse, retError error) {
var apiName = "SignalWithStartWorkflowExecution"
var err error
var cluster string
scope, startTime := handler.beforeCall(metrics.DCRedirectionSignalWithStartWorkflowExecutionScope)
defer func() {
handler.afterCall(scope, startTime, cluster, &retError)
}()
err = handler.redirectionPolicy.WithNamespaceRedirect(ctx, request.GetNamespace(), apiName, func(targetDC string) error {
cluster = targetDC
switch {
case targetDC == handler.currentClusterName:
resp, err = handler.frontendHandler.SignalWithStartWorkflowExecution(ctx, request)
default:
remoteClient := handler.GetRemoteFrontendClient(targetDC)
resp, err = remoteClient.SignalWithStartWorkflowExecution(ctx, request)
}
return err
})
return resp, err
}
// SignalWorkflowExecution API call
func (handler *DCRedirectionHandlerImpl) SignalWorkflowExecution(
ctx context.Context,
request *workflowservice.SignalWorkflowExecutionRequest,
) (resp *workflowservice.SignalWorkflowExecutionResponse, retError error) {
var apiName = "SignalWorkflowExecution"
var err error
var cluster string
scope, startTime := handler.beforeCall(metrics.DCRedirectionSignalWorkflowExecutionScope)
defer func() {
handler.afterCall(scope, startTime, cluster, &retError)
}()
err = handler.redirectionPolicy.WithNamespaceRedirect(ctx, request.GetNamespace(), apiName, func(targetDC string) error {
cluster = targetDC
switch {
case targetDC == handler.currentClusterName:
resp, err = handler.frontendHandler.SignalWorkflowExecution(ctx, request)
default:
remoteClient := handler.GetRemoteFrontendClient(targetDC)
resp, err = remoteClient.SignalWorkflowExecution(ctx, request)
}
return err
})
return resp, err
}
// StartWorkflowExecution API call
func (handler *DCRedirectionHandlerImpl) StartWorkflowExecution(
ctx context.Context,
request *workflowservice.StartWorkflowExecutionRequest,
) (resp *workflowservice.StartWorkflowExecutionResponse, retError error) {
var apiName = "StartWorkflowExecution"
var err error
var cluster string
scope, startTime := handler.beforeCall(metrics.DCRedirectionStartWorkflowExecutionScope)
defer func() {
handler.afterCall(scope, startTime, cluster, &retError)
}()
err = handler.redirectionPolicy.WithNamespaceRedirect(ctx, request.GetNamespace(), apiName, func(targetDC string) error {
cluster = targetDC
switch {
case targetDC == handler.currentClusterName:
resp, err = handler.frontendHandler.StartWorkflowExecution(ctx, request)
default:
remoteClient := handler.GetRemoteFrontendClient(targetDC)
resp, err = remoteClient.StartWorkflowExecution(ctx, request)
}
return err
})
return resp, err
}
// TerminateWorkflowExecution API call
func (handler *DCRedirectionHandlerImpl) TerminateWorkflowExecution(
ctx context.Context,
request *workflowservice.TerminateWorkflowExecutionRequest,
) (resp *workflowservice.TerminateWorkflowExecutionResponse, retError error) {
var apiName = "TerminateWorkflowExecution"
var err error
var cluster string
scope, startTime := handler.beforeCall(metrics.DCRedirectionTerminateWorkflowExecutionScope)
defer func() {
handler.afterCall(scope, startTime, cluster, &retError)
}()
err = handler.redirectionPolicy.WithNamespaceRedirect(ctx, request.GetNamespace(), apiName, func(targetDC string) error {
cluster = targetDC
switch {
case targetDC == handler.currentClusterName:
resp, err = handler.frontendHandler.TerminateWorkflowExecution(ctx, request)
default:
remoteClient := handler.GetRemoteFrontendClient(targetDC)
resp, err = remoteClient.TerminateWorkflowExecution(ctx, request)
}
return err
})
return resp, err
}
// ListTaskQueuePartitions API call
func (handler *DCRedirectionHandlerImpl) ListTaskQueuePartitions(
ctx context.Context,
request *workflowservice.ListTaskQueuePartitionsRequest,
) (resp *workflowservice.ListTaskQueuePartitionsResponse, retError error) {
var apiName = "ListTaskQueuePartitions"
var err error
var cluster string
scope, startTime := handler.beforeCall(metrics.DCRedirectionListTaskQueuePartitionsScope)
defer func() {
handler.afterCall(scope, startTime, cluster, &retError)
}()
err = handler.redirectionPolicy.WithNamespaceRedirect(ctx, request.GetNamespace(), apiName, func(targetDC string) error {
cluster = targetDC
switch {
case targetDC == handler.currentClusterName:
resp, err = handler.frontendHandler.ListTaskQueuePartitions(ctx, request)
default:
remoteClient := handler.GetRemoteFrontendClient(targetDC)
resp, err = remoteClient.ListTaskQueuePartitions(ctx, request)
}
return err
})
return resp, err
}
// GetClusterInfo API call
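// This call is always served by the local frontend handler and is not redirected to another cluster.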
func (handler *DCRedirectionHandlerImpl) GetClusterInfo(
ctx context.Context,
request *workflowservice.GetClusterInfoRequest,
) (*workflowservice.GetClusterInfoResponse, error) {
return handler.frontendHandler.GetClusterInfo(ctx, request)
}
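// beforeCall captures the metrics scope and the call start time so that afterCall can emit redirection metrics once the wrapped API call returns.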
func (handler *DCRedirectionHandlerImpl) beforeCall(
scope int,
) (metrics.Scope, time.Time) {
return handler.GetMetricsClient().Scope(scope), handler.GetTimeSource().Now()
}
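// afterCall captures panics, tags the metrics scope with the target cluster, and records redirection request counts, latency, and failures for the call.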
func (handler *DCRedirectionHandlerImpl) afterCall(
scope metrics.Scope,
startTime time.Time,
cluster string,
retError *error,
) {
log.CapturePanic(handler.GetLogger(), retError)
scope = scope.Tagged(metrics.TargetClusterTag(cluster))
scope.IncCounter(metrics.ClientRedirectionRequests)
scope.RecordTimer(metrics.ClientRedirectionLatency, handler.GetTimeSource().Now().Sub(startTime))
if *retError != nil {
scope.IncCounter(metrics.ClientRedirectionFailures)
}
}
| 1 | 10,573 | nit: maybe prefer `resp = &workflowservice.PollWorkflowTaskQueueResponse{}` over adding another function exit points especially for non-error cases? | temporalio-temporal | go |
@@ -107,6 +107,10 @@ module Mongoid
#
# @since 6.0.0
def client
+ client_options = send(:client_options)
+ if client_options[:read].is_a?(Symbol)
+ client_options = client_options.merge(read: {mode: client_options[:read]})
+ end
@client ||= (client = Clients.with_name(client_name)
client = client.use(database_name) if database_name_option
client.with(client_options)) | 1 | module Mongoid
# Object encapsulating logic for setting/getting a collection and database name
# and a client with particular options to use when persisting models.
#
# @since 6.0.0
class PersistenceContext
extend Forwardable
# Delegate the cluster method to the client.
def_delegators :client, :cluster
# Delegate the storage options method to the object.
def_delegators :@object, :storage_options
# The options defining this persistence context.
#
# @return [ Hash ] The persistence context options.
#
# @since 6.0.0
attr_reader :options
# Extra options in addition to driver client options that determine the
# persistence context.
#
# @return [ Array<Symbol> ] The list of extra options besides client options
# that determine the persistence context.
#
# @since 6.0.0
EXTRA_OPTIONS = [ :client,
:collection
].freeze
# The full list of valid persistence context options.
#
# @return [ Array<Symbol> ] The full list of options defining the persistence
# context.
#
# @since 6.0.0
VALID_OPTIONS = ( Mongo::Client::VALID_OPTIONS + EXTRA_OPTIONS ).freeze
# Initialize the persistence context object.
#
# @example Create a new persistence context.
# PersistenceContext.new(model, collection: 'other')
#
# @param [ Object ] object The class or model instance for which a persistence context
# should be created.
# @param [ Hash ] opts The persistence context options.
#
# @since 6.0.0
def initialize(object, opts = {})
@object = object
set_options!(opts)
end
# Get the collection for this persistence context.
#
# @example Get the collection for this persistence context.
# context.collection
#
# @param [ Object ] parent The parent object whose collection name is used
# instead of this persistence context's collection name.
#
# @return [ Mongo::Collection ] The collection for this persistence
# context.
#
# @since 6.0.0
def collection(parent = nil)
parent ? parent.collection.with(client_options) : client[collection_name.to_sym]
end
# Get the collection name for this persistence context.
#
# @example Get the collection name for this persistence context.
# context.collection_name
#
# @return [ String ] The collection name for this persistence
# context.
#
# @since 6.0.0
def collection_name
@collection_name ||= (__evaluate__(options[:collection] ||
storage_options[:collection]))
end
# Get the database name for this persistence context.
#
# @example Get the database name for this persistence context.
# context.database_name
#
# @return [ String ] The database name for this persistence
# context.
#
# @since 6.0.0
def database_name
__evaluate__(database_name_option) || client.database.name
end
# Get the client for this persistence context.
#
# @example Get the client for this persistence context.
# context.client
#
# @return [ Mongo::Client ] The client for this persistence
# context.
#
# @since 6.0.0
def client
@client ||= (client = Clients.with_name(client_name)
client = client.use(database_name) if database_name_option
client.with(client_options))
end
# Determine if this persistence context is equal to another.
#
# @example Compare two persistence contexts.
# context == other_context
#
# @param [ Object ] other The object to be compared with this one.
#
# @return [ true, false ] Whether the two persistence contexts are equal.
#
# @since 6.0.0
def ==(other)
return false unless other.is_a?(PersistenceContext)
options == other.options
end
private
def client_name
@client_name ||= options[:client] ||
Threaded.client_override ||
storage_options && __evaluate__(storage_options[:client])
end
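# Validate the provided options against VALID_OPTIONS, raising
# Errors::InvalidPersistenceOption for unrecognized keys and skipping
# options whose values are nil.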
def set_options!(opts)
@options ||= opts.each.reduce({}) do |_options, (key, value)|
unless VALID_OPTIONS.include?(key.to_sym)
raise Errors::InvalidPersistenceOption.new(key.to_sym, VALID_OPTIONS)
end
value ? _options.merge!(key => value) : _options
end
end
def __evaluate__(name)
return nil unless name
name.respond_to?(:call) ? name.call.to_sym : name.to_sym
end
def client_options
@client_options ||= options.select do |k, v|
Mongo::Client::VALID_OPTIONS.include?(k.to_sym)
end
end
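# Editor's sketch (not part of the original file): the read-preference normalization
# from the patch above could instead be applied here, so the Symbol check runs only
# once when the options are first cached, e.g.
#   opts = options.select { |k, _v| Mongo::Client::VALID_OPTIONS.include?(k.to_sym) }
#   opts[:read] = { mode: opts[:read] } if opts[:read].is_a?(Symbol)
#   @client_options ||= opts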
def database_name_option
@database_name_option ||= options[:database] ||
Threaded.database_override ||
storage_options && storage_options[:database]
end
class << self
# Set the persistence context for a particular class or model instance.
#
# @example Set the persistence context for a class or model instance.
# PersistenceContext.set(model)
#
# @param [ Object ] object The class or model instance.
# @param [ Hash, Mongoid::PersistenceContext ] options_or_context The persistence
# options or a persistence context object.
#
# @return [ Mongoid::PersistenceContext ] The persistence context for the object.
#
# @since 6.0.0
def set(object, options_or_context)
context = PersistenceContext.new(object, options_or_context.is_a?(PersistenceContext) ?
options_or_context.options : options_or_context)
Thread.current["[mongoid][#{object.object_id}]:context"] = context
end
# Get the persistence context for a particular class or model instance.
#
# @example Get the persistence context for a class or model instance.
# PersistenceContext.get(model)
#
# @param [ Object ] object The class or model instance.
#
# @return [ Mongoid::PersistenceContext ] The persistence context for the object.
#
# @since 6.0.0
def get(object)
Thread.current["[mongoid][#{object.object_id}]:context"]
end
# Clear the persistence context for a particular class or model instance.
#
# @example Clear the persistence context for a class or model instance.
# PersistenceContext.clear(model)
#
# @param [ Class, Object ] object The class or model instance.
# @param [ Mongo::Cluster ] cluster The original cluster before this context was used.
#
# @since 6.0.0
def clear(object, cluster = nil)
if context = get(object)
context.client.close unless (context.cluster.equal?(cluster) || cluster.nil?)
end
ensure
Thread.current["[mongoid][#{object.object_id}]:context"] = nil
end
end
end
end
| 1 | 11,891 | Do we specifically need to keep the read preference as a symbol for use elsewhere? If not, I think it would be cleaner to just modify the options before caching them in the `client_options` method so that we don't do this check every time. If we do need it a a symbol elsewhere, I'd suggest either putting `return @client if @client` at the top of the method (and changing the assignment below from `||=` to `=`) or moving this within the parenthetical block in the last assignment so that we don't repeat this work. | mongodb-mongoid | rb |
@@ -69,7 +69,8 @@ module.exports = class Webcam extends Plugin {
'picture'
],
mirror: true,
- facingMode: 'user'
+ facingMode: 'user',
+ preferredMimeType: null
}
// merge default options with the ones set by user | 1 | const { h } = require('preact')
const { Plugin } = require('@uppy/core')
const Translator = require('@uppy/utils/lib/Translator')
const getFileTypeExtension = require('@uppy/utils/lib/getFileTypeExtension')
const canvasToBlob = require('@uppy/utils/lib/canvasToBlob')
const supportsMediaRecorder = require('./supportsMediaRecorder')
const CameraIcon = require('./CameraIcon')
const CameraScreen = require('./CameraScreen')
const PermissionsScreen = require('./PermissionsScreen')
// Setup getUserMedia, with polyfill for older browsers
// Adapted from: https://developer.mozilla.org/en-US/docs/Web/API/MediaDevices/getUserMedia
function getMediaDevices () {
// eslint-disable-next-line compat/compat
if (navigator.mediaDevices && navigator.mediaDevices.getUserMedia) {
// eslint-disable-next-line compat/compat
return navigator.mediaDevices
}
const getUserMedia = navigator.mozGetUserMedia || navigator.webkitGetUserMedia
if (!getUserMedia) {
return null
}
return {
getUserMedia (opts) {
return new Promise((resolve, reject) => {
getUserMedia.call(navigator, opts, resolve, reject)
})
}
}
}
/**
* Webcam
*/
module.exports = class Webcam extends Plugin {
static VERSION = require('../package.json').version
constructor (uppy, opts) {
super(uppy, opts)
this.mediaDevices = getMediaDevices()
this.supportsUserMedia = !!this.mediaDevices
this.protocol = location.protocol.match(/https/i) ? 'https' : 'http'
this.id = this.opts.id || 'Webcam'
this.title = this.opts.title || 'Camera'
this.type = 'acquirer'
this.icon = CameraIcon
this.defaultLocale = {
strings: {
smile: 'Smile!',
takePicture: 'Take a picture',
startRecording: 'Begin video recording',
stopRecording: 'Stop video recording',
allowAccessTitle: 'Please allow access to your camera',
allowAccessDescription: 'In order to take pictures or record video with your camera, please allow camera access for this site.'
}
}
// set default options
const defaultOptions = {
onBeforeSnapshot: () => Promise.resolve(),
countdown: false,
modes: [
'video-audio',
'video-only',
'audio-only',
'picture'
],
mirror: true,
facingMode: 'user'
}
// merge default options with the ones set by user
this.opts = Object.assign({}, defaultOptions, opts)
// i18n
this.translator = new Translator([ this.defaultLocale, this.uppy.locale, this.opts.locale ])
this.i18n = this.translator.translate.bind(this.translator)
this.i18nArray = this.translator.translateArray.bind(this.translator)
this.install = this.install.bind(this)
this.setPluginState = this.setPluginState.bind(this)
this.render = this.render.bind(this)
// Camera controls
this.start = this.start.bind(this)
this.stop = this.stop.bind(this)
this.takeSnapshot = this.takeSnapshot.bind(this)
this.startRecording = this.startRecording.bind(this)
this.stopRecording = this.stopRecording.bind(this)
this.oneTwoThreeSmile = this.oneTwoThreeSmile.bind(this)
this.focus = this.focus.bind(this)
this.webcamActive = false
if (this.opts.countdown) {
this.opts.onBeforeSnapshot = this.oneTwoThreeSmile
}
}
isSupported () {
return !!this.mediaDevices
}
getConstraints () {
const acceptsAudio = this.opts.modes.indexOf('video-audio') !== -1 ||
this.opts.modes.indexOf('audio-only') !== -1
const acceptsVideo = this.opts.modes.indexOf('video-audio') !== -1 ||
this.opts.modes.indexOf('video-only') !== -1 ||
this.opts.modes.indexOf('picture') !== -1
return {
audio: acceptsAudio,
video: acceptsVideo ? { facingMode: this.opts.facingMode } : false
}
}
start () {
if (!this.isSupported()) {
return Promise.reject(new Error('Webcam access not supported'))
}
this.webcamActive = true
const constraints = this.getConstraints()
// ask user for access to their camera
return this.mediaDevices.getUserMedia(constraints)
.then((stream) => {
this.stream = stream
// this.streamSrc = URL.createObjectURL(this.stream)
this.setPluginState({
cameraReady: true
})
})
.catch((err) => {
this.setPluginState({
cameraError: err
})
})
}
startRecording () {
// TODO We can check here if any of the mime types listed in the
// mimeToExtensions map in Utils.js are supported, and prefer to use one of
// those.
// Right now we let the browser pick a type that it deems appropriate.
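// One possible approach (editor's sketch, not part of this file; the candidate list is illustrative):
//   const candidates = ['video/webm;codecs=vp9', 'video/webm', 'video/mp4']
//   const mimeType = candidates.find((type) => MediaRecorder.isTypeSupported(type))
//   this.recorder = new MediaRecorder(this.stream, mimeType ? { mimeType } : {})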
this.recorder = new MediaRecorder(this.stream)
this.recordingChunks = []
this.recorder.addEventListener('dataavailable', (event) => {
this.recordingChunks.push(event.data)
})
this.recorder.start()
this.setPluginState({
isRecording: true
})
}
stopRecording () {
const stopped = new Promise((resolve, reject) => {
this.recorder.addEventListener('stop', () => {
resolve()
})
this.recorder.stop()
})
return stopped.then(() => {
this.setPluginState({
isRecording: false
})
return this.getVideo()
}).then((file) => {
try {
this.uppy.addFile(file)
} catch (err) {
// Logging the error, except restrictions, which are handled in Core
if (!err.isRestriction) {
this.uppy.log(err)
}
}
}).then(() => {
this.recordingChunks = null
this.recorder = null
// Close the Dashboard panel if plugin is installed
// into Dashboard (could be other parent UI plugin)
// if (this.parent && this.parent.hideAllPanels) {
// this.parent.hideAllPanels()
// }
}, (error) => {
this.recordingChunks = null
this.recorder = null
throw error
})
}
stop () {
this.stream.getAudioTracks().forEach((track) => {
track.stop()
})
this.stream.getVideoTracks().forEach((track) => {
track.stop()
})
this.webcamActive = false
this.stream = null
}
getVideoElement () {
return this.el.querySelector('.uppy-Webcam-video')
}
oneTwoThreeSmile () {
return new Promise((resolve, reject) => {
let count = this.opts.countdown
let countDown = setInterval(() => {
if (!this.webcamActive) {
clearInterval(countDown)
this.captureInProgress = false
return reject(new Error('Webcam is not active'))
}
if (count > 0) {
this.uppy.info(`${count}...`, 'warning', 800)
count--
} else {
clearInterval(countDown)
this.uppy.info(this.i18n('smile'), 'success', 1500)
setTimeout(() => resolve(), 1500)
}
}, 1000)
})
}
takeSnapshot () {
if (this.captureInProgress) return
this.captureInProgress = true
this.opts.onBeforeSnapshot().catch((err) => {
const message = typeof err === 'object' ? err.message : err
this.uppy.info(message, 'error', 5000)
return Promise.reject(new Error(`onBeforeSnapshot: ${message}`))
}).then(() => {
return this.getImage()
}).then((tagFile) => {
this.captureInProgress = false
// Close the Dashboard panel if plugin is installed
// into Dashboard (could be other parent UI plugin)
// if (this.parent && this.parent.hideAllPanels) {
// this.parent.hideAllPanels()
// }
try {
this.uppy.addFile(tagFile)
} catch (err) {
// Logging the error, exept restrictions, which is handled in Core
if (!err.isRestriction) {
this.uppy.log(err)
}
}
}, (error) => {
this.captureInProgress = false
throw error
})
}
getImage () {
const video = this.getVideoElement()
if (!video) {
return Promise.reject(new Error('No video element found, likely due to the Webcam tab being closed.'))
}
const name = `cam-${Date.now()}.jpg`
const mimeType = 'image/jpeg'
const width = video.videoWidth
const height = video.videoHeight
// const scaleH = this.opts.mirror ? -1 : 1 // Set horizontal scale to -1 if flip horizontal
// const scaleV = 1
// const posX = this.opts.mirror ? width * -1 : 0 // Set x position to -100% if flip horizontal
// const posY = 0
const canvas = document.createElement('canvas')
canvas.width = width
canvas.height = height
const ctx = canvas.getContext('2d')
ctx.drawImage(video, 0, 0)
// ctx.save() // Save the current state
// ctx.scale(scaleH, scaleV) // Set scale to flip the image
// ctx.drawImage(video, posX, posY, width, height) // draw the image
// ctx.restore() // Restore the last saved state
return canvasToBlob(canvas, mimeType).then((blob) => {
return {
source: this.id,
name: name,
data: new Blob([blob], { type: mimeType }),
type: mimeType
}
})
}
getVideo () {
const mimeType = this.recordingChunks[0].type
const fileExtension = getFileTypeExtension(mimeType)
if (!fileExtension) {
return Promise.reject(new Error(`Could not retrieve recording: Unsupported media type "${mimeType}"`))
}
const name = `webcam-${Date.now()}.${fileExtension}`
const blob = new Blob(this.recordingChunks, { type: mimeType })
const file = {
source: this.id,
name: name,
data: new Blob([blob], { type: mimeType }),
type: mimeType
}
return Promise.resolve(file)
}
focus () {
if (!this.opts.countdown) return
setTimeout(() => {
this.uppy.info(this.i18n('smile'), 'success', 1500)
}, 1000)
}
render (state) {
if (!this.webcamActive) {
this.start()
}
const webcamState = this.getPluginState()
if (!webcamState.cameraReady) {
return <PermissionsScreen
icon={CameraIcon}
i18n={this.i18n} />
}
return <CameraScreen
{...webcamState}
onSnapshot={this.takeSnapshot}
onStartRecording={this.startRecording}
onStopRecording={this.stopRecording}
onFocus={this.focus}
onStop={this.stop}
i18n={this.i18n}
modes={this.opts.modes}
supportsRecording={supportsMediaRecorder()}
recording={webcamState.isRecording}
mirror={this.opts.mirror}
src={this.stream} />
}
install () {
this.setPluginState({
cameraReady: false
})
const target = this.opts.target
if (target) {
this.mount(target, this)
}
}
uninstall () {
if (this.stream) {
this.stop()
}
this.unmount()
}
}
| 1 | 12,235 | Since this is for video only, should it be called `preferredVideoMimeType`? If we add it for pictures later, it will likely need to be a different option. | transloadit-uppy | js |
@@ -398,6 +398,8 @@ static bool ImageLayoutMatches(const VkImageAspectFlags aspect_mask, VkImageLayo
// Utility type for ForRange callbacks
struct LayoutUseCheckAndMessage {
+ using LayoutEntry = image_layout_map::ImageSubresourceLayoutMap::LayoutEntry;
+ using RangeGenerator = image_layout_map::ImageSubresourceLayoutMap::LayoutEntry;
const static VkImageAspectFlags kDepthOrStencil = VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT;
const ImageSubresourceLayoutMap *layout_map;
const VkImageAspectFlags aspect_mask; | 1 | /* Copyright (c) 2015-2021 The Khronos Group Inc.
* Copyright (c) 2015-2021 Valve Corporation
* Copyright (c) 2015-2021 LunarG, Inc.
* Copyright (C) 2015-2021 Google Inc.
* Modifications Copyright (C) 2020-2021 Advanced Micro Devices, Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Author: Mark Lobodzinski <[email protected]>
* Author: Dave Houlton <[email protected]>
* Shannon McPherson <[email protected]>
* Author: Tobias Hector <[email protected]>
*/
#include <cmath>
#include <set>
#include <sstream>
#include <string>
#include <iostream>
#include "vk_enum_string_helper.h"
#include "vk_format_utils.h"
#include "vk_layer_data.h"
#include "vk_layer_utils.h"
#include "vk_layer_logging.h"
#include "vk_typemap_helper.h"
#include "chassis.h"
#include "core_validation.h"
#include "core_error_location.h"
#include "shader_validation.h"
#include "descriptor_sets.h"
#include "buffer_validation.h"
#include "sync_vuid_maps.h"
// All VUIDs from copy_bufferimage_to_imagebuffer_common.txt
static const char *GetBufferImageCopyCommandVUID(std::string id, bool image_to_buffer, bool copy2) {
// clang-format off
static const std::map<std::string, std::array<const char *, 4>> copy_imagebuffer_vuid = {
{"00193", {
"VUID-vkCmdCopyBufferToImage-bufferOffset-00193", // !copy2 & !image_to_buffer
"VUID-vkCmdCopyImageToBuffer-bufferOffset-00193", // !copy2 & image_to_buffer
"VUID-VkCopyBufferToImageInfo2KHR-bufferOffset-00193", // copy2 & !image_to_buffer
"VUID-VkCopyImageToBufferInfo2KHR-bufferOffset-00193", // copy2 & image_to_buffer
}},
{"01558", {
"VUID-vkCmdCopyBufferToImage-bufferOffset-01558",
"VUID-vkCmdCopyImageToBuffer-bufferOffset-01558",
"VUID-VkCopyBufferToImageInfo2KHR-bufferOffset-01558",
"VUID-VkCopyImageToBufferInfo2KHR-bufferOffset-01558",
}},
{"01559", {
"VUID-vkCmdCopyBufferToImage-bufferOffset-01559",
"VUID-vkCmdCopyImageToBuffer-bufferOffset-01559",
"VUID-VkCopyBufferToImageInfo2KHR-bufferOffset-01559",
"VUID-VkCopyImageToBufferInfo2KHR-bufferOffset-01559",
}},
{"00197", {
"VUID-vkCmdCopyBufferToImage-imageOffset-00197",
"VUID-vkCmdCopyImageToBuffer-imageOffset-00197",
"VUID-VkCopyBufferToImageInfo2KHR-imageOffset-00197",
"VUID-VkCopyImageToBufferInfo2KHR-imageOffset-00197",
}},
{"00198", {
"VUID-vkCmdCopyBufferToImage-imageOffset-00198",
"VUID-vkCmdCopyImageToBuffer-imageOffset-00198",
"VUID-VkCopyBufferToImageInfo2KHR-imageOffset-00198",
"VUID-VkCopyImageToBufferInfo2KHR-imageOffset-00198",
}},
{"00199", {
"VUID-vkCmdCopyBufferToImage-srcImage-00199",
"VUID-vkCmdCopyImageToBuffer-srcImage-00199",
"VUID-VkCopyBufferToImageInfo2KHR-srcImage-00199",
"VUID-VkCopyImageToBufferInfo2KHR-srcImage-00199",
}},
{"00200", {
"VUID-vkCmdCopyBufferToImage-imageOffset-00200",
"VUID-vkCmdCopyImageToBuffer-imageOffset-00200",
"VUID-VkCopyBufferToImageInfo2KHR-imageOffset-00200",
"VUID-VkCopyImageToBufferInfo2KHR-imageOffset-00200",
}},
{"00201", {
"VUID-vkCmdCopyBufferToImage-srcImage-00201",
"VUID-vkCmdCopyImageToBuffer-srcImage-00201",
"VUID-VkCopyBufferToImageInfo2KHR-srcImage-00201",
"VUID-VkCopyImageToBufferInfo2KHR-srcImage-00201",
}},
{"00203", {
"VUID-vkCmdCopyBufferToImage-bufferRowLength-00203",
"VUID-vkCmdCopyImageToBuffer-bufferRowLength-00203",
"VUID-VkCopyBufferToImageInfo2KHR-bufferRowLength-00203",
"VUID-VkCopyImageToBufferInfo2KHR-bufferRowLength-00203",
}},
{"00204", {
"VUID-vkCmdCopyBufferToImage-bufferImageHeight-00204",
"VUID-vkCmdCopyImageToBuffer-bufferImageHeight-00204",
"VUID-VkCopyBufferToImageInfo2KHR-bufferImageHeight-00204",
"VUID-VkCopyImageToBufferInfo2KHR-bufferImageHeight-00204",
}},
{"00205", {
"VUID-vkCmdCopyBufferToImage-imageOffset-00205",
"VUID-vkCmdCopyImageToBuffer-imageOffset-00205",
"VUID-VkCopyBufferToImageInfo2KHR-imageOffset-00205",
"VUID-VkCopyImageToBufferInfo2KHR-imageOffset-00205",
}},
{"00206", {
"VUID-vkCmdCopyBufferToImage-bufferOffset-00206",
"VUID-vkCmdCopyImageToBuffer-bufferOffset-00206",
"VUID-VkCopyBufferToImageInfo2KHR-bufferOffset-00206",
"VUID-VkCopyImageToBufferInfo2KHR-bufferOffset-00206",
}},
{"00207", {
"VUID-vkCmdCopyBufferToImage-imageExtent-00207",
"VUID-vkCmdCopyImageToBuffer-imageExtent-00207",
"VUID-VkCopyBufferToImageInfo2KHR-imageExtent-00207",
"VUID-VkCopyImageToBufferInfo2KHR-imageExtent-00207",
}},
{"00208", {
"VUID-vkCmdCopyBufferToImage-imageExtent-00208",
"VUID-vkCmdCopyImageToBuffer-imageExtent-00208",
"VUID-VkCopyBufferToImageInfo2KHR-imageExtent-00208",
"VUID-VkCopyImageToBufferInfo2KHR-imageExtent-00208",
}},
{"00209", {
"VUID-vkCmdCopyBufferToImage-imageExtent-00209",
"VUID-vkCmdCopyImageToBuffer-imageExtent-00209",
"VUID-VkCopyBufferToImageInfo2KHR-imageExtent-00209",
"VUID-VkCopyImageToBufferInfo2KHR-imageExtent-00209",
}},
{"00211", {
"VUID-vkCmdCopyBufferToImage-aspectMask-00211",
"VUID-vkCmdCopyImageToBuffer-aspectMask-00211",
"VUID-VkCopyBufferToImageInfo2KHR-aspectMask-00211",
"VUID-VkCopyImageToBufferInfo2KHR-aspectMask-00211",
}},
{"01560", {
"VUID-vkCmdCopyBufferToImage-aspectMask-01560",
"VUID-vkCmdCopyImageToBuffer-aspectMask-01560",
"VUID-VkCopyBufferToImageInfo2KHR-aspectMask-01560",
"VUID-VkCopyImageToBufferInfo2KHR-aspectMask-01560",
}},
{"00213", {
"VUID-vkCmdCopyBufferToImage-baseArrayLayer-00213",
"VUID-vkCmdCopyImageToBuffer-baseArrayLayer-00213",
"VUID-VkCopyBufferToImageInfo2KHR-baseArrayLayer-00213",
"VUID-VkCopyImageToBufferInfo2KHR-baseArrayLayer-00213",
}},
{"04052", {
"VUID-vkCmdCopyBufferToImage-commandBuffer-04052",
"VUID-vkCmdCopyImageToBuffer-commandBuffer-04052",
"VUID-VkCopyBufferToImageInfo2KHR-commandBuffer-04052",
"VUID-VkCopyImageToBufferInfo2KHR-commandBuffer-04052",
}},
{"04053", {
"VUID-vkCmdCopyBufferToImage-srcImage-04053",
"VUID-vkCmdCopyImageToBuffer-srcImage-04053",
"VUID-VkCopyBufferToImageInfo2KHR-srcImage-04053",
"VUID-VkCopyImageToBufferInfo2KHR-srcImage-04053",
}}
};
// clang-format on
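// Select the matching VUID variant: bit 0 = image-to-buffer direction, bit 1 = the *2KHR (copy2) command.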
uint8_t index = 0;
index |= (image_to_buffer) ? 0x1 : 0;
index |= (copy2) ? 0x2 : 0;
return copy_imagebuffer_vuid.at(id).at(index);
}
// Transfer VkImageSubresourceRange into VkImageSubresourceLayers struct
static VkImageSubresourceLayers LayersFromRange(const VkImageSubresourceRange &subresource_range) {
VkImageSubresourceLayers subresource_layers;
subresource_layers.aspectMask = subresource_range.aspectMask;
subresource_layers.baseArrayLayer = subresource_range.baseArrayLayer;
subresource_layers.layerCount = subresource_range.layerCount;
subresource_layers.mipLevel = subresource_range.baseMipLevel;
return subresource_layers;
}
// Transfer VkImageSubresourceLayers into VkImageSubresourceRange struct
static VkImageSubresourceRange RangeFromLayers(const VkImageSubresourceLayers &subresource_layers) {
VkImageSubresourceRange subresource_range;
subresource_range.aspectMask = subresource_layers.aspectMask;
subresource_range.baseArrayLayer = subresource_layers.baseArrayLayer;
subresource_range.layerCount = subresource_layers.layerCount;
subresource_range.baseMipLevel = subresource_layers.mipLevel;
subresource_range.levelCount = 1;
return subresource_range;
}
static VkImageSubresourceRange MakeImageFullRange(const VkImageCreateInfo &create_info) {
const auto format = create_info.format;
VkImageSubresourceRange init_range{0, 0, VK_REMAINING_MIP_LEVELS, 0, VK_REMAINING_ARRAY_LAYERS};
#ifdef VK_USE_PLATFORM_ANDROID_KHR
const VkExternalFormatANDROID *external_format_android = LvlFindInChain<VkExternalFormatANDROID>(&create_info);
bool is_external_format_conversion = (external_format_android != nullptr && external_format_android->externalFormat != 0);
#else
bool is_external_format_conversion = false;
#endif
if (FormatIsColor(format) || FormatIsMultiplane(format) || is_external_format_conversion) {
init_range.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; // Normalization will expand this for multiplane
} else {
init_range.aspectMask =
(FormatHasDepth(format) ? VK_IMAGE_ASPECT_DEPTH_BIT : 0) | (FormatHasStencil(format) ? VK_IMAGE_ASPECT_STENCIL_BIT : 0);
}
return NormalizeSubresourceRange(create_info, init_range);
}
IMAGE_STATE::IMAGE_STATE(VkDevice dev, VkImage img, const VkImageCreateInfo *pCreateInfo)
: BINDABLE(img, kVulkanObjectTypeImage),
safe_create_info(pCreateInfo),
createInfo(*safe_create_info.ptr()),
valid(false),
acquired(false),
shared_presentable(false),
layout_locked(false),
get_sparse_reqs_called(false),
sparse_metadata_required(false),
sparse_metadata_bound(false),
has_ahb_format(false),
is_swapchain_image(false),
ahb_format(0),
full_range{MakeImageFullRange(createInfo)},
create_from_swapchain(VK_NULL_HANDLE),
bind_swapchain(VK_NULL_HANDLE),
bind_swapchain_imageIndex(0),
range_encoder(full_range),
disjoint(false),
plane0_memory_requirements_checked(false),
plane1_memory_requirements_checked(false),
plane2_memory_requirements_checked(false),
subresource_encoder(full_range),
fragment_encoder(nullptr),
store_device_as_workaround(dev), // TODO REMOVE WHEN encoder can be const
swapchain_fake_address(0U),
sparse_requirements{} {
if ((createInfo.sharingMode == VK_SHARING_MODE_CONCURRENT) && (createInfo.queueFamilyIndexCount > 0)) {
uint32_t *queue_family_indices = new uint32_t[createInfo.queueFamilyIndexCount];
for (uint32_t i = 0; i < createInfo.queueFamilyIndexCount; i++) {
queue_family_indices[i] = pCreateInfo->pQueueFamilyIndices[i];
}
createInfo.pQueueFamilyIndices = queue_family_indices;
}
if (createInfo.flags & VK_IMAGE_CREATE_SPARSE_BINDING_BIT) {
sparse = true;
}
auto *external_memory_info = LvlFindInChain<VkExternalMemoryImageCreateInfo>(pCreateInfo->pNext);
if (external_memory_info) {
external_memory_handle = external_memory_info->handleTypes;
}
}
bool IMAGE_STATE::IsCreateInfoEqual(const VkImageCreateInfo &other_createInfo) const {
bool is_equal = (createInfo.sType == other_createInfo.sType) && (createInfo.flags == other_createInfo.flags);
is_equal = is_equal && IsImageTypeEqual(other_createInfo) && IsFormatEqual(other_createInfo);
is_equal = is_equal && IsMipLevelsEqual(other_createInfo) && IsArrayLayersEqual(other_createInfo);
is_equal = is_equal && IsUsageEqual(other_createInfo) && IsInitialLayoutEqual(other_createInfo);
is_equal = is_equal && IsExtentEqual(other_createInfo) && IsTilingEqual(other_createInfo);
is_equal = is_equal && IsSamplesEqual(other_createInfo) && IsSharingModeEqual(other_createInfo);
return is_equal &&
((createInfo.sharingMode == VK_SHARING_MODE_CONCURRENT) ? IsQueueFamilyIndicesEqual(other_createInfo) : true);
}
// Check image compatibility rules for VK_NV_dedicated_allocation_image_aliasing
bool IMAGE_STATE::IsCreateInfoDedicatedAllocationImageAliasingCompatible(const VkImageCreateInfo &other_createInfo) const {
bool is_compatible = (createInfo.sType == other_createInfo.sType) && (createInfo.flags == other_createInfo.flags);
is_compatible = is_compatible && IsImageTypeEqual(other_createInfo) && IsFormatEqual(other_createInfo);
is_compatible = is_compatible && IsMipLevelsEqual(other_createInfo);
is_compatible = is_compatible && IsUsageEqual(other_createInfo) && IsInitialLayoutEqual(other_createInfo);
is_compatible = is_compatible && IsSamplesEqual(other_createInfo) && IsSharingModeEqual(other_createInfo);
is_compatible = is_compatible &&
((createInfo.sharingMode == VK_SHARING_MODE_CONCURRENT) ? IsQueueFamilyIndicesEqual(other_createInfo) : true);
is_compatible = is_compatible && IsTilingEqual(other_createInfo);
is_compatible = is_compatible && createInfo.extent.width <= other_createInfo.extent.width &&
createInfo.extent.height <= other_createInfo.extent.height &&
createInfo.extent.depth <= other_createInfo.extent.depth &&
createInfo.arrayLayers <= other_createInfo.arrayLayers;
return is_compatible;
}
bool IMAGE_STATE::IsCompatibleAliasing(IMAGE_STATE *other_image_state) const {
if (!is_swapchain_image && !other_image_state->is_swapchain_image &&
!(createInfo.flags & other_image_state->createInfo.flags & VK_IMAGE_CREATE_ALIAS_BIT)) {
return false;
}
if ((create_from_swapchain == VK_NULL_HANDLE) && binding.mem_state &&
(binding.mem_state == other_image_state->binding.mem_state) && (binding.offset == other_image_state->binding.offset) &&
IsCreateInfoEqual(other_image_state->createInfo)) {
return true;
}
if ((bind_swapchain == other_image_state->bind_swapchain) && (bind_swapchain != VK_NULL_HANDLE)) {
return true;
}
return false;
}
IMAGE_VIEW_STATE::IMAGE_VIEW_STATE(const std::shared_ptr<IMAGE_STATE> &im, VkImageView iv, const VkImageViewCreateInfo *ci)
: BASE_NODE(iv, kVulkanObjectTypeImageView),
create_info(*ci),
normalized_subresource_range(NormalizeSubresourceRange(*im, ci->subresourceRange)),
range_generator(im->subresource_encoder, normalized_subresource_range),
samplerConversion(VK_NULL_HANDLE),
image_state(im) {
auto *conversion_info = LvlFindInChain<VkSamplerYcbcrConversionInfo>(create_info.pNext);
if (conversion_info) samplerConversion = conversion_info->conversion;
if (image_state) {
// A light normalization of the createInfo range
auto &sub_res_range = create_info.subresourceRange;
sub_res_range.levelCount = ResolveRemainingLevels(&sub_res_range, image_state->createInfo.mipLevels);
sub_res_range.layerCount = ResolveRemainingLayers(&sub_res_range, image_state->createInfo.arrayLayers);
// Cache a full normalization (for "full image/whole image" comparisons)
// normalized_subresource_range = NormalizeSubresourceRange(*image_state, ci->subresourceRange);
samples = image_state->createInfo.samples;
if (image_state->has_ahb_format) {
// When the image has an external format the view's format must be VK_FORMAT_UNDEFINED and it is required to use a sampler
// Ycbcr conversion. Thus we can't extract any meaningful information from the format parameter. As a Sampler Ycbcr
// conversion must be used, the shader type is always float.
descriptor_format_bits = DESCRIPTOR_REQ_COMPONENT_TYPE_FLOAT;
} else {
descriptor_format_bits = DescriptorRequirementsBitsFromFormat(create_info.format);
}
image_state->AddParent(this);
}
}
static VkImageLayout NormalizeImageLayout(VkImageLayout layout, VkImageLayout non_normal, VkImageLayout normal) {
return (layout == non_normal) ? normal : layout;
}
static VkImageLayout NormalizeDepthImageLayout(VkImageLayout layout) {
return NormalizeImageLayout(layout, VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL,
VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL);
}
static VkImageLayout NormalizeStencilImageLayout(VkImageLayout layout) {
return NormalizeImageLayout(layout, VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL,
VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL);
}
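// Map the synchronization2 generic layouts (ATTACHMENT_OPTIMAL_KHR / READ_ONLY_OPTIMAL_KHR) to the equivalent
// aspect-specific layouts so that they compare equal to the corresponding legacy layout values.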
static VkImageLayout NormalizeSynchronization2Layout(const VkImageAspectFlags aspect_mask, VkImageLayout layout) {
if (layout == VK_IMAGE_LAYOUT_ATTACHMENT_OPTIMAL_KHR) {
if (aspect_mask == VK_IMAGE_ASPECT_COLOR_BIT) {
layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
} else if (aspect_mask == (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) {
layout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
} else if (aspect_mask == VK_IMAGE_ASPECT_DEPTH_BIT) {
layout = VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_OPTIMAL;
} else if (aspect_mask == VK_IMAGE_ASPECT_STENCIL_BIT) {
layout = VK_IMAGE_LAYOUT_STENCIL_ATTACHMENT_OPTIMAL;
}
} else if (layout == VK_IMAGE_LAYOUT_READ_ONLY_OPTIMAL_KHR) {
if (aspect_mask == VK_IMAGE_ASPECT_COLOR_BIT) {
layout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
} else if (aspect_mask == (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) {
layout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL;
} else if (aspect_mask == VK_IMAGE_ASPECT_DEPTH_BIT) {
layout = VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_OPTIMAL;
} else if (aspect_mask == VK_IMAGE_ASPECT_STENCIL_BIT) {
layout = VK_IMAGE_LAYOUT_STENCIL_READ_ONLY_OPTIMAL;
}
}
return layout;
}
static bool ImageLayoutMatches(const VkImageAspectFlags aspect_mask, VkImageLayout a, VkImageLayout b) {
bool matches = (a == b);
if (!matches) {
a = NormalizeSynchronization2Layout(aspect_mask, a);
b = NormalizeSynchronization2Layout(aspect_mask, b);
matches = (a == b);
if (!matches) {
// Relaxed rules when referencing *only* the depth or stencil aspects
if (aspect_mask == VK_IMAGE_ASPECT_DEPTH_BIT) {
matches = NormalizeDepthImageLayout(a) == NormalizeDepthImageLayout(b);
} else if (aspect_mask == VK_IMAGE_ASPECT_STENCIL_BIT) {
matches = NormalizeStencilImageLayout(a) == NormalizeStencilImageLayout(b);
}
}
}
return matches;
}
// Utility type for ForRange callbacks
struct LayoutUseCheckAndMessage {
const static VkImageAspectFlags kDepthOrStencil = VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT;
const ImageSubresourceLayoutMap *layout_map;
const VkImageAspectFlags aspect_mask;
const char *message;
VkImageLayout layout;
LayoutUseCheckAndMessage() = delete;
LayoutUseCheckAndMessage(const ImageSubresourceLayoutMap *layout_map_, const VkImageAspectFlags aspect_mask_ = 0)
: layout_map(layout_map_), aspect_mask{aspect_mask_}, message(nullptr), layout(kInvalidLayout) {}
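// Returns true when `check` is compatible with both the tracked current and initial layouts;
// on a mismatch, `message` and `layout` record the conflicting use for error reporting.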
bool Check(const VkImageSubresource &subres, VkImageLayout check, VkImageLayout current_layout, VkImageLayout initial_layout) {
message = nullptr;
layout = kInvalidLayout; // Success status
if (current_layout != kInvalidLayout && !ImageLayoutMatches(aspect_mask, check, current_layout)) {
message = "previous known";
layout = current_layout;
} else if ((initial_layout != kInvalidLayout) && !ImageLayoutMatches(aspect_mask, check, initial_layout)) {
// To check the relaxed rule matching we need to see how the initial use was used
const auto initial_layout_state = layout_map->GetSubresourceInitialLayoutState(subres);
assert(initial_layout_state); // If we have an initial layout, we better have a state for it
if (!((initial_layout_state->aspect_mask & kDepthOrStencil) &&
ImageLayoutMatches(initial_layout_state->aspect_mask, check, initial_layout))) {
message = "previously used";
layout = initial_layout;
}
}
return layout == kInvalidLayout;
}
};
bool IMAGE_VIEW_STATE::OverlapSubresource(const IMAGE_VIEW_STATE &compare_view) const {
if (image_view() == compare_view.image_view()) {
return true;
}
if (image_state->image() != compare_view.image_state->image()) {
return false;
}
if (normalized_subresource_range.aspectMask != compare_view.normalized_subresource_range.aspectMask) {
return false;
}
// check whether the mip level ranges overlap
if ((normalized_subresource_range.baseMipLevel < compare_view.normalized_subresource_range.baseMipLevel) &&
((normalized_subresource_range.baseMipLevel + normalized_subresource_range.levelCount) <=
compare_view.normalized_subresource_range.baseMipLevel)) {
return false;
}
if ((normalized_subresource_range.baseMipLevel > compare_view.normalized_subresource_range.baseMipLevel) &&
(normalized_subresource_range.baseMipLevel >=
(compare_view.normalized_subresource_range.baseMipLevel + compare_view.normalized_subresource_range.levelCount))) {
return false;
}
// check whether the array layer ranges overlap
if ((normalized_subresource_range.baseArrayLayer < compare_view.normalized_subresource_range.baseArrayLayer) &&
((normalized_subresource_range.baseArrayLayer + normalized_subresource_range.layerCount) <=
compare_view.normalized_subresource_range.baseArrayLayer)) {
return false;
}
if ((normalized_subresource_range.baseArrayLayer > compare_view.normalized_subresource_range.baseArrayLayer) &&
(normalized_subresource_range.baseArrayLayer >=
(compare_view.normalized_subresource_range.baseArrayLayer + compare_view.normalized_subresource_range.layerCount))) {
return false;
}
return true;
}
uint32_t FullMipChainLevels(uint32_t height, uint32_t width, uint32_t depth) {
// uint cast applies floor()
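// e.g. a 1024 x 512 x 1 image yields 1 + floor(log2(1024)) = 11 mip levels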
return 1u + static_cast<uint32_t>(log2(std::max({height, width, depth})));
}
uint32_t FullMipChainLevels(VkExtent3D extent) { return FullMipChainLevels(extent.height, extent.width, extent.depth); }
uint32_t FullMipChainLevels(VkExtent2D extent) { return FullMipChainLevels(extent.height, extent.width); }
bool CoreChecks::FindLayouts(const IMAGE_STATE &image_state, std::vector<VkImageLayout> &layouts) const {
const auto *layout_range_map = GetLayoutRangeMap(imageLayoutMap, image_state.image());
if (!layout_range_map) return false;
// TODO: FindLayouts function should mutate into a ValidatePresentableLayout with the loop wrapping the LogError
// from the caller. You can then use decode to add the subresource of the range::begin to the error message.
// TODO: what is this test and what is it supposed to do?! -- the logic doesn't match the comment below?!
// TODO: Make this robust for >1 aspect mask. Now it will just say ignore potential errors in this case.
if (layout_range_map->size() >= (image_state.createInfo.arrayLayers * image_state.createInfo.mipLevels + 1)) {
return false;
}
for (const auto &entry : *layout_range_map) {
layouts.push_back(entry.second);
}
return true;
}
// Set image layout for given VkImageSubresourceRange struct
void CoreChecks::SetImageLayout(CMD_BUFFER_STATE *cb_node, const IMAGE_STATE &image_state,
const VkImageSubresourceRange &image_subresource_range, VkImageLayout layout,
VkImageLayout expected_layout) {
auto *subresource_map = GetImageSubresourceLayoutMap(cb_node, image_state);
assert(subresource_map); // the non-const getter must return a valid pointer
if (subresource_map->SetSubresourceRangeLayout(*cb_node, image_subresource_range, layout, expected_layout)) {
cb_node->image_layout_change_count++; // Change the version of this data to force revalidation
}
for (const auto *alias_state : image_state.aliasing_images) {
assert(alias_state);
// The map state of the aliases should all be in sync, so no need to check the return value
subresource_map = GetImageSubresourceLayoutMap(cb_node, *alias_state);
assert(subresource_map);
subresource_map->SetSubresourceRangeLayout(*cb_node, image_subresource_range, layout, expected_layout);
}
}
// Set the initial image layout for all slices of an image view
void CoreChecks::SetImageViewInitialLayout(CMD_BUFFER_STATE *cb_node, const IMAGE_VIEW_STATE &view_state, VkImageLayout layout) {
if (disabled[image_layout_validation]) {
return;
}
IMAGE_STATE *image_state = view_state.image_state.get();
auto *subresource_map = GetImageSubresourceLayoutMap(cb_node, *image_state);
subresource_map->SetSubresourceRangeInitialLayout(*cb_node, layout, view_state);
for (const auto *alias_state : image_state->aliasing_images) {
assert(alias_state);
subresource_map = GetImageSubresourceLayoutMap(cb_node, *alias_state);
subresource_map->SetSubresourceRangeInitialLayout(*cb_node, layout, view_state);
}
}
// Set the initial image layout for a passed non-normalized subresource range
void CoreChecks::SetImageInitialLayout(CMD_BUFFER_STATE *cb_node, const IMAGE_STATE &image_state,
const VkImageSubresourceRange &range, VkImageLayout layout) {
auto *subresource_map = GetImageSubresourceLayoutMap(cb_node, image_state);
assert(subresource_map);
subresource_map->SetSubresourceRangeInitialLayout(*cb_node, NormalizeSubresourceRange(image_state, range), layout);
for (const auto *alias_state : image_state.aliasing_images) {
assert(alias_state);
subresource_map = GetImageSubresourceLayoutMap(cb_node, *alias_state);
assert(subresource_map);
subresource_map->SetSubresourceRangeInitialLayout(*cb_node, NormalizeSubresourceRange(*alias_state, range), layout);
}
}
void CoreChecks::SetImageInitialLayout(CMD_BUFFER_STATE *cb_node, VkImage image, const VkImageSubresourceRange &range,
VkImageLayout layout) {
const IMAGE_STATE *image_state = GetImageState(image);
if (!image_state) return;
SetImageInitialLayout(cb_node, *image_state, range, layout);
};
void CoreChecks::SetImageInitialLayout(CMD_BUFFER_STATE *cb_node, const IMAGE_STATE &image_state,
const VkImageSubresourceLayers &layers, VkImageLayout layout) {
SetImageInitialLayout(cb_node, image_state, RangeFromLayers(layers), layout);
}
// Set image layout for all slices of an image view
void CoreChecks::SetImageViewLayout(CMD_BUFFER_STATE *cb_node, const IMAGE_VIEW_STATE &view_state, VkImageLayout layout,
VkImageLayout layoutStencil) {
IMAGE_STATE *image_state = view_state.image_state.get();
VkImageSubresourceRange sub_range = view_state.normalized_subresource_range;
// When changing the layout of a 3D image subresource via a 2D or 2D_ARRAY image view, all depth slices of
// the subresource mip level(s) are transitioned, ignoring any layers restriction in the subresource info.
if ((image_state->createInfo.imageType == VK_IMAGE_TYPE_3D) && (view_state.create_info.viewType != VK_IMAGE_VIEW_TYPE_3D)) {
sub_range.baseArrayLayer = 0;
sub_range.layerCount = image_state->createInfo.extent.depth;
}
if (sub_range.aspectMask == (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT) && layoutStencil != kInvalidLayout) {
sub_range.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT;
SetImageLayout(cb_node, *image_state, sub_range, layout);
sub_range.aspectMask = VK_IMAGE_ASPECT_STENCIL_BIT;
SetImageLayout(cb_node, *image_state, sub_range, layoutStencil);
} else {
SetImageLayout(cb_node, *image_state, sub_range, layout);
}
}
bool CoreChecks::ValidateRenderPassLayoutAgainstFramebufferImageUsage(RenderPassCreateVersion rp_version, VkImageLayout layout,
VkImage image, VkImageView image_view,
VkFramebuffer framebuffer, VkRenderPass renderpass,
uint32_t attachment_index, const char *variable_name) const {
bool skip = false;
auto image_state = GetImageState(image);
const char *vuid;
const bool use_rp2 = (rp_version == RENDER_PASS_VERSION_2);
const char *function_name = use_rp2 ? "vkCmdBeginRenderPass2()" : "vkCmdBeginRenderPass()";
if (!image_state) {
LogObjectList objlist(image);
objlist.add(renderpass);
objlist.add(framebuffer);
objlist.add(image_view);
skip |=
LogError(image, "VUID-VkRenderPassBeginInfo-framebuffer-parameter",
"%s: RenderPass %s uses %s where pAttachments[%" PRIu32 "] = %s, which refers to an invalid image",
function_name, report_data->FormatHandle(renderpass).c_str(), report_data->FormatHandle(framebuffer).c_str(),
attachment_index, report_data->FormatHandle(image_view).c_str());
return skip;
}
auto image_usage = image_state->createInfo.usage;
const auto stencil_usage_info = LvlFindInChain<VkImageStencilUsageCreateInfo>(image_state->createInfo.pNext);
if (stencil_usage_info) {
image_usage |= stencil_usage_info->stencilUsage;
}
// Check for layouts that mismatch image usages in the framebuffer
if (layout == VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL && !(image_usage & VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT)) {
vuid = use_rp2 ? "VUID-vkCmdBeginRenderPass2-initialLayout-03094" : "VUID-vkCmdBeginRenderPass-initialLayout-00895";
LogObjectList objlist(image);
objlist.add(renderpass);
objlist.add(framebuffer);
objlist.add(image_view);
skip |= LogError(objlist, vuid,
"%s: Layout/usage mismatch for attachment %u in %s"
" - the %s is %s but the image attached to %s via %s"
" was not created with VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT",
function_name, attachment_index, report_data->FormatHandle(renderpass).c_str(), variable_name,
string_VkImageLayout(layout), report_data->FormatHandle(framebuffer).c_str(),
report_data->FormatHandle(image_view).c_str());
}
if (layout == VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL &&
!(image_usage & (VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT | VK_IMAGE_USAGE_SAMPLED_BIT))) {
vuid = use_rp2 ? "VUID-vkCmdBeginRenderPass2-initialLayout-03097" : "VUID-vkCmdBeginRenderPass-initialLayout-00897";
LogObjectList objlist(image);
objlist.add(renderpass);
objlist.add(framebuffer);
objlist.add(image_view);
skip |= LogError(objlist, vuid,
"%s: Layout/usage mismatch for attachment %u in %s"
" - the %s is %s but the image attached to %s via %s"
" was not created with VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT or VK_IMAGE_USAGE_SAMPLED_BIT",
function_name, attachment_index, report_data->FormatHandle(renderpass).c_str(), variable_name,
string_VkImageLayout(layout), report_data->FormatHandle(framebuffer).c_str(),
report_data->FormatHandle(image_view).c_str());
}
if (layout == VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL && !(image_usage & VK_IMAGE_USAGE_TRANSFER_SRC_BIT)) {
vuid = use_rp2 ? "VUID-vkCmdBeginRenderPass2-initialLayout-03098" : "VUID-vkCmdBeginRenderPass-initialLayout-00898";
LogObjectList objlist(image);
objlist.add(renderpass);
objlist.add(framebuffer);
objlist.add(image_view);
skip |= LogError(objlist, vuid,
"%s: Layout/usage mismatch for attachment %u in %s"
" - the %s is %s but the image attached to %s via %s"
" was not created with VK_IMAGE_USAGE_TRANSFER_SRC_BIT",
function_name, attachment_index, report_data->FormatHandle(renderpass).c_str(), variable_name,
string_VkImageLayout(layout), report_data->FormatHandle(framebuffer).c_str(),
report_data->FormatHandle(image_view).c_str());
}
if (layout == VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL && !(image_usage & VK_IMAGE_USAGE_TRANSFER_DST_BIT)) {
vuid = use_rp2 ? "VUID-vkCmdBeginRenderPass2-initialLayout-03099" : "VUID-vkCmdBeginRenderPass-initialLayout-00899";
LogObjectList objlist(image);
objlist.add(renderpass);
objlist.add(framebuffer);
objlist.add(image_view);
skip |= LogError(objlist, vuid,
"%s: Layout/usage mismatch for attachment %u in %s"
" - the %s is %s but the image attached to %s via %s"
" was not created with VK_IMAGE_USAGE_TRANSFER_DST_BIT",
function_name, attachment_index, report_data->FormatHandle(renderpass).c_str(), variable_name,
string_VkImageLayout(layout), report_data->FormatHandle(framebuffer).c_str(),
report_data->FormatHandle(image_view).c_str());
}
if (device_extensions.vk_khr_maintenance2) {
if ((layout == VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL ||
layout == VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL ||
layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL ||
layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL) &&
!(image_usage & VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT)) {
vuid = use_rp2 ? "VUID-vkCmdBeginRenderPass2-initialLayout-03096" : "VUID-vkCmdBeginRenderPass-initialLayout-01758";
LogObjectList objlist(image);
objlist.add(renderpass);
objlist.add(framebuffer);
objlist.add(image_view);
skip |= LogError(objlist, vuid,
"%s: Layout/usage mismatch for attachment %u in %s"
" - the %s is %s but the image attached to %s via %s"
" was not created with VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT",
function_name, attachment_index, report_data->FormatHandle(renderpass).c_str(), variable_name,
string_VkImageLayout(layout), report_data->FormatHandle(framebuffer).c_str(),
report_data->FormatHandle(image_view).c_str());
}
} else {
// The create render pass 2 extension requires maintenance 2 (the previous branch), so no vuid switch needed here.
if ((layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL ||
layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL) &&
!(image_usage & VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT)) {
LogObjectList objlist(image);
objlist.add(renderpass);
objlist.add(framebuffer);
objlist.add(image_view);
skip |= LogError(objlist, "VUID-vkCmdBeginRenderPass-initialLayout-00896",
"%s: Layout/usage mismatch for attachment %u in %s"
" - the %s is %s but the image attached to %s via %s"
" was not created with VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT",
function_name, attachment_index, report_data->FormatHandle(renderpass).c_str(), variable_name,
string_VkImageLayout(layout), report_data->FormatHandle(framebuffer).c_str(),
report_data->FormatHandle(image_view).c_str());
}
}
return skip;
}
bool CoreChecks::VerifyFramebufferAndRenderPassLayouts(RenderPassCreateVersion rp_version, const CMD_BUFFER_STATE *pCB,
const VkRenderPassBeginInfo *pRenderPassBegin,
const FRAMEBUFFER_STATE *framebuffer_state) const {
bool skip = false;
auto const render_pass_info = GetRenderPassState(pRenderPassBegin->renderPass)->createInfo.ptr();
auto const &framebuffer_info = framebuffer_state->createInfo;
const VkImageView *attachments = framebuffer_info.pAttachments;
auto render_pass = GetRenderPassState(pRenderPassBegin->renderPass)->renderPass();
auto framebuffer = framebuffer_state->framebuffer();
if (render_pass_info->attachmentCount != framebuffer_info.attachmentCount) {
skip |= LogError(pCB->commandBuffer(), kVUID_Core_DrawState_InvalidRenderpass,
"You cannot start a render pass using a framebuffer with a different number of attachments.");
}
const auto *attachment_info = LvlFindInChain<VkRenderPassAttachmentBeginInfo>(pRenderPassBegin->pNext);
if (((framebuffer_info.flags & VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT) != 0) && attachment_info != nullptr) {
attachments = attachment_info->pAttachments;
}
if (attachments != nullptr) {
const auto *const_p_cb = static_cast<const CMD_BUFFER_STATE *>(pCB);
for (uint32_t i = 0; i < render_pass_info->attachmentCount; ++i) {
auto image_view = attachments[i];
auto view_state = GetImageViewState(image_view);
if (!view_state) {
LogObjectList objlist(pRenderPassBegin->renderPass);
objlist.add(framebuffer_state->framebuffer());
objlist.add(image_view);
skip |= LogError(objlist, "VUID-VkRenderPassBeginInfo-framebuffer-parameter",
"vkCmdBeginRenderPass(): %s pAttachments[%" PRIu32 "] = %s is not a valid VkImageView handle",
report_data->FormatHandle(framebuffer_state->framebuffer()).c_str(), i,
report_data->FormatHandle(image_view).c_str());
continue;
}
const VkImage image = view_state->create_info.image;
const IMAGE_STATE *image_state = GetImageState(image);
if (!image_state) {
LogObjectList objlist(pRenderPassBegin->renderPass);
objlist.add(framebuffer_state->framebuffer());
objlist.add(image_view);
objlist.add(image);
skip |= LogError(objlist, "VUID-VkRenderPassBeginInfo-framebuffer-parameter",
"vkCmdBeginRenderPass(): %s pAttachments[%" PRIu32 "] = %s references non-extant %s.",
report_data->FormatHandle(framebuffer_state->framebuffer()).c_str(), i,
report_data->FormatHandle(image_view).c_str(), report_data->FormatHandle(image).c_str());
continue;
}
auto attachment_initial_layout = render_pass_info->pAttachments[i].initialLayout;
auto final_layout = render_pass_info->pAttachments[i].finalLayout;
// Default to expecting stencil in the same layout.
auto attachment_stencil_initial_layout = attachment_initial_layout;
// If a separate layout is specified, look for that.
const auto *attachment_description_stencil_layout =
LvlFindInChain<VkAttachmentDescriptionStencilLayout>(render_pass_info->pAttachments[i].pNext);
if (attachment_description_stencil_layout) {
attachment_stencil_initial_layout = attachment_description_stencil_layout->stencilInitialLayout;
}
// Cast pCB to const because we don't want to create entries that don't exist here (in case the key changes to something
// in common with the non-const version.)
const ImageSubresourceLayoutMap *subresource_map = (attachment_initial_layout != VK_IMAGE_LAYOUT_UNDEFINED)
? GetImageSubresourceLayoutMap(const_p_cb, image)
: nullptr;
if (subresource_map) { // If no layout information for image yet, will be checked at QueueSubmit time
LayoutUseCheckAndMessage layout_check(subresource_map);
bool subres_skip = false;
auto pos = subresource_map->Find(view_state->normalized_subresource_range);
// IncrementInterval skips over all the subresources that have the same state as we just checked, incrementing to
// the next "constant value" range
for (; !(pos.AtEnd()) && !subres_skip; pos.IncrementInterval()) {
const VkImageSubresource &subres = pos->subresource;
// Allow for differing depth and stencil layouts
VkImageLayout check_layout = attachment_initial_layout;
if (subres.aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT) check_layout = attachment_stencil_initial_layout;
if (!layout_check.Check(subres, check_layout, pos->current_layout, pos->initial_layout)) {
subres_skip |= LogError(
device, kVUID_Core_DrawState_InvalidRenderpass,
"You cannot start a render pass using attachment %u where the render pass initial layout is %s "
"and the %s layout of the attachment is %s. The layouts must match, or the render "
"pass initial layout for the attachment must be VK_IMAGE_LAYOUT_UNDEFINED",
i, string_VkImageLayout(check_layout), layout_check.message, string_VkImageLayout(layout_check.layout));
}
}
skip |= subres_skip;
}
ValidateRenderPassLayoutAgainstFramebufferImageUsage(rp_version, attachment_initial_layout, image, image_view,
framebuffer, render_pass, i, "initial layout");
ValidateRenderPassLayoutAgainstFramebufferImageUsage(rp_version, final_layout, image, image_view, framebuffer,
render_pass, i, "final layout");
}
for (uint32_t j = 0; j < render_pass_info->subpassCount; ++j) {
auto &subpass = render_pass_info->pSubpasses[j];
for (uint32_t k = 0; k < render_pass_info->pSubpasses[j].inputAttachmentCount; ++k) {
auto &attachment_ref = subpass.pInputAttachments[k];
if (attachment_ref.attachment != VK_ATTACHMENT_UNUSED) {
auto image_view = attachments[attachment_ref.attachment];
auto view_state = GetImageViewState(image_view);
if (view_state) {
auto image = view_state->create_info.image;
ValidateRenderPassLayoutAgainstFramebufferImageUsage(rp_version, attachment_ref.layout, image, image_view,
framebuffer, render_pass, attachment_ref.attachment,
"input attachment layout");
}
}
}
for (uint32_t k = 0; k < render_pass_info->pSubpasses[j].colorAttachmentCount; ++k) {
auto &attachment_ref = subpass.pColorAttachments[k];
if (attachment_ref.attachment != VK_ATTACHMENT_UNUSED) {
auto image_view = attachments[attachment_ref.attachment];
auto view_state = GetImageViewState(image_view);
if (view_state) {
auto image = view_state->create_info.image;
ValidateRenderPassLayoutAgainstFramebufferImageUsage(rp_version, attachment_ref.layout, image, image_view,
framebuffer, render_pass, attachment_ref.attachment,
"color attachment layout");
if (subpass.pResolveAttachments) {
ValidateRenderPassLayoutAgainstFramebufferImageUsage(
rp_version, attachment_ref.layout, image, image_view, framebuffer, render_pass,
attachment_ref.attachment, "resolve attachment layout");
}
}
}
}
if (render_pass_info->pSubpasses[j].pDepthStencilAttachment) {
auto &attachment_ref = *subpass.pDepthStencilAttachment;
if (attachment_ref.attachment != VK_ATTACHMENT_UNUSED) {
auto image_view = attachments[attachment_ref.attachment];
auto view_state = GetImageViewState(image_view);
if (view_state) {
auto image = view_state->create_info.image;
ValidateRenderPassLayoutAgainstFramebufferImageUsage(rp_version, attachment_ref.layout, image, image_view,
framebuffer, render_pass, attachment_ref.attachment,
"input attachment layout");
}
}
}
}
}
return skip;
}
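// Set the tracked layout of the attachment referenced by 'ref' (honoring a chained separate stencil layout, if
// present). No-op when the reference is VK_ATTACHMENT_UNUSED or the attachment view is not active.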
void CoreChecks::TransitionAttachmentRefLayout(CMD_BUFFER_STATE *pCB, FRAMEBUFFER_STATE *pFramebuffer,
const safe_VkAttachmentReference2 &ref) {
if (ref.attachment != VK_ATTACHMENT_UNUSED) {
IMAGE_VIEW_STATE *image_view = pCB->GetActiveAttachmentImageViewState(ref.attachment);
if (image_view) {
VkImageLayout stencil_layout = kInvalidLayout;
const auto *attachment_reference_stencil_layout = LvlFindInChain<VkAttachmentReferenceStencilLayout>(ref.pNext);
if (attachment_reference_stencil_layout) {
stencil_layout = attachment_reference_stencil_layout->stencilLayout;
}
SetImageViewLayout(pCB, *image_view, ref.layout, stencil_layout);
}
}
}
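// Transition every attachment referenced by the given subpass (input, color, and depth/stencil) to the layout the
// subpass declares for it.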
void CoreChecks::TransitionSubpassLayouts(CMD_BUFFER_STATE *pCB, const RENDER_PASS_STATE *render_pass_state,
const int subpass_index, FRAMEBUFFER_STATE *framebuffer_state) {
assert(render_pass_state);
if (framebuffer_state) {
auto const &subpass = render_pass_state->createInfo.pSubpasses[subpass_index];
for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
TransitionAttachmentRefLayout(pCB, framebuffer_state, subpass.pInputAttachments[j]);
}
for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
TransitionAttachmentRefLayout(pCB, framebuffer_state, subpass.pColorAttachments[j]);
}
if (subpass.pDepthStencilAttachment) {
TransitionAttachmentRefLayout(pCB, framebuffer_state, *subpass.pDepthStencilAttachment);
}
}
}
// Transition the layout state for renderpass attachments based on the BeginRenderPass() call. This includes:
// 1. Transition into initialLayout state
// 2. Transition from initialLayout to layout used in subpass 0
void CoreChecks::TransitionBeginRenderPassLayouts(CMD_BUFFER_STATE *cb_state, const RENDER_PASS_STATE *render_pass_state,
FRAMEBUFFER_STATE *framebuffer_state) {
// First record expected initialLayout as a potential initial layout usage.
auto const rpci = render_pass_state->createInfo.ptr();
for (uint32_t i = 0; i < rpci->attachmentCount; ++i) {
auto *view_state = cb_state->GetActiveAttachmentImageViewState(i);
if (view_state) {
IMAGE_STATE *image_state = view_state->image_state.get();
const auto initial_layout = rpci->pAttachments[i].initialLayout;
const auto *attachment_description_stencil_layout =
LvlFindInChain<VkAttachmentDescriptionStencilLayout>(rpci->pAttachments[i].pNext);
if (attachment_description_stencil_layout) {
const auto stencil_initial_layout = attachment_description_stencil_layout->stencilInitialLayout;
VkImageSubresourceRange sub_range = view_state->normalized_subresource_range;
sub_range.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT;
SetImageInitialLayout(cb_state, *image_state, sub_range, initial_layout);
sub_range.aspectMask = VK_IMAGE_ASPECT_STENCIL_BIT;
SetImageInitialLayout(cb_state, *image_state, sub_range, stencil_initial_layout);
} else {
SetImageInitialLayout(cb_state, *image_state, view_state->normalized_subresource_range, initial_layout);
}
}
}
// Now transition for first subpass (index 0)
TransitionSubpassLayouts(cb_state, render_pass_state, 0, framebuffer_state);
}
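// Returns true only when every aspect requested in aspect_mask is actually present in 'format'
// (color, depth, stencil, or multi-planar plane aspects).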
bool VerifyAspectsPresent(VkImageAspectFlags aspect_mask, VkFormat format) {
if ((aspect_mask & VK_IMAGE_ASPECT_COLOR_BIT) != 0) {
if (!(FormatIsColor(format) || FormatIsMultiplane(format))) return false;
}
if ((aspect_mask & VK_IMAGE_ASPECT_DEPTH_BIT) != 0) {
if (!FormatHasDepth(format)) return false;
}
if ((aspect_mask & VK_IMAGE_ASPECT_STENCIL_BIT) != 0) {
if (!FormatHasStencil(format)) return false;
}
if (0 != (aspect_mask & (VK_IMAGE_ASPECT_PLANE_0_BIT | VK_IMAGE_ASPECT_PLANE_1_BIT | VK_IMAGE_ASPECT_PLANE_2_BIT))) {
if (FormatPlaneCount(format) == 1) return false;
}
return true;
}
// Verify an ImageMemoryBarrier's old/new ImageLayouts are compatible with the Image's ImageUsageFlags.
bool CoreChecks::ValidateBarrierLayoutToImageUsage(const Location &loc, VkImage image, VkImageLayout layout,
VkImageUsageFlags usage_flags) const {
bool skip = false;
bool is_error = false;
switch (layout) {
case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL:
is_error = ((usage_flags & VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT) == 0);
break;
case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL:
is_error = ((usage_flags & VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT) == 0);
break;
case VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL:
is_error = ((usage_flags & VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT) == 0);
break;
case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL:
is_error = ((usage_flags & (VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT)) == 0);
break;
case VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL:
is_error = ((usage_flags & VK_IMAGE_USAGE_TRANSFER_SRC_BIT) == 0);
break;
case VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL:
is_error = ((usage_flags & VK_IMAGE_USAGE_TRANSFER_DST_BIT) == 0);
break;
case VK_IMAGE_LAYOUT_SHADING_RATE_OPTIMAL_NV:
is_error = ((usage_flags & VK_IMAGE_USAGE_SHADING_RATE_IMAGE_BIT_NV) == 0);
break;
case VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL:
is_error = ((usage_flags & VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT) == 0);
break;
case VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL:
is_error = ((usage_flags & VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT) == 0);
break;
default:
// Other VkImageLayout values do not have VUs defined in this context.
break;
}
if (is_error) {
const auto &vuid = sync_vuid_maps::GetBadImageLayoutVUID(loc, layout);
skip |=
LogError(image, vuid, "%s Image barrier Layout=%s is not compatible with %s usage flags 0x%" PRIx32 ".",
loc.Message().c_str(), string_VkImageLayout(layout), report_data->FormatHandle(image).c_str(), usage_flags);
}
return skip;
}
// Verify image barriers are compatible with the images they reference.
template <typename ImageBarrier>
bool CoreChecks::ValidateBarriersToImages(const Location &outer_loc, const CMD_BUFFER_STATE *cb_state,
uint32_t imageMemoryBarrierCount, const ImageBarrier *pImageMemoryBarriers) const {
bool skip = false;
using sync_vuid_maps::GetImageBarrierVUID;
using sync_vuid_maps::ImageError;
// Scoreboard for checking for duplicate and inconsistent barriers to images
struct ImageBarrierScoreboardEntry {
uint32_t index;
// This is designed for temporary storage within the scope of the API call. If retained storage of the barriers is
        // required, copies should be made and smart or unique pointers used in some other structure (or this one refactored)
const ImageBarrier *barrier;
};
// Necessary to resolve warning C4503 when building with Visual Studio 2015.
    // Adding a struct wrapper is their recommended solution for the expanded type name growing too long
// when creating maps full of maps.
struct ImageBarrierScoreboardSubresMap {
layer_data::unordered_map<VkImageSubresourceRange, ImageBarrierScoreboardEntry> map;
};
using ImageBarrierScoreboardImageMap = layer_data::unordered_map<VkImage, ImageBarrierScoreboardSubresMap>;
// Scoreboard for duplicate layout transition barriers within the list
// Pointers retained in the scoreboard only have the lifetime of *this* call (i.e. within the scope of the API call)
ImageBarrierScoreboardImageMap layout_transitions;
for (uint32_t i = 0; i < imageMemoryBarrierCount; ++i) {
auto loc = outer_loc.dot(Field::pImageMemoryBarriers, i);
const auto &img_barrier = pImageMemoryBarriers[i];
// Update the scoreboard of layout transitions and check for barriers affecting the same image and subresource
// TODO: a higher precision could be gained by adapting the command_buffer image_layout_map logic looking for conflicts
// at a per sub-resource level
if (img_barrier.oldLayout != img_barrier.newLayout) {
const ImageBarrierScoreboardEntry new_entry{i, &img_barrier};
const auto image_it = layout_transitions.find(img_barrier.image);
if (image_it != layout_transitions.end()) {
auto &subres_map = image_it->second.map;
auto subres_it = subres_map.find(img_barrier.subresourceRange);
if (subres_it != subres_map.end()) {
auto &entry = subres_it->second;
auto entry_layout =
NormalizeSynchronization2Layout(entry.barrier->subresourceRange.aspectMask, entry.barrier->newLayout);
auto old_layout =
NormalizeSynchronization2Layout(img_barrier.subresourceRange.aspectMask, img_barrier.oldLayout);
if ((entry_layout != old_layout) && (old_layout != VK_IMAGE_LAYOUT_UNDEFINED)) {
const VkImageSubresourceRange &range = img_barrier.subresourceRange;
const auto &vuid = GetImageBarrierVUID(loc, ImageError::kConflictingLayout);
                    skip |= LogError(
cb_state->commandBuffer(), vuid,
"%s conflicts with earlier entry pImageMemoryBarrier[%u]. %s"
" subresourceRange: aspectMask=%u baseMipLevel=%u levelCount=%u, baseArrayLayer=%u, layerCount=%u; "
"conflicting barrier transitions image layout from %s when earlier barrier transitioned to layout %s.",
loc.Message().c_str(), entry.index, report_data->FormatHandle(img_barrier.image).c_str(),
range.aspectMask, range.baseMipLevel, range.levelCount, range.baseArrayLayer, range.layerCount,
string_VkImageLayout(img_barrier.oldLayout), string_VkImageLayout(entry.barrier->newLayout));
}
entry = new_entry;
} else {
subres_map[img_barrier.subresourceRange] = new_entry;
}
} else {
layout_transitions[img_barrier.image].map[img_barrier.subresourceRange] = new_entry;
}
}
auto image_state = GetImageState(img_barrier.image);
if (image_state) {
VkImageUsageFlags usage_flags = image_state->createInfo.usage;
skip |=
ValidateBarrierLayoutToImageUsage(loc.dot(Field::oldLayout), img_barrier.image, img_barrier.oldLayout, usage_flags);
skip |=
ValidateBarrierLayoutToImageUsage(loc.dot(Field::newLayout), img_barrier.image, img_barrier.newLayout, usage_flags);
// Make sure layout is able to be transitioned, currently only presented shared presentable images are locked
if (image_state->layout_locked) {
// TODO: Add unique id for error when available
skip |= LogError(
img_barrier.image, 0,
"%s Attempting to transition shared presentable %s"
" from layout %s to layout %s, but image has already been presented and cannot have its layout transitioned.",
loc.Message().c_str(), report_data->FormatHandle(img_barrier.image).c_str(),
string_VkImageLayout(img_barrier.oldLayout), string_VkImageLayout(img_barrier.newLayout));
}
const VkImageCreateInfo &image_create_info = image_state->createInfo;
const VkFormat image_format = image_create_info.format;
const VkImageAspectFlags aspect_mask = img_barrier.subresourceRange.aspectMask;
// For a Depth/Stencil image both aspects MUST be set
auto image_loc = loc.dot(Field::image);
if (FormatIsDepthAndStencil(image_format)) {
if (enabled_features.core12.separateDepthStencilLayouts) {
if (!(aspect_mask & (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT))) {
auto vuid = GetImageBarrierVUID(loc, ImageError::kNotDepthOrStencilAspect);
skip |= LogError(img_barrier.image, vuid,
"%s references %s of format %s that must have either the depth or stencil "
"aspects set, but its aspectMask is 0x%" PRIx32 ".",
image_loc.Message().c_str(), report_data->FormatHandle(img_barrier.image).c_str(),
string_VkFormat(image_format), aspect_mask);
}
} else {
auto const ds_mask = VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT;
if ((aspect_mask & ds_mask) != (ds_mask)) {
auto error = device_extensions.vk_khr_separate_depth_stencil_layouts
? ImageError::kNotSeparateDepthAndStencilAspect
: ImageError::kNotDepthAndStencilAspect;
auto vuid = GetImageBarrierVUID(image_loc, error);
skip |= LogError(img_barrier.image, vuid,
"%s references %s of format %s that must have the depth and stencil "
"aspects set, but its aspectMask is 0x%" PRIx32 ".",
image_loc.Message().c_str(), report_data->FormatHandle(img_barrier.image).c_str(),
string_VkFormat(image_format), aspect_mask);
}
}
}
const auto *subresource_map = GetImageSubresourceLayoutMap(cb_state, img_barrier.image);
if (img_barrier.oldLayout == VK_IMAGE_LAYOUT_UNDEFINED) {
// TODO: Set memory invalid which is in mem_tracker currently
// Not sure if this needs to be in the ForRange traversal, pulling it out as it is currently invariant with
// subresource.
} else if (subresource_map && !QueueFamilyIsExternal(img_barrier.srcQueueFamilyIndex)) {
bool subres_skip = false;
LayoutUseCheckAndMessage layout_check(subresource_map);
VkImageSubresourceRange normalized_isr = NormalizeSubresourceRange(*image_state, img_barrier.subresourceRange);
// IncrementInterval skips over all the subresources that have the same state as we just checked, incrementing to
// the next "constant value" range
for (auto pos = subresource_map->Find(normalized_isr); !(pos.AtEnd()) && !subres_skip; pos.IncrementInterval()) {
const auto &value = *pos;
auto old_layout =
NormalizeSynchronization2Layout(img_barrier.subresourceRange.aspectMask, img_barrier.oldLayout);
if (!layout_check.Check(value.subresource, old_layout, value.current_layout, value.initial_layout)) {
const auto &vuid = GetImageBarrierVUID(loc, ImageError::kConflictingLayout);
subres_skip = LogError(cb_state->commandBuffer(), vuid,
"%s %s cannot transition the layout of aspect=%d level=%d layer=%d from %s when the "
"%s layout is %s.",
loc.Message().c_str(), report_data->FormatHandle(img_barrier.image).c_str(),
value.subresource.aspectMask, value.subresource.mipLevel,
value.subresource.arrayLayer, string_VkImageLayout(img_barrier.oldLayout),
layout_check.message, string_VkImageLayout(layout_check.layout));
}
}
skip |= subres_skip;
}
// checks color format and (single-plane or non-disjoint)
// if ycbcr extension is not supported then single-plane and non-disjoint are always both true
if ((FormatIsColor(image_format) == true) &&
((FormatIsMultiplane(image_format) == false) || (image_state->disjoint == false))) {
if (aspect_mask != VK_IMAGE_ASPECT_COLOR_BIT) {
auto error = device_extensions.vk_khr_sampler_ycbcr_conversion ? ImageError::kNotColorAspect
: ImageError::kNotColorAspectYcbcr;
const auto &vuid = GetImageBarrierVUID(loc, error);
skip |= LogError(img_barrier.image, vuid,
"%s references %s of format %s that must be only VK_IMAGE_ASPECT_COLOR_BIT, "
"but its aspectMask is 0x%" PRIx32 ".",
image_loc.Message().c_str(), report_data->FormatHandle(img_barrier.image).c_str(),
string_VkFormat(image_format), aspect_mask);
}
}
VkImageAspectFlags valid_disjoint_mask =
VK_IMAGE_ASPECT_PLANE_0_BIT | VK_IMAGE_ASPECT_PLANE_1_BIT | VK_IMAGE_ASPECT_PLANE_2_BIT | VK_IMAGE_ASPECT_COLOR_BIT;
if ((FormatIsMultiplane(image_format) == true) && (image_state->disjoint == true) &&
((aspect_mask & valid_disjoint_mask) == 0)) {
const auto &vuid = GetImageBarrierVUID(image_loc, ImageError::kBadMultiplanarAspect);
skip |= LogError(img_barrier.image, vuid,
"%s references %s of format %s has aspectMask (0x%" PRIx32
") but needs to include either an VK_IMAGE_ASPECT_PLANE_*_BIT or VK_IMAGE_ASPECT_COLOR_BIT.",
image_loc.Message().c_str(), report_data->FormatHandle(img_barrier.image).c_str(),
string_VkFormat(image_format), aspect_mask);
}
if ((FormatPlaneCount(image_format) == 2) && ((aspect_mask & VK_IMAGE_ASPECT_PLANE_2_BIT) != 0)) {
const auto &vuid = GetImageBarrierVUID(image_loc, ImageError::kBadPlaneCount);
skip |= LogError(img_barrier.image, vuid,
"%s references %s of format %s has only two planes but included "
"VK_IMAGE_ASPECT_PLANE_2_BIT in its aspectMask (0x%" PRIx32 ").",
image_loc.Message().c_str(), report_data->FormatHandle(img_barrier.image).c_str(),
string_VkFormat(image_format), aspect_mask);
}
}
}
return skip;
}
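// Warn when a queue family ownership transfer barrier (release or acquire) duplicates one already recorded in this
// command buffer's QFO transfer barrier sets.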
template <typename Barrier, typename TransferBarrier>
bool CoreChecks::ValidateQFOTransferBarrierUniqueness(const Location &loc, const CMD_BUFFER_STATE *cb_state, const Barrier &barrier,
const QFOTransferBarrierSets<TransferBarrier> &barrier_sets) const {
bool skip = false;
auto pool = cb_state->command_pool.get();
const char *handle_name = TransferBarrier::HandleName();
const char *transfer_type = nullptr;
if (!IsTransferOp(barrier)) {
return skip;
}
const TransferBarrier *barrier_record = nullptr;
if (TempIsReleaseOp<Barrier, true /* Assume IsTransfer */>(pool, barrier) &&
!QueueFamilyIsExternal(barrier.dstQueueFamilyIndex)) {
const auto found = barrier_sets.release.find(barrier);
if (found != barrier_sets.release.cend()) {
barrier_record = &(*found);
transfer_type = "releasing";
}
} else if (IsAcquireOp<Barrier, true /*Assume IsTransfer */>(pool, barrier) &&
!QueueFamilyIsExternal(barrier.srcQueueFamilyIndex)) {
const auto found = barrier_sets.acquire.find(barrier);
if (found != barrier_sets.acquire.cend()) {
barrier_record = &(*found);
transfer_type = "acquiring";
}
}
if (barrier_record != nullptr) {
skip |=
LogWarning(cb_state->commandBuffer(), TransferBarrier::ErrMsgDuplicateQFOInCB(),
"%s %s queue ownership of %s (%s), from srcQueueFamilyIndex %" PRIu32 " to dstQueueFamilyIndex %" PRIu32
" duplicates existing barrier recorded in this command buffer.",
loc.Message().c_str(), transfer_type, handle_name, report_data->FormatHandle(barrier_record->handle).c_str(),
barrier_record->srcQueueFamilyIndex, barrier_record->dstQueueFamilyIndex);
}
return skip;
}
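// Overloads mapping barrier structs (core and synchronization2 variants) to their typed handles and state objects,
// so the templated barrier-recording code below stays type-agnostic.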
VulkanTypedHandle BarrierTypedHandle(const VkImageMemoryBarrier &barrier) {
return VulkanTypedHandle(barrier.image, kVulkanObjectTypeImage);
}
VulkanTypedHandle BarrierTypedHandle(const VkImageMemoryBarrier2KHR &barrier) {
return VulkanTypedHandle(barrier.image, kVulkanObjectTypeImage);
}
const IMAGE_STATE *BarrierHandleState(const ValidationStateTracker &device_state, const VkImageMemoryBarrier &barrier) {
return device_state.GetImageState(barrier.image);
}
const IMAGE_STATE *BarrierHandleState(const ValidationStateTracker &device_state, const VkImageMemoryBarrier2KHR &barrier) {
return device_state.GetImageState(barrier.image);
}
VulkanTypedHandle BarrierTypedHandle(const VkBufferMemoryBarrier &barrier) {
return VulkanTypedHandle(barrier.buffer, kVulkanObjectTypeBuffer);
}
VulkanTypedHandle BarrierTypedHandle(const VkBufferMemoryBarrier2KHR &barrier) {
return VulkanTypedHandle(barrier.buffer, kVulkanObjectTypeBuffer);
}
const BUFFER_STATE *BarrierHandleState(const ValidationStateTracker &device_state, const VkBufferMemoryBarrier &barrier) {
return device_state.GetBufferState(barrier.buffer);
}
const BUFFER_STATE *BarrierHandleState(const ValidationStateTracker &device_state, const VkBufferMemoryBarrier2KHR &barrier) {
return device_state.GetBufferState(barrier.buffer);
}
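// Record a barrier into the command buffer's QFO transfer sets and, for ownership transfers of exclusively shared
// resources, enqueue a submit-time check of the queue families involved.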
template <typename Barrier, typename TransferBarrier>
void CoreChecks::RecordBarrierValidationInfo(const Location &loc, CMD_BUFFER_STATE *cb_state, const Barrier &barrier,
QFOTransferBarrierSets<TransferBarrier> &barrier_sets) {
auto pool = cb_state->command_pool.get();
if (IsTransferOp(barrier)) {
if (TempIsReleaseOp<Barrier, true /* Assume IsTransfer*/>(pool, barrier) &&
!QueueFamilyIsExternal(barrier.dstQueueFamilyIndex)) {
barrier_sets.release.emplace(barrier);
} else if (IsAcquireOp<Barrier, true /*Assume IsTransfer */>(pool, barrier) &&
!QueueFamilyIsExternal(barrier.srcQueueFamilyIndex)) {
barrier_sets.acquire.emplace(barrier);
}
}
// 7.7.4: If the values of srcQueueFamilyIndex and dstQueueFamilyIndex are equal, no ownership transfer is performed, and the
// barrier operates as if they were both set to VK_QUEUE_FAMILY_IGNORED.
const uint32_t src_queue_family = barrier.srcQueueFamilyIndex;
const uint32_t dst_queue_family = barrier.dstQueueFamilyIndex;
const bool is_ownership_transfer = src_queue_family != dst_queue_family;
if (is_ownership_transfer) {
// Only enqueue submit time check if it is needed. If more submit time checks are added, change the criteria
// TODO create a better named list, or rename the submit time lists to something that matches the broader usage...
auto handle_state = BarrierHandleState(*this, barrier);
bool mode_concurrent = handle_state ? handle_state->createInfo.sharingMode == VK_SHARING_MODE_CONCURRENT : false;
if (!mode_concurrent) {
const auto typed_handle = BarrierTypedHandle(barrier);
core_error::LocationCapture loc_capture(loc);
cb_state->queue_submit_functions.emplace_back(
[loc_capture, cb_state, typed_handle, src_queue_family, dst_queue_family](const ValidationStateTracker *device_data,
const QUEUE_STATE *queue_state) {
return ValidateConcurrentBarrierAtSubmit(loc_capture.Get(), device_data, queue_state, cb_state, typed_handle,
src_queue_family, dst_queue_family);
});
}
}
}
// Check if all barriers are of a given operation type.
template <typename Barrier, typename OpCheck>
bool AllTransferOp(const COMMAND_POOL_STATE *pool, OpCheck &op_check, uint32_t count, const Barrier *barriers) {
if (!pool) return false;
for (uint32_t b = 0; b < count; b++) {
if (!op_check(pool, barriers[b])) return false;
}
return true;
}
// Look at the barriers to see if they are all release or all acquire; the result impacts queue properties validation
template <typename BufBarrier, typename ImgBarrier>
BarrierOperationsType CoreChecks::ComputeBarrierOperationsType(const CMD_BUFFER_STATE *cb_state, uint32_t buffer_barrier_count,
const BufBarrier *buffer_barriers, uint32_t image_barrier_count,
const ImgBarrier *image_barriers) const {
auto pool = cb_state->command_pool.get();
BarrierOperationsType op_type = kGeneral;
// Look at the barrier details only if they exist
// Note: AllTransferOp returns true for count == 0
if ((buffer_barrier_count + image_barrier_count) != 0) {
if (AllTransferOp(pool, TempIsReleaseOp<BufBarrier>, buffer_barrier_count, buffer_barriers) &&
AllTransferOp(pool, TempIsReleaseOp<ImgBarrier>, image_barrier_count, image_barriers)) {
op_type = kAllRelease;
} else if (AllTransferOp(pool, IsAcquireOp<BufBarrier>, buffer_barrier_count, buffer_barriers) &&
AllTransferOp(pool, IsAcquireOp<ImgBarrier>, image_barrier_count, image_barriers)) {
op_type = kAllAcquire;
}
}
return op_type;
}
// explicitly instantiate so these can be used in core_validation.cpp
template BarrierOperationsType CoreChecks::ComputeBarrierOperationsType(const CMD_BUFFER_STATE *cb_state,
uint32_t buffer_barrier_count,
const VkBufferMemoryBarrier *buffer_barriers,
uint32_t image_barrier_count,
const VkImageMemoryBarrier *image_barriers) const;
template BarrierOperationsType CoreChecks::ComputeBarrierOperationsType(const CMD_BUFFER_STATE *cb_state,
uint32_t buffer_barrier_count,
const VkBufferMemoryBarrier2KHR *buffer_barriers,
uint32_t image_barrier_count,
const VkImageMemoryBarrier2KHR *image_barriers) const;
// Verify image barrier image state and that the image is consistent with FB image
template <typename ImgBarrier>
bool CoreChecks::ValidateImageBarrierAttachment(const Location &loc, CMD_BUFFER_STATE const *cb_state,
const FRAMEBUFFER_STATE *framebuffer, uint32_t active_subpass,
const safe_VkSubpassDescription2 &sub_desc, const VkRenderPass rp_handle,
const ImgBarrier &img_barrier, const CMD_BUFFER_STATE *primary_cb_state) const {
using sync_vuid_maps::GetImageBarrierVUID;
using sync_vuid_maps::ImageError;
bool skip = false;
const auto *fb_state = framebuffer;
assert(fb_state);
const auto img_bar_image = img_barrier.image;
bool image_match = false;
bool sub_image_found = false; // Do we find a corresponding subpass description
VkImageLayout sub_image_layout = VK_IMAGE_LAYOUT_UNDEFINED;
uint32_t attach_index = 0;
// Verify that a framebuffer image matches barrier image
const auto attachment_count = fb_state->createInfo.attachmentCount;
for (uint32_t attachment = 0; attachment < attachment_count; ++attachment) {
        auto view_state = primary_cb_state ? primary_cb_state->GetActiveAttachmentImageViewState(attachment)
                                           : cb_state->GetActiveAttachmentImageViewState(attachment);
if (view_state && (img_bar_image == view_state->create_info.image)) {
image_match = true;
attach_index = attachment;
break;
}
}
if (image_match) { // Make sure subpass is referring to matching attachment
if (sub_desc.pDepthStencilAttachment && sub_desc.pDepthStencilAttachment->attachment == attach_index) {
sub_image_layout = sub_desc.pDepthStencilAttachment->layout;
sub_image_found = true;
}
if (!sub_image_found && device_extensions.vk_khr_depth_stencil_resolve) {
const auto *resolve = LvlFindInChain<VkSubpassDescriptionDepthStencilResolve>(sub_desc.pNext);
if (resolve && resolve->pDepthStencilResolveAttachment &&
resolve->pDepthStencilResolveAttachment->attachment == attach_index) {
sub_image_layout = resolve->pDepthStencilResolveAttachment->layout;
sub_image_found = true;
}
}
if (!sub_image_found) {
for (uint32_t j = 0; j < sub_desc.colorAttachmentCount; ++j) {
if (sub_desc.pColorAttachments && sub_desc.pColorAttachments[j].attachment == attach_index) {
sub_image_layout = sub_desc.pColorAttachments[j].layout;
sub_image_found = true;
break;
}
if (!sub_image_found && sub_desc.pResolveAttachments &&
sub_desc.pResolveAttachments[j].attachment == attach_index) {
sub_image_layout = sub_desc.pResolveAttachments[j].layout;
sub_image_found = true;
break;
}
}
}
if (!sub_image_found) {
auto img_loc = loc.dot(Field::image);
const auto &vuid = GetImageBarrierVUID(img_loc, ImageError::kRenderPassMismatch);
skip |=
LogError(rp_handle, vuid,
"%s Barrier for %s is not referenced by the VkSubpassDescription for active subpass (%d) of current %s.",
img_loc.Message().c_str(), report_data->FormatHandle(img_bar_image).c_str(), active_subpass,
report_data->FormatHandle(rp_handle).c_str());
}
} else { // !image_match
auto img_loc = loc.dot(Field::image);
const auto &vuid = GetImageBarrierVUID(img_loc, ImageError::kRenderPassMismatch);
skip |= LogError(fb_state->framebuffer(), vuid, "%s Barrier for %s does not match an image from the current %s.",
img_loc.Message().c_str(), report_data->FormatHandle(img_bar_image).c_str(),
report_data->FormatHandle(fb_state->framebuffer()).c_str());
}
if (img_barrier.oldLayout != img_barrier.newLayout) {
auto layout_loc = loc.dot(Field::oldLayout);
const auto &vuid = GetImageBarrierVUID(layout_loc, ImageError::kRenderPassLayoutChange);
skip |= LogError(cb_state->commandBuffer(), vuid,
"%s As the Image Barrier for %s is being executed within a render pass instance, oldLayout must "
"equal newLayout yet they are %s and %s.",
layout_loc.Message().c_str(), report_data->FormatHandle(img_barrier.image).c_str(),
string_VkImageLayout(img_barrier.oldLayout), string_VkImageLayout(img_barrier.newLayout));
} else {
if (sub_image_found && sub_image_layout != img_barrier.oldLayout) {
LogObjectList objlist(rp_handle);
objlist.add(img_bar_image);
auto layout_loc = loc.dot(Field::oldLayout);
const auto &vuid = GetImageBarrierVUID(layout_loc, ImageError::kRenderPassLayoutChange);
skip |= LogError(objlist, vuid,
"%s Barrier for %s is referenced by the VkSubpassDescription for active "
"subpass (%d) of current %s as having layout %s, but image barrier has layout %s.",
layout_loc.Message().c_str(), report_data->FormatHandle(img_bar_image).c_str(), active_subpass,
report_data->FormatHandle(rp_handle).c_str(), string_VkImageLayout(sub_image_layout),
string_VkImageLayout(img_barrier.oldLayout));
}
}
return skip;
}
// explicitly instantiate so these can be used in core_validation.cpp
template bool CoreChecks::ValidateImageBarrierAttachment(const Location &loc, CMD_BUFFER_STATE const *cb_state,
const FRAMEBUFFER_STATE *framebuffer, uint32_t active_subpass,
const safe_VkSubpassDescription2 &sub_desc, const VkRenderPass rp_handle,
const VkImageMemoryBarrier &img_barrier,
const CMD_BUFFER_STATE *primary_cb_state) const;
template bool CoreChecks::ValidateImageBarrierAttachment(const Location &loc, CMD_BUFFER_STATE const *cb_state,
const FRAMEBUFFER_STATE *framebuffer, uint32_t active_subpass,
const safe_VkSubpassDescription2 &sub_desc, const VkRenderPass rp_handle,
const VkImageMemoryBarrier2KHR &img_barrier,
const CMD_BUFFER_STATE *primary_cb_state) const;
template <typename ImgBarrier>
void CoreChecks::EnqueueSubmitTimeValidateImageBarrierAttachment(const Location &loc, CMD_BUFFER_STATE *cb_state,
const ImgBarrier &barrier) {
// Secondary CBs can have null framebuffer so queue up validation in that case 'til FB is known
if ((cb_state->activeRenderPass) && (VK_NULL_HANDLE == cb_state->activeFramebuffer) &&
(VK_COMMAND_BUFFER_LEVEL_SECONDARY == cb_state->createInfo.level)) {
const auto active_subpass = cb_state->activeSubpass;
const auto rp_state = cb_state->activeRenderPass;
const auto &sub_desc = rp_state->createInfo.pSubpasses[active_subpass];
        // Secondary CB case w/o FB specified: delay validation
auto *this_ptr = this; // Required for older compilers with c++20 compatibility
core_error::LocationCapture loc_capture(loc);
const auto render_pass = rp_state->renderPass();
cb_state->cmd_execute_commands_functions.emplace_back(
[this_ptr, loc_capture, cb_state, active_subpass, sub_desc, render_pass, barrier](const CMD_BUFFER_STATE *primary_cb,
const FRAMEBUFFER_STATE *fb) {
return this_ptr->ValidateImageBarrierAttachment(loc_capture.Get(), cb_state, fb, active_subpass, sub_desc,
render_pass, barrier, primary_cb);
});
}
}
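// Record validation info for legacy (pre-synchronization2) buffer and image memory barriers of a command.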
void CoreChecks::RecordBarriers(Func func_name, CMD_BUFFER_STATE *cb_state, uint32_t bufferBarrierCount,
const VkBufferMemoryBarrier *pBufferMemBarriers, uint32_t imageMemBarrierCount,
const VkImageMemoryBarrier *pImageMemBarriers) {
for (uint32_t i = 0; i < bufferBarrierCount; i++) {
Location loc(func_name, Struct::VkBufferMemoryBarrier, Field::pBufferMemoryBarriers, i);
RecordBarrierValidationInfo(loc, cb_state, pBufferMemBarriers[i], cb_state->qfo_transfer_buffer_barriers);
}
for (uint32_t i = 0; i < imageMemBarrierCount; i++) {
Location loc(func_name, Struct::VkImageMemoryBarrier, Field::pImageMemoryBarriers, i);
const auto &img_barrier = pImageMemBarriers[i];
RecordBarrierValidationInfo(loc, cb_state, img_barrier, cb_state->qfo_transfer_image_barriers);
EnqueueSubmitTimeValidateImageBarrierAttachment(loc, cb_state, img_barrier);
}
}
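// Record validation info for the synchronization2 barriers carried in a VkDependencyInfoKHR.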
void CoreChecks::RecordBarriers(Func func_name, CMD_BUFFER_STATE *cb_state, const VkDependencyInfoKHR &dep_info) {
for (uint32_t i = 0; i < dep_info.bufferMemoryBarrierCount; i++) {
Location loc(func_name, Struct::VkBufferMemoryBarrier2KHR, Field::pBufferMemoryBarriers, i);
RecordBarrierValidationInfo(loc, cb_state, dep_info.pBufferMemoryBarriers[i], cb_state->qfo_transfer_buffer_barriers);
}
for (uint32_t i = 0; i < dep_info.imageMemoryBarrierCount; i++) {
Location loc(func_name, Struct::VkImageMemoryBarrier2KHR, Field::pImageMemoryBarriers, i);
const auto &img_barrier = dep_info.pImageMemoryBarriers[i];
RecordBarrierValidationInfo(loc, cb_state, img_barrier, cb_state->qfo_transfer_image_barriers);
EnqueueSubmitTimeValidateImageBarrierAttachment(loc, cb_state, img_barrier);
}
}
template <typename TransferBarrier, typename Scoreboard>
bool CoreChecks::ValidateAndUpdateQFOScoreboard(const debug_report_data *report_data, const CMD_BUFFER_STATE *cb_state,
const char *operation, const TransferBarrier &barrier,
Scoreboard *scoreboard) const {
// Record to the scoreboard or report that we have a duplication
bool skip = false;
auto inserted = scoreboard->emplace(barrier, cb_state);
if (!inserted.second && inserted.first->second != cb_state) {
        // This is a duplication (but don't report duplicates from the same CB, as we do that at record time)
LogObjectList objlist(cb_state->commandBuffer());
objlist.add(barrier.handle);
objlist.add(inserted.first->second->commandBuffer());
skip = LogWarning(objlist, TransferBarrier::ErrMsgDuplicateQFOInSubmit(),
"%s: %s %s queue ownership of %s (%s), from srcQueueFamilyIndex %" PRIu32
" to dstQueueFamilyIndex %" PRIu32 " duplicates existing barrier submitted in this batch from %s.",
"vkQueueSubmit()", TransferBarrier::BarrierName(), operation, TransferBarrier::HandleName(),
report_data->FormatHandle(barrier.handle).c_str(), barrier.srcQueueFamilyIndex,
barrier.dstQueueFamilyIndex, report_data->FormatHandle(inserted.first->second->commandBuffer()).c_str());
}
return skip;
}
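// At queue submit time, check this command buffer's QFO transfer barriers against the global map of pending
// releases: duplicate releases are warnings, and acquires with no matching pending release are errors.
// Illustrative pairing (hypothetical queue family indices): a release recorded with srcQueueFamilyIndex = 0 and
// dstQueueFamilyIndex = 1 must later be matched, on the destination queue, by an acquire with the same handle,
// subresource range, and queue family indices.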
template <typename TransferBarrier>
bool CoreChecks::ValidateQueuedQFOTransferBarriers(
const CMD_BUFFER_STATE *cb_state, QFOTransferCBScoreboards<TransferBarrier> *scoreboards,
const GlobalQFOTransferBarrierMap<TransferBarrier> &global_release_barriers) const {
bool skip = false;
const auto &cb_barriers = GetQFOBarrierSets(cb_state, TransferBarrier());
const char *barrier_name = TransferBarrier::BarrierName();
const char *handle_name = TransferBarrier::HandleName();
// No release should have an extant duplicate (WARNING)
for (const auto &release : cb_barriers.release) {
// Check the global pending release barriers
const auto set_it = global_release_barriers.find(release.handle);
if (set_it != global_release_barriers.cend()) {
const QFOTransferBarrierSet<TransferBarrier> &set_for_handle = set_it->second;
const auto found = set_for_handle.find(release);
if (found != set_for_handle.cend()) {
skip |= LogWarning(cb_state->commandBuffer(), TransferBarrier::ErrMsgDuplicateQFOSubmitted(),
"%s: %s releasing queue ownership of %s (%s), from srcQueueFamilyIndex %" PRIu32
" to dstQueueFamilyIndex %" PRIu32
" duplicates existing barrier queued for execution, without intervening acquire operation.",
"vkQueueSubmit()", barrier_name, handle_name, report_data->FormatHandle(found->handle).c_str(),
found->srcQueueFamilyIndex, found->dstQueueFamilyIndex);
}
}
skip |= ValidateAndUpdateQFOScoreboard(report_data, cb_state, "releasing", release, &scoreboards->release);
}
// Each acquire must have a matching release (ERROR)
for (const auto &acquire : cb_barriers.acquire) {
const auto set_it = global_release_barriers.find(acquire.handle);
bool matching_release_found = false;
if (set_it != global_release_barriers.cend()) {
const QFOTransferBarrierSet<TransferBarrier> &set_for_handle = set_it->second;
matching_release_found = set_for_handle.find(acquire) != set_for_handle.cend();
}
if (!matching_release_found) {
skip |= LogError(cb_state->commandBuffer(), TransferBarrier::ErrMsgMissingQFOReleaseInSubmit(),
"%s: in submitted command buffer %s acquiring ownership of %s (%s), from srcQueueFamilyIndex %" PRIu32
" to dstQueueFamilyIndex %" PRIu32 " has no matching release barrier queued for execution.",
"vkQueueSubmit()", barrier_name, handle_name, report_data->FormatHandle(acquire.handle).c_str(),
acquire.srcQueueFamilyIndex, acquire.dstQueueFamilyIndex);
}
skip |= ValidateAndUpdateQFOScoreboard(report_data, cb_state, "acquiring", acquire, &scoreboards->acquire);
}
return skip;
}
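// Validate both the image and buffer QFO transfer barriers recorded in this command buffer against the global
// pending-release maps.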
bool CoreChecks::ValidateQueuedQFOTransfers(const CMD_BUFFER_STATE *cb_state,
QFOTransferCBScoreboards<QFOImageTransferBarrier> *qfo_image_scoreboards,
QFOTransferCBScoreboards<QFOBufferTransferBarrier> *qfo_buffer_scoreboards) const {
bool skip = false;
skip |=
ValidateQueuedQFOTransferBarriers<QFOImageTransferBarrier>(cb_state, qfo_image_scoreboards, qfo_release_image_barrier_map);
skip |= ValidateQueuedQFOTransferBarriers<QFOBufferTransferBarrier>(cb_state, qfo_buffer_scoreboards,
qfo_release_buffer_barrier_map);
return skip;
}
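// Fold a submitted command buffer's QFO transfer barriers into the global map: releases become pending, and
// acquires consume (erase) the matching pending release.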
template <typename TransferBarrier>
void RecordQueuedQFOTransferBarriers(QFOTransferBarrierSets<TransferBarrier> &cb_barriers,
GlobalQFOTransferBarrierMap<TransferBarrier> &global_release_barriers) {
// Add release barriers from this submit to the global map
for (const auto &release : cb_barriers.release) {
// the global barrier list is mapped by resource handle to allow cleanup on resource destruction
// NOTE: We're using [] because creation of a Set is a needed side effect for new handles
global_release_barriers[release.handle].insert(release);
}
// Erase acquired barriers from this submit from the global map -- essentially marking releases as consumed
for (const auto &acquire : cb_barriers.acquire) {
// NOTE: We're not using [] because we don't want to create entries for missing releases
auto set_it = global_release_barriers.find(acquire.handle);
if (set_it != global_release_barriers.end()) {
QFOTransferBarrierSet<TransferBarrier> &set_for_handle = set_it->second;
set_for_handle.erase(acquire);
if (set_for_handle.size() == 0) { // Clean up empty sets
global_release_barriers.erase(set_it);
}
}
}
}
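// Apply a submitted command buffer's image and buffer QFO transfer barriers to the global maps.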
void CoreChecks::RecordQueuedQFOTransfers(CMD_BUFFER_STATE *cb_state) {
RecordQueuedQFOTransferBarriers<QFOImageTransferBarrier>(cb_state->qfo_transfer_image_barriers, qfo_release_image_barrier_map);
RecordQueuedQFOTransferBarriers<QFOBufferTransferBarrier>(cb_state->qfo_transfer_buffer_barriers,
qfo_release_buffer_barrier_map);
}
template <typename ImgBarrier>
void CoreChecks::TransitionImageLayouts(CMD_BUFFER_STATE *cb_state, uint32_t barrier_count, const ImgBarrier *barriers) {
// For ownership transfers, the barrier is specified twice; as a release
// operation on the yielding queue family, and as an acquire operation
// on the acquiring queue family. This barrier may also include a layout
// transition, which occurs 'between' the two operations. For validation
// purposes it doesn't seem important which side performs the layout
// transition, but it must not be performed twice. We'll arbitrarily
// choose to perform it as part of the acquire operation.
//
// However, we still need to record initial layout for the "initial layout" validation
for (uint32_t i = 0; i < barrier_count; i++) {
const auto &mem_barrier = barriers[i];
const bool is_release_op = IsReleaseOp(cb_state, mem_barrier);
auto *image_state = GetImageState(mem_barrier.image);
if (image_state) {
RecordTransitionImageLayout(cb_state, image_state, mem_barrier, is_release_op);
}
}
}
// explicitly instantiate this template so it can be used in core_validation.cpp
template void CoreChecks::TransitionImageLayouts(CMD_BUFFER_STATE *cb_state, uint32_t barrier_count,
const VkImageMemoryBarrier *barrier);
template void CoreChecks::TransitionImageLayouts(CMD_BUFFER_STATE *cb_state, uint32_t barrier_count,
const VkImageMemoryBarrier2KHR *barrier);
VkImageLayout NormalizeSynchronization2Layout(const VkImageAspectFlags aspect_mask, VkImageLayout layout);
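// Record the layout transition described by one image memory barrier: a release operation only records the initial
// (old) layout, while an acquire or non-QFO barrier applies the new layout. With synchronization2 enabled,
// oldLayout == newLayout means no transition is performed.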
template <typename ImgBarrier>
void CoreChecks::RecordTransitionImageLayout(CMD_BUFFER_STATE *cb_state, const IMAGE_STATE *image_state,
const ImgBarrier &mem_barrier, bool is_release_op) {
if (enabled_features.synchronization2_features.synchronization2) {
if (mem_barrier.oldLayout == mem_barrier.newLayout) {
return;
}
}
VkImageSubresourceRange normalized_isr = NormalizeSubresourceRange(*image_state, mem_barrier.subresourceRange);
const auto &image_create_info = image_state->createInfo;
// Special case for 3D images with VK_IMAGE_CREATE_2D_ARRAY_COMPATIBLE_BIT flag bit, where <extent.depth> and
// <arrayLayers> can potentially alias. When recording layout for the entire image, pre-emptively record layouts
// for all (potential) layer sub_resources.
if (0 != (image_create_info.flags & VK_IMAGE_CREATE_2D_ARRAY_COMPATIBLE_BIT)) {
normalized_isr.baseArrayLayer = 0;
normalized_isr.layerCount = image_create_info.extent.depth; // Treat each depth slice as a layer subresource
}
VkImageLayout initial_layout = NormalizeSynchronization2Layout(mem_barrier.subresourceRange.aspectMask, mem_barrier.oldLayout);
VkImageLayout new_layout = NormalizeSynchronization2Layout(mem_barrier.subresourceRange.aspectMask, mem_barrier.newLayout);
// Layout transitions in external instance are not tracked, so don't validate initial layout.
if (QueueFamilyIsExternal(mem_barrier.srcQueueFamilyIndex)) {
initial_layout = VK_IMAGE_LAYOUT_UNDEFINED;
}
if (is_release_op) {
SetImageInitialLayout(cb_state, *image_state, normalized_isr, initial_layout);
} else {
SetImageLayout(cb_state, *image_state, normalized_isr, new_layout, initial_layout);
}
}
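// Verify that the layout the caller specifies for an image subresource range matches the layout tracked in the
// command buffer, and that it is compatible with the optimal layout expected for the operation.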
bool CoreChecks::VerifyImageLayout(const CMD_BUFFER_STATE *cb_node, const IMAGE_STATE *image_state,
const VkImageSubresourceRange &range, VkImageAspectFlags aspect_mask,
VkImageLayout explicit_layout, VkImageLayout optimal_layout, const char *caller,
const char *layout_invalid_msg_code, const char *layout_mismatch_msg_code, bool *error) const {
if (disabled[image_layout_validation]) return false;
assert(cb_node);
assert(image_state);
const auto image = image_state->image();
bool skip = false;
const auto *subresource_map = GetImageSubresourceLayoutMap(cb_node, image);
if (subresource_map) {
bool subres_skip = false;
LayoutUseCheckAndMessage layout_check(subresource_map, aspect_mask);
// IncrementInterval skips over all the subresources that have the same state as we just checked, incrementing to
// the next "constant value" range
for (auto pos = subresource_map->Find(range); !(pos.AtEnd()) && !subres_skip; pos.IncrementInterval()) {
if (!layout_check.Check(pos->subresource, explicit_layout, pos->current_layout, pos->initial_layout)) {
*error = true;
subres_skip |= LogError(cb_node->commandBuffer(), layout_mismatch_msg_code,
"%s: Cannot use %s (layer=%u mip=%u) with specific layout %s that doesn't match the "
"%s layout %s.",
caller, report_data->FormatHandle(image).c_str(), pos->subresource.arrayLayer,
pos->subresource.mipLevel, string_VkImageLayout(explicit_layout), layout_check.message,
string_VkImageLayout(layout_check.layout));
}
}
skip |= subres_skip;
}
// If optimal_layout is not UNDEFINED, check that layout matches optimal for this case
if ((VK_IMAGE_LAYOUT_UNDEFINED != optimal_layout) && (explicit_layout != optimal_layout)) {
if (VK_IMAGE_LAYOUT_GENERAL == explicit_layout) {
if (image_state->createInfo.tiling != VK_IMAGE_TILING_LINEAR) {
// LAYOUT_GENERAL is allowed, but may not be performance optimal, flag as perf warning.
skip |= LogPerformanceWarning(cb_node->commandBuffer(), kVUID_Core_DrawState_InvalidImageLayout,
"%s: For optimal performance %s layout should be %s instead of GENERAL.", caller,
report_data->FormatHandle(image).c_str(), string_VkImageLayout(optimal_layout));
}
} else if (device_extensions.vk_khr_shared_presentable_image) {
if (image_state->shared_presentable) {
if (VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR != explicit_layout) {
skip |=
LogError(device, layout_invalid_msg_code,
"%s: Layout for shared presentable image is %s but must be VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR.",
                                 caller, string_VkImageLayout(explicit_layout));
}
}
} else {
*error = true;
skip |= LogError(cb_node->commandBuffer(), layout_invalid_msg_code,
"%s: Layout for %s is %s but can only be %s or VK_IMAGE_LAYOUT_GENERAL.", caller,
report_data->FormatHandle(image).c_str(), string_VkImageLayout(explicit_layout),
string_VkImageLayout(optimal_layout));
}
}
return skip;
}
bool CoreChecks::VerifyImageLayout(const CMD_BUFFER_STATE *cb_node, const IMAGE_STATE *image_state,
const VkImageSubresourceLayers &subLayers, VkImageLayout explicit_layout,
VkImageLayout optimal_layout, const char *caller, const char *layout_invalid_msg_code,
const char *layout_mismatch_msg_code, bool *error) const {
return VerifyImageLayout(cb_node, image_state, RangeFromLayers(subLayers), explicit_layout, optimal_layout, caller,
layout_invalid_msg_code, layout_mismatch_msg_code, error);
}
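// At the end of a render pass instance, transition each attachment view to its declared finalLayout
// (and stencilFinalLayout when a separate one is chained).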
void CoreChecks::TransitionFinalSubpassLayouts(CMD_BUFFER_STATE *pCB, const VkRenderPassBeginInfo *pRenderPassBegin,
FRAMEBUFFER_STATE *framebuffer_state) {
auto render_pass = GetRenderPassState(pRenderPassBegin->renderPass);
if (!render_pass) return;
const VkRenderPassCreateInfo2 *render_pass_info = render_pass->createInfo.ptr();
if (framebuffer_state) {
for (uint32_t i = 0; i < render_pass_info->attachmentCount; ++i) {
auto *view_state = pCB->GetActiveAttachmentImageViewState(i);
if (view_state) {
VkImageLayout stencil_layout = kInvalidLayout;
const auto *attachment_description_stencil_layout =
LvlFindInChain<VkAttachmentDescriptionStencilLayout>(render_pass_info->pAttachments[i].pNext);
if (attachment_description_stencil_layout) {
stencil_layout = attachment_description_stencil_layout->stencilFinalLayout;
}
SetImageViewLayout(pCB, *view_state, render_pass_info->pAttachments[i].finalLayout, stencil_layout);
}
}
}
}
#ifdef VK_USE_PLATFORM_ANDROID_KHR
// Android-specific validation that uses types defined only with VK_USE_PLATFORM_ANDROID_KHR
// This could also move into a separate core_validation_android.cpp file... ?
//
// AHB-specific validation within non-AHB APIs
//
bool CoreChecks::ValidateCreateImageANDROID(const debug_report_data *report_data, const VkImageCreateInfo *create_info) const {
bool skip = false;
const VkExternalFormatANDROID *ext_fmt_android = LvlFindInChain<VkExternalFormatANDROID>(create_info->pNext);
if (ext_fmt_android) {
if (0 != ext_fmt_android->externalFormat) {
if (VK_FORMAT_UNDEFINED != create_info->format) {
skip |=
LogError(device, "VUID-VkImageCreateInfo-pNext-01974",
"vkCreateImage(): VkImageCreateInfo struct has a chained VkExternalFormatANDROID struct with non-zero "
"externalFormat, but the VkImageCreateInfo's format is not VK_FORMAT_UNDEFINED.");
}
if (0 != (VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT & create_info->flags)) {
skip |= LogError(device, "VUID-VkImageCreateInfo-pNext-02396",
"vkCreateImage(): VkImageCreateInfo struct has a chained VkExternalFormatANDROID struct with "
"non-zero externalFormat, but flags include VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT.");
}
if (0 != (~VK_IMAGE_USAGE_SAMPLED_BIT & create_info->usage)) {
skip |= LogError(device, "VUID-VkImageCreateInfo-pNext-02397",
"vkCreateImage(): VkImageCreateInfo struct has a chained VkExternalFormatANDROID struct with "
"non-zero externalFormat, but usage includes bits (0x%" PRIx32 ") other than VK_IMAGE_USAGE_SAMPLED_BIT.",
create_info->usage);
}
if (VK_IMAGE_TILING_OPTIMAL != create_info->tiling) {
skip |= LogError(device, "VUID-VkImageCreateInfo-pNext-02398",
"vkCreateImage(): VkImageCreateInfo struct has a chained VkExternalFormatANDROID struct with "
"non-zero externalFormat, but layout is not VK_IMAGE_TILING_OPTIMAL.");
}
}
if ((0 != ext_fmt_android->externalFormat) &&
(ahb_ext_formats_map.find(ext_fmt_android->externalFormat) == ahb_ext_formats_map.end())) {
skip |= LogError(device, "VUID-VkExternalFormatANDROID-externalFormat-01894",
"vkCreateImage(): Chained VkExternalFormatANDROID struct contains a non-zero externalFormat (%" PRIu64
") which has "
"not been previously retrieved by vkGetAndroidHardwareBufferPropertiesANDROID().",
ext_fmt_android->externalFormat);
}
}
if ((nullptr == ext_fmt_android) || (0 == ext_fmt_android->externalFormat)) {
if (VK_FORMAT_UNDEFINED == create_info->format) {
skip |=
LogError(device, "VUID-VkImageCreateInfo-pNext-01975",
"vkCreateImage(): VkImageCreateInfo struct's format is VK_FORMAT_UNDEFINED, but either does not have a "
"chained VkExternalFormatANDROID struct or the struct exists but has an externalFormat of 0.");
}
}
const VkExternalMemoryImageCreateInfo *emici = LvlFindInChain<VkExternalMemoryImageCreateInfo>(create_info->pNext);
if (emici && (emici->handleTypes & VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID)) {
if (create_info->imageType != VK_IMAGE_TYPE_2D) {
skip |=
LogError(device, "VUID-VkImageCreateInfo-pNext-02393",
"vkCreateImage(): VkImageCreateInfo struct with imageType %s has chained VkExternalMemoryImageCreateInfo "
"struct with handleType VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID.",
string_VkImageType(create_info->imageType));
}
if ((create_info->mipLevels != 1) && (create_info->mipLevels != FullMipChainLevels(create_info->extent))) {
skip |= LogError(device, "VUID-VkImageCreateInfo-pNext-02394",
"vkCreateImage(): VkImageCreateInfo struct with chained VkExternalMemoryImageCreateInfo struct of "
"handleType VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID "
"specifies mipLevels = %" PRId32 " (full chain mipLevels are %" PRId32 ").",
create_info->mipLevels, FullMipChainLevels(create_info->extent));
}
}
return skip;
}
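// For images carrying an Android hardware buffer external format, the image view must use VK_FORMAT_UNDEFINED,
// chain a VkSamplerYcbcrConversion created with the same external format, and use identity component swizzles.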
bool CoreChecks::ValidateCreateImageViewANDROID(const VkImageViewCreateInfo *create_info) const {
bool skip = false;
const IMAGE_STATE *image_state = GetImageState(create_info->image);
    if (image_state && image_state->has_ahb_format) {
if (VK_FORMAT_UNDEFINED != create_info->format) {
skip |= LogError(create_info->image, "VUID-VkImageViewCreateInfo-image-02399",
"vkCreateImageView(): VkImageViewCreateInfo struct has a chained VkExternalFormatANDROID struct, but "
"format member is %s and must be VK_FORMAT_UNDEFINED.",
string_VkFormat(create_info->format));
}
// Chain must include a compatible ycbcr conversion
bool conv_found = false;
uint64_t external_format = 0;
const VkSamplerYcbcrConversionInfo *ycbcr_conv_info = LvlFindInChain<VkSamplerYcbcrConversionInfo>(create_info->pNext);
if (ycbcr_conv_info != nullptr) {
VkSamplerYcbcrConversion conv_handle = ycbcr_conv_info->conversion;
if (ycbcr_conversion_ahb_fmt_map.find(conv_handle) != ycbcr_conversion_ahb_fmt_map.end()) {
conv_found = true;
external_format = ycbcr_conversion_ahb_fmt_map.at(conv_handle);
}
}
if ((!conv_found) || (external_format != image_state->ahb_format)) {
skip |= LogError(create_info->image, "VUID-VkImageViewCreateInfo-image-02400",
"vkCreateImageView(): VkImageViewCreateInfo struct has a chained VkExternalFormatANDROID struct with "
"an externalFormat (%" PRIu64
") but needs a chained VkSamplerYcbcrConversionInfo struct with a VkSamplerYcbcrConversion created "
"with the same external format.",
image_state->ahb_format);
}
// Errors in create_info swizzles
if (IsIdentitySwizzle(create_info->components) == false) {
skip |= LogError(
create_info->image, "VUID-VkImageViewCreateInfo-image-02401",
"vkCreateImageView(): VkImageViewCreateInfo struct has a chained VkExternalFormatANDROID struct, but "
"includes one or more non-identity component swizzles, r swizzle = %s, g swizzle = %s, b swizzle = %s, a swizzle "
"= %s.",
string_VkComponentSwizzle(create_info->components.r), string_VkComponentSwizzle(create_info->components.g),
string_VkComponentSwizzle(create_info->components.b), string_VkComponentSwizzle(create_info->components.a));
}
}
return skip;
}
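// An AHB-backed image must be bound to memory before vkGetImageSubresourceLayout may be called on it.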
bool CoreChecks::ValidateGetImageSubresourceLayoutANDROID(const VkImage image) const {
bool skip = false;
const IMAGE_STATE *image_state = GetImageState(image);
if (image_state != nullptr) {
if (image_state->external_ahb && (0 == image_state->GetBoundMemory().size())) {
skip |= LogError(image, "VUID-vkGetImageSubresourceLayout-image-01895",
"vkGetImageSubresourceLayout(): Attempt to query layout from an image created with "
"VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID handleType which has not yet been "
"bound to memory.");
}
}
return skip;
}
#else
bool CoreChecks::ValidateCreateImageANDROID(const debug_report_data *report_data, const VkImageCreateInfo *create_info) const {
return false;
}
bool CoreChecks::ValidateCreateImageViewANDROID(const VkImageViewCreateInfo *create_info) const { return false; }
bool CoreChecks::ValidateGetImageSubresourceLayoutANDROID(const VkImage image) const { return false; }
#endif // VK_USE_PLATFORM_ANDROID_KHR
bool CoreChecks::ValidateImageFormatFeatures(const VkImageCreateInfo *pCreateInfo) const {
bool skip = false;
// validates based on imageCreateFormatFeatures from vkspec.html#resources-image-creation-limits
VkFormatFeatureFlags tiling_features = VK_FORMAT_FEATURE_FLAG_BITS_MAX_ENUM;
const VkImageTiling image_tiling = pCreateInfo->tiling;
const VkFormat image_format = pCreateInfo->format;
if (image_format == VK_FORMAT_UNDEFINED) {
        // VU 01975 states format can't be undefined unless an Android externalFormat is used
#ifdef VK_USE_PLATFORM_ANDROID_KHR
const VkExternalFormatANDROID *ext_fmt_android = LvlFindInChain<VkExternalFormatANDROID>(pCreateInfo->pNext);
if ((image_tiling == VK_IMAGE_TILING_OPTIMAL) && (ext_fmt_android != nullptr) && (0 != ext_fmt_android->externalFormat)) {
auto it = ahb_ext_formats_map.find(ext_fmt_android->externalFormat);
if (it != ahb_ext_formats_map.end()) {
tiling_features = it->second;
}
}
#endif
} else if (image_tiling == VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT) {
uint64_t drm_format_modifier = 0;
const VkImageDrmFormatModifierExplicitCreateInfoEXT *drm_explicit =
LvlFindInChain<VkImageDrmFormatModifierExplicitCreateInfoEXT>(pCreateInfo->pNext);
const VkImageDrmFormatModifierListCreateInfoEXT *drm_implicit =
LvlFindInChain<VkImageDrmFormatModifierListCreateInfoEXT>(pCreateInfo->pNext);
if (drm_explicit != nullptr) {
drm_format_modifier = drm_explicit->drmFormatModifier;
} else {
            // VUID 02261 makes sure it is only explicit or implicit (enforced in parameter checking)
assert(drm_implicit != nullptr);
for (uint32_t i = 0; i < drm_implicit->drmFormatModifierCount; i++) {
drm_format_modifier |= drm_implicit->pDrmFormatModifiers[i];
}
}
VkFormatProperties2 format_properties_2 = {VK_STRUCTURE_TYPE_FORMAT_PROPERTIES_2, nullptr};
VkDrmFormatModifierPropertiesListEXT drm_properties_list = {VK_STRUCTURE_TYPE_DRM_FORMAT_MODIFIER_PROPERTIES_LIST_EXT,
nullptr};
format_properties_2.pNext = (void *)&drm_properties_list;
DispatchGetPhysicalDeviceFormatProperties2(physical_device, image_format, &format_properties_2);
std::vector<VkDrmFormatModifierPropertiesEXT> drm_properties;
drm_properties.resize(drm_properties_list.drmFormatModifierCount);
        drm_properties_list.pDrmFormatModifierProperties = drm_properties.data();
DispatchGetPhysicalDeviceFormatProperties2(physical_device, image_format, &format_properties_2);
for (uint32_t i = 0; i < drm_properties_list.drmFormatModifierCount; i++) {
if ((drm_properties_list.pDrmFormatModifierProperties[i].drmFormatModifier & drm_format_modifier) != 0) {
tiling_features |= drm_properties_list.pDrmFormatModifierProperties[i].drmFormatModifierTilingFeatures;
}
}
} else {
VkFormatProperties format_properties = GetPDFormatProperties(image_format);
tiling_features = (image_tiling == VK_IMAGE_TILING_LINEAR) ? format_properties.linearTilingFeatures
: format_properties.optimalTilingFeatures;
}
// Lack of disjoint format feature support while using the flag
if (FormatIsMultiplane(image_format) && ((pCreateInfo->flags & VK_IMAGE_CREATE_DISJOINT_BIT) != 0) &&
((tiling_features & VK_FORMAT_FEATURE_DISJOINT_BIT) == 0)) {
skip |= LogError(device, "VUID-VkImageCreateInfo-imageCreateFormatFeatures-02260",
"vkCreateImage(): can't use VK_IMAGE_CREATE_DISJOINT_BIT because %s doesn't support "
"VK_FORMAT_FEATURE_DISJOINT_BIT based on imageCreateFormatFeatures.",
string_VkFormat(pCreateInfo->format));
}
return skip;
}
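// vkCreateImage validation: run the Android external-format checks when that extension is enabled, then validate
// the creation parameters against device limits and format features.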
bool CoreChecks::PreCallValidateCreateImage(VkDevice device, const VkImageCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkImage *pImage) const {
bool skip = false;
if (device_extensions.vk_android_external_memory_android_hardware_buffer) {
skip |= ValidateCreateImageANDROID(report_data, pCreateInfo);
} else { // These checks are omitted or replaced when Android HW Buffer extension is active
if (pCreateInfo->format == VK_FORMAT_UNDEFINED) {
return LogError(device, "VUID-VkImageCreateInfo-format-00943",
"vkCreateImage(): VkFormat for image must not be VK_FORMAT_UNDEFINED.");
}
}
if (pCreateInfo->flags & VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT) {
if (VK_IMAGE_TYPE_2D != pCreateInfo->imageType) {
skip |= LogError(device, "VUID-VkImageCreateInfo-flags-00949",
"vkCreateImage(): Image type must be VK_IMAGE_TYPE_2D when VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT "
"flag bit is set");
}
if ((pCreateInfo->extent.width != pCreateInfo->extent.height) || (pCreateInfo->arrayLayers < 6)) {
skip |= LogError(device, "VUID-VkImageCreateInfo-imageType-00954",
"vkCreateImage(): If VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT flag bit is set, width (%d) must equal "
"height (%d) and arrayLayers (%d) must be >= 6.",
pCreateInfo->extent.width, pCreateInfo->extent.height, pCreateInfo->arrayLayers);
}
}
const VkPhysicalDeviceLimits *device_limits = &phys_dev_props.limits;
VkImageUsageFlags attach_flags = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT |
VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT | VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT;
if ((pCreateInfo->usage & attach_flags) && (pCreateInfo->extent.width > device_limits->maxFramebufferWidth)) {
skip |= LogError(device, "VUID-VkImageCreateInfo-usage-00964",
"vkCreateImage(): Image usage flags include a frame buffer attachment bit and image width (%u) exceeds "
"device maxFramebufferWidth (%u).",
pCreateInfo->extent.width, device_limits->maxFramebufferWidth);
}
if ((pCreateInfo->usage & attach_flags) && (pCreateInfo->extent.height > device_limits->maxFramebufferHeight)) {
skip |= LogError(device, "VUID-VkImageCreateInfo-usage-00965",
"vkCreateImage(): Image usage flags include a frame buffer attachment bit and image height (%u) exceeds "
"device maxFramebufferHeight (%u).",
pCreateInfo->extent.height, device_limits->maxFramebufferHeight);
}
if (device_extensions.vk_ext_fragment_density_map || device_extensions.vk_ext_fragment_density_map_2) {
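        // A fragment density map is sampled at minFragmentDensityTexelSize granularity, so its largest allowed extent is the
        // framebuffer limit divided by that texel size, rounded up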
uint32_t ceiling_width = static_cast<uint32_t>(ceil(
static_cast<float>(device_limits->maxFramebufferWidth) /
std::max(static_cast<float>(phys_dev_ext_props.fragment_density_map_props.minFragmentDensityTexelSize.width), 1.0f)));
if ((pCreateInfo->usage & VK_IMAGE_USAGE_FRAGMENT_DENSITY_MAP_BIT_EXT) && (pCreateInfo->extent.width > ceiling_width)) {
skip |=
LogError(device, "VUID-VkImageCreateInfo-usage-02559",
"vkCreateImage(): Image usage flags include a fragment density map bit and image width (%u) exceeds the "
"ceiling of device "
"maxFramebufferWidth (%u) / minFragmentDensityTexelSize.width (%u). The ceiling value: %u",
pCreateInfo->extent.width, device_limits->maxFramebufferWidth,
phys_dev_ext_props.fragment_density_map_props.minFragmentDensityTexelSize.width, ceiling_width);
}
uint32_t ceiling_height = static_cast<uint32_t>(ceil(
static_cast<float>(device_limits->maxFramebufferHeight) /
std::max(static_cast<float>(phys_dev_ext_props.fragment_density_map_props.minFragmentDensityTexelSize.height), 1.0f)));
if ((pCreateInfo->usage & VK_IMAGE_USAGE_FRAGMENT_DENSITY_MAP_BIT_EXT) && (pCreateInfo->extent.height > ceiling_height)) {
skip |=
LogError(device, "VUID-VkImageCreateInfo-usage-02560",
"vkCreateImage(): Image usage flags include a fragment density map bit and image height (%u) exceeds the "
"ceiling of device "
"maxFramebufferHeight (%u) / minFragmentDensityTexelSize.height (%u). The ceiling value: %u",
pCreateInfo->extent.height, device_limits->maxFramebufferHeight,
phys_dev_ext_props.fragment_density_map_props.minFragmentDensityTexelSize.height, ceiling_height);
}
}
VkImageFormatProperties format_limits = {};
VkResult result = VK_SUCCESS;
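    // Image format limits come from vkGetPhysicalDeviceImageFormatProperties, except for DRM format modifier tiling, which
    // must be queried per-modifier through vkGetPhysicalDeviceImageFormatProperties2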
if (pCreateInfo->tiling != VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT) {
result = DispatchGetPhysicalDeviceImageFormatProperties(physical_device, pCreateInfo->format, pCreateInfo->imageType,
pCreateInfo->tiling, pCreateInfo->usage, pCreateInfo->flags,
&format_limits);
} else {
auto modifier_list = LvlFindInChain<VkImageDrmFormatModifierListCreateInfoEXT>(pCreateInfo->pNext);
auto explicit_modifier = LvlFindInChain<VkImageDrmFormatModifierExplicitCreateInfoEXT>(pCreateInfo->pNext);
if (modifier_list) {
for (uint32_t i = 0; i < modifier_list->drmFormatModifierCount; i++) {
auto drm_format_modifier = LvlInitStruct<VkPhysicalDeviceImageDrmFormatModifierInfoEXT>();
drm_format_modifier.drmFormatModifier = modifier_list->pDrmFormatModifiers[i];
auto image_format_info = LvlInitStruct<VkPhysicalDeviceImageFormatInfo2>(&drm_format_modifier);
image_format_info.type = pCreateInfo->imageType;
image_format_info.format = pCreateInfo->format;
image_format_info.tiling = pCreateInfo->tiling;
image_format_info.usage = pCreateInfo->usage;
image_format_info.flags = pCreateInfo->flags;
auto image_format_properties = LvlInitStruct<VkImageFormatProperties2>();
result =
DispatchGetPhysicalDeviceImageFormatProperties2(physical_device, &image_format_info, &image_format_properties);
format_limits = image_format_properties.imageFormatProperties;
/* The application gives a list of modifier and the driver
* selects one. If one is wrong, stop there.
*/
if (result != VK_SUCCESS) break;
}
} else if (explicit_modifier) {
auto drm_format_modifier = LvlInitStruct<VkPhysicalDeviceImageDrmFormatModifierInfoEXT>();
drm_format_modifier.drmFormatModifier = explicit_modifier->drmFormatModifier;
auto image_format_info = LvlInitStruct<VkPhysicalDeviceImageFormatInfo2>(&drm_format_modifier);
image_format_info.type = pCreateInfo->imageType;
image_format_info.format = pCreateInfo->format;
image_format_info.tiling = pCreateInfo->tiling;
image_format_info.usage = pCreateInfo->usage;
image_format_info.flags = pCreateInfo->flags;
auto image_format_properties = LvlInitStruct<VkImageFormatProperties2>();
result = DispatchGetPhysicalDeviceImageFormatProperties2(physical_device, &image_format_info, &image_format_properties);
format_limits = image_format_properties.imageFormatProperties;
}
}
    // 1. The only success code for vkGetPhysicalDeviceImageFormatProperties[2] is VK_SUCCESS
    // 2. If the call returns an error, then "imageCreateImageFormatPropertiesList" is defined to be the empty list
    // 3. All values in 02251 are undefined if "imageCreateImageFormatPropertiesList" is empty
if (result != VK_SUCCESS) {
        // Images using an Android external format always have an "imageCreateImageFormatPropertiesList", so skip the error for them
#ifdef VK_USE_PLATFORM_ANDROID_KHR
if (!LvlFindInChain<VkExternalFormatANDROID>(pCreateInfo->pNext)) {
#endif // VK_USE_PLATFORM_ANDROID_KHR
skip |= LogError(device, "VUID-VkImageCreateInfo-imageCreateMaxMipLevels-02251",
"vkCreateImage(): Format %s is not supported for this combination of parameters and "
"VkGetPhysicalDeviceImageFormatProperties returned back %s.",
string_VkFormat(pCreateInfo->format), string_VkResult(result));
#ifdef VK_USE_PLATFORM_ANDROID_KHR
}
#endif // VK_USE_PLATFORM_ANDROID_KHR
} else {
if (pCreateInfo->mipLevels > format_limits.maxMipLevels) {
const char *format_string = string_VkFormat(pCreateInfo->format);
skip |= LogError(device, "VUID-VkImageCreateInfo-mipLevels-02255",
"vkCreateImage(): Image mip levels=%d exceed image format maxMipLevels=%d for format %s.",
pCreateInfo->mipLevels, format_limits.maxMipLevels, format_string);
}
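        // Estimate the total resource size from the texel count and per-texel size; because this is only an approximation,
        // exceeding maxResourceSize is reported as a warning rather than an error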
uint64_t texel_count = static_cast<uint64_t>(pCreateInfo->extent.width) *
static_cast<uint64_t>(pCreateInfo->extent.height) *
static_cast<uint64_t>(pCreateInfo->extent.depth) * static_cast<uint64_t>(pCreateInfo->arrayLayers) *
static_cast<uint64_t>(pCreateInfo->samples);
uint64_t total_size =
static_cast<uint64_t>(std::ceil(FormatTexelSize(pCreateInfo->format) * static_cast<double>(texel_count)));
// Round up to imageGranularity boundary
VkDeviceSize image_granularity = phys_dev_props.limits.bufferImageGranularity;
uint64_t ig_mask = image_granularity - 1;
total_size = (total_size + ig_mask) & ~ig_mask;
if (total_size > format_limits.maxResourceSize) {
skip |= LogWarning(device, kVUID_Core_Image_InvalidFormatLimitsViolation,
"vkCreateImage(): resource size exceeds allowable maximum Image resource size = 0x%" PRIxLEAST64
", maximum resource size = 0x%" PRIxLEAST64 " ",
total_size, format_limits.maxResourceSize);
}
if (pCreateInfo->arrayLayers > format_limits.maxArrayLayers) {
skip |= LogError(device, "VUID-VkImageCreateInfo-arrayLayers-02256",
"vkCreateImage(): arrayLayers=%d exceeds allowable maximum supported by format of %d.",
pCreateInfo->arrayLayers, format_limits.maxArrayLayers);
}
if ((pCreateInfo->samples & format_limits.sampleCounts) == 0) {
skip |= LogError(device, "VUID-VkImageCreateInfo-samples-02258",
"vkCreateImage(): samples %s is not supported by format 0x%.8X.",
string_VkSampleCountFlagBits(pCreateInfo->samples), format_limits.sampleCounts);
}
if (pCreateInfo->extent.width > format_limits.maxExtent.width) {
skip |= LogError(device, "VUID-VkImageCreateInfo-extent-02252",
"vkCreateImage(): extent.width %u exceeds allowable maximum image extent width %u.",
pCreateInfo->extent.width, format_limits.maxExtent.width);
}
if (pCreateInfo->extent.height > format_limits.maxExtent.height) {
skip |= LogError(device, "VUID-VkImageCreateInfo-extent-02253",
"vkCreateImage(): extent.height %u exceeds allowable maximum image extent height %u.",
pCreateInfo->extent.height, format_limits.maxExtent.height);
}
if (pCreateInfo->extent.depth > format_limits.maxExtent.depth) {
skip |= LogError(device, "VUID-VkImageCreateInfo-extent-02254",
"vkCreateImage(): extent.depth %u exceeds allowable maximum image extent depth %u.",
pCreateInfo->extent.depth, format_limits.maxExtent.depth);
}
}
// Tests for "Formats requiring sampler YCBCR conversion for VK_IMAGE_ASPECT_COLOR_BIT image views"
if (FormatRequiresYcbcrConversion(pCreateInfo->format)) {
if (!enabled_features.ycbcr_image_array_features.ycbcrImageArrays && pCreateInfo->arrayLayers != 1) {
const char *error_vuid = (device_extensions.vk_ext_ycbcr_image_arrays) ? "VUID-VkImageCreateInfo-format-02653"
: "VUID-VkImageCreateInfo-format-02564";
skip |= LogError(device, error_vuid,
"vkCreateImage(): arrayLayers = %d, but when the ycbcrImagesArrays feature is not enabled and using a "
"YCbCr Conversion format, arrayLayers must be 1",
pCreateInfo->arrayLayers);
}
if (pCreateInfo->mipLevels != 1) {
skip |= LogError(device, "VUID-VkImageCreateInfo-format-02561",
"vkCreateImage(): mipLevels = %d, but when using a YCbCr Conversion format, mipLevels must be 1",
                             pCreateInfo->mipLevels);
}
if (pCreateInfo->samples != VK_SAMPLE_COUNT_1_BIT) {
skip |= LogError(
device, "VUID-VkImageCreateInfo-format-02562",
"vkCreateImage(): samples = %s, but when using a YCbCr Conversion format, samples must be VK_SAMPLE_COUNT_1_BIT",
string_VkSampleCountFlagBits(pCreateInfo->samples));
}
if (pCreateInfo->imageType != VK_IMAGE_TYPE_2D) {
skip |= LogError(
device, "VUID-VkImageCreateInfo-format-02563",
"vkCreateImage(): imageType = %s, but when using a YCbCr Conversion format, imageType must be VK_IMAGE_TYPE_2D ",
string_VkImageType(pCreateInfo->imageType));
}
}
if (device_extensions.vk_khr_maintenance2) {
if (pCreateInfo->flags & VK_IMAGE_CREATE_BLOCK_TEXEL_VIEW_COMPATIBLE_BIT) {
if (!(FormatIsCompressed_BC(pCreateInfo->format) || FormatIsCompressed_ASTC(pCreateInfo->format) ||
FormatIsCompressed_ETC2_EAC(pCreateInfo->format))) {
skip |= LogError(device, "VUID-VkImageCreateInfo-flags-01572",
"vkCreateImage(): If pCreateInfo->flags contains VK_IMAGE_CREATE_BLOCK_TEXEL_VIEW_COMPATIBLE_BIT, "
"format must be block, ETC or ASTC compressed, but is %s",
string_VkFormat(pCreateInfo->format));
}
if (!(pCreateInfo->flags & VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT)) {
skip |= LogError(device, "VUID-VkImageCreateInfo-flags-01573",
"vkCreateImage(): If pCreateInfo->flags contains VK_IMAGE_CREATE_BLOCK_TEXEL_VIEW_COMPATIBLE_BIT, "
"flags must also contain VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT.");
}
}
}
if (pCreateInfo->sharingMode == VK_SHARING_MODE_CONCURRENT && pCreateInfo->pQueueFamilyIndices) {
skip |= ValidatePhysicalDeviceQueueFamilies(pCreateInfo->queueFamilyIndexCount, pCreateInfo->pQueueFamilyIndices,
"vkCreateImage", "pCreateInfo->pQueueFamilyIndices",
"VUID-VkImageCreateInfo-sharingMode-01420");
}
if (!FormatIsMultiplane(pCreateInfo->format) && !(pCreateInfo->flags & VK_IMAGE_CREATE_ALIAS_BIT) &&
(pCreateInfo->flags & VK_IMAGE_CREATE_DISJOINT_BIT)) {
skip |=
LogError(device, "VUID-VkImageCreateInfo-format-01577",
"vkCreateImage(): format is %s and flags are %s. The flags should not include VK_IMAGE_CREATE_DISJOINT_BIT.",
string_VkFormat(pCreateInfo->format), string_VkImageCreateFlags(pCreateInfo->flags).c_str());
}
const auto swapchain_create_info = LvlFindInChain<VkImageSwapchainCreateInfoKHR>(pCreateInfo->pNext);
if (swapchain_create_info != nullptr) {
if (swapchain_create_info->swapchain != VK_NULL_HANDLE) {
const SWAPCHAIN_NODE *swapchain_state = GetSwapchainState(swapchain_create_info->swapchain);
const VkSwapchainCreateFlagsKHR swapchain_flags = swapchain_state->createInfo.flags;
            // Validate the rest of the swapchain image create checks that require swapchain state
const char *vuid = "VUID-VkImageSwapchainCreateInfoKHR-swapchain-00995";
if (((swapchain_flags & VK_SWAPCHAIN_CREATE_SPLIT_INSTANCE_BIND_REGIONS_BIT_KHR) != 0) &&
((pCreateInfo->flags & VK_IMAGE_CREATE_SPLIT_INSTANCE_BIND_REGIONS_BIT) == 0)) {
skip |= LogError(
device, vuid,
"vkCreateImage(): Swapchain was created with VK_SWAPCHAIN_CREATE_SPLIT_INSTANCE_BIND_REGIONS_BIT_KHR flag so "
"all swapchain images must have the VK_IMAGE_CREATE_SPLIT_INSTANCE_BIND_REGIONS_BIT flag set.");
}
if (((swapchain_flags & VK_SWAPCHAIN_CREATE_PROTECTED_BIT_KHR) != 0) &&
((pCreateInfo->flags & VK_IMAGE_CREATE_PROTECTED_BIT) == 0)) {
skip |= LogError(device, vuid,
"vkCreateImage(): Swapchain was created with VK_SWAPCHAIN_CREATE_PROTECTED_BIT_KHR flag so all "
"swapchain images must have the VK_IMAGE_CREATE_PROTECTED_BIT flag set.");
}
const VkImageCreateFlags mutable_flags = (VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT | VK_IMAGE_CREATE_EXTENDED_USAGE_BIT);
if (((swapchain_flags & VK_SWAPCHAIN_CREATE_MUTABLE_FORMAT_BIT_KHR) != 0) &&
((pCreateInfo->flags & mutable_flags) != mutable_flags)) {
skip |= LogError(device, vuid,
"vkCreateImage(): Swapchain was created with VK_SWAPCHAIN_CREATE_MUTABLE_FORMAT_BIT_KHR flag so "
"all swapchain images must have the VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT and "
"VK_IMAGE_CREATE_EXTENDED_USAGE_BIT flags both set.");
}
}
}
if ((pCreateInfo->flags & VK_IMAGE_CREATE_PROTECTED_BIT) != 0) {
if (enabled_features.core11.protectedMemory == VK_FALSE) {
skip |= LogError(device, "VUID-VkImageCreateInfo-flags-01890",
"vkCreateImage(): the protectedMemory device feature is disabled: Images cannot be created with the "
"VK_IMAGE_CREATE_PROTECTED_BIT set.");
}
const VkImageCreateFlags invalid_flags =
VK_IMAGE_CREATE_SPARSE_BINDING_BIT | VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT | VK_IMAGE_CREATE_SPARSE_ALIASED_BIT;
if ((pCreateInfo->flags & invalid_flags) != 0) {
skip |= LogError(device, "VUID-VkImageCreateInfo-None-01891",
"vkCreateImage(): VK_IMAGE_CREATE_PROTECTED_BIT is set so no sparse create flags can be used at same "
"time (VK_IMAGE_CREATE_SPARSE_BINDING_BIT | VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT | "
"VK_IMAGE_CREATE_SPARSE_ALIASED_BIT).");
}
}
skip |= ValidateImageFormatFeatures(pCreateInfo);
// Check compatibility with VK_KHR_portability_subset
if (ExtEnabled::kNotEnabled != device_extensions.vk_khr_portability_subset) {
if (VK_IMAGE_CREATE_2D_ARRAY_COMPATIBLE_BIT & pCreateInfo->flags &&
VK_FALSE == enabled_features.portability_subset_features.imageView2DOn3DImage) {
skip |= LogError(device, "VUID-VkImageCreateInfo-imageView2DOn3DImage-04459",
"vkCreateImage (portability error): VK_IMAGE_CREATE_2D_ARRAY_COMPATIBLE_BIT is not supported.");
}
if ((VK_SAMPLE_COUNT_1_BIT != pCreateInfo->samples) && (1 != pCreateInfo->arrayLayers) &&
(VK_FALSE == enabled_features.portability_subset_features.multisampleArrayImage)) {
skip |=
LogError(device, "VUID-VkImageCreateInfo-multisampleArrayImage-04460",
"vkCreateImage (portability error): Cannot create an image with samples/texel > 1 && arrayLayers != 1");
}
}
return skip;
}
void CoreChecks::PostCallRecordCreateImage(VkDevice device, const VkImageCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkImage *pImage, VkResult result) {
if (VK_SUCCESS != result) return;
StateTracker::PostCallRecordCreateImage(device, pCreateInfo, pAllocator, pImage, result);
auto image_state = Get<IMAGE_STATE>(*pImage);
AddInitialLayoutintoImageLayoutMap(*image_state, imageLayoutMap);
}
bool CoreChecks::PreCallValidateDestroyImage(VkDevice device, VkImage image, const VkAllocationCallbacks *pAllocator) const {
const IMAGE_STATE *image_state = GetImageState(image);
bool skip = false;
if (image_state) {
if (image_state->is_swapchain_image) {
// TODO - Add VUID when headers are upstreamed
skip |= LogError(device, "UNASSIGNED-vkDestroyImage-image",
"vkDestroyImage(): %s is a presentable image and it is controlled by the implementation and is "
"destroyed with vkDestroySwapchainKHR.",
report_data->FormatHandle(image_state->image()).c_str());
}
skip |= ValidateObjectNotInUse(image_state, "vkDestroyImage", "VUID-vkDestroyImage-image-01000");
}
return skip;
}
void CoreChecks::PreCallRecordDestroyImage(VkDevice device, VkImage image, const VkAllocationCallbacks *pAllocator) {
// Clean up validation specific data
qfo_release_image_barrier_map.erase(image);
imageLayoutMap.erase(image);
// Clean up generic image state
StateTracker::PreCallRecordDestroyImage(device, image, pAllocator);
}
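// Validate image attributes (aspect mask, format, and usage) for vkCmdClearColorImage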
bool CoreChecks::ValidateImageAttributes(const IMAGE_STATE *image_state, const VkImageSubresourceRange &range,
const char *param_name) const {
bool skip = false;
const VkImage image = image_state->image();
const VkFormat format = image_state->createInfo.format;
if (range.aspectMask != VK_IMAGE_ASPECT_COLOR_BIT) {
skip |= LogError(image, "VUID-vkCmdClearColorImage-aspectMask-02498",
"vkCmdClearColorImage(): %s.aspectMasks must only be set to VK_IMAGE_ASPECT_COLOR_BIT.", param_name);
}
if (FormatIsDepthOrStencil(format)) {
skip |= LogError(image, "VUID-vkCmdClearColorImage-image-00007",
"vkCmdClearColorImage(): %s called with image %s which has a depth/stencil format (%s).", param_name,
report_data->FormatHandle(image).c_str(), string_VkFormat(format));
} else if (FormatIsCompressed(format)) {
skip |= LogError(image, "VUID-vkCmdClearColorImage-image-00007",
"vkCmdClearColorImage(): %s called with image %s which has a compressed format (%s).", param_name,
report_data->FormatHandle(image).c_str(), string_VkFormat(format));
}
if (!(image_state->createInfo.usage & VK_IMAGE_USAGE_TRANSFER_DST_BIT)) {
skip |=
LogError(image, "VUID-vkCmdClearColorImage-image-00002",
"vkCmdClearColorImage() %s called with image %s which was created without VK_IMAGE_USAGE_TRANSFER_DST_BIT.",
param_name, report_data->FormatHandle(image).c_str());
}
return skip;
}
bool CoreChecks::VerifyClearImageLayout(const CMD_BUFFER_STATE *cb_node, const IMAGE_STATE *image_state,
const VkImageSubresourceRange &range, VkImageLayout dest_image_layout,
const char *func_name) const {
bool skip = false;
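    // The set of layouts allowed for the cleared image depends on whether this is a depth/stencil or color clear and, for
    // color clears, on whether VK_KHR_shared_presentable_image is enabled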
if (strcmp(func_name, "vkCmdClearDepthStencilImage()") == 0) {
if ((dest_image_layout != VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL) && (dest_image_layout != VK_IMAGE_LAYOUT_GENERAL)) {
skip |= LogError(image_state->image(), "VUID-vkCmdClearDepthStencilImage-imageLayout-00012",
"%s: Layout for cleared image is %s but can only be TRANSFER_DST_OPTIMAL or GENERAL.", func_name,
string_VkImageLayout(dest_image_layout));
}
} else {
assert(strcmp(func_name, "vkCmdClearColorImage()") == 0);
if (!device_extensions.vk_khr_shared_presentable_image) {
if ((dest_image_layout != VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL) && (dest_image_layout != VK_IMAGE_LAYOUT_GENERAL)) {
skip |= LogError(image_state->image(), "VUID-vkCmdClearColorImage-imageLayout-00005",
"%s: Layout for cleared image is %s but can only be TRANSFER_DST_OPTIMAL or GENERAL.", func_name,
string_VkImageLayout(dest_image_layout));
}
} else {
if ((dest_image_layout != VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL) && (dest_image_layout != VK_IMAGE_LAYOUT_GENERAL) &&
(dest_image_layout != VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR)) {
skip |= LogError(
image_state->image(), "VUID-vkCmdClearColorImage-imageLayout-01394",
"%s: Layout for cleared image is %s but can only be TRANSFER_DST_OPTIMAL, SHARED_PRESENT_KHR, or GENERAL.",
func_name, string_VkImageLayout(dest_image_layout));
}
}
}
// Cast to const to prevent creation at validate time.
const auto *subresource_map = GetImageSubresourceLayoutMap(cb_node, image_state->image());
if (subresource_map) {
bool subres_skip = false;
LayoutUseCheckAndMessage layout_check(subresource_map);
VkImageSubresourceRange normalized_isr = NormalizeSubresourceRange(*image_state, range);
// IncrementInterval skips over all the subresources that have the same state as we just checked, incrementing to
// the next "constant value" range
for (auto pos = subresource_map->Find(normalized_isr); !(pos.AtEnd()) && !subres_skip; pos.IncrementInterval()) {
if (!layout_check.Check(pos->subresource, dest_image_layout, pos->current_layout, pos->initial_layout)) {
const char *error_code = "VUID-vkCmdClearColorImage-imageLayout-00004";
if (strcmp(func_name, "vkCmdClearDepthStencilImage()") == 0) {
error_code = "VUID-vkCmdClearDepthStencilImage-imageLayout-00011";
} else {
assert(strcmp(func_name, "vkCmdClearColorImage()") == 0);
}
subres_skip |= LogError(cb_node->commandBuffer(), error_code,
"%s: Cannot clear an image whose layout is %s and doesn't match the %s layout %s.",
func_name, string_VkImageLayout(dest_image_layout), layout_check.message,
string_VkImageLayout(layout_check.layout));
}
}
skip |= subres_skip;
}
return skip;
}
bool CoreChecks::PreCallValidateCmdClearColorImage(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout,
const VkClearColorValue *pColor, uint32_t rangeCount,
const VkImageSubresourceRange *pRanges) const {
bool skip = false;
// TODO : Verify memory is in VK_IMAGE_STATE_CLEAR state
const auto *cb_node = GetCBState(commandBuffer);
const auto *image_state = GetImageState(image);
if (cb_node && image_state) {
skip |= ValidateMemoryIsBoundToImage(image_state, "vkCmdClearColorImage()", "VUID-vkCmdClearColorImage-image-00003");
skip |= ValidateCmd(cb_node, CMD_CLEARCOLORIMAGE, "vkCmdClearColorImage()");
if (device_extensions.vk_khr_maintenance1) {
skip |= ValidateImageFormatFeatureFlags(image_state, VK_FORMAT_FEATURE_TRANSFER_DST_BIT, "vkCmdClearColorImage",
"VUID-vkCmdClearColorImage-image-01993");
}
skip |=
ValidateProtectedImage(cb_node, image_state, "vkCmdClearColorImage()", "VUID-vkCmdClearColorImage-commandBuffer-01805");
skip |= ValidateUnprotectedImage(cb_node, image_state, "vkCmdClearColorImage()",
"VUID-vkCmdClearColorImage-commandBuffer-01806");
for (uint32_t i = 0; i < rangeCount; ++i) {
std::string param_name = "pRanges[" + std::to_string(i) + "]";
skip |= ValidateCmdClearColorSubresourceRange(image_state, pRanges[i], param_name.c_str());
skip |= ValidateImageAttributes(image_state, pRanges[i], param_name.c_str());
skip |= VerifyClearImageLayout(cb_node, image_state, pRanges[i], imageLayout, "vkCmdClearColorImage()");
}
// Tests for "Formats requiring sampler Y’CBCR conversion for VK_IMAGE_ASPECT_COLOR_BIT image views"
if (FormatRequiresYcbcrConversion(image_state->createInfo.format)) {
skip |= LogError(device, "VUID-vkCmdClearColorImage-image-01545",
"vkCmdClearColorImage(): format (%s) must not be one of the formats requiring sampler YCBCR "
"conversion for VK_IMAGE_ASPECT_COLOR_BIT image views",
string_VkFormat(image_state->createInfo.format));
}
}
return skip;
}
void CoreChecks::PreCallRecordCmdClearColorImage(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout,
const VkClearColorValue *pColor, uint32_t rangeCount,
const VkImageSubresourceRange *pRanges) {
StateTracker::PreCallRecordCmdClearColorImage(commandBuffer, image, imageLayout, pColor, rangeCount, pRanges);
auto cb_node = GetCBState(commandBuffer);
auto image_state = GetImageState(image);
if (cb_node && image_state) {
for (uint32_t i = 0; i < rangeCount; ++i) {
SetImageInitialLayout(cb_node, image, pRanges[i], imageLayout);
}
}
}
bool CoreChecks::ValidateClearDepthStencilValue(VkCommandBuffer commandBuffer, VkClearDepthStencilValue clearValue,
const char *apiName) const {
bool skip = false;
    // The extension was not created with a feature bit, which prevents distinguishing the 2 variations of the VUIDs
if (!device_extensions.vk_ext_depth_range_unrestricted) {
if (!(clearValue.depth >= 0.0) || !(clearValue.depth <= 1.0)) {
// Also VUID-VkClearDepthStencilValue-depth-00022
skip |= LogError(commandBuffer, "VUID-VkClearDepthStencilValue-depth-02506",
"%s: VK_EXT_depth_range_unrestricted extension is not enabled and VkClearDepthStencilValue::depth "
"(=%f) is not within the [0.0, 1.0] range.",
apiName, clearValue.depth);
}
}
return skip;
}
bool CoreChecks::PreCallValidateCmdClearDepthStencilImage(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout,
const VkClearDepthStencilValue *pDepthStencil, uint32_t rangeCount,
const VkImageSubresourceRange *pRanges) const {
bool skip = false;
// TODO : Verify memory is in VK_IMAGE_STATE_CLEAR state
const auto *cb_node = GetCBState(commandBuffer);
const auto *image_state = GetImageState(image);
if (cb_node && image_state) {
const VkFormat image_format = image_state->createInfo.format;
skip |= ValidateMemoryIsBoundToImage(image_state, "vkCmdClearDepthStencilImage()",
"VUID-vkCmdClearDepthStencilImage-image-00010");
skip |= ValidateCmd(cb_node, CMD_CLEARDEPTHSTENCILIMAGE, "vkCmdClearDepthStencilImage()");
if (device_extensions.vk_khr_maintenance1) {
skip |= ValidateImageFormatFeatureFlags(image_state, VK_FORMAT_FEATURE_TRANSFER_DST_BIT, "vkCmdClearDepthStencilImage",
"VUID-vkCmdClearDepthStencilImage-image-01994");
}
skip |= ValidateClearDepthStencilValue(commandBuffer, *pDepthStencil, "vkCmdClearDepthStencilImage()");
skip |= ValidateProtectedImage(cb_node, image_state, "vkCmdClearDepthStencilImage()",
"VUID-vkCmdClearDepthStencilImage-commandBuffer-01807");
skip |= ValidateUnprotectedImage(cb_node, image_state, "vkCmdClearDepthStencilImage()",
"VUID-vkCmdClearDepthStencilImage-commandBuffer-01808");
bool any_include_aspect_depth_bit = false;
bool any_include_aspect_stencil_bit = false;
for (uint32_t i = 0; i < rangeCount; ++i) {
std::string param_name = "pRanges[" + std::to_string(i) + "]";
skip |= ValidateCmdClearDepthSubresourceRange(image_state, pRanges[i], param_name.c_str());
skip |= VerifyClearImageLayout(cb_node, image_state, pRanges[i], imageLayout, "vkCmdClearDepthStencilImage()");
// Image aspect must be depth or stencil or both
VkImageAspectFlags valid_aspects = VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT;
if (((pRanges[i].aspectMask & valid_aspects) == 0) || ((pRanges[i].aspectMask & ~valid_aspects) != 0)) {
skip |= LogError(commandBuffer, "VUID-vkCmdClearDepthStencilImage-aspectMask-02824",
"vkCmdClearDepthStencilImage(): pRanges[%u].aspectMask can only be VK_IMAGE_ASPECT_DEPTH_BIT "
"and/or VK_IMAGE_ASPECT_STENCIL_BIT.",
i);
}
if ((pRanges[i].aspectMask & VK_IMAGE_ASPECT_DEPTH_BIT) != 0) {
any_include_aspect_depth_bit = true;
if (FormatHasDepth(image_format) == false) {
skip |= LogError(commandBuffer, "VUID-vkCmdClearDepthStencilImage-image-02826",
"vkCmdClearDepthStencilImage(): pRanges[%u].aspectMask has a VK_IMAGE_ASPECT_DEPTH_BIT but %s "
"doesn't have a depth component.",
i, string_VkFormat(image_format));
}
}
if ((pRanges[i].aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT) != 0) {
any_include_aspect_stencil_bit = true;
if (FormatHasStencil(image_format) == false) {
skip |= LogError(commandBuffer, "VUID-vkCmdClearDepthStencilImage-image-02825",
"vkCmdClearDepthStencilImage(): pRanges[%u].aspectMask has a VK_IMAGE_ASPECT_STENCIL_BIT but "
"%s doesn't have a stencil component.",
i, string_VkFormat(image_format));
}
}
}
if (any_include_aspect_stencil_bit) {
const auto image_stencil_struct = LvlFindInChain<VkImageStencilUsageCreateInfo>(image_state->createInfo.pNext);
if (image_stencil_struct != nullptr) {
if ((image_stencil_struct->stencilUsage & VK_IMAGE_USAGE_TRANSFER_DST_BIT) == 0) {
skip |=
LogError(device, "VUID-vkCmdClearDepthStencilImage-pRanges-02658",
"vkCmdClearDepthStencilImage(): an element of pRanges.aspect includes VK_IMAGE_ASPECT_STENCIL_BIT "
"and image was created with separate stencil usage, VK_IMAGE_USAGE_TRANSFER_DST_BIT must be "
"included in VkImageStencilUsageCreateInfo::stencilUsage used to create image");
}
} else {
if ((image_state->createInfo.usage & VK_IMAGE_USAGE_TRANSFER_DST_BIT) == 0) {
skip |= LogError(
device, "VUID-vkCmdClearDepthStencilImage-pRanges-02659",
"vkCmdClearDepthStencilImage(): an element of pRanges.aspect includes VK_IMAGE_ASPECT_STENCIL_BIT and "
"image was not created with separate stencil usage, VK_IMAGE_USAGE_TRANSFER_DST_BIT must be included "
"in VkImageCreateInfo::usage used to create image");
}
}
}
if (any_include_aspect_depth_bit && (image_state->createInfo.usage & VK_IMAGE_USAGE_TRANSFER_DST_BIT) == 0) {
skip |= LogError(device, "VUID-vkCmdClearDepthStencilImage-pRanges-02660",
"vkCmdClearDepthStencilImage(): an element of pRanges.aspect includes VK_IMAGE_ASPECT_DEPTH_BIT, "
"VK_IMAGE_USAGE_TRANSFER_DST_BIT must be included in VkImageCreateInfo::usage used to create image");
}
if (image_state && !FormatIsDepthOrStencil(image_format)) {
skip |= LogError(image, "VUID-vkCmdClearDepthStencilImage-image-00014",
"vkCmdClearDepthStencilImage(): called with image %s which doesn't have a depth/stencil format (%s).",
report_data->FormatHandle(image).c_str(), string_VkFormat(image_format));
}
if (VK_IMAGE_USAGE_TRANSFER_DST_BIT != (VK_IMAGE_USAGE_TRANSFER_DST_BIT & image_state->createInfo.usage)) {
skip |= LogError(image, "VUID-vkCmdClearDepthStencilImage-image-00009",
"vkCmdClearDepthStencilImage(): called with image %s which was not created with the "
"VK_IMAGE_USAGE_TRANSFER_DST_BIT set.",
report_data->FormatHandle(image).c_str());
}
}
return skip;
}
void CoreChecks::PreCallRecordCmdClearDepthStencilImage(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout,
const VkClearDepthStencilValue *pDepthStencil, uint32_t rangeCount,
const VkImageSubresourceRange *pRanges) {
StateTracker::PreCallRecordCmdClearDepthStencilImage(commandBuffer, image, imageLayout, pDepthStencil, rangeCount, pRanges);
auto cb_node = GetCBState(commandBuffer);
auto image_state = GetImageState(image);
if (cb_node && image_state) {
for (uint32_t i = 0; i < rangeCount; ++i) {
SetImageInitialLayout(cb_node, image, pRanges[i], imageLayout);
}
}
}
// Returns true if the ranges [start, start + start_offset) and [end, end + end_offset) overlap
static bool RangesIntersect(int32_t start, uint32_t start_offset, int32_t end, uint32_t end_offset) {
bool result = false;
uint32_t intersection_min = std::max(static_cast<uint32_t>(start), static_cast<uint32_t>(end));
uint32_t intersection_max = std::min(static_cast<uint32_t>(start) + start_offset, static_cast<uint32_t>(end) + end_offset);
if (intersection_max > intersection_min) {
result = true;
}
return result;
}
// Returns true if source area of first vkImageCopy/vkImageCopy2KHR region intersects dest area of second region
// It is assumed that these are copy regions within a single image (otherwise no possibility of collision)
template <typename RegionType>
static bool RegionIntersects(const RegionType *rgn0, const RegionType *rgn1, VkImageType type, bool is_multiplane) {
bool result = false;
// Separate planes within a multiplane image cannot intersect
if (is_multiplane && (rgn0->srcSubresource.aspectMask != rgn1->dstSubresource.aspectMask)) {
return result;
}
if ((rgn0->srcSubresource.mipLevel == rgn1->dstSubresource.mipLevel) &&
(RangesIntersect(rgn0->srcSubresource.baseArrayLayer, rgn0->srcSubresource.layerCount, rgn1->dstSubresource.baseArrayLayer,
rgn1->dstSubresource.layerCount))) {
result = true;
switch (type) {
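            // Check the overlap one dimension at a time; the higher-dimensional cases intentionally fall through so that 3D
            // images are also tested in y and x, and 2D images in x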
case VK_IMAGE_TYPE_3D:
result &= RangesIntersect(rgn0->srcOffset.z, rgn0->extent.depth, rgn1->dstOffset.z, rgn1->extent.depth);
// fall through
case VK_IMAGE_TYPE_2D:
result &= RangesIntersect(rgn0->srcOffset.y, rgn0->extent.height, rgn1->dstOffset.y, rgn1->extent.height);
// fall through
case VK_IMAGE_TYPE_1D:
result &= RangesIntersect(rgn0->srcOffset.x, rgn0->extent.width, rgn1->dstOffset.x, rgn1->extent.width);
break;
default:
// Unrecognized or new IMAGE_TYPE enums will be caught in parameter_validation
assert(false);
}
}
return result;
}
// Returns non-zero if offset and extent exceed image extents
static const uint32_t kXBit = 1;
static const uint32_t kYBit = 2;
static const uint32_t kZBit = 4;
static uint32_t ExceedsBounds(const VkOffset3D *offset, const VkExtent3D *extent, const VkExtent3D *image_extent) {
uint32_t result = 0;
    // Extent dimensions cannot be negative, but the checks are kept for clarity
if ((offset->z + extent->depth > image_extent->depth) || (offset->z < 0) ||
((offset->z + static_cast<int32_t>(extent->depth)) < 0)) {
result |= kZBit;
}
if ((offset->y + extent->height > image_extent->height) || (offset->y < 0) ||
((offset->y + static_cast<int32_t>(extent->height)) < 0)) {
result |= kYBit;
}
if ((offset->x + extent->width > image_extent->width) || (offset->x < 0) ||
((offset->x + static_cast<int32_t>(extent->width)) < 0)) {
result |= kXBit;
}
return result;
}
// Test if two VkExtent3D structs are equivalent
static inline bool IsExtentEqual(const VkExtent3D *extent, const VkExtent3D *other_extent) {
bool result = true;
if ((extent->width != other_extent->width) || (extent->height != other_extent->height) ||
(extent->depth != other_extent->depth)) {
result = false;
}
return result;
}
// Test if the extent argument has all dimensions set to 0.
static inline bool IsExtentAllZeroes(const VkExtent3D *extent) {
return ((extent->width == 0) && (extent->height == 0) && (extent->depth == 0));
}
// Returns the image transfer granularity for a specific image scaled by compressed block size if necessary.
VkExtent3D CoreChecks::GetScaledItg(const CMD_BUFFER_STATE *cb_node, const IMAGE_STATE *img) const {
// Default to (0, 0, 0) granularity in case we can't find the real granularity for the physical device.
VkExtent3D granularity = {0, 0, 0};
auto pool = cb_node->command_pool.get();
if (pool) {
granularity = GetPhysicalDeviceState()->queue_family_properties[pool->queueFamilyIndex].minImageTransferGranularity;
if (FormatIsCompressed(img->createInfo.format) || FormatIsSinglePlane_422(img->createInfo.format)) {
auto block_size = FormatTexelBlockExtent(img->createInfo.format);
granularity.width *= block_size.width;
granularity.height *= block_size.height;
}
}
return granularity;
}
// Test elements of a VkExtent3D structure against alignment constraints contained in another VkExtent3D structure
static inline bool IsExtentAligned(const VkExtent3D *extent, const VkExtent3D *granularity) {
bool valid = true;
if ((SafeModulo(extent->depth, granularity->depth) != 0) || (SafeModulo(extent->width, granularity->width) != 0) ||
(SafeModulo(extent->height, granularity->height) != 0)) {
valid = false;
}
return valid;
}
// Check elements of a VkOffset3D structure against a queue family's Image Transfer Granularity values
bool CoreChecks::CheckItgOffset(const CMD_BUFFER_STATE *cb_node, const VkOffset3D *offset, const VkExtent3D *granularity,
const uint32_t i, const char *function, const char *member, const char *vuid) const {
bool skip = false;
VkExtent3D offset_extent = {};
offset_extent.width = static_cast<uint32_t>(abs(offset->x));
offset_extent.height = static_cast<uint32_t>(abs(offset->y));
offset_extent.depth = static_cast<uint32_t>(abs(offset->z));
if (IsExtentAllZeroes(granularity)) {
// If the queue family image transfer granularity is (0, 0, 0), then the offset must always be (0, 0, 0)
if (IsExtentAllZeroes(&offset_extent) == false) {
skip |= LogError(cb_node->commandBuffer(), vuid,
"%s: pRegion[%d].%s (x=%d, y=%d, z=%d) must be (x=0, y=0, z=0) when the command buffer's queue family "
"image transfer granularity is (w=0, h=0, d=0).",
function, i, member, offset->x, offset->y, offset->z);
}
} else {
// If the queue family image transfer granularity is not (0, 0, 0), then the offset dimensions must always be even
// integer multiples of the image transfer granularity.
if (IsExtentAligned(&offset_extent, granularity) == false) {
skip |= LogError(cb_node->commandBuffer(), vuid,
"%s: pRegion[%d].%s (x=%d, y=%d, z=%d) dimensions must be even integer multiples of this command "
"buffer's queue family image transfer granularity (w=%d, h=%d, d=%d).",
function, i, member, offset->x, offset->y, offset->z, granularity->width, granularity->height,
granularity->depth);
}
}
return skip;
}
// Check elements of a VkExtent3D structure against a queue family's Image Transfer Granularity values
bool CoreChecks::CheckItgExtent(const CMD_BUFFER_STATE *cb_node, const VkExtent3D *extent, const VkOffset3D *offset,
const VkExtent3D *granularity, const VkExtent3D *subresource_extent, const VkImageType image_type,
const uint32_t i, const char *function, const char *member, const char *vuid) const {
bool skip = false;
if (IsExtentAllZeroes(granularity)) {
// If the queue family image transfer granularity is (0, 0, 0), then the extent must always match the image
// subresource extent.
if (IsExtentEqual(extent, subresource_extent) == false) {
skip |= LogError(cb_node->commandBuffer(), vuid,
"%s: pRegion[%d].%s (w=%d, h=%d, d=%d) must match the image subresource extents (w=%d, h=%d, d=%d) "
"when the command buffer's queue family image transfer granularity is (w=0, h=0, d=0).",
function, i, member, extent->width, extent->height, extent->depth, subresource_extent->width,
subresource_extent->height, subresource_extent->depth);
}
} else {
// If the queue family image transfer granularity is not (0, 0, 0), then the extent dimensions must always be even
// integer multiples of the image transfer granularity or the offset + extent dimensions must always match the image
// subresource extent dimensions.
VkExtent3D offset_extent_sum = {};
offset_extent_sum.width = static_cast<uint32_t>(abs(offset->x)) + extent->width;
offset_extent_sum.height = static_cast<uint32_t>(abs(offset->y)) + extent->height;
offset_extent_sum.depth = static_cast<uint32_t>(abs(offset->z)) + extent->depth;
bool x_ok = true;
bool y_ok = true;
bool z_ok = true;
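        // A dimension is valid if the extent is an integer multiple of the granularity or if offset + extent reaches the
        // subresource extent; the switch falls through so 3D images also test height and width, and 2D images test width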
switch (image_type) {
case VK_IMAGE_TYPE_3D:
z_ok = ((0 == SafeModulo(extent->depth, granularity->depth)) ||
(subresource_extent->depth == offset_extent_sum.depth));
// fall through
case VK_IMAGE_TYPE_2D:
y_ok = ((0 == SafeModulo(extent->height, granularity->height)) ||
(subresource_extent->height == offset_extent_sum.height));
// fall through
case VK_IMAGE_TYPE_1D:
x_ok = ((0 == SafeModulo(extent->width, granularity->width)) ||
(subresource_extent->width == offset_extent_sum.width));
break;
default:
// Unrecognized or new IMAGE_TYPE enums will be caught in parameter_validation
assert(false);
}
if (!(x_ok && y_ok && z_ok)) {
skip |=
LogError(cb_node->commandBuffer(), vuid,
"%s: pRegion[%d].%s (w=%d, h=%d, d=%d) dimensions must be even integer multiples of this command "
"buffer's queue family image transfer granularity (w=%d, h=%d, d=%d) or offset (x=%d, y=%d, z=%d) + "
"extent (w=%d, h=%d, d=%d) must match the image subresource extents (w=%d, h=%d, d=%d).",
function, i, member, extent->width, extent->height, extent->depth, granularity->width, granularity->height,
granularity->depth, offset->x, offset->y, offset->z, extent->width, extent->height, extent->depth,
subresource_extent->width, subresource_extent->height, subresource_extent->depth);
}
}
return skip;
}
bool CoreChecks::ValidateImageMipLevel(const CMD_BUFFER_STATE *cb_node, const IMAGE_STATE *img, uint32_t mip_level,
const uint32_t i, const char *function, const char *member, const char *vuid) const {
bool skip = false;
if (mip_level >= img->createInfo.mipLevels) {
skip |= LogError(cb_node->commandBuffer(), vuid, "In %s, pRegions[%u].%s.mipLevel is %u, but provided %s has %u mip levels.",
function, i, member, mip_level, report_data->FormatHandle(img->image()).c_str(), img->createInfo.mipLevels);
}
return skip;
}
bool CoreChecks::ValidateImageArrayLayerRange(const CMD_BUFFER_STATE *cb_node, const IMAGE_STATE *img, const uint32_t base_layer,
const uint32_t layer_count, const uint32_t i, const char *function,
const char *member, const char *vuid) const {
bool skip = false;
if (base_layer >= img->createInfo.arrayLayers || layer_count > img->createInfo.arrayLayers ||
(base_layer + layer_count) > img->createInfo.arrayLayers) {
skip |= LogError(cb_node->commandBuffer(), vuid,
"In %s, pRegions[%u].%s.baseArrayLayer is %u and .layerCount is "
"%u, but provided %s has %u array layers.",
function, i, member, base_layer, layer_count, report_data->FormatHandle(img->image()).c_str(),
img->createInfo.arrayLayers);
}
return skip;
}
// Check valid usage Image Transfer Granularity requirements for elements of a VkBufferImageCopy/VkBufferImageCopy2KHR structure
template <typename BufferImageCopyRegionType>
bool CoreChecks::ValidateCopyBufferImageTransferGranularityRequirements(const CMD_BUFFER_STATE *cb_node, const IMAGE_STATE *img,
const BufferImageCopyRegionType *region, const uint32_t i,
const char *function, const char *vuid) const {
bool skip = false;
VkExtent3D granularity = GetScaledItg(cb_node, img);
skip |= CheckItgOffset(cb_node, ®ion->imageOffset, &granularity, i, function, "imageOffset", vuid);
VkExtent3D subresource_extent = GetImageSubresourceExtent(img, ®ion->imageSubresource);
skip |= CheckItgExtent(cb_node, ®ion->imageExtent, ®ion->imageOffset, &granularity, &subresource_extent,
img->createInfo.imageType, i, function, "imageExtent", vuid);
return skip;
}
// Check valid usage Image Transfer Granularity requirements for elements of a VkImageCopy/VkImageCopy2KHR structure
template <typename RegionType>
bool CoreChecks::ValidateCopyImageTransferGranularityRequirements(const CMD_BUFFER_STATE *cb_node, const IMAGE_STATE *src_img,
const IMAGE_STATE *dst_img, const RegionType *region,
const uint32_t i, const char *function,
CopyCommandVersion version) const {
bool skip = false;
const bool is_2khr = (version == COPY_COMMAND_VERSION_2);
const char *vuid;
// Source image checks
VkExtent3D granularity = GetScaledItg(cb_node, src_img);
vuid = is_2khr ? "VUID-VkCopyImageInfo2KHR-srcOffset-01783" : "VUID-vkCmdCopyImage-srcOffset-01783";
skip |= CheckItgOffset(cb_node, ®ion->srcOffset, &granularity, i, function, "srcOffset", vuid);
VkExtent3D subresource_extent = GetImageSubresourceExtent(src_img, ®ion->srcSubresource);
const VkExtent3D extent = region->extent;
vuid = is_2khr ? "VUID-VkCopyImageInfo2KHR-srcOffset-01783" : "VUID-vkCmdCopyImage-srcOffset-01783";
skip |= CheckItgExtent(cb_node, &extent, ®ion->srcOffset, &granularity, &subresource_extent, src_img->createInfo.imageType,
i, function, "extent", vuid);
// Destination image checks
granularity = GetScaledItg(cb_node, dst_img);
vuid = is_2khr ? "VUID-VkCopyImageInfo2KHR-dstOffset-01784" : "VUID-vkCmdCopyImage-dstOffset-01784";
skip |= CheckItgOffset(cb_node, ®ion->dstOffset, &granularity, i, function, "dstOffset", vuid);
// Adjust dest extent, if necessary
const VkExtent3D dest_effective_extent =
GetAdjustedDestImageExtent(src_img->createInfo.format, dst_img->createInfo.format, extent);
subresource_extent = GetImageSubresourceExtent(dst_img, ®ion->dstSubresource);
vuid = is_2khr ? "VUID-VkCopyImageInfo2KHR-dstOffset-01784" : "VUID-vkCmdCopyImage-dstOffset-01784";
skip |= CheckItgExtent(cb_node, &dest_effective_extent, ®ion->dstOffset, &granularity, &subresource_extent,
dst_img->createInfo.imageType, i, function, "extent", vuid);
return skip;
}
// Validate contents of a VkImageCopy or VkImageCopy2KHR struct
template <typename ImageCopyRegionType>
bool CoreChecks::ValidateImageCopyData(const uint32_t regionCount, const ImageCopyRegionType *ic_regions,
const IMAGE_STATE *src_state, const IMAGE_STATE *dst_state,
CopyCommandVersion version) const {
bool skip = false;
const bool is_2khr = (version == COPY_COMMAND_VERSION_2);
const char *func_name = is_2khr ? "vkCmdCopyImage2KHR()" : "vkCmdCopyImage()";
const char *vuid;
for (uint32_t i = 0; i < regionCount; i++) {
const ImageCopyRegionType region = ic_regions[i];
// For comp<->uncomp copies, the copy extent for the dest image must be adjusted
const VkExtent3D src_copy_extent = region.extent;
const VkExtent3D dst_copy_extent =
GetAdjustedDestImageExtent(src_state->createInfo.format, dst_state->createInfo.format, region.extent);
bool slice_override = false;
uint32_t depth_slices = 0;
// Special case for copying between a 1D/2D array and a 3D image
// TBD: This seems like the only way to reconcile 3 mutually-exclusive VU checks for 2D/3D copies. Heads up.
if ((VK_IMAGE_TYPE_3D == src_state->createInfo.imageType) && (VK_IMAGE_TYPE_3D != dst_state->createInfo.imageType)) {
depth_slices = region.dstSubresource.layerCount; // Slice count from 2D subresource
slice_override = (depth_slices != 1);
} else if ((VK_IMAGE_TYPE_3D == dst_state->createInfo.imageType) && (VK_IMAGE_TYPE_3D != src_state->createInfo.imageType)) {
depth_slices = region.srcSubresource.layerCount; // Slice count from 2D subresource
slice_override = (depth_slices != 1);
}
// Do all checks on source image
if (src_state->createInfo.imageType == VK_IMAGE_TYPE_1D) {
if ((0 != region.srcOffset.y) || (1 != src_copy_extent.height)) {
vuid = is_2khr ? "VUID-VkCopyImageInfo2KHR-srcImage-00146" : "VUID-vkCmdCopyImage-srcImage-00146";
skip |= LogError(src_state->image(), vuid,
"%s: pRegion[%d] srcOffset.y is %d and extent.height is %d. For 1D images these must "
"be 0 and 1, respectively.",
func_name, i, region.srcOffset.y, src_copy_extent.height);
}
}
if ((src_state->createInfo.imageType == VK_IMAGE_TYPE_1D) && ((0 != region.srcOffset.z) || (1 != src_copy_extent.depth))) {
vuid = is_2khr ? "VUID-VkCopyImageInfo2KHR-srcImage-01785" : "VUID-vkCmdCopyImage-srcImage-01785";
skip |= LogError(src_state->image(), vuid,
"%s: pRegion[%d] srcOffset.z is %d and extent.depth is %d. For 1D images "
"these must be 0 and 1, respectively.",
func_name, i, region.srcOffset.z, src_copy_extent.depth);
}
if ((src_state->createInfo.imageType == VK_IMAGE_TYPE_2D) && (0 != region.srcOffset.z)) {
vuid = is_2khr ? "VUID-VkCopyImageInfo2KHR-srcImage-01787" : "VUID-vkCmdCopyImage-srcImage-01787";
skip |= LogError(src_state->image(), vuid, "%s: pRegion[%d] srcOffset.z is %d. For 2D images the z-offset must be 0.",
func_name, i, region.srcOffset.z);
}
// Source checks that apply only to compressed images (or to _422 images if ycbcr enabled)
bool ext_ycbcr = IsExtEnabled(device_extensions.vk_khr_sampler_ycbcr_conversion);
if (FormatIsCompressed(src_state->createInfo.format) ||
(ext_ycbcr && FormatIsSinglePlane_422(src_state->createInfo.format))) {
const VkExtent3D block_size = FormatTexelBlockExtent(src_state->createInfo.format);
// image offsets must be multiples of block dimensions
if ((SafeModulo(region.srcOffset.x, block_size.width) != 0) ||
(SafeModulo(region.srcOffset.y, block_size.height) != 0) ||
(SafeModulo(region.srcOffset.z, block_size.depth) != 0)) {
vuid = is_2khr ? "VUID-VkCopyImageInfo2KHR-srcImage-01727" : "VUID-vkCmdCopyImage-srcImage-01727";
skip |= LogError(src_state->image(), vuid,
"%s: pRegion[%d] srcOffset (%d, %d) must be multiples of the compressed image's "
"texel width & height (%d, %d).",
func_name, i, region.srcOffset.x, region.srcOffset.y, block_size.width, block_size.height);
}
const VkExtent3D mip_extent = GetImageSubresourceExtent(src_state, &(region.srcSubresource));
if ((SafeModulo(src_copy_extent.width, block_size.width) != 0) &&
(src_copy_extent.width + region.srcOffset.x != mip_extent.width)) {
vuid = is_2khr ? "VUID-VkCopyImageInfo2KHR-srcImage-01728" : "VUID-vkCmdCopyImage-srcImage-01728";
skip |= LogError(src_state->image(), vuid,
"%s: pRegion[%d] extent width (%d) must be a multiple of the compressed texture block "
"width (%d), or when added to srcOffset.x (%d) must equal the image subresource width (%d).",
func_name, i, src_copy_extent.width, block_size.width, region.srcOffset.x, mip_extent.width);
}
// Extent height must be a multiple of block height, or extent+offset height must equal subresource height
if ((SafeModulo(src_copy_extent.height, block_size.height) != 0) &&
(src_copy_extent.height + region.srcOffset.y != mip_extent.height)) {
vuid = is_2khr ? "VUID-VkCopyImageInfo2KHR-srcImage-01729" : "VUID-vkCmdCopyImage-srcImage-01729";
skip |= LogError(src_state->image(), vuid,
"%s: pRegion[%d] extent height (%d) must be a multiple of the compressed texture block "
"height (%d), or when added to srcOffset.y (%d) must equal the image subresource height (%d).",
func_name, i, src_copy_extent.height, block_size.height, region.srcOffset.y, mip_extent.height);
}
// Extent depth must be a multiple of block depth, or extent+offset depth must equal subresource depth
uint32_t copy_depth = (slice_override ? depth_slices : src_copy_extent.depth);
if ((SafeModulo(copy_depth, block_size.depth) != 0) && (copy_depth + region.srcOffset.z != mip_extent.depth)) {
vuid = is_2khr ? "VUID-VkCopyImageInfo2KHR-srcImage-01730" : "VUID-vkCmdCopyImage-srcImage-01730";
skip |= LogError(src_state->image(), vuid,
"%s: pRegion[%d] extent width (%d) must be a multiple of the compressed texture block "
"depth (%d), or when added to srcOffset.z (%d) must equal the image subresource depth (%d).",
func_name, i, src_copy_extent.depth, block_size.depth, region.srcOffset.z, mip_extent.depth);
}
} // Compressed
// Do all checks on dest image
if (dst_state->createInfo.imageType == VK_IMAGE_TYPE_1D) {
if ((0 != region.dstOffset.y) || (1 != dst_copy_extent.height)) {
vuid = is_2khr ? "VUID-VkCopyImageInfo2KHR-dstImage-00152" : "VUID-vkCmdCopyImage-dstImage-00152";
skip |= LogError(dst_state->image(), vuid,
"%s: pRegion[%d] dstOffset.y is %d and dst_copy_extent.height is %d. For 1D images "
"these must be 0 and 1, respectively.",
func_name, i, region.dstOffset.y, dst_copy_extent.height);
}
}
if ((dst_state->createInfo.imageType == VK_IMAGE_TYPE_1D) && ((0 != region.dstOffset.z) || (1 != dst_copy_extent.depth))) {
vuid = is_2khr ? "VUID-VkCopyImageInfo2KHR-dstImage-01786" : "VUID-vkCmdCopyImage-dstImage-01786";
skip |= LogError(dst_state->image(), vuid,
"%s: pRegion[%d] dstOffset.z is %d and extent.depth is %d. For 1D images these must be 0 "
"and 1, respectively.",
func_name, i, region.dstOffset.z, dst_copy_extent.depth);
}
if ((dst_state->createInfo.imageType == VK_IMAGE_TYPE_2D) && (0 != region.dstOffset.z)) {
vuid = is_2khr ? "VUID-VkCopyImageInfo2KHR-dstImage-01788" : "VUID-vkCmdCopyImage-dstImage-01788";
skip |= LogError(dst_state->image(), vuid, "%s: pRegion[%d] dstOffset.z is %d. For 2D images the z-offset must be 0.",
func_name, i, region.dstOffset.z);
}
        // The baseArrayLayer/layerCount restrictions for 3D images differ depending on whether VK_KHR_maintenance1 is enabled
if (device_extensions.vk_khr_maintenance1) {
if (src_state->createInfo.imageType == VK_IMAGE_TYPE_3D) {
if ((0 != region.srcSubresource.baseArrayLayer) || (1 != region.srcSubresource.layerCount)) {
vuid = is_2khr ? "VUID-VkCopyImageInfo2KHR-srcImage-04443" : "VUID-vkCmdCopyImage-srcImage-04443";
skip |= LogError(src_state->image(), vuid,
"%s: pRegion[%d] srcSubresource.baseArrayLayer is %d and srcSubresource.layerCount "
"is %d. For VK_IMAGE_TYPE_3D images these must be 0 and 1, respectively.",
func_name, i, region.srcSubresource.baseArrayLayer, region.srcSubresource.layerCount);
}
}
if (dst_state->createInfo.imageType == VK_IMAGE_TYPE_3D) {
if ((0 != region.dstSubresource.baseArrayLayer) || (1 != region.dstSubresource.layerCount)) {
vuid = is_2khr ? "VUID-VkCopyImageInfo2KHR-dstImage-04444" : "VUID-vkCmdCopyImage-dstImage-04444";
skip |= LogError(dst_state->image(), vuid,
"%s: pRegion[%d] dstSubresource.baseArrayLayer is %d and dstSubresource.layerCount "
"is %d. For VK_IMAGE_TYPE_3D images these must be 0 and 1, respectively.",
func_name, i, region.dstSubresource.baseArrayLayer, region.dstSubresource.layerCount);
}
}
} else { // Pre maint 1
if (src_state->createInfo.imageType == VK_IMAGE_TYPE_3D || dst_state->createInfo.imageType == VK_IMAGE_TYPE_3D) {
if ((0 != region.srcSubresource.baseArrayLayer) || (1 != region.srcSubresource.layerCount)) {
vuid = is_2khr ? "VUID-VkCopyImageInfo2KHR-srcImage-00139" : "VUID-vkCmdCopyImage-srcImage-00139";
skip |= LogError(src_state->image(), vuid,
"%s: pRegion[%d] srcSubresource.baseArrayLayer is %d and "
"srcSubresource.layerCount is %d. For copies with either source or dest of type "
"VK_IMAGE_TYPE_3D, these must be 0 and 1, respectively.",
func_name, i, region.srcSubresource.baseArrayLayer, region.srcSubresource.layerCount);
}
if ((0 != region.dstSubresource.baseArrayLayer) || (1 != region.dstSubresource.layerCount)) {
vuid = is_2khr ? "VUID-VkCopyImageInfo2KHR-srcImage-00139" : "VUID-vkCmdCopyImage-srcImage-00139";
skip |= LogError(dst_state->image(), vuid,
"%s: pRegion[%d] dstSubresource.baseArrayLayer is %d and "
"dstSubresource.layerCount is %d. For copies with either source or dest of type "
"VK_IMAGE_TYPE_3D, these must be 0 and 1, respectively.",
func_name, i, region.dstSubresource.baseArrayLayer, region.dstSubresource.layerCount);
}
}
}
// Dest checks that apply only to compressed images (or to _422 images if ycbcr enabled)
if (FormatIsCompressed(dst_state->createInfo.format) ||
(ext_ycbcr && FormatIsSinglePlane_422(dst_state->createInfo.format))) {
const VkExtent3D block_size = FormatTexelBlockExtent(dst_state->createInfo.format);
// image offsets must be multiples of block dimensions
if ((SafeModulo(region.dstOffset.x, block_size.width) != 0) ||
(SafeModulo(region.dstOffset.y, block_size.height) != 0) ||
(SafeModulo(region.dstOffset.z, block_size.depth) != 0)) {
vuid = is_2khr ? "VUID-VkCopyImageInfo2KHR-dstImage-01731" : "VUID-vkCmdCopyImage-dstImage-01731";
skip |= LogError(dst_state->image(), vuid,
"%s: pRegion[%d] dstOffset (%d, %d) must be multiples of the compressed image's "
"texel width & height (%d, %d).",
func_name, i, region.dstOffset.x, region.dstOffset.y, block_size.width, block_size.height);
}
const VkExtent3D mip_extent = GetImageSubresourceExtent(dst_state, &(region.dstSubresource));
if ((SafeModulo(dst_copy_extent.width, block_size.width) != 0) &&
(dst_copy_extent.width + region.dstOffset.x != mip_extent.width)) {
vuid = is_2khr ? "VUID-VkCopyImageInfo2KHR-dstImage-01732" : "VUID-vkCmdCopyImage-dstImage-01732";
skip |= LogError(dst_state->image(), vuid,
"%s: pRegion[%d] dst_copy_extent width (%d) must be a multiple of the compressed texture "
"block width (%d), or when added to dstOffset.x (%d) must equal the image subresource width (%d).",
func_name, i, dst_copy_extent.width, block_size.width, region.dstOffset.x, mip_extent.width);
}
// Extent height must be a multiple of block height, or dst_copy_extent+offset height must equal subresource height
if ((SafeModulo(dst_copy_extent.height, block_size.height) != 0) &&
(dst_copy_extent.height + region.dstOffset.y != mip_extent.height)) {
vuid = is_2khr ? "VUID-VkCopyImageInfo2KHR-dstImage-01733" : "VUID-vkCmdCopyImage-dstImage-01733";
skip |= LogError(dst_state->image(), vuid,
"%s: pRegion[%d] dst_copy_extent height (%d) must be a multiple of the compressed "
"texture block height (%d), or when added to dstOffset.y (%d) must equal the image subresource "
"height (%d).",
func_name, i, dst_copy_extent.height, block_size.height, region.dstOffset.y, mip_extent.height);
}
// Extent depth must be a multiple of block depth, or dst_copy_extent+offset depth must equal subresource depth
uint32_t copy_depth = (slice_override ? depth_slices : dst_copy_extent.depth);
if ((SafeModulo(copy_depth, block_size.depth) != 0) && (copy_depth + region.dstOffset.z != mip_extent.depth)) {
vuid = is_2khr ? "VUID-VkCopyImageInfo2KHR-dstImage-01734" : "VUID-vkCmdCopyImage-dstImage-01734";
skip |= LogError(dst_state->image(), vuid,
"%s: pRegion[%d] dst_copy_extent width (%d) must be a multiple of the compressed texture "
"block depth (%d), or when added to dstOffset.z (%d) must equal the image subresource depth (%d).",
func_name, i, dst_copy_extent.depth, block_size.depth, region.dstOffset.z, mip_extent.depth);
}
} // Compressed
}
return skip;
}
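// Shared validation for vkCmdCopyImage and vkCmdCopyImage2KHR; the RegionType template parameter selects between VkImageCopy
// and VkImageCopy2KHR and the version parameter selects the matching VUIDs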
template <typename RegionType>
bool CoreChecks::ValidateCmdCopyImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
const RegionType *pRegions, CopyCommandVersion version) const {
const auto *cb_node = GetCBState(commandBuffer);
const auto *src_image_state = GetImageState(srcImage);
const auto *dst_image_state = GetImageState(dstImage);
const VkFormat src_format = src_image_state->createInfo.format;
const VkFormat dst_format = dst_image_state->createInfo.format;
const bool is_2khr = (version == COPY_COMMAND_VERSION_2);
bool skip = false;
const char *func_name = is_2khr ? "vkCmdCopyImage2KHR()" : "vkCmdCopyImage()";
const CMD_TYPE cmd_type = is_2khr ? CMD_COPYIMAGE2KHR : CMD_COPYIMAGE;
const char *vuid;
skip = ValidateImageCopyData(regionCount, pRegions, src_image_state, dst_image_state, version);
VkCommandBuffer command_buffer = cb_node->commandBuffer();
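    // Validate each region independently; errors accumulate in 'skip' so every problem region is reported in a single pass.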
for (uint32_t i = 0; i < regionCount; i++) {
const RegionType region = pRegions[i];
// For comp/uncomp copies, the copy extent for the dest image must be adjusted
VkExtent3D src_copy_extent = region.extent;
VkExtent3D dst_copy_extent = GetAdjustedDestImageExtent(src_format, dst_format, region.extent);
bool slice_override = false;
uint32_t depth_slices = 0;
// Special case for copying between a 1D/2D array and a 3D image
// TBD: This seems like the only way to reconcile 3 mutually-exclusive VU checks for 2D/3D copies. Heads up.
if ((VK_IMAGE_TYPE_3D == src_image_state->createInfo.imageType) &&
(VK_IMAGE_TYPE_3D != dst_image_state->createInfo.imageType)) {
depth_slices = region.dstSubresource.layerCount; // Slice count from 2D subresource
slice_override = (depth_slices != 1);
} else if ((VK_IMAGE_TYPE_3D == dst_image_state->createInfo.imageType) &&
(VK_IMAGE_TYPE_3D != src_image_state->createInfo.imageType)) {
depth_slices = region.srcSubresource.layerCount; // Slice count from 2D subresource
slice_override = (depth_slices != 1);
}
        skip |= ValidateImageSubresourceLayers(cb_node, &region.srcSubresource, func_name, "srcSubresource", i);
        skip |= ValidateImageSubresourceLayers(cb_node, &region.dstSubresource, func_name, "dstSubresource", i);
vuid = is_2khr ? "VUID-VkCopyImageInfo2KHR-srcSubresource-01696" : "VUID-vkCmdCopyImage-srcSubresource-01696";
skip |=
ValidateImageMipLevel(cb_node, src_image_state, region.srcSubresource.mipLevel, i, func_name, "srcSubresource", vuid);
vuid = is_2khr ? "VUID-VkCopyImageInfo2KHR-dstSubresource-01697" : "VUID-vkCmdCopyImage-dstSubresource-01697";
skip |=
ValidateImageMipLevel(cb_node, dst_image_state, region.dstSubresource.mipLevel, i, func_name, "dstSubresource", vuid);
vuid = is_2khr ? "VUID-VkCopyImageInfo2KHR-srcSubresource-01698" : "VUID-vkCmdCopyImage-srcSubresource-01698";
skip |= ValidateImageArrayLayerRange(cb_node, src_image_state, region.srcSubresource.baseArrayLayer,
region.srcSubresource.layerCount, i, func_name, "srcSubresource", vuid);
vuid = is_2khr ? "VUID-VkCopyImageInfo2KHR-dstSubresource-01699" : "VUID-vkCmdCopyImage-dstSubresource-01699";
skip |= ValidateImageArrayLayerRange(cb_node, dst_image_state, region.dstSubresource.baseArrayLayer,
region.dstSubresource.layerCount, i, func_name, "dstSubresource", vuid);
if (device_extensions.vk_khr_maintenance1) {
// No chance of mismatch if we're overriding depth slice count
if (!slice_override) {
// The number of depth slices in srcSubresource and dstSubresource must match
// Depth comes from layerCount for 1D,2D resources, from extent.depth for 3D
uint32_t src_slices =
(VK_IMAGE_TYPE_3D == src_image_state->createInfo.imageType ? src_copy_extent.depth
: region.srcSubresource.layerCount);
uint32_t dst_slices =
(VK_IMAGE_TYPE_3D == dst_image_state->createInfo.imageType ? dst_copy_extent.depth
: region.dstSubresource.layerCount);
if (src_slices != dst_slices) {
vuid = is_2khr ? "VUID-VkImageCopy2KHR-extent-00140" : "VUID-VkImageCopy-extent-00140";
skip |= LogError(command_buffer, vuid,
"%s: number of depth slices in source (%u) and destination (%u) subresources for pRegions[%u] "
"do not match.",
func_name, src_slices, dst_slices, i);
}
}
} else {
// For each region the layerCount member of srcSubresource and dstSubresource must match
if (region.srcSubresource.layerCount != region.dstSubresource.layerCount) {
vuid = is_2khr ? "VUID-VkImageCopy2KHR-layerCount-00138" : "VUID-VkImageCopy-layerCount-00138";
skip |=
LogError(command_buffer, vuid,
"%s: number of layers in source (%u) and destination (%u) subresources for pRegions[%u] do not match",
func_name, region.srcSubresource.layerCount, region.dstSubresource.layerCount, i);
}
}
// Do multiplane-specific checks, if extension enabled
if (device_extensions.vk_khr_sampler_ycbcr_conversion) {
if ((!FormatIsMultiplane(src_format)) && (!FormatIsMultiplane(dst_format))) {
// If neither image is multi-plane the aspectMask member of src and dst must match
if (region.srcSubresource.aspectMask != region.dstSubresource.aspectMask) {
vuid = is_2khr ? "VUID-VkCopyImageInfo2KHR-srcImage-01551" : "VUID-vkCmdCopyImage-srcImage-01551";
skip |= LogError(command_buffer, vuid,
"%s: Copy between non-multiplane images with differing aspectMasks in pRegions[%u] with "
"source (0x%x) destination (0x%x).",
func_name, i, region.srcSubresource.aspectMask, region.dstSubresource.aspectMask);
}
} else {
// Source image multiplane checks
uint32_t planes = FormatPlaneCount(src_format);
VkImageAspectFlags aspect = region.srcSubresource.aspectMask;
if ((2 == planes) && (aspect != VK_IMAGE_ASPECT_PLANE_0_BIT) && (aspect != VK_IMAGE_ASPECT_PLANE_1_BIT)) {
vuid = is_2khr ? "VUID-VkCopyImageInfo2KHR-srcImage-01552" : "VUID-vkCmdCopyImage-srcImage-01552";
skip |= LogError(command_buffer, vuid,
"%s: pRegions[%u].srcSubresource.aspectMask (0x%x) is invalid for 2-plane format.", func_name,
i, aspect);
}
if ((3 == planes) && (aspect != VK_IMAGE_ASPECT_PLANE_0_BIT) && (aspect != VK_IMAGE_ASPECT_PLANE_1_BIT) &&
(aspect != VK_IMAGE_ASPECT_PLANE_2_BIT)) {
vuid = is_2khr ? "VUID-VkCopyImageInfo2KHR-srcImage-01553" : "VUID-vkCmdCopyImage-srcImage-01553";
skip |= LogError(command_buffer, vuid,
"%s: pRegions[%u].srcSubresource.aspectMask (0x%x) is invalid for 3-plane format.", func_name,
i, aspect);
}
// Single-plane to multi-plane
if ((!FormatIsMultiplane(src_format)) && (FormatIsMultiplane(dst_format)) &&
(VK_IMAGE_ASPECT_COLOR_BIT != aspect)) {
vuid = is_2khr ? "VUID-VkCopyImageInfo2KHR-dstImage-01557" : "VUID-vkCmdCopyImage-dstImage-01557";
skip |= LogError(command_buffer, vuid,
"%s: pRegions[%u].srcSubresource.aspectMask (0x%x) is not VK_IMAGE_ASPECT_COLOR_BIT.",
func_name, i, aspect);
}
// Dest image multiplane checks
planes = FormatPlaneCount(dst_format);
aspect = region.dstSubresource.aspectMask;
if ((2 == planes) && (aspect != VK_IMAGE_ASPECT_PLANE_0_BIT) && (aspect != VK_IMAGE_ASPECT_PLANE_1_BIT)) {
vuid = is_2khr ? "VUID-VkCopyImageInfo2KHR-dstImage-01554" : "VUID-vkCmdCopyImage-dstImage-01554";
skip |= LogError(command_buffer, vuid,
"%s: pRegions[%u].dstSubresource.aspectMask (0x%x) is invalid for 2-plane format.", func_name,
i, aspect);
}
if ((3 == planes) && (aspect != VK_IMAGE_ASPECT_PLANE_0_BIT) && (aspect != VK_IMAGE_ASPECT_PLANE_1_BIT) &&
(aspect != VK_IMAGE_ASPECT_PLANE_2_BIT)) {
vuid = is_2khr ? "VUID-VkCopyImageInfo2KHR-dstImage-01555" : "VUID-vkCmdCopyImage-dstImage-01555";
skip |= LogError(command_buffer, vuid,
"%s: pRegions[%u].dstSubresource.aspectMask (0x%x) is invalid for 3-plane format.", func_name,
i, aspect);
}
// Multi-plane to single-plane
if ((FormatIsMultiplane(src_format)) && (!FormatIsMultiplane(dst_format)) &&
(VK_IMAGE_ASPECT_COLOR_BIT != aspect)) {
vuid = is_2khr ? "VUID-VkCopyImageInfo2KHR-srcImage-01556" : "VUID-vkCmdCopyImage-srcImage-01556";
skip |= LogError(command_buffer, vuid,
"%s: pRegions[%u].dstSubresource.aspectMask (0x%x) is not VK_IMAGE_ASPECT_COLOR_BIT.",
func_name, i, aspect);
}
}
} else {
// !vk_khr_sampler_ycbcr_conversion
// not multi-plane, the aspectMask member of srcSubresource and dstSubresource must match
if (region.srcSubresource.aspectMask != region.dstSubresource.aspectMask) {
vuid = is_2khr ? "VUID-VkImageCopy2KHR-aspectMask-00137" : "VUID-VkImageCopy-aspectMask-00137";
skip |= LogError(
command_buffer, vuid,
"%s: Copy between images with differing aspectMasks in pRegions[%u] with source (0x%x) destination (0x%x).",
func_name, i, region.srcSubresource.aspectMask, region.dstSubresource.aspectMask);
}
}
// For each region, the aspectMask member of srcSubresource must be present in the source image
if (!VerifyAspectsPresent(region.srcSubresource.aspectMask, src_format)) {
vuid = is_2khr ? "VUID-VkCopyImageInfo2KHR-aspectMask-00142" : "VUID-vkCmdCopyImage-aspectMask-00142";
skip |=
LogError(command_buffer, vuid,
"%s: pRegions[%u].srcSubresource.aspectMask (0x%x) cannot specify aspects not present in source image.",
func_name, i, region.srcSubresource.aspectMask);
}
// For each region, the aspectMask member of dstSubresource must be present in the destination image
if (!VerifyAspectsPresent(region.dstSubresource.aspectMask, dst_format)) {
vuid = is_2khr ? "VUID-VkCopyImageInfo2KHR-aspectMask-00143" : "VUID-vkCmdCopyImage-aspectMask-00143";
skip |= LogError(
command_buffer, vuid,
"%s: pRegions[%u].dstSubresource.aspectMask (0x%x) cannot specify aspects not present in destination image.",
func_name, i, region.dstSubresource.aspectMask);
}
        // Each dimension's offset + extent must fall within the image subresource extent
VkExtent3D subresource_extent = GetImageSubresourceExtent(src_image_state, &(region.srcSubresource));
if (slice_override) src_copy_extent.depth = depth_slices;
uint32_t extent_check = ExceedsBounds(&(region.srcOffset), &src_copy_extent, &subresource_extent);
if (extent_check & kXBit) {
vuid = is_2khr ? "VUID-VkCopyImageInfo2KHR-srcOffset-00144" : "VUID-vkCmdCopyImage-srcOffset-00144";
skip |= LogError(command_buffer, vuid,
"%s: Source image pRegion[%u] x-dimension offset [%1d] + extent [%1d] exceeds subResource "
"width [%1d].",
func_name, i, region.srcOffset.x, src_copy_extent.width, subresource_extent.width);
}
if (extent_check & kYBit) {
vuid = is_2khr ? "VUID-VkCopyImageInfo2KHR-srcOffset-00145" : "VUID-vkCmdCopyImage-srcOffset-00145";
skip |= LogError(command_buffer, vuid,
"%s: Source image pRegion[%u] y-dimension offset [%1d] + extent [%1d] exceeds subResource "
"height [%1d].",
func_name, i, region.srcOffset.y, src_copy_extent.height, subresource_extent.height);
}
if (extent_check & kZBit) {
vuid = is_2khr ? "VUID-VkCopyImageInfo2KHR-srcOffset-00147" : "VUID-vkCmdCopyImage-srcOffset-00147";
skip |= LogError(command_buffer, vuid,
"%s: Source image pRegion[%u] z-dimension offset [%1d] + extent [%1d] exceeds subResource "
"depth [%1d].",
func_name, i, region.srcOffset.z, src_copy_extent.depth, subresource_extent.depth);
}
// Adjust dest extent if necessary
subresource_extent = GetImageSubresourceExtent(dst_image_state, &(region.dstSubresource));
if (slice_override) dst_copy_extent.depth = depth_slices;
extent_check = ExceedsBounds(&(region.dstOffset), &dst_copy_extent, &subresource_extent);
if (extent_check & kXBit) {
vuid = is_2khr ? "VUID-VkCopyImageInfo2KHR-dstOffset-00150" : "VUID-vkCmdCopyImage-dstOffset-00150";
skip |= LogError(command_buffer, vuid,
"%s: Dest image pRegion[%u] x-dimension offset [%1d] + extent [%1d] exceeds subResource "
"width [%1d].",
func_name, i, region.dstOffset.x, dst_copy_extent.width, subresource_extent.width);
}
if (extent_check & kYBit) {
vuid = is_2khr ? "VUID-VkCopyImageInfo2KHR-dstOffset-00151" : "VUID-vkCmdCopyImage-dstOffset-00151";
skip |= LogError(command_buffer, vuid,
"%s): Dest image pRegion[%u] y-dimension offset [%1d] + extent [%1d] exceeds subResource "
"height [%1d].",
func_name, i, region.dstOffset.y, dst_copy_extent.height, subresource_extent.height);
}
if (extent_check & kZBit) {
vuid = is_2khr ? "VUID-VkCopyImageInfo2KHR-dstOffset-00153" : "VUID-vkCmdCopyImage-dstOffset-00153";
skip |= LogError(command_buffer, vuid,
"%s: Dest image pRegion[%u] z-dimension offset [%1d] + extent [%1d] exceeds subResource "
"depth [%1d].",
func_name, i, region.dstOffset.z, dst_copy_extent.depth, subresource_extent.depth);
}
// The union of all source regions, and the union of all destination regions, specified by the elements of regions,
// must not overlap in memory
if (src_image_state->image() == dst_image_state->image()) {
for (uint32_t j = 0; j < regionCount; j++) {
                if (RegionIntersects(&region, &pRegions[j], src_image_state->createInfo.imageType,
FormatIsMultiplane(src_format))) {
vuid = is_2khr ? "VUID-VkCopyImageInfo2KHR-pRegions-00124" : "VUID-vkCmdCopyImage-pRegions-00124";
skip |= LogError(command_buffer, vuid, "%s: pRegion[%u] src overlaps with pRegions[%u].", func_name, i, j);
}
}
}
        // Check depth for 2D: post-Maintenance1 both images must be 2D, while prior to it only one had to be 2D
if (device_extensions.vk_khr_maintenance1) {
if (((VK_IMAGE_TYPE_2D == src_image_state->createInfo.imageType) &&
(VK_IMAGE_TYPE_2D == dst_image_state->createInfo.imageType)) &&
(src_copy_extent.depth != 1)) {
vuid = is_2khr ? "VUID-VkCopyImageInfo2KHR-srcImage-01790" : "VUID-vkCmdCopyImage-srcImage-01790";
skip |= LogError(command_buffer, vuid,
"%s: pRegion[%u] both srcImage and dstImage are 2D and extent.depth is %u and has to be 1",
func_name, i, src_copy_extent.depth);
}
} else {
if (((VK_IMAGE_TYPE_2D == src_image_state->createInfo.imageType) ||
(VK_IMAGE_TYPE_2D == dst_image_state->createInfo.imageType)) &&
(src_copy_extent.depth != 1)) {
vuid = is_2khr ? "VUID-VkCopyImageInfo2KHR-srcImage-01789" : "VUID-vkCmdCopyImage-srcImage-01789";
skip |= LogError(command_buffer, vuid,
"%s: pRegion[%u] either srcImage or dstImage is 2D and extent.depth is %u and has to be 1",
func_name, i, src_copy_extent.depth);
}
}
// Check if 2D with 3D and depth not equal to 2D layerCount
if ((VK_IMAGE_TYPE_2D == src_image_state->createInfo.imageType) &&
(VK_IMAGE_TYPE_3D == dst_image_state->createInfo.imageType) &&
(src_copy_extent.depth != region.srcSubresource.layerCount)) {
vuid = is_2khr ? "VUID-VkCopyImageInfo2KHR-srcImage-01791" : "VUID-vkCmdCopyImage-srcImage-01791";
skip |= LogError(command_buffer, vuid,
"%s: pRegion[%u] srcImage is 2D, dstImage is 3D and extent.depth is %u and has to be "
"srcSubresource.layerCount (%u)",
func_name, i, src_copy_extent.depth, region.srcSubresource.layerCount);
} else if ((VK_IMAGE_TYPE_3D == src_image_state->createInfo.imageType) &&
(VK_IMAGE_TYPE_2D == dst_image_state->createInfo.imageType) &&
(src_copy_extent.depth != region.dstSubresource.layerCount)) {
vuid = is_2khr ? "VUID-VkCopyImageInfo2KHR-dstImage-01792" : "VUID-vkCmdCopyImage-dstImage-01792";
skip |= LogError(command_buffer, vuid,
"%s: pRegion[%u] srcImage is 3D, dstImage is 2D and extent.depth is %u and has to be "
"dstSubresource.layerCount (%u)",
func_name, i, src_copy_extent.depth, region.dstSubresource.layerCount);
}
        // Check for multi-plane format compatibility
if (FormatIsMultiplane(src_format) || FormatIsMultiplane(dst_format)) {
size_t src_format_size = 0;
size_t dst_format_size = 0;
if (FormatIsMultiplane(src_format)) {
const VkFormat plane_format = FindMultiplaneCompatibleFormat(src_format, region.srcSubresource.aspectMask);
src_format_size = FormatElementSize(plane_format);
} else {
src_format_size = FormatElementSize(src_format);
}
if (FormatIsMultiplane(dst_format)) {
const VkFormat plane_format = FindMultiplaneCompatibleFormat(dst_format, region.dstSubresource.aspectMask);
dst_format_size = FormatElementSize(plane_format);
} else {
dst_format_size = FormatElementSize(dst_format);
}
// If size is still zero, then format is invalid and will be caught in another VU
if ((src_format_size != dst_format_size) && (src_format_size != 0) && (dst_format_size != 0)) {
vuid = is_2khr ? "VUID-VkCopyImageInfo2KHR-None-01549" : "VUID-vkCmdCopyImage-None-01549";
skip |= LogError(command_buffer, vuid,
"%s: pRegions[%u] called with non-compatible image formats. "
"The src format %s with aspectMask %s is not compatible with dst format %s aspectMask %s.",
func_name, i, string_VkFormat(src_format),
string_VkImageAspectFlags(region.srcSubresource.aspectMask).c_str(), string_VkFormat(dst_format),
string_VkImageAspectFlags(region.dstSubresource.aspectMask).c_str());
}
}
}
// The formats of non-multiplane src_image and dst_image must be compatible. Formats are considered compatible if their texel
// size in bytes is the same between both formats. For example, VK_FORMAT_R8G8B8A8_UNORM is compatible with VK_FORMAT_R32_UINT
    // because both texels are 4 bytes in size.
if (!FormatIsMultiplane(src_format) && !FormatIsMultiplane(dst_format)) {
const char *compatible_vuid =
(device_extensions.vk_khr_sampler_ycbcr_conversion)
? (is_2khr ? "VUID-VkCopyImageInfo2KHR-srcImage-01548" : "VUID-vkCmdCopyImage-srcImage-01548")
: (is_2khr ? "VUID-VkCopyImageInfo2KHR-srcImage-00135" : "VUID-vkCmdCopyImage-srcImage-00135");
// Depth/stencil formats must match exactly.
if (FormatIsDepthOrStencil(src_format) || FormatIsDepthOrStencil(dst_format)) {
if (src_format != dst_format) {
skip |= LogError(command_buffer, compatible_vuid,
"%s: Depth/stencil formats must match exactly for src (%s) and dst (%s).", func_name,
string_VkFormat(src_format), string_VkFormat(dst_format));
}
} else {
if (FormatElementSize(src_format) != FormatElementSize(dst_format)) {
skip |= LogError(command_buffer, compatible_vuid,
"%s: Unmatched image format sizes. "
"The src format %s has size of %" PRIu32 " and dst format %s has size of %" PRIu32 ".",
func_name, string_VkFormat(src_format), FormatElementSize(src_format), string_VkFormat(dst_format),
FormatElementSize(dst_format));
}
}
}
// Source and dest image sample counts must match
if (src_image_state->createInfo.samples != dst_image_state->createInfo.samples) {
        vuid = is_2khr ? "VUID-VkCopyImageInfo2KHR-srcImage-00136" : "VUID-vkCmdCopyImage-srcImage-00136";
        skip |=
            LogError(command_buffer, vuid, "%s: The src image sample count (%s) does not match the dst image sample count (%s).",
func_name, string_VkSampleCountFlagBits(src_image_state->createInfo.samples),
string_VkSampleCountFlagBits(dst_image_state->createInfo.samples));
}
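    // The applicable VUID differs depending on whether VK_KHR_sampler_ycbcr_conversion is enabled, so select the variant before each check.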
vuid = (device_extensions.vk_khr_sampler_ycbcr_conversion)
? (is_2khr ? "VUID-VkCopyImageInfo2KHR-srcImage-01546" : "VUID-vkCmdCopyImage-srcImage-01546")
: (is_2khr ? "VUID-VkCopyImageInfo2KHR-srcImage-00127" : "VUID-vkCmdCopyImage-srcImage-00127");
skip |= ValidateMemoryIsBoundToImage(src_image_state, func_name, vuid);
vuid = (device_extensions.vk_khr_sampler_ycbcr_conversion)
? (is_2khr ? "VUID-VkCopyImageInfo2KHR-dstImage-01547" : "VUID-vkCmdCopyImage-dstImage-01547")
: (is_2khr ? "VUID-VkCopyImageInfo2KHR-dstImage-00132" : "VUID-vkCmdCopyImage-dstImage-00132");
skip |= ValidateMemoryIsBoundToImage(dst_image_state, func_name, vuid);
// Validate that SRC & DST images have correct usage flags set
vuid = is_2khr ? "VUID-VkCopyImageInfo2KHR-srcImage-00126" : "VUID-vkCmdCopyImage-srcImage-00126";
skip |= ValidateImageUsageFlags(src_image_state, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, true, vuid, func_name,
"VK_IMAGE_USAGE_TRANSFER_SRC_BIT");
vuid = is_2khr ? "VUID-VkCopyImageInfo2KHR-dstImage-00131" : "VUID-vkCmdCopyImage-dstImage-00131";
skip |= ValidateImageUsageFlags(dst_image_state, VK_IMAGE_USAGE_TRANSFER_DST_BIT, true, vuid, func_name,
"VK_IMAGE_USAGE_TRANSFER_DST_BIT");
vuid = is_2khr ? "VUID-vkCmdCopyImage2KHR-commandBuffer-01825" : "VUID-vkCmdCopyImage-commandBuffer-01825";
skip |= ValidateProtectedImage(cb_node, src_image_state, func_name, vuid);
vuid = is_2khr ? "VUID-vkCmdCopyImage2KHR-commandBuffer-01826" : "VUID-vkCmdCopyImage-commandBuffer-01826";
skip |= ValidateProtectedImage(cb_node, dst_image_state, func_name, vuid);
vuid = is_2khr ? "VUID-vkCmdCopyImage2KHR-commandBuffer-01827" : "VUID-vkCmdCopyImage-commandBuffer-01827";
skip |= ValidateUnprotectedImage(cb_node, dst_image_state, func_name, vuid);
// Validation for VK_EXT_fragment_density_map
if (src_image_state->createInfo.flags & VK_IMAGE_CREATE_SUBSAMPLED_BIT_EXT) {
vuid = is_2khr ? "VUID-VkCopyImageInfo2KHR-dstImage-02542" : "VUID-vkCmdCopyImage-dstImage-02542";
skip |=
LogError(command_buffer, vuid,
"%s: srcImage must not have been created with flags containing VK_IMAGE_CREATE_SUBSAMPLED_BIT_EXT", func_name);
}
if (dst_image_state->createInfo.flags & VK_IMAGE_CREATE_SUBSAMPLED_BIT_EXT) {
vuid = is_2khr ? "VUID-VkCopyImageInfo2KHR-dstImage-02542" : "VUID-vkCmdCopyImage-dstImage-02542";
skip |=
LogError(command_buffer, vuid,
"%s: dstImage must not have been created with flags containing VK_IMAGE_CREATE_SUBSAMPLED_BIT_EXT", func_name);
}
if (device_extensions.vk_khr_maintenance1) {
vuid = is_2khr ? "VUID-VkCopyImageInfo2KHR-srcImage-01995" : "VUID-vkCmdCopyImage-srcImage-01995";
skip |= ValidateImageFormatFeatureFlags(src_image_state, VK_FORMAT_FEATURE_TRANSFER_SRC_BIT, func_name, vuid);
vuid = is_2khr ? "VUID-VkCopyImageInfo2KHR-dstImage-01996" : "VUID-vkCmdCopyImage-dstImage-01996";
skip |= ValidateImageFormatFeatureFlags(dst_image_state, VK_FORMAT_FEATURE_TRANSFER_DST_BIT, func_name, vuid);
}
skip |= ValidateCmd(cb_node, cmd_type, func_name);
bool hit_error = false;
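    // Shared presentable images (VK_KHR_shared_presentable_image) are checked against a different invalid-layout VUID than regular images.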
const char *invalid_src_layout_vuid =
(src_image_state->shared_presentable && device_extensions.vk_khr_shared_presentable_image)
? (is_2khr ? "VUID-VkCopyImageInfo2KHR-srcImageLayout-01917" : "VUID-vkCmdCopyImage-srcImageLayout-01917")
: (is_2khr ? "VUID-VkCopyImageInfo2KHR-srcImageLayout-00129" : "VUID-vkCmdCopyImage-srcImageLayout-00129");
const char *invalid_dst_layout_vuid =
(dst_image_state->shared_presentable && device_extensions.vk_khr_shared_presentable_image)
? (is_2khr ? "VUID-VkCopyImageInfo2KHR-dstImageLayout-01395" : "VUID-vkCmdCopyImage-dstImageLayout-01395")
: (is_2khr ? "VUID-VkCopyImageInfo2KHR-dstImageLayout-00134" : "VUID-vkCmdCopyImage-dstImageLayout-00134");
for (uint32_t i = 0; i < regionCount; ++i) {
vuid = is_2khr ? "VUID-VkCopyImageInfo2KHR-srcImageLayout-00128" : "VUID-vkCmdCopyImage-srcImageLayout-00128";
skip |= VerifyImageLayout(cb_node, src_image_state, pRegions[i].srcSubresource, srcImageLayout,
VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, func_name, invalid_src_layout_vuid, vuid, &hit_error);
vuid = is_2khr ? "VUID-VkCopyImageInfo2KHR-dstImageLayout-00133" : "VUID-vkCmdCopyImage-dstImageLayout-00133";
skip |= VerifyImageLayout(cb_node, dst_image_state, pRegions[i].dstSubresource, dstImageLayout,
VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, func_name, invalid_dst_layout_vuid, vuid, &hit_error);
skip |= ValidateCopyImageTransferGranularityRequirements(cb_node, src_image_state, dst_image_state, &pRegions[i], i,
func_name, version);
}
return skip;
}
bool CoreChecks::PreCallValidateCmdCopyImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
const VkImageCopy *pRegions) const {
return ValidateCmdCopyImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount, pRegions,
COPY_COMMAND_VERSION_1);
}
bool CoreChecks::PreCallValidateCmdCopyImage2KHR(VkCommandBuffer commandBuffer, const VkCopyImageInfo2KHR *pCopyImageInfo) const {
return ValidateCmdCopyImage(commandBuffer, pCopyImageInfo->srcImage, pCopyImageInfo->srcImageLayout, pCopyImageInfo->dstImage,
pCopyImageInfo->dstImageLayout, pCopyImageInfo->regionCount, pCopyImageInfo->pRegions,
COPY_COMMAND_VERSION_2);
}
void CoreChecks::PreCallRecordCmdCopyImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
const VkImageCopy *pRegions) {
StateTracker::PreCallRecordCmdCopyImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount,
pRegions);
auto cb_node = GetCBState(commandBuffer);
auto src_image_state = GetImageState(srcImage);
auto dst_image_state = GetImageState(dstImage);
// Make sure that all image slices are updated to correct layout
for (uint32_t i = 0; i < regionCount; ++i) {
SetImageInitialLayout(cb_node, *src_image_state, pRegions[i].srcSubresource, srcImageLayout);
SetImageInitialLayout(cb_node, *dst_image_state, pRegions[i].dstSubresource, dstImageLayout);
}
}
void CoreChecks::PreCallRecordCmdCopyImage2KHR(VkCommandBuffer commandBuffer, const VkCopyImageInfo2KHR *pCopyImageInfo) {
StateTracker::PreCallRecordCmdCopyImage2KHR(commandBuffer, pCopyImageInfo);
auto cb_node = GetCBState(commandBuffer);
auto src_image_state = GetImageState(pCopyImageInfo->srcImage);
auto dst_image_state = GetImageState(pCopyImageInfo->dstImage);
// Make sure that all image slices are updated to correct layout
for (uint32_t i = 0; i < pCopyImageInfo->regionCount; ++i) {
SetImageInitialLayout(cb_node, *src_image_state, pCopyImageInfo->pRegions[i].srcSubresource,
pCopyImageInfo->srcImageLayout);
SetImageInitialLayout(cb_node, *dst_image_state, pCopyImageInfo->pRegions[i].dstSubresource,
pCopyImageInfo->dstImageLayout);
}
}
// Returns true if sub_rect is entirely contained within rect
static inline bool ContainsRect(VkRect2D rect, VkRect2D sub_rect) {
if ((sub_rect.offset.x < rect.offset.x) || (sub_rect.offset.x + sub_rect.extent.width > rect.offset.x + rect.extent.width) ||
(sub_rect.offset.y < rect.offset.y) || (sub_rect.offset.y + sub_rect.extent.height > rect.offset.y + rect.extent.height)) {
return false;
}
return true;
}
bool CoreChecks::ValidateClearAttachmentExtent(VkCommandBuffer command_buffer, uint32_t attachment_index,
const FRAMEBUFFER_STATE *framebuffer, uint32_t fb_attachment,
const VkRect2D &render_area, uint32_t rect_count, const VkClearRect *clear_rects,
const CMD_BUFFER_STATE *primary_cb_state) const {
bool skip = false;
const IMAGE_VIEW_STATE *image_view_state = nullptr;
if (framebuffer && (fb_attachment != VK_ATTACHMENT_UNUSED) && (fb_attachment < framebuffer->createInfo.attachmentCount)) {
if (primary_cb_state) {
image_view_state = primary_cb_state->GetActiveAttachmentImageViewState(fb_attachment);
} else {
image_view_state = GetCBState(command_buffer)->GetActiveAttachmentImageViewState(fb_attachment);
}
}
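    // Each clear rect must be contained in the render area; when the attachment's image view is known, its layer range is validated as well.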
for (uint32_t j = 0; j < rect_count; j++) {
if (!ContainsRect(render_area, clear_rects[j].rect)) {
skip |= LogError(command_buffer, "VUID-vkCmdClearAttachments-pRects-00016",
"vkCmdClearAttachments(): The area defined by pRects[%d] is not contained in the area of "
"the current render pass instance.",
j);
}
if (image_view_state) {
// The layers specified by a given element of pRects must be contained within every attachment that
// pAttachments refers to
const auto attachment_layer_count = image_view_state->create_info.subresourceRange.layerCount;
if ((clear_rects[j].baseArrayLayer >= attachment_layer_count) ||
(clear_rects[j].baseArrayLayer + clear_rects[j].layerCount > attachment_layer_count)) {
skip |= LogError(command_buffer, "VUID-vkCmdClearAttachments-pRects-00017",
"vkCmdClearAttachments(): The layers defined in pRects[%d] are not contained in the layers "
"of pAttachment[%d].",
j, attachment_index);
}
}
}
return skip;
}
bool CoreChecks::PreCallValidateCmdClearAttachments(VkCommandBuffer commandBuffer, uint32_t attachmentCount,
const VkClearAttachment *pAttachments, uint32_t rectCount,
const VkClearRect *pRects) const {
bool skip = false;
const CMD_BUFFER_STATE *cb_node = GetCBState(commandBuffer); // TODO: Should be const, and never modified during validation
if (!cb_node) return skip;
skip |= ValidateCmd(cb_node, CMD_CLEARATTACHMENTS, "vkCmdClearAttachments()");
// Validate that attachment is in reference list of active subpass
if (cb_node->activeRenderPass) {
const VkRenderPassCreateInfo2 *renderpass_create_info = cb_node->activeRenderPass->createInfo.ptr();
const uint32_t renderpass_attachment_count = renderpass_create_info->attachmentCount;
const VkSubpassDescription2 *subpass_desc = &renderpass_create_info->pSubpasses[cb_node->activeSubpass];
const auto *framebuffer = cb_node->activeFramebuffer.get();
const auto &render_area = cb_node->activeRenderPassBeginInfo.renderArea;
for (uint32_t attachment_index = 0; attachment_index < attachmentCount; attachment_index++) {
auto clear_desc = &pAttachments[attachment_index];
uint32_t fb_attachment = VK_ATTACHMENT_UNUSED;
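            // Resolved below from the subpass' color or depth/stencil reference; if it stays VK_ATTACHMENT_UNUSED, the
            // layer-containment check in ValidateClearAttachmentExtent is skipped.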
const VkImageAspectFlags aspect_mask = clear_desc->aspectMask;
if (aspect_mask & VK_IMAGE_ASPECT_METADATA_BIT) {
skip |= LogError(commandBuffer, "VUID-VkClearAttachment-aspectMask-00020",
"vkCmdClearAttachments() pAttachments[%u] mask contains VK_IMAGE_ASPECT_METADATA_BIT",
attachment_index);
} else if (aspect_mask & (VK_IMAGE_ASPECT_MEMORY_PLANE_0_BIT_EXT | VK_IMAGE_ASPECT_MEMORY_PLANE_1_BIT_EXT |
VK_IMAGE_ASPECT_MEMORY_PLANE_2_BIT_EXT | VK_IMAGE_ASPECT_MEMORY_PLANE_3_BIT_EXT)) {
skip |=
LogError(commandBuffer, "VUID-VkClearAttachment-aspectMask-02246",
"vkCmdClearAttachments() pAttachments[%u] mask contains a VK_IMAGE_ASPECT_MEMORY_PLANE_*_BIT_EXT bit",
attachment_index);
} else if (aspect_mask & VK_IMAGE_ASPECT_COLOR_BIT) {
uint32_t color_attachment = VK_ATTACHMENT_UNUSED;
if (clear_desc->colorAttachment < subpass_desc->colorAttachmentCount) {
color_attachment = subpass_desc->pColorAttachments[clear_desc->colorAttachment].attachment;
if ((color_attachment != VK_ATTACHMENT_UNUSED) && (color_attachment >= renderpass_attachment_count)) {
skip |= LogError(
commandBuffer, "VUID-vkCmdClearAttachments-aspectMask-02501",
"vkCmdClearAttachments() pAttachments[%u].colorAttachment=%u is not VK_ATTACHMENT_UNUSED "
"and not a valid attachment for %s attachmentCount=%u. Subpass %u pColorAttachment[%u]=%u.",
                            attachment_index, clear_desc->colorAttachment,
                            report_data->FormatHandle(cb_node->activeRenderPass->renderPass()).c_str(),
                            renderpass_attachment_count, cb_node->activeSubpass, clear_desc->colorAttachment, color_attachment);
color_attachment = VK_ATTACHMENT_UNUSED; // Defensive, prevent lookup past end of renderpass attachment
}
} else {
skip |= LogError(commandBuffer, "VUID-vkCmdClearAttachments-aspectMask-02501",
"vkCmdClearAttachments() pAttachments[%u].colorAttachment=%u out of range for %s"
" subpass %u. colorAttachmentCount=%u",
attachment_index, clear_desc->colorAttachment,
report_data->FormatHandle(cb_node->activeRenderPass->renderPass()).c_str(),
cb_node->activeSubpass, subpass_desc->colorAttachmentCount);
}
fb_attachment = color_attachment;
if ((clear_desc->aspectMask & VK_IMAGE_ASPECT_DEPTH_BIT) ||
(clear_desc->aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT)) {
skip |= LogError(commandBuffer, "VUID-VkClearAttachment-aspectMask-00019",
"vkCmdClearAttachments() pAttachments[%u] aspectMask must set only VK_IMAGE_ASPECT_COLOR_BIT "
"of a color attachment.",
attachment_index);
}
} else { // Must be depth and/or stencil
bool subpass_depth = false;
bool subpass_stencil = false;
if (subpass_desc->pDepthStencilAttachment &&
(subpass_desc->pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED)) {
auto index = subpass_desc->pDepthStencilAttachment->attachment;
subpass_depth = FormatHasDepth(renderpass_create_info->pAttachments[index].format);
subpass_stencil = FormatHasStencil(renderpass_create_info->pAttachments[index].format);
}
if (!subpass_desc->pDepthStencilAttachment ||
(subpass_desc->pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED)) {
if ((clear_desc->aspectMask & VK_IMAGE_ASPECT_DEPTH_BIT) && !subpass_depth) {
skip |= LogError(
commandBuffer, "VUID-vkCmdClearAttachments-aspectMask-02502",
"vkCmdClearAttachments() pAttachments[%u] aspectMask has VK_IMAGE_ASPECT_DEPTH_BIT but there is no "
"depth attachment in subpass",
attachment_index);
}
if ((clear_desc->aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT) && !subpass_stencil) {
skip |= LogError(
commandBuffer, "VUID-vkCmdClearAttachments-aspectMask-02503",
"vkCmdClearAttachments() pAttachments[%u] aspectMask has VK_IMAGE_ASPECT_STENCIL_BIT but there is no "
"stencil attachment in subpass",
attachment_index);
}
} else {
fb_attachment = subpass_desc->pDepthStencilAttachment->attachment;
}
if (subpass_depth) {
skip |= ValidateClearDepthStencilValue(commandBuffer, clear_desc->clearValue.depthStencil,
"vkCmdClearAttachments()");
}
}
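            // Extent validation can only run immediately for primary command buffers; secondary command buffers inherit the
            // framebuffer, so the check is registered as a deferred callback in PreCallRecordCmdClearAttachments.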
if (cb_node->createInfo.level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
skip |= ValidateClearAttachmentExtent(commandBuffer, attachment_index, framebuffer, fb_attachment, render_area,
rectCount, pRects);
}
// Once the framebuffer attachment is found, can get the image view state
if (framebuffer && (fb_attachment != VK_ATTACHMENT_UNUSED) &&
(fb_attachment < framebuffer->createInfo.attachmentCount)) {
const IMAGE_VIEW_STATE *image_view_state =
GetCBState(commandBuffer)->GetActiveAttachmentImageViewState(fb_attachment);
if (image_view_state != nullptr) {
skip |= ValidateProtectedImage(cb_node, image_view_state->image_state.get(), "vkCmdClearAttachments()",
"VUID-vkCmdClearAttachments-commandBuffer-02504");
skip |= ValidateUnprotectedImage(cb_node, image_view_state->image_state.get(), "vkCmdClearAttachments()",
"VUID-vkCmdClearAttachments-commandBuffer-02505");
}
}
}
}
return skip;
}
void CoreChecks::PreCallRecordCmdClearAttachments(VkCommandBuffer commandBuffer, uint32_t attachmentCount,
const VkClearAttachment *pAttachments, uint32_t rectCount,
const VkClearRect *pRects) {
auto *cb_node = GetCBState(commandBuffer);
if (cb_node->activeRenderPass && (cb_node->createInfo.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY)) {
const VkRenderPassCreateInfo2 *renderpass_create_info = cb_node->activeRenderPass->createInfo.ptr();
const VkSubpassDescription2 *subpass_desc = &renderpass_create_info->pSubpasses[cb_node->activeSubpass];
std::shared_ptr<std::vector<VkClearRect>> clear_rect_copy;
for (uint32_t attachment_index = 0; attachment_index < attachmentCount; attachment_index++) {
const auto clear_desc = &pAttachments[attachment_index];
uint32_t fb_attachment = VK_ATTACHMENT_UNUSED;
if ((clear_desc->aspectMask & VK_IMAGE_ASPECT_COLOR_BIT) &&
(clear_desc->colorAttachment < subpass_desc->colorAttachmentCount)) {
fb_attachment = subpass_desc->pColorAttachments[clear_desc->colorAttachment].attachment;
} else if ((clear_desc->aspectMask & (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) &&
subpass_desc->pDepthStencilAttachment) {
fb_attachment = subpass_desc->pDepthStencilAttachment->attachment;
}
if (fb_attachment != VK_ATTACHMENT_UNUSED) {
if (!clear_rect_copy) {
// We need a copy of the clear rectangles that will persist until the last lambda executes
// but we want to create it as lazily as possible
clear_rect_copy.reset(new std::vector<VkClearRect>(pRects, pRects + rectCount));
}
// if a secondary level command buffer inherits the framebuffer from the primary command buffer
// (see VkCommandBufferInheritanceInfo), this validation must be deferred until queue submit time
auto val_fn = [this, commandBuffer, attachment_index, fb_attachment, rectCount, clear_rect_copy](
const CMD_BUFFER_STATE *prim_cb, const FRAMEBUFFER_STATE *fb) {
assert(rectCount == clear_rect_copy->size());
const auto &render_area = prim_cb->activeRenderPassBeginInfo.renderArea;
bool skip = false;
skip = ValidateClearAttachmentExtent(commandBuffer, attachment_index, fb, fb_attachment, render_area, rectCount,
clear_rect_copy->data(), prim_cb);
return skip;
};
cb_node->cmd_execute_commands_functions.emplace_back(val_fn);
}
}
}
}
template <typename RegionType>
bool CoreChecks::ValidateCmdResolveImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
const RegionType *pRegions, CopyCommandVersion version) const {
const auto *cb_node = GetCBState(commandBuffer);
const auto *src_image_state = GetImageState(srcImage);
const auto *dst_image_state = GetImageState(dstImage);
const bool is_2khr = (version == COPY_COMMAND_VERSION_2);
const char *func_name = is_2khr ? "vkCmdResolveImage2KHR()" : "vkCmdResolveImage()";
    const CMD_TYPE cmd_type = is_2khr ? CMD_RESOLVEIMAGE2KHR : CMD_RESOLVEIMAGE;
const char *vuid;
bool skip = false;
if (cb_node && src_image_state && dst_image_state) {
vuid = is_2khr ? "VUID-VkResolveImageInfo2KHR-srcImage-00256" : "VUID-vkCmdResolveImage-srcImage-00256";
skip |= ValidateMemoryIsBoundToImage(src_image_state, func_name, vuid);
vuid = is_2khr ? "VUID-VkResolveImageInfo2KHR-dstImage-00258" : "VUID-vkCmdResolveImage-dstImage-00258";
skip |= ValidateMemoryIsBoundToImage(dst_image_state, func_name, vuid);
skip |= ValidateCmd(cb_node, cmd_type, func_name);
vuid = is_2khr ? "VUID-VkResolveImageInfo2KHR-dstImage-02003" : "VUID-vkCmdResolveImage-dstImage-02003";
skip |= ValidateImageFormatFeatureFlags(dst_image_state, VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT, func_name, vuid);
vuid = is_2khr ? "VUID-vkCmdResolveImage2KHR-commandBuffer-01837" : "VUID-vkCmdResolveImage-commandBuffer-01837";
skip |= ValidateProtectedImage(cb_node, src_image_state, func_name, vuid);
vuid = is_2khr ? "VUID-vkCmdResolveImage2KHR-commandBuffer-01838" : "VUID-vkCmdResolveImage-commandBuffer-01838";
skip |= ValidateProtectedImage(cb_node, dst_image_state, func_name, vuid);
vuid = is_2khr ? "VUID-vkCmdResolveImage2KHR-commandBuffer-01839" : "VUID-vkCmdResolveImage-commandBuffer-01839";
skip |= ValidateUnprotectedImage(cb_node, dst_image_state, func_name, vuid);
// Validation for VK_EXT_fragment_density_map
if (src_image_state->createInfo.flags & VK_IMAGE_CREATE_SUBSAMPLED_BIT_EXT) {
vuid = is_2khr ? "VUID-VkResolveImageInfo2KHR-dstImage-02546" : "VUID-vkCmdResolveImage-dstImage-02546";
skip |= LogError(cb_node->commandBuffer(), vuid,
"%s: srcImage must not have been created with flags containing "
"VK_IMAGE_CREATE_SUBSAMPLED_BIT_EXT",
func_name);
}
if (dst_image_state->createInfo.flags & VK_IMAGE_CREATE_SUBSAMPLED_BIT_EXT) {
vuid = is_2khr ? "VUID-VkResolveImageInfo2KHR-dstImage-02546" : "VUID-vkCmdResolveImage-dstImage-02546";
skip |= LogError(cb_node->commandBuffer(), vuid,
"%s: dstImage must not have been created with flags containing "
"VK_IMAGE_CREATE_SUBSAMPLED_BIT_EXT",
func_name);
}
bool hit_error = false;
const char *invalid_src_layout_vuid =
is_2khr ? ((src_image_state->shared_presentable && device_extensions.vk_khr_shared_presentable_image)
? "VUID-VkResolveImageInfo2KHR-srcImageLayout-01400"
: "VUID-VkResolveImageInfo2KHR-srcImageLayout-00261")
: ((src_image_state->shared_presentable && device_extensions.vk_khr_shared_presentable_image)
? "VUID-vkCmdResolveImage-srcImageLayout-01400"
: "VUID-vkCmdResolveImage-srcImageLayout-00261");
const char *invalid_dst_layout_vuid =
is_2khr ? ((dst_image_state->shared_presentable && device_extensions.vk_khr_shared_presentable_image)
? "VUID-VkResolveImageInfo2KHR-dstImageLayout-01401"
: "VUID-VkResolveImageInfo2KHR-dstImageLayout-00263")
: ((dst_image_state->shared_presentable && device_extensions.vk_khr_shared_presentable_image)
? "VUID-vkCmdResolveImage-dstImageLayout-01401"
: "VUID-vkCmdResolveImage-dstImageLayout-00263");
// For each region, the number of layers in the image subresource should not be zero
// For each region, src and dest image aspect must be color only
for (uint32_t i = 0; i < regionCount; i++) {
const RegionType region = pRegions[i];
const VkImageSubresourceLayers src_subresource = region.srcSubresource;
const VkImageSubresourceLayers dst_subresource = region.dstSubresource;
skip |= ValidateImageSubresourceLayers(cb_node, &src_subresource, func_name, "srcSubresource", i);
skip |= ValidateImageSubresourceLayers(cb_node, &dst_subresource, func_name, "dstSubresource", i);
vuid = is_2khr ? "VUID-VkResolveImageInfo2KHR-srcImageLayout-00260" : "VUID-vkCmdResolveImage-srcImageLayout-00260";
skip |= VerifyImageLayout(cb_node, src_image_state, src_subresource, srcImageLayout,
VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, func_name, invalid_src_layout_vuid, vuid, &hit_error);
vuid = is_2khr ? "VUID-VkResolveImageInfo2KHR-dstImageLayout-00262" : "VUID-vkCmdResolveImage-dstImageLayout-00262";
skip |= VerifyImageLayout(cb_node, dst_image_state, dst_subresource, dstImageLayout,
VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, func_name, invalid_dst_layout_vuid, vuid, &hit_error);
vuid = is_2khr ? "VUID-VkResolveImageInfo2KHR-srcSubresource-01709" : "VUID-vkCmdResolveImage-srcSubresource-01709";
skip |= ValidateImageMipLevel(cb_node, src_image_state, src_subresource.mipLevel, i, func_name, "srcSubresource", vuid);
vuid = is_2khr ? "VUID-VkResolveImageInfo2KHR-dstSubresource-01710" : "VUID-vkCmdResolveImage-dstSubresource-01710";
skip |= ValidateImageMipLevel(cb_node, dst_image_state, dst_subresource.mipLevel, i, func_name, "dstSubresource", vuid);
vuid = is_2khr ? "VUID-VkResolveImageInfo2KHR-srcSubresource-01711" : "VUID-vkCmdResolveImage-srcSubresource-01711";
skip |= ValidateImageArrayLayerRange(cb_node, src_image_state, src_subresource.baseArrayLayer,
src_subresource.layerCount, i, func_name, "srcSubresource", vuid);
vuid = is_2khr ? "VUID-VkResolveImageInfo2KHR-dstSubresource-01712" : "VUID-vkCmdResolveImage-dstSubresource-01712";
skip |= ValidateImageArrayLayerRange(cb_node, dst_image_state, dst_subresource.baseArrayLayer,
                                                 dst_subresource.layerCount, i, func_name, "dstSubresource", vuid);
// layer counts must match
if (src_subresource.layerCount != dst_subresource.layerCount) {
vuid = is_2khr ? "VUID-VkImageResolve2KHR-layerCount-00267" : "VUID-VkImageResolve-layerCount-00267";
skip |=
LogError(cb_node->commandBuffer(), vuid,
"%s: layerCount in source and destination subresource of pRegions[%u] does not match.", func_name, i);
}
// For each region, src and dest image aspect must be color only
if ((src_subresource.aspectMask != VK_IMAGE_ASPECT_COLOR_BIT) ||
(dst_subresource.aspectMask != VK_IMAGE_ASPECT_COLOR_BIT)) {
vuid = is_2khr ? "VUID-VkImageResolve2KHR-aspectMask-00266" : "VUID-VkImageResolve-aspectMask-00266";
skip |= LogError(cb_node->commandBuffer(), vuid,
"%s: src and dest aspectMasks for pRegions[%u] must specify only VK_IMAGE_ASPECT_COLOR_BIT.",
func_name, i);
}
const VkImageType src_image_type = src_image_state->createInfo.imageType;
const VkImageType dst_image_type = dst_image_state->createInfo.imageType;
if ((VK_IMAGE_TYPE_3D == src_image_type) || (VK_IMAGE_TYPE_3D == dst_image_type)) {
if ((0 != src_subresource.baseArrayLayer) || (1 != src_subresource.layerCount)) {
LogObjectList objlist(cb_node->commandBuffer());
objlist.add(src_image_state->image());
objlist.add(dst_image_state->image());
vuid = is_2khr ? "VUID-VkResolveImageInfo2KHR-srcImage-04446" : "VUID-vkCmdResolveImage-srcImage-04446";
skip |= LogError(objlist, vuid,
"%s: pRegions[%u] baseArrayLayer must be 0 and layerCount must be 1 for all "
"subresources if the src or dst image is 3D.",
func_name, i);
}
if ((0 != dst_subresource.baseArrayLayer) || (1 != dst_subresource.layerCount)) {
LogObjectList objlist(cb_node->commandBuffer());
objlist.add(src_image_state->image());
objlist.add(dst_image_state->image());
vuid = is_2khr ? "VUID-VkResolveImageInfo2KHR-srcImage-04447" : "VUID-vkCmdResolveImage-srcImage-04447";
skip |= LogError(objlist, vuid,
"%s: pRegions[%u] baseArrayLayer must be 0 and layerCount must be 1 for all "
"subresources if the src or dst image is 3D.",
func_name, i);
}
}
if (VK_IMAGE_TYPE_1D == src_image_type) {
if ((pRegions[i].srcOffset.y != 0) || (pRegions[i].extent.height != 1)) {
LogObjectList objlist(cb_node->commandBuffer());
objlist.add(src_image_state->image());
vuid = is_2khr ? "VUID-VkResolveImageInfo2KHR-srcImage-00271" : "VUID-vkCmdResolveImage-srcImage-00271";
skip |= LogError(objlist, vuid,
"%s: srcImage (%s) is 1D but pRegions[%u] srcOffset.y (%d) is not 0 or "
"extent.height (%u) is not 1.",
func_name, report_data->FormatHandle(src_image_state->image()).c_str(), i,
pRegions[i].srcOffset.y, pRegions[i].extent.height);
}
}
if ((VK_IMAGE_TYPE_1D == src_image_type) || (VK_IMAGE_TYPE_2D == src_image_type)) {
if ((pRegions[i].srcOffset.z != 0) || (pRegions[i].extent.depth != 1)) {
LogObjectList objlist(cb_node->commandBuffer());
objlist.add(src_image_state->image());
vuid = is_2khr ? "VUID-VkResolveImageInfo2KHR-srcImage-00273" : "VUID-vkCmdResolveImage-srcImage-00273";
skip |= LogError(objlist, vuid,
"%s: srcImage (%s) is 2D but pRegions[%u] srcOffset.z (%d) is not 0 or "
"extent.depth (%u) is not 1.",
func_name, report_data->FormatHandle(src_image_state->image()).c_str(), i,
pRegions[i].srcOffset.z, pRegions[i].extent.depth);
}
}
if (VK_IMAGE_TYPE_1D == dst_image_type) {
if ((pRegions[i].dstOffset.y != 0) || (pRegions[i].extent.height != 1)) {
LogObjectList objlist(cb_node->commandBuffer());
objlist.add(dst_image_state->image());
vuid = is_2khr ? "VUID-VkResolveImageInfo2KHR-dstImage-00276" : "VUID-vkCmdResolveImage-dstImage-00276";
skip |= LogError(objlist, vuid,
"%s: dstImage (%s) is 1D but pRegions[%u] dstOffset.y (%d) is not 0 or "
"extent.height (%u) is not 1.",
func_name, report_data->FormatHandle(dst_image_state->image()).c_str(), i,
pRegions[i].dstOffset.y, pRegions[i].extent.height);
}
}
if ((VK_IMAGE_TYPE_1D == dst_image_type) || (VK_IMAGE_TYPE_2D == dst_image_type)) {
if ((pRegions[i].dstOffset.z != 0) || (pRegions[i].extent.depth != 1)) {
LogObjectList objlist(cb_node->commandBuffer());
objlist.add(dst_image_state->image());
vuid = is_2khr ? "VUID-VkResolveImageInfo2KHR-dstImage-00278" : "VUID-vkCmdResolveImage-dstImage-00278";
skip |= LogError(objlist, vuid,
"%s: dstImage (%s) is 2D but pRegions[%u] dstOffset.z (%d) is not 0 or "
"extent.depth (%u) is not 1.",
func_name, report_data->FormatHandle(dst_image_state->image()).c_str(), i,
pRegions[i].dstOffset.z, pRegions[i].extent.depth);
}
}
            // Each srcImage dimension's offset + extent must fall within the image subresource extent
VkExtent3D subresource_extent = GetImageSubresourceExtent(src_image_state, &src_subresource);
            // MipLevel bound is checked already; adding extra errors with a "subresource extent of zero" would only confuse the
            // developer
if (src_subresource.mipLevel < src_image_state->createInfo.mipLevels) {
uint32_t extent_check = ExceedsBounds(&(region.srcOffset), &(region.extent), &subresource_extent);
if ((extent_check & kXBit) != 0) {
LogObjectList objlist(cb_node->commandBuffer());
objlist.add(src_image_state->image());
vuid = is_2khr ? "VUID-VkResolveImageInfo2KHR-srcOffset-00269" : "VUID-vkCmdResolveImage-srcOffset-00269";
skip |= LogError(objlist, vuid,
"%s: srcImage (%s) pRegions[%u] x-dimension offset [%1d] + extent [%u] "
"exceeds subResource width [%u].",
func_name, report_data->FormatHandle(src_image_state->image()).c_str(), i, region.srcOffset.x,
region.extent.width, subresource_extent.width);
}
if ((extent_check & kYBit) != 0) {
LogObjectList objlist(cb_node->commandBuffer());
objlist.add(src_image_state->image());
vuid = is_2khr ? "VUID-VkResolveImageInfo2KHR-srcOffset-00270" : "VUID-vkCmdResolveImage-srcOffset-00270";
skip |= LogError(objlist, vuid,
"%s: srcImage (%s) pRegions[%u] y-dimension offset [%1d] + extent [%u] "
"exceeds subResource height [%u].",
func_name, report_data->FormatHandle(src_image_state->image()).c_str(), i, region.srcOffset.y,
region.extent.height, subresource_extent.height);
}
if ((extent_check & kZBit) != 0) {
LogObjectList objlist(cb_node->commandBuffer());
objlist.add(src_image_state->image());
vuid = is_2khr ? "VUID-VkResolveImageInfo2KHR-srcOffset-00272" : "VUID-vkCmdResolveImage-srcOffset-00272";
skip |= LogError(objlist, vuid,
"%s: srcImage (%s) pRegions[%u] z-dimension offset [%1d] + extent [%u] "
"exceeds subResource depth [%u].",
func_name, report_data->FormatHandle(src_image_state->image()).c_str(), i, region.srcOffset.z,
region.extent.depth, subresource_extent.depth);
}
}
            // Each dstImage dimension's offset + extent must fall within the image subresource extent
subresource_extent = GetImageSubresourceExtent(dst_image_state, &dst_subresource);
            // MipLevel bound is checked already; adding extra errors with a "subresource extent of zero" would only confuse the
            // developer
if (dst_subresource.mipLevel < dst_image_state->createInfo.mipLevels) {
uint32_t extent_check = ExceedsBounds(&(region.dstOffset), &(region.extent), &subresource_extent);
if ((extent_check & kXBit) != 0) {
LogObjectList objlist(cb_node->commandBuffer());
objlist.add(dst_image_state->image());
vuid = is_2khr ? "VUID-VkResolveImageInfo2KHR-dstOffset-00274" : "VUID-vkCmdResolveImage-dstOffset-00274";
skip |= LogError(objlist, vuid,
"%s: dstImage (%s) pRegions[%u] x-dimension offset [%1d] + extent [%u] "
"exceeds subResource width [%u].",
                                 func_name, report_data->FormatHandle(dst_image_state->image()).c_str(), i, region.dstOffset.x,
region.extent.width, subresource_extent.width);
}
if ((extent_check & kYBit) != 0) {
LogObjectList objlist(cb_node->commandBuffer());
objlist.add(dst_image_state->image());
vuid = is_2khr ? "VUID-VkResolveImageInfo2KHR-dstOffset-00275" : "VUID-vkCmdResolveImage-dstOffset-00275";
skip |= LogError(objlist, vuid,
"%s: dstImage (%s) pRegions[%u] y-dimension offset [%1d] + extent [%u] "
"exceeds subResource height [%u].",
                                 func_name, report_data->FormatHandle(dst_image_state->image()).c_str(), i, region.dstOffset.y,
region.extent.height, subresource_extent.height);
}
if ((extent_check & kZBit) != 0) {
LogObjectList objlist(cb_node->commandBuffer());
objlist.add(dst_image_state->image());
vuid = is_2khr ? "VUID-VkResolveImageInfo2KHR-dstOffset-00277" : "VUID-vkCmdResolveImage-dstOffset-00277";
skip |= LogError(objlist, vuid,
"%s: dstImage (%s) pRegions[%u] z-dimension offset [%1d] + extent [%u] "
"exceeds subResource depth [%u].",
                                 func_name, report_data->FormatHandle(dst_image_state->image()).c_str(), i, region.dstOffset.z,
region.extent.depth, subresource_extent.depth);
}
}
}
if (src_image_state->createInfo.format != dst_image_state->createInfo.format) {
vuid = is_2khr ? "VUID-VkResolveImageInfo2KHR-srcImage-01386" : "VUID-vkCmdResolveImage-srcImage-01386";
skip |= LogError(cb_node->commandBuffer(), vuid, "%s: srcImage format (%s) and dstImage format (%s) are not the same.",
func_name, string_VkFormat(src_image_state->createInfo.format),
string_VkFormat(dst_image_state->createInfo.format));
}
if (src_image_state->createInfo.imageType != dst_image_state->createInfo.imageType) {
skip |= LogWarning(cb_node->commandBuffer(), kVUID_Core_DrawState_MismatchedImageType,
"%s: srcImage type (%s) and dstImage type (%s) are not the same.", func_name,
string_VkImageType(src_image_state->createInfo.imageType),
string_VkImageType(dst_image_state->createInfo.imageType));
}
if (src_image_state->createInfo.samples == VK_SAMPLE_COUNT_1_BIT) {
vuid = is_2khr ? "VUID-VkResolveImageInfo2KHR-srcImage-00257" : "VUID-vkCmdResolveImage-srcImage-00257";
skip |= LogError(cb_node->commandBuffer(), vuid, "%s: srcImage sample count is VK_SAMPLE_COUNT_1_BIT.", func_name);
}
if (dst_image_state->createInfo.samples != VK_SAMPLE_COUNT_1_BIT) {
vuid = is_2khr ? "VUID-VkResolveImageInfo2KHR-dstImage-00259" : "VUID-vkCmdResolveImage-dstImage-00259";
skip |= LogError(cb_node->commandBuffer(), vuid, "%s: dstImage sample count (%s) is not VK_SAMPLE_COUNT_1_BIT.",
func_name, string_VkSampleCountFlagBits(dst_image_state->createInfo.samples));
}
} else {
assert(0);
}
return skip;
}
bool CoreChecks::PreCallValidateCmdResolveImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
const VkImageResolve *pRegions) const {
return ValidateCmdResolveImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount, pRegions,
COPY_COMMAND_VERSION_1);
}
bool CoreChecks::PreCallValidateCmdResolveImage2KHR(VkCommandBuffer commandBuffer,
const VkResolveImageInfo2KHR *pResolveImageInfo) const {
return ValidateCmdResolveImage(commandBuffer, pResolveImageInfo->srcImage, pResolveImageInfo->srcImageLayout,
pResolveImageInfo->dstImage, pResolveImageInfo->dstImageLayout, pResolveImageInfo->regionCount,
pResolveImageInfo->pRegions, COPY_COMMAND_VERSION_2);
}
template <typename RegionType>
bool CoreChecks::ValidateCmdBlitImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
const RegionType *pRegions, VkFilter filter, CopyCommandVersion version) const {
const auto *cb_node = GetCBState(commandBuffer);
const auto *src_image_state = GetImageState(srcImage);
const auto *dst_image_state = GetImageState(dstImage);
const bool is_2khr = (version == COPY_COMMAND_VERSION_2);
const char *func_name = is_2khr ? "vkCmdBlitImage2KHR()" : "vkCmdBlitImage()";
    const CMD_TYPE cmd_type = is_2khr ? CMD_BLITIMAGE2KHR : CMD_BLITIMAGE;
bool skip = false;
if (cb_node) {
skip |= ValidateCmd(cb_node, cmd_type, func_name);
}
if (cb_node && src_image_state && dst_image_state) {
const char *vuid;
const char *location;
vuid = is_2khr ? "VUID-VkBlitImageInfo2KHR-srcImage-00233" : "VUID-vkCmdBlitImage-srcImage-00233";
location = is_2khr ? "vkCmdBlitImage2KHR(): pBlitImageInfo->srcImage" : "vkCmdBlitImage(): srcImage";
skip |= ValidateImageSampleCount(src_image_state, VK_SAMPLE_COUNT_1_BIT, location, vuid);
vuid = is_2khr ? "VUID-VkBlitImageInfo2KHR-dstImage-00234" : "VUID-vkCmdBlitImage-dstImage-00234";
location = is_2khr ? "vkCmdBlitImage2KHR(): pBlitImageInfo->dstImage" : "vkCmdBlitImage(): dstImage";
skip |= ValidateImageSampleCount(dst_image_state, VK_SAMPLE_COUNT_1_BIT, location, vuid);
vuid = is_2khr ? "VUID-VkBlitImageInfo2KHR-srcImage-00220" : "VUID-vkCmdBlitImage-srcImage-00220";
skip |= ValidateMemoryIsBoundToImage(src_image_state, func_name, vuid);
vuid = is_2khr ? "VUID-VkBlitImageInfo2KHR-dstImage-00225" : "VUID-vkCmdBlitImage-dstImage-00225";
skip |= ValidateMemoryIsBoundToImage(dst_image_state, func_name, vuid);
vuid = is_2khr ? "VUID-VkBlitImageInfo2KHR-srcImage-00219" : "VUID-vkCmdBlitImage-srcImage-00219";
skip |= ValidateImageUsageFlags(src_image_state, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, true, vuid, func_name,
"VK_IMAGE_USAGE_TRANSFER_SRC_BIT");
vuid = is_2khr ? "VUID-VkBlitImageInfo2KHR-dstImage-00224" : "VUID-vkCmdBlitImage-dstImage-00224";
skip |= ValidateImageUsageFlags(dst_image_state, VK_IMAGE_USAGE_TRANSFER_DST_BIT, true, vuid, func_name,
"VK_IMAGE_USAGE_TRANSFER_DST_BIT");
skip |= ValidateCmd(cb_node, cmd_type, func_name);
vuid = is_2khr ? "VUID-VkBlitImageInfo2KHR-srcImage-01999" : "VUID-vkCmdBlitImage-srcImage-01999";
skip |= ValidateImageFormatFeatureFlags(src_image_state, VK_FORMAT_FEATURE_BLIT_SRC_BIT, func_name, vuid);
vuid = is_2khr ? "VUID-VkBlitImageInfo2KHR-dstImage-02000" : "VUID-vkCmdBlitImage-dstImage-02000";
skip |= ValidateImageFormatFeatureFlags(dst_image_state, VK_FORMAT_FEATURE_BLIT_DST_BIT, func_name, vuid);
vuid = is_2khr ? "VUID-vkCmdBlitImage2KHR-commandBuffer-01834" : "VUID-vkCmdBlitImage-commandBuffer-01834";
skip |= ValidateProtectedImage(cb_node, src_image_state, func_name, vuid);
vuid = is_2khr ? "VUID-vkCmdBlitImage2KHR-commandBuffer-01835" : "VUID-vkCmdBlitImage-commandBuffer-01835";
skip |= ValidateProtectedImage(cb_node, dst_image_state, func_name, vuid);
vuid = is_2khr ? "VUID-vkCmdBlitImage2KHR-commandBuffer-01836" : "VUID-vkCmdBlitImage-commandBuffer-01836";
skip |= ValidateUnprotectedImage(cb_node, dst_image_state, func_name, vuid);
// Validation for VK_EXT_fragment_density_map
if (src_image_state->createInfo.flags & VK_IMAGE_CREATE_SUBSAMPLED_BIT_EXT) {
vuid = is_2khr ? "VUID-VkBlitImageInfo2KHR-dstImage-02545" : "VUID-vkCmdBlitImage-dstImage-02545";
skip |= LogError(cb_node->commandBuffer(), vuid,
"%s: srcImage must not have been created with flags containing VK_IMAGE_CREATE_SUBSAMPLED_BIT_EXT",
func_name);
}
if (dst_image_state->createInfo.flags & VK_IMAGE_CREATE_SUBSAMPLED_BIT_EXT) {
vuid = is_2khr ? "VUID-VkBlitImageInfo2KHR-dstImage-02545" : "VUID-vkCmdBlitImage-dstImage-02545";
skip |= LogError(cb_node->commandBuffer(), vuid,
"%s: dstImage must not have been created with flags containing VK_IMAGE_CREATE_SUBSAMPLED_BIT_EXT",
func_name);
}
// TODO: Need to validate image layouts, which will include layout validation for shared presentable images
VkFormat src_format = src_image_state->createInfo.format;
VkFormat dst_format = dst_image_state->createInfo.format;
VkImageType src_type = src_image_state->createInfo.imageType;
VkImageType dst_type = dst_image_state->createInfo.imageType;
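        // VK_FILTER_LINEAR and VK_FILTER_CUBIC_IMG require the corresponding sampled-image filter format feature on the source image.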
if (VK_FILTER_LINEAR == filter) {
vuid = is_2khr ? "VUID-VkBlitImageInfo2KHR-filter-02001" : "VUID-vkCmdBlitImage-filter-02001";
skip |= ValidateImageFormatFeatureFlags(src_image_state, VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_LINEAR_BIT, func_name,
vuid);
} else if (VK_FILTER_CUBIC_IMG == filter) {
vuid = is_2khr ? "VUID-VkBlitImageInfo2KHR-filter-02002" : "VUID-vkCmdBlitImage-filter-02002";
skip |= ValidateImageFormatFeatureFlags(src_image_state, VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_CUBIC_BIT_IMG,
func_name, vuid);
}
if (FormatRequiresYcbcrConversion(src_format)) {
vuid = is_2khr ? "VUID-VkBlitImageInfo2KHR-srcImage-01561" : "VUID-vkCmdBlitImage-srcImage-01561";
skip |= LogError(device, vuid,
"%s: srcImage format (%s) must not be one of the formats requiring sampler YCBCR "
"conversion for VK_IMAGE_ASPECT_COLOR_BIT image views",
func_name, string_VkFormat(src_format));
}
if (FormatRequiresYcbcrConversion(dst_format)) {
vuid = is_2khr ? "VUID-VkBlitImageInfo2KHR-dstImage-01562" : "VUID-vkCmdBlitImage-dstImage-01562";
skip |= LogError(device, vuid,
"%s: dstImage format (%s) must not be one of the formats requiring sampler YCBCR "
"conversion for VK_IMAGE_ASPECT_COLOR_BIT image views",
func_name, string_VkFormat(dst_format));
}
if ((VK_FILTER_CUBIC_IMG == filter) && (VK_IMAGE_TYPE_3D != src_type)) {
vuid = is_2khr ? "VUID-VkBlitImageInfo2KHR-filter-00237" : "VUID-vkCmdBlitImage-filter-00237";
skip |= LogError(cb_node->commandBuffer(), vuid,
"%s: source image type must be VK_IMAGE_TYPE_3D when cubic filtering is specified.", func_name);
}
// Validate consistency for unsigned formats
if (FormatIsUInt(src_format) != FormatIsUInt(dst_format)) {
std::stringstream ss;
ss << func_name << ": If one of srcImage and dstImage images has unsigned integer format, "
<< "the other one must also have unsigned integer format. "
<< "Source format is " << string_VkFormat(src_format) << " Destination format is " << string_VkFormat(dst_format);
vuid = is_2khr ? "VUID-VkBlitImageInfo2KHR-srcImage-00230" : "VUID-vkCmdBlitImage-srcImage-00230";
skip |= LogError(cb_node->commandBuffer(), vuid, "%s.", ss.str().c_str());
}
// Validate consistency for signed formats
if (FormatIsSInt(src_format) != FormatIsSInt(dst_format)) {
std::stringstream ss;
ss << func_name << ": If one of srcImage and dstImage images has signed integer format, "
<< "the other one must also have signed integer format. "
<< "Source format is " << string_VkFormat(src_format) << " Destination format is " << string_VkFormat(dst_format);
vuid = is_2khr ? "VUID-VkBlitImageInfo2KHR-srcImage-00229" : "VUID-vkCmdBlitImage-srcImage-00229";
skip |= LogError(cb_node->commandBuffer(), vuid, "%s.", ss.str().c_str());
}
// Validate filter for Depth/Stencil formats
if (FormatIsDepthOrStencil(src_format) && (filter != VK_FILTER_NEAREST)) {
std::stringstream ss;
ss << func_name << ": If the format of srcImage is a depth, stencil, or depth stencil "
<< "then filter must be VK_FILTER_NEAREST.";
vuid = is_2khr ? "VUID-VkBlitImageInfo2KHR-srcImage-00232" : "VUID-vkCmdBlitImage-srcImage-00232";
skip |= LogError(cb_node->commandBuffer(), vuid, "%s.", ss.str().c_str());
}
// Validate aspect bits and formats for depth/stencil images
if (FormatIsDepthOrStencil(src_format) || FormatIsDepthOrStencil(dst_format)) {
if (src_format != dst_format) {
std::stringstream ss;
ss << func_name << ": If one of srcImage and dstImage images has a format of depth, stencil or depth "
<< "stencil, the other one must have exactly the same format. "
<< "Source format is " << string_VkFormat(src_format) << " Destination format is "
<< string_VkFormat(dst_format);
vuid = is_2khr ? "VUID-VkBlitImageInfo2KHR-srcImage-00231" : "VUID-vkCmdBlitImage-srcImage-00231";
skip |= LogError(cb_node->commandBuffer(), vuid, "%s.", ss.str().c_str());
}
} // Depth or Stencil
// Do per-region checks
const char *invalid_src_layout_vuid =
is_2khr ? ((src_image_state->shared_presentable && device_extensions.vk_khr_shared_presentable_image)
? "VUID-VkBlitImageInfo2KHR-srcImageLayout-01398"
: "VUID-VkBlitImageInfo2KHR-srcImageLayout-00222")
: ((src_image_state->shared_presentable && device_extensions.vk_khr_shared_presentable_image)
? "VUID-vkCmdBlitImage-srcImageLayout-01398"
: "VUID-vkCmdBlitImage-srcImageLayout-00222");
const char *invalid_dst_layout_vuid =
is_2khr ? ((dst_image_state->shared_presentable && device_extensions.vk_khr_shared_presentable_image)
? "VUID-VkBlitImageInfo2KHR-dstImageLayout-01399"
: "VUID-VkBlitImageInfo2KHR-dstImageLayout-00227")
: ((dst_image_state->shared_presentable && device_extensions.vk_khr_shared_presentable_image)
? "VUID-vkCmdBlitImage-dstImageLayout-01399"
: "VUID-vkCmdBlitImage-dstImageLayout-00227");
for (uint32_t i = 0; i < regionCount; i++) {
const RegionType rgn = pRegions[i];
bool hit_error = false;
vuid = is_2khr ? "VUID-VkBlitImageInfo2KHR-srcImageLayout-00221" : "VUID-vkCmdBlitImage-srcImageLayout-00221";
skip |= VerifyImageLayout(cb_node, src_image_state, rgn.srcSubresource, srcImageLayout,
VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, func_name, invalid_src_layout_vuid, vuid, &hit_error);
vuid = is_2khr ? "VUID-VkBlitImageInfo2KHR-dstImageLayout-00226" : "VUID-vkCmdBlitImage-dstImageLayout-00226";
skip |= VerifyImageLayout(cb_node, dst_image_state, rgn.dstSubresource, dstImageLayout,
VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, func_name, invalid_dst_layout_vuid, vuid, &hit_error);
skip |= ValidateImageSubresourceLayers(cb_node, &rgn.srcSubresource, func_name, "srcSubresource", i);
skip |= ValidateImageSubresourceLayers(cb_node, &rgn.dstSubresource, func_name, "dstSubresource", i);
vuid = is_2khr ? "VUID-VkBlitImageInfo2KHR-srcSubresource-01705" : "VUID-vkCmdBlitImage-srcSubresource-01705";
skip |=
ValidateImageMipLevel(cb_node, src_image_state, rgn.srcSubresource.mipLevel, i, func_name, "srcSubresource", vuid);
vuid = is_2khr ? "VUID-VkBlitImageInfo2KHR-dstSubresource-01706" : "VUID-vkCmdBlitImage-dstSubresource-01706";
skip |=
ValidateImageMipLevel(cb_node, dst_image_state, rgn.dstSubresource.mipLevel, i, func_name, "dstSubresource", vuid);
vuid = is_2khr ? "VUID-VkBlitImageInfo2KHR-srcSubresource-01707" : "VUID-vkCmdBlitImage-srcSubresource-01707";
skip |= ValidateImageArrayLayerRange(cb_node, src_image_state, rgn.srcSubresource.baseArrayLayer,
rgn.srcSubresource.layerCount, i, func_name, "srcSubresource", vuid);
vuid = is_2khr ? "VUID-VkBlitImageInfo2KHR-dstSubresource-01708" : "VUID-vkCmdBlitImage-dstSubresource-01708";
skip |= ValidateImageArrayLayerRange(cb_node, dst_image_state, rgn.dstSubresource.baseArrayLayer,
rgn.dstSubresource.layerCount, i, func_name, "dstSubresource", vuid);
// Warn for zero-sized regions
if ((rgn.srcOffsets[0].x == rgn.srcOffsets[1].x) || (rgn.srcOffsets[0].y == rgn.srcOffsets[1].y) ||
(rgn.srcOffsets[0].z == rgn.srcOffsets[1].z)) {
std::stringstream ss;
ss << func_name << ": pRegions[" << i << "].srcOffsets specify a zero-volume area.";
skip |= LogWarning(cb_node->commandBuffer(), kVUID_Core_DrawState_InvalidExtents, "%s", ss.str().c_str());
}
if ((rgn.dstOffsets[0].x == rgn.dstOffsets[1].x) || (rgn.dstOffsets[0].y == rgn.dstOffsets[1].y) ||
(rgn.dstOffsets[0].z == rgn.dstOffsets[1].z)) {
std::stringstream ss;
ss << func_name << ": pRegions[" << i << "].dstOffsets specify a zero-volume area.";
skip |= LogWarning(cb_node->commandBuffer(), kVUID_Core_DrawState_InvalidExtents, "%s", ss.str().c_str());
}
// Check that src/dst layercounts match
if (rgn.srcSubresource.layerCount != rgn.dstSubresource.layerCount) {
vuid = is_2khr ? "VUID-VkImageBlit2KHR-layerCount-00239" : "VUID-VkImageBlit-layerCount-00239";
skip |=
LogError(cb_node->commandBuffer(), vuid,
"%s: layerCount in source and destination subresource of pRegions[%d] does not match.", func_name, i);
}
if (rgn.srcSubresource.aspectMask != rgn.dstSubresource.aspectMask) {
vuid = is_2khr ? "VUID-VkImageBlit2KHR-aspectMask-00238" : "VUID-VkImageBlit-aspectMask-00238";
skip |=
LogError(cb_node->commandBuffer(), vuid, "%s: aspectMask members for pRegion[%d] do not match.", func_name, i);
}
if (!VerifyAspectsPresent(rgn.srcSubresource.aspectMask, src_format)) {
vuid = is_2khr ? "VUID-VkBlitImageInfo2KHR-aspectMask-00241" : "VUID-vkCmdBlitImage-aspectMask-00241";
skip |= LogError(cb_node->commandBuffer(), vuid,
"%s: region [%d] source aspectMask (0x%x) specifies aspects not present in source "
"image format %s.",
func_name, i, rgn.srcSubresource.aspectMask, string_VkFormat(src_format));
}
if (!VerifyAspectsPresent(rgn.dstSubresource.aspectMask, dst_format)) {
vuid = is_2khr ? "VUID-VkBlitImageInfo2KHR-aspectMask-00242" : "VUID-vkCmdBlitImage-aspectMask-00242";
skip |= LogError(cb_node->commandBuffer(), vuid,
"%s: region [%d] dest aspectMask (0x%x) specifies aspects not present in dest image format %s.",
func_name, i, rgn.dstSubresource.aspectMask, string_VkFormat(dst_format));
}
// Validate source image offsets
VkExtent3D src_extent = GetImageSubresourceExtent(src_image_state, &(rgn.srcSubresource));
if (VK_IMAGE_TYPE_1D == src_type) {
if ((0 != rgn.srcOffsets[0].y) || (1 != rgn.srcOffsets[1].y)) {
vuid = is_2khr ? "VUID-VkBlitImageInfo2KHR-srcImage-00245" : "VUID-vkCmdBlitImage-srcImage-00245";
skip |= LogError(cb_node->commandBuffer(), vuid,
"%s: region [%d], source image of type VK_IMAGE_TYPE_1D with srcOffset[].y values "
"of (%1d, %1d). These must be (0, 1).",
func_name, i, rgn.srcOffsets[0].y, rgn.srcOffsets[1].y);
}
}
if ((VK_IMAGE_TYPE_1D == src_type) || (VK_IMAGE_TYPE_2D == src_type)) {
if ((0 != rgn.srcOffsets[0].z) || (1 != rgn.srcOffsets[1].z)) {
vuid = is_2khr ? "VUID-VkBlitImageInfo2KHR-srcImage-00247" : "VUID-vkCmdBlitImage-srcImage-00247";
skip |= LogError(cb_node->commandBuffer(), vuid,
"%s: region [%d], source image of type VK_IMAGE_TYPE_1D or VK_IMAGE_TYPE_2D with "
"srcOffset[].z values of (%1d, %1d). These must be (0, 1).",
func_name, i, rgn.srcOffsets[0].z, rgn.srcOffsets[1].z);
}
}
bool oob = false;
if ((rgn.srcOffsets[0].x < 0) || (rgn.srcOffsets[0].x > static_cast<int32_t>(src_extent.width)) ||
(rgn.srcOffsets[1].x < 0) || (rgn.srcOffsets[1].x > static_cast<int32_t>(src_extent.width))) {
oob = true;
vuid = is_2khr ? "VUID-VkBlitImageInfo2KHR-srcOffset-00243" : "VUID-vkCmdBlitImage-srcOffset-00243";
skip |= LogError(cb_node->commandBuffer(), vuid,
"%s: region [%d] srcOffset[].x values (%1d, %1d) exceed srcSubresource width extent (%1d).",
func_name, i, rgn.srcOffsets[0].x, rgn.srcOffsets[1].x, src_extent.width);
}
if ((rgn.srcOffsets[0].y < 0) || (rgn.srcOffsets[0].y > static_cast<int32_t>(src_extent.height)) ||
(rgn.srcOffsets[1].y < 0) || (rgn.srcOffsets[1].y > static_cast<int32_t>(src_extent.height))) {
oob = true;
vuid = is_2khr ? "VUID-VkBlitImageInfo2KHR-srcOffset-00244" : "VUID-vkCmdBlitImage-srcOffset-00244";
skip |= LogError(cb_node->commandBuffer(), vuid,
"%s: region [%d] srcOffset[].y values (%1d, %1d) exceed srcSubresource height extent (%1d).",
func_name, i, rgn.srcOffsets[0].y, rgn.srcOffsets[1].y, src_extent.height);
}
if ((rgn.srcOffsets[0].z < 0) || (rgn.srcOffsets[0].z > static_cast<int32_t>(src_extent.depth)) ||
(rgn.srcOffsets[1].z < 0) || (rgn.srcOffsets[1].z > static_cast<int32_t>(src_extent.depth))) {
oob = true;
vuid = is_2khr ? "VUID-VkBlitImageInfo2KHR-srcOffset-00246" : "VUID-vkCmdBlitImage-srcOffset-00246";
skip |= LogError(cb_node->commandBuffer(), vuid,
"%s: region [%d] srcOffset[].z values (%1d, %1d) exceed srcSubresource depth extent (%1d).",
func_name, i, rgn.srcOffsets[0].z, rgn.srcOffsets[1].z, src_extent.depth);
}
if (oob) {
vuid = is_2khr ? "VUID-VkBlitImageInfo2KHR-pRegions-00215" : "VUID-vkCmdBlitImage-pRegions-00215";
skip |= LogError(cb_node->commandBuffer(), vuid, "%s: region [%d] source image blit region exceeds image dimensions.",
func_name, i);
}
// Validate dest image offsets
VkExtent3D dst_extent = GetImageSubresourceExtent(dst_image_state, &(rgn.dstSubresource));
if (VK_IMAGE_TYPE_1D == dst_type) {
if ((0 != rgn.dstOffsets[0].y) || (1 != rgn.dstOffsets[1].y)) {
vuid = is_2khr ? "VUID-VkBlitImageInfo2KHR-dstImage-00250" : "VUID-vkCmdBlitImage-dstImage-00250";
skip |= LogError(cb_node->commandBuffer(), vuid,
"%s: region [%d], dest image of type VK_IMAGE_TYPE_1D with dstOffset[].y values of "
"(%1d, %1d). These must be (0, 1).",
func_name, i, rgn.dstOffsets[0].y, rgn.dstOffsets[1].y);
}
}
if ((VK_IMAGE_TYPE_1D == dst_type) || (VK_IMAGE_TYPE_2D == dst_type)) {
if ((0 != rgn.dstOffsets[0].z) || (1 != rgn.dstOffsets[1].z)) {
vuid = is_2khr ? "VUID-VkBlitImageInfo2KHR-dstImage-00252" : "VUID-vkCmdBlitImage-dstImage-00252";
skip |= LogError(cb_node->commandBuffer(), vuid,
"%s: region [%d], dest image of type VK_IMAGE_TYPE_1D or VK_IMAGE_TYPE_2D with "
"dstOffset[].z values of (%1d, %1d). These must be (0, 1).",
func_name, i, rgn.dstOffsets[0].z, rgn.dstOffsets[1].z);
}
}
oob = false;
if ((rgn.dstOffsets[0].x < 0) || (rgn.dstOffsets[0].x > static_cast<int32_t>(dst_extent.width)) ||
(rgn.dstOffsets[1].x < 0) || (rgn.dstOffsets[1].x > static_cast<int32_t>(dst_extent.width))) {
oob = true;
vuid = is_2khr ? "VUID-VkBlitImageInfo2KHR-dstOffset-00248" : "VUID-vkCmdBlitImage-dstOffset-00248";
skip |= LogError(cb_node->commandBuffer(), vuid,
"%s: region [%d] dstOffset[].x values (%1d, %1d) exceed dstSubresource width extent (%1d).",
func_name, i, rgn.dstOffsets[0].x, rgn.dstOffsets[1].x, dst_extent.width);
}
if ((rgn.dstOffsets[0].y < 0) || (rgn.dstOffsets[0].y > static_cast<int32_t>(dst_extent.height)) ||
(rgn.dstOffsets[1].y < 0) || (rgn.dstOffsets[1].y > static_cast<int32_t>(dst_extent.height))) {
oob = true;
vuid = is_2khr ? "VUID-VkBlitImageInfo2KHR-dstOffset-00249" : "VUID-vkCmdBlitImage-dstOffset-00249";
skip |= LogError(cb_node->commandBuffer(), vuid,
"%s: region [%d] dstOffset[].y values (%1d, %1d) exceed dstSubresource height extent (%1d).",
func_name, i, rgn.dstOffsets[0].y, rgn.dstOffsets[1].y, dst_extent.height);
}
if ((rgn.dstOffsets[0].z < 0) || (rgn.dstOffsets[0].z > static_cast<int32_t>(dst_extent.depth)) ||
(rgn.dstOffsets[1].z < 0) || (rgn.dstOffsets[1].z > static_cast<int32_t>(dst_extent.depth))) {
oob = true;
vuid = is_2khr ? "VUID-VkBlitImageInfo2KHR-dstOffset-00251" : "VUID-vkCmdBlitImage-dstOffset-00251";
skip |= LogError(cb_node->commandBuffer(), vuid,
"%s: region [%d] dstOffset[].z values (%1d, %1d) exceed dstSubresource depth extent (%1d).",
func_name, i, rgn.dstOffsets[0].z, rgn.dstOffsets[1].z, dst_extent.depth);
}
if (oob) {
vuid = is_2khr ? "VUID-VkBlitImageInfo2KHR-pRegions-00216" : "VUID-vkCmdBlitImage-pRegions-00216";
skip |= LogError(cb_node->commandBuffer(), vuid,
"%s: region [%d] destination image blit region exceeds image dimensions.", func_name, i);
}
if ((VK_IMAGE_TYPE_3D == src_type) || (VK_IMAGE_TYPE_3D == dst_type)) {
if ((0 != rgn.srcSubresource.baseArrayLayer) || (1 != rgn.srcSubresource.layerCount) ||
(0 != rgn.dstSubresource.baseArrayLayer) || (1 != rgn.dstSubresource.layerCount)) {
vuid = is_2khr ? "VUID-VkBlitImageInfo2KHR-srcImage-00240" : "VUID-vkCmdBlitImage-srcImage-00240";
skip |= LogError(cb_node->commandBuffer(), vuid,
"%s: region [%d] blit to/from a 3D image type with a non-zero baseArrayLayer, or a "
"layerCount other than 1.",
func_name, i);
}
}
} // per-region checks
} else {
assert(0);
}
return skip;
}
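// Entry points for vkCmdBlitImage and vkCmdBlitImage2KHR; both forward to the shared ValidateCmdBlitImage above,
// differing only in how the blit parameters are unpacked and in the copy-command version used to select VUID strings.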
bool CoreChecks::PreCallValidateCmdBlitImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
const VkImageBlit *pRegions, VkFilter filter) const {
return ValidateCmdBlitImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount, pRegions, filter,
COPY_COMMAND_VERSION_1);
}
bool CoreChecks::PreCallValidateCmdBlitImage2KHR(VkCommandBuffer commandBuffer, const VkBlitImageInfo2KHR *pBlitImageInfo) const {
return ValidateCmdBlitImage(commandBuffer, pBlitImageInfo->srcImage, pBlitImageInfo->srcImageLayout, pBlitImageInfo->dstImage,
pBlitImageInfo->dstImageLayout, pBlitImageInfo->regionCount, pBlitImageInfo->pRegions,
pBlitImageInfo->filter, COPY_COMMAND_VERSION_2);
}
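// Shared record-time handler for the blit commands: stamps the expected initial layout of every source and
// destination subresource referenced by the blit regions into the command buffer's layout map so that later
// layout validation has a baseline to compare against.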
template <typename RegionType>
void CoreChecks::RecordCmdBlitImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage,
VkImageLayout dstImageLayout, uint32_t regionCount, const RegionType *pRegions,
VkFilter filter) {
auto cb_node = GetCBState(commandBuffer);
auto src_image_state = GetImageState(srcImage);
auto dst_image_state = GetImageState(dstImage);
// Make sure that all image slices are updated to the correct layout
for (uint32_t i = 0; i < regionCount; ++i) {
SetImageInitialLayout(cb_node, *src_image_state, pRegions[i].srcSubresource, srcImageLayout);
SetImageInitialLayout(cb_node, *dst_image_state, pRegions[i].dstSubresource, dstImageLayout);
}
}
void CoreChecks::PreCallRecordCmdBlitImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
const VkImageBlit *pRegions, VkFilter filter) {
StateTracker::PreCallRecordCmdBlitImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount,
pRegions, filter);
RecordCmdBlitImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount, pRegions, filter);
}
void CoreChecks::PreCallRecordCmdBlitImage2KHR(VkCommandBuffer commandBuffer, const VkBlitImageInfo2KHR *pBlitImageInfo) {
StateTracker::PreCallRecordCmdBlitImage2KHR(commandBuffer, pBlitImageInfo);
RecordCmdBlitImage(commandBuffer, pBlitImageInfo->srcImage, pBlitImageInfo->srcImageLayout, pBlitImageInfo->dstImage,
pBlitImageInfo->dstImageLayout, pBlitImageInfo->regionCount, pBlitImageInfo->pRegions,
pBlitImageInfo->filter);
}
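// Fetch (or lazily create) the layout range map for an image in the given global/overlay layout map.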
GlobalImageLayoutRangeMap *GetLayoutRangeMap(GlobalImageLayoutMap &map, const IMAGE_STATE &image_state) {
// This approach allows a single hash lookup to either find an existing entry or create a new one
auto &layout_map = map[image_state.image()];
if (!layout_map) {
layout_map.emplace(image_state.subresource_encoder.SubresourceCount());
}
return &layout_map;
}
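// Const lookup variant: returns the image's layout range map if present, nullptr otherwise.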
const GlobalImageLayoutRangeMap *GetLayoutRangeMap(const GlobalImageLayoutMap &map, VkImage image) {
auto it = map.find(image);
if (it != map.end()) {
return &it->second;
}
return nullptr;
}
// Helper to update the Global or Overlay layout map
struct GlobalLayoutUpdater {
bool update(VkImageLayout &dst, const image_layout_map::ImageSubresourceLayoutMap::LayoutEntry &src) const {
if (src.current_layout != image_layout_map::kInvalidLayout && dst != src.current_layout) {
dst = src.current_layout;
return true;
}
return false;
}
layer_data::optional<VkImageLayout> insert(const image_layout_map::ImageSubresourceLayoutMap::LayoutEntry &src) const {
layer_data::optional<VkImageLayout> result;
if (src.current_layout != image_layout_map::kInvalidLayout) {
result.emplace(src.current_layout);
}
return result;
}
};
// This validates that the initial layout specified in the command buffer for the IMAGE is the same as the global IMAGE layout
bool CoreChecks::ValidateCmdBufImageLayouts(const CMD_BUFFER_STATE *pCB, const GlobalImageLayoutMap &globalImageLayoutMap,
GlobalImageLayoutMap &overlayLayoutMap) const {
if (disabled[image_layout_validation]) return false;
bool skip = false;
// Iterate over the layout maps for each referenced image
GlobalImageLayoutRangeMap empty_map(1);
for (const auto &layout_map_entry : pCB->image_layout_map) {
const auto image = layout_map_entry.first;
const auto *image_state = GetImageState(image);
if (!image_state) continue; // Can't check layouts of a dead image
const auto &subres_map = layout_map_entry.second;
const auto &layout_map = subres_map->GetLayoutMap();
// Validate the initial_uses for each subresource referenced
if (layout_map.empty()) continue;
auto *overlay_map = GetLayoutRangeMap(overlayLayoutMap, *image_state);
const auto *global_map = GetLayoutRangeMap(globalImageLayoutMap, image);
if (global_map == nullptr) {
global_map = &empty_map;
}
// Note: don't know if it would matter
// if (global_map->empty() && overlay_map->empty()) // skip this next loop...;
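// Walk the command buffer's initial-layout ranges in parallel with the overlay and global maps; for each
// intersected range the overlay map (layouts already set earlier in this validation pass) takes precedence
// over the device-global map when determining the current layout to compare against.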
auto pos = layout_map.begin();
const auto end = layout_map.end();
sparse_container::parallel_iterator<const GlobalImageLayoutRangeMap> current_layout(*overlay_map, *global_map,
pos->first.begin);
while (pos != end) {
VkImageLayout initial_layout = pos->second.initial_layout;
assert(initial_layout != image_layout_map::kInvalidLayout);
if (initial_layout == image_layout_map::kInvalidLayout) {
// Defensive: advance before continuing so an invalid entry cannot spin this loop forever
++pos;
if (pos != end) {
current_layout.seek(pos->first.begin);
}
continue;
}
VkImageLayout image_layout = kInvalidLayout;
if (current_layout->range.empty()) break; // When we are past the end of data in overlay and global... stop looking
if (current_layout->pos_A->valid) { // pos_A denotes the overlay map in the parallel iterator
image_layout = current_layout->pos_A->lower_bound->second;
} else if (current_layout->pos_B->valid) { // pos_B denotes the global map in the parallel iterator
image_layout = current_layout->pos_B->lower_bound->second;
}
const auto intersected_range = pos->first & current_layout->range;
if (initial_layout == VK_IMAGE_LAYOUT_UNDEFINED) {
// TODO: Set memory invalid which is in mem_tracker currently
} else if (image_layout != initial_layout) {
// Need to look up the initial layout *state* to get a bit more information
const auto *initial_layout_state = subres_map->GetSubresourceInitialLayoutState(pos->first.begin);
assert(initial_layout_state); // There's no way we should have an initial layout without matching state...
bool matches = ImageLayoutMatches(initial_layout_state->aspect_mask, image_layout, initial_layout);
if (!matches) {
// We can report all the errors for the intersected range directly
for (auto index : sparse_container::range_view<decltype(intersected_range)>(intersected_range)) {
const auto subresource = image_state->subresource_encoder.Decode(index);
skip |= LogError(
pCB->commandBuffer(), kVUID_Core_DrawState_InvalidImageLayout,
"Submitted command buffer expects %s (subresource: aspectMask 0x%X array layer %u, mip level %u) "
"to be in layout %s--instead, current layout is %s.",
report_data->FormatHandle(image).c_str(), subresource.aspectMask, subresource.arrayLayer,
subresource.mipLevel, string_VkImageLayout(initial_layout), string_VkImageLayout(image_layout));
}
}
}
if (pos->first.includes(intersected_range.end)) {
current_layout.seek(intersected_range.end);
} else {
++pos;
if (pos != end) {
current_layout.seek(pos->first.begin);
}
}
}
// Update all layout set operations (which will be a subset of the initial_layouts)
sparse_container::splice(*overlay_map, subres_map->GetLayoutMap(), GlobalLayoutUpdater());
}
return skip;
}
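// At queue submit time, fold each submitted command buffer's per-subresource layout transitions into the
// device-global image layout map.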
void CoreChecks::UpdateCmdBufImageLayouts(CMD_BUFFER_STATE *pCB) {
for (const auto &layout_map_entry : pCB->image_layout_map) {
const auto image = layout_map_entry.first;
const auto &subres_map = layout_map_entry.second;
const auto *image_state = GetImageState(image);
if (!image_state) continue; // Can't set layouts of a dead image
auto *global_map = GetLayoutRangeMap(imageLayoutMap, *image_state);
sparse_container::splice(*global_map, subres_map->GetLayoutMap(), GlobalLayoutUpdater());
}
}
// ValidateLayoutVsAttachmentDescription is a general function where we can validate various state associated with the
// VkAttachmentDescription structs that are used by the sub-passes of a renderpass. Initial check is to make sure that READ_ONLY
// layout attachments don't have CLEAR as their loadOp.
bool CoreChecks::ValidateLayoutVsAttachmentDescription(const debug_report_data *report_data, RenderPassCreateVersion rp_version,
const VkImageLayout first_layout, const uint32_t attachment,
const VkAttachmentDescription2 &attachment_description) const {
bool skip = false;
const bool use_rp2 = (rp_version == RENDER_PASS_VERSION_2);
// Verify that initial loadOp on READ_ONLY attachments is not CLEAR
// For both loadOp and stencilLoadOp, rp2 covers this in one VU while rp1 splits it across two VUs, one gated behind the Maintenance2 extension
// The VUIDs are checked below in the following order: rp2 -> rp1 with Maintenance2 -> rp1 with no extension
if (attachment_description.loadOp == VK_ATTACHMENT_LOAD_OP_CLEAR) {
if (use_rp2 && ((first_layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL) ||
(first_layout == VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL) ||
(first_layout == VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL))) {
skip |= LogError(device, "VUID-VkRenderPassCreateInfo2-pAttachments-02522",
"vkCreateRenderPass2(): Cannot clear attachment %d with invalid first layout %s.", attachment,
string_VkImageLayout(first_layout));
} else if ((use_rp2 == false) && (device_extensions.vk_khr_maintenance2) &&
(first_layout == VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL)) {
skip |= LogError(device, "VUID-VkRenderPassCreateInfo-pAttachments-01566",
"vkCreateRenderPass(): Cannot clear attachment %d with invalid first layout %s.", attachment,
string_VkImageLayout(first_layout));
} else if ((use_rp2 == false) && ((first_layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL) ||
(first_layout == VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL))) {
skip |= LogError(device, "VUID-VkRenderPassCreateInfo-pAttachments-00836",
"vkCreateRenderPass(): Cannot clear attachment %d with invalid first layout %s.", attachment,
string_VkImageLayout(first_layout));
}
}
// Same as above for loadOp, but for stencilLoadOp
if (attachment_description.stencilLoadOp == VK_ATTACHMENT_LOAD_OP_CLEAR) {
if (use_rp2 && ((first_layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL) ||
(first_layout == VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL) ||
(first_layout == VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL))) {
skip |= LogError(device, "VUID-VkRenderPassCreateInfo2-pAttachments-02523",
"vkCreateRenderPass2(): Cannot clear attachment %d with invalid first layout %s.", attachment,
string_VkImageLayout(first_layout));
} else if ((use_rp2 == false) && (device_extensions.vk_khr_maintenance2) &&
(first_layout == VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL)) {
skip |= LogError(device, "VUID-VkRenderPassCreateInfo-pAttachments-01567",
"vkCreateRenderPass(): Cannot clear attachment %d with invalid first layout %s.", attachment,
string_VkImageLayout(first_layout));
} else if ((use_rp2 == false) && ((first_layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL) ||
(first_layout == VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL))) {
skip |= LogError(device, "VUID-VkRenderPassCreateInfo-pAttachments-02511",
"vkCreateRenderPass(): Cannot clear attachment %d with invalid first layout %s.", attachment,
string_VkImageLayout(first_layout));
}
}
return skip;
}
// Helper function to validate correct usage bits set for buffers or images. Verify that (actual & desired) flags != 0 or, if strict
// is true, verify that (actual & desired) flags == desired
template <typename T1>
bool CoreChecks::ValidateUsageFlags(VkFlags actual, VkFlags desired, VkBool32 strict, const T1 object,
const VulkanTypedHandle &typed_handle, const char *msgCode, char const *func_name,
char const *usage_str) const {
bool correct_usage = false;
bool skip = false;
const char *type_str = object_string[typed_handle.type];
if (strict) {
correct_usage = ((actual & desired) == desired);
} else {
correct_usage = ((actual & desired) != 0);
}
if (!correct_usage) {
// All callers should have a valid VUID
assert(msgCode != kVUIDUndefined);
skip =
LogError(object, msgCode, "Invalid usage flag for %s used by %s. In this case, %s should have %s set during creation.",
report_data->FormatHandle(typed_handle).c_str(), func_name, type_str, usage_str);
}
return skip;
}
// Helper function to validate usage flags for images. For the given image_state send actual vs. desired usage off to helper above
// where an error will be flagged if usage is not correct
bool CoreChecks::ValidateImageUsageFlags(IMAGE_STATE const *image_state, VkFlags desired, bool strict, const char *msgCode,
char const *func_name, char const *usage_string) const {
return ValidateUsageFlags(image_state->createInfo.usage, desired, strict, image_state->image(),
image_state->Handle(), msgCode, func_name, usage_string);
}
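// Verify that the image's format feature flags (captured at image creation, including Android hardware buffer
// external formats) contain all of the requested feature bits; the AHB case gets a more specific error message.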
bool CoreChecks::ValidateImageFormatFeatureFlags(IMAGE_STATE const *image_state, VkFormatFeatureFlags desired,
char const *func_name, const char *vuid) const {
bool skip = false;
const VkFormatFeatureFlags image_format_features = image_state->format_features;
if ((image_format_features & desired) != desired) {
// Same error, but more details if it was an AHB external format
if (image_state->has_ahb_format == true) {
skip |= LogError(image_state->image(), vuid,
"In %s, VkFormatFeatureFlags (0x%08X) does not support required feature %s for the external format "
"found in VkAndroidHardwareBufferFormatPropertiesANDROID::formatFeatures used by %s.",
func_name, image_format_features, string_VkFormatFeatureFlags(desired).c_str(),
report_data->FormatHandle(image_state->image()).c_str());
} else {
skip |= LogError(image_state->image(), vuid,
"In %s, VkFormatFeatureFlags (0x%08X) does not support required feature %s for format %u used by %s "
"with tiling %s.",
func_name, image_format_features, string_VkFormatFeatureFlags(desired).c_str(),
image_state->createInfo.format, report_data->FormatHandle(image_state->image()).c_str(),
string_VkImageTiling(image_state->createInfo.tiling));
}
}
return skip;
}
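// Validate a VkImageSubresourceLayers struct used by copy/blit/resolve regions: non-zero layerCount, no metadata
// or memory-plane aspects, and VK_IMAGE_ASPECT_COLOR_BIT not combined with depth/stencil aspects.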
bool CoreChecks::ValidateImageSubresourceLayers(const CMD_BUFFER_STATE *cb_node, const VkImageSubresourceLayers *subresource_layers,
char const *func_name, char const *member, uint32_t i) const {
bool skip = false;
const VkImageAspectFlags aspect_mask = subresource_layers->aspectMask;
// layerCount must not be zero
if (subresource_layers->layerCount == 0) {
skip |= LogError(cb_node->commandBuffer(), "VUID-VkImageSubresourceLayers-layerCount-01700",
"In %s, pRegions[%u].%s.layerCount must not be zero.", func_name, i, member);
}
// aspectMask must not contain VK_IMAGE_ASPECT_METADATA_BIT
if (aspect_mask & VK_IMAGE_ASPECT_METADATA_BIT) {
skip |= LogError(cb_node->commandBuffer(), "VUID-VkImageSubresourceLayers-aspectMask-00168",
"In %s, pRegions[%u].%s.aspectMask has VK_IMAGE_ASPECT_METADATA_BIT set.", func_name, i, member);
}
// if aspectMask contains COLOR, it must not contain either DEPTH or STENCIL
if ((aspect_mask & VK_IMAGE_ASPECT_COLOR_BIT) && (aspect_mask & (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT))) {
skip |= LogError(cb_node->commandBuffer(), "VUID-VkImageSubresourceLayers-aspectMask-00167",
"In %s, pRegions[%u].%s.aspectMask has VK_IMAGE_ASPECT_COLOR_BIT and either VK_IMAGE_ASPECT_DEPTH_BIT or "
"VK_IMAGE_ASPECT_STENCIL_BIT set.",
func_name, i, member);
}
// aspectMask must not contain VK_IMAGE_ASPECT_MEMORY_PLANE_i_BIT_EXT
if (aspect_mask & (VK_IMAGE_ASPECT_MEMORY_PLANE_0_BIT_EXT | VK_IMAGE_ASPECT_MEMORY_PLANE_1_BIT_EXT |
VK_IMAGE_ASPECT_MEMORY_PLANE_2_BIT_EXT | VK_IMAGE_ASPECT_MEMORY_PLANE_3_BIT_EXT)) {
skip |= LogError(cb_node->commandBuffer(), "VUID-VkImageSubresourceLayers-aspectMask-02247",
"In %s, pRegions[%u].%s.aspectMask has a VK_IMAGE_ASPECT_MEMORY_PLANE_*_BIT_EXT bit set.", func_name, i,
member);
}
return skip;
}
// Helper function to validate usage flags for buffers. For given buffer_state send actual vs. desired usage off to helper above
// where an error will be flagged if usage is not correct
bool CoreChecks::ValidateBufferUsageFlags(BUFFER_STATE const *buffer_state, VkFlags desired, bool strict, const char *msgCode,
char const *func_name, char const *usage_string) const {
return ValidateUsageFlags(buffer_state->createInfo.usage, desired, strict, buffer_state->buffer(),
buffer_state->Handle(), msgCode, func_name, usage_string);
}
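// Validate the offset/range of a buffer view against the buffer size, the view format's element size, and
// VkPhysicalDeviceLimits::maxTexelBufferElements, handling both explicit ranges and VK_WHOLE_SIZE.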
bool CoreChecks::ValidateBufferViewRange(const BUFFER_STATE *buffer_state, const VkBufferViewCreateInfo *pCreateInfo,
const VkPhysicalDeviceLimits *device_limits) const {
bool skip = false;
const VkDeviceSize &range = pCreateInfo->range;
if (range != VK_WHOLE_SIZE) {
// Range must be greater than 0
if (range == 0) {
skip |= LogError(buffer_state->buffer(), "VUID-VkBufferViewCreateInfo-range-00928",
"vkCreateBufferView(): If VkBufferViewCreateInfo range (%" PRIuLEAST64
") does not equal VK_WHOLE_SIZE, range must be greater than 0.",
range);
}
// Range must be a multiple of the element size of format
const uint32_t format_size = FormatElementSize(pCreateInfo->format);
if (SafeModulo(range, format_size) != 0) {
skip |= LogError(buffer_state->buffer(), "VUID-VkBufferViewCreateInfo-range-00929",
"vkCreateBufferView(): If VkBufferViewCreateInfo range (%" PRIuLEAST64
") does not equal VK_WHOLE_SIZE, range must be a multiple of the element size of the format "
"(%" PRIu32 ").",
range, format_size);
}
// Range divided by the element size of format must be less than or equal to VkPhysicalDeviceLimits::maxTexelBufferElements
if (SafeDivision(range, format_size) > device_limits->maxTexelBufferElements) {
skip |= LogError(buffer_state->buffer(), "VUID-VkBufferViewCreateInfo-range-00930",
"vkCreateBufferView(): If VkBufferViewCreateInfo range (%" PRIuLEAST64
") does not equal VK_WHOLE_SIZE, range divided by the element size of the format (%" PRIu32
") must be less than or equal to VkPhysicalDeviceLimits::maxTexelBufferElements (%" PRIuLEAST32 ").",
range, format_size, device_limits->maxTexelBufferElements);
}
// The sum of range and offset must be less than or equal to the size of buffer
if (range + pCreateInfo->offset > buffer_state->createInfo.size) {
skip |= LogError(buffer_state->buffer(), "VUID-VkBufferViewCreateInfo-offset-00931",
"vkCreateBufferView(): If VkBufferViewCreateInfo range (%" PRIuLEAST64
") does not equal VK_WHOLE_SIZE, the sum of offset (%" PRIuLEAST64
") and range must be less than or equal to the size of the buffer (%" PRIuLEAST64 ").",
range, pCreateInfo->offset, buffer_state->createInfo.size);
}
} else {
const uint32_t format_size = FormatElementSize(pCreateInfo->format);
// Size of buffer - offset, divided by the element size of format must be less than or equal to
// VkPhysicalDeviceLimits::maxTexelBufferElements
if (SafeDivision(buffer_state->createInfo.size - pCreateInfo->offset, format_size) >
device_limits->maxTexelBufferElements) {
skip |= LogError(buffer_state->buffer(), "VUID-VkBufferViewCreateInfo-range-04059",
"vkCreateBufferView(): If VkBufferViewCreateInfo range (%" PRIuLEAST64
") equals VK_WHOLE_SIZE, the buffer's size (%" PRIuLEAST64 ") minus the offset (%" PRIuLEAST64
"), divided by the element size of the format (%" PRIu32
") must be less than or equal to VkPhysicalDeviceLimits::maxTexelBufferElements (%" PRIuLEAST32 ").",
range, buffer_state->createInfo.size, pCreateInfo->offset, format_size,
device_limits->maxTexelBufferElements);
}
}
return skip;
}
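// Check that the view format actually supports uniform/storage texel buffer usage when the underlying buffer was
// created with the corresponding usage bits.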
bool CoreChecks::ValidateBufferViewBuffer(const BUFFER_STATE *buffer_state, const VkBufferViewCreateInfo *pCreateInfo) const {
bool skip = false;
const VkFormatProperties format_properties = GetPDFormatProperties(pCreateInfo->format);
if ((buffer_state->createInfo.usage & VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT) &&
!(format_properties.bufferFeatures & VK_FORMAT_FEATURE_UNIFORM_TEXEL_BUFFER_BIT)) {
skip |= LogError(buffer_state->buffer(), "VUID-VkBufferViewCreateInfo-buffer-00933",
"vkCreateBufferView(): If buffer was created with `usage` containing "
"VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT, format (%s) must "
"be supported for uniform texel buffers",
string_VkFormat(pCreateInfo->format));
}
if ((buffer_state->createInfo.usage & VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT) &&
!(format_properties.bufferFeatures & VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_BIT)) {
skip |= LogError(buffer_state->buffer(), "VUID-VkBufferViewCreateInfo-buffer-00934",
"vkCreateBufferView(): If buffer was created with `usage` containing "
"VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT, format (%s) must "
"be supported for storage texel buffers",
string_VkFormat(pCreateInfo->format));
}
return skip;
}
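// vkCreateBuffer validation: device-address capture/replay requirements, concurrent sharing-mode queue family
// indices, and protected-memory vs. sparse create flag interactions.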
bool CoreChecks::PreCallValidateCreateBuffer(VkDevice device, const VkBufferCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkBuffer *pBuffer) const {
bool skip = false;
// TODO: Add check for "VUID-vkCreateBuffer-flags-00911" (sparse address space accounting)
auto chained_devaddr_struct = LvlFindInChain<VkBufferDeviceAddressCreateInfoEXT>(pCreateInfo->pNext);
if (chained_devaddr_struct) {
if (!(pCreateInfo->flags & VK_BUFFER_CREATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT) &&
chained_devaddr_struct->deviceAddress != 0) {
skip |= LogError(device, "VUID-VkBufferCreateInfo-deviceAddress-02604",
"vkCreateBuffer(): Non-zero VkBufferDeviceAddressCreateInfoEXT::deviceAddress "
"requires VK_BUFFER_CREATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT.");
}
}
auto chained_opaqueaddr_struct = LvlFindInChain<VkBufferOpaqueCaptureAddressCreateInfo>(pCreateInfo->pNext);
if (chained_opaqueaddr_struct) {
if (!(pCreateInfo->flags & VK_BUFFER_CREATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT) &&
chained_opaqueaddr_struct->opaqueCaptureAddress != 0) {
skip |= LogError(device, "VUID-VkBufferCreateInfo-opaqueCaptureAddress-03337",
"vkCreateBuffer(): Non-zero VkBufferOpaqueCaptureAddressCreateInfo::opaqueCaptureAddress"
"requires VK_BUFFER_CREATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT.");
}
}
if ((pCreateInfo->flags & VK_BUFFER_CREATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT) &&
!enabled_features.core12.bufferDeviceAddressCaptureReplay &&
!enabled_features.buffer_device_address_ext.bufferDeviceAddressCaptureReplay) {
skip |= LogError(
device, "VUID-VkBufferCreateInfo-flags-03338",
"vkCreateBuffer(): the bufferDeviceAddressCaptureReplay device feature is disabled: Buffers cannot be created with "
"the VK_BUFFER_CREATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT set.");
}
if (pCreateInfo->sharingMode == VK_SHARING_MODE_CONCURRENT && pCreateInfo->pQueueFamilyIndices) {
skip |= ValidatePhysicalDeviceQueueFamilies(pCreateInfo->queueFamilyIndexCount, pCreateInfo->pQueueFamilyIndices,
"vkCreateBuffer", "pCreateInfo->pQueueFamilyIndices",
"VUID-VkBufferCreateInfo-sharingMode-01419");
}
if ((pCreateInfo->flags & VK_BUFFER_CREATE_PROTECTED_BIT) != 0) {
if (enabled_features.core11.protectedMemory == VK_FALSE) {
skip |= LogError(device, "VUID-VkBufferCreateInfo-flags-01887",
"vkCreateBuffer(): the protectedMemory device feature is disabled: Buffers cannot be created with the "
"VK_BUFFER_CREATE_PROTECTED_BIT set.");
}
const VkBufferCreateFlags invalid_flags =
VK_BUFFER_CREATE_SPARSE_BINDING_BIT | VK_BUFFER_CREATE_SPARSE_RESIDENCY_BIT | VK_BUFFER_CREATE_SPARSE_ALIASED_BIT;
if ((pCreateInfo->flags & invalid_flags) != 0) {
skip |= LogError(device, "VUID-VkBufferCreateInfo-None-01888",
"vkCreateBuffer(): VK_BUFFER_CREATE_PROTECTED_BIT is set so no sparse create flags can be used at "
"same time (VK_BUFFER_CREATE_SPARSE_BINDING_BIT | VK_BUFFER_CREATE_SPARSE_RESIDENCY_BIT | "
"VK_BUFFER_CREATE_SPARSE_ALIASED_BIT).");
}
}
return skip;
}
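// vkCreateBufferView validation: memory binding, texel buffer usage bits, offset alignment (including the
// VK_EXT_texel_buffer_alignment single-texel alignment rules), and range limits.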
bool CoreChecks::PreCallValidateCreateBufferView(VkDevice device, const VkBufferViewCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkBufferView *pView) const {
bool skip = false;
const BUFFER_STATE *buffer_state = GetBufferState(pCreateInfo->buffer);
// If this isn't a sparse buffer, it needs to have memory backing it at CreateBufferView time
if (buffer_state) {
skip |= ValidateMemoryIsBoundToBuffer(buffer_state, "vkCreateBufferView()", "VUID-VkBufferViewCreateInfo-buffer-00935");
// In order to create a valid buffer view, the buffer must have been created with at least one of the following flags:
// UNIFORM_TEXEL_BUFFER_BIT or STORAGE_TEXEL_BUFFER_BIT
skip |= ValidateBufferUsageFlags(buffer_state,
VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT, false,
"VUID-VkBufferViewCreateInfo-buffer-00932", "vkCreateBufferView()",
"VK_BUFFER_USAGE_[STORAGE|UNIFORM]_TEXEL_BUFFER_BIT");
// Buffer view offset must be less than the size of buffer
if (pCreateInfo->offset >= buffer_state->createInfo.size) {
skip |= LogError(buffer_state->buffer(), "VUID-VkBufferViewCreateInfo-offset-00925",
"vkCreateBufferView(): VkBufferViewCreateInfo offset (%" PRIuLEAST64
") must be less than the size of the buffer (%" PRIuLEAST64 ").",
pCreateInfo->offset, buffer_state->createInfo.size);
}
const VkPhysicalDeviceLimits *device_limits = &phys_dev_props.limits;
// Buffer view offset must be a multiple of VkPhysicalDeviceLimits::minTexelBufferOffsetAlignment
if ((pCreateInfo->offset % device_limits->minTexelBufferOffsetAlignment) != 0 &&
!enabled_features.texel_buffer_alignment_features.texelBufferAlignment) {
const char *vuid = device_extensions.vk_ext_texel_buffer_alignment ? "VUID-VkBufferViewCreateInfo-offset-02749"
: "VUID-VkBufferViewCreateInfo-offset-00926";
skip |= LogError(buffer_state->buffer(), vuid,
"vkCreateBufferView(): VkBufferViewCreateInfo offset (%" PRIuLEAST64
") must be a multiple of VkPhysicalDeviceLimits::minTexelBufferOffsetAlignment (%" PRIuLEAST64 ").",
pCreateInfo->offset, device_limits->minTexelBufferOffsetAlignment);
}
if (enabled_features.texel_buffer_alignment_features.texelBufferAlignment) {
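// Per the texel buffer alignment rules, when the texel size is a multiple of three bytes (e.g. a format such as
// VK_FORMAT_R8G8B8_UNORM), the size of a single component is used for the single-texel alignment check instead
// of the full texel size.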
VkDeviceSize element_size = FormatElementSize(pCreateInfo->format);
if ((element_size % 3) == 0) {
element_size /= 3;
}
if (buffer_state->createInfo.usage & VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT) {
VkDeviceSize alignment_requirement =
phys_dev_ext_props.texel_buffer_alignment_props.storageTexelBufferOffsetAlignmentBytes;
if (phys_dev_ext_props.texel_buffer_alignment_props.storageTexelBufferOffsetSingleTexelAlignment) {
alignment_requirement = std::min(alignment_requirement, element_size);
}
if (SafeModulo(pCreateInfo->offset, alignment_requirement) != 0) {
skip |= LogError(
buffer_state->buffer(), "VUID-VkBufferViewCreateInfo-buffer-02750",
"vkCreateBufferView(): If buffer was created with usage containing "
"VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT, "
"VkBufferViewCreateInfo offset (%" PRIuLEAST64
") must be a multiple of the lesser of "
"VkPhysicalDeviceTexelBufferAlignmentPropertiesEXT::storageTexelBufferOffsetAlignmentBytes (%" PRIuLEAST64
") or, if VkPhysicalDeviceTexelBufferAlignmentPropertiesEXT::storageTexelBufferOffsetSingleTexelAlignment "
"(%" PRId32
") is VK_TRUE, the size of a texel of the requested format. "
"If the size of a texel is a multiple of three bytes, then the size of a "
"single component of format is used instead",
pCreateInfo->offset, phys_dev_ext_props.texel_buffer_alignment_props.storageTexelBufferOffsetAlignmentBytes,
phys_dev_ext_props.texel_buffer_alignment_props.storageTexelBufferOffsetSingleTexelAlignment);
}
}
if (buffer_state->createInfo.usage & VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT) {
VkDeviceSize alignment_requirement =
phys_dev_ext_props.texel_buffer_alignment_props.uniformTexelBufferOffsetAlignmentBytes;
if (phys_dev_ext_props.texel_buffer_alignment_props.uniformTexelBufferOffsetSingleTexelAlignment) {
alignment_requirement = std::min(alignment_requirement, element_size);
}
if (SafeModulo(pCreateInfo->offset, alignment_requirement) != 0) {
skip |= LogError(
buffer_state->buffer(), "VUID-VkBufferViewCreateInfo-buffer-02751",
"vkCreateBufferView(): If buffer was created with usage containing "
"VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT, "
"VkBufferViewCreateInfo offset (%" PRIuLEAST64
") must be a multiple of the lesser of "
"VkPhysicalDeviceTexelBufferAlignmentPropertiesEXT::uniformTexelBufferOffsetAlignmentBytes (%" PRIuLEAST64
") or, if VkPhysicalDeviceTexelBufferAlignmentPropertiesEXT::uniformTexelBufferOffsetSingleTexelAlignment "
"(%" PRId32
") is VK_TRUE, the size of a texel of the requested format. "
"If the size of a texel is a multiple of three bytes, then the size of a "
"single component of format is used instead",
pCreateInfo->offset, phys_dev_ext_props.texel_buffer_alignment_props.uniformTexelBufferOffsetAlignmentBytes,
phys_dev_ext_props.texel_buffer_alignment_props.uniformTexelBufferOffsetSingleTexelAlignment);
}
}
}
skip |= ValidateBufferViewRange(buffer_state, pCreateInfo, device_limits);
skip |= ValidateBufferViewBuffer(buffer_state, pCreateInfo);
}
return skip;
}
// For the given format verify that the aspect masks make sense
bool CoreChecks::ValidateImageAspectMask(VkImage image, VkFormat format, VkImageAspectFlags aspect_mask, const char *func_name,
const char *vuid) const {
bool skip = false;
const IMAGE_STATE *image_state = GetImageState(image);
// checks color format and (single-plane or non-disjoint)
// if ycbcr extension is not supported then single-plane and non-disjoint are always both true
if ((FormatIsColor(format)) && ((FormatIsMultiplane(format) == false) || (image_state->disjoint == false))) {
if ((aspect_mask & VK_IMAGE_ASPECT_COLOR_BIT) != VK_IMAGE_ASPECT_COLOR_BIT) {
skip |= LogError(
image, vuid,
"%s: Using format (%s) with aspect flags (%s) but color image formats must have the VK_IMAGE_ASPECT_COLOR_BIT set.",
func_name, string_VkFormat(format), string_VkImageAspectFlags(aspect_mask).c_str());
} else if ((aspect_mask & VK_IMAGE_ASPECT_COLOR_BIT) != aspect_mask) {
skip |= LogError(image, vuid,
"%s: Using format (%s) with aspect flags (%s) but color image formats must have ONLY the "
"VK_IMAGE_ASPECT_COLOR_BIT set.",
func_name, string_VkFormat(format), string_VkImageAspectFlags(aspect_mask).c_str());
}
} else if (FormatIsDepthAndStencil(format)) {
if ((aspect_mask & (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) == 0) {
skip |= LogError(image, vuid,
"%s: Using format (%s) with aspect flags (%s) but depth/stencil image formats must have at least one "
"of VK_IMAGE_ASPECT_DEPTH_BIT and VK_IMAGE_ASPECT_STENCIL_BIT set.",
func_name, string_VkFormat(format), string_VkImageAspectFlags(aspect_mask).c_str());
} else if ((aspect_mask & (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) != aspect_mask) {
skip |= LogError(image, vuid,
"%s: Using format (%s) with aspect flags (%s) but combination depth/stencil image formats can have "
"only the VK_IMAGE_ASPECT_DEPTH_BIT and VK_IMAGE_ASPECT_STENCIL_BIT set.",
func_name, string_VkFormat(format), string_VkImageAspectFlags(aspect_mask).c_str());
}
} else if (FormatIsDepthOnly(format)) {
if ((aspect_mask & VK_IMAGE_ASPECT_DEPTH_BIT) != VK_IMAGE_ASPECT_DEPTH_BIT) {
skip |= LogError(image, vuid,
"%s: Using format (%s) with aspect flags (%s) but depth-only image formats must have the "
"VK_IMAGE_ASPECT_DEPTH_BIT set.",
func_name, string_VkFormat(format), string_VkImageAspectFlags(aspect_mask).c_str());
} else if ((aspect_mask & VK_IMAGE_ASPECT_DEPTH_BIT) != aspect_mask) {
skip |= LogError(image, vuid,
"%s: Using format (%s) with aspect flags (%s) but depth-only image formats can have only the "
"VK_IMAGE_ASPECT_DEPTH_BIT set.",
func_name, string_VkFormat(format), string_VkImageAspectFlags(aspect_mask).c_str());
}
} else if (FormatIsStencilOnly(format)) {
if ((aspect_mask & VK_IMAGE_ASPECT_STENCIL_BIT) != VK_IMAGE_ASPECT_STENCIL_BIT) {
skip |= LogError(image, vuid,
"%s: Using format (%s) with aspect flags (%s) but stencil-only image formats must have the "
"VK_IMAGE_ASPECT_STENCIL_BIT set.",
func_name, string_VkFormat(format), string_VkImageAspectFlags(aspect_mask).c_str());
} else if ((aspect_mask & VK_IMAGE_ASPECT_STENCIL_BIT) != aspect_mask) {
skip |= LogError(image, vuid,
"%s: Using format (%s) with aspect flags (%s) but stencil-only image formats can have only the "
"VK_IMAGE_ASPECT_STENCIL_BIT set.",
func_name, string_VkFormat(format), string_VkImageAspectFlags(aspect_mask).c_str());
}
} else if (FormatIsMultiplane(format)) {
VkImageAspectFlags valid_flags = VK_IMAGE_ASPECT_COLOR_BIT | VK_IMAGE_ASPECT_PLANE_0_BIT | VK_IMAGE_ASPECT_PLANE_1_BIT;
if (3 == FormatPlaneCount(format)) {
valid_flags = valid_flags | VK_IMAGE_ASPECT_PLANE_2_BIT;
}
if ((aspect_mask & valid_flags) != aspect_mask) {
skip |= LogError(image, vuid,
"%s: Using format (%s) with aspect flags (%s) but multi-plane image formats may have only "
"VK_IMAGE_ASPECT_COLOR_BIT or VK_IMAGE_ASPECT_PLANE_n_BITs set, where n = [0, 1, 2].",
func_name, string_VkFormat(format), string_VkImageAspectFlags(aspect_mask).c_str());
}
}
return skip;
}
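// Core VkImageSubresourceRange validation shared by image view creation, clears, and barriers: the base mip/layer
// must be within the image, and base + count (when count is not VK_REMAINING_*) must not exceed the image's mip
// level or layer count. The error VUIDs are supplied by the caller.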
bool CoreChecks::ValidateImageSubresourceRange(const uint32_t image_mip_count, const uint32_t image_layer_count,
const VkImageSubresourceRange &subresourceRange, const char *cmd_name,
const char *param_name, const char *image_layer_count_var_name, const VkImage image,
SubresourceRangeErrorCodes errorCodes) const {
bool skip = false;
// Validate mip levels
if (subresourceRange.baseMipLevel >= image_mip_count) {
skip |= LogError(image, errorCodes.base_mip_err,
"%s: %s.baseMipLevel (= %" PRIu32
") is greater or equal to the mip level count of the image (i.e. greater or equal to %" PRIu32 ").",
cmd_name, param_name, subresourceRange.baseMipLevel, image_mip_count);
}
if (subresourceRange.levelCount != VK_REMAINING_MIP_LEVELS) {
if (subresourceRange.levelCount == 0) {
skip |=
LogError(image, "VUID-VkImageSubresourceRange-levelCount-01720", "%s: %s.levelCount is 0.", cmd_name, param_name);
} else {
const uint64_t necessary_mip_count = uint64_t{subresourceRange.baseMipLevel} + uint64_t{subresourceRange.levelCount};
if (necessary_mip_count > image_mip_count) {
skip |= LogError(image, errorCodes.mip_count_err,
"%s: %s.baseMipLevel + .levelCount (= %" PRIu32 " + %" PRIu32 " = %" PRIu64
") is greater than the mip level count of the image (i.e. greater than %" PRIu32 ").",
cmd_name, param_name, subresourceRange.baseMipLevel, subresourceRange.levelCount,
necessary_mip_count, image_mip_count);
}
}
}
// Validate array layers
if (subresourceRange.baseArrayLayer >= image_layer_count) {
skip |= LogError(image, errorCodes.base_layer_err,
"%s: %s.baseArrayLayer (= %" PRIu32
") is greater or equal to the %s of the image when it was created (i.e. greater or equal to %" PRIu32 ").",
cmd_name, param_name, subresourceRange.baseArrayLayer, image_layer_count_var_name, image_layer_count);
}
if (subresourceRange.layerCount != VK_REMAINING_ARRAY_LAYERS) {
if (subresourceRange.layerCount == 0) {
skip |=
LogError(image, "VUID-VkImageSubresourceRange-layerCount-01721", "%s: %s.layerCount is 0.", cmd_name, param_name);
} else {
const uint64_t necessary_layer_count =
uint64_t{subresourceRange.baseArrayLayer} + uint64_t{subresourceRange.layerCount};
if (necessary_layer_count > image_layer_count) {
skip |= LogError(image, errorCodes.layer_count_err,
"%s: %s.baseArrayLayer + .layerCount (= %" PRIu32 " + %" PRIu32 " = %" PRIu64
") is greater than the %s of the image when it was created (i.e. greater than %" PRIu32 ").",
cmd_name, param_name, subresourceRange.baseArrayLayer, subresourceRange.layerCount,
necessary_layer_count, image_layer_count_var_name, image_layer_count);
}
}
}
return skip;
}
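// vkCreateImageView subresource range validation. For 2D/2D-array views of 3D images created with
// VK_IMAGE_CREATE_2D_ARRAY_COMPATIBLE_BIT (VK_KHR_maintenance1), the effective "layer" count is taken from the
// selected mip level's depth extent rather than arrayLayers.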
bool CoreChecks::ValidateCreateImageViewSubresourceRange(const IMAGE_STATE *image_state, bool is_imageview_2d_type,
const VkImageSubresourceRange &subresourceRange) const {
bool is_khr_maintenance1 = IsExtEnabled(device_extensions.vk_khr_maintenance1);
bool is_image_slicable = image_state->createInfo.imageType == VK_IMAGE_TYPE_3D &&
(image_state->createInfo.flags & VK_IMAGE_CREATE_2D_ARRAY_COMPATIBLE_BIT);
bool is_3_d_to_2_d_map = is_khr_maintenance1 && is_image_slicable && is_imageview_2d_type;
uint32_t image_layer_count;
if (is_3_d_to_2_d_map) {
const auto layers = LayersFromRange(subresourceRange);
const auto extent = GetImageSubresourceExtent(image_state, &layers);
image_layer_count = extent.depth;
} else {
image_layer_count = image_state->createInfo.arrayLayers;
}
const auto image_layer_count_var_name = is_3_d_to_2_d_map ? "extent.depth" : "arrayLayers";
SubresourceRangeErrorCodes subresource_range_error_codes = {};
subresource_range_error_codes.base_mip_err = "VUID-VkImageViewCreateInfo-subresourceRange-01478";
subresource_range_error_codes.mip_count_err = "VUID-VkImageViewCreateInfo-subresourceRange-01718";
subresource_range_error_codes.base_layer_err =
is_khr_maintenance1
? (is_3_d_to_2_d_map ? "VUID-VkImageViewCreateInfo-image-02724" : "VUID-VkImageViewCreateInfo-image-01482")
: "VUID-VkImageViewCreateInfo-subresourceRange-01480";
subresource_range_error_codes.layer_count_err = is_khr_maintenance1
? (is_3_d_to_2_d_map ? "VUID-VkImageViewCreateInfo-subresourceRange-02725"
: "VUID-VkImageViewCreateInfo-subresourceRange-01483")
: "VUID-VkImageViewCreateInfo-subresourceRange-01719";
return ValidateImageSubresourceRange(image_state->createInfo.mipLevels, image_layer_count, subresourceRange,
"vkCreateImageView", "pCreateInfo->subresourceRange", image_layer_count_var_name,
image_state->image(), subresource_range_error_codes);
}
bool CoreChecks::ValidateCmdClearColorSubresourceRange(const IMAGE_STATE *image_state,
const VkImageSubresourceRange &subresourceRange,
const char *param_name) const {
SubresourceRangeErrorCodes subresource_range_error_codes = {};
subresource_range_error_codes.base_mip_err = "VUID-vkCmdClearColorImage-baseMipLevel-01470";
subresource_range_error_codes.mip_count_err = "VUID-vkCmdClearColorImage-pRanges-01692";
subresource_range_error_codes.base_layer_err = "VUID-vkCmdClearColorImage-baseArrayLayer-01472";
subresource_range_error_codes.layer_count_err = "VUID-vkCmdClearColorImage-pRanges-01693";
return ValidateImageSubresourceRange(image_state->createInfo.mipLevels, image_state->createInfo.arrayLayers, subresourceRange,
"vkCmdClearColorImage", param_name, "arrayLayers", image_state->image(),
subresource_range_error_codes);
}
bool CoreChecks::ValidateCmdClearDepthSubresourceRange(const IMAGE_STATE *image_state,
const VkImageSubresourceRange &subresourceRange,
const char *param_name) const {
SubresourceRangeErrorCodes subresource_range_error_codes = {};
subresource_range_error_codes.base_mip_err = "VUID-vkCmdClearDepthStencilImage-baseMipLevel-01474";
subresource_range_error_codes.mip_count_err = "VUID-vkCmdClearDepthStencilImage-pRanges-01694";
subresource_range_error_codes.base_layer_err = "VUID-vkCmdClearDepthStencilImage-baseArrayLayer-01476";
subresource_range_error_codes.layer_count_err = "VUID-vkCmdClearDepthStencilImage-pRanges-01695";
return ValidateImageSubresourceRange(image_state->createInfo.mipLevels, image_state->createInfo.arrayLayers, subresourceRange,
"vkCmdClearDepthStencilImage", param_name, "arrayLayers", image_state->image(),
subresource_range_error_codes);
}
bool CoreChecks::ValidateImageBarrierSubresourceRange(const Location &loc, const IMAGE_STATE *image_state,
const VkImageSubresourceRange &subresourceRange) const {
return ValidateImageSubresourceRange(image_state->createInfo.mipLevels, image_state->createInfo.arrayLayers, subresourceRange,
loc.StringFunc().c_str(), loc.StringField().c_str(), "arrayLayers", image_state->image(),
sync_vuid_maps::GetSubResourceVUIDs(loc));
}
namespace barrier_queue_families {
using sync_vuid_maps::GetBarrierQueueVUID;
using sync_vuid_maps::kQueueErrorSummary;
using sync_vuid_maps::QueueError;
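// Packages the state needed to validate the queue family indices of a single buffer/image memory barrier
// (sharing mode, queue family count, external-memory support) and formats the corresponding error messages.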
class ValidatorState {
public:
ValidatorState(const ValidationStateTracker *device_data, LogObjectList &&obj, const core_error::Location &location,
const VulkanTypedHandle &barrier_handle, const VkSharingMode sharing_mode)
: device_data_(device_data),
objects_(std::move(obj)),
loc_(location),
barrier_handle_(barrier_handle),
sharing_mode_(sharing_mode),
limit_(static_cast<uint32_t>(device_data->physical_device_state->queue_family_properties.size())),
mem_ext_(IsExtEnabled(device_data->device_extensions.vk_khr_external_memory)) {}
// Log the messages using boilerplate from the object state plus VU-specific information
// One- and two-family versions; in the single-family version, param_name names the offending parameter
bool LogMsg(QueueError vu_index, uint32_t family, const char *param_name) const {
const std::string val_code = GetBarrierQueueVUID(loc_, vu_index);
const char *annotation = GetFamilyAnnotation(family);
return device_data_->LogError(objects_, val_code, "%s Barrier using %s %s created with sharingMode %s, has %s %u%s. %s",
loc_.Message().c_str(), GetTypeString(),
device_data_->report_data->FormatHandle(barrier_handle_).c_str(), GetModeString(), param_name,
family, annotation, kQueueErrorSummary.at(vu_index).c_str());
}
bool LogMsg(QueueError vu_index, uint32_t src_family, uint32_t dst_family) const {
const std::string val_code = GetBarrierQueueVUID(loc_, vu_index);
const char *src_annotation = GetFamilyAnnotation(src_family);
const char *dst_annotation = GetFamilyAnnotation(dst_family);
return device_data_->LogError(
objects_, val_code,
"%s Barrier using %s %s created with sharingMode %s, has srcQueueFamilyIndex %u%s and dstQueueFamilyIndex %u%s. %s",
loc_.Message().c_str(), GetTypeString(), device_data_->report_data->FormatHandle(barrier_handle_).c_str(),
GetModeString(), src_family, src_annotation, dst_family, dst_annotation, kQueueErrorSummary.at(vu_index).c_str());
}
// This abstract VU can only be tested at submit time, so we need a callback from the closure containing the needed
// data. Note that the mem_barrier is copied into the closure because the lambda's lifespan exceeds the validity
// guarantees for application input.
static bool ValidateAtQueueSubmit(const QUEUE_STATE *queue_state, const ValidationStateTracker *device_data,
uint32_t src_family, uint32_t dst_family, const ValidatorState &val) {
auto error_code = QueueError::kSubmitQueueMustMatchSrcOrDst;
uint32_t queue_family = queue_state->queueFamilyIndex;
if ((src_family != queue_family) && (dst_family != queue_family)) {
const std::string val_code = GetBarrierQueueVUID(val.loc_, error_code);
const char *src_annotation = val.GetFamilyAnnotation(src_family);
const char *dst_annotation = val.GetFamilyAnnotation(dst_family);
return device_data->LogError(
queue_state->queue, val_code,
"%s Barrier submitted to queue with family index %u, using %s %s created with sharingMode %s, has "
"srcQueueFamilyIndex %u%s and dstQueueFamilyIndex %u%s. %s",
val.loc_.Message().c_str(), queue_family, val.GetTypeString(),
device_data->report_data->FormatHandle(val.barrier_handle_).c_str(), val.GetModeString(), src_family,
src_annotation, dst_family, dst_annotation, kQueueErrorSummary.at(error_code).c_str());
}
return false;
}
// Logical helpers for semantic clarity
inline bool KhrExternalMem() const { return mem_ext_; }
inline bool IsValid(uint32_t queue_family) const { return (queue_family < limit_); }
inline bool IsValidOrSpecial(uint32_t queue_family) const {
return IsValid(queue_family) || (mem_ext_ && QueueFamilyIsExternal(queue_family));
}
// Helpers for LogMsg
const char *GetModeString() const { return string_VkSharingMode(sharing_mode_); }
// Descriptive text for the various types of queue family index
const char *GetFamilyAnnotation(uint32_t family) const {
const char *external = " (VK_QUEUE_FAMILY_EXTERNAL)";
const char *foreign = " (VK_QUEUE_FAMILY_FOREIGN_EXT)";
const char *ignored = " (VK_QUEUE_FAMILY_IGNORED)";
const char *valid = " (VALID)";
const char *invalid = " (INVALID)";
switch (family) {
case VK_QUEUE_FAMILY_EXTERNAL:
return external;
case VK_QUEUE_FAMILY_FOREIGN_EXT:
return foreign;
case VK_QUEUE_FAMILY_IGNORED:
return ignored;
default:
if (IsValid(family)) {
return valid;
}
return invalid;
};
}
const char *GetTypeString() const { return object_string[barrier_handle_.type]; }
VkSharingMode GetSharingMode() const { return sharing_mode_; }
protected:
const ValidationStateTracker *device_data_;
const LogObjectList objects_;
const core_error::Location loc_;
const VulkanTypedHandle barrier_handle_;
const VkSharingMode sharing_mode_;
const uint32_t limit_;
const bool mem_ext_;
};
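// Validate the src/dst queue family indices of a single barrier against the owning resource's sharing mode.
// With VK_KHR_external_memory enabled, the special values VK_QUEUE_FAMILY_EXTERNAL / VK_QUEUE_FAMILY_FOREIGN_EXT
// become legal for VK_SHARING_MODE_EXCLUSIVE ownership transfers; VK_KHR_synchronization2 removes the
// "must be VK_QUEUE_FAMILY_IGNORED" requirements for VK_SHARING_MODE_CONCURRENT resources.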
bool Validate(const CoreChecks *device_data, const CMD_BUFFER_STATE *cb_state, const ValidatorState &val,
const uint32_t src_queue_family, const uint32_t dst_queue_family) {
bool skip = false;
const bool mode_concurrent = val.GetSharingMode() == VK_SHARING_MODE_CONCURRENT;
const bool src_ignored = QueueFamilyIsIgnored(src_queue_family);
const bool dst_ignored = QueueFamilyIsIgnored(dst_queue_family);
if (val.KhrExternalMem()) {
if (mode_concurrent) {
bool sync2 = device_data->enabled_features.synchronization2_features.synchronization2 != 0;
// this requirement is removed by VK_KHR_synchronization2
if (!(src_ignored || dst_ignored) && !sync2) {
skip |= val.LogMsg(QueueError::kSrcOrDstMustBeIgnore, src_queue_family, dst_queue_family);
}
if ((src_ignored && !(dst_ignored || QueueFamilyIsExternal(dst_queue_family))) ||
(dst_ignored && !(src_ignored || QueueFamilyIsExternal(src_queue_family)))) {
skip |= val.LogMsg(QueueError::kSpecialOrIgnoreOnly, src_queue_family, dst_queue_family);
}
} else {
// VK_SHARING_MODE_EXCLUSIVE
if (src_queue_family != dst_queue_family) {
if (!val.IsValidOrSpecial(dst_queue_family)) {
skip |= val.LogMsg(QueueError::kSrcAndDstValidOrSpecial, dst_queue_family, "dstQueueFamilyIndex");
}
if (!val.IsValidOrSpecial(src_queue_family)) {
skip |= val.LogMsg(QueueError::kSrcAndDstValidOrSpecial, src_queue_family, "srcQueueFamilyIndex");
}
}
}
} else {
// No memory extension
if (mode_concurrent) {
bool sync2 = device_data->enabled_features.synchronization2_features.synchronization2 != 0;
// this requirement is removed by VK_KHR_synchronization2
if ((!src_ignored || !dst_ignored) && !sync2) {
skip |= val.LogMsg(QueueError::kSrcAndDestMustBeIgnore, src_queue_family, dst_queue_family);
}
} else {
// VK_SHARING_MODE_EXCLUSIVE
if ((src_queue_family != dst_queue_family) && !(val.IsValid(src_queue_family) && val.IsValid(dst_queue_family))) {
skip |= val.LogMsg(QueueError::kSrcAndDstBothValid, src_queue_family, dst_queue_family);
}
}
}
return skip;
}
} // namespace barrier_queue_families
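// Queue family checks for barriers on concurrently shared resources can only complete at queue submit time,
// when the submitting queue's family index is known; this wrapper builds a ValidatorState and defers to
// ValidatorState::ValidateAtQueueSubmit.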
bool CoreChecks::ValidateConcurrentBarrierAtSubmit(const Location &loc, const ValidationStateTracker *state_data,
const QUEUE_STATE *queue_state, const CMD_BUFFER_STATE *cb_state,
const VulkanTypedHandle &typed_handle, uint32_t src_queue_family,
uint32_t dst_queue_family) {
using barrier_queue_families::ValidatorState;
ValidatorState val(state_data, LogObjectList(cb_state->commandBuffer()), loc, typed_handle, VK_SHARING_MODE_CONCURRENT);
return ValidatorState::ValidateAtQueueSubmit(queue_state, state_data, src_queue_family, dst_queue_family, val);
}
// Type specific wrapper for image barriers
template <typename ImgBarrier>
bool CoreChecks::ValidateBarrierQueueFamilies(const Location &loc, const CMD_BUFFER_STATE *cb_state, const ImgBarrier &barrier,
const IMAGE_STATE *state_data) const {
// State data is required
if (!state_data) {
return false;
}
// Create the validator state from the image state
barrier_queue_families::ValidatorState val(this, LogObjectList(cb_state->commandBuffer()), loc,
state_data->Handle(), state_data->createInfo.sharingMode);
const uint32_t src_queue_family = barrier.srcQueueFamilyIndex;
const uint32_t dst_queue_family = barrier.dstQueueFamilyIndex;
return barrier_queue_families::Validate(this, cb_state, val, src_queue_family, dst_queue_family);
}
// Type specific wrapper for buffer barriers
template <typename BufBarrier>
bool CoreChecks::ValidateBarrierQueueFamilies(const Location &loc, const CMD_BUFFER_STATE *cb_state, const BufBarrier &barrier,
const BUFFER_STATE *state_data) const {
// State data is required
if (!state_data) {
return false;
}
// Create the validator state from the buffer state
barrier_queue_families::ValidatorState val(this, LogObjectList(cb_state->commandBuffer()), loc,
state_data->Handle(), state_data->createInfo.sharingMode);
const uint32_t src_queue_family = barrier.srcQueueFamilyIndex;
const uint32_t dst_queue_family = barrier.dstQueueFamilyIndex;
return barrier_queue_families::Validate(this, cb_state, val, src_queue_family, dst_queue_family);
}
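// Common validation for VkBufferMemoryBarrier / VkBufferMemoryBarrier2KHR: QFO transfer uniqueness, bound memory,
// queue family ownership, and that offset/size stay within the buffer (size may be VK_WHOLE_SIZE but must not be 0).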
template <typename Barrier>
bool CoreChecks::ValidateBufferBarrier(const LogObjectList &objects, const Location &loc, const CMD_BUFFER_STATE *cb_state,
const Barrier &mem_barrier) const {
using sync_vuid_maps::BufferError;
using sync_vuid_maps::GetBufferBarrierVUID;
bool skip = false;
skip |= ValidateQFOTransferBarrierUniqueness(loc, cb_state, mem_barrier, cb_state->qfo_transfer_buffer_barriers);
// Validate buffer barrier queue family indices
auto buffer_state = GetBufferState(mem_barrier.buffer);
if (buffer_state) {
auto buf_loc = loc.dot(Field::buffer);
const auto &mem_vuid = GetBufferBarrierVUID(buf_loc, BufferError::kNoMemory);
skip |= ValidateMemoryIsBoundToBuffer(buffer_state, loc.StringFunc().c_str(), mem_vuid.c_str());
skip |= ValidateBarrierQueueFamilies(buf_loc, cb_state, mem_barrier, buffer_state);
auto buffer_size = buffer_state->createInfo.size;
if (mem_barrier.offset >= buffer_size) {
auto offset_loc = loc.dot(Field::offset);
const auto &vuid = GetBufferBarrierVUID(offset_loc, BufferError::kOffsetTooBig);
skip |= LogError(objects, vuid, "%s %s has offset 0x%" PRIx64 " which is not less than total size 0x%" PRIx64 ".",
offset_loc.Message().c_str(), report_data->FormatHandle(mem_barrier.buffer).c_str(),
HandleToUint64(mem_barrier.offset), HandleToUint64(buffer_size));
} else if (mem_barrier.size != VK_WHOLE_SIZE && (mem_barrier.offset + mem_barrier.size > buffer_size)) {
auto size_loc = loc.dot(Field::size);
const auto &vuid = GetBufferBarrierVUID(size_loc, BufferError::kSizeOutOfRange);
skip |= LogError(objects, vuid,
"%s %s has offset 0x%" PRIx64 " and size 0x%" PRIx64 " whose sum is greater than total size 0x%" PRIx64
".",
size_loc.Message().c_str(), report_data->FormatHandle(mem_barrier.buffer).c_str(),
HandleToUint64(mem_barrier.offset), HandleToUint64(mem_barrier.size), HandleToUint64(buffer_size));
}
if (mem_barrier.size == 0) {
auto size_loc = loc.dot(Field::size);
const auto &vuid = GetBufferBarrierVUID(size_loc, BufferError::kSizeZero);
skip |= LogError(objects, vuid, "%s %s has a size of 0.", loc.Message().c_str(),
report_data->FormatHandle(mem_barrier.buffer).c_str());
}
}
return skip;
}
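// Common validation for VkImageMemoryBarrier / VkImageMemoryBarrier2KHR: QFO transfer uniqueness, layout transition
// legality (with synchronization2, oldLayout == newLayout means no transition and the newLayout restrictions are
// skipped), bound memory, queue family ownership, aspect mask, and subresource range.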
template <typename Barrier>
bool CoreChecks::ValidateImageBarrier(const LogObjectList &objects, const Location &loc, const CMD_BUFFER_STATE *cb_state,
const Barrier &mem_barrier) const {
bool skip = false;
skip |= ValidateQFOTransferBarrierUniqueness(loc, cb_state, mem_barrier, cb_state->qfo_transfer_image_barriers);
bool is_ilt = true;
if (enabled_features.synchronization2_features.synchronization2) {
is_ilt = mem_barrier.oldLayout != mem_barrier.newLayout;
}
if (is_ilt) {
if (mem_barrier.newLayout == VK_IMAGE_LAYOUT_UNDEFINED || mem_barrier.newLayout == VK_IMAGE_LAYOUT_PREINITIALIZED) {
auto layout_loc = loc.dot(Field::newLayout);
const auto &vuid = sync_vuid_maps::GetImageBarrierVUID(loc, sync_vuid_maps::ImageError::kBadLayout);
skip |=
LogError(cb_state->commandBuffer(), vuid, "%s Image Layout cannot be transitioned to UNDEFINED or PREINITIALIZED.",
layout_loc.Message().c_str());
}
}
auto image_data = GetImageState(mem_barrier.image);
if (image_data) {
auto image_loc = loc.dot(Field::image);
skip |= ValidateMemoryIsBoundToImage(image_data, loc);
skip |= ValidateBarrierQueueFamilies(image_loc, cb_state, mem_barrier, image_data);
skip |= ValidateImageAspectMask(image_data->image(), image_data->createInfo.format, mem_barrier.subresourceRange.aspectMask,
loc.StringFunc().c_str());
skip |= ValidateImageBarrierSubresourceRange(loc.dot(Field::subresourceRange), image_data, mem_barrier.subresourceRange);
}
return skip;
}
bool CoreChecks::ValidateBarriers(const Location &outer_loc, const CMD_BUFFER_STATE *cb_state, VkPipelineStageFlags src_stage_mask,
VkPipelineStageFlags dst_stage_mask, uint32_t memBarrierCount,
const VkMemoryBarrier *pMemBarriers, uint32_t bufferBarrierCount,
const VkBufferMemoryBarrier *pBufferMemBarriers, uint32_t imageMemBarrierCount,
const VkImageMemoryBarrier *pImageMemBarriers) const {
bool skip = false;
auto queue_flags = GetQueueFlags(*cb_state);
LogObjectList objects(cb_state->commandBuffer());
auto op_type =
ComputeBarrierOperationsType(cb_state, bufferBarrierCount, pBufferMemBarriers, imageMemBarrierCount, pImageMemBarriers);
for (uint32_t i = 0; i < memBarrierCount; ++i) {
const auto &mem_barrier = pMemBarriers[i];
auto loc = outer_loc.dot(Struct::VkMemoryBarrier, Field::pMemoryBarriers, i);
skip |= ValidateMemoryBarrier(objects, loc, op_type, queue_flags, mem_barrier, src_stage_mask, dst_stage_mask);
}
for (uint32_t i = 0; i < imageMemBarrierCount; ++i) {
const auto &mem_barrier = pImageMemBarriers[i];
auto loc = outer_loc.dot(Struct::VkImageMemoryBarrier, Field::pImageMemoryBarriers, i);
skip |= ValidateMemoryBarrier(objects, loc, op_type, queue_flags, mem_barrier, src_stage_mask, dst_stage_mask);
skip |= ValidateImageBarrier(objects, loc, cb_state, mem_barrier);
}
{
Location loc(outer_loc.function, Struct::VkImageMemoryBarrier);
skip |= ValidateBarriersToImages(loc, cb_state, imageMemBarrierCount, pImageMemBarriers);
}
for (uint32_t i = 0; i < bufferBarrierCount; ++i) {
const auto &mem_barrier = pBufferMemBarriers[i];
        auto loc = outer_loc.dot(Struct::VkBufferMemoryBarrier, Field::pBufferMemoryBarriers, i);
skip |= ValidateMemoryBarrier(objects, loc, op_type, queue_flags, mem_barrier, src_stage_mask, dst_stage_mask);
skip |= ValidateBufferBarrier(objects, loc, cb_state, mem_barrier);
}
return skip;
}
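// Validation for a synchronization2 VkDependencyInfoKHR: each barrier carries its own stage masks, so
// ValidateMemoryBarrier is called with the overload that reads the stage masks from the barrier itself.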
bool CoreChecks::ValidateDependencyInfo(const LogObjectList &objects, const Location &outer_loc, const CMD_BUFFER_STATE *cb_state,
BarrierOperationsType op_type, const VkDependencyInfoKHR *dep_info) const {
bool skip = false;
if (cb_state->activeRenderPass) {
skip |= ValidateRenderPassPipelineBarriers(outer_loc, cb_state, dep_info);
if (skip) return true; // Early return to avoid redundant errors from below calls
}
auto queue_flags = GetQueueFlags(*cb_state);
for (uint32_t i = 0; i < dep_info->memoryBarrierCount; ++i) {
const auto &mem_barrier = dep_info->pMemoryBarriers[i];
auto loc = outer_loc.dot(Struct::VkMemoryBarrier2KHR, Field::pMemoryBarriers, i);
skip |= ValidateMemoryBarrier(objects, loc, op_type, queue_flags, mem_barrier);
}
for (uint32_t i = 0; i < dep_info->imageMemoryBarrierCount; ++i) {
const auto &mem_barrier = dep_info->pImageMemoryBarriers[i];
auto loc = outer_loc.dot(Struct::VkImageMemoryBarrier2KHR, Field::pImageMemoryBarriers, i);
skip |= ValidateMemoryBarrier(objects, loc, op_type, queue_flags, mem_barrier);
skip |= ValidateImageBarrier(objects, loc, cb_state, mem_barrier);
}
{
Location loc(outer_loc.function, Struct::VkImageMemoryBarrier2KHR);
skip |= ValidateBarriersToImages(loc, cb_state, dep_info->imageMemoryBarrierCount, dep_info->pImageMemoryBarriers);
}
for (uint32_t i = 0; i < dep_info->bufferMemoryBarrierCount; ++i) {
const auto &mem_barrier = dep_info->pBufferMemoryBarriers[i];
auto loc = outer_loc.dot(Struct::VkBufferMemoryBarrier2KHR, Field::pBufferMemoryBarriers, i);
skip |= ValidateMemoryBarrier(objects, loc, op_type, queue_flags, mem_barrier);
skip |= ValidateBufferBarrier(objects, loc, cb_state, mem_barrier);
}
return skip;
}
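// Two ValidateMemoryBarrier overloads: the legacy form below takes the pipeline barrier's src/dst stage masks as
// separate parameters, while the sync2 form reads srcStageMask/dstStageMask from the barrier itself and also
// validates those stages against the queue's capabilities.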
template <typename Barrier>
bool CoreChecks::ValidateMemoryBarrier(const LogObjectList &objects, const Location &loc, BarrierOperationsType op_type,
VkQueueFlags queue_flags, const Barrier &barrier, VkPipelineStageFlags src_stage_mask,
VkPipelineStageFlags dst_stage_mask) const {
bool skip = false;
if (op_type == kAllRelease || op_type == kGeneral) {
skip |= ValidateAccessMask(objects, loc.dot(Field::srcAccessMask), queue_flags, barrier.srcAccessMask, src_stage_mask);
}
if (op_type == kAllAcquire || op_type == kGeneral) {
skip |= ValidateAccessMask(objects, loc.dot(Field::dstAccessMask), queue_flags, barrier.dstAccessMask, dst_stage_mask);
}
return skip;
}
template <typename Barrier>
bool CoreChecks::ValidateMemoryBarrier(const LogObjectList &objects, const Location &loc, BarrierOperationsType op_type,
VkQueueFlags queue_flags, const Barrier &barrier) const {
bool skip = false;
if (op_type == kAllRelease || op_type == kGeneral) {
skip |= ValidatePipelineStage(objects, loc.dot(Field::srcStageMask), queue_flags, barrier.srcStageMask);
skip |=
ValidateAccessMask(objects, loc.dot(Field::srcAccessMask), queue_flags, barrier.srcAccessMask, barrier.srcStageMask);
}
if (op_type == kAllAcquire || op_type == kGeneral) {
skip |= ValidatePipelineStage(objects, loc.dot(Field::dstStageMask), queue_flags, barrier.dstStageMask);
skip |=
ValidateAccessMask(objects, loc.dot(Field::dstAccessMask), queue_flags, barrier.dstAccessMask, barrier.dstStageMask);
}
return skip;
}
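// A subpass dependency may chain a VkMemoryBarrier2KHR (synchronization2); in that case the legacy stage/access
// masks in VkSubpassDependency2 must all be zero and the chained barrier supplies them instead.
// Illustrative example (hypothetical values):
//   VkMemoryBarrier2KHR mb2{VK_STRUCTURE_TYPE_MEMORY_BARRIER_2_KHR};
//   VkSubpassDependency2 dep{VK_STRUCTURE_TYPE_SUBPASS_DEPENDENCY_2, &mb2};
//   dep.srcStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT;  // non-zero legacy mask -> flagged below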
bool CoreChecks::ValidateSubpassBarrier(const LogObjectList &objects, const Location &loc, VkQueueFlags queue_flags,
const VkSubpassDependency2 &barrier) const {
bool skip = false;
const auto *mem_barrier = LvlFindInChain<VkMemoryBarrier2KHR>(barrier.pNext);
if (mem_barrier) {
if (enabled_features.synchronization2_features.synchronization2) {
if (barrier.srcAccessMask != 0) {
skip |= LogError(objects, "UNASSIGNED-CoreChecks-VkSubpassDependency2-srcAccessMask",
"%s is non-zero when a VkMemoryBarrier2KHR is present in pNext.",
loc.dot(Field::srcAccessMask).Message().c_str());
}
if (barrier.dstAccessMask != 0) {
skip |= LogError(objects, "UNASSIGNED-CoreChecks-VkSubpassDependency2-dstAccessMask",
"%s dstAccessMask is non-zero when a VkMemoryBarrier2KHR is present in pNext.",
loc.dot(Field::dstAccessMask).Message().c_str());
}
            if (barrier.srcStageMask != 0) {
skip |= LogError(objects, "UNASSIGNED-CoreChecks-VkSubpassDependency2-srcStageMask",
"%s srcStageMask is non-zero when a VkMemoryBarrier2KHR is present in pNext.",
loc.dot(Field::srcStageMask).Message().c_str());
}
if (barrier.dstStageMask != 0) {
skip |= LogError(objects, "UNASSIGNED-CoreChecks-VkSubpassDependency2-dstStageMask",
"%s dstStageMask is non-zero when a VkMemoryBarrier2KHR is present in pNext.",
loc.dot(Field::dstStageMask).Message().c_str());
}
skip |= CoreChecks::ValidateMemoryBarrier(objects, loc.dot(Field::pNext), kGeneral, queue_flags, *mem_barrier);
return skip;
} else {
skip |= LogError(objects, "UNASSIGNED-CoreChecks-VkSubpassDependency2-pNext",
"%s a VkMemoryBarrier2KHR is present in pNext but synchronization2 is not enabled.",
loc.Message().c_str());
}
}
skip |= CoreChecks::ValidateMemoryBarrier(objects, loc, kGeneral, queue_flags, barrier);
return skip;
}
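// Determine the format features available to the view and check them against the requested usage. The feature
// source depends on how the image was created: AHB-backed images carry their own feature set, DRM format modifier
// images are queried per modifier, and everything else uses the view format's linear or optimal tiling features.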
bool CoreChecks::ValidateImageViewFormatFeatures(const IMAGE_STATE *image_state, const VkFormat view_format,
const VkImageUsageFlags image_usage) const {
// Pass in image_usage here instead of extracting it from image_state in case there's a chained VkImageViewUsageCreateInfo
bool skip = false;
VkFormatFeatureFlags tiling_features = VK_FORMAT_FEATURE_FLAG_BITS_MAX_ENUM;
const VkImageTiling image_tiling = image_state->createInfo.tiling;
if (image_state->has_ahb_format == true) {
// AHB image view and image share same feature sets
tiling_features = image_state->format_features;
} else if (image_tiling == VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT) {
// Parameter validation should catch if this is used without VK_EXT_image_drm_format_modifier
assert(device_extensions.vk_ext_image_drm_format_modifier);
VkImageDrmFormatModifierPropertiesEXT drm_format_properties = {VK_STRUCTURE_TYPE_IMAGE_DRM_FORMAT_MODIFIER_PROPERTIES_EXT,
nullptr};
DispatchGetImageDrmFormatModifierPropertiesEXT(device, image_state->image(), &drm_format_properties);
VkFormatProperties2 format_properties_2 = {VK_STRUCTURE_TYPE_FORMAT_PROPERTIES_2, nullptr};
VkDrmFormatModifierPropertiesListEXT drm_properties_list = {VK_STRUCTURE_TYPE_DRM_FORMAT_MODIFIER_PROPERTIES_LIST_EXT,
nullptr};
format_properties_2.pNext = (void *)&drm_properties_list;
DispatchGetPhysicalDeviceFormatProperties2(physical_device, view_format, &format_properties_2);
std::vector<VkDrmFormatModifierPropertiesEXT> drm_properties;
drm_properties.resize(drm_properties_list.drmFormatModifierCount);
drm_properties_list.pDrmFormatModifierProperties = drm_properties.data();
DispatchGetPhysicalDeviceFormatProperties2(physical_device, view_format, &format_properties_2);
        // Accumulate the tiling features for the image's DRM format modifier, starting from none so that an
        // unmatched modifier is reported as "no supported format features" below.
        tiling_features = 0;
        for (uint32_t i = 0; i < drm_properties_list.drmFormatModifierCount; i++) {
            // DRM format modifiers are opaque 64-bit identifiers, not bitmasks, so compare for equality
            // (a bitwise AND test would also mishandle DRM_FORMAT_MOD_LINEAR, whose value is 0).
            if (drm_properties_list.pDrmFormatModifierProperties[i].drmFormatModifier == drm_format_properties.drmFormatModifier) {
                tiling_features |= drm_properties_list.pDrmFormatModifierProperties[i].drmFormatModifierTilingFeatures;
            }
        }
} else {
VkFormatProperties format_properties = GetPDFormatProperties(view_format);
tiling_features = (image_tiling == VK_IMAGE_TILING_LINEAR) ? format_properties.linearTilingFeatures
: format_properties.optimalTilingFeatures;
}
if (tiling_features == 0) {
skip |= LogError(image_state->image(), "VUID-VkImageViewCreateInfo-None-02273",
"vkCreateImageView(): pCreateInfo->format %s with tiling %s has no supported format features on this "
"physical device.",
string_VkFormat(view_format), string_VkImageTiling(image_tiling));
} else if ((image_usage & VK_IMAGE_USAGE_SAMPLED_BIT) && !(tiling_features & VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT)) {
skip |= LogError(image_state->image(), "VUID-VkImageViewCreateInfo-usage-02274",
"vkCreateImageView(): pCreateInfo->format %s with tiling %s does not support usage that includes "
"VK_IMAGE_USAGE_SAMPLED_BIT.",
string_VkFormat(view_format), string_VkImageTiling(image_tiling));
} else if ((image_usage & VK_IMAGE_USAGE_STORAGE_BIT) && !(tiling_features & VK_FORMAT_FEATURE_STORAGE_IMAGE_BIT)) {
skip |= LogError(image_state->image(), "VUID-VkImageViewCreateInfo-usage-02275",
"vkCreateImageView(): pCreateInfo->format %s with tiling %s does not support usage that includes "
"VK_IMAGE_USAGE_STORAGE_BIT.",
string_VkFormat(view_format), string_VkImageTiling(image_tiling));
} else if ((image_usage & VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT) && !(tiling_features & VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT)) {
skip |= LogError(image_state->image(), "VUID-VkImageViewCreateInfo-usage-02276",
"vkCreateImageView(): pCreateInfo->format %s with tiling %s does not support usage that includes "
"VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT.",
string_VkFormat(view_format), string_VkImageTiling(image_tiling));
} else if ((image_usage & VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT) &&
!(tiling_features & VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT)) {
skip |= LogError(image_state->image(), "VUID-VkImageViewCreateInfo-usage-02277",
"vkCreateImageView(): pCreateInfo->format %s with tiling %s does not support usage that includes "
"VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT.",
string_VkFormat(view_format), string_VkImageTiling(image_tiling));
} else if ((image_usage & VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT) &&
!(tiling_features & (VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT | VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT))) {
skip |= LogError(image_state->image(), "VUID-VkImageViewCreateInfo-usage-02652",
"vkCreateImageView(): pCreateInfo->format %s with tiling %s does not support usage that includes "
"VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT or VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT.",
string_VkFormat(view_format), string_VkImageTiling(image_tiling));
} else if ((image_usage & VK_IMAGE_USAGE_FRAGMENT_SHADING_RATE_ATTACHMENT_BIT_KHR) &&
!(tiling_features & VK_FORMAT_FEATURE_FRAGMENT_SHADING_RATE_ATTACHMENT_BIT_KHR)) {
if (enabled_features.fragment_shading_rate_features.attachmentFragmentShadingRate) {
skip |= LogError(image_state->image(), "VUID-VkImageViewCreateInfo-usage-04550",
"vkCreateImageView(): pCreateInfo->format %s with tiling %s does not support usage that includes "
"VK_FORMAT_FEATURE_FRAGMENT_SHADING_RATE_ATTACHMENT_BIT_KHR.",
string_VkFormat(view_format), string_VkImageTiling(image_tiling));
}
}
return skip;
}
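// vkCreateImageView validation: usage flags, bound memory, subresource range, usage overrides via
// VkImageViewUsageCreateInfo, format compatibility (VkImageFormatListCreateInfo, multi-planar and
// MUTABLE_FORMAT rules), view type vs image type, format feature support, and extension/feature specific
// rules (shading rate, fragment density map, ASTC decode mode, portability subset).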
bool CoreChecks::PreCallValidateCreateImageView(VkDevice device, const VkImageViewCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkImageView *pView) const {
bool skip = false;
const IMAGE_STATE *image_state = GetImageState(pCreateInfo->image);
if (image_state) {
skip |=
ValidateImageUsageFlags(image_state,
VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_STORAGE_BIT | VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT |
VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT |
VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT | VK_IMAGE_USAGE_SHADING_RATE_IMAGE_BIT_NV |
VK_IMAGE_USAGE_FRAGMENT_DENSITY_MAP_BIT_EXT,
false, "VUID-VkImageViewCreateInfo-image-04441", "vkCreateImageView()",
"VK_IMAGE_USAGE_[SAMPLED|STORAGE|COLOR_ATTACHMENT|DEPTH_STENCIL_ATTACHMENT|INPUT_ATTACHMENT|"
"TRANSIENT_ATTACHMENT|SHADING_RATE_IMAGE|FRAGMENT_DENSITY_MAP]_BIT");
// If this isn't a sparse image, it needs to have memory backing it at CreateImageView time
skip |= ValidateMemoryIsBoundToImage(image_state, "vkCreateImageView()", "VUID-VkImageViewCreateInfo-image-01020");
// Checks imported from image layer
skip |= ValidateCreateImageViewSubresourceRange(
image_state, pCreateInfo->viewType == VK_IMAGE_VIEW_TYPE_2D || pCreateInfo->viewType == VK_IMAGE_VIEW_TYPE_2D_ARRAY,
pCreateInfo->subresourceRange);
VkImageCreateFlags image_flags = image_state->createInfo.flags;
VkFormat image_format = image_state->createInfo.format;
VkImageUsageFlags image_usage = image_state->createInfo.usage;
VkFormat view_format = pCreateInfo->format;
VkImageAspectFlags aspect_mask = pCreateInfo->subresourceRange.aspectMask;
VkImageType image_type = image_state->createInfo.imageType;
VkImageViewType view_type = pCreateInfo->viewType;
// If there's a chained VkImageViewUsageCreateInfo struct, modify image_usage to match
auto chained_ivuci_struct = LvlFindInChain<VkImageViewUsageCreateInfo>(pCreateInfo->pNext);
if (chained_ivuci_struct) {
if (device_extensions.vk_khr_maintenance2) {
if (!device_extensions.vk_ext_separate_stencil_usage) {
if ((image_usage | chained_ivuci_struct->usage) != image_usage) {
skip |= LogError(pCreateInfo->image, "VUID-VkImageViewCreateInfo-pNext-02661",
"vkCreateImageView(): pNext chain includes VkImageViewUsageCreateInfo, usage must not "
"include any bits that were not set in VkImageCreateInfo::usage used to create image");
}
} else {
const auto image_stencil_struct = LvlFindInChain<VkImageStencilUsageCreateInfo>(image_state->createInfo.pNext);
if (image_stencil_struct == nullptr) {
if ((image_usage | chained_ivuci_struct->usage) != image_usage) {
skip |= LogError(
pCreateInfo->image, "VUID-VkImageViewCreateInfo-pNext-02662",
"vkCreateImageView(): pNext chain includes VkImageViewUsageCreateInfo and image was not created "
"with a VkImageStencilUsageCreateInfo in pNext of vkImageCreateInfo, usage must not include "
"any bits that were not set in VkImageCreateInfo::usage used to create image");
}
} else {
if ((aspect_mask & VK_IMAGE_ASPECT_STENCIL_BIT) == VK_IMAGE_ASPECT_STENCIL_BIT &&
(image_stencil_struct->stencilUsage | chained_ivuci_struct->usage) !=
image_stencil_struct->stencilUsage) {
skip |= LogError(
pCreateInfo->image, "VUID-VkImageViewCreateInfo-pNext-02663",
"vkCreateImageView(): pNext chain includes VkImageViewUsageCreateInfo, image was created with a "
"VkImageStencilUsageCreateInfo in pNext of vkImageCreateInfo, and subResourceRange.aspectMask "
"includes VK_IMAGE_ASPECT_STENCIL_BIT, VkImageViewUsageCreateInfo::usage must not include any "
"bits that were not set in VkImageStencilUsageCreateInfo::stencilUsage used to create image");
}
if ((aspect_mask & ~VK_IMAGE_ASPECT_STENCIL_BIT) != 0 &&
(image_usage | chained_ivuci_struct->usage) != image_usage) {
skip |= LogError(
pCreateInfo->image, "VUID-VkImageViewCreateInfo-pNext-02664",
"vkCreateImageView(): pNext chain includes VkImageViewUsageCreateInfo, image was created with a "
"VkImageStencilUsageCreateInfo in pNext of vkImageCreateInfo, and subResourceRange.aspectMask "
"includes bits other than VK_IMAGE_ASPECT_STENCIL_BIT, VkImageViewUsageCreateInfo::usage must not "
"include any bits that were not set in VkImageCreateInfo::usage used to create image");
}
}
}
}
image_usage = chained_ivuci_struct->usage;
}
// If image used VkImageFormatListCreateInfo need to make sure a format from list is used
const auto format_list_info = LvlFindInChain<VkImageFormatListCreateInfo>(image_state->createInfo.pNext);
if (format_list_info && (format_list_info->viewFormatCount > 0)) {
            bool found_format = false;
            for (uint32_t i = 0; i < format_list_info->viewFormatCount; i++) {
                if (format_list_info->pViewFormats[i] == view_format) {
                    found_format = true;
                    break;
                }
            }
            if (found_format == false) {
skip |= LogError(pCreateInfo->image, "VUID-VkImageViewCreateInfo-pNext-01585",
"vkCreateImageView(): image was created with a VkImageFormatListCreateInfo in pNext of "
"vkImageCreateInfo, but none of the formats match the VkImageViewCreateInfo::format (%s).",
string_VkFormat(view_format));
}
}
// Validate VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT state, if view/image formats differ
if ((image_flags & VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT) && (image_format != view_format)) {
if (FormatIsMultiplane(image_format)) {
VkFormat compat_format = FindMultiplaneCompatibleFormat(image_format, aspect_mask);
if (view_format != compat_format) {
// View format must match the multiplane compatible format
std::stringstream ss;
ss << "vkCreateImageView(): ImageView format " << string_VkFormat(view_format)
<< " is not compatible with plane " << GetPlaneIndex(aspect_mask) << " of underlying image format "
<< string_VkFormat(image_format) << ", must be " << string_VkFormat(compat_format) << ".";
skip |= LogError(pCreateInfo->image, "VUID-VkImageViewCreateInfo-image-01586", "%s", ss.str().c_str());
}
} else {
if (!(image_flags & VK_IMAGE_CREATE_BLOCK_TEXEL_VIEW_COMPATIBLE_BIT)) {
// Format MUST be compatible (in the same format compatibility class) as the format the image was created with
                    const VkFormatCompatibilityClass image_class = FormatCompatibilityClass(image_format);
                    const VkFormatCompatibilityClass view_class = FormatCompatibilityClass(view_format);
                    // Need to only check if one is NONE_BIT to handle edge case both are NONE_BIT
                    if ((image_class != view_class) || (image_class == VK_FORMAT_COMPATIBILITY_CLASS_NONE_BIT)) {
const char *error_vuid;
if ((!device_extensions.vk_khr_maintenance2) && (!device_extensions.vk_khr_sampler_ycbcr_conversion)) {
error_vuid = "VUID-VkImageViewCreateInfo-image-01018";
} else if ((device_extensions.vk_khr_maintenance2) &&
(!device_extensions.vk_khr_sampler_ycbcr_conversion)) {
error_vuid = "VUID-VkImageViewCreateInfo-image-01759";
} else if ((!device_extensions.vk_khr_maintenance2) &&
(device_extensions.vk_khr_sampler_ycbcr_conversion)) {
error_vuid = "VUID-VkImageViewCreateInfo-image-01760";
} else {
// both enabled
error_vuid = "VUID-VkImageViewCreateInfo-image-01761";
}
std::stringstream ss;
ss << "vkCreateImageView(): ImageView format " << string_VkFormat(view_format)
<< " is not in the same format compatibility class as "
<< report_data->FormatHandle(pCreateInfo->image).c_str() << " format " << string_VkFormat(image_format)
                           << ". Images created with the VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT "
<< "can support ImageViews with differing formats but they must be in the same compatibility class.";
skip |= LogError(pCreateInfo->image, error_vuid, "%s", ss.str().c_str());
}
}
}
} else {
// Format MUST be IDENTICAL to the format the image was created with
// Unless it is a multi-planar color bit aspect
if ((image_format != view_format) &&
((FormatIsMultiplane(image_format) == false) || (aspect_mask != VK_IMAGE_ASPECT_COLOR_BIT))) {
const char *vuid = (device_extensions.vk_khr_sampler_ycbcr_conversion) ? "VUID-VkImageViewCreateInfo-image-01762"
: "VUID-VkImageViewCreateInfo-image-01019";
std::stringstream ss;
ss << "vkCreateImageView() format " << string_VkFormat(view_format) << " differs from "
<< report_data->FormatHandle(pCreateInfo->image).c_str() << " format " << string_VkFormat(image_format)
                   << ". Formats MUST be IDENTICAL unless VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT was set on image creation.";
skip |= LogError(pCreateInfo->image, vuid, "%s", ss.str().c_str());
}
}
// Validate correct image aspect bits for desired formats and format consistency
skip |= ValidateImageAspectMask(image_state->image(), image_format, aspect_mask, "vkCreateImageView()");
switch (image_type) {
case VK_IMAGE_TYPE_1D:
if (view_type != VK_IMAGE_VIEW_TYPE_1D && view_type != VK_IMAGE_VIEW_TYPE_1D_ARRAY) {
skip |= LogError(pCreateInfo->image, "VUID-VkImageViewCreateInfo-subResourceRange-01021",
"vkCreateImageView(): pCreateInfo->viewType %s is not compatible with image type %s.",
string_VkImageViewType(view_type), string_VkImageType(image_type));
}
break;
case VK_IMAGE_TYPE_2D:
if (view_type != VK_IMAGE_VIEW_TYPE_2D && view_type != VK_IMAGE_VIEW_TYPE_2D_ARRAY) {
if ((view_type == VK_IMAGE_VIEW_TYPE_CUBE || view_type == VK_IMAGE_VIEW_TYPE_CUBE_ARRAY) &&
!(image_flags & VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT)) {
skip |= LogError(pCreateInfo->image, "VUID-VkImageViewCreateInfo-image-01003",
"vkCreateImageView(): pCreateInfo->viewType %s is not compatible with image type %s.",
string_VkImageViewType(view_type), string_VkImageType(image_type));
} else if (view_type != VK_IMAGE_VIEW_TYPE_CUBE && view_type != VK_IMAGE_VIEW_TYPE_CUBE_ARRAY) {
skip |= LogError(pCreateInfo->image, "VUID-VkImageViewCreateInfo-subResourceRange-01021",
"vkCreateImageView(): pCreateInfo->viewType %s is not compatible with image type %s.",
string_VkImageViewType(view_type), string_VkImageType(image_type));
}
}
break;
case VK_IMAGE_TYPE_3D:
if (device_extensions.vk_khr_maintenance1) {
if (view_type != VK_IMAGE_VIEW_TYPE_3D) {
if ((view_type == VK_IMAGE_VIEW_TYPE_2D || view_type == VK_IMAGE_VIEW_TYPE_2D_ARRAY)) {
if (!(image_flags & VK_IMAGE_CREATE_2D_ARRAY_COMPATIBLE_BIT)) {
skip |=
LogError(pCreateInfo->image, "VUID-VkImageViewCreateInfo-image-01005",
"vkCreateImageView(): pCreateInfo->viewType %s is not compatible with image type %s.",
string_VkImageViewType(view_type), string_VkImageType(image_type));
} else if ((image_flags & (VK_IMAGE_CREATE_SPARSE_BINDING_BIT | VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT |
VK_IMAGE_CREATE_SPARSE_ALIASED_BIT))) {
skip |= LogError(
pCreateInfo->image, "VUID-VkImageViewCreateInfo-subResourceRange-01021",
"vkCreateImageView(): pCreateInfo->viewType %s is not compatible with image type %s "
"when the VK_IMAGE_CREATE_SPARSE_BINDING_BIT, VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT, or "
"VK_IMAGE_CREATE_SPARSE_ALIASED_BIT flags are enabled.",
string_VkImageViewType(view_type), string_VkImageType(image_type));
}
} else {
skip |= LogError(pCreateInfo->image, "VUID-VkImageViewCreateInfo-subResourceRange-01021",
"vkCreateImageView(): pCreateInfo->viewType %s is not compatible with image type %s.",
string_VkImageViewType(view_type), string_VkImageType(image_type));
}
}
} else {
if (view_type != VK_IMAGE_VIEW_TYPE_3D) {
skip |= LogError(pCreateInfo->image, "VUID-VkImageViewCreateInfo-subResourceRange-01021",
"vkCreateImageView(): pCreateInfo->viewType %s is not compatible with image type %s.",
string_VkImageViewType(view_type), string_VkImageType(image_type));
}
}
break;
default:
break;
}
// External format checks needed when VK_ANDROID_external_memory_android_hardware_buffer enabled
if (device_extensions.vk_android_external_memory_android_hardware_buffer) {
skip |= ValidateCreateImageViewANDROID(pCreateInfo);
}
skip |= ValidateImageViewFormatFeatures(image_state, view_format, image_usage);
if (enabled_features.shading_rate_image.shadingRateImage) {
if (image_usage & VK_IMAGE_USAGE_SHADING_RATE_IMAGE_BIT_NV) {
if (view_format != VK_FORMAT_R8_UINT) {
                skip |= LogError(pCreateInfo->image, "VUID-VkImageViewCreateInfo-image-02087",
                                 "vkCreateImageView(): If image was created with usage containing "
                                 "VK_IMAGE_USAGE_SHADING_RATE_IMAGE_BIT_NV, format must be VK_FORMAT_R8_UINT.");
}
}
}
if (enabled_features.shading_rate_image.shadingRateImage ||
enabled_features.fragment_shading_rate_features.attachmentFragmentShadingRate) {
if (image_usage & VK_IMAGE_USAGE_FRAGMENT_SHADING_RATE_ATTACHMENT_BIT_KHR) {
if (view_type != VK_IMAGE_VIEW_TYPE_2D && view_type != VK_IMAGE_VIEW_TYPE_2D_ARRAY) {
                skip |= LogError(pCreateInfo->image, "VUID-VkImageViewCreateInfo-image-02086",
                                 "vkCreateImageView(): If image was created with usage containing "
                                 "VK_IMAGE_USAGE_FRAGMENT_SHADING_RATE_ATTACHMENT_BIT_KHR, viewType must be "
                                 "VK_IMAGE_VIEW_TYPE_2D or VK_IMAGE_VIEW_TYPE_2D_ARRAY.");
}
}
}
if (enabled_features.fragment_shading_rate_features.attachmentFragmentShadingRate &&
!phys_dev_ext_props.fragment_shading_rate_props.layeredShadingRateAttachments &&
image_usage & VK_IMAGE_USAGE_FRAGMENT_SHADING_RATE_ATTACHMENT_BIT_KHR &&
pCreateInfo->subresourceRange.layerCount != 1) {
skip |= LogError(device, "VUID-VkImageViewCreateInfo-usage-04551",
"vkCreateImageView(): subresourceRange.layerCount is %u for a shading rate attachment image view.",
pCreateInfo->subresourceRange.layerCount);
}
if (pCreateInfo->subresourceRange.layerCount == VK_REMAINING_ARRAY_LAYERS) {
if (pCreateInfo->viewType == VK_IMAGE_VIEW_TYPE_CUBE &&
image_state->createInfo.arrayLayers - pCreateInfo->subresourceRange.baseArrayLayer != 6) {
skip |= LogError(device, "VUID-VkImageViewCreateInfo-viewType-02962",
"vkCreateImageView(): subresourceRange.layerCount VK_REMAINING_ARRAY_LAYERS=(%d) must be 6",
image_state->createInfo.arrayLayers - pCreateInfo->subresourceRange.baseArrayLayer);
}
if (pCreateInfo->viewType == VK_IMAGE_VIEW_TYPE_CUBE_ARRAY &&
((image_state->createInfo.arrayLayers - pCreateInfo->subresourceRange.baseArrayLayer) % 6) != 0) {
skip |= LogError(
device, "VUID-VkImageViewCreateInfo-viewType-02963",
"vkCreateImageView(): subresourceRange.layerCount VK_REMAINING_ARRAY_LAYERS=(%d) must be a multiple of 6",
image_state->createInfo.arrayLayers - pCreateInfo->subresourceRange.baseArrayLayer);
}
}
if (image_usage & VK_IMAGE_USAGE_FRAGMENT_DENSITY_MAP_BIT_EXT) {
if (pCreateInfo->subresourceRange.levelCount != 1) {
                skip |= LogError(pCreateInfo->image, "VUID-VkImageViewCreateInfo-image-02571",
                                 "vkCreateImageView(): If image was created with usage containing "
                                 "VK_IMAGE_USAGE_FRAGMENT_DENSITY_MAP_BIT_EXT, subresourceRange.levelCount (%d) must be 1",
                                 pCreateInfo->subresourceRange.levelCount);
}
}
if (pCreateInfo->flags & VK_IMAGE_VIEW_CREATE_FRAGMENT_DENSITY_MAP_DYNAMIC_BIT_EXT) {
if (!enabled_features.fragment_density_map_features.fragmentDensityMapDynamic) {
skip |= LogError(pCreateInfo->image, "VUID-VkImageViewCreateInfo-flags-02572",
"vkCreateImageView(): If the fragmentDensityMapDynamic feature is not enabled, "
"flags must not contain VK_IMAGE_VIEW_CREATE_FRAGMENT_DENSITY_MAP_DYNAMIC_BIT_EXT");
}
} else {
if (image_usage & VK_IMAGE_USAGE_FRAGMENT_DENSITY_MAP_BIT_EXT) {
if (image_flags & (VK_IMAGE_CREATE_PROTECTED_BIT | VK_IMAGE_CREATE_SPARSE_BINDING_BIT |
VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT | VK_IMAGE_CREATE_SPARSE_ALIASED_BIT)) {
skip |= LogError(pCreateInfo->image, "VUID-VkImageViewCreateInfo-flags-04116",
"vkCreateImageView(): If image was created with usage containing "
"VK_IMAGE_USAGE_FRAGMENT_DENSITY_MAP_BIT_EXT flags must not contain any of "
"VK_IMAGE_CREATE_PROTECTED_BIT, VK_IMAGE_CREATE_SPARSE_BINDING_BIT, "
"VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT, or VK_IMAGE_CREATE_SPARSE_ALIASED_BIT");
}
}
}
if (pCreateInfo->flags & VK_IMAGE_VIEW_CREATE_FRAGMENT_DENSITY_MAP_DEFERRED_BIT_EXT) {
if (!enabled_features.fragment_density_map2_features.fragmentDensityMapDeferred) {
skip |= LogError(pCreateInfo->image, "VUID-VkImageViewCreateInfo-flags-03567",
"vkCreateImageView(): If the fragmentDensityMapDeferred feature is not enabled, "
"flags must not contain VK_IMAGE_VIEW_CREATE_FRAGMENT_DENSITY_MAP_DEFERRED_BIT_EXT");
}
if (pCreateInfo->flags & VK_IMAGE_VIEW_CREATE_FRAGMENT_DENSITY_MAP_DYNAMIC_BIT_EXT) {
skip |=
LogError(pCreateInfo->image, "VUID-VkImageViewCreateInfo-flags-03568",
"vkCreateImageView(): If flags contains VK_IMAGE_VIEW_CREATE_FRAGMENT_DENSITY_MAP_DEFERRED_BIT_EXT, "
"flags must not contain VK_IMAGE_VIEW_CREATE_FRAGMENT_DENSITY_MAP_DYNAMIC_BIT_EXT");
}
}
if (device_extensions.vk_ext_fragment_density_map_2) {
if ((image_flags & VK_IMAGE_CREATE_SUBSAMPLED_BIT_EXT) && (image_usage & VK_IMAGE_USAGE_SAMPLED_BIT) &&
(pCreateInfo->subresourceRange.layerCount >
phys_dev_ext_props.fragment_density_map2_props.maxSubsampledArrayLayers)) {
                skip |= LogError(pCreateInfo->image, "VUID-VkImageViewCreateInfo-image-03569",
                                 "vkCreateImageView(): If image was created with flags containing "
                                 "VK_IMAGE_CREATE_SUBSAMPLED_BIT_EXT and usage containing VK_IMAGE_USAGE_SAMPLED_BIT, "
                                 "subresourceRange.layerCount (%d) must be less than or equal to maxSubsampledArrayLayers (%d)",
                                 pCreateInfo->subresourceRange.layerCount,
                                 phys_dev_ext_props.fragment_density_map2_props.maxSubsampledArrayLayers);
}
}
auto astc_decode_mode = LvlFindInChain<VkImageViewASTCDecodeModeEXT>(pCreateInfo->pNext);
if ((device_extensions.vk_ext_astc_decode_mode) && (astc_decode_mode != nullptr)) {
if ((enabled_features.astc_decode_features.decodeModeSharedExponent == VK_FALSE) &&
(astc_decode_mode->decodeMode == VK_FORMAT_E5B9G9R9_UFLOAT_PACK32)) {
skip |= LogError(device, "VUID-VkImageViewASTCDecodeModeEXT-decodeMode-02231",
"vkCreateImageView(): decodeModeSharedExponent is not enabled but "
"VkImageViewASTCDecodeModeEXT::decodeMode is VK_FORMAT_E5B9G9R9_UFLOAT_PACK32.");
}
}
if (ExtEnabled::kNotEnabled != device_extensions.vk_khr_portability_subset) {
// If swizzling is disabled, make sure it isn't used
if ((VK_FALSE == enabled_features.portability_subset_features.imageViewFormatSwizzle) &&
(pCreateInfo->components.r != VK_COMPONENT_SWIZZLE_IDENTITY ||
pCreateInfo->components.g != VK_COMPONENT_SWIZZLE_IDENTITY ||
pCreateInfo->components.b != VK_COMPONENT_SWIZZLE_IDENTITY ||
pCreateInfo->components.a != VK_COMPONENT_SWIZZLE_IDENTITY)) {
skip |= LogError(device, "VUID-VkImageViewCreateInfo-imageViewFormatSwizzle-04465",
"vkCreateImageView (portability error): swizzle is disabled for this device.");
}
// Ensure ImageView's format has the same number of bits and components as Image's format if format reinterpretation is
// disabled
// TODO (ncesario): This is not correct for some cases (e.g., VK_FORMAT_B10G11R11_UFLOAT_PACK32 and
// VK_FORMAT_E5B9G9R9_UFLOAT_PACK32), but requires additional information that should probably be generated from the
// spec. See Github issue #2361.
if ((VK_FALSE == enabled_features.portability_subset_features.imageViewFormatReinterpretation) &&
((FormatElementSize(pCreateInfo->format, VK_IMAGE_ASPECT_COLOR_BIT) !=
FormatElementSize(image_state->createInfo.format, VK_IMAGE_ASPECT_COLOR_BIT)) ||
(FormatChannelCount(pCreateInfo->format) != FormatChannelCount(image_state->createInfo.format)))) {
skip |= LogError(device, "VUID-VkImageViewCreateInfo-imageViewFormatReinterpretation-04466",
"vkCreateImageView (portability error): ImageView format must have"
" the same number of components and bits per component as the Image's format");
}
}
}
return skip;
}
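// Bounds checks for vkCmdCopyBuffer / vkCmdCopyBuffer2KHR regions: every srcOffset/dstOffset must lie inside its
// buffer, every size must fit in the remaining space, and when src and dst are the same buffer the unions of the
// source and destination ranges must not overlap.
// Illustrative example (hypothetical values): with a 256-byte srcBuffer, a VkBufferCopy of
//   {/*srcOffset*/ 200, /*dstOffset*/ 0, /*size*/ 100}
// fails the size check (200 + 100 > 256) and is reported under the *-size-00115 VUID.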
template <typename RegionType>
bool CoreChecks::ValidateCmdCopyBufferBounds(const BUFFER_STATE *src_buffer_state, const BUFFER_STATE *dst_buffer_state,
uint32_t regionCount, const RegionType *pRegions, CopyCommandVersion version) const {
bool skip = false;
const bool is_2khr = (version == COPY_COMMAND_VERSION_2);
const char *func_name = is_2khr ? "vkCmdCopyBuffer2KHR()" : "vkCmdCopyBuffer()";
const char *vuid;
VkDeviceSize src_buffer_size = src_buffer_state->createInfo.size;
VkDeviceSize dst_buffer_size = dst_buffer_state->createInfo.size;
VkDeviceSize src_min = UINT64_MAX;
VkDeviceSize src_max = 0;
VkDeviceSize dst_min = UINT64_MAX;
VkDeviceSize dst_max = 0;
for (uint32_t i = 0; i < regionCount; i++) {
src_min = std::min(src_min, pRegions[i].srcOffset);
src_max = std::max(src_max, (pRegions[i].srcOffset + pRegions[i].size));
dst_min = std::min(dst_min, pRegions[i].dstOffset);
dst_max = std::max(dst_max, (pRegions[i].dstOffset + pRegions[i].size));
// The srcOffset member of each element of pRegions must be less than the size of srcBuffer
if (pRegions[i].srcOffset >= src_buffer_size) {
vuid = is_2khr ? "VUID-VkCopyBufferInfo2KHR-srcOffset-00113" : "VUID-vkCmdCopyBuffer-srcOffset-00113";
            skip |= LogError(src_buffer_state->buffer(), vuid,
                             "%s: pRegions[%d].srcOffset (%" PRIuLEAST64 ") is not less than the size of srcBuffer (%" PRIuLEAST64 ").",
                             func_name, i, pRegions[i].srcOffset, src_buffer_size);
}
// The dstOffset member of each element of pRegions must be less than the size of dstBuffer
if (pRegions[i].dstOffset >= dst_buffer_size) {
vuid = is_2khr ? "VUID-VkCopyBufferInfo2KHR-dstOffset-00114" : "VUID-vkCmdCopyBuffer-dstOffset-00114";
            skip |= LogError(dst_buffer_state->buffer(), vuid,
                             "%s: pRegions[%d].dstOffset (%" PRIuLEAST64 ") is not less than the size of dstBuffer (%" PRIuLEAST64 ").",
                             func_name, i, pRegions[i].dstOffset, dst_buffer_size);
}
// The size member of each element of pRegions must be less than or equal to the size of srcBuffer minus srcOffset
if (pRegions[i].size > (src_buffer_size - pRegions[i].srcOffset)) {
vuid = is_2khr ? "VUID-VkCopyBufferInfo2KHR-size-00115" : "VUID-vkCmdCopyBuffer-size-00115";
skip |= LogError(src_buffer_state->buffer(), vuid,
"%s: pRegions[%d].size (%" PRIuLEAST64 ") is greater than the source buffer size (%" PRIuLEAST64
") minus pRegions[%d].srcOffset (%" PRIuLEAST64 ").",
func_name, i, pRegions[i].size, src_buffer_size, i, pRegions[i].srcOffset);
}
// The size member of each element of pRegions must be less than or equal to the size of dstBuffer minus dstOffset
if (pRegions[i].size > (dst_buffer_size - pRegions[i].dstOffset)) {
vuid = is_2khr ? "VUID-VkCopyBufferInfo2KHR-size-00116" : "VUID-vkCmdCopyBuffer-size-00116";
skip |= LogError(dst_buffer_state->buffer(), vuid,
"%s: pRegions[%d].size (%" PRIuLEAST64 ") is greater than the destination buffer size (%" PRIuLEAST64
") minus pRegions[%d].dstOffset (%" PRIuLEAST64 ").",
func_name, i, pRegions[i].size, dst_buffer_size, i, pRegions[i].dstOffset);
}
}
// The union of the source regions, and the union of the destination regions, must not overlap in memory
if (src_buffer_state->buffer() == dst_buffer_state->buffer()) {
if (((src_min > dst_min) && (src_min < dst_max)) || ((src_max > dst_min) && (src_max < dst_max))) {
vuid = is_2khr ? "VUID-VkCopyBufferInfo2KHR-pRegions-00117" : "VUID-vkCmdCopyBuffer-pRegions-00117";
skip |= LogError(src_buffer_state->buffer(), vuid, "%s: Detected overlap between source and dest regions in memory.",
func_name);
}
}
return skip;
}
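// vkCmdCopyBuffer: both buffers need bound memory and the matching TRANSFER_SRC / TRANSFER_DST usage flags; region
// bounds are validated by ValidateCmdCopyBufferBounds above, and the protected-memory checks enforce the
// commandBuffer-0182x protected/unprotected rules.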
bool CoreChecks::PreCallValidateCmdCopyBuffer(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkBuffer dstBuffer,
uint32_t regionCount, const VkBufferCopy *pRegions) const {
const auto cb_node = GetCBState(commandBuffer);
const auto src_buffer_state = GetBufferState(srcBuffer);
const auto dst_buffer_state = GetBufferState(dstBuffer);
bool skip = false;
skip |= ValidateMemoryIsBoundToBuffer(src_buffer_state, "vkCmdCopyBuffer()", "VUID-vkCmdCopyBuffer-srcBuffer-00119");
skip |= ValidateMemoryIsBoundToBuffer(dst_buffer_state, "vkCmdCopyBuffer()", "VUID-vkCmdCopyBuffer-dstBuffer-00121");
// Validate that SRC & DST buffers have correct usage flags set
skip |=
ValidateBufferUsageFlags(src_buffer_state, VK_BUFFER_USAGE_TRANSFER_SRC_BIT, true, "VUID-vkCmdCopyBuffer-srcBuffer-00118",
"vkCmdCopyBuffer()", "VK_BUFFER_USAGE_TRANSFER_SRC_BIT");
skip |=
ValidateBufferUsageFlags(dst_buffer_state, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true, "VUID-vkCmdCopyBuffer-dstBuffer-00120",
"vkCmdCopyBuffer()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
skip |= ValidateCmd(cb_node, CMD_COPYBUFFER, "vkCmdCopyBuffer()");
skip |= ValidateCmdCopyBufferBounds(src_buffer_state, dst_buffer_state, regionCount, pRegions, COPY_COMMAND_VERSION_1);
skip |= ValidateProtectedBuffer(cb_node, src_buffer_state, "vkCmdCopyBuffer()", "VUID-vkCmdCopyBuffer-commandBuffer-01822");
skip |= ValidateProtectedBuffer(cb_node, dst_buffer_state, "vkCmdCopyBuffer()", "VUID-vkCmdCopyBuffer-commandBuffer-01823");
skip |= ValidateUnprotectedBuffer(cb_node, dst_buffer_state, "vkCmdCopyBuffer()", "VUID-vkCmdCopyBuffer-commandBuffer-01824");
return skip;
}
bool CoreChecks::PreCallValidateCmdCopyBuffer2KHR(VkCommandBuffer commandBuffer,
const VkCopyBufferInfo2KHR *pCopyBufferInfos) const {
const auto cb_node = GetCBState(commandBuffer);
const auto src_buffer_state = GetBufferState(pCopyBufferInfos->srcBuffer);
const auto dst_buffer_state = GetBufferState(pCopyBufferInfos->dstBuffer);
bool skip = false;
skip |= ValidateMemoryIsBoundToBuffer(src_buffer_state, "vkCmdCopyBuffer2KHR()", "VUID-VkCopyBufferInfo2KHR-srcBuffer-00119");
skip |= ValidateMemoryIsBoundToBuffer(dst_buffer_state, "vkCmdCopyBuffer2KHR()", "VUID-VkCopyBufferInfo2KHR-dstBuffer-00121");
// Validate that SRC & DST buffers have correct usage flags set
skip |= ValidateBufferUsageFlags(src_buffer_state, VK_BUFFER_USAGE_TRANSFER_SRC_BIT, true,
"VUID-VkCopyBufferInfo2KHR-srcBuffer-00118", "vkCmdCopyBuffer2KHR()",
"VK_BUFFER_USAGE_TRANSFER_SRC_BIT");
skip |= ValidateBufferUsageFlags(dst_buffer_state, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true,
"VUID-VkCopyBufferInfo2KHR-dstBuffer-00120", "vkCmdCopyBuffer2KHR()",
"VK_BUFFER_USAGE_TRANSFER_DST_BIT");
skip |= ValidateCmd(cb_node, CMD_COPYBUFFER2KHR, "vkCmdCopyBuffer2KHR()");
skip |= ValidateCmdCopyBufferBounds(src_buffer_state, dst_buffer_state, pCopyBufferInfos->regionCount,
pCopyBufferInfos->pRegions, COPY_COMMAND_VERSION_2);
skip |=
ValidateProtectedBuffer(cb_node, src_buffer_state, "vkCmdCopyBuffer2KHR()", "VUID-vkCmdCopyBuffer2KHR-commandBuffer-01822");
skip |=
ValidateProtectedBuffer(cb_node, dst_buffer_state, "vkCmdCopyBuffer2KHR()", "VUID-vkCmdCopyBuffer2KHR-commandBuffer-01823");
skip |= ValidateUnprotectedBuffer(cb_node, dst_buffer_state, "vkCmdCopyBuffer2KHR()",
"VUID-vkCmdCopyBuffer2KHR-commandBuffer-01824");
return skip;
}
bool CoreChecks::ValidateIdleBuffer(VkBuffer buffer) const {
bool skip = false;
auto buffer_state = GetBufferState(buffer);
if (buffer_state) {
if (buffer_state->InUse()) {
skip |= LogError(buffer, "VUID-vkDestroyBuffer-buffer-00922", "Cannot free %s that is in use by a command buffer.",
report_data->FormatHandle(buffer).c_str());
}
}
return skip;
}
bool CoreChecks::PreCallValidateDestroyImageView(VkDevice device, VkImageView imageView,
const VkAllocationCallbacks *pAllocator) const {
const IMAGE_VIEW_STATE *image_view_state = GetImageViewState(imageView);
bool skip = false;
if (image_view_state) {
skip |=
ValidateObjectNotInUse(image_view_state, "vkDestroyImageView", "VUID-vkDestroyImageView-imageView-01026");
}
return skip;
}
bool CoreChecks::PreCallValidateDestroyBuffer(VkDevice device, VkBuffer buffer, const VkAllocationCallbacks *pAllocator) const {
auto buffer_state = GetBufferState(buffer);
bool skip = false;
if (buffer_state) {
skip |= ValidateIdleBuffer(buffer);
}
return skip;
}
bool CoreChecks::PreCallValidateDestroyBufferView(VkDevice device, VkBufferView bufferView,
const VkAllocationCallbacks *pAllocator) const {
auto buffer_view_state = GetBufferViewState(bufferView);
bool skip = false;
if (buffer_view_state) {
skip |= ValidateObjectNotInUse(buffer_view_state, "vkDestroyBufferView",
"VUID-vkDestroyBufferView-bufferView-00936");
}
return skip;
}
bool CoreChecks::PreCallValidateCmdFillBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset,
VkDeviceSize size, uint32_t data) const {
auto cb_node = GetCBState(commandBuffer);
auto buffer_state = GetBufferState(dstBuffer);
bool skip = false;
skip |= ValidateMemoryIsBoundToBuffer(buffer_state, "vkCmdFillBuffer()", "VUID-vkCmdFillBuffer-dstBuffer-00031");
skip |= ValidateCmd(cb_node, CMD_FILLBUFFER, "vkCmdFillBuffer()");
// Validate that DST buffer has correct usage flags set
skip |= ValidateBufferUsageFlags(buffer_state, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true, "VUID-vkCmdFillBuffer-dstBuffer-00029",
"vkCmdFillBuffer()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
skip |= ValidateProtectedBuffer(cb_node, buffer_state, "vkCmdFillBuffer()", "VUID-vkCmdFillBuffer-commandBuffer-01811");
skip |= ValidateUnprotectedBuffer(cb_node, buffer_state, "vkCmdFillBuffer()", "VUID-vkCmdFillBuffer-commandBuffer-01812");
if (dstOffset >= buffer_state->createInfo.size) {
skip |= LogError(dstBuffer, "VUID-vkCmdFillBuffer-dstOffset-00024",
"vkCmdFillBuffer(): dstOffset (0x%" PRIxLEAST64
") is not less than destination buffer (%s) size (0x%" PRIxLEAST64 ").",
dstOffset, report_data->FormatHandle(dstBuffer).c_str(), buffer_state->createInfo.size);
}
if ((size != VK_WHOLE_SIZE) && (size > (buffer_state->createInfo.size - dstOffset))) {
skip |= LogError(dstBuffer, "VUID-vkCmdFillBuffer-size-00027",
"vkCmdFillBuffer(): size (0x%" PRIxLEAST64 ") is greater than dstBuffer (%s) size (0x%" PRIxLEAST64
") minus dstOffset (0x%" PRIxLEAST64 ").",
size, report_data->FormatHandle(dstBuffer).c_str(), buffer_state->createInfo.size, dstOffset);
}
return skip;
}
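// Per-region checks shared by the buffer<->image copy commands: 1D/2D/3D dimensionality constraints, bufferOffset
// alignment (4 bytes for depth/stencil formats, otherwise the texel or texel-block size), bufferRowLength /
// bufferImageHeight vs imageExtent, offsets and extents within the subresource, a single aspect bit that exists in
// the image format, and block alignment rules for compressed and single-plane 422 formats.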
template <typename BufferImageCopyRegionType>
bool CoreChecks::ValidateBufferImageCopyData(const CMD_BUFFER_STATE *cb_node, uint32_t regionCount,
const BufferImageCopyRegionType *pRegions, const IMAGE_STATE *image_state,
const char *function, CopyCommandVersion version, bool image_to_buffer) const {
bool skip = false;
const bool is_2khr = (version == COPY_COMMAND_VERSION_2);
const char *vuid;
assert(image_state != nullptr);
const VkFormat image_format = image_state->createInfo.format;
for (uint32_t i = 0; i < regionCount; i++) {
const VkImageAspectFlags region_aspect_mask = pRegions[i].imageSubresource.aspectMask;
if (image_state->createInfo.imageType == VK_IMAGE_TYPE_1D) {
if ((pRegions[i].imageOffset.y != 0) || (pRegions[i].imageExtent.height != 1)) {
skip |= LogError(image_state->image(), GetBufferImageCopyCommandVUID("00199", image_to_buffer, is_2khr),
"%s: pRegion[%d] imageOffset.y is %d and imageExtent.height is %d. For 1D images these must be 0 "
"and 1, respectively.",
function, i, pRegions[i].imageOffset.y, pRegions[i].imageExtent.height);
}
}
if ((image_state->createInfo.imageType == VK_IMAGE_TYPE_1D) || (image_state->createInfo.imageType == VK_IMAGE_TYPE_2D)) {
if ((pRegions[i].imageOffset.z != 0) || (pRegions[i].imageExtent.depth != 1)) {
skip |= LogError(image_state->image(), GetBufferImageCopyCommandVUID("00201", image_to_buffer, is_2khr),
"%s: pRegion[%d] imageOffset.z is %d and imageExtent.depth is %d. For 1D and 2D images these "
"must be 0 and 1, respectively.",
function, i, pRegions[i].imageOffset.z, pRegions[i].imageExtent.depth);
}
}
if (image_state->createInfo.imageType == VK_IMAGE_TYPE_3D) {
if ((0 != pRegions[i].imageSubresource.baseArrayLayer) || (1 != pRegions[i].imageSubresource.layerCount)) {
skip |= LogError(image_state->image(), GetBufferImageCopyCommandVUID("00213", image_to_buffer, is_2khr),
"%s: pRegion[%d] imageSubresource.baseArrayLayer is %d and imageSubresource.layerCount is %d. "
"For 3D images these must be 0 and 1, respectively.",
function, i, pRegions[i].imageSubresource.baseArrayLayer, pRegions[i].imageSubresource.layerCount);
}
}
        // If the calling command's VkImage parameter's format is not a depth/stencil format,
// then bufferOffset must be a multiple of the calling command's VkImage parameter's element size
const uint32_t element_size = FormatElementSize(image_format, region_aspect_mask);
const VkDeviceSize bufferOffset = pRegions[i].bufferOffset;
if (FormatIsDepthOrStencil(image_format)) {
if (SafeModulo(bufferOffset, 4) != 0) {
                skip |= LogError(image_state->image(), GetBufferImageCopyCommandVUID("04053", image_to_buffer, is_2khr),
                                 "%s: pRegion[%d] bufferOffset 0x%" PRIxLEAST64
                                 " must be a multiple of 4 if using a depth/stencil format (%s).",
                                 function, i, bufferOffset, string_VkFormat(image_format));
}
} else {
// If not depth/stencil and not multi-plane
if (!FormatIsMultiplane(image_format) && (SafeModulo(bufferOffset, element_size) != 0)) {
vuid = (device_extensions.vk_khr_sampler_ycbcr_conversion)
? GetBufferImageCopyCommandVUID("01558", image_to_buffer, is_2khr)
: GetBufferImageCopyCommandVUID("00193", image_to_buffer, is_2khr);
skip |= LogError(image_state->image(), vuid,
"%s: pRegion[%d] bufferOffset 0x%" PRIxLEAST64
" must be a multiple of this format's texel size (%" PRIu32 ").",
function, i, bufferOffset, element_size);
}
}
// BufferRowLength must be 0, or greater than or equal to the width member of imageExtent
if ((pRegions[i].bufferRowLength != 0) && (pRegions[i].bufferRowLength < pRegions[i].imageExtent.width)) {
vuid = (is_2khr) ? "VUID-VkBufferImageCopy2KHR-bufferRowLength-00195" : "VUID-VkBufferImageCopy-bufferRowLength-00195";
skip |=
LogError(image_state->image(), vuid,
"%s: pRegion[%d] bufferRowLength (%d) must be zero or greater-than-or-equal-to imageExtent.width (%d).",
function, i, pRegions[i].bufferRowLength, pRegions[i].imageExtent.width);
}
// BufferImageHeight must be 0, or greater than or equal to the height member of imageExtent
if ((pRegions[i].bufferImageHeight != 0) && (pRegions[i].bufferImageHeight < pRegions[i].imageExtent.height)) {
vuid =
(is_2khr) ? "VUID-VkBufferImageCopy2KHR-bufferImageHeight-00196" : "VUID-VkBufferImageCopy-bufferImageHeight-00196";
skip |=
LogError(image_state->image(), vuid,
"%s: pRegion[%d] bufferImageHeight (%d) must be zero or greater-than-or-equal-to imageExtent.height (%d).",
function, i, pRegions[i].bufferImageHeight, pRegions[i].imageExtent.height);
}
// Calculate adjusted image extent, accounting for multiplane image factors
VkExtent3D adjusted_image_extent = GetImageSubresourceExtent(image_state, &pRegions[i].imageSubresource);
// imageOffset.x and (imageExtent.width + imageOffset.x) must both be >= 0 and <= image subresource width
if ((pRegions[i].imageOffset.x < 0) || (pRegions[i].imageOffset.x > static_cast<int32_t>(adjusted_image_extent.width)) ||
((pRegions[i].imageOffset.x + static_cast<int32_t>(pRegions[i].imageExtent.width)) >
static_cast<int32_t>(adjusted_image_extent.width))) {
            skip |= LogError(image_state->image(), GetBufferImageCopyCommandVUID("00197", image_to_buffer, is_2khr),
                             "%s: Both pRegion[%d] imageOffset.x (%d) and (imageExtent.width + imageOffset.x) (%d) must be >= "
                             "zero and <= image subresource width (%d).",
                             function, i, pRegions[i].imageOffset.x, (pRegions[i].imageOffset.x + pRegions[i].imageExtent.width),
                             adjusted_image_extent.width);
}
// imageOffset.y and (imageExtent.height + imageOffset.y) must both be >= 0 and <= image subresource height
if ((pRegions[i].imageOffset.y < 0) || (pRegions[i].imageOffset.y > static_cast<int32_t>(adjusted_image_extent.height)) ||
((pRegions[i].imageOffset.y + static_cast<int32_t>(pRegions[i].imageExtent.height)) >
static_cast<int32_t>(adjusted_image_extent.height))) {
            skip |= LogError(image_state->image(), GetBufferImageCopyCommandVUID("00198", image_to_buffer, is_2khr),
                             "%s: Both pRegion[%d] imageOffset.y (%d) and (imageExtent.height + imageOffset.y) (%d) must be >= "
                             "zero and <= image subresource height (%d).",
                             function, i, pRegions[i].imageOffset.y, (pRegions[i].imageOffset.y + pRegions[i].imageExtent.height),
                             adjusted_image_extent.height);
}
// imageOffset.z and (imageExtent.depth + imageOffset.z) must both be >= 0 and <= image subresource depth
if ((pRegions[i].imageOffset.z < 0) || (pRegions[i].imageOffset.z > static_cast<int32_t>(adjusted_image_extent.depth)) ||
((pRegions[i].imageOffset.z + static_cast<int32_t>(pRegions[i].imageExtent.depth)) >
static_cast<int32_t>(adjusted_image_extent.depth))) {
            skip |= LogError(image_state->image(), GetBufferImageCopyCommandVUID("00200", image_to_buffer, is_2khr),
                             "%s: Both pRegion[%d] imageOffset.z (%d) and (imageExtent.depth + imageOffset.z) (%d) must be >= "
                             "zero and <= image subresource depth (%d).",
                             function, i, pRegions[i].imageOffset.z, (pRegions[i].imageOffset.z + pRegions[i].imageExtent.depth),
                             adjusted_image_extent.depth);
}
// subresource aspectMask must have exactly 1 bit set
const int num_bits = sizeof(VkFlags) * CHAR_BIT;
std::bitset<num_bits> aspect_mask_bits(region_aspect_mask);
if (aspect_mask_bits.count() != 1) {
vuid = (is_2khr) ? "VUID-VkBufferImageCopy2KHR-aspectMask-00212" : "VUID-VkBufferImageCopy-aspectMask-00212";
skip |= LogError(image_state->image(), vuid,
"%s: aspectMasks for imageSubresource in pRegion[%d] must have only a single bit set.", function, i);
}
// image subresource aspect bit must match format
if (!VerifyAspectsPresent(region_aspect_mask, image_format)) {
skip |=
LogError(image_state->image(), GetBufferImageCopyCommandVUID("00211", image_to_buffer, is_2khr),
"%s: pRegion[%d] subresource aspectMask 0x%x specifies aspects that are not present in image format 0x%x.",
function, i, region_aspect_mask, image_format);
}
// Checks that apply only to compressed images
if (FormatIsCompressed(image_format) || FormatIsSinglePlane_422(image_format)) {
auto block_size = FormatTexelBlockExtent(image_format);
// BufferRowLength must be a multiple of block width
if (SafeModulo(pRegions[i].bufferRowLength, block_size.width) != 0) {
skip |=
LogError(image_state->image(), GetBufferImageCopyCommandVUID("00203", image_to_buffer, is_2khr),
"%s: pRegion[%d] bufferRowLength (%d) must be a multiple of the compressed image's texel width (%d).",
function, i, pRegions[i].bufferRowLength, block_size.width);
}
// BufferRowHeight must be a multiple of block height
if (SafeModulo(pRegions[i].bufferImageHeight, block_size.height) != 0) {
skip |= LogError(
image_state->image(), GetBufferImageCopyCommandVUID("00204", image_to_buffer, is_2khr),
"%s: pRegion[%d] bufferImageHeight (%d) must be a multiple of the compressed image's texel height (%d).",
function, i, pRegions[i].bufferImageHeight, block_size.height);
}
// image offsets must be multiples of block dimensions
if ((SafeModulo(pRegions[i].imageOffset.x, block_size.width) != 0) ||
(SafeModulo(pRegions[i].imageOffset.y, block_size.height) != 0) ||
(SafeModulo(pRegions[i].imageOffset.z, block_size.depth) != 0)) {
skip |= LogError(image_state->image(), GetBufferImageCopyCommandVUID("00205", image_to_buffer, is_2khr),
"%s: pRegion[%d] imageOffset(x,y) (%d, %d) must be multiples of the compressed image's texel "
"width & height (%d, %d).",
function, i, pRegions[i].imageOffset.x, pRegions[i].imageOffset.y, block_size.width,
block_size.height);
}
// bufferOffset must be a multiple of block size (linear bytes)
uint32_t block_size_in_bytes = FormatElementSize(image_format);
if (SafeModulo(bufferOffset, block_size_in_bytes) != 0) {
skip |= LogError(image_state->image(), GetBufferImageCopyCommandVUID("00206", image_to_buffer, is_2khr),
"%s: pRegion[%d] bufferOffset (0x%" PRIxLEAST64
") must be a multiple of the compressed image's texel block size (%" PRIu32 ").",
function, i, bufferOffset, block_size_in_bytes);
}
// imageExtent width must be a multiple of block width, or extent+offset width must equal subresource width
VkExtent3D mip_extent = GetImageSubresourceExtent(image_state, &(pRegions[i].imageSubresource));
if ((SafeModulo(pRegions[i].imageExtent.width, block_size.width) != 0) &&
(pRegions[i].imageExtent.width + pRegions[i].imageOffset.x != mip_extent.width)) {
skip |= LogError(image_state->image(), GetBufferImageCopyCommandVUID("00207", image_to_buffer, is_2khr),
"%s: pRegion[%d] extent width (%d) must be a multiple of the compressed texture block width "
"(%d), or when added to offset.x (%d) must equal the image subresource width (%d).",
function, i, pRegions[i].imageExtent.width, block_size.width, pRegions[i].imageOffset.x,
mip_extent.width);
}
// imageExtent height must be a multiple of block height, or extent+offset height must equal subresource height
if ((SafeModulo(pRegions[i].imageExtent.height, block_size.height) != 0) &&
(pRegions[i].imageExtent.height + pRegions[i].imageOffset.y != mip_extent.height)) {
skip |= LogError(image_state->image(), GetBufferImageCopyCommandVUID("00208", image_to_buffer, is_2khr),
"%s: pRegion[%d] extent height (%d) must be a multiple of the compressed texture block height "
"(%d), or when added to offset.y (%d) must equal the image subresource height (%d).",
function, i, pRegions[i].imageExtent.height, block_size.height, pRegions[i].imageOffset.y,
mip_extent.height);
}
// imageExtent depth must be a multiple of block depth, or extent+offset depth must equal subresource depth
if ((SafeModulo(pRegions[i].imageExtent.depth, block_size.depth) != 0) &&
(pRegions[i].imageExtent.depth + pRegions[i].imageOffset.z != mip_extent.depth)) {
skip |= LogError(image_state->image(), GetBufferImageCopyCommandVUID("00209", image_to_buffer, is_2khr),
"%s: pRegion[%d] extent width (%d) must be a multiple of the compressed texture block depth "
"(%d), or when added to offset.z (%d) must equal the image subresource depth (%d).",
function, i, pRegions[i].imageExtent.depth, block_size.depth, pRegions[i].imageOffset.z,
mip_extent.depth);
}
}
// Checks that apply only to multi-planar format images
if (FormatIsMultiplane(image_format)) {
// VK_IMAGE_ASPECT_PLANE_2_BIT valid only for image formats with three planes
if ((FormatPlaneCount(image_format) < 3) && (region_aspect_mask == VK_IMAGE_ASPECT_PLANE_2_BIT)) {
skip |= LogError(image_state->image(), GetBufferImageCopyCommandVUID("01560", image_to_buffer, is_2khr),
"%s: pRegion[%d] subresource aspectMask cannot be VK_IMAGE_ASPECT_PLANE_2_BIT unless image "
"format has three planes.",
function, i);
}
// image subresource aspectMask must be VK_IMAGE_ASPECT_PLANE_*_BIT
if (0 ==
(region_aspect_mask & (VK_IMAGE_ASPECT_PLANE_0_BIT | VK_IMAGE_ASPECT_PLANE_1_BIT | VK_IMAGE_ASPECT_PLANE_2_BIT))) {
skip |= LogError(image_state->image(), GetBufferImageCopyCommandVUID("01560", image_to_buffer, is_2khr),
"%s: pRegion[%d] subresource aspectMask for multi-plane image formats must have a "
"VK_IMAGE_ASPECT_PLANE_*_BIT when copying to or from.",
function, i);
} else {
// Know aspect mask is valid
const VkFormat compatible_format = FindMultiplaneCompatibleFormat(image_format, region_aspect_mask);
const uint32_t compatible_size = FormatElementSize(compatible_format);
if (SafeModulo(bufferOffset, compatible_size) != 0) {
skip |= LogError(
image_state->image(), GetBufferImageCopyCommandVUID("01559", image_to_buffer, is_2khr),
"%s: pRegion[%d]->bufferOffset is 0x%" PRIxLEAST64
" but must be a multiple of the multi-plane compatible format's texel size (%u) for plane %u (%s).",
                    function, i, bufferOffset, compatible_size, GetPlaneIndex(region_aspect_mask),
string_VkFormat(compatible_format));
}
}
}
// TODO - Don't use ValidateCmdQueueFlags due to currently not having way to add more descriptive message
const COMMAND_POOL_STATE *command_pool = cb_node->command_pool.get();
assert(command_pool != nullptr);
const uint32_t queue_family_index = command_pool->queueFamilyIndex;
const VkQueueFlags queue_flags = GetPhysicalDeviceState()->queue_family_properties[queue_family_index].queueFlags;
if (((queue_flags & (VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT)) == 0) && (SafeModulo(bufferOffset, 4) != 0)) {
LogObjectList objlist(cb_node->commandBuffer());
objlist.add(command_pool->commandPool());
skip |= LogError(image_state->image(), GetBufferImageCopyCommandVUID("04052", image_to_buffer, is_2khr),
"%s: pRegion[%d] bufferOffset 0x%" PRIxLEAST64
" must be a multiple 4 because the command buffer %s was allocated from the command pool %s "
"which was created with queueFamilyIndex %u, which doesn't contain the VK_QUEUE_GRAPHICS_BIT or "
"VK_QUEUE_COMPUTE_BIT flag.",
function, i, bufferOffset, report_data->FormatHandle(cb_node->commandBuffer()).c_str(),
report_data->FormatHandle(command_pool->commandPool()).c_str(), queue_family_index);
}
}
return skip;
}
template <typename BufferImageCopyRegionType>
bool CoreChecks::ValidateImageBounds(const IMAGE_STATE *image_state, const uint32_t regionCount,
const BufferImageCopyRegionType *pRegions, const char *func_name, const char *msg_code) const {
bool skip = false;
const VkImageCreateInfo *image_info = &(image_state->createInfo);
for (uint32_t i = 0; i < regionCount; i++) {
VkExtent3D extent = pRegions[i].imageExtent;
VkOffset3D offset = pRegions[i].imageOffset;
        if (IsExtentSizeZero(&extent)) {  // Warn on zero area subresource
skip |= LogWarning(image_state->image(), kVUID_Core_Image_ZeroAreaSubregion,
"%s: pRegion[%d] imageExtent of {%1d, %1d, %1d} has zero area", func_name, i, extent.width,
extent.height, extent.depth);
}
VkExtent3D image_extent = GetImageSubresourceExtent(image_state, &(pRegions[i].imageSubresource));
// If we're using a compressed format, valid extent is rounded up to multiple of block size (per 18.1)
if (FormatIsCompressed(image_info->format) || FormatIsSinglePlane_422(image_state->createInfo.format)) {
auto block_extent = FormatTexelBlockExtent(image_info->format);
if (image_extent.width % block_extent.width) {
image_extent.width += (block_extent.width - (image_extent.width % block_extent.width));
}
if (image_extent.height % block_extent.height) {
image_extent.height += (block_extent.height - (image_extent.height % block_extent.height));
}
if (image_extent.depth % block_extent.depth) {
image_extent.depth += (block_extent.depth - (image_extent.depth % block_extent.depth));
}
}
if (0 != ExceedsBounds(&offset, &extent, &image_extent)) {
skip |= LogError(image_state->image(), msg_code, "%s: pRegion[%d] exceeds image bounds.", func_name, i);
}
}
return skip;
}
template <typename BufferImageCopyRegionType>
bool CoreChecks::ValidateBufferBounds(const IMAGE_STATE *image_state, const BUFFER_STATE *buff_state, uint32_t regionCount,
const BufferImageCopyRegionType *pRegions, const char *func_name,
const char *msg_code) const {
bool skip = false;
VkDeviceSize buffer_size = buff_state->createInfo.size;
for (uint32_t i = 0; i < regionCount; i++) {
VkDeviceSize max_buffer_offset =
GetBufferSizeFromCopyImage(pRegions[i], image_state->createInfo.format) + pRegions[i].bufferOffset;
if (buffer_size < max_buffer_offset) {
skip |=
LogError(device, msg_code, "%s: pRegion[%d] exceeds buffer size of %" PRIu64 " bytes.", func_name, i, buffer_size);
}
}
return skip;
}
template <typename BufferImageCopyRegionType>
bool CoreChecks::ValidateCmdCopyImageToBuffer(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
VkBuffer dstBuffer, uint32_t regionCount, const BufferImageCopyRegionType *pRegions,
CopyCommandVersion version) const {
const auto cb_node = GetCBState(commandBuffer);
const auto src_image_state = GetImageState(srcImage);
const auto dst_buffer_state = GetBufferState(dstBuffer);
const bool is_2khr = (version == COPY_COMMAND_VERSION_2);
const char *func_name = is_2khr ? "vkCmdCopyImageToBuffer2KHR()" : "vkCmdCopyImageToBuffer()";
const CMD_TYPE cmd_type = is_2khr ? CMD_COPYIMAGETOBUFFER2KHR : CMD_COPYIMAGETOBUFFER;
const char *vuid;
bool skip = ValidateBufferImageCopyData(cb_node, regionCount, pRegions, src_image_state, func_name, version, true);
// Validate command buffer state
skip |= ValidateCmd(cb_node, cmd_type, func_name);
// Command pool must support graphics, compute, or transfer operations
const auto pool = cb_node->command_pool.get();
VkQueueFlags queue_flags = GetPhysicalDeviceState()->queue_family_properties[pool->queueFamilyIndex].queueFlags;
if (0 == (queue_flags & (VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT | VK_QUEUE_TRANSFER_BIT))) {
vuid =
is_2khr ? "VUID-vkCmdCopyImageToBuffer2KHR-commandBuffer-cmdpool" : "VUID-vkCmdCopyImageToBuffer-commandBuffer-cmdpool";
skip |= LogError(cb_node->createInfo.commandPool, vuid,
"Cannot call %s on a command buffer allocated from a pool without graphics, compute, "
"or transfer capabilities.",
func_name);
}
vuid = is_2khr ? "VUID-VkCopyImageToBufferInfo2KHR-pRegions-00182" : "VUID-vkCmdCopyImageToBuffer-pRegions-00182";
skip |= ValidateImageBounds(src_image_state, regionCount, pRegions, func_name, vuid);
vuid = is_2khr ? "VUID-VkCopyImageToBufferInfo2KHR-pRegions-00183" : "VUID-vkCmdCopyImageToBuffer-pRegions-00183";
skip |= ValidateBufferBounds(src_image_state, dst_buffer_state, regionCount, pRegions, func_name, vuid);
vuid = is_2khr ? "VUID-VkCopyImageToBufferInfo2KHR-srcImage-00188" : "VUID-vkCmdCopyImageToBuffer-srcImage-00188";
const char *location = is_2khr ? "vkCmdCopyImageToBuffer2KHR(): srcImage" : "vkCmdCopyImageToBuffer(): srcImage";
skip |= ValidateImageSampleCount(src_image_state, VK_SAMPLE_COUNT_1_BIT, location, vuid);
vuid = is_2khr ? "VUID-VkCopyImageToBufferInfo2KHR-srcImage-00187" : "VUID-vkCmdCopyImageToBuffer-srcImage-00187";
skip |= ValidateMemoryIsBoundToImage(src_image_state, func_name, vuid);
vuid = is_2khr ? "vkCmdCopyImageToBuffer-dstBuffer2KHR-00192" : "vkCmdCopyImageToBuffer dstBuffer-00192";
skip |= ValidateMemoryIsBoundToBuffer(dst_buffer_state, func_name, vuid);
// Validate that SRC image & DST buffer have correct usage flags set
vuid = is_2khr ? "VUID-VkCopyImageToBufferInfo2KHR-srcImage-00186" : "VUID-vkCmdCopyImageToBuffer-srcImage-00186";
skip |= ValidateImageUsageFlags(src_image_state, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, true, vuid, func_name,
"VK_IMAGE_USAGE_TRANSFER_SRC_BIT");
vuid = is_2khr ? "VUID-VkCopyImageToBufferInfo2KHR-dstBuffer-00191" : "VUID-vkCmdCopyImageToBuffer-dstBuffer-00191";
skip |= ValidateBufferUsageFlags(dst_buffer_state, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true, vuid, func_name,
"VK_BUFFER_USAGE_TRANSFER_DST_BIT");
vuid = is_2khr ? "VUID-vkCmdCopyImageToBuffer2KHR-commandBuffer-01831" : "VUID-vkCmdCopyImageToBuffer-commandBuffer-01831";
skip |= ValidateProtectedImage(cb_node, src_image_state, func_name, vuid);
vuid = is_2khr ? "VUID-vkCmdCopyImageToBuffer2KHR-commandBuffer-01832" : "VUID-vkCmdCopyImageToBuffer-commandBuffer-01832";
skip |= ValidateProtectedBuffer(cb_node, dst_buffer_state, func_name, vuid);
vuid = is_2khr ? "VUID-vkCmdCopyImageToBuffer2KHR-commandBuffer-01833" : "VUID-vkCmdCopyImageToBuffer-commandBuffer-01833";
skip |= ValidateUnprotectedBuffer(cb_node, dst_buffer_state, func_name, vuid);
// Validation for VK_EXT_fragment_density_map
if (src_image_state->createInfo.flags & VK_IMAGE_CREATE_SUBSAMPLED_BIT_EXT) {
vuid = is_2khr ? "VUID-VkCopyImageToBufferInfo2KHR-srcImage-02544" : "VUID-vkCmdCopyImageToBuffer-srcImage-02544";
skip |= LogError(cb_node->commandBuffer(), vuid,
"%s: srcImage must not have been created with flags containing "
"VK_IMAGE_CREATE_SUBSAMPLED_BIT_EXT",
func_name);
}
if (device_extensions.vk_khr_maintenance1) {
vuid = is_2khr ? "VUID-VkCopyImageToBufferInfo2KHR-srcImage-01998" : "VUID-vkCmdCopyImageToBuffer-srcImage-01998";
skip |= ValidateImageFormatFeatureFlags(src_image_state, VK_FORMAT_FEATURE_TRANSFER_SRC_BIT, func_name, vuid);
}
bool hit_error = false;
    const char *src_invalid_layout_vuid = (src_image_state->shared_presentable && device_extensions.vk_khr_shared_presentable_image)
                                              ? (is_2khr ? "VUID-VkCopyImageToBufferInfo2KHR-srcImageLayout-01397"
                                                         : "VUID-vkCmdCopyImageToBuffer-srcImageLayout-01397")
                                              : (is_2khr ? "VUID-VkCopyImageToBufferInfo2KHR-srcImageLayout-00190"
                                                         : "VUID-vkCmdCopyImageToBuffer-srcImageLayout-00190");
for (uint32_t i = 0; i < regionCount; ++i) {
skip |= ValidateImageSubresourceLayers(cb_node, &pRegions[i].imageSubresource, func_name, "imageSubresource", i);
vuid =
is_2khr ? "VUID-VkCopyImageToBufferInfo2KHR-srcImageLayout-00189" : "VUID-vkCmdCopyImageToBuffer-srcImageLayout-00189";
skip |= VerifyImageLayout(cb_node, src_image_state, pRegions[i].imageSubresource, srcImageLayout,
VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, func_name, src_invalid_layout_vuid, vuid, &hit_error);
vuid = is_2khr ? "VUID-VkCopyImageToBufferInfo2KHR-imageOffset-01794" : "VUID-vkCmdCopyImageToBuffer-imageOffset-01794";
skip |= ValidateCopyBufferImageTransferGranularityRequirements(cb_node, src_image_state, &pRegions[i], i, func_name, vuid);
vuid = is_2khr ? "VUID-VkCopyImageToBufferInfo2KHR-imageSubresource-01703"
: "VUID-vkCmdCopyImageToBuffer-imageSubresource-01703";
skip |= ValidateImageMipLevel(cb_node, src_image_state, pRegions[i].imageSubresource.mipLevel, i, func_name,
"imageSubresource", vuid);
vuid = is_2khr ? "VUID-VkCopyImageToBufferInfo2KHR-imageSubresource-01704"
: "VUID-vkCmdCopyImageToBuffer-imageSubresource-01704";
skip |= ValidateImageArrayLayerRange(cb_node, src_image_state, pRegions[i].imageSubresource.baseArrayLayer,
pRegions[i].imageSubresource.layerCount, i, func_name, "imageSubresource", vuid);
}
return skip;
}
bool CoreChecks::PreCallValidateCmdCopyImageToBuffer(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
VkBuffer dstBuffer, uint32_t regionCount,
const VkBufferImageCopy *pRegions) const {
return ValidateCmdCopyImageToBuffer(commandBuffer, srcImage, srcImageLayout, dstBuffer, regionCount, pRegions,
COPY_COMMAND_VERSION_1);
}
bool CoreChecks::PreCallValidateCmdCopyImageToBuffer2KHR(VkCommandBuffer commandBuffer,
const VkCopyImageToBufferInfo2KHR *pCopyImageToBufferInfo) const {
return ValidateCmdCopyImageToBuffer(commandBuffer, pCopyImageToBufferInfo->srcImage, pCopyImageToBufferInfo->srcImageLayout,
pCopyImageToBufferInfo->dstBuffer, pCopyImageToBufferInfo->regionCount,
pCopyImageToBufferInfo->pRegions, COPY_COMMAND_VERSION_2);
}
void CoreChecks::PreCallRecordCmdCopyImageToBuffer(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
VkBuffer dstBuffer, uint32_t regionCount, const VkBufferImageCopy *pRegions) {
StateTracker::PreCallRecordCmdCopyImageToBuffer(commandBuffer, srcImage, srcImageLayout, dstBuffer, regionCount, pRegions);
auto cb_node = GetCBState(commandBuffer);
auto src_image_state = GetImageState(srcImage);
    // Make sure that all image slices record the referenced layout
for (uint32_t i = 0; i < regionCount; ++i) {
SetImageInitialLayout(cb_node, *src_image_state, pRegions[i].imageSubresource, srcImageLayout);
}
}
void CoreChecks::PreCallRecordCmdCopyImageToBuffer2KHR(VkCommandBuffer commandBuffer,
const VkCopyImageToBufferInfo2KHR *pCopyImageToBufferInfo) {
StateTracker::PreCallRecordCmdCopyImageToBuffer2KHR(commandBuffer, pCopyImageToBufferInfo);
auto cb_node = GetCBState(commandBuffer);
auto src_image_state = GetImageState(pCopyImageToBufferInfo->srcImage);
    // Make sure that all image slices record the referenced layout
for (uint32_t i = 0; i < pCopyImageToBufferInfo->regionCount; ++i) {
SetImageInitialLayout(cb_node, *src_image_state, pCopyImageToBufferInfo->pRegions[i].imageSubresource,
pCopyImageToBufferInfo->srcImageLayout);
}
}
template <typename RegionType>
bool CoreChecks::ValidateCmdCopyBufferToImage(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkImage dstImage,
VkImageLayout dstImageLayout, uint32_t regionCount, const RegionType *pRegions,
CopyCommandVersion version) const {
const auto cb_node = GetCBState(commandBuffer);
const auto src_buffer_state = GetBufferState(srcBuffer);
const auto dst_image_state = GetImageState(dstImage);
const bool is_2khr = (version == COPY_COMMAND_VERSION_2);
const char *func_name = is_2khr ? "vkCmdCopyBufferToImage2KHR()" : "vkCmdCopyBufferToImage()";
const CMD_TYPE cmd_type = is_2khr ? CMD_COPYBUFFERTOIMAGE2KHR : CMD_COPYBUFFERTOIMAGE;
const char *vuid;
bool skip = ValidateBufferImageCopyData(cb_node, regionCount, pRegions, dst_image_state, func_name, version, false);
// Validate command buffer state
skip |= ValidateCmd(cb_node, cmd_type, func_name);
vuid = is_2khr ? "VUID-VkCopyBufferToImageInfo2KHR-pRegions-00172" : "VUID-vkCmdCopyBufferToImage-pRegions-00172";
skip |= ValidateImageBounds(dst_image_state, regionCount, pRegions, func_name, vuid);
vuid = is_2khr ? "VUID-VkCopyBufferToImageInfo2KHR-pRegions-00171" : "VUID-vkCmdCopyBufferToImage-pRegions-00171";
skip |= ValidateBufferBounds(dst_image_state, src_buffer_state, regionCount, pRegions, func_name, vuid);
vuid = is_2khr ? "VUID-VkCopyBufferToImageInfo2KHR-dstImage-00179" : "VUID-vkCmdCopyBufferToImage-dstImage-00179";
const char *location = is_2khr ? "vkCmdCopyBufferToImage2KHR(): dstImage" : "vkCmdCopyBufferToImage(): dstImage";
skip |= ValidateImageSampleCount(dst_image_state, VK_SAMPLE_COUNT_1_BIT, location, vuid);
vuid = is_2khr ? "VUID-VkCopyBufferToImageInfo2KHR-srcBuffer-00176" : "VUID-vkCmdCopyBufferToImage-srcBuffer-00176";
skip |= ValidateMemoryIsBoundToBuffer(src_buffer_state, func_name, vuid);
vuid = is_2khr ? "VUID-VkCopyBufferToImageInfo2KHR-dstImage-00178" : "VUID-vkCmdCopyBufferToImage-dstImage-00178";
skip |= ValidateMemoryIsBoundToImage(dst_image_state, func_name, vuid);
vuid = is_2khr ? "VUID-VkCopyBufferToImageInfo2KHR-srcBuffer-00174" : "VUID-vkCmdCopyBufferToImage-srcBuffer-00174";
skip |= ValidateBufferUsageFlags(src_buffer_state, VK_BUFFER_USAGE_TRANSFER_SRC_BIT, true, vuid, func_name,
"VK_BUFFER_USAGE_TRANSFER_SRC_BIT");
vuid = is_2khr ? "VUID-VkCopyBufferToImageInfo2KHR-dstImage-00177" : "VUID-vkCmdCopyBufferToImage-dstImage-00177";
skip |= ValidateImageUsageFlags(dst_image_state, VK_IMAGE_USAGE_TRANSFER_DST_BIT, true, vuid, func_name,
"VK_IMAGE_USAGE_TRANSFER_DST_BIT");
vuid = is_2khr ? "VUID-vkCmdCopyBufferToImage2KHR-commandBuffer-01828" : "VUID-vkCmdCopyBufferToImage-commandBuffer-01828";
skip |= ValidateProtectedBuffer(cb_node, src_buffer_state, func_name, vuid);
vuid = is_2khr ? "VUID-vkCmdCopyBufferToImage2KHR-commandBuffer-01829" : "VUID-vkCmdCopyBufferToImage-commandBuffer-01829";
skip |= ValidateProtectedImage(cb_node, dst_image_state, func_name, vuid);
vuid = is_2khr ? "VUID-vkCmdCopyBufferToImage-commandBuffer-01830" : "VUID-vkCmdCopyBufferToImage-commandBuffer-01830";
skip |= ValidateUnprotectedImage(cb_node, dst_image_state, func_name, vuid);
// Validation for VK_EXT_fragment_density_map
if (dst_image_state->createInfo.flags & VK_IMAGE_CREATE_SUBSAMPLED_BIT_EXT) {
vuid = is_2khr ? "VUID-VkCopyBufferToImageInfo2KHR-dstImage-02543" : "VUID-vkCmdCopyBufferToImage-dstImage-02543";
skip |= LogError(cb_node->commandBuffer(), vuid,
"%s: dstImage must not have been created with flags containing "
"VK_IMAGE_CREATE_SUBSAMPLED_BIT_EXT",
func_name);
}
if (device_extensions.vk_khr_maintenance1) {
vuid = is_2khr ? "VUID-VkCopyBufferToImageInfo2KHR-dstImage-01997" : "VUID-vkCmdCopyBufferToImage-dstImage-01997";
skip |= ValidateImageFormatFeatureFlags(dst_image_state, VK_FORMAT_FEATURE_TRANSFER_DST_BIT, func_name, vuid);
}
bool hit_error = false;
const char *dst_invalid_layout_vuid = (dst_image_state->shared_presentable && device_extensions.vk_khr_shared_presentable_image)
? (is_2khr ? "VUID-VkCopyBufferToImageInfo2KHR-dstImageLayout-01396"
: "VUID-vkCmdCopyBufferToImage-dstImageLayout-01396")
: (is_2khr ? "VUID-VkCopyBufferToImageInfo2KHR-dstImageLayout-00181"
: "VUID-vkCmdCopyBufferToImage-dstImageLayout-00181");
for (uint32_t i = 0; i < regionCount; ++i) {
skip |= ValidateImageSubresourceLayers(cb_node, &pRegions[i].imageSubresource, func_name, "imageSubresource", i);
vuid =
is_2khr ? "VUID-VkCopyBufferToImageInfo2KHR-dstImageLayout-00180" : "VUID-vkCmdCopyBufferToImage-dstImageLayout-00180";
skip |= VerifyImageLayout(cb_node, dst_image_state, pRegions[i].imageSubresource, dstImageLayout,
VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, func_name, dst_invalid_layout_vuid, vuid, &hit_error);
vuid = is_2khr ? "VUID-VkCopyBufferToImageInfo2KHR-imageOffset-01793" : "VUID-vkCmdCopyBufferToImage-imageOffset-01793";
skip |= ValidateCopyBufferImageTransferGranularityRequirements(cb_node, dst_image_state, &pRegions[i], i, func_name, vuid);
vuid = is_2khr ? "VUID-VkCopyBufferToImageInfo2KHR-imageSubresource-01701"
: "VUID-vkCmdCopyBufferToImage-imageSubresource-01701";
skip |= ValidateImageMipLevel(cb_node, dst_image_state, pRegions[i].imageSubresource.mipLevel, i, func_name,
"imageSubresource", vuid);
vuid = is_2khr ? "VUID-VkCopyBufferToImageInfo2KHR-imageSubresource-01702"
: "VUID-vkCmdCopyBufferToImage-imageSubresource-01702";
skip |= ValidateImageArrayLayerRange(cb_node, dst_image_state, pRegions[i].imageSubresource.baseArrayLayer,
pRegions[i].imageSubresource.layerCount, i, func_name, "imageSubresource", vuid);
// TODO - Don't use ValidateCmdQueueFlags due to currently not having way to add more descriptive message
const COMMAND_POOL_STATE *command_pool = cb_node->command_pool.get();
assert(command_pool != nullptr);
const uint32_t queue_family_index = command_pool->queueFamilyIndex;
const VkQueueFlags queue_flags = GetPhysicalDeviceState()->queue_family_properties[queue_family_index].queueFlags;
const VkImageAspectFlags region_aspect_mask = pRegions[i].imageSubresource.aspectMask;
if (((queue_flags & VK_QUEUE_GRAPHICS_BIT) == 0) &&
((region_aspect_mask & (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) != 0)) {
LogObjectList objlist(cb_node->commandBuffer());
objlist.add(command_pool->commandPool());
vuid = is_2khr ? "VUID-VkCopyBufferToImageInfo2KHR-commandBuffer-04477"
: "VUID-vkCmdCopyBufferToImage-commandBuffer-04477";
skip |= LogError(dst_image_state->image(), vuid,
"%s(): pRegion[%d] subresource aspectMask 0x%x specifies VK_IMAGE_ASPECT_DEPTH_BIT or "
"VK_IMAGE_ASPECT_STENCIL_BIT but the command buffer %s was allocated from the command pool %s "
"which was created with queueFamilyIndex %u, which doesn't contain the VK_QUEUE_GRAPHICS_BIT flag.",
func_name, i, region_aspect_mask, report_data->FormatHandle(cb_node->commandBuffer()).c_str(),
report_data->FormatHandle(command_pool->commandPool()).c_str(), queue_family_index);
}
}
return skip;
}
bool CoreChecks::PreCallValidateCmdCopyBufferToImage(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkImage dstImage,
VkImageLayout dstImageLayout, uint32_t regionCount,
const VkBufferImageCopy *pRegions) const {
return ValidateCmdCopyBufferToImage(commandBuffer, srcBuffer, dstImage, dstImageLayout, regionCount, pRegions,
COPY_COMMAND_VERSION_1);
}
bool CoreChecks::PreCallValidateCmdCopyBufferToImage2KHR(VkCommandBuffer commandBuffer,
const VkCopyBufferToImageInfo2KHR *pCopyBufferToImageInfo) const {
return ValidateCmdCopyBufferToImage(commandBuffer, pCopyBufferToImageInfo->srcBuffer, pCopyBufferToImageInfo->dstImage,
pCopyBufferToImageInfo->dstImageLayout, pCopyBufferToImageInfo->regionCount,
pCopyBufferToImageInfo->pRegions, COPY_COMMAND_VERSION_2);
}
void CoreChecks::PreCallRecordCmdCopyBufferToImage(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkImage dstImage,
VkImageLayout dstImageLayout, uint32_t regionCount,
const VkBufferImageCopy *pRegions) {
StateTracker::PreCallRecordCmdCopyBufferToImage(commandBuffer, srcBuffer, dstImage, dstImageLayout, regionCount, pRegions);
auto cb_node = GetCBState(commandBuffer);
auto dst_image_state = GetImageState(dstImage);
    // Make sure that all image slices record the referenced layout
for (uint32_t i = 0; i < regionCount; ++i) {
SetImageInitialLayout(cb_node, *dst_image_state, pRegions[i].imageSubresource, dstImageLayout);
}
}
void CoreChecks::PreCallRecordCmdCopyBufferToImage2KHR(VkCommandBuffer commandBuffer,
const VkCopyBufferToImageInfo2KHR *pCopyBufferToImageInfo2KHR) {
StateTracker::PreCallRecordCmdCopyBufferToImage2KHR(commandBuffer, pCopyBufferToImageInfo2KHR);
auto cb_node = GetCBState(commandBuffer);
auto dst_image_state = GetImageState(pCopyBufferToImageInfo2KHR->dstImage);
    // Make sure that all image slices record the referenced layout
for (uint32_t i = 0; i < pCopyBufferToImageInfo2KHR->regionCount; ++i) {
SetImageInitialLayout(cb_node, *dst_image_state, pCopyBufferToImageInfo2KHR->pRegions[i].imageSubresource,
pCopyBufferToImageInfo2KHR->dstImageLayout);
}
}
bool CoreChecks::PreCallValidateGetImageSubresourceLayout(VkDevice device, VkImage image, const VkImageSubresource *pSubresource,
VkSubresourceLayout *pLayout) const {
bool skip = false;
const VkImageAspectFlags sub_aspect = pSubresource->aspectMask;
// The aspectMask member of pSubresource must only have a single bit set
const int num_bits = sizeof(sub_aspect) * CHAR_BIT;
std::bitset<num_bits> aspect_mask_bits(sub_aspect);
if (aspect_mask_bits.count() != 1) {
skip |= LogError(image, "VUID-vkGetImageSubresourceLayout-aspectMask-00997",
"vkGetImageSubresourceLayout(): VkImageSubresource.aspectMask must have exactly 1 bit set.");
}
const IMAGE_STATE *image_entry = GetImageState(image);
if (!image_entry) {
return skip;
}
// Image must have been created with tiling equal to VK_IMAGE_TILING_LINEAR
if (device_extensions.vk_ext_image_drm_format_modifier) {
if ((image_entry->createInfo.tiling != VK_IMAGE_TILING_LINEAR) &&
(image_entry->createInfo.tiling != VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT)) {
skip |= LogError(image, "VUID-vkGetImageSubresourceLayout-image-02270",
"vkGetImageSubresourceLayout(): Image must have tiling of VK_IMAGE_TILING_LINEAR or "
"VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT.");
}
} else {
if (image_entry->createInfo.tiling != VK_IMAGE_TILING_LINEAR) {
skip |= LogError(image, "VUID-vkGetImageSubresourceLayout-image-00996",
"vkGetImageSubresourceLayout(): Image must have tiling of VK_IMAGE_TILING_LINEAR.");
}
}
// mipLevel must be less than the mipLevels specified in VkImageCreateInfo when the image was created
if (pSubresource->mipLevel >= image_entry->createInfo.mipLevels) {
skip |= LogError(image, "VUID-vkGetImageSubresourceLayout-mipLevel-01716",
"vkGetImageSubresourceLayout(): pSubresource.mipLevel (%d) must be less than %d.", pSubresource->mipLevel,
image_entry->createInfo.mipLevels);
}
// arrayLayer must be less than the arrayLayers specified in VkImageCreateInfo when the image was created
if (pSubresource->arrayLayer >= image_entry->createInfo.arrayLayers) {
skip |= LogError(image, "VUID-vkGetImageSubresourceLayout-arrayLayer-01717",
"vkGetImageSubresourceLayout(): pSubresource.arrayLayer (%d) must be less than %d.",
pSubresource->arrayLayer, image_entry->createInfo.arrayLayers);
}
// subresource's aspect must be compatible with image's format.
const VkFormat img_format = image_entry->createInfo.format;
if (image_entry->createInfo.tiling == VK_IMAGE_TILING_LINEAR) {
if (FormatIsMultiplane(img_format)) {
VkImageAspectFlags allowed_flags = (VK_IMAGE_ASPECT_PLANE_0_BIT | VK_IMAGE_ASPECT_PLANE_1_BIT);
const char *vuid = "VUID-vkGetImageSubresourceLayout-format-01581"; // 2-plane version
if (FormatPlaneCount(img_format) > 2u) {
allowed_flags |= VK_IMAGE_ASPECT_PLANE_2_BIT;
vuid = "VUID-vkGetImageSubresourceLayout-format-01582"; // 3-plane version
}
if (sub_aspect != (sub_aspect & allowed_flags)) {
skip |= LogError(image, vuid,
"vkGetImageSubresourceLayout(): For multi-planar images, VkImageSubresource.aspectMask (0x%" PRIx32
") must be a single-plane specifier flag.",
sub_aspect);
}
} else if (FormatIsColor(img_format)) {
if (sub_aspect != VK_IMAGE_ASPECT_COLOR_BIT) {
skip |= LogError(image, kVUID_Core_DrawState_InvalidImageAspect,
"vkGetImageSubresourceLayout(): For color formats, VkImageSubresource.aspectMask must be "
"VK_IMAGE_ASPECT_COLOR.");
}
} else if (FormatIsDepthOrStencil(img_format)) {
            if ((sub_aspect != VK_IMAGE_ASPECT_DEPTH_BIT) && (sub_aspect != VK_IMAGE_ASPECT_STENCIL_BIT)) {
                // Note: no validation error is currently reported for a mismatched aspect on depth/stencil formats
            }
}
} else if (image_entry->createInfo.tiling == VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT) {
if ((sub_aspect != VK_IMAGE_ASPECT_MEMORY_PLANE_0_BIT_EXT) && (sub_aspect != VK_IMAGE_ASPECT_MEMORY_PLANE_1_BIT_EXT) &&
(sub_aspect != VK_IMAGE_ASPECT_MEMORY_PLANE_2_BIT_EXT) && (sub_aspect != VK_IMAGE_ASPECT_MEMORY_PLANE_3_BIT_EXT)) {
// TODO: This VU also needs to ensure that the DRM index is in range and valid.
skip |= LogError(image, "VUID-vkGetImageSubresourceLayout-tiling-02271",
"vkGetImageSubresourceLayout(): VkImageSubresource.aspectMask must be "
"VK_IMAGE_ASPECT_MEMORY_PLANE_i_BIT_EXT.");
}
}
if (device_extensions.vk_android_external_memory_android_hardware_buffer) {
skip |= ValidateGetImageSubresourceLayoutANDROID(image);
}
return skip;
}
// Validates the image is allowed to be protected
bool CoreChecks::ValidateProtectedImage(const CMD_BUFFER_STATE *cb_state, const IMAGE_STATE *image_state, const char *cmd_name,
const char *vuid, const char *more_message) const {
bool skip = false;
if ((cb_state->unprotected == true) && (image_state->unprotected == false)) {
LogObjectList objlist(cb_state->commandBuffer());
objlist.add(image_state->image());
skip |= LogError(objlist, vuid, "%s: command buffer %s is unprotected while image %s is a protected image.%s", cmd_name,
report_data->FormatHandle(cb_state->commandBuffer()).c_str(),
report_data->FormatHandle(image_state->image()).c_str(), more_message);
}
return skip;
}
// Validates the image is allowed to be unprotected
bool CoreChecks::ValidateUnprotectedImage(const CMD_BUFFER_STATE *cb_state, const IMAGE_STATE *image_state, const char *cmd_name,
const char *vuid, const char *more_message) const {
bool skip = false;
if ((cb_state->unprotected == false) && (image_state->unprotected == true)) {
LogObjectList objlist(cb_state->commandBuffer());
objlist.add(image_state->image());
skip |= LogError(objlist, vuid, "%s: command buffer %s is protected while image %s is an unprotected image.%s", cmd_name,
report_data->FormatHandle(cb_state->commandBuffer()).c_str(),
report_data->FormatHandle(image_state->image()).c_str(), more_message);
}
return skip;
}
// Validates the buffer is allowed to be protected
bool CoreChecks::ValidateProtectedBuffer(const CMD_BUFFER_STATE *cb_state, const BUFFER_STATE *buffer_state, const char *cmd_name,
const char *vuid, const char *more_message) const {
bool skip = false;
if ((cb_state->unprotected == true) && (buffer_state->unprotected == false)) {
LogObjectList objlist(cb_state->commandBuffer());
objlist.add(buffer_state->buffer());
skip |= LogError(objlist, vuid, "%s: command buffer %s is unprotected while buffer %s is a protected buffer.%s", cmd_name,
report_data->FormatHandle(cb_state->commandBuffer()).c_str(),
report_data->FormatHandle(buffer_state->buffer()).c_str(), more_message);
}
return skip;
}
// Validates the buffer is allowed to be unprotected
bool CoreChecks::ValidateUnprotectedBuffer(const CMD_BUFFER_STATE *cb_state, const BUFFER_STATE *buffer_state, const char *cmd_name,
const char *vuid, const char *more_message) const {
bool skip = false;
if ((cb_state->unprotected == false) && (buffer_state->unprotected == true)) {
LogObjectList objlist(cb_state->commandBuffer());
objlist.add(buffer_state->buffer());
skip |= LogError(objlist, vuid, "%s: command buffer %s is protected while buffer %s is an unprotected buffer.%s", cmd_name,
report_data->FormatHandle(cb_state->commandBuffer()).c_str(),
report_data->FormatHandle(buffer_state->buffer()).c_str(), more_message);
}
return skip;
}
| 1 | 16,683 | That doesn't make sense. LayoutEntry doesn't match the generator concept. | KhronosGroup-Vulkan-ValidationLayers | cpp |
@@ -284,7 +284,7 @@ func TestAppDeployOpts_getAppDockerfilePath(t *testing.T) {
mockError := errors.New("mockError")
mockManifest := []byte(`name: appA
-type: 'Load Balanced Web App'
+type: 'Load Balanced Web Svc'
image:
build: appA/Dockerfile
`) | 1 | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package cli
import (
"errors"
"fmt"
"testing"
"github.com/aws/amazon-ecs-cli-v2/internal/pkg/addons"
"github.com/aws/amazon-ecs-cli-v2/internal/pkg/archer"
"github.com/golang/mock/gomock"
"github.com/stretchr/testify/require"
climocks "github.com/aws/amazon-ecs-cli-v2/internal/pkg/cli/mocks"
)
func TestAppDeployOpts_Validate(t *testing.T) {
testCases := map[string]struct {
inProjectName string
inAppName string
inEnvName string
mockWs func(m *climocks.MockwsAppReader)
mockStore func(m *climocks.MockprojectService)
wantedError error
}{
"no existing projects": {
mockWs: func(m *climocks.MockwsAppReader) {},
mockStore: func(m *climocks.MockprojectService) {},
wantedError: errNoProjectInWorkspace,
},
"with workspace error": {
inProjectName: "phonetool",
inAppName: "frontend",
mockWs: func(m *climocks.MockwsAppReader) {
m.EXPECT().ServiceNames().Return(nil, errors.New("some error"))
},
mockStore: func(m *climocks.MockprojectService) {},
wantedError: errors.New("list applications in the workspace: some error"),
},
"with application not in workspace": {
inProjectName: "phonetool",
inAppName: "frontend",
mockWs: func(m *climocks.MockwsAppReader) {
m.EXPECT().ServiceNames().Return([]string{}, nil)
},
mockStore: func(m *climocks.MockprojectService) {},
wantedError: errors.New("application frontend not found in the workspace"),
},
"with unknown environment": {
inProjectName: "phonetool",
inEnvName: "test",
mockWs: func(m *climocks.MockwsAppReader) {},
mockStore: func(m *climocks.MockprojectService) {
m.EXPECT().GetEnvironment("phonetool", "test").
Return(nil, errors.New("unknown env"))
},
wantedError: errors.New("get environment test from metadata store: unknown env"),
},
"successful validation": {
inProjectName: "phonetool",
inAppName: "frontend",
inEnvName: "test",
mockWs: func(m *climocks.MockwsAppReader) {
m.EXPECT().ServiceNames().Return([]string{"frontend"}, nil)
},
mockStore: func(m *climocks.MockprojectService) {
m.EXPECT().GetEnvironment("phonetool", "test").
Return(&archer.Environment{Name: "test"}, nil)
},
},
}
for name, tc := range testCases {
t.Run(name, func(t *testing.T) {
// GIVEN
ctrl := gomock.NewController(t)
defer ctrl.Finish()
mockWs := climocks.NewMockwsAppReader(ctrl)
mockStore := climocks.NewMockprojectService(ctrl)
tc.mockWs(mockWs)
tc.mockStore(mockStore)
opts := appDeployOpts{
appDeployVars: appDeployVars{
GlobalOpts: &GlobalOpts{
projectName: tc.inProjectName,
},
AppName: tc.inAppName,
EnvName: tc.inEnvName,
},
workspaceService: mockWs,
projectService: mockStore,
}
// WHEN
err := opts.Validate()
// THEN
if tc.wantedError != nil {
require.EqualError(t, err, tc.wantedError.Error())
} else {
require.Nil(t, err)
}
})
}
}
func TestAppDeployOpts_Ask(t *testing.T) {
testCases := map[string]struct {
inProjectName string
inAppName string
inEnvName string
inImageTag string
mockWs func(m *climocks.MockwsAppReader)
mockStore func(m *climocks.MockprojectService)
mockPrompt func(m *climocks.Mockprompter)
wantedAppName string
wantedEnvName string
wantedImageTag string
wantedError error
}{
"no applications in the workspace": {
mockWs: func(m *climocks.MockwsAppReader) {
m.EXPECT().ServiceNames().Return([]string{}, nil)
},
mockStore: func(m *climocks.MockprojectService) {},
mockPrompt: func(m *climocks.Mockprompter) {},
wantedError: errors.New("no applications found in the workspace"),
},
"default to single application": {
inEnvName: "test",
inImageTag: "latest",
mockWs: func(m *climocks.MockwsAppReader) {
m.EXPECT().ServiceNames().Return([]string{"frontend"}, nil)
},
mockStore: func(m *climocks.MockprojectService) {},
mockPrompt: func(m *climocks.Mockprompter) {},
wantedAppName: "frontend",
wantedEnvName: "test",
wantedImageTag: "latest",
},
"prompts for application name if there are more than one option": {
inEnvName: "test",
inImageTag: "latest",
mockWs: func(m *climocks.MockwsAppReader) {
m.EXPECT().ServiceNames().Return([]string{"frontend", "webhook"}, nil)
},
mockStore: func(m *climocks.MockprojectService) {},
mockPrompt: func(m *climocks.Mockprompter) {
m.EXPECT().SelectOne("Select an application", "", []string{"frontend", "webhook"}).
Return("frontend", nil)
},
wantedAppName: "frontend",
wantedEnvName: "test",
wantedImageTag: "latest",
},
"fails to list environments": {
inProjectName: "phonetool",
inAppName: "frontend",
inImageTag: "latest",
mockWs: func(m *climocks.MockwsAppReader) {},
mockStore: func(m *climocks.MockprojectService) {
m.EXPECT().ListEnvironments("phonetool").Return(nil, errors.New("some error"))
},
mockPrompt: func(m *climocks.Mockprompter) {
},
wantedError: errors.New("get environments for project phonetool from metadata store: some error"),
},
"no existing environments": {
inProjectName: "phonetool",
inAppName: "frontend",
inImageTag: "latest",
mockWs: func(m *climocks.MockwsAppReader) {},
mockStore: func(m *climocks.MockprojectService) {
m.EXPECT().ListEnvironments("phonetool").Return([]*archer.Environment{}, nil)
},
mockPrompt: func(m *climocks.Mockprompter) {
},
wantedError: errors.New("no environments found in project phonetool"),
},
"defaults to single environment": {
inProjectName: "phonetool",
inAppName: "frontend",
inImageTag: "latest",
mockWs: func(m *climocks.MockwsAppReader) {},
mockStore: func(m *climocks.MockprojectService) {
m.EXPECT().ListEnvironments("phonetool").Return([]*archer.Environment{
{
Name: "test",
},
}, nil)
},
mockPrompt: func(m *climocks.Mockprompter) {
},
wantedAppName: "frontend",
wantedEnvName: "test",
wantedImageTag: "latest",
},
"prompts for environment name if there are more than one option": {
inProjectName: "phonetool",
inAppName: "frontend",
inImageTag: "latest",
mockWs: func(m *climocks.MockwsAppReader) {},
mockStore: func(m *climocks.MockprojectService) {
m.EXPECT().ListEnvironments("phonetool").Return([]*archer.Environment{
{
Name: "test",
},
{
Name: "prod-iad",
},
}, nil)
},
mockPrompt: func(m *climocks.Mockprompter) {
m.EXPECT().SelectOne("Select an environment", "", []string{"test", "prod-iad"}).
Return("prod-iad", nil)
},
wantedAppName: "frontend",
wantedEnvName: "prod-iad",
wantedImageTag: "latest",
},
}
for name, tc := range testCases {
t.Run(name, func(t *testing.T) {
// GIVEN
ctrl := gomock.NewController(t)
defer ctrl.Finish()
mockWs := climocks.NewMockwsAppReader(ctrl)
mockStore := climocks.NewMockprojectService(ctrl)
mockPrompt := climocks.NewMockprompter(ctrl)
tc.mockWs(mockWs)
tc.mockStore(mockStore)
tc.mockPrompt(mockPrompt)
opts := appDeployOpts{
appDeployVars: appDeployVars{
GlobalOpts: &GlobalOpts{
projectName: tc.inProjectName,
prompt: mockPrompt,
},
AppName: tc.inAppName,
EnvName: tc.inEnvName,
ImageTag: tc.inImageTag,
},
workspaceService: mockWs,
projectService: mockStore,
}
// WHEN
err := opts.Ask()
// THEN
if tc.wantedError == nil {
require.Nil(t, err)
require.Equal(t, tc.wantedAppName, opts.AppName)
require.Equal(t, tc.wantedEnvName, opts.EnvName)
require.Equal(t, tc.wantedImageTag, opts.ImageTag)
} else {
require.EqualError(t, err, tc.wantedError.Error())
}
})
}
}
func TestAppDeployOpts_getAppDockerfilePath(t *testing.T) {
var mockWorkspace *climocks.MockwsAppReader
mockError := errors.New("mockError")
mockManifest := []byte(`name: appA
type: 'Load Balanced Web App'
image:
build: appA/Dockerfile
`)
tests := map[string]struct {
inputApp string
setupMocks func(controller *gomock.Controller)
wantPath string
wantErr error
}{
"should return error if workspaceService ReadFile returns error": {
inputApp: "appA",
setupMocks: func(controller *gomock.Controller) {
mockWorkspace = climocks.NewMockwsAppReader(controller)
gomock.InOrder(
mockWorkspace.EXPECT().ReadServiceManifest("appA").Times(1).Return(nil, mockError),
)
},
wantPath: "",
wantErr: fmt.Errorf("read manifest file %s: %w", "appA", mockError),
},
"should trim the manifest DockerfilePath if it contains /Dockerfile": {
inputApp: "appA",
setupMocks: func(controller *gomock.Controller) {
mockWorkspace = climocks.NewMockwsAppReader(controller)
gomock.InOrder(
mockWorkspace.EXPECT().ReadServiceManifest("appA").Times(1).Return(mockManifest, nil),
)
},
wantPath: "appA",
wantErr: nil,
},
}
for name, test := range tests {
t.Run(name, func(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
test.setupMocks(ctrl)
opts := appDeployOpts{
appDeployVars: appDeployVars{
AppName: test.inputApp,
},
workspaceService: mockWorkspace,
}
gotPath, gotErr := opts.getAppDockerfilePath()
require.Equal(t, test.wantPath, gotPath)
require.Equal(t, test.wantErr, gotErr)
})
}
}
func TestAppDeployOpts_pushAddonsTemplateToS3Bucket(t *testing.T) {
mockError := errors.New("some error")
tests := map[string]struct {
inputApp string
inEnvironment *archer.Environment
inProject *archer.Project
mockProjectResourcesGetter func(m *climocks.MockprojectResourcesGetter)
mockS3Svc func(m *climocks.MockartifactUploader)
mockAddons func(m *climocks.Mocktemplater)
wantPath string
wantErr error
}{
"should push addons template to S3 bucket": {
inputApp: "mockApp",
inEnvironment: &archer.Environment{
Name: "mockEnv",
Region: "us-west-2",
},
inProject: &archer.Project{
Name: "mockProject",
},
mockProjectResourcesGetter: func(m *climocks.MockprojectResourcesGetter) {
m.EXPECT().GetProjectResourcesByRegion(&archer.Project{
Name: "mockProject",
}, "us-west-2").Return(&archer.ProjectRegionalResources{
S3Bucket: "mockBucket",
}, nil)
},
mockAddons: func(m *climocks.Mocktemplater) {
m.EXPECT().Template().Return("some data", nil)
},
mockS3Svc: func(m *climocks.MockartifactUploader) {
m.EXPECT().PutArtifact("mockBucket", "mockApp.addons.stack.yml", gomock.Any()).Return("https://mockS3DomainName/mockPath", nil)
},
wantErr: nil,
wantPath: "https://mockS3DomainName/mockPath",
},
"should return error if fail to get project resources": {
inputApp: "mockApp",
inEnvironment: &archer.Environment{
Name: "mockEnv",
Region: "us-west-2",
},
inProject: &archer.Project{
Name: "mockProject",
},
mockProjectResourcesGetter: func(m *climocks.MockprojectResourcesGetter) {
m.EXPECT().GetProjectResourcesByRegion(&archer.Project{
Name: "mockProject",
}, "us-west-2").Return(nil, mockError)
},
mockAddons: func(m *climocks.Mocktemplater) {
m.EXPECT().Template().Return("some data", nil)
},
mockS3Svc: func(m *climocks.MockartifactUploader) {},
wantErr: fmt.Errorf("get project resources: some error"),
},
"should return error if fail to upload to S3 bucket": {
inputApp: "mockApp",
inEnvironment: &archer.Environment{
Name: "mockEnv",
Region: "us-west-2",
},
inProject: &archer.Project{
Name: "mockProject",
},
mockProjectResourcesGetter: func(m *climocks.MockprojectResourcesGetter) {
m.EXPECT().GetProjectResourcesByRegion(&archer.Project{
Name: "mockProject",
}, "us-west-2").Return(&archer.ProjectRegionalResources{
S3Bucket: "mockBucket",
}, nil)
},
mockAddons: func(m *climocks.Mocktemplater) {
m.EXPECT().Template().Return("some data", nil)
},
mockS3Svc: func(m *climocks.MockartifactUploader) {
m.EXPECT().PutArtifact("mockBucket", "mockApp.addons.stack.yml", gomock.Any()).Return("", mockError)
},
wantErr: fmt.Errorf("put addons artifact to bucket mockBucket: some error"),
},
"should return empty url if the application doesn't have any addons": {
inputApp: "mockApp",
mockAddons: func(m *climocks.Mocktemplater) {
m.EXPECT().Template().Return("", &addons.ErrDirNotExist{
SvcName: "mockApp",
})
},
mockProjectResourcesGetter: func(m *climocks.MockprojectResourcesGetter) {
m.EXPECT().GetProjectResourcesByRegion(gomock.Any(), gomock.Any()).Times(0)
},
mockS3Svc: func(m *climocks.MockartifactUploader) {
m.EXPECT().PutArtifact(gomock.Any(), gomock.Any(), gomock.Any()).Times(0)
},
wantPath: "",
},
"should fail if addons cannot be retrieved from workspace": {
inputApp: "mockApp",
mockAddons: func(m *climocks.Mocktemplater) {
m.EXPECT().Template().Return("", mockError)
},
mockProjectResourcesGetter: func(m *climocks.MockprojectResourcesGetter) {
m.EXPECT().GetProjectResourcesByRegion(gomock.Any(), gomock.Any()).Times(0)
},
mockS3Svc: func(m *climocks.MockartifactUploader) {
m.EXPECT().PutArtifact(gomock.Any(), gomock.Any(), gomock.Any()).Times(0)
},
wantErr: fmt.Errorf("retrieve addons template: %w", mockError),
},
}
for name, tc := range tests {
t.Run(name, func(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
mockProjectSvc := climocks.NewMockprojectService(ctrl)
mockProjectResourcesGetter := climocks.NewMockprojectResourcesGetter(ctrl)
mockS3Svc := climocks.NewMockartifactUploader(ctrl)
mockAddons := climocks.NewMocktemplater(ctrl)
tc.mockProjectResourcesGetter(mockProjectResourcesGetter)
tc.mockS3Svc(mockS3Svc)
tc.mockAddons(mockAddons)
opts := appDeployOpts{
appDeployVars: appDeployVars{
AppName: tc.inputApp,
},
projectService: mockProjectSvc,
projectCFSvc: mockProjectResourcesGetter,
addonsSvc: mockAddons,
s3Service: mockS3Svc,
targetEnvironment: tc.inEnvironment,
targetProject: tc.inProject,
}
gotPath, gotErr := opts.pushAddonsTemplateToS3Bucket()
if gotErr != nil {
require.EqualError(t, gotErr, tc.wantErr.Error())
} else {
require.Equal(t, tc.wantPath, gotPath)
}
})
}
}
| 1 | 12,896 | I think the customer visible strings should be "Service" instead of "Svc" to make it obvious to them | aws-copilot-cli | go |
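A minimal sketch of the reviewer's suggestion applied to the test fixture above, assuming the customer-facing type string were spelled out as "Load Balanced Web Service" (assumed wording based on the review comment; the exact final string is not confirmed by the diff):

// Hypothetical revision of the test's mock manifest; the "Load Balanced Web Service"
// type value is illustrative only and mirrors the reviewer's suggestion, not the merged change.
mockManifest := []byte(`name: appA
type: 'Load Balanced Web Service'
image:
  build: appA/Dockerfile
`)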
@@ -108,13 +108,13 @@ public class JavaProcessJobTest {
props.put(AbstractProcessJob.WORKING_DIR, workingDir.getCanonicalPath());
props.put("type", "java");
props.put("fullPath", ".");
-
+
props.put(CommonJobProperties.PROJECT_NAME, "test_project");
props.put(CommonJobProperties.FLOW_ID, "test_flow");
props.put(CommonJobProperties.JOB_ID, "test_job");
props.put(CommonJobProperties.EXEC_ID, "123");
props.put(CommonJobProperties.SUBMIT_USER, "test_user");
-
+ props.put("execute.as.user", "false");
job = new JavaProcessJob("testJavaProcess", props, props, log);
} | 1 | /*
* Copyright 2014 LinkedIn Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package azkaban.jobExecutor;
import java.io.IOException;
import java.io.File;
import java.util.Date;
import java.util.Properties;
import org.apache.log4j.Logger;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.ClassRule;
import org.junit.Test;
import org.junit.Rule;
import org.junit.rules.TemporaryFolder;
import azkaban.flow.CommonJobProperties;
import azkaban.utils.Props;
public class JavaProcessJobTest {
@ClassRule
public static TemporaryFolder classTemp = new TemporaryFolder();
@Rule
public TemporaryFolder temp = new TemporaryFolder();
private JavaProcessJob job = null;
private Props props = null;
private Logger log = Logger.getLogger(JavaProcessJob.class);
private static String classPaths;
private static final String inputContent =
"Quick Change in Strategy for a Bookseller \n"
+ " By JULIE BOSMAN \n"
+ "Published: August 11, 2010 \n"
+ " \n"
+ "Twelve years later, it may be Joe Fox's turn to worry. Readers have gone from skipping small \n"
+ "bookstores to wondering if they need bookstores at all. More people are ordering books online \n"
+ "or plucking them from the best-seller bin at Wal-Mart";
private static final String errorInputContent =
inputContent
+ "\n stop_here "
+ "But the threat that has the industry and some readers the most rattled is the growth of e-books. \n"
+ " In the first five months of 2009, e-books made up 2.9 percent of trade book sales. In the same period \n"
+ "in 2010, sales of e-books, which generally cost less than hardcover books, grew to 8.5 percent, according \n"
+ "to the Association of American Publishers, spurred by sales of the Amazon Kindle and the new Apple iPad. \n"
+ "For Barnes & Noble, long the largest and most powerful bookstore chain in the country, the new competition \n"
+ "has led to declining profits and store traffic.";
private static String inputFile;
private static String errorInputFile;
private static String outputFile;
@BeforeClass
public static void init() throws IOException {
// Get the classpath
Properties prop = System.getProperties();
classPaths =
String.format("'%s'", prop.getProperty("java.class.path", null));
long time = (new Date()).getTime();
inputFile = classTemp.newFile("azkaban_input_" + time).getCanonicalPath();
errorInputFile =
classTemp.newFile("azkaban_input_error_" + time).getCanonicalPath();
outputFile = classTemp.newFile("azkaban_output_" + time).getCanonicalPath();
// Dump input files
try {
Utils.dumpFile(inputFile, inputContent);
Utils.dumpFile(errorInputFile, errorInputContent);
} catch (IOException e) {
e.printStackTrace(System.err);
Assert.fail("error in creating input file:" + e.getLocalizedMessage());
}
}
@AfterClass
public static void cleanup() {
classTemp.delete();
}
@Before
public void setUp() throws IOException {
File workingDir = temp.newFolder("testJavaProcess");
// Initialize job
props = new Props();
props.put(AbstractProcessJob.WORKING_DIR, workingDir.getCanonicalPath());
props.put("type", "java");
props.put("fullPath", ".");
props.put(CommonJobProperties.PROJECT_NAME, "test_project");
props.put(CommonJobProperties.FLOW_ID, "test_flow");
props.put(CommonJobProperties.JOB_ID, "test_job");
props.put(CommonJobProperties.EXEC_ID, "123");
props.put(CommonJobProperties.SUBMIT_USER, "test_user");
job = new JavaProcessJob("testJavaProcess", props, props, log);
}
@After
public void tearDown() {
temp.delete();
}
@Test
public void testJavaJob() throws Exception {
// initialize the Props
props.put(JavaProcessJob.JAVA_CLASS,
"azkaban.jobExecutor.WordCountLocal");
props.put("input", inputFile);
props.put("output", outputFile);
props.put("classpath", classPaths);
job.run();
}
@Test
public void testJavaJobHashmap() throws Exception {
// initialize the Props
props.put(JavaProcessJob.JAVA_CLASS,
"azkaban.executor.SleepJavaJob");
props.put("seconds", 1);
props.put("input", inputFile);
props.put("output", outputFile);
props.put("classpath", classPaths);
job.run();
}
@Test
public void testFailedJavaJob() throws Exception {
props.put(JavaProcessJob.JAVA_CLASS,
"azkaban.jobExecutor.WordCountLocal");
props.put("input", errorInputFile);
props.put("output", outputFile);
props.put("classpath", classPaths);
try {
job.run();
} catch (RuntimeException e) {
Assert.assertTrue(true);
}
}
}
| 1 | 11,808 | Explain why this property is necessary? | azkaban-azkaban | java |
@@ -141,7 +141,8 @@ func securityDeposit(ps *EVMParams, stateDB vm.StateDB, gasLimit *uint64) error
func ExecuteContracts(blk *Block, ws state.WorkingSet, bc Blockchain) {
gasLimit := GasLimit
blk.receipts = make(map[hash.Hash32B]*Receipt)
- for idx, execution := range blk.Executions {
+ _, _, executions := action.ClassifyActions(blk.Actions)
+ for idx, execution := range executions {
// TODO (zhi) log receipt to stateDB
if receipt, _ := executeContract(blk, ws, idx, execution, bc, &gasLimit); receipt != nil {
blk.receipts[execution.Hash()] = receipt | 1 | // Copyright (c) 2018 IoTeX
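The diff above replaces direct iteration over blk.Executions with action.ClassifyActions(blk.Actions), keeping only the third result. A rough sketch of what such a classifier could look like, assuming it partitions a mixed action slice into transfers, votes, and executions with a type switch; this is illustrative only, not the actual iotex-core implementation, and the action.Action interface plus Transfer/Vote/Execution types are assumptions based on the diff's usage:

// classifyActions is a sketch of an action classifier of the shape used in the diff.
// It walks a mixed slice of actions and buckets each concrete type separately.
func classifyActions(actions []action.Action) ([]*action.Transfer, []*action.Vote, []*action.Execution) {
	var transfers []*action.Transfer
	var votes []*action.Vote
	var executions []*action.Execution
	for _, act := range actions {
		switch a := act.(type) {
		case *action.Transfer:
			transfers = append(transfers, a)
		case *action.Vote:
			votes = append(votes, a)
		case *action.Execution:
			executions = append(executions, a)
		}
	}
	return transfers, votes, executions
}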
// This is an alpha (internal) release and is not suitable for production. This source code is provided 'as is' and no
// warranties are given as to title or non-infringement, merchantability or fitness for purpose and, to the extent
// permitted by law, all liability for your use of the code is disclaimed. This source code is governed by Apache
// License 2.0 that can be found in the LICENSE file.
package blockchain
import (
"math"
"math/big"
"github.com/CoderZhi/go-ethereum/common"
"github.com/CoderZhi/go-ethereum/core/vm"
"github.com/CoderZhi/go-ethereum/params"
"github.com/pkg/errors"
"github.com/iotexproject/iotex-core/action"
"github.com/iotexproject/iotex-core/address"
"github.com/iotexproject/iotex-core/iotxaddress"
"github.com/iotexproject/iotex-core/logger"
"github.com/iotexproject/iotex-core/pkg/hash"
"github.com/iotexproject/iotex-core/pkg/keypair"
"github.com/iotexproject/iotex-core/state"
)
// ErrInconsistentNonce is the error that the nonce is different from executor's nonce
var ErrInconsistentNonce = errors.New("Nonce is not identical to executor nonce")
// CanTransfer checks whether the from account has enough balance
func CanTransfer(db vm.StateDB, fromHash common.Address, balance *big.Int) bool {
return db.GetBalance(fromHash).Cmp(balance) > 0
}
// MakeTransfer transfers account
func MakeTransfer(db vm.StateDB, fromHash, toHash common.Address, amount *big.Int) {
db.SubBalance(fromHash, amount)
db.AddBalance(toHash, amount)
}
const (
// FailureStatus is the status that contract execution failed
FailureStatus = uint64(0)
// SuccessStatus is the status that contract execution success
SuccessStatus = uint64(1)
)
// EVMParams is the context and parameters
type EVMParams struct {
context vm.Context
nonce uint64
executorRawAddress string
amount *big.Int
contract *common.Address
gas uint64
data []byte
}
// NewEVMParams creates a new context for use in the EVM.
func NewEVMParams(blk *Block, execution *action.Execution, stateDB *EVMStateDBAdapter) (*EVMParams, error) {
// If we don't have an explicit author (i.e. not mining), extract from the header
/*
var beneficiary common.Address
if author == nil {
beneficiary, _ = chain.Engine().Author(header) // Ignore error, we're past header validation
} else {
beneficiary = *author
}
*/
executorHash, err := iotxaddress.GetPubkeyHash(execution.Executor())
if err != nil {
return nil, err
}
executorAddr := common.BytesToAddress(executorHash)
var contractAddrPointer *common.Address
if execution.Contract() != action.EmptyAddress {
contractHash, err := iotxaddress.GetPubkeyHash(execution.Contract())
if err != nil {
return nil, err
}
contractAddr := common.BytesToAddress(contractHash)
contractAddrPointer = &contractAddr
}
producerHash := keypair.HashPubKey(blk.Header.Pubkey)
producer := common.BytesToAddress(producerHash[:])
context := vm.Context{
CanTransfer: CanTransfer,
Transfer: MakeTransfer,
GetHash: GetHashFn(stateDB),
Origin: executorAddr,
Coinbase: producer,
BlockNumber: new(big.Int).SetUint64(blk.Height()),
Time: new(big.Int).SetInt64(blk.Header.Timestamp().Unix()),
Difficulty: new(big.Int).SetUint64(uint64(50)),
GasLimit: GasLimit,
GasPrice: execution.GasPrice(),
}
return &EVMParams{
context,
execution.Nonce(),
execution.Executor(),
execution.Amount(),
contractAddrPointer,
execution.GasLimit(),
execution.Data(),
}, nil
}
// GetHashFn returns a GetHashFunc which retrieves hashes by number
func GetHashFn(stateDB *EVMStateDBAdapter) func(n uint64) common.Hash {
return func(n uint64) common.Hash {
hash, err := stateDB.bc.GetHashByHeight(stateDB.blockHeight - n)
if err != nil {
return common.BytesToHash(hash[:])
}
return common.Hash{}
}
}
func securityDeposit(ps *EVMParams, stateDB vm.StateDB, gasLimit *uint64) error {
executorNonce := stateDB.GetNonce(ps.context.Origin)
if executorNonce > ps.nonce {
logger.Error().Msgf("Nonce on %v: %d vs %d", ps.context.Origin, executorNonce, ps.nonce)
return ErrInconsistentNonce
}
if *gasLimit < ps.gas {
return action.ErrHitGasLimit
}
maxGasValue := new(big.Int).Mul(new(big.Int).SetUint64(ps.gas), ps.context.GasPrice)
if stateDB.GetBalance(ps.context.Origin).Cmp(maxGasValue) < 0 {
return action.ErrInsufficientBalanceForGas
}
*gasLimit -= ps.gas
stateDB.SubBalance(ps.context.Origin, maxGasValue)
return nil
}
// ExecuteContracts process the contracts in a block
func ExecuteContracts(blk *Block, ws state.WorkingSet, bc Blockchain) {
gasLimit := GasLimit
blk.receipts = make(map[hash.Hash32B]*Receipt)
for idx, execution := range blk.Executions {
// TODO (zhi) log receipt to stateDB
if receipt, _ := executeContract(blk, ws, idx, execution, bc, &gasLimit); receipt != nil {
blk.receipts[execution.Hash()] = receipt
}
}
}
// executeContract processes a transfer which contains a contract
func executeContract(blk *Block, ws state.WorkingSet, idx int, execution *action.Execution, bc Blockchain, gasLimit *uint64) (*Receipt, error) {
stateDB := NewEVMStateDBAdapter(bc, ws, blk.Height(), blk.HashBlock(), uint(idx), execution.Hash())
ps, err := NewEVMParams(blk, execution, stateDB)
if err != nil {
return nil, err
}
retval, depositGas, remainingGas, contractAddress, err := executeInEVM(ps, stateDB, gasLimit)
receipt := &Receipt{
ReturnValue: retval,
GasConsumed: ps.gas - remainingGas,
Hash: execution.Hash(),
ContractAddress: contractAddress,
}
if err != nil {
receipt.Status = FailureStatus
} else {
receipt.Status = SuccessStatus
}
if remainingGas > 0 {
*gasLimit += remainingGas
remainingValue := new(big.Int).Mul(new(big.Int).SetUint64(remainingGas), ps.context.GasPrice)
stateDB.AddBalance(ps.context.Origin, remainingValue)
}
if depositGas-remainingGas > 0 {
gasValue := new(big.Int).Mul(new(big.Int).SetUint64(depositGas-remainingGas), ps.context.GasPrice)
stateDB.AddBalance(ps.context.Coinbase, gasValue)
}
receipt.Logs = stateDB.Logs()
logger.Debug().Msgf("Receipt: %+v, %v", receipt, err)
return receipt, err
}
func getChainConfig() *params.ChainConfig {
var chainConfig params.ChainConfig
// chainConfig.ChainID
chainConfig.ConstantinopleBlock = new(big.Int).SetUint64(0) // Constantinople switch block (nil = no fork, 0 = already activated)
return &chainConfig
}
func executeInEVM(evmParams *EVMParams, stateDB *EVMStateDBAdapter, gasLimit *uint64) ([]byte, uint64, uint64, string, error) {
remainingGas := evmParams.gas
if err := securityDeposit(evmParams, stateDB, gasLimit); err != nil {
return nil, 0, 0, action.EmptyAddress, err
}
var config vm.Config
chainConfig := getChainConfig()
evm := vm.NewEVM(evmParams.context, stateDB, chainConfig, config)
intriGas, err := intrinsicGas(evmParams.data)
if err != nil {
return nil, evmParams.gas, remainingGas, action.EmptyAddress, err
}
if remainingGas < intriGas {
return nil, evmParams.gas, remainingGas, action.EmptyAddress, action.ErrOutOfGas
}
contractRawAddress := action.EmptyAddress
remainingGas -= intriGas
executor := vm.AccountRef(evmParams.context.Origin)
var ret []byte
if evmParams.contract == nil {
// create contract
var evmContractAddress common.Address
ret, evmContractAddress, remainingGas, err = evm.Create(executor, evmParams.data, remainingGas, evmParams.amount)
logger.Warn().Hex("contract addrHash", evmContractAddress[:]).Msg("evm.Create")
if err != nil {
return nil, evmParams.gas, remainingGas, action.EmptyAddress, err
}
contractAddress := address.New(stateDB.bc.ChainID(), evmContractAddress.Bytes())
contractRawAddress = contractAddress.IotxAddress()
} else {
// process contract
ret, remainingGas, err = evm.Call(executor, *evmParams.contract, evmParams.data, remainingGas, evmParams.amount)
}
if err == nil {
err = stateDB.Error()
}
if err == vm.ErrInsufficientBalance {
return nil, evmParams.gas, remainingGas, action.EmptyAddress, err
}
if err != nil {
// TODO (zhi) should we refund if any error
return nil, evmParams.gas, 0, contractRawAddress, err
}
// TODO (zhi) figure out what the following function does
// stateDB.Finalise(true)
return ret, evmParams.gas, remainingGas, contractRawAddress, nil
}
// intrinsicGas returns the intrinsic gas of an execution
func intrinsicGas(data []byte) (uint64, error) {
dataSize := uint64(len(data))
if (math.MaxInt64-action.ExecutionBaseIntrinsicGas)/action.ExecutionDataGas < dataSize {
return 0, action.ErrOutOfGas
}
return dataSize*action.ExecutionDataGas + action.ExecutionBaseIntrinsicGas, nil
}
 | 1 | 12,878 | It should accept the executions as input. | iotexproject-iotex-core | go
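A minimal sketch of the change the reviewer is asking for, in Go. It assumes the surrounding blockchain package context (the Block, Blockchain, Receipt, GasLimit, and executeContract identifiers defined in the file above) and is only an illustration, not the project's actual follow-up commit:

// ExecuteContracts takes the already-classified executions as an explicit
// argument instead of deriving them from blk.Actions inside the function.
func ExecuteContracts(blk *Block, ws state.WorkingSet, bc Blockchain, executions []*action.Execution) {
	gasLimit := GasLimit
	blk.receipts = make(map[hash.Hash32B]*Receipt)
	for idx, execution := range executions {
		// TODO (zhi) log receipt to stateDB
		if receipt, _ := executeContract(blk, ws, idx, execution, bc, &gasLimit); receipt != nil {
			blk.receipts[execution.Hash()] = receipt
		}
	}
}

The caller would then classify the block's actions once and hand the slice over, for example: _, _, executions := action.ClassifyActions(blk.Actions) followed by ExecuteContracts(blk, ws, bc, executions).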
@@ -25,7 +25,10 @@ def single_gpu_test(model,
with torch.no_grad():
result = model(return_loss=False, rescale=True, **data)
+ batch_size = len(result)
if show or out_dir:
+ assert batch_size == 1, 'show or out during test only support ' \
+ 'batch size of 1'
img_tensor = data['img'][0]
img_metas = data['img_metas'][0].data[0]
imgs = tensor2imgs(img_tensor, **img_metas[0]['img_norm_cfg']) | 1 | import os.path as osp
import pickle
import shutil
import tempfile
import time
import mmcv
import torch
import torch.distributed as dist
from mmcv.runner import get_dist_info
from mmdet.core import encode_mask_results, tensor2imgs
def single_gpu_test(model,
data_loader,
show=False,
out_dir=None,
show_score_thr=0.3):
model.eval()
results = []
dataset = data_loader.dataset
prog_bar = mmcv.ProgressBar(len(dataset))
for i, data in enumerate(data_loader):
with torch.no_grad():
result = model(return_loss=False, rescale=True, **data)
if show or out_dir:
img_tensor = data['img'][0]
img_metas = data['img_metas'][0].data[0]
imgs = tensor2imgs(img_tensor, **img_metas[0]['img_norm_cfg'])
assert len(imgs) == len(img_metas)
for img, img_meta in zip(imgs, img_metas):
h, w, _ = img_meta['img_shape']
img_show = img[:h, :w, :]
ori_h, ori_w = img_meta['ori_shape'][:-1]
img_show = mmcv.imresize(img_show, (ori_w, ori_h))
if out_dir:
out_file = osp.join(out_dir, img_meta['ori_filename'])
else:
out_file = None
model.module.show_result(
img_show,
result,
show=show,
out_file=out_file,
score_thr=show_score_thr)
# encode mask results
if isinstance(result[0], tuple):
result = [(bbox_results, encode_mask_results(mask_results))
for bbox_results, mask_results in result]
results.extend(result)
batch_size = len(result)
for _ in range(batch_size):
prog_bar.update()
return results
def multi_gpu_test(model, data_loader, tmpdir=None, gpu_collect=False):
"""Test model with multiple gpus.
This method tests model with multiple gpus and collects the results
under two different modes: gpu and cpu modes. By setting 'gpu_collect=True'
it encodes results to gpu tensors and use gpu communication for results
collection. On cpu mode it saves the results on different gpus to 'tmpdir'
and collects them by the rank 0 worker.
Args:
model (nn.Module): Model to be tested.
data_loader (nn.Dataloader): Pytorch data loader.
tmpdir (str): Path of directory to save the temporary results from
different gpus under cpu mode.
gpu_collect (bool): Option to use either gpu or cpu to collect results.
Returns:
list: The prediction results.
"""
model.eval()
results = []
dataset = data_loader.dataset
rank, world_size = get_dist_info()
if rank == 0:
prog_bar = mmcv.ProgressBar(len(dataset))
time.sleep(2) # This line can prevent deadlock problem in some cases.
for i, data in enumerate(data_loader):
with torch.no_grad():
result = model(return_loss=False, rescale=True, **data)
# encode mask results
if isinstance(result[0], tuple):
result = [(bbox_results, encode_mask_results(mask_results))
for bbox_results, mask_results in result]
results.extend(result)
if rank == 0:
batch_size = len(result)
for _ in range(batch_size * world_size):
prog_bar.update()
# collect results from all ranks
if gpu_collect:
results = collect_results_gpu(results, len(dataset))
else:
results = collect_results_cpu(results, len(dataset), tmpdir)
return results
def collect_results_cpu(result_part, size, tmpdir=None):
rank, world_size = get_dist_info()
# create a tmp dir if it is not specified
if tmpdir is None:
MAX_LEN = 512
# 32 is whitespace
dir_tensor = torch.full((MAX_LEN, ),
32,
dtype=torch.uint8,
device='cuda')
if rank == 0:
tmpdir = tempfile.mkdtemp()
tmpdir = torch.tensor(
bytearray(tmpdir.encode()), dtype=torch.uint8, device='cuda')
dir_tensor[:len(tmpdir)] = tmpdir
dist.broadcast(dir_tensor, 0)
tmpdir = dir_tensor.cpu().numpy().tobytes().decode().rstrip()
else:
mmcv.mkdir_or_exist(tmpdir)
# dump the part result to the dir
mmcv.dump(result_part, osp.join(tmpdir, f'part_{rank}.pkl'))
dist.barrier()
# collect all parts
if rank != 0:
return None
else:
# load results of all parts from tmp dir
part_list = []
for i in range(world_size):
part_file = osp.join(tmpdir, f'part_{i}.pkl')
part_list.append(mmcv.load(part_file))
# sort the results
ordered_results = []
for res in zip(*part_list):
ordered_results.extend(list(res))
# the dataloader may pad some samples
ordered_results = ordered_results[:size]
# remove tmp dir
shutil.rmtree(tmpdir)
return ordered_results
def collect_results_gpu(result_part, size):
rank, world_size = get_dist_info()
# dump result part to tensor with pickle
part_tensor = torch.tensor(
bytearray(pickle.dumps(result_part)), dtype=torch.uint8, device='cuda')
# gather all result part tensor shape
shape_tensor = torch.tensor(part_tensor.shape, device='cuda')
shape_list = [shape_tensor.clone() for _ in range(world_size)]
dist.all_gather(shape_list, shape_tensor)
# padding result part tensor to max length
shape_max = torch.tensor(shape_list).max()
part_send = torch.zeros(shape_max, dtype=torch.uint8, device='cuda')
part_send[:shape_tensor[0]] = part_tensor
part_recv_list = [
part_tensor.new_zeros(shape_max) for _ in range(world_size)
]
# gather all result part
dist.all_gather(part_recv_list, part_send)
if rank == 0:
part_list = []
for recv, shape in zip(part_recv_list, shape_list):
part_list.append(
pickle.loads(recv[:shape[0]].cpu().numpy().tobytes()))
# sort the results
ordered_results = []
for res in zip(*part_list):
ordered_results.extend(list(res))
# the dataloader may pad some samples
ordered_results = ordered_results[:size]
return ordered_results
| 1 | 21,295 | Is this limitation necessary? | open-mmlab-mmdetection | py |
@@ -81,9 +81,13 @@ type IssuerSpec struct {
}
type IssuerConfig struct {
- ACME *ACMEIssuer `json:"acme,omitempty"`
- CA *CAIssuer `json:"ca,omitempty"`
- Vault *VaultIssuer `json:"vault,omitempty"`
+ ACME *ACMEIssuer `json:"acme,omitempty"`
+ CA *CAIssuer `json:"ca,omitempty"`
+ Vault *VaultIssuer `json:"vault,omitempty"`
+ SelfSigned *SelfSignedIssuer `json:"selfSigned,omitempty"`
+}
+
+type SelfSignedIssuer struct {
}
type VaultIssuer struct { | 1 | /*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
const (
AltNamesAnnotationKey = "certmanager.k8s.io/alt-names"
CommonNameAnnotationKey = "certmanager.k8s.io/common-name"
IssuerNameAnnotationKey = "certmanager.k8s.io/issuer-name"
IssuerKindAnnotationKey = "certmanager.k8s.io/issuer-kind"
)
// +genclient
// +genclient:nonNamespaced
// +k8s:openapi-gen=true
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +resource:path=clusterissuers
type ClusterIssuer struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec IssuerSpec `json:"spec,omitempty"`
Status IssuerStatus `json:"status,omitempty"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ClusterIssuerList is a list of Issuers
type ClusterIssuerList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata"`
Items []ClusterIssuer `json:"items"`
}
// +genclient
// +k8s:openapi-gen=true
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +resource:path=issuers
type Issuer struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec IssuerSpec `json:"spec,omitempty"`
Status IssuerStatus `json:"status,omitempty"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// IssuerList is a list of Issuers
type IssuerList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata"`
Items []Issuer `json:"items"`
}
// IssuerSpec is the specification of an Issuer. This includes any
// configuration required for the issuer.
type IssuerSpec struct {
IssuerConfig `json:",inline"`
}
type IssuerConfig struct {
ACME *ACMEIssuer `json:"acme,omitempty"`
CA *CAIssuer `json:"ca,omitempty"`
Vault *VaultIssuer `json:"vault,omitempty"`
}
type VaultIssuer struct {
// Vault authentication
Auth VaultAuth `json:"auth"`
// Server is the vault connection address
Server string `json:"server"`
// Vault URL path to the certificate role
Path string `json:"path"`
}
// Vault authentication can be configured:
// - With a secret containing a token. Cert-manager is using this token as-is.
// - With a secret containing a AppRole. This AppRole is used to authenticate to
// Vault and retrieve a token.
type VaultAuth struct {
// This Secret contains the Vault token key
TokenSecretRef SecretKeySelector `json:"tokenSecretRef,omitempty"`
// This Secret contains a AppRole and Secret
AppRole VaultAppRole `json:"appRole,omitempty"`
}
type VaultAppRole struct {
RoleId string `json:"roleId"`
SecretRef SecretKeySelector `json:"secretRef"`
}
type CAIssuer struct {
// SecretName is the name of the secret used to sign Certificates issued
// by this Issuer.
SecretName string `json:"secretName"`
}
// ACMEIssuer contains the specification for an ACME issuer
type ACMEIssuer struct {
// Email is the email for this account
Email string `json:"email"`
// Server is the ACME server URL
Server string `json:"server"`
// If true, skip verifying the ACME server TLS certificate
SkipTLSVerify bool `json:"skipTLSVerify,omitempty"`
// PrivateKey is the name of a secret containing the private key for this
// user account.
PrivateKey SecretKeySelector `json:"privateKeySecretRef"`
// HTTP01 config
HTTP01 *ACMEIssuerHTTP01Config `json:"http01,omitempty"`
// DNS-01 config
DNS01 *ACMEIssuerDNS01Config `json:"dns01,omitempty"`
}
type ACMEIssuerHTTP01Config struct {
}
// ACMEIssuerDNS01Config is a structure containing the ACME DNS configuration
// options
type ACMEIssuerDNS01Config struct {
Providers []ACMEIssuerDNS01Provider `json:"providers"`
}
type ACMEIssuerDNS01Provider struct {
Name string `json:"name"`
Akamai *ACMEIssuerDNS01ProviderAkamai `json:"akamai,omitempty"`
CloudDNS *ACMEIssuerDNS01ProviderCloudDNS `json:"clouddns,omitempty"`
Cloudflare *ACMEIssuerDNS01ProviderCloudflare `json:"cloudflare,omitempty"`
Route53 *ACMEIssuerDNS01ProviderRoute53 `json:"route53,omitempty"`
AzureDNS *ACMEIssuerDNS01ProviderAzureDNS `json:"azuredns,omitempty"`
}
// ACMEIssuerDNS01ProviderAkamai is a structure containing the DNS
// configuration for Akamai DNS—Zone Record Management API
type ACMEIssuerDNS01ProviderAkamai struct {
ServiceConsumerDomain string `json:"serviceConsumerDomain"`
ClientToken SecretKeySelector `json:"clientTokenSecretRef"`
ClientSecret SecretKeySelector `json:"clientSecretSecretRef"`
AccessToken SecretKeySelector `json:"accessTokenSecretRef"`
}
// ACMEIssuerDNS01ProviderCloudDNS is a structure containing the DNS
// configuration for Google Cloud DNS
type ACMEIssuerDNS01ProviderCloudDNS struct {
ServiceAccount SecretKeySelector `json:"serviceAccountSecretRef"`
Project string `json:"project"`
}
// ACMEIssuerDNS01ProviderCloudflare is a structure containing the DNS
// configuration for Cloudflare
type ACMEIssuerDNS01ProviderCloudflare struct {
Email string `json:"email"`
APIKey SecretKeySelector `json:"apiKeySecretRef"`
}
// ACMEIssuerDNS01ProviderRoute53 is a structure containing the Route 53
// configuration for AWS
type ACMEIssuerDNS01ProviderRoute53 struct {
AccessKeyID string `json:"accessKeyID"`
SecretAccessKey SecretKeySelector `json:"secretAccessKeySecretRef"`
HostedZoneID string `json:"hostedZoneID"`
Region string `json:"region"`
}
// ACMEIssuerDNS01ProviderAzureDNS is a structure containing the
// configuration for Azure DNS
type ACMEIssuerDNS01ProviderAzureDNS struct {
ClientID string `json:"clientID"`
ClientSecret SecretKeySelector `json:"clientSecretSecretRef"`
SubscriptionID string `json:"subscriptionID"`
TenantID string `json:"tenantID"`
ResourceGroupName string `json:"resourceGroupName"`
// + optional
HostedZoneName string `json:"hostedZoneName"`
}
// IssuerStatus contains status information about an Issuer
type IssuerStatus struct {
Conditions []IssuerCondition `json:"conditions"`
ACME *ACMEIssuerStatus `json:"acme,omitempty"`
}
// IssuerCondition contains condition information for an Issuer.
type IssuerCondition struct {
// Type of the condition, currently ('Ready').
Type IssuerConditionType `json:"type"`
// Status of the condition, one of ('True', 'False', 'Unknown').
Status ConditionStatus `json:"status"`
// LastTransitionTime is the timestamp corresponding to the last status
// change of this condition.
LastTransitionTime metav1.Time `json:"lastTransitionTime"`
// Reason is a brief machine readable explanation for the condition's last
// transition.
Reason string `json:"reason"`
// Message is a human readable description of the details of the last
// transition, complementing reason.
Message string `json:"message"`
}
// IssuerConditionType represents an Issuer condition value.
type IssuerConditionType string
const (
// IssuerConditionReady represents the fact that a given Issuer condition
// is in ready state.
IssuerConditionReady IssuerConditionType = "Ready"
)
// ConditionStatus represents a condition's status.
type ConditionStatus string
// These are valid condition statuses. "ConditionTrue" means a resource is in
// the condition; "ConditionFalse" means a resource is not in the condition;
// "ConditionUnknown" means kubernetes can't decide if a resource is in the
// condition or not. In the future, we could add other intermediate
// conditions, e.g. ConditionDegraded.
const (
// ConditionTrue represents the fact that a given condition is true
ConditionTrue ConditionStatus = "True"
// ConditionFalse represents the fact that a given condition is false
ConditionFalse ConditionStatus = "False"
// ConditionUnknown represents the fact that a given condition is unknown
ConditionUnknown ConditionStatus = "Unknown"
)
type ACMEIssuerStatus struct {
// URI is the unique account identifier, which can also be used to retrieve
// account details from the CA
URI string `json:"uri"`
}
// +genclient
// +k8s:openapi-gen=true
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +resource:path=certificates
// Certificate is a type to represent a Certificate from ACME
type Certificate struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec CertificateSpec `json:"spec,omitempty"`
Status CertificateStatus `json:"status,omitempty"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// CertificateList is a list of Certificates
type CertificateList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata"`
Items []Certificate `json:"items"`
}
// CertificateSpec defines the desired state of Certificate
type CertificateSpec struct {
// CommonName is a common name to be used on the Certificate
CommonName string `json:"commonName"`
// DNSNames is a list of subject alt names to be used on the Certificate
DNSNames []string `json:"dnsNames"`
// SecretName is the name of the secret resource to store this secret in
SecretName string `json:"secretName"`
// IssuerRef is a reference to the issuer for this certificate. If the
// namespace field is not set, it is assumed to be in the same namespace
// as the certificate. If the namespace field is set to the empty value "",
// a ClusterIssuer of the given name will be used. Any other value is
// invalid.
IssuerRef ObjectReference `json:"issuerRef"`
ACME *ACMECertificateConfig `json:"acme,omitempty"`
}
// ACMEConfig contains the configuration for the ACME certificate provider
type ACMECertificateConfig struct {
Config []ACMECertificateDomainConfig `json:"config"`
}
type ACMECertificateDomainConfig struct {
Domains []string `json:"domains"`
ACMESolverConfig `json:",inline"`
}
type ACMESolverConfig struct {
HTTP01 *ACMECertificateHTTP01Config `json:"http01,omitempty"`
DNS01 *ACMECertificateDNS01Config `json:"dns01,omitempty"`
}
type ACMECertificateHTTP01Config struct {
Ingress string `json:"ingress"`
IngressClass *string `json:"ingressClass,omitempty"`
}
type ACMECertificateDNS01Config struct {
Provider string `json:"provider"`
}
// CertificateStatus defines the observed state of Certificate
type CertificateStatus struct {
Conditions []CertificateCondition `json:"conditions,omitempty"`
ACME *CertificateACMEStatus `json:"acme,omitempty"`
}
// CertificateCondition contains condition information for an Certificate.
type CertificateCondition struct {
// Type of the condition, currently ('Ready').
Type CertificateConditionType `json:"type"`
// Status of the condition, one of ('True', 'False', 'Unknown').
Status ConditionStatus `json:"status"`
// LastTransitionTime is the timestamp corresponding to the last status
// change of this condition.
LastTransitionTime metav1.Time `json:"lastTransitionTime"`
// Reason is a brief machine readable explanation for the condition's last
// transition.
Reason string `json:"reason"`
// Message is a human readable description of the details of the last
// transition, complementing reason.
Message string `json:"message"`
}
// CertificateConditionType represents an Certificate condition value.
type CertificateConditionType string
const (
// CertificateConditionReady represents the fact that a given Certificate condition
// is in ready state.
CertificateConditionReady CertificateConditionType = "Ready"
// CertificateConditionValidationFailed is used to indicate whether a
// validation for a Certificate has failed.
// This is currently used by the ACME issuer to track when the last
// validation was attempted.
CertificateConditionValidationFailed CertificateConditionType = "ValidateFailed"
)
// CertificateACMEStatus holds the status for an ACME issuer
type CertificateACMEStatus struct {
// Order contains details about the current in-progress ACME Order.
Order ACMEOrderStatus `json:"order,omitempty"`
}
type ACMEOrderStatus struct {
// The URL that can be used to get information about the ACME order.
URL string `json:"url"`
Challenges []ACMEOrderChallenge `json:"challenges,omitempty"`
}
type ACMEOrderChallenge struct {
// The URL that can be used to get information about the ACME challenge.
URL string `json:"url"`
// The URL that can be used to get information about the ACME authorization
// associated with the challenge.
AuthzURL string `json:"authzURL"`
// Type of ACME challenge
// Either http-01 or dns-01
Type string `json:"type"`
// Domain this challenge corresponds to
Domain string `json:"domain"`
// Challenge token for this challenge
Token string `json:"token"`
// Challenge key for this challenge
Key string `json:"key"`
// Set to true if this challenge is for a wildcard domain
Wildcard bool `json:"wildcard"`
// Configuration used to present this challenge
ACMESolverConfig `json:",inline"`
}
type LocalObjectReference struct {
// Name of the referent.
// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
// TODO: Add other useful fields. apiVersion, kind, uid?
Name string `json:"name,omitempty"`
}
// ObjectReference is a reference to an object. If the namespace field is set,
// it is assumed to be in a namespace
type ObjectReference struct {
Name string `json:"name"`
Kind string `json:"kind,omitempty"`
}
const (
ClusterIssuerKind = "ClusterIssuer"
IssuerKind = "Issuer"
)
type SecretKeySelector struct {
// The name of the secret in the pod's namespace to select from.
LocalObjectReference `json:",inline" protobuf:"bytes,1,opt,name=localObjectReference"`
// The key of the secret to select from. Must be a valid secret key.
Key string `json:"key" protobuf:"bytes,2,opt,name=key"`
}
| 1 | 12,694 | :question: Should there be validation to ensure that only one of these attributes is used? | jetstack-cert-manager | go |
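One way to act on the validation question above, sketched in Go. The function name and error message are assumptions for illustration only, not cert-manager's actual validation code; the sketch assumes the v1alpha1 package context above plus an fmt import:

// validateIssuerConfig returns an error unless exactly one issuer backend
// (ACME, CA, Vault or SelfSigned) is configured on the IssuerConfig.
func validateIssuerConfig(cfg *IssuerConfig) error {
	configured := 0
	if cfg.ACME != nil {
		configured++
	}
	if cfg.CA != nil {
		configured++
	}
	if cfg.Vault != nil {
		configured++
	}
	if cfg.SelfSigned != nil {
		configured++
	}
	if configured != 1 {
		return fmt.Errorf("exactly one issuer type must be specified, found %d", configured)
	}
	return nil
}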
@@ -33,11 +33,11 @@
*/
namespace VuFind\Search\Solr;
+use Interop\Container\ContainerInterface;
use Laminas\EventManager\EventInterface;
-
use Laminas\EventManager\SharedEventManagerInterface;
-use Laminas\ServiceManager\ServiceLocatorInterface;
-use VuFindSearch\Backend\BackendInterface;
+
+use VuFindSearch\Backend\Solr\Backend;
/**
* Solr merged record handling listener. | 1 | <?php
/**
* Solr deduplication (merged records) listener.
*
* See https://vufind.org/wiki/indexing:deduplication for details on how this is
* used.
*
* PHP version 7
*
* Copyright (C) Villanova University 2013.
* Copyright (C) The National Library of Finland 2013-2020.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2,
* as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
* @category VuFind
* @package Search
* @author David Maus <[email protected]>
* @author Ere Maijala <[email protected]>
* @license http://opensource.org/licenses/gpl-2.0.php GNU General Public License
* @link https://vufind.org Main Site
*/
namespace VuFind\Search\Solr;
use Laminas\EventManager\EventInterface;
use Laminas\EventManager\SharedEventManagerInterface;
use Laminas\ServiceManager\ServiceLocatorInterface;
use VuFindSearch\Backend\BackendInterface;
/**
* Solr merged record handling listener.
*
* @category VuFind
* @package Search
* @author David Maus <[email protected]>
* @author Ere Maijala <[email protected]>
* @license http://opensource.org/licenses/gpl-2.0.php GNU General Public License
* @link https://vufind.org Main Site
*/
class DeduplicationListener
{
/**
* Backend.
*
* @var BackendInterface
*/
protected $backend;
/**
* Superior service manager.
*
* @var ServiceLocatorInterface
*/
protected $serviceLocator;
/**
* Search configuration file identifier.
*
* @var string
*/
protected $searchConfig;
/**
* Data source configuration file identifier.
*
* @var string
*/
protected $dataSourceConfig;
/**
* Whether deduplication is enabled.
*
* @var bool
*/
protected $enabled;
/**
* Constructor.
*
* @param BackendInterface $backend Search backend
* @param ServiceLocatorInterface $serviceLocator Service locator
* @param string $searchConfig Search config file id
* @param string $dataSourceConfig Data source file id
* @param bool $enabled Whether deduplication is
* enabled
*
* @return void
*/
public function __construct(
BackendInterface $backend,
ServiceLocatorInterface $serviceLocator,
$searchConfig, $dataSourceConfig = 'datasources', $enabled = true
) {
$this->backend = $backend;
$this->serviceLocator = $serviceLocator;
$this->searchConfig = $searchConfig;
$this->dataSourceConfig = $dataSourceConfig;
$this->enabled = $enabled;
}
/**
* Attach listener to shared event manager.
*
* @param SharedEventManagerInterface $manager Shared event manager
*
* @return void
*/
public function attach(
SharedEventManagerInterface $manager
) {
$manager->attach('VuFind\Search', 'pre', [$this, 'onSearchPre']);
$manager->attach('VuFind\Search', 'post', [$this, 'onSearchPost']);
}
/**
* Set up filter for excluding merge children.
*
* @param EventInterface $event Event
*
* @return EventInterface
*/
public function onSearchPre(EventInterface $event)
{
$backend = $event->getTarget();
if ($backend === $this->backend) {
$params = $event->getParam('params');
$context = $event->getParam('context');
$contexts = ['search', 'similar', 'getids', 'workExpressions'];
if ($params && in_array($context, $contexts)) {
// If deduplication is enabled, filter out merged child records,
// otherwise filter out dedup records.
if ($this->enabled && 'getids' !== $context
&& !$this->hasChildFilter($params)
) {
$fq = '-merged_child_boolean:true';
if ($context == 'similar' && $id = $event->getParam('id')) {
$fq .= ' AND -local_ids_str_mv:"'
. addcslashes($id, '"') . '"';
}
} else {
$fq = '-merged_boolean:true';
}
$params->add('fq', $fq);
}
}
return $event;
}
/**
* Check search parameters for child records filter
*
* @param array|ArrayAccess $params Search parameters
*
* @return bool
*/
public function hasChildFilter($params)
{
$filters = $params->get('fq');
return $filters != null && in_array('merged_child_boolean:true', $filters);
}
/**
* Fetch appropriate dedup child
*
* @param EventInterface $event Event
*
* @return EventInterface
*/
public function onSearchPost(EventInterface $event)
{
// Inject deduplication details into record objects:
$backend = $event->getParam('backend');
if ($backend != $this->backend->getIdentifier()) {
return $event;
}
$context = $event->getParam('context');
$contexts = ['search', 'similar', 'workExpressions'];
if ($this->enabled && in_array($context, $contexts)) {
$this->fetchLocalRecords($event);
}
return $event;
}
/**
* Fetch local records for all the found dedup records
*
* @param EventInterface $event Event
*
* @return void
*/
protected function fetchLocalRecords($event)
{
$config = $this->serviceLocator->get(\VuFind\Config\PluginManager::class);
$searchConfig = $config->get($this->searchConfig);
$dataSourceConfig = $config->get($this->dataSourceConfig);
$recordSources = !empty($searchConfig->Records->sources)
? explode(',', $searchConfig->Records->sources)
: [];
$sourcePriority = $this->determineSourcePriority($recordSources);
$params = $event->getParam('params');
$buildingPriority = $this->determineBuildingPriority($params);
$idList = [];
// Find out the best records and list their IDs:
$result = $event->getTarget();
foreach ($result->getRecords() as $record) {
$fields = $record->getRawData();
if (!isset($fields['merged_boolean'])) {
continue;
}
$localIds = $fields['local_ids_str_mv'];
$dedupId = $localIds[0];
$priority = 99999;
$undefPriority = 99999;
// Find the document that matches the source priority best:
$dedupData = [];
foreach ($localIds as $localId) {
$localPriority = null;
list($source) = explode('.', $localId, 2);
// Ignore ID if source is not in the list of allowed record sources:
if ($recordSources && !in_array($source, $recordSources)) {
continue;
}
if (!empty($buildingPriority)) {
if (isset($buildingPriority[$source])) {
$localPriority = -$buildingPriority[$source];
} elseif (isset($dataSourceConfig[$source]['institution'])) {
$institution = $dataSourceConfig[$source]['institution'];
if (isset($buildingPriority[$institution])) {
$localPriority = -$buildingPriority[$institution];
}
}
}
if (!isset($localPriority)) {
if (isset($sourcePriority[$source])) {
$localPriority = $sourcePriority[$source];
} else {
$localPriority = ++$undefPriority;
}
}
if (isset($localPriority) && $localPriority < $priority) {
$dedupId = $localId;
$priority = $localPriority;
}
$dedupData[$source] = [
'id' => $localId,
'priority' => $localPriority ?? 99999
];
}
$fields['dedup_id'] = $dedupId;
$idList[] = $dedupId;
// Sort dedupData by priority:
uasort(
$dedupData,
function ($a, $b) {
return $a['priority'] - $b['priority'];
}
);
$fields['dedup_data'] = $dedupData;
$record->setRawData($fields);
}
if (empty($idList)) {
return;
}
// Fetch records and assign them to the result:
$localRecords = $this->backend->retrieveBatch($idList)->getRecords();
foreach ($result->getRecords() as $record) {
$dedupRecordData = $record->getRawData();
if (!isset($dedupRecordData['dedup_id'])) {
continue;
}
// Find the corresponding local record in the results:
$foundLocalRecord = null;
foreach ($localRecords as $localRecord) {
if ($localRecord->getUniqueID() == $dedupRecordData['dedup_id']) {
$foundLocalRecord = $localRecord;
break;
}
}
if (!$foundLocalRecord) {
continue;
}
$localRecordData = $foundLocalRecord->getRawData();
// Copy dedup_data for the active data sources:
foreach ($dedupRecordData['dedup_data'] as $dedupDataKey => $dedupData) {
if (!$recordSources || isset($sourcePriority[$dedupDataKey])) {
$localRecordData['dedup_data'][$dedupDataKey] = $dedupData;
}
}
// Copy fields from dedup record to local record
$localRecordData = $this->appendDedupRecordFields(
$localRecordData,
$dedupRecordData,
$recordSources,
$sourcePriority
);
$foundLocalRecord->setRawData($localRecordData);
$foundLocalRecord->setHighlightDetails($record->getHighlightDetails());
$result->replace($record, $foundLocalRecord);
}
}
/**
* Append fields from dedup record to the selected local record. Note: the last
* two parameters are unused in this default method, but they may be useful for
* custom behavior in subclasses.
*
* @param array $localRecordData Local record data
* @param array $dedupRecordData Dedup record data
* @param array $recordSources List of active record sources, empty if all
* @param array $sourcePriority Array of source priorities keyed by source id
*
* @return array Local record data
*
* @SuppressWarnings(PHPMD.UnusedFormalParameter)
*/
protected function appendDedupRecordFields($localRecordData, $dedupRecordData,
$recordSources, $sourcePriority
) {
$localRecordData['local_ids_str_mv'] = $dedupRecordData['local_ids_str_mv'];
return $localRecordData;
}
/**
* Function that determines the priority for sources
*
* @param array $recordSources Record sources defined in searches.ini
*
* @return array Array keyed by source with priority as the value
*/
protected function determineSourcePriority($recordSources)
{
if (empty($recordSources)) {
return [];
}
return array_flip($recordSources);
}
/**
* Function that determines the priority for buildings
*
* @param object $params Query parameters
*
* @return array Array keyed by building with priority as the value
*/
protected function determineBuildingPriority($params)
{
$result = [];
foreach ($params->get('fq') as $fq) {
if (preg_match_all('/\bbuilding:"([^"]+)"/', $fq, $matches)) {
$values = $matches[1];
foreach ($values as $value) {
if (preg_match('/^\d+\/([^\/]+?)\//', $value, $matches)) {
// Hierarchical facets; take only first level:
$result[] = $matches[1];
} else {
$result[] = $value;
}
}
}
}
array_unshift($result, '');
return array_flip($result);
}
}
| 1 | 30,884 | I would recommend using Psr\Container\ContainerInterface here instead of Interop\Container\ContainerInterface; both currently work, but the Interop version is deprecated and will eventually be removed, so using the Psr version will save us time in the future. | vufind-org-vufind | php |
@@ -312,7 +312,7 @@ func TestLocalExec(t *testing.T) {
err = app.Exec("web", true, "pwd")
assert.NoError(err)
out := stdout()
- assert.Contains(out, "/var/www/html/docroot")
+ assert.Contains(out, "/var/www/html")
stdout = testcommon.CaptureStdOut()
switch app.GetType() { | 1 | package platform
import (
"fmt"
"path/filepath"
"testing"
"time"
"os"
"strings"
log "github.com/Sirupsen/logrus"
"github.com/drud/ddev/pkg/dockerutil"
"github.com/drud/ddev/pkg/fileutil"
"github.com/drud/ddev/pkg/testcommon"
"github.com/drud/ddev/pkg/util"
"github.com/stretchr/testify/assert"
)
var (
TestSites = []testcommon.TestSite{
{
Name: "TestMainPkgDrupal8",
SourceURL: "https://github.com/drud/drupal8/archive/v0.6.0.tar.gz",
ArchiveInternalExtractionPath: "drupal8-0.6.0/",
FilesTarballURL: "https://github.com/drud/drupal8/releases/download/v0.6.0/files.tar.gz",
FilesZipballURL: "https://github.com/drud/drupal8/releases/download/v0.6.0/files.zip",
DBTarURL: "https://github.com/drud/drupal8/releases/download/v0.6.0/db.tar.gz",
DBZipURL: "https://github.com/drud/drupal8/releases/download/v0.6.0/db.zip",
FullSiteTarballURL: "https://github.com/drud/drupal8/releases/download/v0.6.0/site.tar.gz",
},
{
Name: "TestMainPkgWordpress",
SourceURL: "https://github.com/drud/wordpress/archive/v0.4.0.tar.gz",
ArchiveInternalExtractionPath: "wordpress-0.4.0/",
FilesTarballURL: "https://github.com/drud/wordpress/releases/download/v0.4.0/files.tar.gz",
DBTarURL: "https://github.com/drud/wordpress/releases/download/v0.4.0/db.tar.gz",
},
{
Name: "TestMainPkgDrupalKickstart",
SourceURL: "https://github.com/drud/drupal-kickstart/archive/v0.4.0.tar.gz",
ArchiveInternalExtractionPath: "drupal-kickstart-0.4.0/",
FilesTarballURL: "https://github.com/drud/drupal-kickstart/releases/download/v0.4.0/files.tar.gz",
DBTarURL: "https://github.com/drud/drupal-kickstart/releases/download/v0.4.0/db.tar.gz",
FullSiteTarballURL: "https://github.com/drud/drupal-kickstart/releases/download/v0.4.0/site.tar.gz",
},
}
)
func TestMain(m *testing.M) {
if len(GetApps()) > 0 {
log.Fatalf("Local plugin tests require no sites running. You have %v site(s) running.", len(GetApps()))
}
for i := range TestSites {
err := TestSites[i].Prepare()
if err != nil {
log.Fatalf("Prepare() failed on TestSite.Prepare(), err=%v", err)
}
}
log.Debugln("Running tests.")
testRun := m.Run()
for i := range TestSites {
TestSites[i].Cleanup()
}
os.Exit(testRun)
}
// TestLocalSetup reduces the TestSite list on shorter test runs.
func TestLocalSetup(t *testing.T) {
// Allow tests to run in "short" mode, which will only test a single site. This keeps test runtimes low.
// We would much prefer to do this in TestMain, but the Short() flag is not yet available at that point.
if testing.Short() {
TestSites = []testcommon.TestSite{TestSites[0]}
}
}
// TestLocalStart tests the functionality that is called when "ddev start" is executed
func TestLocalStart(t *testing.T) {
// ensure we have docker network
client := dockerutil.GetDockerClient()
err := dockerutil.EnsureNetwork(client, dockerutil.NetName)
if err != nil {
log.Fatal(err)
}
assert := assert.New(t)
app, err := GetPluginApp("local")
assert.NoError(err)
for _, site := range TestSites {
cleanup := site.Chdir()
runTime := testcommon.TimeTrack(time.Now(), fmt.Sprintf("%s LocalStart", site.Name))
testcommon.ClearDockerEnv()
err = app.Init(site.Dir)
assert.NoError(err)
err = app.Start()
assert.NoError(err)
err = app.Wait("web")
assert.NoError(err)
// ensure docker-compose.yaml exists inside .ddev site folder
composeFile := fileutil.FileExists(app.DockerComposeYAMLPath())
assert.True(composeFile)
for _, containerType := range [3]string{"web", "db", "dba"} {
containerName, err := constructContainerName(containerType, app)
assert.NoError(err)
check, err := testcommon.ContainerCheck(containerName, "running")
assert.NoError(err)
assert.True(check, containerType, "container is running")
}
runTime()
cleanup()
}
// try to start a site of same name at different path
another := TestSites[0]
err = another.Prepare()
if err != nil {
assert.FailNow("TestLocalStart: Prepare() failed on another.Prepare(), err=%v", err)
return
}
err = app.Init(another.Dir)
assert.Error(err)
assert.Contains(err.Error(), fmt.Sprintf("container in running state already exists for %s that was created at %s", TestSites[0].Name, TestSites[0].Dir))
another.Cleanup()
}
// TestGetApps tests the GetApps function to ensure it accurately returns a list of running applications.
func TestGetApps(t *testing.T) {
assert := assert.New(t)
apps := GetApps()
assert.Equal(len(apps["local"]), len(TestSites))
for _, site := range TestSites {
var found bool
for _, siteInList := range apps["local"] {
if site.Name == siteInList.GetName() {
found = true
break
}
}
assert.True(found, "Found site %s in list", site.Name)
}
}
// TestLocalImportDB tests the functionality that is called when "ddev import-db" is executed
func TestLocalImportDB(t *testing.T) {
assert := assert.New(t)
app, err := GetPluginApp("local")
assert.NoError(err)
testDir, _ := os.Getwd()
for _, site := range TestSites {
cleanup := site.Chdir()
runTime := testcommon.TimeTrack(time.Now(), fmt.Sprintf("%s LocalImportDB", site.Name))
testcommon.ClearDockerEnv()
err = app.Init(site.Dir)
assert.NoError(err)
// Test simple db loads.
for _, file := range []string{"users.sql", "users.sql.gz", "users.sql.tar", "users.sql.tar.gz", "users.sql.tgz", "users.sql.zip"} {
path := filepath.Join(testDir, "testdata", file)
err = app.ImportDB(path, "")
assert.NoError(err, "Failed to app.ImportDB path: %s err: %v", path, err)
}
if site.DBTarURL != "" {
err = app.Exec("db", true, "mysql", "-e", "DROP DATABASE db;")
assert.NoError(err)
err = app.Exec("db", true, "mysql", "information_schema", "-e", "CREATE DATABASE db;")
assert.NoError(err)
dbPath := filepath.Join(testcommon.CreateTmpDir("local-db"), "db.tar.gz")
err := util.DownloadFile(dbPath, site.DBTarURL)
assert.NoError(err)
err = app.ImportDB(dbPath, "")
assert.NoError(err)
stdout := testcommon.CaptureStdOut()
err = app.Exec("db", true, "mysql", "-e", "SHOW TABLES;")
assert.NoError(err)
out := stdout()
assert.Contains(string(out), "Tables_in_db")
assert.False(strings.Contains(string(out), "Empty set"))
err = os.Remove(dbPath)
assert.NoError(err)
}
if site.DBZipURL != "" {
err = app.Exec("db", true, "mysql", "-e", "DROP DATABASE db;")
assert.NoError(err)
err = app.Exec("db", true, "mysql", "information_schema", "-e", "CREATE DATABASE db;")
assert.NoError(err)
dbZipPath := filepath.Join(testcommon.CreateTmpDir("local-db-zip"), "db.zip")
err = util.DownloadFile(dbZipPath, site.DBZipURL)
assert.NoError(err)
err = app.ImportDB(dbZipPath, "")
assert.NoError(err)
stdout := testcommon.CaptureStdOut()
err = app.Exec("db", true, "mysql", "-e", "SHOW TABLES;")
assert.NoError(err)
out := stdout()
assert.Contains(string(out), "Tables_in_db")
assert.False(strings.Contains(string(out), "Empty set"))
err = os.Remove(dbZipPath)
assert.NoError(err)
}
if site.FullSiteTarballURL != "" {
err = app.Exec("db", true, "mysql", "-e", "DROP DATABASE db;")
assert.NoError(err)
err = app.Exec("db", true, "mysql", "information_schema", "-e", "CREATE DATABASE db;")
assert.NoError(err)
siteTarPath := filepath.Join(testcommon.CreateTmpDir("local-site-tar"), "site.tar.gz")
err = util.DownloadFile(siteTarPath, site.FullSiteTarballURL)
assert.NoError(err)
err = app.ImportDB(siteTarPath, "data.sql")
assert.NoError(err)
err = os.Remove(siteTarPath)
assert.NoError(err)
}
runTime()
cleanup()
}
}
// TestLocalImportFiles tests the functionality that is called when "ddev import-files" is executed
func TestLocalImportFiles(t *testing.T) {
assert := assert.New(t)
app, err := GetPluginApp("local")
assert.NoError(err)
for _, site := range TestSites {
cleanup := site.Chdir()
runTime := testcommon.TimeTrack(time.Now(), fmt.Sprintf("%s LocalImportFiles", site.Name))
testcommon.ClearDockerEnv()
err = app.Init(site.Dir)
assert.NoError(err)
if site.FilesTarballURL != "" {
filePath := filepath.Join(testcommon.CreateTmpDir("local-tarball-files"), "files.tar.gz")
err := util.DownloadFile(filePath, site.FilesTarballURL)
assert.NoError(err)
err = app.ImportFiles(filePath, "")
assert.NoError(err)
err = os.Remove(filePath)
assert.NoError(err)
}
if site.FilesZipballURL != "" {
filePath := filepath.Join(testcommon.CreateTmpDir("local-zipball-files"), "files.zip")
err := util.DownloadFile(filePath, site.FilesZipballURL)
assert.NoError(err)
err = app.ImportFiles(filePath, "")
assert.NoError(err)
err = os.Remove(filePath)
assert.NoError(err)
}
if site.FullSiteTarballURL != "" {
siteTarPath := filepath.Join(testcommon.CreateTmpDir("local-site-tar"), "site.tar.gz")
err = util.DownloadFile(siteTarPath, site.FullSiteTarballURL)
assert.NoError(err)
err = app.ImportFiles(siteTarPath, "docroot/sites/default/files")
assert.NoError(err)
err = os.Remove(siteTarPath)
assert.NoError(err)
}
runTime()
cleanup()
}
}
// TestLocalExec tests the execution of commands inside a docker container of a site.
func TestLocalExec(t *testing.T) {
assert := assert.New(t)
app, err := GetPluginApp("local")
assert.NoError(err)
for _, site := range TestSites {
cleanup := site.Chdir()
runTime := testcommon.TimeTrack(time.Now(), fmt.Sprintf("%s LocalExec", site.Name))
err := app.Init(site.Dir)
assert.NoError(err)
stdout := testcommon.CaptureStdOut()
err = app.Exec("web", true, "pwd")
assert.NoError(err)
out := stdout()
assert.Contains(out, "/var/www/html/docroot")
stdout = testcommon.CaptureStdOut()
switch app.GetType() {
case "drupal7":
fallthrough
case "drupal8":
err := app.Exec("web", true, "drush", "status")
assert.NoError(err)
case "wordpress":
err = app.Exec("web", true, "wp", "--info")
assert.NoError(err)
default:
}
out = stdout()
assert.Contains(string(out), "/etc/php/7.0/cli/php.ini")
runTime()
cleanup()
}
}
// TestLocalLogs tests the container log output functionality.
func TestLocalLogs(t *testing.T) {
assert := assert.New(t)
app, err := GetPluginApp("local")
assert.NoError(err)
for _, site := range TestSites {
cleanup := site.Chdir()
runTime := testcommon.TimeTrack(time.Now(), fmt.Sprintf("%s LocalLogs", site.Name))
err := app.Init(site.Dir)
assert.NoError(err)
stdout := testcommon.CaptureStdOut()
err = app.Logs("web", false, false, "")
assert.NoError(err)
out := stdout()
assert.Contains(out, "Server started")
stdout = testcommon.CaptureStdOut()
err = app.Logs("db", false, false, "")
assert.NoError(err)
out = stdout()
assert.Contains(out, "Database initialized")
stdout = testcommon.CaptureStdOut()
err = app.Logs("db", false, false, "2")
assert.NoError(err)
out = stdout()
assert.Contains(out, "MySQL init process done. Ready for start up.")
assert.False(strings.Contains(out, "Database initialized"))
runTime()
cleanup()
}
}
// TestLocalStop tests the functionality that is called when "ddev stop" is executed
func TestLocalStop(t *testing.T) {
assert := assert.New(t)
app, err := GetPluginApp("local")
assert.NoError(err)
for _, site := range TestSites {
cleanup := site.Chdir()
runTime := testcommon.TimeTrack(time.Now(), fmt.Sprintf("%s LocalStop", site.Name))
testcommon.ClearDockerEnv()
err := app.Init(site.Dir)
assert.NoError(err)
err = app.Stop()
assert.NoError(err)
for _, containerType := range [3]string{"web", "db", "dba"} {
containerName, err := constructContainerName(containerType, app)
assert.NoError(err)
check, err := testcommon.ContainerCheck(containerName, "exited")
assert.NoError(err)
assert.True(check, containerType, "container has exited")
}
runTime()
cleanup()
}
}
// TestDescribeStopped tests that the describe command works properly on a stopped site.
func TestDescribeStopped(t *testing.T) {
assert := assert.New(t)
app, err := GetPluginApp("local")
assert.NoError(err)
for _, site := range TestSites {
cleanup := site.Chdir()
testcommon.ClearDockerEnv()
err := app.Init(site.Dir)
assert.NoError(err)
out, err := app.Describe()
assert.NoError(err)
assert.Contains(out, SiteStopped, "Output did not include the word stopped when describing a stopped site.")
cleanup()
}
}
// TestLocalRemove tests the functionality that is called when "ddev rm" is executed
func TestLocalRemove(t *testing.T) {
assert := assert.New(t)
app, err := GetPluginApp("local")
assert.NoError(err)
for _, site := range TestSites {
cleanup := site.Chdir()
testcommon.ClearDockerEnv()
err := app.Init(site.Dir)
assert.NoError(err)
// start the previously stopped containers -
// stopped/removed have the same state
err = app.Start()
assert.NoError(err)
err = app.Wait("web")
assert.NoError(err)
runTime := testcommon.TimeTrack(time.Now(), fmt.Sprintf("%s LocalRemove", site.Name))
err = app.Down()
assert.NoError(err)
for _, containerType := range [3]string{"web", "db", "dba"} {
_, err := constructContainerName(containerType, app)
assert.Error(err, "Received error on containerName search: ", err)
}
runTime()
cleanup()
}
}
// TestCleanupWithoutCompose ensures app containers can be properly cleaned up without a docker-compose config file present.
func TestCleanupWithoutCompose(t *testing.T) {
assert := assert.New(t)
site := TestSites[0]
revertDir := site.Chdir()
app, err := GetPluginApp("local")
assert.NoError(err)
testcommon.ClearDockerEnv()
err = app.Init(site.Dir)
assert.NoError(err)
// Start a site so we have something to cleanup
err = app.Start()
assert.NoError(err)
err = app.Wait("web")
assert.NoError(err)
// Call the Cleanup command()
err = Cleanup(app)
assert.NoError(err)
for _, containerType := range [3]string{"web", "db", "dba"} {
_, err := constructContainerName(containerType, app)
assert.Error(err)
}
revertDir()
}
// TestGetappsEmpty ensures that GetApps returns an empty list when no applications are running.
func TestGetAppsEmpty(t *testing.T) {
assert := assert.New(t)
apps := GetApps()
assert.Equal(len(apps["local"]), 0)
}
// TestRouterNotRunning ensures the router is shut down after all sites are stopped.
func TestRouterNotRunning(t *testing.T) {
assert := assert.New(t)
containers, err := dockerutil.GetDockerContainers(false)
assert.NoError(err)
for _, container := range containers {
assert.NotEqual(dockerutil.ContainerName(container), "ddev-router", "Failed to find ddev-router container running")
}
}
// constructContainerName builds a container name given the type (web/db/dba) and the app
func constructContainerName(containerType string, app App) (string, error) {
container, err := app.FindContainerByType(containerType)
if err != nil {
return "", err
}
name := dockerutil.ContainerName(container)
return name, nil
}
 | 1 | 11,402 | This would also change back to /var/www/html/docroot if we go that way. | drud-ddev | go
@@ -12,7 +12,7 @@ import info.nightscout.androidaps.MainApp;
*/
public class SP {
- private static SharedPreferences sharedPreferences = PreferenceManager.getDefaultSharedPreferences(MainApp.instance().getApplicationContext());
+ public static SharedPreferences sharedPreferences = PreferenceManager.getDefaultSharedPreferences(MainApp.instance().getApplicationContext());
static public Map<String, ?> getAll() {
return sharedPreferences.getAll(); | 1 | package info.nightscout.androidaps.utils;
import android.content.SharedPreferences;
import android.preference.PreferenceManager;
import java.util.Map;
import info.nightscout.androidaps.MainApp;
/**
* Created by mike on 17.02.2017.
*/
public class SP {
private static SharedPreferences sharedPreferences = PreferenceManager.getDefaultSharedPreferences(MainApp.instance().getApplicationContext());
static public Map<String, ?> getAll() {
return sharedPreferences.getAll();
}
static public void clear() {
sharedPreferences.edit().clear().apply();
}
static public boolean contains(String key) {
return sharedPreferences.contains(key);
}
static public boolean contains(int resourceId) {
return sharedPreferences.contains(MainApp.gs(resourceId));
}
static public String getString(int resourceID, String defaultValue) {
return sharedPreferences.getString(MainApp.gs(resourceID), defaultValue);
}
static public String getString(String key, String defaultValue) {
return sharedPreferences.getString(key, defaultValue);
}
static public boolean getBoolean(int resourceID, Boolean defaultValue) {
try {
return sharedPreferences.getBoolean(MainApp.gs(resourceID), defaultValue);
} catch (Exception e) {
return defaultValue;
}
}
static public boolean getBoolean(String key, Boolean defaultValue) {
try {
return sharedPreferences.getBoolean(key, defaultValue);
} catch (Exception e) {
return defaultValue;
}
}
static public Double getDouble(int resourceID, Double defaultValue) {
return SafeParse.stringToDouble(sharedPreferences.getString(MainApp.gs(resourceID), defaultValue.toString()));
}
static public Double getDouble(String key, Double defaultValue) {
return SafeParse.stringToDouble(sharedPreferences.getString(key, defaultValue.toString()));
}
static public int getInt(int resourceID, Integer defaultValue) {
try {
return sharedPreferences.getInt(MainApp.gs(resourceID), defaultValue);
} catch (Exception e) {
return SafeParse.stringToInt(sharedPreferences.getString(MainApp.gs(resourceID), defaultValue.toString()));
}
}
static public int getInt(String key, Integer defaultValue) {
try {
return sharedPreferences.getInt(key, defaultValue);
} catch (Exception e) {
return SafeParse.stringToInt(sharedPreferences.getString(key, defaultValue.toString()));
}
}
static public long getLong(int resourceID, Long defaultValue) {
try {
return sharedPreferences.getLong(MainApp.gs(resourceID), defaultValue);
} catch (Exception e) {
return SafeParse.stringToLong(sharedPreferences.getString(MainApp.gs(resourceID), defaultValue.toString()));
}
}
static public long getLong(String key, Long defaultValue) {
try {
return sharedPreferences.getLong(key, defaultValue);
} catch (Exception e) {
return SafeParse.stringToLong(sharedPreferences.getString(key, defaultValue.toString()));
}
}
static public void putBoolean(String key, boolean value) {
sharedPreferences.edit().putBoolean(key, value).apply();
}
static public void putBoolean(int resourceID, boolean value) {
sharedPreferences.edit().putBoolean(MainApp.gs(resourceID), value).apply();
}
static public void putDouble(String key, double value) {
sharedPreferences.edit().putString(key, Double.toString(value)).apply();
}
static public void putLong(String key, long value) {
sharedPreferences.edit().putLong(key, value).apply();
}
static public void putLong(int resourceID, long value) {
sharedPreferences.edit().putLong(MainApp.gs(resourceID), value).apply();
}
static public void putInt(String key, int value) {
sharedPreferences.edit().putInt(key, value).apply();
}
static public void putInt(int resourceID, int value) {
sharedPreferences.edit().putInt(MainApp.gs(resourceID), value).apply();
}
static public void incInt(int resourceID) {
int value = SP.getInt(resourceID, 0) + 1;
sharedPreferences.edit().putInt(MainApp.gs(resourceID), value).apply();
}
static public void putString(int resourceID, String value) {
sharedPreferences.edit().putString(MainApp.gs(resourceID), value).apply();
}
static public void putString(String key, String value) {
sharedPreferences.edit().putString(key, value).apply();
}
static public void remove(int resourceID) {
sharedPreferences.edit().remove(MainApp.gs(resourceID)).apply();
}
static public void remove(String key) {
sharedPreferences.edit().remove(key).apply();
}
}
| 1 | 32,327 | Does this need to be public? Can't one of the helper functions below pass the data? If it really needs to be public, could it be write-protected? | MilosKozak-AndroidAPS | java |
@@ -216,11 +216,6 @@ func RunAPIAndWait(ctx context.Context, nd *node.Node, config *config.APIConfig,
}
func CreateServerEnv(ctx context.Context, nd *node.Node) *Env {
- var storageAPI *storage.API
- if nd.StorageProtocol != nil {
- storageAPI = storage.NewAPI(nd.StorageProtocol.StorageClient, nd.StorageProtocol.StorageProvider, nd.PieceManager())
- }
-
return &Env{
blockMiningAPI: nd.BlockMining.BlockMiningAPI,
drandAPI: nd.DrandAPI, | 1 | package commands
import (
"context"
"fmt"
"net/http"
_ "net/http/pprof" // nolint: golint
"os"
"os/signal"
"syscall"
"time"
"github.com/filecoin-project/go-filecoin/internal/pkg/protocol/storage"
cmdkit "github.com/ipfs/go-ipfs-cmdkit"
cmds "github.com/ipfs/go-ipfs-cmds"
cmdhttp "github.com/ipfs/go-ipfs-cmds/http"
writer "github.com/ipfs/go-log/writer"
ma "github.com/multiformats/go-multiaddr"
manet "github.com/multiformats/go-multiaddr-net"
"github.com/pkg/errors"
"github.com/filecoin-project/go-filecoin/internal/app/go-filecoin/node"
"github.com/filecoin-project/go-filecoin/internal/app/go-filecoin/paths"
"github.com/filecoin-project/go-filecoin/internal/pkg/clock"
"github.com/filecoin-project/go-filecoin/internal/pkg/config"
"github.com/filecoin-project/go-filecoin/internal/pkg/journal"
"github.com/filecoin-project/go-filecoin/internal/pkg/repo"
)
var daemonCmd = &cmds.Command{
Helptext: cmdkit.HelpText{
Tagline: "Start a long-running daemon process",
},
Options: []cmdkit.Option{
cmdkit.StringOption(SwarmAddress, "multiaddress to listen on for filecoin network connections"),
cmdkit.StringOption(SwarmPublicRelayAddress, "public multiaddress for routing circuit relay traffic. Necessary for relay nodes to provide this if they are not publicly dialable"),
cmdkit.BoolOption(OfflineMode, "start the node without networking"),
cmdkit.BoolOption(ELStdout),
cmdkit.BoolOption(IsRelay, "advertise and allow filecoin network traffic to be relayed through this node"),
cmdkit.StringOption(BlockTime, "time a node waits before trying to mine the next block").WithDefault(clock.DefaultEpochDuration.String()),
},
Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error {
return daemonRun(req, re)
},
}
func daemonRun(req *cmds.Request, re cmds.ResponseEmitter) error {
// third precedence is config file.
rep, err := getRepo(req)
if err != nil {
return err
}
// second highest precedence is env vars.
if envAPI := os.Getenv("FIL_API"); envAPI != "" {
rep.Config().API.Address = envAPI
}
// highest precedence is cmd line flag.
if flagAPI, ok := req.Options[OptionAPI].(string); ok && flagAPI != "" {
rep.Config().API.Address = flagAPI
}
if swarmAddress, ok := req.Options[SwarmAddress].(string); ok && swarmAddress != "" {
rep.Config().Swarm.Address = swarmAddress
}
if publicRelayAddress, ok := req.Options[SwarmPublicRelayAddress].(string); ok && publicRelayAddress != "" {
rep.Config().Swarm.PublicRelayAddress = publicRelayAddress
}
opts, err := node.OptionsFromRepo(rep)
if err != nil {
return err
}
if offlineMode, ok := req.Options[OfflineMode].(bool); ok {
opts = append(opts, node.OfflineMode(offlineMode))
}
if isRelay, ok := req.Options[IsRelay].(bool); ok && isRelay {
opts = append(opts, node.IsRelay())
}
durStr, ok := req.Options[BlockTime].(string)
if !ok {
return errors.New("Bad block time passed")
}
blockTime, err := time.ParseDuration(durStr)
if err != nil {
return errors.Wrap(err, "Bad block time passed")
}
opts = append(opts, node.BlockTime(blockTime))
journal, err := journal.NewZapJournal(rep.JournalPath())
if err != nil {
return err
}
opts = append(opts, node.JournalConfigOption(journal))
// Instantiate the node.
fcn, err := node.New(req.Context, opts...)
if err != nil {
return err
}
if fcn.OfflineMode {
_ = re.Emit("Filecoin node running in offline mode (libp2p is disabled)\n")
} else {
_ = re.Emit(fmt.Sprintf("My peer ID is %s\n", fcn.Host().ID().Pretty()))
for _, a := range fcn.Host().Addrs() {
_ = re.Emit(fmt.Sprintf("Swarm listening on: %s\n", a))
}
}
if _, ok := req.Options[ELStdout].(bool); ok {
writer.WriterGroup.AddWriter(os.Stdout)
}
// Start the node.
if err := fcn.Start(req.Context); err != nil {
return err
}
defer fcn.Stop(req.Context)
// Run API server around the node.
ready := make(chan interface{}, 1)
go func() {
<-ready
_ = re.Emit(fmt.Sprintf("API server listening on %s\n", rep.Config().API.Address))
}()
var terminate = make(chan os.Signal, 1)
signal.Notify(terminate, os.Interrupt, syscall.SIGTERM)
defer signal.Stop(terminate)
// The request is expected to remain open so the daemon uses the request context.
// Pass a new context here if the flow changes such that the command should exit while leaving
// a forked daemon running.
return RunAPIAndWait(req.Context, fcn, rep.Config().API, ready, terminate)
}
func getRepo(req *cmds.Request) (repo.Repo, error) {
repoDir, _ := req.Options[OptionRepoDir].(string)
repoDir, err := paths.GetRepoPath(repoDir)
if err != nil {
return nil, err
}
return repo.OpenFSRepo(repoDir, repo.Version)
}
// RunAPIAndWait starts an API server and waits for it to finish.
// The `ready` channel is closed when the server is running and its API address has been
// saved to the node's repo.
// A message sent to or closure of the `terminate` channel signals the server to stop.
func RunAPIAndWait(ctx context.Context, nd *node.Node, config *config.APIConfig, ready chan interface{}, terminate chan os.Signal) error {
servenv := CreateServerEnv(ctx, nd)
cfg := cmdhttp.NewServerConfig()
cfg.APIPath = APIPrefix
cfg.SetAllowedOrigins(config.AccessControlAllowOrigin...)
cfg.SetAllowedMethods(config.AccessControlAllowMethods...)
cfg.SetAllowCredentials(config.AccessControlAllowCredentials)
maddr, err := ma.NewMultiaddr(config.Address)
if err != nil {
return err
}
// Listen on the configured address in order to bind the port number in case it has
// been configured as zero (i.e. OS-provided)
apiListener, err := manet.Listen(maddr)
if err != nil {
return err
}
handler := http.NewServeMux()
handler.Handle("/debug/pprof/", http.DefaultServeMux)
handler.Handle(APIPrefix+"/", cmdhttp.NewHandler(servenv, rootCmdDaemon, cfg))
apiserv := http.Server{
Handler: handler,
}
go func() {
err := apiserv.Serve(manet.NetListener(apiListener))
if err != nil && err != http.ErrServerClosed {
panic(err)
}
}()
// Write the resolved API address to the repo
config.Address = apiListener.Multiaddr().String()
if err := nd.Repo.SetAPIAddr(config.Address); err != nil {
return errors.Wrap(err, "Could not save API address to repo")
}
// Signal that the server has started and then wait for a signal to stop.
close(ready)
received := <-terminate
if received != nil {
fmt.Println("Received signal", received)
}
fmt.Println("Shutting down...")
// Allow a grace period for clean shutdown.
ctx, cancel := context.WithTimeout(ctx, time.Second*5)
defer cancel()
if err := apiserv.Shutdown(ctx); err != nil {
fmt.Println("Error shutting down API server:", err)
}
return nil
}
func CreateServerEnv(ctx context.Context, nd *node.Node) *Env {
var storageAPI *storage.API
if nd.StorageProtocol != nil {
storageAPI = storage.NewAPI(nd.StorageProtocol.StorageClient, nd.StorageProtocol.StorageProvider, nd.PieceManager())
}
return &Env{
blockMiningAPI: nd.BlockMining.BlockMiningAPI,
drandAPI: nd.DrandAPI,
ctx: ctx,
inspectorAPI: NewInspectorAPI(nd.Repo),
porcelainAPI: nd.PorcelainAPI,
retrievalAPI: nd.RetrievalProtocol,
storageAPI: storageAPI,
}
}
| 1 | 23,534 | Thanks. Now that you've improved this we should just init and expose the StorageAPI on the node, like the other.s | filecoin-project-venus | go |
@@ -116,6 +116,15 @@ public class Key implements Comparable<Key> {
return toRawKey(Arrays.copyOf(value, value.length + 1));
}
+ /**
+ * nextPrefix returns the key with the next available rid.
+ *
+ * @return a new key with the current rid + 1.
+ */
+ public Key nextPrefix() {
+ return toRawKey(prefixNext(value));
+ }
+
/**
* The prefixNext key for bytes domain
* | 1 | /*
* Copyright 2017 PingCAP, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pingcap.tikv.key;
import static com.pingcap.tikv.codec.KeyUtils.formatBytes;
import static java.util.Objects.requireNonNull;
import com.google.protobuf.ByteString;
import com.pingcap.tikv.codec.CodecDataOutput;
import com.pingcap.tikv.types.DataType;
import com.pingcap.tikv.util.FastByteComparisons;
import java.util.Arrays;
public class Key implements Comparable<Key> {
protected static final byte[] TBL_PREFIX = new byte[] {'t'};
protected final byte[] value;
protected final int infFlag;
public static final Key EMPTY = createEmpty();
public static final Key NULL = createNull();
public static final Key MIN = createTypelessMin();
public static final Key MAX = createTypelessMax();
private Key(byte[] value, boolean negative) {
this.value = requireNonNull(value, "value is null");
this.infFlag = (value.length == 0 ? 1 : 0) * (negative ? -1 : 1);
}
protected Key(byte[] value) {
this(value, false);
}
public static Key toRawKey(ByteString bytes, boolean negative) {
return new Key(bytes.toByteArray(), negative);
}
public static Key toRawKey(ByteString bytes) {
return new Key(bytes.toByteArray());
}
public static Key toRawKey(byte[] bytes, boolean negative) {
return new Key(bytes, negative);
}
public static Key toRawKey(byte[] bytes) {
return new Key(bytes);
}
private static Key createNull() {
CodecDataOutput cdo = new CodecDataOutput();
DataType.encodeNull(cdo);
return new Key(cdo.toBytes()) {
@Override
public String toString() {
return "null";
}
};
}
private static Key createEmpty() {
return new Key(new byte[0]) {
@Override
public Key next() {
return this;
}
@Override
public String toString() {
return "EMPTY";
}
};
}
private static Key createTypelessMin() {
CodecDataOutput cdo = new CodecDataOutput();
DataType.encodeIndex(cdo);
return new Key(cdo.toBytes()) {
@Override
public String toString() {
return "MIN";
}
};
}
private static Key createTypelessMax() {
CodecDataOutput cdo = new CodecDataOutput();
DataType.encodeMaxValue(cdo);
return new Key(cdo.toBytes()) {
@Override
public String toString() {
return "MAX";
}
};
}
/**
* The next key simply appends a zero byte to the previous key.
*
* @return next key with a zero byte appended
*/
public Key next() {
return toRawKey(Arrays.copyOf(value, value.length + 1));
}
/**
* The prefixNext key for bytes domain
*
* <p>It adds one at the LSB; if that byte overflows, the carry propagates toward the MSB. If every
* byte overflows, a zero byte is appended at the end instead. Original bytes will be reused if possible.
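*
* <p>Illustrative examples (hex byte values): prefixNext({0x01, 0x02}) = {0x01, 0x03};
* prefixNext({0x01, 0xFF}) = {0x02, 0x00}; prefixNext({0xFF, 0xFF}) = {0xFF, 0xFF, 0x00}.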
*
* @return encoded results
*/
static byte[] prefixNext(byte[] value) {
int i;
byte[] newVal = Arrays.copyOf(value, value.length);
for (i = newVal.length - 1; i >= 0; i--) {
newVal[i]++;
if (newVal[i] != 0) {
break;
}
}
if (i == -1) {
return Arrays.copyOf(value, value.length + 1);
} else {
return newVal;
}
}
@Override
public int compareTo(Key other) {
requireNonNull(other, "other is null");
if ((this.infFlag | other.infFlag) != 0) {
return this.infFlag - other.infFlag;
}
return FastByteComparisons.compareTo(value, other.value);
}
@Override
public boolean equals(Object other) {
if (other == this) {
return true;
}
if (other instanceof Key) {
return compareTo((Key) other) == 0;
} else {
return false;
}
}
@Override
public int hashCode() {
return Arrays.hashCode(value) * infFlag;
}
public byte[] getBytes() {
return value;
}
public ByteString toByteString() {
return ByteString.copyFrom(value);
}
public int getInfFlag() {
return infFlag;
}
@Override
public String toString() {
if (infFlag < 0) {
return "-INF";
} else if (infFlag > 0) {
return "+INF";
} else {
return String.format("{%s}", formatBytes(value));
}
}
}
| 1 | 9,554 | it will be better if you can provide some examples | pingcap-tispark | java |
@@ -21,6 +21,8 @@ public abstract class LongRunningOperationDetailView {
public abstract String operationReturnType();
+ public abstract String operationResponseType();
+
public static Builder newBuilder() {
return new AutoValue_LongRunningOperationDetailView.Builder();
} | 1 | /* Copyright 2016 Google Inc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.api.codegen.viewmodel;
import com.google.auto.value.AutoValue;
@AutoValue
public abstract class LongRunningOperationDetailView {
public abstract String operationReturnType();
public static Builder newBuilder() {
return new AutoValue_LongRunningOperationDetailView.Builder();
}
@AutoValue.Builder
public abstract static class Builder {
public abstract Builder operationReturnType(String val);
public abstract LongRunningOperationDetailView build();
}
}
| 1 | 19,867 | "return type" and "response type" have never really been contrasted before, and it's unclear what their meaning is here. Could you clarify? | googleapis-gapic-generator | java |
@@ -10,6 +10,7 @@ import sys
import traceback
import subprocess
import shlex
+import json
# TODO: This is a cross-subpackage import!
from libcodechecker.log import build_action | 1 | # -------------------------------------------------------------------------
# The CodeChecker Infrastructure
# This file is distributed under the University of Illinois Open Source
# License. See LICENSE.TXT for details.
# -------------------------------------------------------------------------
import os
import re
import sys
import traceback
import subprocess
import shlex
# TODO: This is a cross-subpackage import!
from libcodechecker.log import build_action
from libcodechecker.log import option_parser
from libcodechecker.logger import LoggerFactory
LOG = LoggerFactory.get_new_logger('LOG PARSER')
# If these options are present in the original build command, they must
# be forwarded to get_compiler_includes and get_compiler_defines so the
# resulting includes point to the target that was used in the build.
COMPILE_OPTS_FWD_TO_DEFAULTS_GETTER = frozenset(
['^-m(32|64)',
'^-std=.*'])
def get_compiler_includes(compiler, lang, compile_opts, extra_opts=None):
"""
Returns a list of default includes of the given compiler.
"""
start_mark = "#include <...> search starts here:"
end_mark = "End of search list."
if extra_opts is None:
extra_opts = []
# The first sysroot flag found among the compilation options is added
# to the command below to give a more precise default include list.
# Absence of any sysroot flags results in an empty string.
sysroot = next(
(item for item in compile_opts if item.startswith("--sysroot=")), "")
cmd = compiler + " " + ' '.join(extra_opts) + " -E -x " + lang + \
" " + sysroot + " - -v "
LOG.debug("Retrieving default includes via '" + cmd + "'")
include_paths = []
try:
proc = subprocess.Popen(shlex.split(cmd),
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
_, err = proc.communicate("")
do_append = False
for line in err.splitlines(True):
line = line.strip()
if line.startswith(end_mark):
do_append = False
if do_append:
include_paths.append("-isystem " + line)
if line.startswith(start_mark):
do_append = True
except OSError as oerr:
LOG.error("Cannot find include paths: " + oerr.strerror + "\n")
return include_paths
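# Illustrative sketch only: for a command such as "gcc -E -x c - -v" the relevant
# stderr section typically looks like (paths vary by toolchain):
#   #include <...> search starts here:
#    /usr/lib/gcc/x86_64-linux-gnu/7/include
#    /usr/include
#   End of search list.
# which get_compiler_includes turns into
#   ["-isystem /usr/lib/gcc/x86_64-linux-gnu/7/include", "-isystem /usr/include"]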
def get_compiler_target(compiler):
"""
Returns the target triple of the given compiler as a string.
If the compiler is not a version of GCC, an empty string is returned.
Compilers other than GCC might have default targets differing from
the build target.
"""
target_label = "Target:"
target = ""
gcc_label = "gcc"
gcc = False
cmd = compiler + ' -v'
LOG.debug("Retrieving target platform information via '" + cmd + "'")
try:
proc = subprocess.Popen(shlex.split(cmd),
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
_, err = proc.communicate("")
for line in err.splitlines(True):
line = line.strip().split()
if line[0] == target_label:
target = line[1]
if line[0] == gcc_label:
gcc = True
if not gcc:
target = ""
except OSError as oerr:
LOG.error("Cannot find compiler target: " + oerr.strerror + "\n")
return target
def parse_compile_commands_json(logfile, add_compiler_defaults=False):
import json
LOG.debug('parse_compile_commands_json: ' + str(add_compiler_defaults))
actions = []
filtered_build_actions = {}
logfile.seek(0)
data = json.load(logfile)
compiler_includes = {}
compiler_target = ''
counter = 0
for entry in data:
sourcefile = entry['file']
if not os.path.isabs(sourcefile):
# Newest versions of intercept-build can create the 'file' in the
# JSON Compilation Database as a relative path.
sourcefile = os.path.join(os.path.abspath(entry['directory']),
sourcefile)
lang = option_parser.get_language(sourcefile[sourcefile.rfind('.'):])
if not lang:
continue
action = build_action.BuildAction(counter)
if 'command' in entry:
command = entry['command']
# Old versions of intercept-build (confirmed for those shipping
# with upstream clang-5.0) do escaping in another way:
# -DVARIABLE="a b" becomes -DVARIABLE=\"a b\" in the output.
# This would be messed up later on by options_parser, so we need a
# fix here. (Should be removed once we are sure no one uses this
# intercept-build anymore!)
if r'\"' in command:
command = command.replace(r'\"', '"')
elif 'arguments' in entry:
# Newest versions of intercept-build create an argument vector
# instead of a command string.
command = ' '.join(entry['arguments'])
else:
raise KeyError("No valid 'command' or 'arguments' entry found!")
results = option_parser.parse_options(command)
action.original_command = command
action.analyzer_options = results.compile_opts
action.lang = results.lang
action.target = results.arch
# Store the compiler built in include paths and defines.
if add_compiler_defaults and results.compiler:
if not (results.compiler in compiler_includes):
# Fetch defaults from the compiler,
# make sure we use the correct architecture.
extra_opts = []
for regex in COMPILE_OPTS_FWD_TO_DEFAULTS_GETTER:
pattern = re.compile(regex)
for comp_opt in action.analyzer_options:
if re.match(pattern, comp_opt):
extra_opts.append(comp_opt)
compiler_includes[results.compiler] = \
get_compiler_includes(results.compiler, results.lang,
results.compile_opts, extra_opts)
compiler_target = get_compiler_target(results.compiler)
action.compiler_includes = compiler_includes[results.compiler]
if compiler_target != "":
action.analyzer_options.append("--target=" + compiler_target)
if results.action == option_parser.ActionType.COMPILE or \
results.action == option_parser.ActionType.LINK:
action.skip = False
# TODO: Check arch.
action.directory = entry['directory']
action.sources = sourcefile
# Filter out duplicate compilation commands.
unique_key = action.cmp_key
if filtered_build_actions.get(unique_key) is None:
filtered_build_actions[unique_key] = action
del action
counter += 1
for _, ba in filtered_build_actions.items():
actions.append(ba)
return actions
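# Illustrative compile_commands.json entries handled above (paths and flags are made up):
# [
#   {"directory": "/home/user/proj", "command": "gcc -c -o main.o main.c", "file": "main.c"},
#   {"directory": "/home/user/proj", "arguments": ["gcc", "-c", "-o", "util.o", "util.c"], "file": "util.c"}
# ]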
def parse_log(logfilepath, add_compiler_defaults=False):
LOG.debug('Parsing log file: ' + logfilepath)
with open(logfilepath) as logfile:
try:
actions = \
parse_compile_commands_json(logfile, add_compiler_defaults)
except (ValueError, KeyError, TypeError) as ex:
if os.stat(logfilepath).st_size == 0:
LOG.error('The compile database is empty.')
else:
LOG.error('The compile database is not valid.')
LOG.debug(traceback.format_exc())
LOG.debug(ex)
sys.exit(1)
LOG.debug('Parsing log file done.')
return actions
| 1 | 8,090 | Import order has been violated here. | Ericsson-codechecker | c |
@@ -250,11 +250,14 @@ func (api *API) MessagePreview(ctx context.Context, from, to address.Address, me
// it does not change any state. It is use to interrogate actor state. The from address
// is optional; if not provided, an address will be chosen from the node's wallet.
func (api *API) MessageQuery(ctx context.Context, optFrom, to address.Address, method types.MethodID, baseKey block.TipSetKey, params ...interface{}) ([][]byte, error) {
- snapshot, err := api.actorState.Snapshot(ctx, baseKey)
- if err != nil {
- return [][]byte{}, err
- }
- return snapshot.Query(ctx, optFrom, to, method, params...)
+ // Dragons: delete
+
+ // snapshot, err := api.actorState.Snapshot(ctx, baseKey)
+ // if err != nil {
+ // return [][]byte{}, err
+ // }
+ // return snapshot.Query(ctx, optFrom, to, method, params...)
+ return [][]byte{}, nil
}
// Snapshot returns a interface to the chain state a a particular tipset | 1 | package plumbing
import (
"context"
"fmt"
"io"
"strings"
"time"
"github.com/filecoin-project/go-filecoin/internal/pkg/block"
"github.com/filecoin-project/go-filecoin/internal/pkg/chainsync/status"
"github.com/filecoin-project/go-filecoin/internal/pkg/piecemanager"
"github.com/filecoin-project/go-filecoin/internal/pkg/vm"
"github.com/filecoin-project/go-filecoin/internal/pkg/vm/state"
"github.com/ipfs/go-cid"
"github.com/ipfs/go-datastore/query"
ipld "github.com/ipfs/go-ipld-format"
logging "github.com/ipfs/go-log"
"github.com/libp2p/go-libp2p-core/metrics"
"github.com/libp2p/go-libp2p-core/peer"
"github.com/libp2p/go-libp2p/p2p/protocol/ping"
ma "github.com/multiformats/go-multiaddr"
"github.com/filecoin-project/go-filecoin/internal/app/go-filecoin/plumbing/cfg"
"github.com/filecoin-project/go-filecoin/internal/app/go-filecoin/plumbing/cst"
"github.com/filecoin-project/go-filecoin/internal/app/go-filecoin/plumbing/dag"
"github.com/filecoin-project/go-filecoin/internal/app/go-filecoin/plumbing/msg"
"github.com/filecoin-project/go-filecoin/internal/app/go-filecoin/plumbing/strgdls"
"github.com/filecoin-project/go-filecoin/internal/pkg/chain"
"github.com/filecoin-project/go-filecoin/internal/pkg/consensus"
"github.com/filecoin-project/go-filecoin/internal/pkg/message"
"github.com/filecoin-project/go-filecoin/internal/pkg/net"
"github.com/filecoin-project/go-filecoin/internal/pkg/protocol/storage/storagedeal"
"github.com/filecoin-project/go-filecoin/internal/pkg/types"
"github.com/filecoin-project/go-filecoin/internal/pkg/vm/actor"
"github.com/filecoin-project/go-filecoin/internal/pkg/vm/address"
"github.com/filecoin-project/go-filecoin/internal/pkg/wallet"
)
// API is the plumbing implementation, the irreducible set of calls required
// to implement protocols and user/network-facing features. You probably should
// depend on the higher level porcelain.API instead of this api, as it includes
// these calls in addition to higher level convenience calls to make them more
// ergonomic.
type API struct {
logger logging.EventLogger
chain *cst.ChainStateReadWriter
syncer *cst.ChainSyncProvider
config *cfg.Config
dag *dag.DAG
expected consensus.Protocol
msgPool *message.Pool
msgPreviewer *msg.Previewer
actorState *consensus.ActorStateStore
msgWaiter *msg.Waiter
network *net.Network
outbox *message.Outbox
pieceManager func() piecemanager.PieceManager
storagedeals *strgdls.Store
wallet *wallet.Wallet
}
// APIDeps contains all the API's dependencies
type APIDeps struct {
Chain *cst.ChainStateReadWriter
ActState *consensus.ActorStateStore
Sync *cst.ChainSyncProvider
Config *cfg.Config
DAG *dag.DAG
Deals *strgdls.Store
Expected consensus.Protocol
MsgPool *message.Pool
MsgPreviewer *msg.Previewer
MsgWaiter *msg.Waiter
Network *net.Network
Outbox *message.Outbox
PieceManager func() piecemanager.PieceManager
Wallet *wallet.Wallet
}
// New constructs a new instance of the API.
func New(deps *APIDeps) *API {
return &API{
logger: logging.Logger("porcelain"),
chain: deps.Chain,
actorState: deps.ActState,
syncer: deps.Sync,
config: deps.Config,
dag: deps.DAG,
expected: deps.Expected,
msgPool: deps.MsgPool,
msgPreviewer: deps.MsgPreviewer,
msgWaiter: deps.MsgWaiter,
network: deps.Network,
outbox: deps.Outbox,
pieceManager: deps.PieceManager,
storagedeals: deps.Deals,
wallet: deps.Wallet,
}
}
// ActorGet returns an actor from the latest state on the chain
func (api *API) ActorGet(ctx context.Context, addr address.Address) (*actor.Actor, error) {
return api.chain.GetActor(ctx, addr)
}
// ActorGetSignature returns the signature of the given actor's given method.
// The function signature is typically used to enable a caller to decode the
// output of an actor method call (message).
func (api *API) ActorGetSignature(ctx context.Context, actorAddr address.Address, method types.MethodID) (_ *vm.FunctionSignature, err error) {
return api.chain.GetActorSignature(ctx, actorAddr, method)
}
// ActorLs returns a channel with actors from the latest state on the chain
func (api *API) ActorLs(ctx context.Context) (<-chan state.GetAllActorsResult, error) {
return api.chain.LsActors(ctx)
}
// BlockTime returns the block time used by the consensus protocol.
func (api *API) BlockTime() time.Duration {
return api.expected.BlockTime()
}
// ConfigSet sets the given parameters at the given path in the local config.
// The given path may be either a single field name, or a dotted path to a field.
// The JSON value may be either a single value or a whole data structure to be replaced.
// For example:
// ConfigSet("datastore.path", "dev/null") and ConfigSet("datastore", "{\"path\":\"dev/null\"}")
// are the same operation.
func (api *API) ConfigSet(dottedPath string, paramJSON string) error {
return api.config.Set(dottedPath, paramJSON)
}
// ConfigGet gets config parameters from the given path.
// The path may be either a single field name, or a dotted path to a field.
func (api *API) ConfigGet(dottedPath string) (interface{}, error) {
return api.config.Get(dottedPath)
}
// ChainGetBlock gets a block by CID
func (api *API) ChainGetBlock(ctx context.Context, id cid.Cid) (*block.Block, error) {
return api.chain.GetBlock(ctx, id)
}
// ChainGetMessages gets a message collection by CID
func (api *API) ChainGetMessages(ctx context.Context, meta types.TxMeta) ([]*types.SignedMessage, error) {
return api.chain.GetMessages(ctx, meta)
}
// ChainGetReceipts gets a receipt collection by CID
func (api *API) ChainGetReceipts(ctx context.Context, id cid.Cid) ([]*types.MessageReceipt, error) {
return api.chain.GetReceipts(ctx, id)
}
// ChainHeadKey returns the head tipset key
func (api *API) ChainHeadKey() block.TipSetKey {
return api.chain.Head()
}
// ChainSetHead sets `key` as the new head of this chain iff it exists in the nodes chain store.
func (api *API) ChainSetHead(ctx context.Context, key block.TipSetKey) error {
return api.chain.SetHead(ctx, key)
}
// ChainTipSet returns the tipset at the given key
func (api *API) ChainTipSet(key block.TipSetKey) (block.TipSet, error) {
return api.chain.GetTipSet(key)
}
// ChainLs returns an iterator of tipsets from head to genesis
func (api *API) ChainLs(ctx context.Context) (*chain.TipsetIterator, error) {
return api.chain.Ls(ctx)
}
// ChainSampleRandomness produces a slice of random bytes sampled from a TipSet
// in the blockchain at a given height, useful for things like PoSt challenge seed
// generation.
func (api *API) ChainSampleRandomness(ctx context.Context, sampleHeight *types.BlockHeight) ([]byte, error) {
return api.chain.SampleRandomness(ctx, sampleHeight)
}
// SyncerStatus returns the current status of the active or last active chain sync operation.
func (api *API) SyncerStatus() status.Status {
return api.syncer.Status()
}
// ChainSyncHandleNewTipSet submits a chain head to the syncer for processing.
func (api *API) ChainSyncHandleNewTipSet(ci *block.ChainInfo) error {
return api.syncer.HandleNewTipSet(ci)
}
// ChainExport exports the chain from `head` up to and including the genesis block to `out`
func (api *API) ChainExport(ctx context.Context, head block.TipSetKey, out io.Writer) error {
return api.chain.ChainExport(ctx, head, out)
}
// ChainImport imports a chain from `in`.
func (api *API) ChainImport(ctx context.Context, in io.Reader) (block.TipSetKey, error) {
return api.chain.ChainImport(ctx, in)
}
// DealsIterator returns an iterator to access all deals
func (api *API) DealsIterator() (*query.Results, error) {
return api.storagedeals.Iterator()
}
// DealPut puts a given deal in the datastore
func (api *API) DealPut(storageDeal *storagedeal.Deal) error {
return api.storagedeals.Put(storageDeal)
}
// OutboxQueues lists addresses with non-empty outbox queues (in no particular order).
func (api *API) OutboxQueues() []address.Address {
return api.outbox.Queue().Queues()
}
// OutboxQueueLs lists messages in the queue for an address.
func (api *API) OutboxQueueLs(sender address.Address) []*message.Queued {
return api.outbox.Queue().List(sender)
}
// OutboxQueueClear clears messages in the queue for an address/
func (api *API) OutboxQueueClear(ctx context.Context, sender address.Address) {
api.outbox.Queue().Clear(ctx, sender)
}
// MessagePoolPending lists messages un-mined in the pool
func (api *API) MessagePoolPending() []*types.SignedMessage {
return api.msgPool.Pending()
}
// MessagePoolGet fetches a message from the pool.
func (api *API) MessagePoolGet(cid cid.Cid) (value *types.SignedMessage, ok bool) {
return api.msgPool.Get(cid)
}
// MessagePoolRemove removes a message from the message pool.
func (api *API) MessagePoolRemove(cid cid.Cid) {
api.msgPool.Remove(cid)
}
// MessagePreview previews the Gas cost of a message by running it locally on the client and
// recording the amount of Gas used.
func (api *API) MessagePreview(ctx context.Context, from, to address.Address, method types.MethodID, params ...interface{}) (types.GasUnits, error) {
return api.msgPreviewer.Preview(ctx, from, to, method, params...)
}
// MessageQuery calls an actor's method using the most recent chain state. It is read-only;
// it does not change any state. It is used to interrogate actor state. The from address
// is optional; if not provided, an address will be chosen from the node's wallet.
func (api *API) MessageQuery(ctx context.Context, optFrom, to address.Address, method types.MethodID, baseKey block.TipSetKey, params ...interface{}) ([][]byte, error) {
snapshot, err := api.actorState.Snapshot(ctx, baseKey)
if err != nil {
return [][]byte{}, err
}
return snapshot.Query(ctx, optFrom, to, method, params...)
}
// Snapshot returns an interface to the chain state at a particular tipset
func (api *API) Snapshot(ctx context.Context, baseKey block.TipSetKey) (consensus.ActorStateSnapshot, error) {
return api.actorState.Snapshot(ctx, baseKey)
}
// MessageSend sends a message. It uses the default from address if none is given and signs the
// message using the wallet. This call "sends" in the sense that it enqueues the
// message in the msg pool and broadcasts it to the network; it does not wait for the
// message to go on chain. Note that no default from address is provided. The error
// channel returned receives either nil or an error and is immediately closed after
// the message is published to the network to signal that the publish is complete.
func (api *API) MessageSend(ctx context.Context, from, to address.Address, value types.AttoFIL, gasPrice types.AttoFIL, gasLimit types.GasUnits, method types.MethodID, params ...interface{}) (cid.Cid, chan error, error) {
return api.outbox.Send(ctx, from, to, value, gasPrice, gasLimit, true, method, params...)
}
// SignedMessageSend sends a signed message.
func (api *API) SignedMessageSend(ctx context.Context, smsg *types.SignedMessage) (cid.Cid, chan error, error) {
return api.outbox.SignedSend(ctx, smsg, true)
}
// MessageFind returns a message and receipt from the blockchain, if it exists.
func (api *API) MessageFind(ctx context.Context, msgCid cid.Cid) (*msg.ChainMessage, bool, error) {
return api.msgWaiter.Find(ctx, msgCid)
}
// MessageWait invokes the callback when a message with the given cid appears on chain.
// It will find the message in both the case that it is already on chain and
// the case that it appears in a newly mined block. An error is returned if one is
// encountered or if the context is canceled. Otherwise, it waits forever for the message
// to appear on chain.
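// Illustrative usage only (assumes an *API value named api and a message CID msgCid in scope):
//
//	err := api.MessageWait(ctx, msgCid, func(b *block.Block, sm *types.SignedMessage, r *types.MessageReceipt) error {
//		// the message is now on chain; inspect the block, message or receipt here
//		return nil
//	})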
func (api *API) MessageWait(ctx context.Context, msgCid cid.Cid, cb func(*block.Block, *types.SignedMessage, *types.MessageReceipt) error) error {
return api.msgWaiter.Wait(ctx, msgCid, cb)
}
// NetworkGetBandwidthStats gets stats on the current bandwidth usage of the network
func (api *API) NetworkGetBandwidthStats() metrics.Stats {
return api.network.GetBandwidthStats()
}
// NetworkGetPeerAddresses gets the current addresses of the node
func (api *API) NetworkGetPeerAddresses() []ma.Multiaddr {
return api.network.GetPeerAddresses()
}
// NetworkGetPeerID gets the current peer id of the node
func (api *API) NetworkGetPeerID() peer.ID {
return api.network.GetPeerID()
}
// NetworkFindProvidersAsync issues a findProviders query to the filecoin network content router.
func (api *API) NetworkFindProvidersAsync(ctx context.Context, key cid.Cid, count int) <-chan peer.AddrInfo {
return api.network.Router.FindProvidersAsync(ctx, key, count)
}
// NetworkGetClosestPeers issues a getClosestPeers query to the filecoin network.
func (api *API) NetworkGetClosestPeers(ctx context.Context, key string) (<-chan peer.ID, error) {
return api.network.GetClosestPeers(ctx, key)
}
// NetworkPing sends echo request packets over the network.
func (api *API) NetworkPing(ctx context.Context, pid peer.ID) (<-chan ping.Result, error) {
return api.network.Pinger.Ping(ctx, pid)
}
// NetworkFindPeer searches the libp2p router for a given peer id
func (api *API) NetworkFindPeer(ctx context.Context, peerID peer.ID) (peer.AddrInfo, error) {
return api.network.FindPeer(ctx, peerID)
}
// NetworkConnect connects to peers at the given addresses
func (api *API) NetworkConnect(ctx context.Context, addrs []string) (<-chan net.ConnectionResult, error) {
return api.network.Connect(ctx, addrs)
}
// NetworkPeers lists peers currently available on the network
func (api *API) NetworkPeers(ctx context.Context, verbose, latency, streams bool) (*net.SwarmConnInfos, error) {
return api.network.Peers(ctx, verbose, latency, streams)
}
// SignBytes uses private key information associated with the given address to sign the given bytes.
func (api *API) SignBytes(data []byte, addr address.Address) (types.Signature, error) {
return api.wallet.SignBytes(data, addr)
}
// WalletAddresses gets addresses from the wallet
func (api *API) WalletAddresses() []address.Address {
return api.wallet.Addresses()
}
// WalletFind finds addresses on the wallet
func (api *API) WalletFind(address address.Address) (wallet.Backend, error) {
return api.wallet.Find(address)
}
// WalletGetPubKeyForAddress returns the public key for a given address
func (api *API) WalletGetPubKeyForAddress(addr address.Address) ([]byte, error) {
return api.wallet.GetPubKeyForAddress(addr)
}
// WalletNewAddress generates a new wallet address
func (api *API) WalletNewAddress(addressType string) (address.Address, error) {
switch strings.ToLower(addressType) { //this assumes that any additions to types/helpers.go will be lowercase
case types.BLS:
return wallet.NewAddress(api.wallet, address.BLS)
case types.SECP256K1:
return wallet.NewAddress(api.wallet, address.SECP256K1)
default:
return address.Undef, fmt.Errorf("invalid address type: %s", addressType)
}
}
// WalletImport adds a given set of KeyInfos to the wallet
func (api *API) WalletImport(kinfos ...*types.KeyInfo) ([]address.Address, error) {
return api.wallet.Import(kinfos...)
}
// WalletExport returns the KeyInfos for the given wallet addresses
func (api *API) WalletExport(addrs []address.Address) ([]*types.KeyInfo, error) {
return api.wallet.Export(addrs)
}
// DAGGetNode returns the associated DAG node for the passed in CID.
func (api *API) DAGGetNode(ctx context.Context, ref string) (interface{}, error) {
return api.dag.GetNode(ctx, ref)
}
// DAGGetFileSize returns the file size for a given Cid
func (api *API) DAGGetFileSize(ctx context.Context, c cid.Cid) (uint64, error) {
return api.dag.GetFileSize(ctx, c)
}
// DAGCat returns an iostream with a piece of data stored on the merkeldag with
// the given cid.
func (api *API) DAGCat(ctx context.Context, c cid.Cid) (io.Reader, error) {
return api.dag.Cat(ctx, c)
}
// DAGImportData adds data from an io reader to the merkledag and returns the
// Cid of the given data. Once the data is in the DAG, it can fetched from the
// node via Bitswap and a copy will be kept in the blockstore.
func (api *API) DAGImportData(ctx context.Context, data io.Reader) (ipld.Node, error) {
return api.dag.ImportData(ctx, data)
}
// PieceManager returns the piece manager
func (api *API) PieceManager() piecemanager.PieceManager {
return api.pieceManager()
}
| 1 | 22,720 | Are you suggesting deleting the concept of message querying from plumbing (sounds like more trouble than its worth) or suggesting deleting the snapshot based implementation? | filecoin-project-venus | go |
@@ -114,6 +114,9 @@ func addJoinOtherFlags(cmd *cobra.Command, joinOptions *types.JoinOptions) {
cmd.Flags().StringVar(&joinOptions.TarballPath, types.TarballPath, joinOptions.TarballPath,
"Use this key to set the temp directory path for KubeEdge tarball, if not exist, download it")
+
+ cmd.Flags().StringVarP(&joinOptions.Labels, types.Labels, "l", joinOptions.Labels,
+ `use this key to set the customized labels for node. you can input customized labels like {"key1":"value1", "key2":"value2"}`)
}
// newJoinOptions returns a struct ready for being used for creating cmd join flags. | 1 | /*
Copyright 2019 The KubeEdge Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cmd
import (
"fmt"
"io"
"strings"
"github.com/blang/semver"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
types "github.com/kubeedge/kubeedge/keadm/cmd/keadm/app/cmd/common"
"github.com/kubeedge/kubeedge/keadm/cmd/keadm/app/cmd/util"
)
var (
edgeJoinLongDescription = `
"keadm join" command bootstraps KubeEdge's worker node (at the edge) component.
It will also connect with the cloud component to receive
further instructions and forward telemetry data from
devices to the cloud.
`
edgeJoinExample = `
keadm join --cloudcore-ipport=<ip:port address> --edgenode-name=<unique string as edge identifier>
- For this command --cloudcore-ipport flag is a required option
- This command will download and install the default version of pre-requisites and KubeEdge
keadm join --cloudcore-ipport=10.20.30.40:10000 --edgenode-name=testing123 --kubeedge-version=%s
`
)
// NewEdgeJoin returns KubeEdge edge join command.
func NewEdgeJoin(out io.Writer, joinOptions *types.JoinOptions) *cobra.Command {
if joinOptions == nil {
joinOptions = newJoinOptions()
}
tools := make(map[string]types.ToolsInstaller)
flagVals := make(map[string]types.FlagData)
cmd := &cobra.Command{
Use: "join",
Short: "Bootstraps edge component. Checks and install (if required) the pre-requisites. Execute it on any edge node machine you wish to join",
Long: edgeJoinLongDescription,
Example: fmt.Sprintf(edgeJoinExample, types.DefaultKubeEdgeVersion),
RunE: func(cmd *cobra.Command, args []string) error {
//Visit all the flags and store their values and default values.
checkFlags := func(f *pflag.Flag) {
util.AddToolVals(f, flagVals)
}
cmd.Flags().VisitAll(checkFlags)
err := Add2ToolsList(tools, flagVals, joinOptions)
if err != nil {
return err
}
return Execute(tools)
},
}
addJoinOtherFlags(cmd, joinOptions)
return cmd
}
func addJoinOtherFlags(cmd *cobra.Command, joinOptions *types.JoinOptions) {
cmd.Flags().StringVar(&joinOptions.KubeEdgeVersion, types.KubeEdgeVersion, joinOptions.KubeEdgeVersion,
"Use this key to download and use the required KubeEdge version")
cmd.Flags().Lookup(types.KubeEdgeVersion).NoOptDefVal = joinOptions.KubeEdgeVersion
cmd.Flags().StringVar(&joinOptions.CGroupDriver, types.CGroupDriver, joinOptions.CGroupDriver,
"CGroupDriver that uses to manipulate cgroups on the host (cgroupfs or systemd), the default value is cgroupfs")
cmd.Flags().StringVar(&joinOptions.CertPath, types.CertPath, joinOptions.CertPath,
fmt.Sprintf("The certPath used by edgecore, the default value is %s", types.DefaultCertPath))
cmd.Flags().StringVarP(&joinOptions.CloudCoreIPPort, types.CloudCoreIPPort, "e", joinOptions.CloudCoreIPPort,
"IP:Port address of KubeEdge CloudCore")
if err := cmd.MarkFlagRequired(types.CloudCoreIPPort); err != nil {
fmt.Printf("mark flag required failed with error: %v\n", err)
}
cmd.Flags().StringVarP(&joinOptions.RuntimeType, types.RuntimeType, "r", joinOptions.RuntimeType,
"Container runtime type")
cmd.Flags().StringVarP(&joinOptions.EdgeNodeName, types.EdgeNodeName, "i", joinOptions.EdgeNodeName,
"KubeEdge Node unique identification string, If flag not used then the command will generate a unique id on its own")
cmd.Flags().StringVarP(&joinOptions.RemoteRuntimeEndpoint, types.RemoteRuntimeEndpoint, "p", joinOptions.RemoteRuntimeEndpoint,
"KubeEdge Edge Node RemoteRuntimeEndpoint string, If flag not set, it will use unix:///var/run/dockershim.sock")
cmd.Flags().StringVarP(&joinOptions.Token, types.Token, "t", joinOptions.Token,
"Used for edge to apply for the certificate")
cmd.Flags().StringVarP(&joinOptions.CertPort, types.CertPort, "s", joinOptions.CertPort,
"The port where to apply for the edge certificate")
cmd.Flags().StringVar(&joinOptions.TarballPath, types.TarballPath, joinOptions.TarballPath,
"Use this key to set the temp directory path for KubeEdge tarball, if not exist, download it")
}
// newJoinOptions returns a struct ready for being used for creating cmd join flags.
func newJoinOptions() *types.JoinOptions {
opts := &types.JoinOptions{}
opts.CertPath = types.DefaultCertPath
return opts
}
//Add2ToolsList Reads the flagData (containing val and default val) and join options to fill the list of tools.
func Add2ToolsList(toolList map[string]types.ToolsInstaller, flagData map[string]types.FlagData, joinOptions *types.JoinOptions) error {
var kubeVer string
flgData, ok := flagData[types.KubeEdgeVersion]
if ok {
kubeVer = util.CheckIfAvailable(flgData.Val.(string), flgData.DefVal.(string))
}
if kubeVer == "" {
var latestVersion string
for i := 0; i < util.RetryTimes; i++ {
version, err := util.GetLatestVersion()
if err != nil {
fmt.Println("Failed to get the latest KubeEdge release version")
continue
}
if len(version) > 0 {
kubeVer = strings.TrimPrefix(version, "v")
latestVersion = version
break
}
}
if len(latestVersion) == 0 {
fmt.Println("Failed to get the latest KubeEdge release version, will use default version")
kubeVer = types.DefaultKubeEdgeVersion
}
}
toolList["KubeEdge"] = &util.KubeEdgeInstTool{
Common: util.Common{
ToolVersion: semver.MustParse(kubeVer),
},
CloudCoreIP: joinOptions.CloudCoreIPPort,
EdgeNodeName: joinOptions.EdgeNodeName,
RuntimeType: joinOptions.RuntimeType,
CertPath: joinOptions.CertPath,
RemoteRuntimeEndpoint: joinOptions.RemoteRuntimeEndpoint,
Token: joinOptions.Token,
CertPort: joinOptions.CertPort,
CGroupDriver: joinOptions.CGroupDriver,
TarballPath: joinOptions.TarballPath,
}
toolList["MQTT"] = &util.MQTTInstTool{}
return nil
}
//Execute the installation for each tool and start edgecore
func Execute(toolList map[string]types.ToolsInstaller) error {
//Install all the required pre-requisite tools
for name, tool := range toolList {
if name != "KubeEdge" {
err := tool.InstallTools()
if err != nil {
return err
}
}
}
//Install and Start KubeEdge Node
return toolList["KubeEdge"].InstallTools()
}
| 1 | 21,607 | I recommend using StringSliceVarP to resolve the label flag, like `-l key1=value1,key2=value2`. What do you think? | kubeedge-kubeedge | go |
@@ -711,6 +711,18 @@ class Form extends WidgetBase
}
}
}
+ /**
+ * Add tab icons
+ *
+ * @param array $icons
+ * @return void
+ */
+ public function addTabIcons(array $icons)
+ {
+ $this->allTabs->primary->icons = $icons;
+ $this->allTabs->secondary->icons = $icons;
+ $this->allTabs->outside->icons = $icons;
+ }
/**
* Add tab fields. | 1 | <?php namespace Backend\Widgets;
use Lang;
use Form as FormHelper;
use Backend\Classes\FormTabs;
use Backend\Classes\FormField;
use Backend\Classes\WidgetBase;
use Backend\Classes\WidgetManager;
use Backend\Classes\FormWidgetBase;
use October\Rain\Database\Model;
use October\Rain\Html\Helper as HtmlHelper;
use ApplicationException;
use Exception;
/**
* Form Widget
* Used for building back-end forms and rendering them.
*
* @package october\backend
* @author Alexey Bobkov, Samuel Georges
*/
class Form extends WidgetBase
{
use \Backend\Traits\FormModelSaver;
//
// Configurable properties
//
/**
* @var array Form field configuration.
*/
public $fields;
/**
* @var array Primary tab configuration.
*/
public $tabs;
/**
* @var array Secondary tab configuration.
*/
public $secondaryTabs;
/**
* @var Model Form model object.
*/
public $model;
/**
* @var array Dataset containing field values, if none supplied, model is used.
*/
public $data;
/**
* @var string The context of this form, fields that do not belong
* to this context will not be shown.
*/
public $context;
/**
* @var string If the field element names should be contained in an array.
* Eg: <input name="nameArray[fieldName]" />
*/
public $arrayName;
/**
* @var bool Used to flag that this form is being rendered as part of another form,
* a good indicator to expect that the form model and dataset values will differ.
*/
public $isNested = false;
//
// Object properties
//
/**
* @inheritDoc
*/
protected $defaultAlias = 'form';
/**
* @var boolean Determines if field definitions have been created.
*/
protected $fieldsDefined = false;
/**
* @var array Collection of all fields used in this form.
* @see Backend\Classes\FormField
*/
protected $allFields = [];
/**
* @var object Collection of tab sections used in this form.
* @see Backend\Classes\FormTabs
*/
protected $allTabs = [
'outside' => null,
'primary' => null,
'secondary' => null,
];
/**
* @var array Collection of all form widgets used in this form.
*/
protected $formWidgets = [];
/**
* @var string Active session key, used for editing forms and deferred bindings.
*/
public $sessionKey;
/**
* @var bool Render this form with uneditable preview data.
*/
public $previewMode = false;
/**
* @var \Backend\Classes\WidgetManager
*/
protected $widgetManager;
/**
* @inheritDoc
*/
public function init()
{
$this->fillFromConfig([
'fields',
'tabs',
'secondaryTabs',
'model',
'data',
'context',
'arrayName',
'isNested',
]);
$this->widgetManager = WidgetManager::instance();
$this->allTabs = (object) $this->allTabs;
$this->validateModel();
}
/**
* Ensure fields are defined and form widgets are registered so they can
* also be bound to the controller; this allows their AJAX features to
* operate.
*
* @return void
*/
public function bindToController()
{
$this->defineFormFields();
parent::bindToController();
}
/**
* @inheritDoc
*/
protected function loadAssets()
{
$this->addJs('js/october.form.js', [
'build' => 'core',
'cache' => 'false'
]);
}
/**
* Renders the widget.
*
* Options:
* - preview: Render this form as an uneditable preview. Default: false
* - useContainer: Wrap the result in a container, used by AJAX. Default: true
* - section: Which form section to render. Default: null
* - outside: Renders the Outside Fields section.
* - primary: Renders the Primary Tabs section.
* - secondary: Renders the Secondary Tabs section.
* - null: Renders all sections
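*
* Illustrative usage only (assumes $form is an instance of this widget bound to a controller):
* echo $form->render(['section' => 'primary']); // render only the primary tabs
* echo $form->render(['preview' => true]); // render the whole form as an uneditable preview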
*
* @param array $options
* @return string|bool The rendered partial contents, or false if suppressing an exception
*/
public function render($options = [])
{
if (isset($options['preview'])) {
$this->previewMode = $options['preview'];
}
if (!isset($options['useContainer'])) {
$options['useContainer'] = true;
}
if (!isset($options['section'])) {
$options['section'] = null;
}
$extraVars = [];
$targetPartial = 'form';
/*
* Determine the partial to use based on the supplied section option
*/
if ($section = $options['section']) {
$section = strtolower($section);
if (isset($this->allTabs->{$section})) {
$extraVars['tabs'] = $this->allTabs->{$section};
}
$targetPartial = 'section';
$extraVars['renderSection'] = $section;
}
/*
* Apply a container to the element
*/
if ($useContainer = $options['useContainer']) {
$targetPartial = $section ? 'section-container' : 'form-container';
}
$this->prepareVars();
/*
* Force preview mode on all widgets
*/
if ($this->previewMode) {
foreach ($this->formWidgets as $widget) {
$widget->previewMode = $this->previewMode;
}
}
return $this->makePartial($targetPartial, $extraVars);
}
/**
* Renders a single form field
*
* Options:
* - useContainer: Wrap the result in a container, used by AJAX. Default: true
*
* @param string|array $field The field name or definition
* @param array $options
* @return string|bool The rendered partial contents, or false if suppressing an exception
*/
public function renderField($field, $options = [])
{
$this->prepareVars();
if (is_string($field)) {
if (!isset($this->allFields[$field])) {
throw new ApplicationException(Lang::get(
'backend::lang.form.missing_definition',
compact('field')
));
}
$field = $this->allFields[$field];
}
if (!isset($options['useContainer'])) {
$options['useContainer'] = true;
}
$targetPartial = $options['useContainer'] ? 'field-container' : 'field';
return $this->makePartial($targetPartial, ['field' => $field]);
}
/**
* Renders the HTML element for a field
* @param FormWidgetBase $field
* @return string|bool The rendered partial contents, or false if suppressing an exception
*/
public function renderFieldElement($field)
{
return $this->makePartial(
'field_' . $field->type,
[
'field' => $field,
'formModel' => $this->model
]
);
}
/**
* Validate the supplied form model.
*
* @return mixed
*/
protected function validateModel()
{
if (!$this->model) {
throw new ApplicationException(Lang::get(
'backend::lang.form.missing_model',
['class'=>get_class($this->controller)]
));
}
$this->data = isset($this->data)
? (object) $this->data
: $this->model;
return $this->model;
}
/**
* Prepares the form data
*
* @return void
*/
protected function prepareVars()
{
$this->defineFormFields();
$this->applyFiltersFromModel();
$this->vars['sessionKey'] = $this->getSessionKey();
$this->vars['outsideTabs'] = $this->allTabs->outside;
$this->vars['primaryTabs'] = $this->allTabs->primary;
$this->vars['secondaryTabs'] = $this->allTabs->secondary;
}
/**
* Sets or resets form field values.
* @param array $data
* @return array
*/
public function setFormValues($data = null)
{
if ($data === null) {
$data = $this->getSaveData();
}
/*
* Fill the model as if it were to be saved
*/
$this->prepareModelsToSave($this->model, $data);
/*
* Data set differs from model
*/
if ($this->data !== $this->model) {
$this->data = (object) array_merge((array) $this->data, (array) $data);
}
/*
* Set field values from data source
*/
foreach ($this->allFields as $field) {
$field->value = $this->getFieldValue($field);
}
return $data;
}
/**
* Event handler for refreshing the form.
*
* @return array
*/
public function onRefresh()
{
$result = [];
$saveData = $this->getSaveData();
/**
* @event backend.form.beforeRefresh
* Called before the form is refreshed, modify the $dataHolder->data property in place
*
* Example usage:
*
* Event::listen('backend.form.beforeRefresh', function((\Backend\Widgets\Form) $formWidget, (stdClass) $dataHolder) {
* $dataHolder->data = $arrayOfSaveDataToReplaceExistingDataWith;
* });
*
* Or
*
* $formWidget->bindEvent('form.beforeRefresh', function ((stdClass) $dataHolder) {
* $dataHolder->data = $arrayOfSaveDataToReplaceExistingDataWith;
* });
*
*/
$dataHolder = (object) ['data' => $saveData];
$this->fireSystemEvent('backend.form.beforeRefresh', [$dataHolder]);
$saveData = $dataHolder->data;
/*
* Set the form variables and prepare the widget
*/
$this->setFormValues($saveData);
$this->prepareVars();
/**
* @event backend.form.refreshFields
* Called when the form is refreshed, giving the opportunity to modify the form fields
*
* Example usage:
*
* Event::listen('backend.form.refreshFields', function((\Backend\Widgets\Form) $formWidget, (array) $allFields) {
* $allFields['name']->required = false;
* });
*
* Or
*
* $formWidget->bindEvent('form.refreshFields', function ((array) $allFields) {
* $allFields['name']->required = false;
* });
*
*/
$this->fireSystemEvent('backend.form.refreshFields', [$this->allFields]);
/*
* If an array of fields is supplied, update specified fields individually.
*/
if (($updateFields = post('fields')) && is_array($updateFields)) {
foreach ($updateFields as $field) {
if (!isset($this->allFields[$field])) {
continue;
}
/** @var FormWidgetBase $fieldObject */
$fieldObject = $this->allFields[$field];
$result['#' . $fieldObject->getId('group')] = $this->makePartial('field', ['field' => $fieldObject]);
}
}
/*
* Update the whole form
*/
if (empty($result)) {
$result = ['#'.$this->getId() => $this->makePartial('form')];
}
/**
* @event backend.form.refresh
* Called after the form is refreshed, should return an array of additional result parameters.
*
* Example usage:
*
* Event::listen('backend.form.refresh', function((\Backend\Widgets\Form) $formWidget, (array) $result) {
* $result['#my-partial-id' => $formWidget->makePartial('$/path/to/custom/backend/partial.htm')];
* return $result;
* });
*
* Or
*
* $formWidget->bindEvent('form.refresh', function ((array) $result) use ((\Backend\Widgets\Form $formWidget)) {
* $result['#my-partial-id' => $formWidget->makePartial('$/path/to/custom/backend/partial.htm')];
* return $result;
* });
*
*/
$eventResults = $this->fireSystemEvent('backend.form.refresh', [$result], false);
foreach ($eventResults as $eventResult) {
$result = $eventResult + $result;
}
return $result;
}
/**
* Creates a flat array of form fields from the configuration.
* Also slots fields into their respective tabs.
*
* @return void
*/
protected function defineFormFields()
{
if ($this->fieldsDefined) {
return;
}
/**
* @event backend.form.extendFieldsBefore
* Called before the form fields are defined
*
* Example usage:
*
* Event::listen('backend.form.extendFieldsBefore', function((\Backend\Widgets\Form) $formWidget) {
* // You should always check to see if you're extending correct model/controller
* if (!$widget->model instanceof \Foo\Example\Models\Bar) {
* return;
* }
*
* // Here you can't use addFields() because it will throw you an exception because form is not yet created
* // and it does not have tabs and fields
* // For this example we will pretend that we want to add a new field named example_field
* $widget->fields['example_field'] = [
* 'label' => 'Example field',
* 'comment' => 'Your example field',
* 'type' => 'text',
* ];
* });
*
* Or
*
* $formWidget->bindEvent('form.extendFieldsBefore', function () use ((\Backend\Widgets\Form $formWidget)) {
* // You should always check to see if you're extending correct model/controller
* if (!$widget->model instanceof \Foo\Example\Models\Bar) {
* return;
* }
*
* // Here you can't use addFields() because it will throw you an exception because form is not yet created
* // and it does not have tabs and fields
* // For this example we will pretend that we want to add a new field named example_field
* $widget->fields['example_field'] = [
* 'label' => 'Example field',
* 'comment' => 'Your example field',
* 'type' => 'text',
* ];
* });
*
*/
$this->fireSystemEvent('backend.form.extendFieldsBefore');
/*
* Outside fields
*/
if (!isset($this->fields) || !is_array($this->fields)) {
$this->fields = [];
}
$this->allTabs->outside = new FormTabs(FormTabs::SECTION_OUTSIDE, (array) $this->config);
$this->addFields($this->fields);
/*
* Primary Tabs + Fields
*/
if (!isset($this->tabs['fields']) || !is_array($this->tabs['fields'])) {
$this->tabs['fields'] = [];
}
$this->allTabs->primary = new FormTabs(FormTabs::SECTION_PRIMARY, $this->tabs);
$this->addFields($this->tabs['fields'], FormTabs::SECTION_PRIMARY);
/*
* Secondary Tabs + Fields
*/
if (!isset($this->secondaryTabs['fields']) || !is_array($this->secondaryTabs['fields'])) {
$this->secondaryTabs['fields'] = [];
}
$this->allTabs->secondary = new FormTabs(FormTabs::SECTION_SECONDARY, $this->secondaryTabs);
$this->addFields($this->secondaryTabs['fields'], FormTabs::SECTION_SECONDARY);
/**
* @event backend.form.extendFields
* Called after the form fields are defined
*
* Example usage:
*
* Event::listen('backend.form.extendFields', function((\Backend\Widgets\Form) $formWidget) {
* // Only for the User controller
* if (!$widget->getController() instanceof \RainLab\User\Controllers\Users) {
* return;
* }
*
* // Only for the User model
* if (!$widget->model instanceof \RainLab\User\Models\User) {
* return;
* }
*
* // Add an extra birthday field
* $widget->addFields([
* 'birthday' => [
* 'label' => 'Birthday',
* 'comment' => 'Select the user\'s birthday',
* 'type' => 'datepicker'
* ]
* ]);
*
* // Remove a Surname field
* $widget->removeField('surname');
* });
*
* Or
*
* $formWidget->bindEvent('form.extendFields', function () use ((\Backend\Widgets\Form $formWidget)) {
* // Only for the User controller
* if (!$widget->getController() instanceof \RainLab\User\Controllers\Users) {
* return;
* }
*
* // Only for the User model
* if (!$widget->model instanceof \RainLab\User\Models\User) {
* return;
* }
*
* // Add an extra birthday field
* $widget->addFields([
* 'birthday' => [
* 'label' => 'Birthday',
* 'comment' => 'Select the user\'s birthday',
* 'type' => 'datepicker'
* ]
* ]);
*
* // Remove a Surname field
* $widget->removeField('surname');
* });
*
*/
$this->fireSystemEvent('backend.form.extendFields', [$this->allFields]);
/*
* Convert automatic spanned fields
*/
foreach ($this->allTabs->outside->getFields() as $fields) {
$this->processAutoSpan($fields);
}
foreach ($this->allTabs->primary->getFields() as $fields) {
$this->processAutoSpan($fields);
}
foreach ($this->allTabs->secondary->getFields() as $fields) {
$this->processAutoSpan($fields);
}
/*
* At least one tab section should stretch
*/
if (
$this->allTabs->secondary->stretch === null
&& $this->allTabs->primary->stretch === null
&& $this->allTabs->outside->stretch === null
) {
if ($this->allTabs->secondary->hasFields()) {
$this->allTabs->secondary->stretch = true;
}
elseif ($this->allTabs->primary->hasFields()) {
$this->allTabs->primary->stretch = true;
}
else {
$this->allTabs->outside->stretch = true;
}
}
/*
* Bind all form widgets to controller
*/
foreach ($this->allFields as $field) {
if ($field->type !== 'widget') {
continue;
}
$widget = $this->makeFormFieldWidget($field);
$widget->bindToController();
}
$this->fieldsDefined = true;
}
/**
     * Converts fields with a span set to 'auto' to either
* 'left' or 'right' depending on the previous field.
*
* @return void
*/
protected function processAutoSpan($fields)
{
$prevSpan = null;
foreach ($fields as $field) {
if (strtolower($field->span) === 'auto') {
if ($prevSpan === 'left') {
$field->span = 'right';
}
else {
$field->span = 'left';
}
}
$prevSpan = $field->span;
}
}
/**
     * Programmatically add fields, used internally and for extensibility.
*
* @param array $fields
* @param string $addToArea
* @return void
*/
public function addFields(array $fields, $addToArea = null)
{
foreach ($fields as $name => $config) {
$fieldObj = $this->makeFormField($name, $config);
$fieldTab = is_array($config) ? array_get($config, 'tab') : null;
/*
* Check that the form field matches the active context
*/
if ($fieldObj->context !== null) {
$context = is_array($fieldObj->context) ? $fieldObj->context : [$fieldObj->context];
if (!in_array($this->getContext(), $context)) {
continue;
}
}
$this->allFields[$name] = $fieldObj;
switch (strtolower($addToArea)) {
case FormTabs::SECTION_PRIMARY:
$this->allTabs->primary->addField($name, $fieldObj, $fieldTab);
break;
case FormTabs::SECTION_SECONDARY:
$this->allTabs->secondary->addField($name, $fieldObj, $fieldTab);
break;
default:
$this->allTabs->outside->addField($name, $fieldObj);
break;
}
}
}
/**
* Add tab fields.
*
* @param array $fields
* @return void
*/
public function addTabFields(array $fields)
{
$this->addFields($fields, 'primary');
}
/**
* @param array $fields
* @return void
*/
public function addSecondaryTabFields(array $fields)
{
$this->addFields($fields, 'secondary');
}
/**
     * Programmatically remove a field.
*
* @param string $name
* @return bool
*/
public function removeField($name)
{
if (!isset($this->allFields[$name])) {
return false;
}
/*
* Remove from tabs
*/
$this->allTabs->primary->removeField($name);
$this->allTabs->secondary->removeField($name);
$this->allTabs->outside->removeField($name);
/*
* Remove from main collection
*/
unset($this->allFields[$name]);
return true;
}
/**
     * Programmatically remove all fields belonging to a tab.
*
* @param string $name
* @return bool
*/
public function removeTab($name)
{
foreach ($this->allFields as $fieldName => $field) {
if ($field->tab == $name) {
$this->removeField($fieldName);
}
}
}
/**
* Creates a form field object from name and configuration.
*
* @param string $name
* @param array $config
* @return FormField
*/
protected function makeFormField($name, $config = [])
{
$label = $config['label'] ?? null;
list($fieldName, $fieldContext) = $this->getFieldName($name);
$field = new FormField($fieldName, $label);
if ($fieldContext) {
$field->context = $fieldContext;
}
$field->arrayName = $this->arrayName;
$field->idPrefix = $this->getId();
/*
* Simple field type
*/
if (is_string($config)) {
if ($this->isFormWidget($config) !== false) {
$field->displayAs('widget', ['widget' => $config]);
}
else {
$field->displayAs($config);
}
}
/*
* Defined field type
*/
else {
$fieldType = $config['type'] ?? null;
if (!is_string($fieldType) && $fieldType !== null) {
throw new ApplicationException(Lang::get(
'backend::lang.field.invalid_type',
['type' => gettype($fieldType)]
));
}
/*
* Widget with configuration
*/
if ($this->isFormWidget($fieldType) !== false) {
$config['widget'] = $fieldType;
$fieldType = 'widget';
}
$field->displayAs($fieldType, $config);
}
/*
* Set field value
*/
$field->value = $this->getFieldValue($field);
/*
* Apply the field name to the validation engine
*/
$attrName = implode('.', HtmlHelper::nameToArray($field->fieldName));
if ($this->model && method_exists($this->model, 'setValidationAttributeName')) {
$this->model->setValidationAttributeName($attrName, $field->label);
}
/*
* Check model if field is required
*/
if ($field->required === null && $this->model && method_exists($this->model, 'isAttributeRequired')) {
// Check nested fields
if ($this->isNested) {
// Get the current attribute level
$nameArray = HtmlHelper::nameToArray($this->arrayName);
unset($nameArray[0]);
// Convert any numeric indexes to wildcards
foreach ($nameArray as $i => $value) {
if (preg_match('/^[0-9]*$/', $value)) {
$nameArray[$i] = '*';
}
}
// Recombine names for full attribute name in rules array
$attrName = implode('.', $nameArray) . ".{$attrName}";
}
$field->required = $this->model->isAttributeRequired($attrName);
}
/*
* Get field options from model
*/
$optionModelTypes = ['dropdown', 'radio', 'checkboxlist', 'balloon-selector'];
if (in_array($field->type, $optionModelTypes, false)) {
/*
* Defer the execution of option data collection
*/
$field->options(function () use ($field, $config) {
$fieldOptions = $config['options'] ?? null;
$fieldOptions = $this->getOptionsFromModel($field, $fieldOptions);
return $fieldOptions;
});
}
return $field;
}
/**
* Check if a field type is a widget or not
*
* @param string $fieldType
* @return boolean
*/
protected function isFormWidget($fieldType)
{
if ($fieldType === null) {
return false;
}
if (strpos($fieldType, '\\')) {
return true;
}
$widgetClass = $this->widgetManager->resolveFormWidget($fieldType);
if (!class_exists($widgetClass)) {
return false;
}
if (is_subclass_of($widgetClass, 'Backend\Classes\FormWidgetBase')) {
return true;
}
return false;
}
/**
* Makes a widget object from a form field object.
*
* @param $field
* @return \Backend\Traits\FormWidgetBase|null
*/
protected function makeFormFieldWidget($field)
{
if ($field->type !== 'widget') {
return null;
}
if (isset($this->formWidgets[$field->fieldName])) {
return $this->formWidgets[$field->fieldName];
}
$widgetConfig = $this->makeConfig($field->config);
$widgetConfig->alias = $this->alias . studly_case(HtmlHelper::nameToId($field->fieldName));
$widgetConfig->sessionKey = $this->getSessionKey();
$widgetConfig->previewMode = $this->previewMode;
$widgetConfig->model = $this->model;
$widgetConfig->data = $this->data;
$widgetConfig->parentForm = $this;
$widgetName = $widgetConfig->widget;
$widgetClass = $this->widgetManager->resolveFormWidget($widgetName);
if (!class_exists($widgetClass)) {
throw new ApplicationException(Lang::get(
'backend::lang.widget.not_registered',
['name' => $widgetClass]
));
}
$widget = $this->makeFormWidget($widgetClass, $field, $widgetConfig);
/*
* If options config is defined, request options from the model.
*/
if (isset($field->config['options'])) {
$field->options(function () use ($field) {
$fieldOptions = $field->config['options'];
if ($fieldOptions === true) $fieldOptions = null;
$fieldOptions = $this->getOptionsFromModel($field, $fieldOptions);
return $fieldOptions;
});
}
return $this->formWidgets[$field->fieldName] = $widget;
}
/**
* Get all the loaded form widgets for the instance.
*
* @return array
*/
public function getFormWidgets()
{
return $this->formWidgets;
}
/**
* Get a specified form widget
*
* @param string $field
* @return mixed
*/
public function getFormWidget($field)
{
if (isset($this->formWidgets[$field])) {
return $this->formWidgets[$field];
}
return null;
}
/**
* Get all the registered fields for the instance.
*
* @return array
*/
public function getFields()
{
return $this->allFields;
}
/**
* Get a specified field object
*
* @param string $field
* @return mixed
*/
public function getField($field)
{
if (isset($this->allFields[$field])) {
return $this->allFields[$field];
}
return null;
}
/**
* Get all tab objects for the instance.
*
* @return object[FormTabs]
*/
public function getTabs()
{
return $this->allTabs;
}
/**
* Get a specified tab object.
* Options: outside, primary, secondary.
*
     * @param string $tab
* @return mixed
*/
public function getTab($tab)
{
if (isset($this->allTabs->$tab)) {
return $this->allTabs->$tab;
}
return null;
}
/**
* Parses a field's name
* @param string $field Field name
* @return array [columnName, context]
*/
protected function getFieldName($field)
{
if (strpos($field, '@') === false) {
return [$field, null];
}
return explode('@', $field);
}
/**
* Looks up the field value.
* @param mixed $field
* @return string
*/
protected function getFieldValue($field)
{
if (is_string($field)) {
if (!isset($this->allFields[$field])) {
throw new ApplicationException(Lang::get(
'backend::lang.form.missing_definition',
compact('field')
));
}
$field = $this->allFields[$field];
}
$defaultValue = $this->shouldFetchDefaultValues()
? $field->getDefaultFromData($this->data)
: null;
return $field->getValueFromData(
$this->data,
is_string($defaultValue) ? trans($defaultValue) : $defaultValue
);
}
/**
* Checks if default values should be taken from data.
* This should be done when model exists or when explicitly configured
*/
protected function shouldFetchDefaultValues() {
$enableDefaults = object_get($this->config, 'enableDefaults');
if ($enableDefaults === false) {
return false;
}
return !$this->model->exists || $enableDefaults;
}
/**
     * Returns an HTML encoded value containing the other fields this
* field depends on
* @param \Backend\Classes\FormField $field
* @return string
*/
protected function getFieldDepends($field)
{
if (!$field->dependsOn) {
return '';
}
$dependsOn = is_array($field->dependsOn) ? $field->dependsOn : [$field->dependsOn];
$dependsOn = htmlspecialchars(json_encode($dependsOn), ENT_QUOTES, 'UTF-8');
return $dependsOn;
}
/**
* Helper method to determine if field should be rendered
* with label and comments.
* @param \Backend\Classes\FormField $field
* @return boolean
*/
protected function showFieldLabels($field)
{
if (in_array($field->type, ['checkbox', 'switch', 'section'])) {
return false;
}
if ($field->type === 'widget') {
return $this->makeFormFieldWidget($field)->showLabels;
}
return true;
}
/**
* Returns post data from a submitted form.
*
* @return array
*/
public function getSaveData()
{
$this->defineFormFields();
$result = [];
/*
* Source data
*/
$data = $this->arrayName ? post($this->arrayName) : post();
if (!$data) {
$data = [];
}
/*
* Spin over each field and extract the postback value
*/
foreach ($this->allFields as $field) {
/*
* Disabled and hidden should be omitted from data set
*/
if ($field->disabled || $field->hidden) {
continue;
}
/*
* Handle HTML array, eg: item[key][another]
*/
$parts = HtmlHelper::nameToArray($field->fieldName);
if (($value = $this->dataArrayGet($data, $parts)) !== null) {
/*
* Number fields should be converted to integers
*/
if ($field->type === 'number') {
$value = !strlen(trim($value)) ? null : (float) $value;
}
$this->dataArraySet($result, $parts, $value);
}
}
/*
* Give widgets an opportunity to process the data.
*/
foreach ($this->formWidgets as $field => $widget) {
$parts = HtmlHelper::nameToArray($field);
if ((isset($widget->config->disabled) && $widget->config->disabled)
|| (isset($widget->config->hidden) && $widget->config->hidden)) {
continue;
}
$widgetValue = $widget->getSaveValue($this->dataArrayGet($result, $parts));
$this->dataArraySet($result, $parts, $widgetValue);
}
return $result;
}
/*
* Allow the model to filter fields.
*/
protected function applyFiltersFromModel()
{
/*
* Standard usage
*/
if (method_exists($this->model, 'filterFields')) {
$this->model->filterFields((object) $this->allFields, $this->getContext());
}
/*
* Advanced usage
*/
if (method_exists($this->model, 'fireEvent')) {
/**
* @event model.form.filterFields
* Called after the form is initialized
*
* Example usage:
*
* $model->bindEvent('model.form.filterFields', function ((\Backend\Widgets\Form) $formWidget, (stdClass) $fields, (string) $context) use (\October\Rain\Database\Model $model) {
* if ($model->source_type == 'http') {
* $fields->source_url->hidden = false;
* $fields->git_branch->hidden = true;
* } elseif ($model->source_type == 'git') {
* $fields->source_url->hidden = false;
* $fields->git_branch->hidden = false;
* } else {
* $fields->source_url->hidden = true;
* $fields->git_branch->hidden = true;
* }
* });
*
*/
$this->model->fireEvent('model.form.filterFields', [$this, (object) $this->allFields, $this->getContext()]);
}
}
/**
* Looks at the model for defined options.
*
* @param $field
* @param $fieldOptions
* @return mixed
*/
protected function getOptionsFromModel($field, $fieldOptions)
{
/*
* Advanced usage, supplied options are callable
*/
if (is_array($fieldOptions) && is_callable($fieldOptions)) {
$fieldOptions = call_user_func($fieldOptions, $this, $field);
}
/*
* Refer to the model method or any of its behaviors
*/
if (!is_array($fieldOptions) && !$fieldOptions) {
try {
list($model, $attribute) = $field->resolveModelAttribute($this->model, $field->fieldName);
}
catch (Exception $ex) {
throw new ApplicationException(Lang::get('backend::lang.field.options_method_invalid_model', [
'model' => get_class($this->model),
'field' => $field->fieldName
]));
}
$methodName = 'get'.studly_case($attribute).'Options';
if (
!$this->objectMethodExists($model, $methodName) &&
!$this->objectMethodExists($model, 'getDropdownOptions')
) {
throw new ApplicationException(Lang::get('backend::lang.field.options_method_not_exists', [
'model' => get_class($model),
'method' => $methodName,
'field' => $field->fieldName
]));
}
if ($this->objectMethodExists($model, $methodName)) {
$fieldOptions = $model->$methodName($field->value, $this->data);
}
else {
$fieldOptions = $model->getDropdownOptions($attribute, $field->value, $this->data);
}
}
/*
* Field options are an explicit method reference
*/
elseif (is_string($fieldOptions)) {
if (!$this->objectMethodExists($this->model, $fieldOptions)) {
throw new ApplicationException(Lang::get('backend::lang.field.options_method_not_exists', [
'model' => get_class($this->model),
'method' => $fieldOptions,
'field' => $field->fieldName
]));
}
$fieldOptions = $this->model->$fieldOptions($field->value, $field->fieldName, $this->data);
}
return $fieldOptions;
}
/**
* Returns the active session key.
*
* @return \Illuminate\Routing\Route|mixed|string
*/
public function getSessionKey()
{
if ($this->sessionKey) {
return $this->sessionKey;
}
if (post('_session_key')) {
return $this->sessionKey = post('_session_key');
}
return $this->sessionKey = FormHelper::getSessionKey();
}
/**
* Returns the active context for displaying the form.
*
* @return string
*/
public function getContext()
{
return $this->context;
}
/**
* Internal helper for method existence checks.
*
* @param object $object
* @param string $method
* @return boolean
*/
protected function objectMethodExists($object, $method)
{
if (method_exists($object, 'methodExists')) {
return $object->methodExists($method);
}
return method_exists($object, $method);
}
/**
     * Variant of array_get() that preserves dots in key names.
*
* @param array $array
* @param array $parts
* @param null $default
* @return array|null
*/
protected function dataArrayGet(array $array, array $parts, $default = null)
{
if ($parts === null) {
return $array;
}
if (count($parts) === 1) {
$key = array_shift($parts);
if (isset($array[$key])) {
return $array[$key];
}
return $default;
}
foreach ($parts as $segment) {
if (!is_array($array) || !array_key_exists($segment, $array)) {
return $default;
}
$array = $array[$segment];
}
return $array;
}
/**
     * Variant of array_set() that preserves dots in key names.
*
* @param array $array
* @param array $parts
* @param string $value
* @return array
*/
protected function dataArraySet(array &$array, array $parts, $value)
{
if ($parts === null) {
return $value;
}
while (count($parts) > 1) {
$key = array_shift($parts);
if (!isset($array[$key]) || !is_array($array[$key])) {
$array[$key] = [];
}
$array =& $array[$key];
}
$array[array_shift($parts)] = $value;
return $array;
}
}
| 1 | 16,138 | @Samuell1 Will assigning the same icons array to all the tabs result in, for example, a primary tab called "Colours" and secondary tab called "Colours" having the same icon? | octobercms-october | php |
@@ -437,6 +437,9 @@ def initTranslation():
finally:
del callerFrame # Avoid reference problems with frames (per python docs)
+def getTranslatedMessage(translatedMessage):
+ return _(translatedMessage)
+
def _translatedManifestPaths(lang=None, forBundle=False):
if lang is None:
 		lang = languageHandler.getLanguage() # can't rely on default keyword arguments here.
| 1 |
# -*- coding: UTF-8 -*-
#addonHandler.py
#A part of NonVisual Desktop Access (NVDA)
#Copyright (C) 2012-2016 Rui Batista, NV Access Limited, Noelia Ruiz Martínez, Joseph Lee
#This file is covered by the GNU General Public License.
#See the file COPYING for more details.
import sys
import os.path
import gettext
import glob
import tempfile
import cPickle
import inspect
import itertools
import collections
import pkgutil
import shutil
from cStringIO import StringIO
import zipfile
from configobj import ConfigObj, ConfigObjError
from validate import Validator
import config
import globalVars
import languageHandler
from logHandler import log
import winKernel
MANIFEST_FILENAME = "manifest.ini"
stateFilename="addonsState.pickle"
BUNDLE_EXTENSION = "nvda-addon"
BUNDLE_MIMETYPE = "application/x-nvda-addon"
NVDA_ADDON_PROG_ID = "NVDA.Addon.1"
ADDON_PENDINGINSTALL_SUFFIX=".pendingInstall"
DELETEDIR_SUFFIX=".delete"
state={}
def loadState():
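	"""Loads the add-ons state (pending installs, removes and disabled add-ons) from the user configuration folder, falling back to defaults on failure."""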
global state
statePath=os.path.join(globalVars.appArgs.configPath,stateFilename)
try:
state = cPickle.load(file(statePath, "r"))
except:
# Defaults.
state = {
"pendingRemovesSet":set(),
"pendingInstallsSet":set(),
"disabledAddons":set(),
"pendingDisableSet":set(),
}
def saveState():
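	"""Saves the add-ons state to the user configuration folder."""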
statePath=os.path.join(globalVars.appArgs.configPath,stateFilename)
try:
cPickle.dump(state, file(statePath, "wb"))
except:
log.debugWarning("Error saving state", exc_info=True)
def getRunningAddons():
""" Returns currently loaded addons.
"""
return (addon for addon in getAvailableAddons() if addon.isRunning)
def completePendingAddonRemoves():
"""Removes any addons that could not be removed on the last run of NVDA"""
user_addons = os.path.abspath(os.path.join(globalVars.appArgs.configPath, "addons"))
pendingRemovesSet=state['pendingRemovesSet']
for addonName in list(pendingRemovesSet):
addonPath=os.path.join(user_addons,addonName)
if os.path.isdir(addonPath):
addon=Addon(addonPath)
try:
addon.completeRemove()
except RuntimeError:
log.exception("Failed to remove %s add-on"%addonName)
continue
pendingRemovesSet.discard(addonName)
def completePendingAddonInstalls():
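	"""Completes add-on installs that were pending from the last run of NVDA by renaming their pending-install directories."""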
user_addons = os.path.abspath(os.path.join(globalVars.appArgs.configPath, "addons"))
pendingInstallsSet=state['pendingInstallsSet']
for addonName in pendingInstallsSet:
newPath=os.path.join(user_addons,addonName)
oldPath=newPath+ADDON_PENDINGINSTALL_SUFFIX
try:
os.rename(oldPath,newPath)
except:
log.error("Failed to complete addon installation for %s"%addonName,exc_info=True)
pendingInstallsSet.clear()
def removeFailedDeletions():
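	"""Removes leftover add-on directories that were marked for deletion but could not be removed earlier."""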
user_addons = os.path.abspath(os.path.join(globalVars.appArgs.configPath, "addons"))
for p in os.listdir(user_addons):
if p.endswith(DELETEDIR_SUFFIX):
path=os.path.join(user_addons,p)
shutil.rmtree(path,ignore_errors=True)
if os.path.exists(path):
log.error("Failed to delete path %s, try removing manually"%path)
_disabledAddons = set()
def disableAddonsIfAny():
"""Disables add-ons if told to do so by the user from add-ons manager"""
global _disabledAddons
if "disabledAddons" not in state:
state["disabledAddons"] = set()
if "pendingDisableSet" not in state:
state["pendingDisableSet"] = set()
if "pendingEnableSet" not in state:
state["pendingEnableSet"] = set()
# Pull in and enable add-ons that should be disabled and enabled, respectively.
state["disabledAddons"] |= state["pendingDisableSet"]
state["disabledAddons"] -= state["pendingEnableSet"]
_disabledAddons = state["disabledAddons"]
state["pendingDisableSet"].clear()
state["pendingEnableSet"].clear()
def initialize():
""" Initializes the add-ons subsystem. """
loadState()
removeFailedDeletions()
completePendingAddonRemoves()
completePendingAddonInstalls()
# #3090: Are there add-ons that are supposed to not run for this session?
disableAddonsIfAny()
saveState()
getAvailableAddons(refresh=True)
def terminate():
""" Terminates the add-ons subsystem. """
pass
def _getDefaultAddonPaths():
""" Returns paths where addons can be found.
	For now, only <userConfig>\addons is supported.
@rtype: list(string)
"""
addon_paths = []
user_addons = os.path.abspath(os.path.join(globalVars.appArgs.configPath, "addons"))
if os.path.isdir(user_addons):
addon_paths.append(user_addons)
return addon_paths
def _getAvailableAddonsFromPath(path):
""" Gets available add-ons from path.
An addon is only considered available if the manifest file is loaded with no errors.
@param path: path from where to find addon directories.
@type path: string
@rtype generator of Addon instances
"""
log.debug("Listing add-ons from %s", path)
for p in os.listdir(path):
if p.endswith(DELETEDIR_SUFFIX): continue
addon_path = os.path.join(path, p)
if os.path.isdir(addon_path) and addon_path not in ('.', '..'):
log.debug("Loading add-on from %s", addon_path)
try:
a = Addon(addon_path)
name = a.manifest['name']
log.debug("Found add-on %s", name)
if a.isDisabled:
log.debug("Disabling add-on %s", name)
yield a
except:
log.error("Error loading Addon from path: %s", addon_path, exc_info=True)
_availableAddons = collections.OrderedDict()
def getAvailableAddons(refresh=False):
""" Gets all available addons on the system.
@rtype generator of Addon instances.
"""
if refresh:
_availableAddons.clear()
generators = [_getAvailableAddonsFromPath(path) for path in _getDefaultAddonPaths()]
for addon in itertools.chain(*generators):
_availableAddons[addon.path] = addon
return _availableAddons.itervalues()
def installAddonBundle(bundle):
"""Extracts an Addon bundle in to a unique subdirectory of the user addons directory, marking the addon as needing install completion on NVDA restart."""
addonPath = os.path.join(globalVars.appArgs.configPath, "addons",bundle.manifest['name']+ADDON_PENDINGINSTALL_SUFFIX)
bundle.extract(addonPath)
addon=Addon(addonPath)
# #2715: The add-on must be added to _availableAddons here so that
# translations can be used in installTasks module.
_availableAddons[addon.path]=addon
try:
addon.runInstallTask("onInstall")
except:
log.error("task 'onInstall' on addon '%s' failed"%addon.name,exc_info=True)
del _availableAddons[addon.path]
addon.completeRemove(runUninstallTask=False)
raise AddonError("Installation failed")
state['pendingInstallsSet'].add(bundle.manifest['name'])
saveState()
return addon
class AddonError(Exception):
""" Represents an exception coming from the addon subsystem. """
class Addon(object):
""" Represents an Add-on available on the file system."""
def __init__(self, path):
""" Constructs an L[Addon} from.
@param path: the base directory for the addon data.
@type path: string
"""
self.path = os.path.abspath(path)
self._extendedPackages = set()
self._isLoaded = False
manifest_path = os.path.join(path, MANIFEST_FILENAME)
with open(manifest_path) as f:
translatedInput = None
for translatedPath in _translatedManifestPaths():
p = os.path.join(self.path, translatedPath)
if os.path.exists(p):
log.debug("Using manifest translation from %s", p)
translatedInput = open(p, 'r')
break
self.manifest = AddonManifest(f, translatedInput)
@property
def isPendingInstall(self):
"""True if this addon has not yet been fully installed."""
return self.path.endswith(ADDON_PENDINGINSTALL_SUFFIX)
@property
def isPendingRemove(self):
"""True if this addon is marked for removal."""
return not self.isPendingInstall and self.name in state['pendingRemovesSet']
def requestRemove(self):
"""Markes this addon for removal on NVDA restart."""
if self.isPendingInstall:
self.completeRemove()
state['pendingInstallsSet'].discard(self.name)
#Force availableAddons to be updated
getAvailableAddons(refresh=True)
else:
state['pendingRemovesSet'].add(self.name)
# There's no point keeping a record of this add-on being disabled now.
_disabledAddons.discard(self.name)
state['pendingDisableSet'].discard(self.name)
saveState()
def completeRemove(self,runUninstallTask=True):
if runUninstallTask:
try:
# #2715: The add-on must be added to _availableAddons here so that
# translations can be used in installTasks module.
_availableAddons[self.path] = self
self.runInstallTask("onUninstall")
except:
log.error("task 'onUninstall' on addon '%s' failed"%self.name,exc_info=True)
finally:
del _availableAddons[self.path]
tempPath=tempfile.mktemp(suffix=DELETEDIR_SUFFIX,dir=os.path.dirname(self.path))
try:
os.rename(self.path,tempPath)
except (WindowsError,IOError):
raise RuntimeError("Cannot rename add-on path for deletion")
shutil.rmtree(tempPath,ignore_errors=True)
if os.path.exists(tempPath):
log.error("Error removing addon directory %s, deferring until next NVDA restart"%self.path)
@property
def name(self):
return self.manifest['name']
def addToPackagePath(self, package):
""" Adds this L{Addon} extensions to the specific package path if those exist.
@param package: the python module representing the package.
@type package: python module.
"""
# #3090: Don't even think about adding a disabled add-on to package path.
if self.isDisabled:
return
extension_path = os.path.join(self.path, package.__name__)
if not os.path.isdir(extension_path):
# This addon does not have extension points for this package
return
# Python 2.x doesn't properly handle unicode import paths, so convert them before adding.
converted_path = self._getPathForInclusionInPackage(package)
package.__path__.insert(0, converted_path)
self._extendedPackages.add(package)
log.debug("Addon %s added to %s package path", self.manifest['name'], package.__name__)
def enable(self, shouldEnable):
"""Sets this add-on to be disabled or enabled when NVDA restarts."""
if shouldEnable:
if self.name in state["pendingDisableSet"]:
# Undoing a pending disable.
state["pendingDisableSet"].discard(self.name)
else:
state["pendingEnableSet"].add(self.name)
else:
if self.name in state["pendingEnableSet"]:
# Undoing a pending enable.
state["pendingEnableSet"].discard(self.name)
else:
state["pendingDisableSet"].add(self.name)
# Record enable/disable flags as a way of preparing for disaster such as sudden NVDA crash.
saveState()
@property
def isRunning(self):
return not (self.isPendingInstall or self.isDisabled)
@property
def isDisabled(self):
return self.name in _disabledAddons
@property
def isPendingEnable(self):
return self.name in state["pendingEnableSet"]
@property
def isPendingDisable(self):
return self.name in state["pendingDisableSet"]
def _getPathForInclusionInPackage(self, package):
extension_path = os.path.join(self.path, package.__name__)
return extension_path.encode("mbcs")
def loadModule(self, name):
""" loads a python module from the addon directory
@param name: the module name
@type name: string
		@returns the python module with C{name}
@rtype python module
"""
log.debug("Importing module %s from plugin %s", name, self.name)
importer = pkgutil.ImpImporter(self.path)
loader = importer.find_module(name)
if not loader:
return None
# Create a qualified full name to avoid modules with the same name on sys.modules.
fullname = "addons.%s.%s" % (self.name, name)
try:
return loader.load_module(fullname)
except ImportError:
# in this case return None, any other error throw to be handled elsewhere
return None
def getTranslationsInstance(self, domain='nvda'):
""" Gets the gettext translation instance for this addon.
		<addon-path>\locale will be used to find .mo files, if it exists.
If a translation file is not found the default fallback null translation is returned.
		@param domain: the translation domain to retrieve. The 'nvda' default should be used in most cases.
@returns: the gettext translation class.
"""
localedir = os.path.join(self.path, "locale")
return gettext.translation(domain, localedir=localedir, languages=[languageHandler.getLanguage()], fallback=True)
def runInstallTask(self,taskName,*args,**kwargs):
"""
Executes the function having the given taskName with the given args and kwargs in the addon's installTasks module if it exists.
"""
if not hasattr(self,'_installTasksModule'):
self._installTasksModule=self.loadModule('installTasks')
if self._installTasksModule:
func=getattr(self._installTasksModule,taskName,None)
if func:
func(*args,**kwargs)
def getDocFilePath(self, fileName=None):
"""Get the path to a documentation file for this add-on.
The file should be located in C{doc\lang\file} inside the add-on,
where C{lang} is the language code and C{file} is the requested file name.
Failing that, the language without country is tried.
English is tried as a last resort.
An add-on can specify a default documentation file name
via the docFileName parameter in its manifest.
@param fileName: The requested file name or C{None} for the add-on's default.
@type fileName: basestring
@return: The path to the requested file or C{None} if it wasn't found.
@rtype: basestring
"""
if not fileName:
fileName = self.manifest["docFileName"]
if not fileName:
return None
docRoot = os.path.join(self.path, "doc")
lang = languageHandler.getLanguage()
langs = [lang]
if "_" in lang:
lang = lang.split("_", 1)[0]
langs.append(lang)
if lang != "en":
langs.append("en")
for lang in langs:
docFile = os.path.join(docRoot, lang, fileName)
if os.path.isfile(docFile):
return docFile
return None
def getCodeAddon(obj=None, frameDist=1):
""" Returns the L{Addon} where C{obj} is defined. If obj is None the caller code frame is assumed to allow simple retrieval of "current calling addon".
@param obj: python object or None for default behaviour.
	@param frameDist: how many frames up the stack the caller code is. Only change this for functions in this module.
	@return: L{Addon} instance, or None if the code does not belong to an add-on package.
@rtype: C{Addon}
"""
global _availableAddons
if obj is None:
obj = sys._getframe(frameDist)
fileName = inspect.getfile(obj)
dir= unicode(os.path.abspath(os.path.dirname(fileName)), "mbcs")
# if fileName is not a subdir of one of the addon paths
# It does not belong to an addon.
for p in _getDefaultAddonPaths():
if dir.startswith(p):
break
else:
raise AddonError("Code does not belong to an addon package.")
curdir = dir
while curdir not in _getDefaultAddonPaths():
if curdir in _availableAddons.keys():
return _availableAddons[curdir]
curdir = os.path.abspath(os.path.join(curdir, ".."))
# Not found!
raise AddonError("Code does not belong to an addon")
def initTranslation():
addon = getCodeAddon(frameDist=2)
translations = addon.getTranslationsInstance()
# Point _ to the translation object in the globals namespace of the caller frame
# FIXME: shall we retrieve the caller module object explicitly?
try:
callerFrame = inspect.currentframe().f_back
callerFrame.f_globals['_'] = translations.ugettext
# Install our pgettext function.
callerFrame.f_globals['pgettext'] = languageHandler.makePgettext(translations)
finally:
del callerFrame # Avoid reference problems with frames (per python docs)
def _translatedManifestPaths(lang=None, forBundle=False):
if lang is None:
lang = languageHandler.getLanguage() # can't rely on default keyword arguments here.
langs=[lang]
if '_' in lang:
langs.append(lang.split('_')[0])
if lang!='en' and not lang.startswith('en_'):
langs.append('en')
sep = "/" if forBundle else os.path.sep
return [sep.join(("locale", lang, MANIFEST_FILENAME)) for lang in langs]
class AddonBundle(object):
""" Represents the contents of an NVDA addon suitable for distribution.
The bundle is compressed using the zip file format. Manifest information
is available without the need for extraction."""
def __init__(self, bundlePath):
""" Constructs an L{AddonBundle} from a filename.
@param bundlePath: The path for the bundle file.
"""
self._path = bundlePath if isinstance(bundlePath, unicode) else unicode(bundlePath, "mbcs")
# Read manifest:
translatedInput=None
with zipfile.ZipFile(self._path, 'r') as z:
for translationPath in _translatedManifestPaths(forBundle=True):
try:
translatedInput = z.open(translationPath, 'r')
break
except KeyError:
pass
self._manifest = AddonManifest(z.open(MANIFEST_FILENAME), translatedInput=translatedInput)
def extract(self, addonPath):
""" Extracts the bundle content to the specified path.
The addon will be extracted to L{addonPath}
@param addonPath: Path where to extract contents.
@type addonPath: string
"""
with zipfile.ZipFile(self._path, 'r') as z:
for info in z.infolist():
if isinstance(info.filename, str):
# #2505: Handle non-Unicode file names.
# Most archivers seem to use the local OEM code page, even though the spec says only cp437.
# HACK: Overriding info.filename is a bit ugly, but it avoids a lot of code duplication.
info.filename = info.filename.decode("cp%d" % winKernel.kernel32.GetOEMCP())
z.extract(info, addonPath)
@property
def manifest(self):
""" Gets the manifest for the represented Addon.
@rtype: AddonManifest
"""
return self._manifest
def __repr__(self):
return "<AddonBundle at %s>" % self._path
def createAddonBundleFromPath(path, destDir=None):
""" Creates a bundle from a directory that contains a a addon manifest file."""
basedir = os.path.abspath(path)
# If caller did not provide a destination directory name
# Put the bundle at the same level of the addon's top directory,
# That is, basedir/..
if destDir is None:
destDir = os.path.dirname(basedir)
manifest_path = os.path.join(basedir, MANIFEST_FILENAME)
if not os.path.isfile(manifest_path):
raise AddonError("Can't find %s manifest file." % manifest_path)
with open(manifest_path) as f:
manifest = AddonManifest(f)
if manifest.errors is not None:
_report_manifest_errors(manifest)
raise AddonError("Manifest file as errors.")
bundleFilename = "%s-%s.%s" % (manifest['name'], manifest['version'], BUNDLE_EXTENSION)
bundleDestination = os.path.join(destDir, bundleFilename)
with zipfile.ZipFile(bundleDestination, 'w') as z:
# FIXME: the include/exclude feature may or may not be useful. Also python files can be pre-compiled.
for dir, dirnames, filenames in os.walk(basedir):
relativePath = os.path.relpath(dir, basedir)
for filename in filenames:
pathInBundle = os.path.join(relativePath, filename)
absPath = os.path.join(dir, filename)
z.write(absPath, pathInBundle)
return AddonBundle(bundleDestination)
def _report_manifest_errors(manifest):
log.warning("Error loading manifest:\n%s", manifest.errors)
class AddonManifest(ConfigObj):
""" Add-on manifest file. It contains metadata about an NVDA add-on package. """
configspec = ConfigObj(StringIO(
"""
# NVDA Add-on Manifest configuration specification
# Add-on unique name
name = string()
# short summary (label) of the add-on to show to users.
summary = string()
# Long description with further information and instructions
description = string(default=None)
# Name of the author or entity that created the add-on
author = string()
# Version of the add-on. Should preferably in some standard format such as x.y.z
version = string()
# URL for more information about the add-on. New versions and such.
url= string(default=None)
# Name of default documentation file for the add-on.
docFileName = string(default=None)
"""))
def __init__(self, input, translatedInput=None):
""" Constructs an L{AddonManifest} instance from manifest string data
		@param input: data to read the manifest information
		@type input: a file-like object.
@param translatedInput: translated manifest input
@type translatedInput: file-like object
"""
super(AddonManifest, self).__init__(input, configspec=self.configspec, encoding='utf-8', default_encoding='utf-8')
self._errors = []
val = Validator()
result = self.validate(val, copy=True, preserve_errors=True)
if result != True:
self._errors = result
self._translatedConfig = None
if translatedInput is not None:
self._translatedConfig = ConfigObj(translatedInput, encoding='utf-8', default_encoding='utf-8')
for k in ('summary','description'):
val=self._translatedConfig.get(k)
if val:
self[k]=val
@property
def errors(self):
return self._errors
| 1 | 19,073 | A docstring for this function please. | nvaccess-nvda | py |
@@ -0,0 +1,12 @@
+using System;
+using System.Collections.Generic;
+using System.Text;
+using Microsoft.AspNetCore.Hosting.Server.Features;
+
+namespace Microsoft.AspNetCore.Server.Kestrel.Core.Internal
+{
+ internal class ServerAddressesFeature : IServerAddressesFeature
+ {
+ public ICollection<string> Addresses { get; } = new List<string>();
+ }
+}
| 1 |
|
@@ -167,12 +167,12 @@ namespace NLog
if (SkipAssembly(stackFrame))
continue;
- if (stackFrame.GetMethod().Name == "MoveNext")
+ if (stackFrame.GetMethod()?.Name == "MoveNext")
{
if (stackFrames.Length > i)
{
var nextStackFrame = stackFrames[i + 1];
- var declaringType = nextStackFrame.GetMethod().DeclaringType;
+ var declaringType = nextStackFrame.GetMethod()?.DeclaringType;
if (declaringType == typeof(System.Runtime.CompilerServices.AsyncTaskMethodBuilder) ||
declaringType == typeof(System.Runtime.CompilerServices.AsyncTaskMethodBuilder<>))
                         {
| 1 |
//
// Copyright (c) 2004-2018 Jaroslaw Kowalski <[email protected]>, Kim Christensen, Julian Verdurmen
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * Neither the name of Jaroslaw Kowalski nor the names of its
// contributors may be used to endorse or promote products derived from this
// software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
// THE POSSIBILITY OF SUCH DAMAGE.
//
namespace NLog
{
using System;
using System.Collections.Generic;
using System.Diagnostics;
using System.Reflection;
using JetBrains.Annotations;
using NLog.Common;
using NLog.Config;
using NLog.Filters;
using NLog.Internal;
/// <summary>
/// Implementation of logging engine.
/// </summary>
internal static class LoggerImpl
{
private const int StackTraceSkipMethods = 0;
private static readonly Assembly nlogAssembly = typeof(LoggerImpl).GetAssembly();
private static readonly Assembly mscorlibAssembly = typeof(string).GetAssembly();
private static readonly Assembly systemAssembly = typeof(Debug).GetAssembly();
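        /// <summary>
        /// Writes the log event to the supplied chain of targets, collecting call-site stack information first when any target requires it.
        /// </summary>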
[System.Diagnostics.CodeAnalysis.SuppressMessage("Microsoft.Naming", "CA2204:Literals should be spelled correctly", Justification = "Using 'NLog' in message.")]
internal static void Write([NotNull] Type loggerType, TargetWithFilterChain targets, LogEventInfo logEvent, LogFactory factory)
{
if (targets == null)
{
return;
}
StackTraceUsage stu = targets.GetStackTraceUsage();
if (stu != StackTraceUsage.None && !logEvent.HasStackTrace)
{
StackTrace stackTrace;
#if NETSTANDARD1_5
stackTrace = (StackTrace)Activator.CreateInstance(typeof(StackTrace), new object[] { stu == StackTraceUsage.WithSource });
#elif NETSTANDARD1_0
stackTrace = null;
#elif !SILVERLIGHT
stackTrace = new StackTrace(StackTraceSkipMethods, stu == StackTraceUsage.WithSource);
#else
stackTrace = new StackTrace();
#endif
if (stackTrace != null)
{
var stackFrames = stackTrace.GetFrames();
int? firstUserFrame = FindCallingMethodOnStackTrace(stackFrames, loggerType);
int? firstLegacyUserFrame = firstUserFrame.HasValue ? SkipToUserStackFrameLegacy(stackFrames, firstUserFrame.Value) : (int?)null;
logEvent.GetCallSiteInformationInternal().SetStackTrace(stackTrace, firstUserFrame ?? 0, firstLegacyUserFrame);
}
}
AsyncContinuation exceptionHandler = (ex) => { };
if (factory.ThrowExceptions)
{
int originalThreadId = AsyncHelpers.GetManagedThreadId();
exceptionHandler = ex =>
{
if (ex != null)
{
if (AsyncHelpers.GetManagedThreadId() == originalThreadId)
{
throw new NLogRuntimeException("Exception occurred in NLog", ex);
}
}
};
}
if (targets.NextInChain == null
&& logEvent.Parameters != null
&& logEvent.Parameters.Length > 0
&& logEvent.Message?.Length < 256
&& ReferenceEquals(logEvent.MessageFormatter, LogMessageTemplateFormatter.DefaultAuto.MessageFormatter))
{
logEvent.MessageFormatter = LogMessageTemplateFormatter.DefaultAutoSingleTarget.MessageFormatter;
}
for (var t = targets; t != null; t = t.NextInChain)
{
if (!WriteToTargetWithFilterChain(t, logEvent, exceptionHandler))
{
break;
}
}
}
/// <summary>
/// Finds first user stack frame in a stack trace
/// </summary>
/// <param name="stackFrames">The stack trace of the logging method invocation</param>
/// <param name="loggerType">Type of the logger or logger wrapper. This is still Logger if it's a subclass of Logger.</param>
/// <returns>Index of the first user stack frame or 0 if all stack frames are non-user</returns>
internal static int? FindCallingMethodOnStackTrace(StackFrame[] stackFrames, [NotNull] Type loggerType)
{
if (stackFrames == null || stackFrames.Length == 0)
return null;
int? firstStackFrameAfterLogger = null;
int? firstUserStackFrame = null;
for (int i = 0; i < stackFrames.Length; ++i)
{
var stackFrame = stackFrames[i];
if (SkipAssembly(stackFrame))
continue;
if (!firstUserStackFrame.HasValue)
firstUserStackFrame = i;
if (IsLoggerType(stackFrame, loggerType))
{
firstStackFrameAfterLogger = null;
continue;
}
if (!firstStackFrameAfterLogger.HasValue)
firstStackFrameAfterLogger = i;
}
return firstStackFrameAfterLogger ?? firstUserStackFrame;
}
/// <summary>
/// This is only done for legacy reason, as the correct method-name and line-number should be extracted from the MoveNext-StackFrame
/// </summary>
/// <param name="stackFrames">The stack trace of the logging method invocation</param>
/// <param name="firstUserStackFrame">Starting point for skipping async MoveNext-frames</param>
internal static int SkipToUserStackFrameLegacy(StackFrame[] stackFrames, int firstUserStackFrame)
{
#if NET4_5
for (int i = firstUserStackFrame; i < stackFrames.Length; ++i)
{
var stackFrame = stackFrames[i];
if (SkipAssembly(stackFrame))
continue;
if (stackFrame.GetMethod().Name == "MoveNext")
{
if (stackFrames.Length > i)
{
var nextStackFrame = stackFrames[i + 1];
var declaringType = nextStackFrame.GetMethod().DeclaringType;
if (declaringType == typeof(System.Runtime.CompilerServices.AsyncTaskMethodBuilder) ||
declaringType == typeof(System.Runtime.CompilerServices.AsyncTaskMethodBuilder<>))
{
                            //async, search further
continue;
}
}
}
return i;
}
#endif
return firstUserStackFrame;
}
/// <summary>
/// Assembly to skip?
/// </summary>
/// <param name="frame">Find assembly via this frame. </param>
/// <returns><c>true</c>, we should skip.</returns>
private static bool SkipAssembly(StackFrame frame)
{
var method = frame.GetMethod();
var assembly = method.DeclaringType != null ? method.DeclaringType.GetAssembly() : method.Module.Assembly;
// skip stack frame if the method declaring type assembly is from hidden assemblies list
var skipAssembly = SkipAssembly(assembly);
return skipAssembly;
}
/// <summary>
/// Is this the type of the logger?
/// </summary>
/// <param name="frame">get type of this logger in this frame.</param>
/// <param name="loggerType">Type of the logger.</param>
/// <returns></returns>
private static bool IsLoggerType(StackFrame frame, Type loggerType)
{
var method = frame.GetMethod();
Type declaringType = method.DeclaringType;
var isLoggerType = declaringType != null && (loggerType == declaringType || declaringType.IsSubclassOf(loggerType) || loggerType.IsAssignableFrom(declaringType));
return isLoggerType;
}
private static bool SkipAssembly(Assembly assembly)
{
if (assembly == nlogAssembly)
{
return true;
}
if (assembly == mscorlibAssembly)
{
return true;
}
if (assembly == systemAssembly)
{
return true;
}
if (LogManager.IsHiddenAssembly(assembly))
{
return true;
}
return false;
}
private static bool WriteToTargetWithFilterChain(TargetWithFilterChain targetListHead, LogEventInfo logEvent, AsyncContinuation onException)
{
FilterResult result = GetFilterResult(targetListHead.FilterChain, logEvent);
if ((result == FilterResult.Ignore) || (result == FilterResult.IgnoreFinal))
{
if (InternalLogger.IsDebugEnabled)
{
InternalLogger.Debug("{0}.{1} Rejecting message because of a filter.", logEvent.LoggerName, logEvent.Level);
}
if (result == FilterResult.IgnoreFinal)
{
return false;
}
return true;
}
targetListHead.Target.WriteAsyncLogEvent(logEvent.WithContinuation(onException));
if (result == FilterResult.LogFinal)
{
return false;
}
return true;
}
/// <summary>
/// Gets the filter result.
/// </summary>
/// <param name="filterChain">The filter chain.</param>
/// <param name="logEvent">The log event.</param>
/// <returns>The result of the filter.</returns>
private static FilterResult GetFilterResult(IList<Filter> filterChain, LogEventInfo logEvent)
{
FilterResult result = FilterResult.Neutral;
if (filterChain == null || filterChain.Count == 0)
return result;
try
{
//Memory profiling pointed out that using a foreach-loop was allocating
//an Enumerator. Switching to a for-loop avoids the memory allocation.
for (int i = 0; i < filterChain.Count; i++)
{
Filter f = filterChain[i];
result = f.GetFilterResult(logEvent);
if (result != FilterResult.Neutral)
{
break;
}
}
return result;
}
catch (Exception exception)
{
                InternalLogger.Warn(exception, "Exception during filter evaluation. Message will be ignored.");
if (exception.MustBeRethrown())
{
throw;
}
return FilterResult.Ignore;
}
}
}
}
| 1 | 16,932 | .GetMethod() cannot return null now, correct? | NLog-NLog | .cs |
@@ -11,7 +11,7 @@ define(['events', 'playbackManager', 'dom', 'browser', 'css!./iconosd', 'materia
function getOsdElementHtml() {
var html = '';
- html += '<i class="material-icons iconOsdIcon volume_up"></i>';
+ html += '<i class="material-icons iconOsdIcon"></i>';
html += '<div class="iconOsdProgressOuter"><div class="iconOsdProgressInner"></div></div>';
| 1 | define(['events', 'playbackManager', 'dom', 'browser', 'css!./iconosd', 'material-icons'], function (events, playbackManager, dom, browser) {
'use strict';
var currentPlayer;
var osdElement;
var iconElement;
var progressElement;
var enableAnimation;
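    // Builds the markup for the volume OSD: a material icon plus a progress bar.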
function getOsdElementHtml() {
var html = '';
html += '<i class="material-icons iconOsdIcon volume_up"></i>';
html += '<div class="iconOsdProgressOuter"><div class="iconOsdProgressInner"></div></div>';
return html;
}
function ensureOsdElement() {
var elem = osdElement;
if (!elem) {
enableAnimation = browser.supportsCssAnimation();
elem = document.createElement('div');
elem.classList.add('hide');
elem.classList.add('iconOsd');
elem.classList.add('iconOsd-hidden');
elem.classList.add('volumeOsd');
elem.innerHTML = getOsdElementHtml();
iconElement = elem.querySelector('i');
progressElement = elem.querySelector('.iconOsdProgressInner');
document.body.appendChild(elem);
osdElement = elem;
}
}
function onHideComplete() {
this.classList.add('hide');
}
var hideTimeout;
function showOsd() {
clearHideTimeout();
var elem = osdElement;
dom.removeEventListener(elem, dom.whichTransitionEvent(), onHideComplete, {
once: true
});
elem.classList.remove('hide');
// trigger reflow
void elem.offsetWidth;
requestAnimationFrame(function () {
elem.classList.remove('iconOsd-hidden');
hideTimeout = setTimeout(hideOsd, 3000);
});
}
function clearHideTimeout() {
if (hideTimeout) {
clearTimeout(hideTimeout);
hideTimeout = null;
}
}
function hideOsd() {
clearHideTimeout();
var elem = osdElement;
if (elem) {
if (enableAnimation) {
// trigger reflow
void elem.offsetWidth;
requestAnimationFrame(function () {
elem.classList.add('iconOsd-hidden');
dom.addEventListener(elem, dom.whichTransitionEvent(), onHideComplete, {
once: true
});
});
} else {
onHideComplete.call(elem);
}
}
}
function updatePlayerVolumeState(isMuted, volume) {
if (iconElement) {
iconElement.innerHTML = isMuted ? '' : '';
}
if (progressElement) {
progressElement.style.width = (volume || 0) + '%';
}
}
function releaseCurrentPlayer() {
var player = currentPlayer;
if (player) {
events.off(player, 'volumechange', onVolumeChanged);
events.off(player, 'playbackstop', hideOsd);
currentPlayer = null;
}
}
function onVolumeChanged(e) {
var player = this;
ensureOsdElement();
updatePlayerVolumeState(player.isMuted(), player.getVolume());
showOsd();
}
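    // Switches the OSD's event listeners to the given player, releasing any previously bound player.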
function bindToPlayer(player) {
if (player === currentPlayer) {
return;
}
releaseCurrentPlayer();
currentPlayer = player;
if (!player) {
return;
}
hideOsd();
events.on(player, 'volumechange', onVolumeChanged);
events.on(player, 'playbackstop', hideOsd);
}
events.on(playbackManager, 'playerchange', function () {
bindToPlayer(playbackManager.getCurrentPlayer());
});
bindToPlayer(playbackManager.getCurrentPlayer());
});
| 1 | 13,405 | Same thing here, I think the proper fix is elsewhere. | jellyfin-jellyfin-web | js |
@@ -585,6 +585,17 @@ ExWorkProcRetcode ExHdfsScanTcb::work()
openType //
);
+ if ((retcode < 0) &&
+ ((errno == ENOENT) || (errno == EAGAIN)))
+ {
+ ComDiagsArea * diagsArea = NULL;
+ ExRaiseSqlError(getHeap(), &diagsArea,
+ (ExeErrorCode)(EXE_HIVE_DATA_MOD_CHECK_ERROR));
+ pentry_down->setDiagsArea(diagsArea);
+ step_ = HANDLE_ERROR_AND_DONE;
+ break;
+ }
+
// preopen next range.
if ( (currRangeNum_ + 1) < (beginRangeNum_ + numRanges_) )
       {
| 1 |
// **********************************************************************
// @@@ START COPYRIGHT @@@
//
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
//
// @@@ END COPYRIGHT @@@
// **********************************************************************
#include "Platform.h"
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/time.h>
#include <poll.h>
#include <iostream>
#include "ex_stdh.h"
#include "ComTdb.h"
#include "ex_tcb.h"
#include "ExHdfsScan.h"
#include "ex_exe_stmt_globals.h"
#include "ExpLOBinterface.h"
#include "SequenceFileReader.h"
#include "Hbase_types.h"
#include "stringBuf.h"
#include "NLSConversion.h"
#include "Context.h"
#include "ExpORCinterface.h"
#include "ComSmallDefs.h"
ex_tcb * ExHdfsScanTdb::build(ex_globals * glob)
{
ExExeStmtGlobals * exe_glob = glob->castToExExeStmtGlobals();
ex_assert(exe_glob,"This operator cannot be in DP2");
ExHdfsScanTcb *tcb = NULL;
if ((isTextFile()) || (isSequenceFile()))
{
tcb = new(exe_glob->getSpace())
ExHdfsScanTcb(
*this,
exe_glob);
}
else if (isOrcFile())
{
tcb = new(exe_glob->getSpace())
ExOrcScanTcb(
*this,
exe_glob);
}
ex_assert(tcb, "Error building ExHdfsScanTcb.");
return (tcb);
}
ex_tcb * ExOrcFastAggrTdb::build(ex_globals * glob)
{
ExHdfsScanTcb *tcb = NULL;
tcb = new(glob->getSpace())
ExOrcFastAggrTcb(
*this,
glob);
ex_assert(tcb, "Error building ExHdfsScanTcb.");
return (tcb);
}
////////////////////////////////////////////////////////////////
// Constructor and initialization.
////////////////////////////////////////////////////////////////
ExHdfsScanTcb::ExHdfsScanTcb(
const ComTdbHdfsScan &hdfsScanTdb,
ex_globals * glob ) :
ex_tcb( hdfsScanTdb, 1, glob)
, workAtp_(NULL)
, bytesLeft_(0)
, hdfsScanBuffer_(NULL)
, hdfsBufNextRow_(NULL)
, hdfsLoggingRow_(NULL)
, hdfsLoggingRowEnd_(NULL)
, debugPrevRow_(NULL)
, hdfsSqlBuffer_(NULL)
, hdfsSqlData_(NULL)
, pool_(NULL)
, step_(NOT_STARTED)
, matches_(0)
, matchBrkPoint_(0)
, endOfRequestedRange_(NULL)
, sequenceFileReader_(NULL)
, seqScanAgain_(false)
, hdfo_(NULL)
, numBytesProcessedInRange_(0)
, exception_(FALSE)
, checkRangeDelimiter_(FALSE)
, dataModCheckDone_(FALSE)
{
Space * space = (glob ? glob->getSpace() : 0);
CollHeap * heap = (glob ? glob->getDefaultHeap() : 0);
const int readBufSize = (Int32)hdfsScanTdb.hdfsBufSize_;
hdfsScanBuffer_ = new(space) char[ readBufSize + 1 ];
hdfsScanBuffer_[readBufSize] = '\0';
moveExprColsBuffer_ = new(space) ExSimpleSQLBuffer( 1, // one row
(Int32)hdfsScanTdb.moveExprColsRowLength_,
space);
short error = moveExprColsBuffer_->getFreeTuple(moveExprColsTupp_);
ex_assert((error == 0), "get_free_tuple cannot hold a row.");
moveExprColsData_ = moveExprColsTupp_.getDataPointer();
hdfsSqlBuffer_ = new(space) ExSimpleSQLBuffer( 1, // one row
(Int32)hdfsScanTdb.hdfsSqlMaxRecLen_,
space);
error = hdfsSqlBuffer_->getFreeTuple(hdfsSqlTupp_);
ex_assert((error == 0), "get_free_tuple cannot hold a row.");
hdfsSqlData_ = hdfsSqlTupp_.getDataPointer();
hdfsAsciiSourceBuffer_ = new(space) ExSimpleSQLBuffer( 1, // one row
(Int32)hdfsScanTdb.asciiRowLen_ * 2, // just in case
space);
error = hdfsAsciiSourceBuffer_->getFreeTuple(hdfsAsciiSourceTupp_);
ex_assert((error == 0), "get_free_tuple cannot hold a row.");
hdfsAsciiSourceData_ = hdfsAsciiSourceTupp_.getDataPointer();
pool_ = new(space)
sql_buffer_pool(hdfsScanTdb.numBuffers_,
hdfsScanTdb.bufferSize_,
space,
((ExHdfsScanTdb &)hdfsScanTdb).denseBuffers() ?
SqlBufferBase::DENSE_ : SqlBufferBase::NORMAL_);
pool_->setStaticMode(TRUE);
defragTd_ = NULL;
  // removing the cast produces a compile error
if (((ExHdfsScanTdb &)hdfsScanTdb).useCifDefrag())
{
defragTd_ = pool_->addDefragTuppDescriptor(hdfsScanTdb.outputRowLength_);
}
// Allocate the queue to communicate with parent
allocateParentQueues(qparent_);
workAtp_ = allocateAtp(hdfsScanTdb.workCriDesc_, space);
// fixup expressions
if (selectPred())
selectPred()->fixup(0, getExpressionMode(), this, space, heap, FALSE, glob);
if (moveExpr())
moveExpr()->fixup(0, getExpressionMode(), this, space, heap, FALSE, glob);
if (convertExpr())
convertExpr()->fixup(0, getExpressionMode(), this, space, heap, FALSE, glob);
if (moveColsConvertExpr())
moveColsConvertExpr()->fixup(0, getExpressionMode(), this, space, heap, FALSE, glob);
// Register subtasks with the scheduler
registerSubtasks();
registerResizeSubtasks();
Lng32 fileNum = getGlobals()->castToExExeStmtGlobals()->getMyInstanceNumber();
ExHbaseAccessTcb::buildLoggingPath(((ExHdfsScanTdb &)hdfsScanTdb).getLoggingLocation(),
(char *)((ExHdfsScanTdb &)hdfsScanTdb).getErrCountRowId(),
((ExHdfsScanTdb &)hdfsScanTdb).tableName(),
"hive_scan_err",
fileNum,
loggingFileName_);
LoggingFileCreated_ = FALSE;
  // should be moved to the work method
int jniDebugPort = 0;
int jniDebugTimeout = 0;
ehi_ = ExpHbaseInterface::newInstance(glob->getDefaultHeap(),
(char*)"", //Later replace with server cqd
(char*)"", ////Later replace with port cqd
jniDebugPort,
jniDebugTimeout);
}
ExHdfsScanTcb::~ExHdfsScanTcb()
{
freeResources();
}
void ExHdfsScanTcb::freeResources()
{
if (workAtp_)
{
workAtp_->release();
deallocateAtp(workAtp_, getSpace());
workAtp_ = NULL;
}
if (hdfsScanBuffer_)
{
NADELETEBASIC(hdfsScanBuffer_, getSpace());
hdfsScanBuffer_ = NULL;
}
if (hdfsAsciiSourceBuffer_)
{
NADELETEBASIC(hdfsAsciiSourceBuffer_, getSpace());
hdfsAsciiSourceBuffer_ = NULL;
}
// hdfsSqlTupp_.release() ; // ???
if (hdfsSqlBuffer_)
{
delete hdfsSqlBuffer_;
hdfsSqlBuffer_ = NULL;
}
if (moveExprColsBuffer_)
{
delete moveExprColsBuffer_;
moveExprColsBuffer_ = NULL;
}
if (pool_)
{
delete pool_;
pool_ = NULL;
}
if (qparent_.up)
{
delete qparent_.up;
qparent_.up = NULL;
}
if (qparent_.down)
{
delete qparent_.down;
qparent_.down = NULL;
}
ExpLOBinterfaceCleanup
(lobGlob_, getGlobals()->getDefaultHeap());
}
NABoolean ExHdfsScanTcb::needStatsEntry()
{
// stats are collected for ALL, OPERATOR and PERTABLE options.
if ((getGlobals()->getStatsArea()->getCollectStatsType() ==
ComTdb::ALL_STATS) ||
(getGlobals()->getStatsArea()->getCollectStatsType() ==
ComTdb::OPERATOR_STATS))
return TRUE;
else if ( getGlobals()->getStatsArea()->getCollectStatsType() == ComTdb::PERTABLE_STATS)
return TRUE;
else
return FALSE;
}
ExOperStats * ExHdfsScanTcb::doAllocateStatsEntry(
CollHeap *heap,
ComTdb *tdb)
{
ExOperStats * stats = NULL;
ExHdfsScanTdb * myTdb = (ExHdfsScanTdb*) tdb;
return new(heap) ExHdfsScanStats(heap,
this,
tdb);
ComTdb::CollectStatsType statsType =
getGlobals()->getStatsArea()->getCollectStatsType();
if (statsType == ComTdb::OPERATOR_STATS)
{
return ex_tcb::doAllocateStatsEntry(heap, tdb);
}
else if (statsType == ComTdb::PERTABLE_STATS)
{
// sqlmp style per-table stats, one entry per table
stats = new(heap) ExPertableStats(heap,
this,
tdb);
((ExOperStatsId*)(stats->getId()))->tdbId_ = tdb->getPertableStatsTdbId();
return stats;
}
else
{
ExHdfsScanTdb * myTdb = (ExHdfsScanTdb*) tdb;
return new(heap) ExHdfsScanStats(heap,
this,
tdb);
}
}
void ExHdfsScanTcb::registerSubtasks()
{
ExScheduler *sched = getGlobals()->getScheduler();
sched->registerInsertSubtask(sWork, this, qparent_.down,"PD");
sched->registerUnblockSubtask(sWork, this, qparent_.up, "PU");
sched->registerCancelSubtask(sWork, this, qparent_.down,"CN");
}
ex_tcb_private_state *ExHdfsScanTcb::allocatePstates(
Lng32 &numElems, // inout, desired/actual elements
Lng32 &pstateLength) // out, length of one element
{
PstateAllocator<ex_tcb_private_state> pa;
return pa.allocatePstates(this, numElems, pstateLength);
}
Int32 ExHdfsScanTcb::fixup()
{
lobGlob_ = NULL;
ExpLOBinterfaceInit
(lobGlob_, getGlobals()->getDefaultHeap(),getGlobals()->castToExExeStmtGlobals()->getContext(),TRUE, hdfsScanTdb().hostName_,hdfsScanTdb().port_);
return 0;
}
void brkpoint()
{}
short ExHdfsScanTcb::setupError(Lng32 exeError, Lng32 retcode,
const char * str, const char * str2, const char * str3)
{
// Make sure retcode is positive.
if (retcode < 0)
retcode = -retcode;
ex_queue_entry *pentry_down = qparent_.down->getHeadEntry();
Lng32 intParam1 = retcode;
Lng32 intParam2 = 0;
ComDiagsArea * diagsArea = NULL;
ExRaiseSqlError(getHeap(), &diagsArea,
(ExeErrorCode)(exeError), NULL, &intParam1,
&intParam2, NULL,
(str ? (char*)str : (char*)" "),
(str2 ? (char*)str2 : (char*)" "),
(str3 ? (char*)str3 : (char*)" "));
pentry_down->setDiagsArea(diagsArea);
return -1;
}
ExWorkProcRetcode ExHdfsScanTcb::work()
{
Lng32 retcode = 0;
SFR_RetCode sfrRetCode = SFR_OK;
char *errorDesc = NULL;
char cursorId[8];
HdfsFileInfo *hdfo = NULL;
Lng32 openType = 0;
int changedLen = 0;
ContextCli *currContext = getGlobals()->castToExExeStmtGlobals()->getCliGlobals()->currContext();
hdfsFS hdfs = currContext->getHdfsServerConnection(hdfsScanTdb().hostName_,hdfsScanTdb().port_);
hdfsFileInfo *dirInfo = NULL;
while (!qparent_.down->isEmpty())
{
ex_queue_entry *pentry_down = qparent_.down->getHeadEntry();
switch (step_)
{
case NOT_STARTED:
{
matches_ = 0;
hdfsStats_ = NULL;
if (getStatsEntry())
hdfsStats_ = getStatsEntry()->castToExHdfsScanStats();
ex_assert(hdfsStats_, "hdfs stats cannot be null");
if (hdfsStats_)
hdfsStats_->init();
beginRangeNum_ = -1;
numRanges_ = -1;
hdfsOffset_ = 0;
checkRangeDelimiter_ = FALSE;
if (hdfsScanTdb().getHdfsFileInfoList()->isEmpty())
{
step_ = CHECK_FOR_DATA_MOD_AND_DONE;
break;
}
myInstNum_ = getGlobals()->getMyInstanceNumber();
beginRangeNum_ =
*(Lng32*)hdfsScanTdb().getHdfsFileRangeBeginList()->get(myInstNum_);
numRanges_ =
*(Lng32*)hdfsScanTdb().getHdfsFileRangeNumList()->get(myInstNum_);
currRangeNum_ = beginRangeNum_;
hdfsScanBufMaxSize_ = hdfsScanTdb().hdfsBufSize_;
dataModCheckDone_ = FALSE;
if (numRanges_ > 0)
step_ = CHECK_FOR_DATA_MOD;
else
step_ = CHECK_FOR_DATA_MOD_AND_DONE;
}
break;
case CHECK_FOR_DATA_MOD:
case CHECK_FOR_DATA_MOD_AND_DONE:
{
char * dirPath = hdfsScanTdb().hdfsRootDir_;
Int64 modTS = hdfsScanTdb().modTSforDir_;
if ((dirPath == NULL) || (modTS == -1))
dataModCheckDone_ = TRUE;
if (NOT dataModCheckDone_)
{
Lng32 numOfPartLevels = hdfsScanTdb().numOfPartCols_;
if (hdfsScanTdb().hdfsDirsToCheck())
{
// TBD
}
retcode = ExpLOBinterfaceDataModCheck
(lobGlob_,
dirPath,
hdfsScanTdb().hostName_,
hdfsScanTdb().port_,
modTS,
numOfPartLevels);
if (retcode < 0)
{
Lng32 cliError = 0;
Lng32 intParam1 = -retcode;
ComDiagsArea * diagsArea = NULL;
ExRaiseSqlError(getHeap(), &diagsArea,
(ExeErrorCode)(EXE_ERROR_FROM_LOB_INTERFACE),
NULL, &intParam1,
&cliError,
NULL,
"HDFS",
(char*)"ExpLOBInterfaceDataModCheck",
getLobErrStr(intParam1));
pentry_down->setDiagsArea(diagsArea);
step_ = HANDLE_ERROR_AND_DONE;
break;
}
if (retcode == 1) // check failed
{
ComDiagsArea * diagsArea = NULL;
ExRaiseSqlError(getHeap(), &diagsArea,
(ExeErrorCode)(EXE_HIVE_DATA_MOD_CHECK_ERROR));
pentry_down->setDiagsArea(diagsArea);
step_ = HANDLE_ERROR_AND_DONE;
break;
}
dataModCheckDone_ = TRUE;
}
if (step_ == CHECK_FOR_DATA_MOD_AND_DONE)
step_ = DONE;
else
step_ = INIT_HDFS_CURSOR;
}
break;
case INIT_HDFS_CURSOR:
{
hdfo_ = (HdfsFileInfo*)
hdfsScanTdb().getHdfsFileInfoList()->get(currRangeNum_);
if ((hdfo_->getBytesToRead() == 0) &&
(beginRangeNum_ == currRangeNum_) && (numRanges_ > 1))
{
// skip the first range if it has 0 bytes to read
// doing this for subsequent ranges is more complex
// since the file may need to be closed. The first
// range being 0 is common with sqoop generated files
currRangeNum_++;
hdfo_ = (HdfsFileInfo*)
hdfsScanTdb().getHdfsFileInfoList()->get(currRangeNum_);
}
hdfsOffset_ = hdfo_->getStartOffset();
bytesLeft_ = hdfo_->getBytesToRead();
hdfsFileName_ = hdfo_->fileName();
sprintf(cursorId_, "%d", currRangeNum_);
stopOffset_ = hdfsOffset_ + hdfo_->getBytesToRead();
step_ = OPEN_HDFS_CURSOR;
}
break;
case OPEN_HDFS_CURSOR:
{
retcode = 0;
if (isSequenceFile() && !sequenceFileReader_)
{
sequenceFileReader_ = new(getSpace())
SequenceFileReader((NAHeap *)getSpace());
sfrRetCode = sequenceFileReader_->init();
if (sfrRetCode != JNI_OK)
{
ComDiagsArea * diagsArea = NULL;
ExRaiseSqlError(getHeap(), &diagsArea, (ExeErrorCode)(8447), NULL,
NULL, NULL, NULL, sequenceFileReader_->getErrorText(sfrRetCode), NULL);
pentry_down->setDiagsArea(diagsArea);
step_ = HANDLE_ERROR;
break;
}
}
if (isSequenceFile())
{
sfrRetCode = sequenceFileReader_->open(hdfsFileName_);
if (sfrRetCode == JNI_OK)
{
// Seek to start offset
sfrRetCode = sequenceFileReader_->seeknSync(hdfsOffset_);
}
if (sfrRetCode != JNI_OK)
{
ComDiagsArea * diagsArea = NULL;
ExRaiseSqlError(getHeap(), &diagsArea, (ExeErrorCode)(8447), NULL,
NULL, NULL, NULL, sequenceFileReader_->getErrorText(sfrRetCode), NULL);
pentry_down->setDiagsArea(diagsArea);
step_ = HANDLE_ERROR;
break;
}
}
else
{
openType = 2; // must open
retcode = ExpLOBInterfaceSelectCursor
(lobGlob_,
hdfsFileName_, //hdfsScanTdb().hdfsFileName_,
NULL, //(char*)"",
(Lng32)Lob_External_HDFS_File,
hdfsScanTdb().hostName_,
hdfsScanTdb().port_,
0, NULL, // handle not valid for non lob access
bytesLeft_, // max bytes
cursorId_,
requestTag_, Lob_Memory,
0, // not check status
(NOT hdfsScanTdb().hdfsPrefetch()), //1, // waited op
hdfsOffset_,
hdfsScanBufMaxSize_,
bytesRead_,
NULL,
1, // open
openType //
);
// preopen next range.
if ( (currRangeNum_ + 1) < (beginRangeNum_ + numRanges_) )
{
hdfo = (HdfsFileInfo*)
hdfsScanTdb().getHdfsFileInfoList()->get(currRangeNum_ + 1);
hdfsFileName_ = hdfo->fileName();
sprintf(cursorId, "%d", currRangeNum_ + 1);
openType = 1; // preOpen
retcode = ExpLOBInterfaceSelectCursor
(lobGlob_,
hdfsFileName_, //hdfsScanTdb().hdfsFileName_,
NULL, //(char*)"",
(Lng32)Lob_External_HDFS_File,
hdfsScanTdb().hostName_,
hdfsScanTdb().port_,
0, NULL,//handle not relevant for non lob access
hdfo->getBytesToRead(), // max bytes
cursorId,
requestTag_, Lob_Memory,
0, // not check status
(NOT hdfsScanTdb().hdfsPrefetch()), //1, // waited op
hdfo->getStartOffset(),
hdfsScanBufMaxSize_,
bytesRead_,
NULL,
1,// open
openType
);
hdfsFileName_ = hdfo_->fileName();
}
}
if (retcode < 0)
{
Lng32 cliError = 0;
Lng32 intParam1 = -retcode;
ComDiagsArea * diagsArea = NULL;
ExRaiseSqlError(getHeap(), &diagsArea,
(ExeErrorCode)(EXE_ERROR_FROM_LOB_INTERFACE), NULL,
&intParam1,
&cliError,
NULL,
"HDFS",
(char*)"ExpLOBInterfaceSelectCursor/open",
getLobErrStr(intParam1));
pentry_down->setDiagsArea(diagsArea);
step_ = HANDLE_ERROR;
break;
}
trailingPrevRead_ = 0;
firstBufOfFile_ = true;
numBytesProcessedInRange_ = 0;
step_ = GET_HDFS_DATA;
}
break;
case GET_HDFS_DATA:
{
Int64 bytesToRead = hdfsScanBufMaxSize_ - trailingPrevRead_;
ex_assert(bytesToRead >= 0, "bytesToRead less than zero.");
if (hdfo_->fileIsSplitEnd() && !isSequenceFile())
{
if (bytesLeft_ > 0)
bytesToRead = min(bytesToRead,
(bytesLeft_ + hdfsScanTdb().rangeTailIOSize_));
else
bytesToRead = hdfsScanTdb().rangeTailIOSize_;
}
else
{
ex_assert(bytesLeft_ >= 0, "Bad assumption at e-o-f");
if (bytesToRead > bytesLeft_ +
1 // plus one for end-of-range files with no
// record delimiter at eof.
)
bytesToRead = bytesLeft_ + 1;
}
ex_assert(bytesToRead + trailingPrevRead_ <= hdfsScanBufMaxSize_,
"too many bytes.");
if (hdfsStats_)
hdfsStats_->getHdfsTimer().start();
retcode = 0;
if (isSequenceFile())
{
sfrRetCode = sequenceFileReader_->fetchRowsIntoBuffer(stopOffset_,
hdfsScanBuffer_,
hdfsScanBufMaxSize_, //bytesToRead,
bytesRead_,
hdfsScanTdb().recordDelimiter_);
if (sfrRetCode != JNI_OK && sfrRetCode != SFR_NOMORE)
{
ComDiagsArea * diagsArea = NULL;
ExRaiseSqlError(getHeap(), &diagsArea, (ExeErrorCode)(8447), NULL,
NULL, NULL, NULL, sequenceFileReader_->getErrorText(sfrRetCode), NULL);
pentry_down->setDiagsArea(diagsArea);
step_ = HANDLE_ERROR_WITH_CLOSE;
break;
}
else
{
seqScanAgain_ = (sfrRetCode != SFR_NOMORE);
}
}
else
{
retcode = ExpLOBInterfaceSelectCursor
(lobGlob_,
hdfsFileName_,
NULL,
(Lng32)Lob_External_HDFS_File,
hdfsScanTdb().hostName_,
hdfsScanTdb().port_,
0, NULL,
0, cursorId_,
requestTag_, Lob_Memory,
0, // not check status
(NOT hdfsScanTdb().hdfsPrefetch()), //1, // waited op
hdfsOffset_,
bytesToRead,
bytesRead_,
hdfsScanBuffer_ + trailingPrevRead_,
2, // read
0 // openType, not applicable for read
);
if (hdfsStats_)
hdfsStats_->incMaxHdfsIOTime(hdfsStats_->getHdfsTimer().stop());
if (retcode < 0)
{
Lng32 cliError = 0;
Lng32 intParam1 = -retcode;
ComDiagsArea * diagsArea = NULL;
ExRaiseSqlError(getHeap(), &diagsArea,
(ExeErrorCode)(EXE_ERROR_FROM_LOB_INTERFACE), NULL,
&intParam1,
&cliError,
NULL,
"HDFS",
(char*)"ExpLOBInterfaceSelectCursor/read",
getLobErrStr(intParam1));
pentry_down->setDiagsArea(diagsArea);
step_ = HANDLE_ERROR_WITH_CLOSE;
break;
}
}
if (bytesRead_ <= 0)
{
// Finished with this file. Unexpected? Warning/event?
step_ = CLOSE_HDFS_CURSOR;
break;
}
else
{
char * lastByteRead = hdfsScanBuffer_ +
trailingPrevRead_ + bytesRead_ - 1;
if ((bytesRead_ < bytesToRead) &&
(*lastByteRead != hdfsScanTdb().recordDelimiter_))
{
// Some files end without a record delimiter but
// hive treats the end-of-file as a record delimiter.
lastByteRead[1] = hdfsScanTdb().recordDelimiter_;
bytesRead_++;
}
if (bytesRead_ > bytesLeft_)
{
if (isSequenceFile())
endOfRequestedRange_ = hdfsScanBuffer_ + bytesRead_;
else
endOfRequestedRange_ = hdfsScanBuffer_ +
trailingPrevRead_ + bytesLeft_;
}
else
endOfRequestedRange_ = NULL;
if (isSequenceFile())
{
// If the file is compressed, we don't know the real value
// of bytesLeft_, but it doesn't really matter.
if (seqScanAgain_ == false)
bytesLeft_ = 0;
}
else
bytesLeft_ -= bytesRead_;
}
if (hdfsStats_)
hdfsStats_->incBytesRead(bytesRead_);
if (firstBufOfFile_ && hdfo_->fileIsSplitBegin() && !isSequenceFile())
{
// Position in the hdfsScanBuffer_ to the
// first record delimiter.
hdfsBufNextRow_ = hdfs_strchr(hdfsScanBuffer_,
hdfsScanTdb().recordDelimiter_, hdfsScanBuffer_+trailingPrevRead_+ bytesRead_, checkRangeDelimiter_, hdfsScanTdb().getHiveScanMode(), &changedLen);
// Maybe the record is too long? Or the data isn't ascii?
// Or the delimiter is incorrect.
if (! hdfsBufNextRow_)
{
ComDiagsArea *diagsArea = NULL;
ExRaiseSqlError(getHeap(), &diagsArea,
(ExeErrorCode)(8446), NULL,
NULL, NULL, NULL,
(char*)"No record delimiter found in buffer from hdfsRead.",
NULL);
// no need to log errors in this case (bulk load) since this is a major issue
// and needs to be corrected
pentry_down->setDiagsArea(diagsArea);
step_ = HANDLE_ERROR_WITH_CLOSE;
break;
}
hdfsBufNextRow_ += 1 + changedLen; // point past record delimiter.
// add changedLen since hdfs_strchr will move the pointer to remove the \r
}
else
hdfsBufNextRow_ = hdfsScanBuffer_;
debugPrevRow_ = hdfsScanBuffer_; // By convention, at
// beginning of scan, the
// prev is set to next.
debugtrailingPrevRead_ = 0;
debugPenultimatePrevRow_ = NULL;
firstBufOfFile_ = false;
hdfsOffset_ += bytesRead_;
step_ = PROCESS_HDFS_ROW;
}
break;
case PROCESS_HDFS_ROW:
{
exception_ = FALSE;
nextStep_ = NOT_STARTED;
debugPenultimatePrevRow_ = debugPrevRow_;
debugPrevRow_ = hdfsBufNextRow_;
int formattedRowLength = 0;
ComDiagsArea *transformDiags = NULL;
int err = 0;
char *startOfNextRow =
extractAndTransformAsciiSourceToSqlRow(err, transformDiags, hdfsScanTdb().getHiveScanMode());
bool rowWillBeSelected = true;
lastErrorCnd_ = NULL;
if(err)
{
if (hdfsScanTdb().continueOnError())
{
Lng32 errorCount = workAtp_->getDiagsArea()->getNumber(DgSqlCode::ERROR_);
if (errorCount>0)
lastErrorCnd_ = workAtp_->getDiagsArea()->getErrorEntry(errorCount);
exception_ = TRUE;
rowWillBeSelected = false;
}
else
{
if (transformDiags)
pentry_down->setDiagsArea(transformDiags);
step_ = HANDLE_ERROR_WITH_CLOSE;
break;
}
}
if (startOfNextRow == NULL)
{
step_ = REPOS_HDFS_DATA;
if (!exception_)
break;
}
else
{
numBytesProcessedInRange_ +=
startOfNextRow - hdfsBufNextRow_;
hdfsBufNextRow_ = startOfNextRow;
}
if (exception_)
{
nextStep_ = step_;
step_ = HANDLE_EXCEPTION;
break;
}
if (hdfsStats_)
hdfsStats_->incAccessedRows();
workAtp_->getTupp(hdfsScanTdb().workAtpIndex_) =
hdfsSqlTupp_;
if ((rowWillBeSelected) && (selectPred()))
{
ex_expr::exp_return_type evalRetCode =
selectPred()->eval(pentry_down->getAtp(), workAtp_);
if (evalRetCode == ex_expr::EXPR_FALSE)
rowWillBeSelected = false;
else if (evalRetCode == ex_expr::EXPR_ERROR)
{
if (hdfsScanTdb().continueOnError())
{
if (pentry_down->getDiagsArea() || workAtp_->getDiagsArea())
{
Lng32 errorCount = 0;
if (pentry_down->getDiagsArea())
{
errorCount = pentry_down->getDiagsArea()->getNumber(DgSqlCode::ERROR_);
if (errorCount > 0)
lastErrorCnd_ = pentry_down->getDiagsArea()->getErrorEntry(errorCount);
}
else
{
errorCount = workAtp_->getDiagsArea()->getNumber(DgSqlCode::ERROR_);
if (errorCount > 0)
lastErrorCnd_ = workAtp_->getDiagsArea()->getErrorEntry(errorCount);
}
}
exception_ = TRUE;
nextStep_ = step_;
step_ = HANDLE_EXCEPTION;
rowWillBeSelected = false;
break;
}
step_ = HANDLE_ERROR_WITH_CLOSE;
break;
}
else
ex_assert(evalRetCode == ex_expr::EXPR_TRUE,
"invalid return code from expr eval");
}
if (rowWillBeSelected)
{
if (moveColsConvertExpr())
{
ex_expr::exp_return_type evalRetCode =
moveColsConvertExpr()->eval(workAtp_, workAtp_);
if (evalRetCode == ex_expr::EXPR_ERROR)
{
if (hdfsScanTdb().continueOnError())
{
if ( workAtp_->getDiagsArea())
{
Lng32 errorCount = 0;
errorCount = workAtp_->getDiagsArea()->getNumber(DgSqlCode::ERROR_);
if (errorCount > 0)
lastErrorCnd_ = workAtp_->getDiagsArea()->getErrorEntry(errorCount);
}
exception_ = TRUE;
nextStep_ = step_;
step_ = HANDLE_EXCEPTION;
break;
}
step_ = HANDLE_ERROR_WITH_CLOSE;
break;
}
}
if (hdfsStats_)
hdfsStats_->incUsedRows();
step_ = RETURN_ROW;
break;
}
break;
}
case RETURN_ROW:
{
if (qparent_.up->isFull())
return WORK_OK;
lastErrorCnd_ = NULL;
ex_queue_entry *up_entry = qparent_.up->getTailEntry();
queue_index saveParentIndex = up_entry->upState.parentIndex;
queue_index saveDownIndex = up_entry->upState.downIndex;
up_entry->copyAtp(pentry_down);
up_entry->upState.parentIndex =
pentry_down->downState.parentIndex;
up_entry->upState.downIndex = qparent_.down->getHeadIndex();
up_entry->upState.status = ex_queue::Q_OK_MMORE;
if (moveExpr())
{
UInt32 maxRowLen = hdfsScanTdb().outputRowLength_;
UInt32 rowLen = maxRowLen;
if (hdfsScanTdb().useCifDefrag() &&
!pool_->currentBufferHasEnoughSpace((Lng32)hdfsScanTdb().outputRowLength_))
{
up_entry->getTupp(hdfsScanTdb().tuppIndex_) = defragTd_;
defragTd_->setReferenceCount(1);
ex_expr::exp_return_type evalRetCode =
moveExpr()->eval(up_entry->getAtp(), workAtp_,0,-1,&rowLen);
if (evalRetCode == ex_expr::EXPR_ERROR)
{
if (hdfsScanTdb().continueOnError())
{
if ((pentry_down->downState.request == ex_queue::GET_N) &&
(pentry_down->downState.requestValue == matches_))
step_ = CLOSE_HDFS_CURSOR;
else
step_ = PROCESS_HDFS_ROW;
up_entry->upState.parentIndex =saveParentIndex ;
up_entry->upState.downIndex = saveDownIndex ;
if (up_entry->getDiagsArea() || workAtp_->getDiagsArea())
{
Lng32 errorCount = 0;
if (up_entry->getDiagsArea())
{
errorCount = up_entry->getDiagsArea()->getNumber(DgSqlCode::ERROR_);
if (errorCount > 0)
lastErrorCnd_ = up_entry->getDiagsArea()->getErrorEntry(errorCount);
}
else
{
errorCount = workAtp_->getDiagsArea()->getNumber(DgSqlCode::ERROR_);
if (errorCount > 0)
lastErrorCnd_ = workAtp_->getDiagsArea()->getErrorEntry(errorCount);
}
}
exception_ = TRUE;
nextStep_ = step_;
step_ = HANDLE_EXCEPTION;
break;
}
else
{
// Get diags from up_entry onto pentry_down, which
// is where the HANDLE_ERROR step expects it.
ComDiagsArea *diagsArea = pentry_down->getDiagsArea();
if (diagsArea == NULL)
{
diagsArea =
ComDiagsArea::allocate(getGlobals()->getDefaultHeap());
pentry_down->setDiagsArea (diagsArea);
}
pentry_down->getDiagsArea()->
mergeAfter(*up_entry->getDiagsArea());
up_entry->setDiagsArea(NULL);
step_ = HANDLE_ERROR_WITH_CLOSE;
break;
}
if (pool_->get_free_tuple(
up_entry->getTupp(hdfsScanTdb().tuppIndex_),
rowLen))
return WORK_POOL_BLOCKED;
str_cpy_all(up_entry->getTupp(hdfsScanTdb().tuppIndex_).getDataPointer(),
defragTd_->getTupleAddress(),
rowLen);
}
}
else
{
if (pool_->get_free_tuple(
up_entry->getTupp(hdfsScanTdb().tuppIndex_),
(Lng32)hdfsScanTdb().outputRowLength_))
return WORK_POOL_BLOCKED;
ex_expr::exp_return_type evalRetCode =
moveExpr()->eval(up_entry->getAtp(), workAtp_,0,-1,&rowLen);
if (evalRetCode == ex_expr::EXPR_ERROR)
{
if (hdfsScanTdb().continueOnError())
{
if ((pentry_down->downState.request == ex_queue::GET_N) &&
(pentry_down->downState.requestValue == matches_))
step_ = CLOSE_FILE;
else
step_ = PROCESS_HDFS_ROW;
if (up_entry->getDiagsArea() || workAtp_->getDiagsArea())
{
Lng32 errorCount = 0;
if (up_entry->getDiagsArea())
{
errorCount = up_entry->getDiagsArea()->getNumber(DgSqlCode::ERROR_);
if (errorCount > 0)
lastErrorCnd_ = up_entry->getDiagsArea()->getErrorEntry(errorCount);
}
else
{
errorCount = workAtp_->getDiagsArea()->getNumber(DgSqlCode::ERROR_);
if (errorCount > 0)
lastErrorCnd_ = workAtp_->getDiagsArea()->getErrorEntry(errorCount);
}
}
up_entry->upState.parentIndex =saveParentIndex ;
up_entry->upState.downIndex = saveDownIndex ;
exception_ = TRUE;
nextStep_ = step_;
step_ = HANDLE_EXCEPTION;
break;
}
else
{
// Get diags from up_entry onto pentry_down, which
// is where the HANDLE_ERROR step expects it.
ComDiagsArea *diagsArea = pentry_down->getDiagsArea();
if (diagsArea == NULL)
{
diagsArea =
ComDiagsArea::allocate(getGlobals()->getDefaultHeap());
pentry_down->setDiagsArea (diagsArea);
}
pentry_down->getDiagsArea()->
mergeAfter(*up_entry->getDiagsArea());
up_entry->setDiagsArea(NULL);
step_ = HANDLE_ERROR_WITH_CLOSE;
break;
}
}
if (hdfsScanTdb().useCif() && rowLen != maxRowLen)
{
pool_->resizeLastTuple(rowLen,
up_entry->getTupp(hdfsScanTdb().tuppIndex_).getDataPointer());
}
}
}
up_entry->upState.setMatchNo(++matches_);
if (matches_ == matchBrkPoint_)
brkpoint();
qparent_.up->insert();
// use ExOperStats now, to cover OPERATOR stats as well as
// ALL stats.
if (getStatsEntry())
getStatsEntry()->incActualRowsReturned();
workAtp_->setDiagsArea(NULL); // get rid of warnings.
if (((pentry_down->downState.request == ex_queue::GET_N) &&
(pentry_down->downState.requestValue == matches_)) ||
(pentry_down->downState.request == ex_queue::GET_NOMORE))
step_ = CLOSE_HDFS_CURSOR;
else
step_ = PROCESS_HDFS_ROW;
break;
}
case REPOS_HDFS_DATA:
{
bool scanAgain = false;
if (isSequenceFile())
scanAgain = seqScanAgain_;
else
{
if (hdfo_->fileIsSplitEnd())
{
if (numBytesProcessedInRange_ < hdfo_->getBytesToRead())
scanAgain = true;
}
else
if (bytesLeft_ > 0)
scanAgain = true;
}
if (scanAgain)
{
// Get ready for another gulp of hdfs data.
debugtrailingPrevRead_ = trailingPrevRead_;
trailingPrevRead_ = bytesRead_ -
(hdfsBufNextRow_ -
(hdfsScanBuffer_ + trailingPrevRead_));
// Move trailing data from the end of buffer to the front.
// The GET_HDFS_DATA step will use trailingPrevRead_ to
// adjust the read buffer ptr so that the next read happens
// contiguously to the final byte of the prev read. It will
// also use trailingPrevRead_ to adjust the size of
// the next read so that fixed size buffer is not overrun.
// Finally, trailingPrevRead_ is used in the
// extractSourceFields method to keep from processing
// bytes left in the buffer from the previous read.
if ((trailingPrevRead_ > 0) &&
(hdfsBufNextRow_[0] == RANGE_DELIMITER))
{
checkRangeDelimiter_ = FALSE;
step_ = CLOSE_HDFS_CURSOR;
break;
}
memmove(hdfsScanBuffer_, hdfsBufNextRow_,
(size_t)trailingPrevRead_);
step_ = GET_HDFS_DATA;
}
else
{
trailingPrevRead_ = 0;
step_ = CLOSE_HDFS_CURSOR;
}
break;
}
case CLOSE_HDFS_CURSOR:
{
retcode = 0;
if (isSequenceFile())
{
sfrRetCode = sequenceFileReader_->close();
if (sfrRetCode != JNI_OK)
{
ComDiagsArea * diagsArea = NULL;
ExRaiseSqlError(getHeap(), &diagsArea, (ExeErrorCode)(8447), NULL,
NULL, NULL, NULL, sequenceFileReader_->getErrorText(sfrRetCode), NULL);
pentry_down->setDiagsArea(diagsArea);
step_ = HANDLE_ERROR;
break;
}
}
else
{
retcode = ExpLOBInterfaceSelectCursor
(lobGlob_,
hdfsFileName_,
NULL,
(Lng32)Lob_External_HDFS_File,
hdfsScanTdb().hostName_,
hdfsScanTdb().port_,
0,NULL, //handle not relevant for non lob access
0, cursorId_,
requestTag_, Lob_Memory,
0, // not check status
(NOT hdfsScanTdb().hdfsPrefetch()), //1, // waited op
0,
hdfsScanBufMaxSize_,
bytesRead_,
hdfsScanBuffer_,
3, // close
0); // openType, not applicable for close
if (retcode < 0)
{
Lng32 cliError = 0;
Lng32 intParam1 = -retcode;
ComDiagsArea * diagsArea = NULL;
ExRaiseSqlError(getHeap(), &diagsArea,
(ExeErrorCode)(EXE_ERROR_FROM_LOB_INTERFACE), NULL,
&intParam1,
&cliError,
NULL,
"HDFS",
(char*)"ExpLOBInterfaceSelectCursor/close",
getLobErrStr(intParam1));
pentry_down->setDiagsArea(diagsArea);
step_ = HANDLE_ERROR;
break;
}
}
step_ = CLOSE_FILE;
}
break;
case HANDLE_EXCEPTION:
{
step_ = nextStep_;
exception_ = FALSE;
if (hdfsScanTdb().getMaxErrorRows() > 0)
{
Int64 exceptionCount = 0;
ExHbaseAccessTcb::incrErrorCount( ehi_,exceptionCount,
hdfsScanTdb().getErrCountTable(),hdfsScanTdb().getErrCountRowId());
if (exceptionCount > hdfsScanTdb().getMaxErrorRows())
{
if (pentry_down->getDiagsArea())
pentry_down->getDiagsArea()->clear();
if (workAtp_->getDiagsArea())
workAtp_->getDiagsArea()->clear();
ComDiagsArea *da = workAtp_->getDiagsArea();
if(!da)
{
da = ComDiagsArea::allocate(getHeap());
workAtp_->setDiagsArea(da);
}
*da << DgSqlCode(-EXE_MAX_ERROR_ROWS_EXCEEDED);
step_ = HANDLE_ERROR_WITH_CLOSE;
break;
}
}
if (hdfsScanTdb().getLogErrorRows())
{
int loggingRowLen = hdfsLoggingRowEnd_ - hdfsLoggingRow_ +1;
ExHbaseAccessTcb::handleException((NAHeap *)getHeap(), hdfsLoggingRow_,
loggingRowLen, lastErrorCnd_,
ehi_,
LoggingFileCreated_,
loggingFileName_);
}
if (pentry_down->getDiagsArea())
pentry_down->getDiagsArea()->clear();
if (workAtp_->getDiagsArea())
workAtp_->getDiagsArea()->clear();
}
break;
case HANDLE_ERROR_WITH_CLOSE:
case HANDLE_ERROR:
case HANDLE_ERROR_AND_DONE:
{
if (qparent_.up->isFull())
return WORK_OK;
ex_queue_entry *up_entry = qparent_.up->getTailEntry();
up_entry->copyAtp(pentry_down);
up_entry->upState.parentIndex =
pentry_down->downState.parentIndex;
up_entry->upState.downIndex = qparent_.down->getHeadIndex();
if (workAtp_->getDiagsArea())
{
ComDiagsArea *diagsArea = up_entry->getDiagsArea();
if (diagsArea == NULL)
{
diagsArea =
ComDiagsArea::allocate(getGlobals()->getDefaultHeap());
up_entry->setDiagsArea (diagsArea);
}
up_entry->getDiagsArea()->mergeAfter(*workAtp_->getDiagsArea());
workAtp_->setDiagsArea(NULL);
}
up_entry->upState.status = ex_queue::Q_SQLERROR;
qparent_.up->insert();
if (step_ == HANDLE_ERROR_WITH_CLOSE)
step_ = CLOSE_HDFS_CURSOR;
else if (step_ == HANDLE_ERROR_AND_DONE)
step_ = DONE;
else
step_ = ERROR_CLOSE_FILE;
break;
}
case CLOSE_FILE:
case ERROR_CLOSE_FILE:
{
if (getStatsEntry())
{
ExHdfsScanStats * stats =
getStatsEntry()->castToExHdfsScanStats();
if (stats)
{
ExLobStats s;
s.init();
retcode = ExpLOBinterfaceStats
(lobGlob_,
&s,
hdfsFileName_, //hdfsScanTdb().hdfsFileName_,
NULL, //(char*)"",
(Lng32)Lob_External_HDFS_File,
hdfsScanTdb().hostName_,
hdfsScanTdb().port_);
*stats->lobStats() = *stats->lobStats() + s;
}
}
// if the next file is not the same as the current file, then close the current file.
bool closeFile = true;
if ( (step_ == CLOSE_FILE) &&
((currRangeNum_ + 1) < (beginRangeNum_ + numRanges_)))
{
hdfo = (HdfsFileInfo*) hdfsScanTdb().getHdfsFileInfoList()->get(currRangeNum_ + 1);
if (strcmp(hdfsFileName_, hdfo->fileName()) == 0)
closeFile = false;
}
if (closeFile)
{
retcode = ExpLOBinterfaceCloseFile
(lobGlob_,
hdfsFileName_,
NULL,
(Lng32)Lob_External_HDFS_File,
hdfsScanTdb().hostName_,
hdfsScanTdb().port_);
if ((step_ == CLOSE_FILE) &&
(retcode < 0))
{
Lng32 cliError = 0;
Lng32 intParam1 = -retcode;
ComDiagsArea * diagsArea = NULL;
ExRaiseSqlError(getHeap(), &diagsArea,
(ExeErrorCode)(EXE_ERROR_FROM_LOB_INTERFACE), NULL,
&intParam1,
&cliError,
NULL,
"HDFS",
(char*)"ExpLOBinterfaceCloseFile",
getLobErrStr(intParam1));
pentry_down->setDiagsArea(diagsArea);
}
// sss This is one place that is unconditionally closing the
// hdfsFs that's part of this thread's JNIenv.
// if (ehi_)
// retcode = ehi_->hdfsClose();
}
if (step_ == CLOSE_FILE)
{
currRangeNum_++;
if (currRangeNum_ < (beginRangeNum_ + numRanges_)) {
if (((pentry_down->downState.request == ex_queue::GET_N) &&
(pentry_down->downState.requestValue == matches_)) ||
(pentry_down->downState.request == ex_queue::GET_NOMORE))
step_ = DONE;
else
// move to the next file.
step_ = INIT_HDFS_CURSOR;
break;
}
}
step_ = DONE;
}
break;
case DONE:
{
if (qparent_.up->isFull())
return WORK_OK;
ex_queue_entry *up_entry = qparent_.up->getTailEntry();
up_entry->copyAtp(pentry_down);
up_entry->upState.parentIndex =
pentry_down->downState.parentIndex;
up_entry->upState.downIndex = qparent_.down->getHeadIndex();
up_entry->upState.status = ex_queue::Q_NO_DATA;
up_entry->upState.setMatchNo(matches_);
qparent_.up->insert();
qparent_.down->removeHead();
step_ = NOT_STARTED;
dirInfo = hdfsGetPathInfo(hdfs, "/");
break;
}
default:
{
break;
}
} // switch
} // while
return WORK_OK;
}
char * ExHdfsScanTcb::extractAndTransformAsciiSourceToSqlRow(int &err,
ComDiagsArea* &diagsArea, int mode)
{
err = 0;
char *sourceData = hdfsBufNextRow_;
char *sourceRowEnd = NULL;
char *sourceColEnd = NULL;
int changedLen = 0;
NABoolean isTrailingMissingColumn = FALSE;
ExpTupleDesc * asciiSourceTD =
hdfsScanTdb().workCriDesc_->getTupleDescriptor(hdfsScanTdb().asciiTuppIndex_);
ExpTupleDesc * origSourceTD =
hdfsScanTdb().workCriDesc_->getTupleDescriptor(hdfsScanTdb().origTuppIndex_);
const char cd = hdfsScanTdb().columnDelimiter_;
const char rd = hdfsScanTdb().recordDelimiter_;
const char *sourceDataEnd = hdfsScanBuffer_+trailingPrevRead_+ bytesRead_;
hdfsLoggingRow_ = hdfsBufNextRow_;
if (asciiSourceTD->numAttrs() == 0)
{
sourceRowEnd = hdfs_strchr(sourceData, rd, sourceDataEnd, checkRangeDelimiter_, mode, &changedLen);
hdfsLoggingRowEnd_ = sourceRowEnd + changedLen;
if (!sourceRowEnd)
return NULL;
if ((endOfRequestedRange_) &&
(sourceRowEnd >= endOfRequestedRange_)) {
checkRangeDelimiter_ = TRUE;
*(sourceRowEnd +1)= RANGE_DELIMITER;
}
// no columns need to be converted, e.g. count(*) with no predicate
return sourceRowEnd+1;
}
Lng32 neededColIndex = 0;
Attributes * attr = NULL;
Attributes * tgtAttr = NULL;
NABoolean rdSeen = FALSE;
for (Lng32 i = 0; i < hdfsScanTdb().convertSkipListSize_; i++)
{
// all remaining columns will be skip columns, don't bother
// finding their column delimiters
if (neededColIndex == asciiSourceTD->numAttrs())
continue;
tgtAttr = NULL;
if (hdfsScanTdb().convertSkipList_[i] > 0)
{
attr = asciiSourceTD->getAttr(neededColIndex);
tgtAttr = origSourceTD->getAttr(neededColIndex);
neededColIndex++;
}
else
attr = NULL;
if (!isTrailingMissingColumn) {
sourceColEnd = hdfs_strchr(sourceData, rd, cd, sourceDataEnd, checkRangeDelimiter_, &rdSeen,mode, &changedLen);
if (sourceColEnd == NULL) {
if (rdSeen || (sourceRowEnd == NULL))
return NULL;
else
return sourceRowEnd+1;
}
Int32 len = 0;
len = (Int64)sourceColEnd - (Int64)sourceData;
if (rdSeen) {
sourceRowEnd = sourceColEnd + changedLen;
hdfsLoggingRowEnd_ = sourceRowEnd;
if ((endOfRequestedRange_) &&
(sourceRowEnd >= endOfRequestedRange_)) {
checkRangeDelimiter_ = TRUE;
*(sourceRowEnd +1)= RANGE_DELIMITER;
}
if (i != hdfsScanTdb().convertSkipListSize_ - 1)
isTrailingMissingColumn = TRUE;
}
if (attr) // this is a needed column. We need to convert
{
if (attr->getVCIndicatorLength() == sizeof(short))
*(short*)&hdfsAsciiSourceData_[attr->getVCLenIndOffset()]
= (short)len;
else
*(Int32*)&hdfsAsciiSourceData_[attr->getVCLenIndOffset()]
= len;
if (attr->getNullFlag())
{
*(short *)&hdfsAsciiSourceData_[attr->getNullIndOffset()] = 0;
if (hdfsScanTdb().getNullFormat()) // null format specified by user
{
if (((len == 0) && (strlen(hdfsScanTdb().getNullFormat()) == 0)) ||
((len > 0) && (memcmp(sourceData, hdfsScanTdb().getNullFormat(), len) == 0)))
{
*(short *)&hdfsAsciiSourceData_[attr->getNullIndOffset()] = -1;
}
} // if
else // null format not specified by user
{
// Use default null format.
// for non-varchar, length of zero indicates a null value.
// For all datatypes, HIVE_DEFAULT_NULL_STRING('\N') indicates a null value.
if (((len == 0) && (tgtAttr && (NOT DFS2REC::isSQLVarChar(tgtAttr->getDatatype())))) ||
((len > 0) && (memcmp(sourceData, HIVE_DEFAULT_NULL_STRING, len) == 0)))
{
*(short *)&hdfsAsciiSourceData_[attr->getNullIndOffset()] = -1;
}
} // else
} // if nullable attr
if (len > 0)
{
// move address of data into the source operand.
// convertExpr will dereference this addr and get to the actual
// data.
*(Int64*)&hdfsAsciiSourceData_[attr->getOffset()] =
(Int64)sourceData;
}
else
{
*(Int64*)&hdfsAsciiSourceData_[attr->getOffset()] =
(Int64)0;
}
} // if(attr)
} // if (!trailingMissingColumn)
else
{
// A delimiter was found, but not enough columns.
// Treat the rest of the columns as NULL.
if (attr && attr->getNullFlag())
*(short *)&hdfsAsciiSourceData_[attr->getNullIndOffset()] = -1;
}
sourceData = sourceColEnd + 1 ;
}
// It is possible that the above loop exited before
// the record delimiter was encountered,
// so try to find the record delimiter here.
if (sourceRowEnd == NULL) {
sourceRowEnd = hdfs_strchr(sourceData, rd, sourceDataEnd, checkRangeDelimiter_,mode, &changedLen);
if (sourceRowEnd) {
hdfsLoggingRowEnd_ = sourceRowEnd + changedLen; // changedLen is set when hdfs_strchr moves the return pointer to remove the extra \r
if ((endOfRequestedRange_) &&
(sourceRowEnd >= endOfRequestedRange_ )) {
checkRangeDelimiter_ = TRUE;
*(sourceRowEnd +1)= RANGE_DELIMITER;
}
}
}
workAtp_->getTupp(hdfsScanTdb().workAtpIndex_) = hdfsSqlTupp_;
workAtp_->getTupp(hdfsScanTdb().asciiTuppIndex_) = hdfsAsciiSourceTupp_;
// for later
workAtp_->getTupp(hdfsScanTdb().moveExprColsTuppIndex_) = moveExprColsTupp_;
if (convertExpr())
{
ex_expr::exp_return_type evalRetCode =
convertExpr()->eval(workAtp_, workAtp_);
if (evalRetCode == ex_expr::EXPR_ERROR)
err = -1;
else
err = 0;
}
if (sourceRowEnd)
return sourceRowEnd+1;
return NULL;
}
short ExHdfsScanTcb::moveRowToUpQueue(const char * row, Lng32 len,
short * rc, NABoolean isVarchar)
{
if (qparent_.up->isFull())
{
if (rc)
*rc = WORK_OK;
return -1;
}
Lng32 length;
if (len <= 0)
length = strlen(row);
else
length = len;
tupp p;
if (pool_->get_free_tuple(p, (Lng32)
((isVarchar ? SQL_VARCHAR_HDR_SIZE : 0)
+ length)))
{
if (rc)
*rc = WORK_POOL_BLOCKED;
return -1;
}
char * dp = p.getDataPointer();
if (isVarchar)
{
*(short*)dp = (short)length;
str_cpy_all(&dp[SQL_VARCHAR_HDR_SIZE], row, length);
}
else
{
str_cpy_all(dp, row, length);
}
ex_queue_entry * pentry_down = qparent_.down->getHeadEntry();
ex_queue_entry * up_entry = qparent_.up->getTailEntry();
up_entry->copyAtp(pentry_down);
up_entry->getAtp()->getTupp((Lng32)hdfsScanTdb().tuppIndex_) = p;
up_entry->upState.parentIndex =
pentry_down->downState.parentIndex;
up_entry->upState.setMatchNo(++matches_);
up_entry->upState.status = ex_queue::Q_OK_MMORE;
// insert into parent
qparent_.up->insert();
return 0;
}
short ExHdfsScanTcb::handleError(short &rc)
{
if (qparent_.up->isFull())
{
rc = WORK_OK;
return -1;
}
if (qparent_.up->isFull())
return WORK_OK;
ex_queue_entry *pentry_down = qparent_.down->getHeadEntry();
ex_queue_entry *up_entry = qparent_.up->getTailEntry();
up_entry->copyAtp(pentry_down);
up_entry->upState.parentIndex =
pentry_down->downState.parentIndex;
up_entry->upState.downIndex = qparent_.down->getHeadIndex();
up_entry->upState.status = ex_queue::Q_SQLERROR;
qparent_.up->insert();
return 0;
}
short ExHdfsScanTcb::handleDone(ExWorkProcRetcode &rc)
{
if (qparent_.up->isFull())
{
rc = WORK_OK;
return -1;
}
ex_queue_entry *pentry_down = qparent_.down->getHeadEntry();
ex_queue_entry *up_entry = qparent_.up->getTailEntry();
up_entry->copyAtp(pentry_down);
up_entry->upState.parentIndex =
pentry_down->downState.parentIndex;
up_entry->upState.downIndex = qparent_.down->getHeadIndex();
up_entry->upState.status = ex_queue::Q_NO_DATA;
up_entry->upState.setMatchNo(matches_);
qparent_.up->insert();
qparent_.down->removeHead();
return 0;
}
////////////////////////////////////////////////////////////////////////
// ORC files
////////////////////////////////////////////////////////////////////////
ExOrcScanTcb::ExOrcScanTcb(
const ComTdbHdfsScan &orcScanTdb,
ex_globals * glob ) :
ExHdfsScanTcb( orcScanTdb, glob),
step_(NOT_STARTED)
{
orci_ = ExpORCinterface::newInstance(glob->getDefaultHeap(),
(char*)orcScanTdb.hostName_,
orcScanTdb.port_);
}
ExOrcScanTcb::~ExOrcScanTcb()
{
}
short ExOrcScanTcb::extractAndTransformOrcSourceToSqlRow(
char * orcRow,
Int64 orcRowLen,
Lng32 numOrcCols,
ComDiagsArea* &diagsArea)
{
short err = 0;
if ((!orcRow) || (orcRowLen <= 0))
return -1;
char *sourceData = orcRow;
ExpTupleDesc * asciiSourceTD =
hdfsScanTdb().workCriDesc_->getTupleDescriptor(hdfsScanTdb().asciiTuppIndex_);
if (asciiSourceTD->numAttrs() == 0)
{
// no columns need to be converted, e.g. count(*) with no predicate
return 0;
}
Lng32 neededColIndex = 0;
Attributes * attr = NULL;
Lng32 numCurrCols = 0;
Lng32 currColLen;
for (Lng32 i = 0; i < hdfsScanTdb().convertSkipListSize_; i++)
{
if (hdfsScanTdb().convertSkipList_[i] > 0)
{
attr = asciiSourceTD->getAttr(neededColIndex);
neededColIndex++;
}
else
attr = NULL;
currColLen = *(Lng32*)sourceData;
sourceData += sizeof(currColLen);
if (attr) // this is a needed column. We need to convert
{
*(short*)&hdfsAsciiSourceData_[attr->getVCLenIndOffset()] = currColLen;
if (attr->getNullFlag())
{
if (currColLen == 0)
*(short *)&hdfsAsciiSourceData_[attr->getNullIndOffset()] = -1;
else if (memcmp(sourceData, HIVE_DEFAULT_NULL_STRING, currColLen) == 0)
*(short *)&hdfsAsciiSourceData_[attr->getNullIndOffset()] = -1;
else
*(short *)&hdfsAsciiSourceData_[attr->getNullIndOffset()] = 0;
}
if (currColLen > 0)
{
// move address of data into the source operand.
// convertExpr will dereference this addr and get to the actual
// data.
*(Int64*)&hdfsAsciiSourceData_[attr->getOffset()] =
(Int64)sourceData;
}
} // if(attr)
numCurrCols++;
sourceData += currColLen;
}
if (numCurrCols != numOrcCols)
{
return -1;
}
workAtp_->getTupp(hdfsScanTdb().workAtpIndex_) = hdfsSqlTupp_;
workAtp_->getTupp(hdfsScanTdb().asciiTuppIndex_) = hdfsAsciiSourceTupp_;
// for later
workAtp_->getTupp(hdfsScanTdb().moveExprColsTuppIndex_) = moveExprColsTupp_;
err = 0;
if (convertExpr())
{
ex_expr::exp_return_type evalRetCode =
convertExpr()->eval(workAtp_, workAtp_);
if (evalRetCode == ex_expr::EXPR_ERROR)
err = -1;
else
err = 0;
}
return err;
}
ExWorkProcRetcode ExOrcScanTcb::work()
{
Lng32 retcode = 0;
short rc = 0;
while (!qparent_.down->isEmpty())
{
ex_queue_entry *pentry_down = qparent_.down->getHeadEntry();
if (pentry_down->downState.request == ex_queue::GET_NOMORE)
step_ = DONE;
switch (step_)
{
case NOT_STARTED:
{
matches_ = 0;
hdfsStats_ = NULL;
if (getStatsEntry())
hdfsStats_ = getStatsEntry()->castToExHdfsScanStats();
ex_assert(hdfsStats_, "hdfs stats cannot be null");
if (hdfsStats_)
hdfsStats_->init();
beginRangeNum_ = -1;
numRanges_ = -1;
if (hdfsScanTdb().getHdfsFileInfoList()->isEmpty())
{
step_ = DONE;
break;
}
myInstNum_ = getGlobals()->getMyInstanceNumber();
beginRangeNum_ =
*(Lng32*)hdfsScanTdb().getHdfsFileRangeBeginList()->get(myInstNum_);
numRanges_ =
*(Lng32*)hdfsScanTdb().getHdfsFileRangeNumList()->get(myInstNum_);
currRangeNum_ = beginRangeNum_;
if (numRanges_ > 0)
step_ = INIT_ORC_CURSOR;
else
step_ = DONE;
}
break;
case INIT_ORC_CURSOR:
{
/* orci_ = ExpORCinterface::newInstance(getHeap(),
(char*)hdfsScanTdb().hostName_,
*/
hdfo_ = (HdfsFileInfo*)
hdfsScanTdb().getHdfsFileInfoList()->get(currRangeNum_);
orcStartRowNum_ = hdfo_->getStartRow();
orcNumRows_ = hdfo_->getNumRows();
hdfsFileName_ = hdfo_->fileName();
sprintf(cursorId_, "%d", currRangeNum_);
if (orcNumRows_ == -1) // select all rows
orcStopRowNum_ = -1;
else
orcStopRowNum_ = orcStartRowNum_ + orcNumRows_ - 1;
step_ = OPEN_ORC_CURSOR;
}
break;
case OPEN_ORC_CURSOR:
{
retcode = orci_->scanOpen(hdfsFileName_,
orcStartRowNum_, orcStopRowNum_);
if (retcode < 0)
{
setupError(EXE_ERROR_FROM_LOB_INTERFACE, retcode, "ORC", "scanOpen",
orci_->getErrorText(-retcode));
step_ = HANDLE_ERROR;
break;
}
step_ = GET_ORC_ROW;
}
break;
case GET_ORC_ROW:
{
orcRow_ = hdfsScanBuffer_;
orcRowLen_ = hdfsScanTdb().hdfsBufSize_;
retcode = orci_->scanFetch(orcRow_, orcRowLen_, orcRowNum_,
numOrcCols_);
if (retcode < 0)
{
setupError(EXE_ERROR_FROM_LOB_INTERFACE, retcode, "ORC", "scanFetch",
orci_->getErrorText(-retcode));
step_ = HANDLE_ERROR;
break;
}
if (retcode == 100)
{
step_ = CLOSE_ORC_CURSOR;
break;
}
step_ = PROCESS_ORC_ROW;
}
break;
case PROCESS_ORC_ROW:
{
int formattedRowLength = 0;
ComDiagsArea *transformDiags = NULL;
short err =
extractAndTransformOrcSourceToSqlRow(orcRow_, orcRowLen_,
numOrcCols_, transformDiags);
if (err)
{
if (transformDiags)
pentry_down->setDiagsArea(transformDiags);
step_ = HANDLE_ERROR;
break;
}
if (hdfsStats_)
hdfsStats_->incAccessedRows();
workAtp_->getTupp(hdfsScanTdb().workAtpIndex_) =
hdfsSqlTupp_;
bool rowWillBeSelected = true;
if (selectPred())
{
ex_expr::exp_return_type evalRetCode =
selectPred()->eval(pentry_down->getAtp(), workAtp_);
if (evalRetCode == ex_expr::EXPR_FALSE)
rowWillBeSelected = false;
else if (evalRetCode == ex_expr::EXPR_ERROR)
{
step_ = HANDLE_ERROR;
break;
}
else
ex_assert(evalRetCode == ex_expr::EXPR_TRUE,
"invalid return code from expr eval");
}
if (rowWillBeSelected)
{
if (moveColsConvertExpr())
{
ex_expr::exp_return_type evalRetCode =
moveColsConvertExpr()->eval(workAtp_, workAtp_);
if (evalRetCode == ex_expr::EXPR_ERROR)
{
step_ = HANDLE_ERROR;
break;
}
}
if (hdfsStats_)
hdfsStats_->incUsedRows();
step_ = RETURN_ROW;
break;
}
step_ = GET_ORC_ROW;
}
break;
case RETURN_ROW:
{
if (qparent_.up->isFull())
return WORK_OK;
ex_queue_entry *up_entry = qparent_.up->getTailEntry();
up_entry->copyAtp(pentry_down);
up_entry->upState.parentIndex =
pentry_down->downState.parentIndex;
up_entry->upState.downIndex = qparent_.down->getHeadIndex();
up_entry->upState.status = ex_queue::Q_OK_MMORE;
if (moveExpr())
{
UInt32 maxRowLen = hdfsScanTdb().outputRowLength_;
UInt32 rowLen = maxRowLen;
if (hdfsScanTdb().useCifDefrag() &&
!pool_->currentBufferHasEnoughSpace((Lng32)hdfsScanTdb().outputRowLength_))
{
up_entry->getTupp(hdfsScanTdb().tuppIndex_) = defragTd_;
defragTd_->setReferenceCount(1);
ex_expr::exp_return_type evalRetCode =
moveExpr()->eval(up_entry->getAtp(), workAtp_,0,-1,&rowLen);
if (evalRetCode == ex_expr::EXPR_ERROR)
{
// Get diags from up_entry onto pentry_down, which
// is where the HANDLE_ERROR step expects it.
ComDiagsArea *diagsArea = pentry_down->getDiagsArea();
if (diagsArea == NULL)
{
diagsArea =
ComDiagsArea::allocate(getGlobals()->getDefaultHeap());
pentry_down->setDiagsArea (diagsArea);
}
pentry_down->getDiagsArea()->
mergeAfter(*up_entry->getDiagsArea());
up_entry->setDiagsArea(NULL);
step_ = HANDLE_ERROR;
break;
}
if (pool_->get_free_tuple(
up_entry->getTupp(hdfsScanTdb().tuppIndex_),
rowLen))
return WORK_POOL_BLOCKED;
str_cpy_all(up_entry->getTupp(hdfsScanTdb().tuppIndex_).getDataPointer(),
defragTd_->getTupleAddress(),
rowLen);
}
else
{
if (pool_->get_free_tuple(
up_entry->getTupp(hdfsScanTdb().tuppIndex_),
(Lng32)hdfsScanTdb().outputRowLength_))
return WORK_POOL_BLOCKED;
ex_expr::exp_return_type evalRetCode =
moveExpr()->eval(up_entry->getAtp(), workAtp_,0,-1,&rowLen);
if (evalRetCode == ex_expr::EXPR_ERROR)
{
// Get diags from up_entry onto pentry_down, which
// is where the HANDLE_ERROR step expects it.
ComDiagsArea *diagsArea = pentry_down->getDiagsArea();
if (diagsArea == NULL)
{
diagsArea =
ComDiagsArea::allocate(getGlobals()->getDefaultHeap());
pentry_down->setDiagsArea (diagsArea);
}
pentry_down->getDiagsArea()->
mergeAfter(*up_entry->getDiagsArea());
up_entry->setDiagsArea(NULL);
step_ = HANDLE_ERROR;
break;
}
if (hdfsScanTdb().useCif() && rowLen != maxRowLen)
{
pool_->resizeLastTuple(rowLen,
up_entry->getTupp(hdfsScanTdb().tuppIndex_).getDataPointer());
}
}
}
up_entry->upState.setMatchNo(++matches_);
if (matches_ == matchBrkPoint_)
brkpoint();
qparent_.up->insert();
// use ExOperStats now, to cover OPERATOR stats as well as
// ALL stats.
if (getStatsEntry())
getStatsEntry()->incActualRowsReturned();
workAtp_->setDiagsArea(NULL); // get rid of warnings.
if ((pentry_down->downState.request == ex_queue::GET_N) &&
(pentry_down->downState.requestValue == matches_))
step_ = CLOSE_ORC_CURSOR;
else
step_ = GET_ORC_ROW;
break;
}
case CLOSE_ORC_CURSOR:
{
retcode = orci_->scanClose();
if (retcode < 0)
{
setupError(EXE_ERROR_FROM_LOB_INTERFACE, retcode, "ORC", "scanClose",
orci_->getErrorText(-retcode));
step_ = HANDLE_ERROR;
break;
}
currRangeNum_++;
if (currRangeNum_ < (beginRangeNum_ + numRanges_))
{
// move to the next file.
step_ = INIT_ORC_CURSOR;
break;
}
step_ = DONE;
}
break;
case HANDLE_ERROR:
{
if (handleError(rc))
return rc;
step_ = DONE;
}
break;
case DONE:
{
if (handleDone(rc))
return rc;
step_ = NOT_STARTED;
}
break;
default:
{
break;
}
} // switch
} // while
return WORK_OK;
}
ExOrcFastAggrTcb::ExOrcFastAggrTcb(
const ComTdbOrcFastAggr &orcAggrTdb,
ex_globals * glob ) :
ExOrcScanTcb(orcAggrTdb, glob),
step_(NOT_STARTED)
{
if (orcAggrTdb.outputRowLength_ > 0)
aggrRow_ = new(glob->getDefaultHeap()) char[orcAggrTdb.outputRowLength_];
}
ExOrcFastAggrTcb::~ExOrcFastAggrTcb()
{
}
ExWorkProcRetcode ExOrcFastAggrTcb::work()
{
Lng32 retcode = 0;
short rc = 0;
while (!qparent_.down->isEmpty())
{
ex_queue_entry *pentry_down = qparent_.down->getHeadEntry();
if (pentry_down->downState.request == ex_queue::GET_NOMORE)
step_ = DONE;
switch (step_)
{
case NOT_STARTED:
{
matches_ = 0;
hdfsStats_ = NULL;
if (getStatsEntry())
hdfsStats_ = getStatsEntry()->castToExHdfsScanStats();
ex_assert(hdfsStats_, "hdfs stats cannot be null");
orcAggrTdb().getHdfsFileInfoList()->position();
rowCount_ = 0;
step_ = ORC_AGGR_INIT;
}
break;
case ORC_AGGR_INIT:
{
if (orcAggrTdb().getHdfsFileInfoList()->atEnd())
{
step_ = ORC_AGGR_PROJECT;
break;
}
hdfo_ = (HdfsFileInfo*)orcAggrTdb().getHdfsFileInfoList()->getNext();
hdfsFileName_ = hdfo_->fileName();
step_ = ORC_AGGR_EVAL;
}
break;
case ORC_AGGR_EVAL:
{
Int64 currRowCount = 0;
retcode = orci_->getRowCount(hdfsFileName_, currRowCount);
if (retcode < 0)
{
setupError(EXE_ERROR_FROM_LOB_INTERFACE, retcode, "ORC", "getRowCount",
orci_->getErrorText(-retcode));
step_ = HANDLE_ERROR;
break;
}
rowCount_ += currRowCount;
step_ = ORC_AGGR_INIT;
}
break;
case ORC_AGGR_PROJECT:
{
ExpTupleDesc * projTuppTD =
orcAggrTdb().workCriDesc_->getTupleDescriptor
(orcAggrTdb().workAtpIndex_);
Attributes * attr = projTuppTD->getAttr(0);
if (! attr)
{
step_ = HANDLE_ERROR;
break;
}
if (attr->getNullFlag())
{
*(short*)&aggrRow_[attr->getNullIndOffset()] = 0;
}
str_cpy_all(&aggrRow_[attr->getOffset()], (char*)&rowCount_, sizeof(rowCount_));
step_ = ORC_AGGR_RETURN;
}
break;
case ORC_AGGR_RETURN:
{
if (qparent_.up->isFull())
return WORK_OK;
short rc = 0;
if (moveRowToUpQueue(aggrRow_, orcAggrTdb().outputRowLength_,
&rc, FALSE))
return rc;
step_ = DONE;
}
break;
case HANDLE_ERROR:
{
if (handleError(rc))
return rc;
step_ = DONE;
}
break;
case DONE:
{
if (handleDone(rc))
return rc;
step_ = NOT_STARTED;
}
break;
} // switch
} // while
return WORK_OK;
}
| 1 | 12,809 | errno is a global variable that might be set by any system library call. It is dangerous to rely on it except right after the system call where it is set. But in this code, it looks like the library call is buried inside ExpLOBInterfaceSelectCursor. It would be safer if the latter function saved errno after whatever system library call it calls, and returns that as an "out" parameter, which this code could then check. | apache-trafodion | cpp |
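A minimal sketch of the pattern suggested in the comment above, using a hypothetical wrapper (the real ExpLOBInterfaceSelectCursor internals are not shown in this file): capture errno immediately after the system call that sets it and hand it back through an out parameter, so callers never have to read the global later.
#include <cerrno>
#include <unistd.h>
// Hypothetical helper: performs the read and saves errno right away.
static ssize_t readSavingErrno(int fd, char *buf, size_t len, int &savedErrno)
{
  ssize_t n = read(fd, buf, len);
  savedErrno = (n < 0) ? errno : 0; // captured before any other library call can clobber it
  return n;
}
// A caller (e.g. the work() loop above) would then build its diagnostics
// from savedErrno rather than from the global errno:
//   int savedErrno = 0;
//   if (readSavingErrno(fd, buf, sizeof(buf), savedErrno) < 0)
//     ... raise EXE_ERROR_FROM_LOB_INTERFACE using savedErrno ...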
@@ -50,7 +50,8 @@ class PackageNode(object):
def __repr__(self):
finfo = self._package.get_path()[:-len(PackageStore.PACKAGE_FILE_EXT)]
pinfo = self._prefix
- return "<%s %r %r>" % (self.__class__.__name__, finfo, pinfo)
+ kinfo = '\n'.join(self._keys()) if hasattr(self, '_keys') else ''
+ return "<%s %r:%r>\n%s" % (self.__class__.__name__, finfo, pinfo, kinfo)
class GroupNode(PackageNode): | 1 | """
Magic module that maps its submodules to Quilt tables.
Submodules have the following format: quilt.data.$user.$package.$table
E.g.:
import quilt.data.$user.$package as $package
print $package.$table
or
from quilt.data.$user.$package import $table
print $table
The corresponding data is looked up in `quilt_modules/$user/$package.h5`
in ancestors of the current directory.
"""
import imp
import os.path
import sys
from .tools.core import GroupNode as CoreGroupNode
from .tools.package import PackageException
from .tools.store import PackageStore
__path__ = [] # Required for submodules to work
class PackageNode(object):
"""
Abstract class that represents a group or a leaf node in a package.
"""
def __init__(self, package, prefix, node):
# Can't instantiate it directly
assert self.__class__ != PackageNode
self._package = package
self._prefix = prefix
self._node = node
def __eq__(self, other):
if isinstance(other, self.__class__):
return self._package == other._package and self._prefix == other._prefix
return NotImplemented
def __ne__(self, other):
return not self == other
def __hash__(self):
return hash((self._package, self._prefix))
def __repr__(self):
finfo = self._package.get_path()[:-len(PackageStore.PACKAGE_FILE_EXT)]
pinfo = self._prefix
return "<%s %r %r>" % (self.__class__.__name__, finfo, pinfo)
class GroupNode(PackageNode):
"""
Represents a group in a package. Allows accessing child objects using the dot notation.
"""
def __getattr__(self, name):
# TODO: clean up the startswith('_') check below since VALID_NAME_RE no longer allows leading _
if name.startswith('_'):
raise AttributeError
path = self._prefix + '/' + name
try:
return create_node(self._package, path)
except PackageException:
raise AttributeError("No such table or group: %s" % path)
def __dir__(self):
# https://mail.python.org/pipermail/python-ideas/2011-May/010321.html
return sorted(set((dir(type(self)) + list(self.__dict__) + self._keys())))
def _leaf_keys(self):
"""
every child key referencing a dataframe
"""
pref = self._prefix + '/'
return [k for k in self._keys()
if not isinstance(self._package.get(pref + k), CoreGroupNode)]
def _group_keys(self):
"""
every child key referencing a group that is not a dataframe
"""
pref = self._prefix + '/'
return [k for k in self._keys()
if isinstance(self._package.get(pref + k), CoreGroupNode)]
def _keys(self):
"""
keys directly accessible on this object via getattr or .
"""
return list(self._node.children)
class LeafNode(PackageNode):
"""
Represents a dataframe or a file. Allows accessing the contents using `()`.
"""
def __call__(self):
return self.data()
def data(self):
"""
Returns the contents of the node: a dataframe or a file path.
"""
return self._package.get_obj(self._node)
def create_node(package, prefix=''):
assert not prefix.endswith('/')
node = package.get(prefix)
if isinstance(node, CoreGroupNode):
return GroupNode(package, prefix, node)
else:
return LeafNode(package, prefix, node)
class FakeLoader(object):
"""
Fake module loader used to create intermediate user and package modules.
"""
def __init__(self, path):
self._path = path
def load_module(self, fullname):
"""
Returns an empty module.
"""
mod = sys.modules.setdefault(fullname, imp.new_module(fullname))
mod.__file__ = self._path
mod.__loader__ = self
mod.__path__ = []
mod.__package__ = fullname
return mod
class PackageLoader(object):
"""
Module loader for Quilt tables.
"""
def __init__(self, path, package):
self._path = path
self._package = package
def load_module(self, fullname):
"""
Returns an object that lazily looks up tables and groups.
"""
mod = sys.modules.get(fullname)
if mod is not None:
return mod
# We're creating an object rather than a module. It's a hack, but it's approved by Guido:
# https://mail.python.org/pipermail/python-ideas/2012-May/014969.html
mod = create_node(self._package)
sys.modules[fullname] = mod
return mod
class ModuleFinder(object):
"""
Looks up submodules.
"""
@staticmethod
def find_module(fullname, path=None):
"""
Looks up the table based on the module path.
"""
if not fullname.startswith(__name__ + '.'):
# Not a quilt submodule.
return None
submodule = fullname[len(__name__) + 1:]
parts = submodule.split('.')
if len(parts) == 1:
for store_dir in PackageStore.find_store_dirs():
# find contents
file_path = os.path.join(store_dir, parts[0])
if os.path.isdir(file_path):
return FakeLoader(file_path)
elif len(parts) == 2:
user, package = parts
pkgobj = PackageStore.find_package(user, package)
if pkgobj:
file_path = pkgobj.get_path()
return PackageLoader(file_path, pkgobj)
return None
sys.meta_path.append(ModuleFinder)
| 1 | 14,978 | `hasattr` is kinda terrible; just append extra info in the subclass. | quiltdata-quilt | py |
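A sketch of what the reviewer suggests (class and attribute names taken from the module above; this exact refactor is an assumption, not part of the patch): keep the base __repr__ free of hasattr probing and let GroupNode, which actually defines _keys(), append the extra detail.
class PackageNode(object):
    def __repr__(self):
        finfo = self._package.get_path()[:-len(PackageStore.PACKAGE_FILE_EXT)]
        pinfo = self._prefix
        return "<%s %r:%r>" % (self.__class__.__name__, finfo, pinfo)

class GroupNode(PackageNode):
    def __repr__(self):
        # the subclass that owns _keys() adds its own detail; no hasattr needed
        return "%s\n%s" % (super(GroupNode, self).__repr__(), '\n'.join(self._keys()))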
@@ -75,6 +75,16 @@ DAY = HOUR * 24
WEEK = DAY * 7
MONTH = DAY * 31
YEAR = DAY * 365
+
+# Set a flag to indicate whether the '%l' option can be used safely.
+# On Windows, in particular the %l option in strftime is not supported.
+#(It is not one of the documented Python formatters).
+try:
+ datetime.now().strftime("%a %l%p")
+ percent_l_supported = True
+except ValueError, e:
+ percent_l_supported = False
+
xAxisConfigs = (
dict(seconds=0.00, minorGridUnit=SEC, minorGridStep=5, majorGridUnit=MIN, majorGridStep=1, labelUnit=SEC, labelStep=5, format="%H:%M:%S", maxInterval=10*MIN),
dict(seconds=0.07, minorGridUnit=SEC, minorGridStep=10, majorGridUnit=MIN, majorGridStep=1, labelUnit=SEC, labelStep=10, format="%H:%M:%S", maxInterval=20*MIN), | 1 | """Copyright 2008 Orbitz WorldWide
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License."""
import os, cairo, math, itertools, re
import StringIO
from datetime import datetime, timedelta
from urllib import unquote_plus
from ConfigParser import SafeConfigParser
from django.conf import settings
from graphite.render.datalib import TimeSeries
from graphite.util import json
try: # See if there is a system installation of pytz first
import pytz
except ImportError: # Otherwise we fall back to Graphite's bundled version
from graphite.thirdparty import pytz
INFINITY = float('inf')
colorAliases = {
'black' : (0,0,0),
'white' : (255,255,255),
'blue' : (100,100,255),
'green' : (0,200,0),
'red' : (200,00,50),
'yellow' : (255,255,0),
'orange' : (255, 165, 0),
'purple' : (200,100,255),
'brown' : (150,100,50),
'cyan' : (0,255,255),
'aqua' : (0,150,150),
'gray' : (175,175,175),
'grey' : (175,175,175),
'magenta' : (255,0,255),
'pink' : (255,100,100),
'gold' : (200,200,0),
'rose' : (200,150,200),
'darkblue' : (0,0,255),
'darkgreen' : (0,255,0),
'darkred' : (255,0,0),
'darkgray' : (111,111,111),
'darkgrey' : (111,111,111),
}
# This gets overridden by graphTemplates.conf
defaultGraphOptions = dict(
background='black',
foreground='white',
majorline='white',
minorline='grey',
linecolors='blue,green,red,purple,brown,yellow,aqua,grey,magenta,pink,gold,rose',
fontname='Sans',
fontsize=10,
fontbold='false',
fontitalic='false',
)
#X-axis configurations (copied from rrdtool, this technique is evil & ugly but effective)
SEC = 1
MIN = 60
HOUR = MIN * 60
DAY = HOUR * 24
WEEK = DAY * 7
MONTH = DAY * 31
YEAR = DAY * 365
xAxisConfigs = (
dict(seconds=0.00, minorGridUnit=SEC, minorGridStep=5, majorGridUnit=MIN, majorGridStep=1, labelUnit=SEC, labelStep=5, format="%H:%M:%S", maxInterval=10*MIN),
dict(seconds=0.07, minorGridUnit=SEC, minorGridStep=10, majorGridUnit=MIN, majorGridStep=1, labelUnit=SEC, labelStep=10, format="%H:%M:%S", maxInterval=20*MIN),
dict(seconds=0.14, minorGridUnit=SEC, minorGridStep=15, majorGridUnit=MIN, majorGridStep=1, labelUnit=SEC, labelStep=15, format="%H:%M:%S", maxInterval=30*MIN),
dict(seconds=0.27, minorGridUnit=SEC, minorGridStep=30, majorGridUnit=MIN, majorGridStep=2, labelUnit=MIN, labelStep=1, format="%H:%M", maxInterval=2*HOUR),
dict(seconds=0.5, minorGridUnit=MIN, minorGridStep=1, majorGridUnit=MIN, majorGridStep=2, labelUnit=MIN, labelStep=1, format="%H:%M", maxInterval=2*HOUR),
dict(seconds=1.2, minorGridUnit=MIN, minorGridStep=1, majorGridUnit=MIN, majorGridStep=4, labelUnit=MIN, labelStep=2, format="%H:%M", maxInterval=3*HOUR),
dict(seconds=2, minorGridUnit=MIN, minorGridStep=1, majorGridUnit=MIN, majorGridStep=10, labelUnit=MIN, labelStep=5, format="%H:%M", maxInterval=6*HOUR),
dict(seconds=5, minorGridUnit=MIN, minorGridStep=2, majorGridUnit=MIN, majorGridStep=10, labelUnit=MIN, labelStep=10, format="%H:%M", maxInterval=12*HOUR),
dict(seconds=10, minorGridUnit=MIN, minorGridStep=5, majorGridUnit=MIN, majorGridStep=20, labelUnit=MIN, labelStep=20, format="%H:%M", maxInterval=1*DAY),
dict(seconds=30, minorGridUnit=MIN, minorGridStep=10, majorGridUnit=HOUR, majorGridStep=1, labelUnit=HOUR, labelStep=1, format="%H:%M", maxInterval=2*DAY),
dict(seconds=60, minorGridUnit=MIN, minorGridStep=30, majorGridUnit=HOUR, majorGridStep=2, labelUnit=HOUR, labelStep=2, format="%H:%M", maxInterval=2*DAY),
dict(seconds=100, minorGridUnit=HOUR, minorGridStep=2, majorGridUnit=HOUR, majorGridStep=4, labelUnit=HOUR, labelStep=4, format="%a %l%p", maxInterval=6*DAY),
dict(seconds=255, minorGridUnit=HOUR, minorGridStep=6, majorGridUnit=HOUR, majorGridStep=12, labelUnit=HOUR, labelStep=12, format="%m/%d %l%p"),
dict(seconds=600, minorGridUnit=HOUR, minorGridStep=6, majorGridUnit=DAY, majorGridStep=1, labelUnit=DAY, labelStep=1, format="%m/%d", maxInterval=14*DAY),
dict(seconds=600, minorGridUnit=HOUR, minorGridStep=12, majorGridUnit=DAY, majorGridStep=1, labelUnit=DAY, labelStep=1, format="%m/%d", maxInterval=365*DAY),
dict(seconds=2000, minorGridUnit=DAY, minorGridStep=1, majorGridUnit=DAY, majorGridStep=2, labelUnit=DAY, labelStep=2, format="%m/%d", maxInterval=365*DAY),
dict(seconds=4000, minorGridUnit=DAY, minorGridStep=2, majorGridUnit=DAY, majorGridStep=4, labelUnit=DAY, labelStep=4, format="%m/%d", maxInterval=365*DAY),
dict(seconds=8000, minorGridUnit=DAY, minorGridStep=3.5,majorGridUnit=DAY, majorGridStep=7, labelUnit=DAY, labelStep=7, format="%m/%d", maxInterval=365*DAY),
dict(seconds=16000, minorGridUnit=DAY, minorGridStep=7, majorGridUnit=DAY, majorGridStep=14, labelUnit=DAY, labelStep=14, format="%m/%d", maxInterval=365*DAY),
dict(seconds=32000, minorGridUnit=DAY, minorGridStep=15, majorGridUnit=DAY, majorGridStep=30, labelUnit=DAY, labelStep=30, format="%m/%d", maxInterval=365*DAY),
dict(seconds=64000, minorGridUnit=DAY, minorGridStep=30, majorGridUnit=DAY, majorGridStep=60, labelUnit=DAY, labelStep=60, format="%m/%d %Y"),
)
UnitSystems = {
'binary': (
('Pi', 1024.0**5),
('Ti', 1024.0**4),
('Gi', 1024.0**3),
('Mi', 1024.0**2),
('Ki', 1024.0 )),
'si': (
('P', 1000.0**5),
('T', 1000.0**4),
('G', 1000.0**3),
('M', 1000.0**2),
('K', 1000.0 )),
'none' : [],
}
class GraphError(Exception):
pass
class Graph:
customizable = ('width','height','margin','bgcolor','fgcolor', \
'fontName','fontSize','fontBold','fontItalic', \
'colorList','template','yAxisSide','outputFormat')
def __init__(self,**params):
self.params = params
self.data = params['data']
self.dataLeft = []
self.dataRight = []
self.secondYAxis = False
self.width = int( params.get('width',200) )
self.height = int( params.get('height',200) )
self.margin = int( params.get('margin',10) )
self.userTimeZone = params.get('tz')
self.logBase = params.get('logBase', None)
self.minorY = int(params.get('minorY', 1))
if self.logBase:
if self.logBase == 'e':
self.logBase = math.e
elif self.logBase <= 0:
self.logBase = None
params['logBase'] = None
else:
self.logBase = float(self.logBase)
if self.margin < 0:
self.margin = 10
self.area = {
'xmin' : self.margin + 10, # Need extra room when the time is near the left edge
'xmax' : self.width - self.margin,
'ymin' : self.margin,
'ymax' : self.height - self.margin,
}
self.loadTemplate( params.get('template','default') )
self.setupCairo( params.get('outputFormat','png').lower() )
opts = self.ctx.get_font_options()
opts.set_antialias( cairo.ANTIALIAS_NONE )
self.ctx.set_font_options( opts )
self.foregroundColor = params.get('fgcolor',self.defaultForeground)
self.backgroundColor = params.get('bgcolor',self.defaultBackground)
self.setColor( self.backgroundColor )
self.drawRectangle( 0, 0, self.width, self.height )
if 'colorList' in params:
colorList = unquote_plus( params['colorList'] ).split(',')
else:
colorList = self.defaultColorList
self.colors = itertools.cycle( colorList )
self.drawGraph(**params)
def setupCairo(self,outputFormat='png'):
self.outputFormat = outputFormat
if outputFormat == 'png':
self.surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, self.width, self.height)
else:
self.surfaceData = StringIO.StringIO()
self.surface = cairo.SVGSurface(self.surfaceData, self.width, self.height)
self.ctx = cairo.Context(self.surface)
def setColor(self, value, alpha=1.0, forceAlpha=False):
if type(value) is tuple and len(value) == 3:
r,g,b = value
elif value in colorAliases:
r,g,b = colorAliases[value]
elif type(value) in (str,unicode) and len(value) >= 6:
s = value
if s[0] == '#': s = s[1:]
if s[0:3] == '%23': s = s[3:]
r,g,b = ( int(s[0:2],base=16), int(s[2:4],base=16), int(s[4:6],base=16) )
if len(s) == 8 and not forceAlpha:
alpha = float( int(s[6:8],base=16) ) / 255.0
else:
raise ValueError, "Must specify an RGB 3-tuple, an html color string, or a known color alias!"
r,g,b = [float(c) / 255.0 for c in (r,g,b)]
self.ctx.set_source_rgba(r,g,b,alpha)
def setFont(self, **params):
p = self.defaultFontParams.copy()
p.update(params)
self.ctx.select_font_face(p['name'], p['italic'], p['bold'])
self.ctx.set_font_size( float(p['size']) )
def getExtents(self,text=None,fontOptions={}):
if fontOptions:
self.setFont(**fontOptions)
F = self.ctx.font_extents()
extents = { 'maxHeight' : F[2], 'maxAscent' : F[0], 'maxDescent' : F[1] }
if text:
T = self.ctx.text_extents(text)
extents['width'] = T[4]
extents['height'] = T[3]
return extents
def drawRectangle(self, x, y, w, h, fill=True, dash=False):
if not fill:
o = self.ctx.get_line_width() / 2.0 #offset for borders so they are drawn as lines would be
x += o
y += o
w -= o
h -= o
self.ctx.rectangle(x,y,w,h)
if fill:
self.ctx.fill()
else:
if dash:
self.ctx.set_dash(dash,1)
else:
self.ctx.set_dash([],0)
self.ctx.stroke()
def drawText(self,text,x,y,font={},color={},align='left',valign='top',border=False,rotate=0):
if font: self.setFont(**font)
if color: self.setColor(**color)
extents = self.getExtents(text)
angle = math.radians(rotate)
origMatrix = self.ctx.get_matrix()
horizontal = {
'left' : 0,
'center' : extents['width'] / 2,
'right' : extents['width'],
}[align.lower()]
vertical = {
'top' : extents['maxAscent'],
'middle' : extents['maxHeight'] / 2 - extents['maxDescent'],
'bottom' : -extents['maxDescent'],
'baseline' : 0,
}[valign.lower()]
self.ctx.move_to(x,y)
self.ctx.rel_move_to( math.sin(angle) * -vertical, math.cos(angle) * vertical)
self.ctx.rotate(angle)
self.ctx.rel_move_to( -horizontal, 0 )
bx, by = self.ctx.get_current_point()
by -= extents['maxAscent']
self.ctx.text_path(text)
self.ctx.fill()
if border:
self.drawRectangle(bx, by, extents['width'], extents['maxHeight'], fill=False)
else:
self.ctx.set_matrix(origMatrix)
def drawTitle(self,text):
self.encodeHeader('title')
y = self.area['ymin']
x = self.width / 2
lineHeight = self.getExtents()['maxHeight']
for line in text.split('\n'):
self.drawText(line, x, y, align='center')
y += lineHeight
if self.params.get('yAxisSide') == 'right':
self.area['ymin'] = y
else:
self.area['ymin'] = y + self.margin
def drawLegend(self, elements, unique=False): #elements is [ (name,color,rightSide), (name,color,rightSide), ... ]
self.encodeHeader('legend')
if unique:
# remove duplicate names
namesSeen = []
newElements = []
for e in elements:
if e[0] not in namesSeen:
namesSeen.append(e[0])
newElements.append(e)
elements = newElements
# Check if there's enough room to use two columns.
rightSideLabels = False
padding = 5
longestName = sorted([e[0] for e in elements],key=len)[-1]
testSizeName = longestName + " " + longestName # Double it to check if there's enough room for 2 columns
testExt = self.getExtents(testSizeName)
testBoxSize = testExt['maxHeight'] - 1
testWidth = testExt['width'] + 2 * (testBoxSize + padding)
if testWidth + 50 < self.width:
rightSideLabels = True
if(self.secondYAxis and rightSideLabels):
extents = self.getExtents(longestName)
padding = 5
boxSize = extents['maxHeight'] - 1
lineHeight = extents['maxHeight'] + 1
labelWidth = extents['width'] + 2 * (boxSize + padding)
columns = max(1, math.floor( (self.width - self.area['xmin']) / labelWidth ))
numRight = len([name for (name,color,rightSide) in elements if rightSide])
numberOfLines = max(len(elements) - numRight, numRight)
columns = math.floor(columns / 2.0)
if columns < 1: columns = 1
legendHeight = numberOfLines * (lineHeight + padding)
self.area['ymax'] -= legendHeight #scoot the drawing area up to fit the legend
self.ctx.set_line_width(1.0)
x = self.area['xmin']
y = self.area['ymax'] + (2 * padding)
n = 0
xRight = self.area['xmax'] - self.area['xmin']
yRight = y
nRight = 0
for (name,color,rightSide) in elements:
self.setColor( color )
if rightSide:
nRight += 1
self.drawRectangle(xRight - padding,yRight,boxSize,boxSize)
self.setColor( 'darkgrey' )
self.drawRectangle(xRight - padding,yRight,boxSize,boxSize,fill=False)
self.setColor( self.foregroundColor )
self.drawText(name, xRight - boxSize, yRight, align='right')
xRight -= labelWidth
if nRight % columns == 0:
xRight = self.area['xmax'] - self.area['xmin']
yRight += lineHeight
else:
n += 1
self.drawRectangle(x,y,boxSize,boxSize)
self.setColor( 'darkgrey' )
self.drawRectangle(x,y,boxSize,boxSize,fill=False)
self.setColor( self.foregroundColor )
self.drawText(name, x + boxSize + padding, y, align='left')
x += labelWidth
if n % columns == 0:
x = self.area['xmin']
y += lineHeight
else:
extents = self.getExtents(longestName)
boxSize = extents['maxHeight'] - 1
lineHeight = extents['maxHeight'] + 1
labelWidth = extents['width'] + 2 * (boxSize + padding)
columns = math.floor( self.width / labelWidth )
if columns < 1: columns = 1
numberOfLines = math.ceil( float(len(elements)) / columns )
legendHeight = numberOfLines * (lineHeight + padding)
self.area['ymax'] -= legendHeight #scoot the drawing area up to fit the legend
self.ctx.set_line_width(1.0)
x = self.area['xmin']
y = self.area['ymax'] + (2 * padding)
for i,(name,color,rightSide) in enumerate(elements):
if rightSide:
self.setColor( color )
self.drawRectangle(x + labelWidth + padding,y,boxSize,boxSize)
self.setColor( 'darkgrey' )
self.drawRectangle(x + labelWidth + padding,y,boxSize,boxSize,fill=False)
self.setColor( self.foregroundColor )
self.drawText(name, x + labelWidth, y, align='right')
x += labelWidth
else:
self.setColor( color )
self.drawRectangle(x,y,boxSize,boxSize)
self.setColor( 'darkgrey' )
self.drawRectangle(x,y,boxSize,boxSize,fill=False)
self.setColor( self.foregroundColor )
self.drawText(name, x + boxSize + padding, y, align='left')
x += labelWidth
if (i + 1) % columns == 0:
x = self.area['xmin']
y += lineHeight
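  # encodeHeader embeds an invisible marker path into the SVG output: it starts a
  # path at (-88,-88), outside the canvas and in the background color, and encodes
  # each character of the section name as a line_to(-ord(char), -i-1) segment.
  # output() later finds these paths with a regex and rewrites them into
  # <g class="..."> wrappers, so the SVG can be post-processed per section
  # (title, legend, axes, grid, lines).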
def encodeHeader(self,text):
self.ctx.save()
self.setColor( self.backgroundColor )
self.ctx.move_to(-88,-88) # identifier
for i, char in enumerate(text):
self.ctx.line_to(-ord(char), -i-1)
self.ctx.stroke()
self.ctx.restore()
def loadTemplate(self,template):
conf = SafeConfigParser()
if conf.read(settings.GRAPHTEMPLATES_CONF):
defaults = dict( conf.items('default') )
if template in conf.sections():
opts = dict( conf.items(template) )
else:
opts = defaults
else:
opts = defaults = defaultGraphOptions
self.defaultBackground = opts.get('background', defaults['background'])
self.defaultForeground = opts.get('foreground', defaults['foreground'])
self.defaultMajorGridLineColor = opts.get('majorline', defaults['majorline'])
self.defaultMinorGridLineColor = opts.get('minorline', defaults['minorline'])
self.defaultColorList = [c.strip() for c in opts.get('linecolors', defaults['linecolors']).split(',')]
fontName = opts.get('fontname', defaults['fontname'])
fontSize = float( opts.get('fontsize', defaults['fontsize']) )
fontBold = opts.get('fontbold', defaults['fontbold']).lower() == 'true'
fontItalic = opts.get('fontitalic', defaults['fontitalic']).lower() == 'true'
self.defaultFontParams = {
'name' : self.params.get('fontName',fontName),
'size' : int( self.params.get('fontSize',fontSize) ),
'bold' : self.params.get('fontBold',fontBold),
'italic' : self.params.get('fontItalic',fontItalic),
}
def output(self, fileObj):
if self.outputFormat == 'png':
self.surface.write_to_png(fileObj)
else:
metaData = {
'x': {
'start': self.startTime,
'end': self.endTime
},
'options': {
'lineWidth': self.lineWidth
},
'font': self.defaultFontParams,
'area': self.area,
'series': []
}
if not self.secondYAxis:
metaData['y'] = {
'top': self.yTop,
'bottom': self.yBottom,
'step': self.yStep,
'labels': self.yLabels,
'labelValues': self.yLabelValues
}
for series in self.data:
if 'stacked' not in series.options:
metaData['series'].append({
'name': series.name,
'start': series.start,
'end': series.end,
'step': series.step,
'valuesPerPoint': series.valuesPerPoint,
'color': series.color,
'data': series,
'options': series.options
})
self.surface.finish()
svgData = self.surfaceData.getvalue()
self.surfaceData.close()
svgData = svgData.replace('pt"', 'px"', 2) # we expect height/width in pixels, not points
svgData = svgData.replace('</svg>\n', '', 1)
svgData = svgData.replace('</defs>\n<g', '</defs>\n<g class="graphite"', 1)
# We encode headers using special paths with d^="M -88 -88"
# Find these, and turn them into <g> wrappers instead
def onHeaderPath(match):
name = ''
for char in re.findall(r'L -(\d+) -\d+', match.group(1)):
name += chr(int(char))
return '</g><g data-header="true" class="%s">' % name
svgData = re.sub(r'<path.+?d="M -88 -88 (.+?)"/>', onHeaderPath, svgData)
# Replace the first </g><g> with <g>, and close out the last </g> at the end
svgData = svgData.replace('</g><g data-header','<g data-header',1) + "</g>"
svgData = svgData.replace(' data-header="true"','')
fileObj.write(svgData)
fileObj.write("""<script>
<![CDATA[
metadata = %s
]]>
</script>
</svg>""" % json.dumps(metaData))
class LineGraph(Graph):
customizable = Graph.customizable + \
('title','vtitle','lineMode','lineWidth','hideLegend', \
'hideAxes','minXStep','hideGrid','majorGridLineColor', \
'minorGridLineColor','thickness','min','max', \
'graphOnly','yMin','yMax','yLimit','yStep','areaMode', \
'areaAlpha','drawNullAsZero','tz', 'yAxisSide','pieMode', \
'yUnitSystem', 'logBase','yMinLeft','yMinRight','yMaxLeft', \
'yMaxRight', 'yLimitLeft', 'yLimitRight', 'yStepLeft', \
'yStepRight', 'rightWidth', 'rightColor', 'rightDashed', \
'leftWidth', 'leftColor', 'leftDashed', 'xFormat', 'minorY', \
'hideYAxis', 'uniqueLegend', 'vtitleRight', 'yDivisors')
validLineModes = ('staircase','slope','connected')
validAreaModes = ('none','first','all','stacked')
validPieModes = ('maximum', 'minimum', 'average')
def drawGraph(self,**params):
# Make sure we've got datapoints to draw
if self.data:
startTime = min([series.start for series in self.data])
endTime = max([series.end for series in self.data])
timeRange = endTime - startTime
else:
timeRange = None
if not timeRange:
x = self.width / 2
y = self.height / 2
self.setColor('red')
self.setFont(size=math.log(self.width * self.height) )
self.drawText("No Data", x, y, align='center')
return
# Determine if we're doing a 2 y-axis graph.
for series in self.data:
if 'secondYAxis' in series.options:
self.dataRight.append(series)
else:
self.dataLeft.append(series)
if len(self.dataRight) > 0:
self.secondYAxis = True
    #API compatibility hacks
if params.get('graphOnly',False):
params['hideLegend'] = True
params['hideGrid'] = True
params['hideAxes'] = True
params['hideYAxis'] = False
params['yAxisSide'] = 'left'
params['title'] = ''
params['vtitle'] = ''
params['margin'] = 0
params['tz'] = ''
self.margin = 0
self.area['xmin'] = 0
self.area['xmax'] = self.width
self.area['ymin'] = 0
self.area['ymax'] = self.height
if 'yMin' not in params and 'min' in params:
params['yMin'] = params['min']
if 'yMax' not in params and 'max' in params:
params['yMax'] = params['max']
if 'lineWidth' not in params and 'thickness' in params:
params['lineWidth'] = params['thickness']
if 'yAxisSide' not in params:
params['yAxisSide'] = 'left'
if 'yUnitSystem' not in params:
params['yUnitSystem'] = 'si'
else:
params['yUnitSystem'] = str(params['yUnitSystem']).lower()
if params['yUnitSystem'] not in UnitSystems.keys():
params['yUnitSystem'] = 'si'
self.params = params
# Don't do any of the special right y-axis stuff if we're drawing 2 y-axes.
if self.secondYAxis:
params['yAxisSide'] = 'left'
# When Y Axis is labeled on the right, we subtract x-axis positions from the max,
# instead of adding to the minimum
if self.params.get('yAxisSide') == 'right':
self.margin = self.width
#Now to setup our LineGraph specific options
self.lineWidth = float( params.get('lineWidth', 1.2) )
self.lineMode = params.get('lineMode','slope').lower()
assert self.lineMode in self.validLineModes, "Invalid line mode!"
self.areaMode = params.get('areaMode','none').lower()
assert self.areaMode in self.validAreaModes, "Invalid area mode!"
self.pieMode = params.get('pieMode', 'maximum').lower()
assert self.pieMode in self.validPieModes, "Invalid pie mode!"
# Line mode slope does not work (or even make sense) for series that have
# only one datapoint. So if any series have one datapoint we force staircase mode.
if self.lineMode == 'slope':
for series in self.data:
if len(series) == 1:
self.lineMode = 'staircase'
break
if self.secondYAxis:
for series in self.data:
if 'secondYAxis' in series.options:
if 'rightWidth' in params:
series.options['lineWidth'] = params['rightWidth']
if 'rightDashed' in params:
series.options['dashed'] = params['rightDashed']
if 'rightColor' in params:
series.color = params['rightColor']
else:
if 'leftWidth' in params:
series.options['lineWidth'] = params['leftWidth']
if 'leftDashed' in params:
series.options['dashed'] = params['leftDashed']
if 'leftColor' in params:
series.color = params['leftColor']
for series in self.data:
if not hasattr(series, 'color'):
series.color = self.colors.next()
titleSize = self.defaultFontParams['size'] + math.floor( math.log(self.defaultFontParams['size']) )
self.setFont( size=titleSize )
self.setColor( self.foregroundColor )
if params.get('title'):
self.drawTitle( str(params['title']) )
if params.get('vtitle'):
self.drawVTitle( str(params['vtitle']) )
if self.secondYAxis and params.get('vtitleRight'):
self.drawVTitle( str(params['vtitleRight']), rightAlign=True )
self.setFont()
if not params.get('hideLegend', len(self.data) > settings.LEGEND_MAX_ITEMS):
elements = [ (series.name,series.color,series.options.get('secondYAxis')) for series in self.data if series.name ]
self.drawLegend(elements, params.get('uniqueLegend', False))
#Setup axes, labels, and grid
#First we adjust the drawing area size to fit X-axis labels
if not self.params.get('hideAxes',False):
self.area['ymax'] -= self.getExtents()['maxAscent'] * 2
self.startTime = min([series.start for series in self.data])
if self.lineMode == 'staircase':
self.endTime = max([series.end for series in self.data])
else:
self.endTime = max([(series.end - series.step) for series in self.data])
self.timeRange = self.endTime - self.startTime
#Now we consolidate our data points to fit in the currently estimated drawing area
self.consolidateDataPoints()
self.encodeHeader('axes')
    #Now it's time to fully configure the Y-axis and determine the space required for Y-axis labels
#Since we'll probably have to squeeze the drawing area to fit the Y labels, we may need to
#reconsolidate our data points, which in turn means re-scaling the Y axis, this process will
#repeat until we have accurate Y labels and enough space to fit our data points
currentXMin = self.area['xmin']
currentXMax = self.area['xmax']
if self.secondYAxis:
self.setupTwoYAxes()
else:
self.setupYAxis()
while currentXMin != self.area['xmin'] or currentXMax != self.area['xmax']: #see if the Y-labels require more space
self.consolidateDataPoints() #this can cause the Y values to change
currentXMin = self.area['xmin'] #so let's keep track of the previous Y-label space requirements
currentXMax = self.area['xmax']
if self.secondYAxis: #and recalculate their new requirements
self.setupTwoYAxes()
else:
self.setupYAxis()
#Now that our Y-axis is finalized, let's determine our X labels (this won't affect the drawing area)
self.setupXAxis()
if not self.params.get('hideAxes',False):
self.drawLabels()
if not self.params.get('hideGrid',False): #hideAxes implies hideGrid
self.encodeHeader('grid')
self.drawGridLines()
#Finally, draw the graph lines
self.encodeHeader('lines')
self.drawLines()
def drawVTitle(self, text, rightAlign=False):
lineHeight = self.getExtents()['maxHeight']
if rightAlign:
self.encodeHeader('vtitleRight')
x = self.area['xmax'] - lineHeight
y = self.height / 2
for line in text.split('\n'):
self.drawText(line, x, y, align='center', valign='baseline', rotate=90)
x -= lineHeight
self.area['xmax'] = x - self.margin - lineHeight
else:
self.encodeHeader('vtitle')
x = self.area['xmin'] + lineHeight
y = self.height / 2
for line in text.split('\n'):
self.drawText(line, x, y, align='center', valign='baseline', rotate=270)
x += lineHeight
self.area['xmin'] = x + self.margin + lineHeight
def getYCoord(self, value, side=None):
if "left" == side:
yLabelValues = self.yLabelValuesL
yTop = self.yTopL
yBottom = self.yBottomL
elif "right" == side:
yLabelValues = self.yLabelValuesR
yTop = self.yTopR
yBottom = self.yBottomR
else:
yLabelValues = self.yLabelValues
yTop = self.yTop
yBottom = self.yBottom
try:
highestValue = max(yLabelValues)
lowestValue = min(yLabelValues)
except ValueError:
highestValue = yTop
lowestValue = yBottom
pixelRange = self.area['ymax'] - self.area['ymin']
relativeValue = value - lowestValue
valueRange = highestValue - lowestValue
if self.logBase:
if value <= 0:
return None
relativeValue = math.log(value, self.logBase) - math.log(lowestValue, self.logBase)
valueRange = math.log(highestValue, self.logBase) - math.log(lowestValue, self.logBase)
pixelToValueRatio = pixelRange / valueRange
valueInPixels = pixelToValueRatio * relativeValue
return self.area['ymax'] - valueInPixels
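  # Illustrative getYCoord walk-through (hypothetical numbers): with yLabelValues
  # spanning 0..150 and area ymin=10 / ymax=190, pixelRange is 180 and the ratio is
  # 180/150 = 1.2 px per unit, so value=75 maps to 190 - 75*1.2 = 100 px from the top.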
def drawLines(self, width=None, dash=None, linecap='butt', linejoin='miter'):
if not width: width = self.lineWidth
self.ctx.set_line_width(width)
originalWidth = width
width = float(int(width) % 2) / 2
if dash:
self.ctx.set_dash(dash,1)
else:
self.ctx.set_dash([],0)
self.ctx.set_line_cap({
'butt' : cairo.LINE_CAP_BUTT,
'round' : cairo.LINE_CAP_ROUND,
'square' : cairo.LINE_CAP_SQUARE,
}[linecap])
self.ctx.set_line_join({
'miter' : cairo.LINE_JOIN_MITER,
'round' : cairo.LINE_JOIN_ROUND,
'bevel' : cairo.LINE_JOIN_BEVEL,
}[linejoin])
# stack the values
if self.areaMode == 'stacked' and not self.secondYAxis: #TODO Allow stacked area mode with secondYAxis
total = []
for series in self.data:
for i in range(len(series)):
if len(total) <= i: total.append(0)
if series[i] is not None:
original = series[i]
series[i] += total[i]
total[i] += original
    # check whether there is a stacked metric
singleStacked = False
for series in self.data:
if 'stacked' in series.options:
singleStacked = True
if singleStacked:
self.data = sort_stacked(self.data)
# apply stacked setting on series based on areaMode
if self.areaMode == 'first':
self.data[0].options['stacked'] = True
elif self.areaMode != 'none':
for series in self.data:
series.options['stacked'] = True
# apply alpha channel and create separate stroke series
if self.params.get('areaAlpha'):
try:
alpha = float(self.params['areaAlpha'])
except ValueError:
alpha = 0.5
pass
strokeSeries = []
for series in self.data:
if 'stacked' in series.options:
series.options['alpha'] = alpha
newSeries = TimeSeries(series.name, series.start, series.end, series.step*series.valuesPerPoint, [x for x in series])
newSeries.xStep = series.xStep
newSeries.color = series.color
if 'secondYAxis' in series.options:
newSeries.options['secondYAxis'] = True
strokeSeries.append(newSeries)
self.data += strokeSeries
# setup the clip region
self.ctx.set_line_width(1.0)
self.ctx.rectangle(self.area['xmin'], self.area['ymin'], self.area['xmax'] - self.area['xmin'], self.area['ymax'] - self.area['ymin'])
self.ctx.clip()
self.ctx.set_line_width(originalWidth)
# save clip to restore once stacked areas are drawn
self.ctx.save()
clipRestored = False
for series in self.data:
if 'stacked' not in series.options:
# stacked areas are always drawn first. if this series is not stacked, we finished stacking.
# reset the clip region so lines can show up on top of the stacked areas.
if not clipRestored:
clipRestored = True
self.ctx.restore()
if 'lineWidth' in series.options:
self.ctx.set_line_width(series.options['lineWidth'])
if 'dashed' in series.options:
self.ctx.set_dash([ series.options['dashed'] ], 1)
else:
self.ctx.set_dash([], 0)
# Shift the beginning of drawing area to the start of the series if the
# graph itself has a larger range
missingPoints = (series.start - self.startTime) / series.step
startShift = series.xStep * (missingPoints / series.valuesPerPoint)
x = float(self.area['xmin']) + startShift + (self.lineWidth / 2.0)
y = float(self.area['ymin'])
startX = x
if series.options.get('invisible'):
self.setColor( series.color, 0, True )
else:
self.setColor( series.color, series.options.get('alpha') or 1.0 )
fromNone = True
for value in series:
if value != value: # convert NaN to None
value = None
if value is None and self.params.get('drawNullAsZero'):
value = 0.0
if value is None:
if not fromNone:
self.ctx.line_to(x, y)
if 'stacked' in series.options: #Close off and fill area before unknown interval
self.fillAreaAndClip(x, y, startX)
x += series.xStep
fromNone = True
else:
if self.secondYAxis:
if 'secondYAxis' in series.options:
y = self.getYCoord(value, "right")
else:
y = self.getYCoord(value, "left")
else:
y = self.getYCoord(value)
if y is None:
value = None
elif y < 0:
y = 0
if 'drawAsInfinite' in series.options and value > 0:
self.ctx.move_to(x, self.area['ymax'])
self.ctx.line_to(x, self.area['ymin'])
self.ctx.stroke()
x += series.xStep
continue
if fromNone:
startX = x
if self.lineMode == 'staircase':
if fromNone:
self.ctx.move_to(x, y)
else:
self.ctx.line_to(x, y)
x += series.xStep
self.ctx.line_to(x, y)
elif self.lineMode == 'slope':
if fromNone:
self.ctx.move_to(x, y)
self.ctx.line_to(x, y)
x += series.xStep
elif self.lineMode == 'connected':
self.ctx.line_to(x, y)
x += series.xStep
fromNone = False
if 'stacked' in series.options:
self.fillAreaAndClip(x-series.xStep, y, startX)
else:
self.ctx.stroke()
self.ctx.set_line_width(originalWidth) # return to the original line width
if 'dash' in series.options: # if we changed the dash setting before, change it back now
if dash:
self.ctx.set_dash(dash,1)
else:
self.ctx.set_dash([],0)
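  # fillAreaAndClip closes the current line path down to the bottom of the plot and
  # fills it, then rebuilds a path that covers the rest of the drawing area and
  # clips to it, so that series drawn later cannot paint over the area just filled.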
def fillAreaAndClip(self, x, y, startX=None):
startX = (startX or self.area['xmin'])
pattern = self.ctx.copy_path()
self.ctx.line_to(x, self.area['ymax']) # bottom endX
self.ctx.line_to(startX, self.area['ymax']) # bottom startX
self.ctx.close_path()
self.ctx.fill()
self.ctx.append_path(pattern)
self.ctx.line_to(x, self.area['ymax']) # bottom endX
self.ctx.line_to(self.area['xmax'], self.area['ymax']) # bottom right
self.ctx.line_to(self.area['xmax'], self.area['ymin']) # top right
self.ctx.line_to(self.area['xmin'], self.area['ymin']) # top left
self.ctx.line_to(self.area['xmin'], self.area['ymax']) # bottom left
self.ctx.line_to(startX, self.area['ymax']) # bottom startX
self.ctx.close_path()
self.ctx.clip()
def consolidateDataPoints(self):
numberOfPixels = self.graphWidth = self.area['xmax'] - self.area['xmin'] - (self.lineWidth + 1)
for series in self.data:
numberOfDataPoints = self.timeRange/series.step
minXStep = float( self.params.get('minXStep',1.0) )
divisor = self.timeRange / series.step
bestXStep = numberOfPixels / divisor
if bestXStep < minXStep:
drawableDataPoints = int( numberOfPixels / minXStep )
pointsPerPixel = math.ceil( float(numberOfDataPoints) / float(drawableDataPoints) )
series.consolidate(pointsPerPixel)
series.xStep = (numberOfPixels * pointsPerPixel) / numberOfDataPoints
else:
series.xStep = bestXStep
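  # Illustrative consolidation numbers (hypothetical): an 86400s timeRange with a
  # 60s series step gives 1440 datapoints; at numberOfPixels=600 the bestXStep is
  # 600/1440 ~= 0.42 px, below the default minXStep of 1.0, so pointsPerPixel is
  # ceil(1440/600) = 3, the series is consolidated 3 points at a time and its xStep
  # becomes (600*3)/1440 = 1.25 px.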
def setupYAxis(self):
seriesWithMissingValues = [ series for series in self.data if None in series ]
if self.params.get('drawNullAsZero') and seriesWithMissingValues:
yMinValue = 0.0
else:
yMinValue = safeMin( [safeMin(series) for series in self.data if not series.options.get('drawAsInfinite')] )
if self.areaMode == 'stacked':
length = safeMin( [len(series) for series in self.data if not series.options.get('drawAsInfinite')] )
sumSeries = []
for i in xrange(0, length):
sumSeries.append( safeSum( [series[i] for series in self.data if not series.options.get('drawAsInfinite')] ) )
yMaxValue = safeMax( sumSeries )
else:
yMaxValue = safeMax( [safeMax(series) for series in self.data if not series.options.get('drawAsInfinite')] )
if yMinValue is None:
yMinValue = 0.0
if yMaxValue is None:
yMaxValue = 1.0
if 'yMax' in self.params:
if self.params['yMax'] != 'max':
yMaxValue = self.params['yMax']
if 'yLimit' in self.params and self.params['yLimit'] < yMaxValue:
yMaxValue = self.params['yLimit']
if 'yMin' in self.params:
yMinValue = self.params['yMin']
if yMaxValue <= yMinValue:
yMaxValue = yMinValue + 1
yVariance = yMaxValue - yMinValue
if 'yUnitSystem' in self.params and self.params['yUnitSystem'] == 'binary':
order = math.log(yVariance, 2)
orderFactor = 2 ** math.floor(order)
else:
order = math.log10(yVariance)
orderFactor = 10 ** math.floor(order)
v = yVariance / orderFactor #we work with a scaled down yVariance for simplicity
yDivisors = str(self.params.get('yDivisors', '4,5,6'))
yDivisors = [int(d) for d in yDivisors.split(',')]
prettyValues = (0.1,0.2,0.25,0.5,1.0,1.2,1.25,1.5,2.0,2.25,2.5)
divisorInfo = []
for d in yDivisors:
q = v / d #our scaled down quotient, must be in the open interval (0,10)
p = closest(q, prettyValues) #the prettyValue our quotient is closest to
divisorInfo.append( ( p,abs(q-p)) ) #make a list so we can find the prettiest of the pretty
divisorInfo.sort(key=lambda i: i[1]) #sort our pretty values by "closeness to a factor"
prettyValue = divisorInfo[0][0] #our winner! Y-axis will have labels placed at multiples of our prettyValue
self.yStep = prettyValue * orderFactor #scale it back up to the order of yVariance
if 'yStep' in self.params:
self.yStep = self.params['yStep']
self.yBottom = self.yStep * math.floor( yMinValue / self.yStep ) #start labels at the greatest multiple of yStep <= yMinValue
self.yTop = self.yStep * math.ceil( yMaxValue / self.yStep ) #Extend the top of our graph to the lowest yStep multiple >= yMaxValue
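    # Worked example with made-up numbers: yMinValue=0 and yMaxValue=134 give
    # yVariance=134, so orderFactor=100 and v=1.34; for divisors 4, 5 and 6 the
    # quotients are 0.335, 0.268 and 0.223, whose nearest pretty values are 0.25,
    # 0.25 and 0.2, and the best fit is 0.25 (off by only 0.018), so yStep=25,
    # yBottom=0 and yTop=150.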
if self.logBase and yMinValue > 0:
self.yBottom = math.pow(self.logBase, math.floor(math.log(yMinValue, self.logBase)))
self.yTop = math.pow(self.logBase, math.ceil(math.log(yMaxValue, self.logBase)))
elif self.logBase and yMinValue <= 0:
raise GraphError('Logarithmic scale specified with a dataset with a '
'minimum value less than or equal to zero')
if 'yMax' in self.params:
if self.params['yMax'] == 'max':
scale = 1.0 * yMaxValue / self.yTop
self.yStep *= (scale - 0.000001)
self.yTop = yMaxValue
else:
self.yTop = self.params['yMax'] * 1.0
if 'yMin' in self.params:
self.yBottom = self.params['yMin']
self.ySpan = self.yTop - self.yBottom
if self.ySpan == 0:
self.yTop += 1
self.ySpan += 1
self.graphHeight = self.area['ymax'] - self.area['ymin']
self.yScaleFactor = float(self.graphHeight) / float(self.ySpan)
if not self.params.get('hideAxes',False):
#Create and measure the Y-labels
def makeLabel(yValue):
yValue, prefix = format_units(yValue, self.yStep,
system=self.params.get('yUnitSystem'))
ySpan, spanPrefix = format_units(self.ySpan, self.yStep,
system=self.params.get('yUnitSystem'))
if yValue < 0.1:
return "%g %s" % (float(yValue), prefix)
elif yValue < 1.0:
return "%.2f %s" % (float(yValue), prefix)
if ySpan > 10 or spanPrefix != prefix:
if type(yValue) is float:
return "%.1f %s" % (float(yValue), prefix)
else:
return "%d %s " % (int(yValue), prefix)
elif ySpan > 3:
return "%.1f %s " % (float(yValue), prefix)
elif ySpan > 0.1:
return "%.2f %s " % (float(yValue), prefix)
else:
return "%g %s" % (float(yValue), prefix)
self.yLabelValues = self.getYLabelValues(self.yBottom, self.yTop, self.yStep)
self.yLabels = map(makeLabel,self.yLabelValues)
self.yLabelWidth = max([self.getExtents(label)['width'] for label in self.yLabels])
if not self.params.get('hideYAxis'):
if self.params.get('yAxisSide') == 'left': #scoot the graph over to the left just enough to fit the y-labels
xMin = self.margin + (self.yLabelWidth * 1.02)
if self.area['xmin'] < xMin:
self.area['xmin'] = xMin
else: #scoot the graph over to the right just enough to fit the y-labels
xMin = 0
xMax = self.margin - (self.yLabelWidth * 1.02)
if self.area['xmax'] >= xMax:
self.area['xmax'] = xMax
else:
self.yLabelValues = []
self.yLabels = []
self.yLabelWidth = 0.0
def setupTwoYAxes(self):
# I am Lazy.
Ldata = []
Rdata = []
seriesWithMissingValuesL = []
seriesWithMissingValuesR = []
self.yLabelsL = []
self.yLabelsR = []
Ldata += self.dataLeft
Rdata += self.dataRight
# Lots of coupled lines ahead. Will operate on Left data first then Right.
seriesWithMissingValuesL = [ series for series in Ldata if None in series ]
seriesWithMissingValuesR = [ series for series in Rdata if None in series ]
if self.params.get('drawNullAsZero') and seriesWithMissingValuesL:
yMinValueL = 0.0
else:
yMinValueL = safeMin( [safeMin(series) for series in Ldata if not series.options.get('drawAsInfinite')] )
if self.params.get('drawNullAsZero') and seriesWithMissingValuesR:
yMinValueR = 0.0
else:
yMinValueR = safeMin( [safeMin(series) for series in Rdata if not series.options.get('drawAsInfinite')] )
if self.areaMode == 'stacked':
yMaxValueL = safeSum( [safeMax(series) for series in Ldata] )
yMaxValueR = safeSum( [safeMax(series) for series in Rdata] )
else:
yMaxValueL = safeMax( [safeMax(series) for series in Ldata] )
yMaxValueR = safeMax( [safeMax(series) for series in Rdata] )
if yMinValueL is None:
yMinValueL = 0.0
if yMinValueR is None:
yMinValueR = 0.0
if yMaxValueL is None:
yMaxValueL = 1.0
if yMaxValueR is None:
yMaxValueR = 1.0
if 'yMaxLeft' in self.params:
yMaxValueL = self.params['yMaxLeft']
if 'yMaxRight' in self.params:
yMaxValueR = self.params['yMaxRight']
if 'yLimitLeft' in self.params and self.params['yLimitLeft'] < yMaxValueL:
yMaxValueL = self.params['yLimitLeft']
if 'yLimitRight' in self.params and self.params['yLimitRight'] < yMaxValueR:
yMaxValueR = self.params['yLimitRight']
if 'yMinLeft' in self.params:
yMinValueL = self.params['yMinLeft']
if 'yMinRight' in self.params:
yMinValueR = self.params['yMinRight']
if yMaxValueL <= yMinValueL:
yMaxValueL = yMinValueL + 1
if yMaxValueR <= yMinValueR:
yMaxValueR = yMinValueR + 1
yVarianceL = yMaxValueL - yMinValueL
yVarianceR = yMaxValueR - yMinValueR
orderL = math.log10(yVarianceL)
orderR = math.log10(yVarianceR)
orderFactorL = 10 ** math.floor(orderL)
orderFactorR = 10 ** math.floor(orderR)
vL = yVarianceL / orderFactorL #we work with a scaled down yVariance for simplicity
vR = yVarianceR / orderFactorR
yDivisors = str(self.params.get('yDivisors', '4,5,6'))
yDivisors = [int(d) for d in yDivisors.split(',')]
prettyValues = (0.1,0.2,0.25,0.5,1.0,1.2,1.25,1.5,2.0,2.25,2.5)
divisorInfoL = []
divisorInfoR = []
for d in yDivisors:
qL = vL / d #our scaled down quotient, must be in the open interval (0,10)
qR = vR / d
pL = closest(qL, prettyValues) #the prettyValue our quotient is closest to
pR = closest(qR, prettyValues)
divisorInfoL.append( ( pL,abs(qL-pL)) ) #make a list so we can find the prettiest of the pretty
divisorInfoR.append( ( pR,abs(qR-pR)) )
divisorInfoL.sort(key=lambda i: i[1]) #sort our pretty values by "closeness to a factor"
divisorInfoR.sort(key=lambda i: i[1])
prettyValueL = divisorInfoL[0][0] #our winner! Y-axis will have labels placed at multiples of our prettyValue
prettyValueR = divisorInfoR[0][0]
self.yStepL = prettyValueL * orderFactorL #scale it back up to the order of yVariance
self.yStepR = prettyValueR * orderFactorR
if 'yStepLeft' in self.params:
self.yStepL = self.params['yStepLeft']
if 'yStepRight' in self.params:
self.yStepR = self.params['yStepRight']
self.yBottomL = self.yStepL * math.floor( yMinValueL / self.yStepL ) #start labels at the greatest multiple of yStepL <= yMinValue
self.yBottomR = self.yStepR * math.floor( yMinValueR / self.yStepR ) #start labels at the greatest multiple of yStepR <= yMinValue
self.yTopL = self.yStepL * math.ceil( yMaxValueL / self.yStepL ) #Extend the top of our graph to the lowest yStepL multiple >= yMaxValue
self.yTopR = self.yStepR * math.ceil( yMaxValueR / self.yStepR ) #Extend the top of our graph to the lowest yStepR multiple >= yMaxValue
if self.logBase and yMinValueL > 0 and yMinValueR > 0: #TODO: Allow separate bases for L & R Axes.
self.yBottomL = math.pow(self.logBase, math.floor(math.log(yMinValueL, self.logBase)))
self.yTopL = math.pow(self.logBase, math.ceil(math.log(yMaxValueL, self.logBase)))
self.yBottomR = math.pow(self.logBase, math.floor(math.log(yMinValueR, self.logBase)))
self.yTopR = math.pow(self.logBase, math.ceil(math.log(yMaxValueR, self.logBase)))
elif self.logBase and ( yMinValueL <= 0 or yMinValueR <=0 ) :
raise GraphError('Logarithmic scale specified with a dataset with a '
'minimum value less than or equal to zero')
if 'yMaxLeft' in self.params:
self.yTopL = self.params['yMaxLeft']
if 'yMaxRight' in self.params:
self.yTopR = self.params['yMaxRight']
if 'yMinLeft' in self.params:
self.yBottomL = self.params['yMinLeft']
if 'yMinRight' in self.params:
self.yBottomR = self.params['yMinRight']
self.ySpanL = self.yTopL - self.yBottomL
self.ySpanR = self.yTopR - self.yBottomR
if self.ySpanL == 0:
self.yTopL += 1
self.ySpanL += 1
if self.ySpanR == 0:
self.yTopR += 1
self.ySpanR += 1
self.graphHeight = self.area['ymax'] - self.area['ymin']
self.yScaleFactorL = float(self.graphHeight) / float(self.ySpanL)
self.yScaleFactorR = float(self.graphHeight) / float(self.ySpanR)
#Create and measure the Y-labels
def makeLabel(yValue, yStep=None, ySpan=None):
yValue, prefix = format_units(yValue,yStep,system=self.params.get('yUnitSystem'))
ySpan, spanPrefix = format_units(ySpan,yStep,system=self.params.get('yUnitSystem'))
if yValue < 0.1:
return "%g %s" % (float(yValue), prefix)
elif yValue < 1.0:
return "%.2f %s" % (float(yValue), prefix)
if ySpan > 10 or spanPrefix != prefix:
if type(yValue) is float:
return "%.1f %s " % (float(yValue), prefix)
else:
return "%d %s " % (int(yValue), prefix)
elif ySpan > 3:
return "%.1f %s " % (float(yValue), prefix)
elif ySpan > 0.1:
return "%.2f %s " % (float(yValue), prefix)
else:
return "%g %s" % (float(yValue), prefix)
self.yLabelValuesL = self.getYLabelValues(self.yBottomL, self.yTopL, self.yStepL)
self.yLabelValuesR = self.getYLabelValues(self.yBottomR, self.yTopR, self.yStepR)
    for value in self.yLabelValuesL: #can't use map() here because self.yStepL and self.ySpanL are not iterable
self.yLabelsL.append( makeLabel(value,self.yStepL,self.ySpanL))
for value in self.yLabelValuesR:
self.yLabelsR.append( makeLabel(value,self.yStepR,self.ySpanR) )
self.yLabelWidthL = max([self.getExtents(label)['width'] for label in self.yLabelsL])
self.yLabelWidthR = max([self.getExtents(label)['width'] for label in self.yLabelsR])
#scoot the graph over to the left just enough to fit the y-labels
#xMin = self.margin + self.margin + (self.yLabelWidthL * 1.02)
xMin = self.margin + (self.yLabelWidthL * 1.02)
if self.area['xmin'] < xMin:
self.area['xmin'] = xMin
#scoot the graph over to the right just enough to fit the y-labels
xMax = self.width - (self.yLabelWidthR * 1.02)
if self.area['xmax'] >= xMax:
self.area['xmax'] = xMax
def getYLabelValues(self, minYValue, maxYValue, yStep=None):
vals = []
if self.logBase:
vals = list( logrange(self.logBase, minYValue, maxYValue) )
else:
vals = list( frange(minYValue, maxYValue, yStep) )
return vals
def setupXAxis(self):
if self.userTimeZone:
tzinfo = pytz.timezone(self.userTimeZone)
else:
tzinfo = pytz.timezone(settings.TIME_ZONE)
self.start_dt = datetime.fromtimestamp(self.startTime, tzinfo)
self.end_dt = datetime.fromtimestamp(self.endTime, tzinfo)
secondsPerPixel = float(self.timeRange) / float(self.graphWidth)
self.xScaleFactor = float(self.graphWidth) / float(self.timeRange) #pixels per second
potential = [c for c in xAxisConfigs if c['seconds'] <= secondsPerPixel and c.get('maxInterval', self.timeRange + 1) >= self.timeRange]
if potential:
self.xConf = potential[-1]
else:
self.xConf = xAxisConfigs[-1]
self.xLabelStep = self.xConf['labelUnit'] * self.xConf['labelStep']
self.xMinorGridStep = self.xConf['minorGridUnit'] * self.xConf['minorGridStep']
self.xMajorGridStep = self.xConf['majorGridUnit'] * self.xConf['majorGridStep']
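  # setupXAxis example (hypothetical sizes): a one-day timeRange over graphWidth=600px
  # gives secondsPerPixel=144; the last xAxisConfig with seconds <= 144 whose
  # maxInterval covers a day is the seconds=100 entry, so labels and major grid
  # lines fall every 4 hours and minor grid lines every 2 hours.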
def drawLabels(self):
#Draw the Y-labels
if not self.params.get('hideYAxis'):
if not self.secondYAxis:
for value,label in zip(self.yLabelValues,self.yLabels):
if self.params.get('yAxisSide') == 'left':
x = self.area['xmin'] - (self.yLabelWidth * 0.02)
else:
x = self.area['xmax'] + (self.yLabelWidth * 0.02) #Inverted for right side Y Axis
y = self.getYCoord(value)
if y is None:
value = None
elif y < 0:
y = 0
if self.params.get('yAxisSide') == 'left':
self.drawText(label, x, y, align='right', valign='middle')
else:
self.drawText(label, x, y, align='left', valign='middle') #Inverted for right side Y Axis
else: #Draws a right side and a Left side axis
for valueL,labelL in zip(self.yLabelValuesL,self.yLabelsL):
xL = self.area['xmin'] - (self.yLabelWidthL * 0.02)
yL = self.getYCoord(valueL, "left")
if yL is None:
value = None
elif yL < 0:
yL = 0
self.drawText(labelL, xL, yL, align='right', valign='middle')
### Right Side
for valueR,labelR in zip(self.yLabelValuesR,self.yLabelsR):
xR = self.area['xmax'] + (self.yLabelWidthR * 0.02) + 3 #Inverted for right side Y Axis
yR = self.getYCoord(valueR, "right")
if yR is None:
valueR = None
elif yR < 0:
yR = 0
self.drawText(labelR, xR, yR, align='left', valign='middle') #Inverted for right side Y Axis
(dt, x_label_delta) = find_x_times(self.start_dt, self.xConf['labelUnit'], self.xConf['labelStep'])
#Draw the X-labels
xFormat = self.params.get('xFormat', self.xConf['format'])
while dt < self.end_dt:
label = dt.strftime(xFormat)
x = self.area['xmin'] + (toSeconds(dt - self.start_dt) * self.xScaleFactor)
y = self.area['ymax'] + self.getExtents()['maxAscent']
self.drawText(label, x, y, align='center', valign='top')
dt += x_label_delta
def drawGridLines(self):
# Not sure how to handle this for 2 y-axes
# Just using the left side info for the grid.
#Horizontal grid lines
leftSide = self.area['xmin']
rightSide = self.area['xmax']
labels = []
if self.secondYAxis:
labels = self.yLabelValuesL
else:
labels = self.yLabelValues
if self.logBase:
labels.append(self.logBase * max(labels))
for i, value in enumerate(labels):
self.ctx.set_line_width(0.4)
self.setColor( self.params.get('majorGridLineColor',self.defaultMajorGridLineColor) )
if self.secondYAxis:
y = self.getYCoord(value,"left")
else:
y = self.getYCoord(value)
if y is None or y < 0:
continue
self.ctx.move_to(leftSide, y)
self.ctx.line_to(rightSide, y)
self.ctx.stroke()
# draw minor gridlines if this isn't the last label
if self.minorY >= 1 and i < (len(labels) - 1):
# in case graphite supports inverted Y axis now or someday
(valueLower, valueUpper) = sorted((value, labels[i+1]))
        # each minor gridline is 1/(minorY+1) of the major interval away from its neighbors.
# we calculate that distance, for adding to the value in the loop.
distance = ((valueUpper - valueLower) / float(1 + self.minorY))
# starting from the initial valueLower, we add the minor distance
# for each minor gridline that we wish to draw, and then draw it.
for minor in range(self.minorY):
self.ctx.set_line_width(0.3)
self.setColor( self.params.get('minorGridLineColor',self.defaultMinorGridLineColor) )
          # the current minor gridline value is valueLower plus (1+minor) * distance
value = (valueLower + ((1+minor) * distance))
if self.logBase:
yTopFactor = self.logBase * self.logBase
else:
yTopFactor = 1
if self.secondYAxis:
if value >= (yTopFactor * self.yTopL):
continue
else:
if value >= (yTopFactor * self.yTop):
continue
if self.secondYAxis:
y = self.getYCoord(value,"left")
else:
y = self.getYCoord(value)
if y is None or y < 0:
continue
self.ctx.move_to(leftSide, y)
self.ctx.line_to(rightSide, y)
self.ctx.stroke()
#Vertical grid lines
top = self.area['ymin']
bottom = self.area['ymax']
# First we do the minor grid lines (majors will paint over them)
self.ctx.set_line_width(0.25)
self.setColor( self.params.get('minorGridLineColor',self.defaultMinorGridLineColor) )
(dt, x_minor_delta) = find_x_times(self.start_dt, self.xConf['minorGridUnit'], self.xConf['minorGridStep'])
while dt < self.end_dt:
x = self.area['xmin'] + (toSeconds(dt - self.start_dt) * self.xScaleFactor)
if x < self.area['xmax']:
self.ctx.move_to(x, bottom)
self.ctx.line_to(x, top)
self.ctx.stroke()
dt += x_minor_delta
# Now we do the major grid lines
self.ctx.set_line_width(0.33)
self.setColor( self.params.get('majorGridLineColor',self.defaultMajorGridLineColor) )
(dt, x_major_delta) = find_x_times(self.start_dt, self.xConf['majorGridUnit'], self.xConf['majorGridStep'])
while dt < self.end_dt:
x = self.area['xmin'] + (toSeconds(dt - self.start_dt) * self.xScaleFactor)
if x < self.area['xmax']:
self.ctx.move_to(x, bottom)
self.ctx.line_to(x, top)
self.ctx.stroke()
dt += x_major_delta
#Draw side borders for our graph area
self.ctx.set_line_width(0.5)
self.ctx.move_to(self.area['xmax'], bottom)
self.ctx.line_to(self.area['xmax'], top)
self.ctx.move_to(self.area['xmin'], bottom)
self.ctx.line_to(self.area['xmin'], top)
self.ctx.stroke()
class PieGraph(Graph):
customizable = Graph.customizable + \
('title','valueLabels','valueLabelsMin','hideLegend','pieLabels')
validValueLabels = ('none','number','percent')
def drawGraph(self,**params):
self.pieLabels = params.get('pieLabels', 'horizontal')
self.total = sum( [t[1] for t in self.data] )
self.slices = []
for name,value in self.data:
self.slices.append({
'name' : name,
'value' : value,
'percent' : value / self.total,
'color' : self.colors.next(),
})
titleSize = self.defaultFontParams['size'] + math.floor( math.log(self.defaultFontParams['size']) )
self.setFont( size=titleSize )
self.setColor( self.foregroundColor )
if params.get('title'):
self.drawTitle( params['title'] )
self.setFont()
if not params.get('hideLegend',False):
elements = [ (slice['name'],slice['color'],None) for slice in self.slices ]
self.drawLegend(elements)
self.drawSlices()
self.valueLabelsMin = float( params.get('valueLabelsMin',5) )
self.valueLabels = params.get('valueLabels','percent')
assert self.valueLabels in self.validValueLabels, \
"valueLabels=%s must be one of %s" % (self.valueLabels,self.validValueLabels)
if self.valueLabels != 'none':
self.drawLabels()
def drawSlices(self):
theta = 3.0 * math.pi / 2.0
halfX = (self.area['xmax'] - self.area['xmin']) / 2.0
halfY = (self.area['ymax'] - self.area['ymin']) / 2.0
self.x0 = x0 = self.area['xmin'] + halfX
self.y0 = y0 = self.area['ymin'] + halfY
self.radius = radius = min(halfX,halfY) * 0.95
for slice in self.slices:
self.setColor( slice['color'] )
self.ctx.move_to(x0,y0)
phi = theta + (2 * math.pi) * slice['percent']
self.ctx.arc( x0, y0, radius, theta, phi )
self.ctx.line_to(x0,y0)
self.ctx.fill()
slice['midAngle'] = (theta + phi) / 2.0
slice['midAngle'] %= 2.0 * math.pi
theta = phi
def drawLabels(self):
self.setFont()
self.setColor( 'black' )
for slice in self.slices:
if self.valueLabels == 'percent':
if (slice['percent'] * 100.0) < self.valueLabelsMin: continue
label = "%%%.2f" % (slice['percent'] * 100.0)
elif self.valueLabels == 'number':
if slice['value'] < self.valueLabelsMin: continue
if slice['value'] < 10 and slice['value'] != int(slice['value']):
label = "%.2f" % slice['value']
else:
label = str(int(slice['value']))
extents = self.getExtents(label)
theta = slice['midAngle']
x = self.x0 + (self.radius / 2.0 * math.cos(theta))
y = self.y0 + (self.radius / 2.0 * math.sin(theta))
if self.pieLabels == 'rotated':
if theta > (math.pi / 2.0) and theta <= (3.0 * math.pi / 2.0):
theta -= math.pi
self.drawText( label, x, y, align='center', valign='middle', rotate=math.degrees(theta) )
else:
self.drawText( label, x, y, align='center', valign='middle')
GraphTypes = {
'line' : LineGraph,
'pie' : PieGraph,
}
#Convenience functions
def closest(number,neighbors):
distance = None
closestNeighbor = None
for neighbor in neighbors:
d = abs(neighbor - number)
if distance is None or d < distance:
distance = d
closestNeighbor = neighbor
return closestNeighbor
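# For instance, closest(7, (1, 5, 10)) returns 5; on an exact tie the earlier
# neighbor wins, because only a strictly smaller distance replaces the current best.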
def frange(start,end,step):
f = start
while f <= end:
yield f
f += step
# Protect against rounding errors on very small float ranges
if f == start:
yield end
return
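# For instance, list(frange(0, 1.0, 0.25)) yields [0, 0.25, 0.5, 0.75, 1.0]; the
# f == start guard yields end and bails out when the step is too small to change
# the float at all, which would otherwise loop forever.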
def toSeconds(t):
return (t.days * 86400) + t.seconds
def safeMin(args):
args = [arg for arg in args if arg not in (None, INFINITY)]
if args:
return min(args)
def safeMax(args):
args = [arg for arg in args if arg not in (None, INFINITY)]
if args:
return max(args)
def safeSum(values):
return sum([v for v in values if v not in (None, INFINITY)])
def any(args):
for arg in args:
if arg:
return True
return False
def sort_stacked(series_list):
stacked = [s for s in series_list if 'stacked' in s.options]
not_stacked = [s for s in series_list if 'stacked' not in s.options]
return stacked + not_stacked
def format_units(v, step=None, system="si"):
"""Format the given value in standardized units.
``system`` is either 'binary' or 'si'
For more info, see:
http://en.wikipedia.org/wiki/SI_prefix
http://en.wikipedia.org/wiki/Binary_prefix
"""
if step is None:
condition = lambda size: abs(v) >= size
else:
condition = lambda size: abs(v) >= size and step >= size
for prefix, size in UnitSystems[system]:
if condition(size):
v2 = v / size
if (v2 - math.floor(v2)) < 0.00000000001 and v > 1:
v2 = math.floor(v2)
return v2, prefix
if (v - math.floor(v)) < 0.00000000001 and v > 1 :
v = math.floor(v)
return v, ""
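# For instance, format_units(1536, system='binary') returns (1.5, 'Ki') and
# format_units(2000000, system='si') returns (2.0, 'M'); when step is given, a
# prefix is only applied if the step itself is at least that unit's size.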
def find_x_times(start_dt, unit, step):
if unit == SEC:
dt = start_dt.replace(second=start_dt.second - (start_dt.second % step))
x_delta = timedelta(seconds=step)
elif unit == MIN:
dt = start_dt.replace(second=0, minute=start_dt.minute - (start_dt.minute % step))
x_delta = timedelta(minutes=step)
elif unit == HOUR:
dt = start_dt.replace(second=0, minute=0, hour=start_dt.hour - (start_dt.hour % step))
x_delta = timedelta(hours=step)
elif unit == DAY:
dt = start_dt.replace(second=0, minute=0, hour=0)
x_delta = timedelta(days=step)
else:
raise ValueError("Invalid unit: %s" % unit)
while dt < start_dt:
dt += x_delta
return (dt, x_delta)
def logrange(base, scale_min, scale_max):
current = scale_min
if scale_min > 0:
current = math.floor(math.log(scale_min, base))
factor = current
while current <= scale_max:
current = math.pow(base, factor)
yield current
factor += 1
 | 1 | 7,873 | Fair warning, the way that exception block is written won't work in python3
Python 3.2.3 (default, Jun 8 2012, 05:36:09) [GCC 4.7.0 20120507 (Red Hat 4.7.0-5)] on linux2
Type "help", "copyright", "credits" or "license" for more information.
> > > try:
> > > ... raise ValueError("foo")
> > > ... except ValueError,e:
> > > File "<stdin>", line 3
> > > except ValueError, e:
> > > ^
> > > SyntaxError: invalid syntax | graphite-project-graphite-web | py |
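For context on the reviewer's note above, a minimal illustration (not part of the patch or of the graphite-web source): the comma forms of raise/except are Python-2-only syntax, while the forms below parse on both Python 2.6+ and Python 3.

# Python 2 only -- a SyntaxError under Python 3:
#   raise ValueError, "foo"
#   except ValueError, e:
# Accepted by Python 2.6+ and Python 3:
try:
    raise ValueError("foo")
except ValueError as e:
    print(e)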
@@ -43,12 +43,15 @@ module Beaker
end
port = container.json["NetworkSettings"]["Ports"]["22/tcp"][0]["HostPort"]
+ forward_ssh_agent = @options['forward_ssh_agent'] || false
+
# Update host metadata
host['ip'] = ip
host['port'] = port
host['ssh'] = {
:password => root_password,
:port => port,
+ :forward_agent => forward_ssh_agent,
}
@logger.debug("node available as ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no root@#{ip} -p #{port}") | 1 | module Beaker
class Docker < Beaker::Hypervisor
def initialize(hosts, options)
require 'docker'
@options = options
@logger = options[:logger]
@hosts = hosts
# increase the http timeouts as provisioning images can be slow
::Docker.options = { :write_timeout => 300, :read_timeout => 300 }.merge(::Docker.options || {})
# assert that the docker-api gem can talk to your docker
    # endpoint. Will raise if there is a version mismatch
::Docker.validate_version!
# Pass on all the logging from docker-api to the beaker logger instance
::Docker.logger = @logger
end
def provision
@logger.notify "Provisioning docker"
@hosts.each do |host|
@logger.notify "provisioning #{host.name}"
@logger.debug("Creating image")
image = ::Docker::Image.build(dockerfile_for(host), { :rm => true })
@logger.debug("Creating container from image #{image.id}")
container = ::Docker::Container.create({
'Image' => image.id,
'Hostname' => host.name,
})
@logger.debug("Starting container #{container.id}")
container.start({"PublishAllPorts" => true, "Privileged" => true})
# Find out where the ssh port is from the container
if ENV['DOCKER_HOST']
ip = URI.parse(ENV['DOCKER_HOST']).host
@logger.info("Using docker server at #{ip}")
else
ip = container.json["NetworkSettings"]["Ports"]["22/tcp"][0]["HostIp"]
end
port = container.json["NetworkSettings"]["Ports"]["22/tcp"][0]["HostPort"]
# Update host metadata
host['ip'] = ip
host['port'] = port
host['ssh'] = {
:password => root_password,
:port => port,
}
@logger.debug("node available as ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no root@#{ip} -p #{port}")
host['docker_container'] = container
host['docker_image'] = image
end
end
def cleanup
@logger.notify "Cleaning up docker"
@hosts.each do |host|
if container = host['docker_container']
@logger.debug("stop container #{container.id}")
begin
container.stop
sleep 2 # avoid a race condition where the root FS can't unmount
rescue Excon::Errors::ClientError => e
@logger.warn("stop of container #{container.id} failed: #{e.response.body}")
end
@logger.debug("delete container #{container.id}")
begin
container.delete
rescue Excon::Errors::ClientError => e
@logger.warn("deletion of container #{container.id} failed: #{e.response.body}")
end
end
      # Do not remove the image if docker_preserve_image is set to true, otherwise remove it
if image = (host['docker_preserve_image'] ? nil : host['docker_image'])
@logger.debug("delete image #{image.id}")
begin
image.delete
rescue Excon::Errors::ClientError => e
@logger.warn("deletion of image #{image.id} failed: #{e.response.body}")
end
end
end
end
private
def root_password
'root'
end
def dockerfile_for(host)
    # Log an error if the image is not defined, empty or nil
@logger.error("Docker image undefined!") if (host['image']||= nil).to_s.empty?
# specify base image
dockerfile = <<-EOF
FROM #{host['image']}
EOF
# additional options to specify to the sshd
# may vary by platform
sshd_options = ''
# add platform-specific actions
case host['platform']
when /ubuntu/, /debian/
sshd_options = '-o "PermitRootLogin yes" -o "PasswordAuthentication yes"'
dockerfile += <<-EOF
RUN apt-get update
RUN apt-get install -y openssh-server openssh-client #{Beaker::HostPrebuiltSteps::DEBIAN_PACKAGES.join(' ')}
EOF
when /cumulus/
sshd_options = '-o "PermitRootLogin yes" -o "PasswordAuthentication yes"'
dockerfile += <<-EOF
RUN apt-get update
RUN apt-get install -y openssh-server openssh-client #{Beaker::HostPrebuiltSteps::CUMULUS_PACKAGES.join(' ')}
EOF
when /^el-/, /centos/, /fedora/, /redhat/, /eos/
dockerfile += <<-EOF
RUN yum clean all
RUN yum install -y sudo openssh-server openssh-clients #{Beaker::HostPrebuiltSteps::UNIX_PACKAGES.join(' ')}
RUN ssh-keygen -t rsa -f /etc/ssh/ssh_host_rsa_key
RUN ssh-keygen -t dsa -f /etc/ssh/ssh_host_dsa_key
EOF
when /opensuse/, /sles/
sshd_options = '-o "PermitRootLogin yes" -o "PasswordAuthentication yes" -o "UsePAM no"'
dockerfile += <<-EOF
RUN zypper -n in openssh #{Beaker::HostPrebuiltSteps::SLES_PACKAGES.join(' ')}
RUN ssh-keygen -t rsa -f /etc/ssh/ssh_host_rsa_key
RUN ssh-keygen -t dsa -f /etc/ssh/ssh_host_dsa_key
EOF
else
# TODO add more platform steps here
raise "platform #{host['platform']} not yet supported on docker"
end
# Make sshd directory, set root password
dockerfile += <<-EOF
RUN mkdir -p /var/run/sshd
RUN echo root:#{root_password} | chpasswd
EOF
# Any extra commands specified for the host
dockerfile += (host['docker_image_commands'] || []).map { |command|
"RUN #{command}\n"
}.join('')
# Override image entrypoint
if host['docker_image_entrypoint']
dockerfile += "ENTRYPOINT #{host['docker_image_entrypoint']}\n"
end
# How to start a sshd on port 22. May be an init for more supervision
cmd = host['docker_cmd'] || "/usr/sbin/sshd -D #{sshd_options}"
dockerfile += <<-EOF
EXPOSE 22
CMD #{cmd}
EOF
@logger.debug("Dockerfile is #{dockerfile}")
return dockerfile
end
end
end
| 1 | 8,104 | This only updates the metadata and not the actual thing you are trying to solve for the docker hypervisor. | voxpupuli-beaker | rb |
@@ -0,0 +1,18 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/*
+Package debug provides access to enable internal debugging of opentelemetry.
+*/
+package debug // import "go.opentelemetry.io/otel/internal/debug" | 1 | 1 | 16,857 | should this package be internal? don't we want to use it e.g. in go-contrib? | open-telemetry-opentelemetry-go | go |
|
@@ -31,7 +31,7 @@ import (
//
// A Document can be represented as a map[string]int or a pointer to a struct. For
// structs, the exported fields are the document fields.
-type Document = interface{}
+type Document interface{}
// A Collection is a set of documents.
// TODO(jba): make the docstring look more like blob.Bucket. | 1 | // Copyright 2019 The Go Cloud Development Kit Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package docstore
import (
"context"
"sort"
"strings"
"unicode/utf8"
"gocloud.dev/internal/docstore/driver"
"gocloud.dev/internal/gcerr"
)
// A Document is a set of field-value pairs. One or more fields, called the key
// fields, must uniquely identify the document in the collection. You specify the key
// fields when you open a provider collection.
// A field name must be a valid UTF-8 string that does not contain a '.'.
//
// A Document can be represented as a map[string]int or a pointer to a struct. For
// structs, the exported fields are the document fields.
type Document = interface{}
// A Collection is a set of documents.
// TODO(jba): make the docstring look more like blob.Bucket.
type Collection struct {
driver driver.Collection
}
// NewCollection is intended for use by provider implementations.
var NewCollection = newCollection
// newCollection makes a Collection.
func newCollection(d driver.Collection) *Collection {
return &Collection{driver: d}
}
// RevisionField is the name of the document field used for document revision
// information, to implement optimistic locking.
// See the Revisions section of the package documentation.
const RevisionField = "DocstoreRevision"
// A FieldPath is a dot-separated sequence of UTF-8 field names. Examples:
// room
// room.size
// room.size.width
//
// A FieldPath can be used to select top-level fields or elements of sub-documents.
// There is no way to select a single list element.
type FieldPath string
// Actions returns an ActionList that can be used to perform
// actions on the collection's documents.
func (c *Collection) Actions() *ActionList {
return &ActionList{coll: c}
}
// An ActionList is a sequence of actions that affect a single collection. The
// actions are performed in order. If an action fails, the ones following it are not
// executed.
type ActionList struct {
coll *Collection
actions []*Action
}
// An Action is a read or write on a single document.
// Use the methods of ActionList to create and execute Actions.
type Action struct {
kind driver.ActionKind
doc Document
fieldpaths []FieldPath // paths to retrieve, for Get
mods Mods // modifications to make, for Update
}
func (l *ActionList) add(a *Action) *ActionList {
l.actions = append(l.actions, a)
return l
}
// Create adds an action that creates a new document.
// The document must not already exist; an error for which gcerrors.Code returns
// AlreadyExists is returned if it does. If the document doesn't have key fields, it
// will be given key fields with unique values.
// TODO(jba): treat zero values for struct fields as not present?
func (l *ActionList) Create(doc Document) *ActionList {
return l.add(&Action{kind: driver.Create, doc: doc})
}
// Replace adds an action that replaces a document.
// The key fields must be set.
// The document must already exist; an error for which gcerrors.Code returns NotFound
// is returned if it does not.
// See the Revisions section of the package documentation for how revisions are
// handled.
func (l *ActionList) Replace(doc Document) *ActionList {
return l.add(&Action{kind: driver.Replace, doc: doc})
}
// Put adds an action that adds or replaces a document.
// The key fields must be set.
// The document may or may not already exist.
// See the Revisions section of the package documentation for how revisions are
// handled.
func (l *ActionList) Put(doc Document) *ActionList {
return l.add(&Action{kind: driver.Put, doc: doc})
}
// Delete adds an action that deletes a document.
// Only the key fields and RevisionField of doc are used.
// See the Revisions section of the package documentation for how revisions are
// handled.
// If doc has no revision and the document doesn't exist, nothing happens and no
// error is returned.
func (l *ActionList) Delete(doc Document) *ActionList {
// Rationale for not returning an error if the document does not exist:
// Returning an error might be informative and could be ignored, but if the
// semantics of an action list are to stop at first error, then we might abort a
// list of Deletes just because one of the docs was not present, and that seems
// wrong, or at least something you'd want to turn off.
return l.add(&Action{kind: driver.Delete, doc: doc})
}
// Get adds an action that retrieves a document.
// Only the key fields of doc are used.
// If fps is omitted, all the fields of doc are set to those of the
// retrieved document. If fps is present, only the given field paths are
// set. In both cases, other fields of doc are not touched.
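// For example (an illustrative sketch; coll is a *Collection, and the Book
// struct and its field paths are hypothetical):
//
//	var b Book
//	b.Title = "Moby Dick" // key field
//	_, err := coll.Actions().Get(&b, "Author.Name", "Price").Do(ctx)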
func (l *ActionList) Get(doc Document, fps ...FieldPath) *ActionList {
return l.add(&Action{
kind: driver.Get,
doc: doc,
fieldpaths: fps,
})
}
// Update atomically applies Mods to doc, which must exist.
// Only the key and revision fields of doc are used.
//
// A modification will create a field if it doesn't exist.
//
// No field path in mods can be a prefix of another. (It makes no sense
// to, say, set foo but increment foo.bar.)
//
// See the Revisions section of the package documentation for how revisions are
// handled.
//
// It is undefined whether updating a sub-field of a non-map field will succeed.
// For instance, if the current document is {a: 1} and Update is called with the
// mod "a.b": 2, then either Update will fail, or it will succeed with the result
// {a: {b: 2}}.
//
// Update does not modify its doc argument. To obtain the new value of the document,
// call Get after calling Update.
func (l *ActionList) Update(doc Document, mods Mods) *ActionList {
return l.add(&Action{
kind: driver.Update,
doc: doc,
mods: mods,
})
}
// Mods is a map from field paths to modifications.
// At present, a modification is one of:
// - nil, to delete the field
// - any other value, to set the field to that value
// TODO(jba): add other kinds of modification
// See ActionList.Update.
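// For example (an illustrative sketch from client code; the document variable
// and field paths are hypothetical):
//
//	err := coll.Update(ctx, &b, docstore.Mods{
//		"Price":        18.99, // set or create the field
//		"Author.Email": nil,   // delete the field
//	})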
type Mods map[FieldPath]interface{}
// Do executes the action list. If all the actions executed successfully, Do returns
// (number of actions, nil). If any failed, Do returns the number of successful
// actions and an error. In general there is no way to know which actions succeeded,
// but the error will contain as much information as possible about the failures.
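// For example (an illustrative sketch; Book is a hypothetical document struct):
//
//	n, err := coll.Actions().
//		Put(&Book{Title: "1984"}).
//		Get(&Book{Title: "Dune"}).
//		Do(ctx)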
func (l *ActionList) Do(ctx context.Context) (int, error) {
var das []*driver.Action
for _, a := range l.actions {
d, err := a.toDriverAction()
if err != nil {
return 0, wrapError(l.coll.driver, err)
}
das = append(das, d)
}
n, err := l.coll.driver.RunActions(ctx, das)
return n, wrapError(l.coll.driver, err)
}
func (a *Action) toDriverAction() (*driver.Action, error) {
ddoc, err := driver.NewDocument(a.doc)
if err != nil {
return nil, err
}
d := &driver.Action{Kind: a.kind, Doc: ddoc}
if a.fieldpaths != nil {
d.FieldPaths = make([][]string, len(a.fieldpaths))
for i, s := range a.fieldpaths {
fp, err := parseFieldPath(s)
if err != nil {
return nil, err
}
d.FieldPaths[i] = fp
}
}
if a.mods != nil {
// Convert mods from a map to a slice of (fieldPath, value) pairs.
// The map is easier for users to write, but the slice is easier
// to process.
// TODO(jba): check for prefix
// Sort keys so tests are deterministic.
var keys []string
for k := range a.mods {
keys = append(keys, string(k))
}
sort.Strings(keys)
for _, k := range keys {
k := FieldPath(k)
v := a.mods[k]
fp, err := parseFieldPath(k)
if err != nil {
return nil, err
}
d.Mods = append(d.Mods, driver.Mod{FieldPath: fp, Value: v})
}
}
return d, nil
}
// Create is a convenience for building and running a single-element action list.
// See ActionList.Create.
func (c *Collection) Create(ctx context.Context, doc Document) error {
_, err := c.Actions().Create(doc).Do(ctx)
return err
}
// Replace is a convenience for building and running a single-element action list.
// See ActionList.Replace.
func (c *Collection) Replace(ctx context.Context, doc Document) error {
_, err := c.Actions().Replace(doc).Do(ctx)
return err
}
// Put is a convenience for building and running a single-element action list.
// See ActionList.Put.
func (c *Collection) Put(ctx context.Context, doc Document) error {
_, err := c.Actions().Put(doc).Do(ctx)
return err
}
// Delete is a convenience for building and running a single-element action list.
// See ActionList.Delete.
func (c *Collection) Delete(ctx context.Context, doc Document) error {
_, err := c.Actions().Delete(doc).Do(ctx)
return err
}
// Get is a convenience for building and running a single-element action list.
// See ActionList.Get.
func (c *Collection) Get(ctx context.Context, doc Document, fps ...FieldPath) error {
_, err := c.Actions().Get(doc, fps...).Do(ctx)
return err
}
// Update is a convenience for building and running a single-element action list.
// See ActionList.Update.
func (c *Collection) Update(ctx context.Context, doc Document, mods Mods) error {
_, err := c.Actions().Update(doc, mods).Do(ctx)
return err
}
func parseFieldPath(fp FieldPath) ([]string, error) {
if len(fp) == 0 {
return nil, gcerr.Newf(gcerr.InvalidArgument, nil, "empty field path")
}
if !utf8.ValidString(string(fp)) {
return nil, gcerr.Newf(gcerr.InvalidArgument, nil, "invalid UTF-8 field path %q", fp)
}
parts := strings.Split(string(fp), ".")
for _, p := range parts {
if p == "" {
return nil, gcerr.Newf(gcerr.InvalidArgument, nil, "empty component in field path %q", fp)
}
}
return parts, nil
}
func wrapError(c driver.Collection, err error) error {
if err == nil {
return nil
}
if gcerr.DoNotWrap(err) {
return err
}
if _, ok := err.(*gcerr.Error); ok {
return err
}
return gcerr.New(c.ErrorCode(err), err, 2, "docstore")
}
// TODO(jba): ErrorAs
| 1 | 16,348 | Just curious, why did you make this change? | google-go-cloud | go |
@@ -1576,6 +1576,8 @@ def get_dummies(
"Length of 'prefix' ({}) did not match the length of "
"the columns being encoded ({}).".format(len(prefix), len(column_labels))
)
+ elif isinstance(prefix, dict):
+ prefix = [prefix[column_label[0]] for column_label in column_labels]
all_values = _reduce_spark_multi(
kdf._sdf, [F.collect_set(kdf._internal.spark_column_for(label)) for label in column_labels] | 1 | #
# Copyright (C) 2019 Databricks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Wrappers around spark that correspond to common pandas functions.
"""
from typing import Optional, Union, List, Tuple
from collections import OrderedDict
from collections.abc import Iterable
from functools import reduce
import itertools
import numpy as np
import pandas as pd
from pandas.api.types import is_list_like
from pyspark import sql as spark
from pyspark.sql import functions as F
from pyspark.sql.types import (
ByteType,
ShortType,
IntegerType,
LongType,
FloatType,
DoubleType,
BooleanType,
TimestampType,
DecimalType,
StringType,
DateType,
StructType,
)
from databricks import koalas as ks # For running doctests and reference resolution in PyCharm.
from databricks.koalas.base import IndexOpsMixin
from databricks.koalas.utils import (
default_session,
name_like_string,
scol_for,
validate_axis,
align_diff_frames,
)
from databricks.koalas.frame import DataFrame, _reduce_spark_multi
from databricks.koalas.internal import _InternalFrame
from databricks.koalas.series import Series, _col
__all__ = [
"from_pandas",
"range",
"read_csv",
"read_delta",
"read_table",
"read_spark_io",
"read_parquet",
"read_clipboard",
"read_excel",
"read_html",
"to_datetime",
"get_dummies",
"concat",
"melt",
"isna",
"isnull",
"notna",
"notnull",
"read_sql_table",
"read_sql_query",
"read_sql",
"read_json",
"merge",
"to_numeric",
"broadcast",
]
def from_pandas(pobj: Union["pd.DataFrame", "pd.Series"]) -> Union["Series", "DataFrame"]:
"""Create a Koalas DataFrame or Series from a pandas DataFrame or Series.
This is similar to Spark's `SparkSession.createDataFrame()` with pandas DataFrame,
but this also works with pandas Series and picks the index.
Parameters
----------
pobj : pandas.DataFrame or pandas.Series
pandas DataFrame or Series to read.
Returns
-------
Series or DataFrame
If a pandas Series is passed in, this function returns a Koalas Series.
If a pandas DataFrame is passed in, this function returns a Koalas DataFrame.
"""
if isinstance(pobj, pd.Series):
return Series(pobj)
elif isinstance(pobj, pd.DataFrame):
return DataFrame(pobj)
elif isinstance(pobj, pd.Index):
return DataFrame(pd.DataFrame(index=pobj)).index
else:
raise ValueError("Unknown data type: {}".format(type(pobj)))
_range = range # built-in range
def range(
start: int, end: Optional[int] = None, step: int = 1, num_partitions: Optional[int] = None
) -> DataFrame:
"""
Create a DataFrame with some range of numbers.
The resulting DataFrame has a single int64 column named `id`, containing elements in a range
from ``start`` to ``end`` (exclusive) with step value ``step``. If only the first parameter
(i.e. start) is specified, we treat it as the end value with the start value being 0.
This is similar to the range function in SparkSession and is used primarily for testing.
Parameters
----------
start : int
the start value (inclusive)
end : int, optional
the end value (exclusive)
step : int, optional, default 1
the incremental step
num_partitions : int, optional
the number of partitions of the DataFrame
Returns
-------
DataFrame
Examples
--------
When the first parameter is specified, we generate a range of values up till that number.
>>> ks.range(5)
id
0 0
1 1
2 2
3 3
4 4
When start, end, and step are specified:
>>> ks.range(start = 100, end = 200, step = 20)
id
0 100
1 120
2 140
3 160
4 180
"""
sdf = default_session().range(start=start, end=end, step=step, numPartitions=num_partitions)
return DataFrame(sdf)
def read_csv(
path,
sep=",",
header="infer",
names=None,
index_col=None,
usecols=None,
squeeze=False,
mangle_dupe_cols=True,
dtype=None,
parse_dates=False,
quotechar=None,
escapechar=None,
comment=None,
**options
):
"""Read CSV (comma-separated) file into DataFrame.
Parameters
----------
path : str
The path string storing the CSV file to be read.
sep : str, default ‘,’
Delimiter to use. Must be a single character.
header : int, list of int, default ‘infer’
Row number(s) to use as the column names, and the start of the data.
Default behavior is to infer the column names: if no names are passed
the behavior is identical to `header=0` and column names are inferred from
the first line of the file, if column names are passed explicitly then
the behavior is identical to `header=None`. Explicitly pass `header=0` to be
able to replace existing names
names : str or array-like, optional
List of column names to use. If file contains no header row, then you should
explicitly pass `header=None`. Duplicates in this list will cause an error to be issued.
If a string is given, it should be a DDL-formatted string in Spark SQL, which is
preferred to avoid schema inference for better performance.
index_col: str or list of str, optional, default: None
Index column of table in Spark.
usecols : list-like or callable, optional
Return a subset of the columns. If list-like, all elements must either be
positional (i.e. integer indices into the document columns) or strings that
correspond to column names provided either by the user in names or inferred
from the document header row(s).
If callable, the callable function will be evaluated against the column names,
returning names where the callable function evaluates to `True`.
squeeze : bool, default False
If the parsed data only contains one column then return a Series.
mangle_dupe_cols : bool, default True
Duplicate columns will be specified as 'X0', 'X1', ... 'XN', rather
than 'X' ... 'X'. Passing in False will cause data to be overwritten if
there are duplicate names in the columns.
Currently only `True` is allowed.
dtype : Type name or dict of column -> type, default None
Data type for data or columns. E.g. {‘a’: np.float64, ‘b’: np.int32} Use str or object
together with suitable na_values settings to preserve and not interpret dtype.
parse_dates : boolean or list of ints or names or list of lists or dict, default `False`.
Currently only `False` is allowed.
quotechar : str (length 1), optional
The character used to denote the start and end of a quoted item. Quoted items can include
the delimiter and it will be ignored.
escapechar : str (length 1), default None
One-character string used to escape delimiter
comment: str, optional
Indicates the line should not be parsed.
options : dict
All other options passed directly into Spark's data source.
Returns
-------
DataFrame
See Also
--------
DataFrame.to_csv : Write DataFrame to a comma-separated values (csv) file.
Examples
--------
>>> ks.read_csv('data.csv') # doctest: +SKIP
"""
if "options" in options and isinstance(options.get("options"), dict) and len(options) == 1:
options = options.get("options") # type: ignore
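# The unwrapping above lets callers pass Spark's reader options either as plain
# keyword arguments or bundled in a single ``options`` dict, e.g.
# (illustrative): ks.read_csv(path, options={"multiLine": True}).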
if mangle_dupe_cols is not True:
raise ValueError("mangle_dupe_cols can only be `True`: %s" % mangle_dupe_cols)
if parse_dates is not False:
raise ValueError("parse_dates can only be `False`: %s" % parse_dates)
if usecols is not None and not callable(usecols):
usecols = list(usecols)
if usecols is None or callable(usecols) or len(usecols) > 0:
reader = default_session().read
reader.option("inferSchema", True)
reader.option("sep", sep)
if header == "infer":
header = 0 if names is None else None
if header == 0:
reader.option("header", True)
elif header is None:
reader.option("header", False)
else:
raise ValueError("Unknown header argument {}".format(header))
if quotechar is not None:
reader.option("quote", quotechar)
if escapechar is not None:
reader.option("escape", escapechar)
if comment is not None:
if not isinstance(comment, str) or len(comment) != 1:
raise ValueError("Only length-1 comment characters supported")
reader.option("comment", comment)
reader.options(**options)
if isinstance(names, str):
sdf = reader.schema(names).csv(path)
else:
sdf = reader.csv(path)
if header is None:
sdf = sdf.selectExpr(
*["`%s` as `%s`" % (field.name, i) for i, field in enumerate(sdf.schema)]
)
if isinstance(names, list):
names = list(names)
if len(set(names)) != len(names):
raise ValueError("Found non-unique column index")
if len(names) != len(sdf.schema):
raise ValueError(
"The number of names [%s] does not match the number "
"of columns [%d]. Try names by a Spark SQL DDL-formatted "
"string." % (len(names), len(sdf.schema))
)
sdf = sdf.selectExpr(
*["`%s` as `%s`" % (field.name, name) for field, name in zip(sdf.schema, names)]
)
if usecols is not None:
if callable(usecols):
cols = [field.name for field in sdf.schema if usecols(field.name)]
missing = []
elif all(isinstance(col, int) for col in usecols):
cols = [field.name for i, field in enumerate(sdf.schema) if i in usecols]
missing = [
col
for col in usecols
if col >= len(sdf.schema) or sdf.schema[col].name not in cols
]
elif all(isinstance(col, str) for col in usecols):
cols = [field.name for field in sdf.schema if field.name in usecols]
missing = [col for col in usecols if col not in cols]
else:
raise ValueError(
"'usecols' must either be list-like of all strings, "
"all unicode, all integers or a callable."
)
if len(missing) > 0:
raise ValueError(
"Usecols do not match columns, columns expected but not " "found: %s" % missing
)
if len(cols) > 0:
sdf = sdf.select(cols)
else:
sdf = default_session().createDataFrame([], schema=StructType())
else:
sdf = default_session().createDataFrame([], schema=StructType())
index_map = _get_index_map(sdf, index_col)
kdf = DataFrame(_InternalFrame(spark_frame=sdf, index_map=index_map))
if dtype is not None:
if isinstance(dtype, dict):
for col, tpe in dtype.items():
kdf[col] = kdf[col].astype(tpe)
else:
for col in kdf.columns:
kdf[col] = kdf[col].astype(dtype)
if squeeze and len(kdf.columns) == 1:
return kdf[kdf.columns[0]]
return kdf
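# Illustrative sketch (hypothetical file path): as noted in the docstring above,
# ``names`` may be a Spark SQL DDL-formatted string to avoid schema inference.
# >>> ks.read_csv('data.csv', names='col0 INT, col1 DOUBLE')  # doctest: +SKIP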
def read_json(path: str, index_col: Optional[Union[str, List[str]]] = None, **options):
"""
Convert a JSON string to pandas object.
Parameters
----------
path : string
File path
index_col : str or list of str, optional, default: None
Index column of table in Spark.
options : dict
All other options passed directly into Spark's data source.
Examples
--------
>>> df = ks.DataFrame([['a', 'b'], ['c', 'd']],
... columns=['col 1', 'col 2'])
>>> df.to_json(path=r'%s/read_json/foo.json' % path, num_files=1)
>>> ks.read_json(
... path=r'%s/read_json/foo.json' % path
... ).sort_values(by="col 1")
col 1 col 2
0 a b
1 c d
>>> df.to_json(path=r'%s/read_json/foo.json' % path, num_files=1, lineSep='___')
>>> ks.read_json(
... path=r'%s/read_json/foo.json' % path, lineSep='___'
... ).sort_values(by="col 1")
col 1 col 2
0 a b
1 c d
You can preserve the index in the roundtrip as below.
>>> df.to_json(path=r'%s/read_json/bar.json' % path, num_files=1, index_col="index")
>>> ks.read_json(
... path=r'%s/read_json/bar.json' % path, index_col="index"
... ).sort_values(by="col 1") # doctest: +NORMALIZE_WHITESPACE
col 1 col 2
index
0 a b
1 c d
"""
if "options" in options and isinstance(options.get("options"), dict) and len(options) == 1:
options = options.get("options") # type: ignore
return read_spark_io(path, format="json", index_col=index_col, **options)
def read_delta(
path: str,
version: Optional[str] = None,
timestamp: Optional[str] = None,
index_col: Optional[Union[str, List[str]]] = None,
**options
) -> DataFrame:
"""
Read a Delta Lake table on some file system and return a DataFrame.
If the Delta Lake table is already stored in the catalog (aka the metastore), use 'read_table'.
Parameters
----------
path : string
Path to the Delta Lake table.
version : string, optional
Specifies the table version (based on Delta's internal transaction version) to read from,
using Delta's time travel feature. This sets Delta's 'versionAsOf' option.
timestamp : string, optional
Specifies the table version (based on timestamp) to read from,
using Delta's time travel feature. This must be a valid date or timestamp string in Spark,
and sets Delta's 'timestampAsOf' option.
index_col : str or list of str, optional, default: None
Index column of table in Spark.
options
Additional options that can be passed onto Delta.
Returns
-------
DataFrame
See Also
--------
DataFrame.to_delta
read_table
read_spark_io
read_parquet
Examples
--------
>>> ks.range(1).to_delta('%s/read_delta/foo' % path)
>>> ks.read_delta('%s/read_delta/foo' % path)
id
0 0
>>> ks.range(10, 15, num_partitions=1).to_delta('%s/read_delta/foo' % path, mode='overwrite')
>>> ks.read_delta('%s/read_delta/foo' % path)
id
0 10
1 11
2 12
3 13
4 14
>>> ks.read_delta('%s/read_delta/foo' % path, version=0)
id
0 0
You can preserve the index in the roundtrip as below.
>>> ks.range(10, 15, num_partitions=1).to_delta(
... '%s/read_delta/bar' % path, index_col="index")
>>> ks.read_delta('%s/read_delta/bar' % path, index_col="index")
... # doctest: +NORMALIZE_WHITESPACE
id
index
0 10
1 11
2 12
3 13
4 14
"""
if "options" in options and isinstance(options.get("options"), dict) and len(options) == 1:
options = options.get("options") # type: ignore
if version is not None:
options["versionAsOf"] = version
if timestamp is not None:
options["timestampAsOf"] = timestamp
return read_spark_io(path, format="delta", index_col=index_col, **options)
def read_table(name: str, index_col: Optional[Union[str, List[str]]] = None) -> DataFrame:
"""
Read a Spark table and return a DataFrame.
Parameters
----------
name : string
Table name in Spark.
index_col : str or list of str, optional, default: None
Index column of table in Spark.
Returns
-------
DataFrame
See Also
--------
DataFrame.to_table
read_delta
read_parquet
read_spark_io
Examples
--------
>>> ks.range(1).to_table('%s.my_table' % db)
>>> ks.read_table('%s.my_table' % db)
id
0 0
>>> ks.range(1).to_table('%s.my_table' % db, index_col="index")
>>> ks.read_table('%s.my_table' % db, index_col="index") # doctest: +NORMALIZE_WHITESPACE
id
index
0 0
"""
sdf = default_session().read.table(name)
index_map = _get_index_map(sdf, index_col)
return DataFrame(_InternalFrame(spark_frame=sdf, index_map=index_map))
def read_spark_io(
path: Optional[str] = None,
format: Optional[str] = None,
schema: Union[str, "StructType"] = None,
index_col: Optional[Union[str, List[str]]] = None,
**options
) -> DataFrame:
"""Load a DataFrame from a Spark data source.
Parameters
----------
path : string, optional
Path to the data source.
format : string, optional
Specifies the output data source format. Some common ones are:
- 'delta'
- 'parquet'
- 'orc'
- 'json'
- 'csv'
schema : string or StructType, optional
Input schema. If none, Spark tries to infer the schema automatically.
The schema can either be a Spark StructType, or a DDL-formatted string like
`col0 INT, col1 DOUBLE`.
index_col : str or list of str, optional, default: None
Index column of table in Spark.
options : dict
All other options passed directly into Spark's data source.
See Also
--------
DataFrame.to_spark_io
DataFrame.read_table
DataFrame.read_delta
DataFrame.read_parquet
Examples
--------
>>> ks.range(1).to_spark_io('%s/read_spark_io/data.parquet' % path)
>>> ks.read_spark_io(
... '%s/read_spark_io/data.parquet' % path, format='parquet', schema='id long')
id
0 0
>>> ks.range(10, 15, num_partitions=1).to_spark_io('%s/read_spark_io/data.json' % path,
... format='json', lineSep='__')
>>> ks.read_spark_io(
... '%s/read_spark_io/data.json' % path, format='json', schema='id long', lineSep='__')
id
0 10
1 11
2 12
3 13
4 14
You can preserve the index in the roundtrip as below.
>>> ks.range(10, 15, num_partitions=1).to_spark_io('%s/read_spark_io/data.orc' % path,
... format='orc', index_col="index")
>>> ks.read_spark_io(
... path=r'%s/read_spark_io/data.orc' % path, format="orc", index_col="index")
... # doctest: +NORMALIZE_WHITESPACE
id
index
0 10
1 11
2 12
3 13
4 14
"""
if "options" in options and isinstance(options.get("options"), dict) and len(options) == 1:
options = options.get("options") # type: ignore
sdf = default_session().read.load(path=path, format=format, schema=schema, **options)
index_map = _get_index_map(sdf, index_col)
return DataFrame(_InternalFrame(spark_frame=sdf, index_map=index_map))
def read_parquet(path, columns=None, index_col=None, **options) -> DataFrame:
"""Load a parquet object from the file path, returning a DataFrame.
Parameters
----------
path : string
File path
columns : list, default=None
If not None, only these columns will be read from the file.
index_col : str or list of str, optional, default: None
Index column of table in Spark.
options : dict
All other options passed directly into Spark's data source.
Returns
-------
DataFrame
See Also
--------
DataFrame.to_parquet
DataFrame.read_table
DataFrame.read_delta
DataFrame.read_spark_io
Examples
--------
>>> ks.range(1).to_parquet('%s/read_spark_io/data.parquet' % path)
>>> ks.read_parquet('%s/read_spark_io/data.parquet' % path, columns=['id'])
id
0 0
You can preserve the index in the roundtrip as below.
>>> ks.range(1).to_parquet('%s/read_spark_io/data.parquet' % path, index_col="index")
>>> ks.read_parquet('%s/read_spark_io/data.parquet' % path, columns=['id'], index_col="index")
... # doctest: +NORMALIZE_WHITESPACE
id
index
0 0
"""
if "options" in options and isinstance(options.get("options"), dict) and len(options) == 1:
options = options.get("options") # type: ignore
if columns is not None:
columns = list(columns)
kdf = read_spark_io(path=path, format="parquet", options=options, index_col=index_col)
if columns is not None:
new_columns = [c for c in columns if c in kdf.columns]
if len(new_columns) > 0:
kdf = kdf[new_columns]
else:
sdf = default_session().createDataFrame([], schema=StructType())
index_map = _get_index_map(sdf, index_col)
return DataFrame(_InternalFrame(spark_frame=sdf, index_map=index_map))
return kdf
def read_clipboard(sep=r"\s+", **kwargs):
r"""
Read text from clipboard and pass to read_csv. See read_csv for the
full argument list
Parameters
----------
sep : str, default '\s+'
A string or regex delimiter. The default of '\s+' denotes
one or more whitespace characters.
See Also
--------
DataFrame.to_clipboard : Write text out to clipboard.
Returns
-------
parsed : DataFrame
"""
return from_pandas(pd.read_clipboard(sep, **kwargs))
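# Illustrative sketch: read whitespace-delimited text currently on the clipboard
# into a Koalas DataFrame.
# >>> kdf = ks.read_clipboard()  # doctest: +SKIP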
def read_excel(
io,
sheet_name=0,
header=0,
names=None,
index_col=None,
usecols=None,
squeeze=False,
dtype=None,
engine=None,
converters=None,
true_values=None,
false_values=None,
skiprows=None,
nrows=None,
na_values=None,
keep_default_na=True,
verbose=False,
parse_dates=False,
date_parser=None,
thousands=None,
comment=None,
skipfooter=0,
convert_float=True,
mangle_dupe_cols=True,
**kwds
):
"""
Read an Excel file into a Koalas DataFrame.
Support both `xls` and `xlsx` file extensions from a local filesystem or URL.
Support an option to read a single sheet or a list of sheets.
Parameters
----------
io : str, file descriptor, pathlib.Path, ExcelFile or xlrd.Book
The string could be a URL. Valid URL schemes include http, ftp, s3,
gcs, and file. For file URLs, a host is expected. For instance, a local
file could be /path/to/workbook.xlsx.
sheet_name : str, int, list, or None, default 0
Strings are used for sheet names. Integers are used in zero-indexed
sheet positions. Lists of strings/integers are used to request
multiple sheets. Specify None to get all sheets.
Available cases:
* Defaults to ``0``: 1st sheet as a `DataFrame`
* ``1``: 2nd sheet as a `DataFrame`
* ``"Sheet1"``: Load sheet with name "Sheet1"
* ``[0, 1, "Sheet5"]``: Load first, second and sheet named "Sheet5"
as a dict of `DataFrame`
* None: All sheets.
header : int, list of int, default 0
Row (0-indexed) to use for the column labels of the parsed
DataFrame. If a list of integers is passed those row positions will
be combined into a ``MultiIndex``. Use None if there is no header.
names : array-like, default None
List of column names to use. If file contains no header row,
then you should explicitly pass header=None.
index_col : int, list of int, default None
Column (0-indexed) to use as the row labels of the DataFrame.
Pass None if there is no such column. If a list is passed,
those columns will be combined into a ``MultiIndex``. If a
subset of data is selected with ``usecols``, index_col
is based on the subset.
usecols : int, str, list-like, or callable default None
Return a subset of the columns.
* If None, then parse all columns.
* If str, then indicates comma separated list of Excel column letters
and column ranges (e.g. "A:E" or "A,C,E:F"). Ranges are inclusive of
both sides.
* If list of int, then indicates list of column numbers to be parsed.
* If list of string, then indicates list of column names to be parsed.
* If callable, then evaluate each column name against it and parse the
column if the callable returns ``True``.
squeeze : bool, default False
If the parsed data only contains one column then return a Series.
dtype : Type name or dict of column -> type, default None
Data type for data or columns. E.g. {'a': np.float64, 'b': np.int32}
Use `object` to preserve data as stored in Excel and not interpret dtype.
If converters are specified, they will be applied INSTEAD
of dtype conversion.
engine : str, default None
If io is not a buffer or path, this must be set to identify io.
Acceptable values are None or xlrd.
converters : dict, default None
Dict of functions for converting values in certain columns. Keys can
either be integers or column labels, values are functions that take one
input argument, the Excel cell content, and return the transformed
content.
true_values : list, default None
Values to consider as True.
false_values : list, default None
Values to consider as False.
skiprows : list-like
Rows to skip at the beginning (0-indexed).
nrows : int, default None
Number of rows to parse.
na_values : scalar, str, list-like, or dict, default None
Additional strings to recognize as NA/NaN. If dict passed, specific
per-column NA values. By default the following values are interpreted
as NaN.
keep_default_na : bool, default True
If na_values are specified and keep_default_na is False the default NaN
values are overridden, otherwise they're appended to.
verbose : bool, default False
Indicate number of NA values placed in non-numeric columns.
parse_dates : bool, list-like, or dict, default False
The behavior is as follows:
* bool. If True -> try parsing the index.
* list of int or names. e.g. If [1, 2, 3] -> try parsing columns 1, 2, 3
each as a separate date column.
* list of lists. e.g. If [[1, 3]] -> combine columns 1 and 3 and parse as
a single date column.
* dict, e.g. {'foo' : [1, 3]} -> parse columns 1, 3 as date and call
result 'foo'
If a column or index contains an unparseable date, the entire column or
index will be returned unaltered as an object data type. For non-standard
datetime parsing, use ``pd.to_datetime`` after ``pd.read_csv``
Note: A fast-path exists for iso8601-formatted dates.
date_parser : function, optional
Function to use for converting a sequence of string columns to an array of
datetime instances. The default uses ``dateutil.parser.parser`` to do the
conversion. Koalas will try to call `date_parser` in three different ways,
advancing to the next if an exception occurs: 1) Pass one or more arrays
(as defined by `parse_dates`) as arguments; 2) concatenate (row-wise) the
string values from the columns defined by `parse_dates` into a single array
and pass that; and 3) call `date_parser` once for each row using one or
more strings (corresponding to the columns defined by `parse_dates`) as
arguments.
thousands : str, default None
Thousands separator for parsing string columns to numeric. Note that
this parameter is only necessary for columns stored as TEXT in Excel,
any numeric columns will automatically be parsed, regardless of display
format.
comment : str, default None
Comments out remainder of line. Pass a character or characters to this
argument to indicate comments in the input file. Any data between the
comment string and the end of the current line is ignored.
skipfooter : int, default 0
Rows at the end to skip (0-indexed).
convert_float : bool, default True
Convert integral floats to int (i.e., 1.0 --> 1). If False, all numeric
data will be read in as floats: Excel stores all numbers as floats
internally.
mangle_dupe_cols : bool, default True
Duplicate columns will be specified as 'X', 'X.1', ...'X.N', rather than
'X'...'X'. Passing in False will cause data to be overwritten if there
are duplicate names in the columns.
**kwds : optional
Optional keyword arguments can be passed to ``TextFileReader``.
Returns
-------
DataFrame or dict of DataFrames
DataFrame from the passed in Excel file. See notes in sheet_name
argument for more information on when a dict of DataFrames is returned.
See Also
--------
DataFrame.to_excel : Write DataFrame to an Excel file.
DataFrame.to_csv : Write DataFrame to a comma-separated values (csv) file.
read_csv : Read a comma-separated values (csv) file into DataFrame.
Examples
--------
The file can be read using the file name as string or an open file object:
>>> ks.read_excel('tmp.xlsx', index_col=0) # doctest: +SKIP
Name Value
0 string1 1
1 string2 2
2 #Comment 3
>>> ks.read_excel(open('tmp.xlsx', 'rb'),
... sheet_name='Sheet3') # doctest: +SKIP
Unnamed: 0 Name Value
0 0 string1 1
1 1 string2 2
2 2 #Comment 3
Index and header can be specified via the `index_col` and `header` arguments
>>> ks.read_excel('tmp.xlsx', index_col=None, header=None) # doctest: +SKIP
0 1 2
0 NaN Name Value
1 0.0 string1 1
2 1.0 string2 2
3 2.0 #Comment 3
Column types are inferred but can be explicitly specified
>>> ks.read_excel('tmp.xlsx', index_col=0,
... dtype={'Name': str, 'Value': float}) # doctest: +SKIP
Name Value
0 string1 1.0
1 string2 2.0
2 #Comment 3.0
True, False, and NA values, and thousands separators have defaults,
but can be explicitly specified, too. Supply the values you would like
as strings or lists of strings!
>>> ks.read_excel('tmp.xlsx', index_col=0,
... na_values=['string1', 'string2']) # doctest: +SKIP
Name Value
0 None 1
1 None 2
2 #Comment 3
Comment lines in the excel input file can be skipped using the `comment` kwarg
>>> ks.read_excel('tmp.xlsx', index_col=0, comment='#') # doctest: +SKIP
Name Value
0 string1 1.0
1 string2 2.0
2 None NaN
"""
pdfs = pd.read_excel(
io=io,
sheet_name=sheet_name,
header=header,
names=names,
index_col=index_col,
usecols=usecols,
squeeze=squeeze,
dtype=dtype,
engine=engine,
converters=converters,
true_values=true_values,
false_values=false_values,
skiprows=skiprows,
nrows=nrows,
na_values=na_values,
keep_default_na=keep_default_na,
verbose=verbose,
parse_dates=parse_dates,
date_parser=date_parser,
thousands=thousands,
comment=comment,
skipfooter=skipfooter,
convert_float=convert_float,
mangle_dupe_cols=mangle_dupe_cols,
**kwds
)
if isinstance(pdfs, dict):
return OrderedDict([(key, from_pandas(value)) for key, value in pdfs.items()])
else:
return from_pandas(pdfs)
def read_html(
io,
match=".+",
flavor=None,
header=None,
index_col=None,
skiprows=None,
attrs=None,
parse_dates=False,
thousands=",",
encoding=None,
decimal=".",
converters=None,
na_values=None,
keep_default_na=True,
displayed_only=True,
):
r"""Read HTML tables into a ``list`` of ``DataFrame`` objects.
Parameters
----------
io : str or file-like
A URL, a file-like object, or a raw string containing HTML. Note that
lxml only accepts the http, ftp and file url protocols. If you have a
URL that starts with ``'https'`` you might try removing the ``'s'``.
match : str or compiled regular expression, optional
The set of tables containing text matching this regex or string will be
returned. Unless the HTML is extremely simple you will probably need to
pass a non-empty string here. Defaults to '.+' (match any non-empty
string). The default value will return all tables contained on a page.
This value is converted to a regular expression so that there is
consistent behavior between Beautiful Soup and lxml.
flavor : str or None, container of strings
The parsing engine to use. 'bs4' and 'html5lib' are synonymous with
each other, they are both there for backwards compatibility. The
default of ``None`` tries to use ``lxml`` to parse and if that fails it
falls back on ``bs4`` + ``html5lib``.
header : int or list-like or None, optional
The row (or list of rows for a :class:`~ks.MultiIndex`) to use to
make the columns headers.
index_col : int or list-like or None, optional
The column (or list of columns) to use to create the index.
skiprows : int or list-like or slice or None, optional
0-based. Number of rows to skip after parsing the column integer. If a
sequence of integers or a slice is given, will skip the rows indexed by
that sequence. Note that a single element sequence means 'skip the nth
row' whereas an integer means 'skip n rows'.
attrs : dict or None, optional
This is a dictionary of attributes that you can pass to use to identify
the table in the HTML. These are not checked for validity before being
passed to lxml or Beautiful Soup. However, these attributes must be
valid HTML table attributes to work correctly. For example, ::
attrs = {'id': 'table'}
is a valid attribute dictionary because the 'id' HTML tag attribute is
a valid HTML attribute for *any* HTML tag as per `this document
<http://www.w3.org/TR/html-markup/global-attributes.html>`__. ::
attrs = {'asdf': 'table'}
is *not* a valid attribute dictionary because 'asdf' is not a valid
HTML attribute even if it is a valid XML attribute. Valid HTML 4.01
table attributes can be found `here
<http://www.w3.org/TR/REC-html40/struct/tables.html#h-11.2>`__. A
working draft of the HTML 5 spec can be found `here
<http://www.w3.org/TR/html-markup/table.html>`__. It contains the
latest information on table attributes for the modern web.
parse_dates : bool, optional
See :func:`~ks.read_csv` for more details.
thousands : str, optional
Separator to use to parse thousands. Defaults to ``','``.
encoding : str or None, optional
The encoding used to decode the web page. Defaults to ``None``.``None``
preserves the previous encoding behavior, which depends on the
underlying parser library (e.g., the parser library will try to use
the encoding provided by the document).
decimal : str, default '.'
Character to recognize as decimal point (e.g. use ',' for European
data).
converters : dict, default None
Dict of functions for converting values in certain columns. Keys can
either be integers or column labels, values are functions that take one
input argument, the cell (not column) content, and return the
transformed content.
na_values : iterable, default None
Custom NA values
keep_default_na : bool, default True
If na_values are specified and keep_default_na is False the default NaN
values are overridden, otherwise they're appended to
displayed_only : bool, default True
Whether elements with "display: none" should be parsed
Returns
-------
dfs : list of DataFrames
See Also
--------
read_csv
DataFrame.to_html
"""
pdfs = pd.read_html(
io=io,
match=match,
flavor=flavor,
header=header,
index_col=index_col,
skiprows=skiprows,
attrs=attrs,
parse_dates=parse_dates,
thousands=thousands,
encoding=encoding,
decimal=decimal,
converters=converters,
na_values=na_values,
keep_default_na=keep_default_na,
displayed_only=displayed_only,
)
return [from_pandas(pdf) for pdf in pdfs]
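# Illustrative usage sketch (the URL and match string are hypothetical):
# >>> dfs = ks.read_html("https://example.com/stats.html", match="Population")
# >>> dfs[0].head()  # doctest: +SKIP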
# TODO: add `coerce_float` and 'parse_dates' parameters
def read_sql_table(table_name, con, schema=None, index_col=None, columns=None, **options):
"""
Read SQL database table into a DataFrame.
Given a table name and a JDBC URI, returns a DataFrame.
Parameters
----------
table_name : str
Name of SQL table in database.
con : str
A JDBC URI could be provided as a str.
.. note:: The URI must be JDBC URI instead of Python's database URI.
schema : str, default None
Name of SQL schema in database to query (if database flavor
supports this). Uses default schema if None (default).
index_col : str or list of str, optional, default: None
Column(s) to set as index(MultiIndex).
columns : list, default None
List of column names to select from SQL table.
options : dict
All other options passed directly into Spark's JDBC data source.
Returns
-------
DataFrame
A SQL table is returned as a two-dimensional data structure with labeled
axes.
See Also
--------
read_sql_query : Read SQL query into a DataFrame.
read_sql : Read SQL query or database table into a DataFrame.
Examples
--------
>>> ks.read_sql_table('table_name', 'jdbc:postgresql:db_name') # doctest: +SKIP
"""
if "options" in options and isinstance(options.get("options"), dict) and len(options) == 1:
options = options.get("options") # type: ignore
reader = default_session().read
reader.option("dbtable", table_name)
reader.option("url", con)
if schema is not None:
reader.schema(schema)
reader.options(**options)
sdf = reader.format("jdbc").load()
index_map = _get_index_map(sdf, index_col)
kdf = DataFrame(_InternalFrame(spark_frame=sdf, index_map=index_map))
if columns is not None:
if isinstance(columns, str):
columns = [columns]
kdf = kdf[columns]
return kdf
# TODO: add `coerce_float`, `params`, and 'parse_dates' parameters
def read_sql_query(sql, con, index_col=None, **options):
"""Read SQL query into a DataFrame.
Returns a DataFrame corresponding to the result set of the query
string. Optionally provide an `index_col` parameter to use one of the
columns as the index, otherwise default index will be used.
.. note:: Some databases might hit the Spark issue SPARK-27596.
Parameters
----------
sql : string SQL query
SQL query to be executed.
con : str
A JDBC URI could be provided as a str.
.. note:: The URI must be JDBC URI instead of Python's database URI.
index_col : string or list of strings, optional, default: None
Column(s) to set as index(MultiIndex).
options : dict
All other options passed directly into Spark's JDBC data source.
Returns
-------
DataFrame
See Also
--------
read_sql_table : Read SQL database table into a DataFrame.
read_sql
Examples
--------
>>> ks.read_sql_query('SELECT * FROM table_name', 'jdbc:postgresql:db_name') # doctest: +SKIP
"""
if "options" in options and isinstance(options.get("options"), dict) and len(options) == 1:
options = options.get("options") # type: ignore
reader = default_session().read
reader.option("query", sql)
reader.option("url", con)
reader.options(**options)
sdf = reader.format("jdbc").load()
index_map = _get_index_map(sdf, index_col)
return DataFrame(_InternalFrame(spark_frame=sdf, index_map=index_map))
# TODO: add `coerce_float`, `params`, and 'parse_dates' parameters
def read_sql(sql, con, index_col=None, columns=None, **options):
"""
Read SQL query or database table into a DataFrame.
This function is a convenience wrapper around ``read_sql_table`` and
``read_sql_query`` (for backward compatibility). It will delegate
to the specific function depending on the provided input. A SQL query
will be routed to ``read_sql_query``, while a database table name will
be routed to ``read_sql_table``. Note that the delegated function might
have more specific notes about their functionality not listed here.
.. note:: Some databases might hit the Spark issue SPARK-27596.
Parameters
----------
sql : string
SQL query to be executed or a table name.
con : str
A JDBC URI could be provided as a str.
.. note:: The URI must be JDBC URI instead of Python's database URI.
index_col : string or list of strings, optional, default: None
Column(s) to set as index(MultiIndex).
columns : list, default: None
List of column names to select from SQL table (only used when reading
a table).
options : dict
All other options passed directly into Spark's JDBC data source.
Returns
-------
DataFrame
See Also
--------
read_sql_table : Read SQL database table into a DataFrame.
read_sql_query : Read SQL query into a DataFrame.
Examples
--------
>>> ks.read_sql('table_name', 'jdbc:postgresql:db_name') # doctest: +SKIP
>>> ks.read_sql('SELECT * FROM table_name', 'jdbc:postgresql:db_name') # doctest: +SKIP
"""
if "options" in options and isinstance(options.get("options"), dict) and len(options) == 1:
options = options.get("options") # type: ignore
striped = sql.strip()
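# Heuristic: a bare identifier with no whitespace is treated as a table name and
# routed to read_sql_table; anything containing whitespace is treated as a query.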
if " " not in striped: # TODO: identify the table name or not more precisely.
return read_sql_table(sql, con, index_col=index_col, columns=columns, **options)
else:
return read_sql_query(sql, con, index_col=index_col, **options)
def to_datetime(
arg, errors="raise", format=None, unit=None, infer_datetime_format=False, origin="unix"
):
"""
Convert argument to datetime.
Parameters
----------
arg : integer, float, string, datetime, list, tuple, 1-d array, Series
or DataFrame/dict-like
errors : {'ignore', 'raise', 'coerce'}, default 'raise'
- If 'raise', then invalid parsing will raise an exception
- If 'coerce', then invalid parsing will be set as NaT
- If 'ignore', then invalid parsing will return the input
format : string, default None
strftime to parse time, eg "%d/%m/%Y", note that "%f" will parse
all the way up to nanoseconds.
unit : string, default None
unit of the arg (D,s,ms,us,ns) denote the unit, which is an
integer or float number. This will be based off the origin.
Example, with unit='ms' and origin='unix' (the default), this
would calculate the number of milliseconds to the unix epoch start.
infer_datetime_format : boolean, default False
If True and no `format` is given, attempt to infer the format of the
datetime strings, and if it can be inferred, switch to a faster
method of parsing them. In some cases this can increase the parsing
speed by ~5-10x.
origin : scalar, default 'unix'
Define the reference date. The numeric values would be parsed as number
of units (defined by `unit`) since this reference date.
- If 'unix' (or POSIX) time; origin is set to 1970-01-01.
- If 'julian', unit must be 'D', and origin is set to beginning of
Julian Calendar. Julian day number 0 is assigned to the day starting
at noon on January 1, 4713 BC.
- If Timestamp convertible, origin is set to Timestamp identified by
origin.
Returns
-------
ret : datetime if parsing succeeded.
Return type depends on input:
- list-like: DatetimeIndex
- Series: Series of datetime64 dtype
- scalar: Timestamp
In case when it is not possible to return designated types (e.g. when
any element of input is before Timestamp.min or after Timestamp.max)
return will have datetime.datetime type (or corresponding
array/Series).
Examples
--------
Assembling a datetime from multiple columns of a DataFrame. The keys can be
common abbreviations like ['year', 'month', 'day', 'minute', 'second',
'ms', 'us', 'ns'] or plurals of the same
>>> df = ks.DataFrame({'year': [2015, 2016],
... 'month': [2, 3],
... 'day': [4, 5]})
>>> ks.to_datetime(df)
0 2015-02-04
1 2016-03-05
Name: 0, dtype: datetime64[ns]
If a date does not meet the `timestamp limitations
<http://pandas.pydata.org/pandas-docs/stable/timeseries.html
#timeseries-timestamp-limits>`_, passing errors='ignore'
will return the original input instead of raising any exception.
Passing errors='coerce' will force an out-of-bounds date to NaT,
in addition to forcing non-dates (or non-parseable dates) to NaT.
>>> ks.to_datetime('13000101', format='%Y%m%d', errors='ignore')
datetime.datetime(1300, 1, 1, 0, 0)
>>> ks.to_datetime('13000101', format='%Y%m%d', errors='coerce')
NaT
Passing infer_datetime_format=True can often speed up parsing
if it is not exactly in ISO8601 format, but is in a regular format.
>>> s = ks.Series(['3/11/2000', '3/12/2000', '3/13/2000'] * 1000)
>>> s.head()
0 3/11/2000
1 3/12/2000
2 3/13/2000
3 3/11/2000
4 3/12/2000
Name: 0, dtype: object
>>> import timeit
>>> timeit.timeit(
... lambda: repr(ks.to_datetime(s, infer_datetime_format=True)),
... number = 1) # doctest: +SKIP
0.35832712500000063
>>> timeit.timeit(
... lambda: repr(ks.to_datetime(s, infer_datetime_format=False)),
... number = 1) # doctest: +SKIP
0.8895321660000004
Using a unix epoch time
>>> ks.to_datetime(1490195805, unit='s')
Timestamp('2017-03-22 15:16:45')
>>> ks.to_datetime(1490195805433502912, unit='ns')
Timestamp('2017-03-22 15:16:45.433502912')
Using a non-unix epoch origin
>>> ks.to_datetime([1, 2, 3], unit='D', origin=pd.Timestamp('1960-01-01'))
DatetimeIndex(['1960-01-02', '1960-01-03', '1960-01-04'], dtype='datetime64[ns]', freq=None)
"""
def pandas_to_datetime(pser_or_pdf) -> Series[np.datetime64]:
if isinstance(pser_or_pdf, pd.DataFrame):
pser_or_pdf = pser_or_pdf[["year", "month", "day"]]
return pd.to_datetime(
pser_or_pdf,
errors=errors,
format=format,
unit=unit,
infer_datetime_format=infer_datetime_format,
origin=origin,
)
if isinstance(arg, Series):
return arg.transform_batch(pandas_to_datetime)
if isinstance(arg, DataFrame):
kdf = arg[["year", "month", "day"]]
return kdf.transform_batch(pandas_to_datetime)
return pd.to_datetime(
arg,
errors=errors,
format=format,
unit=unit,
infer_datetime_format=infer_datetime_format,
origin=origin,
)
def get_dummies(
data,
prefix=None,
prefix_sep="_",
dummy_na=False,
columns=None,
sparse=False,
drop_first=False,
dtype=None,
):
"""
Convert categorical variable into dummy/indicator variables, also
known as one hot encoding.
Parameters
----------
data : array-like, Series, or DataFrame
prefix : string, list of strings, or dict of strings, default None
String to append to DataFrame column names.
Pass a list with length equal to the number of columns
when calling get_dummies on a DataFrame. Alternatively, `prefix`
can be a dictionary mapping column names to prefixes.
prefix_sep : string, default '_'
If appending prefix, separator/delimiter to use. Or pass a
list or dictionary as with `prefix`.
dummy_na : bool, default False
Add a column to indicate NaNs, if False NaNs are ignored.
columns : list-like, default None
Column names in the DataFrame to be encoded.
If `columns` is None then all the columns with
`object` or `category` dtype will be converted.
sparse : bool, default False
Whether the dummy-encoded columns should be backed by
a :class:`SparseArray` (True) or a regular NumPy array (False).
In Koalas, this value must be "False".
drop_first : bool, default False
Whether to get k-1 dummies out of k categorical levels by removing the
first level.
dtype : dtype, default np.uint8
Data type for new columns. Only a single dtype is allowed.
Returns
-------
dummies : DataFrame
See Also
--------
Series.str.get_dummies
Examples
--------
>>> s = ks.Series(list('abca'))
>>> ks.get_dummies(s)
a b c
0 1 0 0
1 0 1 0
2 0 0 1
3 1 0 0
>>> df = ks.DataFrame({'A': ['a', 'b', 'a'], 'B': ['b', 'a', 'c'],
... 'C': [1, 2, 3]},
... columns=['A', 'B', 'C'])
>>> ks.get_dummies(df, prefix=['col1', 'col2'])
C col1_a col1_b col2_a col2_b col2_c
0 1 1 0 0 1 0
1 2 0 1 1 0 0
2 3 1 0 0 0 1
>>> ks.get_dummies(ks.Series(list('abcaa')))
a b c
0 1 0 0
1 0 1 0
2 0 0 1
3 1 0 0
4 1 0 0
>>> ks.get_dummies(ks.Series(list('abcaa')), drop_first=True)
b c
0 0 0
1 1 0
2 0 1
3 0 0
4 0 0
>>> ks.get_dummies(ks.Series(list('abc')), dtype=float)
a b c
0 1.0 0.0 0.0
1 0.0 1.0 0.0
2 0.0 0.0 1.0
"""
if sparse is not False:
raise NotImplementedError("get_dummies currently does not support sparse")
if columns is not None:
if not is_list_like(columns):
raise TypeError("Input must be a list-like for parameter `columns`")
if dtype is None:
dtype = "byte"
if isinstance(data, Series):
if prefix is not None:
prefix = [str(prefix)]
column_labels = [(data.name,)]
kdf = data.to_dataframe()
remaining_columns = []
else:
if isinstance(prefix, str):
raise NotImplementedError(
"get_dummies currently does not support prefix as string types"
)
kdf = data.copy()
if columns is None:
column_labels = [
label
for label in kdf._internal.column_labels
if isinstance(
kdf._internal.spark_type_for(label), _get_dummies_default_accept_types
)
]
else:
if isinstance(columns, (str, tuple)):
if isinstance(columns, str):
key = (columns,)
else:
key = columns
column_labels = [
label for label in kdf._internal.column_labels if label[: len(key)] == key
]
if len(column_labels) == 0:
raise KeyError(column_labels)
if prefix is None:
prefix = [
str(label[len(key) :])
if len(label) > len(key) + 1
else label[len(key)]
if len(label) == len(key) + 1
else ""
for label in column_labels
]
elif any(isinstance(col, str) for col in columns) and any(
isinstance(col, tuple) for col in columns
):
raise ValueError("Expected tuple, got str")
else:
column_labels = [
label
for key in columns
for label in kdf._internal.column_labels
if label == key or label[0] == key
]
if len(column_labels) == 0:
if columns is None:
return kdf
raise KeyError("{} not in index".format(columns))
if prefix is None:
prefix = [str(label) if len(label) > 1 else label[0] for label in column_labels]
column_labels_set = set(column_labels)
remaining_columns = [
kdf[label].rename(name_like_string(label))
for label in kdf._internal.column_labels
if label not in column_labels_set
]
if any(
not isinstance(kdf._internal.spark_type_for(label), _get_dummies_acceptable_types)
for label in column_labels
):
raise NotImplementedError(
"get_dummies currently only accept {} values".format(
", ".join([t.typeName() for t in _get_dummies_acceptable_types])
)
)
if prefix is not None and len(column_labels) != len(prefix):
raise ValueError(
"Length of 'prefix' ({}) did not match the length of "
"the columns being encoded ({}).".format(len(prefix), len(column_labels))
)
all_values = _reduce_spark_multi(
kdf._sdf, [F.collect_set(kdf._internal.spark_column_for(label)) for label in column_labels]
)
for i, label in enumerate(column_labels):
values = sorted(all_values[i])
if drop_first:
values = values[1:]
def column_name(value):
if prefix is None or prefix[i] == "":
return str(value)
else:
return "{}{}{}".format(prefix[i], prefix_sep, value)
for value in values:
remaining_columns.append(
(kdf[label].notnull() & (kdf[label] == value))
.astype(dtype)
.rename(column_name(value))
)
if dummy_na:
remaining_columns.append(kdf[label].isnull().astype(dtype).rename(column_name("nan")))
return kdf[remaining_columns]
# TODO: there are many parameters to implement and support. See Pandas's pd.concat.
def concat(objs, axis=0, join="outer", ignore_index=False):
"""
Concatenate pandas objects along a particular axis with optional set logic
along the other axes.
Parameters
----------
objs : a sequence of Series or DataFrame
Any None objects will be dropped silently unless
they are all None in which case a ValueError will be raised
axis : {0/'index', 1/'columns'}, default 0
The axis to concatenate along.
join : {'inner', 'outer'}, default 'outer'
How to handle indexes on other axis (or axes).
ignore_index : bool, default False
If True, do not use the index values along the concatenation axis. The
resulting axis will be labeled 0, ..., n - 1. This is useful if you are
concatenating objects where the concatenation axis does not have
meaningful indexing information. Note the index values on the other
axes are still respected in the join.
Returns
-------
object, type of objs
When concatenating all ``Series`` along the index (axis=0), a
``Series`` is returned. When ``objs`` contains at least one
``DataFrame``, a ``DataFrame`` is returned. When concatenating along
the columns (axis=1), a ``DataFrame`` is returned.
See Also
--------
Series.append : Concatenate Series.
DataFrame.join : Join DataFrames using indexes.
DataFrame.merge : Merge DataFrames by indexes or columns.
Examples
--------
Combine two ``Series``.
>>> s1 = ks.Series(['a', 'b'])
>>> s2 = ks.Series(['c', 'd'])
>>> ks.concat([s1, s2])
0 a
1 b
0 c
1 d
Name: 0, dtype: object
Clear the existing index and reset it in the result
by setting the ``ignore_index`` option to ``True``.
>>> ks.concat([s1, s2], ignore_index=True)
0 a
1 b
2 c
3 d
Name: 0, dtype: object
Combine two ``DataFrame`` objects with identical columns.
>>> df1 = ks.DataFrame([['a', 1], ['b', 2]],
... columns=['letter', 'number'])
>>> df1
letter number
0 a 1
1 b 2
>>> df2 = ks.DataFrame([['c', 3], ['d', 4]],
... columns=['letter', 'number'])
>>> df2
letter number
0 c 3
1 d 4
>>> ks.concat([df1, df2])
letter number
0 a 1
1 b 2
0 c 3
1 d 4
Combine ``DataFrame`` and ``Series`` objects with different columns.
>>> ks.concat([df2, s1, s2])
0 letter number
0 None c 3.0
1 None d 4.0
0 a None NaN
1 b None NaN
0 c None NaN
1 d None NaN
Combine ``DataFrame`` objects with overlapping columns
and return everything. Columns outside the intersection will
be filled with ``None`` values.
>>> df3 = ks.DataFrame([['c', 3, 'cat'], ['d', 4, 'dog']],
... columns=['letter', 'number', 'animal'])
>>> df3
letter number animal
0 c 3 cat
1 d 4 dog
>>> ks.concat([df1, df3])
animal letter number
0 None a 1
1 None b 2
0 cat c 3
1 dog d 4
Combine ``DataFrame`` objects with overlapping columns
and return only those that are shared by passing ``inner`` to
the ``join`` keyword argument.
>>> ks.concat([df1, df3], join="inner")
letter number
0 a 1
1 b 2
0 c 3
1 d 4
>>> df4 = ks.DataFrame([['bird', 'polly'], ['monkey', 'george']],
... columns=['animal', 'name'])
Combine with column axis.
>>> ks.concat([df1, df4], axis=1)
letter number animal name
0 a 1 bird polly
1 b 2 monkey george
"""
if isinstance(objs, (DataFrame, IndexOpsMixin)) or not isinstance(
objs, Iterable
): # TODO: support dict
raise TypeError(
"first argument must be an iterable of koalas "
"objects, you passed an object of type "
'"{name}"'.format(name=type(objs).__name__)
)
if len(objs) == 0:
raise ValueError("No objects to concatenate")
objs = list(filter(lambda obj: obj is not None, objs))
if len(objs) == 0:
raise ValueError("All objects passed were None")
for obj in objs:
if not isinstance(obj, (Series, DataFrame)):
raise TypeError(
"cannot concatenate object of type "
"'{name}"
"; only ks.Series "
"and ks.DataFrame are valid".format(name=type(objs).__name__)
)
axis = validate_axis(axis)
if axis == 1:
if isinstance(objs[0], ks.Series):
concat_kdf = objs[0].to_frame()
else:
concat_kdf = objs[0]
with ks.option_context("compute.ops_on_diff_frames", True):
def resolve_func(kdf, this_column_labels, that_column_labels):
duplicated_names = set(
this_column_label[1:] for this_column_label in this_column_labels
).intersection(
set(that_column_label[1:] for that_column_label in that_column_labels)
)
assert (
len(duplicated_names) > 0
), "inner or full join type does not include non-common columns"
pretty_names = [name_like_string(column_label) for column_label in duplicated_names]
raise ValueError(
"Labels have to be unique; however, got " "duplicated labels %s." % pretty_names
)
for kser_or_kdf in objs[1:]:
if isinstance(kser_or_kdf, Series):
# TODO: there is a corner case to optimize - when the series are from
# the same DataFrame.
that_kdf = kser_or_kdf.to_frame()
else:
that_kdf = kser_or_kdf
this_index_level = concat_kdf._internal.column_labels_level
that_index_level = that_kdf._internal.column_labels_level
if this_index_level > that_index_level:
concat_kdf = that_kdf._index_normalized_frame(concat_kdf)
if this_index_level < that_index_level:
that_kdf = concat_kdf._index_normalized_frame(that_kdf)
if join == "inner":
concat_kdf = align_diff_frames(
resolve_func, concat_kdf, that_kdf, fillna=False, how="inner",
)
elif join == "outer":
concat_kdf = align_diff_frames(
resolve_func, concat_kdf, that_kdf, fillna=False, how="full",
)
else:
raise ValueError(
"Only can inner (intersect) or outer (union) join the other axis."
)
if ignore_index:
concat_kdf.columns = list(map(str, _range(len(concat_kdf.columns))))
return concat_kdf
# Series, Series ...
# We should return Series if objects are all Series.
should_return_series = all(map(lambda obj: isinstance(obj, Series), objs))
# DataFrame, Series ... & Series, Series ...
# In this case, we should return DataFrame.
new_objs = []
for obj in objs:
if isinstance(obj, Series):
obj = obj.rename("0").to_dataframe()
new_objs.append(obj)
objs = new_objs
column_labels_levels = set(obj._internal.column_labels_level for obj in objs)
if len(column_labels_levels) != 1:
raise ValueError("MultiIndex columns should have the same levels")
# DataFrame, DataFrame, ...
# All Series are converted into DataFrame and then compute concat.
if not ignore_index:
indices_of_kdfs = [kdf.index for kdf in objs]
index_of_first_kdf = indices_of_kdfs[0]
for index_of_kdf in indices_of_kdfs:
if index_of_first_kdf.names != index_of_kdf.names:
raise ValueError(
"Index type and names should be same in the objects to concatenate. "
"You passed different indices "
"{index_of_first_kdf} and {index_of_kdf}".format(
index_of_first_kdf=index_of_first_kdf.names, index_of_kdf=index_of_kdf.names
)
)
column_labelses_of_kdfs = [kdf._internal.column_labels for kdf in objs]
if ignore_index:
index_names_of_kdfs = [[] for _ in objs]
else:
index_names_of_kdfs = [kdf._internal.index_names for kdf in objs]
if all(name == index_names_of_kdfs[0] for name in index_names_of_kdfs) and all(
idx == column_labelses_of_kdfs[0] for idx in column_labelses_of_kdfs
):
# If all columns are in the same order and values, use it.
kdfs = objs
merged_columns = column_labelses_of_kdfs[0]
else:
if join == "inner":
interested_columns = set.intersection(*map(set, column_labelses_of_kdfs))
            # Keep the column order of the first DataFrame.
merged_columns = sorted(
list(
map(
lambda c: column_labelses_of_kdfs[0][column_labelses_of_kdfs[0].index(c)],
interested_columns,
)
)
)
kdfs = [kdf[merged_columns] for kdf in objs]
elif join == "outer":
# If there are columns unmatched, just sort the column names.
merged_columns = sorted(
list(set(itertools.chain.from_iterable(column_labelses_of_kdfs)))
)
kdfs = []
for kdf in objs:
columns_to_add = list(set(merged_columns) - set(kdf._internal.column_labels))
# TODO: NaN and None difference for missing values. pandas seems filling NaN.
sdf = kdf._sdf
for label in columns_to_add:
sdf = sdf.withColumn(name_like_string(label), F.lit(None))
data_columns = kdf._internal.data_spark_column_names + [
name_like_string(label) for label in columns_to_add
]
kdf = DataFrame(
kdf._internal.copy(
spark_frame=sdf,
column_labels=(kdf._internal.column_labels + columns_to_add),
data_spark_columns=[scol_for(sdf, col) for col in data_columns],
)
)
kdfs.append(kdf[merged_columns])
else:
raise ValueError("Only can inner (intersect) or outer (union) join the other axis.")
if ignore_index:
sdfs = [kdf._sdf.select(kdf._internal.data_spark_columns) for kdf in kdfs]
else:
sdfs = [
kdf._sdf.select(kdf._internal.index_spark_columns + kdf._internal.data_spark_columns)
for kdf in kdfs
]
concatenated = reduce(lambda x, y: x.union(y), sdfs)
index_map = None if ignore_index else kdfs[0]._internal.index_map
result_kdf = DataFrame(
kdfs[0]._internal.copy(
spark_frame=concatenated,
index_map=index_map,
data_spark_columns=[
scol_for(concatenated, col) for col in kdfs[0]._internal.data_spark_column_names
],
)
)
if should_return_series:
# If all input were Series, we should return Series.
return _col(result_kdf)
else:
return result_kdf
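# Editor's note (illustrative sketch): for axis=0 the code above aligns columns
# before the Spark union -- 'inner' keeps the sorted intersection of the column
# labels, 'outer' keeps the sorted union and fills the missing columns with
# nulls. A minimal pure-Python rendering of that column-resolution rule, using
# hypothetical column lists:
def _resolve_concat_columns_example(column_lists, join="outer"):
    if join == "inner":
        return sorted(set.intersection(*map(set, column_lists)))
    elif join == "outer":
        return sorted(set().union(*column_lists))
    raise ValueError("Only can inner (intersect) or outer (union) join the other axis.")
# _resolve_concat_columns_example([["letter", "number"], ["animal", "letter", "number"]])
# -> ["animal", "letter", "number"]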
def melt(frame, id_vars=None, value_vars=None, var_name=None, value_name="value"):
return DataFrame.melt(frame, id_vars, value_vars, var_name, value_name)
melt.__doc__ = DataFrame.melt.__doc__
def isna(obj):
"""
Detect missing values for an array-like object.
This function takes a scalar or array-like object and indicates
whether values are missing (``NaN`` in numeric arrays, ``None`` or ``NaN``
in object arrays).
Parameters
----------
obj : scalar or array-like
Object to check for null or missing values.
Returns
-------
bool or array-like of bool
For scalar input, returns a scalar boolean.
For array input, returns an array of boolean indicating whether each
corresponding element is missing.
See Also
--------
notnull : Boolean inverse of pandas.isnull.
Series.isna : Detect missing values in a Series.
Series.isnull : Detect missing values in a Series.
DataFrame.isna : Detect missing values in a DataFrame.
DataFrame.isnull : Detect missing values in a DataFrame.
Index.isna : Detect missing values in an Index.
Index.isnull : Detect missing values in an Index.
Examples
--------
Scalar arguments (including strings) result in a scalar boolean.
>>> ks.isna('dog')
False
>>> ks.isna(np.nan)
True
ndarrays result in an ndarray of booleans.
>>> array = np.array([[1, np.nan, 3], [4, 5, np.nan]])
>>> array
array([[ 1., nan, 3.],
[ 4., 5., nan]])
>>> ks.isna(array)
array([[False, True, False],
[False, False, True]])
For Series and DataFrame, the same type is returned, containing booleans.
>>> df = ks.DataFrame({'a': ['ant', 'bee', 'cat'], 'b': ['dog', None, 'fly']})
>>> df
a b
0 ant dog
1 bee None
2 cat fly
>>> ks.isna(df)
a b
0 False False
1 False True
2 False False
>>> ks.isnull(df.b)
0 False
1 True
2 False
Name: b, dtype: bool
"""
if isinstance(obj, (DataFrame, Series)):
return obj.isnull()
else:
return pd.isnull(obj)
isnull = isna
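# Editor's note (illustrative sketch): isna()/isnull() is a thin dispatcher --
# koalas objects answer via their own isnull(), anything else falls back to
# pandas, e.g. pd.isnull(np.nan) -> True and pd.isnull('dog') -> False.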
def notna(obj):
"""
Detect existing (non-missing) values.
Return a boolean same-sized object indicating if the values are not NA.
Non-missing values get mapped to True. NA values, such as None or
:attr:`numpy.NaN`, get mapped to False values.
Returns
-------
bool or array-like of bool
Mask of bool values for each element that
indicates whether an element is not an NA value.
See Also
--------
isna : Detect missing values for an array-like object.
Series.notna : Boolean inverse of Series.isna.
    Series.notnull : Boolean inverse of Series.isnull.
    DataFrame.notna : Boolean inverse of DataFrame.isna.
DataFrame.notnull : Boolean inverse of DataFrame.isnull.
Index.notna : Boolean inverse of Index.isna.
Index.notnull : Boolean inverse of Index.isnull.
Examples
--------
Show which entries in a DataFrame are not NA.
>>> df = ks.DataFrame({'age': [5, 6, np.NaN],
... 'born': [pd.NaT, pd.Timestamp('1939-05-27'),
... pd.Timestamp('1940-04-25')],
... 'name': ['Alfred', 'Batman', ''],
... 'toy': [None, 'Batmobile', 'Joker']})
>>> df
age born name toy
0 5.0 NaT Alfred None
1 6.0 1939-05-27 Batman Batmobile
2 NaN 1940-04-25 Joker
>>> df.notnull()
age born name toy
0 True False True False
1 True True True True
2 False True True True
Show which entries in a Series are not NA.
>>> ser = ks.Series([5, 6, np.NaN])
>>> ser
0 5.0
1 6.0
2 NaN
Name: 0, dtype: float64
>>> ks.notna(ser)
0 True
1 True
2 False
Name: 0, dtype: bool
>>> ks.notna(ser.index)
True
"""
if isinstance(obj, (DataFrame, Series)):
return obj.notna()
else:
return pd.notna(obj)
notnull = notna
def merge(
obj,
right: "DataFrame",
how: str = "inner",
on: Union[str, List[str], Tuple[str, ...], List[Tuple[str, ...]]] = None,
left_on: Union[str, List[str], Tuple[str, ...], List[Tuple[str, ...]]] = None,
right_on: Union[str, List[str], Tuple[str, ...], List[Tuple[str, ...]]] = None,
left_index: bool = False,
right_index: bool = False,
suffixes: Tuple[str, str] = ("_x", "_y"),
) -> "DataFrame":
"""
Merge DataFrame objects with a database-style join.
The index of the resulting DataFrame will be one of the following:
- 0...n if no index is used for merging
- Index of the left DataFrame if merged only on the index of the right DataFrame
- Index of the right DataFrame if merged only on the index of the left DataFrame
- All involved indices if merged using the indices of both DataFrames
e.g. if `left` with indices (a, x) and `right` with indices (b, x), the result will
be an index (x, a, b)
Parameters
----------
right: Object to merge with.
how: Type of merge to be performed.
{'left', 'right', 'outer', 'inner'}, default 'inner'
left: use only keys from left frame, similar to a SQL left outer join; preserve key
order.
right: use only keys from right frame, similar to a SQL right outer join; preserve key
order.
outer: use union of keys from both frames, similar to a SQL full outer join; sort keys
lexicographically.
inner: use intersection of keys from both frames, similar to a SQL inner join;
preserve the order of the left keys.
on: Column or index level names to join on. These must be found in both DataFrames. If on
is None and not merging on indexes then this defaults to the intersection of the
columns in both DataFrames.
left_on: Column or index level names to join on in the left DataFrame. Can also
be an array or list of arrays of the length of the left DataFrame.
These arrays are treated as if they are columns.
right_on: Column or index level names to join on in the right DataFrame. Can also
be an array or list of arrays of the length of the right DataFrame.
These arrays are treated as if they are columns.
left_index: Use the index from the left DataFrame as the join key(s). If it is a
MultiIndex, the number of keys in the other DataFrame (either the index or a number of
columns) must match the number of levels.
right_index: Use the index from the right DataFrame as the join key. Same caveats as
left_index.
suffixes: Suffix to apply to overlapping column names in the left and right side,
respectively.
Returns
-------
DataFrame
A DataFrame of the two merged objects.
Examples
--------
>>> df1 = ks.DataFrame({'lkey': ['foo', 'bar', 'baz', 'foo'],
... 'value': [1, 2, 3, 5]},
... columns=['lkey', 'value'])
>>> df2 = ks.DataFrame({'rkey': ['foo', 'bar', 'baz', 'foo'],
... 'value': [5, 6, 7, 8]},
... columns=['rkey', 'value'])
>>> df1
lkey value
0 foo 1
1 bar 2
2 baz 3
3 foo 5
>>> df2
rkey value
0 foo 5
1 bar 6
2 baz 7
3 foo 8
Merge df1 and df2 on the lkey and rkey columns. The value columns have
the default suffixes, _x and _y, appended.
>>> merged = ks.merge(df1, df2, left_on='lkey', right_on='rkey')
>>> merged.sort_values(by=['lkey', 'value_x', 'rkey', 'value_y']) # doctest: +ELLIPSIS
lkey value_x rkey value_y
...bar 2 bar 6
...baz 3 baz 7
...foo 1 foo 5
...foo 1 foo 8
...foo 5 foo 5
...foo 5 foo 8
>>> left_kdf = ks.DataFrame({'A': [1, 2]})
>>> right_kdf = ks.DataFrame({'B': ['x', 'y']}, index=[1, 2])
>>> ks.merge(left_kdf, right_kdf, left_index=True, right_index=True).sort_index()
A B
1 2 x
>>> ks.merge(left_kdf, right_kdf, left_index=True, right_index=True, how='left').sort_index()
A B
0 1 None
1 2 x
>>> ks.merge(left_kdf, right_kdf, left_index=True, right_index=True, how='right').sort_index()
A B
1 2.0 x
2 NaN y
>>> ks.merge(left_kdf, right_kdf, left_index=True, right_index=True, how='outer').sort_index()
A B
0 1.0 None
1 2.0 x
2 NaN y
Notes
-----
As described in #263, joining string columns currently returns None for missing values
instead of NaN.
"""
return obj.merge(
right,
how=how,
on=on,
left_on=left_on,
right_on=right_on,
left_index=left_index,
right_index=right_index,
suffixes=suffixes,
)
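# Editor's note (illustrative sketch): as in the docstring example above, when
# both frames share a non-key column the suffixes pair is appended ('value'
# becomes 'value_x'/'value_y'). A minimal pure-Python rendering of that naming
# rule, with hypothetical column lists:
def _apply_merge_suffixes_example(left_cols, right_cols, key_cols, suffixes=("_x", "_y")):
    overlap = (set(left_cols) & set(right_cols)) - set(key_cols)
    left = [c + suffixes[0] if c in overlap else c for c in left_cols]
    right = [c + suffixes[1] if c in overlap else c for c in right_cols]
    return left, right
# _apply_merge_suffixes_example(['lkey', 'value'], ['rkey', 'value'], [])
# -> (['lkey', 'value_x'], ['rkey', 'value_y'])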
def to_numeric(arg):
"""
Convert argument to a numeric type.
Parameters
----------
arg : scalar, list, tuple, 1-d array, or Series
Returns
-------
ret : numeric if parsing succeeded.
See Also
--------
DataFrame.astype : Cast argument to a specified dtype.
to_datetime : Convert argument to datetime.
to_timedelta : Convert argument to timedelta.
numpy.ndarray.astype : Cast a numpy array to a specified type.
Examples
--------
>>> kser = ks.Series(['1.0', '2', '-3'])
>>> kser
0 1.0
1 2
2 -3
Name: 0, dtype: object
>>> ks.to_numeric(kser)
0 1.0
1 2.0
2 -3.0
Name: 0, dtype: float32
    If the given Series contains values that cannot be cast to float, they are cast to `np.nan`
>>> kser = ks.Series(['apple', '1.0', '2', '-3'])
>>> kser
0 apple
1 1.0
2 2
3 -3
Name: 0, dtype: object
>>> ks.to_numeric(kser)
0 NaN
1 1.0
2 2.0
3 -3.0
Name: 0, dtype: float32
Also support for list, tuple, np.array, or a scalar
>>> ks.to_numeric(['1.0', '2', '-3'])
array([ 1., 2., -3.])
>>> ks.to_numeric(('1.0', '2', '-3'))
array([ 1., 2., -3.])
>>> ks.to_numeric(np.array(['1.0', '2', '-3']))
array([ 1., 2., -3.])
>>> ks.to_numeric('1.0')
1.0
"""
if isinstance(arg, Series):
return arg._with_new_scol(arg._internal.spark_column.cast("float"))
else:
return pd.to_numeric(arg)
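# Editor's note (illustrative sketch): the Series branch above relies on Spark's
# CAST(... AS FLOAT), which yields NULL for unparseable strings -- hence the NaN
# for 'apple' in the docstring example, and the float32 result dtype. Roughly
# equivalent Spark SQL:
#   SELECT CAST('1.0' AS FLOAT) AS ok, CAST('apple' AS FLOAT) AS bad
#   -- ok = 1.0, bad = NULL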
def broadcast(obj):
"""
Marks a DataFrame as small enough for use in broadcast joins.
Parameters
----------
obj : DataFrame
Returns
-------
ret : DataFrame with broadcast hint.
See Also
--------
DataFrame.merge : Merge DataFrame objects with a database-style join.
DataFrame.join : Join columns of another DataFrame.
DataFrame.update : Modify in place using non-NA values from another DataFrame.
DataFrame.hint : Specifies some hint on the current DataFrame.
Examples
--------
>>> df1 = ks.DataFrame({'lkey': ['foo', 'bar', 'baz', 'foo'],
... 'value': [1, 2, 3, 5]},
... columns=['lkey', 'value'])
>>> df2 = ks.DataFrame({'rkey': ['foo', 'bar', 'baz', 'foo'],
... 'value': [5, 6, 7, 8]},
... columns=['rkey', 'value'])
>>> merged = df1.merge(ks.broadcast(df2), left_on='lkey', right_on='rkey')
>>> merged.explain() # doctest: +ELLIPSIS
== Physical Plan ==
...
...BroadcastHashJoin...
...
"""
if not isinstance(obj, DataFrame):
raise ValueError("Invalid type : expected DataFrame got {}".format(type(obj)))
return DataFrame(obj._internal.with_new_sdf(F.broadcast(obj._sdf)))
def _get_index_map(sdf: spark.DataFrame, index_col: Optional[Union[str, List[str]]] = None):
if index_col is not None:
if isinstance(index_col, str):
index_col = [index_col]
sdf_columns = set(sdf.columns)
for col in index_col:
if col not in sdf_columns:
raise KeyError(col)
index_map = OrderedDict((col, (col,)) for col in index_col)
else:
index_map = None # type: ignore
return index_map
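# Editor's note (illustrative sketch): for index_col=['a', 'b'] the helper above
# returns OrderedDict([('a', ('a',)), ('b', ('b',))]), i.e. each requested Spark
# column mapped to a single-level index label; with index_col=None it returns None.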
_get_dummies_default_accept_types = (DecimalType, StringType, DateType)
_get_dummies_acceptable_types = _get_dummies_default_accept_types + (
ByteType,
ShortType,
IntegerType,
LongType,
FloatType,
DoubleType,
BooleanType,
TimestampType,
)
| 1 | 15,039 | Can you handle error cases such as `pd.get_dummies(pdf, prefix={"A": "foo"})`? | databricks-koalas | py |
@@ -166,8 +166,12 @@ func (agent *TestAgent) StartAgent() error {
Links: agent.Options.ContainerLinks,
}
+ if os.Getenv("ECS_FTEST_FORCE_NET_HOST") != "" {
+ hostConfig.NetworkMode = "host"
+ }
+
if agent.Options != nil {
- // Override the default docker envrionment variable
+ // Override the default docker environment variable
for key, value := range agent.Options.ExtraEnvironment {
envVarExists := false
for i, str := range dockerConfig.Env { | 1 | // +build !windows,functional
// Copyright 2014-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may
// not use this file except in compliance with the License. A copy of the
// License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed
// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
// express or implied. See the License for the specific language governing
// permissions and limitations under the License.
package util
import (
"errors"
"io/ioutil"
"os"
"path/filepath"
"strings"
"testing"
"github.com/aws/amazon-ecs-agent/agent/ec2"
"github.com/aws/amazon-ecs-agent/agent/ecs_client/model/ecs"
"github.com/aws/amazon-ecs-agent/agent/utils"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/session"
docker "github.com/fsouza/go-dockerclient"
)
const (
defaultExecDriverPath = "/var/run/docker/execdriver"
logdir = "/log"
datadir = "/data"
ExecDriverDir = "/var/lib/docker/execdriver"
defaultCgroupPath = "/cgroup"
cacheDirectory = "/var/cache/ecs"
configDirectory = "/etc/ecs"
readOnly = ":ro"
dockerEndpoint = "/var/run/docker.sock"
)
var ECS *ecs.ECS
var Cluster string
func init() {
var ecsconfig aws.Config
if region := os.Getenv("AWS_REGION"); region != "" {
		ecsconfig.Region = &region
}
if region := os.Getenv("AWS_DEFAULT_REGION"); region != "" {
		ecsconfig.Region = &region
}
if ecsconfig.Region == nil {
if iid, err := ec2.NewEC2MetadataClient(nil).InstanceIdentityDocument(); err == nil {
ecsconfig.Region = &iid.Region
}
}
if envEndpoint := os.Getenv("ECS_BACKEND_HOST"); envEndpoint != "" {
ecsconfig.Endpoint = &envEndpoint
}
ECS = ecs.New(session.New(&ecsconfig))
Cluster = "ecs-functional-tests"
if envCluster := os.Getenv("ECS_CLUSTER"); envCluster != "" {
Cluster = envCluster
}
ECS.CreateCluster(&ecs.CreateClusterInput{
ClusterName: aws.String(Cluster),
})
}
// RunAgent launches the agent and returns an object which may be used to reference it.
// It will wait until the agent is correctly registered before returning.
// 'version' may be a docker image (e.g. amazon/amazon-ecs-agent:v1.0.0) with
// tag that may be used to run the agent. It defaults to
// 'amazon/amazon-ecs-agent:make', the version created locally by running
// 'make'
func RunAgent(t *testing.T, options *AgentOptions) *TestAgent {
agent := &TestAgent{t: t}
agentImage := "amazon/amazon-ecs-agent:make"
if envImage := os.Getenv("ECS_AGENT_IMAGE"); envImage != "" {
agentImage = envImage
}
agent.Image = agentImage
dockerClient, err := docker.NewClientFromEnv()
if err != nil {
t.Fatal(err)
}
agent.DockerClient = dockerClient
_, err = dockerClient.InspectImage(agentImage)
if err != nil {
err = dockerClient.PullImage(docker.PullImageOptions{Repository: agentImage}, docker.AuthConfiguration{})
if err != nil {
t.Fatal("Could not launch agent", err)
}
}
tmpdirOverride := os.Getenv("ECS_FTEST_TMP")
agentTempdir, err := ioutil.TempDir(tmpdirOverride, "ecs_integ_testdata")
if err != nil {
t.Fatal("Could not create temp dir for test")
}
logdir := filepath.Join(agentTempdir, "log")
datadir := filepath.Join(agentTempdir, "data")
os.Mkdir(logdir, 0755)
os.Mkdir(datadir, 0755)
agent.TestDir = agentTempdir
agent.Options = options
if options == nil {
agent.Options = &AgentOptions{}
}
t.Logf("Created directory %s to store test data in", agentTempdir)
err = agent.StartAgent()
if err != nil {
t.Fatal(err)
}
return agent
}
func (agent *TestAgent) StopAgent() error {
return agent.DockerClient.StopContainer(agent.DockerID, 10)
}
func (agent *TestAgent) StartAgent() error {
agent.t.Logf("Launching agent with image: %s\n", agent.Image)
dockerConfig := &docker.Config{
Image: agent.Image,
ExposedPorts: map[docker.Port]struct{}{
"51678/tcp": {},
},
Env: []string{
"ECS_CLUSTER=" + Cluster,
"ECS_DATADIR=/data",
"ECS_HOST_DATA_DIR=" + agent.TestDir,
"ECS_LOGLEVEL=debug",
"ECS_LOGFILE=/log/integ_agent.log",
"ECS_BACKEND_HOST=" + os.Getenv("ECS_BACKEND_HOST"),
"AWS_ACCESS_KEY_ID=" + os.Getenv("AWS_ACCESS_KEY_ID"),
"AWS_DEFAULT_REGION=" + *ECS.Config.Region,
"AWS_SECRET_ACCESS_KEY=" + os.Getenv("AWS_SECRET_ACCESS_KEY"),
"ECS_ENGINE_TASK_CLEANUP_WAIT_DURATION=" + os.Getenv("ECS_ENGINE_TASK_CLEANUP_WAIT_DURATION"),
},
Cmd: strings.Split(os.Getenv("ECS_FTEST_AGENT_ARGS"), " "),
}
// Append ECS_INSTANCE_ATTRIBUTES to dockerConfig
if attr := os.Getenv("ECS_INSTANCE_ATTRIBUTES"); attr != "" {
dockerConfig.Env = append(dockerConfig.Env, "ECS_INSTANCE_ATTRIBUTES="+attr)
}
binds := agent.getBindMounts()
hostConfig := &docker.HostConfig{
Binds: binds,
PortBindings: map[docker.Port][]docker.PortBinding{
"51678/tcp": {{HostIP: "0.0.0.0"}},
},
Links: agent.Options.ContainerLinks,
}
if agent.Options != nil {
// Override the default docker envrionment variable
for key, value := range agent.Options.ExtraEnvironment {
envVarExists := false
for i, str := range dockerConfig.Env {
if strings.HasPrefix(str, key+"=") {
dockerConfig.Env[i] = key + "=" + value
envVarExists = true
break
}
}
if !envVarExists {
dockerConfig.Env = append(dockerConfig.Env, key+"="+value)
}
}
for key, value := range agent.Options.PortBindings {
hostConfig.PortBindings[key] = []docker.PortBinding{{HostIP: value["HostIP"], HostPort: value["HostPort"]}}
dockerConfig.ExposedPorts[key] = struct{}{}
}
}
agentContainer, err := agent.DockerClient.CreateContainer(docker.CreateContainerOptions{
Config: dockerConfig,
HostConfig: hostConfig,
})
if err != nil {
agent.t.Fatal("Could not create agent container", err)
}
agent.DockerID = agentContainer.ID
agent.t.Logf("Agent started as docker container: %s\n", agentContainer.ID)
err = agent.DockerClient.StartContainer(agentContainer.ID, nil)
if err != nil {
return errors.New("Could not start agent container " + err.Error())
}
containerMetadata, err := agent.DockerClient.InspectContainer(agentContainer.ID)
if err != nil {
return errors.New("Could not inspect agent container: " + err.Error())
}
agent.IntrospectionURL = "http://localhost:" + containerMetadata.NetworkSettings.Ports["51678/tcp"][0].HostPort
return agent.verifyIntrospectionAPI()
}
// getBindMounts actually constructs volume binds for container's host config
// It additionally checks the following environment variables:
// * CGROUP_PATH: the cgroup path
// * EXECDRIVER_PATH: the path of metrics
func (agent *TestAgent) getBindMounts() []string {
var binds []string
cgroupPath := utils.DefaultIfBlank(os.Getenv("CGROUP_PATH"), defaultCgroupPath)
cgroupBind := cgroupPath + ":" + cgroupPath + readOnly
binds = append(binds, cgroupBind)
execdriverPath := utils.DefaultIfBlank(os.Getenv("EXECDRIVER_PATH"), defaultExecDriverPath)
execdriverBind := execdriverPath + ":" + ExecDriverDir + readOnly
binds = append(binds, execdriverBind)
hostLogDir := filepath.Join(agent.TestDir, "log")
hostDataDir := filepath.Join(agent.TestDir, "data")
hostConfigDir := filepath.Join(agent.TestDir, "config")
hostCacheDir := filepath.Join(agent.TestDir, "cache")
agent.Logdir = hostLogDir
binds = append(binds, hostLogDir+":"+logdir)
binds = append(binds, hostDataDir+":"+datadir)
binds = append(binds, dockerEndpoint+":"+dockerEndpoint)
binds = append(binds, hostConfigDir+":"+configDirectory)
binds = append(binds, hostCacheDir+":"+cacheDirectory)
return binds
}
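// Editor's note (illustrative sketch, not part of the original file): the bind
// mounts above honour optional environment overrides. A hypothetical invocation
// overriding them when running the functional tests might look like:
//
//	CGROUP_PATH=/sys/fs/cgroup EXECDRIVER_PATH=/var/run/docker/execdriver \
//		ECS_FTEST_TMP=/tmp/ecs-ftest go test -tags functional ./...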
func (agent *TestAgent) Cleanup() {
agent.platformIndependentCleanup()
}
| 1 | 17,946 | Where is this environment variable being set? | aws-amazon-ecs-agent | go |
@@ -22,9 +22,7 @@ class Registry(object):
module (:obj:`nn.Module`): Module to be registered.
"""
if not issubclass(module_class, nn.Module):
- raise TypeError(
- 'module must be a child of nn.Module, but got {}'.format(
- type(module_class)))
+ raise TypeError(f'module must be a child of nn.Module, but got {module_class}')
module_name = module_class.__name__
if module_name in self._module_dict:
raise KeyError('{} is already registered in {}'.format( | 1 | import torch.nn as nn
class Registry(object):
def __init__(self, name):
self._name = name
self._module_dict = dict()
@property
def name(self):
return self._name
@property
def module_dict(self):
return self._module_dict
def _register_module(self, module_class):
"""Register a module.
Args:
module (:obj:`nn.Module`): Module to be registered.
"""
if not issubclass(module_class, nn.Module):
raise TypeError(
'module must be a child of nn.Module, but got {}'.format(
type(module_class)))
module_name = module_class.__name__
if module_name in self._module_dict:
raise KeyError('{} is already registered in {}'.format(
module_name, self.name))
self._module_dict[module_name] = module_class
def register_module(self, cls):
self._register_module(cls)
return cls
BACKBONES = Registry('backbone')
NECKS = Registry('neck')
ROI_EXTRACTORS = Registry('roi_extractor')
HEADS = Registry('head')
DETECTORS = Registry('detector')
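# Editor's note (illustrative sketch): typical usage of the registries above --
# decorate an nn.Module subclass with register_module and it becomes
# retrievable by class name (names here are hypothetical):
#
#   @BACKBONES.register_module
#   class MyBackbone(nn.Module):
#       def forward(self, x):
#           return x
#
#   BACKBONES.module_dict['MyBackbone']  # -> <class 'MyBackbone'>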
| 1 | 17,143 | `module_class ` is already a class and `type(module_class)` always returns `<class 'type'>` | open-mmlab-mmdetection | py |
@@ -110,6 +110,17 @@ class SparkFileWriterFactory extends BaseFileWriterFactory<InternalRow> {
builder.createWriterFunc(SparkOrcWriter::new);
}
+ @Override
+ protected void configureEqualityDelete(ORC.DeleteWriteBuilder builder) {
+ builder.createWriterFunc((iSchema, typDesc) -> new SparkOrcWriter(iSchema, typDesc));
+ }
+
+ @Override
+ protected void configurePositionDelete(ORC.DeleteWriteBuilder builder) {
+ builder.createWriterFunc((iSchema, typDesc) -> new SparkOrcWriter(iSchema, typDesc));
+ builder.transformPaths(path -> UTF8String.fromString(path.toString()));
+ }
+
private StructType dataSparkType() {
if (dataSparkType == null) {
Preconditions.checkNotNull(dataSchema(), "Data schema must not be null"); | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iceberg.spark.source;
import java.util.Locale;
import java.util.Map;
import org.apache.iceberg.FileFormat;
import org.apache.iceberg.Schema;
import org.apache.iceberg.SortOrder;
import org.apache.iceberg.Table;
import org.apache.iceberg.avro.Avro;
import org.apache.iceberg.data.BaseFileWriterFactory;
import org.apache.iceberg.io.DeleteSchemaUtil;
import org.apache.iceberg.orc.ORC;
import org.apache.iceberg.parquet.Parquet;
import org.apache.iceberg.relocated.com.google.common.base.Preconditions;
import org.apache.iceberg.spark.SparkSchemaUtil;
import org.apache.iceberg.spark.data.SparkAvroWriter;
import org.apache.iceberg.spark.data.SparkOrcWriter;
import org.apache.iceberg.spark.data.SparkParquetWriters;
import org.apache.spark.sql.catalyst.InternalRow;
import org.apache.spark.sql.types.StructField;
import org.apache.spark.sql.types.StructType;
import org.apache.spark.unsafe.types.UTF8String;
import static org.apache.iceberg.MetadataColumns.DELETE_FILE_ROW_FIELD_NAME;
import static org.apache.iceberg.TableProperties.DEFAULT_FILE_FORMAT;
import static org.apache.iceberg.TableProperties.DEFAULT_FILE_FORMAT_DEFAULT;
import static org.apache.iceberg.TableProperties.DELETE_DEFAULT_FILE_FORMAT;
class SparkFileWriterFactory extends BaseFileWriterFactory<InternalRow> {
private StructType dataSparkType;
private StructType equalityDeleteSparkType;
private StructType positionDeleteSparkType;
SparkFileWriterFactory(Table table, FileFormat dataFileFormat, Schema dataSchema, StructType dataSparkType,
SortOrder dataSortOrder, FileFormat deleteFileFormat,
int[] equalityFieldIds, Schema equalityDeleteRowSchema, StructType equalityDeleteSparkType,
SortOrder equalityDeleteSortOrder, Schema positionDeleteRowSchema,
StructType positionDeleteSparkType) {
super(table, dataFileFormat, dataSchema, dataSortOrder, deleteFileFormat, equalityFieldIds,
equalityDeleteRowSchema, equalityDeleteSortOrder, positionDeleteRowSchema);
this.dataSparkType = dataSparkType;
this.equalityDeleteSparkType = equalityDeleteSparkType;
this.positionDeleteSparkType = positionDeleteSparkType;
}
static Builder builderFor(Table table) {
return new Builder(table);
}
@Override
protected void configureDataWrite(Avro.DataWriteBuilder builder) {
builder.createWriterFunc(ignored -> new SparkAvroWriter(dataSparkType()));
}
@Override
protected void configureEqualityDelete(Avro.DeleteWriteBuilder builder) {
builder.createWriterFunc(ignored -> new SparkAvroWriter(equalityDeleteSparkType()));
}
@Override
protected void configurePositionDelete(Avro.DeleteWriteBuilder builder) {
boolean withRow = positionDeleteSparkType().getFieldIndex(DELETE_FILE_ROW_FIELD_NAME).isDefined();
if (withRow) {
// SparkAvroWriter accepts just the Spark type of the row ignoring the path and pos
StructField rowField = positionDeleteSparkType().apply(DELETE_FILE_ROW_FIELD_NAME);
StructType positionDeleteRowSparkType = (StructType) rowField.dataType();
builder.createWriterFunc(ignored -> new SparkAvroWriter(positionDeleteRowSparkType));
}
}
@Override
protected void configureDataWrite(Parquet.DataWriteBuilder builder) {
builder.createWriterFunc(msgType -> SparkParquetWriters.buildWriter(dataSparkType(), msgType));
}
@Override
protected void configureEqualityDelete(Parquet.DeleteWriteBuilder builder) {
builder.createWriterFunc(msgType -> SparkParquetWriters.buildWriter(equalityDeleteSparkType(), msgType));
}
@Override
protected void configurePositionDelete(Parquet.DeleteWriteBuilder builder) {
builder.createWriterFunc(msgType -> SparkParquetWriters.buildWriter(positionDeleteSparkType(), msgType));
builder.transformPaths(path -> UTF8String.fromString(path.toString()));
}
@Override
protected void configureDataWrite(ORC.DataWriteBuilder builder) {
builder.createWriterFunc(SparkOrcWriter::new);
}
private StructType dataSparkType() {
if (dataSparkType == null) {
Preconditions.checkNotNull(dataSchema(), "Data schema must not be null");
this.dataSparkType = SparkSchemaUtil.convert(dataSchema());
}
return dataSparkType;
}
private StructType equalityDeleteSparkType() {
if (equalityDeleteSparkType == null) {
Preconditions.checkNotNull(equalityDeleteRowSchema(), "Equality delete schema must not be null");
this.equalityDeleteSparkType = SparkSchemaUtil.convert(equalityDeleteRowSchema());
}
return equalityDeleteSparkType;
}
private StructType positionDeleteSparkType() {
if (positionDeleteSparkType == null) {
// wrap the optional row schema into the position delete schema that contains path and position
Schema positionDeleteSchema = DeleteSchemaUtil.posDeleteSchema(positionDeleteRowSchema());
this.positionDeleteSparkType = SparkSchemaUtil.convert(positionDeleteSchema);
}
return positionDeleteSparkType;
}
static class Builder {
private final Table table;
private FileFormat dataFileFormat;
private Schema dataSchema;
private StructType dataSparkType;
private SortOrder dataSortOrder;
private FileFormat deleteFileFormat;
private int[] equalityFieldIds;
private Schema equalityDeleteRowSchema;
private StructType equalityDeleteSparkType;
private SortOrder equalityDeleteSortOrder;
private Schema positionDeleteRowSchema;
private StructType positionDeleteSparkType;
Builder(Table table) {
this.table = table;
Map<String, String> properties = table.properties();
String dataFileFormatName = properties.getOrDefault(DEFAULT_FILE_FORMAT, DEFAULT_FILE_FORMAT_DEFAULT);
this.dataFileFormat = FileFormat.valueOf(dataFileFormatName.toUpperCase(Locale.ENGLISH));
String deleteFileFormatName = properties.getOrDefault(DELETE_DEFAULT_FILE_FORMAT, dataFileFormatName);
this.deleteFileFormat = FileFormat.valueOf(deleteFileFormatName.toUpperCase(Locale.ENGLISH));
}
Builder dataFileFormat(FileFormat newDataFileFormat) {
this.dataFileFormat = newDataFileFormat;
return this;
}
Builder dataSchema(Schema newDataSchema) {
this.dataSchema = newDataSchema;
return this;
}
Builder dataSparkType(StructType newDataSparkType) {
this.dataSparkType = newDataSparkType;
return this;
}
Builder dataSortOrder(SortOrder newDataSortOrder) {
this.dataSortOrder = newDataSortOrder;
return this;
}
Builder deleteFileFormat(FileFormat newDeleteFileFormat) {
this.deleteFileFormat = newDeleteFileFormat;
return this;
}
Builder equalityFieldIds(int[] newEqualityFieldIds) {
this.equalityFieldIds = newEqualityFieldIds;
return this;
}
Builder equalityDeleteRowSchema(Schema newEqualityDeleteRowSchema) {
this.equalityDeleteRowSchema = newEqualityDeleteRowSchema;
return this;
}
Builder equalityDeleteSparkType(StructType newEqualityDeleteSparkType) {
this.equalityDeleteSparkType = newEqualityDeleteSparkType;
return this;
}
Builder equalityDeleteSortOrder(SortOrder newEqualityDeleteSortOrder) {
this.equalityDeleteSortOrder = newEqualityDeleteSortOrder;
return this;
}
Builder positionDeleteRowSchema(Schema newPositionDeleteRowSchema) {
this.positionDeleteRowSchema = newPositionDeleteRowSchema;
return this;
}
Builder positionDeleteSparkType(StructType newPositionDeleteSparkType) {
this.positionDeleteSparkType = newPositionDeleteSparkType;
return this;
}
SparkFileWriterFactory build() {
boolean noEqualityDeleteConf = equalityFieldIds == null && equalityDeleteRowSchema == null;
boolean fullEqualityDeleteConf = equalityFieldIds != null && equalityDeleteRowSchema != null;
Preconditions.checkArgument(noEqualityDeleteConf || fullEqualityDeleteConf,
"Equality field IDs and equality delete row schema must be set together");
return new SparkFileWriterFactory(
table, dataFileFormat, dataSchema, dataSparkType, dataSortOrder, deleteFileFormat,
equalityFieldIds, equalityDeleteRowSchema, equalityDeleteSparkType, equalityDeleteSortOrder,
positionDeleteRowSchema, positionDeleteSparkType);
}
}
}
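// Editor's note (illustrative sketch, not part of the original file): typical
// construction goes through the builder above; a hypothetical caller might do:
//
//   SparkFileWriterFactory writerFactory = SparkFileWriterFactory.builderFor(table)
//       .dataFileFormat(FileFormat.PARQUET)
//       .dataSchema(table.schema())
//       .dataSparkType(SparkSchemaUtil.convert(table.schema()))
//       .build();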
| 1 | 43,045 | Nit: The lambda can be replaced with a method reference: `SparkOrcWriter::new` I think. | apache-iceberg | java |
@@ -1265,6 +1265,13 @@ def is_number(obj):
else: return False
+def is_int(obj, int_like=False):
+ real_int = isinstance(obj, int) or getattr(getattr(obj, 'dtype', None), 'kind', 'o') in 'ui'
+ if real_int or (int_like and hasattr(obj, 'is_integer') and obj.is_integer()):
+ return True
+ return False
+
+
class ProgressIndicator(param.Parameterized):
"""
Baseclass for any ProgressIndicator that indicates progress | 1 | import sys, warnings, operator
import json
import time
import types
import numbers
import inspect
import itertools
import string
import unicodedata
import datetime as dt
from collections import defaultdict
from contextlib import contextmanager
from distutils.version import LooseVersion as _LooseVersion
from functools import partial
from threading import Thread, Event
from types import FunctionType
import numpy as np
import param
try:
from cyordereddict import OrderedDict
except:
from collections import OrderedDict
# Python3 compatibility
if sys.version_info.major >= 3:
import builtins as builtins # noqa (compatibility)
if sys.version_info.minor > 3:
from collections.abc import Iterable # noqa (compatibility)
else:
from collections import Iterable # noqa (compatibility)
basestring = str
unicode = str
long = int
cmp = lambda a, b: (a>b)-(a<b)
generator_types = (zip, range, types.GeneratorType)
RecursionError = RecursionError if sys.version_info.minor > 4 else RuntimeError # noqa
_getargspec = inspect.getfullargspec
get_keywords = operator.attrgetter('varkw')
LooseVersion = _LooseVersion
else:
import __builtin__ as builtins # noqa (compatibility)
from collections import Iterable # noqa (compatibility)
basestring = basestring
unicode = unicode
from itertools import izip
generator_types = (izip, xrange, types.GeneratorType) # noqa
RecursionError = RuntimeError
_getargspec = inspect.getargspec
get_keywords = operator.attrgetter('keywords')
class LooseVersion(_LooseVersion):
"""
Subclassed to avoid unicode issues in python2
"""
def __init__ (self, vstring=None):
if isinstance(vstring, unicode):
vstring = str(vstring)
self.parse(vstring)
def __cmp__(self, other):
if isinstance(other, unicode):
other = str(other)
if isinstance(other, basestring):
other = LooseVersion(other)
return cmp(self.version, other.version)
numpy_version = LooseVersion(np.__version__)
param_version = LooseVersion(param.__version__)
datetime_types = (np.datetime64, dt.datetime, dt.date, dt.time)
timedelta_types = (np.timedelta64, dt.timedelta,)
arraylike_types = (np.ndarray,)
try:
import pandas as pd
except ImportError:
pd = None
if pd:
pandas_version = LooseVersion(pd.__version__)
try:
if pandas_version >= '0.24.0':
from pandas.core.dtypes.dtypes import DatetimeTZDtype as DatetimeTZDtypeType
from pandas.core.dtypes.generic import ABCSeries, ABCIndexClass
elif pandas_version > '0.20.0':
from pandas.core.dtypes.dtypes import DatetimeTZDtypeType
from pandas.core.dtypes.generic import ABCSeries, ABCIndexClass
else:
from pandas.types.dtypes import DatetimeTZDtypeType
from pandas.types.dtypes.generic import ABCSeries, ABCIndexClass
pandas_datetime_types = (pd.Timestamp, DatetimeTZDtypeType, pd.Period)
pandas_timedelta_types = (pd.Timedelta,)
datetime_types = datetime_types + pandas_datetime_types
timedelta_types = timedelta_types + pandas_timedelta_types
arraylike_types = arraylike_types + (ABCSeries, ABCIndexClass)
if pandas_version > '0.23.0':
from pandas.core.dtypes.generic import ABCExtensionArray
arraylike_types = arraylike_types + (ABCExtensionArray,)
except Exception as e:
param.main.warning('pandas could not register all extension types '
'imports failed with the following error: %s' % e)
try:
import cftime
cftime_types = (cftime.datetime,)
datetime_types += cftime_types
except:
cftime_types = ()
_STANDARD_CALENDARS = set(['standard', 'gregorian', 'proleptic_gregorian'])
class VersionError(Exception):
"Raised when there is a library version mismatch."
def __init__(self, msg, version=None, min_version=None, **kwargs):
self.version = version
self.min_version = min_version
super(VersionError, self).__init__(msg, **kwargs)
class Config(param.ParameterizedFunction):
"""
Set of boolean configuration values to change HoloViews' global
behavior. Typically used to control warnings relating to
deprecations or set global parameter such as style 'themes'.
"""
future_deprecations = param.Boolean(default=False, doc="""
Whether to warn about future deprecations""")
image_rtol = param.Number(default=10e-4, doc="""
The tolerance used to enforce regular sampling for regular,
gridded data where regular sampling is expected. Expressed as the
maximal allowable sampling difference between sample
locations.""")
no_padding = param.Boolean(default=False, doc="""
Disable default padding (introduced in 1.13.0).""")
warn_options_call = param.Boolean(default=True, doc="""
Whether to warn when the deprecated __call__ options syntax is
used (the opts method should now be used instead). It is
recommended that users switch this on to update any uses of
__call__ as it will be deprecated in future.""")
def __call__(self, **params):
self.param.set_param(**params)
return self
config = Config()
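# Editor's note (illustrative sketch): Config is a ParameterizedFunction, so the
# module-level instance above is reconfigured by calling it, e.g.:
#   config(future_deprecations=True, image_rtol=1e-3)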
class HashableJSON(json.JSONEncoder):
"""
Extends JSONEncoder to generate a hashable string for as many types
of object as possible including nested objects and objects that are
not normally hashable. The purpose of this class is to generate
unique strings that once hashed are suitable for use in memoization
and other cases where deep equality must be tested without storing
the entire object.
By default JSONEncoder supports booleans, numbers, strings, lists,
tuples and dictionaries. In order to support other types such as
sets, datetime objects and mutable objects such as pandas Dataframes
or numpy arrays, HashableJSON has to convert these types to
datastructures that can normally be represented as JSON.
Support for other object types may need to be introduced in
future. By default, unrecognized object types are represented by
their id.
One limitation of this approach is that dictionaries with composite
keys (e.g tuples) are not supported due to the JSON spec.
"""
string_hashable = (dt.datetime,)
repr_hashable = ()
def default(self, obj):
if isinstance(obj, set):
return hash(frozenset(obj))
elif isinstance(obj, np.ndarray):
return obj.tolist()
if pd and isinstance(obj, (pd.Series, pd.DataFrame)):
return obj.to_csv(header=True).encode('utf-8')
elif isinstance(obj, self.string_hashable):
return str(obj)
elif isinstance(obj, self.repr_hashable):
return repr(obj)
try:
return hash(obj)
except:
return id(obj)
def merge_option_dicts(old_opts, new_opts):
"""
Update the old_opts option dictionary with the options defined in
new_opts. Instead of a shallow update as would be performed by calling
old_opts.update(new_opts), this updates the dictionaries of all option
types separately.
Given two dictionaries
old_opts = {'a': {'x': 'old', 'y': 'old'}}
and
new_opts = {'a': {'y': 'new', 'z': 'new'}, 'b': {'k': 'new'}}
this returns a dictionary
{'a': {'x': 'old', 'y': 'new', 'z': 'new'}, 'b': {'k': 'new'}}
"""
merged = dict(old_opts)
for option_type, options in new_opts.items():
if option_type not in merged:
merged[option_type] = {}
merged[option_type].update(options)
return merged
def merge_options_to_dict(options):
"""
Given a collection of Option objects or partial option dictionaries,
merge everything to a single dictionary.
"""
merged_options = {}
for obj in options:
if isinstance(obj,dict):
new_opts = obj
else:
new_opts = {obj.key: obj.kwargs}
merged_options = merge_option_dicts(merged_options, new_opts)
return merged_options
def deprecated_opts_signature(args, kwargs):
"""
Utility to help with the deprecation of the old .opts method signature
Returns whether opts.apply_groups should be used (as a bool) and the
corresponding options.
"""
from .options import Options
groups = set(Options._option_groups)
opts = {kw for kw in kwargs if kw != 'clone'}
apply_groups = False
options = None
new_kwargs = {}
if len(args) > 0 and isinstance(args[0], dict):
apply_groups = True
if (not set(args[0]).issubset(groups) and
all(isinstance(v, dict) and not set(v).issubset(groups)
for v in args[0].values())):
apply_groups = False
elif set(args[0].keys()) <= groups:
new_kwargs = args[0]
else:
options = args[0]
elif opts and opts.issubset(set(groups)):
apply_groups = True
elif kwargs.get('options', None) is not None:
apply_groups = True
elif not args and not kwargs:
apply_groups = True
return apply_groups, options, new_kwargs
class periodic(Thread):
"""
Run a callback count times with a given period without blocking.
If count is None, will run till timeout (which may be forever if None).
"""
def __init__(self, period, count, callback, timeout=None, block=False):
if isinstance(count, int):
if count < 0: raise ValueError('Count value must be positive')
elif not type(count) is type(None):
raise ValueError('Count value must be a positive integer or None')
if block is False and count is None and timeout is None:
raise ValueError('When using a non-blocking thread, please specify '
'either a count or a timeout')
super(periodic, self).__init__()
self.period = period
self.callback = callback
self.count = count
self.counter = 0
self.block = block
self.timeout = timeout
self._completed = Event()
self._start_time = None
@property
def completed(self):
return self._completed.is_set()
def start(self):
self._start_time = time.time()
if self.block is False:
super(periodic,self).start()
else:
self.run()
def stop(self):
self.timeout = None
self._completed.set()
def __repr__(self):
return 'periodic(%s, %s, %s)' % (self.period,
self.count,
callable_name(self.callback))
def __str__(self):
return repr(self)
def run(self):
while not self.completed:
if self.block:
time.sleep(self.period)
else:
self._completed.wait(self.period)
self.counter += 1
try:
self.callback(self.counter)
except Exception:
self.stop()
if self.timeout is not None:
dt = (time.time() - self._start_time)
if dt > self.timeout:
self.stop()
if self.counter == self.count:
self.stop()
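# Editor's note (illustrative sketch): running a callback once a second, five
# times, without blocking the caller (the period and count here are hypothetical):
#   p = periodic(period=1, count=5, callback=lambda i: print(i))
#   p.start()
#   p.stop()      # may also be called early to cancel the remaining runs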
def deephash(obj):
"""
Given an object, return a hash using HashableJSON. This hash is not
architecture, Python version or platform independent.
"""
try:
return hash(json.dumps(obj, cls=HashableJSON, sort_keys=True))
except:
return None
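# Editor's note (illustrative sketch): deephash lets otherwise-unhashable
# containers participate in memoization by hashing their JSON form, e.g.:
#   deephash({'a': [1, 2, 3]}) == deephash({'a': [1, 2, 3]})   # -> True
#   deephash(np.arange(3)) is not None                         # arrays hash via tolist()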
def tree_attribute(identifier):
"""
Predicate that returns True for custom attributes added to AttrTrees
that are not methods, properties or internal attributes.
These custom attributes start with a capitalized character when
applicable (not applicable to underscore or certain unicode characters)
"""
if identifier[0].upper().isupper() is False and identifier[0] != '_':
return True
else:
return identifier[0].isupper()
def argspec(callable_obj):
"""
Returns an ArgSpec object for functions, staticmethods, instance
methods, classmethods and partials.
Note that the args list for instance and class methods are those as
seen by the user. In other words, the first argument which is
conventionally called 'self' or 'cls' is omitted in these cases.
"""
if (isinstance(callable_obj, type)
and issubclass(callable_obj, param.ParameterizedFunction)):
# Parameterized function.__call__ considered function in py3 but not py2
spec = _getargspec(callable_obj.__call__)
args = spec.args[1:]
elif inspect.isfunction(callable_obj): # functions and staticmethods
spec = _getargspec(callable_obj)
args = spec.args
elif isinstance(callable_obj, partial): # partials
arglen = len(callable_obj.args)
spec = _getargspec(callable_obj.func)
args = [arg for arg in spec.args[arglen:] if arg not in callable_obj.keywords]
elif inspect.ismethod(callable_obj): # instance and class methods
spec = _getargspec(callable_obj)
args = spec.args[1:]
else: # callable objects
return argspec(callable_obj.__call__)
return inspect.ArgSpec(args=args,
varargs=spec.varargs,
keywords=get_keywords(spec),
defaults=spec.defaults)
def validate_dynamic_argspec(callback, kdims, streams):
"""
Utility used by DynamicMap to ensure the supplied callback has an
appropriate signature.
If validation succeeds, returns a list of strings to be zipped with
the positional arguments i.e kdim values. The zipped values can then
be merged with the stream values to pass everything to the Callable
as keywords.
If the callbacks use *args, None is returned to indicate that kdim
values must be passed to the Callable by position. In this
situation, Callable passes *args and **kwargs directly to the
callback.
If the callback doesn't use **kwargs, the accepted keywords are
validated against the stream parameter names.
"""
argspec = callback.argspec
name = callback.name
kdims = [kdim.name for kdim in kdims]
stream_params = stream_parameters(streams)
defaults = argspec.defaults if argspec.defaults else []
all_posargs = argspec.args[:-len(defaults)] if defaults else argspec.args
# Filter out any posargs for streams
posargs = [arg for arg in all_posargs if arg not in stream_params]
kwargs = argspec.args[-len(defaults):]
if argspec.keywords is None:
unassigned_streams = set(stream_params) - set(argspec.args)
if unassigned_streams:
unassigned = ','.join(unassigned_streams)
raise KeyError('Callable {name!r} missing keywords to '
'accept stream parameters: {unassigned}'.format(name=name,
unassigned=unassigned))
if len(posargs) > len(kdims) + len(stream_params):
raise KeyError('Callable {name!r} accepts more positional arguments than '
'there are kdims and stream parameters'.format(name=name))
if kdims == []: # Can be no posargs, stream kwargs already validated
return []
if set(kdims) == set(posargs): # Posargs match exactly, can all be passed as kwargs
return kdims
elif len(posargs) == len(kdims): # Posargs match kdims length, supplying names
if argspec.args[:len(kdims)] != posargs:
raise KeyError('Unmatched positional kdim arguments only allowed at '
'the start of the signature of {name!r}'.format(name=name))
return posargs
elif argspec.varargs: # Posargs missing, passed to Callable directly
return None
elif set(posargs) - set(kdims):
raise KeyError('Callable {name!r} accepts more positional arguments {posargs} '
'than there are key dimensions {kdims}'.format(name=name,
posargs=posargs,
kdims=kdims))
elif set(kdims).issubset(set(kwargs)): # Key dims can be supplied by keyword
return kdims
elif set(kdims).issubset(set(posargs+kwargs)):
return kdims
else:
raise KeyError('Callback {name!r} signature over {names} does not accommodate '
'required kdims {kdims}'.format(name=name,
names=list(set(posargs+kwargs)),
kdims=kdims))
def callable_name(callable_obj):
"""
Attempt to return a meaningful name identifying a callable or generator
"""
try:
if (isinstance(callable_obj, type)
and issubclass(callable_obj, param.ParameterizedFunction)):
return callable_obj.__name__
elif (isinstance(callable_obj, param.Parameterized)
and 'operation' in callable_obj.param):
return callable_obj.operation.__name__
elif isinstance(callable_obj, partial):
return str(callable_obj)
elif inspect.isfunction(callable_obj): # functions and staticmethods
return callable_obj.__name__
elif inspect.ismethod(callable_obj): # instance and class methods
meth = callable_obj
if sys.version_info < (3,0):
owner = meth.im_class if meth.im_self is None else meth.im_self
else:
owner = meth.__self__
if meth.__name__ == '__call__':
return type(owner).__name__
return '.'.join([owner.__name__, meth.__name__])
elif isinstance(callable_obj, types.GeneratorType):
return callable_obj.__name__
else:
return type(callable_obj).__name__
except:
return str(callable_obj)
def process_ellipses(obj, key, vdim_selection=False):
"""
Helper function to pad a __getitem__ key with the right number of
empty slices (i.e :) when the key contains an Ellipsis (...).
If the vdim_selection flag is true, check if the end of the key
contains strings or Dimension objects in obj. If so, extra padding
will not be applied for the value dimensions (i.e the resulting key
will be exactly one longer than the number of kdims). Note: this
flag should not be used for composite types.
"""
if getattr(getattr(key, 'dtype', None), 'kind', None) == 'b':
return key
wrapped_key = wrap_tuple(key)
if wrapped_key.count(Ellipsis)== 0:
return key
if wrapped_key.count(Ellipsis)!=1:
raise Exception("Only one ellipsis allowed at a time.")
dim_count = len(obj.dimensions())
index = wrapped_key.index(Ellipsis)
head = wrapped_key[:index]
tail = wrapped_key[index+1:]
padlen = dim_count - (len(head) + len(tail))
if vdim_selection:
# If the end of the key (i.e the tail) is in vdims, pad to len(kdims)+1
if wrapped_key[-1] in obj.vdims:
padlen = (len(obj.kdims) +1 ) - len(head+tail)
return head + ((slice(None),) * padlen) + tail
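# Editor's note (illustrative sketch): for an object declaring three dimensions,
# the Ellipsis is padded out with empty slices, e.g. the key (0, Ellipsis)
# becomes (0, slice(None), slice(None)).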
def bytes_to_unicode(value):
"""
Safely casts bytestring to unicode
"""
if isinstance(value, bytes):
return unicode(value.decode('utf-8'))
return value
def get_method_owner(method):
"""
Gets the instance that owns the supplied method
"""
if isinstance(method, partial):
method = method.func
return method.__self__ if sys.version_info.major >= 3 else method.im_self
def capitalize_unicode_name(s):
"""
Turns a string such as 'capital delta' into the shortened,
capitalized version, in this case simply 'Delta'. Used as a
transform in sanitize_identifier.
"""
index = s.find('capital')
if index == -1: return s
tail = s[index:].replace('capital', '').strip()
tail = tail[0].upper() + tail[1:]
return s[:index] + tail
class sanitize_identifier_fn(param.ParameterizedFunction):
"""
Sanitizes group/label values for use in AttrTree attribute
access. Depending on the version parameter, either sanitization
    appropriate for Python 2 (no unicode in identifiers allowed) or
Python 3 (some unicode allowed) is used.
Note that if you are using Python 3, you can switch to version 2
for compatibility but you cannot enable relaxed sanitization if
you are using Python 2.
Special characters are sanitized using their (lowercase) unicode
name using the unicodedata module. For instance:
>>> unicodedata.name(u'$').lower()
'dollar sign'
As these names are often very long, this parameterized function
allows filtered, substitutions and transforms to help shorten these
names appropriately.
"""
version = param.ObjectSelector(sys.version_info.major, objects=[2,3], doc="""
The sanitization version. If set to 2, more aggressive
sanitization appropriate for Python 2 is applied. Otherwise,
if set to 3, more relaxed, Python 3 sanitization is used.""")
capitalize = param.Boolean(default=True, doc="""
Whether the first letter should be converted to
uppercase. Note, this will only be applied to ASCII characters
in order to make sure paths aren't confused with method
names.""")
eliminations = param.List(['extended', 'accent', 'small', 'letter', 'sign', 'digit',
'latin', 'greek', 'arabic-indic', 'with', 'dollar'], doc="""
Lowercase strings to be eliminated from the unicode names in
        order to shorten the sanitized name (lowercase). Redundant
strings should be removed but too much elimination could cause
two unique strings to map to the same sanitized output.""")
substitutions = param.Dict(default={'circumflex':'power',
'asterisk':'times',
'solidus':'over'}, doc="""
Lowercase substitutions of substrings in unicode names. For
instance the ^ character has the name 'circumflex accent' even
though it is more typically used for exponentiation. Note that
substitutions occur after filtering and that there should be no
ordering dependence between substitutions.""")
transforms = param.List(default=[capitalize_unicode_name], doc="""
List of string transformation functions to apply after
filtering and substitution in order to further compress the
unicode name. For instance, the default capitalize_unicode_name
function will turn the string "capital delta" into "Delta".""")
disallowed = param.List(default=['trait_names', '_ipython_display_',
'_getAttributeNames'], doc="""
An explicit list of name that should not be allowed as
attribute names on Tree objects.
By default, prevents IPython from creating an entry called
Trait_names due to an inconvenient getattr check (during
tab-completion).""")
disable_leading_underscore = param.Boolean(default=False, doc="""
Whether leading underscores should be allowed to be sanitized
with the leading prefix.""")
aliases = param.Dict(default={}, doc="""
A dictionary of aliases mapping long strings to their short,
sanitized equivalents""")
prefix = 'A_'
_lookup_table = param.Dict(default={}, doc="""
Cache of previously computed sanitizations""")
@param.parameterized.bothmethod
def add_aliases(self_or_cls, **kwargs):
"""
Conveniently add new aliases as keyword arguments. For instance
you can add a new alias with add_aliases(short='Longer string')
"""
self_or_cls.aliases.update({v:k for k,v in kwargs.items()})
@param.parameterized.bothmethod
def remove_aliases(self_or_cls, aliases):
"""
Remove a list of aliases.
"""
for k,v in self_or_cls.aliases.items():
if v in aliases:
self_or_cls.aliases.pop(k)
@param.parameterized.bothmethod
def allowable(self_or_cls, name, disable_leading_underscore=None):
disabled_reprs = ['javascript', 'jpeg', 'json', 'latex',
'pdf', 'png', 'svg', 'markdown']
disabled_ = (self_or_cls.disable_leading_underscore
if disable_leading_underscore is None
else disable_leading_underscore)
if disabled_ and name.startswith('_'):
return False
isrepr = any(('_repr_%s_' % el) == name for el in disabled_reprs)
return (name not in self_or_cls.disallowed) and not isrepr
@param.parameterized.bothmethod
def prefixed(self, identifier, version):
"""
Whether or not the identifier will be prefixed.
Strings that require the prefix are generally not recommended.
"""
invalid_starting = ['Mn', 'Mc', 'Nd', 'Pc']
if identifier.startswith('_'): return True
return((identifier[0] in string.digits) if version==2
else (unicodedata.category(identifier[0]) in invalid_starting))
@param.parameterized.bothmethod
def remove_diacritics(self_or_cls, identifier):
"""
Remove diacritics and accents from the input leaving other
unicode characters alone."""
chars = ''
for c in identifier:
replacement = unicodedata.normalize('NFKD', c).encode('ASCII', 'ignore')
if replacement != '':
chars += bytes_to_unicode(replacement)
else:
chars += c
return chars
@param.parameterized.bothmethod
def shortened_character_name(self_or_cls, c, eliminations=[], substitutions={}, transforms=[]):
"""
Given a unicode character c, return the shortened unicode name
(as a list of tokens) by applying the eliminations,
substitutions and transforms.
"""
name = unicodedata.name(c).lower()
# Filtering
for elim in eliminations:
name = name.replace(elim, '')
# Substitution
for i,o in substitutions.items():
name = name.replace(i, o)
for transform in transforms:
name = transform(name)
return ' '.join(name.strip().split()).replace(' ','_').replace('-','_')
def __call__(self, name, escape=True, version=None):
if name in [None, '']:
return name
elif name in self.aliases:
return self.aliases[name]
elif name in self._lookup_table:
return self._lookup_table[name]
name = bytes_to_unicode(name)
version = self.version if version is None else version
if not self.allowable(name):
raise AttributeError("String %r is in the disallowed list of attribute names: %r" % (name, self.disallowed))
if version == 2:
name = self.remove_diacritics(name)
if self.capitalize and name and name[0] in string.ascii_lowercase:
name = name[0].upper()+name[1:]
sanitized = (self.sanitize_py2(name) if version==2 else self.sanitize_py3(name))
if self.prefixed(name, version):
sanitized = self.prefix + sanitized
self._lookup_table[name] = sanitized
return sanitized
def _process_underscores(self, tokens):
"Strip underscores to make sure the number is correct after join"
groups = [[str(''.join(el))] if b else list(el)
for (b,el) in itertools.groupby(tokens, lambda k: k=='_')]
flattened = [el for group in groups for el in group]
processed = []
for token in flattened:
if token == '_': continue
if token.startswith('_'):
token = str(token[1:])
if token.endswith('_'):
token = str(token[:-1])
processed.append(token)
return processed
def sanitize_py2(self, name):
# This fix works but masks an issue in self.sanitize (py2)
prefix = '_' if name.startswith('_') else ''
valid_chars = string.ascii_letters+string.digits+'_'
return prefix + str('_'.join(self.sanitize(name, lambda c: c in valid_chars)))
def sanitize_py3(self, name):
if not name.isidentifier():
return '_'.join(self.sanitize(name, lambda c: ('_'+c).isidentifier()))
else:
return name
def sanitize(self, name, valid_fn):
"Accumulate blocks of hex and separate blocks by underscores"
invalid = {'\a':'a','\b':'b', '\v':'v','\f':'f','\r':'r'}
for cc in filter(lambda el: el in name, invalid.keys()):
raise Exception("Please use a raw string or escape control code '\%s'"
% invalid[cc])
sanitized, chars = [], ''
for split in name.split():
for c in split:
if valid_fn(c): chars += str(c) if c=='_' else c
else:
short = self.shortened_character_name(c, self.eliminations,
self.substitutions,
self.transforms)
sanitized.extend([chars] if chars else [])
if short != '':
sanitized.append(short)
chars = ''
if chars:
sanitized.extend([chars])
chars=''
return self._process_underscores(sanitized + ([chars] if chars else []))
sanitize_identifier = sanitize_identifier_fn.instance()
group_sanitizer = sanitize_identifier_fn.instance()
label_sanitizer = sanitize_identifier_fn.instance()
dimension_sanitizer = sanitize_identifier_fn.instance(capitalize=False)
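# A minimal usage sketch of the sanitizer instances above (hypothetical inputs;
# the exact output depends on the parameter defaults of sanitize_identifier_fn):
# >>> sanitize_identifier('Sine Wave')
# 'Sine_Wave'
# >>> dimension_sanitizer('amplitude')   # already a valid identifier, returned as-is
# 'amplitude'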
def isscalar(val):
"""
Value is scalar or None
"""
return val is None or np.isscalar(val) or isinstance(val, datetime_types)
def isnumeric(val):
if isinstance(val, (basestring, bool, np.bool_)):
return False
try:
float(val)
return True
except:
return False
def asarray(arraylike, strict=True):
"""
Converts arraylike objects to NumPy ndarray types. Errors if
object is not arraylike and strict option is enabled.
"""
if isinstance(arraylike, np.ndarray):
return arraylike
elif isinstance(arraylike, list):
return np.asarray(arraylike, dtype=object)
elif not isinstance(arraylike, np.ndarray) and isinstance(arraylike, arraylike_types):
return arraylike.values
elif hasattr(arraylike, '__array__'):
return np.asarray(arraylike)
elif strict:
raise ValueError('Could not convert %s type to array' % type(arraylike))
return arraylike
nat_as_integer = np.datetime64('NAT').view('i8')
def isnat(val):
"""
Checks if the value is a NaT. Should only be called on datetimelike objects.
"""
if (isinstance(val, (np.datetime64, np.timedelta64)) or
(isinstance(val, np.ndarray) and val.dtype.kind == 'M')):
if numpy_version >= '1.13':
return np.isnat(val)
else:
return val.view('i8') == nat_as_integer
elif pd and val is pd.NaT:
return True
elif pd and isinstance(val, pandas_datetime_types+pandas_timedelta_types):
return pd.isna(val)
else:
return False
def isfinite(val):
"""
Helper function to determine if scalar or array value is finite extending
np.isfinite with support for None, string, datetime types.
"""
is_dask = is_dask_array(val)
if not np.isscalar(val) and not is_dask:
val = asarray(val, strict=False)
if val is None:
return False
elif is_dask:
import dask.array as da
return da.isfinite(val)
elif isinstance(val, np.ndarray):
if val.dtype.kind == 'M':
return ~isnat(val)
elif val.dtype.kind == 'O':
return np.array([isfinite(v) for v in val], dtype=bool)
elif val.dtype.kind in 'US':
return ~pd.isna(val) if pd else np.ones_like(val, dtype=bool)
finite = np.isfinite(val)
if pd and pandas_version >= '1.0.0':
finite &= ~pd.isna(val)
return finite
elif isinstance(val, datetime_types+timedelta_types):
return not isnat(val)
elif isinstance(val, (basestring, bytes)):
return True
finite = np.isfinite(val)
if pd and pandas_version >= '1.0.0':
if finite is pd.NA:
return False
return finite & (~pd.isna(val))
return finite
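# Illustrative behaviour of isfinite (a sketch assuming NumPy and pandas are
# available as imported above):
# >>> isfinite(None), isfinite(np.nan), isfinite(1.5)
# (False, False, True)
# >>> isfinite(np.array([1.0, np.nan]))
# array([ True, False])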
def isdatetime(value):
"""
Whether the array or scalar is recognized datetime type.
"""
if isinstance(value, np.ndarray):
return (value.dtype.kind == "M" or
(value.dtype.kind == "O" and len(value) and
isinstance(value[0], datetime_types)))
else:
return isinstance(value, datetime_types)
def find_minmax(lims, olims):
"""
Takes (a1, a2) and (b1, b2) as input and returns
(np.nanmin([a1, b1]), np.nanmax([a2, b2])). Used to calculate
min and max values of a number of items.
"""
try:
limzip = zip(list(lims), list(olims), [np.nanmin, np.nanmax])
limits = tuple([float(fn([l, ol])) for l, ol, fn in limzip])
except:
limits = (np.NaN, np.NaN)
return limits
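# For example (illustrative only):
# >>> find_minmax((0, 3), (1, 5))
# (0.0, 5.0)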
def find_range(values, soft_range=[]):
"""
Safely finds either the numerical min and max of
a set of values, falling back to the first and
the last value in the sorted list of values.
"""
try:
values = np.array(values)
values = np.squeeze(values) if len(values.shape) > 1 else values
if len(soft_range):
values = np.concatenate([values, soft_range])
if values.dtype.kind == 'M':
return values.min(), values.max()
with warnings.catch_warnings():
warnings.filterwarnings('ignore', r'All-NaN (slice|axis) encountered')
return np.nanmin(values), np.nanmax(values)
except:
try:
values = sorted(values)
return (values[0], values[-1])
except:
return (None, None)
def max_range(ranges, combined=True):
"""
Computes the maximal lower and upper bounds from a list of bounds.
Args:
ranges (list of tuples): A list of range tuples
combined (boolean, optional): Whether to combine bounds
Whether range should be computed on lower and upper bound
independently or both at once
Returns:
The maximum range as a single tuple
"""
try:
with warnings.catch_warnings():
warnings.filterwarnings('ignore', r'All-NaN (slice|axis) encountered')
values = [tuple(np.NaN if v is None else v for v in r) for r in ranges]
if pd and any(isinstance(v, datetime_types) and not isinstance(v, cftime_types+(dt.time,))
for r in values for v in r):
converted = []
for l, h in values:
if isinstance(l, datetime_types) and isinstance(h, datetime_types):
l, h = (pd.Timestamp(l).to_datetime64(),
pd.Timestamp(h).to_datetime64())
converted.append((l, h))
values = converted
arr = np.array(values)
if not len(arr):
return np.NaN, np.NaN
elif arr.dtype.kind in 'OSU':
arr = list(python2sort([
v for r in values for v in r
if not is_nan(v) and v is not None]))
return arr[0], arr[-1]
elif arr.dtype.kind in 'M':
return ((arr.min(), arr.max()) if combined else
(arr[:, 0].min(), arr[:, 1].max()))
if combined:
return (np.nanmin(arr), np.nanmax(arr))
else:
return (np.nanmin(arr[:, 0]), np.nanmax(arr[:, 1]))
except:
return (np.NaN, np.NaN)
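# Illustrative combination of ranges, including None handling (a sketch):
# >>> max_range([(0, 2), (None, 3)])
# (0.0, 3.0)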
def range_pad(lower, upper, padding=None, log=False):
"""
Pads the range by a fraction of the interval
"""
if padding is not None and not isinstance(padding, tuple):
padding = (padding, padding)
if is_number(lower) and is_number(upper) and padding is not None:
if not isinstance(lower, datetime_types) and log and lower > 0 and upper > 0:
log_min = np.log(lower) / np.log(10)
log_max = np.log(upper) / np.log(10)
lspan = (log_max-log_min)*(1+padding[0]*2)
uspan = (log_max-log_min)*(1+padding[1]*2)
center = (log_min+log_max) / 2.0
start, end = np.power(10, center-lspan/2.), np.power(10, center+uspan/2.)
else:
if isinstance(lower, datetime_types) and not isinstance(lower, cftime_types):
# Ensure timedelta can be safely divided
lower, upper = np.datetime64(lower), np.datetime64(upper)
span = (upper-lower).astype('>m8[ns]')
else:
span = (upper-lower)
lpad = span*(padding[0])
upad = span*(padding[1])
start, end = lower-lpad, upper+upad
else:
start, end = lower, upper
return start, end
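# Illustration: a 10% pad applied to each side of a linear range (sketch only):
# >>> range_pad(0, 10, 0.1)
# (-1.0, 11.0)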
def dimension_range(lower, upper, hard_range, soft_range, padding=None, log=False):
"""
Computes the range along a dimension by combining the data range
with the Dimension soft_range and range.
"""
plower, pupper = range_pad(lower, upper, padding, log)
if isfinite(soft_range[0]) and soft_range[0] <= lower:
lower = soft_range[0]
else:
lower = max_range([(plower, None), (soft_range[0], None)])[0]
if isfinite(soft_range[1]) and soft_range[1] >= upper:
upper = soft_range[1]
else:
upper = max_range([(None, pupper), (None, soft_range[1])])[1]
dmin, dmax = hard_range
lower = lower if dmin is None or not isfinite(dmin) else dmin
upper = upper if dmax is None or not isfinite(dmax) else dmax
return lower, upper
def max_extents(extents, zrange=False):
"""
Computes the maximal extent in 2D and 3D space from
list of 4-tuples or 6-tuples. If zrange is enabled
all extents are converted to 6-tuples to compute
x-, y- and z-limits.
"""
if zrange:
num = 6
inds = [(0, 3), (1, 4), (2, 5)]
extents = [e if len(e) == 6 else (e[0], e[1], None,
e[2], e[3], None)
for e in extents]
else:
num = 4
inds = [(0, 2), (1, 3)]
arr = list(zip(*extents)) if extents else []
extents = [np.NaN] * num
if len(arr) == 0:
return extents
with warnings.catch_warnings():
warnings.filterwarnings('ignore', r'All-NaN (slice|axis) encountered')
for lidx, uidx in inds:
lower = [v for v in arr[lidx] if v is not None and not is_nan(v)]
upper = [v for v in arr[uidx] if v is not None and not is_nan(v)]
if lower and isinstance(lower[0], datetime_types):
extents[lidx] = np.min(lower)
elif any(isinstance(l, basestring) for l in lower):
extents[lidx] = np.sort(lower)[0]
elif lower:
extents[lidx] = np.nanmin(lower)
if upper and isinstance(upper[0], datetime_types):
extents[uidx] = np.max(upper)
elif any(isinstance(u, basestring) for u in upper):
extents[uidx] = np.sort(upper)[-1]
elif upper:
extents[uidx] = np.nanmax(upper)
return tuple(extents)
def int_to_alpha(n, upper=True):
"Generates alphanumeric labels of form A-Z, AA-ZZ etc."
casenum = 65 if upper else 97
label = ''
count= 0
if n == 0: return str(chr(n + casenum))
while n >= 0:
mod, div = n % 26, n
for _ in range(count):
div //= 26
div %= 26
if count == 0:
val = mod
else:
val = div
label += str(chr(val + casenum))
count += 1
n -= 26**count
return label[::-1]
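# Illustrative label generation (sketch):
# >>> int_to_alpha(0), int_to_alpha(25), int_to_alpha(26)
# ('A', 'Z', 'AA')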
def int_to_roman(input):
if type(input) != type(1):
raise TypeError("expected integer, got %s" % type(input))
if not 0 < input < 4000:
raise ValueError("Argument must be between 1 and 3999")
ints = (1000, 900, 500, 400, 100, 90, 50, 40, 10, 9, 5, 4, 1)
nums = ('M', 'CM', 'D', 'CD','C', 'XC','L','XL','X','IX','V','IV','I')
result = ""
for i in range(len(ints)):
count = int(input / ints[i])
result += nums[i] * count
input -= ints[i] * count
return result
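# Illustrative conversions (sketch):
# >>> int_to_roman(4), int_to_roman(2024)
# ('IV', 'MMXXIV')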
def unique_iterator(seq):
"""
Returns an iterator containing all non-duplicate elements
in the input sequence.
"""
seen = set()
for item in seq:
if item not in seen:
seen.add(item)
yield item
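# Illustration: order-preserving de-duplication (sketch):
# >>> list(unique_iterator([1, 2, 1, 3, 2]))
# [1, 2, 3]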
def lzip(*args):
"""
zip function that returns a list.
"""
return list(zip(*args))
def unique_zip(*args):
"""
Returns a unique list of zipped values.
"""
return list(unique_iterator(zip(*args)))
def unique_array(arr):
"""
Returns an array of unique values in the input order.
Args:
arr (np.ndarray or list): The array to compute unique values on
Returns:
A new array of unique values
"""
if not len(arr):
return np.asarray(arr)
elif pd:
if isinstance(arr, np.ndarray) and arr.dtype.kind not in 'MO':
# Avoid expensive unpacking if not potentially datetime
return pd.unique(arr)
values = []
for v in arr:
if (isinstance(v, datetime_types) and
not isinstance(v, cftime_types)):
v = pd.Timestamp(v).to_datetime64()
values.append(v)
return pd.unique(values)
else:
arr = np.asarray(arr)
_, uniq_inds = np.unique(arr, return_index=True)
return arr[np.sort(uniq_inds)]
def match_spec(element, specification):
"""
Matches the group.label specification of the supplied
element against the supplied specification dictionary
returning the value of the best match.
"""
match_tuple = ()
match = specification.get((), {})
for spec in [type(element).__name__,
group_sanitizer(element.group, escape=False),
label_sanitizer(element.label, escape=False)]:
match_tuple += (spec,)
if match_tuple in specification:
match = specification[match_tuple]
return match
def python2sort(x,key=None):
if len(x) == 0: return x
it = iter(x)
groups = [[next(it)]]
for item in it:
for group in groups:
try:
item_precedence = item if key is None else key(item)
group_precedence = group[0] if key is None else key(group[0])
item_precedence < group_precedence # exception if not comparable
group.append(item)
break
except TypeError:
continue
else: # did not break, make new group
groups.append([item])
return itertools.chain.from_iterable(sorted(group, key=key) for group in groups)
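# Illustration: mutually incomparable types are split into comparable groups,
# each sorted separately, mimicking Python 2 semantics (sketch):
# >>> list(python2sort([2, 'a', 1, 'b']))
# [1, 2, 'a', 'b']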
def merge_dimensions(dimensions_list):
"""
Merges lists of fully or partially overlapping dimensions by
merging their values.
>>> from holoviews import Dimension
>>> dim_list = [[Dimension('A', values=[1, 2, 3]), Dimension('B')],
... [Dimension('A', values=[2, 3, 4])]]
>>> dimensions = merge_dimensions(dim_list)
>>> dimensions
[Dimension('A'), Dimension('B')]
>>> dimensions[0].values
[1, 2, 3, 4]
"""
dvalues = defaultdict(list)
dimensions = []
for dims in dimensions_list:
for d in dims:
dvalues[d.name].append(d.values)
if d not in dimensions:
dimensions.append(d)
dvalues = {k: list(unique_iterator(itertools.chain(*vals)))
for k, vals in dvalues.items()}
return [d.clone(values=dvalues.get(d.name, [])) for d in dimensions]
def dimension_sort(odict, kdims, vdims, key_index):
"""
Sorts data by key using usual Python tuple sorting semantics
or sorts in categorical order for any categorical Dimensions.
"""
sortkws = {}
ndims = len(kdims)
dimensions = kdims+vdims
indexes = [(dimensions[i], int(i not in range(ndims)),
i if i in range(ndims) else i-ndims)
for i in key_index]
cached_values = {d.name: [None]+list(d.values) for d in dimensions}
if len(set(key_index)) != len(key_index):
raise ValueError("Cannot sort on duplicated dimensions")
else:
sortkws['key'] = lambda x: tuple(cached_values[dim.name].index(x[t][d])
if dim.values else x[t][d]
for i, (dim, t, d) in enumerate(indexes))
if sys.version_info.major == 3:
return python2sort(odict.items(), **sortkws)
else:
return sorted(odict.items(), **sortkws)
# Copied from param; should make the param version public
def is_number(obj):
if isinstance(obj, numbers.Number): return True
elif isinstance(obj, (np.str_, np.unicode_)): return False
# The extra check is for classes that behave like numbers, such as those
# found in numpy, gmpy, etc.
elif (hasattr(obj, '__int__') and hasattr(obj, '__add__')): return True
# This is for older versions of gmpy
elif hasattr(obj, 'qdiv'): return True
else: return False
class ProgressIndicator(param.Parameterized):
"""
Baseclass for any ProgressIndicator that indicates progress
as a completion percentage.
"""
percent_range = param.NumericTuple(default=(0.0, 100.0), doc="""
The total percentage spanned by the progress bar when called
with a value between 0% and 100%. This allows an overall
completion in percent to be broken down into smaller sub-tasks
that individually complete to 100 percent.""")
label = param.String(default='Progress', allow_None=True, doc="""
The label of the current progress bar.""")
def __call__(self, completion):
raise NotImplementedError
def sort_topologically(graph):
"""
Stackless topological sorting.
graph = {
3: [1],
5: [3],
4: [2],
6: [4],
}
sort_topologically(graph)
[[1, 2], [3, 4], [5, 6]]
"""
levels_by_name = {}
names_by_level = defaultdict(list)
def add_level_to_name(name, level):
levels_by_name[name] = level
names_by_level[level].append(name)
def walk_depth_first(name):
stack = [name]
while(stack):
name = stack.pop()
if name in levels_by_name:
continue
if name not in graph or not graph[name]:
level = 0
add_level_to_name(name, level)
continue
children = graph[name]
children_not_calculated = [child for child in children if child not in levels_by_name]
if children_not_calculated:
stack.append(name)
stack.extend(children_not_calculated)
continue
level = 1 + max(levels_by_name[lname] for lname in children)
add_level_to_name(name, level)
for name in graph:
walk_depth_first(name)
return list(itertools.takewhile(lambda x: x is not None,
(names_by_level.get(i, None)
for i in itertools.count())))
def is_cyclic(graph):
"""
Return True if the directed graph has a cycle. The directed graph
should be represented as a dictionary mapping of edges for each node.
"""
path = set()
def visit(vertex):
path.add(vertex)
for neighbour in graph.get(vertex, ()):
if neighbour in path or visit(neighbour):
return True
path.remove(vertex)
return False
return any(visit(v) for v in graph)
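# Illustrative cycle detection (sketch):
# >>> is_cyclic({1: [2], 2: [3], 3: [1]})
# True
# >>> is_cyclic({1: [2], 2: [3], 3: []})
# False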
def one_to_one(graph, nodes):
"""
Return True if graph contains only one to one mappings. The
directed graph should be represented as a dictionary mapping of
edges for each node. Nodes should be passed a simple list.
"""
edges = itertools.chain.from_iterable(graph.values())
return len(graph) == len(nodes) and len(set(edges)) == len(nodes)
def get_overlay_spec(o, k, v):
"""
Gets the type.group.label + key spec from an Element in an Overlay.
"""
k = wrap_tuple(k)
return ((type(v).__name__, v.group, v.label) + k if len(o.kdims) else
(type(v).__name__,) + k)
def layer_sort(hmap):
"""
Find a global ordering for layers in a HoloMap of CompositeOverlay
types.
"""
orderings = {}
for o in hmap:
okeys = [get_overlay_spec(o, k, v) for k, v in o.data.items()]
if len(okeys) == 1 and not okeys[0] in orderings:
orderings[okeys[0]] = []
else:
orderings.update({k: [] if k == v else [v] for k, v in zip(okeys[1:], okeys)})
return [i for g in sort_topologically(orderings) for i in sorted(g)]
def layer_groups(ordering, length=2):
"""
Splits a global ordering of Layers into groups based on a slice of
the spec. The grouping behavior can be modified by changing the
length of spec the entries are grouped by.
"""
group_orderings = defaultdict(list)
for el in ordering:
group_orderings[el[:length]].append(el)
return group_orderings
def group_select(selects, length=None, depth=None):
"""
Given a list of key tuples to select, groups them into sensible
chunks to avoid duplicating indexing operations.
"""
if length is None and depth is None:
length = depth = len(selects[0])
getter = operator.itemgetter(depth-length)
if length > 1:
selects = sorted(selects, key=getter)
grouped_selects = defaultdict(dict)
for k, v in itertools.groupby(selects, getter):
grouped_selects[k] = group_select(list(v), length-1, depth)
return grouped_selects
else:
return list(selects)
def iterative_select(obj, dimensions, selects, depth=None):
"""
Takes the output of group_select selecting subgroups iteratively,
avoiding duplicating select operations.
"""
ndims = len(dimensions)
depth = depth if depth is not None else ndims
items = []
if isinstance(selects, dict):
for k, v in selects.items():
items += iterative_select(obj.select(**{dimensions[ndims-depth]: k}),
dimensions, v, depth-1)
else:
for s in selects:
items.append((s, obj.select(**{dimensions[-1]: s[-1]})))
return items
def get_spec(obj):
"""
Gets the spec from any labeled data object.
"""
return (obj.__class__.__name__,
obj.group, obj.label)
def is_dataframe(data):
"""
Checks whether the supplied data is of DataFrame type.
"""
dd = None
if 'dask.dataframe' in sys.modules and 'pandas' in sys.modules:
import dask.dataframe as dd
return((pd is not None and isinstance(data, pd.DataFrame)) or
(dd is not None and isinstance(data, dd.DataFrame)))
def is_series(data):
"""
Checks whether the supplied data is of Series type.
"""
dd = None
if 'dask.dataframe' in sys.modules:
import dask.dataframe as dd
return((pd is not None and isinstance(data, pd.Series)) or
(dd is not None and isinstance(data, dd.Series)))
def is_dask_array(data):
da = None
if 'dask.array' in sys.modules:
import dask.array as da
return (da is not None and isinstance(data, da.Array))
def get_param_values(data):
params = dict(kdims=data.kdims, vdims=data.vdims,
label=data.label)
if (data.group != data.param.objects(False)['group'].default and not
isinstance(type(data).group, property)):
params['group'] = data.group
return params
def is_param_method(obj, has_deps=False):
"""Whether the object is a method on a parameterized object.
Args:
obj: Object to check
has_deps (boolean, optional): Check for dependencies
Whether to also check whether the method has been annotated
with param.depends
Returns:
A boolean value indicating whether the object is a method
on a Parameterized object and if enabled whether it has any
dependencies
"""
parameterized = (inspect.ismethod(obj) and
isinstance(get_method_owner(obj), param.Parameterized))
if parameterized and has_deps:
return getattr(obj, "_dinfo", {}).get('dependencies')
return parameterized
def resolve_dependent_value(value):
"""Resolves parameter dependencies on the supplied value
Resolves parameter values, Parameterized instance methods and
parameterized functions with dependencies on the supplied value.
Args:
value: A value which will be resolved
Returns:
The supplied value with any parameter dependencies resolved.
"""
if 'panel' in sys.modules:
from panel.widgets.base import Widget
if isinstance(value, Widget):
value = value.param.value
if is_param_method(value, has_deps=True):
value = value()
elif isinstance(value, param.Parameter) and isinstance(value.owner, param.Parameterized):
value = getattr(value.owner, value.name)
elif isinstance(value, FunctionType) and hasattr(value, '_dinfo'):
deps = value._dinfo
args = (getattr(p.owner, p.name) for p in deps.get('dependencies', []))
kwargs = {k: getattr(p.owner, p.name) for k, p in deps.get('kw', {}).items()}
value = value(*args, **kwargs)
return value
def resolve_dependent_kwargs(kwargs):
"""Resolves parameter dependencies in the supplied dictionary
Resolves parameter values, Parameterized instance methods and
parameterized functions with dependencies in the supplied
dictionary.
Args:
kwargs (dict): A dictionary of keyword arguments
Returns:
A new dictionary where any parameter dependencies have been
resolved.
"""
return {k: resolve_dependent_value(v) for k, v in kwargs.items()}
@contextmanager
def disable_constant(parameterized):
"""
Temporarily set parameters on Parameterized object to
constant=False.
"""
params = parameterized.param.objects('existing').values()
constants = [p.constant for p in params]
for p in params:
p.constant = False
try:
yield
except:
raise
finally:
for (p, const) in zip(params, constants):
p.constant = const
def get_ndmapping_label(ndmapping, attr):
"""
Function to get the first non-auxiliary object
label attribute from an NdMapping.
"""
label = None
els = itervalues(ndmapping.data)
while label is None:
try:
el = next(els)
except StopIteration:
return None
if not getattr(el, '_auxiliary_component', True):
label = getattr(el, attr)
if attr == 'group':
tp = type(el).__name__
if tp == label:
return None
return label
def wrap_tuple(unwrapped):
""" Wraps any non-tuple types in a tuple """
return (unwrapped if isinstance(unwrapped, tuple) else (unwrapped,))
def stream_name_mapping(stream, exclude_params=['name'], reverse=False):
"""
Return a complete dictionary mapping from stream parameter names
to their applicable renames, excluding parameters listed in
exclude_params.
If reverse is True, the mapping is from the renamed strings to the
original stream parameter names.
"""
filtered = [k for k in stream.param if k not in exclude_params]
mapping = {k:stream._rename.get(k,k) for k in filtered}
if reverse:
return {v:k for k,v in mapping.items()}
else:
return mapping
def rename_stream_kwargs(stream, kwargs, reverse=False):
"""
Given a stream and a kwargs dictionary of parameter values, map to
the corresponding dictionary where the keys are substituted with the
appropriately renamed string.
If reverse, the output will be a dictionary using the original
parameter names given a dictionary using the renamed equivalents.
"""
mapped_kwargs = {}
mapping = stream_name_mapping(stream, reverse=reverse)
for k,v in kwargs.items():
if k not in mapping:
msg = 'Could not map key {key} {direction} renamed equivalent'
direction = 'from' if reverse else 'to'
raise KeyError(msg.format(key=repr(k), direction=direction))
mapped_kwargs[mapping[k]] = v
return mapped_kwargs
def stream_parameters(streams, no_duplicates=True, exclude=['name']):
"""
Given a list of streams, return a flat list of parameter names,
excluding those listed in the exclude list.
If no_duplicates is enabled, a KeyError will be raised if there are
parameter name clashes across the streams.
"""
param_groups = []
for s in streams:
if not s.contents and isinstance(s.hashkey, dict):
param_groups.append(list(s.hashkey))
else:
param_groups.append(list(s.contents))
names = [name for group in param_groups for name in group]
if no_duplicates:
clashes = sorted(set([n for n in names if names.count(n) > 1]))
clash_streams = []
for s in streams:
for c in clashes:
if c in s.contents or (not s.contents and isinstance(s.hashkey, dict) and c in s.hashkey):
clash_streams.append(s)
if [c for c in clashes if c != '_memoize_key']:
clashing = ', '.join([repr(c) for c in clash_streams[:-1]])
raise Exception('The supplied stream objects %s and %s '
'clash on the following parameters: %r'
% (clashing, clash_streams[-1], clashes))
return [name for name in names if name not in exclude]
def dimensionless_contents(streams, kdims, no_duplicates=True):
"""
Return a list of stream parameters that have not been associated
with any of the key dimensions.
"""
names = stream_parameters(streams, no_duplicates)
return [name for name in names if name not in kdims]
def unbound_dimensions(streams, kdims, no_duplicates=True):
"""
Return a list of dimensions that have not been associated with
any streams.
"""
params = stream_parameters(streams, no_duplicates)
return [d for d in kdims if d not in params]
def wrap_tuple_streams(unwrapped, kdims, streams):
"""
Fills in tuple keys with dimensioned stream values as appropriate.
"""
param_groups = [(s.contents.keys(), s) for s in streams]
pairs = [(name,s) for (group, s) in param_groups for name in group]
substituted = []
for pos,el in enumerate(wrap_tuple(unwrapped)):
if el is None and pos < len(kdims):
matches = [(name,s) for (name,s) in pairs if name==kdims[pos].name]
if len(matches) == 1:
(name, stream) = matches[0]
el = stream.contents[name]
substituted.append(el)
return tuple(substituted)
def drop_streams(streams, kdims, keys):
"""
Drop any dimensioned streams from the keys and kdims.
"""
stream_params = stream_parameters(streams)
inds, dims = zip(*[(ind, kdim) for ind, kdim in enumerate(kdims)
if kdim not in stream_params])
get = operator.itemgetter(*inds) # itemgetter used for performance
keys = (get(k) for k in keys)
return dims, ([wrap_tuple(k) for k in keys] if len(inds) == 1 else list(keys))
def itervalues(obj):
"Get value iterator from dictionary for Python 2 and 3"
return iter(obj.values()) if sys.version_info.major == 3 else obj.itervalues()
def iterkeys(obj):
"Get key iterator from dictionary for Python 2 and 3"
return iter(obj.keys()) if sys.version_info.major == 3 else obj.iterkeys()
def get_unique_keys(ndmapping, dimensions):
inds = [ndmapping.get_dimension_index(dim) for dim in dimensions]
getter = operator.itemgetter(*inds)
return unique_iterator(getter(key) if len(inds) > 1 else (key[inds[0]],)
for key in ndmapping.data.keys())
def unpack_group(group, getter):
for k, v in group.iterrows():
obj = v.values[0]
key = getter(k)
if hasattr(obj, 'kdims'):
yield (key, obj)
else:
yield (wrap_tuple(key), obj)
def capitalize(string):
"""
Capitalizes the first letter of a string.
"""
return string[0].upper() + string[1:]
def get_path(item):
"""
Gets a path from a Labelled object or from a tuple of an existing
path and a labelled object. The path strings are sanitized and
capitalized.
"""
sanitizers = [group_sanitizer, label_sanitizer]
if isinstance(item, tuple):
path, item = item
if item.label:
if len(path) > 1 and item.label == path[1]:
path = path[:2]
else:
path = path[:1] + (item.label,)
else:
path = path[:1]
else:
path = (item.group, item.label) if item.label else (item.group,)
return tuple(capitalize(fn(p)) for (p, fn) in zip(path, sanitizers))
def make_path_unique(path, counts, new):
"""
Given a path, a dictionary of counts for existing paths and a flag
indicating whether the path is new, returns a version of the path made
unique by appending Roman numeral counts where necessary.
"""
added = False
while any(path == c[:i] for c in counts for i in range(1, len(c)+1)):
count = counts[path]
counts[path] += 1
if (not new and len(path) > 1) or added:
path = path[:-1]
else:
added = True
path = path + (int_to_roman(count),)
if len(path) == 1:
path = path + (int_to_roman(counts.get(path, 1)),)
if path not in counts:
counts[path] = 1
return path
class ndmapping_groupby(param.ParameterizedFunction):
"""
Apply a groupby operation to an NdMapping, using pandas to improve
performance (if available).
"""
sort = param.Boolean(default=False, doc='Whether to apply a sorted groupby')
def __call__(self, ndmapping, dimensions, container_type,
group_type, sort=False, **kwargs):
try:
import pandas # noqa (optional import)
groupby = self.groupby_pandas
except:
groupby = self.groupby_python
return groupby(ndmapping, dimensions, container_type,
group_type, sort=sort, **kwargs)
@param.parameterized.bothmethod
def groupby_pandas(self_or_cls, ndmapping, dimensions, container_type,
group_type, sort=False, **kwargs):
if 'kdims' in kwargs:
idims = [ndmapping.get_dimension(d) for d in kwargs['kdims']]
else:
idims = [dim for dim in ndmapping.kdims if dim not in dimensions]
all_dims = [d.name for d in ndmapping.kdims]
inds = [ndmapping.get_dimension_index(dim) for dim in idims]
getter = operator.itemgetter(*inds) if inds else lambda x: tuple()
multi_index = pd.MultiIndex.from_tuples(ndmapping.keys(), names=all_dims)
df = pd.DataFrame(list(map(wrap_tuple, ndmapping.values())), index=multi_index)
# TODO: Look at sort here
kwargs = dict(dict(get_param_values(ndmapping), kdims=idims), sort=sort, **kwargs)
groups = ((wrap_tuple(k), group_type(OrderedDict(unpack_group(group, getter)), **kwargs))
for k, group in df.groupby(level=[d.name for d in dimensions], sort=sort))
if sort:
selects = list(get_unique_keys(ndmapping, dimensions))
groups = sorted(groups, key=lambda x: selects.index(x[0]))
return container_type(groups, kdims=dimensions, sort=sort)
@param.parameterized.bothmethod
def groupby_python(self_or_cls, ndmapping, dimensions, container_type,
group_type, sort=False, **kwargs):
idims = [dim for dim in ndmapping.kdims if dim not in dimensions]
dim_names = [dim.name for dim in dimensions]
selects = get_unique_keys(ndmapping, dimensions)
selects = group_select(list(selects))
groups = [(k, group_type((v.reindex(idims) if hasattr(v, 'kdims')
else [((), v)]), **kwargs))
for k, v in iterative_select(ndmapping, dim_names, selects)]
return container_type(groups, kdims=dimensions)
def cartesian_product(arrays, flat=True, copy=False):
"""
Efficient cartesian product of a list of 1D arrays returning the
expanded array views for each dimensions. By default arrays are
flattened, which may be controlled with the flat flag. The array
views can be turned into regular arrays with the copy flag.
"""
arrays = np.broadcast_arrays(*np.ix_(*arrays))
if flat:
return tuple(arr.flatten() if copy else arr.flat for arr in arrays)
return tuple(arr.copy() if copy else arr for arr in arrays)
def cross_index(values, index):
"""
Allows efficiently indexing into a cartesian product without
expanding it. The values should be defined as a list of iterables
making up the cartesian product and a linear index, returning
the cross product of the values at the supplied index.
"""
lengths = [len(v) for v in values]
length = np.product(lengths)
if index >= length:
raise IndexError('Index %d out of bounds for cross-product of size %d'
% (index, length))
indexes = []
for i in range(1, len(values))[::-1]:
p = np.product(lengths[-i:])
indexes.append(index//p)
index -= indexes[-1] * p
indexes.append(index)
return tuple(v[i] for v, i in zip(values, indexes))
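# Illustration: index 4 of the product [('a',0), ('a',1), ('a',2), ('b',0), ...]
# without expanding it (sketch):
# >>> cross_index([['a', 'b'], [0, 1, 2]], 4)
# ('b', 1)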
def arglexsort(arrays):
"""
Returns the indices of the lexicographical sorting
order of the supplied arrays.
"""
dtypes = ','.join(array.dtype.str for array in arrays)
recarray = np.empty(len(arrays[0]), dtype=dtypes)
for i, array in enumerate(arrays):
recarray['f%s' % i] = array
return recarray.argsort()
def dimensioned_streams(dmap):
"""
Given a DynamicMap return all streams that have any dimensioned
parameters, i.e. parameters also listed in the key dimensions.
"""
dimensioned = []
for stream in dmap.streams:
stream_params = stream_parameters([stream])
if set([str(k) for k in dmap.kdims]) & set(stream_params):
dimensioned.append(stream)
return dimensioned
def expand_grid_coords(dataset, dim):
"""
Expand the coordinates along a dimension of the gridded
dataset into an ND-array matching the dimensionality of
the dataset.
"""
irregular = [d.name for d in dataset.kdims
if d is not dim and dataset.interface.irregular(dataset, d)]
if irregular:
array = dataset.interface.coords(dataset, dim, True)
example = dataset.interface.values(dataset, irregular[0], True, False)
return array * np.ones_like(example)
else:
arrays = [dataset.interface.coords(dataset, d.name, True)
for d in dataset.kdims]
idx = dataset.get_dimension_index(dim)
return cartesian_product(arrays, flat=False)[idx].T
def dt64_to_dt(dt64):
"""
Safely converts NumPy datetime64 to a datetime object.
"""
ts = (dt64 - np.datetime64('1970-01-01T00:00:00')) / np.timedelta64(1, 's')
return dt.datetime(1970,1,1,0,0,0) + dt.timedelta(seconds=ts)
def is_nan(x):
"""
Checks whether value is NaN on arbitrary types
"""
try:
return np.isnan(x)
except:
return False
def bound_range(vals, density, time_unit='us'):
"""
Computes a bounding range and density from a number of samples
assumed to be evenly spaced. Density is rounded to machine precision
using significant digits reported by sys.float_info.dig.
"""
if not len(vals):
return(np.nan, np.nan, density, False)
low, high = vals.min(), vals.max()
invert = False
if len(vals) > 1 and vals[0] > vals[1]:
invert = True
if not density:
with warnings.catch_warnings():
warnings.filterwarnings('ignore', r'invalid value encountered in double_scalars')
full_precision_density = compute_density(low, high, len(vals)-1)
density = round(full_precision_density, sys.float_info.dig)
if density == 0:
density = full_precision_density
if density == 0:
raise ValueError('Could not determine Image density, ensure it has a non-zero range.')
halfd = 0.5/density
if isinstance(low, datetime_types):
halfd = np.timedelta64(int(round(halfd)), time_unit)
return low-halfd, high+halfd, density, invert
def validate_regular_sampling(values, rtol=10e-6):
"""
Validates regular sampling of a 1D array ensuring that the difference
in sampling steps is at most rtol times the smallest sampling step.
Returns a boolean indicating whether the sampling is regular.
"""
diffs = np.diff(values)
return (len(diffs) < 1) or abs(diffs.min()-diffs.max()) < abs(diffs.min()*rtol)
def compute_density(start, end, length, time_unit='us'):
"""
Computes a grid density given the edges and number of samples.
Handles datetime grids correctly by computing timedeltas and
computing a density for the given time_unit.
"""
if isinstance(start, int): start = float(start)
if isinstance(end, int): end = float(end)
diff = end-start
if isinstance(diff, timedelta_types):
if isinstance(diff, np.timedelta64):
diff = np.timedelta64(diff, time_unit).tolist()
tscale = 1./np.timedelta64(1, time_unit).tolist().total_seconds()
return (length/(diff.total_seconds()*tscale))
else:
return length/diff
def date_range(start, end, length, time_unit='us'):
"""
Computes a date range given a start date, end date and the number
of samples.
"""
step = (1./compute_density(start, end, length, time_unit))
if pd and isinstance(start, pd.Timestamp):
start = start.to_datetime64()
step = np.timedelta64(int(round(step)), time_unit)
return start+step/2.+np.arange(length)*step
def parse_datetime(date):
"""
Parses dates specified as string or integer or pandas Timestamp
"""
if pd is None:
raise ImportError('Parsing dates from strings requires pandas')
return pd.to_datetime(date).to_datetime64()
def parse_datetime_selection(sel):
"""
Parses string selection specs as datetimes.
"""
if isinstance(sel, basestring) or isdatetime(sel):
sel = parse_datetime(sel)
if isinstance(sel, slice):
if isinstance(sel.start, basestring) or isdatetime(sel.start):
sel = slice(parse_datetime(sel.start), sel.stop)
if isinstance(sel.stop, basestring) or isdatetime(sel.stop):
sel = slice(sel.start, parse_datetime(sel.stop))
if isinstance(sel, (set, list)):
sel = [parse_datetime(v) if isinstance(v, basestring) else v for v in sel]
return sel
def dt_to_int(value, time_unit='us'):
"""
Converts a datetime type to an integer with the supplied time unit.
"""
if pd:
if isinstance(value, pd.Period):
value = value.to_timestamp()
if isinstance(value, pd.Timestamp):
try:
value = value.to_datetime64()
except:
value = np.datetime64(value.to_pydatetime())
elif isinstance(value, cftime_types):
return cftime_to_timestamp(value, time_unit)
if isinstance(value, dt.date):
value = dt.datetime(*value.timetuple()[:6])
# Handle datetime64 separately
if isinstance(value, np.datetime64):
try:
value = np.datetime64(value, 'ns')
tscale = (np.timedelta64(1, time_unit)/np.timedelta64(1, 'ns'))
return value.tolist()/tscale
except:
# If it can't handle ns precision fall back to datetime
value = value.tolist()
if time_unit == 'ns':
tscale = 1e9
else:
tscale = 1./np.timedelta64(1, time_unit).tolist().total_seconds()
try:
# Handle python3
return int(value.timestamp() * tscale)
except:
# Handle python2
return (time.mktime(value.timetuple()) + value.microsecond / 1e6) * tscale
def cftime_to_timestamp(date, time_unit='us'):
"""Converts cftime to timestamp since epoch in milliseconds
Non-standard calendars (e.g. Julian or no leap calendars)
are converted to standard Gregorian calendar. This can cause
extra space to be added for dates that don't exist in the original
calendar. In order to handle these dates correctly a custom bokeh
model with support for other calendars would have to be defined.
Args:
date: cftime datetime object (or array)
Returns:
time_unit since 1970-01-01 00:00:00
"""
import cftime
utime = cftime.utime('microseconds since 1970-01-01 00:00:00')
if time_unit == 'us':
tscale = 1
else:
tscale = (np.timedelta64(1, 'us')/np.timedelta64(1, time_unit))
return utime.date2num(date)*tscale
def search_indices(values, source):
"""
Given a set of values returns the indices of each of those values
in the source array.
"""
orig_indices = source.argsort()
return orig_indices[np.searchsorted(source[orig_indices], values)]
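# Illustration (sketch; assumes every looked-up value is present in source):
# >>> search_indices(np.array([3, 1]), np.array([1, 2, 3]))
# array([2, 0])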
def compute_edges(edges):
"""
Computes edges as midpoints of the bin centers. The first and
last boundaries are equidistant from the first and last midpoints
respectively.
"""
edges = np.asarray(edges)
if edges.dtype.kind == 'i':
edges = edges.astype('f')
midpoints = (edges[:-1] + edges[1:])/2.0
boundaries = (2*edges[0] - midpoints[0], 2*edges[-1] - midpoints[-1])
return np.concatenate([boundaries[:1], midpoints, boundaries[-1:]])
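# Illustration: bin centers [1, 2, 3] become edges (sketch):
# >>> compute_edges([1, 2, 3])
# array([0.5, 1.5, 2.5, 3.5])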
def mimebundle_to_html(bundle):
"""
Converts a MIME bundle into HTML.
"""
if isinstance(bundle, tuple):
data, metadata = bundle
else:
data = bundle
html = data.get('text/html', '')
if 'application/javascript' in data:
js = data['application/javascript']
html += '\n<script type="application/javascript">{js}</script>'.format(js=js)
return html
def numpy_scalar_to_python(scalar):
"""
Converts a NumPy scalar to a regular python type.
"""
scalar_type = type(scalar)
if np.issubclass_(scalar_type, np.float_):
return float(scalar)
elif np.issubclass_(scalar_type, np.int_):
return int(scalar)
return scalar
def closest_match(match, specs, depth=0):
"""
Recursively iterates over type, group, label and overlay key,
finding the closest matching spec.
"""
new_specs = []
match_lengths = []
for i, spec in specs:
if spec[0] == match[0]:
new_specs.append((i, spec[1:]))
else:
if all(isinstance(s[0], basestring) for s in [spec, match]):
match_length = max(i for i in range(len(match[0]))
if match[0].startswith(spec[0][:i]))
elif is_number(match[0]) and is_number(spec[0]):
match_length = -abs(match[0]-spec[0])
else:
match_length = 0
match_lengths.append((i, match_length, spec[0]))
if len(new_specs) == 1:
return new_specs[0][0]
elif new_specs:
depth = depth+1
return closest_match(match[1:], new_specs, depth)
else:
if depth == 0 or not match_lengths:
return None
else:
return sorted(match_lengths, key=lambda x: -x[1])[0][0]
| 1 | 23,616 | It would be nice to have a docstring that says what range of integer-like objects will be accepted, as well as what the int_like argument is meant for. | holoviz-holoviews | py |
@@ -0,0 +1,5 @@
+class CreateLicensesView < ActiveRecord::Migration
+ def change
+ create_view :licenses
+ end
+end | 1 | 1 | 15,987 | How do we manage updates to this view down the road? | thoughtbot-upcase | rb |
|
@@ -19,6 +19,7 @@ package org.openqa.selenium.grid.graphql;
import graphql.schema.DataFetcher;
import graphql.schema.DataFetchingEnvironment;
+
import org.openqa.selenium.grid.distributor.Distributor;
import org.openqa.selenium.internal.Require;
| 1 | // Licensed to the Software Freedom Conservancy (SFC) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The SFC licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.openqa.selenium.grid.graphql;
import graphql.schema.DataFetcher;
import graphql.schema.DataFetchingEnvironment;
import org.openqa.selenium.grid.distributor.Distributor;
import org.openqa.selenium.internal.Require;
import java.net.URI;
public class GridData implements DataFetcher {
private final Distributor distributor;
private final URI publicUri;
public GridData(Distributor distributor, URI publicUri) {
this.distributor = Require.nonNull("Distributor", distributor);
this.publicUri = Require.nonNull("Grid's public URI", publicUri);
}
@Override
public Object get(DataFetchingEnvironment environment) {
return new Grid(distributor, publicUri);
}
}
| 1 | 17,791 | We can revert this to reduce the diff of the PR. | SeleniumHQ-selenium | js |
@@ -1174,8 +1174,11 @@ public abstract class BasePointsFormatTestCase extends BaseIndexFileFormatTestCa
Document doc = new Document();
doc.add(new IntPoint("id", 0));
- w.addDocument(doc);
- // now we write another segment where the id field does have points:
+ IllegalArgumentException ex =
+ expectThrows(IllegalArgumentException.class, () -> w.addDocument(doc));
+ assertEquals(
+ "cannot change field \"id\" from index options=DOCS to inconsistent index options=NONE",
+ ex.getMessage());
w.forceMerge(1);
IOUtils.close(w, dir); | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.lucene.index;
import static org.apache.lucene.search.DocIdSetIterator.NO_MORE_DOCS;
import java.io.IOException;
import java.math.BigInteger;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.BitSet;
import java.util.List;
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.codecs.Codec;
import org.apache.lucene.document.BinaryPoint;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.FieldType;
import org.apache.lucene.document.IntPoint;
import org.apache.lucene.document.NumericDocValuesField;
import org.apache.lucene.document.StringField;
import org.apache.lucene.index.PointValues.IntersectVisitor;
import org.apache.lucene.index.PointValues.Relation;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.MockDirectoryWrapper;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.IOUtils;
import org.apache.lucene.util.NumericUtils;
import org.apache.lucene.util.Rethrow;
import org.apache.lucene.util.TestUtil;
/**
* Abstract class to do basic tests for a points format. NOTE: This test focuses on the points impl,
* nothing else. The [stretch] goal is for this test to be so thorough in testing a new PointsFormat
* that if this test passes, then all Lucene/Solr tests should also pass. Ie, if there is some bug
* in a given PointsFormat that this test fails to catch then this test needs to be improved!
*/
public abstract class BasePointsFormatTestCase extends BaseIndexFileFormatTestCase {
@Override
protected void addRandomFields(Document doc) {
final int numValues = random().nextInt(3);
for (int i = 0; i < numValues; i++) {
doc.add(new IntPoint("f", random().nextInt()));
}
}
public void testBasic() throws Exception {
Directory dir = getDirectory(20);
IndexWriterConfig iwc = newIndexWriterConfig();
iwc.setMergePolicy(newLogMergePolicy());
IndexWriter w = new IndexWriter(dir, iwc);
byte[] point = new byte[4];
for (int i = 0; i < 20; i++) {
Document doc = new Document();
NumericUtils.intToSortableBytes(i, point, 0);
doc.add(new BinaryPoint("dim", point));
w.addDocument(doc);
}
w.forceMerge(1);
w.close();
DirectoryReader r = DirectoryReader.open(dir);
LeafReader sub = getOnlyLeafReader(r);
PointValues values = sub.getPointValues("dim");
// Simple test: make sure intersect can visit every doc:
BitSet seen = new BitSet();
values.intersect(
new IntersectVisitor() {
@Override
public Relation compare(byte[] minPacked, byte[] maxPacked) {
return Relation.CELL_CROSSES_QUERY;
}
public void visit(int docID) {
throw new IllegalStateException();
}
public void visit(int docID, byte[] packedValue) {
seen.set(docID);
assertEquals(docID, NumericUtils.sortableBytesToInt(packedValue, 0));
}
});
assertEquals(20, seen.cardinality());
IOUtils.close(r, dir);
}
public void testMerge() throws Exception {
Directory dir = getDirectory(20);
IndexWriterConfig iwc = newIndexWriterConfig();
iwc.setMergePolicy(newLogMergePolicy());
IndexWriter w = new IndexWriter(dir, iwc);
byte[] point = new byte[4];
for (int i = 0; i < 20; i++) {
Document doc = new Document();
NumericUtils.intToSortableBytes(i, point, 0);
doc.add(new BinaryPoint("dim", point));
w.addDocument(doc);
if (i == 10) {
w.commit();
}
}
w.forceMerge(1);
w.close();
DirectoryReader r = DirectoryReader.open(dir);
LeafReader sub = getOnlyLeafReader(r);
PointValues values = sub.getPointValues("dim");
// Simple test: make sure intersect can visit every doc:
BitSet seen = new BitSet();
values.intersect(
new IntersectVisitor() {
@Override
public Relation compare(byte[] minPacked, byte[] maxPacked) {
return Relation.CELL_CROSSES_QUERY;
}
public void visit(int docID) {
throw new IllegalStateException();
}
public void visit(int docID, byte[] packedValue) {
seen.set(docID);
assertEquals(docID, NumericUtils.sortableBytesToInt(packedValue, 0));
}
});
assertEquals(20, seen.cardinality());
IOUtils.close(r, dir);
}
public void testAllPointDocsDeletedInSegment() throws Exception {
Directory dir = getDirectory(20);
IndexWriterConfig iwc = newIndexWriterConfig();
IndexWriter w = new IndexWriter(dir, iwc);
byte[] point = new byte[4];
for (int i = 0; i < 10; i++) {
Document doc = new Document();
NumericUtils.intToSortableBytes(i, point, 0);
doc.add(new BinaryPoint("dim", point));
doc.add(new NumericDocValuesField("id", i));
doc.add(newStringField("x", "x", Field.Store.NO));
w.addDocument(doc);
}
w.addDocument(new Document());
w.deleteDocuments(new Term("x", "x"));
if (random().nextBoolean()) {
w.forceMerge(1);
}
w.close();
DirectoryReader r = DirectoryReader.open(dir);
assertEquals(1, r.numDocs());
Bits liveDocs = MultiBits.getLiveDocs(r);
for (LeafReaderContext ctx : r.leaves()) {
PointValues values = ctx.reader().getPointValues("dim");
NumericDocValues idValues = ctx.reader().getNumericDocValues("id");
if (idValues == null) {
// this is (surprisingly) OK, because if the random IWC flushes all 10 docs before the 11th
// doc is added, and force merge runs, it
// will drop the 100% deleted segments, and the "id" field never exists in the final single
// doc segment
continue;
}
int[] docIDToID = new int[ctx.reader().maxDoc()];
int docID;
while ((docID = idValues.nextDoc()) != NO_MORE_DOCS) {
docIDToID[docID] = (int) idValues.longValue();
}
if (values != null) {
BitSet seen = new BitSet();
values.intersect(
new IntersectVisitor() {
@Override
public Relation compare(byte[] minPacked, byte[] maxPacked) {
return Relation.CELL_CROSSES_QUERY;
}
public void visit(int docID) {
throw new IllegalStateException();
}
public void visit(int docID, byte[] packedValue) {
if (liveDocs.get(docID)) {
seen.set(docID);
}
assertEquals(docIDToID[docID], NumericUtils.sortableBytesToInt(packedValue, 0));
}
});
assertEquals(0, seen.cardinality());
}
}
IOUtils.close(r, dir);
}
/** Make sure we close open files, delete temp files, etc., on exception */
public void testWithExceptions() throws Exception {
int numDocs = atLeast(1000);
int numBytesPerDim = TestUtil.nextInt(random(), 2, PointValues.MAX_NUM_BYTES);
int numDims = TestUtil.nextInt(random(), 1, PointValues.MAX_DIMENSIONS);
int numIndexDims =
TestUtil.nextInt(random(), 1, Math.min(numDims, PointValues.MAX_INDEX_DIMENSIONS));
byte[][][] docValues = new byte[numDocs][][];
for (int docID = 0; docID < numDocs; docID++) {
byte[][] values = new byte[numDims][];
for (int dim = 0; dim < numDims; dim++) {
values[dim] = new byte[numBytesPerDim];
random().nextBytes(values[dim]);
}
docValues[docID] = values;
}
// Keep retrying until we 1) we allow a big enough heap, and 2) we hit a random IOExc from MDW:
boolean done = false;
while (done == false) {
try (MockDirectoryWrapper dir = newMockFSDirectory(createTempDir())) {
try {
dir.setRandomIOExceptionRate(0.05);
dir.setRandomIOExceptionRateOnOpen(0.05);
verify(dir, docValues, null, numDims, numIndexDims, numBytesPerDim, true);
} catch (IllegalStateException ise) {
done = handlePossiblyFakeException(ise);
} catch (AssertionError ae) {
if (ae.getMessage() != null && ae.getMessage().contains("does not exist; files=")) {
// OK: likely we threw the random IOExc when IW was asserting the commit files exist
done = true;
} else {
throw ae;
}
} catch (IllegalArgumentException iae) {
// This just means we got a too-small maxMB for the maxPointsInLeafNode; just retry w/
// more heap
assertTrue(
iae.getMessage()
.contains("either increase maxMBSortInHeap or decrease maxPointsInLeafNode"));
} catch (IOException ioe) {
done = handlePossiblyFakeException(ioe);
}
}
}
}
// TODO: merge w/ BaseIndexFileFormatTestCase.handleFakeIOException
private boolean handlePossiblyFakeException(Exception e) {
Throwable ex = e;
while (ex != null) {
String message = ex.getMessage();
if (message != null
&& (message.contains("a random IOException")
|| message.contains("background merge hit exception"))) {
return true;
}
ex = ex.getCause();
}
Rethrow.rethrow(e);
// dead code yet javac disagrees:
return false;
}
public void testMultiValued() throws Exception {
int numBytesPerDim = TestUtil.nextInt(random(), 2, PointValues.MAX_NUM_BYTES);
int numDims = TestUtil.nextInt(random(), 1, PointValues.MAX_DIMENSIONS);
int numIndexDims =
TestUtil.nextInt(random(), 1, Math.min(PointValues.MAX_INDEX_DIMENSIONS, numDims));
int numDocs = TEST_NIGHTLY ? atLeast(1000) : atLeast(100);
List<byte[][]> docValues = new ArrayList<>();
List<Integer> docIDs = new ArrayList<>();
for (int docID = 0; docID < numDocs; docID++) {
int numValuesInDoc = TestUtil.nextInt(random(), 1, 5);
for (int ord = 0; ord < numValuesInDoc; ord++) {
docIDs.add(docID);
byte[][] values = new byte[numDims][];
for (int dim = 0; dim < numDims; dim++) {
values[dim] = new byte[numBytesPerDim];
random().nextBytes(values[dim]);
}
docValues.add(values);
}
}
byte[][][] docValuesArray = docValues.toArray(new byte[docValues.size()][][]);
int[] docIDsArray = new int[docIDs.size()];
for (int i = 0; i < docIDsArray.length; i++) {
docIDsArray[i] = docIDs.get(i);
}
verify(docValuesArray, docIDsArray, numDims, numIndexDims, numBytesPerDim);
}
public void testAllEqual() throws Exception {
int numBytesPerDim = TestUtil.nextInt(random(), 2, PointValues.MAX_NUM_BYTES);
int numDims = TestUtil.nextInt(random(), 1, PointValues.MAX_INDEX_DIMENSIONS);
int numDocs = atLeast(1000);
byte[][][] docValues = new byte[numDocs][][];
for (int docID = 0; docID < numDocs; docID++) {
if (docID == 0) {
byte[][] values = new byte[numDims][];
for (int dim = 0; dim < numDims; dim++) {
values[dim] = new byte[numBytesPerDim];
random().nextBytes(values[dim]);
}
docValues[docID] = values;
} else {
docValues[docID] = docValues[0];
}
}
verify(docValues, null, numDims, numBytesPerDim);
}
public void testOneDimEqual() throws Exception {
int numBytesPerDim = TestUtil.nextInt(random(), 2, PointValues.MAX_NUM_BYTES);
int numDims = TestUtil.nextInt(random(), 1, PointValues.MAX_INDEX_DIMENSIONS);
int numDocs = atLeast(1000);
int theEqualDim = random().nextInt(numDims);
byte[][][] docValues = new byte[numDocs][][];
for (int docID = 0; docID < numDocs; docID++) {
byte[][] values = new byte[numDims][];
for (int dim = 0; dim < numDims; dim++) {
values[dim] = new byte[numBytesPerDim];
random().nextBytes(values[dim]);
}
docValues[docID] = values;
if (docID > 0) {
docValues[docID][theEqualDim] = docValues[0][theEqualDim];
}
}
verify(docValues, null, numDims, numBytesPerDim);
}
// this should trigger run-length compression with lengths that are greater than 255
public void testOneDimTwoValues() throws Exception {
int numBytesPerDim = TestUtil.nextInt(random(), 2, PointValues.MAX_NUM_BYTES);
int numDims = TestUtil.nextInt(random(), 1, PointValues.MAX_INDEX_DIMENSIONS);
int numDocs = atLeast(1000);
int theDim = random().nextInt(numDims);
byte[] value1 = new byte[numBytesPerDim];
random().nextBytes(value1);
byte[] value2 = new byte[numBytesPerDim];
random().nextBytes(value2);
byte[][][] docValues = new byte[numDocs][][];
for (int docID = 0; docID < numDocs; docID++) {
byte[][] values = new byte[numDims][];
for (int dim = 0; dim < numDims; dim++) {
if (dim == theDim) {
values[dim] = random().nextBoolean() ? value1 : value2;
} else {
values[dim] = new byte[numBytesPerDim];
random().nextBytes(values[dim]);
}
}
docValues[docID] = values;
}
verify(docValues, null, numDims, numBytesPerDim);
}
// Tests on N-dimensional points where each dimension is a BigInteger
public void testBigIntNDims() throws Exception {
int numDocs = atLeast(200);
try (Directory dir = getDirectory(numDocs)) {
int numBytesPerDim = TestUtil.nextInt(random(), 2, PointValues.MAX_NUM_BYTES);
int numDims = TestUtil.nextInt(random(), 1, PointValues.MAX_INDEX_DIMENSIONS);
IndexWriterConfig iwc = newIndexWriterConfig(new MockAnalyzer(random()));
// We rely on docIDs not changing:
iwc.setMergePolicy(newLogMergePolicy());
RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc);
BigInteger[][] docs = new BigInteger[numDocs][];
for (int docID = 0; docID < numDocs; docID++) {
BigInteger[] values = new BigInteger[numDims];
if (VERBOSE) {
System.out.println(" docID=" + docID);
}
byte[][] bytes = new byte[numDims][];
for (int dim = 0; dim < numDims; dim++) {
values[dim] = randomBigInt(numBytesPerDim);
bytes[dim] = new byte[numBytesPerDim];
NumericUtils.bigIntToSortableBytes(values[dim], numBytesPerDim, bytes[dim], 0);
if (VERBOSE) {
System.out.println(" " + dim + " -> " + values[dim]);
}
}
docs[docID] = values;
Document doc = new Document();
doc.add(new BinaryPoint("field", bytes));
w.addDocument(doc);
}
DirectoryReader r = w.getReader();
w.close();
int iters = atLeast(100);
for (int iter = 0; iter < iters; iter++) {
if (VERBOSE) {
System.out.println("\nTEST: iter=" + iter);
}
// Random N dims rect query:
BigInteger[] queryMin = new BigInteger[numDims];
BigInteger[] queryMax = new BigInteger[numDims];
for (int dim = 0; dim < numDims; dim++) {
queryMin[dim] = randomBigInt(numBytesPerDim);
queryMax[dim] = randomBigInt(numBytesPerDim);
if (queryMin[dim].compareTo(queryMax[dim]) > 0) {
BigInteger x = queryMin[dim];
queryMin[dim] = queryMax[dim];
queryMax[dim] = x;
}
if (VERBOSE) {
System.out.println(
" " + dim + "\n min=" + queryMin[dim] + "\n max=" + queryMax[dim]);
}
}
final BitSet hits = new BitSet();
for (LeafReaderContext ctx : r.leaves()) {
PointValues dimValues = ctx.reader().getPointValues("field");
if (dimValues == null) {
continue;
}
final int docBase = ctx.docBase;
dimValues.intersect(
new IntersectVisitor() {
@Override
public void visit(int docID) {
hits.set(docBase + docID);
// System.out.println("visit docID=" + docID);
}
@Override
public void visit(int docID, byte[] packedValue) {
// System.out.println("visit check docID=" + docID);
for (int dim = 0; dim < numDims; dim++) {
BigInteger x =
NumericUtils.sortableBytesToBigInt(
packedValue, dim * numBytesPerDim, numBytesPerDim);
if (x.compareTo(queryMin[dim]) < 0 || x.compareTo(queryMax[dim]) > 0) {
// System.out.println(" no");
return;
}
}
// System.out.println(" yes");
hits.set(docBase + docID);
}
@Override
public Relation compare(byte[] minPacked, byte[] maxPacked) {
boolean crosses = false;
for (int dim = 0; dim < numDims; dim++) {
BigInteger min =
NumericUtils.sortableBytesToBigInt(
minPacked, dim * numBytesPerDim, numBytesPerDim);
BigInteger max =
NumericUtils.sortableBytesToBigInt(
maxPacked, dim * numBytesPerDim, numBytesPerDim);
assert max.compareTo(min) >= 0;
if (max.compareTo(queryMin[dim]) < 0 || min.compareTo(queryMax[dim]) > 0) {
return Relation.CELL_OUTSIDE_QUERY;
} else if (min.compareTo(queryMin[dim]) < 0
|| max.compareTo(queryMax[dim]) > 0) {
crosses = true;
}
}
if (crosses) {
return Relation.CELL_CROSSES_QUERY;
} else {
return Relation.CELL_INSIDE_QUERY;
}
}
});
}
for (int docID = 0; docID < numDocs; docID++) {
BigInteger[] docValues = docs[docID];
boolean expected = true;
for (int dim = 0; dim < numDims; dim++) {
BigInteger x = docValues[dim];
if (x.compareTo(queryMin[dim]) < 0 || x.compareTo(queryMax[dim]) > 0) {
expected = false;
break;
}
}
boolean actual = hits.get(docID);
assertEquals("docID=" + docID, expected, actual);
}
}
r.close();
}
}
public void testRandomBinaryTiny() throws Exception {
doTestRandomBinary(10);
}
public void testRandomBinaryMedium() throws Exception {
doTestRandomBinary(200);
}
@Nightly
public void testRandomBinaryBig() throws Exception {
assumeFalse("too slow with SimpleText", Codec.getDefault().getName().equals("SimpleText"));
doTestRandomBinary(200000);
}
private void doTestRandomBinary(int count) throws Exception {
int numDocs = TestUtil.nextInt(random(), count, count * 2);
int numBytesPerDim = TestUtil.nextInt(random(), 2, PointValues.MAX_NUM_BYTES);
int numDataDims = TestUtil.nextInt(random(), 1, PointValues.MAX_INDEX_DIMENSIONS);
int numIndexDims = TestUtil.nextInt(random(), 1, numDataDims);
byte[][][] docValues = new byte[numDocs][][];
for (int docID = 0; docID < numDocs; docID++) {
byte[][] values = new byte[numDataDims][];
for (int dim = 0; dim < numDataDims; dim++) {
values[dim] = new byte[numBytesPerDim];
// TODO: sometimes test on a "small" volume too, so we test the high density cases, higher
// chance of boundary, etc. cases:
random().nextBytes(values[dim]);
}
docValues[docID] = values;
}
verify(docValues, null, numDataDims, numIndexDims, numBytesPerDim);
}
private void verify(byte[][][] docValues, int[] docIDs, int numDims, int numBytesPerDim)
throws Exception {
verify(docValues, docIDs, numDims, numDims, numBytesPerDim);
}
/**
* docIDs can be null, for the single valued case, else it maps value to docID, but all values for
* one doc must be adjacent
*/
private void verify(
byte[][][] docValues, int[] docIDs, int numDataDims, int numIndexDims, int numBytesPerDim)
throws Exception {
try (Directory dir = getDirectory(docValues.length)) {
while (true) {
try {
verify(dir, docValues, docIDs, numDataDims, numIndexDims, numBytesPerDim, false);
return;
} catch (IllegalArgumentException iae) {
iae.printStackTrace();
// This just means we got a too-small maxMB for the maxPointsInLeafNode; just retry
assertTrue(
iae.getMessage()
.contains("either increase maxMBSortInHeap or decrease maxPointsInLeafNode"));
}
}
}
}
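  // Illustrative sketch (not part of the original test): the docIDs contract described in the
  // javadoc above can be pictured with a tiny hypothetical example. If doc 0 has two values and
  // doc 1 has one value, the parallel inputs would look like:
  //
  //   byte[][][] docValues = {valueA, valueB, valueC};  // three values in total
  //   int[]      docIDs    = {0,      0,      1     };  // doc 0's values are adjacent
  //
  // Passing docIDs == null instead means value ord == docID, i.e. exactly one value per document.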
private byte[] flattenBinaryPoint(byte[][] value, int numDataDims, int numBytesPerDim) {
byte[] result = new byte[value.length * numBytesPerDim];
for (int d = 0; d < numDataDims; ++d) {
System.arraycopy(value[d], 0, result, d * numBytesPerDim, numBytesPerDim);
}
return result;
}
/** test selective indexing */
private void verify(
Directory dir,
byte[][][] docValues,
int[] ids,
int numDims,
int numIndexDims,
int numBytesPerDim,
boolean expectExceptions)
throws Exception {
int numValues = docValues.length;
if (VERBOSE) {
System.out.println(
"TEST: numValues="
+ numValues
+ " numDims="
+ numDims
+ " numIndexDims="
+ numIndexDims
+ " numBytesPerDim="
+ numBytesPerDim);
}
// RandomIndexWriter is too slow:
boolean useRealWriter = docValues.length > 10000;
IndexWriterConfig iwc;
if (useRealWriter) {
iwc = new IndexWriterConfig(new MockAnalyzer(random()));
} else {
iwc = newIndexWriterConfig();
}
if (expectExceptions) {
MergeScheduler ms = iwc.getMergeScheduler();
if (ms instanceof ConcurrentMergeScheduler) {
((ConcurrentMergeScheduler) ms).setSuppressExceptions();
}
}
RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc);
DirectoryReader r = null;
// Compute actual min/max values:
byte[][] expectedMinValues = new byte[numDims][];
byte[][] expectedMaxValues = new byte[numDims][];
for (int ord = 0; ord < docValues.length; ord++) {
for (int dim = 0; dim < numDims; dim++) {
if (ord == 0) {
expectedMinValues[dim] = new byte[numBytesPerDim];
System.arraycopy(docValues[ord][dim], 0, expectedMinValues[dim], 0, numBytesPerDim);
expectedMaxValues[dim] = new byte[numBytesPerDim];
System.arraycopy(docValues[ord][dim], 0, expectedMaxValues[dim], 0, numBytesPerDim);
} else {
// TODO: it's cheating that we use StringHelper.compare for "truth": what if it's buggy?
if (Arrays.compareUnsigned(
docValues[ord][dim], 0, numBytesPerDim, expectedMinValues[dim], 0, numBytesPerDim)
< 0) {
System.arraycopy(docValues[ord][dim], 0, expectedMinValues[dim], 0, numBytesPerDim);
}
if (Arrays.compareUnsigned(
docValues[ord][dim], 0, numBytesPerDim, expectedMaxValues[dim], 0, numBytesPerDim)
> 0) {
System.arraycopy(docValues[ord][dim], 0, expectedMaxValues[dim], 0, numBytesPerDim);
}
}
}
}
// 20% of the time we add into a separate directory, then at some point use
// addIndexes to bring the indexed point values to the main directory:
Directory saveDir;
RandomIndexWriter saveW;
int addIndexesAt;
if (random().nextInt(5) == 1) {
saveDir = dir;
saveW = w;
dir = getDirectory(numValues);
if (useRealWriter) {
iwc = new IndexWriterConfig(new MockAnalyzer(random()));
} else {
iwc = newIndexWriterConfig();
}
if (expectExceptions) {
MergeScheduler ms = iwc.getMergeScheduler();
if (ms instanceof ConcurrentMergeScheduler) {
((ConcurrentMergeScheduler) ms).setSuppressExceptions();
}
}
w = new RandomIndexWriter(random(), dir, iwc);
addIndexesAt = TestUtil.nextInt(random(), 1, numValues - 1);
} else {
saveW = null;
saveDir = null;
addIndexesAt = 0;
}
try {
FieldType fieldType = new FieldType();
fieldType.setDimensions(numDims, numIndexDims, numBytesPerDim);
fieldType.freeze();
Document doc = null;
int lastID = -1;
for (int ord = 0; ord < numValues; ord++) {
int id;
if (ids == null) {
id = ord;
} else {
id = ids[ord];
}
if (id != lastID) {
if (doc != null) {
if (useRealWriter) {
w.w.addDocument(doc);
} else {
w.addDocument(doc);
}
}
doc = new Document();
doc.add(new NumericDocValuesField("id", id));
}
// pack the binary point
byte[] val = flattenBinaryPoint(docValues[ord], numDims, numBytesPerDim);
doc.add(new BinaryPoint("field", val, fieldType));
lastID = id;
if (random().nextInt(30) == 17) {
// randomly index some documents without this field
if (useRealWriter) {
w.w.addDocument(new Document());
} else {
w.addDocument(new Document());
}
if (VERBOSE) {
System.out.println("add empty doc");
}
}
if (random().nextInt(30) == 17) {
// randomly index some documents with this field, but we will delete them:
Document xdoc = new Document();
val = flattenBinaryPoint(docValues[ord], numDims, numBytesPerDim);
xdoc.add(new BinaryPoint("field", val, fieldType));
xdoc.add(new StringField("nukeme", "yes", Field.Store.NO));
if (useRealWriter) {
w.w.addDocument(xdoc);
} else {
w.addDocument(xdoc);
}
if (VERBOSE) {
System.out.println("add doc doc-to-delete");
}
if (random().nextInt(5) == 1) {
if (useRealWriter) {
w.w.deleteDocuments(new Term("nukeme", "yes"));
} else {
w.deleteDocuments(new Term("nukeme", "yes"));
}
}
}
if (VERBOSE) {
System.out.println(" ord=" + ord + " id=" + id);
for (int dim = 0; dim < numDims; dim++) {
System.out.println(" dim=" + dim + " value=" + new BytesRef(docValues[ord][dim]));
}
}
if (saveW != null && ord >= addIndexesAt) {
switchIndex(w, dir, saveW);
w = saveW;
dir = saveDir;
saveW = null;
saveDir = null;
}
}
w.addDocument(doc);
w.deleteDocuments(new Term("nukeme", "yes"));
if (random().nextBoolean()) {
if (VERBOSE) {
System.out.println("\nTEST: now force merge");
}
w.forceMerge(1);
}
r = w.getReader();
w.close();
if (VERBOSE) {
System.out.println("TEST: reader=" + r);
}
NumericDocValues idValues = MultiDocValues.getNumericValues(r, "id");
int[] docIDToID = new int[r.maxDoc()];
{
int docID;
while ((docID = idValues.nextDoc()) != NO_MORE_DOCS) {
docIDToID[docID] = (int) idValues.longValue();
}
}
Bits liveDocs = MultiBits.getLiveDocs(r);
// Verify min/max values are correct:
byte[] minValues = new byte[numIndexDims * numBytesPerDim];
Arrays.fill(minValues, (byte) 0xff);
byte[] maxValues = new byte[numIndexDims * numBytesPerDim];
for (LeafReaderContext ctx : r.leaves()) {
PointValues dimValues = ctx.reader().getPointValues("field");
if (dimValues == null) {
continue;
}
byte[] leafMinValues = dimValues.getMinPackedValue();
byte[] leafMaxValues = dimValues.getMaxPackedValue();
for (int dim = 0; dim < numIndexDims; dim++) {
if (Arrays.compareUnsigned(
leafMinValues,
dim * numBytesPerDim,
dim * numBytesPerDim + numBytesPerDim,
minValues,
dim * numBytesPerDim,
dim * numBytesPerDim + numBytesPerDim)
< 0) {
System.arraycopy(
leafMinValues,
dim * numBytesPerDim,
minValues,
dim * numBytesPerDim,
numBytesPerDim);
}
if (Arrays.compareUnsigned(
leafMaxValues,
dim * numBytesPerDim,
dim * numBytesPerDim + numBytesPerDim,
maxValues,
dim * numBytesPerDim,
dim * numBytesPerDim + numBytesPerDim)
> 0) {
System.arraycopy(
leafMaxValues,
dim * numBytesPerDim,
maxValues,
dim * numBytesPerDim,
numBytesPerDim);
}
}
}
byte[] scratch = new byte[numBytesPerDim];
for (int dim = 0; dim < numIndexDims; dim++) {
System.arraycopy(minValues, dim * numBytesPerDim, scratch, 0, numBytesPerDim);
// System.out.println("dim=" + dim + " expectedMin=" + new BytesRef(expectedMinValues[dim])
// + " min=" + new BytesRef(scratch));
assertTrue(Arrays.equals(expectedMinValues[dim], scratch));
System.arraycopy(maxValues, dim * numBytesPerDim, scratch, 0, numBytesPerDim);
// System.out.println("dim=" + dim + " expectedMax=" + new BytesRef(expectedMaxValues[dim])
// + " max=" + new BytesRef(scratch));
assertTrue(Arrays.equals(expectedMaxValues[dim], scratch));
}
int iters = atLeast(100);
for (int iter = 0; iter < iters; iter++) {
if (VERBOSE) {
System.out.println("\nTEST: iter=" + iter);
}
// Random N dims rect query:
byte[][] queryMin = new byte[numIndexDims][];
byte[][] queryMax = new byte[numIndexDims][];
for (int dim = 0; dim < numIndexDims; dim++) {
queryMin[dim] = new byte[numBytesPerDim];
random().nextBytes(queryMin[dim]);
queryMax[dim] = new byte[numBytesPerDim];
random().nextBytes(queryMax[dim]);
if (Arrays.compareUnsigned(
queryMin[dim], 0, numBytesPerDim, queryMax[dim], 0, numBytesPerDim)
> 0) {
byte[] x = queryMin[dim];
queryMin[dim] = queryMax[dim];
queryMax[dim] = x;
}
}
if (VERBOSE) {
for (int dim = 0; dim < numIndexDims; dim++) {
System.out.println(
" dim="
+ dim
+ "\n queryMin="
+ new BytesRef(queryMin[dim])
+ "\n queryMax="
+ new BytesRef(queryMax[dim]));
}
}
final BitSet hits = new BitSet();
for (LeafReaderContext ctx : r.leaves()) {
PointValues dimValues = ctx.reader().getPointValues("field");
if (dimValues == null) {
continue;
}
final int docBase = ctx.docBase;
dimValues.intersect(
new PointValues.IntersectVisitor() {
@Override
public void visit(int docID) {
if (liveDocs == null || liveDocs.get(docBase + docID)) {
hits.set(docIDToID[docBase + docID]);
}
// System.out.println("visit docID=" + docID);
}
@Override
public void visit(int docID, byte[] packedValue) {
if (liveDocs != null && liveDocs.get(docBase + docID) == false) {
return;
}
for (int dim = 0; dim < numIndexDims; dim++) {
// System.out.println(" dim=" + dim + " value=" + new BytesRef(packedValue,
// dim*numBytesPerDim, numBytesPerDim));
if (Arrays.compareUnsigned(
packedValue,
dim * numBytesPerDim,
dim * numBytesPerDim + numBytesPerDim,
queryMin[dim],
0,
numBytesPerDim)
< 0
|| Arrays.compareUnsigned(
packedValue,
dim * numBytesPerDim,
dim * numBytesPerDim + numBytesPerDim,
queryMax[dim],
0,
numBytesPerDim)
> 0) {
// System.out.println(" no");
return;
}
}
// System.out.println(" yes");
hits.set(docIDToID[docBase + docID]);
}
@Override
public Relation compare(byte[] minPacked, byte[] maxPacked) {
boolean crosses = false;
// System.out.println("compare");
for (int dim = 0; dim < numIndexDims; dim++) {
if (Arrays.compareUnsigned(
maxPacked,
dim * numBytesPerDim,
dim * numBytesPerDim + numBytesPerDim,
queryMin[dim],
0,
numBytesPerDim)
< 0
|| Arrays.compareUnsigned(
minPacked,
dim * numBytesPerDim,
dim * numBytesPerDim + numBytesPerDim,
queryMax[dim],
0,
numBytesPerDim)
> 0) {
// System.out.println(" query_outside_cell");
return Relation.CELL_OUTSIDE_QUERY;
} else if (Arrays.compareUnsigned(
minPacked,
dim * numBytesPerDim,
dim * numBytesPerDim + numBytesPerDim,
queryMin[dim],
0,
numBytesPerDim)
< 0
|| Arrays.compareUnsigned(
maxPacked,
dim * numBytesPerDim,
dim * numBytesPerDim + numBytesPerDim,
queryMax[dim],
0,
numBytesPerDim)
> 0) {
crosses = true;
}
}
if (crosses) {
// System.out.println(" query_crosses_cell");
return Relation.CELL_CROSSES_QUERY;
} else {
// System.out.println(" cell_inside_query");
return Relation.CELL_INSIDE_QUERY;
}
}
});
}
BitSet expected = new BitSet();
for (int ord = 0; ord < numValues; ord++) {
boolean matches = true;
for (int dim = 0; dim < numIndexDims; dim++) {
byte[] x = docValues[ord][dim];
if (Arrays.compareUnsigned(x, 0, numBytesPerDim, queryMin[dim], 0, numBytesPerDim) < 0
|| Arrays.compareUnsigned(x, 0, numBytesPerDim, queryMax[dim], 0, numBytesPerDim)
> 0) {
matches = false;
break;
}
}
if (matches) {
int id;
if (ids == null) {
id = ord;
} else {
id = ids[ord];
}
expected.set(id);
}
}
int limit = Math.max(expected.length(), hits.length());
int failCount = 0;
int successCount = 0;
for (int id = 0; id < limit; id++) {
if (expected.get(id) != hits.get(id)) {
System.out.println("FAIL: id=" + id);
failCount++;
} else {
successCount++;
}
}
if (failCount != 0) {
for (int docID = 0; docID < r.maxDoc(); docID++) {
System.out.println(" docID=" + docID + " id=" + docIDToID[docID]);
}
fail(failCount + " docs failed; " + successCount + " docs succeeded");
}
}
} finally {
IOUtils.closeWhileHandlingException(r, w, saveW, saveDir == null ? null : dir);
}
}
public void testAddIndexes() throws IOException {
Directory dir1 = newDirectory();
RandomIndexWriter w = new RandomIndexWriter(random(), dir1);
Document doc = new Document();
doc.add(new IntPoint("int1", 17));
w.addDocument(doc);
doc = new Document();
doc.add(new IntPoint("int2", 42));
w.addDocument(doc);
w.close();
    // Different field number assignments:
Directory dir2 = newDirectory();
w = new RandomIndexWriter(random(), dir2);
doc = new Document();
doc.add(new IntPoint("int2", 42));
w.addDocument(doc);
doc = new Document();
doc.add(new IntPoint("int1", 17));
w.addDocument(doc);
w.close();
Directory dir = newDirectory();
w = new RandomIndexWriter(random(), dir);
w.addIndexes(new Directory[] {dir1, dir2});
w.forceMerge(1);
DirectoryReader r = w.getReader();
IndexSearcher s = newSearcher(r, false);
assertEquals(2, s.count(IntPoint.newExactQuery("int1", 17)));
assertEquals(2, s.count(IntPoint.newExactQuery("int2", 42)));
r.close();
w.close();
dir.close();
dir1.close();
dir2.close();
}
private void switchIndex(RandomIndexWriter w, Directory dir, RandomIndexWriter saveW)
throws IOException {
if (random().nextBoolean()) {
// Add via readers:
try (DirectoryReader r = w.getReader()) {
if (random().nextBoolean()) {
// Add via CodecReaders:
List<CodecReader> subs = new ArrayList<>();
for (LeafReaderContext context : r.leaves()) {
subs.add((CodecReader) context.reader());
}
if (VERBOSE) {
System.out.println("TEST: now use addIndexes(CodecReader[]) to switch writers");
}
saveW.addIndexes(subs.toArray(new CodecReader[subs.size()]));
} else {
if (VERBOSE) {
System.out.println(
"TEST: now use TestUtil.addIndexesSlowly(DirectoryReader[]) to switch writers");
}
TestUtil.addIndexesSlowly(saveW.w, r);
}
}
} else {
// Add via directory:
if (VERBOSE) {
System.out.println("TEST: now use addIndexes(Directory[]) to switch writers");
}
w.close();
saveW.addIndexes(new Directory[] {dir});
}
w.close();
dir.close();
}
private BigInteger randomBigInt(int numBytes) {
BigInteger x = new BigInteger(numBytes * 8 - 1, random());
if (random().nextBoolean()) {
x = x.negate();
}
return x;
}
private Directory getDirectory(int numPoints) throws IOException {
Directory dir;
if (numPoints > 100000) {
dir = newFSDirectory(createTempDir("TestBKDTree"));
} else {
dir = newDirectory();
}
// dir = FSDirectory.open(createTempDir());
return dir;
}
@Override
protected boolean mergeIsStable() {
// suppress this test from base class: merges for BKD trees are not stable because the tree
// created by merge will have a different
// structure than the tree created by adding points separately
return false;
}
// LUCENE-7491
public void testMixedSchema() throws Exception {
Directory dir = newDirectory();
IndexWriterConfig iwc = newIndexWriterConfig();
RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc);
iwc.setMaxBufferedDocs(2);
for (int i = 0; i < 2; i++) {
Document doc = new Document();
doc.add(new StringField("id", Integer.toString(i), Field.Store.NO));
doc.add(new IntPoint("int", i));
w.addDocument(doc);
}
// index has 1 segment now (with 2 docs) and that segment does have points, but the "id" field
// in particular does NOT
Document doc = new Document();
doc.add(new IntPoint("id", 0));
w.addDocument(doc);
// now we write another segment where the id field does have points:
w.forceMerge(1);
IOUtils.close(w, dir);
}
}
| 1 | 39,264 | I think we should refactor or drop this test, as it is not testing the points format now, but IndexingChain/FieldsInfos' logic. Maybe we could rename the test `testMergeMissing` and configure the first segment to not have the `id` field at all. | apache-lucene-solr | java |
@@ -6,15 +6,6 @@ module RSpec
# @private
# Produces progress output while bisecting.
class BisectProgressFormatter < BaseTextFormatter
- # We've named all events with a `bisect_` prefix to prevent naming collisions.
- Formatters.register self, :bisect_starting, :bisect_original_run_complete,
- :bisect_round_started, :bisect_individual_run_complete,
- :bisect_complete, :bisect_repro_command,
- :bisect_failed, :bisect_aborted,
- :bisect_round_ignoring_ids, :bisect_round_detected_multiple_culprits,
- :bisect_dependency_check_started, :bisect_dependency_check_passed,
- :bisect_dependency_check_failed
-
def bisect_starting(notification)
@round_count = 0
      options = notification.original_cli_args.join(' ')
| 1 |
RSpec::Support.require_rspec_core "formatters/base_text_formatter"
module RSpec
module Core
module Formatters
# @private
# Produces progress output while bisecting.
class BisectProgressFormatter < BaseTextFormatter
# We've named all events with a `bisect_` prefix to prevent naming collisions.
Formatters.register self, :bisect_starting, :bisect_original_run_complete,
:bisect_round_started, :bisect_individual_run_complete,
:bisect_complete, :bisect_repro_command,
:bisect_failed, :bisect_aborted,
:bisect_round_ignoring_ids, :bisect_round_detected_multiple_culprits,
:bisect_dependency_check_started, :bisect_dependency_check_passed,
:bisect_dependency_check_failed
def bisect_starting(notification)
@round_count = 0
options = notification.original_cli_args.join(' ')
output.puts "Bisect started using options: #{options.inspect}"
output.print "Running suite to find failures..."
end
def bisect_original_run_complete(notification)
failures = Helpers.pluralize(notification.failed_example_ids.size, "failing example")
non_failures = Helpers.pluralize(notification.non_failing_example_ids.size, "non-failing example")
output.puts " (#{Helpers.format_duration(notification.duration)})"
output.puts "Starting bisect with #{failures} and #{non_failures}."
end
def bisect_dependency_check_started(_notification)
output.print "Checking that failure(s) are order-dependent.."
end
def bisect_dependency_check_passed(_notification)
output.puts " failure appears to be order-dependent"
end
def bisect_dependency_check_failed(_notification)
output.puts " failure(s) do not require any non-failures to run first"
end
def bisect_round_started(notification, include_trailing_space=true)
@round_count += 1
range_desc = notification.candidate_range.description
output.print "\nRound #{@round_count}: bisecting over non-failing #{range_desc}"
output.print " " if include_trailing_space
end
def bisect_round_ignoring_ids(notification)
range_desc = notification.ignore_range.description
output.print " ignoring #{range_desc}"
output.print " (#{Helpers.format_duration(notification.duration)})"
end
def bisect_round_detected_multiple_culprits(notification)
output.print " multiple culprits detected - splitting candidates"
output.print " (#{Helpers.format_duration(notification.duration)})"
end
def bisect_individual_run_complete(_)
output.print '.'
end
def bisect_complete(notification)
output.puts "\nBisect complete! Reduced necessary non-failing examples " \
"from #{notification.original_non_failing_count} to " \
"#{notification.remaining_count} in " \
"#{Helpers.format_duration(notification.duration)}."
end
def bisect_repro_command(notification)
output.puts "\nThe minimal reproduction command is:\n #{notification.repro}"
end
def bisect_failed(notification)
output.puts "\nBisect failed! #{notification.failure_explanation}"
end
def bisect_aborted(notification)
output.puts "\n\nBisect aborted!"
output.puts "\nThe most minimal reproduction command discovered so far is:\n #{notification.repro}"
end
end
# @private
# Produces detailed debug output while bisecting. Used when
# bisect is performed while the `DEBUG_RSPEC_BISECT` ENV var is used.
# Designed to provide details for us when we need to troubleshoot bisect bugs.
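    # Illustrative note (not part of the original source): this formatter is selected when bisect is
    # run with the env var set, e.g. something like `DEBUG_RSPEC_BISECT=1 rspec --bisect`. The exact
    # invocation is an assumption here; presumably any non-empty value for the variable works.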
class BisectDebugFormatter < BisectProgressFormatter
Formatters.register self, :bisect_original_run_complete, :bisect_individual_run_start,
:bisect_individual_run_complete, :bisect_round_ignoring_ids
def bisect_original_run_complete(notification)
output.puts " (#{Helpers.format_duration(notification.duration)})"
output.puts " - #{describe_ids 'Failing examples', notification.failed_example_ids}"
output.puts " - #{describe_ids 'Non-failing examples', notification.non_failing_example_ids}"
end
def bisect_individual_run_start(notification)
output.print "\n - Running: #{notification.command}"
end
def bisect_individual_run_complete(notification)
output.print " (#{Helpers.format_duration(notification.duration)})"
end
def bisect_dependency_check_passed(_notification)
output.print "\n - Failure appears to be order-dependent"
end
def bisect_dependency_check_failed(_notification)
output.print "\n - Failure is not order-dependent"
end
def bisect_round_started(notification)
super(notification, false)
end
def bisect_round_ignoring_ids(notification)
output.print "\n - #{describe_ids 'Examples we can safely ignore', notification.ids_to_ignore}"
output.print "\n - #{describe_ids 'Remaining non-failing examples', notification.remaining_ids}"
end
def bisect_round_detected_multiple_culprits(_notification)
output.print "\n - Multiple culprits detected - splitting candidates"
end
private
def describe_ids(description, ids)
organized_ids = Formatters::Helpers.organize_ids(ids)
formatted_ids = organized_ids.map { |id| " - #{id}" }.join("\n")
"#{description} (#{ids.size}):\n#{formatted_ids}"
end
end
end
end
end
| 1 | 16,943 | not sure I follow why all this goes away? | rspec-rspec-core | rb |
@@ -46,7 +46,7 @@ def test_insert_mode(file_name, source, input_text, auto_insert, quteproc):
quteproc.press_keys(input_text)
elif source == 'clipboard':
quteproc.send_cmd(':debug-set-fake-clipboard "{}"'.format(input_text))
- quteproc.send_cmd(':paste-primary')
+ quteproc.send_cmd(':insert-text {clipboard}')
quteproc.send_cmd(':hint all')
    quteproc.send_cmd(':follow-hint a')
| 1 |
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2016 Florian Bruhin (The Compiler) <[email protected]>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Test insert mode settings on html files."""
import logging
import json
import pytest
@pytest.mark.parametrize('file_name, source, input_text, auto_insert', [
('textarea.html', 'clipboard', 'qutebrowser', 'false'),
('textarea.html', 'keypress', 'superqutebrowser', 'false'),
('input.html', 'clipboard', 'amazingqutebrowser', 'false'),
('input.html', 'keypress', 'awesomequtebrowser', 'false'),
('autofocus.html', 'keypress', 'cutebrowser', 'true'),
])
def test_insert_mode(file_name, source, input_text, auto_insert, quteproc):
url_path = 'data/insert_mode_settings/html/{}'.format(file_name)
quteproc.open_path(url_path)
quteproc.set_setting('input', 'auto-insert-mode', auto_insert)
quteproc.send_cmd(':hint all')
quteproc.send_cmd(':follow-hint a')
quteproc.wait_for(message='Clicked editable element!')
quteproc.send_cmd(':debug-set-fake-clipboard')
if source == 'keypress':
quteproc.press_keys(input_text)
elif source == 'clipboard':
quteproc.send_cmd(':debug-set-fake-clipboard "{}"'.format(input_text))
quteproc.send_cmd(':paste-primary')
quteproc.send_cmd(':hint all')
quteproc.send_cmd(':follow-hint a')
quteproc.wait_for(message='Clicked editable element!')
quteproc.send_cmd(':enter-mode caret')
quteproc.send_cmd(':toggle-selection')
quteproc.send_cmd(':move-to-prev-word')
quteproc.send_cmd(':yank-selected')
expected_message = '{} chars yanked to clipboard'.format(len(input_text))
quteproc.mark_expected(category='message',
loglevel=logging.INFO,
message=expected_message)
quteproc.wait_for(
message='Setting fake clipboard: {}'.format(json.dumps(input_text)))
def test_auto_leave_insert_mode(quteproc):
url_path = 'data/insert_mode_settings/html/autofocus.html'
quteproc.open_path(url_path)
quteproc.set_setting('input', 'auto-leave-insert-mode', 'true')
quteproc.press_keys('abcd')
quteproc.send_cmd(':hint all')
# Select the disabled input box to leave insert mode
quteproc.send_cmd(':follow-hint s')
quteproc.wait_for(message='Clicked non-editable element!')
quteproc.send_cmd(':enter-mode caret')
quteproc.send_cmd(':paste-primary')
expected_message = ('paste-primary: This command is only allowed in '
'insert mode.')
quteproc.mark_expected(category='message',
loglevel=logging.ERROR,
message=expected_message)
| 1 | 15,898 | Is this `{clipboard}` or `{primary}`, as the deprecation message for `:paste-primary` says? | qutebrowser-qutebrowser | py |
@@ -46,6 +46,18 @@ var (
Usage: "OpenVPN subnet netmask",
Value: "255.255.255.0",
}
+ // FlagOpenVPNPriceMinute sets the price per minute for provided OpenVPN service.
+ FlagOpenVPNPriceMinute = cli.Uint64Flag{
+ Name: "openvpn.price-minute",
+ Usage: "Sets the price of the OpenVPN service per minute.",
+ Value: 50000,
+ }
+ // FlagOpenVPNPriceGB sets the price per GiB for provided OpenVPN service.
+ FlagOpenVPNPriceGB = cli.Uint64Flag{
+ Name: "openvpn.price-gb",
+ Usage: "Sets the price of the OpenVPN service per GiB.",
+ Value: 7000000,
+ }
)
// RegisterFlagsServiceOpenvpn registers OpenVPN CLI flags for parsing them later
| 1 |
/*
* Copyright (C) 2019 The "MysteriumNetwork/node" Authors.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package config
import (
"github.com/urfave/cli/v2"
)
var (
// FlagOpenvpnProtocol protocol for OpenVPN to use.
FlagOpenvpnProtocol = cli.StringFlag{
Name: "openvpn.proto",
Usage: "OpenVPN protocol to use. Options: { udp, tcp }",
Value: "udp",
}
// FlagOpenvpnPort port for OpenVPN to use.
FlagOpenvpnPort = cli.IntFlag{
Name: "openvpn.port",
Usage: "OpenVPN port to use. If not specified, random port will be used",
Value: 0,
}
// FlagOpenvpnSubnet OpenVPN subnet that will be used for connecting clients.
FlagOpenvpnSubnet = cli.StringFlag{
Name: "openvpn.subnet",
Usage: "OpenVPN subnet that will be used to connecting VPN clients",
Value: "10.8.0.0",
}
// FlagOpenvpnNetmask OpenVPN subnet netmask.
FlagOpenvpnNetmask = cli.StringFlag{
Name: "openvpn.netmask",
Usage: "OpenVPN subnet netmask",
Value: "255.255.255.0",
}
)
// RegisterFlagsServiceOpenvpn registers OpenVPN CLI flags for parsing them later
func RegisterFlagsServiceOpenvpn(flags *[]cli.Flag) {
*flags = append(*flags,
&FlagOpenvpnProtocol,
&FlagOpenvpnPort,
&FlagOpenvpnSubnet,
&FlagOpenvpnNetmask,
)
}
// ParseFlagsServiceOpenvpn parses CLI flags and registers value to configuration
func ParseFlagsServiceOpenvpn(ctx *cli.Context) {
Current.ParseStringFlag(ctx, FlagOpenvpnProtocol)
Current.ParseIntFlag(ctx, FlagOpenvpnPort)
Current.ParseStringFlag(ctx, FlagOpenvpnSubnet)
Current.ParseStringFlag(ctx, FlagOpenvpnNetmask)
}
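// Illustrative sketch (not part of the original file): the price flags introduced by the patch
// above would presumably be wired into the two functions above in the same way as the existing
// flags, roughly:
//
//	RegisterFlagsServiceOpenvpn: also append &FlagOpenVPNPriceMinute and &FlagOpenVPNPriceGB
//	ParseFlagsServiceOpenvpn:    also parse both flags into config via a uint64 parsing helper
//
// The name and existence of such a uint64 helper is an assumption; this snippet only shows
// ParseStringFlag and ParseIntFlag on Current.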
| 1 | 16,175 | Human unreadable. IMHO user should input MYST value: 0.0006 @chompomonim, opinions? | mysteriumnetwork-node | go |
@@ -643,15 +643,6 @@ class NVDAObject(documentBase.TextContainerObject,baseObject.ScriptableObject):
return self.presType_layout
if role in (controlTypes.ROLE_TABLEROW,controlTypes.ROLE_TABLECOLUMN,controlTypes.ROLE_TABLECELL) and (not config.conf["documentFormatting"]["reportTables"] or not config.conf["documentFormatting"]["reportTableCellCoords"]):
return self.presType_layout
- if role in (controlTypes.ROLE_TABLEROW,controlTypes.ROLE_TABLECOLUMN):
- try:
- table=self.table
- except NotImplementedError:
- table=None
- if table:
- # This is part of a real table, so the cells will report row/column information.
- # Therefore, this object is just for layout.
- return self.presType_layout
return self.presType_content
	def _get_simpleParent(self):
| 1 |
# -*- coding: UTF-8 -*-
#NVDAObjects/__init__.py
#A part of NonVisual Desktop Access (NVDA)
#Copyright (C) 2006-2017 NV Access Limited, Peter Vágner, Aleksey Sadovoy, Patrick Zajda, Babbage B.V., Davy Kager
#This file is covered by the GNU General Public License.
#See the file COPYING for more details.
"""Module that contains the base NVDA object type"""
from new import instancemethod
import time
import re
import weakref
from logHandler import log
import review
import eventHandler
from displayModel import DisplayModelTextInfo
import baseObject
import documentBase
import speech
import ui
import api
import textInfos.offsets
import config
import controlTypes
import appModuleHandler
import treeInterceptorHandler
import braille
import globalPluginHandler
import brailleInput
class NVDAObjectTextInfo(textInfos.offsets.OffsetsTextInfo):
"""A default TextInfo which is used to enable text review of information about widgets that don't support text content.
The L{NVDAObject.basicText} attribute is used as the text to expose.
"""
locationText=None
def _get_unit_mouseChunk(self):
return textInfos.UNIT_STORY
def _getStoryText(self):
return self.obj.basicText
def _getStoryLength(self):
return len(self._getStoryText())
def _getTextRange(self,start,end):
text=self._getStoryText()
return text[start:end]
class InvalidNVDAObject(RuntimeError):
"""Raised by NVDAObjects during construction to inform that this object is invalid.
In this case, for the purposes of NVDA, the object should be considered non-existent.
Therefore, L{DynamicNVDAObjectType} will return C{None} if this exception is raised.
"""
class DynamicNVDAObjectType(baseObject.ScriptableObject.__class__):
_dynamicClassCache={}
def __call__(self,chooseBestAPI=True,**kwargs):
if chooseBestAPI:
APIClass=self.findBestAPIClass(kwargs)
if not APIClass: return None
else:
APIClass=self
# Instantiate the requested class.
try:
obj=APIClass.__new__(APIClass,**kwargs)
obj.APIClass=APIClass
if isinstance(obj,self):
obj.__init__(**kwargs)
except InvalidNVDAObject, e:
log.debugWarning("Invalid NVDAObject: %s" % e, stack_info=True)
return None
clsList = []
if "findOverlayClasses" in APIClass.__dict__:
obj.findOverlayClasses(clsList)
else:
clsList.append(APIClass)
# Allow app modules to choose overlay classes.
appModule=obj.appModule
# optimisation: The base implementation of chooseNVDAObjectOverlayClasses does nothing,
# so only call this method if it's been overridden.
if appModule and not hasattr(appModule.chooseNVDAObjectOverlayClasses, "_isBase"):
appModule.chooseNVDAObjectOverlayClasses(obj, clsList)
# Allow global plugins to choose overlay classes.
for plugin in globalPluginHandler.runningPlugins:
if "chooseNVDAObjectOverlayClasses" in plugin.__class__.__dict__:
plugin.chooseNVDAObjectOverlayClasses(obj, clsList)
# Determine the bases for the new class.
bases=[]
for index in xrange(len(clsList)):
# A class doesn't need to be a base if it is already implicitly included by being a superclass of a previous base.
if index==0 or not issubclass(clsList[index-1],clsList[index]):
bases.append(clsList[index])
# Construct the new class.
if len(bases) == 1:
# We only have one base, so there's no point in creating a dynamic type.
newCls=bases[0]
else:
bases=tuple(bases)
newCls=self._dynamicClassCache.get(bases,None)
if not newCls:
name="Dynamic_%s"%"".join([x.__name__ for x in clsList])
newCls=type(name,bases,{})
self._dynamicClassCache[bases]=newCls
oldMro=frozenset(obj.__class__.__mro__)
# Mutate obj into the new class.
obj.__class__=newCls
# Initialise the overlay classes.
for cls in reversed(newCls.__mro__):
if cls in oldMro:
# This class was part of the initially constructed object, so its constructor would have been called.
continue
initFunc=cls.__dict__.get("initOverlayClass")
if initFunc:
initFunc(obj)
# Bind gestures specified on the class.
try:
obj.bindGestures(getattr(cls, "_%s__gestures" % cls.__name__))
except AttributeError:
pass
# Allow app modules to make minor tweaks to the instance.
if appModule and hasattr(appModule,"event_NVDAObject_init"):
appModule.event_NVDAObject_init(obj)
return obj
@classmethod
def clearDynamicClassCache(cls):
"""Clear the dynamic class cache.
This should be called when a plugin is unloaded so that any used overlay classes in the unloaded plugin can be garbage collected.
"""
cls._dynamicClassCache.clear()
class NVDAObject(documentBase.TextContainerObject,baseObject.ScriptableObject):
"""NVDA's representation of a single control/widget.
Every widget, regardless of how it is exposed by an application or the operating system, is represented by a single NVDAObject instance.
This allows NVDA to work with all widgets in a uniform way.
An NVDAObject provides information about the widget (e.g. its name, role and value),
as well as functionality to manipulate it (e.g. perform an action or set focus).
Events for the widget are handled by special event methods on the object.
Commands triggered by input from the user can also be handled by special methods called scripts.
See L{ScriptableObject} for more details.
The only attribute that absolutely must be provided is L{processID}.
However, subclasses should provide at least the L{name} and L{role} attributes in order for the object to be meaningful to the user.
Attributes such as L{parent}, L{firstChild}, L{next} and L{previous} link an instance to other NVDAObjects in the hierarchy.
In order to facilitate access to text exposed by a widget which supports text content (e.g. an editable text control),
a L{textInfos.TextInfo} should be implemented and the L{TextInfo} attribute should specify this class.
There are two main types of NVDAObject classes:
* API classes, which provide the core functionality to work with objects exposed using a particular API (e.g. MSAA/IAccessible).
* Overlay classes, which supplement the core functionality provided by an API class to handle a specific widget or type of widget.
Most developers need only be concerned with overlay classes.
The overlay classes to be used for an instance are determined using the L{findOverlayClasses} method on the API class.
An L{AppModule} can also choose overlay classes for an instance using the L{AppModule.chooseNVDAObjectOverlayClasses} method.
"""
__metaclass__=DynamicNVDAObjectType
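	# Illustrative sketch (not part of the original source; all names below are hypothetical): an
	# overlay class built on the mechanism described in the class docstring above could look
	# roughly like this:
	#
	#   class FunkyListItem(NVDAObject):
	#       def _get_role(self):
	#           return controlTypes.ROLE_LISTITEM
	#       def script_reportStatus(self, gesture):
	#           ui.message(self.name)
	#       __gestures = {"kb:NVDA+shift+s": "reportStatus"}
	#
	# An AppModule (or an API class's findOverlayClasses) would then add FunkyListItem to clsList in
	# chooseNVDAObjectOverlayClasses so that instances of the matching widget pick up this behaviour.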
cachePropertiesByDefault = True
#: The TextInfo class this object should use to provide access to text.
#: @type: type; L{textInfos.TextInfo}
TextInfo=NVDAObjectTextInfo
#: Indicates if the text selection is anchored at the start.
#: The anchored position is the end that doesn't move when extending or shrinking the selection.
#: For example, if you have no selection and you press shift+rightArrow to select the next character,
#: this will be True.
#: In contrast, if you have no selection and you press shift+leftArrow to select the previous character,
#: this will be False.
#: If the selection is anchored at the end or there is no information this is C{False}.
#: @type: bool
isTextSelectionAnchoredAtStart=True
@classmethod
def findBestAPIClass(cls,kwargs,relation=None):
"""
Finds out the highest-level APIClass this object can get to given these kwargs, and updates the kwargs and returns the APIClass.
@param relation: the relationship of a possible new object of this type to another object creating it (e.g. parent).
		@type relation: string
@param kwargs: the arguments necessary to construct an object of the class this method was called on.
@type kwargs: dictionary
@returns: the new APIClass
@rtype: DynamicNVDAObjectType
"""
newAPIClass=cls
if 'getPossibleAPIClasses' in newAPIClass.__dict__:
for possibleAPIClass in newAPIClass.getPossibleAPIClasses(kwargs,relation=relation):
if 'kwargsFromSuper' not in possibleAPIClass.__dict__:
log.error("possible API class %s does not implement kwargsFromSuper"%possibleAPIClass)
continue
if possibleAPIClass.kwargsFromSuper(kwargs,relation=relation):
return possibleAPIClass.findBestAPIClass(kwargs,relation=relation)
return newAPIClass if newAPIClass is not NVDAObject else None
@classmethod
def getPossibleAPIClasses(cls,kwargs,relation=None):
"""
Provides a generator which can generate all the possible API classes (in priority order) that inherit directly from the class it was called on.
@param relation: the relationship of a possible new object of this type to another object creating it (e.g. parent).
		@type relation: string
@param kwargs: the arguments necessary to construct an object of the class this method was called on.
@type kwargs: dictionary
@returns: a generator
@rtype: generator
"""
import NVDAObjects.window
yield NVDAObjects.window.Window
@classmethod
def kwargsFromSuper(cls,kwargs,relation=None):
"""
		Finds out if this class can be instantiated from the given super kwargs.
		If so it updates the kwargs to contain everything it will need to instantiate this class, and returns True.
		If this class cannot be instantiated, it returns False and kwargs is not touched.
		@param relation: why is this class being instantiated? parent, focus, foreground etc...
@type relation: string
@param kwargs: the kwargs for constructing this class's super class.
@type kwargs: dict
@rtype: boolean
"""
raise NotImplementedError
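	# Illustrative sketch (not part of the original source; names are hypothetical): an API class
	# plugging into the selection protocol described above would typically implement this hook
	# roughly as:
	#
	#   @classmethod
	#   def kwargsFromSuper(cls, kwargs, relation=None):
	#       handle = kwargs.get("windowHandle")
	#       element = someAPIElementFromHandle(handle)  # hypothetical helper
	#       if not element:
	#           return False
	#       kwargs["someAPIElement"] = element
	#       return True
	#
	# and the parent class's getPossibleAPIClasses would yield this class so findBestAPIClass can try it.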
def findOverlayClasses(self, clsList):
"""Chooses overlay classes which should be added to this object's class structure after the object has been initially instantiated.
After an NVDAObject class (normally an API-level class) is instantiated, this method is called on the instance to choose appropriate overlay classes.
This method may use properties, etc. on the instance to make this choice.
The object's class structure is then mutated to contain these classes.
L{initOverlayClass} is then called for each class which was not part of the initially instantiated object.
This process allows an NVDAObject to be dynamically created using the most appropriate NVDAObject subclass at each API level.
Classes should be listed with subclasses first. That is, subclasses should generally call super and then append their own classes to the list.
		For example: called on an IAccessible NVDAObject, the list might contain DialogIaccessible (a subclass of IAccessible) and Edit (a subclass of Window).
@param clsList: The list of classes, which will be modified by this method if appropriate.
@type clsList: list of L{NVDAObject}
"""
clsList.append(NVDAObject)
beTransparentToMouse=False #:If true then NVDA will never consider the mouse to be on this object, rather it will be on an ancestor.
@staticmethod
	def objectFromPoint(x,y):
		"""Retrieves an NVDAObject instance representing a control in the Operating System at the given x and y coordinates.
@param x: the x coordinate.
@type x: int
@param y: the y coordinate.
		@type y: int
@return: The object at the given x and y coordinates.
@rtype: L{NVDAObject}
"""
kwargs={}
APIClass=NVDAObject.findBestAPIClass(kwargs,relation=(x,y))
return APIClass(chooseBestAPI=False,**kwargs) if APIClass else None
@staticmethod
	def objectWithFocus():
		"""Retrieves the object representing the control currently with focus in the Operating System. This differs from NVDA's focus object as this focus object is the real focus object according to the Operating System, not according to NVDA.
@return: the object with focus.
@rtype: L{NVDAObject}
"""
kwargs={}
APIClass=NVDAObject.findBestAPIClass(kwargs,relation="focus")
if not APIClass:
return None
obj=APIClass(chooseBestAPI=False,**kwargs)
if not obj:
return None
focusRedirect=obj.focusRedirect
if focusRedirect:
obj=focusRedirect
return obj
@staticmethod
	def objectInForeground():
		"""Retrieves the object representing the current foreground control according to the Operating System. This differs from NVDA's foreground object as this object is the real foreground object according to the Operating System, not according to NVDA.
@return: the foreground object
@rtype: L{NVDAObject}
"""
kwargs={}
APIClass=NVDAObject.findBestAPIClass(kwargs,relation="foreground")
return APIClass(chooseBestAPI=False,**kwargs) if APIClass else None
def __init__(self):
super(NVDAObject,self).__init__()
self._mouseEntered=False #:True if the mouse has entered this object (for use in L{event_mouseMoved})
self.textRepresentationLineLength=None #:If an integer greater than 0 then lines of text in this object are always this long.
def _isEqual(self,other):
"""Calculates if this object is equal to another object. Used by L{NVDAObject.__eq__}.
@param other: the other object to compare with.
@type other: L{NVDAObject}
@return: True if equal, false otherwise.
@rtype: boolean
"""
return True
	def __eq__(self,other):
		"""Compares the objects' memory addresses, their type, and uses L{NVDAObject._isEqual} to see if they are equal.
"""
if self is other:
return True
if type(self) is not type(other):
return False
return self._isEqual(other)
def __ne__(self,other):
"""The opposite to L{NVDAObject.__eq__}
"""
return not self.__eq__(other)
	focusRedirect=None #: Another object which should be treated as the focus if focus is ever given to this object.
def _get_treeInterceptorClass(self):
"""
If this NVDAObject should use a treeInterceptor, then this property provides the L{treeInterceptorHandler.TreeInterceptor} class it should use.
		If not, then it should not be implemented.
"""
raise NotImplementedError
#: Whether to create a tree interceptor for this object.
#: This is only relevant if L{treeInterceptorClass} is valid.
#: Normally, this should be C{True}.
#: However, for some objects (e.g. ARIA applications), a tree interceptor shouldn't be used by default,
#: but the user may wish to override this.
#: In this case, this can be set to C{False} and updated later.
#: @type: bool
shouldCreateTreeInterceptor = True
	def _get_treeInterceptor(self):
		"""Retrieves the treeInterceptor associated with this object.
If a treeInterceptor has not been specifically set, the L{treeInterceptorHandler} is asked if it can find a treeInterceptor containing this object.
@return: the treeInterceptor
@rtype: L{treeInterceptorHandler.TreeInterceptor}
"""
if hasattr(self,'_treeInterceptor'):
ti=self._treeInterceptor
if isinstance(ti,weakref.ref):
ti=ti()
if ti and ti in treeInterceptorHandler.runningTable:
return ti
else:
self._treeInterceptor=None
return None
else:
ti=treeInterceptorHandler.getTreeInterceptor(self)
if ti:
self._treeInterceptor=weakref.ref(ti)
return ti
def _set_treeInterceptor(self,obj):
"""Specifically sets a treeInterceptor to be associated with this object.
"""
if obj:
self._treeInterceptor=weakref.ref(obj)
else: #We can't point a weakref to None, so just set the private variable to None, it can handle that
self._treeInterceptor=None
	def _get_appModule(self):
		"""Retrieves the appModule representing the application this object is a part of by asking L{appModuleHandler}.
@return: the appModule
@rtype: L{appModuleHandler.AppModule}
"""
if not hasattr(self,'_appModuleRef'):
a=appModuleHandler.getAppModuleForNVDAObject(self)
if a:
self._appModuleRef=weakref.ref(a)
return a
else:
return self._appModuleRef()
def _get_name(self):
"""The name or label of this object (example: the text of a button).
@rtype: basestring
"""
return ""
def _get_role(self):
"""The role or type of control this object represents (example: button, list, dialog).
@return: a ROLE_* constant from L{controlTypes}
@rtype: int
"""
return controlTypes.ROLE_UNKNOWN
def _get_roleText(self):
"""
A custom role string for this object, which is used for braille and speech presentation, which will override the standard label for this object's role property.
No string is provided by default, meaning that NVDA will fall back to using role.
Examples of where this property might be overridden are shapes in Powerpoint, or ARIA role descriptions.
"""
return None
def _get_value(self):
"""The value of this object (example: the current percentage of a scrollbar, the selected option in a combo box).
@rtype: basestring
"""
return ""
def _get_description(self):
"""The description or help text of this object.
@rtype: basestring
"""
return ""
	def _get_controllerFor(self):
		"""Retrieves the object/s that this object controls."""
return []
	def _get_actionCount(self):
		"""Retrieves the number of actions supported by this object."""
return 0
	def getActionName(self,index=None):
		"""Retrieves the name of an action supported by this object.
If index is not given then the default action will be used if it exists.
@param index: the optional 0-based index of the wanted action.
@type index: int
@return: the action's name
@rtype: basestring
"""
raise NotImplementedError
def doAction(self,index=None):
"""Performs an action supported by this object.
If index is not given then the default action will be used if it exists.
"""
raise NotImplementedError
	def _get_defaultActionIndex(self):
		"""Retrieves the index of the action that is the default."""
return 0
def _get_keyboardShortcut(self):
"""The shortcut key that activates this object(example: alt+t).
@rtype: basestring
"""
return ""
def _get_isInForeground(self):
"""
Finds out if this object is currently within the foreground.
"""
raise NotImplementedError
	def _get_states(self):
		"""Retrieves the current states of this object (example: selected, focused).
@return: a set of STATE_* constants from L{controlTypes}.
@rtype: set of int
"""
return set()
def _get_location(self):
"""The location of this object on the screen.
@return: left, top, width and height of the object.
@rtype: tuple of int
"""
raise NotImplementedError
def _get_locationText(self):
"""A message that explains the location of the object in friendly terms."""
location=self.location
if not location:
return None
(left,top,width,height)=location
deskLocation=api.getDesktopObject().location
(deskLeft,deskTop,deskWidth,deskHeight)=deskLocation
percentFromLeft=(float(left-deskLeft)/deskWidth)*100
percentFromTop=(float(top-deskTop)/deskHeight)*100
percentWidth=(float(width)/deskWidth)*100
percentHeight=(float(height)/deskHeight)*100
# Translators: Reports navigator object's dimensions (example output: object edges positioned 20 per cent from left edge of screen, 10 per cent from top edge of screen, width is 40 per cent of screen, height is 50 per cent of screen).
return _("Object edges positioned {left:.1f} per cent from left edge of screen, {top:.1f} per cent from top edge of screen, width is {width:.1f} per cent of screen, height is {height:.1f} per cent of screen").format(left=percentFromLeft,top=percentFromTop,width=percentWidth,height=percentHeight)
	def _get_parent(self):
		"""Retrieves this object's parent (the object that contains this object).
@return: the parent object if it exists else None.
@rtype: L{NVDAObject} or None
"""
return None
def _get_container(self):
"""
		Exactly like parent, however another object at this same sibling level may be retrieved first (e.g. a groupbox). Mostly used when presenting context such as focus ancestry.
"""
# Cache parent.
parent = self.parent
self.parent = parent
return parent
	def _get_next(self):
		"""Retrieves the object directly after this object with the same parent.
@return: the next object if it exists else None.
@rtype: L{NVDAObject} or None
"""
return None
	def _get_previous(self):
		"""Retrieves the object directly before this object with the same parent.
@return: the previous object if it exists else None.
@rtype: L{NVDAObject} or None
"""
return None
	def _get_firstChild(self):
		"""Retrieves the first object that this object contains.
@return: the first child object if it exists else None.
@rtype: L{NVDAObject} or None
"""
return None
	def _get_lastChild(self):
		"""Retrieves the last object that this object contains.
@return: the last child object if it exists else None.
@rtype: L{NVDAObject} or None
"""
return None
	def _get_children(self):
		"""Retrieves a list of all the objects directly contained by this object (whose parent is this object).
@rtype: list of L{NVDAObject}
"""
children=[]
child=self.firstChild
while child:
children.append(child)
child=child.next
return children
def getChild(self, index):
"""Retrieve a child by index.
@note: Subclasses may override this if they have an efficient way to retrieve a single, arbitrary child.
The base implementation uses L{children}.
@param index: The 0-based index of the child to retrieve.
@type index: int
@return: The child.
@rtype: L{NVDAObject}
"""
return self.children[index]
	def _get_rowNumber(self):
		"""Retrieves the row number of this object if it is in a table.
@rtype: int
"""
raise NotImplementedError
	def _get_columnNumber(self):
		"""Retrieves the column number of this object if it is in a table.
@rtype: int
"""
raise NotImplementedError
def _get_cellCoordsText(self):
"""
An alternative text representation of cell coordinates e.g. "a1". Will override presentation of rowNumber and columnNumber.
Only implement if the representation is really different.
"""
return None
	def _get_rowCount(self):
		"""Retrieves the number of rows this object contains if it's a table.
@rtype: int
"""
raise NotImplementedError
	def _get_columnCount(self):
		"""Retrieves the number of columns this object contains if it's a table.
@rtype: int
"""
raise NotImplementedError
def _get_rowHeaderText(self):
"""The text of the row headers for this cell.
@rtype: str
"""
raise NotImplementedError
def _get_columnHeaderText(self):
"""The text of the column headers for this cell.
@rtype: str
"""
raise NotImplementedError
	def _get_table(self):
		"""Retrieves the object that represents the table that this object is contained in, if this object is a table cell.
@rtype: L{NVDAObject}
"""
raise NotImplementedError
def _get_tableID(self):
"""The identifier of the table associated with this object if it is a table cell.
This identifier must distinguish this table from other tables.
If this is not implemented, table cell information will still be reported,
but row and column information will always be reported
even if the user moves to a cell in the same row/column.
"""
raise NotImplementedError
def _get_recursiveDescendants(self):
"""Recursively traverse and return the descendants of this object.
This is a depth-first forward traversal.
@return: The recursive descendants of this object.
@rtype: generator of L{NVDAObject}
"""
for child in self.children:
yield child
for recursiveChild in child.recursiveDescendants:
yield recursiveChild
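# Illustrative sketch only: because recursiveDescendants is a generator performing a depth-first,
# forward traversal, callers can stop early without visiting the whole subtree, e.g.
# (assuming obj is any NVDAObject):
#	firstLink = next((o for o in obj.recursiveDescendants if o.role == controlTypes.ROLE_LINK), None)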
presType_unavailable="unavailable"
presType_layout="layout"
presType_content="content"
def _get_presentationType(self):
states=self.states
if controlTypes.STATE_INVISIBLE in states or controlTypes.STATE_UNAVAILABLE in states:
return self.presType_unavailable
role=self.role
#Static text should be content only if it really contains usable text
if role==controlTypes.ROLE_STATICTEXT:
text=self.makeTextInfo(textInfos.POSITION_ALL).text
return self.presType_content if text and not text.isspace() else self.presType_layout
if role in (controlTypes.ROLE_UNKNOWN, controlTypes.ROLE_PANE, controlTypes.ROLE_TEXTFRAME, controlTypes.ROLE_ROOTPANE, controlTypes.ROLE_LAYEREDPANE, controlTypes.ROLE_SCROLLPANE, controlTypes.ROLE_SPLITPANE, controlTypes.ROLE_SECTION, controlTypes.ROLE_PARAGRAPH, controlTypes.ROLE_TITLEBAR, controlTypes.ROLE_LABEL, controlTypes.ROLE_WHITESPACE,controlTypes.ROLE_BORDER):
return self.presType_layout
name = self.name
description = self.description
if not name and not description:
if role in (controlTypes.ROLE_WINDOW,controlTypes.ROLE_PANEL, controlTypes.ROLE_PROPERTYPAGE, controlTypes.ROLE_TEXTFRAME, controlTypes.ROLE_GROUPING,controlTypes.ROLE_OPTIONPANE,controlTypes.ROLE_INTERNALFRAME,controlTypes.ROLE_FORM,controlTypes.ROLE_TABLEBODY):
return self.presType_layout
if role == controlTypes.ROLE_TABLE and not config.conf["documentFormatting"]["reportTables"]:
return self.presType_layout
if role in (controlTypes.ROLE_TABLEROW,controlTypes.ROLE_TABLECOLUMN,controlTypes.ROLE_TABLECELL) and (not config.conf["documentFormatting"]["reportTables"] or not config.conf["documentFormatting"]["reportTableCellCoords"]):
return self.presType_layout
if role in (controlTypes.ROLE_TABLEROW,controlTypes.ROLE_TABLECOLUMN):
try:
table=self.table
except NotImplementedError:
table=None
if table:
# This is part of a real table, so the cells will report row/column information.
# Therefore, this object is just for layout.
return self.presType_layout
return self.presType_content
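# Worked example of the logic above (illustrative only): an unnamed, undescribed grouping resolves
# to presType_layout and is skipped by simple navigation, while a static text object whose text is
# non-blank resolves to presType_content and is presented.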
def _get_simpleParent(self):
obj=self.parent
while obj and obj.presentationType!=self.presType_content:
obj=obj.parent
return obj
def _findSimpleNext(self,useChild=False,useParent=True,goPrevious=False):
nextPrevAttrib="next" if not goPrevious else "previous"
firstLastChildAttrib="firstChild" if not goPrevious else "lastChild"
found=None
if useChild:
child=getattr(self,firstLastChildAttrib)
childPresType=child.presentationType if child else None
if childPresType==self.presType_content:
found=child
elif childPresType==self.presType_layout:
found=child._findSimpleNext(useChild=True,useParent=False,goPrevious=goPrevious)
elif child:
found=child._findSimpleNext(useChild=False,useParent=False,goPrevious=goPrevious)
if found:
return found
next=getattr(self,nextPrevAttrib)
nextPresType=next.presentationType if next else None
if nextPresType==self.presType_content:
found=next
elif nextPresType==self.presType_layout:
found=next._findSimpleNext(useChild=True,useParent=False,goPrevious=goPrevious)
elif next:
found=next._findSimpleNext(useChild=False,useParent=False,goPrevious=goPrevious)
if found:
return found
parent=self.parent if useParent else None
while parent and parent.presentationType!=self.presType_content:
next=parent._findSimpleNext(useChild=False,useParent=False,goPrevious=goPrevious)
if next:
return next
parent=parent.parent
def _get_simpleNext(self):
return self._findSimpleNext()
def _get_simplePrevious(self):
return self._findSimpleNext(goPrevious=True)
def _get_simpleFirstChild(self):
child=self.firstChild
if not child:
return None
presType=child.presentationType
if presType!=self.presType_content: return child._findSimpleNext(useChild=(presType!=self.presType_unavailable),useParent=False)
return child
def _get_simpleLastChild(self):
child=self.lastChild
if not child:
return None
presType=child.presentationType
if presType!=self.presType_content: return child._findSimpleNext(useChild=(presType!=self.presType_unavailable),useParent=False,goPrevious=True)
return child
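# Illustrative sketch only: the simple* properties above wrap _findSimpleNext so that review
# navigation skips objects whose presentationType is layout or unavailable, e.g.
# (assuming obj is any NVDAObject):
#	sibling = obj.simpleNext       # next content sibling, layout-only wrappers are skipped
#	child = obj.simpleFirstChild   # first content descendant, descending through layout objects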
def _get_childCount(self):
"""Retreaves the number of children this object contains.
@rtype: int
"""
return len(self.children)
def _get_activeChild(self):
"""Retreaves the child of this object that currently has, or contains, the focus.
@return: the active child if it has one else None
@rtype: L{NVDAObject} or None
"""
return None
def _get_isFocusable(self):
"""Whether this object is focusable.
@rtype: bool
"""
return controlTypes.STATE_FOCUSABLE in self.states
def _get_hasFocus(self):
"""Whether this object has focus.
@rtype: bool
"""
return controlTypes.STATE_FOCUSED in self.states
def setFocus(self):
"""
Tries to force this object to take the focus.
"""
pass
def scrollIntoView(self):
"""Scroll this object into view on the screen if possible.
"""
raise NotImplementedError
def _get_labeledBy(self):
"""Retreaves the object that this object is labeled by (example: the static text label beside an edit field).
@return: the label object if it has one else None.
@rtype: L{NVDAObject} or None
"""
return None
def _get_positionInfo(self):
"""Retreaves position information for this object such as its level, its index with in a group, and the number of items in that group.
@return: a dictionary containing any of level, groupIndex and similarItemsInGroup.
@rtype: dict
"""
return {}
def _get_processID(self):
"""Retreaves an identifyer of the process this object is a part of.
@rtype: int
"""
raise NotImplementedError
def _get_isProtected(self):
"""
@return: True if this object is protected (hides its input for passwords), or False otherwise
@rtype: boolean
"""
return False
def _get_indexInParent(self):
"""The index of this object in its parent object.
@return: The 0 based index, C{None} if there is no parent.
@rtype: int
@raise NotImplementedError: If not supported by the underlying object.
"""
raise NotImplementedError
def _get_flowsTo(self):
"""The object to which content flows from this object.
@return: The object to which this object flows, C{None} if none.
@rtype: L{NVDAObject}
@raise NotImplementedError: If not supported by the underlying object.
"""
raise NotImplementedError
def _get_flowsFrom(self):
"""The object from which content flows to this object.
@return: The object from which this object flows, C{None} if none.
@rtype: L{NVDAObject}
@raise NotImplementedError: If not supported by the underlying object.
"""
raise NotImplementedError
def _get_isPresentableFocusAncestor(self):
"""Determine if this object should be presented to the user in the focus ancestry.
@return: C{True} if it should be presented in the focus ancestry, C{False} if not.
@rtype: bool
"""
if self.presentationType in (self.presType_layout, self.presType_unavailable):
return False
if self.role in (controlTypes.ROLE_TREEVIEWITEM, controlTypes.ROLE_LISTITEM, controlTypes.ROLE_PROGRESSBAR, controlTypes.ROLE_EDITABLETEXT):
return False
return True
def _get_statusBar(self):
"""Finds the closest status bar in relation to this object.
@return: the found status bar else None
@rtype: L{NVDAObject} or None
"""
return None
def _get_isCurrent(self):
"""Gets the value that indicates whether this object is the current element in a set of related
elements. This maps to aria-current. Normally returns False. If this object is current
it will return one of the following values: True, "page", "step", "location", "date", "time"
"""
return False
def _get_shouldAcceptShowHideCaretEvent(self):
"""Some objects/applications send show/hide caret events when we don't expect it, such as when the cursor is blinking.
@return: if show/hide caret events should be accepted for this object.
@rtype: Boolean
"""
return True
def reportFocus(self):
"""Announces this object in a way suitable such that it gained focus.
"""
speech.speakObject(self,reason=controlTypes.REASON_FOCUS)
def _get_placeholder(self):
"""If it exists for this object get the value of the placeholder text.
For example this might be the aria-placeholder text for a field in a web page.
@return: the placeholder text else None
@rtype: String or None
"""
log.debug("Potential unimplemented child class: %r" %self)
return None
def _get_landmark(self):
"""If this object represents an ARIA landmark, fetches the ARIA landmark role.
@return: ARIA landmark role else None
@rtype: String or None
"""
return None
def _reportErrorInPreviousWord(self):
try:
# self might be a descendant of the text control; e.g. Symphony.
# We want to deal with the entire text, so use the caret object.
info = api.getCaretObject().makeTextInfo(textInfos.POSITION_CARET)
# This gets called for characters which might end a word; e.g. space.
# The character before the caret is the word end.
# The one before that is the last of the word, which is what we want.
info.move(textInfos.UNIT_CHARACTER, -2)
info.expand(textInfos.UNIT_CHARACTER)
fields = info.getTextWithFields()
except RuntimeError:
return
except:
# Focus probably moved.
log.debugWarning("Error fetching last character of previous word", exc_info=True)
return
for command in fields:
if isinstance(command, textInfos.FieldCommand) and command.command == "formatChange" and command.field.get("invalid-spelling"):
break
else:
# No error.
return
import nvwave
nvwave.playWaveFile(r"waves\textError.wav")
def event_liveRegionChange(self):
"""
A base implementation for live region change events.
"""
name=self.name
if name:
ui.message(name)
def event_typedCharacter(self,ch):
if config.conf["documentFormatting"]["reportSpellingErrors"] and config.conf["keyboard"]["alertForSpellingErrors"] and (
# Not alpha, apostrophe or control.
ch.isspace() or (ch >= u" " and ch not in u"'\x7f" and not ch.isalpha())
):
# Reporting of spelling errors is enabled and this character ends a word.
self._reportErrorInPreviousWord()
speech.speakTypedCharacters(ch)
import winUser
if config.conf["keyboard"]["beepForLowercaseWithCapslock"] and ch.islower() and winUser.getKeyState(winUser.VK_CAPITAL)&1:
import tones
tones.beep(3000,40)
def event_mouseMove(self,x,y):
if not self._mouseEntered and config.conf['mouse']['reportObjectRoleOnMouseEnter']:
speech.cancelSpeech()
speech.speakObjectProperties(self,role=True)
speechWasCanceled=True
else:
speechWasCanceled=False
self._mouseEntered=True
try:
info=self.makeTextInfo(textInfos.Point(x,y))
except NotImplementedError:
info=NVDAObjectTextInfo(self,textInfos.POSITION_FIRST)
except LookupError:
return
if config.conf["reviewCursor"]["followMouse"]:
api.setReviewPosition(info)
info.expand(info.unit_mouseChunk)
oldInfo=getattr(self,'_lastMouseTextInfoObject',None)
self._lastMouseTextInfoObject=info
if not oldInfo or info.__class__!=oldInfo.__class__ or info.compareEndPoints(oldInfo,"startToStart")!=0 or info.compareEndPoints(oldInfo,"endToEnd")!=0:
text=info.text
notBlank=False
if text:
for ch in text:
if not ch.isspace() and ch!=u'\ufffc':
notBlank=True
if notBlank:
if not speechWasCanceled:
speech.cancelSpeech()
speech.speakText(text)
def event_stateChange(self):
if self is api.getFocusObject():
speech.speakObjectProperties(self,states=True, reason=controlTypes.REASON_CHANGE)
braille.handler.handleUpdate(self)
def event_focusEntered(self):
if self.role in (controlTypes.ROLE_MENUBAR,controlTypes.ROLE_POPUPMENU,controlTypes.ROLE_MENUITEM):
speech.cancelSpeech()
return
if self.isPresentableFocusAncestor:
speech.speakObject(self,reason=controlTypes.REASON_FOCUSENTERED)
def event_gainFocus(self):
"""
This code is executed if a gain focus event is received by this object.
"""
self.reportFocus()
braille.handler.handleGainFocus(self)
brailleInput.handler.handleGainFocus(self)
def event_foreground(self):
"""Called when the foreground window changes.
This method should only perform tasks specific to the foreground window changing.
L{event_focusEntered} or L{event_gainFocus} will be called for this object, so this method should not speak/braille the object, etc.
"""
speech.cancelSpeech()
def event_becomeNavigatorObject(self, isFocus=False):
"""Called when this object becomes the navigator object.
@param isFocus: true if the navigator object was set due to a focus change.
@type isFocus: bool
"""
# When the navigator object follows the focus and braille is auto tethered to review,
# we should not update braille with the new review position as a tether to focus is due.
if braille.handler.shouldAutoTether and isFocus:
return
braille.handler.handleReviewMove(shouldAutoTether=not isFocus)
def event_valueChange(self):
if self is api.getFocusObject():
speech.speakObjectProperties(self, value=True, reason=controlTypes.REASON_CHANGE)
braille.handler.handleUpdate(self)
def event_nameChange(self):
if self is api.getFocusObject():
speech.speakObjectProperties(self, name=True, reason=controlTypes.REASON_CHANGE)
braille.handler.handleUpdate(self)
def event_descriptionChange(self):
if self is api.getFocusObject():
speech.speakObjectProperties(self, description=True, reason=controlTypes.REASON_CHANGE)
braille.handler.handleUpdate(self)
def event_caret(self):
if self is api.getFocusObject() and not eventHandler.isPendingEvents("gainFocus"):
braille.handler.handleCaretMove(self)
brailleInput.handler.handleCaretMove(self)
review.handleCaretMove(self)
def _get_flatReviewPosition(self):
"""Locates a TextInfo positioned at this object, in the closest flat review."""
parent=self.simpleParent
while parent:
ti=parent.treeInterceptor
if ti and self in ti and ti.rootNVDAObject==parent:
return ti.makeTextInfo(self)
if issubclass(parent.TextInfo,DisplayModelTextInfo):
try:
return parent.makeTextInfo(api.getReviewPosition().pointAtStart)
except (NotImplementedError,LookupError):
pass
try:
return parent.makeTextInfo(self)
except (NotImplementedError,RuntimeError):
pass
return parent.makeTextInfo(textInfos.POSITION_FIRST)
parent=parent.simpleParent
def _get_basicText(self):
newTime=time.time()
oldTime=getattr(self,'_basicTextTime',0)
if newTime-oldTime>0.5:
self._basicText=u" ".join([x for x in self.name, self.value, self.description if isinstance(x, basestring) and len(x) > 0 and not x.isspace()])
if len(self._basicText)==0:
self._basicText=u""
else:
self._basicTextTime=newTime
return self._basicText
def _get__isTextEmpty(self):
"""
@return C{True} if the text contained in the object is considered empty by the underlying implementation. In most cases this will match {isCollapsed}, however some implementations may consider a single space or line feed as an empty range.
"""
ti = self.makeTextInfo(textInfos.POSITION_FIRST)
ti.move(textInfos.UNIT_CHARACTER, 1, endPoint="end")
return ti.isCollapsed
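# Note (illustrative): the check above expands a range of one character from the start of the
# object's text; if the range is still collapsed afterwards, no character could be covered,
# so the object's text is treated as empty.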
@staticmethod
def _formatLongDevInfoString(string, truncateLen=250):
"""Format a potentially long string value for inclusion in devInfo.
This should be used for arbitrary string values which aren't usually useful in debugging past a certain length.
If the string is too long to be useful, it will be truncated.
This string should be included as returned. There is no need to call repr.
@param string: The string to format.
@type string: basestring
@param truncateLen: The length at which to truncate the string.
@type truncateLen: int
@return: The formatted string.
@rtype: basestring
"""
if isinstance(string, basestring) and len(string) > truncateLen:
return "%r (truncated)" % string[:truncateLen]
return repr(string)
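# Illustrative usage sketch only (values are hypothetical; the enclosing class is assumed to be NVDAObject):
#	NVDAObject._formatLongDevInfoString("short value")  # -> "'short value'"
#	NVDAObject._formatLongDevInfoString("x" * 1000)     # -> repr of the first 250 characters plus " (truncated)"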
def _get_devInfo(self):
"""Information about this object useful to developers.
Subclasses may extend this, calling the superclass property first.
@return: A list of text strings providing information about this object useful to developers.
@rtype: list of str
"""
info = []
try:
ret = repr(self.name)
except Exception as e:
ret = "exception: %s" % e
info.append("name: %s" % ret)
try:
ret = self.role
for name, const in controlTypes.__dict__.iteritems():
if name.startswith("ROLE_") and ret == const:
ret = name
break
except Exception as e:
ret = "exception: %s" % e
info.append("role: %s" % ret)
try:
stateConsts = dict((const, name) for name, const in controlTypes.__dict__.iteritems() if name.startswith("STATE_"))
ret = ", ".join(
stateConsts.get(state) or str(state)
for state in self.states)
except Exception as e:
ret = "exception: %s" % e
info.append("states: %s" % ret)
try:
ret = repr(self.isFocusable)
except Exception as e:
ret = "exception: %s" % e
info.append("isFocusable: %s" % ret)
try:
ret = repr(self.hasFocus)
except Exception as e:
ret = "exception: %s" % e
info.append("hasFocus: %s" % ret)
try:
ret = repr(self)
except Exception as e:
ret = "exception: %s" % e
info.append("Python object: %s" % ret)
try:
ret = repr(self.__class__.__mro__)
except Exception as e:
ret = "exception: %s" % e
info.append("Python class mro: %s" % ret)
try:
ret = repr(self.description)
except Exception as e:
ret = "exception: %s" % e
info.append("description: %s" % ret)
try:
ret = repr(self.location)
except Exception as e:
ret = "exception: %s" % e
info.append("location: %s" % ret)
formatLong = self._formatLongDevInfoString
try:
ret = formatLong(self.value)
except Exception as e:
ret = "exception: %s" % e
info.append("value: %s" % ret)
try:
ret = repr(self.appModule)
except Exception as e:
ret = "exception: %s" % e
info.append("appModule: %s" % ret)
try:
ret = repr(self.appModule.productName)
except Exception as e:
ret = "exception: %s" % e
info.append("appModule.productName: %s" % ret)
try:
ret = repr(self.appModule.productVersion)
except Exception as e:
ret = "exception: %s" % e
info.append("appModule.productVersion: %s" % ret)
try:
ret = repr(self.TextInfo)
except Exception as e:
ret = "exception: %s" % e
info.append("TextInfo: %s" % ret)
return info
def _get_sleepMode(self):
"""Whether NVDA should sleep for this object (e.g. it is self-voicing).
If C{True}, all events and script requests for this object are silently dropped.
@rtype: bool
"""
if self.appModule:
return self.appModule.sleepMode
return False
# Don't cache sleepMode, as it is derived from a property which might change
# and we want the changed value immediately.
_cache_sleepMode = False
def _get_mathMl(self):
"""Obtain the MathML markup for an object containing math content.
This will only be called (and thus only needs to be implemented) for
objects with a role of L{controlTypes.ROLE_MATH}.
@raise LookupError: If MathML can't be retrieved for this object.
"""
raise NotImplementedError
#: The language/locale of this object.
#: @type: basestring
language = None
def _get__hasNavigableText(self):
# The generic NVDAObjectTextInfo by itself is never enough to be navigable
if self.TextInfo is NVDAObjectTextInfo:
return False
role = self.role
states = self.states
if role in (controlTypes.ROLE_EDITABLETEXT,controlTypes.ROLE_TERMINAL,controlTypes.ROLE_DOCUMENT):
# Edit fields, terminals and documents are always navigable
return True
elif controlTypes.STATE_EDITABLE in states:
# Anything that is specifically editable is navigable
return True
else:
return False
| 1 | 21,500 | May be you could elaborate on why you removed this? I see some of this logic has been moved to ia2Web, but couldn't there be cases outside ia2Web (i.e. UIA) in which case ROLE_TABLEROW and ROLE_TABLECOLUMN are mapped to objects, in which case these objects will be of presentation type content erroneously? | nvaccess-nvda | py |
@@ -2637,10 +2637,10 @@ void Game::playerAcceptTrade(uint32_t playerId)
player->setTradeState(TRADE_ACCEPT);
if (tradePartner->getTradeState() == TRADE_ACCEPT) {
- Item* tradeItem1 = player->tradeItem;
- Item* tradeItem2 = tradePartner->tradeItem;
+ Item* playerTradeItem = player->tradeItem;
+ Item* tradePartnerItem = tradePartner->tradeItem;
- if (!g_events->eventPlayerOnTradeAccept(player, tradePartner, tradeItem1, tradeItem2)) {
+ if (!g_events->eventPlayerOnTradeAccept(player, tradePartner, playerTradeItem, tradePartnerItem)) {
internalCloseTrade(player);
return;
} | 1 | /**
* The Forgotten Server - a free and open-source MMORPG server emulator
* Copyright (C) 2018 Mark Samman <[email protected]>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
#include "otpch.h"
#include "pugicast.h"
#include "actions.h"
#include "bed.h"
#include "configmanager.h"
#include "creature.h"
#include "creatureevent.h"
#include "databasetasks.h"
#include "events.h"
#include "game.h"
#include "globalevent.h"
#include "iologindata.h"
#include "iomarket.h"
#include "items.h"
#include "monster.h"
#include "movement.h"
#include "scheduler.h"
#include "server.h"
#include "spells.h"
#include "talkaction.h"
#include "weapons.h"
extern ConfigManager g_config;
extern Actions* g_actions;
extern Chat* g_chat;
extern TalkActions* g_talkActions;
extern Spells* g_spells;
extern Vocations g_vocations;
extern GlobalEvents* g_globalEvents;
extern CreatureEvents* g_creatureEvents;
extern Events* g_events;
extern Monsters g_monsters;
extern MoveEvents* g_moveEvents;
extern Weapons* g_weapons;
Game::Game()
{
offlineTrainingWindow.choices.emplace_back("Sword Fighting and Shielding", SKILL_SWORD);
offlineTrainingWindow.choices.emplace_back("Axe Fighting and Shielding", SKILL_AXE);
offlineTrainingWindow.choices.emplace_back("Club Fighting and Shielding", SKILL_CLUB);
offlineTrainingWindow.choices.emplace_back("Distance Fighting and Shielding", SKILL_DISTANCE);
offlineTrainingWindow.choices.emplace_back("Magic Level and Shielding", SKILL_MAGLEVEL);
offlineTrainingWindow.buttons.emplace_back("Okay", 1);
offlineTrainingWindow.buttons.emplace_back("Cancel", 0);
offlineTrainingWindow.defaultEnterButton = 1;
offlineTrainingWindow.defaultEscapeButton = 0;
offlineTrainingWindow.priority = true;
}
Game::~Game()
{
for (const auto& it : guilds) {
delete it.second;
}
}
void Game::start(ServiceManager* manager)
{
serviceManager = manager;
g_scheduler.addEvent(createSchedulerTask(EVENT_LIGHTINTERVAL, std::bind(&Game::checkLight, this)));
g_scheduler.addEvent(createSchedulerTask(EVENT_CREATURE_THINK_INTERVAL, std::bind(&Game::checkCreatures, this, 0)));
g_scheduler.addEvent(createSchedulerTask(EVENT_DECAYINTERVAL, std::bind(&Game::checkDecay, this)));
}
GameState_t Game::getGameState() const
{
return gameState;
}
void Game::setWorldType(WorldType_t type)
{
worldType = type;
}
void Game::setGameState(GameState_t newState)
{
if (gameState == GAME_STATE_SHUTDOWN) {
return; //this cannot be stopped
}
if (gameState == newState) {
return;
}
gameState = newState;
switch (newState) {
case GAME_STATE_INIT: {
loadExperienceStages();
groups.load();
g_chat->load();
map.spawns.startup();
raids.loadFromXml();
raids.startup();
quests.loadFromXml();
mounts.loadFromXml();
loadMotdNum();
loadPlayersRecord();
g_globalEvents->startup();
break;
}
case GAME_STATE_SHUTDOWN: {
g_globalEvents->execute(GLOBALEVENT_SHUTDOWN);
//kick all players that are still online
auto it = players.begin();
while (it != players.end()) {
it->second->kickPlayer(true);
it = players.begin();
}
saveMotdNum();
saveGameState();
g_dispatcher.addTask(
createTask(std::bind(&Game::shutdown, this)));
g_scheduler.stop();
g_databaseTasks.stop();
g_dispatcher.stop();
break;
}
case GAME_STATE_CLOSED: {
/* kick all players without the CanAlwaysLogin flag */
auto it = players.begin();
while (it != players.end()) {
if (!it->second->hasFlag(PlayerFlag_CanAlwaysLogin)) {
it->second->kickPlayer(true);
it = players.begin();
} else {
++it;
}
}
saveGameState();
break;
}
default:
break;
}
}
void Game::saveGameState()
{
if (gameState == GAME_STATE_NORMAL) {
setGameState(GAME_STATE_MAINTAIN);
}
std::cout << "Saving server..." << std::endl;
for (const auto& it : players) {
it.second->loginPosition = it.second->getPosition();
IOLoginData::savePlayer(it.second);
}
Map::save();
g_databaseTasks.flush();
if (gameState == GAME_STATE_MAINTAIN) {
setGameState(GAME_STATE_NORMAL);
}
}
bool Game::loadMainMap(const std::string& filename)
{
Monster::despawnRange = g_config.getNumber(ConfigManager::DEFAULT_DESPAWNRANGE);
Monster::despawnRadius = g_config.getNumber(ConfigManager::DEFAULT_DESPAWNRADIUS);
return map.loadMap("data/world/" + filename + ".otbm", true);
}
void Game::loadMap(const std::string& path)
{
map.loadMap(path, false);
}
Cylinder* Game::internalGetCylinder(Player* player, const Position& pos) const
{
if (pos.x != 0xFFFF) {
return map.getTile(pos);
}
//container
if (pos.y & 0x40) {
uint8_t from_cid = pos.y & 0x0F;
return player->getContainerByID(from_cid);
}
//inventory
return player;
}
Thing* Game::internalGetThing(Player* player, const Position& pos, int32_t index, uint32_t spriteId, stackPosType_t type) const
{
if (pos.x != 0xFFFF) {
Tile* tile = map.getTile(pos);
if (!tile) {
return nullptr;
}
Thing* thing;
switch (type) {
case STACKPOS_LOOK: {
return tile->getTopVisibleThing(player);
}
case STACKPOS_MOVE: {
Item* item = tile->getTopDownItem();
if (item && item->isMoveable()) {
thing = item;
} else {
thing = tile->getTopVisibleCreature(player);
}
break;
}
case STACKPOS_USEITEM: {
thing = tile->getUseItem(index);
break;
}
case STACKPOS_TOPDOWN_ITEM: {
thing = tile->getTopDownItem();
break;
}
case STACKPOS_USETARGET: {
thing = tile->getTopVisibleCreature(player);
if (!thing) {
thing = tile->getUseItem(index);
}
break;
}
default: {
thing = nullptr;
break;
}
}
if (player && tile->hasFlag(TILESTATE_SUPPORTS_HANGABLE)) {
//do extra checks here if the thing is accessible
if (thing && thing->getItem()) {
if (tile->hasProperty(CONST_PROP_ISVERTICAL)) {
if (player->getPosition().x + 1 == tile->getPosition().x) {
thing = nullptr;
}
} else { // horizontal
if (player->getPosition().y + 1 == tile->getPosition().y) {
thing = nullptr;
}
}
}
}
return thing;
}
//container
if (pos.y & 0x40) {
uint8_t fromCid = pos.y & 0x0F;
Container* parentContainer = player->getContainerByID(fromCid);
if (!parentContainer) {
return nullptr;
}
if (parentContainer->getID() == ITEM_BROWSEFIELD) {
Tile* tile = parentContainer->getTile();
if (tile && tile->hasFlag(TILESTATE_SUPPORTS_HANGABLE)) {
if (tile->hasProperty(CONST_PROP_ISVERTICAL)) {
if (player->getPosition().x + 1 == tile->getPosition().x) {
return nullptr;
}
} else { // horizontal
if (player->getPosition().y + 1 == tile->getPosition().y) {
return nullptr;
}
}
}
}
uint8_t slot = pos.z;
return parentContainer->getItemByIndex(player->getContainerIndex(fromCid) + slot);
} else if (pos.y == 0 && pos.z == 0) {
const ItemType& it = Item::items.getItemIdByClientId(spriteId);
if (it.id == 0) {
return nullptr;
}
int32_t subType;
if (it.isFluidContainer() && index < static_cast<int32_t>(sizeof(reverseFluidMap) / sizeof(uint8_t))) {
subType = reverseFluidMap[index];
} else {
subType = -1;
}
return findItemOfType(player, it.id, true, subType);
}
//inventory
slots_t slot = static_cast<slots_t>(pos.y);
return player->getInventoryItem(slot);
}
void Game::internalGetPosition(Item* item, Position& pos, uint8_t& stackpos)
{
pos.x = 0;
pos.y = 0;
pos.z = 0;
stackpos = 0;
Cylinder* topParent = item->getTopParent();
if (topParent) {
if (Player* player = dynamic_cast<Player*>(topParent)) {
pos.x = 0xFFFF;
Container* container = dynamic_cast<Container*>(item->getParent());
if (container) {
pos.y = static_cast<uint16_t>(0x40) | static_cast<uint16_t>(player->getContainerID(container));
pos.z = container->getThingIndex(item);
stackpos = pos.z;
} else {
pos.y = player->getThingIndex(item);
stackpos = pos.y;
}
} else if (Tile* tile = topParent->getTile()) {
pos = tile->getPosition();
stackpos = tile->getThingIndex(item);
}
}
}
Creature* Game::getCreatureByID(uint32_t id)
{
if (id <= Player::playerAutoID) {
return getPlayerByID(id);
} else if (id <= Monster::monsterAutoID) {
return getMonsterByID(id);
} else if (id <= Npc::npcAutoID) {
return getNpcByID(id);
}
return nullptr;
}
Monster* Game::getMonsterByID(uint32_t id)
{
if (id == 0) {
return nullptr;
}
auto it = monsters.find(id);
if (it == monsters.end()) {
return nullptr;
}
return it->second;
}
Npc* Game::getNpcByID(uint32_t id)
{
if (id == 0) {
return nullptr;
}
auto it = npcs.find(id);
if (it == npcs.end()) {
return nullptr;
}
return it->second;
}
Player* Game::getPlayerByID(uint32_t id)
{
if (id == 0) {
return nullptr;
}
auto it = players.find(id);
if (it == players.end()) {
return nullptr;
}
return it->second;
}
Creature* Game::getCreatureByName(const std::string& s)
{
if (s.empty()) {
return nullptr;
}
const std::string& lowerCaseName = asLowerCaseString(s);
auto m_it = mappedPlayerNames.find(lowerCaseName);
if (m_it != mappedPlayerNames.end()) {
return m_it->second;
}
for (const auto& it : npcs) {
if (lowerCaseName == asLowerCaseString(it.second->getName())) {
return it.second;
}
}
for (const auto& it : monsters) {
if (lowerCaseName == asLowerCaseString(it.second->getName())) {
return it.second;
}
}
return nullptr;
}
Npc* Game::getNpcByName(const std::string& s)
{
if (s.empty()) {
return nullptr;
}
const char* npcName = s.c_str();
for (const auto& it : npcs) {
if (strcasecmp(npcName, it.second->getName().c_str()) == 0) {
return it.second;
}
}
return nullptr;
}
Player* Game::getPlayerByName(const std::string& s)
{
if (s.empty()) {
return nullptr;
}
auto it = mappedPlayerNames.find(asLowerCaseString(s));
if (it == mappedPlayerNames.end()) {
return nullptr;
}
return it->second;
}
Player* Game::getPlayerByGUID(const uint32_t& guid)
{
if (guid == 0) {
return nullptr;
}
for (const auto& it : players) {
if (guid == it.second->getGUID()) {
return it.second;
}
}
return nullptr;
}
ReturnValue Game::getPlayerByNameWildcard(const std::string& s, Player*& player)
{
size_t strlen = s.length();
if (strlen == 0 || strlen > 20) {
return RETURNVALUE_PLAYERWITHTHISNAMEISNOTONLINE;
}
if (s.back() == '~') {
const std::string& query = asLowerCaseString(s.substr(0, strlen - 1));
std::string result;
ReturnValue ret = wildcardTree.findOne(query, result);
if (ret != RETURNVALUE_NOERROR) {
return ret;
}
player = getPlayerByName(result);
} else {
player = getPlayerByName(s);
}
if (!player) {
return RETURNVALUE_PLAYERWITHTHISNAMEISNOTONLINE;
}
return RETURNVALUE_NOERROR;
}
Player* Game::getPlayerByAccount(uint32_t acc)
{
for (const auto& it : players) {
if (it.second->getAccount() == acc) {
return it.second;
}
}
return nullptr;
}
bool Game::internalPlaceCreature(Creature* creature, const Position& pos, bool extendedPos /*=false*/, bool forced /*= false*/)
{
if (creature->getParent() != nullptr) {
return false;
}
if (!map.placeCreature(pos, creature, extendedPos, forced)) {
return false;
}
creature->incrementReferenceCounter();
creature->setID();
creature->addList();
return true;
}
bool Game::placeCreature(Creature* creature, const Position& pos, bool extendedPos /*=false*/, bool forced /*= false*/)
{
if (!internalPlaceCreature(creature, pos, extendedPos, forced)) {
return false;
}
SpectatorHashSet spectators;
map.getSpectators(spectators, creature->getPosition(), true);
for (Creature* spectator : spectators) {
if (Player* tmpPlayer = spectator->getPlayer()) {
tmpPlayer->sendCreatureAppear(creature, creature->getPosition(), true);
}
}
for (Creature* spectator : spectators) {
spectator->onCreatureAppear(creature, true);
}
creature->getParent()->postAddNotification(creature, nullptr, 0);
addCreatureCheck(creature);
creature->onPlacedCreature();
return true;
}
bool Game::removeCreature(Creature* creature, bool isLogout/* = true*/)
{
if (creature->isRemoved()) {
return false;
}
Tile* tile = creature->getTile();
std::vector<int32_t> oldStackPosVector;
SpectatorHashSet spectators;
map.getSpectators(spectators, tile->getPosition(), true);
for (Creature* spectator : spectators) {
if (Player* player = spectator->getPlayer()) {
oldStackPosVector.push_back(player->canSeeCreature(creature) ? tile->getStackposOfCreature(player, creature) : -1);
}
}
tile->removeCreature(creature);
const Position& tilePosition = tile->getPosition();
//send to client
size_t i = 0;
for (Creature* spectator : spectators) {
if (Player* player = spectator->getPlayer()) {
player->sendRemoveTileThing(tilePosition, oldStackPosVector[i++]);
}
}
//event method
for (Creature* spectator : spectators) {
spectator->onRemoveCreature(creature, isLogout);
}
creature->getParent()->postRemoveNotification(creature, nullptr, 0);
creature->removeList();
creature->setRemoved();
ReleaseCreature(creature);
removeCreatureCheck(creature);
for (Creature* summon : creature->summons) {
summon->setSkillLoss(false);
removeCreature(summon);
}
return true;
}
void Game::playerMoveThing(uint32_t playerId, const Position& fromPos,
uint16_t spriteId, uint8_t fromStackPos, const Position& toPos, uint8_t count)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
uint8_t fromIndex = 0;
if (fromPos.x == 0xFFFF) {
if (fromPos.y & 0x40) {
fromIndex = fromPos.z;
} else {
fromIndex = static_cast<uint8_t>(fromPos.y);
}
} else {
fromIndex = fromStackPos;
}
Thing* thing = internalGetThing(player, fromPos, fromIndex, 0, STACKPOS_MOVE);
if (!thing) {
player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE);
return;
}
if (Creature* movingCreature = thing->getCreature()) {
Tile* tile = map.getTile(toPos);
if (!tile) {
player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE);
return;
}
if (Position::areInRange<1, 1, 0>(movingCreature->getPosition(), player->getPosition())) {
SchedulerTask* task = createSchedulerTask(1000,
std::bind(&Game::playerMoveCreatureByID, this, player->getID(),
movingCreature->getID(), movingCreature->getPosition(), tile->getPosition()));
player->setNextActionTask(task);
} else {
playerMoveCreature(player, movingCreature, movingCreature->getPosition(), tile);
}
} else if (thing->getItem()) {
Cylinder* toCylinder = internalGetCylinder(player, toPos);
if (!toCylinder) {
player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE);
return;
}
playerMoveItem(player, fromPos, spriteId, fromStackPos, toPos, count, thing->getItem(), toCylinder);
}
}
void Game::playerMoveCreatureByID(uint32_t playerId, uint32_t movingCreatureId, const Position& movingCreatureOrigPos, const Position& toPos)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
Creature* movingCreature = getCreatureByID(movingCreatureId);
if (!movingCreature) {
return;
}
Tile* toTile = map.getTile(toPos);
if (!toTile) {
player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE);
return;
}
playerMoveCreature(player, movingCreature, movingCreatureOrigPos, toTile);
}
void Game::playerMoveCreature(Player* player, Creature* movingCreature, const Position& movingCreatureOrigPos, Tile* toTile)
{
if (!player->canDoAction()) {
uint32_t delay = player->getNextActionTime();
SchedulerTask* task = createSchedulerTask(delay, std::bind(&Game::playerMoveCreatureByID,
this, player->getID(), movingCreature->getID(), movingCreatureOrigPos, toTile->getPosition()));
player->setNextActionTask(task);
return;
}
player->setNextActionTask(nullptr);
if (!Position::areInRange<1, 1, 0>(movingCreatureOrigPos, player->getPosition())) {
//need to walk to the creature first before moving it
std::forward_list<Direction> listDir;
if (player->getPathTo(movingCreatureOrigPos, listDir, 0, 1, true, true)) {
g_dispatcher.addTask(createTask(std::bind(&Game::playerAutoWalk,
this, player->getID(), listDir)));
SchedulerTask* task = createSchedulerTask(1500, std::bind(&Game::playerMoveCreatureByID, this,
player->getID(), movingCreature->getID(), movingCreatureOrigPos, toTile->getPosition()));
player->setNextWalkActionTask(task);
} else {
player->sendCancelMessage(RETURNVALUE_THEREISNOWAY);
}
return;
}
if ((!movingCreature->isPushable() && !player->hasFlag(PlayerFlag_CanPushAllCreatures)) ||
(movingCreature->isInGhostMode() && !player->isAccessPlayer())) {
player->sendCancelMessage(RETURNVALUE_NOTMOVEABLE);
return;
}
//check throw distance
const Position& movingCreaturePos = movingCreature->getPosition();
const Position& toPos = toTile->getPosition();
if ((Position::getDistanceX(movingCreaturePos, toPos) > movingCreature->getThrowRange()) || (Position::getDistanceY(movingCreaturePos, toPos) > movingCreature->getThrowRange()) || (Position::getDistanceZ(movingCreaturePos, toPos) * 4 > movingCreature->getThrowRange())) {
player->sendCancelMessage(RETURNVALUE_DESTINATIONOUTOFREACH);
return;
}
if (player != movingCreature) {
if (toTile->hasFlag(TILESTATE_BLOCKPATH)) {
player->sendCancelMessage(RETURNVALUE_NOTENOUGHROOM);
return;
} else if ((movingCreature->getZone() == ZONE_PROTECTION && !toTile->hasFlag(TILESTATE_PROTECTIONZONE)) || (movingCreature->getZone() == ZONE_NOPVP && !toTile->hasFlag(TILESTATE_NOPVPZONE))) {
player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE);
return;
} else {
if (CreatureVector* tileCreatures = toTile->getCreatures()) {
for (Creature* tileCreature : *tileCreatures) {
if (!tileCreature->isInGhostMode()) {
player->sendCancelMessage(RETURNVALUE_NOTENOUGHROOM);
return;
}
}
}
Npc* movingNpc = movingCreature->getNpc();
if (movingNpc && !Spawns::isInZone(movingNpc->getMasterPos(), movingNpc->getMasterRadius(), toPos)) {
player->sendCancelMessage(RETURNVALUE_NOTENOUGHROOM);
return;
}
}
}
if (!g_events->eventPlayerOnMoveCreature(player, movingCreature, movingCreaturePos, toPos)) {
return;
}
ReturnValue ret = internalMoveCreature(*movingCreature, *toTile);
if (ret != RETURNVALUE_NOERROR) {
player->sendCancelMessage(ret);
}
}
ReturnValue Game::internalMoveCreature(Creature* creature, Direction direction, uint32_t flags /*= 0*/)
{
creature->setLastPosition(creature->getPosition());
const Position& currentPos = creature->getPosition();
Position destPos = getNextPosition(direction, currentPos);
Player* player = creature->getPlayer();
bool diagonalMovement = (direction & DIRECTION_DIAGONAL_MASK) != 0;
if (player && !diagonalMovement) {
//try go up
if (currentPos.z != 8 && creature->getTile()->hasHeight(3)) {
Tile* tmpTile = map.getTile(currentPos.x, currentPos.y, currentPos.getZ() - 1);
if (tmpTile == nullptr || (tmpTile->getGround() == nullptr && !tmpTile->hasFlag(TILESTATE_BLOCKSOLID))) {
tmpTile = map.getTile(destPos.x, destPos.y, destPos.getZ() - 1);
if (tmpTile && tmpTile->getGround() && !tmpTile->hasFlag(TILESTATE_BLOCKSOLID)) {
flags |= FLAG_IGNOREBLOCKITEM | FLAG_IGNOREBLOCKCREATURE;
if (!tmpTile->hasFlag(TILESTATE_FLOORCHANGE)) {
player->setDirection(direction);
destPos.z--;
}
}
}
}
//try go down
if (currentPos.z != 7 && currentPos.z == destPos.z) {
Tile* tmpTile = map.getTile(destPos.x, destPos.y, destPos.z);
if (tmpTile == nullptr || (tmpTile->getGround() == nullptr && !tmpTile->hasFlag(TILESTATE_BLOCKSOLID))) {
tmpTile = map.getTile(destPos.x, destPos.y, destPos.z + 1);
if (tmpTile && tmpTile->hasHeight(3)) {
flags |= FLAG_IGNOREBLOCKITEM | FLAG_IGNOREBLOCKCREATURE;
player->setDirection(direction);
destPos.z++;
}
}
}
}
Tile* toTile = map.getTile(destPos);
if (!toTile) {
return RETURNVALUE_NOTPOSSIBLE;
}
return internalMoveCreature(*creature, *toTile, flags);
}
ReturnValue Game::internalMoveCreature(Creature& creature, Tile& toTile, uint32_t flags /*= 0*/)
{
//check if we can move the creature to the destination
ReturnValue ret = toTile.queryAdd(0, creature, 1, flags);
if (ret != RETURNVALUE_NOERROR) {
return ret;
}
map.moveCreature(creature, toTile);
if (creature.getParent() != &toTile) {
return RETURNVALUE_NOERROR;
}
int32_t index = 0;
Item* toItem = nullptr;
Tile* subCylinder = nullptr;
Tile* toCylinder = &toTile;
Tile* fromCylinder = nullptr;
uint32_t n = 0;
while ((subCylinder = toCylinder->queryDestination(index, creature, &toItem, flags)) != toCylinder) {
map.moveCreature(creature, *subCylinder);
if (creature.getParent() != subCylinder) {
//could happen if a script moved the creature
fromCylinder = nullptr;
break;
}
fromCylinder = toCylinder;
toCylinder = subCylinder;
flags = 0;
//to prevent infinite loop
if (++n >= MAP_MAX_LAYERS) {
break;
}
}
if (fromCylinder) {
const Position& fromPosition = fromCylinder->getPosition();
const Position& toPosition = toCylinder->getPosition();
if (fromPosition.z != toPosition.z && (fromPosition.x != toPosition.x || fromPosition.y != toPosition.y)) {
Direction dir = getDirectionTo(fromPosition, toPosition);
if ((dir & DIRECTION_DIAGONAL_MASK) == 0) {
internalCreatureTurn(&creature, dir);
}
}
}
return RETURNVALUE_NOERROR;
}
void Game::playerMoveItemByPlayerID(uint32_t playerId, const Position& fromPos, uint16_t spriteId, uint8_t fromStackPos, const Position& toPos, uint8_t count)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
playerMoveItem(player, fromPos, spriteId, fromStackPos, toPos, count, nullptr, nullptr);
}
void Game::playerMoveItem(Player* player, const Position& fromPos,
uint16_t spriteId, uint8_t fromStackPos, const Position& toPos, uint8_t count, Item* item, Cylinder* toCylinder)
{
if (!player->canDoAction()) {
uint32_t delay = player->getNextActionTime();
SchedulerTask* task = createSchedulerTask(delay, std::bind(&Game::playerMoveItemByPlayerID, this,
player->getID(), fromPos, spriteId, fromStackPos, toPos, count));
player->setNextActionTask(task);
return;
}
player->setNextActionTask(nullptr);
if (item == nullptr) {
uint8_t fromIndex = 0;
if (fromPos.x == 0xFFFF) {
if (fromPos.y & 0x40) {
fromIndex = fromPos.z;
} else {
fromIndex = static_cast<uint8_t>(fromPos.y);
}
} else {
fromIndex = fromStackPos;
}
Thing* thing = internalGetThing(player, fromPos, fromIndex, 0, STACKPOS_MOVE);
if (!thing || !thing->getItem()) {
player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE);
return;
}
item = thing->getItem();
}
if (item->getClientID() != spriteId) {
player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE);
return;
}
Cylinder* fromCylinder = internalGetCylinder(player, fromPos);
if (fromCylinder == nullptr) {
player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE);
return;
}
if (toCylinder == nullptr) {
toCylinder = internalGetCylinder(player, toPos);
if (toCylinder == nullptr) {
player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE);
return;
}
}
if (!item->isPushable() || item->hasAttribute(ITEM_ATTRIBUTE_UNIQUEID)) {
player->sendCancelMessage(RETURNVALUE_NOTMOVEABLE);
return;
}
const Position& playerPos = player->getPosition();
const Position& mapFromPos = fromCylinder->getTile()->getPosition();
if (playerPos.z != mapFromPos.z) {
player->sendCancelMessage(playerPos.z > mapFromPos.z ? RETURNVALUE_FIRSTGOUPSTAIRS : RETURNVALUE_FIRSTGODOWNSTAIRS);
return;
}
if (!Position::areInRange<1, 1>(playerPos, mapFromPos)) {
//need to walk to the item first before using it
std::forward_list<Direction> listDir;
if (player->getPathTo(item->getPosition(), listDir, 0, 1, true, true)) {
g_dispatcher.addTask(createTask(std::bind(&Game::playerAutoWalk,
this, player->getID(), listDir)));
SchedulerTask* task = createSchedulerTask(400, std::bind(&Game::playerMoveItemByPlayerID, this,
player->getID(), fromPos, spriteId, fromStackPos, toPos, count));
player->setNextWalkActionTask(task);
} else {
player->sendCancelMessage(RETURNVALUE_THEREISNOWAY);
}
return;
}
const Tile* toCylinderTile = toCylinder->getTile();
const Position& mapToPos = toCylinderTile->getPosition();
//hangable item specific code
if (item->isHangable() && toCylinderTile->hasFlag(TILESTATE_SUPPORTS_HANGABLE)) {
//destination supports hangable objects so need to move there first
bool vertical = toCylinderTile->hasProperty(CONST_PROP_ISVERTICAL);
if (vertical) {
if (playerPos.x + 1 == mapToPos.x) {
player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE);
return;
}
} else { // horizontal
if (playerPos.y + 1 == mapToPos.y) {
player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE);
return;
}
}
if (!Position::areInRange<1, 1, 0>(playerPos, mapToPos)) {
Position walkPos = mapToPos;
if (vertical) {
walkPos.x++;
} else {
walkPos.y++;
}
Position itemPos = fromPos;
uint8_t itemStackPos = fromStackPos;
if (fromPos.x != 0xFFFF && Position::areInRange<1, 1>(mapFromPos, playerPos)
&& !Position::areInRange<1, 1, 0>(mapFromPos, walkPos)) {
//need to pick up the item first
Item* moveItem = nullptr;
ReturnValue ret = internalMoveItem(fromCylinder, player, INDEX_WHEREEVER, item, count, &moveItem);
if (ret != RETURNVALUE_NOERROR) {
player->sendCancelMessage(ret);
return;
}
//changing the position since it's now in the inventory of the player
internalGetPosition(moveItem, itemPos, itemStackPos);
}
std::forward_list<Direction> listDir;
if (player->getPathTo(walkPos, listDir, 0, 0, true, true)) {
g_dispatcher.addTask(createTask(std::bind(&Game::playerAutoWalk,
this, player->getID(), listDir)));
SchedulerTask* task = createSchedulerTask(400, std::bind(&Game::playerMoveItemByPlayerID, this,
player->getID(), itemPos, spriteId, itemStackPos, toPos, count));
player->setNextWalkActionTask(task);
} else {
player->sendCancelMessage(RETURNVALUE_THEREISNOWAY);
}
return;
}
}
if ((Position::getDistanceX(playerPos, mapToPos) > item->getThrowRange()) ||
(Position::getDistanceY(playerPos, mapToPos) > item->getThrowRange()) ||
(Position::getDistanceZ(mapFromPos, mapToPos) * 4 > item->getThrowRange())) {
player->sendCancelMessage(RETURNVALUE_DESTINATIONOUTOFREACH);
return;
}
if (!canThrowObjectTo(mapFromPos, mapToPos)) {
player->sendCancelMessage(RETURNVALUE_CANNOTTHROW);
return;
}
if (!g_events->eventPlayerOnMoveItem(player, item, count, fromPos, toPos, fromCylinder, toCylinder)) {
return;
}
uint8_t toIndex = 0;
if (toPos.x == 0xFFFF) {
if (toPos.y & 0x40) {
toIndex = toPos.z;
} else {
toIndex = static_cast<uint8_t>(toPos.y);
}
}
ReturnValue ret = internalMoveItem(fromCylinder, toCylinder, toIndex, item, count, nullptr, 0, player);
if (ret != RETURNVALUE_NOERROR) {
player->sendCancelMessage(ret);
} else {
g_events->eventPlayerOnItemMoved(player, item, count, fromPos, toPos, fromCylinder, toCylinder);
}
}
ReturnValue Game::internalMoveItem(Cylinder* fromCylinder, Cylinder* toCylinder, int32_t index,
Item* item, uint32_t count, Item** _moveItem, uint32_t flags /*= 0*/, Creature* actor/* = nullptr*/, Item* tradeItem/* = nullptr*/)
{
Tile* fromTile = fromCylinder->getTile();
if (fromTile) {
auto it = browseFields.find(fromTile);
if (it != browseFields.end() && it->second == fromCylinder) {
fromCylinder = fromTile;
}
}
Item* toItem = nullptr;
Cylinder* subCylinder;
int floorN = 0;
while ((subCylinder = toCylinder->queryDestination(index, *item, &toItem, flags)) != toCylinder) {
toCylinder = subCylinder;
flags = 0;
//to prevent infinite loop
if (++floorN >= MAP_MAX_LAYERS) {
break;
}
}
//destination is the same as the source?
if (item == toItem) {
return RETURNVALUE_NOERROR; //silently ignore move
}
//check if we can add this item
ReturnValue ret = toCylinder->queryAdd(index, *item, count, flags, actor);
if (ret == RETURNVALUE_NEEDEXCHANGE) {
//check if we can add it to source cylinder
ret = fromCylinder->queryAdd(fromCylinder->getThingIndex(item), *toItem, toItem->getItemCount(), 0);
if (ret == RETURNVALUE_NOERROR) {
//check how much we can move
uint32_t maxExchangeQueryCount = 0;
ReturnValue retExchangeMaxCount = fromCylinder->queryMaxCount(INDEX_WHEREEVER, *toItem, toItem->getItemCount(), maxExchangeQueryCount, 0);
if (retExchangeMaxCount != RETURNVALUE_NOERROR && maxExchangeQueryCount == 0) {
return retExchangeMaxCount;
}
if (toCylinder->queryRemove(*toItem, toItem->getItemCount(), flags) == RETURNVALUE_NOERROR) {
int32_t oldToItemIndex = toCylinder->getThingIndex(toItem);
toCylinder->removeThing(toItem, toItem->getItemCount());
fromCylinder->addThing(toItem);
if (oldToItemIndex != -1) {
toCylinder->postRemoveNotification(toItem, fromCylinder, oldToItemIndex);
}
int32_t newToItemIndex = fromCylinder->getThingIndex(toItem);
if (newToItemIndex != -1) {
fromCylinder->postAddNotification(toItem, toCylinder, newToItemIndex);
}
ret = toCylinder->queryAdd(index, *item, count, flags);
toItem = nullptr;
}
}
}
if (ret != RETURNVALUE_NOERROR) {
return ret;
}
//check how much we can move
uint32_t maxQueryCount = 0;
ReturnValue retMaxCount = toCylinder->queryMaxCount(index, *item, count, maxQueryCount, flags);
if (retMaxCount != RETURNVALUE_NOERROR && maxQueryCount == 0) {
return retMaxCount;
}
uint32_t m;
if (item->isStackable()) {
m = std::min<uint32_t>(count, maxQueryCount);
} else {
m = maxQueryCount;
}
Item* moveItem = item;
//check if we can remove this item
ret = fromCylinder->queryRemove(*item, m, flags);
if (ret != RETURNVALUE_NOERROR) {
return ret;
}
if (tradeItem) {
if (toCylinder->getItem() == tradeItem) {
return RETURNVALUE_NOTENOUGHROOM;
}
Cylinder* tmpCylinder = toCylinder->getParent();
while (tmpCylinder) {
if (tmpCylinder->getItem() == tradeItem) {
return RETURNVALUE_NOTENOUGHROOM;
}
tmpCylinder = tmpCylinder->getParent();
}
}
//remove the item
int32_t itemIndex = fromCylinder->getThingIndex(item);
Item* updateItem = nullptr;
fromCylinder->removeThing(item, m);
//update item(s)
if (item->isStackable()) {
uint32_t n;
if (item->equals(toItem)) {
n = std::min<uint32_t>(100 - toItem->getItemCount(), m);
toCylinder->updateThing(toItem, toItem->getID(), toItem->getItemCount() + n);
updateItem = toItem;
} else {
n = 0;
}
int32_t newCount = m - n;
if (newCount > 0) {
moveItem = item->clone();
moveItem->setItemCount(newCount);
} else {
moveItem = nullptr;
}
if (item->isRemoved()) {
ReleaseItem(item);
}
}
//add item
if (moveItem /*m - n > 0*/) {
toCylinder->addThing(index, moveItem);
}
if (itemIndex != -1) {
fromCylinder->postRemoveNotification(item, toCylinder, itemIndex);
}
if (moveItem) {
int32_t moveItemIndex = toCylinder->getThingIndex(moveItem);
if (moveItemIndex != -1) {
toCylinder->postAddNotification(moveItem, fromCylinder, moveItemIndex);
}
}
if (updateItem) {
int32_t updateItemIndex = toCylinder->getThingIndex(updateItem);
if (updateItemIndex != -1) {
toCylinder->postAddNotification(updateItem, fromCylinder, updateItemIndex);
}
}
if (_moveItem) {
if (moveItem) {
*_moveItem = moveItem;
} else {
*_moveItem = item;
}
}
//we could not move all, inform the player
if (item->isStackable() && maxQueryCount < count) {
return retMaxCount;
}
return ret;
}
ReturnValue Game::internalAddItem(Cylinder* toCylinder, Item* item, int32_t index /*= INDEX_WHEREEVER*/,
uint32_t flags/* = 0*/, bool test/* = false*/)
{
uint32_t remainderCount = 0;
return internalAddItem(toCylinder, item, index, flags, test, remainderCount);
}
ReturnValue Game::internalAddItem(Cylinder* toCylinder, Item* item, int32_t index,
uint32_t flags, bool test, uint32_t& remainderCount)
{
if (toCylinder == nullptr || item == nullptr) {
return RETURNVALUE_NOTPOSSIBLE;
}
Cylinder* destCylinder = toCylinder;
Item* toItem = nullptr;
toCylinder = toCylinder->queryDestination(index, *item, &toItem, flags);
//check if we can add this item
ReturnValue ret = toCylinder->queryAdd(index, *item, item->getItemCount(), flags);
if (ret != RETURNVALUE_NOERROR) {
return ret;
}
/*
Check if we can add the whole amount; we do this by checking against the original cylinder,
since the queryDestination can return a cylinder that might only hold a part of the full amount.
*/
uint32_t maxQueryCount = 0;
ret = destCylinder->queryMaxCount(INDEX_WHEREEVER, *item, item->getItemCount(), maxQueryCount, flags);
if (ret != RETURNVALUE_NOERROR) {
return ret;
}
if (test) {
return RETURNVALUE_NOERROR;
}
if (item->isStackable() && item->equals(toItem)) {
uint32_t m = std::min<uint32_t>(item->getItemCount(), maxQueryCount);
uint32_t n = std::min<uint32_t>(100 - toItem->getItemCount(), m);
toCylinder->updateThing(toItem, toItem->getID(), toItem->getItemCount() + n);
int32_t count = m - n;
if (count > 0) {
if (item->getItemCount() != count) {
Item* remainderItem = item->clone();
remainderItem->setItemCount(count);
if (internalAddItem(destCylinder, remainderItem, INDEX_WHEREEVER, flags, false) != RETURNVALUE_NOERROR) {
ReleaseItem(remainderItem);
remainderCount = count;
}
} else {
toCylinder->addThing(index, item);
int32_t itemIndex = toCylinder->getThingIndex(item);
if (itemIndex != -1) {
toCylinder->postAddNotification(item, nullptr, itemIndex);
}
}
} else {
//fully merged with toItem, item will be destroyed
item->onRemoved();
ReleaseItem(item);
int32_t itemIndex = toCylinder->getThingIndex(toItem);
if (itemIndex != -1) {
toCylinder->postAddNotification(toItem, nullptr, itemIndex);
}
}
} else {
toCylinder->addThing(index, item);
int32_t itemIndex = toCylinder->getThingIndex(item);
if (itemIndex != -1) {
toCylinder->postAddNotification(item, nullptr, itemIndex);
}
}
return RETURNVALUE_NOERROR;
}
ReturnValue Game::internalRemoveItem(Item* item, int32_t count /*= -1*/, bool test /*= false*/, uint32_t flags /*= 0*/)
{
Cylinder* cylinder = item->getParent();
if (cylinder == nullptr) {
return RETURNVALUE_NOTPOSSIBLE;
}
Tile* fromTile = cylinder->getTile();
if (fromTile) {
auto it = browseFields.find(fromTile);
if (it != browseFields.end() && it->second == cylinder) {
cylinder = fromTile;
}
}
if (count == -1) {
count = item->getItemCount();
}
//check if we can remove this item
ReturnValue ret = cylinder->queryRemove(*item, count, flags | FLAG_IGNORENOTMOVEABLE);
if (ret != RETURNVALUE_NOERROR) {
return ret;
}
if (!item->canRemove()) {
return RETURNVALUE_NOTPOSSIBLE;
}
if (!test) {
int32_t index = cylinder->getThingIndex(item);
//remove the item
cylinder->removeThing(item, count);
if (item->isRemoved()) {
item->onRemoved();
ReleaseItem(item);
}
cylinder->postRemoveNotification(item, nullptr, index);
}
return RETURNVALUE_NOERROR;
}
ReturnValue Game::internalPlayerAddItem(Player* player, Item* item, bool dropOnMap /*= true*/, slots_t slot /*= CONST_SLOT_WHEREEVER*/)
{
uint32_t remainderCount = 0;
ReturnValue ret = internalAddItem(player, item, static_cast<int32_t>(slot), 0, false, remainderCount);
if (remainderCount != 0) {
Item* remainderItem = Item::CreateItem(item->getID(), remainderCount);
ReturnValue remaindRet = internalAddItem(player->getTile(), remainderItem, INDEX_WHEREEVER, FLAG_NOLIMIT);
if (remaindRet != RETURNVALUE_NOERROR) {
ReleaseItem(remainderItem);
}
}
if (ret != RETURNVALUE_NOERROR && dropOnMap) {
ret = internalAddItem(player->getTile(), item, INDEX_WHEREEVER, FLAG_NOLIMIT);
}
return ret;
}
Item* Game::findItemOfType(Cylinder* cylinder, uint16_t itemId,
bool depthSearch /*= true*/, int32_t subType /*= -1*/) const
{
if (cylinder == nullptr) {
return nullptr;
}
std::vector<Container*> containers;
for (size_t i = cylinder->getFirstIndex(), j = cylinder->getLastIndex(); i < j; ++i) {
Thing* thing = cylinder->getThing(i);
if (!thing) {
continue;
}
Item* item = thing->getItem();
if (!item) {
continue;
}
if (item->getID() == itemId && (subType == -1 || subType == item->getSubType())) {
return item;
}
if (depthSearch) {
Container* container = item->getContainer();
if (container) {
containers.push_back(container);
}
}
}
size_t i = 0;
while (i < containers.size()) {
Container* container = containers[i++];
for (Item* item : container->getItemList()) {
if (item->getID() == itemId && (subType == -1 || subType == item->getSubType())) {
return item;
}
Container* subContainer = item->getContainer();
if (subContainer) {
containers.push_back(subContainer);
}
}
}
return nullptr;
}
bool Game::removeMoney(Cylinder* cylinder, uint64_t money, uint32_t flags /*= 0*/)
{
if (cylinder == nullptr) {
return false;
}
if (money == 0) {
return true;
}
std::vector<Container*> containers;
std::multimap<uint32_t, Item*> moneyMap;
uint64_t moneyCount = 0;
for (size_t i = cylinder->getFirstIndex(), j = cylinder->getLastIndex(); i < j; ++i) {
Thing* thing = cylinder->getThing(i);
if (!thing) {
continue;
}
Item* item = thing->getItem();
if (!item) {
continue;
}
Container* container = item->getContainer();
if (container) {
containers.push_back(container);
} else {
const uint32_t worth = item->getWorth();
if (worth != 0) {
moneyCount += worth;
moneyMap.emplace(worth, item);
}
}
}
size_t i = 0;
while (i < containers.size()) {
Container* container = containers[i++];
for (Item* item : container->getItemList()) {
Container* tmpContainer = item->getContainer();
if (tmpContainer) {
containers.push_back(tmpContainer);
} else {
const uint32_t worth = item->getWorth();
if (worth != 0) {
moneyCount += worth;
moneyMap.emplace(worth, item);
}
}
}
}
if (moneyCount < money) {
return false;
}
for (const auto& moneyEntry : moneyMap) {
Item* item = moneyEntry.second;
if (moneyEntry.first < money) {
internalRemoveItem(item);
money -= moneyEntry.first;
} else if (moneyEntry.first > money) {
const uint32_t worth = moneyEntry.first / item->getItemCount();
const uint32_t removeCount = std::ceil(money / static_cast<double>(worth));
addMoney(cylinder, (worth * removeCount) - money, flags);
internalRemoveItem(item, removeCount);
break;
} else {
internalRemoveItem(item);
break;
}
}
return true;
}
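//Pays out 'money' as crystal (10000 gp), platinum (100 gp) and gold coins, adding each stack to the
//cylinder and falling back to the cylinder's tile with FLAG_NOLIMIT when a stack does not fit.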
void Game::addMoney(Cylinder* cylinder, uint64_t money, uint32_t flags /*= 0*/)
{
if (money == 0) {
return;
}
uint32_t crystalCoins = money / 10000;
money -= crystalCoins * 10000;
while (crystalCoins > 0) {
const uint16_t count = std::min<uint32_t>(100, crystalCoins);
Item* remaindItem = Item::CreateItem(ITEM_CRYSTAL_COIN, count);
ReturnValue ret = internalAddItem(cylinder, remaindItem, INDEX_WHEREEVER, flags);
if (ret != RETURNVALUE_NOERROR) {
internalAddItem(cylinder->getTile(), remaindItem, INDEX_WHEREEVER, FLAG_NOLIMIT);
}
crystalCoins -= count;
}
uint16_t platinumCoins = money / 100;
if (platinumCoins != 0) {
Item* remaindItem = Item::CreateItem(ITEM_PLATINUM_COIN, platinumCoins);
ReturnValue ret = internalAddItem(cylinder, remaindItem, INDEX_WHEREEVER, flags);
if (ret != RETURNVALUE_NOERROR) {
internalAddItem(cylinder->getTile(), remaindItem, INDEX_WHEREEVER, FLAG_NOLIMIT);
}
money -= platinumCoins * 100;
}
if (money != 0) {
Item* remaindItem = Item::CreateItem(ITEM_GOLD_COIN, money);
ReturnValue ret = internalAddItem(cylinder, remaindItem, INDEX_WHEREEVER, flags);
if (ret != RETURNVALUE_NOERROR) {
internalAddItem(cylinder->getTile(), remaindItem, INDEX_WHEREEVER, FLAG_NOLIMIT);
}
}
}
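//Transforms 'item' into newId (optionally changing its count/subtype). Depending on how the current
//and new item types relate, the item is either updated in place or removed and replaced by a freshly
//created item at the same position; returns the resulting item, or nullptr if the item ends up removed.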
Item* Game::transformItem(Item* item, uint16_t newId, int32_t newCount /*= -1*/)
{
if (item->getID() == newId && (newCount == -1 || (newCount == item->getSubType() && newCount != 0))) { //chargeless item placed on map = infinite
return item;
}
Cylinder* cylinder = item->getParent();
if (cylinder == nullptr) {
return nullptr;
}
Tile* fromTile = cylinder->getTile();
if (fromTile) {
auto it = browseFields.find(fromTile);
if (it != browseFields.end() && it->second == cylinder) {
cylinder = fromTile;
}
}
int32_t itemIndex = cylinder->getThingIndex(item);
if (itemIndex == -1) {
return item;
}
if (!item->canTransform()) {
return item;
}
const ItemType& newType = Item::items[newId];
if (newType.id == 0) {
return item;
}
const ItemType& curType = Item::items[item->getID()];
if (curType.alwaysOnTop != newType.alwaysOnTop) {
//This only occurs when you transform items on tiles from a downItem to a topItem (or vice versa)
//Remove the old, and add the new
cylinder->removeThing(item, item->getItemCount());
cylinder->postRemoveNotification(item, cylinder, itemIndex);
item->setID(newId);
if (newCount != -1) {
item->setSubType(newCount);
}
cylinder->addThing(item);
Cylinder* newParent = item->getParent();
if (newParent == nullptr) {
ReleaseItem(item);
return nullptr;
}
newParent->postAddNotification(item, cylinder, newParent->getThingIndex(item));
return item;
}
if (curType.type == newType.type) {
//Both items have the same type so we can safely change id/subtype
if (newCount == 0 && (item->isStackable() || item->hasAttribute(ITEM_ATTRIBUTE_CHARGES))) {
if (item->isStackable()) {
internalRemoveItem(item);
return nullptr;
} else {
int32_t newItemId = newId;
if (curType.id == newType.id) {
newItemId = curType.decayTo;
}
if (newItemId < 0) {
internalRemoveItem(item);
return nullptr;
} else if (newItemId != newId) {
//Replacing the old item with the new one while maintaining the old position
Item* newItem = Item::CreateItem(newItemId, 1);
if (newItem == nullptr) {
return nullptr;
}
cylinder->replaceThing(itemIndex, newItem);
cylinder->postAddNotification(newItem, cylinder, itemIndex);
item->setParent(nullptr);
cylinder->postRemoveNotification(item, cylinder, itemIndex);
ReleaseItem(item);
return newItem;
} else {
return transformItem(item, newItemId);
}
}
} else {
cylinder->postRemoveNotification(item, cylinder, itemIndex);
uint16_t itemId = item->getID();
int32_t count = item->getSubType();
if (curType.id != newType.id) {
if (newType.group != curType.group) {
item->setDefaultSubtype();
}
itemId = newId;
}
if (newCount != -1 && newType.hasSubType()) {
count = newCount;
}
cylinder->updateThing(item, itemId, count);
cylinder->postAddNotification(item, cylinder, itemIndex);
return item;
}
}
//Replacing the old item with the new one while maintaining the old position
Item* newItem;
if (newCount == -1) {
newItem = Item::CreateItem(newId);
} else {
newItem = Item::CreateItem(newId, newCount);
}
if (newItem == nullptr) {
return nullptr;
}
cylinder->replaceThing(itemIndex, newItem);
cylinder->postAddNotification(newItem, cylinder, itemIndex);
item->setParent(nullptr);
cylinder->postRemoveNotification(item, cylinder, itemIndex);
ReleaseItem(item);
return newItem;
}
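//Moves a creature or item straight to newPos: creatures are moved with map.moveCreature after a
//FLAG_NOLIMIT queryAdd on the destination tile, items are moved there with internalMoveItem.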
ReturnValue Game::internalTeleport(Thing* thing, const Position& newPos, bool pushMove/* = true*/, uint32_t flags /*= 0*/)
{
if (newPos == thing->getPosition()) {
return RETURNVALUE_NOERROR;
} else if (thing->isRemoved()) {
return RETURNVALUE_NOTPOSSIBLE;
}
Tile* toTile = map.getTile(newPos);
if (!toTile) {
return RETURNVALUE_NOTPOSSIBLE;
}
if (Creature* creature = thing->getCreature()) {
ReturnValue ret = toTile->queryAdd(0, *creature, 1, FLAG_NOLIMIT);
if (ret != RETURNVALUE_NOERROR) {
return ret;
}
map.moveCreature(*creature, *toTile, !pushMove);
return RETURNVALUE_NOERROR;
} else if (Item* item = thing->getItem()) {
return internalMoveItem(item->getParent(), toTile, INDEX_WHEREEVER, item, item->getItemCount(), nullptr, flags);
}
return RETURNVALUE_NOTPOSSIBLE;
}
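//Helper for playerEquipItem: returns the first item with the given id found by iterating the container.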
Item* searchForItem(Container* container, uint16_t itemId)
{
for (ContainerIterator it = container->iterator(); it.hasNext(); it.advance()) {
if ((*it)->getID() == itemId) {
return *it;
}
}
return nullptr;
}
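//Helper for playerEquipItem: maps an item type's slot position flags to an equipment slot; shields
//keep the right hand, two-handed and left-hand weapons resolve to the left hand.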
slots_t getSlotType(const ItemType& it)
{
slots_t slot = CONST_SLOT_RIGHT;
if (it.weaponType != WeaponType_t::WEAPON_SHIELD) {
int32_t slotPosition = it.slotPosition;
if (slotPosition & SLOTP_HEAD) {
slot = CONST_SLOT_HEAD;
} else if (slotPosition & SLOTP_NECKLACE) {
slot = CONST_SLOT_NECKLACE;
} else if (slotPosition & SLOTP_ARMOR) {
slot = CONST_SLOT_ARMOR;
} else if (slotPosition & SLOTP_LEGS) {
slot = CONST_SLOT_LEGS;
} else if (slotPosition & SLOTP_FEET) {
slot = CONST_SLOT_FEET;
} else if (slotPosition & SLOTP_RING) {
slot = CONST_SLOT_RING;
} else if (slotPosition & SLOTP_AMMO) {
slot = CONST_SLOT_AMMO;
} else if (slotPosition & SLOTP_TWO_HAND || slotPosition & SLOTP_LEFT) {
slot = CONST_SLOT_LEFT;
}
}
return slot;
}
//Implementation of player-invoked events
void Game::playerEquipItem(uint32_t playerId, uint16_t spriteId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
Item* item = player->getInventoryItem(CONST_SLOT_BACKPACK);
if (!item) {
return;
}
Container* backpack = item->getContainer();
if (!backpack) {
return;
}
const ItemType& it = Item::items.getItemIdByClientId(spriteId);
slots_t slot = getSlotType(it);
Item* slotItem = player->getInventoryItem(slot);
Item* equipItem = searchForItem(backpack, it.id);
if (slotItem && slotItem->getID() == it.id && (!it.stackable || slotItem->getItemCount() == 100 || !equipItem)) {
internalMoveItem(slotItem->getParent(), player, CONST_SLOT_WHEREEVER, slotItem, slotItem->getItemCount(), nullptr);
} else if (equipItem) {
internalMoveItem(equipItem->getParent(), player, slot, equipItem, equipItem->getItemCount(), nullptr);
}
}
void Game::playerMove(uint32_t playerId, Direction direction)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
player->resetIdleTime();
player->setNextWalkActionTask(nullptr);
player->startAutoWalk(std::forward_list<Direction> { direction });
}
bool Game::playerBroadcastMessage(Player* player, const std::string& text) const
{
if (!player->hasFlag(PlayerFlag_CanBroadcast)) {
return false;
}
std::cout << "> " << player->getName() << " broadcasted: \"" << text << "\"." << std::endl;
for (const auto& it : players) {
it.second->sendPrivateMessage(player, TALKTYPE_BROADCAST, text);
}
return true;
}
void Game::playerCreatePrivateChannel(uint32_t playerId)
{
Player* player = getPlayerByID(playerId);
if (!player || !player->isPremium()) {
return;
}
ChatChannel* channel = g_chat->createChannel(*player, CHANNEL_PRIVATE);
if (!channel || !channel->addUser(*player)) {
return;
}
player->sendCreatePrivateChannel(channel->getId(), channel->getName());
}
void Game::playerChannelInvite(uint32_t playerId, const std::string& name)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
PrivateChatChannel* channel = g_chat->getPrivateChannel(*player);
if (!channel) {
return;
}
Player* invitePlayer = getPlayerByName(name);
if (!invitePlayer) {
return;
}
if (player == invitePlayer) {
return;
}
channel->invitePlayer(*player, *invitePlayer);
}
void Game::playerChannelExclude(uint32_t playerId, const std::string& name)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
PrivateChatChannel* channel = g_chat->getPrivateChannel(*player);
if (!channel) {
return;
}
Player* excludePlayer = getPlayerByName(name);
if (!excludePlayer) {
return;
}
if (player == excludePlayer) {
return;
}
channel->excludePlayer(*player, *excludePlayer);
}
void Game::playerRequestChannels(uint32_t playerId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
player->sendChannelsDialog();
}
void Game::playerOpenChannel(uint32_t playerId, uint16_t channelId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
ChatChannel* channel = g_chat->addUserToChannel(*player, channelId);
if (!channel) {
return;
}
const InvitedMap* invitedUsers = channel->getInvitedUsers();
const UsersMap* users;
if (!channel->isPublicChannel()) {
users = &channel->getUsers();
} else {
users = nullptr;
}
player->sendChannel(channel->getId(), channel->getName(), users, invitedUsers);
}
void Game::playerCloseChannel(uint32_t playerId, uint16_t channelId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
g_chat->removeUserFromChannel(*player, channelId);
}
void Game::playerOpenPrivateChannel(uint32_t playerId, std::string& receiver)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
if (!IOLoginData::formatPlayerName(receiver)) {
player->sendCancelMessage("A player with this name does not exist.");
return;
}
if (player->getName() == receiver) {
player->sendCancelMessage("You cannot set up a private message channel with yourself.");
return;
}
player->sendOpenPrivateChannel(receiver);
}
void Game::playerCloseNpcChannel(uint32_t playerId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
SpectatorHashSet spectators;
map.getSpectators(spectators, player->getPosition());
for (Creature* spectator : spectators) {
if (Npc* npc = spectator->getNpc()) {
npc->onPlayerCloseChannel(player);
}
}
}
void Game::playerReceivePing(uint32_t playerId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
player->receivePing();
}
void Game::playerReceivePingBack(uint32_t playerId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
player->sendPingBack();
}
void Game::playerAutoWalk(uint32_t playerId, const std::forward_list<Direction>& listDir)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
player->resetIdleTime();
player->setNextWalkTask(nullptr);
player->startAutoWalk(listDir);
}
void Game::playerStopAutoWalk(uint32_t playerId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
player->stopWalk();
}
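//"Use with..." handling: validates the hotkey setting, resolves the item, walks the player into range
//if needed (rescheduling this call as a walk task), honors the action cooldown, then delegates to
//g_actions->useItemEx.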
void Game::playerUseItemEx(uint32_t playerId, const Position& fromPos, uint8_t fromStackPos, uint16_t fromSpriteId,
const Position& toPos, uint8_t toStackPos, uint16_t toSpriteId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
bool isHotkey = (fromPos.x == 0xFFFF && fromPos.y == 0 && fromPos.z == 0);
if (isHotkey && !g_config.getBoolean(ConfigManager::AIMBOT_HOTKEY_ENABLED)) {
return;
}
Thing* thing = internalGetThing(player, fromPos, fromStackPos, fromSpriteId, STACKPOS_USEITEM);
if (!thing) {
player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE);
return;
}
Item* item = thing->getItem();
if (!item || !item->isUseable() || item->getClientID() != fromSpriteId) {
player->sendCancelMessage(RETURNVALUE_CANNOTUSETHISOBJECT);
return;
}
Position walkToPos = fromPos;
ReturnValue ret = g_actions->canUse(player, fromPos);
if (ret == RETURNVALUE_NOERROR) {
ret = g_actions->canUse(player, toPos, item);
if (ret == RETURNVALUE_TOOFARAWAY) {
walkToPos = toPos;
}
}
if (ret != RETURNVALUE_NOERROR) {
if (ret == RETURNVALUE_TOOFARAWAY) {
Position itemPos = fromPos;
uint8_t itemStackPos = fromStackPos;
if (fromPos.x != 0xFFFF && toPos.x != 0xFFFF && Position::areInRange<1, 1, 0>(fromPos, player->getPosition()) &&
!Position::areInRange<1, 1, 0>(fromPos, toPos)) {
Item* moveItem = nullptr;
ret = internalMoveItem(item->getParent(), player, INDEX_WHEREEVER, item, item->getItemCount(), &moveItem);
if (ret != RETURNVALUE_NOERROR) {
player->sendCancelMessage(ret);
return;
}
//changing the position since it's now in the inventory of the player
internalGetPosition(moveItem, itemPos, itemStackPos);
}
std::forward_list<Direction> listDir;
if (player->getPathTo(walkToPos, listDir, 0, 1, true, true)) {
g_dispatcher.addTask(createTask(std::bind(&Game::playerAutoWalk, this, player->getID(), listDir)));
SchedulerTask* task = createSchedulerTask(400, std::bind(&Game::playerUseItemEx, this,
playerId, itemPos, itemStackPos, fromSpriteId, toPos, toStackPos, toSpriteId));
player->setNextWalkActionTask(task);
} else {
player->sendCancelMessage(RETURNVALUE_THEREISNOWAY);
}
return;
}
player->sendCancelMessage(ret);
return;
}
if (!player->canDoAction()) {
uint32_t delay = player->getNextActionTime();
SchedulerTask* task = createSchedulerTask(delay, std::bind(&Game::playerUseItemEx, this,
playerId, fromPos, fromStackPos, fromSpriteId, toPos, toStackPos, toSpriteId));
player->setNextActionTask(task);
return;
}
player->resetIdleTime();
player->setNextActionTask(nullptr);
g_actions->useItemEx(player, fromPos, toPos, toStackPos, item, isHotkey);
}
void Game::playerUseItem(uint32_t playerId, const Position& pos, uint8_t stackPos,
uint8_t index, uint16_t spriteId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
bool isHotkey = (pos.x == 0xFFFF && pos.y == 0 && pos.z == 0);
if (isHotkey && !g_config.getBoolean(ConfigManager::AIMBOT_HOTKEY_ENABLED)) {
return;
}
Thing* thing = internalGetThing(player, pos, stackPos, spriteId, STACKPOS_USEITEM);
if (!thing) {
player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE);
return;
}
Item* item = thing->getItem();
if (!item || item->isUseable() || item->getClientID() != spriteId) {
player->sendCancelMessage(RETURNVALUE_CANNOTUSETHISOBJECT);
return;
}
ReturnValue ret = g_actions->canUse(player, pos);
if (ret != RETURNVALUE_NOERROR) {
if (ret == RETURNVALUE_TOOFARAWAY) {
std::forward_list<Direction> listDir;
if (player->getPathTo(pos, listDir, 0, 1, true, true)) {
g_dispatcher.addTask(createTask(std::bind(&Game::playerAutoWalk,
this, player->getID(), listDir)));
SchedulerTask* task = createSchedulerTask(400, std::bind(&Game::playerUseItem, this,
playerId, pos, stackPos, index, spriteId));
player->setNextWalkActionTask(task);
return;
}
ret = RETURNVALUE_THEREISNOWAY;
}
player->sendCancelMessage(ret);
return;
}
if (!player->canDoAction()) {
uint32_t delay = player->getNextActionTime();
SchedulerTask* task = createSchedulerTask(delay, std::bind(&Game::playerUseItem, this,
playerId, pos, stackPos, index, spriteId));
player->setNextActionTask(task);
return;
}
player->resetIdleTime();
player->setNextActionTask(nullptr);
g_actions->useItem(player, pos, index, item, isHotkey);
}
void Game::playerUseWithCreature(uint32_t playerId, const Position& fromPos, uint8_t fromStackPos, uint32_t creatureId, uint16_t spriteId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
Creature* creature = getCreatureByID(creatureId);
if (!creature) {
return;
}
if (!Position::areInRange<7, 5, 0>(creature->getPosition(), player->getPosition())) {
return;
}
bool isHotkey = (fromPos.x == 0xFFFF && fromPos.y == 0 && fromPos.z == 0);
if (!g_config.getBoolean(ConfigManager::AIMBOT_HOTKEY_ENABLED)) {
if (creature->getPlayer() || isHotkey) {
player->sendCancelMessage(RETURNVALUE_DIRECTPLAYERSHOOT);
return;
}
}
Thing* thing = internalGetThing(player, fromPos, fromStackPos, spriteId, STACKPOS_USEITEM);
if (!thing) {
player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE);
return;
}
Item* item = thing->getItem();
if (!item || !item->isUseable() || item->getClientID() != spriteId) {
player->sendCancelMessage(RETURNVALUE_CANNOTUSETHISOBJECT);
return;
}
Position toPos = creature->getPosition();
Position walkToPos = fromPos;
ReturnValue ret = g_actions->canUse(player, fromPos);
if (ret == RETURNVALUE_NOERROR) {
ret = g_actions->canUse(player, toPos, item);
if (ret == RETURNVALUE_TOOFARAWAY) {
walkToPos = toPos;
}
}
if (ret != RETURNVALUE_NOERROR) {
if (ret == RETURNVALUE_TOOFARAWAY) {
Position itemPos = fromPos;
uint8_t itemStackPos = fromStackPos;
if (fromPos.x != 0xFFFF && Position::areInRange<1, 1, 0>(fromPos, player->getPosition()) && !Position::areInRange<1, 1, 0>(fromPos, toPos)) {
Item* moveItem = nullptr;
ret = internalMoveItem(item->getParent(), player, INDEX_WHEREEVER, item, item->getItemCount(), &moveItem);
if (ret != RETURNVALUE_NOERROR) {
player->sendCancelMessage(ret);
return;
}
//changing the position since it's now in the inventory of the player
internalGetPosition(moveItem, itemPos, itemStackPos);
}
std::forward_list<Direction> listDir;
if (player->getPathTo(walkToPos, listDir, 0, 1, true, true)) {
g_dispatcher.addTask(createTask(std::bind(&Game::playerAutoWalk,
this, player->getID(), listDir)));
SchedulerTask* task = createSchedulerTask(400, std::bind(&Game::playerUseWithCreature, this,
playerId, itemPos, itemStackPos, creatureId, spriteId));
player->setNextWalkActionTask(task);
} else {
player->sendCancelMessage(RETURNVALUE_THEREISNOWAY);
}
return;
}
player->sendCancelMessage(ret);
return;
}
if (!player->canDoAction()) {
uint32_t delay = player->getNextActionTime();
SchedulerTask* task = createSchedulerTask(delay, std::bind(&Game::playerUseWithCreature, this,
playerId, fromPos, fromStackPos, creatureId, spriteId));
player->setNextActionTask(task);
return;
}
player->resetIdleTime();
player->setNextActionTask(nullptr);
g_actions->useItemEx(player, fromPos, creature->getPosition(), creature->getParent()->getThingIndex(creature), item, isHotkey, creature);
}
void Game::playerCloseContainer(uint32_t playerId, uint8_t cid)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
player->closeContainer(cid);
player->sendCloseContainer(cid);
}
void Game::playerMoveUpContainer(uint32_t playerId, uint8_t cid)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
Container* container = player->getContainerByID(cid);
if (!container) {
return;
}
Container* parentContainer = dynamic_cast<Container*>(container->getRealParent());
if (!parentContainer) {
Tile* tile = container->getTile();
if (!tile) {
return;
}
auto it = browseFields.find(tile);
if (it == browseFields.end()) {
parentContainer = new Container(tile);
parentContainer->incrementReferenceCounter();
browseFields[tile] = parentContainer;
g_scheduler.addEvent(createSchedulerTask(30000, std::bind(&Game::decreaseBrowseFieldRef, this, tile->getPosition())));
} else {
parentContainer = it->second;
}
}
player->addContainer(cid, parentContainer);
player->sendContainer(cid, parentContainer, parentContainer->hasParent(), player->getContainerIndex(cid));
}
void Game::playerUpdateContainer(uint32_t playerId, uint8_t cid)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
Container* container = player->getContainerByID(cid);
if (!container) {
return;
}
player->sendContainer(cid, container, container->hasParent(), player->getContainerIndex(cid));
}
void Game::playerRotateItem(uint32_t playerId, const Position& pos, uint8_t stackPos, const uint16_t spriteId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
Thing* thing = internalGetThing(player, pos, stackPos, 0, STACKPOS_TOPDOWN_ITEM);
if (!thing) {
return;
}
Item* item = thing->getItem();
if (!item || item->getClientID() != spriteId || !item->isRotatable() || item->hasAttribute(ITEM_ATTRIBUTE_UNIQUEID)) {
player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE);
return;
}
if (pos.x != 0xFFFF && !Position::areInRange<1, 1, 0>(pos, player->getPosition())) {
std::forward_list<Direction> listDir;
if (player->getPathTo(pos, listDir, 0, 1, true, true)) {
g_dispatcher.addTask(createTask(std::bind(&Game::playerAutoWalk,
this, player->getID(), listDir)));
SchedulerTask* task = createSchedulerTask(400, std::bind(&Game::playerRotateItem, this,
playerId, pos, stackPos, spriteId));
player->setNextWalkActionTask(task);
} else {
player->sendCancelMessage(RETURNVALUE_THEREISNOWAY);
}
return;
}
uint16_t newId = Item::items[item->getID()].rotateTo;
if (newId != 0) {
transformItem(item, newId);
}
}
void Game::playerWriteItem(uint32_t playerId, uint32_t windowTextId, const std::string& text)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
uint16_t maxTextLength = 0;
uint32_t internalWindowTextId = 0;
Item* writeItem = player->getWriteItem(internalWindowTextId, maxTextLength);
if (text.length() > maxTextLength || windowTextId != internalWindowTextId) {
return;
}
if (!writeItem || writeItem->isRemoved()) {
player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE);
return;
}
Cylinder* topParent = writeItem->getTopParent();
Player* owner = dynamic_cast<Player*>(topParent);
if (owner && owner != player) {
player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE);
return;
}
if (!Position::areInRange<1, 1, 0>(writeItem->getPosition(), player->getPosition())) {
player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE);
return;
}
for (auto creatureEvent : player->getCreatureEvents(CREATURE_EVENT_TEXTEDIT)) {
if (!creatureEvent->executeTextEdit(player, writeItem, text)) {
player->setWriteItem(nullptr);
return;
}
}
if (!text.empty()) {
if (writeItem->getText() != text) {
writeItem->setText(text);
writeItem->setWriter(player->getName());
writeItem->setDate(time(nullptr));
}
} else {
writeItem->resetText();
writeItem->resetWriter();
writeItem->resetDate();
}
uint16_t newId = Item::items[writeItem->getID()].writeOnceItemId;
if (newId != 0) {
transformItem(writeItem, newId);
}
player->setWriteItem(nullptr);
}
void Game::playerBrowseField(uint32_t playerId, const Position& pos)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
const Position& playerPos = player->getPosition();
if (playerPos.z != pos.z) {
player->sendCancelMessage(playerPos.z > pos.z ? RETURNVALUE_FIRSTGOUPSTAIRS : RETURNVALUE_FIRSTGODOWNSTAIRS);
return;
}
if (!Position::areInRange<1, 1>(playerPos, pos)) {
std::forward_list<Direction> listDir;
if (player->getPathTo(pos, listDir, 0, 1, true, true)) {
g_dispatcher.addTask(createTask(std::bind(&Game::playerAutoWalk,
this, player->getID(), listDir)));
SchedulerTask* task = createSchedulerTask(400, std::bind(
&Game::playerBrowseField, this, playerId, pos
));
player->setNextWalkActionTask(task);
} else {
player->sendCancelMessage(RETURNVALUE_THEREISNOWAY);
}
return;
}
Tile* tile = map.getTile(pos);
if (!tile) {
return;
}
if (!g_events->eventPlayerOnBrowseField(player, pos)) {
return;
}
Container* container;
auto it = browseFields.find(tile);
if (it == browseFields.end()) {
container = new Container(tile);
container->incrementReferenceCounter();
browseFields[tile] = container;
g_scheduler.addEvent(createSchedulerTask(30000, std::bind(&Game::decreaseBrowseFieldRef, this, tile->getPosition())));
} else {
container = it->second;
}
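//Derive a pseudo container id in the 0x7..0xF range from the tile position, presumably so nearby
//browse fields map to distinct client container slots.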
uint8_t dummyContainerId = 0xF - ((pos.x % 3) * 3 + (pos.y % 3));
Container* openContainer = player->getContainerByID(dummyContainerId);
if (openContainer) {
player->onCloseContainer(openContainer);
player->closeContainer(dummyContainerId);
} else {
player->addContainer(dummyContainerId, container);
player->sendContainer(dummyContainerId, container, false, 0);
}
}
void Game::playerSeekInContainer(uint32_t playerId, uint8_t containerId, uint16_t index)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
Container* container = player->getContainerByID(containerId);
if (!container || !container->hasPagination()) {
return;
}
if ((index % container->capacity()) != 0 || index >= container->size()) {
return;
}
player->setContainerIndex(containerId, index);
player->sendContainer(containerId, container, container->hasParent(), index);
}
void Game::playerUpdateHouseWindow(uint32_t playerId, uint8_t listId, uint32_t windowTextId, const std::string& text)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
uint32_t internalWindowTextId;
uint32_t internalListId;
House* house = player->getEditHouse(internalWindowTextId, internalListId);
if (house && house->canEditAccessList(internalListId, player) && internalWindowTextId == windowTextId && listId == 0) {
house->setAccessList(internalListId, text);
}
player->setEditHouse(nullptr);
}
void Game::playerRequestTrade(uint32_t playerId, const Position& pos, uint8_t stackPos,
uint32_t tradePlayerId, uint16_t spriteId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
Player* tradePartner = getPlayerByID(tradePlayerId);
if (!tradePartner || tradePartner == player) {
player->sendTextMessage(MESSAGE_INFO_DESCR, "Sorry, not possible.");
return;
}
if (!Position::areInRange<2, 2, 0>(tradePartner->getPosition(), player->getPosition())) {
std::ostringstream ss;
ss << tradePartner->getName() << " tells you to move closer.";
player->sendTextMessage(MESSAGE_INFO_DESCR, ss.str());
return;
}
if (!canThrowObjectTo(tradePartner->getPosition(), player->getPosition())) {
player->sendCancelMessage(RETURNVALUE_CREATUREISNOTREACHABLE);
return;
}
Thing* tradeThing = internalGetThing(player, pos, stackPos, 0, STACKPOS_TOPDOWN_ITEM);
if (!tradeThing) {
player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE);
return;
}
Item* tradeItem = tradeThing->getItem();
if (tradeItem->getClientID() != spriteId || !tradeItem->isPickupable() || tradeItem->hasAttribute(ITEM_ATTRIBUTE_UNIQUEID)) {
player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE);
return;
}
const Position& playerPosition = player->getPosition();
const Position& tradeItemPosition = tradeItem->getPosition();
if (playerPosition.z != tradeItemPosition.z) {
player->sendCancelMessage(playerPosition.z > tradeItemPosition.z ? RETURNVALUE_FIRSTGOUPSTAIRS : RETURNVALUE_FIRSTGODOWNSTAIRS);
return;
}
if (!Position::areInRange<1, 1>(tradeItemPosition, playerPosition)) {
std::forward_list<Direction> listDir;
if (player->getPathTo(pos, listDir, 0, 1, true, true)) {
g_dispatcher.addTask(createTask(std::bind(&Game::playerAutoWalk,
this, player->getID(), listDir)));
SchedulerTask* task = createSchedulerTask(400, std::bind(&Game::playerRequestTrade, this,
playerId, pos, stackPos, tradePlayerId, spriteId));
player->setNextWalkActionTask(task);
} else {
player->sendCancelMessage(RETURNVALUE_THEREISNOWAY);
}
return;
}
Container* tradeItemContainer = tradeItem->getContainer();
if (tradeItemContainer) {
for (const auto& it : tradeItems) {
Item* item = it.first;
if (tradeItem == item) {
player->sendTextMessage(MESSAGE_INFO_DESCR, "This item is already being traded.");
return;
}
if (tradeItemContainer->isHoldingItem(item)) {
player->sendTextMessage(MESSAGE_INFO_DESCR, "This item is already being traded.");
return;
}
Container* container = item->getContainer();
if (container && container->isHoldingItem(tradeItem)) {
player->sendTextMessage(MESSAGE_INFO_DESCR, "This item is already being traded.");
return;
}
}
} else {
for (const auto& it : tradeItems) {
Item* item = it.first;
if (tradeItem == item) {
player->sendTextMessage(MESSAGE_INFO_DESCR, "This item is already being traded.");
return;
}
Container* container = item->getContainer();
if (container && container->isHoldingItem(tradeItem)) {
player->sendTextMessage(MESSAGE_INFO_DESCR, "This item is already being traded.");
return;
}
}
}
Container* tradeContainer = tradeItem->getContainer();
if (tradeContainer && tradeContainer->getItemHoldingCount() + 1 > 100) {
player->sendTextMessage(MESSAGE_INFO_DESCR, "You can not trade more than 100 items.");
return;
}
if (!g_events->eventPlayerOnTradeRequest(player, tradePartner, tradeItem)) {
return;
}
internalStartTrade(player, tradePartner, tradeItem);
}
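//Registers the trade: stores the offered item (with an extra reference) in tradeItems, moves both
//players into the appropriate trade state and sends the trade windows; fails if either player is
//already trading with someone else.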
bool Game::internalStartTrade(Player* player, Player* tradePartner, Item* tradeItem)
{
if (player->tradeState != TRADE_NONE && !(player->tradeState == TRADE_ACKNOWLEDGE && player->tradePartner == tradePartner)) {
player->sendCancelMessage(RETURNVALUE_YOUAREALREADYTRADING);
return false;
} else if (tradePartner->tradeState != TRADE_NONE && tradePartner->tradePartner != player) {
player->sendCancelMessage(RETURNVALUE_THISPLAYERISALREADYTRADING);
return false;
}
player->tradePartner = tradePartner;
player->tradeItem = tradeItem;
player->tradeState = TRADE_INITIATED;
tradeItem->incrementReferenceCounter();
tradeItems[tradeItem] = player->getID();
player->sendTradeItemRequest(player->getName(), tradeItem, true);
if (tradePartner->tradeState == TRADE_NONE) {
std::ostringstream ss;
ss << player->getName() << " wants to trade with you.";
tradePartner->sendTextMessage(MESSAGE_EVENT_ADVANCE, ss.str());
tradePartner->tradeState = TRADE_ACKNOWLEDGE;
tradePartner->tradePartner = player;
} else {
Item* counterOfferItem = tradePartner->tradeItem;
player->sendTradeItemRequest(tradePartner->getName(), counterOfferItem, false);
tradePartner->sendTradeItemRequest(player->getName(), tradeItem, false);
}
return true;
}
void Game::playerAcceptTrade(uint32_t playerId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
if (!(player->getTradeState() == TRADE_ACKNOWLEDGE || player->getTradeState() == TRADE_INITIATED)) {
return;
}
Player* tradePartner = player->tradePartner;
if (!tradePartner) {
return;
}
if (!canThrowObjectTo(tradePartner->getPosition(), player->getPosition())) {
player->sendCancelMessage(RETURNVALUE_CREATUREISNOTREACHABLE);
return;
}
player->setTradeState(TRADE_ACCEPT);
if (tradePartner->getTradeState() == TRADE_ACCEPT) {
Item* tradeItem1 = player->tradeItem;
Item* tradeItem2 = tradePartner->tradeItem;
if (!g_events->eventPlayerOnTradeAccept(player, tradePartner, tradeItem1, tradeItem2)) {
internalCloseTrade(player);
return;
}
player->setTradeState(TRADE_TRANSFER);
tradePartner->setTradeState(TRADE_TRANSFER);
auto it = tradeItems.find(tradeItem1);
if (it != tradeItems.end()) {
ReleaseItem(it->first);
tradeItems.erase(it);
}
it = tradeItems.find(tradeItem2);
if (it != tradeItems.end()) {
ReleaseItem(it->first);
tradeItems.erase(it);
}
bool isSuccess = false;
ReturnValue ret1 = internalAddItem(tradePartner, tradeItem1, INDEX_WHEREEVER, 0, true);
ReturnValue ret2 = internalAddItem(player, tradeItem2, INDEX_WHEREEVER, 0, true);
if (ret1 == RETURNVALUE_NOERROR && ret2 == RETURNVALUE_NOERROR) {
ret1 = internalRemoveItem(tradeItem1, tradeItem1->getItemCount(), true);
ret2 = internalRemoveItem(tradeItem2, tradeItem2->getItemCount(), true);
if (ret1 == RETURNVALUE_NOERROR && ret2 == RETURNVALUE_NOERROR) {
Cylinder* cylinder1 = tradeItem1->getParent();
Cylinder* cylinder2 = tradeItem2->getParent();
uint32_t count1 = tradeItem1->getItemCount();
uint32_t count2 = tradeItem2->getItemCount();
ret1 = internalMoveItem(cylinder1, tradePartner, INDEX_WHEREEVER, tradeItem1, count1, nullptr, FLAG_IGNOREAUTOSTACK, nullptr, tradeItem2);
if (ret1 == RETURNVALUE_NOERROR) {
internalMoveItem(cylinder2, player, INDEX_WHEREEVER, tradeItem2, count2, nullptr, FLAG_IGNOREAUTOSTACK);
tradeItem1->onTradeEvent(ON_TRADE_TRANSFER, tradePartner);
tradeItem2->onTradeEvent(ON_TRADE_TRANSFER, player);
isSuccess = true;
}
}
}
if (!isSuccess) {
std::string errorDescription;
if (tradePartner->tradeItem) {
errorDescription = getTradeErrorDescription(ret1, tradeItem1);
tradePartner->sendTextMessage(MESSAGE_EVENT_ADVANCE, errorDescription);
tradePartner->tradeItem->onTradeEvent(ON_TRADE_CANCEL, tradePartner);
}
if (player->tradeItem) {
errorDescription = getTradeErrorDescription(ret2, tradeItem2);
player->sendTextMessage(MESSAGE_EVENT_ADVANCE, errorDescription);
player->tradeItem->onTradeEvent(ON_TRADE_CANCEL, player);
}
}
player->setTradeState(TRADE_NONE);
player->tradeItem = nullptr;
player->tradePartner = nullptr;
player->sendTradeClose();
tradePartner->setTradeState(TRADE_NONE);
tradePartner->tradeItem = nullptr;
tradePartner->tradePartner = nullptr;
tradePartner->sendTradeClose();
}
}
std::string Game::getTradeErrorDescription(ReturnValue ret, Item* item)
{
if (item) {
if (ret == RETURNVALUE_NOTENOUGHCAPACITY) {
std::ostringstream ss;
ss << "You do not have enough capacity to carry";
if (item->isStackable() && item->getItemCount() > 1) {
ss << " these objects.";
} else {
ss << " this object.";
}
ss << "\n " << item->getWeightDescription();
return ss.str();
} else if (ret == RETURNVALUE_NOTENOUGHROOM || ret == RETURNVALUE_CONTAINERNOTENOUGHROOM) {
std::ostringstream ss;
ss << "You do not have enough room to carry";
if (item->isStackable() && item->getItemCount() > 1) {
ss << " these objects.";
} else {
ss << " this object.";
}
return ss.str();
}
}
return "Trade could not be completed.";
}
void Game::playerLookInTrade(uint32_t playerId, bool lookAtCounterOffer, uint8_t index)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
Player* tradePartner = player->tradePartner;
if (!tradePartner) {
return;
}
Item* tradeItem;
if (lookAtCounterOffer) {
tradeItem = tradePartner->getTradeItem();
} else {
tradeItem = player->getTradeItem();
}
if (!tradeItem) {
return;
}
const Position& playerPosition = player->getPosition();
const Position& tradeItemPosition = tradeItem->getPosition();
int32_t lookDistance = std::max<int32_t>(Position::getDistanceX(playerPosition, tradeItemPosition),
Position::getDistanceY(playerPosition, tradeItemPosition));
if (index == 0) {
g_events->eventPlayerOnLookInTrade(player, tradePartner, tradeItem, lookDistance);
return;
}
Container* tradeContainer = tradeItem->getContainer();
if (!tradeContainer) {
return;
}
std::vector<const Container*> containers {tradeContainer};
size_t i = 0;
while (i < containers.size()) {
const Container* container = containers[i++];
for (Item* item : container->getItemList()) {
Container* tmpContainer = item->getContainer();
if (tmpContainer) {
containers.push_back(tmpContainer);
}
if (--index == 0) {
g_events->eventPlayerOnLookInTrade(player, tradePartner, item, lookDistance);
return;
}
}
}
}
void Game::playerCloseTrade(uint32_t playerId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
internalCloseTrade(player);
}
void Game::internalCloseTrade(Player* player)
{
Player* tradePartner = player->tradePartner;
if ((tradePartner && tradePartner->getTradeState() == TRADE_TRANSFER) || player->getTradeState() == TRADE_TRANSFER) {
return;
}
if (player->getTradeItem()) {
auto it = tradeItems.find(player->getTradeItem());
if (it != tradeItems.end()) {
ReleaseItem(it->first);
tradeItems.erase(it);
}
player->tradeItem->onTradeEvent(ON_TRADE_CANCEL, player);
player->tradeItem = nullptr;
}
player->setTradeState(TRADE_NONE);
player->tradePartner = nullptr;
player->sendTextMessage(MESSAGE_STATUS_SMALL, "Trade cancelled.");
player->sendTradeClose();
if (tradePartner) {
if (tradePartner->getTradeItem()) {
auto it = tradeItems.find(tradePartner->getTradeItem());
if (it != tradeItems.end()) {
ReleaseItem(it->first);
tradeItems.erase(it);
}
tradePartner->tradeItem->onTradeEvent(ON_TRADE_CANCEL, tradePartner);
tradePartner->tradeItem = nullptr;
}
tradePartner->setTradeState(TRADE_NONE);
tradePartner->tradePartner = nullptr;
tradePartner->sendTextMessage(MESSAGE_STATUS_SMALL, "Trade cancelled.");
tradePartner->sendTradeClose();
}
}
void Game::playerPurchaseItem(uint32_t playerId, uint16_t spriteId, uint8_t count, uint8_t amount,
bool ignoreCap/* = false*/, bool inBackpacks/* = false*/)
{
if (amount == 0 || amount > 100) {
return;
}
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
int32_t onBuy, onSell;
Npc* merchant = player->getShopOwner(onBuy, onSell);
if (!merchant) {
return;
}
const ItemType& it = Item::items.getItemIdByClientId(spriteId);
if (it.id == 0) {
return;
}
uint8_t subType;
if (it.isSplash() || it.isFluidContainer()) {
subType = clientFluidToServer(count);
} else {
subType = count;
}
if (!player->hasShopItemForSale(it.id, subType)) {
return;
}
merchant->onPlayerTrade(player, onBuy, it.id, subType, amount, ignoreCap, inBackpacks);
}
void Game::playerSellItem(uint32_t playerId, uint16_t spriteId, uint8_t count, uint8_t amount, bool ignoreEquipped)
{
if (amount == 0 || amount > 100) {
return;
}
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
int32_t onBuy, onSell;
Npc* merchant = player->getShopOwner(onBuy, onSell);
if (!merchant) {
return;
}
const ItemType& it = Item::items.getItemIdByClientId(spriteId);
if (it.id == 0) {
return;
}
uint8_t subType;
if (it.isSplash() || it.isFluidContainer()) {
subType = clientFluidToServer(count);
} else {
subType = count;
}
merchant->onPlayerTrade(player, onSell, it.id, subType, amount, ignoreEquipped);
}
void Game::playerCloseShop(uint32_t playerId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
player->closeShopWindow();
}
void Game::playerLookInShop(uint32_t playerId, uint16_t spriteId, uint8_t count)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
int32_t onBuy, onSell;
Npc* merchant = player->getShopOwner(onBuy, onSell);
if (!merchant) {
return;
}
const ItemType& it = Item::items.getItemIdByClientId(spriteId);
if (it.id == 0) {
return;
}
int32_t subType;
if (it.isFluidContainer() || it.isSplash()) {
subType = clientFluidToServer(count);
} else {
subType = count;
}
if (!player->hasShopItemForSale(it.id, subType)) {
return;
}
if (!g_events->eventPlayerOnLookInShop(player, &it, subType)) {
return;
}
std::ostringstream ss;
ss << "You see " << Item::getDescription(it, 1, nullptr, subType);
player->sendTextMessage(MESSAGE_INFO_DESCR, ss.str());
}
void Game::playerLookAt(uint32_t playerId, const Position& pos, uint8_t stackPos)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
Thing* thing = internalGetThing(player, pos, stackPos, 0, STACKPOS_LOOK);
if (!thing) {
player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE);
return;
}
Position thingPos = thing->getPosition();
if (!player->canSee(thingPos)) {
player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE);
return;
}
Position playerPos = player->getPosition();
int32_t lookDistance;
if (thing != player) {
lookDistance = std::max<int32_t>(Position::getDistanceX(playerPos, thingPos), Position::getDistanceY(playerPos, thingPos));
if (playerPos.z != thingPos.z) {
lookDistance += 15;
}
} else {
lookDistance = -1;
}
g_events->eventPlayerOnLook(player, pos, thing, stackPos, lookDistance);
}
void Game::playerLookInBattleList(uint32_t playerId, uint32_t creatureId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
Creature* creature = getCreatureByID(creatureId);
if (!creature) {
return;
}
if (!player->canSeeCreature(creature)) {
return;
}
const Position& creaturePos = creature->getPosition();
if (!player->canSee(creaturePos)) {
return;
}
int32_t lookDistance;
if (creature != player) {
const Position& playerPos = player->getPosition();
lookDistance = std::max<int32_t>(Position::getDistanceX(playerPos, creaturePos), Position::getDistanceY(playerPos, creaturePos));
if (playerPos.z != creaturePos.z) {
lookDistance += 15;
}
} else {
lookDistance = -1;
}
g_events->eventPlayerOnLookInBattleList(player, creature, lookDistance);
}
void Game::playerCancelAttackAndFollow(uint32_t playerId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
playerSetAttackedCreature(playerId, 0);
playerFollowCreature(playerId, 0);
player->stopWalk();
}
void Game::playerSetAttackedCreature(uint32_t playerId, uint32_t creatureId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
if (player->getAttackedCreature() && creatureId == 0) {
player->setAttackedCreature(nullptr);
player->sendCancelTarget();
return;
}
Creature* attackCreature = getCreatureByID(creatureId);
if (!attackCreature) {
player->setAttackedCreature(nullptr);
player->sendCancelTarget();
return;
}
ReturnValue ret = Combat::canTargetCreature(player, attackCreature);
if (ret != RETURNVALUE_NOERROR) {
player->sendCancelMessage(ret);
player->sendCancelTarget();
player->setAttackedCreature(nullptr);
return;
}
player->setAttackedCreature(attackCreature);
g_dispatcher.addTask(createTask(std::bind(&Game::updateCreatureWalk, this, player->getID())));
}
void Game::playerFollowCreature(uint32_t playerId, uint32_t creatureId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
player->setAttackedCreature(nullptr);
g_dispatcher.addTask(createTask(std::bind(&Game::updateCreatureWalk, this, player->getID())));
player->setFollowCreature(getCreatureByID(creatureId));
}
void Game::playerSetFightModes(uint32_t playerId, fightMode_t fightMode, bool chaseMode, bool secureMode)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
player->setFightMode(fightMode);
player->setChaseMode(chaseMode);
player->setSecureMode(secureMode);
}
void Game::playerRequestAddVip(uint32_t playerId, const std::string& name)
{
if (name.length() > 20) {
return;
}
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
Player* vipPlayer = getPlayerByName(name);
if (!vipPlayer) {
uint32_t guid;
bool specialVip;
std::string formattedName = name;
if (!IOLoginData::getGuidByNameEx(guid, specialVip, formattedName)) {
player->sendTextMessage(MESSAGE_STATUS_SMALL, "A player with this name does not exist.");
return;
}
if (specialVip && !player->hasFlag(PlayerFlag_SpecialVIP)) {
player->sendTextMessage(MESSAGE_STATUS_SMALL, "You can not add this player.");
return;
}
player->addVIP(guid, formattedName, VIPSTATUS_OFFLINE);
} else {
if (vipPlayer->hasFlag(PlayerFlag_SpecialVIP) && !player->hasFlag(PlayerFlag_SpecialVIP)) {
player->sendTextMessage(MESSAGE_STATUS_SMALL, "You can not add this player.");
return;
}
if (!vipPlayer->isInGhostMode() || player->isAccessPlayer()) {
player->addVIP(vipPlayer->getGUID(), vipPlayer->getName(), VIPSTATUS_ONLINE);
} else {
player->addVIP(vipPlayer->getGUID(), vipPlayer->getName(), VIPSTATUS_OFFLINE);
}
}
}
void Game::playerRequestRemoveVip(uint32_t playerId, uint32_t guid)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
player->removeVIP(guid);
}
void Game::playerRequestEditVip(uint32_t playerId, uint32_t guid, const std::string& description, uint32_t icon, bool notify)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
player->editVIP(guid, description, icon, notify);
}
void Game::playerTurn(uint32_t playerId, Direction dir)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
if (!g_events->eventPlayerOnTurn(player, dir)) {
return;
}
player->resetIdleTime();
internalCreatureTurn(player, dir);
}
void Game::playerRequestOutfit(uint32_t playerId)
{
if (!g_config.getBoolean(ConfigManager::ALLOW_CHANGEOUTFIT)) {
return;
}
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
player->sendOutfitWindow();
}
void Game::playerToggleMount(uint32_t playerId, bool mount)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
player->toggleMount(mount);
}
void Game::playerChangeOutfit(uint32_t playerId, Outfit_t outfit)
{
if (!g_config.getBoolean(ConfigManager::ALLOW_CHANGEOUTFIT)) {
return;
}
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
const Outfit* playerOutfit = Outfits::getInstance().getOutfitByLookType(player->getSex(), outfit.lookType);
if (!playerOutfit) {
outfit.lookMount = 0;
}
if (outfit.lookMount != 0) {
Mount* mount = mounts.getMountByClientID(outfit.lookMount);
if (!mount) {
return;
}
if (!player->hasMount(mount)) {
return;
}
if (player->isMounted()) {
Mount* prevMount = mounts.getMountByID(player->getCurrentMount());
if (prevMount) {
changeSpeed(player, mount->speed - prevMount->speed);
}
player->setCurrentMount(mount->id);
} else {
player->setCurrentMount(mount->id);
outfit.lookMount = 0;
}
} else if (player->isMounted()) {
player->dismount();
}
if (player->canWear(outfit.lookType, outfit.lookAddons)) {
player->defaultOutfit = outfit;
if (player->hasCondition(CONDITION_OUTFIT)) {
return;
}
internalCreatureChangeOutfit(player, outfit);
}
}
void Game::playerShowQuestLog(uint32_t playerId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
player->sendQuestLog();
}
void Game::playerShowQuestLine(uint32_t playerId, uint16_t questId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
Quest* quest = quests.getQuestByID(questId);
if (!quest) {
return;
}
player->sendQuestLine(quest);
}
void Game::playerSay(uint32_t playerId, uint16_t channelId, SpeakClasses type,
const std::string& receiver, const std::string& text)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
player->resetIdleTime();
if (playerSaySpell(player, type, text)) {
return;
}
uint32_t muteTime = player->isMuted();
if (muteTime > 0) {
std::ostringstream ss;
ss << "You are still muted for " << muteTime << " seconds.";
player->sendTextMessage(MESSAGE_STATUS_SMALL, ss.str());
return;
}
if (!text.empty() && text.front() == '/' && player->isAccessPlayer()) {
return;
}
if (type != TALKTYPE_PRIVATE_PN) {
player->removeMessageBuffer();
}
switch (type) {
case TALKTYPE_SAY:
internalCreatureSay(player, TALKTYPE_SAY, text, false);
break;
case TALKTYPE_WHISPER:
playerWhisper(player, text);
break;
case TALKTYPE_YELL:
playerYell(player, text);
break;
case TALKTYPE_PRIVATE_TO:
case TALKTYPE_PRIVATE_RED_TO:
playerSpeakTo(player, type, receiver, text);
break;
case TALKTYPE_CHANNEL_O:
case TALKTYPE_CHANNEL_Y:
case TALKTYPE_CHANNEL_R1:
g_chat->talkToChannel(*player, type, text, channelId);
break;
case TALKTYPE_PRIVATE_PN:
playerSpeakToNpc(player, text);
break;
case TALKTYPE_BROADCAST:
playerBroadcastMessage(player, text);
break;
default:
break;
}
}
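//Returns true if the text was consumed as a talkaction or spell; successfully cast spells are echoed
//as normal or monster speech depending on the EMOTE_SPELLS setting.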
bool Game::playerSaySpell(Player* player, SpeakClasses type, const std::string& text)
{
std::string words = text;
TalkActionResult_t result = g_talkActions->playerSaySpell(player, type, words);
if (result == TALKACTION_BREAK) {
return true;
}
result = g_spells->playerSaySpell(player, words);
if (result == TALKACTION_BREAK) {
if (!g_config.getBoolean(ConfigManager::EMOTE_SPELLS)) {
return internalCreatureSay(player, TALKTYPE_SAY, words, false);
} else {
return internalCreatureSay(player, TALKTYPE_MONSTER_SAY, words, false);
}
} else if (result == TALKACTION_FAILED) {
return true;
}
return false;
}
void Game::playerWhisper(Player* player, const std::string& text)
{
SpectatorHashSet spectators;
map.getSpectators(spectators, player->getPosition(), false, false,
Map::maxClientViewportX, Map::maxClientViewportX,
Map::maxClientViewportY, Map::maxClientViewportY);
//send to client
for (Creature* spectator : spectators) {
if (Player* spectatorPlayer = spectator->getPlayer()) {
if (!Position::areInRange<1, 1>(player->getPosition(), spectatorPlayer->getPosition())) {
spectatorPlayer->sendCreatureSay(player, TALKTYPE_WHISPER, "pspsps");
} else {
spectatorPlayer->sendCreatureSay(player, TALKTYPE_WHISPER, text);
}
}
}
//event method
for (Creature* spectator : spectators) {
spectator->onCreatureSay(player, TALKTYPE_WHISPER, text);
}
}
bool Game::playerYell(Player* player, const std::string& text)
{
if (player->getLevel() == 1) {
player->sendTextMessage(MESSAGE_STATUS_SMALL, "You may not yell as long as you are on level 1.");
return false;
}
if (player->hasCondition(CONDITION_YELLTICKS)) {
player->sendCancelMessage(RETURNVALUE_YOUAREEXHAUSTED);
return false;
}
if (player->getAccountType() < ACCOUNT_TYPE_GAMEMASTER) {
Condition* condition = Condition::createCondition(CONDITIONID_DEFAULT, CONDITION_YELLTICKS, 30000, 0);
player->addCondition(condition);
}
internalCreatureSay(player, TALKTYPE_YELL, asUpperCaseString(text), false);
return true;
}
bool Game::playerSpeakTo(Player* player, SpeakClasses type, const std::string& receiver,
const std::string& text)
{
Player* toPlayer = getPlayerByName(receiver);
if (!toPlayer) {
player->sendTextMessage(MESSAGE_STATUS_SMALL, "A player with this name is not online.");
return false;
}
if (type == TALKTYPE_PRIVATE_RED_TO && (player->hasFlag(PlayerFlag_CanTalkRedPrivate) || player->getAccountType() >= ACCOUNT_TYPE_GAMEMASTER)) {
type = TALKTYPE_PRIVATE_RED_FROM;
} else {
type = TALKTYPE_PRIVATE_FROM;
}
toPlayer->sendPrivateMessage(player, type, text);
toPlayer->onCreatureSay(player, type, text);
if (toPlayer->isInGhostMode() && !player->isAccessPlayer()) {
player->sendTextMessage(MESSAGE_STATUS_SMALL, "A player with this name is not online.");
} else {
std::ostringstream ss;
ss << "Message sent to " << toPlayer->getName() << '.';
player->sendTextMessage(MESSAGE_STATUS_SMALL, ss.str());
}
return true;
}
void Game::playerSpeakToNpc(Player* player, const std::string& text)
{
SpectatorHashSet spectators;
map.getSpectators(spectators, player->getPosition());
for (Creature* spectator : spectators) {
if (spectator->getNpc()) {
spectator->onCreatureSay(player, TALKTYPE_PRIVATE_PN, text);
}
}
}
//--
bool Game::canThrowObjectTo(const Position& fromPos, const Position& toPos, bool checkLineOfSight /*= true*/,
int32_t rangex /*= Map::maxClientViewportX*/, int32_t rangey /*= Map::maxClientViewportY*/) const
{
return map.canThrowObjectTo(fromPos, toPos, checkLineOfSight, rangex, rangey);
}
bool Game::isSightClear(const Position& fromPos, const Position& toPos, bool floorCheck) const
{
return map.isSightClear(fromPos, toPos, floorCheck);
}
bool Game::internalCreatureTurn(Creature* creature, Direction dir)
{
if (creature->getDirection() == dir) {
return false;
}
creature->setDirection(dir);
//send to client
SpectatorHashSet spectators;
map.getSpectators(spectators, creature->getPosition(), true, true);
for (Creature* spectator : spectators) {
spectator->getPlayer()->sendCreatureTurn(creature);
}
return true;
}
bool Game::internalCreatureSay(Creature* creature, SpeakClasses type, const std::string& text,
bool ghostMode, SpectatorHashSet* spectatorsPtr/* = nullptr*/, const Position* pos/* = nullptr*/)
{
if (text.empty()) {
return false;
}
if (!pos) {
pos = &creature->getPosition();
}
SpectatorHashSet spectators;
if (!spectatorsPtr || spectatorsPtr->empty()) {
// This somewhat complex construct ensures that the cached SpectatorHashSet
// is used if available and if it can be used, else a local set is
// used (hopefully the compiler will optimize away the construction of
// the temporary when it's not used).
if (type != TALKTYPE_YELL && type != TALKTYPE_MONSTER_YELL) {
map.getSpectators(spectators, *pos, false, false,
Map::maxClientViewportX, Map::maxClientViewportX,
Map::maxClientViewportY, Map::maxClientViewportY);
} else {
map.getSpectators(spectators, *pos, true, false, 18, 18, 14, 14);
}
} else {
spectators = (*spectatorsPtr);
}
//send to client
for (Creature* spectator : spectators) {
if (Player* tmpPlayer = spectator->getPlayer()) {
if (!ghostMode || tmpPlayer->canSeeCreature(creature)) {
tmpPlayer->sendCreatureSay(creature, type, text, pos);
}
}
}
//event method
for (Creature* spectator : spectators) {
spectator->onCreatureSay(creature, type, text);
}
return true;
}
void Game::checkCreatureWalk(uint32_t creatureId)
{
Creature* creature = getCreatureByID(creatureId);
if (creature && creature->getHealth() > 0) {
creature->onWalk();
cleanup();
}
}
void Game::updateCreatureWalk(uint32_t creatureId)
{
Creature* creature = getCreatureByID(creatureId);
if (creature && creature->getHealth() > 0) {
creature->goToFollowCreature();
}
}
void Game::checkCreatureAttack(uint32_t creatureId)
{
Creature* creature = getCreatureByID(creatureId);
if (creature && creature->getHealth() > 0) {
creature->onAttacking(0);
}
}
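//Creatures scheduled for periodic checks are spread across EVENT_CREATURECOUNT buckets; removeCreatureCheck
//only clears the flag, the actual list removal and ReleaseCreature happen lazily inside checkCreatures().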
void Game::addCreatureCheck(Creature* creature)
{
creature->creatureCheck = true;
if (creature->inCheckCreaturesVector) {
// already in a vector
return;
}
creature->inCheckCreaturesVector = true;
checkCreatureLists[uniform_random(0, EVENT_CREATURECOUNT - 1)].push_back(creature);
creature->incrementReferenceCounter();
}
void Game::removeCreatureCheck(Creature* creature)
{
if (creature->inCheckCreaturesVector) {
creature->creatureCheck = false;
}
}
void Game::checkCreatures(size_t index)
{
g_scheduler.addEvent(createSchedulerTask(EVENT_CHECK_CREATURE_INTERVAL, std::bind(&Game::checkCreatures, this, (index + 1) % EVENT_CREATURECOUNT)));
auto& checkCreatureList = checkCreatureLists[index];
auto it = checkCreatureList.begin(), end = checkCreatureList.end();
while (it != end) {
Creature* creature = *it;
if (creature->creatureCheck) {
if (creature->getHealth() > 0) {
creature->onThink(EVENT_CREATURE_THINK_INTERVAL);
creature->onAttacking(EVENT_CREATURE_THINK_INTERVAL);
creature->executeConditions(EVENT_CREATURE_THINK_INTERVAL);
} else {
creature->onDeath();
}
++it;
} else {
creature->inCheckCreaturesVector = false;
it = checkCreatureList.erase(it);
ReleaseCreature(creature);
}
}
cleanup();
}
void Game::changeSpeed(Creature* creature, int32_t varSpeedDelta)
{
int32_t varSpeed = creature->getSpeed() - creature->getBaseSpeed();
varSpeed += varSpeedDelta;
creature->setSpeed(varSpeed);
//send to clients
SpectatorHashSet spectators;
map.getSpectators(spectators, creature->getPosition(), false, true);
for (Creature* spectator : spectators) {
spectator->getPlayer()->sendChangeSpeed(creature, creature->getStepSpeed());
}
}
void Game::internalCreatureChangeOutfit(Creature* creature, const Outfit_t& outfit)
{
if (!g_events->eventCreatureOnChangeOutfit(creature, outfit)) {
return;
}
creature->setCurrentOutfit(outfit);
if (creature->isInvisible()) {
return;
}
//send to clients
SpectatorHashSet spectators;
map.getSpectators(spectators, creature->getPosition(), true, true);
for (Creature* spectator : spectators) {
spectator->getPlayer()->sendCreatureChangeOutfit(creature, outfit);
}
}
void Game::internalCreatureChangeVisible(Creature* creature, bool visible)
{
//send to clients
SpectatorHashSet spectators;
map.getSpectators(spectators, creature->getPosition(), true, true);
for (Creature* spectator : spectators) {
spectator->getPlayer()->sendCreatureChangeVisible(creature, visible);
}
}
void Game::changeLight(const Creature* creature)
{
//send to clients
SpectatorHashSet spectators;
map.getSpectators(spectators, creature->getPosition(), true, true);
for (Creature* spectator : spectators) {
spectator->getPlayer()->sendCreatureLight(creature);
}
}
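//Runs the target's blockHit over the primary and secondary damage components (damage values are flipped
//to positive around the call), plays the matching block effect, and returns true only when both
//components were blocked.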
bool Game::combatBlockHit(CombatDamage& damage, Creature* attacker, Creature* target, bool checkDefense, bool checkArmor, bool field)
{
if (damage.primary.type == COMBAT_NONE && damage.secondary.type == COMBAT_NONE) {
return true;
}
if (target->getPlayer() && target->isInGhostMode()) {
return true;
}
if (damage.primary.value > 0) {
return false;
}
static const auto sendBlockEffect = [this](BlockType_t blockType, CombatType_t combatType, const Position& targetPos) {
if (blockType == BLOCK_DEFENSE) {
addMagicEffect(targetPos, CONST_ME_POFF);
} else if (blockType == BLOCK_ARMOR) {
addMagicEffect(targetPos, CONST_ME_BLOCKHIT);
} else if (blockType == BLOCK_IMMUNITY) {
uint8_t hitEffect = 0;
switch (combatType) {
case COMBAT_UNDEFINEDDAMAGE: {
return;
}
case COMBAT_ENERGYDAMAGE:
case COMBAT_FIREDAMAGE:
case COMBAT_PHYSICALDAMAGE:
case COMBAT_ICEDAMAGE:
case COMBAT_DEATHDAMAGE: {
hitEffect = CONST_ME_BLOCKHIT;
break;
}
case COMBAT_EARTHDAMAGE: {
hitEffect = CONST_ME_GREEN_RINGS;
break;
}
case COMBAT_HOLYDAMAGE: {
hitEffect = CONST_ME_HOLYDAMAGE;
break;
}
default: {
hitEffect = CONST_ME_POFF;
break;
}
}
addMagicEffect(targetPos, hitEffect);
}
};
BlockType_t primaryBlockType, secondaryBlockType;
if (damage.primary.type != COMBAT_NONE) {
damage.primary.value = -damage.primary.value;
primaryBlockType = target->blockHit(attacker, damage.primary.type, damage.primary.value, checkDefense, checkArmor, field);
damage.primary.value = -damage.primary.value;
sendBlockEffect(primaryBlockType, damage.primary.type, target->getPosition());
} else {
primaryBlockType = BLOCK_NONE;
}
if (damage.secondary.type != COMBAT_NONE) {
damage.secondary.value = -damage.secondary.value;
secondaryBlockType = target->blockHit(attacker, damage.secondary.type, damage.secondary.value, false, false, field);
damage.secondary.value = -damage.secondary.value;
sendBlockEffect(secondaryBlockType, damage.secondary.type, target->getPosition());
} else {
secondaryBlockType = BLOCK_NONE;
}
return (primaryBlockType != BLOCK_NONE) && (secondaryBlockType != BLOCK_NONE);
}
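//Picks the text color and magic effect for a combat type; physical hits on venom/blood races also
//spawn a matching splash on the target's tile (blood only outside protection zones).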
void Game::combatGetTypeInfo(CombatType_t combatType, Creature* target, TextColor_t& color, uint8_t& effect)
{
switch (combatType) {
case COMBAT_PHYSICALDAMAGE: {
Item* splash = nullptr;
switch (target->getRace()) {
case RACE_VENOM:
color = TEXTCOLOR_LIGHTGREEN;
effect = CONST_ME_HITBYPOISON;
splash = Item::CreateItem(ITEM_SMALLSPLASH, FLUID_SLIME);
break;
case RACE_BLOOD:
color = TEXTCOLOR_RED;
effect = CONST_ME_DRAWBLOOD;
if (const Tile* tile = target->getTile()) {
if (!tile->hasFlag(TILESTATE_PROTECTIONZONE)) {
splash = Item::CreateItem(ITEM_SMALLSPLASH, FLUID_BLOOD);
}
}
break;
case RACE_UNDEAD:
color = TEXTCOLOR_LIGHTGREY;
effect = CONST_ME_HITAREA;
break;
case RACE_FIRE:
color = TEXTCOLOR_ORANGE;
effect = CONST_ME_DRAWBLOOD;
break;
case RACE_ENERGY:
color = TEXTCOLOR_ELECTRICPURPLE;
effect = CONST_ME_ENERGYHIT;
break;
default:
color = TEXTCOLOR_NONE;
effect = CONST_ME_NONE;
break;
}
if (splash) {
internalAddItem(target->getTile(), splash, INDEX_WHEREEVER, FLAG_NOLIMIT);
startDecay(splash);
}
break;
}
case COMBAT_ENERGYDAMAGE: {
color = TEXTCOLOR_ELECTRICPURPLE;
effect = CONST_ME_ENERGYHIT;
break;
}
case COMBAT_EARTHDAMAGE: {
color = TEXTCOLOR_LIGHTGREEN;
effect = CONST_ME_GREEN_RINGS;
break;
}
case COMBAT_DROWNDAMAGE: {
color = TEXTCOLOR_LIGHTBLUE;
effect = CONST_ME_LOSEENERGY;
break;
}
case COMBAT_FIREDAMAGE: {
color = TEXTCOLOR_ORANGE;
effect = CONST_ME_HITBYFIRE;
break;
}
case COMBAT_ICEDAMAGE: {
color = TEXTCOLOR_SKYBLUE;
effect = CONST_ME_ICEATTACK;
break;
}
case COMBAT_HOLYDAMAGE: {
color = TEXTCOLOR_YELLOW;
effect = CONST_ME_HOLYDAMAGE;
break;
}
case COMBAT_DEATHDAMAGE: {
color = TEXTCOLOR_DARKRED;
effect = CONST_ME_SMALLCLOUDS;
break;
}
case COMBAT_LIFEDRAIN: {
color = TEXTCOLOR_RED;
effect = CONST_ME_MAGIC_RED;
break;
}
default: {
color = TEXTCOLOR_NONE;
effect = CONST_ME_NONE;
break;
}
}
}
bool Game::combatChangeHealth(Creature* attacker, Creature* target, CombatDamage& damage)
{
const Position& targetPos = target->getPosition();
if (damage.primary.value > 0) {
if (target->getHealth() <= 0) {
return false;
}
Player* attackerPlayer;
if (attacker) {
attackerPlayer = attacker->getPlayer();
} else {
attackerPlayer = nullptr;
}
Player* targetPlayer = target->getPlayer();
if (attackerPlayer && targetPlayer && attackerPlayer->getSkull() == SKULL_BLACK && attackerPlayer->getSkullClient(targetPlayer) == SKULL_NONE) {
return false;
}
if (damage.origin != ORIGIN_NONE) {
const auto& events = target->getCreatureEvents(CREATURE_EVENT_HEALTHCHANGE);
if (!events.empty()) {
for (CreatureEvent* creatureEvent : events) {
creatureEvent->executeHealthChange(target, attacker, damage);
}
damage.origin = ORIGIN_NONE;
return combatChangeHealth(attacker, target, damage);
}
}
int32_t realHealthChange = target->getHealth();
target->gainHealth(attacker, damage.primary.value);
realHealthChange = target->getHealth() - realHealthChange;
if (realHealthChange > 0 && !target->isInGhostMode()) {
std::stringstream ss;
ss << realHealthChange << (realHealthChange != 1 ? " hitpoints." : " hitpoint.");
std::string damageString = ss.str();
std::string spectatorMessage;
TextMessage message;
message.position = targetPos;
message.primary.value = realHealthChange;
message.primary.color = TEXTCOLOR_PASTELRED;
SpectatorHashSet spectators;
map.getSpectators(spectators, targetPos, false, true);
for (Creature* spectator : spectators) {
Player* tmpPlayer = spectator->getPlayer();
if (tmpPlayer == attackerPlayer && attackerPlayer != targetPlayer) {
ss.str({});
ss << "You heal " << target->getNameDescription() << " for " << damageString;
message.type = MESSAGE_HEALED;
message.text = ss.str();
} else if (tmpPlayer == targetPlayer) {
ss.str({});
if (!attacker) {
ss << "You were healed";
} else if (targetPlayer == attackerPlayer) {
ss << "You healed yourself";
} else {
ss << "You were healed by " << attacker->getNameDescription();
}
ss << " for " << damageString;
message.type = MESSAGE_HEALED;
message.text = ss.str();
} else {
if (spectatorMessage.empty()) {
ss.str({});
if (!attacker) {
ss << ucfirst(target->getNameDescription()) << " was healed";
} else {
ss << ucfirst(attacker->getNameDescription()) << " healed ";
if (attacker == target) {
ss << (targetPlayer ? (targetPlayer->getSex() == PLAYERSEX_FEMALE ? "herself" : "himself") : "itself");
} else {
ss << target->getNameDescription();
}
}
ss << " for " << damageString;
spectatorMessage = ss.str();
}
message.type = MESSAGE_HEALED_OTHERS;
message.text = spectatorMessage;
}
tmpPlayer->sendTextMessage(message);
}
}
} else {
if (!target->isAttackable()) {
if (!target->isInGhostMode()) {
addMagicEffect(targetPos, CONST_ME_POFF);
}
return true;
}
Player* attackerPlayer;
if (attacker) {
attackerPlayer = attacker->getPlayer();
} else {
attackerPlayer = nullptr;
}
Player* targetPlayer = target->getPlayer();
if (attackerPlayer && targetPlayer && attackerPlayer->getSkull() == SKULL_BLACK && attackerPlayer->getSkullClient(targetPlayer) == SKULL_NONE) {
return false;
}
damage.primary.value = std::abs(damage.primary.value);
damage.secondary.value = std::abs(damage.secondary.value);
int32_t healthChange = damage.primary.value + damage.secondary.value;
if (healthChange == 0) {
return true;
}
if (attackerPlayer) {
uint16_t chance = attackerPlayer->getSpecialSkill(SPECIALSKILL_LIFELEECHCHANCE);
if (chance != 0 && uniform_random(1, 100) <= chance) {
CombatDamage lifeLeech;
lifeLeech.primary.value = std::round(healthChange * (attackerPlayer->getSpecialSkill(SPECIALSKILL_LIFELEECHAMOUNT) / 100.));
g_game.combatChangeHealth(nullptr, attackerPlayer, lifeLeech);
}
chance = attackerPlayer->getSpecialSkill(SPECIALSKILL_MANALEECHCHANCE);
if (chance != 0 && uniform_random(1, 100) <= chance) {
CombatDamage manaLeech;
manaLeech.primary.value = std::round(healthChange * (attackerPlayer->getSpecialSkill(SPECIALSKILL_MANALEECHAMOUNT) / 100.));
g_game.combatChangeMana(nullptr, attackerPlayer, manaLeech);
}
chance = attackerPlayer->getSpecialSkill(SPECIALSKILL_CRITICALHITCHANCE);
if (chance != 0 && uniform_random(1, 100) <= chance) {
healthChange += std::round(healthChange * (attackerPlayer->getSpecialSkill(SPECIALSKILL_CRITICALHITAMOUNT) / 100.));
g_game.addMagicEffect(target->getPosition(), CONST_ME_CRITICAL_DAMAGE);
}
}
TextMessage message;
message.position = targetPos;
SpectatorHashSet spectators;
if (targetPlayer && target->hasCondition(CONDITION_MANASHIELD) && damage.primary.type != COMBAT_UNDEFINEDDAMAGE) {
int32_t manaDamage = std::min<int32_t>(targetPlayer->getMana(), healthChange);
if (manaDamage != 0) {
if (damage.origin != ORIGIN_NONE) {
const auto& events = target->getCreatureEvents(CREATURE_EVENT_MANACHANGE);
if (!events.empty()) {
for (CreatureEvent* creatureEvent : events) {
creatureEvent->executeManaChange(target, attacker, damage);
}
healthChange = damage.primary.value + damage.secondary.value;
if (healthChange == 0) {
return true;
}
manaDamage = std::min<int32_t>(targetPlayer->getMana(), healthChange);
}
}
targetPlayer->drainMana(attacker, manaDamage);
map.getSpectators(spectators, targetPos, true, true);
addMagicEffect(spectators, targetPos, CONST_ME_LOSEENERGY);
std::stringstream ss;
std::string damageString = std::to_string(manaDamage);
std::string spectatorMessage;
message.primary.value = manaDamage;
message.primary.color = TEXTCOLOR_BLUE;
for (Creature* spectator : spectators) {
Player* tmpPlayer = spectator->getPlayer();
if (tmpPlayer->getPosition().z != targetPos.z) {
continue;
}
if (tmpPlayer == attackerPlayer && attackerPlayer != targetPlayer) {
ss.str({});
ss << ucfirst(target->getNameDescription()) << " loses " << damageString + " mana due to your attack.";
message.type = MESSAGE_DAMAGE_DEALT;
message.text = ss.str();
} else if (tmpPlayer == targetPlayer) {
ss.str({});
ss << "You lose " << damageString << " mana";
if (!attacker) {
ss << '.';
} else if (targetPlayer == attackerPlayer) {
ss << " due to your own attack.";
} else {
ss << " due to an attack by " << attacker->getNameDescription() << '.';
}
message.type = MESSAGE_DAMAGE_RECEIVED;
message.text = ss.str();
} else {
if (spectatorMessage.empty()) {
ss.str({});
ss << ucfirst(target->getNameDescription()) << " loses " << damageString + " mana";
if (attacker) {
ss << " due to ";
if (attacker == target) {
ss << (targetPlayer->getSex() == PLAYERSEX_FEMALE ? "her own attack" : "his own attack");
} else {
ss << "an attack by " << attacker->getNameDescription();
}
}
ss << '.';
spectatorMessage = ss.str();
}
message.type = MESSAGE_DAMAGE_OTHERS;
message.text = spectatorMessage;
}
tmpPlayer->sendTextMessage(message);
}
damage.primary.value -= manaDamage;
if (damage.primary.value < 0) {
damage.secondary.value = std::max<int32_t>(0, damage.secondary.value + damage.primary.value);
damage.primary.value = 0;
}
}
}
int32_t realDamage = damage.primary.value + damage.secondary.value;
if (realDamage == 0) {
return true;
}
if (damage.origin != ORIGIN_NONE) {
const auto& events = target->getCreatureEvents(CREATURE_EVENT_HEALTHCHANGE);
if (!events.empty()) {
for (CreatureEvent* creatureEvent : events) {
creatureEvent->executeHealthChange(target, attacker, damage);
}
damage.origin = ORIGIN_NONE;
return combatChangeHealth(attacker, target, damage);
}
}
int32_t targetHealth = target->getHealth();
if (damage.primary.value >= targetHealth) {
damage.primary.value = targetHealth;
damage.secondary.value = 0;
} else if (damage.secondary.value) {
damage.secondary.value = std::min<int32_t>(damage.secondary.value, targetHealth - damage.primary.value);
}
realDamage = damage.primary.value + damage.secondary.value;
if (realDamage == 0) {
return true;
}
if (spectators.empty()) {
map.getSpectators(spectators, targetPos, true, true);
}
message.primary.value = damage.primary.value;
message.secondary.value = damage.secondary.value;
uint8_t hitEffect;
if (message.primary.value) {
combatGetTypeInfo(damage.primary.type, target, message.primary.color, hitEffect);
if (hitEffect != CONST_ME_NONE) {
addMagicEffect(spectators, targetPos, hitEffect);
}
}
if (message.secondary.value) {
combatGetTypeInfo(damage.secondary.type, target, message.secondary.color, hitEffect);
if (hitEffect != CONST_ME_NONE) {
addMagicEffect(spectators, targetPos, hitEffect);
}
}
if (message.primary.color != TEXTCOLOR_NONE || message.secondary.color != TEXTCOLOR_NONE) {
std::stringstream ss;
ss << realDamage << (realDamage != 1 ? " hitpoints" : " hitpoint");
std::string damageString = ss.str();
std::string spectatorMessage;
for (Creature* spectator : spectators) {
Player* tmpPlayer = spectator->getPlayer();
if (tmpPlayer->getPosition().z != targetPos.z) {
continue;
}
if (tmpPlayer == attackerPlayer && attackerPlayer != targetPlayer) {
ss.str({});
ss << ucfirst(target->getNameDescription()) << " loses " << damageString << " due to your attack.";
message.type = MESSAGE_DAMAGE_DEALT;
message.text = ss.str();
} else if (tmpPlayer == targetPlayer) {
ss.str({});
ss << "You lose " << damageString;
if (!attacker) {
ss << '.';
} else if (targetPlayer == attackerPlayer) {
ss << " due to your own attack.";
} else {
ss << " due to an attack by " << attacker->getNameDescription() << '.';
}
message.type = MESSAGE_DAMAGE_RECEIVED;
message.text = ss.str();
} else {
message.type = MESSAGE_DAMAGE_OTHERS;
if (spectatorMessage.empty()) {
ss.str({});
ss << ucfirst(target->getNameDescription()) << " loses " << damageString;
if (attacker) {
ss << " due to ";
if (attacker == target) {
if (targetPlayer) {
ss << (targetPlayer->getSex() == PLAYERSEX_FEMALE ? "her own attack" : "his own attack");
} else {
ss << "its own attack";
}
} else {
ss << "an attack by " << attacker->getNameDescription();
}
}
ss << '.';
spectatorMessage = ss.str();
}
message.text = spectatorMessage;
}
tmpPlayer->sendTextMessage(message);
}
}
if (realDamage >= targetHealth) {
for (CreatureEvent* creatureEvent : target->getCreatureEvents(CREATURE_EVENT_PREPAREDEATH)) {
if (!creatureEvent->executeOnPrepareDeath(target, attacker)) {
return false;
}
}
}
target->drainHealth(attacker, realDamage);
addCreatureHealth(spectators, target);
}
return true;
}
bool Game::combatChangeMana(Creature* attacker, Creature* target, CombatDamage& damage)
{
Player* targetPlayer = target->getPlayer();
if (!targetPlayer) {
return true;
}
int32_t manaChange = damage.primary.value + damage.secondary.value;
if (manaChange > 0) {
if (attacker) {
const Player* attackerPlayer = attacker->getPlayer();
if (attackerPlayer && attackerPlayer->getSkull() == SKULL_BLACK && attackerPlayer->getSkullClient(target) == SKULL_NONE) {
return false;
}
}
if (damage.origin != ORIGIN_NONE) {
const auto& events = target->getCreatureEvents(CREATURE_EVENT_MANACHANGE);
if (!events.empty()) {
for (CreatureEvent* creatureEvent : events) {
creatureEvent->executeManaChange(target, attacker, damage);
}
damage.origin = ORIGIN_NONE;
return combatChangeMana(attacker, target, damage);
}
}
int32_t realManaChange = targetPlayer->getMana();
targetPlayer->changeMana(manaChange);
realManaChange = targetPlayer->getMana() - realManaChange;
if (realManaChange > 0 && !targetPlayer->isInGhostMode()) {
TextMessage message(MESSAGE_HEALED, "You gained " + std::to_string(realManaChange) + " mana.");
message.position = target->getPosition();
message.primary.value = realManaChange;
message.primary.color = TEXTCOLOR_MAYABLUE;
targetPlayer->sendTextMessage(message);
}
} else {
const Position& targetPos = target->getPosition();
if (!target->isAttackable()) {
if (!target->isInGhostMode()) {
addMagicEffect(targetPos, CONST_ME_POFF);
}
return false;
}
Player* attackerPlayer;
if (attacker) {
attackerPlayer = attacker->getPlayer();
} else {
attackerPlayer = nullptr;
}
if (attackerPlayer && attackerPlayer->getSkull() == SKULL_BLACK && attackerPlayer->getSkullClient(targetPlayer) == SKULL_NONE) {
return false;
}
int32_t manaLoss = std::min<int32_t>(targetPlayer->getMana(), -manaChange);
BlockType_t blockType = target->blockHit(attacker, COMBAT_MANADRAIN, manaLoss);
if (blockType != BLOCK_NONE) {
addMagicEffect(targetPos, CONST_ME_POFF);
return false;
}
if (manaLoss <= 0) {
return true;
}
if (damage.origin != ORIGIN_NONE) {
const auto& events = target->getCreatureEvents(CREATURE_EVENT_MANACHANGE);
if (!events.empty()) {
for (CreatureEvent* creatureEvent : events) {
creatureEvent->executeManaChange(target, attacker, damage);
}
damage.origin = ORIGIN_NONE;
return combatChangeMana(attacker, target, damage);
}
}
targetPlayer->drainMana(attacker, manaLoss);
std::stringstream ss;
std::string damageString = std::to_string(manaLoss);
std::string spectatorMessage;
TextMessage message;
message.position = targetPos;
message.primary.value = manaLoss;
message.primary.color = TEXTCOLOR_BLUE;
SpectatorHashSet spectators;
map.getSpectators(spectators, targetPos, false, true);
for (Creature* spectator : spectators) {
Player* tmpPlayer = spectator->getPlayer();
if (tmpPlayer == attackerPlayer && attackerPlayer != targetPlayer) {
ss.str({});
ss << ucfirst(target->getNameDescription()) << " loses " << damageString << " mana due to your attack.";
message.type = MESSAGE_DAMAGE_DEALT;
message.text = ss.str();
} else if (tmpPlayer == targetPlayer) {
ss.str({});
ss << "You lose " << damageString << " mana";
if (!attacker) {
ss << '.';
} else if (targetPlayer == attackerPlayer) {
ss << " due to your own attack.";
} else {
ss << " mana due to an attack by " << attacker->getNameDescription() << '.';
}
message.type = MESSAGE_DAMAGE_RECEIVED;
message.text = ss.str();
} else {
if (spectatorMessage.empty()) {
ss.str({});
ss << ucfirst(target->getNameDescription()) << " loses " << damageString << " mana";
if (attacker) {
ss << " due to ";
if (attacker == target) {
ss << (targetPlayer->getSex() == PLAYERSEX_FEMALE ? "her own attack" : "his own attack");
} else {
ss << "an attack by " << attacker->getNameDescription();
}
}
ss << '.';
spectatorMessage = ss.str();
}
message.type = MESSAGE_DAMAGE_OTHERS;
message.text = spectatorMessage;
}
tmpPlayer->sendTextMessage(message);
}
}
return true;
}
void Game::addCreatureHealth(const Creature* target)
{
SpectatorHashSet spectators;
map.getSpectators(spectators, target->getPosition(), true, true);
addCreatureHealth(spectators, target);
}
void Game::addCreatureHealth(const SpectatorHashSet& spectators, const Creature* target)
{
for (Creature* spectator : spectators) {
if (Player* tmpPlayer = spectator->getPlayer()) {
tmpPlayer->sendCreatureHealth(target);
}
}
}
void Game::addMagicEffect(const Position& pos, uint8_t effect)
{
SpectatorHashSet spectators;
map.getSpectators(spectators, pos, true, true);
addMagicEffect(spectators, pos, effect);
}
void Game::addMagicEffect(const SpectatorHashSet& spectators, const Position& pos, uint8_t effect)
{
for (Creature* spectator : spectators) {
if (Player* tmpPlayer = spectator->getPlayer()) {
tmpPlayer->sendMagicEffect(pos, effect);
}
}
}
void Game::addDistanceEffect(const Position& fromPos, const Position& toPos, uint8_t effect)
{
SpectatorHashSet spectators;
map.getSpectators(spectators, fromPos, false, true);
map.getSpectators(spectators, toPos, false, true);
addDistanceEffect(spectators, fromPos, toPos, effect);
}
void Game::addDistanceEffect(const SpectatorHashSet& spectators, const Position& fromPos, const Position& toPos, uint8_t effect)
{
for (Creature* spectator : spectators) {
if (Player* tmpPlayer = spectator->getPlayer()) {
tmpPlayer->sendDistanceShoot(fromPos, toPos, effect);
}
}
}
void Game::startDecay(Item* item)
{
if (!item || !item->canDecay()) {
return;
}
ItemDecayState_t decayState = item->getDecaying();
if (decayState == DECAYING_TRUE) {
return;
}
if (item->getDuration() > 0) {
item->incrementReferenceCounter();
item->setDecaying(DECAYING_TRUE);
toDecayItems.push_front(item);
} else {
internalDecayItem(item);
}
}
void Game::internalDecayItem(Item* item)
{
const ItemType& it = Item::items[item->getID()];
if (it.decayTo != 0) {
Item* newItem = transformItem(item, it.decayTo);
startDecay(newItem);
} else {
ReturnValue ret = internalRemoveItem(item);
if (ret != RETURNVALUE_NOERROR) {
std::cout << "[Debug - Game::internalDecayItem] internalDecayItem failed, error code: " << static_cast<uint32_t>(ret) << ", item id: " << item->getID() << std::endl;
}
}
}
void Game::checkDecay()
{
g_scheduler.addEvent(createSchedulerTask(EVENT_DECAYINTERVAL, std::bind(&Game::checkDecay, this)));
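// Decay processing is spread across EVENT_DECAY_BUCKETS buckets: each call handles
// a single bucket, so an individual item is revisited roughly once every
// EVENT_DECAYINTERVAL * EVENT_DECAY_BUCKETS milliseconds. Items that would expire
// before their bucket comes around again are moved to a nearer bucket or decayed
// immediately below.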
size_t bucket = (lastBucket + 1) % EVENT_DECAY_BUCKETS;
auto it = decayItems[bucket].begin(), end = decayItems[bucket].end();
while (it != end) {
Item* item = *it;
if (!item->canDecay()) {
item->setDecaying(DECAYING_FALSE);
ReleaseItem(item);
it = decayItems[bucket].erase(it);
continue;
}
int32_t duration = item->getDuration();
int32_t decreaseTime = std::min<int32_t>(EVENT_DECAYINTERVAL * EVENT_DECAY_BUCKETS, duration);
duration -= decreaseTime;
item->decreaseDuration(decreaseTime);
if (duration <= 0) {
it = decayItems[bucket].erase(it);
internalDecayItem(item);
ReleaseItem(item);
} else if (duration < EVENT_DECAYINTERVAL * EVENT_DECAY_BUCKETS) {
it = decayItems[bucket].erase(it);
size_t newBucket = (bucket + ((duration + EVENT_DECAYINTERVAL / 2) / 1000)) % EVENT_DECAY_BUCKETS;
if (newBucket == bucket) {
internalDecayItem(item);
ReleaseItem(item);
} else {
decayItems[newBucket].push_back(item);
}
} else {
++it;
}
}
lastBucket = bucket;
cleanup();
}
void Game::checkLight()
{
g_scheduler.addEvent(createSchedulerTask(EVENT_LIGHTINTERVAL, std::bind(&Game::checkLight, this)));
lightHour += lightHourDelta;
if (lightHour > 1440) {
lightHour -= 1440;
}
if (std::abs(lightHour - SUNRISE) < 2 * lightHourDelta) {
lightState = LIGHT_STATE_SUNRISE;
} else if (std::abs(lightHour - SUNSET) < 2 * lightHourDelta) {
lightState = LIGHT_STATE_SUNSET;
}
int32_t newLightLevel = lightLevel;
bool lightChange = false;
switch (lightState) {
case LIGHT_STATE_SUNRISE: {
newLightLevel += (LIGHT_LEVEL_DAY - LIGHT_LEVEL_NIGHT) / 30;
lightChange = true;
break;
}
case LIGHT_STATE_SUNSET: {
newLightLevel -= (LIGHT_LEVEL_DAY - LIGHT_LEVEL_NIGHT) / 30;
lightChange = true;
break;
}
default:
break;
}
if (newLightLevel <= LIGHT_LEVEL_NIGHT) {
lightLevel = LIGHT_LEVEL_NIGHT;
lightState = LIGHT_STATE_NIGHT;
} else if (newLightLevel >= LIGHT_LEVEL_DAY) {
lightLevel = LIGHT_LEVEL_DAY;
lightState = LIGHT_STATE_DAY;
} else {
lightLevel = newLightLevel;
}
if (lightChange) {
LightInfo lightInfo = getWorldLightInfo();
for (const auto& it : players) {
it.second->sendWorldLight(lightInfo);
}
}
}
LightInfo Game::getWorldLightInfo() const
{
return {lightLevel, 0xD7};
}
void Game::shutdown()
{
std::cout << "Shutting down..." << std::flush;
g_scheduler.shutdown();
g_databaseTasks.shutdown();
g_dispatcher.shutdown();
map.spawns.clear();
raids.clear();
cleanup();
if (serviceManager) {
serviceManager->stop();
}
ConnectionManager::getInstance().closeAll();
std::cout << " done!" << std::endl;
}
void Game::cleanup()
{
//free memory
for (auto creature : ToReleaseCreatures) {
creature->decrementReferenceCounter();
}
ToReleaseCreatures.clear();
for (auto item : ToReleaseItems) {
item->decrementReferenceCounter();
}
ToReleaseItems.clear();
for (Item* item : toDecayItems) {
const uint32_t dur = item->getDuration();
if (dur >= EVENT_DECAYINTERVAL * EVENT_DECAY_BUCKETS) {
decayItems[lastBucket].push_back(item);
} else {
decayItems[(lastBucket + 1 + dur / 1000) % EVENT_DECAY_BUCKETS].push_back(item);
}
}
toDecayItems.clear();
}
void Game::ReleaseCreature(Creature* creature)
{
ToReleaseCreatures.push_back(creature);
}
void Game::ReleaseItem(Item* item)
{
ToReleaseItems.push_back(item);
}
void Game::broadcastMessage(const std::string& text, MessageClasses type) const
{
std::cout << "> Broadcasted message: \"" << text << "\"." << std::endl;
for (const auto& it : players) {
it.second->sendTextMessage(type, text);
}
}
void Game::updateCreatureWalkthrough(const Creature* creature)
{
//send to clients
SpectatorHashSet spectators;
map.getSpectators(spectators, creature->getPosition(), true, true);
for (Creature* spectator : spectators) {
Player* tmpPlayer = spectator->getPlayer();
tmpPlayer->sendCreatureWalkthrough(creature, tmpPlayer->canWalkthroughEx(creature));
}
}
void Game::updateCreatureSkull(const Creature* creature)
{
if (getWorldType() != WORLD_TYPE_PVP) {
return;
}
SpectatorHashSet spectators;
map.getSpectators(spectators, creature->getPosition(), true, true);
for (Creature* spectator : spectators) {
spectator->getPlayer()->sendCreatureSkull(creature);
}
}
void Game::updatePlayerShield(Player* player)
{
SpectatorHashSet spectators;
map.getSpectators(spectators, player->getPosition(), true, true);
for (Creature* spectator : spectators) {
spectator->getPlayer()->sendCreatureShield(player);
}
}
void Game::updatePlayerHelpers(const Player& player)
{
uint32_t creatureId = player.getID();
uint16_t helpers = player.getHelpers();
SpectatorHashSet spectators;
map.getSpectators(spectators, player.getPosition(), true, true);
for (Creature* spectator : spectators) {
spectator->getPlayer()->sendCreatureHelpers(creatureId, helpers);
}
}
void Game::updateCreatureType(Creature* creature)
{
const Player* masterPlayer = nullptr;
uint32_t creatureId = creature->getID();
CreatureType_t creatureType = creature->getType();
if (creatureType == CREATURETYPE_MONSTER) {
const Creature* master = creature->getMaster();
if (master) {
masterPlayer = master->getPlayer();
if (masterPlayer) {
creatureType = CREATURETYPE_SUMMON_OTHERS;
}
}
}
//send to clients
SpectatorHashSet spectators;
map.getSpectators(spectators, creature->getPosition(), true, true);
if (creatureType == CREATURETYPE_SUMMON_OTHERS) {
for (Creature* spectator : spectators) {
Player* player = spectator->getPlayer();
if (masterPlayer == player) {
player->sendCreatureType(creatureId, CREATURETYPE_SUMMON_OWN);
} else {
player->sendCreatureType(creatureId, creatureType);
}
}
} else {
for (Creature* spectator : spectators) {
spectator->getPlayer()->sendCreatureType(creatureId, creatureType);
}
}
}
void Game::updatePremium(Account& account)
{
bool save = false;
time_t timeNow = time(nullptr);
if (account.premiumDays != 0 && account.premiumDays != std::numeric_limits<uint16_t>::max()) {
if (account.lastDay == 0) {
account.lastDay = timeNow;
save = true;
} else {
uint32_t days = (timeNow - account.lastDay) / 86400;
if (days > 0) {
if (days >= account.premiumDays) {
account.premiumDays = 0;
account.lastDay = 0;
} else {
account.premiumDays -= days;
time_t remainder = (timeNow - account.lastDay) % 86400;
account.lastDay = timeNow - remainder;
}
save = true;
}
}
} else if (account.lastDay != 0) {
account.lastDay = 0;
save = true;
}
if (save && !IOLoginData::saveAccount(account)) {
std::cout << "> ERROR: Failed to save account: " << account.name << "!" << std::endl;
}
}
void Game::loadMotdNum()
{
Database& db = Database::getInstance();
DBResult_ptr result = db.storeQuery("SELECT `value` FROM `server_config` WHERE `config` = 'motd_num'");
if (result) {
motdNum = result->getNumber<uint32_t>("value");
} else {
db.executeQuery("INSERT INTO `server_config` (`config`, `value`) VALUES ('motd_num', '0')");
}
result = db.storeQuery("SELECT `value` FROM `server_config` WHERE `config` = 'motd_hash'");
if (result) {
motdHash = result->getString("value");
if (motdHash != transformToSHA1(g_config.getString(ConfigManager::MOTD))) {
++motdNum;
}
} else {
db.executeQuery("INSERT INTO `server_config` (`config`, `value`) VALUES ('motd_hash', '')");
}
}
void Game::saveMotdNum() const
{
Database& db = Database::getInstance();
std::ostringstream query;
query << "UPDATE `server_config` SET `value` = '" << motdNum << "' WHERE `config` = 'motd_num'";
db.executeQuery(query.str());
query.str(std::string());
query << "UPDATE `server_config` SET `value` = '" << transformToSHA1(g_config.getString(ConfigManager::MOTD)) << "' WHERE `config` = 'motd_hash'";
db.executeQuery(query.str());
}
void Game::checkPlayersRecord()
{
const size_t playersOnline = getPlayersOnline();
if (playersOnline > playersRecord) {
uint32_t previousRecord = playersRecord;
playersRecord = playersOnline;
for (auto& it : g_globalEvents->getEventMap(GLOBALEVENT_RECORD)) {
it.second.executeRecord(playersRecord, previousRecord);
}
updatePlayersRecord();
}
}
void Game::updatePlayersRecord() const
{
Database& db = Database::getInstance();
std::ostringstream query;
query << "UPDATE `server_config` SET `value` = '" << playersRecord << "' WHERE `config` = 'players_record'";
db.executeQuery(query.str());
}
void Game::loadPlayersRecord()
{
Database& db = Database::getInstance();
DBResult_ptr result = db.storeQuery("SELECT `value` FROM `server_config` WHERE `config` = 'players_record'");
if (result) {
playersRecord = result->getNumber<uint32_t>("value");
} else {
db.executeQuery("INSERT INTO `server_config` (`config`, `value`) VALUES ('players_record', '0')");
}
}
uint64_t Game::getExperienceStage(uint32_t level)
{
if (!stagesEnabled) {
return g_config.getNumber(ConfigManager::RATE_EXPERIENCE);
}
if (useLastStageLevel && level >= lastStageLevel) {
return stages[lastStageLevel];
}
return stages[level];
}
bool Game::loadExperienceStages()
{
pugi::xml_document doc;
pugi::xml_parse_result result = doc.load_file("data/XML/stages.xml");
if (!result) {
printXMLError("Error - Game::loadExperienceStages", "data/XML/stages.xml", result);
return false;
}
for (auto stageNode : doc.child("stages").children()) {
if (strcasecmp(stageNode.name(), "config") == 0) {
stagesEnabled = stageNode.attribute("enabled").as_bool();
} else {
uint32_t minLevel, maxLevel, multiplier;
pugi::xml_attribute minLevelAttribute = stageNode.attribute("minlevel");
if (minLevelAttribute) {
minLevel = pugi::cast<uint32_t>(minLevelAttribute.value());
} else {
minLevel = 1;
}
pugi::xml_attribute maxLevelAttribute = stageNode.attribute("maxlevel");
if (maxLevelAttribute) {
maxLevel = pugi::cast<uint32_t>(maxLevelAttribute.value());
} else {
maxLevel = 0;
lastStageLevel = minLevel;
useLastStageLevel = true;
}
pugi::xml_attribute multiplierAttribute = stageNode.attribute("multiplier");
if (multiplierAttribute) {
multiplier = pugi::cast<uint32_t>(multiplierAttribute.value());
} else {
multiplier = 1;
}
if (useLastStageLevel) {
stages[lastStageLevel] = multiplier;
} else {
for (uint32_t i = minLevel; i <= maxLevel; ++i) {
stages[i] = multiplier;
}
}
}
}
return true;
}
void Game::playerInviteToParty(uint32_t playerId, uint32_t invitedId)
{
if (playerId == invitedId) {
return;
}
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
Player* invitedPlayer = getPlayerByID(invitedId);
if (!invitedPlayer || invitedPlayer->isInviting(player)) {
return;
}
if (invitedPlayer->getParty()) {
std::ostringstream ss;
ss << invitedPlayer->getName() << " is already in a party.";
player->sendTextMessage(MESSAGE_INFO_DESCR, ss.str());
return;
}
Party* party = player->getParty();
if (!party) {
party = new Party(player);
} else if (party->getLeader() != player) {
return;
}
party->invitePlayer(*invitedPlayer);
}
void Game::playerJoinParty(uint32_t playerId, uint32_t leaderId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
Player* leader = getPlayerByID(leaderId);
if (!leader || !leader->isInviting(player)) {
return;
}
Party* party = leader->getParty();
if (!party || party->getLeader() != leader) {
return;
}
if (player->getParty()) {
player->sendTextMessage(MESSAGE_INFO_DESCR, "You are already in a party.");
return;
}
party->joinParty(*player);
}
void Game::playerRevokePartyInvitation(uint32_t playerId, uint32_t invitedId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
Party* party = player->getParty();
if (!party || party->getLeader() != player) {
return;
}
Player* invitedPlayer = getPlayerByID(invitedId);
if (!invitedPlayer || !player->isInviting(invitedPlayer)) {
return;
}
party->revokeInvitation(*invitedPlayer);
}
void Game::playerPassPartyLeadership(uint32_t playerId, uint32_t newLeaderId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
Party* party = player->getParty();
if (!party || party->getLeader() != player) {
return;
}
Player* newLeader = getPlayerByID(newLeaderId);
if (!newLeader || !player->isPartner(newLeader)) {
return;
}
party->passPartyLeadership(newLeader);
}
void Game::playerLeaveParty(uint32_t playerId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
Party* party = player->getParty();
if (!party || player->hasCondition(CONDITION_INFIGHT)) {
return;
}
party->leaveParty(player);
}
void Game::playerEnableSharedPartyExperience(uint32_t playerId, bool sharedExpActive)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
Party* party = player->getParty();
if (!party || (player->hasCondition(CONDITION_INFIGHT) && player->getZone() != ZONE_PROTECTION)) {
return;
}
party->setSharedExperience(player, sharedExpActive);
}
void Game::sendGuildMotd(uint32_t playerId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
Guild* guild = player->getGuild();
if (guild) {
player->sendChannelMessage("Message of the Day", guild->getMotd(), TALKTYPE_CHANNEL_R1, CHANNEL_GUILD);
}
}
void Game::kickPlayer(uint32_t playerId, bool displayEffect)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
player->kickPlayer(displayEffect);
}
void Game::playerReportRuleViolation(uint32_t playerId, const std::string& targetName, uint8_t reportType, uint8_t reportReason, const std::string& comment, const std::string& translation)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
g_events->eventPlayerOnReportRuleViolation(player, targetName, reportType, reportReason, comment, translation);
}
void Game::playerReportBug(uint32_t playerId, const std::string& message, const Position& position, uint8_t category)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
g_events->eventPlayerOnReportBug(player, message, position, category);
}
void Game::playerDebugAssert(uint32_t playerId, const std::string& assertLine, const std::string& date, const std::string& description, const std::string& comment)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
// TODO: move debug assertions to database
FILE* file = fopen("client_assertions.txt", "a");
if (file) {
fprintf(file, "----- %s - %s (%s) -----\n", formatDate(time(nullptr)).c_str(), player->getName().c_str(), convertIPToString(player->getIP()).c_str());
fprintf(file, "%s\n%s\n%s\n%s\n", assertLine.c_str(), date.c_str(), description.c_str(), comment.c_str());
fclose(file);
}
}
void Game::playerLeaveMarket(uint32_t playerId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
player->setInMarket(false);
}
void Game::playerBrowseMarket(uint32_t playerId, uint16_t spriteId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
if (!player->isInMarket()) {
return;
}
const ItemType& it = Item::items.getItemIdByClientId(spriteId);
if (it.id == 0) {
return;
}
if (it.wareId == 0) {
return;
}
const MarketOfferList& buyOffers = IOMarket::getActiveOffers(MARKETACTION_BUY, it.id);
const MarketOfferList& sellOffers = IOMarket::getActiveOffers(MARKETACTION_SELL, it.id);
player->sendMarketBrowseItem(it.id, buyOffers, sellOffers);
player->sendMarketDetail(it.id);
}
void Game::playerBrowseMarketOwnOffers(uint32_t playerId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
if (!player->isInMarket()) {
return;
}
const MarketOfferList& buyOffers = IOMarket::getOwnOffers(MARKETACTION_BUY, player->getGUID());
const MarketOfferList& sellOffers = IOMarket::getOwnOffers(MARKETACTION_SELL, player->getGUID());
player->sendMarketBrowseOwnOffers(buyOffers, sellOffers);
}
void Game::playerBrowseMarketOwnHistory(uint32_t playerId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
if (!player->isInMarket()) {
return;
}
const HistoryMarketOfferList& buyOffers = IOMarket::getOwnHistory(MARKETACTION_BUY, player->getGUID());
const HistoryMarketOfferList& sellOffers = IOMarket::getOwnHistory(MARKETACTION_SELL, player->getGUID());
player->sendMarketBrowseOwnHistory(buyOffers, sellOffers);
}
void Game::playerCreateMarketOffer(uint32_t playerId, uint8_t type, uint16_t spriteId, uint16_t amount, uint32_t price, bool anonymous)
{
if (amount == 0 || amount > 64000) {
return;
}
if (price == 0 || price > 999999999) {
return;
}
if (type != MARKETACTION_BUY && type != MARKETACTION_SELL) {
return;
}
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
if (!player->isInMarket()) {
return;
}
if (g_config.getBoolean(ConfigManager::MARKET_PREMIUM) && !player->isPremium()) {
player->sendMarketLeave();
return;
}
const ItemType& itt = Item::items.getItemIdByClientId(spriteId);
if (itt.id == 0 || itt.wareId == 0) {
return;
}
const ItemType& it = Item::items.getItemIdByClientId(itt.wareId);
if (it.id == 0 || it.wareId == 0) {
return;
}
if (!it.stackable && amount > 2000) {
return;
}
const uint32_t maxOfferCount = g_config.getNumber(ConfigManager::MAX_MARKET_OFFERS_AT_A_TIME_PER_PLAYER);
if (maxOfferCount != 0 && IOMarket::getPlayerOfferCount(player->getGUID()) >= maxOfferCount) {
return;
}
uint64_t fee = (price / 100.) * amount;
if (fee < 20) {
fee = 20;
} else if (fee > 1000) {
fee = 1000;
}
if (type == MARKETACTION_SELL) {
if (fee > player->bankBalance) {
return;
}
DepotChest* depotChest = player->getDepotChest(player->getLastDepotId(), false);
if (!depotChest) {
return;
}
std::forward_list<Item*> itemList = getMarketItemList(it.wareId, amount, depotChest, player->getInbox());
if (itemList.empty()) {
return;
}
if (it.stackable) {
uint16_t tmpAmount = amount;
for (Item* item : itemList) {
uint16_t removeCount = std::min<uint16_t>(tmpAmount, item->getItemCount());
tmpAmount -= removeCount;
internalRemoveItem(item, removeCount);
if (tmpAmount == 0) {
break;
}
}
} else {
for (Item* item : itemList) {
internalRemoveItem(item);
}
}
player->bankBalance -= fee;
} else {
uint64_t totalPrice = static_cast<uint64_t>(price) * amount;
totalPrice += fee;
if (totalPrice > player->bankBalance) {
return;
}
player->bankBalance -= totalPrice;
}
IOMarket::createOffer(player->getGUID(), static_cast<MarketAction_t>(type), it.id, amount, price, anonymous);
player->sendMarketEnter(player->getLastDepotId());
const MarketOfferList& buyOffers = IOMarket::getActiveOffers(MARKETACTION_BUY, it.id);
const MarketOfferList& sellOffers = IOMarket::getActiveOffers(MARKETACTION_SELL, it.id);
player->sendMarketBrowseItem(it.id, buyOffers, sellOffers);
}
void Game::playerCancelMarketOffer(uint32_t playerId, uint32_t timestamp, uint16_t counter)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
if (!player->isInMarket()) {
return;
}
MarketOfferEx offer = IOMarket::getOfferByCounter(timestamp, counter);
if (offer.id == 0 || offer.playerId != player->getGUID()) {
return;
}
if (offer.type == MARKETACTION_BUY) {
player->bankBalance += static_cast<uint64_t>(offer.price) * offer.amount;
player->sendMarketEnter(player->getLastDepotId());
} else {
const ItemType& it = Item::items[offer.itemId];
if (it.id == 0) {
return;
}
if (it.stackable) {
uint16_t tmpAmount = offer.amount;
while (tmpAmount > 0) {
int32_t stackCount = std::min<int32_t>(100, tmpAmount);
Item* item = Item::CreateItem(it.id, stackCount);
if (internalAddItem(player->getInbox(), item, INDEX_WHEREEVER, FLAG_NOLIMIT) != RETURNVALUE_NOERROR) {
delete item;
break;
}
tmpAmount -= stackCount;
}
} else {
int32_t subType;
if (it.charges != 0) {
subType = it.charges;
} else {
subType = -1;
}
for (uint16_t i = 0; i < offer.amount; ++i) {
Item* item = Item::CreateItem(it.id, subType);
if (internalAddItem(player->getInbox(), item, INDEX_WHEREEVER, FLAG_NOLIMIT) != RETURNVALUE_NOERROR) {
delete item;
break;
}
}
}
}
IOMarket::moveOfferToHistory(offer.id, OFFERSTATE_CANCELLED);
offer.amount = 0;
offer.timestamp += g_config.getNumber(ConfigManager::MARKET_OFFER_DURATION);
player->sendMarketCancelOffer(offer);
player->sendMarketEnter(player->getLastDepotId());
}
void Game::playerAcceptMarketOffer(uint32_t playerId, uint32_t timestamp, uint16_t counter, uint16_t amount)
{
if (amount == 0 || amount > 64000) {
return;
}
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
if (!player->isInMarket()) {
return;
}
MarketOfferEx offer = IOMarket::getOfferByCounter(timestamp, counter);
if (offer.id == 0) {
return;
}
if (amount > offer.amount) {
return;
}
const ItemType& it = Item::items[offer.itemId];
if (it.id == 0) {
return;
}
uint64_t totalPrice = static_cast<uint64_t>(offer.price) * amount;
if (offer.type == MARKETACTION_BUY) {
DepotChest* depotChest = player->getDepotChest(player->getLastDepotId(), false);
if (!depotChest) {
return;
}
std::forward_list<Item*> itemList = getMarketItemList(it.wareId, amount, depotChest, player->getInbox());
if (itemList.empty()) {
return;
}
Player* buyerPlayer = getPlayerByGUID(offer.playerId);
if (!buyerPlayer) {
buyerPlayer = new Player(nullptr);
if (!IOLoginData::loadPlayerById(buyerPlayer, offer.playerId)) {
delete buyerPlayer;
return;
}
}
if (it.stackable) {
uint16_t tmpAmount = amount;
for (Item* item : itemList) {
uint16_t removeCount = std::min<uint16_t>(tmpAmount, item->getItemCount());
tmpAmount -= removeCount;
internalRemoveItem(item, removeCount);
if (tmpAmount == 0) {
break;
}
}
} else {
for (Item* item : itemList) {
internalRemoveItem(item);
}
}
player->bankBalance += totalPrice;
if (it.stackable) {
uint16_t tmpAmount = amount;
while (tmpAmount > 0) {
uint16_t stackCount = std::min<uint16_t>(100, tmpAmount);
Item* item = Item::CreateItem(it.id, stackCount);
if (internalAddItem(buyerPlayer->getInbox(), item, INDEX_WHEREEVER, FLAG_NOLIMIT) != RETURNVALUE_NOERROR) {
delete item;
break;
}
tmpAmount -= stackCount;
}
} else {
int32_t subType;
if (it.charges != 0) {
subType = it.charges;
} else {
subType = -1;
}
for (uint16_t i = 0; i < amount; ++i) {
Item* item = Item::CreateItem(it.id, subType);
if (internalAddItem(buyerPlayer->getInbox(), item, INDEX_WHEREEVER, FLAG_NOLIMIT) != RETURNVALUE_NOERROR) {
delete item;
break;
}
}
}
if (buyerPlayer->isOffline()) {
IOLoginData::savePlayer(buyerPlayer);
delete buyerPlayer;
} else {
buyerPlayer->onReceiveMail();
}
} else {
if (totalPrice > player->bankBalance) {
return;
}
player->bankBalance -= totalPrice;
if (it.stackable) {
uint16_t tmpAmount = amount;
while (tmpAmount > 0) {
uint16_t stackCount = std::min<uint16_t>(100, tmpAmount);
Item* item = Item::CreateItem(it.id, stackCount);
if (internalAddItem(player->getInbox(), item, INDEX_WHEREEVER, FLAG_NOLIMIT) != RETURNVALUE_NOERROR) {
delete item;
break;
}
tmpAmount -= stackCount;
}
} else {
int32_t subType;
if (it.charges != 0) {
subType = it.charges;
} else {
subType = -1;
}
for (uint16_t i = 0; i < amount; ++i) {
Item* item = Item::CreateItem(it.id, subType);
if (internalAddItem(player->getInbox(), item, INDEX_WHEREEVER, FLAG_NOLIMIT) != RETURNVALUE_NOERROR) {
delete item;
break;
}
}
}
Player* sellerPlayer = getPlayerByGUID(offer.playerId);
if (sellerPlayer) {
sellerPlayer->bankBalance += totalPrice;
} else {
IOLoginData::increaseBankBalance(offer.playerId, totalPrice);
}
player->onReceiveMail();
}
const int32_t marketOfferDuration = g_config.getNumber(ConfigManager::MARKET_OFFER_DURATION);
IOMarket::appendHistory(player->getGUID(), (offer.type == MARKETACTION_BUY ? MARKETACTION_SELL : MARKETACTION_BUY), offer.itemId, amount, offer.price, offer.timestamp + marketOfferDuration, OFFERSTATE_ACCEPTEDEX);
IOMarket::appendHistory(offer.playerId, offer.type, offer.itemId, amount, offer.price, offer.timestamp + marketOfferDuration, OFFERSTATE_ACCEPTED);
offer.amount -= amount;
if (offer.amount == 0) {
IOMarket::deleteOffer(offer.id);
} else {
IOMarket::acceptOffer(offer.id, amount);
}
player->sendMarketEnter(player->getLastDepotId());
offer.timestamp += marketOfferDuration;
player->sendMarketAcceptOffer(offer);
}
void Game::parsePlayerExtendedOpcode(uint32_t playerId, uint8_t opcode, const std::string& buffer)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
for (CreatureEvent* creatureEvent : player->getCreatureEvents(CREATURE_EVENT_EXTENDED_OPCODE)) {
creatureEvent->executeExtendedOpcode(player, opcode, buffer);
}
}
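// Walks the depot chest, the inbox and any nested containers collecting items that
// match wareId and are eligible for the market, stopping as soon as sufficientCount
// pieces have been gathered; returns an empty list if the containers do not hold
// enough matching items.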
std::forward_list<Item*> Game::getMarketItemList(uint16_t wareId, uint16_t sufficientCount, DepotChest* depotChest, Inbox* inbox)
{
std::forward_list<Item*> itemList;
uint16_t count = 0;
std::list<Container*> containers { depotChest, inbox };
do {
Container* container = containers.front();
containers.pop_front();
for (Item* item : container->getItemList()) {
Container* c = item->getContainer();
if (c && !c->empty()) {
containers.push_back(c);
continue;
}
const ItemType& itemType = Item::items[item->getID()];
if (itemType.wareId != wareId) {
continue;
}
if (c && (!itemType.isContainer() || c->capacity() != itemType.maxItems)) {
continue;
}
if (!item->hasMarketAttributes()) {
continue;
}
itemList.push_front(item);
count += Item::countByType(item, -1);
if (count >= sufficientCount) {
return itemList;
}
}
} while (!containers.empty());
return std::forward_list<Item*>();
}
void Game::forceAddCondition(uint32_t creatureId, Condition* condition)
{
Creature* creature = getCreatureByID(creatureId);
if (!creature) {
delete condition;
return;
}
creature->addCondition(condition, true);
}
void Game::forceRemoveCondition(uint32_t creatureId, ConditionType_t type)
{
Creature* creature = getCreatureByID(creatureId);
if (!creature) {
return;
}
creature->removeCondition(type, true);
}
void Game::sendOfflineTrainingDialog(Player* player)
{
if (!player) {
return;
}
if (!player->hasModalWindowOpen(offlineTrainingWindow.id)) {
player->sendModalWindow(offlineTrainingWindow);
}
}
void Game::playerAnswerModalWindow(uint32_t playerId, uint32_t modalWindowId, uint8_t button, uint8_t choice)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
if (!player->hasModalWindowOpen(modalWindowId)) {
return;
}
player->onModalWindowHandled(modalWindowId);
// offline training, hardcoded
if (modalWindowId == std::numeric_limits<uint32_t>::max()) {
if (button == 1) {
if (choice == SKILL_SWORD || choice == SKILL_AXE || choice == SKILL_CLUB || choice == SKILL_DISTANCE || choice == SKILL_MAGLEVEL) {
BedItem* bedItem = player->getBedItem();
if (bedItem && bedItem->sleep(player)) {
player->setOfflineTrainingSkill(choice);
return;
}
}
} else {
player->sendTextMessage(MESSAGE_EVENT_ADVANCE, "Offline training aborted.");
}
player->setBedItem(nullptr);
} else {
for (auto creatureEvent : player->getCreatureEvents(CREATURE_EVENT_MODALWINDOW)) {
creatureEvent->executeModalWindow(player, modalWindowId, button, choice);
}
}
}
void Game::addPlayer(Player* player)
{
const std::string& lowercase_name = asLowerCaseString(player->getName());
mappedPlayerNames[lowercase_name] = player;
wildcardTree.insert(lowercase_name);
players[player->getID()] = player;
}
void Game::removePlayer(Player* player)
{
const std::string& lowercase_name = asLowerCaseString(player->getName());
mappedPlayerNames.erase(lowercase_name);
wildcardTree.remove(lowercase_name);
players.erase(player->getID());
}
void Game::addNpc(Npc* npc)
{
npcs[npc->getID()] = npc;
}
void Game::removeNpc(Npc* npc)
{
npcs.erase(npc->getID());
}
void Game::addMonster(Monster* monster)
{
monsters[monster->getID()] = monster;
}
void Game::removeMonster(Monster* monster)
{
monsters.erase(monster->getID());
}
Guild* Game::getGuild(uint32_t id) const
{
auto it = guilds.find(id);
if (it == guilds.end()) {
return nullptr;
}
return it->second;
}
void Game::addGuild(Guild* guild)
{
guilds[guild->getId()] = guild;
}
void Game::removeGuild(uint32_t guildId)
{
guilds.erase(guildId);
}
void Game::decreaseBrowseFieldRef(const Position& pos)
{
Tile* tile = map.getTile(pos.x, pos.y, pos.z);
if (!tile) {
return;
}
auto it = browseFields.find(tile);
if (it != browseFields.end()) {
it->second->decrementReferenceCounter();
}
}
void Game::internalRemoveItems(std::vector<Item*> itemList, uint32_t amount, bool stackable)
{
if (stackable) {
for (Item* item : itemList) {
if (item->getItemCount() > amount) {
internalRemoveItem(item, amount);
break;
} else {
amount -= item->getItemCount();
internalRemoveItem(item);
}
}
} else {
for (Item* item : itemList) {
internalRemoveItem(item);
}
}
}
BedItem* Game::getBedBySleeper(uint32_t guid) const
{
auto it = bedSleepersMap.find(guid);
if (it == bedSleepersMap.end()) {
return nullptr;
}
return it->second;
}
void Game::setBedSleeper(BedItem* bed, uint32_t guid)
{
bedSleepersMap[guid] = bed;
}
void Game::removeBedSleeper(uint32_t guid)
{
auto it = bedSleepersMap.find(guid);
if (it != bedSleepersMap.end()) {
bedSleepersMap.erase(it);
}
}
Item* Game::getUniqueItem(uint16_t uniqueId)
{
auto it = uniqueItems.find(uniqueId);
if (it == uniqueItems.end()) {
return nullptr;
}
return it->second;
}
bool Game::addUniqueItem(uint16_t uniqueId, Item* item)
{
auto result = uniqueItems.emplace(uniqueId, item);
if (!result.second) {
std::cout << "Duplicate unique id: " << uniqueId << std::endl;
}
return result.second;
}
void Game::removeUniqueItem(uint16_t uniqueId)
{
auto it = uniqueItems.find(uniqueId);
if (it != uniqueItems.end()) {
uniqueItems.erase(it);
}
}
bool Game::reload(ReloadTypes_t reloadType)
{
switch (reloadType) {
case RELOAD_TYPE_ACTIONS: return g_actions->reload();
case RELOAD_TYPE_CHAT: return g_chat->load();
case RELOAD_TYPE_CONFIG: return g_config.reload();
case RELOAD_TYPE_CREATURESCRIPTS: return g_creatureEvents->reload();
case RELOAD_TYPE_EVENTS: return g_events->load();
case RELOAD_TYPE_GLOBALEVENTS: return g_globalEvents->reload();
case RELOAD_TYPE_ITEMS: return Item::items.reload();
case RELOAD_TYPE_MONSTERS: return g_monsters.reload();
case RELOAD_TYPE_MOUNTS: return mounts.reload();
case RELOAD_TYPE_MOVEMENTS: return g_moveEvents->reload();
case RELOAD_TYPE_NPCS: {
Npcs::reload();
return true;
}
case RELOAD_TYPE_QUESTS: return quests.reload();
case RELOAD_TYPE_RAIDS: return raids.reload() && raids.startup();
case RELOAD_TYPE_SPELLS: {
if (!g_spells->reload()) {
std::cout << "[Error - Game::reload] Failed to reload spells." << std::endl;
std::terminate();
} else if (!g_monsters.reload()) {
std::cout << "[Error - Game::reload] Failed to reload monsters." << std::endl;
std::terminate();
}
return true;
}
case RELOAD_TYPE_TALKACTIONS: return g_talkActions->reload();
case RELOAD_TYPE_WEAPONS: {
bool results = g_weapons->reload();
g_weapons->loadDefaults();
return results;
}
default: {
if (!g_spells->reload()) {
std::cout << "[Error - Game::reload] Failed to reload spells." << std::endl;
std::terminate();
} else if (!g_monsters.reload()) {
std::cout << "[Error - Game::reload] Failed to reload monsters." << std::endl;
std::terminate();
}
g_actions->reload();
g_config.reload();
g_creatureEvents->reload();
g_monsters.reload();
g_moveEvents->reload();
Npcs::reload();
raids.reload() && raids.startup();
g_talkActions->reload();
Item::items.reload();
g_weapons->reload();
g_weapons->loadDefaults();
quests.reload();
mounts.reload();
g_globalEvents->reload();
g_events->load();
g_chat->load();
return true;
}
}
}
| 1 | 15,606 | This is not consistent. Either rename the first to `tradePlayerItem` (which is ugly) or the second to `partnerTradeItem` (:+1:) | otland-forgottenserver | cpp |
@@ -50,6 +50,7 @@ func deps() {
"go get -u github.com/alecthomas/gometalinter",
"gometalinter --install",
"go get -u github.com/stretchr/testify",
+ "go get -u github.com/xeipuuv/gojsonschema",
}
for _, name := range list { | 1 | package main
import (
"fmt"
"log"
"os"
"os/exec"
"runtime"
"strings"
)
var lineBreak = "\n"
func init() {
log.SetFlags(0)
if runtime.GOOS == "windows" {
lineBreak = "\r\n"
}
}
// run executes a given command on the shell, like
// `run("git status")`
func run(name string) string {
args := strings.Split(name, " ")
return runParts(args...)
}
func runParts(args ...string) string {
name := strings.Join(args, " ")
cmd := exec.Command(args[0], args[1:]...) // #nosec
log.Println(name)
out, err := cmd.CombinedOutput()
if err != nil {
log.Printf("%s", out)
log.Fatalf("Command '%s' failed: %s\n", name, err)
}
return strings.Trim(string(out), lineBreak)
}
// deps installs all dependencies
func deps() {
log.Println("Installing dependencies...")
list := []string{
"go get -u github.com/whyrusleeping/gx",
"go get -u github.com/whyrusleeping/gx-go",
"./fetch_go-ipfs_deps.sh",
"gx install",
"go get -u github.com/alecthomas/gometalinter",
"gometalinter --install",
"go get -u github.com/stretchr/testify",
}
for _, name := range list {
log.Println(run(name))
}
}
// lint runs linting using gometalinter
func lint(packages ...string) {
if len(packages) == 0 {
packages = []string{"./..."}
}
log.Printf("Linting %s ...\n", strings.Join(packages, " "))
// Run fast linters batched together
configs := []string{
"gometalinter",
"--skip=sharness",
"--skip=vendor",
"--disable-all",
}
fastLinters := []string{
"--enable=vet",
"--enable=gofmt",
"--enable=misspell",
"--enable=goconst",
"--enable=golint",
"--enable=errcheck",
"--min-occurrences=6", // for goconst
}
log.Println(runParts(append(append(configs, fastLinters...), packages...)...))
slowLinters := []string{
"--deadline=10m",
"--enable=unconvert",
"--enable=gosimple",
"--enable=megacheck",
"--enable=varcheck",
"--enable=structcheck",
"--enable=deadcode",
}
log.Println(runParts(append(append(configs, slowLinters...), packages...)...))
}
func build() {
log.Println("Building...")
commit := run("git log -n 1 --format=%H")
log.Println(
runParts(
"go", "build",
"-ldflags", fmt.Sprintf("-X github.com/filecoin-project/go-filecoin/flags.Commit=%s", commit),
"-v", "-o", "go-filecoin", ".",
),
)
}
func install() {
log.Println("Installing...")
log.Println(runParts("go", "install"))
}
// test executes tests and passes along all additional arguments to `go test`.
func test(args ...string) {
log.Println("Testing...")
log.Println(run(fmt.Sprintf("go test ./... %s", strings.Join(args, " "))))
}
func main() {
args := os.Args[1:]
if len(args) == 0 {
log.Fatalf("Missing command")
}
cmd := args[0]
switch cmd {
case "deps":
deps()
case "lint":
lint(args[1:]...)
case "build":
build()
case "test":
test(args[1:]...)
case "install":
install()
case "best":
build()
test(args[1:]...)
case "all":
deps()
lint()
build()
test(args[1:]...)
default:
log.Fatalf("Unknown command: %s\n", cmd)
}
}
| 1 | 11,654 | is there a reason we don't want to gx this dependency? | filecoin-project-venus | go |
@@ -26,6 +26,18 @@
require_once("../inc/util.inc");
+// strip leading AMD or NVIDIA
+//
+function strip_vendor($model) {
+ foreach (array("AMD ", "NVIDIA ") as $maker) {
+ $n = strlen($maker);
+ if (substr($model, 0, $n) == $maker) {
+ return substr($model, $n);
+ }
+ }
+ return $model;
+}
+
// take a host.serialnum field (which may encode several GPUs)
// and extract the model name for the given vendor
// | 1 | <?php
// This file is part of BOINC.
// http://boinc.berkeley.edu
// Copyright (C) 2011 University of California
//
// BOINC is free software; you can redistribute it and/or modify it
// under the terms of the GNU Lesser General Public License
// as published by the Free Software Foundation,
// either version 3 of the License, or (at your option) any later version.
//
// BOINC is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
// See the GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with BOINC. If not, see <http://www.gnu.org/licenses/>.
// generate a page of the best-performing GPU models.
//
// "best-performing" is defined as minimizing the average of
//
// elapsed_time(J)/rsc_fpops_est(J)
// over completed jobs J currently in the DB
require_once("../inc/util.inc");
// take a host.serialnum field (which may encode several GPUs)
// and extract the model name for the given vendor
//
function get_gpu_model($x, $vendor) {
$descs = explode("]", $x);
array_pop($descs);
foreach ($descs as $desc) {
$desc = trim($desc, "[");
$d = explode("|", $desc);
if ($d[0] == "BOINC") continue;
if ($d[0] != $vendor) continue;
return $d[1];
}
return null;
}
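// Usage sketch (not from the original BOINC sources): the serialnum string below is
// a made-up example whose "[vendor|model|...]" layout is inferred from the parsing
// logic above, so treat the exact field order as an assumption.
//
// $serialnum = "[BOINC|7.6.22][CUDA|GeForce GTX 750|1|2048MB|35075]";
// get_gpu_model($serialnum, "CUDA"); // returns "GeForce GTX 750"
// get_gpu_model($serialnum, "CAL"); // returns null, no ATI/AMD entry present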
function add_model($model, $r, $wu, &$models) {
if (array_key_exists($model, $models)) {
$models[$model]->count++;
$models[$model]->time += $r->elapsed_time/$wu->rsc_fpops_est;
} else {
$x = new StdClass;
$x->count = 1;
$x->time = $r->elapsed_time/$wu->rsc_fpops_est;
$models[$model] = $x;
}
}
// return a data structure containing GPU usage info for a vendor
// $x->total: combined list
// $x->windows
// $x->linux
// $x->mac
//
function get_gpu_list($vendor, $alt_vendor=null) {
$clause = "plan_class like '%$vendor%'";
if ($alt_vendor) {
$clause .= " or plan_class like '%$alt_vendor%'";
}
$avs = BoincAppVersion::enum($clause);
if (count($avs) == 0) {
$x = new StdClass;
$x->total = array();
return $x;
}
$av_ids = "";
foreach($avs as $av) {
$av_ids .= "$av->id, ";
}
if ($vendor == "cuda") {
$av_ids .= "-3";
} else if ($vendor == "ati") {
$av_ids .= "-4";
} else if ($vendor == "intel_gpu") {
$av_ids .= "-5";
} else {
$av_ids .= "0";
}
$t = time() - 30*86400;
//echo "start enum $vendor $av_ids\n";
$results = BoincResult::enum(
"app_version_id in ($av_ids) and create_time > $t and elapsed_time>100 limit 2000"
);
//echo "end enum\n";
$total = array();
$win = array();
$linux = array();
$mac = array();
foreach ($results as $r) {
$h = BoincHost::lookup_id($r->hostid);
if (!$h) continue;
$wu = BoincWorkunit::lookup_id($r->workunitid);
if (!$wu) continue;
$v = "";
if ($vendor == "cuda") {
$v = "CUDA";
} else if ($vendor == "intel_gpu") {
$v = "INTEL";
} else {
$v = "CAL";
}
$model = get_gpu_model($h->serialnum, $v);
if (!$model) continue;
add_model($model, $r, $wu, $total);
if (strstr($h->os_name, "Windows")) {
add_model($model, $r, $wu, $win);
}
if (strstr($h->os_name, "Linux")) {
add_model($model, $r, $wu, $linux);
}
if (strstr($h->os_name, "Darwin")) {
add_model($model, $r, $wu, $mac);
}
}
$x = new StdClass;
$x->total = $total;
$x->win = $win;
$x->linux = $linux;
$x->mac = $mac;
return $x;
}
function get_gpu_lists() {
$x = new StdClass;
$x->cuda = get_gpu_list("cuda", "nvidia");
$x->ati = get_gpu_list("ati", "amd");
$x->intel_gpu = get_gpu_list("intel_gpu");
$x->time = time();
return $x;
}
function gpucmp($x1, $x2) {
return $x1->avg_time > $x2->avg_time;
}
function show_list($models, $name) {
echo "<td><h2>$name</h2>\n";
if (!count($models)) {
echo tra("No GPU tasks reported")."</td>\n";
return;
}
$max_count = 0;
foreach ($models as $model=>$x) {
if ($x->count > $max_count) $max_count = $x->count;
$x->avg_time = $x->time/$x->count;
}
$min_time = 1e9;
foreach ($models as $model=>$x) {
if ($x->count < $max_count/10) continue;
if ($x->avg_time < $min_time) $min_time = $x->avg_time;
}
uasort($models, 'gpucmp');
echo "<ol>\n";
foreach ($models as $model=>$x) {
if ($x->count < $max_count/10) continue;
$s = number_format($min_time/$x->avg_time, 3);
echo "<li>($s) $model\n";
}
echo "</ol></td>\n";
}
function show_vendor($vendor, $x) {
echo "<h2>$vendor</h2>\n";
if (!count($x->total)) {
echo tra("No GPU tasks reported");
return;
}
$have_win = count($x->win)>0;
$have_mac = count($x->mac)>0;
$have_linux = count($x->linux)>0;
$n = 0;
if ($have_win) $n++;
if ($have_mac) $n++;
if ($have_linux) $n++;
$show_total = $n>1;
start_table();
echo "<tr>";
if ($show_total) {
show_list($x->total, "Total");
}
show_list($x->win, "Windows");
show_list($x->linux, "Linux");
show_list($x->mac, "Mac");
echo "</tr></table>\n";
}
$d = get_cached_data(86400);
$data = FALSE;
if ($d) {
$data = unserialize($d);
}
if (!$data) {
$data = get_gpu_lists();
set_cached_data(86400, serialize($data));
}
page_head(tra("Top GPU models"));
echo tra("The following lists show the most productive GPU models on different platforms. Relative speeds, measured by average elapsed time of tasks, are shown in parentheses.");
show_vendor("NVIDIA", $data->cuda);
show_vendor("ATI/AMD", $data->ati);
show_vendor("Intel", $data->intel_gpu);
echo "<p>Generated ".time_str($data->time);
page_tail();
?>
| 1 | 15,842 | I'd suggest to add "ATI " and "Intel(R) " to this list | BOINC-boinc | php |
@@ -127,8 +127,8 @@ class Status extends ReportWidgetBase
}
}
- foreach ($missingPlugins as $pluginCode) {
- $warnings[] = Lang::get('backend::lang.warnings.plugin_missing', ['name' => '<strong>'.$pluginCode.'</strong>']);
+ foreach ($missingPlugins as $plugin) {
+ $warnings[] = Lang::get('backend::lang.warnings.plugin_missing', ['name' => '<strong>'.$plugin['code'].'</strong>']);
}
return $warnings; | 1 | <?php namespace System\ReportWidgets;
use Lang;
use Config;
use BackendAuth;
use System\Models\Parameter;
use System\Models\LogSetting;
use System\Classes\UpdateManager;
use System\Classes\PluginManager;
use Backend\Classes\ReportWidgetBase;
use System\Models\EventLog;
use System\Models\RequestLog;
use System\Models\PluginVersion;
use Exception;
/**
* System status report widget.
*
* @package october\system
* @author Alexey Bobkov, Samuel Georges
*/
class Status extends ReportWidgetBase
{
/**
* @var string A unique alias to identify this widget.
*/
protected $defaultAlias = 'status';
/**
* Renders the widget.
*/
public function render()
{
try {
$this->loadData();
}
catch (Exception $ex) {
$this->vars['error'] = $ex->getMessage();
}
return $this->makePartial('widget');
}
public function defineProperties()
{
return [
'title' => [
'title' => 'backend::lang.dashboard.widget_title_label',
'default' => 'backend::lang.dashboard.status.widget_title_default',
'type' => 'string',
'validationPattern' => '^.+$',
'validationMessage' => 'backend::lang.dashboard.widget_title_error',
]
];
}
protected function loadData()
{
$manager = UpdateManager::instance();
$this->vars['canUpdate'] = BackendAuth::getUser()->hasAccess('system.manage_updates');
$this->vars['updates'] = $manager->check();
$this->vars['warnings'] = $this->getSystemWarnings();
$this->vars['coreBuild'] = Parameter::get('system::core.build');
$this->vars['eventLog'] = EventLog::count();
$this->vars['eventLogMsg'] = LogSetting::get('log_events', false) ? false : true;
$this->vars['requestLog'] = RequestLog::count();
$this->vars['requestLogMsg'] = LogSetting::get('log_requests', false) ? false : true;
$this->vars['appBirthday'] = PluginVersion::orderBy('created_at')->value('created_at');
}
public function onLoadWarningsForm()
{
$this->vars['warnings'] = $this->getSystemWarnings();
return $this->makePartial('warnings_form');
}
protected function getSystemWarnings()
{
$warnings = [];
$missingPlugins = PluginManager::instance()->findMissingDependencies();
$writablePaths = [
temp_path(),
storage_path(),
storage_path('app'),
storage_path('logs'),
storage_path('framework'),
storage_path('cms'),
storage_path('cms/cache'),
storage_path('cms/twig'),
storage_path('cms/combiner'),
];
if (in_array('Cms', Config::get('cms.loadModules', []))) {
$writablePaths[] = themes_path();
}
if (Config::get('app.debug', true)) {
$warnings[] = Lang::get('backend::lang.warnings.debug');
}
if (Config::get('develop.decompileBackendAssets', false)) {
$warnings[] = Lang::get('backend::lang.warnings.decompileBackendAssets');
}
$requiredExtensions = [
'GD' => extension_loaded('gd'),
'fileinfo' => extension_loaded('fileinfo'),
'Zip' => class_exists('ZipArchive'),
'cURL' => function_exists('curl_init') && defined('CURLOPT_FOLLOWLOCATION'),
'OpenSSL' => function_exists('openssl_random_pseudo_bytes'),
];
foreach ($writablePaths as $path) {
if (!is_writable($path)) {
$warnings[] = Lang::get('backend::lang.warnings.permissions', ['name' => '<strong>'.$path.'</strong>']);
}
}
foreach ($requiredExtensions as $extension => $installed) {
if (!$installed) {
$warnings[] = Lang::get('backend::lang.warnings.extension', ['name' => '<strong>'.$extension.'</strong>']);
}
}
foreach ($missingPlugins as $pluginCode) {
$warnings[] = Lang::get('backend::lang.warnings.plugin_missing', ['name' => '<strong>'.$pluginCode.'</strong>']);
}
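        // Editor's note (hedged): the diff under review reshapes this loop because
        // PluginManager::findMissingDependencies() is being changed to return richer
        // entries; grounded in that diff only, the loop becomes roughly:
        //
        //     foreach ($missingPlugins as $plugin) {
        //         $warnings[] = Lang::get('backend::lang.warnings.plugin_missing', ['name' => '<strong>'.$plugin['code'].'</strong>']);
        //     }
        //
        // The reviewer also notes the replacement should use a new lang key rather
        // than reusing 'plugin_missing'; the exact key is not shown here, so this
        // sketch leaves it as-is.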
return $warnings;
}
}
| 1 | 18,538 | This is using a different lang key, we should switch it to using the new key and remove the old key if it is no longer used. | octobercms-october | php |
@@ -76,8 +76,9 @@ type SpecHandler interface {
}
var (
- nameRegex = regexp.MustCompile(api.Name + "=([0-9A-Za-z_-]+),?")
- nodesRegex = regexp.MustCompile(api.SpecNodes + "=([0-9A-Za-z_-]+),?")
+ nameRegex = regexp.MustCompile(api.Name + "=([0-9A-Za-z_-]+),?")
+ //nodesRegex = regexp.MustCompile(api.SpecNodes + "=([0-9A-Za-z_-]+),*")
+ nodesRegex = regexp.MustCompile(api.SpecNodes + "=('[0-9A-Za-z,_-]+'),*|" + api.SpecNodes + "=([0-9A-Za-z_-]+),*")
parentRegex = regexp.MustCompile(api.SpecParent + "=([A-Za-z]+),?")
sizeRegex = regexp.MustCompile(api.SpecSize + "=([0-9A-Za-z]+),?")
scaleRegex = regexp.MustCompile(api.SpecScale + "=([0-9]+),?") | 1 | package spec
import (
"fmt"
"regexp"
"strconv"
"strings"
"github.com/libopenstorage/openstorage/api"
"github.com/libopenstorage/openstorage/pkg/parser"
"github.com/libopenstorage/openstorage/pkg/units"
)
// SpecHandler provides conversion function from what gets passed in over the
// plugin API to an api.VolumeSpec object.
type SpecHandler interface {
	// SpecOptsFromString parses options from the name and returns them in a map.
// The input string should have known keys in the following format:
// "scale=value;size=value;name=volname"
// If the spec was parsed, it returns:
// (true, options_map, parsed_name)
// If the input string didn't contain the name, it returns:
// (false, nil, inputString)
SpecOptsFromString(inputString string) (
bool,
map[string]string,
string,
)
// SpecFromString parses options from the name.
// If the scheduler was unable to pass in the volume spec via the API,
// the spec can be passed in via the name in the format:
// "key=value;key=value;name=volname"
// source is populated if key parent=<volume_id> is specified.
// If the spec was parsed, it returns:
// (true, parsed_spec, locator, source, parsed_name)
	// If the input string didn't contain the name, it returns:
// (false, DefaultSpec(), nil, nil, inputString)
SpecFromString(inputString string) (
bool,
*api.VolumeSpec,
*api.VolumeLocator,
*api.Source,
string,
)
	// SpecFromOpts parses docker options passed in the docker run
// command of the form --opt name=value
// source is populated if --opt parent=<volume_id> is specified.
// If the options are validated then it returns:
// (resultant_VolumeSpec, source, locator, nil)
// If the options have invalid values then it returns:
// (nil, nil, nil, error)
SpecFromOpts(opts map[string]string) (
*api.VolumeSpec,
*api.VolumeLocator,
*api.Source,
error,
)
// UpdateSpecFromOpts parses in volume options passed through the opts map and updates given spec, locator & source
// If the options are validated then it returns:
// (resultant_VolumeSpec, source, locator, nil)
// If the options have invalid values then it returns:
// (nil, nil, nil, error)
UpdateSpecFromOpts(opts map[string]string, spec *api.VolumeSpec, locator *api.VolumeLocator, source *api.Source) (
*api.VolumeSpec,
*api.VolumeLocator,
*api.Source,
error,
)
// Returns a default VolumeSpec if no docker options or string encoding
// was provided.
DefaultSpec() *api.VolumeSpec
}
var (
nameRegex = regexp.MustCompile(api.Name + "=([0-9A-Za-z_-]+),?")
nodesRegex = regexp.MustCompile(api.SpecNodes + "=([0-9A-Za-z_-]+),?")
parentRegex = regexp.MustCompile(api.SpecParent + "=([A-Za-z]+),?")
sizeRegex = regexp.MustCompile(api.SpecSize + "=([0-9A-Za-z]+),?")
scaleRegex = regexp.MustCompile(api.SpecScale + "=([0-9]+),?")
fsRegex = regexp.MustCompile(api.SpecFilesystem + "=([0-9A-Za-z]+),?")
bsRegex = regexp.MustCompile(api.SpecBlockSize + "=([0-9]+),?")
haRegex = regexp.MustCompile(api.SpecHaLevel + "=([0-9]+),?")
cosRegex = regexp.MustCompile(api.SpecPriority + "=([A-Za-z]+),?")
sharedRegex = regexp.MustCompile(api.SpecShared + "=([A-Za-z]+),?")
journalRegex = regexp.MustCompile(api.SpecJournal + "=([A-Za-z]+),?")
sharedV4Regex = regexp.MustCompile(api.SpecSharedV4 + "=([A-Za-z]+),?")
cascadedRegex = regexp.MustCompile(api.SpecCascaded + "=([A-Za-z]+),?")
passphraseRegex = regexp.MustCompile(api.SpecPassphrase + "=([0-9A-Za-z_@./#&+-]+),?")
stickyRegex = regexp.MustCompile(api.SpecSticky + "=([A-Za-z]+),?")
secureRegex = regexp.MustCompile(api.SpecSecure + "=([A-Za-z]+),?")
zonesRegex = regexp.MustCompile(api.SpecZones + "=([A-Za-z]+),?")
racksRegex = regexp.MustCompile(api.SpecRacks + "=([A-Za-z]+),?")
rackRegex = regexp.MustCompile(api.SpecRack + "=([A-Za-z]+),?")
aggrRegex = regexp.MustCompile(api.SpecAggregationLevel + "=([0-9]+|" +
api.SpecAutoAggregationValue + "),?")
compressedRegex = regexp.MustCompile(api.SpecCompressed + "=([A-Za-z]+),?")
snapScheduleRegex = regexp.MustCompile(api.SpecSnapshotSchedule +
`=([A-Za-z0-9:;@=#]+),?`)
ioProfileRegex = regexp.MustCompile(api.SpecIoProfile + "=([0-9A-Za-z_-]+),?")
)
type specHandler struct {
}
// NewSpecHandler returns a new SpecHandler interface
func NewSpecHandler() SpecHandler {
return &specHandler{}
}
func (d *specHandler) cosLevel(cos string) (api.CosType, error) {
cos = strings.ToLower(cos)
switch cos {
case "high", "3":
return api.CosType_HIGH, nil
case "medium", "2":
return api.CosType_MEDIUM, nil
case "low", "1", "":
return api.CosType_LOW, nil
}
return api.CosType_NONE,
fmt.Errorf("Cos must be one of %q | %q | %q", "high", "medium", "low")
}
func (d *specHandler) getVal(r *regexp.Regexp, str string) (bool, string) {
found := r.FindString(str)
if found == "" {
return false, ""
}
submatches := r.FindStringSubmatch(str)
if len(submatches) < 2 {
return false, ""
}
val := submatches[1]
return true, val
}
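// Editor's note (hedged): getVal above only ever reads submatches[1], so a pattern
// built from two separate alternatives with two separate capture groups (as in the
// reviewed nodesRegex change) can leave submatches[1] empty when the second
// alternative is the one that matched. Keeping a single outer capture group with the
// alternation inside it preserves the value in submatches[1] either way, e.g.:
//
//	nodesRegex = regexp.MustCompile(api.SpecNodes + "=('[0-9A-Za-z,_-]+'|[0-9A-Za-z_-]+),?")
//
// This mirrors the reviewer's suggestion and is a sketch, not the project's final
// implementation.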
func (d *specHandler) DefaultSpec() *api.VolumeSpec {
return &api.VolumeSpec{
VolumeLabels: make(map[string]string),
Format: api.FSType_FS_TYPE_EXT4,
HaLevel: 1,
}
}
func (d *specHandler) UpdateSpecFromOpts(opts map[string]string, spec *api.VolumeSpec, locator *api.VolumeLocator,
source *api.Source) (*api.VolumeSpec, *api.VolumeLocator, *api.Source, error) {
nodeList := make([]string, 0)
if spec == nil {
spec = d.DefaultSpec()
}
if source == nil {
source = &api.Source{}
}
if locator == nil {
locator = &api.VolumeLocator{
VolumeLabels: make(map[string]string),
}
}
for k, v := range opts {
switch k {
case api.SpecNodes:
inputNodes := strings.Split(v, ",")
for _, node := range inputNodes {
if len(node) != 0 {
nodeList = append(nodeList, node)
}
}
spec.ReplicaSet = &api.ReplicaSet{Nodes: nodeList}
case api.SpecParent:
source.Parent = v
case api.SpecEphemeral:
spec.Ephemeral, _ = strconv.ParseBool(v)
case api.SpecSize:
if size, err := units.Parse(v); err != nil {
return nil, nil, nil, err
} else {
spec.Size = uint64(size)
}
case api.SpecScale:
if scale, err := strconv.ParseUint(v, 10, 64); err == nil {
spec.Scale = uint32(scale)
}
case api.SpecFilesystem:
if value, err := api.FSTypeSimpleValueOf(v); err != nil {
return nil, nil, nil, err
} else {
spec.Format = value
}
case api.SpecBlockSize:
if blockSize, err := units.Parse(v); err != nil {
return nil, nil, nil, err
} else {
spec.BlockSize = blockSize
}
case api.SpecHaLevel:
haLevel, _ := strconv.ParseInt(v, 10, 64)
spec.HaLevel = haLevel
case api.SpecPriority:
cos, err := d.cosLevel(v)
if err != nil {
return nil, nil, nil, err
}
spec.Cos = cos
case api.SpecPriorityAlias:
cos, err := d.cosLevel(v)
if err != nil {
return nil, nil, nil, err
}
spec.Cos = cos
case api.SpecDedupe:
spec.Dedupe, _ = strconv.ParseBool(v)
case api.SpecSnapshotInterval:
snapshotInterval, _ := strconv.ParseUint(v, 10, 32)
spec.SnapshotInterval = uint32(snapshotInterval)
case api.SpecSnapshotSchedule:
spec.SnapshotSchedule = v
case api.SpecAggregationLevel:
if v == api.SpecAutoAggregationValue {
spec.AggregationLevel = api.AutoAggregation
} else {
aggregationLevel, _ := strconv.ParseUint(v, 10, 32)
spec.AggregationLevel = uint32(aggregationLevel)
}
case api.SpecShared:
if shared, err := strconv.ParseBool(v); err != nil {
return nil, nil, nil, err
} else {
spec.Shared = shared
}
case api.SpecJournal:
if journal, err := strconv.ParseBool(v); err != nil {
return nil, nil, nil, err
} else {
spec.Journal = journal
}
case api.SpecSharedV4:
if sharedV4, err := strconv.ParseBool(v); err != nil {
return nil, nil, nil, err
} else {
spec.Sharedv4 = sharedV4
}
case api.SpecCascaded:
if cascaded, err := strconv.ParseBool(v); err != nil {
return nil, nil, nil, err
} else {
spec.Cascaded = cascaded
}
case api.SpecSticky:
if sticky, err := strconv.ParseBool(v); err != nil {
return nil, nil, nil, err
} else {
spec.Sticky = sticky
}
case api.SpecSecure:
if secure, err := strconv.ParseBool(v); err != nil {
return nil, nil, nil, err
} else {
spec.Encrypted = secure
}
case api.SpecPassphrase:
spec.Encrypted = true
spec.Passphrase = v
case api.SpecGroup:
spec.Group = &api.Group{Id: v}
case api.SpecGroupEnforce:
if groupEnforced, err := strconv.ParseBool(v); err != nil {
return nil, nil, nil, err
} else {
spec.GroupEnforced = groupEnforced
}
case api.SpecZones, api.SpecRacks:
locator.VolumeLabels[k] = v
case api.SpecRack:
locator.VolumeLabels[api.SpecRacks] = v
case api.SpecCompressed:
if compressed, err := strconv.ParseBool(v); err != nil {
return nil, nil, nil, err
} else {
spec.Compressed = compressed
}
case api.SpecLabels:
if labels, err := parser.LabelsFromString(v); err != nil {
return nil, nil, nil, err
} else {
for k, v := range labels {
locator.VolumeLabels[k] = v
}
}
case api.SpecIoProfile:
if ioProfile, err := api.IoProfileSimpleValueOf(v); err != nil {
return nil, nil, nil, err
} else {
spec.IoProfile = ioProfile
}
default:
spec.VolumeLabels[k] = v
}
}
return spec, locator, source, nil
}
func (d *specHandler) SpecFromOpts(
opts map[string]string,
) (*api.VolumeSpec, *api.VolumeLocator, *api.Source, error) {
source := &api.Source{}
locator := &api.VolumeLocator{
VolumeLabels: make(map[string]string),
}
spec := d.DefaultSpec()
return d.UpdateSpecFromOpts(opts, spec, locator, source)
}
func (d *specHandler) SpecOptsFromString(
str string,
) (bool, map[string]string, string) {
// If we can't parse the name, the rest of the spec is invalid.
ok, name := d.getVal(nameRegex, str)
if !ok {
return false, nil, str
}
opts := make(map[string]string)
if ok, sz := d.getVal(sizeRegex, str); ok {
opts[api.SpecSize] = sz
}
if ok, nodes := d.getVal(nodesRegex, str); ok {
opts[api.SpecNodes] = nodes
}
if ok, parent := d.getVal(parentRegex, str); ok {
opts[api.SpecParent] = parent
}
if ok, scale := d.getVal(scaleRegex, str); ok {
opts[api.SpecScale] = scale
}
if ok, fs := d.getVal(fsRegex, str); ok {
opts[api.SpecFilesystem] = fs
}
if ok, bs := d.getVal(bsRegex, str); ok {
opts[api.SpecBlockSize] = bs
}
if ok, ha := d.getVal(haRegex, str); ok {
opts[api.SpecHaLevel] = ha
}
if ok, priority := d.getVal(cosRegex, str); ok {
opts[api.SpecPriority] = priority
}
if ok, shared := d.getVal(sharedRegex, str); ok {
opts[api.SpecShared] = shared
}
if ok, journal := d.getVal(journalRegex, str); ok {
opts[api.SpecJournal] = journal
}
if ok, nfs := d.getVal(sharedV4Regex, str); ok {
opts[api.SpecSharedV4] = nfs
}
if ok, cascaded := d.getVal(cascadedRegex, str); ok {
opts[api.SpecCascaded] = cascaded
}
if ok, sticky := d.getVal(stickyRegex, str); ok {
opts[api.SpecSticky] = sticky
}
if ok, secure := d.getVal(secureRegex, str); ok {
opts[api.SpecSecure] = secure
}
if ok, passphrase := d.getVal(passphraseRegex, str); ok {
opts[api.SpecPassphrase] = passphrase
}
if ok, zones := d.getVal(zonesRegex, str); ok {
opts[api.SpecZones] = zones
}
if ok, racks := d.getVal(racksRegex, str); ok {
opts[api.SpecRacks] = racks
} else {
if ok, rack := d.getVal(rackRegex, str); ok {
opts[api.SpecRack] = rack
}
}
if ok, aggregationLvl := d.getVal(aggrRegex, str); ok {
opts[api.SpecAggregationLevel] = aggregationLvl
}
if ok, compressed := d.getVal(compressedRegex, str); ok {
opts[api.SpecCompressed] = compressed
}
if ok, sched := d.getVal(snapScheduleRegex, str); ok {
opts[api.SpecSnapshotSchedule] = strings.Replace(sched, "#", ",", -1)
}
if ok, ioProfile := d.getVal(ioProfileRegex, str); ok {
opts[api.SpecIoProfile] = ioProfile
}
return true, opts, name
}
func (d *specHandler) SpecFromString(
str string,
) (bool, *api.VolumeSpec, *api.VolumeLocator, *api.Source, string) {
ok, opts, name := d.SpecOptsFromString(str)
if !ok {
return false, d.DefaultSpec(), nil, nil, name
}
spec, locator, source, err := d.SpecFromOpts(opts)
if err != nil {
return false, d.DefaultSpec(), nil, nil, name
}
return true, spec, locator, source, name
}
| 1 | 6,772 | shouldn't this work? nodesRegex = regexp.MustCompile(api.SpecNodes + "=(('[0-9A-Za-z,_-]+')|([0-9A-Za-z_-]+)),?") | libopenstorage-openstorage | go |
@@ -1,5 +1,6 @@
class Account::Encrypter
- def before_create(account)
+ def before_validation(account)
+ return unless account.new_record?
assign_activation_code_to_random_hash(account)
encrypt_salt(account)
end | 1 | class Account::Encrypter
def before_create(account)
assign_activation_code_to_random_hash(account)
encrypt_salt(account)
end
def before_save(account)
encrypt_email(account) if account.email_changed?
encrypt_password(account) if account.password.present?
end
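  # Editor's note (hedged): the change under review replaces before_create with
  # before_validation, presumably so the salt and activation code already exist by
  # the time validations run. Because before_validation also fires when existing
  # records are saved, the proposed callback guards itself with new_record?, roughly:
  #
  #   def before_validation(account)
  #     return unless account.new_record?
  #     assign_activation_code_to_random_hash(account)
  #     encrypt_salt(account)
  #   end
  #
  # This restates the diff above (its rationale is an assumption); it is not
  # additional project code.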
private
def assign_activation_code_to_random_hash(account)
account.activation_code = SecureRandom.hex(20)
end
def encrypt_email(account)
account.email_md5 = Digest::MD5.hexdigest(account.email.downcase).to_s
end
def encrypt_salt(account)
account.salt = Account::Authenticator.encrypt(Time.now.to_s, account.login)
end
def encrypt_password(account)
account.crypted_password = Account::Authenticator.encrypt(account.password, account.salt)
end
end
| 1 | 6,864 | You mentioned this change was prompted because the `before_create` action was actually a defect. This will be done only for a new record; why is `before_validation`, which will be called repeatedly as accounts get updated and saved, correct whereas `before_create` is not? It looks like one would want to assign and activation code, etc., only once. | blackducksoftware-ohloh-ui | rb |
@@ -95,6 +95,19 @@ namespace Datadog.Trace
}
}
+ if (Settings.GlobalSamplingRate != null)
+ {
+ var globalRate = (float)Settings.GlobalSamplingRate;
+ if (globalRate < 0f || globalRate > 1f)
+ {
+ Log.Warning("{0} configuration of {1} is out of range", ConfigurationKeys.GlobalSamplingRate, Settings.GlobalSamplingRate);
+ }
+ else
+ {
+ Sampler.RegisterRule(new GlobalSamplingRule(globalRate));
+ }
+ }
+
// Register callbacks to make sure we flush the traces before exiting
AppDomain.CurrentDomain.ProcessExit += CurrentDomain_ProcessExit;
AppDomain.CurrentDomain.UnhandledException += CurrentDomain_UnhandledException; | 1 | using System;
using System.Collections.Generic;
using System.Diagnostics;
using System.Reflection;
using System.Threading;
using System.Threading.Tasks;
using Datadog.Trace.Agent;
using Datadog.Trace.Configuration;
using Datadog.Trace.DogStatsd;
using Datadog.Trace.Logging;
using Datadog.Trace.Sampling;
using Datadog.Trace.Vendors.StatsdClient;
namespace Datadog.Trace
{
/// <summary>
/// The tracer is responsible for creating spans and flushing them to the Datadog agent
/// </summary>
public class Tracer : IDatadogTracer
{
private const string UnknownServiceName = "UnknownService";
private static readonly Vendors.Serilog.ILogger Log = DatadogLogging.For<Tracer>();
/// <summary>
/// The number of Tracer instances that have been created and not yet destroyed.
/// This is used in the heartbeat metrics to estimate the number of
/// "live" Tracers that could potentially be sending traces to the Agent.
/// </summary>
private static int _liveTracerCount;
private readonly IScopeManager _scopeManager;
private readonly IAgentWriter _agentWriter;
private readonly Timer _heartbeatTimer;
static Tracer()
{
// create the default global Tracer
Instance = new Tracer();
}
/// <summary>
/// Initializes a new instance of the <see cref="Tracer"/> class with default settings.
/// </summary>
public Tracer()
: this(settings: null, agentWriter: null, sampler: null, scopeManager: null, statsd: null)
{
}
/// <summary>
/// Initializes a new instance of the <see cref="Tracer"/>
/// class using the specified <see cref="IConfigurationSource"/>.
/// </summary>
/// <param name="settings">
/// A <see cref="TracerSettings"/> instance with the desired settings,
/// or null to use the default configuration sources.
/// </param>
public Tracer(TracerSettings settings)
: this(settings, agentWriter: null, sampler: null, scopeManager: null, statsd: null)
{
}
internal Tracer(TracerSettings settings, IAgentWriter agentWriter, ISampler sampler, IScopeManager scopeManager, IStatsd statsd)
{
// update the count of Tracer instances
Interlocked.Increment(ref _liveTracerCount);
Settings = settings ?? TracerSettings.FromDefaultSources();
// if not configured, try to determine an appropriate service name
DefaultServiceName = Settings.ServiceName ??
GetApplicationName() ??
UnknownServiceName;
// only set DogStatsdClient if tracer metrics are enabled
if (Settings.TracerMetricsEnabled)
{
Statsd = statsd ?? CreateDogStatsdClient(Settings, DefaultServiceName);
}
// fall back to default implementations of each dependency if not provided
IApi apiClient = new Api(Settings.AgentUri, delegatingHandler: null, Statsd);
_agentWriter = agentWriter ?? new AgentWriter(apiClient, Statsd);
_scopeManager = scopeManager ?? new AsyncLocalScopeManager();
Sampler = sampler ?? new RuleBasedSampler(new RateLimiter(Settings.MaxTracesSubmittedPerSecond));
if (!string.IsNullOrWhiteSpace(Settings.CustomSamplingRules))
{
// User has opted in, ensure rate limiter is used
RuleBasedSampler.OptInTracingWithoutLimits();
foreach (var rule in CustomSamplingRule.BuildFromConfigurationString(Settings.CustomSamplingRules))
{
Sampler.RegisterRule(rule);
}
}
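            // Editor's note (hedged): the diff under review registers one more rule here,
            // driven by Settings.GlobalSamplingRate. The shape below is copied from that
            // diff (GlobalSamplingRule and ConfigurationKeys.GlobalSamplingRate come from
            // it, not from this file) and is a sketch rather than the merged code:
            //
            //     if (Settings.GlobalSamplingRate != null)
            //     {
            //         var globalRate = (float)Settings.GlobalSamplingRate;
            //         if (globalRate < 0f || globalRate > 1f)
            //         {
            //             Log.Warning("{0} configuration of {1} is out of range", ConfigurationKeys.GlobalSamplingRate, Settings.GlobalSamplingRate);
            //         }
            //         else
            //         {
            //             Sampler.RegisterRule(new GlobalSamplingRule(globalRate));
            //         }
            //     }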
// Register callbacks to make sure we flush the traces before exiting
AppDomain.CurrentDomain.ProcessExit += CurrentDomain_ProcessExit;
AppDomain.CurrentDomain.UnhandledException += CurrentDomain_UnhandledException;
Console.CancelKeyPress += Console_CancelKeyPress;
// start the heartbeat loop
_heartbeatTimer = new Timer(HeartbeatCallback, state: null, dueTime: TimeSpan.Zero, period: TimeSpan.FromMinutes(1));
// If configured, add/remove the correlation identifiers into the
// LibLog logging context when a scope is activated/closed
if (Settings.LogsInjectionEnabled)
{
InitializeLibLogScopeEventSubscriber(_scopeManager);
}
}
/// <summary>
/// Finalizes an instance of the <see cref="Tracer"/> class.
/// </summary>
~Tracer()
{
// update the count of Tracer instances
Interlocked.Decrement(ref _liveTracerCount);
}
/// <summary>
/// Gets or sets the global tracer object
/// </summary>
public static Tracer Instance { get; set; }
/// <summary>
/// Gets the active scope
/// </summary>
public Scope ActiveScope => _scopeManager.Active;
/// <summary>
/// Gets a value indicating whether debugging mode is enabled.
/// </summary>
        /// <value><c>true</c> if debugging is enabled, otherwise <c>false</c>.</value>
bool IDatadogTracer.IsDebugEnabled => Settings.DebugEnabled;
/// <summary>
/// Gets the default service name for traces where a service name is not specified.
/// </summary>
public string DefaultServiceName { get; }
/// <summary>
/// Gets this tracer's settings.
/// </summary>
public TracerSettings Settings { get; }
/// <summary>
/// Gets the tracer's scope manager, which determines which span is currently active, if any.
/// </summary>
IScopeManager IDatadogTracer.ScopeManager => _scopeManager;
/// <summary>
/// Gets the <see cref="ISampler"/> instance used by this <see cref="IDatadogTracer"/> instance.
/// </summary>
ISampler IDatadogTracer.Sampler => Sampler;
internal ISampler Sampler { get; }
internal IStatsd Statsd { get; }
/// <summary>
/// Create a new Tracer with the given parameters
/// </summary>
/// <param name="agentEndpoint">The agent endpoint where the traces will be sent (default is http://localhost:8126).</param>
/// <param name="defaultServiceName">Default name of the service (default is the name of the executing assembly).</param>
/// <param name="isDebugEnabled">Turns on all debug logging (this may have an impact on application performance).</param>
/// <returns>The newly created tracer</returns>
public static Tracer Create(Uri agentEndpoint = null, string defaultServiceName = null, bool isDebugEnabled = false)
{
// Keep supporting this older public method by creating a TracerConfiguration
// from default sources, overwriting the specified settings, and passing that to the constructor.
var configuration = TracerSettings.FromDefaultSources();
configuration.DebugEnabled = isDebugEnabled;
if (agentEndpoint != null)
{
configuration.AgentUri = agentEndpoint;
}
if (defaultServiceName != null)
{
configuration.ServiceName = defaultServiceName;
}
return new Tracer(configuration);
}
/// <summary>
/// Make a span active and return a scope that can be disposed to close the span
/// </summary>
/// <param name="span">The span to activate</param>
/// <param name="finishOnClose">If set to false, closing the returned scope will not close the enclosed span </param>
/// <returns>A Scope object wrapping this span</returns>
public Scope ActivateSpan(Span span, bool finishOnClose = true)
{
return _scopeManager.Activate(span, finishOnClose);
}
/// <summary>
        /// This is a shortcut for <see cref="StartSpan"/> and <see cref="ActivateSpan"/>: it creates a new span with the given parameters and makes it active.
/// </summary>
/// <param name="operationName">The span's operation name</param>
/// <param name="parent">The span's parent</param>
/// <param name="serviceName">The span's service name</param>
/// <param name="startTime">An explicit start time for that span</param>
/// <param name="ignoreActiveScope">If set the span will not be a child of the currently active span</param>
/// <param name="finishOnClose">If set to false, closing the returned scope will not close the enclosed span </param>
/// <returns>A scope wrapping the newly created span</returns>
public Scope StartActive(string operationName, ISpanContext parent = null, string serviceName = null, DateTimeOffset? startTime = null, bool ignoreActiveScope = false, bool finishOnClose = true)
{
var span = StartSpan(operationName, parent, serviceName, startTime, ignoreActiveScope);
return _scopeManager.Activate(span, finishOnClose);
}
/// <summary>
/// Creates a new <see cref="Span"/> with the specified parameters.
/// </summary>
/// <param name="operationName">The span's operation name</param>
/// <param name="parent">The span's parent</param>
/// <param name="serviceName">The span's service name</param>
/// <param name="startTime">An explicit start time for that span</param>
/// <param name="ignoreActiveScope">If set the span will not be a child of the currently active span</param>
/// <returns>The newly created span</returns>
public Span StartSpan(string operationName, ISpanContext parent = null, string serviceName = null, DateTimeOffset? startTime = null, bool ignoreActiveScope = false)
{
if (parent == null && !ignoreActiveScope)
{
parent = _scopeManager.Active?.Span?.Context;
}
ITraceContext traceContext;
// try to get the trace context (from local spans) or
// sampling priority (from propagated spans),
// otherwise start a new trace context
if (parent is SpanContext parentSpanContext)
{
traceContext = parentSpanContext.TraceContext ??
new TraceContext(this)
{
SamplingPriority = parentSpanContext.SamplingPriority
};
}
else
{
traceContext = new TraceContext(this);
}
var finalServiceName = serviceName ?? parent?.ServiceName ?? DefaultServiceName;
var spanContext = new SpanContext(parent, traceContext, finalServiceName);
var span = new Span(spanContext, startTime)
{
OperationName = operationName,
};
var env = Settings.Environment;
// automatically add the "env" tag if defined
if (!string.IsNullOrWhiteSpace(env))
{
span.SetTag(Tags.Env, env);
}
// Apply any global tags
if (Settings.GlobalTags.Count > 0)
{
foreach (var entry in Settings.GlobalTags)
{
span.SetTag(entry.Key, entry.Value);
}
}
traceContext.AddSpan(span);
return span;
}
/// <summary>
/// Writes the specified <see cref="Span"/> collection to the agent writer.
/// </summary>
/// <param name="trace">The <see cref="Span"/> collection to write.</param>
void IDatadogTracer.Write(List<Span> trace)
{
_agentWriter.WriteTrace(trace);
}
/// <summary>
/// Create an Uri to the Agent using host and port from
/// the specified <paramref name="settings"/>.
/// </summary>
/// <param name="settings">A <see cref="TracerSettings"/> object </param>
/// <returns>An Uri that can be used to send traces to the Agent.</returns>
internal static Uri GetAgentUri(TracerSettings settings)
{
return settings.AgentUri;
}
internal async Task FlushAsync()
{
await _agentWriter.FlushAndCloseAsync();
}
/// <summary>
/// Gets an "application name" for the executing application by looking at
/// the hosted app name (.NET Framework on IIS only), assembly name, and process name.
/// </summary>
/// <returns>The default service name.</returns>
private static string GetApplicationName()
{
try
{
#if !NETSTANDARD2_0
// System.Web.dll is only available on .NET Framework
if (System.Web.Hosting.HostingEnvironment.IsHosted)
{
// if this app is an ASP.NET application, return "SiteName/ApplicationVirtualPath".
// note that ApplicationVirtualPath includes a leading slash.
return (System.Web.Hosting.HostingEnvironment.SiteName + System.Web.Hosting.HostingEnvironment.ApplicationVirtualPath).TrimEnd('/');
}
#endif
return Assembly.GetEntryAssembly()?.GetName().Name ??
Process.GetCurrentProcess().ProcessName;
}
catch (Exception ex)
{
Log.Error(ex, "Error creating default service name.");
return null;
}
}
private static IStatsd CreateDogStatsdClient(TracerSettings settings, string serviceName)
{
var frameworkDescription = FrameworkDescription.Create();
string[] constantTags =
{
"lang:.NET",
$"lang_interpreter:{frameworkDescription.Name}",
$"lang_version:{frameworkDescription.ProductVersion}",
$"tracer_version:{TracerConstants.AssemblyVersion}",
$"service_name:{serviceName}"
};
var statsdUdp = new StatsdUDP(settings.AgentUri.DnsSafeHost, settings.DogStatsdPort, StatsdConfig.DefaultStatsdMaxUDPPacketSize);
return new Statsd(statsdUdp, new RandomGenerator(), new StopWatchFactory(), prefix: string.Empty, constantTags);
}
private void InitializeLibLogScopeEventSubscriber(IScopeManager scopeManager)
{
new LibLogScopeEventSubscriber(scopeManager);
}
private void CurrentDomain_ProcessExit(object sender, EventArgs e)
{
_agentWriter.FlushAndCloseAsync().Wait();
}
private void CurrentDomain_UnhandledException(object sender, UnhandledExceptionEventArgs e)
{
_agentWriter.FlushAndCloseAsync().Wait();
}
private void Console_CancelKeyPress(object sender, ConsoleCancelEventArgs e)
{
_agentWriter.FlushAndCloseAsync().Wait();
}
private void HeartbeatCallback(object state)
{
if (Statsd != null)
{
// use the count of Tracer instances as the heartbeat value
                // to estimate the number of "live" Tracers that can potentially
// send traces to the Agent
Statsd.AppendSetGauge(TracerMetricNames.Health.Heartbeat, _liveTracerCount);
Statsd.Send();
}
}
}
}
| 1 | 16,385 | What's the rationale for a default setting to not use a GlobalSamplingRate of 1? I don't know much about the sampling rate stuff | DataDog-dd-trace-dotnet | .cs |
@@ -24,7 +24,8 @@ module Windows
'group' => 'Administrators',
'puppetpath' => '`cygpath -smF 35`/PuppetLabs/puppet/etc',
'puppetvardir' => '`cygpath -smF 35`/PuppetLabs/puppet/var',
- 'puppetbindir' => '`cygpath -F 38`/Puppet Labs/Puppet Enterprise/bin',
+ #if an x86 Program Files dir exists then use it, default to just Program Files
+ 'puppetbindir' => '$( [ -d "/cygdrive/c/Program Files (x86)" ] && echo "/cygdrive/c/Program Files (x86)" || echo "/cygdrive/c/Program Files" )/Puppet Labs/Puppet Enterprise/bin',
'pathseparator' => ';',
})
end | 1 | require File.expand_path(File.join(File.dirname(__FILE__), '..', 'host'))
require File.expand_path(File.join(File.dirname(__FILE__), '..', 'command_factory'))
require File.expand_path(File.join(File.dirname(__FILE__), '..', 'command'))
require File.expand_path(File.join(File.dirname(__FILE__), '..', 'options'))
module Windows
class Host < Beaker::Host
require File.expand_path(File.join(File.dirname(__FILE__), 'windows', 'user'))
require File.expand_path(File.join(File.dirname(__FILE__), 'windows', 'group'))
require File.expand_path(File.join(File.dirname(__FILE__), 'windows', 'exec'))
require File.expand_path(File.join(File.dirname(__FILE__), 'windows', 'pkg'))
require File.expand_path(File.join(File.dirname(__FILE__), 'windows', 'file'))
include Windows::User
include Windows::Group
include Windows::File
include Windows::Exec
include Windows::Pkg
def self.pe_defaults
h = Beaker::Options::OptionsHash.new
h.merge({
'user' => 'Administrator',
'group' => 'Administrators',
'puppetpath' => '`cygpath -smF 35`/PuppetLabs/puppet/etc',
'puppetvardir' => '`cygpath -smF 35`/PuppetLabs/puppet/var',
'puppetbindir' => '`cygpath -F 38`/Puppet Labs/Puppet Enterprise/bin',
'pathseparator' => ';',
})
end
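    # Editor's note (hedged): the value proposed for 'puppetbindir' in the diff above
    # defers the 32/64-bit decision to the shell at run time --
    # `$( [ -d DIR ] && echo A || echo B )` expands to A when DIR exists and to B
    # otherwise -- so "Program Files (x86)" is preferred whenever it is present.
    # A stand-alone illustration of the same idiom (paths are examples only):
    #
    #   echo "$( [ -d '/cygdrive/c/Program Files (x86)' ] && echo x86-dir || echo default-dir )"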
def self.foss_defaults
h = Beaker::Options::OptionsHash.new
h.merge({
'user' => 'Administrator',
'group' => 'Administrators',
'puppetpath' => '`cygpath -smF 35`/PuppetLabs/puppet/etc',
'puppetvardir' => '`cygpath -smF 35`/PuppetLabs/puppet/var',
'hieralibdir' => '`cygpath -w /opt/puppet-git-repos/hiera/lib`',
'hierapuppetlibdir' => '`cygpath -w /opt/puppet-git-repos/hiera-puppet/lib`',
# PATH related variables need to be Unix, which cygwin converts
'hierabindir' => '/opt/puppet-git-repos/hiera/bin',
'pathseparator' => ';',
})
end
end
end
| 1 | 4,643 | Don't we know if it's 64 by this point? Do we have to test for the directory on every call, or can't we just split the value based on the platform? | voxpupuli-beaker | rb |
@@ -35,6 +35,19 @@ class NoteListUtils {
})
);
+ menu.append(
+ new MenuItem({
+ label: _('Assign Notebook'),
+ click: async () => {
+ props.dispatch({
+ type: 'WINDOW_COMMAND',
+ name: 'moveToFolder',
+ noteIds: noteIds,
+ });
+ },
+ })
+ );
+
menu.append(
new MenuItem({
label: _('Duplicate'), | 1 | const BaseModel = require('lib/BaseModel');
const { _ } = require('lib/locale.js');
const { bridge } = require('electron').remote.require('./bridge');
const Menu = bridge().Menu;
const MenuItem = bridge().MenuItem;
const eventManager = require('../../eventManager');
const InteropService = require('lib/services/InteropService');
const InteropServiceHelper = require('../../InteropServiceHelper.js');
const Note = require('lib/models/Note');
const ExternalEditWatcher = require('lib/services/ExternalEditWatcher');
const { substrWithEllipsis } = require('lib/string-utils');
class NoteListUtils {
static makeContextMenu(noteIds, props) {
const notes = noteIds.map(id => BaseModel.byId(props.notes, id));
let hasEncrypted = false;
for (let i = 0; i < notes.length; i++) {
if (notes[i].encryption_applied) hasEncrypted = true;
}
const menu = new Menu();
if (!hasEncrypted) {
menu.append(
new MenuItem({
label: _('Add or remove tags'),
click: async () => {
props.dispatch({
type: 'WINDOW_COMMAND',
name: 'setTags',
noteIds: noteIds,
});
},
})
);
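			// Editor's note (hedged): the "Assign Notebook" item added in the diff under
			// review follows the same dispatch-only pattern as the item above. As the
			// reviewer points out, a handler that only calls props.dispatch() synchronously
			// does not need to be async; a sketch of that item without the async keyword:
			//
			// menu.append(
			// 	new MenuItem({
			// 		label: _('Assign Notebook'),
			// 		click: () => {
			// 			props.dispatch({
			// 				type: 'WINDOW_COMMAND',
			// 				name: 'moveToFolder',
			// 				noteIds: noteIds,
			// 			});
			// 		},
			// 	})
			// );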
menu.append(
new MenuItem({
label: _('Duplicate'),
click: async () => {
for (let i = 0; i < noteIds.length; i++) {
const note = await Note.load(noteIds[i]);
await Note.duplicate(noteIds[i], {
uniqueTitle: _('%s - Copy', note.title),
});
}
},
})
);
if (props.watchedNoteFiles.indexOf(noteIds[0]) < 0) {
menu.append(
new MenuItem({
label: _('Edit in external editor'),
enabled: noteIds.length === 1,
click: async () => {
this.startExternalEditing(noteIds[0]);
},
})
);
} else {
menu.append(
new MenuItem({
label: _('Stop external editing'),
enabled: noteIds.length === 1,
click: async () => {
this.stopExternalEditing(noteIds[0]);
},
})
);
}
if (noteIds.length <= 1) {
menu.append(
new MenuItem({
label: _('Switch between note and to-do type'),
click: async () => {
for (let i = 0; i < noteIds.length; i++) {
const note = await Note.load(noteIds[i]);
await Note.save(Note.toggleIsTodo(note), { userSideValidation: true });
eventManager.emit('noteTypeToggle', { noteId: note.id });
}
},
})
);
} else {
const switchNoteType = async (noteIds, type) => {
for (let i = 0; i < noteIds.length; i++) {
const note = await Note.load(noteIds[i]);
const newNote = Note.changeNoteType(note, type);
if (newNote === note) continue;
await Note.save(newNote, { userSideValidation: true });
eventManager.emit('noteTypeToggle', { noteId: note.id });
}
};
menu.append(
new MenuItem({
label: _('Switch to note type'),
click: async () => {
await switchNoteType(noteIds, 'note');
},
})
);
menu.append(
new MenuItem({
label: _('Switch to to-do type'),
click: async () => {
await switchNoteType(noteIds, 'todo');
},
})
);
}
menu.append(
new MenuItem({
label: _('Copy Markdown link'),
click: async () => {
const { clipboard } = require('electron');
const links = [];
for (let i = 0; i < noteIds.length; i++) {
const note = await Note.load(noteIds[i]);
links.push(Note.markdownTag(note));
}
clipboard.writeText(links.join(' '));
},
})
);
menu.append(
new MenuItem({
label: _('Share note...'),
click: async () => {
console.info('NOTE IDS', noteIds);
props.dispatch({
type: 'WINDOW_COMMAND',
name: 'commandShareNoteDialog',
noteIds: noteIds.slice(),
});
},
})
);
const exportMenu = new Menu();
const ioService = new InteropService();
const ioModules = ioService.modules();
for (let i = 0; i < ioModules.length; i++) {
const module = ioModules[i];
if (module.type !== 'exporter') continue;
if (noteIds.length > 1 && module.canDoMultiExport === false) continue;
exportMenu.append(
new MenuItem({
label: module.fullLabel(),
click: async () => {
await InteropServiceHelper.export(props.dispatch.bind(this), module, { sourceNoteIds: noteIds });
},
})
);
}
exportMenu.append(
new MenuItem({
label: `PDF - ${_('PDF File')}`,
click: () => {
props.dispatch({
type: 'WINDOW_COMMAND',
name: 'exportPdf',
noteIds: noteIds,
});
},
})
);
const exportMenuItem = new MenuItem({ label: _('Export'), submenu: exportMenu });
menu.append(exportMenuItem);
}
menu.append(
new MenuItem({
label: _('Delete'),
click: async () => {
await this.confirmDeleteNotes(noteIds);
},
})
);
return menu;
}
static async confirmDeleteNotes(noteIds) {
if (!noteIds.length) return;
let msg = '';
if (noteIds.length === 1) {
const note = await Note.load(noteIds[0]);
if (!note) return;
msg = _('Delete note "%s"?', substrWithEllipsis(note.title, 0, 32));
} else {
msg = _('Delete these %d notes?', noteIds.length);
}
const ok = bridge().showConfirmMessageBox(msg, {
buttons: [_('Delete'), _('Cancel')],
defaultId: 1,
});
if (!ok) return;
await Note.batchDelete(noteIds);
}
static async startExternalEditing(noteId) {
try {
const note = await Note.load(noteId);
ExternalEditWatcher.instance().openAndWatch(note);
} catch (error) {
bridge().showErrorMessageBox(_('Error opening note in editor: %s', error.message));
}
}
static async stopExternalEditing(noteId) {
ExternalEditWatcher.instance().stopWatching(noteId);
}
}
module.exports = NoteListUtils;
| 1 | 12,152 | It's not async | laurent22-joplin | js |
@@ -66,11 +66,11 @@ namespace Examples.Console
AppContext.SetSwitch("System.Net.Http.SocketsHttpHandler.Http2UnencryptedSupport", true);
providerBuilder
- .AddOtlpExporter(o =>
+ .AddOtlpExporter((exporterOptions, metricReaderOptions) =>
{
- o.MetricReaderType = MetricReaderType.Periodic;
- o.PeriodicExportingMetricReaderOptions.ExportIntervalMilliseconds = options.DefaultCollectionPeriodMilliseconds;
- o.AggregationTemporality = options.IsDelta ? AggregationTemporality.Delta : AggregationTemporality.Cumulative;
+ metricReaderOptions.MetricReaderType = MetricReaderType.Periodic;
+ metricReaderOptions.PeriodicExportingMetricReaderOptions.ExportIntervalMilliseconds = options.DefaultCollectionPeriodMilliseconds;
+ metricReaderOptions.Temporality = options.IsDelta ? AggregationTemporality.Delta : AggregationTemporality.Cumulative;
});
}
else | 1 | // <copyright file="TestMetrics.cs" company="OpenTelemetry Authors">
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// </copyright>
using System;
using System.Collections.Generic;
using System.Diagnostics;
using System.Diagnostics.Metrics;
using System.Threading;
using System.Threading.Tasks;
using OpenTelemetry;
using OpenTelemetry.Metrics;
using OpenTelemetry.Resources;
namespace Examples.Console
{
internal class TestMetrics
{
internal static object Run(MetricsOptions options)
{
using var meter = new Meter("TestMeter");
var providerBuilder = Sdk.CreateMeterProviderBuilder()
.SetResourceBuilder(ResourceBuilder.CreateDefault().AddService("myservice"))
.AddMeter(meter.Name); // All instruments from this meter are enabled.
if (options.UseExporter.ToLower() == "otlp")
{
/*
* Prerequisite to run this example:
* Set up an OpenTelemetry Collector to run on local docker.
*
* Open a terminal window at the examples/Console/ directory and
* launch the OpenTelemetry Collector with an OTLP receiver, by running:
*
* - On Unix based systems use:
* docker run --rm -it -p 4317:4317 -v $(pwd):/cfg otel/opentelemetry-collector:0.33.0 --config=/cfg/otlp-collector-example/config.yaml
*
* - On Windows use:
* docker run --rm -it -p 4317:4317 -v "%cd%":/cfg otel/opentelemetry-collector:0.33.0 --config=/cfg/otlp-collector-example/config.yaml
*
* Open another terminal window at the examples/Console/ directory and
* launch the OTLP example by running:
*
* dotnet run metrics --useExporter otlp
*
* The OpenTelemetry Collector will output all received metrics to the stdout of its terminal.
*
*/
// Adding the OtlpExporter creates a GrpcChannel.
// This switch must be set before creating a GrpcChannel when calling an insecure gRPC service.
// See: https://docs.microsoft.com/aspnet/core/grpc/troubleshoot#call-insecure-grpc-services-with-net-core-client
AppContext.SetSwitch("System.Net.Http.SocketsHttpHandler.Http2UnencryptedSupport", true);
providerBuilder
.AddOtlpExporter(o =>
{
o.MetricReaderType = MetricReaderType.Periodic;
o.PeriodicExportingMetricReaderOptions.ExportIntervalMilliseconds = options.DefaultCollectionPeriodMilliseconds;
o.AggregationTemporality = options.IsDelta ? AggregationTemporality.Delta : AggregationTemporality.Cumulative;
});
}
else
{
providerBuilder
.AddConsoleExporter(o =>
{
o.MetricReaderType = MetricReaderType.Periodic;
o.PeriodicExportingMetricReaderOptions.ExportIntervalMilliseconds = options.DefaultCollectionPeriodMilliseconds;
o.AggregationTemporality = options.IsDelta ? AggregationTemporality.Delta : AggregationTemporality.Cumulative;
});
}
using var provider = providerBuilder.Build();
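            // Editor's note (hedged): the diff under review switches the AddOtlpExporter
            // call above to a two-parameter overload, roughly
            //
            //     .AddOtlpExporter((exporterOptions, metricReaderOptions) => { ... })
            //
            // with the reader settings moved onto metricReaderOptions. As the reviewer
            // notes, if exporterOptions is never touched the lambda could name it "_"
            // instead. Both spellings are sketches taken from that diff and the review,
            // not verified against the final OpenTelemetry API.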
Counter<int> counter = null;
if (options.FlagCounter ?? true)
{
counter = meter.CreateCounter<int>("counter", "things", "A count of things");
}
Histogram<int> histogram = null;
if (options.FlagHistogram ?? false)
{
histogram = meter.CreateHistogram<int>("histogram");
}
if (options.FlagGauge ?? false)
{
var observableCounter = meter.CreateObservableGauge("gauge", () =>
{
return new List<Measurement<int>>()
{
new Measurement<int>(
(int)Process.GetCurrentProcess().PrivateMemorySize64,
new KeyValuePair<string, object>("tag1", "value1")),
};
});
}
var cts = new CancellationTokenSource();
var tasks = new List<Task>();
for (int i = 0; i < options.NumTasks; i++)
{
var taskno = i;
tasks.Add(Task.Run(() =>
{
System.Console.WriteLine($"Task started {taskno + 1}/{options.NumTasks}.");
var loops = 0;
while (!cts.IsCancellationRequested)
{
if (options.MaxLoops > 0 && loops >= options.MaxLoops)
{
break;
}
histogram?.Record(10);
histogram?.Record(
100,
new KeyValuePair<string, object>("tag1", "value1"));
histogram?.Record(
200,
new KeyValuePair<string, object>("tag1", "value2"),
new KeyValuePair<string, object>("tag2", "value2"));
histogram?.Record(
100,
new KeyValuePair<string, object>("tag1", "value1"));
histogram?.Record(
200,
new KeyValuePair<string, object>("tag2", "value2"),
new KeyValuePair<string, object>("tag1", "value2"));
counter?.Add(10);
counter?.Add(
100,
new KeyValuePair<string, object>("tag1", "value1"));
counter?.Add(
200,
new KeyValuePair<string, object>("tag1", "value2"),
new KeyValuePair<string, object>("tag2", "value2"));
counter?.Add(
100,
new KeyValuePair<string, object>("tag1", "value1"));
counter?.Add(
200,
new KeyValuePair<string, object>("tag2", "value2"),
new KeyValuePair<string, object>("tag1", "value2"));
loops++;
}
}));
}
cts.CancelAfter(options.RunTime);
System.Console.WriteLine($"Wait for {options.RunTime} milliseconds.");
while (!cts.IsCancellationRequested)
{
Task.Delay(1000).Wait();
}
Task.WaitAll(tasks.ToArray());
return null;
}
}
}
| 1 | 23,017 | Maybe we could add some use of `exporterOptions` in this example, otherwise the proper code would replace this variable name with an underscore? | open-telemetry-opentelemetry-dotnet | .cs |
@@ -41,6 +41,11 @@ class Search extends WidgetBase
*/
public $scope;
+ /**
+ * @var bool Search on every key stroke.
+ */
+ public $everykey = true;
+
//
// Object properties
// | 1 | <?php namespace Backend\Widgets;
use Lang;
use Backend\Classes\WidgetBase;
/**
* Search Widget
 * Used for building a toolbar; renders a search container.
*
* @package october\backend
* @author Alexey Bobkov, Samuel Georges
*/
class Search extends WidgetBase
{
//
// Configurable properties
//
/**
* @var string Search placeholder text.
*/
public $prompt;
/**
     * @var bool Field will grow when selected.
*/
public $growable = true;
/**
* @var string Custom partial file definition, in context of the controller.
*/
public $partial;
/**
* @var string Defines the search mode. Commonly passed to the searchWhere() query.
*/
public $mode;
/**
* @var string Custom scope method name. Commonly passed to the query.
*/
public $scope;
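    // Editor's note (hedged): the diff under review adds one more configurable
    // property right after $scope. With the reviewer's naming suggestion applied,
    // a sketch of that addition (the name and default are assumptions, and the new
    // option would presumably also be listed in fillFromConfig() inside init()):
    //
    //     /**
    //      * @var bool Fire the search on every key stroke.
    //      */
    //     public $fireOnEveryKey = true;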
//
// Object properties
//
/**
* {@inheritDoc}
*/
protected $defaultAlias = 'search';
/**
* @var string Active search term pulled from session data.
*/
protected $activeTerm;
/**
* @var array List of CSS classes to apply to the list container element.
*/
public $cssClasses = [];
/**
* Initialize the widget, called by the constructor and free from its parameters.
*/
public function init()
{
$this->fillFromConfig([
'prompt',
'partial',
'growable',
'scope',
'mode',
]);
/*
* Add CSS class styles
*/
$this->cssClasses[] = 'icon search';
if ($this->growable) {
$this->cssClasses[] = 'growable';
}
}
/**
* Renders the widget.
*/
public function render()
{
$this->prepareVars();
if ($this->partial) {
return $this->controller->makePartial($this->partial);
}
else {
return $this->makePartial('search');
}
}
/**
* Prepares the view data
*/
public function prepareVars()
{
$this->vars['cssClasses'] = implode(' ', $this->cssClasses);
$this->vars['placeholder'] = Lang::get($this->prompt);
$this->vars['value'] = $this->getActiveTerm();
}
/**
* Search field has been submitted.
*/
public function onSubmit()
{
/*
* Save or reset search term in session
*/
$this->setActiveTerm(post($this->getName()));
/*
* Trigger class event, merge results as viewable array
*/
$params = func_get_args();
$result = $this->fireEvent('search.submit', [$params]);
if ($result && is_array($result)) {
return call_user_func_array('array_merge', $result);
}
}
/**
* Returns an active search term for this widget instance.
*/
public function getActiveTerm()
{
return $this->activeTerm = $this->getSession('term', '');
}
/**
* Sets an active search term for this widget instance.
*/
public function setActiveTerm($term)
{
if (strlen($term)) {
$this->putSession('term', $term);
}
else {
$this->resetSession();
}
$this->activeTerm = $term;
}
/**
* Returns a value suitable for the field name property.
* @return string
*/
public function getName()
{
return $this->alias . '[term]';
}
}
| 1 | 11,982 | Maybe would be better to rename it to "onEveryKey" or "fireOnEveryKey". | octobercms-october | php |
@@ -832,7 +832,7 @@ public class FilePage implements java.io.Serializable {
// Always allow preview for PrivateUrlUser
return true;
} else {
- return FileUtil.isPreviewAllowed(fileMetadata);
+ return fileDownloadHelper.isPreviewAllowed(fileMetadata);
}
}
| 1 | /*
* To change this license header, choose License Headers in Project Properties.
* To change this template file, choose Tools | Templates
* and open the template in the editor.
*/
package edu.harvard.iq.dataverse;
import edu.harvard.iq.dataverse.DatasetVersionServiceBean.RetrieveDatasetVersionResponse;
import edu.harvard.iq.dataverse.dataaccess.SwiftAccessIO;
import edu.harvard.iq.dataverse.authorization.AuthenticationServiceBean;
import edu.harvard.iq.dataverse.authorization.Permission;
import edu.harvard.iq.dataverse.authorization.users.ApiToken;
import edu.harvard.iq.dataverse.authorization.users.AuthenticatedUser;
import edu.harvard.iq.dataverse.authorization.users.PrivateUrlUser;
import edu.harvard.iq.dataverse.authorization.users.User;
import edu.harvard.iq.dataverse.dataaccess.StorageIO;
import edu.harvard.iq.dataverse.engine.command.Command;
import edu.harvard.iq.dataverse.engine.command.exception.CommandException;
import edu.harvard.iq.dataverse.engine.command.exception.IllegalCommandException;
import edu.harvard.iq.dataverse.engine.command.impl.CreateNewDatasetCommand;
import edu.harvard.iq.dataverse.engine.command.impl.PersistProvFreeFormCommand;
import edu.harvard.iq.dataverse.engine.command.impl.RestrictFileCommand;
import edu.harvard.iq.dataverse.engine.command.impl.UpdateDatasetVersionCommand;
import edu.harvard.iq.dataverse.export.ExportException;
import edu.harvard.iq.dataverse.export.ExportService;
import edu.harvard.iq.dataverse.export.spi.Exporter;
import edu.harvard.iq.dataverse.externaltools.ExternalTool;
import edu.harvard.iq.dataverse.externaltools.ExternalToolHandler;
import edu.harvard.iq.dataverse.externaltools.ExternalToolServiceBean;
import edu.harvard.iq.dataverse.makedatacount.MakeDataCountLoggingServiceBean;
import edu.harvard.iq.dataverse.makedatacount.MakeDataCountLoggingServiceBean.MakeDataCountEntry;
import edu.harvard.iq.dataverse.makedatacount.MakeDataCountUtil;
import edu.harvard.iq.dataverse.privateurl.PrivateUrl;
import edu.harvard.iq.dataverse.privateurl.PrivateUrlServiceBean;
import edu.harvard.iq.dataverse.settings.SettingsServiceBean;
import edu.harvard.iq.dataverse.util.BundleUtil;
import edu.harvard.iq.dataverse.util.FileUtil;
import edu.harvard.iq.dataverse.util.JsfHelper;
import static edu.harvard.iq.dataverse.util.JsfHelper.JH;
import edu.harvard.iq.dataverse.util.SystemConfig;
import java.io.IOException;
import java.time.format.DateTimeFormatter;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.Comparator;
import java.util.List;
import java.util.Set;
import java.util.logging.Logger;
import javax.ejb.EJB;
import javax.ejb.EJBException;
import javax.faces.application.FacesMessage;
import javax.faces.component.UIComponent;
import javax.faces.context.FacesContext;
import javax.faces.validator.ValidatorException;
import javax.faces.view.ViewScoped;
import javax.inject.Inject;
import javax.inject.Named;
import javax.validation.ConstraintViolation;
import org.primefaces.PrimeFaces;
import org.primefaces.component.tabview.TabView;
import org.primefaces.event.TabChangeEvent;
/**
*
* @author skraffmi
*
*/
@ViewScoped
@Named("FilePage")
public class FilePage implements java.io.Serializable {
private FileMetadata fileMetadata;
private Long fileId;
private String version;
private DataFile file;
private GuestbookResponse guestbookResponse;
private int selectedTabIndex;
private Dataset editDataset;
private Dataset dataset;
private List<DatasetVersion> datasetVersionsForTab;
private List<FileMetadata> fileMetadatasForTab;
private String persistentId;
private List<ExternalTool> configureTools;
private List<ExternalTool> exploreTools;
private List<ExternalTool> toolsWithPreviews;
private Long datasetVersionId;
/**
* Have the terms been met so that the Preview tab can show the preview?
*/
private boolean termsMet;
@EJB
DataFileServiceBean datafileService;
@EJB
DatasetVersionServiceBean datasetVersionService;
@EJB
PermissionServiceBean permissionService;
@EJB
SettingsServiceBean settingsService;
@EJB
FileDownloadServiceBean fileDownloadService;
@EJB
GuestbookResponseServiceBean guestbookResponseService;
@EJB
AuthenticationServiceBean authService;
@EJB
SystemConfig systemConfig;
@Inject
DataverseSession session;
@EJB
EjbDataverseEngine commandEngine;
@EJB
ExternalToolServiceBean externalToolService;
@EJB
PrivateUrlServiceBean privateUrlService;
@Inject
DataverseRequestServiceBean dvRequestService;
@Inject
PermissionsWrapper permissionsWrapper;
@Inject
FileDownloadHelper fileDownloadHelper;
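    // Editor's note (hedged): the diff attached to this file swaps a static
    // FileUtil.isPreviewAllowed(fileMetadata) call for this injected helper,
    // i.e. fileDownloadHelper.isPreviewAllowed(fileMetadata), presumably so the
    // preview decision can account for the current session; that rationale is an
    // assumption, only the call change itself comes from the diff.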
@Inject
MakeDataCountLoggingServiceBean mdcLogService;
@Inject
SettingsWrapper settingsWrapper;
@Inject
EmbargoServiceBean embargoService;
private static final Logger logger = Logger.getLogger(FilePage.class.getCanonicalName());
private boolean fileDeleteInProgress = false;
public String init() {
if (fileId != null || persistentId != null) {
// ---------------------------------------
// Set the file and datasetVersion
// ---------------------------------------
if (fileId != null) {
file = datafileService.find(fileId);
} else if (persistentId != null) {
file = datafileService.findByGlobalId(persistentId);
if (file != null) {
fileId = file.getId();
}
}
if (file == null || fileId == null) {
return permissionsWrapper.notFound();
}
// Is the Dataset harvested?
if (file.getOwner().isHarvested()) {
// if so, we'll simply forward to the remote URL for the original
// source of this harvested dataset:
String originalSourceURL = file.getOwner().getRemoteArchiveURL();
if (originalSourceURL != null && !originalSourceURL.equals("")) {
logger.fine("redirecting to " + originalSourceURL);
try {
FacesContext.getCurrentInstance().getExternalContext().redirect(originalSourceURL);
} catch (IOException ioex) {
// must be a bad URL...
// we don't need to do anything special here - we'll redirect
// to the local 404 page, below.
logger.warning("failed to issue a redirect to " + originalSourceURL);
}
}
return permissionsWrapper.notFound();
}
RetrieveDatasetVersionResponse retrieveDatasetVersionResponse;
Long getDatasetVersionID = null;
if (datasetVersionId == null) {
retrieveDatasetVersionResponse = datasetVersionService.selectRequestedVersion(file.getOwner().getVersions(), version);
getDatasetVersionID = retrieveDatasetVersionResponse.getDatasetVersion().getId();
} else {
getDatasetVersionID = datasetVersionId;
}
fileMetadata = datafileService.findFileMetadataByDatasetVersionIdAndDataFileId(getDatasetVersionID, fileId);
if (fileMetadata == null) {
logger.fine("fileMetadata is null! Checking finding most recent version file was in.");
fileMetadata = datafileService.findMostRecentVersionFileIsIn(file);
if (fileMetadata == null) {
return permissionsWrapper.notFound();
}
}
            // If this DatasetVersion is unpublished and the user doesn't have permission to view it:
// > Go to the Login page
//
            // Check permissions
Boolean authorized = (fileMetadata.getDatasetVersion().isReleased())
|| (!fileMetadata.getDatasetVersion().isReleased() && this.canViewUnpublishedDataset());
if (!authorized) {
return permissionsWrapper.notAuthorized();
}
//termsOfAccess = fileMetadata.getDatasetVersion().getTermsOfUseAndAccess().getTermsOfAccess();
//fileAccessRequest = fileMetadata.getDatasetVersion().getTermsOfUseAndAccess().isFileAccessRequest();
this.guestbookResponse = this.guestbookResponseService.initGuestbookResponseForFragment(fileMetadata, session);
if(fileMetadata.getDatasetVersion().isPublished()) {
MakeDataCountEntry entry = new MakeDataCountEntry(FacesContext.getCurrentInstance(), dvRequestService, fileMetadata.getDatasetVersion());
mdcLogService.logEntry(entry);
}
// Find external tools based on their type, the file content type, and whether
// ingest has created a derived file for that type
// Currently, tabular data files are the only type of derived file created, so
// isTabularData() works - true for tabular types where a .tab file has been
// created and false for other mimetypes
String contentType = file.getContentType();
//For tabular data, indicate successful ingest by returning a contentType for the derived .tab file
if (file.isTabularData()) {
contentType=DataFileServiceBean.MIME_TYPE_TSV_ALT;
}
configureTools = externalToolService.findFileToolsByTypeAndContentType(ExternalTool.Type.CONFIGURE, contentType);
exploreTools = externalToolService.findFileToolsByTypeAndContentType(ExternalTool.Type.EXPLORE, contentType);
Collections.sort(exploreTools, CompareExternalToolName);
toolsWithPreviews = sortExternalTools();
if(!toolsWithPreviews.isEmpty()){
setSelectedTool(toolsWithPreviews.get(0));
}
} else {
return permissionsWrapper.notFound();
}
return null;
}
private boolean canViewUnpublishedDataset() {
return permissionsWrapper.canViewUnpublishedDataset( dvRequestService.getDataverseRequest(), fileMetadata.getDatasetVersion().getDataset());
}
public boolean canPublishDataset(){
return permissionsWrapper.canIssuePublishDatasetCommand(fileMetadata.getDatasetVersion().getDataset());
}
public FileMetadata getFileMetadata() {
return fileMetadata;
}
public Long getDatasetVersionId() {
return datasetVersionId;
}
public void setDatasetVersionId(Long datasetVersionId) {
this.datasetVersionId = datasetVersionId;
}
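    // Returns the PREVIEW-type external tools that apply to this file's content type, sorted by display name.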
private List<ExternalTool> sortExternalTools(){
List<ExternalTool> retList = externalToolService.findFileToolsByTypeAndContentType(ExternalTool.Type.PREVIEW, file.getContentType());
Collections.sort(retList, CompareExternalToolName);
return retList;
}
public boolean isDownloadPopupRequired() {
if(fileMetadata.getId() == null || fileMetadata.getDatasetVersion().getId() == null ){
return false;
}
return FileUtil.isDownloadPopupRequired(fileMetadata.getDatasetVersion());
}
public boolean isRequestAccessPopupRequired() {
if(fileMetadata.getId() == null || fileMetadata.getDatasetVersion().getId() == null ){
return false;
}
return FileUtil.isRequestAccessPopupRequired(fileMetadata.getDatasetVersion());
}
public void setFileMetadata(FileMetadata fileMetadata) {
this.fileMetadata = fileMetadata;
}
public DataFile getFile() {
return file;
}
public void setFile(DataFile file) {
this.file = file;
}
public Long getFileId() {
return fileId;
}
public void setFileId(Long fileId) {
this.fileId = fileId;
}
public String getVersion() {
return version;
}
public void setVersion(String version) {
this.version = version;
}
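    // Returns the metadata export formats available to end users as (display name, export API URL) pairs
    // for the dataset this file belongs to; exporters meant only for harvesting clients are skipped.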
public List< String[]> getExporters(){
List<String[]> retList = new ArrayList<>();
String myHostURL = systemConfig.getDataverseSiteUrl();
for (String [] provider : ExportService.getInstance().getExportersLabels() ){
String formatName = provider[1];
String formatDisplayName = provider[0];
Exporter exporter = null;
try {
exporter = ExportService.getInstance().getExporter(formatName);
} catch (ExportException ex) {
exporter = null;
}
if (exporter != null && exporter.isAvailableToUsers()) {
// Not all metadata exports should be presented to the web users!
// Some are only for harvesting clients.
String[] temp = new String[2];
temp[0] = formatDisplayName;
temp[1] = myHostURL + "/api/datasets/export?exporter=" + formatName + "&persistentId=" + fileMetadata.getDatasetVersion().getDataset().getGlobalIdString();
retList.add(temp);
}
}
return retList;
}
public String saveProvFreeform(String freeformTextInput, DataFile dataFileFromPopup) throws CommandException {
editDataset = this.file.getOwner();
file.setProvEntityName(dataFileFromPopup.getProvEntityName()); //passing this value into the file being saved here is pretty hacky.
Command cmd;
for (FileMetadata fmw : editDataset.getEditVersion().getFileMetadatas()) {
if (fmw.getDataFile().equals(this.fileMetadata.getDataFile())) {
cmd = new PersistProvFreeFormCommand(dvRequestService.getDataverseRequest(), file, freeformTextInput);
commandEngine.submit(cmd);
}
}
save();
init();
return returnToDraftVersion();
}
public String restrictFile(boolean restricted) throws CommandException{
String fileNames = null;
editDataset = this.file.getOwner();
if (restricted) { // get values from access popup
editDataset.getEditVersion().getTermsOfUseAndAccess().setTermsOfAccess(termsOfAccess);
editDataset.getEditVersion().getTermsOfUseAndAccess().setFileAccessRequest(fileAccessRequest);
}
Command cmd;
for (FileMetadata fmw : editDataset.getEditVersion().getFileMetadatas()) {
if (fmw.getDataFile().equals(this.fileMetadata.getDataFile())) {
                fileNames = (fileNames == null) ? fmw.getLabel() : fileNames + ", " + fmw.getLabel();
//fmw.setRestricted(restricted);
cmd = new RestrictFileCommand(fmw.getDataFile(), dvRequestService.getDataverseRequest(), restricted);
commandEngine.submit(cmd);
}
}
if (fileNames != null) {
String successMessage = BundleUtil.getStringFromBundle("file.restricted.success");
successMessage = successMessage.replace("{0}", fileNames);
JsfHelper.addFlashMessage(successMessage);
}
save();
init();
return returnToDraftVersion();
}
private List<FileMetadata> filesToBeDeleted = new ArrayList<>();
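    // Removes this file from the draft version: already-persisted files are queued in filesToBeDeleted for
    // the update command, while files that only exist in the draft are dropped from its fileMetadatas list.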
public String deleteFile() {
String fileNames = this.getFileMetadata().getLabel();
editDataset = this.getFileMetadata().getDataFile().getOwner();
FileMetadata markedForDelete = null;
for (FileMetadata fmd : editDataset.getEditVersion().getFileMetadatas()) {
if (fmd.getDataFile().getId().equals(fileId)) {
markedForDelete = fmd;
}
}
if (markedForDelete.getId() != null) {
// the file already exists as part of this dataset
// so all we remove is the file from the fileMetadatas (for display)
            // and let the delete be handled in the command (by adding it to the filesToBeDeleted list)
editDataset.getEditVersion().getFileMetadatas().remove(markedForDelete);
filesToBeDeleted.add(markedForDelete);
} else {
List<FileMetadata> filesToKeep = new ArrayList<>();
for (FileMetadata fmo : editDataset.getEditVersion().getFileMetadatas()) {
if (!fmo.getDataFile().getId().equals(this.getFile().getId())) {
filesToKeep.add(fmo);
}
}
editDataset.getEditVersion().setFileMetadatas(filesToKeep);
}
fileDeleteInProgress = true;
save();
return returnToDatasetOnly();
}
private int activeTabIndex;
public int getActiveTabIndex() {
return activeTabIndex;
}
public void setActiveTabIndex(int activeTabIndex) {
this.activeTabIndex = activeTabIndex;
}
public void tabChanged(TabChangeEvent event) {
TabView tv = (TabView) event.getComponent();
this.activeTabIndex = tv.getActiveIndex();
if (this.activeTabIndex == 1 || this.activeTabIndex == 2 ) {
setFileMetadatasForTab(loadFileMetadataTabList());
} else {
setFileMetadatasForTab( new ArrayList<>());
}
}
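    // Builds the list shown on the versions tab: one FileMetadata per dataset version the user may view,
    // with a FileVersionDifference computed against the prior version; a dummy entry is added for
    // versions in which the file (or a related file) does not appear.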
private List<FileMetadata> loadFileMetadataTabList() {
List<DataFile> allfiles = allRelatedFiles();
List<FileMetadata> retList = new ArrayList<>();
for (DatasetVersion versionLoop : fileMetadata.getDatasetVersion().getDataset().getVersions()) {
boolean foundFmd = false;
if (versionLoop.isReleased() || versionLoop.isDeaccessioned() || permissionService.on(fileMetadata.getDatasetVersion().getDataset()).has(Permission.ViewUnpublishedDataset)) {
foundFmd = false;
for (DataFile df : allfiles) {
FileMetadata fmd = datafileService.findFileMetadataByDatasetVersionIdAndDataFileId(versionLoop.getId(), df.getId());
if (fmd != null) {
fmd.setContributorNames(datasetVersionService.getContributorsNames(versionLoop));
FileVersionDifference fvd = new FileVersionDifference(fmd, getPreviousFileMetadata(fmd));
fmd.setFileVersionDifference(fvd);
retList.add(fmd);
foundFmd = true;
break;
}
}
                // no FileMetadata was found for this version, so make a dummy one
if (!foundFmd) {
FileMetadata dummy = new FileMetadata();
dummy.setDatasetVersion(versionLoop);
dummy.setDataFile(null);
FileVersionDifference fvd = new FileVersionDifference(dummy, getPreviousFileMetadata(versionLoop));
dummy.setFileVersionDifference(fvd);
retList.add(dummy);
}
}
}
return retList;
}
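    // Looks up this file's metadata (or that of a related file) in the dataset version that precedes the
    // given one, returning null if there is no prior version or the file is absent from it.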
private FileMetadata getPreviousFileMetadata(DatasetVersion currentversion) {
List<DataFile> allfiles = allRelatedFiles();
boolean foundCurrent = false;
DatasetVersion priorVersion = null;
for (DatasetVersion versionLoop : fileMetadata.getDatasetVersion().getDataset().getVersions()) {
if (foundCurrent) {
priorVersion = versionLoop;
break;
}
if (versionLoop.equals(currentversion)) {
foundCurrent = true;
}
}
if (priorVersion != null && priorVersion.getFileMetadatasSorted() != null) {
for (FileMetadata fmdTest : priorVersion.getFileMetadatasSorted()) {
for (DataFile fileTest : allfiles) {
if (fmdTest.getDataFile().equals(fileTest)) {
return fmdTest;
}
}
}
}
return null;
}
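    // Finds the metadata of the previous revision of the given file: first by scanning the prior dataset
    // version's metadata, then by a direct lookup using the prior version and previous datafile ids.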
private FileMetadata getPreviousFileMetadata(FileMetadata fmdIn){
DataFile dfPrevious = datafileService.findPreviousFile(fmdIn.getDataFile());
DatasetVersion dvPrevious = null;
boolean gotCurrent = false;
for (DatasetVersion dvloop: fileMetadata.getDatasetVersion().getDataset().getVersions()){
if(gotCurrent){
dvPrevious = dvloop;
break;
}
if(dvloop.equals(fmdIn.getDatasetVersion())){
gotCurrent = true;
}
}
List<DataFile> allfiles = allRelatedFiles();
if (dvPrevious != null && dvPrevious.getFileMetadatasSorted() != null) {
for (FileMetadata fmdTest : dvPrevious.getFileMetadatasSorted()) {
for (DataFile fileTest : allfiles) {
if (fmdTest.getDataFile().equals(fileTest)) {
return fmdTest;
}
}
}
}
Long dfId = fmdIn.getDataFile().getId();
if (dfPrevious != null){
dfId = dfPrevious.getId();
}
Long versionId = null;
if (dvPrevious !=null){
versionId = dvPrevious.getId();
}
FileMetadata fmd = datafileService.findFileMetadataByDatasetVersionIdAndDataFileId(versionId, dfId);
return fmd;
}
public List<FileMetadata> getFileMetadatasForTab() {
return fileMetadatasForTab;
}
public void setFileMetadatasForTab(List<FileMetadata> fileMetadatasForTab) {
this.fileMetadatasForTab = fileMetadatasForTab;
}
public String getPersistentId() {
return persistentId;
}
public void setPersistentId(String persistentId) {
this.persistentId = persistentId;
}
public List<DatasetVersion> getDatasetVersionsForTab() {
return datasetVersionsForTab;
}
public void setDatasetVersionsForTab(List<DatasetVersion> datasetVersionsForTab) {
this.datasetVersionsForTab = datasetVersionsForTab;
}
public boolean isTermsMet() {
return termsMet;
}
public void setTermsMet(boolean termsMet) {
this.termsMet = termsMet;
}
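    // Validates the draft version and submits an UpdateDatasetVersionCommand; when a file delete is in
    // progress, the physical file is removed as well once the database update succeeds.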
public String save() {
// Validate
Set<ConstraintViolation> constraintViolations = this.fileMetadata.getDatasetVersion().validate();
if (!constraintViolations.isEmpty()) {
//JsfHelper.addFlashMessage(JH.localize("dataset.message.validationError"));
fileDeleteInProgress = false;
JH.addMessage(FacesMessage.SEVERITY_ERROR, BundleUtil.getStringFromBundle("dataset.message.validationError"));
//FacesContext.getCurrentInstance().addMessage(null, new FacesMessage(FacesMessage.SEVERITY_ERROR, "Validation Error", "See below for details."));
return "";
}
Command<Dataset> cmd;
boolean updateCommandSuccess = false;
Long deleteFileId = null;
String deleteStorageLocation = null;
if (!filesToBeDeleted.isEmpty()) {
// We want to delete the file (there's always only one file with this page)
editDataset.getEditVersion().getFileMetadatas().remove(filesToBeDeleted.get(0));
deleteFileId = filesToBeDeleted.get(0).getDataFile().getId();
deleteStorageLocation = datafileService.getPhysicalFileToDelete(filesToBeDeleted.get(0).getDataFile());
}
try {
cmd = new UpdateDatasetVersionCommand(editDataset, dvRequestService.getDataverseRequest(), filesToBeDeleted);
commandEngine.submit(cmd);
updateCommandSuccess = true;
} catch (EJBException ex) {
StringBuilder error = new StringBuilder();
error.append(ex).append(" ");
error.append(ex.getMessage()).append(" ");
Throwable cause = ex;
while (cause.getCause()!= null) {
cause = cause.getCause();
error.append(cause).append(" ");
error.append(cause.getMessage()).append(" ");
}
            logger.severe("Couldn't save the dataset version: " + error.toString());
            return null;
} catch (CommandException ex) {
fileDeleteInProgress = false;
FacesContext.getCurrentInstance().addMessage(null, new FacesMessage(FacesMessage.SEVERITY_ERROR, BundleUtil.getStringFromBundle("dataset.save.fail"), " - " + ex.toString()));
return null;
}
if (fileDeleteInProgress) {
if (updateCommandSuccess) {
if (deleteStorageLocation != null) {
// Finalize the delete of the physical file
// (File service will double-check that the datafile no
// longer exists in the database, before proceeding to
// delete the physical file)
try {
datafileService.finalizeFileDelete(deleteFileId, deleteStorageLocation);
} catch (IOException ioex) {
logger.warning("Failed to delete the physical file associated with the deleted datafile id="
+ deleteFileId + ", storage location: " + deleteStorageLocation);
}
}
}
JsfHelper.addSuccessMessage(BundleUtil.getStringFromBundle("file.message.deleteSuccess"));
fileDeleteInProgress = false;
} else {
JsfHelper.addSuccessMessage(BundleUtil.getStringFromBundle("file.message.editSuccess"));
}
setVersion("DRAFT");
return "";
}
private Boolean thumbnailAvailable = null;
public boolean isThumbnailAvailable(FileMetadata fileMetadata) {
// new and optimized logic:
// - check download permission here (should be cached - so it's free!)
// - only then ask the file service if the thumbnail is available/exists.
// the service itself no longer checks download permissions.
// (Also, cache the result the first time the check is performed...
// remember - methods referenced in "rendered=..." attributes are
// called *multiple* times as the page is loading!)
if (thumbnailAvailable != null) {
return thumbnailAvailable;
}
if (!fileDownloadHelper.canDownloadFile(fileMetadata)) {
thumbnailAvailable = false;
} else {
thumbnailAvailable = datafileService.isThumbnailAvailable(fileMetadata.getDataFile());
}
return thumbnailAvailable;
}
private String returnToDatasetOnly(){
return "/dataset.xhtml?persistentId=" + editDataset.getGlobalIdString() + "&version=DRAFT" + "&faces-redirect=true";
}
private String returnToDraftVersion(){
return "/file.xhtml?fileId=" + fileId + "&version=DRAFT&faces-redirect=true";
}
public FileDownloadServiceBean getFileDownloadService() {
return fileDownloadService;
}
public void setFileDownloadService(FileDownloadServiceBean fileDownloadService) {
this.fileDownloadService = fileDownloadService;
}
public GuestbookResponseServiceBean getGuestbookResponseService() {
return guestbookResponseService;
}
public void setGuestbookResponseService(GuestbookResponseServiceBean guestbookResponseService) {
this.guestbookResponseService = guestbookResponseService;
}
public GuestbookResponse getGuestbookResponse() {
return guestbookResponse;
}
public void setGuestbookResponse(GuestbookResponse guestbookResponse) {
this.guestbookResponse = guestbookResponse;
}
public boolean canUpdateDataset() {
return permissionsWrapper.canUpdateDataset(dvRequestService.getDataverseRequest(), this.file.getOwner());
}
public int getSelectedTabIndex() {
return selectedTabIndex;
}
public void setSelectedTabIndex(int selectedTabIndex) {
this.selectedTabIndex = selectedTabIndex;
}
public boolean isSwiftStorage () {
Boolean swiftBool = false;
if (file.getStorageIdentifier().startsWith("swift://")){
swiftBool = true;
}
return swiftBool;
}
public boolean showComputeButton () {
if (isSwiftStorage() && (settingsService.getValueForKey(SettingsServiceBean.Key.ComputeBaseUrl) != null)) {
return true;
}
return false;
}
public SwiftAccessIO getSwiftObject() {
try {
StorageIO<DataFile> storageIO = getFile().getStorageIO();
if (storageIO != null && storageIO instanceof SwiftAccessIO) {
return (SwiftAccessIO)storageIO;
} else {
logger.fine("FilePage: Failed to cast storageIO as SwiftAccessIO");
}
} catch (IOException e) {
logger.fine("FilePage: Failed to get storageIO");
}
return null;
}
public String getSwiftContainerName(){
SwiftAccessIO swiftObject = getSwiftObject();
try {
swiftObject.open();
return swiftObject.getSwiftContainerName();
} catch (IOException e){
logger.info("FilePage: Failed to open swift object");
}
return "";
}
public String getComputeUrl() throws IOException {
SwiftAccessIO swiftObject = getSwiftObject();
if (swiftObject != null) {
swiftObject.open();
//generate a temp url for a file
if (settingsService.isTrueForKey(SettingsServiceBean.Key.PublicInstall, false)) {
return settingsService.getValueForKey(SettingsServiceBean.Key.ComputeBaseUrl) + "?" + this.getFile().getOwner().getGlobalIdString() + "=" + swiftObject.getSwiftFileName();
}
return settingsService.getValueForKey(SettingsServiceBean.Key.ComputeBaseUrl) + "?" + this.getFile().getOwner().getGlobalIdString() + "=" + swiftObject.getSwiftFileName() + "&temp_url_sig=" + swiftObject.getTempUrlSignature() + "&temp_url_expires=" + swiftObject.getTempUrlExpiry();
}
return "";
}
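    // Collects this datafile together with every file sharing its root datafile id (the chain of
    // replaced/replacement files); if the file was never replaced, only the file itself is returned.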
private List<DataFile> allRelatedFiles() {
List<DataFile> dataFiles = new ArrayList<>();
DataFile dataFileToTest = fileMetadata.getDataFile();
Long rootDataFileId = dataFileToTest.getRootDataFileId();
if (rootDataFileId < 0) {
dataFiles.add(dataFileToTest);
} else {
dataFiles.addAll(datafileService.findAllRelatedByRootDatafileId(rootDataFileId));
}
return dataFiles;
}
    // wrapper method to check whether the file has been deleted (or replaced) in the current version
public boolean isDeletedFile () {
if (file.getDeleted() == null) {
file.setDeleted(datafileService.hasBeenDeleted(file));
}
return file.getDeleted();
}
    /**
     * To help with replace development.
     * @return true if this file is a replacement for another file
     */
public boolean isReplacementFile(){
return this.datafileService.isReplacementFile(this.getFile());
}
public boolean isPubliclyDownloadable() {
return FileUtil.isPubliclyDownloadable(fileMetadata);
}
/**
* In Dataverse 4.19 and below file preview was determined by
* isPubliclyDownloadable. Now we always allow a PrivateUrlUser to preview
* files.
*/
public boolean isPreviewAllowed() {
if (session.getUser() instanceof PrivateUrlUser) {
// Always allow preview for PrivateUrlUser
return true;
} else {
return FileUtil.isPreviewAllowed(fileMetadata);
}
}
private Boolean lockedFromEditsVar;
private Boolean lockedFromDownloadVar;
/**
* Authors are not allowed to edit but curators are allowed - when Dataset is inReview
* For all other locks edit should be locked for all editors.
*/
public boolean isLockedFromEdits() {
if(null == dataset) {
dataset = fileMetadata.getDataFile().getOwner();
}
if(null == lockedFromEditsVar) {
try {
permissionService.checkEditDatasetLock(dataset, dvRequestService.getDataverseRequest(), new UpdateDatasetVersionCommand(dataset, dvRequestService.getDataverseRequest()));
lockedFromEditsVar = false;
} catch (IllegalCommandException ex) {
lockedFromEditsVar = true;
}
}
return lockedFromEditsVar;
}
public boolean isLockedFromDownload(){
if(null == dataset) {
dataset = fileMetadata.getDataFile().getOwner();
}
if (null == lockedFromDownloadVar) {
try {
permissionService.checkDownloadFileLock(dataset, dvRequestService.getDataverseRequest(), new CreateNewDatasetCommand(dataset, dvRequestService.getDataverseRequest()));
lockedFromDownloadVar = false;
} catch (IllegalCommandException ex) {
lockedFromDownloadVar = true;
}
}
return lockedFromDownloadVar;
}
public String getPublicDownloadUrl() {
try {
StorageIO<DataFile> storageIO = getFile().getStorageIO();
if (storageIO instanceof SwiftAccessIO) {
String fileDownloadUrl = null;
try {
SwiftAccessIO<DataFile> swiftIO = (SwiftAccessIO<DataFile>) storageIO;
swiftIO.open();
                //if it's a public install, let's just give users the permanent URL!
if (systemConfig.isPublicInstall()){
fileDownloadUrl = swiftIO.getRemoteUrl();
} else {
//TODO: if a user has access to this file, they should be given the swift url
// perhaps even we could use this as the "private url"
fileDownloadUrl = swiftIO.getTemporarySwiftUrl();
}
logger.info("Swift url: " + fileDownloadUrl);
return fileDownloadUrl;
} catch (Exception e) {
e.printStackTrace();
}
}
} catch (Exception e){
e.printStackTrace();
}
return FileUtil.getPublicDownloadUrl(systemConfig.getDataverseSiteUrl(), persistentId, fileId);
}
public List<ExternalTool> getConfigureTools() {
return configureTools;
}
public List<ExternalTool> getExploreTools() {
return exploreTools;
}
public List<ExternalTool> getToolsWithPreviews() {
return toolsWithPreviews;
}
private ExternalTool selectedTool;
public ExternalTool getSelectedTool() {
return selectedTool;
}
public void setSelectedTool(ExternalTool selectedTool) {
this.selectedTool = selectedTool;
}
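    // Builds the preview URL for the given external tool, passing the signed-in user's API token (or a
    // Private URL token) to the tool handler.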
public String preview(ExternalTool externalTool) {
ApiToken apiToken = null;
User user = session.getUser();
if (user instanceof AuthenticatedUser) {
apiToken = authService.findApiTokenByUser((AuthenticatedUser) user);
} else if (user instanceof PrivateUrlUser) {
PrivateUrlUser privateUrlUser = (PrivateUrlUser) user;
PrivateUrl privateUrl = privateUrlService.getPrivateUrlFromDatasetId(privateUrlUser.getDatasetId());
privateUrl.getToken();
apiToken = new ApiToken();
apiToken.setTokenString(privateUrl.getToken());
}
if(externalTool == null){
return "";
}
ExternalToolHandler externalToolHandler = new ExternalToolHandler(externalTool, file, apiToken, getFileMetadata(), session.getLocaleCode());
String toolUrl = externalToolHandler.getToolUrlForPreviewMode();
return toolUrl;
}
//Provenance fragment bean calls this to show error dialogs after popup failure
//This can probably be replaced by calling JsfHelper from the provpopup bean
public void showProvError() {
JH.addMessage(FacesMessage.SEVERITY_ERROR, BundleUtil.getStringFromBundle("file.metadataTab.provenance.error"));
}
private static final Comparator<ExternalTool> CompareExternalToolName = new Comparator<ExternalTool>() {
@Override
public int compare(ExternalTool o1, ExternalTool o2) {
return o1.getDisplayName().toUpperCase().compareTo(o2.getDisplayName().toUpperCase());
}
};
public void showPreview(GuestbookResponse guestbookResponse) {
boolean response = fileDownloadHelper.writeGuestbookAndShowPreview(guestbookResponse);
        if (response) {
termsMet = true;
} else {
JH.addMessage(FacesMessage.SEVERITY_ERROR, BundleUtil.getStringFromBundle("dataset.guestbookResponse.showPreview.errorMessage"), BundleUtil.getStringFromBundle("dataset.guestbookResponse.showPreview.errorDetail"));
}
}
private String termsOfAccess;
private boolean fileAccessRequest;
public String getTermsOfAccess() {
return termsOfAccess;
}
public void setTermsOfAccess(String termsOfAccess) {
this.termsOfAccess = termsOfAccess;
}
public boolean isFileAccessRequest() {
return fileAccessRequest;
}
public void setFileAccessRequest(boolean fileAccessRequest) {
this.fileAccessRequest = fileAccessRequest;
}
public boolean isAnonymizedAccess() {
if(session.getUser() instanceof PrivateUrlUser) {
return ((PrivateUrlUser)session.getUser()).hasAnonymizedAccess();
}
return false;
}
public boolean isValidEmbargoSelection() {
if (!fileMetadata.getDataFile().isReleased()) {
return true;
}
return false;
}
public boolean isExistingEmbargo() {
if (!fileMetadata.getDataFile().isReleased() && (fileMetadata.getDataFile().getEmbargo() != null)) {
return true;
}
return false;
}
public boolean isEmbargoForWholeSelection() {
return isValidEmbargoSelection();
}
public Embargo getSelectionEmbargo() {
return selectionEmbargo;
}
public void setSelectionEmbargo(Embargo selectionEmbargo) {
this.selectionEmbargo = selectionEmbargo;
}
private Embargo selectionEmbargo = new Embargo();
private boolean removeEmbargo=false;
public boolean isRemoveEmbargo() {
return removeEmbargo;
}
public void setRemoveEmbargo(boolean removeEmbargo) {
boolean existing = this.removeEmbargo;
this.removeEmbargo = removeEmbargo;
if (existing != this.removeEmbargo) {
logger.info("State flip");
selectionEmbargo = new Embargo();
if (removeEmbargo) {
selectionEmbargo = new Embargo(null, null);
}
}
PrimeFaces.current().resetInputs("fileForm:embargoInputs");
}
public String saveEmbargo() {
if(isRemoveEmbargo() || (selectionEmbargo.getDateAvailable()==null && selectionEmbargo.getReason()==null)) {
selectionEmbargo=null;
}
Embargo emb = null;
// Note: this.fileMetadata.getDataFile() is not the same object as this.file.
// (Not sure there's a good reason for this other than that's the way it is.)
// So changes to this.fileMetadata.getDataFile() will not be saved with
// editDataset = this.file.getOwner() set as it is below.
if (!file.isReleased()) {
emb = file.getEmbargo();
if (emb != null) {
logger.fine("Before: " + emb.getDataFiles().size());
emb.getDataFiles().remove(fileMetadata.getDataFile());
logger.fine("After: " + emb.getDataFiles().size());
}
if (selectionEmbargo != null) {
embargoService.merge(selectionEmbargo);
}
file.setEmbargo(selectionEmbargo);
if (emb != null && !emb.getDataFiles().isEmpty()) {
emb = null;
}
}
if(selectionEmbargo!=null) {
embargoService.save(selectionEmbargo, ((AuthenticatedUser)session.getUser()).getIdentifier());
}
// success message:
String successMessage = BundleUtil.getStringFromBundle("file.assignedEmbargo.success");
logger.fine(successMessage);
successMessage = successMessage.replace("{0}", "Selected Files");
JsfHelper.addFlashMessage(successMessage);
selectionEmbargo = new Embargo();
//Caller has to set editDataset before calling save()
editDataset = this.file.getOwner();
save();
init();
if(emb!=null) {
embargoService.deleteById(emb.getId(),((AuthenticatedUser)session.getUser()).getIdentifier());
}
return returnToDraftVersion();
}
public void clearEmbargoPopup() {
setRemoveEmbargo(false);
selectionEmbargo = new Embargo();
PrimeFaces.current().resetInputs("fileForm:embargoInputs");
}
public void clearSelectionEmbargo() {
selectionEmbargo = new Embargo();
PrimeFaces.current().resetInputs("fileForm:embargoInputs");
}
public boolean isCantRequestDueToEmbargo() {
return FileUtil.isActivelyEmbargoed(fileMetadata);
}
public String getEmbargoPhrase() {
//Should only be getting called when there is an embargo
if(file.isReleased()) {
if(FileUtil.isActivelyEmbargoed(file)) {
return BundleUtil.getStringFromBundle("embargoed.until");
} else {
return BundleUtil.getStringFromBundle("embargoed.wasthrough");
}
} else {
return BundleUtil.getStringFromBundle("embargoed.willbeuntil");
}
}
}
| 1 | 45,554 | if we're switching to calling FileDownloadHelper, we can just call that directly from the xhtml (see line 357 for example) and then remove this method completely. This is because the other thing it does is check PrivateURLUser, but the FileDownloadHelper method already does that. (and while we're at it, we can remove the method from FileUtil as this is the only place that method is called) | IQSS-dataverse | java |
@@ -274,11 +274,7 @@ public abstract class GapicInterfaceConfig implements InterfaceConfig {
}
public GapicMethodConfig getMethodConfig(String methodSimpleName, String fullName) {
- GapicMethodConfig methodConfig = getMethodConfigMap().get(methodSimpleName);
- if (methodConfig == null) {
- throw new IllegalArgumentException("no method config for method '" + fullName + "'");
- }
- return methodConfig;
+ return getMethodConfigMap().get(methodSimpleName);
}
@Override | 1 | /* Copyright 2016 Google Inc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.api.codegen.config;
import com.google.api.codegen.CollectionConfigProto;
import com.google.api.codegen.IamResourceProto;
import com.google.api.codegen.InterfaceConfigProto;
import com.google.api.codegen.MethodConfigProto;
import com.google.api.codegen.transformer.RetryDefinitionsTransformer;
import com.google.api.gax.retrying.RetrySettings;
import com.google.api.tools.framework.model.Diag;
import com.google.api.tools.framework.model.DiagCollector;
import com.google.api.tools.framework.model.Field;
import com.google.api.tools.framework.model.Interface;
import com.google.api.tools.framework.model.Method;
import com.google.api.tools.framework.model.Model;
import com.google.api.tools.framework.model.SimpleLocation;
import com.google.api.tools.framework.model.TypeRef;
import com.google.auto.value.AutoValue;
import com.google.common.base.Strings;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSet;
import java.util.ArrayList;
import java.util.List;
import javax.annotation.Nullable;
/**
* GapicInterfaceConfig represents the client code-gen config for an API interface, and includes the
* configuration for methods and resource names.
*
* <p>In grpc-based Gapic clients, an API interface is defined by a "service" section in a proto
* file.
*/
@AutoValue
public abstract class GapicInterfaceConfig implements InterfaceConfig {
private static final String SERVICE_ADDRESS_PARAM = "service_address";
private static final String SCOPES_PARAM = "scopes";
private static final ImmutableSet<String> CONSTRUCTOR_PARAMS =
ImmutableSet.<String>of(SERVICE_ADDRESS_PARAM, SCOPES_PARAM);
public Interface getInterface() {
return getInterfaceModel().getInterface();
}
@Override
public abstract ProtoInterfaceModel getInterfaceModel();
@Override
public abstract List<GapicMethodConfig> getMethodConfigs();
@Nullable
@Override
public abstract SmokeTestConfig getSmokeTestConfig();
abstract ImmutableMap<String, GapicMethodConfig> getMethodConfigMap();
@Override
public abstract ImmutableMap<String, ImmutableSet<String>> getRetryCodesDefinition();
@Override
public abstract ImmutableMap<String, RetrySettings> getRetrySettingsDefinition();
@Override
public abstract ImmutableList<FieldModel> getIamResources();
@Override
public abstract ImmutableList<String> getRequiredConstructorParams();
@Override
public abstract ImmutableList<SingleResourceNameConfig> getSingleResourceNameConfigs();
@Override
public abstract String getManualDoc();
@Nullable
public abstract String getInterfaceNameOverride();
@Override
public String getName() {
return hasInterfaceNameOverride() ? getInterfaceNameOverride() : getInterface().getSimpleName();
}
@Override
public String getRawName() {
return getInterface().getSimpleName();
}
@Override
public boolean hasInterfaceNameOverride() {
return getInterfaceNameOverride() != null;
}
/**
* Creates an instance of GapicInterfaceConfig based on ConfigProto, linking up method
* configurations with specified methods in methodConfigMap. On errors, null will be returned, and
* diagnostics are reported to the model.
*/
@Nullable
static GapicInterfaceConfig createInterfaceConfig(
DiagCollector diagCollector,
String language,
InterfaceConfigProto interfaceConfigProto,
Interface apiInterface,
String interfaceNameOverride,
ResourceNameMessageConfigs messageConfigs,
ImmutableMap<String, ResourceNameConfig> resourceNameConfigs) {
ImmutableMap<String, ImmutableSet<String>> retryCodesDefinition =
RetryDefinitionsTransformer.createRetryCodesDefinition(diagCollector, interfaceConfigProto);
ImmutableMap<String, RetrySettings> retrySettingsDefinition =
RetryDefinitionsTransformer.createRetrySettingsDefinition(
diagCollector, interfaceConfigProto);
List<GapicMethodConfig> methodConfigs = null;
ImmutableMap<String, GapicMethodConfig> methodConfigMap = null;
if (retryCodesDefinition != null && retrySettingsDefinition != null) {
methodConfigMap =
createMethodConfigMap(
diagCollector,
language,
interfaceConfigProto,
apiInterface,
messageConfigs,
resourceNameConfigs,
retryCodesDefinition.keySet(),
retrySettingsDefinition.keySet());
methodConfigs = createMethodConfigs(methodConfigMap, interfaceConfigProto);
}
SmokeTestConfig smokeTestConfig =
createSmokeTestConfig(diagCollector, apiInterface, interfaceConfigProto);
ImmutableList<FieldModel> iamResources =
createIamResources(
apiInterface.getModel(),
interfaceConfigProto.getExperimentalFeatures().getIamResourcesList());
ImmutableList<String> requiredConstructorParams =
ImmutableList.<String>copyOf(interfaceConfigProto.getRequiredConstructorParamsList());
for (String param : interfaceConfigProto.getRequiredConstructorParamsList()) {
if (!CONSTRUCTOR_PARAMS.contains(param)) {
diagCollector.addDiag(
Diag.error(SimpleLocation.TOPLEVEL, "Unsupported constructor param: %s", param));
}
}
ImmutableList.Builder<SingleResourceNameConfig> resourcesBuilder = ImmutableList.builder();
for (CollectionConfigProto collectionConfigProto : interfaceConfigProto.getCollectionsList()) {
String entityName = collectionConfigProto.getEntityName();
ResourceNameConfig resourceName = resourceNameConfigs.get(entityName);
if (resourceName == null || !(resourceName instanceof SingleResourceNameConfig)) {
diagCollector.addDiag(
Diag.error(
SimpleLocation.TOPLEVEL,
"Inconsistent configuration - single resource name %s specified for interface, "
+ " but was not found in GapicProductConfig configuration.",
entityName));
return null;
}
resourcesBuilder.add((SingleResourceNameConfig) resourceName);
}
ImmutableList<SingleResourceNameConfig> singleResourceNames = resourcesBuilder.build();
String manualDoc = Strings.nullToEmpty(interfaceConfigProto.getLangDoc().get(language)).trim();
if (diagCollector.hasErrors()) {
return null;
} else {
return new AutoValue_GapicInterfaceConfig(
new ProtoInterfaceModel(apiInterface),
methodConfigs,
smokeTestConfig,
methodConfigMap,
retryCodesDefinition,
retrySettingsDefinition,
iamResources,
requiredConstructorParams,
singleResourceNames,
manualDoc,
interfaceNameOverride);
}
}
private static SmokeTestConfig createSmokeTestConfig(
DiagCollector diagCollector,
Interface apiInterface,
InterfaceConfigProto interfaceConfigProto) {
if (interfaceConfigProto.hasSmokeTest()) {
return SmokeTestConfig.createSmokeTestConfig(
new ProtoInterfaceModel(apiInterface),
interfaceConfigProto.getSmokeTest(),
diagCollector);
} else {
return null;
}
}
private static ImmutableMap<String, GapicMethodConfig> createMethodConfigMap(
DiagCollector diagCollector,
String language,
InterfaceConfigProto interfaceConfigProto,
Interface apiInterface,
ResourceNameMessageConfigs messageConfigs,
ImmutableMap<String, ResourceNameConfig> resourceNameConfigs,
ImmutableSet<String> retryCodesConfigNames,
ImmutableSet<String> retryParamsConfigNames) {
ImmutableMap.Builder<String, GapicMethodConfig> methodConfigMapBuilder = ImmutableMap.builder();
for (MethodConfigProto methodConfigProto : interfaceConfigProto.getMethodsList()) {
Interface targetInterface =
getTargetInterface(apiInterface, methodConfigProto.getRerouteToGrpcInterface());
Method method = targetInterface.lookupMethod(methodConfigProto.getName());
if (method == null) {
diagCollector.addDiag(
Diag.error(
SimpleLocation.TOPLEVEL, "method not found: %s", methodConfigProto.getName()));
continue;
}
GapicMethodConfig methodConfig =
GapicMethodConfig.createMethodConfig(
diagCollector,
language,
methodConfigProto,
method,
messageConfigs,
resourceNameConfigs,
retryCodesConfigNames,
retryParamsConfigNames);
if (methodConfig == null) {
continue;
}
methodConfigMapBuilder.put(methodConfigProto.getName(), methodConfig);
}
if (diagCollector.getErrorCount() > 0) {
return null;
} else {
return methodConfigMapBuilder.build();
}
}
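  /** Returns the method configs in the order the methods appear in the interface config proto. */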
static <T> List<T> createMethodConfigs(
ImmutableMap<String, T> methodConfigMap, InterfaceConfigProto interfaceConfigProto) {
List<T> methodConfigs = new ArrayList<>();
for (MethodConfigProto methodConfigProto : interfaceConfigProto.getMethodsList()) {
methodConfigs.add(methodConfigMap.get(methodConfigProto.getName()));
}
return methodConfigs;
}
/** Returns the GapicMethodConfig for the given method. */
@Override
public GapicMethodConfig getMethodConfig(MethodModel method) {
return getMethodConfig(method.getSimpleName(), method.getFullName());
}
/** Returns the GapicMethodConfig for the given method. */
public GapicMethodConfig getMethodConfig(Method method) {
return getMethodConfig(method.getSimpleName(), method.getFullName());
}
public GapicMethodConfig getMethodConfig(String methodSimpleName, String fullName) {
GapicMethodConfig methodConfig = getMethodConfigMap().get(methodSimpleName);
if (methodConfig == null) {
throw new IllegalArgumentException("no method config for method '" + fullName + "'");
}
return methodConfig;
}
@Override
public boolean hasDefaultServiceAddress() {
return !getRequiredConstructorParams().contains(SERVICE_ADDRESS_PARAM);
}
@Override
public boolean hasDefaultServiceScopes() {
return !getRequiredConstructorParams().contains(SCOPES_PARAM);
}
@Override
public boolean hasDefaultInstance() {
return getRequiredConstructorParams().size() == 0;
}
/**
* If rerouteToGrpcInterface is set, then looks up that interface and returns it, otherwise
* returns the value of defaultInterface.
*/
public static Interface getTargetInterface(
Interface defaultInterface, String rerouteToGrpcInterface) {
Interface targetInterface = defaultInterface;
if (!Strings.isNullOrEmpty(rerouteToGrpcInterface)) {
targetInterface =
defaultInterface.getModel().getSymbolTable().lookupInterface(rerouteToGrpcInterface);
if (targetInterface == null) {
throw new IllegalArgumentException(
"reroute_to_grpc_interface not found: " + rerouteToGrpcInterface);
}
}
return targetInterface;
}
/** Creates a list of fields that can be turned into IAM resources */
private static ImmutableList<FieldModel> createIamResources(
Model model, List<IamResourceProto> resources) {
ImmutableList.Builder<FieldModel> fields = ImmutableList.builder();
for (IamResourceProto resource : resources) {
TypeRef type = model.getSymbolTable().lookupType(resource.getType());
if (type == null) {
throw new IllegalArgumentException("type not found: " + resource.getType());
}
if (!type.isMessage()) {
throw new IllegalArgumentException("type must be a message: " + type);
}
Field field = type.getMessageType().lookupField(resource.getField());
if (field == null) {
throw new IllegalArgumentException(
String.format(
"type %s does not have field %s", resource.getType(), resource.getField()));
}
fields.add(new ProtoField(field));
}
return fields.build();
}
@Override
public boolean hasPageStreamingMethods() {
for (MethodConfig methodConfig : getMethodConfigs()) {
if (methodConfig.isPageStreaming()) {
return true;
}
}
return false;
}
@Override
public boolean hasBatchingMethods() {
for (MethodConfig methodConfig : getMethodConfigs()) {
if (methodConfig.isBatching()) {
return true;
}
}
return false;
}
@Override
public boolean hasGrpcStreamingMethods() {
for (MethodConfig methodConfig : getMethodConfigs()) {
if (methodConfig.isGrpcStreaming()) {
return true;
}
}
return false;
}
@Override
public boolean hasGrpcStreamingMethods(GrpcStreamingConfig.GrpcStreamingType streamingType) {
for (GapicMethodConfig methodConfig : getMethodConfigs()) {
if (methodConfig.isGrpcStreaming() && methodConfig.getGrpcStreamingType() == streamingType) {
return true;
}
}
return false;
}
@Override
public boolean hasLongRunningOperations() {
for (MethodConfig methodConfig : getMethodConfigs()) {
if (methodConfig.isLongRunningOperation()) {
return true;
}
}
return false;
}
}
| 1 | 24,130 | is `fullName` still required as a parameter? | googleapis-gapic-generator | java |
@@ -74,7 +74,7 @@ public class SeleniumServer extends JettyServer {
getClass().getClassLoader())
.asSubclass(Routable.class);
Constructor<? extends Routable> constructor = rcHandler.getConstructor(ActiveSessions.class);
- LOG.info("Bound legacy RC support");
+ LOG.finest("Bound legacy RC support");
return constructor.newInstance(sessions);
} catch (ReflectiveOperationException e) {
// Do nothing. | 1 | // Licensed to the Software Freedom Conservancy (SFC) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The SFC licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.openqa.selenium.remote.server;
import static java.net.HttpURLConnection.HTTP_NOT_FOUND;
import static java.util.concurrent.TimeUnit.SECONDS;
import static org.openqa.selenium.remote.http.Contents.utf8String;
import static org.openqa.selenium.remote.http.Route.combine;
import com.beust.jcommander.JCommander;
import org.openqa.selenium.grid.config.AnnotatedConfig;
import org.openqa.selenium.jetty.server.JettyServer;
import org.openqa.selenium.grid.server.BaseServerFlags;
import org.openqa.selenium.grid.server.BaseServerOptions;
import org.openqa.selenium.grid.server.HelpFlags;
import org.openqa.selenium.remote.http.HttpRequest;
import org.openqa.selenium.remote.http.HttpResponse;
import org.openqa.selenium.remote.http.Routable;
import org.openqa.selenium.remote.http.Route;
import org.openqa.selenium.remote.server.jmx.JMXHelper;
import org.openqa.selenium.remote.server.jmx.ManagedService;
import java.io.UncheckedIOException;
import java.lang.reflect.Constructor;
import java.util.Map;
import java.util.logging.Logger;
import javax.management.ObjectName;
import javax.servlet.Servlet;
/**
* Provides a server that can launch and manage selenium sessions.
*/
@ManagedService(objectName = "org.seleniumhq.server:type=SeleniumServer")
public class SeleniumServer extends JettyServer {
private final static Logger LOG = Logger.getLogger(SeleniumServer.class.getName());
private final BaseServerOptions configuration;
private Map<String, Class<? extends Servlet>> extraServlets;
private ObjectName objectName;
private ActiveSessions allSessions;
public SeleniumServer(BaseServerOptions configuration) {
super(configuration);
this.configuration = configuration;
objectName = new JMXHelper().register(this).getObjectName();
}
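  /**
   * Attempts to load the legacy Selenium RC handler via reflection; if the RC classes are not on the
   * classpath, a no-op {@link Routable} that matches no requests is returned instead.
   */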
private Routable getRcHandler(ActiveSessions sessions) {
try {
Class<? extends Routable> rcHandler = Class.forName(
"com.thoughtworks.selenium.webdriven.WebDriverBackedSeleniumHandler",
false,
getClass().getClassLoader())
.asSubclass(Routable.class);
Constructor<? extends Routable> constructor = rcHandler.getConstructor(ActiveSessions.class);
LOG.info("Bound legacy RC support");
return constructor.newInstance(sessions);
} catch (ReflectiveOperationException e) {
// Do nothing.
}
return new Routable() {
@Override
public boolean matches(HttpRequest req) {
return false;
}
@Override
public HttpResponse execute(HttpRequest req) throws UncheckedIOException {
return null;
}
};
}
@Override
public JettyServer start() {
long inactiveSessionTimeoutSeconds = Long.MAX_VALUE / 1000;
NewSessionPipeline pipeline = DefaultPipeline.createDefaultPipeline().create();
allSessions = new ActiveSessions(inactiveSessionTimeoutSeconds, SECONDS);
Servlet driverServlet = new WebDriverServlet(allSessions, pipeline);
addServlet(driverServlet, "/wd/hub/*");
addServlet(driverServlet, "/webdriver/*");
Route route = Route.matching(req -> true)
.to(() -> req -> new HttpResponse()
.setStatus(HTTP_NOT_FOUND)
.setContent(utf8String("Not handler found for " + req)));
Routable rcHandler = getRcHandler(allSessions);
if (rcHandler != null) {
route = combine(route, rcHandler);
}
setHandler(route);
super.start();
LOG.info(String.format("Selenium Server is up and running on port %s", configuration.getPort()));
return this;
}
/**
* Stops the Jetty server
*/
@Override
public void stop() {
try {
super.stop();
} finally {
new JMXHelper().unregister(objectName);
stopAllBrowsers();
}
}
private void stopAllBrowsers() {
if (allSessions == null) {
return;
}
allSessions.getAllSessions().parallelStream()
.forEach(session -> {
try {
session.stop();
} catch (Exception ignored) {
// Ignored
}
});
}
public static void main(String[] args) {
HelpFlags helpFlags = new HelpFlags();
BaseServerFlags flags = new BaseServerFlags(4444);
JCommander commands = JCommander.newBuilder().addObject(flags).addObject(helpFlags).build();
commands.parse(args);
if (helpFlags.displayHelp(commands, System.err)) {
return;
}
SeleniumServer server = new SeleniumServer(new BaseServerOptions(new AnnotatedConfig(flags)));
server.start();
}
}
| 1 | 17,125 | This informational message is important to users. Please leave. | SeleniumHQ-selenium | rb |
@@ -470,7 +470,6 @@ func (o *initEnvOpts) deployEnv(app *config.Application) error {
Name: o.Name,
AppName: o.AppName(),
Prod: o.IsProduction,
- PublicLoadBalancer: true, // TODO: configure this based on user input or service Type needs?
ToolsAccountPrincipalARN: caller.RootUserARN,
AppDNSName: app.Domain,
AdditionalTags: app.Tags, | 1 | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package cli
import (
"errors"
"fmt"
"net"
"strings"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/copilot-cli/internal/pkg/aws/cloudformation"
"github.com/aws/copilot-cli/internal/pkg/aws/ec2"
"github.com/aws/copilot-cli/internal/pkg/aws/identity"
"github.com/aws/copilot-cli/internal/pkg/aws/profile"
"github.com/aws/copilot-cli/internal/pkg/aws/sessions"
"github.com/aws/copilot-cli/internal/pkg/config"
"github.com/aws/copilot-cli/internal/pkg/deploy"
deploycfn "github.com/aws/copilot-cli/internal/pkg/deploy/cloudformation"
"github.com/aws/copilot-cli/internal/pkg/deploy/cloudformation/stack"
"github.com/aws/copilot-cli/internal/pkg/term/color"
"github.com/aws/copilot-cli/internal/pkg/term/log"
termprogress "github.com/aws/copilot-cli/internal/pkg/term/progress"
"github.com/aws/copilot-cli/internal/pkg/term/prompt"
"github.com/aws/copilot-cli/internal/pkg/term/selector"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
)
const (
envInitNamePrompt = "What is your environment's name?"
envInitNameHelpPrompt = "A unique identifier for an environment (e.g. dev, test, prod)."
envInitDefaultEnvConfirmPrompt = `Would you like to use the default configuration for a new environment?
- A new VPC with 2 AZs, 2 public subnets and 2 private subnets
- A new ECS Cluster
- New IAM Roles to manage services in your environment
`
envInitVPCSelectPrompt = "Which VPC would you like to use?"
envInitPublicSubnetsSelectPrompt = "Which public subnets would you like to use?"
envInitPrivateSubnetsSelectPrompt = "Which private subnets would you like to use?"
envInitVPCCIDRPrompt = "What VPC CIDR would you like to use?"
envInitVPCCIDRPromptHelp = "CIDR used for your VPC. For example: 10.1.0.0/16"
envInitPublicCIDRPrompt = "What CIDR would you like to use for your public subnets?"
envInitPublicCIDRPromptHelp = "CIDRs used for your public subnets. For example: 10.1.0.0/24,10.1.1.0/24"
envInitPrivateCIDRPrompt = "What CIDR would you like to use for your private subnets?"
envInitPrivateCIDRPromptHelp = "CIDRs used for your private subnets. For example: 10.1.2.0/24,10.1.3.0/24"
fmtEnvInitCredsPrompt = "Which credentials would you like to use to create %s?"
envInitCredsHelpPrompt = `The credentials are used to create your environment in an AWS account and region.
To learn more:
https://github.com/aws/copilot-cli/wiki/credentials#environment-credentials`
envInitRegionPrompt = "Which region?"
envInitDefaultRegionOption = "us-west-2"
fmtDeployEnvStart = "Proposing infrastructure changes for the %s environment."
fmtDeployEnvComplete = "Environment %s already exists in application %s.\n"
fmtDeployEnvFailed = "Failed to accept changes for the %s environment.\n"
fmtDNSDelegationStart = "Sharing DNS permissions for this application to account %s."
fmtDNSDelegationFailed = "Failed to grant DNS permissions to account %s.\n"
fmtDNSDelegationComplete = "Shared DNS permissions for this application to account %s.\n"
fmtStreamEnvStart = "Creating the infrastructure for the %s environment."
fmtStreamEnvFailed = "Failed to create the infrastructure for the %s environment.\n"
fmtStreamEnvComplete = "Created the infrastructure for the %s environment.\n"
fmtAddEnvToAppStart = "Linking account %s and region %s to application %s."
fmtAddEnvToAppFailed = "Failed to link account %s and region %s to application %s.\n"
fmtAddEnvToAppComplete = "Linked account %s and region %s to application %s.\n"
)
var (
errNamedProfilesNotFound = fmt.Errorf("no named AWS profiles found, run %s first please", color.HighlightCode("aws configure"))
envInitDefaultConfigSelectOption = "Yes, use default."
	envInitAdjustEnvResourcesSelectOption = "Yes, but I'd like to configure the default resources (CIDR ranges)."
envInitImportEnvResourcesSelectOption = "No, I'd like to import existing resources (VPC, subnets)."
envInitCustomizedEnvTypes = []string{envInitDefaultConfigSelectOption, envInitAdjustEnvResourcesSelectOption, envInitImportEnvResourcesSelectOption}
)
type importVPCVars struct {
ID string
PublicSubnetIDs []string
PrivateSubnetIDs []string
}
func (v importVPCVars) isSet() bool {
if v.ID != "" {
return true
}
return len(v.PublicSubnetIDs) > 0 || len(v.PrivateSubnetIDs) > 0
}
type adjustVPCVars struct {
CIDR net.IPNet
PublicSubnetCIDRs []string
PrivateSubnetCIDRs []string
}
func (v adjustVPCVars) isSet() bool {
if v.CIDR.String() != emptyIPNet.String() {
return true
}
return len(v.PublicSubnetCIDRs) != 0 || len(v.PrivateSubnetCIDRs) != 0
}
type tempCredsVars struct {
AccessKeyID string
SecretAccessKey string
SessionToken string
}
func (v tempCredsVars) isSet() bool {
return v.AccessKeyID != "" && v.SecretAccessKey != ""
}
type initEnvVars struct {
*GlobalOpts
Name string // Name for the environment.
Profile string // The named profile to use for credential retrieval. Mutually exclusive with TempCreds.
IsProduction bool // True means retain resources even after deletion.
DefaultConfig bool // True means using default environment configuration.
ImportVPC importVPCVars // Existing VPC resources to use instead of creating new ones.
AdjustVPC adjustVPCVars // Configure parameters for VPC resources generated while initializing an environment.
TempCreds tempCredsVars // Temporary credentials to initialize the environment. Mutually exclusive with the Profile.
Region string // The region to create the environment in.
}
type initEnvOpts struct {
initEnvVars
// Interfaces to interact with dependencies.
sessProvider sessionProvider
store store
envDeployer deployer
appDeployer deployer
identity identityService
envIdentity identityService
ec2Client ec2Client
prog progress
selVPC ec2Selector
selCreds credsSelector
sess *session.Session // Session pointing to environment's AWS account and region.
}
func newInitEnvOpts(vars initEnvVars) (*initEnvOpts, error) {
store, err := config.NewStore()
if err != nil {
return nil, err
}
sessProvider := sessions.NewProvider()
defaultSession, err := sessProvider.Default()
if err != nil {
return nil, err
}
cfg, err := profile.NewConfig()
if err != nil {
return nil, fmt.Errorf("read named profiles: %w", err)
}
return &initEnvOpts{
initEnvVars: vars,
sessProvider: sessProvider,
store: store,
appDeployer: deploycfn.New(defaultSession),
identity: identity.New(defaultSession),
prog: termprogress.NewSpinner(),
selCreds: &selector.CredsSelect{
Session: sessProvider,
Profile: cfg,
Prompt: vars.prompt,
},
}, nil
}
// Validate returns an error if the values passed by flags are invalid.
func (o *initEnvOpts) Validate() error {
if o.Name != "" {
if err := validateEnvironmentName(o.Name); err != nil {
return err
}
}
if o.AppName() == "" {
return fmt.Errorf("no application found: run %s or %s into your workspace please", color.HighlightCode("app init"), color.HighlightCode("cd"))
}
if err := o.validateCustomizedResources(); err != nil {
return err
}
return o.validateCredentials()
}
// Ask asks for fields that are required but not passed in.
func (o *initEnvOpts) Ask() error {
if err := o.askEnvName(); err != nil {
return err
}
if err := o.askEnvSession(); err != nil {
return err
}
if err := o.askEnvRegion(); err != nil {
return err
}
return o.askCustomizedResources()
}
// Execute deploys a new environment with CloudFormation and adds it to SSM.
func (o *initEnvOpts) Execute() error {
// Initialize environment clients if not set.
if o.envIdentity == nil {
o.envIdentity = identity.New(o.sess)
}
if o.envDeployer == nil {
o.envDeployer = deploycfn.New(o.sess)
}
app, err := o.store.GetApplication(o.AppName())
if err != nil {
// Ensure the app actually exists before we do a deployment.
return err
}
if app.RequiresDNSDelegation() {
if err := o.delegateDNSFromApp(app); err != nil {
return fmt.Errorf("granting DNS permissions: %w", err)
}
}
// 1. Start creating the CloudFormation stack for the environment.
if err := o.deployEnv(app); err != nil {
return err
}
// 2. Get the environment
env, err := o.envDeployer.GetEnvironment(o.AppName(), o.Name)
if err != nil {
return fmt.Errorf("get environment struct for %s: %w", o.Name, err)
}
env.Prod = o.IsProduction
// 3. Add the stack set instance to the app stackset.
if err := o.addToStackset(app, env); err != nil {
return err
}
// 4. Store the environment in SSM.
if err := o.store.CreateEnvironment(env); err != nil {
return fmt.Errorf("store environment: %w", err)
}
log.Successf("Created environment %s in region %s under application %s.\n",
color.HighlightUserInput(env.Name), color.Emphasize(env.Region), color.HighlightUserInput(env.App))
return nil
}
// RecommendedActions returns follow-up actions the user can take after successfully executing the command.
func (o *initEnvOpts) RecommendedActions() []string {
return nil
}
func (o *initEnvOpts) validateCustomizedResources() error {
if o.ImportVPC.isSet() && o.AdjustVPC.isSet() {
return errors.New("cannot specify both import vpc flags and configure vpc flags")
}
if (o.ImportVPC.isSet() || o.AdjustVPC.isSet()) && o.DefaultConfig {
return fmt.Errorf("cannot import or configure vpc if --%s is set", defaultConfigFlag)
}
return nil
}
func (o *initEnvOpts) askEnvName() error {
if o.Name != "" {
return nil
}
envName, err := o.prompt.Get(envInitNamePrompt, envInitNameHelpPrompt, validateEnvironmentName)
if err != nil {
return fmt.Errorf("get environment name: %w", err)
}
o.Name = envName
return nil
}
func (o *initEnvOpts) askEnvSession() error {
if o.Profile != "" {
sess, err := o.sessProvider.FromProfile(o.Profile)
if err != nil {
return fmt.Errorf("create session from profile %s: %w", o.Profile, err)
}
o.sess = sess
return nil
}
if o.TempCreds.isSet() {
sess, err := o.sessProvider.FromStaticCreds(o.TempCreds.AccessKeyID, o.TempCreds.SecretAccessKey, o.TempCreds.SessionToken)
if err != nil {
return err
}
o.sess = sess
return nil
}
sess, err := o.selCreds.Creds(fmt.Sprintf(fmtEnvInitCredsPrompt, color.HighlightUserInput(o.Name)), envInitCredsHelpPrompt)
if err != nil {
return fmt.Errorf("select creds: %w", err)
}
o.sess = sess
return nil
}
func (o *initEnvOpts) askEnvRegion() error {
region := aws.StringValue(o.sess.Config.Region)
if o.Region != "" {
region = o.Region
}
if region == "" {
v, err := o.prompt.Get(envInitRegionPrompt, "", nil, prompt.WithDefaultInput(envInitDefaultRegionOption))
if err != nil {
return fmt.Errorf("get environment region: %w", err)
}
region = v
}
o.sess.Config.Region = aws.String(region)
return nil
}
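// askCustomizedResources lets the user pick between the default environment configuration, adjusting
// the default CIDR ranges, or importing an existing VPC and subnets.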
func (o *initEnvOpts) askCustomizedResources() error {
if o.DefaultConfig {
return nil
}
if o.ImportVPC.isSet() {
return o.askImportResources()
}
if o.AdjustVPC.isSet() {
return o.askAdjustResources()
}
adjustOrImport, err := o.prompt.SelectOne(
envInitDefaultEnvConfirmPrompt, "",
envInitCustomizedEnvTypes)
if err != nil {
return fmt.Errorf("select adjusting or importing resources: %w", err)
}
switch adjustOrImport {
case envInitImportEnvResourcesSelectOption:
return o.askImportResources()
case envInitAdjustEnvResourcesSelectOption:
return o.askAdjustResources()
case envInitDefaultConfigSelectOption:
return nil
}
return nil
}
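// askImportResources prompts for the existing VPC and subnets to import, and verifies that the chosen
// VPC has DNS support enabled (required for tasks to pull images from ECR).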
func (o *initEnvOpts) askImportResources() error {
if o.selVPC == nil {
o.selVPC = selector.NewEC2Select(o.prompt, ec2.New(o.sess))
}
if o.ImportVPC.ID == "" {
vpcID, err := o.selVPC.VPC(envInitVPCSelectPrompt, "")
if err != nil {
if err == selector.ErrVPCNotFound {
log.Errorf(`No existing VPCs were found. You can either:
- Create a new VPC first and then import it.
- Use the default Copilot environment configuration.
`)
}
return fmt.Errorf("select VPC: %w", err)
}
o.ImportVPC.ID = vpcID
}
if o.ec2Client == nil {
o.ec2Client = ec2.New(o.sess)
}
dnsSupport, err := o.ec2Client.HasDNSSupport(o.ImportVPC.ID)
if err != nil {
return fmt.Errorf("check if VPC %s has DNS support enabled: %w", o.ImportVPC.ID, err)
}
if !dnsSupport {
log.Errorln(`Looks like you're creating an environment using a VPC with DNS support *disabled*.
Copilot cannot create services in VPCs without DNS support. We recommend enabling this property.
To learn more about the issue:
https://aws.amazon.com/premiumsupport/knowledge-center/ecs-pull-container-api-error-ecr/`)
return fmt.Errorf("VPC %s has no DNS support enabled", o.ImportVPC.ID)
}
if o.ImportVPC.PublicSubnetIDs == nil {
publicSubnets, err := o.selVPC.PublicSubnets(envInitPublicSubnetsSelectPrompt, "", o.ImportVPC.ID)
if err != nil {
if err == selector.ErrSubnetsNotFound {
log.Errorf(`No existing public subnets were found in VPC %s. You can either:
- Create new public subnets and then import them.
- Use the default Copilot environment configuration.`, o.ImportVPC.ID)
}
return fmt.Errorf("select public subnets: %w", err)
}
o.ImportVPC.PublicSubnetIDs = publicSubnets
}
if o.ImportVPC.PrivateSubnetIDs == nil {
privateSubnets, err := o.selVPC.PrivateSubnets(envInitPrivateSubnetsSelectPrompt, "", o.ImportVPC.ID)
if err != nil {
if err == selector.ErrSubnetsNotFound {
log.Errorf(`No existing private subnets were found in VPC %s. You can either:
- Create new private subnets and then import them.
- Use the default Copilot environment configuration.`, o.ImportVPC.ID)
}
return fmt.Errorf("select private subnets: %w", err)
}
o.ImportVPC.PrivateSubnetIDs = privateSubnets
}
return nil
}
func (o *initEnvOpts) askAdjustResources() error {
if o.AdjustVPC.CIDR.String() == emptyIPNet.String() {
vpcCIDRString, err := o.prompt.Get(envInitVPCCIDRPrompt, envInitVPCCIDRPromptHelp, validateCIDR,
prompt.WithDefaultInput(stack.DefaultVPCCIDR))
if err != nil {
return fmt.Errorf("get VPC CIDR: %w", err)
}
_, vpcCIDR, err := net.ParseCIDR(vpcCIDRString)
if err != nil {
return fmt.Errorf("parse VPC CIDR: %w", err)
}
o.AdjustVPC.CIDR = *vpcCIDR
}
if o.AdjustVPC.PublicSubnetCIDRs == nil {
publicCIDR, err := o.prompt.Get(envInitPublicCIDRPrompt, envInitPublicCIDRPromptHelp, validateCIDRSlice,
prompt.WithDefaultInput(stack.DefaultPublicSubnetCIDRs))
if err != nil {
return fmt.Errorf("get public subnet CIDRs: %w", err)
}
o.AdjustVPC.PublicSubnetCIDRs = strings.Split(publicCIDR, ",")
}
if o.AdjustVPC.PrivateSubnetCIDRs == nil {
privateCIDR, err := o.prompt.Get(envInitPrivateCIDRPrompt, envInitPrivateCIDRPromptHelp, validateCIDRSlice,
prompt.WithDefaultInput(stack.DefaultPrivateSubnetCIDRs))
if err != nil {
return fmt.Errorf("get private subnet CIDRs: %w", err)
}
o.AdjustVPC.PrivateSubnetCIDRs = strings.Split(privateCIDR, ",")
}
return nil
}
func (o *initEnvOpts) importVPCConfig() *deploy.ImportVPCConfig {
if o.DefaultConfig || !o.ImportVPC.isSet() {
return nil
}
return &deploy.ImportVPCConfig{
ID: o.ImportVPC.ID,
PrivateSubnetIDs: o.ImportVPC.PrivateSubnetIDs,
PublicSubnetIDs: o.ImportVPC.PublicSubnetIDs,
}
}
func (o *initEnvOpts) adjustVPCConfig() *deploy.AdjustVPCConfig {
if o.DefaultConfig || !o.AdjustVPC.isSet() {
return nil
}
return &deploy.AdjustVPCConfig{
CIDR: o.AdjustVPC.CIDR.String(),
PrivateSubnetCIDRs: o.AdjustVPC.PrivateSubnetCIDRs,
PublicSubnetCIDRs: o.AdjustVPC.PublicSubnetCIDRs,
}
}
func (o *initEnvOpts) deployEnv(app *config.Application) error {
caller, err := o.identity.Get()
if err != nil {
return fmt.Errorf("get identity: %w", err)
}
deployEnvInput := &deploy.CreateEnvironmentInput{
Name: o.Name,
AppName: o.AppName(),
Prod: o.IsProduction,
PublicLoadBalancer: true, // TODO: configure this based on user input or service Type needs?
ToolsAccountPrincipalARN: caller.RootUserARN,
AppDNSName: app.Domain,
AdditionalTags: app.Tags,
AdjustVPCConfig: o.adjustVPCConfig(),
ImportVPCConfig: o.importVPCConfig(),
}
o.prog.Start(fmt.Sprintf(fmtDeployEnvStart, color.HighlightUserInput(o.Name)))
if err := o.envDeployer.DeployEnvironment(deployEnvInput); err != nil {
var existsErr *cloudformation.ErrStackAlreadyExists
if errors.As(err, &existsErr) {
// Do nothing if the stack already exists.
o.prog.Stop(log.Ssuccessf(fmtDeployEnvComplete,
color.HighlightUserInput(o.Name), color.HighlightUserInput(o.AppName())))
return nil
}
o.prog.Stop(log.Serrorf(fmtDeployEnvFailed, color.HighlightUserInput(o.Name)))
return err
}
// Display updates while the deployment is happening.
o.prog.Start(fmt.Sprintf(fmtStreamEnvStart, color.HighlightUserInput(o.Name)))
stackEvents, responses := o.envDeployer.StreamEnvironmentCreation(deployEnvInput)
for stackEvent := range stackEvents {
o.prog.Events(o.humanizeEnvironmentEvents(stackEvent))
}
resp := <-responses
if resp.Err != nil {
o.prog.Stop(log.Serrorf(fmtStreamEnvFailed, color.HighlightUserInput(o.Name)))
return resp.Err
}
o.prog.Stop(log.Ssuccessf(fmtStreamEnvComplete, color.HighlightUserInput(o.Name)))
return nil
}
func (o *initEnvOpts) addToStackset(app *config.Application, env *config.Environment) error {
o.prog.Start(fmt.Sprintf(fmtAddEnvToAppStart, color.Emphasize(env.AccountID), color.Emphasize(env.Region), color.HighlightUserInput(o.AppName())))
if err := o.appDeployer.AddEnvToApp(app, env); err != nil {
o.prog.Stop(log.Serrorf(fmtAddEnvToAppFailed, color.Emphasize(env.AccountID), color.Emphasize(env.Region), color.HighlightUserInput(o.AppName())))
return fmt.Errorf("deploy env %s to application %s: %w", env.Name, app.Name, err)
}
o.prog.Stop(log.Ssuccessf(fmtAddEnvToAppComplete, color.Emphasize(env.AccountID), color.Emphasize(env.Region), color.HighlightUserInput(o.AppName())))
return nil
}
func (o *initEnvOpts) delegateDNSFromApp(app *config.Application) error {
envAccount, err := o.envIdentity.Get()
if err != nil {
return fmt.Errorf("getting environment account ID for DNS Delegation: %w", err)
}
// By default, our DNS Delegation permits same account delegation.
if envAccount.Account == app.AccountID {
return nil
}
o.prog.Start(fmt.Sprintf(fmtDNSDelegationStart, color.HighlightUserInput(envAccount.Account)))
if err := o.appDeployer.DelegateDNSPermissions(app, envAccount.Account); err != nil {
o.prog.Stop(log.Serrorf(fmtDNSDelegationFailed, color.HighlightUserInput(envAccount.Account)))
return err
}
o.prog.Stop(log.Ssuccessf(fmtDNSDelegationComplete, color.HighlightUserInput(envAccount.Account)))
return nil
}
func (o *initEnvOpts) humanizeEnvironmentEvents(resourceEvents []deploy.ResourceEvent) []termprogress.TabRow {
matcher := map[termprogress.Text]termprogress.ResourceMatcher{
textVPC: func(event deploy.Resource) bool {
return event.Type == "AWS::EC2::VPC"
},
textInternetGateway: func(event deploy.Resource) bool {
return event.Type == "AWS::EC2::InternetGateway" ||
event.Type == "AWS::EC2::VPCGatewayAttachment"
},
textPublicSubnets: func(event deploy.Resource) bool {
return event.Type == "AWS::EC2::Subnet" &&
strings.HasPrefix(event.LogicalName, "Public")
},
textPrivateSubnets: func(event deploy.Resource) bool {
return event.Type == "AWS::EC2::Subnet" &&
strings.HasPrefix(event.LogicalName, "Private")
},
textRouteTables: func(event deploy.Resource) bool {
return strings.Contains(event.LogicalName, "Route")
},
textECSCluster: func(event deploy.Resource) bool {
return event.Type == "AWS::ECS::Cluster"
},
textALB: func(event deploy.Resource) bool {
return strings.Contains(event.LogicalName, "LoadBalancer") ||
strings.Contains(event.Type, "ElasticLoadBalancingV2")
},
}
return termprogress.HumanizeResourceEvents(o.envProgressOrder(), resourceEvents, matcher, defaultResourceCounts)
}
func (o *initEnvOpts) envProgressOrder() (order []termprogress.Text) {
if !o.ImportVPC.isSet() {
order = append(order, []termprogress.Text{textVPC, textInternetGateway, textPublicSubnets, textPrivateSubnets, textRouteTables}...)
}
order = append(order, []termprogress.Text{textECSCluster, textALB}...)
return
}
func (o *initEnvOpts) validateCredentials() error {
if o.Profile != "" && o.TempCreds.AccessKeyID != "" {
return fmt.Errorf("cannot specify both --%s and --%s", profileFlag, accessKeyIDFlag)
}
if o.Profile != "" && o.TempCreds.SecretAccessKey != "" {
return fmt.Errorf("cannot specify both --%s and --%s", profileFlag, secretAccessKeyFlag)
}
if o.Profile != "" && o.TempCreds.SessionToken != "" {
return fmt.Errorf("cannot specify both --%s and --%s", profileFlag, sessionTokenFlag)
}
return nil
}
// BuildEnvInitCmd builds the command for adding an environment.
func BuildEnvInitCmd() *cobra.Command {
vars := initEnvVars{
GlobalOpts: NewGlobalOpts(),
}
cmd := &cobra.Command{
Use: "init",
Short: "Creates a new environment in your application.",
Example: `
Creates a test environment in your "default" AWS profile using default configuration.
/code $ copilot env init --name test --profile default --default-config
Creates a prod-iad environment using your "prod-admin" AWS profile.
/code $ copilot env init --name prod-iad --profile prod-admin --prod
Creates an environment with imported VPC resources.
/code $ copilot env init --import-vpc-id vpc-099c32d2b98cdcf47 \
/code --import-public-subnets subnet-013e8b691862966cf,subnet-014661ebb7ab8681a \
/code --import-private-subnets subnet-055fafef48fb3c547,subnet-00c9e76f288363e7f
Creates an environment with overridden CIDRs.
/code $ copilot env init --override-vpc-cidr 10.1.0.0/16 \
/code --override-public-cidrs 10.1.0.0/24,10.1.1.0/24 \
/code --override-private-cidrs 10.1.2.0/24,10.1.3.0/24`,
RunE: runCmdE(func(cmd *cobra.Command, args []string) error {
opts, err := newInitEnvOpts(vars)
if err != nil {
return err
}
if err := opts.Validate(); err != nil {
return err
}
if err := opts.Ask(); err != nil {
return err
}
return opts.Execute()
}),
}
cmd.Flags().StringVarP(&vars.Name, nameFlag, nameFlagShort, "", envFlagDescription)
cmd.Flags().StringVar(&vars.Profile, profileFlag, "", profileFlagDescription)
cmd.Flags().StringVar(&vars.TempCreds.AccessKeyID, accessKeyIDFlag, "", accessKeyIDFlagDescription)
cmd.Flags().StringVar(&vars.TempCreds.SecretAccessKey, secretAccessKeyFlag, "", secretAccessKeyFlagDescription)
cmd.Flags().StringVar(&vars.TempCreds.SessionToken, sessionTokenFlag, "", sessionTokenFlagDescription)
cmd.Flags().StringVar(&vars.Region, regionFlag, "", envRegionTokenFlagDescription)
cmd.Flags().BoolVar(&vars.IsProduction, prodEnvFlag, false, prodEnvFlagDescription)
cmd.Flags().StringVar(&vars.ImportVPC.ID, vpcIDFlag, "", vpcIDFlagDescription)
cmd.Flags().StringSliceVar(&vars.ImportVPC.PublicSubnetIDs, publicSubnetsFlag, nil, publicSubnetsFlagDescription)
cmd.Flags().StringSliceVar(&vars.ImportVPC.PrivateSubnetIDs, privateSubnetsFlag, nil, privateSubnetsFlagDescription)
cmd.Flags().IPNetVar(&vars.AdjustVPC.CIDR, vpcCIDRFlag, net.IPNet{}, vpcCIDRFlagDescription)
// TODO: use IPNetSliceVar when it is available (https://github.com/spf13/pflag/issues/273).
cmd.Flags().StringSliceVar(&vars.AdjustVPC.PublicSubnetCIDRs, publicSubnetCIDRsFlag, nil, publicSubnetCIDRsFlagDescription)
cmd.Flags().StringSliceVar(&vars.AdjustVPC.PrivateSubnetCIDRs, privateSubnetCIDRsFlag, nil, privateSubnetCIDRsFlagDescription)
cmd.Flags().BoolVar(&vars.DefaultConfig, defaultConfigFlag, false, defaultConfigFlagDescription)
flags := pflag.NewFlagSet("Common", pflag.ContinueOnError)
flags.AddFlag(cmd.Flags().Lookup(nameFlag))
flags.AddFlag(cmd.Flags().Lookup(profileFlag))
flags.AddFlag(cmd.Flags().Lookup(accessKeyIDFlag))
flags.AddFlag(cmd.Flags().Lookup(secretAccessKeyFlag))
flags.AddFlag(cmd.Flags().Lookup(sessionTokenFlag))
flags.AddFlag(cmd.Flags().Lookup(regionFlag))
flags.AddFlag(cmd.Flags().Lookup(defaultConfigFlag))
flags.AddFlag(cmd.Flags().Lookup(prodEnvFlag))
resourcesImportFlag := pflag.NewFlagSet("Import Existing Resources", pflag.ContinueOnError)
resourcesImportFlag.AddFlag(cmd.Flags().Lookup(vpcIDFlag))
resourcesImportFlag.AddFlag(cmd.Flags().Lookup(publicSubnetsFlag))
resourcesImportFlag.AddFlag(cmd.Flags().Lookup(privateSubnetsFlag))
resourcesConfigFlag := pflag.NewFlagSet("Configure Default Resources", pflag.ContinueOnError)
resourcesConfigFlag.AddFlag(cmd.Flags().Lookup(vpcCIDRFlag))
resourcesConfigFlag.AddFlag(cmd.Flags().Lookup(publicSubnetCIDRsFlag))
resourcesConfigFlag.AddFlag(cmd.Flags().Lookup(privateSubnetCIDRsFlag))
cmd.Annotations = map[string]string{
// The order of the sections we want to display.
"sections": "Common,Import Existing Resources,Configure Default Resources",
"Common": flags.FlagUsages(),
"Import Existing Resources": resourcesImportFlag.FlagUsages(),
"Configure Default Resources": resourcesConfigFlag.FlagUsages(),
}
cmd.SetUsageTemplate(`{{h1 "Usage"}}{{if .Runnable}}
{{.UseLine}}{{end}}{{$annotations := .Annotations}}{{$sections := split .Annotations.sections ","}}{{if gt (len $sections) 0}}
{{range $i, $sectionName := $sections}}{{h1 (print $sectionName " Flags")}}
{{(index $annotations $sectionName) | trimTrailingWhitespaces}}{{if ne (inc $i) (len $sections)}}
{{end}}{{end}}{{end}}{{if .HasAvailableInheritedFlags}}
{{h1 "Global Flags"}}
{{.InheritedFlags.FlagUsages | trimTrailingWhitespaces}}{{end}}{{if .HasExample}}
{{h1 "Examples"}}{{code .Example}}{{end}}
`)
return cmd
}
| 1 | 15,083 | Why do we want to remove this one? Are we planning to substitute it? | aws-copilot-cli | go |
@@ -4,13 +4,14 @@ import (
"fmt"
"testing"
- "github.com/golang/protobuf/proto"
+ legacyProto "github.com/golang/protobuf/proto" // nolint: staticcheck // deprecated library needed until WithDetails can take v2
"github.com/spiffe/spire/pkg/common/nodeutil"
"github.com/spiffe/spire/proto/spire/common"
"github.com/spiffe/spire/proto/spire/types"
"github.com/stretchr/testify/require"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
+ "google.golang.org/protobuf/proto"
)
func TestIsAgentBanned(t *testing.T) { | 1 | package nodeutil_test
import (
"fmt"
"testing"
"github.com/golang/protobuf/proto"
"github.com/spiffe/spire/pkg/common/nodeutil"
"github.com/spiffe/spire/proto/spire/common"
"github.com/spiffe/spire/proto/spire/types"
"github.com/stretchr/testify/require"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
func TestIsAgentBanned(t *testing.T) {
require.True(t, nodeutil.IsAgentBanned(&common.AttestedNode{}))
require.False(t, nodeutil.IsAgentBanned(&common.AttestedNode{CertSerialNumber: "non-empty-serial"}))
}
func TestShouldAgentReattest(t *testing.T) {
agentExpired := &types.PermissionDeniedDetails{
Reason: types.PermissionDeniedDetails_AGENT_EXPIRED,
}
agentNotActive := &types.PermissionDeniedDetails{
Reason: types.PermissionDeniedDetails_AGENT_NOT_ACTIVE,
}
agentNotAttested := &types.PermissionDeniedDetails{
Reason: types.PermissionDeniedDetails_AGENT_NOT_ATTESTED,
}
agentBanned := &types.PermissionDeniedDetails{
Reason: types.PermissionDeniedDetails_AGENT_BANNED,
}
require.False(t, nodeutil.ShouldAgentReattest(nil))
require.True(t, nodeutil.ShouldAgentReattest(getError(t, codes.PermissionDenied, agentExpired)))
require.True(t, nodeutil.ShouldAgentReattest(getError(t, codes.PermissionDenied, agentNotActive)))
require.True(t, nodeutil.ShouldAgentReattest(getError(t, codes.PermissionDenied, agentNotAttested)))
require.False(t, nodeutil.ShouldAgentReattest(getError(t, codes.PermissionDenied, agentBanned)))
require.False(t, nodeutil.ShouldAgentReattest(getError(t, codes.Unknown, agentExpired)))
require.False(t, nodeutil.ShouldAgentReattest(getError(t, codes.Unknown, agentNotActive)))
require.False(t, nodeutil.ShouldAgentReattest(getError(t, codes.Unknown, agentNotAttested)))
require.False(t, nodeutil.ShouldAgentReattest(getError(t, codes.Unknown, agentBanned)))
require.False(t, nodeutil.ShouldAgentReattest(getError(t, codes.PermissionDenied, &types.Status{})))
require.False(t, nodeutil.ShouldAgentReattest(getError(t, codes.PermissionDenied, nil)))
}
func getError(t *testing.T, code codes.Code, details proto.Message) error {
st := status.New(code, "some error")
if details != nil {
var err error
st, err = st.WithDetails(details)
require.NoError(t, err)
}
return fmt.Errorf("extra info: %w", st.Err())
}
| 1 | 15,127 | We may create an issue to track this so we don't forget? | spiffe-spire | go |
@@ -1,9 +1,12 @@
class ApplicationController < ActionController::Base
+ helper AvatarHelper
+ helper ButtonHelper
+
protect_from_forgery with: :exception
attr_reader :page_context
helper_method :page_context
-
+
before_action :store_location
def initialize(*params) | 1 | class ApplicationController < ActionController::Base
protect_from_forgery with: :exception
attr_reader :page_context
helper_method :page_context
before_action :store_location
def initialize(*params)
@page_context = {}
super(*params)
end
rescue_from ParamRecordNotFound do
render_404
end
protected
# TODO: Fix me when sessions are real
def session_required
error(message: t(:must_be_logged_in), status: :unauthorized) unless logged_in?
end
def admin_session_required
error(message: t(:not_authorized), status: :unauthorized) unless current_user_is_admin?
end
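# Memoized lookup of the signed-in account: check the session first, then the
# remember-me cookie, and fall back to a NullAccount when neither matches.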
def current_user
return @cached_current_user if @cached_current_user_checked
@cached_current_user_checked = true
@cached_current_user = find_user_in_session || find_remembered_user || NullAccount.new
session[:account_id] = @cached_current_user.id if @cached_current_user
@cached_current_user
end
helper_method :current_user
def logged_in?
current_user.id != nil
end
helper_method :logged_in?
def current_user_is_admin?
logged_in? && current_user.admin?
end
helper_method :current_user_is_admin?
def current_user_can_manage?
return true if current_user_is_admin?
logged_in? && current_project && current_project.active_managers.map(&:account_id).include?(current_user.id)
end
helper_method :current_user_can_manage?
def current_project
begin
param = params[:project_id].presence || params[:id]
@current_project ||= Project.find_by_url_name!(param)
rescue ActiveRecord::RecordNotFound
raise ParamRecordNotFound
rescue => e
raise e
end
@current_project
end
helper_method :current_project
def read_only_mode?
false
end
helper_method :read_only_mode?
def request_format
format = 'html' if request.format.html?
format ||= params[:format]
format
end
def error(message:, status:)
@error = { message: message }
render_with_format 'error', status: status
end
def render_404
error message: t(:four_oh_four), status: :not_found
end
def render_with_format(action, status: :ok)
render "#{action}.#{request_format}", status: status
end
def store_location
session[:return_to] = request.fullpath
end
def redirect_back(default = root_path)
redirect_to(session[:return_to] || default)
session[:return_to] = nil
end
private
def find_user_in_session
Account.find_by_id(session[:account_id])
end
def find_remembered_user
cookies[:auth_token] ? Account.find_by_remember_token(cookies[:auth_token]) : nil
end
end
| 1 | 6,796 | This file has the executable bit set. | blackducksoftware-ohloh-ui | rb |
@@ -177,6 +177,7 @@ func New(path string, baseKey []byte, o *Options, logger logging.Logger) (db *DB
if db.capacity == 0 {
db.capacity = defaultCapacity
}
+ db.logger.Info("setting db capacity to :", db.capacity)
if maxParallelUpdateGC > 0 {
db.updateGCSem = make(chan struct{}, maxParallelUpdateGC)
} | 1 | // Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package localstore
import (
"encoding/binary"
"errors"
"os"
"runtime/pprof"
"sync"
"time"
"github.com/ethersphere/bee/pkg/logging"
"github.com/ethersphere/bee/pkg/shed"
"github.com/ethersphere/bee/pkg/storage"
"github.com/ethersphere/bee/pkg/swarm"
"github.com/ethersphere/bee/pkg/tags"
"github.com/prometheus/client_golang/prometheus"
"github.com/syndtr/goleveldb/leveldb"
)
var _ storage.Storer = &DB{}
var (
// ErrInvalidMode is returned when an unknown Mode
// is provided to the function.
ErrInvalidMode = errors.New("invalid mode")
)
var (
// Default value for Capacity DB option.
defaultCapacity uint64 = 5000000
// Limit the number of goroutines created by Getters
// that call updateGC function. Value 0 sets no limit.
maxParallelUpdateGC = 1000
)
// DB is the local store implementation and holds
// database related objects.
type DB struct {
shed *shed.DB
tags *tags.Tags
// schema name of loaded data
schemaName shed.StringField
// retrieval indexes
retrievalDataIndex shed.Index
retrievalAccessIndex shed.Index
// push syncing index
pushIndex shed.Index
// push syncing subscriptions triggers
pushTriggers []chan struct{}
pushTriggersMu sync.RWMutex
// pull syncing index
pullIndex shed.Index
// pull syncing subscriptions triggers per bin
pullTriggers map[uint8][]chan struct{}
pullTriggersMu sync.RWMutex
// binIDs stores the latest chunk serial ID for every
// proximity order bin
binIDs shed.Uint64Vector
// garbage collection index
gcIndex shed.Index
// garbage collection exclude index for pinned contents
gcExcludeIndex shed.Index
// pin files Index
pinIndex shed.Index
// field that stores the number of items in the gc index
gcSize shed.Uint64Field
// garbage collection is triggered when gcSize exceeds
// the capacity value
capacity uint64
// triggers garbage collection event loop
collectGarbageTrigger chan struct{}
// a buffered channel acting as a semaphore
// to limit the maximal number of goroutines
// created by Getters to call updateGC function
updateGCSem chan struct{}
// a wait group to ensure all updateGC goroutines
// are done before closing the database
updateGCWG sync.WaitGroup
// baseKey is the overlay address
baseKey []byte
batchMu sync.Mutex
// this channel is closed when close function is called
// to terminate other goroutines
close chan struct{}
// protect Close method from exiting before
// garbage collection and gc size write workers
// are done
collectGarbageWorkerDone chan struct{}
putToGCCheck func([]byte) bool
// wait for all subscriptions to finish before closing
// underlying DB to prevent possible panics from
// iterators
subscritionsWG sync.WaitGroup
metrics metrics
logger logging.Logger
}
// Options struct holds optional parameters for configuring DB.
type Options struct {
// Capacity is a limit that triggers garbage collection when
// number of items in gcIndex equals or exceeds it.
Capacity uint64
// MetricsPrefix defines a prefix for metrics names.
MetricsPrefix string
Tags *tags.Tags
// PutToGCCheck is a function called after a Put of a chunk
// to verify whether that chunk needs to be Set and added to
// garbage collection index too
PutToGCCheck func([]byte) bool
}
// New returns a new DB. All fields and indexes are initialized
// and possible conflicts with schema from existing database are checked.
// One goroutine for writing batches is created.
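//
// A minimal usage sketch (hypothetical path and capacity; assumes baseKey holds
// the node's overlay address bytes and logger is an existing logging.Logger):
//
//	db, err := New("/var/lib/bee/localstore", baseKey, &Options{Capacity: 1000000}, logger)
//	if err != nil {
//		// handle the error
//	}
//	defer db.Close()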
func New(path string, baseKey []byte, o *Options, logger logging.Logger) (db *DB, err error) {
if o == nil {
// default options
o = &Options{
Capacity: defaultCapacity,
}
}
if o.PutToGCCheck == nil {
o.PutToGCCheck = func(_ []byte) bool { return false }
}
db = &DB{
capacity: o.Capacity,
baseKey: baseKey,
tags: o.Tags,
// channel collectGarbageTrigger
// needs to be buffered with the size of 1
// to signal another event if it
// is triggered during already running function
collectGarbageTrigger: make(chan struct{}, 1),
close: make(chan struct{}),
collectGarbageWorkerDone: make(chan struct{}),
putToGCCheck: o.PutToGCCheck,
metrics: newMetrics(),
logger: logger,
}
if db.capacity == 0 {
db.capacity = defaultCapacity
}
if maxParallelUpdateGC > 0 {
db.updateGCSem = make(chan struct{}, maxParallelUpdateGC)
}
db.shed, err = shed.NewDB(path)
if err != nil {
return nil, err
}
// Identify current storage schema by arbitrary name.
db.schemaName, err = db.shed.NewStringField("schema-name")
if err != nil {
return nil, err
}
schemaName, err := db.schemaName.Get()
if err != nil && !errors.Is(err, leveldb.ErrNotFound) {
return nil, err
}
if schemaName == "" {
// initial new localstore run
err := db.schemaName.Put(DbSchemaCurrent)
if err != nil {
return nil, err
}
} else {
// execute possible migrations
err = db.migrate(schemaName)
if err != nil {
return nil, err
}
}
// Persist gc size.
db.gcSize, err = db.shed.NewUint64Field("gc-size")
if err != nil {
return nil, err
}
// Index storing actual chunk address, data and bin id.
db.retrievalDataIndex, err = db.shed.NewIndex("Address->StoreTimestamp|BinID|Data", shed.IndexFuncs{
EncodeKey: func(fields shed.Item) (key []byte, err error) {
return fields.Address, nil
},
DecodeKey: func(key []byte) (e shed.Item, err error) {
e.Address = key
return e, nil
},
EncodeValue: func(fields shed.Item) (value []byte, err error) {
b := make([]byte, 16)
binary.BigEndian.PutUint64(b[:8], fields.BinID)
binary.BigEndian.PutUint64(b[8:16], uint64(fields.StoreTimestamp))
value = append(b, fields.Data...)
return value, nil
},
DecodeValue: func(keyItem shed.Item, value []byte) (e shed.Item, err error) {
e.StoreTimestamp = int64(binary.BigEndian.Uint64(value[8:16]))
e.BinID = binary.BigEndian.Uint64(value[:8])
e.Data = value[16:]
return e, nil
},
})
if err != nil {
return nil, err
}
// Index storing access timestamp for a particular address.
// It is needed in order to update gc index keys for iteration order.
db.retrievalAccessIndex, err = db.shed.NewIndex("Address->AccessTimestamp", shed.IndexFuncs{
EncodeKey: func(fields shed.Item) (key []byte, err error) {
return fields.Address, nil
},
DecodeKey: func(key []byte) (e shed.Item, err error) {
e.Address = key
return e, nil
},
EncodeValue: func(fields shed.Item) (value []byte, err error) {
b := make([]byte, 8)
binary.BigEndian.PutUint64(b, uint64(fields.AccessTimestamp))
return b, nil
},
DecodeValue: func(keyItem shed.Item, value []byte) (e shed.Item, err error) {
e.AccessTimestamp = int64(binary.BigEndian.Uint64(value))
return e, nil
},
})
if err != nil {
return nil, err
}
// pull index allows history and live syncing per po bin
db.pullIndex, err = db.shed.NewIndex("PO|BinID->Hash|Tag", shed.IndexFuncs{
EncodeKey: func(fields shed.Item) (key []byte, err error) {
key = make([]byte, 41)
key[0] = db.po(swarm.NewAddress(fields.Address))
binary.BigEndian.PutUint64(key[1:9], fields.BinID)
return key, nil
},
DecodeKey: func(key []byte) (e shed.Item, err error) {
e.BinID = binary.BigEndian.Uint64(key[1:9])
return e, nil
},
EncodeValue: func(fields shed.Item) (value []byte, err error) {
value = make([]byte, 36) // 32 bytes address, 4 bytes tag
copy(value, fields.Address)
if fields.Tag != 0 {
binary.BigEndian.PutUint32(value[32:], fields.Tag)
}
return value, nil
},
DecodeValue: func(keyItem shed.Item, value []byte) (e shed.Item, err error) {
e.Address = value[:32]
if len(value) > 32 {
e.Tag = binary.BigEndian.Uint32(value[32:])
}
return e, nil
},
})
if err != nil {
return nil, err
}
// create a vector for bin IDs
db.binIDs, err = db.shed.NewUint64Vector("bin-ids")
if err != nil {
return nil, err
}
// create a pull syncing triggers used by SubscribePull function
db.pullTriggers = make(map[uint8][]chan struct{})
// push index contains as yet unsynced chunks
db.pushIndex, err = db.shed.NewIndex("StoreTimestamp|Hash->Tags", shed.IndexFuncs{
EncodeKey: func(fields shed.Item) (key []byte, err error) {
key = make([]byte, 40)
binary.BigEndian.PutUint64(key[:8], uint64(fields.StoreTimestamp))
copy(key[8:], fields.Address)
return key, nil
},
DecodeKey: func(key []byte) (e shed.Item, err error) {
e.Address = key[8:]
e.StoreTimestamp = int64(binary.BigEndian.Uint64(key[:8]))
return e, nil
},
EncodeValue: func(fields shed.Item) (value []byte, err error) {
tag := make([]byte, 4)
binary.BigEndian.PutUint32(tag, fields.Tag)
return tag, nil
},
DecodeValue: func(keyItem shed.Item, value []byte) (e shed.Item, err error) {
if len(value) == 4 { // only values with tag should be decoded
e.Tag = binary.BigEndian.Uint32(value)
}
return e, nil
},
})
if err != nil {
return nil, err
}
// create a push syncing triggers used by SubscribePush function
db.pushTriggers = make([]chan struct{}, 0)
// gc index for removable chunk ordered by ascending last access time
db.gcIndex, err = db.shed.NewIndex("AccessTimestamp|BinID|Hash->nil", shed.IndexFuncs{
EncodeKey: func(fields shed.Item) (key []byte, err error) {
b := make([]byte, 16, 16+len(fields.Address))
binary.BigEndian.PutUint64(b[:8], uint64(fields.AccessTimestamp))
binary.BigEndian.PutUint64(b[8:16], fields.BinID)
key = append(b, fields.Address...)
return key, nil
},
DecodeKey: func(key []byte) (e shed.Item, err error) {
e.AccessTimestamp = int64(binary.BigEndian.Uint64(key[:8]))
e.BinID = binary.BigEndian.Uint64(key[8:16])
e.Address = key[16:]
return e, nil
},
EncodeValue: func(fields shed.Item) (value []byte, err error) {
return nil, nil
},
DecodeValue: func(keyItem shed.Item, value []byte) (e shed.Item, err error) {
return e, nil
},
})
if err != nil {
return nil, err
}
// Create an index structure for storing pinned chunks and their pin counts
db.pinIndex, err = db.shed.NewIndex("Hash->PinCounter", shed.IndexFuncs{
EncodeKey: func(fields shed.Item) (key []byte, err error) {
return fields.Address, nil
},
DecodeKey: func(key []byte) (e shed.Item, err error) {
e.Address = key
return e, nil
},
EncodeValue: func(fields shed.Item) (value []byte, err error) {
b := make([]byte, 8)
binary.BigEndian.PutUint64(b[:8], fields.PinCounter)
return b, nil
},
DecodeValue: func(keyItem shed.Item, value []byte) (e shed.Item, err error) {
e.PinCounter = binary.BigEndian.Uint64(value[:8])
return e, nil
},
})
if err != nil {
return nil, err
}
// Create an index structure for excluding pinned chunks from gcIndex
db.gcExcludeIndex, err = db.shed.NewIndex("Hash->nil", shed.IndexFuncs{
EncodeKey: func(fields shed.Item) (key []byte, err error) {
return fields.Address, nil
},
DecodeKey: func(key []byte) (e shed.Item, err error) {
e.Address = key
return e, nil
},
EncodeValue: func(fields shed.Item) (value []byte, err error) {
return nil, nil
},
DecodeValue: func(keyItem shed.Item, value []byte) (e shed.Item, err error) {
return e, nil
},
})
if err != nil {
return nil, err
}
// start garbage collection worker
go db.collectGarbageWorker()
return db, nil
}
// Close closes the underlying database.
func (db *DB) Close() (err error) {
close(db.close)
// wait for all handlers to finish
done := make(chan struct{})
go func() {
db.updateGCWG.Wait()
db.subscritionsWG.Wait()
// wait for gc worker to
// return before closing the shed
<-db.collectGarbageWorkerDone
close(done)
}()
select {
case <-done:
case <-time.After(5 * time.Second):
db.logger.Errorf("localstore closed with still active goroutines")
// Print a full goroutine dump to debug blocking.
// TODO: use a logger to write a goroutine profile
prof := pprof.Lookup("goroutine")
err = prof.WriteTo(os.Stdout, 2)
if err != nil {
return err
}
}
return db.shed.Close()
}
// po computes the proximity order between the address
// and database base key.
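// The result is, in essence, the number of leading bits the chunk address has
// in common with the base key (as computed by swarm.Proximity), so a higher
// value means a closer chunk.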
func (db *DB) po(addr swarm.Address) (bin uint8) {
return uint8(swarm.Proximity(db.baseKey, addr.Bytes()))
}
// DebugIndices returns the index sizes for all indexes in localstore.
// The returned map keys are the index names; values are the number of elements in the index.
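//
// A minimal sketch (hypothetical usage; fmt assumed available at the call site):
//
//	sizes, err := db.DebugIndices()
//	if err == nil {
//		fmt.Printf("gc index holds %d items\n", sizes["gcIndex"])
//	}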
func (db *DB) DebugIndices() (indexInfo map[string]int, err error) {
indexInfo = make(map[string]int)
for k, v := range map[string]shed.Index{
"retrievalDataIndex": db.retrievalDataIndex,
"retrievalAccessIndex": db.retrievalAccessIndex,
"pushIndex": db.pushIndex,
"pullIndex": db.pullIndex,
"gcIndex": db.gcIndex,
"gcExcludeIndex": db.gcExcludeIndex,
"pinIndex": db.pinIndex,
} {
indexSize, err := v.Count()
if err != nil {
return indexInfo, err
}
indexInfo[k] = indexSize
}
val, err := db.gcSize.Get()
if err != nil {
return indexInfo, err
}
indexInfo["gcSize"] = int(val)
return indexInfo, err
}
// chunkToItem creates new Item with data provided by the Chunk.
func chunkToItem(ch swarm.Chunk) shed.Item {
return shed.Item{
Address: ch.Address().Bytes(),
Data: ch.Data(),
Tag: ch.TagID(),
}
}
// addressToItem creates new Item with a provided address.
func addressToItem(addr swarm.Address) shed.Item {
return shed.Item{
Address: addr.Bytes(),
}
}
// addressesToItems constructs a slice of Items with only
// addresses set on them.
func addressesToItems(addrs ...swarm.Address) []shed.Item {
items := make([]shed.Item, len(addrs))
for i, addr := range addrs {
items[i] = shed.Item{
Address: addr.Bytes(),
}
}
return items
}
// now is a helper function that returns a current unix timestamp
// in UTC timezone.
// It is set in the init function for usage in production, and
// optionally overridden in tests for data validation.
var now func() int64
func init() {
// set the now function
now = func() (t int64) {
return time.Now().UTC().UnixNano()
}
}
// totalTimeMetric adds the time elapsed since the provided start time to the
// provided counter metric.
func totalTimeMetric(metric prometheus.Counter, start time.Time) {
totalTime := time.Since(start)
metric.Add(float64(totalTime))
}
| 1 | 10,303 | Improve the formatting of the message `.Infof("setting db capacity to: %v", db.capacity)` There is a space before `:` and this way it is easier to see the formatting. | ethersphere-bee | go |
@@ -434,12 +434,13 @@ RTPSParticipantImpl* RTPSDomainImpl::find_local_participant(
RTPSReader* RTPSDomainImpl::find_local_reader(
const GUID_t& reader_guid)
{
- std::lock_guard<std::mutex> guard(RTPSDomain::m_mutex);
+ std::unique_lock<std::mutex> lock(RTPSDomain::m_mutex);
for (const RTPSDomain::t_p_RTPSParticipant& participant : RTPSDomain::m_RTPSParticipants)
{
if (participant.second->getGuid().guidPrefix == reader_guid.guidPrefix)
{
// Participant found, forward the query
+ lock.unlock();
return participant.second->find_local_reader(reader_guid);
}
} | 1 | // Copyright 2016 Proyectos y Sistemas de Mantenimiento SL (eProsima).
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/*
* @file RTPSDomain.cpp
*/
#include <fastdds/rtps/RTPSDomain.h>
#include <chrono>
#include <thread>
#include <cstdlib>
#include <regex>
#include <fastdds/dds/log/Log.hpp>
#include <fastdds/rtps/history/WriterHistory.h>
#include <fastdds/rtps/participant/RTPSParticipant.h>
#include <fastdds/rtps/reader/RTPSReader.h>
#include <fastdds/rtps/writer/RTPSWriter.h>
#include <rtps/transport/UDPv4Transport.h>
#include <rtps/transport/UDPv6Transport.h>
#include <rtps/transport/test_UDPv4Transport.h>
#include <fastrtps/utils/IPFinder.h>
#include <fastrtps/utils/IPLocator.h>
#include <fastrtps/utils/System.h>
#include <fastrtps/utils/md5.h>
#include <fastrtps/xmlparser/XMLProfileManager.h>
#include <rtps/RTPSDomainImpl.hpp>
#include <rtps/participant/RTPSParticipantImpl.h>
#include <rtps/common/GuidUtils.hpp>
#include <utils/Host.hpp>
namespace eprosima {
namespace fastrtps {
namespace rtps {
static void guid_prefix_create(
uint32_t ID,
GuidPrefix_t& guidP)
{
eprosima::fastdds::rtps::GuidUtils::instance().guid_prefix_create(ID, guidP);
}
std::mutex RTPSDomain::m_mutex;
std::atomic<uint32_t> RTPSDomain::m_maxRTPSParticipantID(1);
std::vector<RTPSDomain::t_p_RTPSParticipant> RTPSDomain::m_RTPSParticipants;
std::set<uint32_t> RTPSDomain::m_RTPSParticipantIDs;
void RTPSDomain::stopAll()
{
std::unique_lock<std::mutex> lock(m_mutex);
logInfo(RTPS_PARTICIPANT, "DELETING ALL ENDPOINTS IN THIS DOMAIN");
while (m_RTPSParticipants.size() > 0)
{
RTPSDomain::t_p_RTPSParticipant participant = m_RTPSParticipants.back();
m_RTPSParticipantIDs.erase(m_RTPSParticipantIDs.find(participant.second->getRTPSParticipantID()));
m_RTPSParticipants.pop_back();
lock.unlock();
RTPSDomain::removeRTPSParticipant_nts(participant);
lock.lock();
}
logInfo(RTPS_PARTICIPANT, "RTPSParticipants deleted correctly ");
std::this_thread::sleep_for(std::chrono::milliseconds(100));
}
RTPSParticipant* RTPSDomain::createParticipant(
uint32_t domain_id,
const RTPSParticipantAttributes& attrs,
RTPSParticipantListener* listen)
{
return createParticipant(domain_id, true, attrs, listen);
}
RTPSParticipant* RTPSDomain::createParticipant(
uint32_t domain_id,
bool enabled,
const RTPSParticipantAttributes& attrs,
RTPSParticipantListener* listen)
{
logInfo(RTPS_PARTICIPANT, "");
RTPSParticipantAttributes PParam = attrs;
if (PParam.builtin.discovery_config.leaseDuration < c_TimeInfinite &&
PParam.builtin.discovery_config.leaseDuration <=
PParam.builtin.discovery_config.leaseDuration_announcementperiod)
{
logError(RTPS_PARTICIPANT,
"RTPSParticipant Attributes: LeaseDuration should be >= leaseDuration announcement period");
return nullptr;
}
uint32_t ID;
{
std::lock_guard<std::mutex> guard(m_mutex);
if (PParam.participantID < 0)
{
ID = getNewId();
while (m_RTPSParticipantIDs.insert(ID).second == false)
{
ID = getNewId();
}
}
else
{
ID = PParam.participantID;
if (m_RTPSParticipantIDs.insert(ID).second == false)
{
logError(RTPS_PARTICIPANT, "RTPSParticipant with the same ID already exists");
return nullptr;
}
}
}
if (!PParam.defaultUnicastLocatorList.isValid())
{
logError(RTPS_PARTICIPANT, "Default Unicast Locator List contains invalid Locator");
return nullptr;
}
if (!PParam.defaultMulticastLocatorList.isValid())
{
logError(RTPS_PARTICIPANT, "Default Multicast Locator List contains invalid Locator");
return nullptr;
}
PParam.participantID = ID;
LocatorList_t loc;
IPFinder::getIP4Address(&loc);
// Generate a new GuidPrefix_t
GuidPrefix_t guidP;
guid_prefix_create(ID, guidP);
RTPSParticipant* p = new RTPSParticipant(nullptr);
RTPSParticipantImpl* pimpl = nullptr;
// If we force the participant to have a specific prefix we must define a different persistence GuidPrefix_t that
// would ensure builtin endpoints are able to differentiate between a communication loss and a participant recovery
if (PParam.prefix != c_GuidPrefix_Unknown)
{
pimpl = new RTPSParticipantImpl(domain_id, PParam, PParam.prefix, guidP, p, listen);
}
else
{
pimpl = new RTPSParticipantImpl(domain_id, PParam, guidP, p, listen);
}
// Above constructors create the sender resources. If a given listening port cannot be allocated, an iterative
// mechanism will allocate another by default. Changing the default listening port is unacceptable for server
// discovery.
if ((PParam.builtin.discovery_config.discoveryProtocol == DiscoveryProtocol_t::SERVER
|| PParam.builtin.discovery_config.discoveryProtocol == DiscoveryProtocol_t::BACKUP)
&& pimpl->did_mutation_took_place_on_meta(
PParam.builtin.metatrafficMulticastLocatorList,
PParam.builtin.metatrafficUnicastLocatorList))
{
// we do not log an error because the library may use participant creation as a trial for server existence
logInfo(RTPS_PARTICIPANT, "Server wasn't able to allocate the specified listening port");
delete pimpl;
return nullptr;
}
// Check there is at least one transport registered.
if (!pimpl->networkFactoryHasRegisteredTransports())
{
logError(RTPS_PARTICIPANT, "Cannot create participant, because there is any transport");
delete pimpl;
return nullptr;
}
#if HAVE_SECURITY
// Check security was correctly initialized
if (!pimpl->is_security_initialized())
{
logError(RTPS_PARTICIPANT, "Cannot create participant due to security initialization error");
delete pimpl;
return nullptr;
}
#endif // if HAVE_SECURITY
{
std::lock_guard<std::mutex> guard(m_mutex);
m_RTPSParticipants.push_back(t_p_RTPSParticipant(p, pimpl));
}
if (enabled)
{
// Start protocols
pimpl->enable();
}
return p;
}
bool RTPSDomain::removeRTPSParticipant(
RTPSParticipant* p)
{
if (p != nullptr)
{
p->mp_impl->disable();
std::unique_lock<std::mutex> lock(m_mutex);
for (auto it = m_RTPSParticipants.begin(); it != m_RTPSParticipants.end(); ++it)
{
if (it->second->getGuid().guidPrefix == p->getGuid().guidPrefix)
{
RTPSDomain::t_p_RTPSParticipant participant = *it;
m_RTPSParticipants.erase(it);
m_RTPSParticipantIDs.erase(m_RTPSParticipantIDs.find(participant.second->getRTPSParticipantID()));
lock.unlock();
removeRTPSParticipant_nts(participant);
return true;
}
}
}
logError(RTPS_PARTICIPANT, "RTPSParticipant not valid or not recognized");
return false;
}
void RTPSDomain::removeRTPSParticipant_nts(
RTPSDomain::t_p_RTPSParticipant& participant)
{
delete(participant.second);
}
RTPSWriter* RTPSDomain::createRTPSWriter(
RTPSParticipant* p,
WriterAttributes& watt,
WriterHistory* hist,
WriterListener* listen)
{
RTPSParticipantImpl* impl = RTPSDomainImpl::find_local_participant(p->getGuid());
if (impl)
{
RTPSWriter* ret_val = nullptr;
if (impl->createWriter(&ret_val, watt, hist, listen))
{
return ret_val;
}
}
return nullptr;
}
RTPSWriter* RTPSDomain::createRTPSWriter(
RTPSParticipant* p,
WriterAttributes& watt,
const std::shared_ptr<IPayloadPool>& payload_pool,
WriterHistory* hist,
WriterListener* listen)
{
RTPSParticipantImpl* impl = RTPSDomainImpl::find_local_participant(p->getGuid());
if (impl)
{
RTPSWriter* ret_val = nullptr;
if (impl->createWriter(&ret_val, watt, payload_pool, hist, listen))
{
return ret_val;
}
}
return nullptr;
}
bool RTPSDomain::removeRTPSWriter(
RTPSWriter* writer)
{
if (writer != nullptr)
{
std::unique_lock<std::mutex> lock(m_mutex);
for (auto it = m_RTPSParticipants.begin(); it != m_RTPSParticipants.end(); ++it)
{
if (it->first->getGuid().guidPrefix == writer->getGuid().guidPrefix)
{
t_p_RTPSParticipant participant = *it;
lock.unlock();
return participant.second->deleteUserEndpoint((Endpoint*)writer);
}
}
}
return false;
}
RTPSReader* RTPSDomain::createRTPSReader(
RTPSParticipant* p,
ReaderAttributes& ratt,
ReaderHistory* rhist,
ReaderListener* rlisten)
{
RTPSParticipantImpl* impl = RTPSDomainImpl::find_local_participant(p->getGuid());
if (impl)
{
RTPSReader* reader;
if (impl->createReader(&reader, ratt, rhist, rlisten))
{
return reader;
}
}
return nullptr;
}
RTPSReader* RTPSDomain::createRTPSReader(
RTPSParticipant* p,
ReaderAttributes& ratt,
const std::shared_ptr<IPayloadPool>& payload_pool,
ReaderHistory* rhist,
ReaderListener* rlisten)
{
RTPSParticipantImpl* impl = RTPSDomainImpl::find_local_participant(p->getGuid());
if (impl)
{
RTPSReader* reader;
if (impl->createReader(&reader, ratt, payload_pool, rhist, rlisten))
{
return reader;
}
}
return nullptr;
}
bool RTPSDomain::removeRTPSReader(
RTPSReader* reader)
{
if (reader != nullptr)
{
std::unique_lock<std::mutex> lock(m_mutex);
for (auto it = m_RTPSParticipants.begin(); it != m_RTPSParticipants.end(); ++it)
{
if (it->first->getGuid().guidPrefix == reader->getGuid().guidPrefix)
{
t_p_RTPSParticipant participant = *it;
lock.unlock();
return participant.second->deleteUserEndpoint((Endpoint*)reader);
}
}
}
return false;
}
RTPSParticipant* RTPSDomain::clientServerEnvironmentCreationOverride(
uint32_t domain_id,
bool enabled,
const RTPSParticipantAttributes& att,
RTPSParticipantListener* listen /*= nullptr*/)
{
// Check the specified discovery protocol: if other than simple it has priority over ros environment variable
if (att.builtin.discovery_config.discoveryProtocol != DiscoveryProtocol_t::SIMPLE)
{
logInfo(DOMAIN, "Detected non simple discovery protocol attributes."
<< " Ignoring auto default client-server setup.");
return nullptr;
}
// we only make the attributes copy when we are sure is worth
RTPSParticipantAttributes client_att(att);
// Retrieve the info from the environment variable
if (!load_environment_server_info(client_att.builtin.discovery_config.m_DiscoveryServers))
{
// it's not an error, the environment variable may not be set. Any issue with environment
// variable syntax is logError already
return nullptr;
}
logInfo(DOMAIN, "Detected auto client-server environment variable."
"Trying to create client with the default server setup.");
client_att.builtin.discovery_config.discoveryProtocol = DiscoveryProtocol_t::CLIENT;
// RemoteServerAttributes already fill in above
RTPSParticipant* part = RTPSDomain::createParticipant(domain_id, enabled, client_att, listen);
if (nullptr != part)
{
// client successfully created
logInfo(DOMAIN, "Auto default server-client setup. Default client created.");
return part;
}
// unable to create auto server-client default participants
logError(DOMAIN, "Auto default server-client setup. Unable to create the client.");
return nullptr;
}
void RTPSDomainImpl::create_participant_guid(
int32_t& participant_id,
GUID_t& guid)
{
if (participant_id < 0)
{
std::lock_guard<std::mutex> guard(RTPSDomain::m_mutex);
do
{
participant_id = RTPSDomain::getNewId();
} while (RTPSDomain::m_RTPSParticipantIDs.find(participant_id) != RTPSDomain::m_RTPSParticipantIDs.end());
}
guid_prefix_create(participant_id, guid.guidPrefix);
guid.entityId = c_EntityId_RTPSParticipant;
}
RTPSParticipantImpl* RTPSDomainImpl::find_local_participant(
const GUID_t& guid)
{
std::lock_guard<std::mutex> guard(RTPSDomain::m_mutex);
for (const RTPSDomain::t_p_RTPSParticipant& participant : RTPSDomain::m_RTPSParticipants)
{
if (participant.second->getGuid().guidPrefix == guid.guidPrefix)
{
// Participant found, forward the query
return participant.second;
}
}
return nullptr;
}
RTPSReader* RTPSDomainImpl::find_local_reader(
const GUID_t& reader_guid)
{
std::lock_guard<std::mutex> guard(RTPSDomain::m_mutex);
for (const RTPSDomain::t_p_RTPSParticipant& participant : RTPSDomain::m_RTPSParticipants)
{
if (participant.second->getGuid().guidPrefix == reader_guid.guidPrefix)
{
// Participant found, forward the query
return participant.second->find_local_reader(reader_guid);
}
}
return nullptr;
}
RTPSWriter* RTPSDomainImpl::find_local_writer(
const GUID_t& writer_guid)
{
std::lock_guard<std::mutex> guard(RTPSDomain::m_mutex);
for (const RTPSDomain::t_p_RTPSParticipant& participant : RTPSDomain::m_RTPSParticipants)
{
if (participant.second->getGuid().guidPrefix == writer_guid.guidPrefix)
{
// Participant found, forward the query
return participant.second->find_local_writer(writer_guid);
}
}
return nullptr;
}
/**
* Check whether intraprocess delivery should be used between two GUIDs.
*
* @param local_guid GUID of the local endpoint performing the query.
* @param matched_guid GUID being queried about.
*
* @returns true when intraprocess delivery is enabled, false otherwise.
*/
bool RTPSDomainImpl::should_intraprocess_between(
const GUID_t& local_guid,
const GUID_t& matched_guid)
{
if (!local_guid.is_on_same_process_as(matched_guid))
{
// Not on the same process, should not use intraprocess mechanism.
return false;
}
if (local_guid.entityId == c_EntityId_SPDPWriter || local_guid.entityId == c_EntityId_SPDPReader)
{
// Always disabled for PDP, to avoid inter-domain communications.
return false;
}
switch (xmlparser::XMLProfileManager::library_settings().intraprocess_delivery)
{
case IntraprocessDeliveryType::INTRAPROCESS_FULL:
return true;
case IntraprocessDeliveryType::INTRAPROCESS_USER_DATA_ONLY:
return !matched_guid.is_builtin();
case IntraprocessDeliveryType::INTRAPROCESS_OFF:
default:
break;
}
return false;
}
} // namespace rtps
} // namespace fastrtps
} // namespace eprosima
| 1 | 21,859 | This lock cannot be released here, as it is protecting m_RTPSParticipants and the participant reference. | eProsima-Fast-DDS | cpp |
@@ -967,6 +967,8 @@ Query.prototype.find = function (conditions, callback) {
this.merge(conditions);
}
+ prepareDiscriminatorCriteria(this);
+
try {
this.cast(this.model);
this._castError = null; | 1 | /*!
* Module dependencies.
*/
var mquery = require('mquery');
var util = require('util');
var events = require('events');
var utils = require('./utils');
var Promise = require('./promise');
var helpers = require('./queryhelpers');
var Types = require('./schema/index');
var Document = require('./document');
var QueryStream = require('./querystream');
/**
* Query constructor used for building queries.
*
* ####Example:
*
* var query = new Query();
* query.setOptions({ lean : true });
* query.collection(model.collection);
* query.where('age').gte(21).exec(callback);
*
* @param {Object} [options]
* @param {Object} [model]
* @param {Object} [conditions]
* @param {Object} [collection] Mongoose collection
* @api public
*/
function Query(conditions, options, model, collection) {
// this stuff is for dealing with custom queries created by #toConstructor
if (!this._mongooseOptions) {
this._mongooseOptions = options || {};
} else {
// this is the case where we have a CustomQuery, we need to check if we got
// options passed in, and if we did, merge them in
if (options) {
var keys = Object.keys(options);
for (var i=0; i < keys.length; i++) {
var k = keys[i];
this._mongooseOptions[k] = options[k];
}
}
}
if (collection) {
this.mongooseCollection = collection;
}
if (model) {
this.model = model;
}
// this is needed because map reduce returns a model that can be queried, but
// all of the queries on said model should be lean
if (this.model && this.model._mapreduce) {
this.lean();
}
// inherit mquery
mquery.call(this, this.mongooseCollection, options);
if (conditions) {
this.find(conditions);
}
}
/*!
* inherit mquery
*/
Query.prototype = new mquery;
Query.prototype.constructor = Query;
Query.base = mquery.prototype;
/**
* Flag to opt out of using `$geoWithin`.
*
* mongoose.Query.use$geoWithin = false;
*
* MongoDB 2.4 deprecated the use of `$within`, replacing it with `$geoWithin`. Mongoose uses `$geoWithin` by default (which is 100% backward compatible with $within). If you are running an older version of MongoDB, set this flag to `false` so your `within()` queries continue to work.
*
* @see http://docs.mongodb.org/manual/reference/operator/geoWithin/
* @default true
* @property use$geoWithin
* @memberOf Query
* @receiver Query
* @api public
*/
Query.use$geoWithin = mquery.use$geoWithin;
/**
* Converts this query to a customized, reusable query constructor with all arguments and options retained.
*
* ####Example
*
* // Create a query for adventure movies and read from the primary
* // node in the replica-set unless it is down, in which case we'll
* // read from a secondary node.
* var query = Movie.find({ tags: 'adventure' }).read('primaryPreferred');
*
* // create a custom Query constructor based off these settings
* var Adventure = query.toConstructor();
*
* // Adventure is now a subclass of mongoose.Query and works the same way but with the
* // default query parameters and options set.
* Adventure().exec(callback)
*
* // further narrow down our query results while still using the previous settings
* Adventure().where({ name: /^Life/ }).exec(callback);
*
* // since Adventure is a stand-alone constructor we can also add our own
* // helper methods and getters without impacting global queries
* Adventure.prototype.startsWith = function (prefix) {
* this.where({ name: new RegExp('^' + prefix) })
* return this;
* }
* Object.defineProperty(Adventure.prototype, 'highlyRated', {
* get: function () {
* this.where({ rating: { $gt: 4.5 }});
* return this;
* }
* })
* Adventure().highlyRated.startsWith('Life').exec(callback)
*
* New in 3.7.3
*
* @return {Query} subclass-of-Query
* @api public
*/
Query.prototype.toConstructor = function toConstructor () {
function CustomQuery (criteria, options) {
if (!(this instanceof CustomQuery))
return new CustomQuery(criteria, options);
Query.call(this, criteria, options || null);
}
util.inherits(CustomQuery, Query);
// set inherited defaults
var p = CustomQuery.prototype;
p.options = {};
p.setOptions(this.options);
p.op = this.op;
p._conditions = utils.clone(this._conditions);
p._fields = utils.clone(this._fields);
p._update = utils.clone(this._update);
p._path = this._path;
p._distinct = this._distinct;
p._collection = this._collection;
p.model = this.model;
p.mongooseCollection = this.mongooseCollection;
p._mongooseOptions = this._mongooseOptions;
return CustomQuery;
}
/**
* Specifies a javascript function or expression to pass to MongoDBs query system.
*
* ####Example
*
* query.$where('this.comments.length > 10 || this.name.length > 5')
*
* // or
*
* query.$where(function () {
* return this.comments.length > 10 || this.name.length > 5;
* })
*
* ####NOTE:
*
* Only use `$where` when you have a condition that cannot be met using other MongoDB operators like `$lt`.
* **Be sure to read about all of [its caveats](http://docs.mongodb.org/manual/reference/operator/where/) before using.**
*
* @see $where http://docs.mongodb.org/manual/reference/operator/where/
* @method $where
* @param {String|Function} js javascript string or function
* @return {Query} this
* @memberOf Query
* @method $where
* @api public
*/
/**
* Specifies a `path` for use with chaining.
*
* ####Example
*
* // instead of writing:
* User.find({age: {$gte: 21, $lte: 65}}, callback);
*
* // we can instead write:
* User.where('age').gte(21).lte(65);
*
* // passing query conditions is permitted
* User.find().where({ name: 'vonderful' })
*
* // chaining
* User
* .where('age').gte(21).lte(65)
* .where('name', /^vonderful/i)
* .where('friends').slice(10)
* .exec(callback)
*
* @method where
* @memberOf Query
* @param {String|Object} [path]
* @param {any} [val]
* @return {Query} this
* @api public
*/
/**
* Specifies the complementary comparison value for paths specified with `where()`
*
* ####Example
*
* User.where('age').equals(49);
*
* // is the same as
*
* User.where('age', 49);
*
* @method equals
* @memberOf Query
* @param {Object} val
* @return {Query} this
* @api public
*/
/**
* Specifies arguments for an `$or` condition.
*
* ####Example
*
* query.or([{ color: 'red' }, { status: 'emergency' }])
*
* @see $or http://docs.mongodb.org/manual/reference/operator/or/
* @method or
* @memberOf Query
* @param {Array} array array of conditions
* @return {Query} this
* @api public
*/
/**
* Specifies arguments for a `$nor` condition.
*
* ####Example
*
* query.nor([{ color: 'green' }, { status: 'ok' }])
*
* @see $nor http://docs.mongodb.org/manual/reference/operator/nor/
* @method nor
* @memberOf Query
* @param {Array} array array of conditions
* @return {Query} this
* @api public
*/
/**
* Specifies arguments for a `$and` condition.
*
* ####Example
*
* query.and([{ color: 'green' }, { status: 'ok' }])
*
* @method and
* @memberOf Query
* @see $and http://docs.mongodb.org/manual/reference/operator/and/
* @param {Array} array array of conditions
* @return {Query} this
* @api public
*/
/**
* Specifies a $gt query condition.
*
* When called with one argument, the most recent path passed to `where()` is used.
*
* ####Example
*
* Thing.find().where('age').gt(21)
*
* // or
* Thing.find().gt('age', 21)
*
* @method gt
* @memberOf Query
* @param {String} [path]
* @param {Number} val
* @see $gt http://docs.mongodb.org/manual/reference/operator/gt/
* @api public
*/
/**
* Specifies a $gte query condition.
*
* When called with one argument, the most recent path passed to `where()` is used.
*
* @method gte
* @memberOf Query
* @param {String} [path]
* @param {Number} val
* @see $gte http://docs.mongodb.org/manual/reference/operator/gte/
* @api public
*/
/**
* Specifies a $lt query condition.
*
* When called with one argument, the most recent path passed to `where()` is used.
*
* @method lt
* @memberOf Query
* @param {String} [path]
* @param {Number} val
* @see $lt http://docs.mongodb.org/manual/reference/operator/lt/
* @api public
*/
/**
* Specifies a $lte query condition.
*
* When called with one argument, the most recent path passed to `where()` is used.
*
* @method lte
* @see $lte http://docs.mongodb.org/manual/reference/operator/lte/
* @memberOf Query
* @param {String} [path]
* @param {Number} val
* @api public
*/
/**
* Specifies a $ne query condition.
*
* When called with one argument, the most recent path passed to `where()` is used.
*
* @see $ne http://docs.mongodb.org/manual/reference/operator/ne/
* @method ne
* @memberOf Query
* @param {String} [path]
* @param {Number} val
* @api public
*/
/**
* Specifies an $in query condition.
*
* When called with one argument, the most recent path passed to `where()` is used.
*
* @see $in http://docs.mongodb.org/manual/reference/operator/in/
* @method in
* @memberOf Query
* @param {String} [path]
* @param {Number} val
* @api public
*/
/**
* Specifies an $nin query condition.
*
* When called with one argument, the most recent path passed to `where()` is used.
*
* @see $nin http://docs.mongodb.org/manual/reference/operator/nin/
* @method nin
* @memberOf Query
* @param {String} [path]
* @param {Number} val
* @api public
*/
/**
* Specifies an $all query condition.
*
* When called with one argument, the most recent path passed to `where()` is used.
*
* @see $all http://docs.mongodb.org/manual/reference/operator/all/
* @method all
* @memberOf Query
* @param {String} [path]
* @param {Number} val
* @api public
*/
/**
* Specifies a $size query condition.
*
* When called with one argument, the most recent path passed to `where()` is used.
*
* ####Example
*
* MyModel.where('tags').size(0).exec(function (err, docs) {
* if (err) return handleError(err);
*
* assert(Array.isArray(docs));
* console.log('documents with 0 tags', docs);
* })
*
* @see $size http://docs.mongodb.org/manual/reference/operator/size/
* @method size
* @memberOf Query
* @param {String} [path]
* @param {Number} val
* @api public
*/
/**
* Specifies a $regex query condition.
*
* When called with one argument, the most recent path passed to `where()` is used.
*
* @see $regex http://docs.mongodb.org/manual/reference/operator/regex/
* @method regex
* @memberOf Query
* @param {String} [path]
* @param {Number} val
* @api public
*/
/**
* Specifies a $maxDistance query condition.
*
* When called with one argument, the most recent path passed to `where()` is used.
*
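 * ####Example
 *
 *     // a sketch: used together with near(), limits results to within 5 units of the point
 *     query.where('loc').near({ center: [10, 10] }).maxDistance(5)
 *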
* @see $maxDistance http://docs.mongodb.org/manual/reference/operator/maxDistance/
* @method maxDistance
* @memberOf Query
* @param {String} [path]
* @param {Number} val
* @api public
*/
/**
* Specifies a `$mod` condition
*
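 * ####Example
 *
 *     // a sketch: documents where `inventory % 2 === 1`
 *     query.where('inventory').mod([2, 1])
 *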
* @method mod
* @memberOf Query
* @param {String} [path]
* @param {Number} val
* @return {Query} this
* @see $mod http://docs.mongodb.org/manual/reference/operator/mod/
* @api public
*/
/**
* Specifies an `$exists` condition
*
* ####Example
*
* // { name: { $exists: true }}
* Thing.where('name').exists()
* Thing.where('name').exists(true)
* Thing.find().exists('name')
*
* // { name: { $exists: false }}
* Thing.where('name').exists(false);
* Thing.find().exists('name', false);
*
* @method exists
* @memberOf Query
* @param {String} [path]
* @param {Number} val
* @return {Query} this
* @see $exists http://docs.mongodb.org/manual/reference/operator/exists/
* @api public
*/
/**
* Specifies an `$elemMatch` condition
*
* ####Example
*
* query.elemMatch('comment', { author: 'autobot', votes: {$gte: 5}})
*
* query.where('comment').elemMatch({ author: 'autobot', votes: {$gte: 5}})
*
* query.elemMatch('comment', function (elem) {
* elem.where('author').equals('autobot');
* elem.where('votes').gte(5);
* })
*
* query.where('comment').elemMatch(function (elem) {
* elem.where({ author: 'autobot' });
* elem.where('votes').gte(5);
* })
*
* @method elemMatch
* @memberOf Query
* @param {String|Object|Function} path
* @param {Object|Function} criteria
* @return {Query} this
* @see $elemMatch http://docs.mongodb.org/manual/reference/operator/elemMatch/
* @api public
*/
/**
* Defines a `$within` or `$geoWithin` argument for geo-spatial queries.
*
* ####Example
*
* query.where(path).within().box()
* query.where(path).within().circle()
* query.where(path).within().geometry()
*
* query.where('loc').within({ center: [50,50], radius: 10, unique: true, spherical: true });
* query.where('loc').within({ box: [[40.73, -73.9], [40.7, -73.988]] });
* query.where('loc').within({ polygon: [[],[],[],[]] });
*
* query.where('loc').within([], [], []) // polygon
* query.where('loc').within([], []) // box
* query.where('loc').within({ type: 'LineString', coordinates: [...] }); // geometry
*
* **MUST** be used after `where()`.
*
* ####NOTE:
*
* As of Mongoose 3.7, `$geoWithin` is always used for queries. To change this behavior, see [Query.use$geoWithin](#query_Query-use%2524geoWithin).
*
* ####NOTE:
*
* In Mongoose 3.7, `within` changed from a getter to a function. If you need the old syntax, use [this](https://github.com/ebensing/mongoose-within).
*
* @method within
* @see $polygon http://docs.mongodb.org/manual/reference/operator/polygon/
* @see $box http://docs.mongodb.org/manual/reference/operator/box/
* @see $geometry http://docs.mongodb.org/manual/reference/operator/geometry/
* @see $center http://docs.mongodb.org/manual/reference/operator/center/
* @see $centerSphere http://docs.mongodb.org/manual/reference/operator/centerSphere/
* @memberOf Query
* @return {Query} this
* @api public
*/
/**
* Specifies a $slice projection for an array.
*
* ####Example
*
* query.slice('comments', 5)
* query.slice('comments', -5)
* query.slice('comments', [10, 5])
* query.where('comments').slice(5)
* query.where('comments').slice([-10, 5])
*
* @method slice
* @memberOf Query
* @param {String} [path]
* @param {Number} val number/range of elements to slice
* @return {Query} this
* @see mongodb http://www.mongodb.org/display/DOCS/Retrieving+a+Subset+of+Fields#RetrievingaSubsetofFields-RetrievingaSubrangeofArrayElements
* @see $slice http://docs.mongodb.org/manual/reference/projection/slice/#prj._S_slice
* @api public
*/
/**
* Specifies the maximum number of documents the query will return.
*
* ####Example
*
* query.limit(20)
*
* ####Note
*
* Cannot be used with `distinct()`
*
* @method limit
* @memberOf Query
* @param {Number} val
* @api public
*/
/**
* Specifies the number of documents to skip.
*
* ####Example
*
* query.skip(100).limit(20)
*
* ####Note
*
* Cannot be used with `distinct()`
*
* @method skip
* @memberOf Query
* @param {Number} val
* @see cursor.skip http://docs.mongodb.org/manual/reference/method/cursor.skip/
* @api public
*/
/**
* Specifies the maxScan option.
*
* ####Example
*
* query.maxScan(100)
*
* ####Note
*
* Cannot be used with `distinct()`
*
* @method maxScan
* @memberOf Query
* @param {Number} val
* @see maxScan http://docs.mongodb.org/manual/reference/operator/maxScan/
* @api public
*/
/**
* Specifies the batchSize option.
*
* ####Example
*
* query.batchSize(100)
*
* ####Note
*
* Cannot be used with `distinct()`
*
* @method batchSize
* @memberOf Query
* @param {Number} val
* @see batchSize http://docs.mongodb.org/manual/reference/method/cursor.batchSize/
* @api public
*/
/**
* Specifies the `comment` option.
*
* ####Example
*
* query.comment('login query')
*
* ####Note
*
* Cannot be used with `distinct()`
*
* @method comment
* @memberOf Query
* @param {Number} val
* @see comment http://docs.mongodb.org/manual/reference/operator/comment/
* @api public
*/
/**
* Specifies this query as a `snapshot` query.
*
* ####Example
*
* query.snapshot() // true
* query.snapshot(true)
* query.snapshot(false)
*
* ####Note
*
* Cannot be used with `distinct()`
*
* @method snapshot
* @memberOf Query
* @see snapshot http://docs.mongodb.org/manual/reference/operator/snapshot/
* @return {Query} this
* @api public
*/
/**
* Sets query hints.
*
* ####Example
*
* query.hint({ indexA: 1, indexB: -1})
*
* ####Note
*
* Cannot be used with `distinct()`
*
* @method hint
* @memberOf Query
* @param {Object} val a hint object
* @return {Query} this
* @see $hint http://docs.mongodb.org/manual/reference/operator/hint/
* @api public
*/
/**
* Specifies which document fields to include or exclude
*
* When using string syntax, prefixing a path with `-` will flag that path as excluded. When a path does not have the `-` prefix, it is included. Lastly, if a path is prefixed with `+`, it forces inclusion of the path, which is useful for paths excluded at the [schema level](/docs/api.html#schematype_SchemaType-select).
*
* ####Example
*
* // include a and b, exclude c
* query.select('a b -c');
*
* // or you may use object notation, useful when
* // you have keys already prefixed with a "-"
* query.select({a: 1, b: 1, c: 0});
*
* // force inclusion of field excluded at schema level
* query.select('+path')
*
* ####NOTE:
*
* Cannot be used with `distinct()`.
*
* _v2 had slightly different syntax such as allowing arrays of field names. This support was removed in v3._
*
* @method select
* @memberOf Query
* @param {Object|String} arg
* @return {Query} this
* @see SchemaType
* @api public
*/
/**
* _DEPRECATED_ Sets the slaveOk option.
*
* **Deprecated** in MongoDB 2.2 in favor of [read preferences](#query_Query-read).
*
* ####Example:
*
* query.slaveOk() // true
* query.slaveOk(true)
* query.slaveOk(false)
*
* @method slaveOk
* @memberOf Query
* @deprecated use read() preferences instead if on mongodb >= 2.2
* @param {Boolean} v defaults to true
* @see mongodb http://docs.mongodb.org/manual/applications/replication/#read-preference
* @see slaveOk http://docs.mongodb.org/manual/reference/method/rs.slaveOk/
* @see read() #query_Query-read
* @return {Query} this
* @api public
*/
/**
* Determines the MongoDB nodes from which to read.
*
* ####Preferences:
*
* primary - (default) Read from primary only. Operations will produce an error if primary is unavailable. Cannot be combined with tags.
* secondary Read from secondary if available, otherwise error.
* primaryPreferred Read from primary if available, otherwise a secondary.
* secondaryPreferred Read from a secondary if available, otherwise read from the primary.
* nearest All operations read from among the nearest candidates, but unlike other modes, this option will include both the primary and all secondaries in the random selection.
*
* Aliases
*
* p primary
* pp primaryPreferred
* s secondary
* sp secondaryPreferred
* n nearest
*
* ####Example:
*
* new Query().read('primary')
* new Query().read('p') // same as primary
*
* new Query().read('primaryPreferred')
* new Query().read('pp') // same as primaryPreferred
*
* new Query().read('secondary')
* new Query().read('s') // same as secondary
*
* new Query().read('secondaryPreferred')
* new Query().read('sp') // same as secondaryPreferred
*
* new Query().read('nearest')
* new Query().read('n') // same as nearest
*
* // read from secondaries with matching tags
* new Query().read('s', [{ dc:'sf', s: 1 },{ dc:'ma', s: 2 }])
*
 *     Read more about how to use read preferences [here](http://docs.mongodb.org/manual/applications/replication/#read-preference) and [here](http://mongodb.github.com/node-mongodb-native/driver-articles/anintroductionto1_1and2_2.html#read-preferences).
*
* @method read
* @memberOf Query
* @param {String} pref one of the listed preference options or aliases
* @param {Array} [tags] optional tags for this query
* @see mongodb http://docs.mongodb.org/manual/applications/replication/#read-preference
* @see driver http://mongodb.github.com/node-mongodb-native/driver-articles/anintroductionto1_1and2_2.html#read-preferences
* @return {Query} this
* @api public
*/
/**
* Merges another Query or conditions object into this one.
*
* When a Query is passed, conditions, field selection and options are merged.
*
* New in 3.7.0
*
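 * ####Example
 *
 *     // plain conditions object
 *     query.merge({ name: 'Anne Murray' })
 *
 *     // another Query; its conditions, field selection and options are merged
 *     query.merge(Model.where('age').gte(21))
 *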
* @method merge
* @memberOf Query
* @param {Query|Object} source
* @return {Query} this
*/
/**
* Sets query options.
*
* ####Options:
*
* - [tailable](http://www.mongodb.org/display/DOCS/Tailable+Cursors) *
* - [sort](http://www.mongodb.org/display/DOCS/Advanced+Queries#AdvancedQueries-%7B%7Bsort(\)%7D%7D) *
* - [limit](http://www.mongodb.org/display/DOCS/Advanced+Queries#AdvancedQueries-%7B%7Blimit%28%29%7D%7D) *
* - [skip](http://www.mongodb.org/display/DOCS/Advanced+Queries#AdvancedQueries-%7B%7Bskip%28%29%7D%7D) *
* - [maxscan](http://www.mongodb.org/display/DOCS/Advanced+Queries#AdvancedQueries-%24maxScan) *
* - [batchSize](http://www.mongodb.org/display/DOCS/Advanced+Queries#AdvancedQueries-%7B%7BbatchSize%28%29%7D%7D) *
* - [comment](http://www.mongodb.org/display/DOCS/Advanced+Queries#AdvancedQueries-%24comment) *
* - [snapshot](http://www.mongodb.org/display/DOCS/Advanced+Queries#AdvancedQueries-%7B%7Bsnapshot%28%29%7D%7D) *
* - [hint](http://www.mongodb.org/display/DOCS/Advanced+Queries#AdvancedQueries-%24hint) *
* - [slaveOk](http://docs.mongodb.org/manual/applications/replication/#read-preference) *
* - [lean](./api.html#query_Query-lean) *
* - [safe](http://www.mongodb.org/display/DOCS/getLastError+Command)
*
* _* denotes a query helper method is also available_
*
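 * ####Example
 *
 *     // a minimal sketch using a few of the options listed above
 *     query.setOptions({ limit: 20, skip: 10, lean: true })
 *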
* @param {Object} options
* @api public
*/
Query.prototype.setOptions = function (options, overwrite) {
// overwrite is only for internal use
if (overwrite) {
// ensure that _mongooseOptions & options are two different objects
this._mongooseOptions = (options && utils.clone(options)) || {};
this.options = options || {};
if('populate' in options) {
this.populate(this._mongooseOptions);
}
return this;
}
if (!(options && 'Object' == options.constructor.name)) {
return this;
}
return Query.base.setOptions.call(this, options);
}
/**
* Returns fields selection for this query.
*
* @method _fieldsForExec
* @return {Object}
* @api private
*/
/**
* Return an update document with corrected $set operations.
*
* @method _updateForExec
* @api private
*/
/**
* Makes sure _path is set.
*
* @method _ensurePath
* @param {String} method
* @api private
*/
/**
* Determines if `conds` can be merged using `mquery().merge()`
*
* @method canMerge
* @memberOf Query
* @param {Object} conds
* @return {Boolean}
* @api private
*/
/**
* Returns default options for this query.
*
* @param {Model} model
* @api private
*/
Query.prototype._optionsForExec = function (model) {
var options = Query.base._optionsForExec.call(this);
delete options.populate;
if (!model) {
return options;
} else {
if (!('safe' in options) && model.schema.options.safe) {
options.safe = model.schema.options.safe;
}
if(!('readPreference' in options) && model.schema.options.read) {
options.readPreference = model.schema.options.read;
}
return options;
}
};
/**
* Sets the lean option.
*
* Documents returned from queries with the `lean` option enabled are plain javascript objects, not [MongooseDocuments](#document-js). They have no `save` method, getters/setters or other Mongoose magic applied.
*
* ####Example:
*
* new Query().lean() // true
* new Query().lean(true)
* new Query().lean(false)
*
* Model.find().lean().exec(function (err, docs) {
* docs[0] instanceof mongoose.Document // false
* });
*
* This is a [great](https://groups.google.com/forum/#!topic/mongoose-orm/u2_DzDydcnA/discussion) option in high-performance read-only scenarios, especially when combined with [stream](#query_Query-stream).
*
* @param {Boolean} bool defaults to true
* @return {Query} this
* @api public
*/
Query.prototype.lean = function (v) {
this._mongooseOptions.lean = arguments.length ? !!v : true;
return this;
}
/**
* Finds documents.
*
* When no `callback` is passed, the query is not executed.
*
* ####Example
*
* query.find({ name: 'Los Pollos Hermanos' }).find(callback)
*
* @param {Object} [criteria] mongodb selector
* @param {Function} [callback]
* @return {Query} this
* @api public
*/
Query.prototype.find = function (conditions, callback) {
if ('function' == typeof conditions) {
callback = conditions;
conditions = {};
} else if (conditions instanceof Document) {
conditions = conditions.toObject();
}
if (mquery.canMerge(conditions)) {
this.merge(conditions);
}
try {
this.cast(this.model);
this._castError = null;
} catch (err) {
this._castError = err;
}
// if we don't have a callback, then just return the query object
if (!callback) {
return Query.base.find.call(this);
}
var promise = new Promise(callback);
if (this._castError) {
promise.error(this._castError);
return this;
}
this._applyPaths();
this._fields = this._castFields(this._fields);
var fields = this._fieldsForExec();
var options = this._mongooseOptions;
var self = this;
return Query.base.find.call(this, {}, cb);
function cb(err, docs) {
if (err) return promise.error(err);
if (0 === docs.length) {
return promise.complete(docs);
}
if (!options.populate) {
return true === options.lean
? promise.complete(docs)
: completeMany(self.model, docs, fields, self, null, promise);
}
var pop = helpers.preparePopulationOptionsMQ(self, options);
self.model.populate(docs, pop, function (err, docs) {
if(err) return promise.error(err);
return true === options.lean
? promise.complete(docs)
: completeMany(self.model, docs, fields, self, pop, promise);
});
}
}
/*!
* hydrates many documents
*
* @param {Model} model
* @param {Array} docs
* @param {Object} fields
* @param {Query} self
* @param {Array} [pop] array of paths used in population
* @param {Promise} promise
*/
function completeMany (model, docs, fields, self, pop, promise) {
var arr = [];
var count = docs.length;
var len = count;
var opts = pop ?
{ populated: pop }
: undefined;
for (var i=0; i < len; ++i) {
arr[i] = new model(undefined, fields, true);
arr[i].init(docs[i], opts, function (err) {
if (err) return promise.error(err);
--count || promise.complete(arr);
});
}
}
/**
* Declares the query a findOne operation. When executed, the first found document is passed to the callback.
*
* Passing a `callback` executes the query.
*
* ####Example
*
* var query = Kitten.where({ color: 'white' });
* query.findOne(function (err, kitten) {
* if (err) return handleError(err);
* if (kitten) {
* // doc may be null if no document matched
* }
* });
*
* @param {Object|Query} [criteria] mongodb selector
* @param {Function} [callback]
* @return {Query} this
* @see findOne http://docs.mongodb.org/manual/reference/method/db.collection.findOne/
* @api public
*/
Query.prototype.findOne = function (conditions, fields, options, callback) {
if ('function' == typeof conditions) {
callback = conditions;
conditions = null;
fields = null;
options = null;
}
if ('function' == typeof fields) {
callback = fields;
options = null;
fields = null;
}
if ('function' == typeof options) {
callback = options;
options = null;
}
// make sure we don't send in the whole Document to merge()
if (conditions instanceof Document) {
conditions = conditions.toObject();
}
if (options) {
this.setOptions(options);
}
if (fields) {
this.select(fields);
}
if (mquery.canMerge(conditions)) {
this.merge(conditions);
}
try {
this.cast(this.model);
this._castError = null;
} catch (err) {
this._castError = err;
}
if (!callback) {
// already merged in the conditions, don't need to send them in.
return Query.base.findOne.call(this);
}
var promise = new Promise(callback);
if (this._castError) {
promise.error(this._castError);
return this;
}
this._applyPaths();
this._fields = this._castFields(this._fields);
var options = this._mongooseOptions;
var fields = this._fieldsForExec();
var self = this;
// don't pass in the conditions because we already merged them in
Query.base.findOne.call(this, {}, function cb (err, doc) {
if (err) return promise.error(err);
if (!doc) return promise.complete(null);
if (!options.populate) {
return true === options.lean
? promise.complete(doc)
: completeOne(self.model, doc, fields, self, null, promise);
}
var pop = helpers.preparePopulationOptionsMQ(self, options);
self.model.populate(doc, pop, function (err, doc) {
if (err) return promise.error(err);
return true === options.lean
? promise.complete(doc)
: completeOne(self.model, doc, fields, self, pop, promise);
});
})
return this;
}
/**
 * Specifies this query as a `count` query.
*
* Passing a `callback` executes the query.
*
* ####Example:
*
* var countQuery = model.where({ 'color': 'black' }).count();
*
* query.count({ color: 'black' }).count(callback)
*
* query.count({ color: 'black' }, callback)
*
* query.where('color', 'black').count(function (err, count) {
* if (err) return handleError(err);
* console.log('there are %d kittens', count);
* })
*
* @param {Object} [criteria] mongodb selector
* @param {Function} [callback]
* @return {Query} this
* @see count http://docs.mongodb.org/manual/reference/method/db.collection.count/
* @api public
*/
Query.prototype.count = function (conditions, callback) {
if ('function' == typeof conditions) {
callback = conditions;
conditions = undefined;
}
if (mquery.canMerge(conditions)) {
this.merge(conditions);
}
try {
this.cast(this.model);
} catch (err) {
callback(err);
return this;
}
return Query.base.count.call(this, {}, callback);
}
/**
 * Declares or executes a distinct() operation.
*
* Passing a `callback` executes the query.
*
* ####Example
*
* distinct(criteria, field, fn)
* distinct(criteria, field)
* distinct(field, fn)
* distinct(field)
* distinct(fn)
* distinct()
*
* @param {Object|Query} [criteria]
* @param {String} [field]
* @param {Function} [callback]
* @return {Query} this
* @see distinct http://docs.mongodb.org/manual/reference/method/db.collection.distinct/
* @api public
*/
Query.prototype.distinct = function (conditions, field, callback) {
if (!callback) {
if('function' == typeof field) {
callback = field;
if ('string' == typeof conditions) {
field = conditions;
conditions = undefined;
}
}
switch (typeof conditions) {
case 'string':
field = conditions;
conditions = undefined;
break;
case 'function':
callback = conditions;
field = undefined;
conditions = undefined;
break;
}
}
if (conditions instanceof Document) {
conditions = conditions.toObject();
}
if (mquery.canMerge(conditions)) {
this.merge(conditions)
}
try {
this.cast(this.model);
} catch (err) {
callback(err);
return this;
}
return Query.base.distinct.call(this, {}, field, callback);
}
/**
* Sets the sort order
*
* If an object is passed, values allowed are `asc`, `desc`, `ascending`, `descending`, `1`, and `-1`.
*
* If a string is passed, it must be a space delimited list of path names. The
* sort order of each path is ascending unless the path name is prefixed with `-`
* which will be treated as descending.
*
* ####Example
*
* // sort by "field" ascending and "test" descending
* query.sort({ field: 'asc', test: -1 });
*
* // equivalent
* query.sort('field -test');
*
* ####Note
*
* Cannot be used with `distinct()`
*
* @param {Object|String} arg
* @return {Query} this
* @see cursor.sort http://docs.mongodb.org/manual/reference/method/cursor.sort/
* @api public
*/
Query.prototype.sort = function (arg) {
var nArg = {};
if (arguments.length > 1) {
throw new Error("sort() only takes 1 Argument");
}
if (Array.isArray(arg)) {
// time to deal with the terrible syntax
for (var i=0; i < arg.length; i++) {
if (!Array.isArray(arg[i])) throw new Error("Invalid sort() argument.");
nArg[arg[i][0]] = arg[i][1];
}
} else {
nArg = arg;
}
return Query.base.sort.call(this, nArg);
}
/**
* Declare and/or execute this query as a remove() operation.
*
* ####Example
*
* Model.remove({ artist: 'Anne Murray' }, callback)
*
* ####Note
*
* The operation is only executed when a callback is passed. To force execution without a callback (which would be an unsafe write), we must first call remove() and then execute it by using the `exec()` method.
*
* // not executed
* var query = Model.find().remove({ name: 'Anne Murray' })
*
* // executed
* query.remove({ name: 'Anne Murray' }, callback)
* query.remove({ name: 'Anne Murray' }).remove(callback)
*
* // executed without a callback (unsafe write)
* query.exec()
*
* // summary
* query.remove(conds, fn); // executes
* query.remove(conds)
* query.remove(fn) // executes
* query.remove()
*
* @param {Object|Query} [criteria] mongodb selector
* @param {Function} [callback]
* @return {Query} this
* @see remove http://docs.mongodb.org/manual/reference/method/db.collection.remove/
* @api public
*/
Query.prototype.remove = function (cond, callback) {
if ('function' == typeof cond) {
callback = cond;
cond = null;
}
var cb = 'function' == typeof callback;
try {
this.cast(this.model);
} catch (err) {
if (cb) return process.nextTick(callback.bind(null, err));
return this;
}
return Query.base.remove.call(this, cond, callback);
}
/*!
* hydrates a document
*
* @param {Model} model
* @param {Document} doc
* @param {Object} fields
* @param {Query} self
* @param {Array} [pop] array of paths used in population
* @param {Promise} promise
*/
function completeOne (model, doc, fields, self, pop, promise) {
var opts = pop ?
{ populated: pop }
: undefined;
var casted = new model(undefined, fields, true);
casted.init(doc, opts, function (err) {
if (err) return promise.error(err);
promise.complete(casted);
});
}
/**
* Issues a mongodb [findAndModify](http://www.mongodb.org/display/DOCS/findAndModify+Command) update command.
*
* Finds a matching document, updates it according to the `update` arg, passing any `options`, and returns the found document (if any) to the callback. The query executes immediately if `callback` is passed.
*
* ####Available options
*
* - `new`: bool - true to return the modified document rather than the original. defaults to true
* - `upsert`: bool - creates the object if it doesn't exist. defaults to false.
* - `sort`: if multiple docs are found by the conditions, sets the sort order to choose which doc to update
*
* ####Examples
*
* query.findOneAndUpdate(conditions, update, options, callback) // executes
* query.findOneAndUpdate(conditions, update, options) // returns Query
* query.findOneAndUpdate(conditions, update, callback) // executes
* query.findOneAndUpdate(conditions, update) // returns Query
* query.findOneAndUpdate(update, callback) // returns Query
* query.findOneAndUpdate(update) // returns Query
* query.findOneAndUpdate(callback) // executes
* query.findOneAndUpdate() // returns Query
*
* @method findOneAndUpdate
* @memberOf Query
* @param {Object|Query} [query]
* @param {Object} [doc]
* @param {Object} [options]
* @param {Function} [callback]
* @see mongodb http://www.mongodb.org/display/DOCS/findAndModify+Command
* @return {Query} this
* @api public
*/
/**
* Issues a mongodb [findAndModify](http://www.mongodb.org/display/DOCS/findAndModify+Command) remove command.
*
* Finds a matching document, removes it, passing the found document (if any) to the callback. Executes immediately if `callback` is passed.
*
* ####Available options
*
 * - `sort`: if multiple docs are found by the conditions, sets the sort order to choose which doc to remove
*
* ####Examples
*
* A.where().findOneAndRemove(conditions, options, callback) // executes
* A.where().findOneAndRemove(conditions, options) // return Query
* A.where().findOneAndRemove(conditions, callback) // executes
* A.where().findOneAndRemove(conditions) // returns Query
* A.where().findOneAndRemove(callback) // executes
* A.where().findOneAndRemove() // returns Query
*
* @method findOneAndRemove
* @memberOf Query
* @param {Object} [conditions]
* @param {Object} [options]
* @param {Function} [callback]
* @return {Query} this
* @see mongodb http://www.mongodb.org/display/DOCS/findAndModify+Command
* @api public
*/
/**
* Override mquery.prototype._findAndModify to provide casting etc.
*
* @param {String} type - either "remove" or "update"
* @param {Function} callback
* @api private
*/
Query.prototype._findAndModify = function (type, callback) {
if ('function' != typeof callback) {
throw new Error("Expected callback in _findAndModify");
}
var model = this.model
, promise = new Promise(callback)
, self = this
, castedQuery
, castedDoc
, fields
, opts;
castedQuery = castQuery(this);
if (castedQuery instanceof Error) {
process.nextTick(promise.error.bind(promise, castedQuery));
return promise;
}
opts = this._optionsForExec(model);
if ('remove' == type) {
opts.remove = true;
} else {
if (!('new' in opts)) opts.new = true;
if (!('upsert' in opts)) opts.upsert = false;
castedDoc = castDoc(this);
if (!castedDoc) {
if (opts.upsert) {
// still need to do the upsert to empty doc
castedDoc = { $set: {} };
} else {
return this.findOne(callback);
}
} else if (castedDoc instanceof Error) {
process.nextTick(promise.error.bind(promise, castedDoc));
return promise;
}
}
this._applyPaths();
var self = this;
var options = this._mongooseOptions;
if (this._fields) {
fields = utils.clone(this._fields);
opts.fields = this._castFields(fields);
if (opts.fields instanceof Error) {
process.nextTick(promise.error.bind(promise, opts.fields));
return promise;
}
}
this._collection.findAndModify(castedQuery, castedDoc, opts, utils.tick(cb));
function cb (err, doc) {
if (err) return promise.error(err);
if (!doc || (utils.isObject(doc) && Object.keys(doc).length === 0)) {
return promise.complete(null);
}
if (!options.populate) {
return true === options.lean
? promise.complete(doc)
: completeOne(self.model, doc, fields, self, null, promise);
}
var pop = helpers.preparePopulationOptionsMQ(self, options);
self.model.populate(doc, pop, function (err, doc) {
if (err) return promise.error(err);
return true === options.lean
? promise.complete(doc)
: completeOne(self.model, doc, fields, self, pop, promise);
});
}
return promise;
}
/**
* Declare and/or execute this query as an update() operation.
*
* _All paths passed that are not $atomic operations will become $set ops._
*
* ####Example
*
* Model.where({ _id: id }).update({ title: 'words' })
*
* // becomes
*
* Model.where({ _id: id }).update({ $set: { title: 'words' }})
*
* ####Note
*
 * Passing an empty object `{}` as the doc will result in a no-op unless the `overwrite` option is passed. Without the `overwrite` option set, the update operation will be ignored and the callback executed without sending the command to MongoDB, so as to prevent accidentally overwriting documents in the collection.
*
* ####Note
*
* The operation is only executed when a callback is passed. To force execution without a callback (which would be an unsafe write), we must first call update() and then execute it by using the `exec()` method.
*
* var q = Model.where({ _id: id });
* q.update({ $set: { name: 'bob' }}).update(); // not executed
*
* q.update({ $set: { name: 'bob' }}).exec(); // executed as unsafe
*
* // keys that are not $atomic ops become $set.
* // this executes the same command as the previous example.
* q.update({ name: 'bob' }).exec();
*
* // overwriting with empty docs
* var q = Model.where({ _id: id }).setOptions({ overwrite: true })
* q.update({ }, callback); // executes
*
* // multi update with overwrite to empty doc
* var q = Model.where({ _id: id });
* q.setOptions({ multi: true, overwrite: true })
* q.update({ });
* q.update(callback); // executed
*
* // multi updates
* Model.where()
* .update({ name: /^match/ }, { $set: { arr: [] }}, { multi: true }, callback)
*
* // more multi updates
* Model.where()
* .setOptions({ multi: true })
* .update({ $set: { arr: [] }}, callback)
*
* // single update by default
* Model.where({ email: '[email protected]' })
* .update({ $inc: { counter: 1 }}, callback)
*
* API summary
*
* update(criteria, doc, options, cb) // executes
* update(criteria, doc, options)
* update(criteria, doc, cb) // executes
* update(criteria, doc)
* update(doc, cb) // executes
* update(doc)
* update(cb) // executes
* update(true) // executes (unsafe write)
* update()
*
* @param {Object} [criteria]
* @param {Object} [doc] the update command
* @param {Object} [options]
* @param {Function} [callback]
* @return {Query} this
* @see Model.update http://localhost:8088/docs/api.html#model_Model.update
* @see update http://docs.mongodb.org/manual/reference/method/db.collection.update/
* @api public
*/
Query.prototype.update = function (conditions, doc, options, callback) {
if ('function' === typeof options) {
// Scenario: update(conditions, doc, callback)
callback = options;
options = null;
} else if ('function' === typeof doc) {
// Scenario: update(doc, callback);
callback = doc;
doc = conditions;
conditions = {};
options = null;
} else if ('function' === typeof conditions) {
callback = conditions;
conditions = undefined;
doc = undefined;
options = undefined;
}
// make sure we don't send in the whole Document to merge()
if (conditions instanceof Document) {
conditions = conditions.toObject();
}
// strict is an option used in the update checking, make sure it gets set
if (options) {
if ('strict' in options) {
this._mongooseOptions.strict = options.strict;
}
}
// if doc is undefined at this point, this means this function is being
// executed by exec(not always see below). Grab the update doc from here in
// order to validate
// This could also be somebody calling update() or update({}). Probably not a
// common use case, check for _update to make sure we don't do anything bad
if (!doc && this._update) {
doc = this._updateForExec();
}
if (conditions) {
this._conditions = conditions;
}
// validate the selector part of the query
var castedQuery = castQuery(this);
if (castedQuery instanceof Error) {
if(callback) {
callback(castedQuery);
return this;
} else {
throw castedQuery;
}
}
// validate the update part of the query
var castedDoc;
try {
castedDoc = this._castUpdate(doc, options && options.overwrite);
} catch (err) {
if (callback) {
callback(err);
return this;
} else {
throw err;
}
}
if (!castedDoc) {
callback && callback(null, 0);
return this;
}
return Query.base.update.call(this, castedQuery, castedDoc, options, callback);
}
/**
* Executes the query
*
* ####Examples:
*
* var promise = query.exec();
* var promise = query.exec('update');
*
* query.exec(callback);
* query.exec('find', callback);
*
* @param {String|Function} [operation]
* @param {Function} [callback]
* @return {Promise}
* @api public
*/
Query.prototype.exec = function exec (op, callback) {
var promise = new Promise();
if ('function' == typeof op) {
callback = op;
op = null;
} else if ('string' == typeof op) {
this.op = op;
}
if (callback) promise.addBack(callback);
if (!this.op) {
promise.complete();
return promise;
}
Query.base.exec.call(this, op, promise.resolve.bind(promise));
return promise;
}
/**
* Finds the schema for `path`. This is different than
* calling `schema.path` as it also resolves paths with
* positional selectors (something.$.another.$.path).
*
* @param {String} path
* @api private
*/
Query.prototype._getSchema = function _getSchema (path) {
return this.model._getSchema(path);
}
/*!
* These operators require casting docs
* to real Documents for Update operations.
*/
var castOps = {
$push: 1
, $pushAll: 1
, $addToSet: 1
, $set: 1
};
/*!
* These operators should be cast to numbers instead
* of their path schema type.
*/
var numberOps = {
$pop: 1
, $unset: 1
, $inc: 1
}
/**
* Casts obj for an update command.
*
* @param {Object} obj
* @return {Object} obj after casting its values
* @api private
*/
Query.prototype._castUpdate = function _castUpdate (obj, overwrite) {
if (!obj) return undefined;
var ops = Object.keys(obj)
, i = ops.length
, ret = {}
, hasKeys
, val
while (i--) {
var op = ops[i];
// if overwrite is set, don't do any of the special $set stuff
if ('$' !== op[0] && !overwrite) {
// fix up $set sugar
if (!ret.$set) {
if (obj.$set) {
ret.$set = obj.$set;
} else {
ret.$set = {};
}
}
ret.$set[op] = obj[op];
ops.splice(i, 1);
if (!~ops.indexOf('$set')) ops.push('$set');
} else if ('$set' === op) {
if (!ret.$set) {
ret[op] = obj[op];
}
} else {
ret[op] = obj[op];
}
}
// cast each value
i = ops.length;
// if we get passed {} for the update, we still need to respect that when it
// is an overwrite scenario
if (overwrite) {
hasKeys = true;
}
while (i--) {
op = ops[i];
val = ret[op];
if ('Object' === val.constructor.name && !overwrite) {
hasKeys |= this._walkUpdatePath(val, op);
} else if (overwrite && 'Object' === ret.constructor.name) {
// if we are just using overwrite, cast the query and then we will
// *always* return the value, even if it is an empty object. We need to
// set hasKeys above because we need to account for the case where the
// user passes {} and wants to clobber the whole document
// Also, _walkUpdatePath expects an operation, so give it $set since that
// is basically what we're doing
this._walkUpdatePath(ret, '$set');
} else {
var msg = 'Invalid atomic update value for ' + op + '. '
+ 'Expected an object, received ' + typeof val;
throw new Error(msg);
}
}
return hasKeys && ret;
}
/**
* Walk each path of obj and cast its values
* according to its schema.
*
* @param {Object} obj - part of a query
* @param {String} op - the atomic operator ($pull, $set, etc)
* @param {String} pref - path prefix (internal only)
* @return {Bool} true if this path has keys to update
* @api private
*/
Query.prototype._walkUpdatePath = function _walkUpdatePath (obj, op, pref) {
var prefix = pref ? pref + '.' : ''
, keys = Object.keys(obj)
, i = keys.length
, hasKeys = false
, schema
, key
, val
var strict = 'strict' in this._mongooseOptions
? this._mongooseOptions.strict
: this.model.schema.options.strict;
while (i--) {
key = keys[i];
val = obj[key];
if (val && 'Object' === val.constructor.name) {
// watch for embedded doc schemas
schema = this._getSchema(prefix + key);
if (schema && schema.caster && op in castOps) {
// embedded doc schema
if (strict && !schema) {
// path is not in our strict schema
if ('throw' == strict) {
throw new Error('Field `' + key + '` is not in schema.');
} else {
// ignore paths not specified in schema
delete obj[key];
}
} else {
hasKeys = true;
if ('$each' in val) {
obj[key] = {
$each: this._castUpdateVal(schema, val.$each, op)
}
if (val.$slice) {
obj[key].$slice = val.$slice | 0;
}
if (val.$sort) {
obj[key].$sort = val.$sort;
}
} else {
obj[key] = this._castUpdateVal(schema, val, op);
}
}
} else {
hasKeys |= this._walkUpdatePath(val, op, prefix + key);
}
} else {
schema = '$each' === key
? this._getSchema(pref)
: this._getSchema(prefix + key);
var skip = strict &&
!schema &&
!/real|nested/.test(this.model.schema.pathType(prefix + key));
if (skip) {
if ('throw' == strict) {
throw new Error('Field `' + prefix + key + '` is not in schema.');
} else {
delete obj[key];
}
} else {
hasKeys = true;
obj[key] = this._castUpdateVal(schema, val, op, key);
}
}
}
return hasKeys;
}
/**
* Casts `val` according to `schema` and atomic `op`.
*
* @param {Schema} schema
* @param {Object} val
* @param {String} op - the atomic operator ($pull, $set, etc)
* @param {String} [$conditional]
* @api private
*/
Query.prototype._castUpdateVal = function _castUpdateVal (schema, val, op, $conditional) {
if (!schema) {
// non-existing schema path
return op in numberOps
? Number(val)
: val
}
if (schema.caster && op in castOps &&
(utils.isObject(val) || Array.isArray(val))) {
// Cast values for ops that add data to MongoDB.
// Ensures embedded documents get ObjectIds etc.
var tmp = schema.cast(val);
if (Array.isArray(val)) {
val = tmp;
} else {
val = tmp[0];
}
}
if (op in numberOps) return Number(val);
if (/^\$/.test($conditional)) return schema.castForQuery($conditional, val);
return schema.castForQuery(val)
}
/*!
* castQuery
* @api private
*/
function castQuery (query) {
try {
return query.cast(query.model);
} catch (err) {
return err;
}
}
/*!
* castDoc
* @api private
*/
function castDoc (query) {
try {
return query._castUpdate(query._update);
} catch (err) {
return err;
}
}
/**
* Specifies paths which should be populated with other documents.
*
* ####Example:
*
* Kitten.findOne().populate('owner').exec(function (err, kitten) {
* console.log(kitten.owner.name) // Max
* })
*
* Kitten.find().populate({
* path: 'owner'
* , select: 'name'
* , match: { color: 'black' }
* , options: { sort: { name: -1 }}
* }).exec(function (err, kittens) {
* console.log(kittens[0].owner.name) // Zoopa
* })
*
* // alternatively
* Kitten.find().populate('owner', 'name', null, {sort: { name: -1 }}).exec(function (err, kittens) {
* console.log(kittens[0].owner.name) // Zoopa
* })
*
* Paths are populated after the query executes and a response is received. A separate query is then executed for each path specified for population. After a response for each query has also been returned, the results are passed to the callback.
*
* @param {Object|String} path either the path to populate or an object specifying all parameters
* @param {Object|String} [select] Field selection for the population query
* @param {Model} [model] The name of the model you wish to use for population. If not specified, the name is looked up from the Schema ref.
* @param {Object} [match] Conditions for the population query
* @param {Object} [options] Options for the population query (sort, etc)
* @see population ./populate.html
* @see Query#select #query_Query-select
* @see Model.populate #model_Model.populate
* @return {Query} this
* @api public
*/
Query.prototype.populate = function () {
var res = utils.populate.apply(null, arguments);
var opts = this._mongooseOptions;
if (!utils.isObject(opts.populate)) {
opts.populate = {};
}
for (var i = 0; i < res.length; ++i) {
opts.populate[res[i].path] = res[i];
}
return this;
}
/**
* Casts this query to the schema of `model`
*
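 * ####Example
 *
 *     // a sketch: casts this query's conditions against Kitten's schema
 *     query.cast(Kitten)
 *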
* ####Note
*
* If `obj` is present, it is cast instead of this query.
*
* @param {Model} model
* @param {Object} [obj]
* @return {Object}
* @api public
*/
Query.prototype.cast = function (model, obj) {
obj || (obj = this._conditions);
var schema = model.schema
, paths = Object.keys(obj)
, i = paths.length
, any$conditionals
, schematype
, nested
, path
, type
, val;
while (i--) {
path = paths[i];
val = obj[path];
if ('$or' === path || '$nor' === path || '$and' === path) {
var k = val.length
, orComponentQuery;
while (k--) {
orComponentQuery = new Query(val[k], {}, null, this.mongooseCollection);
orComponentQuery.cast(model);
val[k] = orComponentQuery._conditions;
}
} else if (path === '$where') {
type = typeof val;
if ('string' !== type && 'function' !== type) {
throw new Error("Must have a string or function for $where");
}
if ('function' === type) {
obj[path] = val.toString();
}
continue;
} else {
if (!schema) {
// no casting for Mixed types
continue;
}
schematype = schema.path(path);
if (!schematype) {
// Handle potential embedded array queries
var split = path.split('.')
, j = split.length
, pathFirstHalf
, pathLastHalf
, remainingConds
, castingQuery;
// Find the part of the var path that is a path of the Schema
while (j--) {
pathFirstHalf = split.slice(0, j).join('.');
schematype = schema.path(pathFirstHalf);
if (schematype) break;
}
// If a substring of the input path resolves to an actual real path...
if (schematype) {
// Apply the casting; similar code for $elemMatch in schema/array.js
if (schematype.caster && schematype.caster.schema) {
remainingConds = {};
pathLastHalf = split.slice(j).join('.');
remainingConds[pathLastHalf] = val;
castingQuery = new Query(remainingConds, {}, null, this.mongooseCollection);
castingQuery.cast(schematype.caster);
obj[path] = castingQuery._conditions[pathLastHalf];
} else {
obj[path] = val;
}
continue;
}
if (utils.isObject(val)) {
// handle geo schemas that use object notation
        // { loc: { long: Number, lat: Number }}
var geo = val.$near ? '$near' :
val.$nearSphere ? '$nearSphere' :
val.$within ? '$within' :
val.$geoIntersects ? '$geoIntersects' : '';
if (!geo) {
continue;
}
var numbertype = new Types.Number('__QueryCasting__')
var value = val[geo];
if (val.$maxDistance) {
val.$maxDistance = numbertype.castForQuery(val.$maxDistance);
}
if ('$within' == geo) {
var withinType = value.$center
|| value.$centerSphere
|| value.$box
|| value.$polygon;
if (!withinType) {
          throw new Error('Bad $within parameter: ' + JSON.stringify(val));
}
value = withinType;
} else if ('$near' == geo &&
'string' == typeof value.type && Array.isArray(value.coordinates)) {
// geojson; cast the coordinates
value = value.coordinates;
} else if (('$near' == geo || '$geoIntersects' == geo) &&
value.$geometry && 'string' == typeof value.$geometry.type &&
Array.isArray(value.$geometry.coordinates)) {
// geojson; cast the coordinates
value = value.$geometry.coordinates;
}
;(function _cast (val) {
if (Array.isArray(val)) {
val.forEach(function (item, i) {
if (Array.isArray(item) || utils.isObject(item)) {
return _cast(item);
}
val[i] = numbertype.castForQuery(item);
});
} else {
var nearKeys= Object.keys(val);
var nearLen = nearKeys.length;
while (nearLen--) {
var nkey = nearKeys[nearLen];
var item = val[nkey];
if (Array.isArray(item) || utils.isObject(item)) {
_cast(item);
val[nkey] = item;
} else {
val[nkey] = numbertype.castForQuery(item);
}
}
}
})(value);
}
} else if (val === null || val === undefined) {
continue;
} else if ('Object' === val.constructor.name) {
any$conditionals = Object.keys(val).some(function (k) {
return k.charAt(0) === '$' && k !== '$id' && k !== '$ref';
});
if (!any$conditionals) {
obj[path] = schematype.castForQuery(val);
} else {
var ks = Object.keys(val)
, k = ks.length
, $cond;
while (k--) {
$cond = ks[k];
nested = val[$cond];
if ('$exists' === $cond) {
if ('boolean' !== typeof nested) {
throw new Error("$exists parameter must be Boolean");
}
continue;
}
if ('$type' === $cond) {
if ('number' !== typeof nested) {
throw new Error("$type parameter must be Number");
}
continue;
}
if ('$not' === $cond) {
this.cast(model, nested);
} else {
val[$cond] = schematype.castForQuery($cond, nested);
}
}
}
} else {
obj[path] = schematype.castForQuery(val);
}
}
}
return obj;
}
/**
* Casts selected field arguments for field selection with mongo 2.2
*
 *     query.select({ ids: { $elemMatch: { $in: [hexString] }}})
*
* @param {Object} fields
* @see https://github.com/LearnBoost/mongoose/issues/1091
* @see http://docs.mongodb.org/manual/reference/projection/elemMatch/
* @api private
*/
Query.prototype._castFields = function _castFields (fields) {
var selected
, elemMatchKeys
, keys
, key
, out
, i
if (fields) {
keys = Object.keys(fields);
elemMatchKeys = [];
i = keys.length;
// collect $elemMatch args
while (i--) {
key = keys[i];
if (fields[key].$elemMatch) {
selected || (selected = {});
selected[key] = fields[key];
elemMatchKeys.push(key);
}
}
}
if (selected) {
// they passed $elemMatch, cast em
try {
out = this.cast(this.model, selected);
} catch (err) {
return err;
}
// apply the casted field args
i = elemMatchKeys.length;
while (i--) {
key = elemMatchKeys[i];
fields[key] = out[key];
}
}
return fields;
}
/**
* Applies schematype selected options to this query.
* @api private
*/
Query.prototype._applyPaths = function applyPaths () {
// determine if query is selecting or excluding fields
var fields = this._fields
, exclude
, keys
, ki
if (fields) {
keys = Object.keys(fields);
ki = keys.length;
while (ki--) {
if ('+' == keys[ki][0]) continue;
exclude = 0 === fields[keys[ki]];
break;
}
}
// if selecting, apply default schematype select:true fields
// if excluding, apply schematype select:false fields
var selected = []
, excluded = []
, seen = [];
analyzeSchema(this.model.schema);
switch (exclude) {
case true:
excluded.length && this.select('-' + excluded.join(' -'));
break;
case false:
selected.length && this.select(selected.join(' '));
break;
case undefined:
// user didn't specify fields, implies returning all fields.
// only need to apply excluded fields
excluded.length && this.select('-' + excluded.join(' -'));
break;
}
return seen = excluded = selected = keys = fields = null;
function analyzeSchema (schema, prefix) {
prefix || (prefix = '');
// avoid recursion
if (~seen.indexOf(schema)) return;
seen.push(schema);
schema.eachPath(function (path, type) {
if (prefix) path = prefix + '.' + path;
analyzePath(path, type);
// array of subdocs?
if (type.schema) {
analyzeSchema(type.schema, path);
}
});
}
function analyzePath (path, type) {
if ('boolean' != typeof type.selected) return;
var plusPath = '+' + path;
if (fields && plusPath in fields) {
// forced inclusion
delete fields[plusPath];
// if there are other fields being included, add this one
// if no other included fields, leave this out (implied inclusion)
if (false === exclude && keys.length > 1 && !~keys.indexOf(path)) {
fields[path] = 1;
}
return
};
// check for parent exclusions
var root = path.split('.')[0];
if (~excluded.indexOf(root)) return;
;(type.selected ? selected : excluded).push(path);
}
}
/**
* Returns a Node.js 0.8 style [read stream](http://nodejs.org/docs/v0.8.21/api/stream.html#stream_readable_stream) interface.
*
* ####Example
*
* // follows the nodejs 0.8 stream api
* Thing.find({ name: /^hello/ }).stream().pipe(res)
*
* // manual streaming
* var stream = Thing.find({ name: /^hello/ }).stream();
*
* stream.on('data', function (doc) {
* // do something with the mongoose document
* }).on('error', function (err) {
* // handle the error
* }).on('close', function () {
* // the stream is closed
* });
*
* ####Valid options
*
* - `transform`: optional function which accepts a mongoose document. The return value of the function will be emitted on `data`.
*
* ####Example
*
* // JSON.stringify all documents before emitting
* var stream = Thing.find().stream({ transform: JSON.stringify });
* stream.pipe(writeStream);
*
* @return {QueryStream}
* @param {Object} [options]
* @see QueryStream
* @api public
*/
Query.prototype.stream = function stream (opts) {
return new QueryStream(this, opts);
}
// the rest of these are basically to support older Mongoose syntax with mquery
/**
* _DEPRECATED_ Alias of `maxScan`
*
* @deprecated
* @see maxScan #query_Query-maxScan
* @method maxscan
* @memberOf Query
*/
Query.prototype.maxscan = Query.base.maxScan;
/**
* Sets the tailable option (for use with capped collections).
*
* ####Example
*
* query.tailable() // true
* query.tailable(true)
* query.tailable(false)
*
* ####Note
*
* Cannot be used with `distinct()`
*
* @param {Boolean} bool defaults to true
* @see tailable http://docs.mongodb.org/manual/tutorial/create-tailable-cursor/
* @api public
*/
Query.prototype.tailable = function (val, opts) {
// we need to support the tailable({ awaitdata : true }) as well as the
// tailable(true, {awaitdata :true}) syntax that mquery does not support
if (val && val.constructor.name == 'Object') {
opts = val;
val = true;
}
if (val === undefined) {
val = true;
}
if (opts && opts.awaitdata) this.options.awaitdata = true;
return Query.base.tailable.call(this, val);
}
/**
* Declares an intersects query for `geometry()`.
*
* ####Example
*
* query.where('path').intersects().geometry({
* type: 'LineString'
* , coordinates: [[180.0, 11.0], [180, 9.0]]
* })
*
* query.where('path').intersects({
* type: 'LineString'
* , coordinates: [[180.0, 11.0], [180, 9.0]]
* })
*
* ####NOTE:
*
* **MUST** be used after `where()`.
*
* ####NOTE:
*
* In Mongoose 3.7, `intersects` changed from a getter to a function. If you need the old syntax, use [this](https://github.com/ebensing/mongoose-within).
*
* @method intersects
* @memberOf Query
* @param {Object} [arg]
* @return {Query} this
* @see $geometry http://docs.mongodb.org/manual/reference/operator/geometry/
* @see geoIntersects http://docs.mongodb.org/manual/reference/operator/geoIntersects/
* @api public
*/
/**
* Specifies a `$geometry` condition
*
* ####Example
*
* var polyA = [[[ 10, 20 ], [ 10, 40 ], [ 30, 40 ], [ 30, 20 ]]]
* query.where('loc').within().geometry({ type: 'Polygon', coordinates: polyA })
*
* // or
* var polyB = [[ 0, 0 ], [ 1, 1 ]]
* query.where('loc').within().geometry({ type: 'LineString', coordinates: polyB })
*
* // or
* var polyC = [ 0, 0 ]
* query.where('loc').within().geometry({ type: 'Point', coordinates: polyC })
*
* // or
* query.where('loc').intersects().geometry({ type: 'Point', coordinates: polyC })
*
* The argument is assigned to the most recent path passed to `where()`.
*
* ####NOTE:
*
* `geometry()` **must** come after either `intersects()` or `within()`.
*
* The `object` argument must contain `type` and `coordinates` properties.
* - type {String}
* - coordinates {Array}
*
* @method geometry
* @memberOf Query
* @param {Object} object Must contain a `type` property which is a String and a `coordinates` property which is an Array. See the examples.
* @return {Query} this
* @see $geometry http://docs.mongodb.org/manual/reference/operator/geometry/
* @see http://docs.mongodb.org/manual/release-notes/2.4/#new-geospatial-indexes-with-geojson-and-improved-spherical-geometry
* @see http://www.mongodb.org/display/DOCS/Geospatial+Indexing
* @api public
*/
/**
* Specifies a `$near` or `$nearSphere` condition
*
* These operators return documents sorted by distance.
*
* ####Example
*
* query.where('loc').near({ center: [10, 10] });
* query.where('loc').near({ center: [10, 10], maxDistance: 5 });
* query.where('loc').near({ center: [10, 10], maxDistance: 5, spherical: true });
* query.near('loc', { center: [10, 10], maxDistance: 5 });
*
* @method near
* @memberOf Query
* @param {String} [path]
* @param {Object} val
* @return {Query} this
* @see $near http://docs.mongodb.org/manual/reference/operator/near/
* @see $nearSphere http://docs.mongodb.org/manual/reference/operator/nearSphere/
* @see $maxDistance http://docs.mongodb.org/manual/reference/operator/maxDistance/
* @see http://www.mongodb.org/display/DOCS/Geospatial+Indexing
* @api public
*/
/*!
* Overwriting mquery is needed to support a couple different near() forms found in older
* versions of mongoose
* near([1,1])
* near(1,1)
* near(field, [1,2])
* near(field, 1, 2)
* In addition to all of the normal forms supported by mquery
*/
Query.prototype.near = function () {
var params = [];
var sphere = this._mongooseOptions.nearSphere;
// TODO refactor
if (arguments.length === 1) {
if (Array.isArray(arguments[0])) {
params.push({ center: arguments[0], spherical: sphere });
} else if ('string' == typeof arguments[0]) {
// just passing a path
params.push(arguments[0]);
} else if (utils.isObject(arguments[0])) {
if ('boolean' != typeof arguments[0].spherical) {
arguments[0].spherical = sphere;
}
params.push(arguments[0]);
} else {
throw new TypeError('invalid argument');
}
} else if (arguments.length === 2) {
if ('number' == typeof arguments[0] && 'number' == typeof arguments[1]) {
params.push({ center: [arguments[0], arguments[1]], spherical: sphere});
} else if ('string' == typeof arguments[0] && Array.isArray(arguments[1])) {
params.push(arguments[0]);
params.push({ center: arguments[1], spherical: sphere });
} else if ('string' == typeof arguments[0] && utils.isObject(arguments[1])) {
params.push(arguments[0]);
if ('boolean' != typeof arguments[1].spherical) {
arguments[1].spherical = sphere;
}
params.push(arguments[1]);
} else {
throw new TypeError('invalid argument');
}
} else if (arguments.length === 3) {
if ('string' == typeof arguments[0] && 'number' == typeof arguments[1]
&& 'number' == typeof arguments[2]) {
params.push(arguments[0]);
params.push({ center: [arguments[1], arguments[2]], spherical: sphere });
} else {
throw new TypeError('invalid argument');
}
} else {
throw new TypeError('invalid argument');
}
return Query.base.near.apply(this, params);
}
/**
* _DEPRECATED_ Specifies a `$nearSphere` condition
*
* ####Example
*
* query.where('loc').nearSphere({ center: [10, 10], maxDistance: 5 });
*
* **Deprecated.** Use `query.near()` instead with the `spherical` option set to `true`.
*
* ####Example
*
* query.where('loc').near({ center: [10, 10], spherical: true });
*
* @deprecated
* @see near() #query_Query-near
* @see $near http://docs.mongodb.org/manual/reference/operator/near/
* @see $nearSphere http://docs.mongodb.org/manual/reference/operator/nearSphere/
* @see $maxDistance http://docs.mongodb.org/manual/reference/operator/maxDistance/
*/
Query.prototype.nearSphere = function () {
this._mongooseOptions.nearSphere = true;
this.near.apply(this, arguments);
return this;
}
/**
* Specifies a $polygon condition
*
* ####Example
*
* query.where('loc').within().polygon([10,20], [13, 25], [7,15])
* query.polygon('loc', [10,20], [13, 25], [7,15])
*
* @method polygon
* @memberOf Query
* @param {String|Array} [path]
* @param {Array|Object} [coordinatePairs...]
* @return {Query} this
* @see $polygon http://docs.mongodb.org/manual/reference/operator/polygon/
* @see http://www.mongodb.org/display/DOCS/Geospatial+Indexing
* @api public
*/
/**
* Specifies a $box condition
*
* ####Example
*
* var lowerLeft = [40.73083, -73.99756]
* var upperRight= [40.741404, -73.988135]
*
* query.where('loc').within().box(lowerLeft, upperRight)
* query.box({ ll : lowerLeft, ur : upperRight })
*
* @method box
* @memberOf Query
* @see $box http://docs.mongodb.org/manual/reference/operator/box/
* @see within() Query#within #query_Query-within
* @see http://www.mongodb.org/display/DOCS/Geospatial+Indexing
 * @param {Object|Array} val lower-left coordinates, or a `{ ll : lowerLeft, ur : upperRight }` object
 * @param {Array} [ur] upper-right coordinates
* @return {Query} this
* @api public
*/
/*!
* this is needed to support the mongoose syntax of:
* box(field, { ll : [x,y], ur : [x2,y2] })
* box({ ll : [x,y], ur : [x2,y2] })
*/
Query.prototype.box = function (ll, ur) {
if (!Array.isArray(ll) && utils.isObject(ll)) {
ur = ll.ur;
ll = ll.ll;
}
return Query.base.box.call(this, ll, ur);
}
/**
* Specifies a $center or $centerSphere condition.
*
* ####Example
*
* var area = { center: [50, 50], radius: 10, unique: true }
* query.where('loc').within().circle(area)
* // alternatively
* query.circle('loc', area);
*
* // spherical calculations
* var area = { center: [50, 50], radius: 10, unique: true, spherical: true }
* query.where('loc').within().circle(area)
* // alternatively
* query.circle('loc', area);
*
* New in 3.7.0
*
* @method circle
* @memberOf Query
* @param {String} [path]
* @param {Object} area
* @return {Query} this
* @see $center http://docs.mongodb.org/manual/reference/operator/center/
* @see $centerSphere http://docs.mongodb.org/manual/reference/operator/centerSphere/
* @see $geoWithin http://docs.mongodb.org/manual/reference/operator/within/
* @see http://www.mongodb.org/display/DOCS/Geospatial+Indexing
* @api public
*/
/**
* _DEPRECATED_ Alias for [circle](#query_Query-circle)
*
* **Deprecated.** Use [circle](#query_Query-circle) instead.
*
* @deprecated
* @method center
* @memberOf Query
* @api public
*/
Query.prototype.center = Query.base.circle;
/**
* _DEPRECATED_ Specifies a $centerSphere condition
*
* **Deprecated.** Use [circle](#query_Query-circle) instead.
*
* ####Example
*
* var area = { center: [50, 50], radius: 10 };
* query.where('loc').within().centerSphere(area);
*
* @deprecated
* @param {String} [path]
* @param {Object} val
* @return {Query} this
* @see http://www.mongodb.org/display/DOCS/Geospatial+Indexing
* @see $centerSphere http://docs.mongodb.org/manual/reference/operator/centerSphere/
* @api public
*/
Query.prototype.centerSphere = function () {
if (arguments[0] && arguments[0].constructor.name == 'Object') {
arguments[0].spherical = true;
}
if (arguments[1] && arguments[1].constructor.name == 'Object') {
arguments[1].spherical = true;
}
  return Query.base.circle.apply(this, arguments);
}
/*!
* Export
*/
module.exports = Query;
| 1 | 12,037 | we'll need this in `_findAndModify` too | Automattic-mongoose | js |
@@ -148,7 +148,7 @@ public class SmartStoreInspectorActivity extends Activity implements AdapterView
isGlobal = bundle == null || !bundle.containsKey(IS_GLOBAL_STORE) || bundle.getBoolean(IS_GLOBAL_STORE) || !hasUser;
// dbName is set to DBOpenHelper.DEFAULT_DB_NAME
// if no bundle, or no value for dbName in bundle
- dbName = bundle == null || !bundle.containsKey(DB_NAME) ? DBOpenHelper.DEFAULT_DB_NAME : bundle.getString(DB_NAME);
+ dbName = bundle == null || !bundle.containsKey(DB_NAME) ? DBOpenHelper.DEFAULT_DB_NAME : bundle.getString(DB_NAME, DBOpenHelper.DEFAULT_DB_NAME);
}
private void setupSpinner() { | 1 | /*
* Copyright (c) 2014-present, salesforce.com, inc.
* All rights reserved.
* Redistribution and use of this software in source and binary forms, with or
* without modification, are permitted provided that the following conditions
* are met:
* - Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* - Neither the name of salesforce.com, inc. nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission of salesforce.com, inc.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
package com.salesforce.androidsdk.smartstore.ui;
import android.app.Activity;
import android.app.AlertDialog;
import android.content.Intent;
import android.os.Bundle;
import android.text.SpannableString;
import android.text.Spanned;
import android.text.TextUtils;
import android.util.Pair;
import android.view.View;
import android.view.animation.Animation;
import android.view.animation.AnimationUtils;
import android.view.animation.GridLayoutAnimationController;
import android.widget.AdapterView;
import android.widget.ArrayAdapter;
import android.widget.EditText;
import android.widget.GridView;
import android.widget.MultiAutoCompleteTextView;
import android.widget.MultiAutoCompleteTextView.Tokenizer;
import android.widget.Spinner;
import com.salesforce.androidsdk.accounts.UserAccount;
import com.salesforce.androidsdk.smartstore.R;
import com.salesforce.androidsdk.smartstore.app.SmartStoreSDKManager;
import com.salesforce.androidsdk.smartstore.store.DBOpenHelper;
import com.salesforce.androidsdk.smartstore.store.QuerySpec;
import com.salesforce.androidsdk.smartstore.store.SmartSqlHelper;
import com.salesforce.androidsdk.smartstore.store.SmartStore;
import com.salesforce.androidsdk.smartstore.util.SmartStoreLogger;
import com.salesforce.androidsdk.util.JSONObjectHelper;
import org.json.JSONArray;
import org.json.JSONException;
import org.json.JSONObject;
import java.util.ArrayList;
import java.util.LinkedList;
import java.util.List;
import java.util.Locale;
public class SmartStoreInspectorActivity extends Activity implements AdapterView.OnItemSelectedListener {
// Keys for extras bundle
private static final String IS_GLOBAL_STORE = "isGlobalStore";
private static final String DB_NAME = "dbName";
private static final String TAG = "SmartStoreInspectorActivity";
// Default page size / index
private static final int DEFAULT_PAGE_SIZE = 100;
private static final int DEFAULT_PAGE_INDEX = 0;
public static final String USER_STORE = " (user store)";
public static final String GLOBAL_STORE = " (global store)";
public static final String DEFAULT_STORE = "default";
// Store
private String dbName;
private boolean isGlobal;
private SmartStore smartStore;
private List<String> allStores;
// View elements
private Spinner spinner;
private MultiAutoCompleteTextView queryText;
private EditText pageSizeText;
private EditText pageIndexText;
private GridView resultGrid;
// Test support
private String lastAlertTitle;
private String lastAlertMessage;
private JSONArray lastResults;
// Default queries
private String SOUPS_QUERY = String.format(Locale.US, "select %s from %s", SmartStore.SOUP_NAME_COL, SmartStore.SOUP_ATTRS_TABLE);
private String INDICES_QUERY = String.format(Locale.US, "select %s, %s, %s from %s", SmartStore.SOUP_NAME_COL, SmartStore.PATH_COL, SmartStore.COLUMN_TYPE_COL, SmartStore.SOUP_INDEX_MAP_TABLE);
/**
* Create intent to bring up inspector
	 * @param parentActivity the activity launching the inspector
* @param isGlobal pass true to get an inspector for the default global smartstore
* pass false to get an inspector for the default user smartstore
	 * @param dbName name of the database to inspect
	 * @return intent that can be used to launch the inspector activity
*/
public static Intent getIntent(Activity parentActivity, boolean isGlobal, String dbName) {
final Bundle bundle = new Bundle();
bundle.putBoolean(IS_GLOBAL_STORE, isGlobal);
bundle.putString(DB_NAME, dbName);
final Intent intent = new Intent(parentActivity, SmartStoreInspectorActivity.class);
intent.putExtras(bundle);
return intent;
}
@Override
protected void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
readExtras();
setContentView(R.layout.sf__inspector);
getActionBar().setTitle(R.string.sf__inspector_title);
spinner = findViewById(R.id.sf__inspector_stores_spinner);
queryText = findViewById(R.id.sf__inspector_query_text);
pageSizeText = findViewById(R.id.sf__inspector_pagesize_text);
pageIndexText = findViewById(R.id.sf__inspector_pageindex_text);
resultGrid = findViewById(R.id.sf__inspector_result_grid);
setupSpinner();
}
@Override
protected void onResume() {
super.onResume();
setupStore(isGlobal, dbName);
}
private void readExtras() {
Bundle bundle = getIntent().getExtras();
boolean hasUser = SmartStoreSDKManager.getInstance().getUserAccountManager().getCachedCurrentUser() != null;
// isGlobal is set to true
// if no bundle, or no value for isGlobalStore in bundle, or true specified for isGlobalStore in bundle, or there is no current user
isGlobal = bundle == null || !bundle.containsKey(IS_GLOBAL_STORE) || bundle.getBoolean(IS_GLOBAL_STORE) || !hasUser;
// dbName is set to DBOpenHelper.DEFAULT_DB_NAME
// if no bundle, or no value for dbName in bundle
dbName = bundle == null || !bundle.containsKey(DB_NAME) ? DBOpenHelper.DEFAULT_DB_NAME : bundle.getString(DB_NAME);
}
private void setupSpinner() {
SmartStoreSDKManager mgr = SmartStoreSDKManager.getInstance();
allStores = new ArrayList<>();
for (String dbName : mgr.getUserStoresPrefixList()) allStores.add(getDisplayNameForStore(false, dbName));
for (String dbName : mgr.getGlobalStoresPrefixList()) allStores.add(getDisplayNameForStore(true, dbName));
int selectedStoreIndex = allStores.indexOf(getDisplayNameForStore(this.isGlobal, this.dbName));
spinner.setAdapter(new ArrayAdapter<>(this, android.R.layout.simple_spinner_item, allStores));
spinner.setSelection(selectedStoreIndex);
spinner.setOnItemSelectedListener(this);
}
private String getDisplayNameForStore(boolean isGlobal, String dbName) {
return (dbName.equals(DBOpenHelper.DEFAULT_DB_NAME) ? DEFAULT_STORE : dbName) + (isGlobal ? GLOBAL_STORE : USER_STORE);
}
private Pair<Boolean, String> getStoreFromDisplayName(String storeDisplayName) {
boolean isGlobal;
String dbName;
if (storeDisplayName.endsWith(GLOBAL_STORE)) {
isGlobal = true;
dbName = storeDisplayName.substring(0, storeDisplayName.length() - GLOBAL_STORE.length());
}
else {
isGlobal = false;
dbName = storeDisplayName.substring(0, storeDisplayName.length() - USER_STORE.length());
}
dbName = dbName.equals(DEFAULT_STORE) ? DBOpenHelper.DEFAULT_DB_NAME : dbName;
return new Pair<>(isGlobal, dbName);
}
private void setupStore(boolean isGlobal, String dbName) {
SmartStoreSDKManager mgr = SmartStoreSDKManager.getInstance();
UserAccount currentUser = mgr.getUserAccountManager().getCachedCurrentUser();
if (this.isGlobal != isGlobal || !this.dbName.equals(dbName) || smartStore == null) {
this.isGlobal = isGlobal;
this.dbName = dbName;
smartStore = isGlobal ? mgr.getGlobalSmartStore(dbName) : mgr.getSmartStore(dbName, currentUser, null);
setupAutocomplete(queryText);
}
}
/**
* Called when item selected in stores drop down
* @param adapterView
* @param view
* @param i
* @param l
*/
@Override
public void onItemSelected(AdapterView<?> adapterView, View view, int i, long l) {
Pair<Boolean, String> selectedStore = getStoreFromDisplayName(allStores.get(i));
setupStore(selectedStore.first, selectedStore.second);
}
/**
* Called when no item is selected in stores drop down
* @param adapterView
*/
@Override
public void onNothingSelected(AdapterView<?> adapterView) {
}
/**
* Called when "Clear" button is clicked
*
* @param v
*/
public void onClearClick(View v) {
reset();
}
/**
* Reset activity to its original state
*/
public void reset() {
queryText.setText("");
pageSizeText.setText("");
pageIndexText.setText("");
resultGrid.setAdapter(null);
lastAlertTitle = null;
lastAlertMessage = null;
lastResults = null;
}
/**
* @return title of last alert shown (used by tests)
*/
public String getLastAlertTitle() {
return lastAlertTitle;
}
/**
* @return message of last alert shown (used by tests)
*/
public String getLastAlertMessage() {
return lastAlertMessage;
}
/**
* @return last results shown (used by tests)
*/
public JSONArray getLastResults() {
return lastResults;
}
/**
* Called when "Run" button is clicked
*
* @param v
*/
public void onRunClick(View v) {
runQuery();
}
/**
* Called when "Soups" button is clicked
*
* @param v
*/
public void onSoupsClick(View v) {
List<String> names = smartStore.getAllSoupNames();
if (names.size() == 0) {
showAlert(null, getString(R.string.sf__inspector_no_soups_found));
return;
}
if (names.size() > 100) {
queryText.setText(SOUPS_QUERY);
} else {
StringBuilder sb = new StringBuilder();
boolean first = true;
for (String name : names) {
if (!first)
sb.append(" union ");
sb.append("select '");
sb.append(name);
sb.append("', count(*) from {");
sb.append(name);
sb.append("}");
first = false;
}
queryText.setText(sb.toString());
}
runQuery();
}
/**
* Called when "Indices" button is clicked
*
* @param v
*/
public void onIndicesClick(View v) {
queryText.setText(INDICES_QUERY);
runQuery();
}
/**
* Helper method that builds query spec from typed query, runs it and
* updates result grid
*/
private void runQuery() {
try {
String query = queryText.getText().toString();
if (query.length() == 0) {
showAlert(null, getString(R.string.sf__inspector_no_query_specified));
return;
}
int pageSize = getInt(pageSizeText, DEFAULT_PAGE_SIZE);
int pageIndex = getInt(pageIndexText, DEFAULT_PAGE_INDEX);
QuerySpec querySpec = QuerySpec
.buildSmartQuerySpec(query, pageSize);
showResult(smartStore.query(querySpec, pageIndex));
} catch (Exception e) {
showAlert(e.getClass().getSimpleName(), e.getMessage());
}
}
/**
	 * Helper function to get the integer typed in a text field. Returns defaultValue
	 * if no integer was typed.
*
* @param textField
* @param defaultValue
* @return
*/
private int getInt(EditText textField, int defaultValue) {
String s = textField.getText().toString();
if (s.length() == 0) {
return defaultValue;
} else {
return Integer.parseInt(s);
}
}
private void showAlert(String title, String message) {
lastAlertTitle = title;
lastAlertMessage = message;
new AlertDialog.Builder(this).setTitle(title)
.setMessage(message).show();
}
/**
* Helper method to populate result grid with query result set (expected to
* be a JSONArray of JSONArray's)
*
* @param result
* @throws JSONException
*/
private void showResult(JSONArray result) throws JSONException {
lastResults = result;
ArrayAdapter<String> adapter = new ArrayAdapter<String>(this,
R.layout.sf__inspector_result_cell);
if (result.length() == 0) {
showAlert(null, getString(R.string.sf__inspector_no_rows_returned));
}
for (int j = 0; j < result.length(); j++) {
JSONArray row = result.getJSONArray(j);
for (int i = 0; i < row.length(); i++) {
Object val = JSONObjectHelper.opt(row, i);
adapter.add(val instanceof JSONObject
? ((JSONObject) val).toString(2)
: (val == null ? "null" : val.toString()));
}
}
int numColumns = (result.length() > 0 ? result.getJSONArray(0).length()
: 0);
resultGrid.setNumColumns(numColumns);
resultGrid.setAdapter(adapter);
animateGridView(resultGrid);
}
/**
* Helper method to attach animation to grid view
*
* @param gridView
*/
private void animateGridView(GridView gridView) {
Animation animation = AnimationUtils.loadAnimation(this,
android.R.anim.fade_in);
GridLayoutAnimationController animationController = new GridLayoutAnimationController(
animation, 0f, 0.1f);
gridView.setLayoutAnimation(animationController);
animationController.start();
}
/**
* Helper method to setup auto-complete for query input field
*
* @param textView
*/
private void setupAutocomplete(MultiAutoCompleteTextView textView) {
ArrayAdapter<String> adapter = new ArrayAdapter<String>(this,
android.R.layout.simple_dropdown_item_1line);
// Adding {soupName} and {soupName:specialField}
List<String> names = new LinkedList<String>();
names.addAll(smartStore.getAllSoupNames());
for (String name : names) {
adapter.add("{" + name + "}");
adapter.add("{" + name + ":" + SmartSqlHelper.SOUP + "}");
adapter.add("{" + name + ":" + SmartStore.SOUP_ENTRY_ID + "}");
adapter.add("{" + name + ":" + SmartStore.SOUP_LAST_MODIFIED_DATE
+ "}");
}
// Adding {soupName:indexedPath}
try {
JSONArray result = smartStore.query(QuerySpec.buildSmartQuerySpec(
"SELECT soupName, path FROM soup_index_map", 1000), 0);
for (int j = 0; j < result.length(); j++) {
JSONArray row = result.getJSONArray(j);
adapter.add("{" + row.getString(0) + ":" + row.getString(1)
+ "}");
}
} catch (JSONException e) {
SmartStoreLogger.e(TAG, "Error occurred while parsing JSON", e);
}
// Adding some SQL keywords
adapter.add("select");
adapter.add("from");
adapter.add("where");
adapter.add("order by");
adapter.add("asc");
adapter.add("desc");
adapter.add("group by");
textView.setAdapter(adapter);
textView.setTokenizer(new QueryTokenizer());
}
}
/**
 * Tokenizer used by the query auto-complete field
*
* @author wmathurin
*
*/
class QueryTokenizer implements Tokenizer {
public int findTokenStart(CharSequence text, int cursor) {
int i = cursor;
while (i > 0 && text.charAt(i - 1) != ' ') {
i--;
}
return i;
}
public int findTokenEnd(CharSequence text, int cursor) {
int i = cursor;
int len = text.length();
while (i < len) {
if (text.charAt(i) == ' ') {
return i;
} else {
i++;
}
}
return len;
}
public CharSequence terminateToken(CharSequence text) {
int i = text.length();
while (i > 0 && text.charAt(i - 1) == ' ') {
i--;
}
if (i > 0 && text.charAt(i - 1) == ' ') {
return text;
} else {
if (text instanceof Spanned) {
SpannableString sp = new SpannableString(text + " ");
TextUtils.copySpansFrom((Spanned) text, 0, text.length(),
Object.class, sp, 0);
return sp;
} else {
return text;
}
}
}
} | 1 | 17,647 | This is the fix for the crash. The function to create the intent to launch this activity requires `dbName`, so it has to be set to `null`. In such cases, the value for `dbName` will be set to `null` and cause issues throughout this activity. This adds a default value if the explicitly assigned value is `null`. | forcedotcom-SalesforceMobileSDK-Android | java
@@ -412,6 +412,11 @@ class WordDocument(UIADocumentWithTableNavigation,WordDocumentNode,WordDocumentB
# Microsoft Word duplicates the full title of the document on this control, which is redundant as it appears in the title of the app itself.
name=u""
+ def event_textChange(self):
+ # Ensure Braille is updated when text changes,
+ # As Microsoft Word does not fire caret events when typing text, even though the caret does move.
+ braille.handler.handleCaretMove(self)
+
def event_UIA_notification(self, activityId=None, **kwargs):
# #10851: in recent Word 365 releases, UIA notification will cause NVDA to announce edit functions
# such as "delete back word" when Control+Backspace is pressed. | 1 | # This file is covered by the GNU General Public License.
# A part of NonVisual Desktop Access (NVDA)
# See the file COPYING for more details.
# Copyright (C) 2016-2021 NV Access Limited, Joseph Lee, Jakub Lukowicz
from comtypes import COMError
from collections import defaultdict
from scriptHandler import isScriptWaiting
import textInfos
import eventHandler
import UIAHandler
from logHandler import log
import controlTypes
import ui
import speech
import review
import braille
import api
import browseMode
from UIABrowseMode import UIABrowseModeDocument, UIADocumentWithTableNavigation, UIATextAttributeQuicknavIterator, TextAttribUIATextInfoQuickNavItem
from UIAUtils import *
from . import UIA, UIATextInfo
from NVDAObjects.window.winword import (
WordDocument as WordDocumentBase,
WordDocumentTextInfo as LegacyWordDocumentTextInfo
)
from scriptHandler import script
"""Support for Microsoft Word via UI Automation."""
#: the non-printable unicode character that represents the end of cell or end of row mark in Microsoft Word
END_OF_ROW_MARK = '\x07'
class ElementsListDialog(browseMode.ElementsListDialog):
ELEMENT_TYPES=(browseMode.ElementsListDialog.ELEMENT_TYPES[0],browseMode.ElementsListDialog.ELEMENT_TYPES[1],
# Translators: The label of a radio button to select the type of element
# in the browse mode Elements List dialog.
("annotation", _("&Annotations")),
# Translators: The label of a radio button to select the type of element
# in the browse mode Elements List dialog.
("error", _("&Errors")),
)
class RevisionUIATextInfoQuickNavItem(TextAttribUIATextInfoQuickNavItem):
attribID=UIAHandler.UIA_AnnotationTypesAttributeId
wantedAttribValues={UIAHandler.AnnotationType_InsertionChange,UIAHandler.AnnotationType_DeletionChange,UIAHandler.AnnotationType_TrackChanges}
@property
def label(self):
text=self.textInfo.text
if UIAHandler.AnnotationType_InsertionChange in self.attribValues:
# Translators: The label shown for an insertion change
return _(u"insertion: {text}").format(text=text)
elif UIAHandler.AnnotationType_DeletionChange in self.attribValues:
# Translators: The label shown for a deletion change
return _(u"deletion: {text}").format(text=text)
else:
# Translators: The general label shown for track changes
return _(u"track change: {text}").format(text=text)
def getCommentInfoFromPosition(position):
"""
Fetches information about the comment located at the given position in a word document.
@param position: a TextInfo representing the span of the comment in the word document.
	@type position: L{TextInfo}
@return: A dictionary containing keys of comment, author and date
@rtype: dict
"""
val=position._rangeObj.getAttributeValue(UIAHandler.UIA_AnnotationObjectsAttributeId)
if not val:
return
try:
UIAElementArray=val.QueryInterface(UIAHandler.IUIAutomationElementArray)
except COMError:
return
for index in range(UIAElementArray.length):
UIAElement=UIAElementArray.getElement(index)
UIAElement=UIAElement.buildUpdatedCache(UIAHandler.handler.baseCacheRequest)
typeID = UIAElement.GetCurrentPropertyValue(UIAHandler.UIA_AnnotationAnnotationTypeIdPropertyId)
# Use Annotation Type Comment if available
if typeID == UIAHandler.AnnotationType_Comment:
comment = UIAElement.GetCurrentPropertyValue(UIAHandler.UIA_NamePropertyId)
author = UIAElement.GetCurrentPropertyValue(UIAHandler.UIA_AnnotationAuthorPropertyId)
date = UIAElement.GetCurrentPropertyValue(UIAHandler.UIA_AnnotationDateTimePropertyId)
return dict(comment=comment, author=author, date=date)
else:
obj = UIA(UIAElement=UIAElement)
if (
not obj.parent
				# Because the name of this object is language sensitive, check if it has the UIA Annotation Pattern
or not obj.parent.UIAElement.getCurrentPropertyValue(
UIAHandler.UIA_IsAnnotationPatternAvailablePropertyId
)
):
continue
comment = obj.makeTextInfo(textInfos.POSITION_ALL).text
tempObj = obj.previous.previous
authorObj = tempObj or obj.previous
author = authorObj.name
if not tempObj:
return dict(comment=comment, author=author)
dateObj = obj.previous
date = dateObj.name
return dict(comment=comment, author=author, date=date)
def getPresentableCommentInfoFromPosition(commentInfo):
if "date" not in commentInfo:
# Translators: The message reported for a comment in Microsoft Word
return _("Comment: {comment} by {author}").format(**commentInfo)
# Translators: The message reported for a comment in Microsoft Word
return _("Comment: {comment} by {author} on {date}").format(**commentInfo)
class CommentUIATextInfoQuickNavItem(TextAttribUIATextInfoQuickNavItem):
attribID=UIAHandler.UIA_AnnotationTypesAttributeId
wantedAttribValues={UIAHandler.AnnotationType_Comment,}
@property
def label(self):
commentInfo=getCommentInfoFromPosition(self.textInfo)
return getPresentableCommentInfoFromPosition(commentInfo)
class WordDocumentTextInfo(UIATextInfo):
def _ensureRangeVisibility(self):
try:
inView = self.pointAtStart in self.obj.location
except LookupError:
inView = False
if not inView:
self._rangeObj.ScrollIntoView(True)
def updateSelection(self):
# #9611: The document must be scrolled so that the range is visible on screen
# Otherwise trying to set the selection to the range
# may cause the selection to remain on the wrong page.
self._ensureRangeVisibility()
super().updateSelection()
def updateCaret(self):
# #9611: The document must be scrolled so that the range is visible on screen
# Otherwise trying to set the caret to the range
# may cause the caret to remain on the wrong page.
self._ensureRangeVisibility()
super().updateCaret()
def _get_locationText(self):
point = self.pointAtStart
# UIA has no good way yet to convert coordinates into user-configured distances such as inches or centimetres.
# Nor can it give us specific distances from the edge of a page.
# Therefore for now, get the screen coordinates, and if the word object model is available, use our legacy code to get the location text.
om=self.obj.WinwordWindowObject
if not om:
return super(WordDocumentTextInfo,self).locationText
try:
r=om.rangeFromPoint(point.x,point.y)
except (COMError,NameError):
log.debugWarning("MS Word object model does not support rangeFromPoint")
return super(WordDocumentTextInfo,self).locationText
from NVDAObjects.window.winword import WordDocumentTextInfo as WordObjectModelTextInfo
i=WordObjectModelTextInfo(self.obj,None,_rangeObj=r)
return i.locationText
def _getTextWithFields_text(self,textRange,formatConfig,UIAFormatUnits=None):
if UIAFormatUnits is None and self.UIAFormatUnits:
# Word documents must always split by a unit the first time, as an entire text chunk can give valid annotation types
UIAFormatUnits=self.UIAFormatUnits
return super(WordDocumentTextInfo,self)._getTextWithFields_text(textRange,formatConfig,UIAFormatUnits=UIAFormatUnits)
def _get_controlFieldNVDAObjectClass(self):
return WordDocumentNode
def _getControlFieldForUIAObject(self, obj, isEmbedded=False, startOfNode=False, endOfNode=False):
# Ignore strange editable text fields surrounding most inner fields (links, table cells etc)
automationID=obj.UIAElement.cachedAutomationID
field = super(WordDocumentTextInfo, self)._getControlFieldForUIAObject(
obj,
isEmbedded=isEmbedded,
startOfNode=startOfNode,
endOfNode=endOfNode
)
if automationID.startswith('UIA_AutomationId_Word_Page_'):
field['page-number']=automationID.rsplit('_',1)[-1]
elif obj.UIAElement.cachedControlType==UIAHandler.UIA_GroupControlTypeId and obj.name:
field['role']=controlTypes.Role.EMBEDDEDOBJECT
field['alwaysReportName']=True
elif obj.UIAElement.cachedControlType==UIAHandler.UIA_CustomControlTypeId and obj.name:
# Include foot note and endnote identifiers
field['content']=obj.name
field['role']=controlTypes.Role.LINK
if obj.role==controlTypes.Role.LIST or obj.role==controlTypes.Role.EDITABLETEXT:
field['states'].add(controlTypes.State.READONLY)
if obj.role==controlTypes.Role.LIST:
# To stay compatible with the older MS Word implementation, don't expose lists in word documents as actual lists. This suppresses announcement of entering and exiting them.
# Note that bullets and numbering are still announced of course.
# Eventually we'll want to stop suppressing this, but for now this is more confusing than good (as in many cases announcing of new bullets when pressing enter causes exit and then enter to be spoken).
field['role']=controlTypes.Role.EDITABLETEXT
if obj.role==controlTypes.Role.GRAPHIC:
# Label graphics with a description before name as name seems to be auto-generated (E.g. "rectangle")
field['content'] = (
field.pop('description', None)
or obj.description
or field.pop('name', None)
or obj.name
)
# #11430: Read-only tables, such as in the Outlook message viewer
# should be treated as layout tables,
# if they have either 1 column or 1 row.
if (
obj.appModule.appName == 'outlook'
and obj.role == controlTypes.Role.TABLE
and controlTypes.State.READONLY in obj.states
and (
obj.rowCount <= 1
or obj.columnCount <= 1
)
):
field['table-layout'] = True
return field
def _getTextFromUIARange(self, textRange):
t=super(WordDocumentTextInfo,self)._getTextFromUIARange(textRange)
if t:
# HTML emails expose a lot of vertical tab chars in their text
			# Really better as carriage returns
t=t.replace('\v','\r')
# Remove end-of-row markers from the text - they are not useful
t = t.replace(END_OF_ROW_MARK, '')
return t
def _isEndOfRow(self):
""" Is this textInfo positioned on an end-of-row mark? """
info=self.copy()
info.expand(textInfos.UNIT_CHARACTER)
return info._rangeObj.getText(-1)==u'\u0007'
def move(self,unit,direction,endPoint=None):
if endPoint is None:
res=super(WordDocumentTextInfo,self).move(unit,direction)
if res==0:
return 0
# Skip over end of Row marks
while self._isEndOfRow():
if self.move(unit,1 if direction>0 else -1)==0:
break
return res
return super(WordDocumentTextInfo,self).move(unit,direction,endPoint)
def expand(self,unit):
super(WordDocumentTextInfo,self).expand(unit)
# #7970: MS Word refuses to expand to line when on the final line and it is blank.
# This among other things causes a newly inserted bullet not to be spoken or brailled.
# Therefore work around this by detecting if the expand to line failed, and moving the end of the range to the end of the document manually.
if self.isCollapsed:
if self.move(unit,1,endPoint="end")==0:
docInfo=self.obj.makeTextInfo(textInfos.POSITION_ALL)
self.setEndPoint(docInfo,"endToEnd")
def getTextWithFields(self,formatConfig=None):
fields = None
# #11043: when a non-collapsed text range is positioned within a blank table cell
# MS Word does not return the table cell as an enclosing element,
		# Thus NVDA thinks the range is not inside the cell.
# This can be detected by asking for the first 2 characters of the range's text,
# Which will either be an empty string, or the single end-of-row mark.
# Anything else means it is not on an empty table cell,
# or the range really does span more than the cell itself.
# If this situation is detected,
# copy and collapse the range, and fetch the content from that instead,
# As a collapsed range on an empty cell does correctly return the table cell as its first enclosing element.
if not self.isCollapsed:
rawText = self._rangeObj.GetText(2)
if not rawText or rawText == END_OF_ROW_MARK:
r = self.copy()
r.end = r.start
fields = super(WordDocumentTextInfo, r).getTextWithFields(formatConfig=formatConfig)
if fields is None:
fields = super().getTextWithFields(formatConfig=formatConfig)
if len(fields)==0:
# Nothing to do... was probably a collapsed range.
return fields
		# Sometimes embedded objects and graphics in MS Word can cause a controlStart then a controlEnd with no actual formatChange / text in the middle.
# SpeakTextInfo always expects that the first lot of controlStarts will always contain some text.
# Therefore ensure that the first lot of controlStarts does contain some text by inserting a blank formatChange and empty string in this case.
for index in range(len(fields)):
field=fields[index]
if isinstance(field,textInfos.FieldCommand) and field.command=="controlStart":
continue
elif isinstance(field,textInfos.FieldCommand) and field.command=="controlEnd":
formatChange=textInfos.FieldCommand("formatChange",textInfos.FormatField())
fields.insert(index,formatChange)
fields.insert(index+1,"")
break
		# #7971: Microsoft Word exposes list bullets as part of the actual text.
# This then confuses NVDA's braille cursor routing as it expects that there is a one-to-one mapping between characters in the text string and unit character moves.
# Therefore, detect when at the start of a list, and strip the bullet from the text string, placing it in the text's formatField as line-prefix.
listItemStarted=False
lastFormatField=None
for index in range(len(fields)):
field=fields[index]
if isinstance(field,textInfos.FieldCommand) and field.command=="controlStart":
if field.field.get('role')==controlTypes.Role.LISTITEM and field.field.get('_startOfNode'):
# We are in the start of a list item.
listItemStarted=True
elif isinstance(field,textInfos.FieldCommand) and field.command=="formatChange":
# This is the most recent formatField we have seen.
lastFormatField=field.field
elif listItemStarted and isinstance(field,str):
# This is the first text string within the list.
# Remove the text up to the first space, and store it as line-prefix which NVDA will appropriately speak/braille as a bullet.
try:
spaceIndex=field.index(' ')
except ValueError:
log.debugWarning("No space found in this text string")
break
prefix=field[0:spaceIndex]
fields[index]=field[spaceIndex+1:]
lastFormatField['line-prefix']=prefix
# Let speech know that line-prefix is safe to be spoken always, as it will only be exposed on the very first formatField on the list item.
lastFormatField['line-prefix_speakAlways']=True
break
else:
# Not a controlStart, formatChange or text string. Nothing to do.
break
# Fill in page number attributes where NVDA expects
try:
page=fields[0].field['page-number']
except KeyError:
page=None
if page is not None:
for field in fields:
if isinstance(field,textInfos.FieldCommand) and isinstance(field.field,textInfos.FormatField):
field.field['page-number']=page
# MS Word can sometimes return a higher ancestor in its textRange's children.
# E.g. a table inside a table header.
# This does not cause a loop, but does cause information to be doubled
# Detect these duplicates and remove them from the generated fields.
seenStarts=set()
pendingRemoves=[]
index=0
for index,field in enumerate(fields):
if isinstance(field,textInfos.FieldCommand) and field.command=="controlStart":
runtimeID=field.field['runtimeID']
if not runtimeID:
continue
if runtimeID in seenStarts:
pendingRemoves.append(field.field)
else:
seenStarts.add(runtimeID)
elif seenStarts:
seenStarts.clear()
index=0
while index<len(fields):
field=fields[index]
if isinstance(field,textInfos.FieldCommand) and any(x is field.field for x in pendingRemoves):
del fields[index]
else:
index+=1
return fields
class WordBrowseModeDocument(UIABrowseModeDocument):
def shouldSetFocusToObj(self,obj):
# Ignore strange editable text fields surrounding most inner fields (links, table cells etc)
if obj.role==controlTypes.Role.EDITABLETEXT and obj.UIAElement.cachedAutomationID.startswith('UIA_AutomationId_Word_Content'):
return False
return super(WordBrowseModeDocument,self).shouldSetFocusToObj(obj)
def shouldPassThrough(self,obj,reason=None):
# Ignore strange editable text fields surrounding most inner fields (links, table cells etc)
if obj.role==controlTypes.Role.EDITABLETEXT and obj.UIAElement.cachedAutomationID.startswith('UIA_AutomationId_Word_Content'):
return False
return super(WordBrowseModeDocument,self).shouldPassThrough(obj,reason=reason)
def script_tab(self,gesture):
oldBookmark=self.rootNVDAObject.makeTextInfo(textInfos.POSITION_SELECTION).bookmark
gesture.send()
noTimeout,newInfo=self.rootNVDAObject._hasCaretMoved(oldBookmark,timeout=1)
if not newInfo:
return
info=self.makeTextInfo(textInfos.POSITION_SELECTION)
if not info.isCollapsed:
speech.speakTextInfo(info, reason=controlTypes.OutputReason.FOCUS)
script_shiftTab=script_tab
def _iterNodesByType(self,nodeType,direction="next",pos=None):
if nodeType=="annotation":
comments=UIATextAttributeQuicknavIterator(CommentUIATextInfoQuickNavItem,nodeType,self,pos,direction=direction)
revisions=UIATextAttributeQuicknavIterator(RevisionUIATextInfoQuickNavItem,nodeType,self,pos,direction=direction)
return browseMode.mergeQuickNavItemIterators([comments,revisions],direction)
return super(WordBrowseModeDocument,self)._iterNodesByType(nodeType,direction=direction,pos=pos)
ElementsListDialog=ElementsListDialog
class WordDocumentNode(UIA):
TextInfo=WordDocumentTextInfo
def _get_role(self):
role=super(WordDocumentNode,self).role
		# Footnote / endnote elements currently have a role of unknown. Force them to editableText so that their text is presented correctly
if role==controlTypes.Role.UNKNOWN:
role=controlTypes.Role.EDITABLETEXT
return role
class WordDocument(UIADocumentWithTableNavigation,WordDocumentNode,WordDocumentBase):
treeInterceptorClass=WordBrowseModeDocument
shouldCreateTreeInterceptor=False
announceEntireNewLine=True
# Microsoft Word duplicates the full title of the document on this control, which is redundant as it appears in the title of the app itself.
name=u""
def event_UIA_notification(self, activityId=None, **kwargs):
# #10851: in recent Word 365 releases, UIA notification will cause NVDA to announce edit functions
# such as "delete back word" when Control+Backspace is pressed.
if activityId == "AccSN2": # Delete activity ID
return
super(WordDocument, self).event_UIA_notification(**kwargs)
	# The following override of the EditableText._caretMoveBySentenceHelper private method
	# falls back to the MS Word object model if available.
# This override should be removed as soon as UI Automation in MS Word has the ability to move by sentence.
def _caretMoveBySentenceHelper(self, gesture, direction):
if isScriptWaiting():
return
if not self.WinwordSelectionObject:
# Legacy object model not available.
# Translators: a message when navigating by sentence is unavailable in MS Word
ui.message(_("Navigating by sentence not supported in this document"))
gesture.send()
return
# Using the legacy object model,
# Move the caret to the next sentence in the requested direction.
legacyInfo = LegacyWordDocumentTextInfo(self, textInfos.POSITION_CARET)
legacyInfo.move(textInfos.UNIT_SENTENCE, direction)
# Save the start of the sentence for future use
legacyStart = legacyInfo.copy()
# With the legacy object model,
# Move the caret to the end of the new sentence.
legacyInfo.move(textInfos.UNIT_SENTENCE, 1)
legacyInfo.updateCaret()
# Fetch the caret position (end of the next sentence) with UI automation.
endInfo = self.makeTextInfo(textInfos.POSITION_CARET)
# Move the caret back to the start of the next sentence,
# where it should be left for the user.
legacyStart.updateCaret()
# Fetch the new caret position (start of the next sentence) with UI Automation.
startInfo = self.makeTextInfo(textInfos.POSITION_CARET)
# Make a UI automation text range spanning the entire next sentence.
info = startInfo.copy()
info.end = endInfo.end
# Speak the sentence moved to
speech.speakTextInfo(info, unit=textInfos.UNIT_SENTENCE, reason=controlTypes.OutputReason.CARET)
# Forget the word currently being typed as the user has moved the caret somewhere else.
speech.clearTypedWordBuffer()
# Alert review and braille the caret has moved to its new position
review.handleCaretMove(info)
braille.handler.handleCaretMove(self)
@script(
gesture="kb:NVDA+alt+c",
# Translators: a description for a script that reports the comment at the caret.
description=_("Reports the text of the comment where the System caret is located.")
)
def script_reportCurrentComment(self,gesture):
caretInfo=self.makeTextInfo(textInfos.POSITION_CARET)
commentInfo = getCommentInfoFromPosition(caretInfo)
if commentInfo is not None:
ui.message(getPresentableCommentInfoFromPosition(commentInfo))
else:
# Translators: a message when there is no comment to report in Microsoft Word
ui.message(_("No comments"))
return
| 1 | 34,383 | Shouldn't we also trigger vision update here, so that if someone has caret highlighting enabled the correct character is highlighted? | nvaccess-nvda | py |
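The review question above suggests refreshing vision as well as braille when text changes. The following is an editorial sketch of that idea, not part of the reviewed patch: it assumes NVDA's vision framework exposes vision.handler.handleCaretMove and that vision.handler may be None before the framework is initialised; the method itself would live on the UIA WordDocument class.

import braille
import vision


def event_textChange(self):
	# Braille does not follow typed text automatically because MS Word fires no caret events here,
	# so refresh the braille display from the new caret position.
	braille.handler.handleCaretMove(self)
	# Also notify the vision framework (assumed hook) so enhancement providers such as
	# caret highlighting track the caret while the user types.
	if vision.handler:
		vision.handler.handleCaretMove(self)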
@@ -338,7 +338,11 @@ public class GoGapicSurfaceTransformer implements ModelToViewTransformer<ProtoAp
void addXExampleImports(InterfaceContext context, Iterable<? extends MethodModel> methods) {
ImportTypeTable typeTable = context.getImportTypeTable();
typeTable.saveNicknameFor("context;;;");
- typeTable.saveNicknameFor(context.getProductConfig().getPackageName() + ";;;");
+ typeTable.saveNicknameFor(
+ context.getProductConfig().getPackageName()
+ + ";"
+ + context.getNamer().getLocalPackageName()
+ + ";;");
for (MethodModel method : methods) {
method.getAndSaveRequestTypeName(context.getImportTypeTable(), context.getNamer()); | 1 | /* Copyright 2016 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.api.codegen.transformer.go;
import com.google.api.codegen.RetryParamsDefinitionProto;
import com.google.api.codegen.config.ApiModel;
import com.google.api.codegen.config.GapicInterfaceConfig;
import com.google.api.codegen.config.GapicProductConfig;
import com.google.api.codegen.config.InterfaceConfig;
import com.google.api.codegen.config.InterfaceModel;
import com.google.api.codegen.config.MethodConfig;
import com.google.api.codegen.config.MethodModel;
import com.google.api.codegen.config.ProductConfig;
import com.google.api.codegen.config.ProductServiceConfig;
import com.google.api.codegen.config.ProtoApiModel;
import com.google.api.codegen.config.ProtoInterfaceModel;
import com.google.api.codegen.gapic.GapicCodePathMapper;
import com.google.api.codegen.transformer.ApiCallableTransformer;
import com.google.api.codegen.transformer.DefaultFeatureConfig;
import com.google.api.codegen.transformer.FeatureConfig;
import com.google.api.codegen.transformer.FileHeaderTransformer;
import com.google.api.codegen.transformer.GapicInterfaceContext;
import com.google.api.codegen.transformer.GrpcStubTransformer;
import com.google.api.codegen.transformer.ImportTypeTable;
import com.google.api.codegen.transformer.InterfaceContext;
import com.google.api.codegen.transformer.MethodContext;
import com.google.api.codegen.transformer.ModelToViewTransformer;
import com.google.api.codegen.transformer.ModelTypeTable;
import com.google.api.codegen.transformer.PageStreamingTransformer;
import com.google.api.codegen.transformer.PathTemplateTransformer;
import com.google.api.codegen.transformer.ServiceTransformer;
import com.google.api.codegen.transformer.StaticLangApiMethodTransformer;
import com.google.api.codegen.transformer.SurfaceNamer;
import com.google.api.codegen.util.CommonRenderingUtil;
import com.google.api.codegen.util.go.GoTypeTable;
import com.google.api.codegen.viewmodel.ImportSectionView;
import com.google.api.codegen.viewmodel.LongRunningOperationDetailView;
import com.google.api.codegen.viewmodel.PackageInfoView;
import com.google.api.codegen.viewmodel.PageStreamingDescriptorClassView;
import com.google.api.codegen.viewmodel.RetryConfigDefinitionView;
import com.google.api.codegen.viewmodel.ServiceDocView;
import com.google.api.codegen.viewmodel.StaticLangApiMethodView;
import com.google.api.codegen.viewmodel.StaticLangClientExampleFileView;
import com.google.api.codegen.viewmodel.StaticLangClientFileView;
import com.google.api.codegen.viewmodel.ViewModel;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableTable;
import java.io.File;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.EnumSet;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.TreeMap;
import java.util.regex.Pattern;
public class GoGapicSurfaceTransformer implements ModelToViewTransformer<ProtoApiModel> {
private static final String API_TEMPLATE_FILENAME = "go/main.snip";
private static final String SAMPLE_TEMPLATE_FILENAME = "go/example.snip";
private static final String DOC_TEMPLATE_FILENAME = "go/doc.snip";
private static final int COMMENT_LINE_LENGTH = 75;
private static final Pattern versionPattern = Pattern.compile("v\\d.*beta");
private final ApiCallableTransformer apiCallableTransformer = new ApiCallableTransformer();
private final StaticLangApiMethodTransformer apiMethodTransformer =
new StaticLangApiMethodTransformer();
private final FeatureConfig featureConfig = new DefaultFeatureConfig();
private final FileHeaderTransformer fileHeaderTransformer =
new FileHeaderTransformer(new GoImportSectionTransformer());
private final GrpcStubTransformer grpcStubTransformer = new GrpcStubTransformer();
private final PageStreamingTransformer pageStreamingTransformer = new PageStreamingTransformer();
private final PathTemplateTransformer pathTemplateTransformer = new PathTemplateTransformer();
private final ServiceTransformer serviceTransformer = new ServiceTransformer();
private final ProductServiceConfig productServiceConfig = new ProductServiceConfig();
private final GapicCodePathMapper pathMapper;
public GoGapicSurfaceTransformer(GapicCodePathMapper pathMapper) {
this.pathMapper = pathMapper;
}
@Override
public List<String> getTemplateFileNames() {
return Arrays.asList(API_TEMPLATE_FILENAME, DOC_TEMPLATE_FILENAME, SAMPLE_TEMPLATE_FILENAME);
}
@Override
public List<ViewModel> transform(ProtoApiModel model, GapicProductConfig productConfig) {
List<ViewModel> models = new ArrayList<>();
GoSurfaceNamer namer = new GoSurfaceNamer(productConfig.getPackageName());
for (InterfaceModel apiInterface : model.getInterfaces()) {
if (!productConfig.hasInterfaceConfig(apiInterface)) {
continue;
}
GapicInterfaceContext context =
GapicInterfaceContext.create(
apiInterface, productConfig, createTypeTable(), namer, featureConfig);
models.add(generate(context));
context =
GapicInterfaceContext.create(
apiInterface, productConfig, createTypeTable(), namer, featureConfig);
models.add(generateExample(context));
}
models.add(generatePackageInfo(model, productConfig, namer));
return models;
}
private StaticLangClientFileView generate(GapicInterfaceContext context) {
StaticLangClientFileView.Builder view = StaticLangClientFileView.newBuilder();
SurfaceNamer namer = context.getNamer();
ApiModel model = context.getApiModel();
ProtoInterfaceModel apiInterface = context.getInterfaceModel();
GapicProductConfig productConfig = context.getProductConfig();
GapicInterfaceConfig interfaceConfig = context.getInterfaceConfig();
view.templateFileName(API_TEMPLATE_FILENAME);
view.serviceDoc(serviceTransformer.generateServiceDoc(context, null, productConfig));
view.domainLayerLocation(productConfig.getDomainLayerLocation());
view.clientTypeName(namer.getApiWrapperClassName(context.getInterfaceConfig()));
view.clientConstructorName(namer.getApiWrapperClassConstructorName(interfaceConfig));
view.defaultClientOptionFunctionName(namer.getDefaultApiSettingsFunctionName(interfaceConfig));
view.defaultCallOptionFunctionName(namer.getDefaultCallSettingsFunctionName(interfaceConfig));
view.callOptionsTypeName(namer.getCallSettingsTypeName(interfaceConfig));
view.serviceOriginalName(model.getTitle());
view.servicePhraseName(namer.getServicePhraseName(interfaceConfig));
String outputPath = pathMapper.getOutputPath(apiInterface.getFullName(), productConfig);
String fileName = namer.getServiceFileName(context.getInterfaceConfig());
view.outputPath(outputPath + File.separator + fileName);
List<RetryConfigDefinitionView> retryDef =
generateRetryConfigDefinitions(context, context.getSupportedMethods());
view.retryPairDefinitions(retryDef);
view.callSettings(apiCallableTransformer.generateCallSettings(context));
List<StaticLangApiMethodView> apiMethods =
generateApiMethods(context, context.getSupportedMethods());
view.apiMethods(apiMethods);
// If any methods have header request params, "fmt" is needed for `fmt.Sprintf` calls.
if (apiMethods.stream().anyMatch(m -> !m.headerRequestParams().isEmpty())) {
context.getImportTypeTable().saveNicknameFor("fmt;;;");
}
// In Go, multiple methods share the same iterator type, one iterator type per resource type.
// We have to dedupe the iterators.
Map<String, PageStreamingDescriptorClassView> iterators = new TreeMap<>();
for (PageStreamingDescriptorClassView desc :
pageStreamingTransformer.generateDescriptorClasses(context)) {
iterators.put(desc.typeName(), desc);
}
view.pageStreamingDescriptorClasses(new ArrayList<>(iterators.values()));
// Same with long running operations.
Map<String, LongRunningOperationDetailView> lros = new TreeMap<>();
for (StaticLangApiMethodView apiMethod : apiMethods) {
LongRunningOperationDetailView lro = apiMethod.operationMethod();
if (lro != null) {
lros.put(lro.clientReturnTypeName(), lro);
}
}
view.lroDetailViews(new ArrayList<>(lros.values()));
view.serviceHostname(productServiceConfig.getServiceHostname(context.getServiceAddress()));
view.servicePort(productServiceConfig.getServicePort(context.getServiceAddress()));
view.stubs(grpcStubTransformer.generateGrpcStubs(context));
addXApiImports(context, context.getSupportedMethods());
view.fileHeader(fileHeaderTransformer.generateFileHeader(context));
return view.build();
}
private StaticLangClientExampleFileView generateExample(InterfaceContext context) {
StaticLangClientExampleFileView.Builder view = StaticLangClientExampleFileView.newBuilder();
SurfaceNamer namer = context.getNamer();
InterfaceModel apiInterface = context.getInterfaceModel();
ProductConfig productConfig = context.getProductConfig();
InterfaceConfig interfaceConfig = context.getInterfaceConfig();
view.templateFileName(SAMPLE_TEMPLATE_FILENAME);
String outputPath = pathMapper.getOutputPath(apiInterface.getFullName(), productConfig);
String fileName = namer.getExampleFileName(context.getInterfaceConfig());
view.outputPath(outputPath + File.separator + fileName);
view.clientTypeName(namer.getApiWrapperClassName(context.getInterfaceConfig()));
view.clientConstructorName(namer.getApiWrapperClassConstructorName(interfaceConfig));
view.clientConstructorExampleName(
namer.getApiWrapperClassConstructorExampleName(interfaceConfig));
view.apiMethods(generateApiMethods(context, context.getPublicMethods()));
// Examples are different from the API. In particular, we use short declaration
// and so we omit most type names. We only need
// - Context, to initialize the client
// - The VKit generated library, that's what the sample is for
// - The input types of the methods, to initialize the requests
// So, we clear all imports; addXExampleImports will add back the ones we want.
context.getImportTypeTable().getImports().clear();
addXExampleImports(context, context.getPublicMethods());
view.fileHeader(fileHeaderTransformer.generateFileHeader(context));
return view.build();
}
private PackageInfoView generatePackageInfo(
ApiModel model, GapicProductConfig productConfig, SurfaceNamer namer) {
String outputPath = productConfig.getPackageName();
String fileName = "doc.go";
PackageInfoView.Builder packageInfo = PackageInfoView.newBuilder();
packageInfo.templateFileName(DOC_TEMPLATE_FILENAME);
packageInfo.outputPath(outputPath + File.separator + fileName);
packageInfo.serviceTitle(model.getTitle());
packageInfo.importPath(productConfig.getPackageName());
packageInfo.serviceDocs(Collections.<ServiceDocView>emptyList());
packageInfo.packageDoc(
CommonRenderingUtil.getDocLines(model.getDocumentationSummary(), COMMENT_LINE_LENGTH));
packageInfo.domainLayerLocation(productConfig.getDomainLayerLocation());
packageInfo.authScopes(model.getAuthScopes(productConfig));
packageInfo.fileHeader(
fileHeaderTransformer.generateFileHeader(
productConfig, ImportSectionView.newBuilder().build(), namer));
packageInfo.releaseLevel(productConfig.getReleaseLevel());
packageInfo.isInferredBeta(isInferredBetaVersion(productConfig.getPackageName()));
return packageInfo.build();
}
static ModelTypeTable createTypeTable() {
return new ModelTypeTable(new GoTypeTable(), new GoModelTypeNameConverter());
}
@VisibleForTesting
List<StaticLangApiMethodView> generateApiMethods(
InterfaceContext context, Iterable<? extends MethodModel> methods) {
List<StaticLangApiMethodView> apiMethods = new ArrayList<>();
for (MethodModel method : methods) {
MethodConfig methodConfig = context.getMethodConfig(method);
MethodContext methodContext = context.asRequestMethodContext(method);
if (method.getRequestStreaming() || method.getResponseStreaming()) {
apiMethods.add(
apiMethodTransformer.generateGrpcStreamingRequestObjectMethod(methodContext));
} else if (methodConfig.isPageStreaming()) {
apiMethods.add(apiMethodTransformer.generatePagedRequestObjectMethod(methodContext));
} else if (methodConfig.isLongRunningOperation()) {
apiMethods.add(apiMethodTransformer.generateOperationRequestObjectMethod(methodContext));
} else {
apiMethods.add(apiMethodTransformer.generateRequestObjectMethod(methodContext));
}
}
return apiMethods;
}
@VisibleForTesting
List<RetryConfigDefinitionView> generateRetryConfigDefinitions(
InterfaceContext context, List<MethodModel> methods) {
Set<RetryConfigDefinitionView.Name> retryNames = new HashSet<>();
for (MethodModel method : methods) {
MethodConfig conf = context.getMethodConfig(method);
retryNames.add(
RetryConfigDefinitionView.Name.create(
conf.getRetrySettingsConfigName(), conf.getRetryCodesConfigName()));
}
TreeMap<RetryConfigDefinitionView.Name, RetryConfigDefinitionView> retryDef = new TreeMap<>();
Map<String, ImmutableList<String>> retryCodesDef =
context.getInterfaceConfig().getRetryCodesConfig().getRetryCodesDefinition();
ImmutableMap<String, RetryParamsDefinitionProto> retryParamsDef =
context.getInterfaceConfig().getRetrySettingsDefinition();
for (RetryConfigDefinitionView.Name name : retryNames) {
ImmutableList<String> codes = retryCodesDef.get(name.retryCodesConfigName());
if (codes.isEmpty()) {
continue;
}
List<String> retryCodeNames = new ArrayList<>();
for (String code : codes) {
retryCodeNames.add(context.getNamer().getStatusCodeName(code));
}
retryDef.put(
name,
RetryConfigDefinitionView.newBuilder()
.name(name)
.retryCodes(retryCodeNames)
.params(retryParamsDef.get(name.retrySettingsConfigName()))
.build());
}
if (!retryDef.isEmpty()) {
context.getImportTypeTable().saveNicknameFor("time;;;");
context.getImportTypeTable().saveNicknameFor("google.golang.org/grpc/codes;;;");
}
return new ArrayList<>(retryDef.values());
}
private static final String EMPTY_PROTO_PKG = "github.com/golang/protobuf/ptypes/empty";
@VisibleForTesting
void addXApiImports(InterfaceContext context, Collection<MethodModel> methods) {
ImportTypeTable typeTable = context.getImportTypeTable();
typeTable.saveNicknameFor("context;;;");
typeTable.saveNicknameFor("google.golang.org/grpc;;;");
typeTable.saveNicknameFor("github.com/googleapis/gax-go/v2;gax;;");
typeTable.saveNicknameFor("google.golang.org/api/option;;;");
typeTable.saveNicknameFor("google.golang.org/api/transport;;;");
typeTable.saveNicknameFor("google.golang.org/grpc/metadata;;;");
typeTable.getImports().remove(EMPTY_PROTO_PKG);
addContextImports(context, ImportContext.CLIENT, methods);
}
@VisibleForTesting
void addXExampleImports(InterfaceContext context, Iterable<? extends MethodModel> methods) {
ImportTypeTable typeTable = context.getImportTypeTable();
typeTable.saveNicknameFor("context;;;");
typeTable.saveNicknameFor(context.getProductConfig().getPackageName() + ";;;");
for (MethodModel method : methods) {
method.getAndSaveRequestTypeName(context.getImportTypeTable(), context.getNamer());
}
addContextImports(context, ImportContext.EXAMPLE, methods);
}
private void addContextImports(
InterfaceContext context,
ImportContext importContext,
Iterable<? extends MethodModel> methods) {
for (ImportKind kind : getImportKinds(context.getInterfaceConfig(), methods)) {
ImmutableList<String> imps = CONTEXTUAL_IMPORTS.get(importContext, kind);
if (imps != null) {
for (String imp : imps) {
context.getImportTypeTable().saveNicknameFor(imp);
}
}
}
}
private Set<ImportKind> getImportKinds(
InterfaceConfig interfaceConfig, Iterable<? extends MethodModel> methods) {
EnumSet<ImportKind> kinds = EnumSet.noneOf(ImportKind.class);
for (MethodModel method : methods) {
if (method.getResponseStreaming()) {
kinds.add(ImportKind.SERVER_STREAM);
}
MethodConfig methodConfig = interfaceConfig.getMethodConfig(method);
if (methodConfig.isLongRunningOperation()) {
kinds.add(ImportKind.LRO);
}
if (methodConfig.isPageStreaming()) {
kinds.add(ImportKind.PAGE_STREAM);
}
}
return kinds;
}
@VisibleForTesting
public boolean isInferredBetaVersion(String packageName) {
int indexOfVersionString = packageName.lastIndexOf("/");
String versionString = packageName.substring(indexOfVersionString + 1);
return versionPattern.matcher(versionString).find();
}
private enum ImportContext {
CLIENT,
EXAMPLE,
}
private enum ImportKind {
PAGE_STREAM,
LRO,
SERVER_STREAM,
}
private static final ImmutableTable<ImportContext, ImportKind, ImmutableList<String>>
CONTEXTUAL_IMPORTS =
ImmutableTable.<ImportContext, ImportKind, ImmutableList<String>>builder()
.put(
ImportContext.CLIENT,
ImportKind.PAGE_STREAM,
ImmutableList.<String>of(
"math;;;",
"google.golang.org/api/iterator;;;",
"github.com/golang/protobuf/proto;;;"))
.put(
ImportContext.EXAMPLE,
ImportKind.PAGE_STREAM,
ImmutableList.<String>of("google.golang.org/api/iterator;;;"))
.put(
ImportContext.CLIENT,
ImportKind.LRO,
ImmutableList.<String>of(
"time;;;",
"cloud.google.com/go/longrunning;;;",
"cloud.google.com/go/longrunning/autogen;lroauto;;"))
.put(
ImportContext.EXAMPLE,
ImportKind.SERVER_STREAM,
ImmutableList.<String>of("io;;;"))
.build();
}
| 1 | 28,192 | Woah, how does this change, which includes semicolons, result in the baseline change? Are the semicolon chars just part of the internal representation of the import type? | googleapis-gapic-generator | java |
@@ -51,7 +51,7 @@ class _NvdaLocationData:
self.whichNVDA = builtIn.get_variable_value("${whichNVDA}", "source")
if self.whichNVDA == "source":
self._runNVDAFilePath = _pJoin(self.repoRoot, "source/nvda.pyw")
- self.baseNVDACommandline = f"pyw -3.7-32 {self._runNVDAFilePath}"
+ self.baseNVDACommandline = f"pyw {self._runNVDAFilePath}"
elif self.whichNVDA == "installed":
self._runNVDAFilePath = _pJoin(_expandvars('%PROGRAMFILES%'), 'nvda', 'nvda.exe')
self.baseNVDACommandline = f'"{str(self._runNVDAFilePath)}"' | 1 | # A part of NonVisual Desktop Access (NVDA)
# Copyright (C) 2020 NV Access Limited
# This file may be used under the terms of the GNU General Public License, version 2 or later.
# For more details see: https://www.gnu.org/licenses/gpl-2.0.html
"""This file provides robot library functions for NVDA system tests.
It contains helper methods for system tests, most specifically related to NVDA
- setup config,
- starting
- quitting
- config cleanup
This is in contrast with the `SystemTestSpy/speechSpy*.py` files,
which provide library functions related to monitoring NVDA and asserting NVDA output.
"""
# imported methods start with underscore (_) so they don't get imported into robot files as keywords
from os.path import join as _pJoin, abspath as _abspath, expandvars as _expandvars
import tempfile as _tempFile
from typing import Optional
from robotremoteserver import (
test_remote_server as _testRemoteServer,
stop_remote_server as _stopRemoteServer,
)
from SystemTestSpy import (
_blockUntilConditionMet,
_getLib,
_nvdaSpyAlias,
configManager
)
# Imported for type information
from robot.libraries.BuiltIn import BuiltIn
from robot.libraries.OperatingSystem import OperatingSystem as _OpSysLib
from robot.libraries.Process import Process as _Process
from robot.libraries.Remote import Remote as _Remote
builtIn: BuiltIn = BuiltIn()
opSys: _OpSysLib = _getLib('OperatingSystem')
process: _Process = _getLib('Process')
class _NvdaLocationData:
def __init__(self):
# robot is expected to be run from the NVDA repo root directory. We want all repo specific
# paths to be relative to this. This would allow us to change where it is run from if we decided to.
self.repoRoot = _abspath("./")
self.stagingDir = _tempFile.gettempdir()
opSys.directory_should_exist(self.stagingDir)
self.whichNVDA = builtIn.get_variable_value("${whichNVDA}", "source")
if self.whichNVDA == "source":
self._runNVDAFilePath = _pJoin(self.repoRoot, "source/nvda.pyw")
self.baseNVDACommandline = f"pyw -3.7-32 {self._runNVDAFilePath}"
elif self.whichNVDA == "installed":
self._runNVDAFilePath = _pJoin(_expandvars('%PROGRAMFILES%'), 'nvda', 'nvda.exe')
self.baseNVDACommandline = f'"{str(self._runNVDAFilePath)}"'
else:
raise AssertionError("RobotFramework should be run with argument: '-v whichNVDA [source|installed]'")
self.profileDir = _pJoin(self.stagingDir, "nvdaProfile")
self.logPath = _pJoin(self.profileDir, 'nvda.log')
self.preservedLogsDir = _pJoin(
builtIn.get_variable_value("${OUTPUT DIR}"),
"nvdaTestRunLogs"
)
def ensurePathsExist(self):
opSys.file_should_exist(self._runNVDAFilePath, "Unable to start NVDA unless path exists.")
opSys.create_directory(self.profileDir)
opSys.create_directory(self.preservedLogsDir)
_locations = _NvdaLocationData()
class NvdaLib:
"""Robot Framework library for interacting with NVDA.
Notable:
- NvdaLib.nvdaSpy is a library instance for getting speech and other information out of NVDA
"""
def __init__(self):
self.nvdaSpy = None #: Optional[SystemTestSpy.speechSpyGlobalPlugin.NVDASpyLib]
self.nvdaHandle: Optional[int] = None
@staticmethod
def _createTestIdFileName(name):
suiteName = builtIn.get_variable_value("${SUITE NAME}")
testName = builtIn.get_variable_value("${TEST NAME}")
outputFileName = f"{suiteName}-{testName}-{name}".replace(" ", "_")
return outputFileName
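		# Example (illustrative suite/test names, not taken from a real run): for suite
		# "startupShutdownNVDA", test "NVDA quits cleanly" and name "stdout.txt" this
		# produces "startupShutdownNVDA-NVDA_quits_cleanly-stdout.txt".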
@staticmethod
def setup_nvda_profile(configFileName):
configManager.setupProfile(
_locations.repoRoot,
configFileName,
_locations.stagingDir
)
@staticmethod
def teardown_nvda_profile():
configManager.teardownProfile(
_locations.stagingDir
)
nvdaProcessAlias = 'nvdaAlias'
_spyServerPort = 8270 # is `registered by IANA` for remote server usage. Two ASCII values:'RF'
_spyServerURI = f'http://127.0.0.1:{_spyServerPort}'
_spyAlias = _nvdaSpyAlias
def _startNVDAProcess(self):
"""Start NVDA.
Use debug logging, replacing any current instance, using the system test profile directory
"""
_locations.ensurePathsExist()
command = (
f"{_locations.baseNVDACommandline}"
f" --debug-logging"
f" -r"
f" -c \"{_locations.profileDir}\""
f" --log-file \"{_locations.logPath}\""
)
self.nvdaHandle = handle = process.start_process(
command,
shell=True,
alias=self.nvdaProcessAlias,
stdout=_pJoin(_locations.preservedLogsDir, self._createTestIdFileName("stdout.txt")),
stderr=_pJoin(_locations.preservedLogsDir, self._createTestIdFileName("stderr.txt")),
)
return handle
def _connectToRemoteServer(self):
"""Connects to the nvdaSpyServer
Because we do not know how far through the startup NVDA is, we have to poll
to check that the server is available. Importing the library immediately seems
to succeed, but then calling a keyword later fails with RuntimeError:
"Connection to remote server broken: [Errno 10061]
No connection could be made because the target machine actively refused it"
Instead we wait until the remote server is available before importing the library and continuing.
"""
builtIn.log(f"Waiting for {self._spyAlias} to be available at: {self._spyServerURI}", level='DEBUG')
# Importing the 'Remote' library always succeeds, even when a connection can not be made.
# If that happens, then some 'Remote' keyword will fail at some later point.
# therefore we use '_testRemoteServer' to ensure that we can in fact connect before proceeding.
_blockUntilConditionMet(
getValue=lambda: _testRemoteServer(self._spyServerURI, log=False),
giveUpAfterSeconds=10,
errorMessage=f"Unable to connect to {self._spyAlias}",
)
builtIn.log(f"Connecting to {self._spyAlias}", level='DEBUG')
# If any remote call takes longer than this, the connection will be closed!
maxRemoteKeywordDurationSeconds = 30
builtIn.import_library(
"Remote", # name of library to import
# Arguments to construct the library instance:
f"uri={self._spyServerURI}",
f"timeout={maxRemoteKeywordDurationSeconds}",
# Set an alias for the imported library instance
"WITH NAME",
self._spyAlias,
)
builtIn.log(f"Getting {self._spyAlias} library instance", level='DEBUG')
self.nvdaSpy = self._addMethodsToSpy(builtIn.get_library_instance(self._spyAlias))
# Ensure that keywords timeout before `timeout` given to `Remote` library,
# otherwise we lose control over NVDA.
self.nvdaSpy.init_max_keyword_duration(maxSeconds=maxRemoteKeywordDurationSeconds)
@staticmethod
def _addMethodsToSpy(remoteLib: _Remote):
""" Adds a method for each keywords on the remote library.
@param remoteLib: the library to augment with methods.
@rtype: SystemTestSpy.speechSpyGlobalPlugin.NVDASpyLib
@return: The library augmented with methods for all keywords.
"""
# Add methods back onto the lib so they can be called directly rather than manually calling run_keyword
def _makeKeywordCaller(lib, keyword):
def runKeyword(*args, **kwargs):
builtIn.log(
f"{keyword}"
f"{f' {args}' if args else ''}"
f"{f' {kwargs}' if kwargs else ''}"
)
return lib.run_keyword(keyword, args, kwargs)
return runKeyword
for name in remoteLib.get_keyword_names():
setattr(
remoteLib,
name,
_makeKeywordCaller(remoteLib, name)
)
return remoteLib
def start_NVDA(self, settingsFileName):
builtIn.log(f"Starting NVDA with config: {settingsFileName}")
self.setup_nvda_profile(settingsFileName)
nvdaProcessHandle = self._startNVDAProcess()
process.process_should_be_running(nvdaProcessHandle)
self._connectToRemoteServer()
self.nvdaSpy.wait_for_NVDA_startup_to_complete()
return nvdaProcessHandle
def save_NVDA_log(self):
"""NVDA logs are saved to the ${OUTPUT DIR}/nvdaTestRunLogs/${SUITE NAME}-${TEST NAME}-nvda.log"""
builtIn.log("Saving NVDA log")
saveToPath = self.create_preserved_test_output_filename("nvda.log")
opSys.copy_file(
_locations.logPath,
saveToPath
)
builtIn.log(f"Log saved to: {saveToPath}", level='DEBUG')
def create_preserved_test_output_filename(self, fileName):
"""EG for nvda.log path will become:
${OUTPUT DIR}/nvdaTestRunLogs/${SUITE NAME}-${TEST NAME}-nvda.log
"""
return _pJoin(_locations.preservedLogsDir, self._createTestIdFileName(fileName))
def quit_NVDA(self):
builtIn.log("Stopping nvdaSpy server: {}".format(self._spyServerURI))
try:
_stopRemoteServer(self._spyServerURI, log=False)
process.run_process(
f"{_locations.baseNVDACommandline} -q --disable-addons",
shell=True,
)
process.wait_for_process(self.nvdaHandle)
except Exception:
raise
finally:
self.save_NVDA_log()
# remove the spy so that if nvda is run manually against this config it does not interfere.
self.teardown_nvda_profile()
def getSpyLib():
""" Gets the spy library instance. This has been augmented with methods for all supported keywords.
Requires NvdaLib and nvdaSpy (remote library - see speechSpyGlobalPlugin) to be initialised.
On failure check order of keywords in Robot log and NVDA log for failures.
@rtype: SystemTestSpy.speechSpyGlobalPlugin.NVDASpyLib
@return: Remote NVDA spy Robot Framework library.
"""
nvdaLib = _getLib("NvdaLib")
spy = nvdaLib.nvdaSpy
if spy is None:
raise AssertionError("Spy not yet available, check order of keywords and NVDA log for errors.")
return spy
| 1 | 31,712 | Can't you just use runnvda.bat here? | nvaccess-nvda | py |
@@ -23,17 +23,6 @@
#include "selectors.h"
#include "vstring.h"
-/* To get rid of unused parameter warning in
- * -Wextra */
-#ifdef UNUSED
-#elif defined(__GNUC__)
-# define UNUSED(x) UNUSED_ ## x __attribute__((unused))
-#elif defined(__LCLINT__)
-# define UNUSED(x) /*@unused@*/ x
-#else
-# define UNUSED(x) x
-#endif
-
typedef enum {
K_INTERFACE,
K_IMPLEMENTATION, | 1 |
/*
* Copyright (c) 2010, Vincent Berthoux
*
* This source code is released for free distribution under the terms of the
* GNU General Public License version 2 or (at your option) any later version.
*
* This module contains functions for generating tags for Objective C
* language files.
*/
/*
* INCLUDE FILES
*/
#include "general.h" /* must always come first */
#include <string.h>
#include "keyword.h"
#include "entry.h"
#include "options.h"
#include "read.h"
#include "routines.h"
#include "selectors.h"
#include "vstring.h"
/* To get rid of unused parameter warning in
* -Wextra */
#ifdef UNUSED
#elif defined(__GNUC__)
# define UNUSED(x) UNUSED_ ## x __attribute__((unused))
#elif defined(__LCLINT__)
# define UNUSED(x) /*@unused@*/ x
#else
# define UNUSED(x) x
#endif
typedef enum {
K_INTERFACE,
K_IMPLEMENTATION,
K_PROTOCOL,
K_METHOD,
K_CLASSMETHOD,
K_VAR,
K_FIELD,
K_FUNCTION,
K_PROPERTY,
K_TYPEDEF,
K_STRUCT,
K_ENUM,
K_MACRO
} objcKind;
static kindOption ObjcKinds[] = {
{TRUE, 'i', "interface", "class interface"},
{TRUE, 'I', "implementation", "class implementation"},
{TRUE, 'P', "protocol", "Protocol"},
{TRUE, 'm', "method", "Object's method"},
{TRUE, 'c', "class", "Class' method"},
{TRUE, 'v', "var", "Global variable"},
{TRUE, 'F', "field", "Object field"},
{TRUE, 'f', "function", "A function"},
{TRUE, 'p', "property", "A property"},
{TRUE, 't', "typedef", "A type alias"},
{TRUE, 's', "struct", "A type structure"},
{TRUE, 'e', "enum", "An enumeration"},
{TRUE, 'M', "macro", "A preprocessor macro"},
};
typedef enum {
ObjcTYPEDEF,
ObjcSTRUCT,
ObjcENUM,
ObjcIMPLEMENTATION,
ObjcINTERFACE,
ObjcPROTOCOL,
ObjcENCODE,
ObjcSYNCHRONIZED,
ObjcSELECTOR,
ObjcPROPERTY,
ObjcEND,
ObjcDEFS,
ObjcCLASS,
ObjcPRIVATE,
ObjcPACKAGE,
ObjcPUBLIC,
ObjcPROTECTED,
ObjcSYNTHESIZE,
ObjcDYNAMIC,
ObjcOPTIONAL,
ObjcREQUIRED,
ObjcSTRING,
ObjcIDENTIFIER,
Tok_COMA, /* ',' */
Tok_PLUS, /* '+' */
Tok_MINUS, /* '-' */
Tok_PARL, /* '(' */
Tok_PARR, /* ')' */
Tok_CurlL, /* '{' */
Tok_CurlR, /* '}' */
Tok_SQUAREL, /* '[' */
Tok_SQUARER, /* ']' */
Tok_semi, /* ';' */
Tok_dpoint, /* ':' */
Tok_Sharp, /* '#' */
Tok_Backslash, /* '\\' */
Tok_EOL, /* '\r''\n' */
Tok_any,
Tok_EOF /* END of file */
} objcKeyword;
typedef objcKeyword objcToken;
static const keywordTable objcKeywordTable[] = {
{"typedef", ObjcTYPEDEF},
{"struct", ObjcSTRUCT},
{"enum", ObjcENUM},
{"@implementation", ObjcIMPLEMENTATION},
{"@interface", ObjcINTERFACE},
{"@protocol", ObjcPROTOCOL},
{"@encode", ObjcENCODE},
{"@property", ObjcPROPERTY},
{"@synchronized", ObjcSYNCHRONIZED},
{"@selector", ObjcSELECTOR},
{"@end", ObjcEND},
{"@defs", ObjcDEFS},
{"@class", ObjcCLASS},
{"@private", ObjcPRIVATE},
{"@package", ObjcPACKAGE},
{"@public", ObjcPUBLIC},
{"@protected", ObjcPROTECTED},
{"@synthesize", ObjcSYNTHESIZE},
{"@dynamic", ObjcDYNAMIC},
{"@optional", ObjcOPTIONAL},
{"@required", ObjcREQUIRED},
};
static langType Lang_ObjectiveC;
/*//////////////////////////////////////////////////////////////////
//// lexingInit */
typedef struct _lexingState {
vString *name; /* current parsed identifier/operator */
const unsigned char *cp; /* position in stream */
} lexingState;
/*//////////////////////////////////////////////////////////////////////
//// Lexing */
static boolean isNum (char c)
{
return c >= '0' && c <= '9';
}
static boolean isLowerAlpha (char c)
{
return c >= 'a' && c <= 'z';
}
static boolean isUpperAlpha (char c)
{
return c >= 'A' && c <= 'Z';
}
static boolean isAlpha (char c)
{
return isLowerAlpha (c) || isUpperAlpha (c);
}
static boolean isIdent (char c)
{
return isNum (c) || isAlpha (c) || c == '_';
}
static boolean isSpace (char c)
{
return c == ' ' || c == '\t';
}
/* return true if it ends with an end of line */
static void eatWhiteSpace (lexingState * st)
{
const unsigned char *cp = st->cp;
while (isSpace (*cp))
cp++;
st->cp = cp;
}
static void eatString (lexingState * st)
{
boolean lastIsBackSlash = FALSE;
boolean unfinished = TRUE;
const unsigned char *c = st->cp + 1;
while (unfinished)
{
/* end of line should never happen.
* we tolerate it */
if (c == NULL || c[0] == '\0')
break;
else if (*c == '"' && !lastIsBackSlash)
unfinished = FALSE;
else
lastIsBackSlash = *c == '\\';
c++;
}
st->cp = c;
}
static void eatComment (lexingState * st)
{
boolean unfinished = TRUE;
boolean lastIsStar = FALSE;
const unsigned char *c = st->cp + 2;
while (unfinished)
{
/* we've reached the end of the line..
* so we have to reload a line... */
if (c == NULL || *c == '\0')
{
st->cp = readLineFromInputFile ();
/* WOOPS... no more input...
* we return, next lexing read
* will be null and ok */
if (st->cp == NULL)
return;
c = st->cp;
}
/* we've reached the end of the comment */
else if (*c == '/' && lastIsStar)
unfinished = FALSE;
else
{
lastIsStar = '*' == *c;
c++;
}
}
st->cp = c;
}
static void readIdentifier (lexingState * st)
{
const unsigned char *p;
vStringClear (st->name);
/* first char is a simple letter */
if (isAlpha (*st->cp) || *st->cp == '_')
vStringPut (st->name, (int) *st->cp);
/* Go till you get identifier chars */
for (p = st->cp + 1; isIdent (*p); p++)
vStringPut (st->name, (int) *p);
st->cp = p;
vStringTerminate (st->name);
}
/* read the @something directives */
static void readIdentifierObjcDirective (lexingState * st)
{
const unsigned char *p;
vStringClear (st->name);
/* first char is a simple letter */
if (*st->cp == '@')
vStringPut (st->name, (int) *st->cp);
/* Go till you get identifier chars */
for (p = st->cp + 1; isIdent (*p); p++)
vStringPut (st->name, (int) *p);
st->cp = p;
vStringTerminate (st->name);
}
/* The lexer is in charge of reading the file.
 * Some sub-lexers (like eatComment) also read from the file.
 * Lexing is finished when the lexer returns Tok_EOF */
static objcKeyword lex (lexingState * st)
{
int retType;
/* handling data input here */
while (st->cp == NULL || st->cp[0] == '\0')
{
st->cp = readLineFromInputFile ();
if (st->cp == NULL)
return Tok_EOF;
return Tok_EOL;
}
if (isAlpha (*st->cp))
{
readIdentifier (st);
retType = lookupKeyword (vStringValue (st->name), Lang_ObjectiveC);
if (retType == -1) /* If it's not a keyword */
{
return ObjcIDENTIFIER;
}
else
{
return retType;
}
}
else if (*st->cp == '@')
{
readIdentifierObjcDirective (st);
retType = lookupKeyword (vStringValue (st->name), Lang_ObjectiveC);
if (retType == -1) /* If it's not a keyword */
{
return Tok_any;
}
else
{
return retType;
}
}
else if (isSpace (*st->cp))
{
eatWhiteSpace (st);
return lex (st);
}
else
switch (*st->cp)
{
case '(':
st->cp++;
return Tok_PARL;
case '\\':
st->cp++;
return Tok_Backslash;
case '#':
st->cp++;
return Tok_Sharp;
case '/':
if (st->cp[1] == '*') /* ergl, a comment */
{
eatComment (st);
return lex (st);
}
else if (st->cp[1] == '/')
{
st->cp = NULL;
return lex (st);
}
else
{
st->cp++;
return Tok_any;
}
break;
case ')':
st->cp++;
return Tok_PARR;
case '{':
st->cp++;
return Tok_CurlL;
case '}':
st->cp++;
return Tok_CurlR;
case '[':
st->cp++;
return Tok_SQUAREL;
case ']':
st->cp++;
return Tok_SQUARER;
case ',':
st->cp++;
return Tok_COMA;
case ';':
st->cp++;
return Tok_semi;
case ':':
st->cp++;
return Tok_dpoint;
case '"':
eatString (st);
return Tok_any;
case '+':
st->cp++;
return Tok_PLUS;
case '-':
st->cp++;
return Tok_MINUS;
default:
st->cp++;
break;
}
/* default return if nothing is recognized,
* shouldn't happen, but at least, it will
* be handled without destroying the parsing. */
return Tok_any;
}
/*//////////////////////////////////////////////////////////////////////
//// Parsing */
typedef void (*parseNext) (vString * const ident, objcToken what);
/********** Helpers */
/* This variable holds the 'parser' which is going to
* handle the next token */
static parseNext toDoNext;
/* Special variable used by parser eater to
* determine which action to put after their
* job is finished. */
static parseNext comeAfter;
/* Used by some parsers, on detecting certain tokens,
 * to revert to the previous parser. */
static parseNext fallback;
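/* Illustrative walk-through (added note, not original source): on the input
 * "@interface Foo", globalScope receives ObjcINTERFACE and sets
 * toDoNext = &parseInterface; the next ObjcIDENTIFIER token ("Foo") is then
 * delivered to parseInterface, which tags it as an interface, pushes it as the
 * enclosing context and hands control over to parseMethods. */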
/********** Grammar */
static void globalScope (vString * const ident, objcToken what);
static void parseMethods (vString * const ident, objcToken what);
static void parseImplemMethods (vString * const ident, objcToken what);
static vString *tempName = NULL;
static vString *parentName = NULL;
static objcKind parentType = K_INTERFACE;
/* used to prepare a tag for Objective C, just in case there is a need to
* add additional information to the tag. */
static void prepareTag (tagEntryInfo * tag, vString const *name, objcKind kind)
{
initTagEntry (tag, vStringValue (name), &(ObjcKinds[kind]));
if (parentName != NULL)
{
tag->extensionFields.scopeKind = &(ObjcKinds[parentType]);
tag->extensionFields.scopeName = vStringValue (parentName);
}
}
static void pushEnclosingContext (const vString * parent, objcKind type)
{
vStringCopy (parentName, parent);
parentType = type;
}
static void popEnclosingContext (void)
{
vStringClear (parentName);
}
/* Used to centralise tag creation, and be able to add
* more information to it in the future */
static void addTag (vString * const ident, int kind)
{
tagEntryInfo toCreate;
if (! ObjcKinds[kind].enabled)
return;
prepareTag (&toCreate, ident, kind);
makeTagEntry (&toCreate);
}
static objcToken waitedToken, fallBackToken;
/* Ignore everything till waitedToken and jump to comeAfter.
* If the "end" keyword is encountered break, doesn't remember
* why though. */
static void tillToken (vString * const UNUSED (ident), objcToken what)
{
if (what == waitedToken)
toDoNext = comeAfter;
}
static void tillTokenOrFallBack (vString * const UNUSED (ident), objcToken what)
{
if (what == waitedToken)
toDoNext = comeAfter;
else if (what == fallBackToken)
{
toDoNext = fallback;
}
}
static int ignoreBalanced_count = 0;
static void ignoreBalanced (vString * const UNUSED (ident), objcToken what)
{
switch (what)
{
case Tok_PARL:
case Tok_CurlL:
case Tok_SQUAREL:
ignoreBalanced_count++;
break;
case Tok_PARR:
case Tok_CurlR:
case Tok_SQUARER:
ignoreBalanced_count--;
break;
default:
/* don't care */
break;
}
if (ignoreBalanced_count == 0)
toDoNext = comeAfter;
}
static void parseFields (vString * const ident, objcToken what)
{
switch (what)
{
case Tok_CurlR:
toDoNext = &parseMethods;
break;
case Tok_SQUAREL:
case Tok_PARL:
toDoNext = &ignoreBalanced;
comeAfter = &parseFields;
break;
/* we got an identifier, keep track of it */
case ObjcIDENTIFIER:
vStringCopy (tempName, ident);
break;
/* our last kept identifier must be our variable name =) */
case Tok_semi:
addTag (tempName, K_FIELD);
vStringClear (tempName);
break;
default:
/* NOTHING */
break;
}
}
static objcKind methodKind;
static vString *fullMethodName;
static vString *prevIdent;
static void parseMethodsName (vString * const ident, objcToken what)
{
switch (what)
{
case Tok_PARL:
toDoNext = &tillToken;
comeAfter = &parseMethodsName;
waitedToken = Tok_PARR;
break;
case Tok_dpoint:
vStringCat (fullMethodName, prevIdent);
vStringCatS (fullMethodName, ":");
vStringClear (prevIdent);
break;
case ObjcIDENTIFIER:
vStringCopy (prevIdent, ident);
break;
case Tok_CurlL:
case Tok_semi:
/* method name is not simple */
if (vStringLength (fullMethodName) != '\0')
{
addTag (fullMethodName, methodKind);
vStringClear (fullMethodName);
}
else
addTag (prevIdent, methodKind);
toDoNext = &parseMethods;
parseImplemMethods (ident, what);
vStringClear (prevIdent);
break;
default:
break;
}
}
static void parseMethodsImplemName (vString * const ident, objcToken what)
{
switch (what)
{
case Tok_PARL:
toDoNext = &tillToken;
comeAfter = &parseMethodsImplemName;
waitedToken = Tok_PARR;
break;
case Tok_dpoint:
vStringCat (fullMethodName, prevIdent);
vStringCatS (fullMethodName, ":");
vStringClear (prevIdent);
break;
case ObjcIDENTIFIER:
vStringCopy (prevIdent, ident);
break;
case Tok_CurlL:
case Tok_semi:
/* method name is not simple */
if (vStringLength (fullMethodName) != '\0')
{
addTag (fullMethodName, methodKind);
vStringClear (fullMethodName);
}
else
addTag (prevIdent, methodKind);
toDoNext = &parseImplemMethods;
parseImplemMethods (ident, what);
vStringClear (prevIdent);
break;
default:
break;
}
}
static void parseImplemMethods (vString * const ident, objcToken what)
{
switch (what)
{
case Tok_PLUS: /* + */
toDoNext = &parseMethodsImplemName;
methodKind = K_CLASSMETHOD;
break;
case Tok_MINUS: /* - */
toDoNext = &parseMethodsImplemName;
methodKind = K_METHOD;
break;
case ObjcEND: /* @end */
popEnclosingContext ();
toDoNext = &globalScope;
break;
case Tok_CurlL: /* { */
toDoNext = &ignoreBalanced;
ignoreBalanced (ident, what);
comeAfter = &parseImplemMethods;
break;
default:
break;
}
}
static void parseProperty (vString * const ident, objcToken what)
{
switch (what)
{
case Tok_PARL:
toDoNext = &tillToken;
comeAfter = &parseProperty;
waitedToken = Tok_PARR;
break;
/* we got an identifier, keep track of it */
case ObjcIDENTIFIER:
vStringCopy (tempName, ident);
break;
/* our last kept identifier must be our variable name =) */
case Tok_semi:
addTag (tempName, K_PROPERTY);
vStringClear (tempName);
toDoNext = &parseMethods;
break;
default:
break;
}
}
static void parseMethods (vString * const UNUSED (ident), objcToken what)
{
switch (what)
{
case Tok_PLUS: /* + */
toDoNext = &parseMethodsName;
methodKind = K_CLASSMETHOD;
break;
case Tok_MINUS: /* - */
toDoNext = &parseMethodsName;
methodKind = K_METHOD;
break;
case ObjcPROPERTY:
toDoNext = &parseProperty;
break;
case ObjcEND: /* @end */
popEnclosingContext ();
toDoNext = &globalScope;
break;
case Tok_CurlL: /* { */
toDoNext = &parseFields;
break;
default:
break;
}
}
static void parseProtocol (vString * const ident, objcToken what)
{
if (what == ObjcIDENTIFIER)
{
pushEnclosingContext (ident, K_PROTOCOL);
addTag (ident, K_PROTOCOL);
}
toDoNext = &parseMethods;
}
static void parseImplementation (vString * const ident, objcToken what)
{
if (what == ObjcIDENTIFIER)
{
addTag (ident, K_IMPLEMENTATION);
pushEnclosingContext (ident, K_IMPLEMENTATION);
}
toDoNext = &parseImplemMethods;
}
static void parseInterface (vString * const ident, objcToken what)
{
if (what == ObjcIDENTIFIER)
{
addTag (ident, K_INTERFACE);
pushEnclosingContext (ident, K_INTERFACE);
}
toDoNext = &parseMethods;
}
static void parseStructMembers (vString * const ident, objcToken what)
{
static parseNext prev = NULL;
if (prev != NULL)
{
comeAfter = prev;
prev = NULL;
}
switch (what)
{
case ObjcIDENTIFIER:
vStringCopy (tempName, ident);
break;
case Tok_semi: /* ';' */
addTag (tempName, K_FIELD);
vStringClear (tempName);
break;
/* some types are complex, the only one
	 * we will lose is the function type.
*/
case Tok_CurlL: /* '{' */
case Tok_PARL: /* '(' */
case Tok_SQUAREL: /* '[' */
toDoNext = &ignoreBalanced;
prev = comeAfter;
comeAfter = &parseStructMembers;
ignoreBalanced (ident, what);
break;
case Tok_CurlR:
toDoNext = comeAfter;
break;
default:
/* don't care */
break;
}
}
/* Called just after the struct keyword */
static boolean parseStruct_gotName = FALSE;
static void parseStruct (vString * const ident, objcToken what)
{
switch (what)
{
case ObjcIDENTIFIER:
if (!parseStruct_gotName)
{
addTag (ident, K_STRUCT);
pushEnclosingContext (ident, K_STRUCT);
parseStruct_gotName = TRUE;
}
else
{
parseStruct_gotName = FALSE;
popEnclosingContext ();
toDoNext = comeAfter;
comeAfter (ident, what);
}
break;
case Tok_CurlL:
toDoNext = &parseStructMembers;
break;
/* maybe it was just a forward declaration
* in which case, we pop the context */
case Tok_semi:
if (parseStruct_gotName)
popEnclosingContext ();
toDoNext = comeAfter;
comeAfter (ident, what);
break;
default:
/* we don't care */
break;
}
}
/* Parse enumeration members, ignoring potential initialization */
static parseNext parseEnumFields_prev = NULL;
static void parseEnumFields (vString * const ident, objcToken what)
{
if (parseEnumFields_prev != NULL)
{
comeAfter = parseEnumFields_prev;
parseEnumFields_prev = NULL;
}
switch (what)
{
case ObjcIDENTIFIER:
addTag (ident, K_ENUM);
parseEnumFields_prev = comeAfter;
waitedToken = Tok_COMA;
		/* last item might not have a comma */
fallBackToken = Tok_CurlR;
fallback = comeAfter;
comeAfter = parseEnumFields;
toDoNext = &tillTokenOrFallBack;
break;
case Tok_CurlR:
toDoNext = comeAfter;
popEnclosingContext ();
break;
default:
/* don't care */
break;
}
}
/* parse enum ... { ... */
static boolean parseEnum_named = FALSE;
static void parseEnum (vString * const ident, objcToken what)
{
switch (what)
{
case ObjcIDENTIFIER:
if (!parseEnum_named)
{
addTag (ident, K_ENUM);
pushEnclosingContext (ident, K_ENUM);
parseEnum_named = TRUE;
}
else
{
parseEnum_named = FALSE;
popEnclosingContext ();
toDoNext = comeAfter;
comeAfter (ident, what);
}
break;
case Tok_CurlL: /* '{' */
toDoNext = &parseEnumFields;
parseEnum_named = FALSE;
break;
case Tok_semi: /* ';' */
if (parseEnum_named)
popEnclosingContext ();
toDoNext = comeAfter;
comeAfter (ident, what);
break;
default:
/* don't care */
break;
}
}
/* Parse something like
* typedef .... ident ;
 * ignoring the defined type, except in the case of struct
 * and enum, which are parsed.
*/
static void parseTypedef (vString * const ident, objcToken what)
{
switch (what)
{
case ObjcSTRUCT:
toDoNext = &parseStruct;
comeAfter = &parseTypedef;
break;
case ObjcENUM:
toDoNext = &parseEnum;
comeAfter = &parseTypedef;
break;
case ObjcIDENTIFIER:
vStringCopy (tempName, ident);
break;
case Tok_semi: /* ';' */
addTag (tempName, K_TYPEDEF);
vStringClear (tempName);
toDoNext = &globalScope;
break;
default:
/* we don't care */
break;
}
}
static boolean ignorePreprocStuff_escaped = FALSE;
static void ignorePreprocStuff (vString * const UNUSED (ident), objcToken what)
{
switch (what)
{
case Tok_Backslash:
ignorePreprocStuff_escaped = TRUE;
break;
case Tok_EOL:
if (ignorePreprocStuff_escaped)
{
ignorePreprocStuff_escaped = FALSE;
}
else
{
toDoNext = &globalScope;
}
break;
default:
ignorePreprocStuff_escaped = FALSE;
break;
}
}
static void parseMacroName (vString * const ident, objcToken what)
{
if (what == ObjcIDENTIFIER)
addTag (ident, K_MACRO);
toDoNext = &ignorePreprocStuff;
}
static void parsePreproc (vString * const ident, objcToken what)
{
switch (what)
{
case ObjcIDENTIFIER:
if (strcmp (vStringValue (ident), "define") == 0)
toDoNext = &parseMacroName;
else
toDoNext = &ignorePreprocStuff;
break;
default:
toDoNext = &ignorePreprocStuff;
break;
}
}
/* Handle the "strong" top levels, all 'big' declarations
* happen here */
static void globalScope (vString * const ident, objcToken what)
{
switch (what)
{
case Tok_Sharp:
toDoNext = &parsePreproc;
break;
case ObjcSTRUCT:
toDoNext = &parseStruct;
comeAfter = &globalScope;
break;
case ObjcIDENTIFIER:
/* we keep track of the identifier if we
* come across a function. */
vStringCopy (tempName, ident);
break;
case Tok_PARL:
/* if we find an opening parenthesis it means we
* found a function (or a macro...) */
addTag (tempName, K_FUNCTION);
vStringClear (tempName);
comeAfter = &globalScope;
toDoNext = &ignoreBalanced;
ignoreBalanced (ident, what);
break;
case ObjcINTERFACE:
toDoNext = &parseInterface;
break;
case ObjcIMPLEMENTATION:
toDoNext = &parseImplementation;
break;
case ObjcPROTOCOL:
toDoNext = &parseProtocol;
break;
case ObjcTYPEDEF:
toDoNext = parseTypedef;
comeAfter = &globalScope;
break;
case Tok_CurlL:
comeAfter = &globalScope;
toDoNext = &ignoreBalanced;
ignoreBalanced (ident, what);
break;
case ObjcEND:
case ObjcPUBLIC:
case ObjcPROTECTED:
case ObjcPRIVATE:
default:
/* we don't care */
break;
}
}
/*////////////////////////////////////////////////////////////////
//// Deal with the system */
static void findObjcTags (void)
{
vString *name = vStringNew ();
lexingState st;
objcToken tok;
parentName = vStringNew ();
tempName = vStringNew ();
fullMethodName = vStringNew ();
prevIdent = vStringNew ();
/* (Re-)initialize state variables, this might be a second file */
comeAfter = NULL;
fallback = NULL;
parentType = K_INTERFACE;
ignoreBalanced_count = 0;
methodKind = 0;
parseStruct_gotName = FALSE;
parseEnumFields_prev = NULL;
parseEnum_named = FALSE;
ignorePreprocStuff_escaped = FALSE;
st.name = vStringNew ();
st.cp = readLineFromInputFile ();
toDoNext = &globalScope;
tok = lex (&st);
while (tok != Tok_EOF)
{
(*toDoNext) (st.name, tok);
tok = lex (&st);
}
vStringDelete(st.name);
vStringDelete (name);
vStringDelete (parentName);
vStringDelete (tempName);
vStringDelete (fullMethodName);
vStringDelete (prevIdent);
parentName = NULL;
tempName = NULL;
prevIdent = NULL;
fullMethodName = NULL;
}
static void objcInitialize (const langType language)
{
Lang_ObjectiveC = language;
}
extern parserDefinition *ObjcParser (void)
{
static const char *const extensions[] = { "mm", "m", "h",
NULL };
static const char *const aliases[] = { "objc", "objective-c",
NULL };
static selectLanguage selectors[] = { selectByObjectiveCAndMatLabKeywords,
selectByObjectiveCKeywords,
NULL };
parserDefinition *def = parserNewFull ("ObjectiveC", KIND_FILE_ALT);
def->kinds = ObjcKinds;
def->kindCount = ARRAY_SIZE (ObjcKinds);
def->extensions = extensions;
def->aliases = aliases;
def->parser = findObjcTags;
def->initialize = objcInitialize;
def->selectLanguage = selectors;
def->keywordTable = objcKeywordTable;
def->keywordCount = ARRAY_SIZE (objcKeywordTable);
return def;
}
| 1 | 14,174 | these definitions of UNUSED aren't the same as the one you imported, so callers should be fixed (if any) | universal-ctags-ctags | c |
@@ -404,6 +404,7 @@ func (client *cniClient) createIPAMNetworkConfig(cfg *Config) (string, *libcni.N
ipamNetworkConfig := IPAMNetworkConfig{
Name: ECSIPAMPluginName,
+ Type: ECSIPAMPluginName,
CNIVersion: client.cniVersion,
IPAM: ipamConfig,
} | 1 | // Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may
// not use this file except in compliance with the License. A copy of the
// License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed
// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
// express or implied. See the License for the specific language governing
// permissions and limitations under the License.
package ecscni
import (
"context"
"encoding/json"
"fmt"
"net"
"os"
"os/exec"
"path/filepath"
"strings"
"time"
apieni "github.com/aws/amazon-ecs-agent/agent/api/eni"
"github.com/aws/amazon-ecs-agent/agent/logger"
"github.com/cihub/seelog"
"github.com/containernetworking/cni/libcni"
cnitypes "github.com/containernetworking/cni/pkg/types"
"github.com/containernetworking/cni/pkg/types/current"
"github.com/pkg/errors"
)
const (
currentCNISpec = "0.3.1"
// ECSCNIVersion, ECSCNIGitHash, VPCCNIGitHash needs to be updated every time CNI plugin is updated
currentECSCNIVersion = "2019.06.0"
currentECSCNIGitHash = "91ccefc8864ec14a32bd2b9d7e7de3060b685383"
currentVPCCNIGitHash = "85d8d2b11deb6cb3fd0e911e12379ddc0d7019ba"
vpcCNIPluginPath = "/log/vpc-branch-eni.log"
vpcCNIPluginInterfaceType = "vlan"
)
// CNIClient defines the method of setting/cleaning up container namespace
type CNIClient interface {
// Version returns the version of the plugin
Version(string) (string, error)
// Capabilities returns the capabilities supported by a plugin
Capabilities(string) ([]string, error)
// SetupNS sets up the namespace of container
SetupNS(context.Context, *Config, time.Duration) (*current.Result, error)
// CleanupNS cleans up the container namespace
CleanupNS(context.Context, *Config, time.Duration) error
// ReleaseIPResource marks the ip available in the ipam db
ReleaseIPResource(context.Context, *Config, time.Duration) error
}
// cniClient is the client to call plugin and setup the network
type cniClient struct {
pluginsPath string
cniVersion string
subnet string
libcni libcni.CNI
}
// NewClient creates a client of ecscni which is used to invoke the plugin
func NewClient(cfg *Config) CNIClient {
libcniConfig := &libcni.CNIConfig{
Path: []string{cfg.PluginsPath},
}
return &cniClient{
pluginsPath: cfg.PluginsPath,
cniVersion: cfg.MinSupportedCNIVersion,
subnet: ecsSubnet,
libcni: libcniConfig,
}
}
// SetupNS will set up the namespace of container, including create the bridge
// and the veth pair, move the eni to container namespace, setup the routes
func (client *cniClient) SetupNS(ctx context.Context,
cfg *Config,
timeout time.Duration) (*current.Result, error) {
ctx, cancel := context.WithTimeout(ctx, timeout)
defer cancel()
type output struct {
result *current.Result
err error
}
return client.setupNS(ctx, cfg)
}
func (client *cniClient) setupNS(ctx context.Context, cfg *Config) (*current.Result, error) {
runtimeConfig := libcni.RuntimeConf{
ContainerID: cfg.ContainerID,
NetNS: fmt.Sprintf(netnsFormat, cfg.ContainerPID),
}
seelog.Debugf("[ECSCNI] Starting ENI (%s) setup in the the container namespace: %s", cfg.ENIID, cfg.ContainerID)
os.Setenv("ECS_CNI_LOGLEVEL", logger.GetLevel())
defer os.Unsetenv("ECS_CNI_LOGLEVEL")
// Invoke eni plugin ADD command based on the type of eni plugin
if cfg.InterfaceAssociationProtocol == apieni.VLANInterfaceAssociationProtocol {
seelog.Debugf("[ECSVPCCNI] Starting VPC ENI (%s) setup in the the container namespace: %s", cfg.ENIID, cfg.ContainerID)
os.Setenv("VPC_CNI_LOG_LEVEL", logger.GetLevel())
os.Setenv("VPC_CNI_LOG_FILE", vpcCNIPluginPath)
defer os.Unsetenv("VPC_CNI_LOG_LEVEL")
defer os.Unsetenv("VPC_CNI_LOG_FILE")
result, err := client.add(ctx, runtimeConfig, cfg, client.createBranchENINetworkConfig)
if err != nil {
return nil, errors.Wrap(err, "branch cni setup: invoke branch eni plugin failed")
}
seelog.Debugf("[ECSVPCCNI] Branch ENI setup done: %s", result.String())
} else {
result, err := client.add(ctx, runtimeConfig, cfg, client.createENINetworkConfig)
if err != nil {
return nil, errors.Wrap(err, "cni setup: invoke eni plugin failed")
}
seelog.Debugf("[ECSCNI] ENI setup done: %s", result.String())
}
// Invoke bridge plugin ADD command
result, err := client.add(ctx, runtimeConfig, cfg, client.createBridgeNetworkConfigWithIPAM)
if err != nil {
return nil, errors.Wrap(err, "cni setup: invoke bridge plugin failed")
}
if cfg.AppMeshCNIEnabled {
// Invoke app mesh plugin ADD command
seelog.Debug("[APPMESH] Starting aws-appmesh setup")
_, err = client.add(ctx, runtimeConfig, cfg, client.createAppMeshConfig)
if err != nil {
return nil, errors.Wrap(err, "cni setup: invoke app mesh plugin failed")
}
seelog.Debug("[APPMESH] Set up aws-appmesh done")
}
seelog.Debugf("[ECSCNI] Set up container namespace done: %s", result.String())
if _, err = result.GetAsVersion(currentCNISpec); err != nil {
seelog.Warnf("[ECSCNI] Unable to convert result to spec version %s; error: %v; result is of version: %s",
currentCNISpec, err, result.Version())
return nil, err
}
var curResult *current.Result
curResult, ok := result.(*current.Result)
if !ok {
return nil, errors.Errorf(
"cni setup: unable to convert result to expected version '%s'",
result.String())
}
return curResult, nil
}
// CleanupNS will clean up the container namespace, including remove the veth
// pair and stop the dhclient
func (client *cniClient) CleanupNS(
ctx context.Context,
cfg *Config,
timeout time.Duration) error {
ctx, cancel := context.WithTimeout(ctx, timeout)
defer cancel()
return client.cleanupNS(ctx, cfg)
}
func (client *cniClient) cleanupNS(ctx context.Context, cfg *Config) error {
runtimeConfig := libcni.RuntimeConf{
ContainerID: cfg.ContainerID,
NetNS: fmt.Sprintf(netnsFormat, cfg.ContainerPID),
}
seelog.Debugf("[ECSCNI] Starting clean up the container namespace: %s", cfg.ContainerID)
os.Setenv("ECS_CNI_LOGLEVEL", logger.GetLevel())
defer os.Unsetenv("ECS_CNI_LOGLEVEL")
	// cleaning up the network namespace is separate from releasing the IP from IPAM
err := client.del(ctx, runtimeConfig, cfg, client.createBridgeNetworkConfigWithoutIPAM)
if err != nil {
return errors.Wrap(err, "cni cleanup: invoke bridge plugin failed")
}
seelog.Debugf("[ECSCNI] bridge cleanup done: %s", cfg.ContainerID)
// clean up eni network namespace
if cfg.InterfaceAssociationProtocol == apieni.VLANInterfaceAssociationProtocol {
os.Setenv("VPC_CNI_LOG_LEVEL", logger.GetLevel())
os.Setenv("VPC_CNI_LOG_FILE", vpcCNIPluginPath)
defer os.Unsetenv("VPC_CNI_LOG_LEVEL")
defer os.Unsetenv("VPC_CNI_LOG_FILE")
err = client.del(ctx, runtimeConfig, cfg, client.createBranchENINetworkConfig)
if err != nil {
return errors.Wrap(err, "VPC cni cleanup: invoke eni plugin failed")
}
seelog.Debugf("[ECSVPCCNI] container namespace cleanup done: %s", cfg.ContainerID)
} else {
err = client.del(ctx, runtimeConfig, cfg, client.createENINetworkConfig)
if err != nil {
return errors.Wrap(err, "cni cleanup: invoke eni plugin failed")
}
seelog.Debugf("[ECSCNI] container namespace cleanup done: %s", cfg.ContainerID)
}
if cfg.AppMeshCNIEnabled {
// clean up app mesh network namespace
seelog.Debug("[APPMESH] Starting clean up aws-appmesh namespace")
err = client.del(ctx, runtimeConfig, cfg, client.createAppMeshConfig)
if err != nil {
return errors.Wrap(err, "cni cleanup: invoke app mesh plugin failed")
}
seelog.Debug("[APPMESH] Clean up aws-appmesh namespace done")
}
return nil
}
// ReleaseIPResource marks the ip available in the ipam db
func (client *cniClient) ReleaseIPResource(ctx context.Context, cfg *Config, timeout time.Duration) error {
ctx, cancel := context.WithTimeout(ctx, timeout)
defer cancel()
runtimeConfig := libcni.RuntimeConf{
ContainerID: cfg.ContainerID,
NetNS: fmt.Sprintf(netnsFormat, cfg.ContainerPID),
}
seelog.Debugf("[ECSCNI] Releasing the ip resource from ipam db, id: [%s], ip: [%v]", cfg.ID, cfg.IPAMV4Address)
os.Setenv("ECS_CNI_LOGLEVEL", logger.GetLevel())
defer os.Unsetenv("ECS_CNI_LOGLEVEL")
return client.del(ctx, runtimeConfig, cfg, client.createIPAMNetworkConfig)
}
// add invokes the ADD command of the given plugin
func (client *cniClient) add(ctx context.Context, runtimeConfig libcni.RuntimeConf,
cfg *Config,
pluginConfigFunc func(*Config) (string, *libcni.NetworkConfig, error)) (cnitypes.Result, error) {
deviceName, networkConfig, err := pluginConfigFunc(cfg)
if err != nil {
return nil, err
}
runtimeConfig.IfName = deviceName
return client.libcni.AddNetwork(ctx, networkConfig, &runtimeConfig)
}
// del invokes the DEL command of the given plugin
func (client *cniClient) del(ctx context.Context, runtimeConfig libcni.RuntimeConf,
cfg *Config,
pluginConfigFunc func(*Config) (string, *libcni.NetworkConfig, error)) error {
deviceName, networkConfig, err := pluginConfigFunc(cfg)
if err != nil {
return err
}
runtimeConfig.IfName = deviceName
return client.libcni.DelNetwork(ctx, networkConfig, &runtimeConfig)
}
// createBridgeNetworkConfigWithIPAM creates the config of bridge for ADD command, where
// bridge plugin acquires the IP and route information from IPAM
func (client *cniClient) createBridgeNetworkConfigWithIPAM(cfg *Config) (string, *libcni.NetworkConfig, error) {
// Create the bridge config first
bridgeConfig := client.createBridgeConfig(cfg)
// Create the ipam config
ipamConfig, err := client.createIPAMConfig(cfg)
if err != nil {
return "", nil, errors.Wrap(err, "createBridgeNetworkConfigWithIPAM: create ipam configuration failed")
}
bridgeConfig.IPAM = ipamConfig
networkConfig, err := client.constructNetworkConfig(bridgeConfig, ECSBridgePluginName)
if err != nil {
return "", nil, errors.Wrap(err, "createBridgeNetworkConfigWithIPAM: construct bridge and ipam network configuration failed")
}
return defaultVethName, networkConfig, nil
}
// createBridgeNetworkConfigWithoutIPAM creates the config of the bridge for removal
func (client *cniClient) createBridgeNetworkConfigWithoutIPAM(cfg *Config) (string, *libcni.NetworkConfig, error) {
networkConfig, err := client.constructNetworkConfig(client.createBridgeConfig(cfg), ECSBridgePluginName)
if err != nil {
return "", nil, errors.Wrap(err, "createBridgeNetworkConfigWithoutIPAM: construct bridge network configuration failed")
}
return defaultVethName, networkConfig, nil
}
func (client *cniClient) createBridgeConfig(cfg *Config) BridgeConfig {
bridgeName := defaultBridgeName
if len(cfg.BridgeName) != 0 {
bridgeName = cfg.BridgeName
}
bridgeConfig := BridgeConfig{
Type: ECSBridgePluginName,
CNIVersion: client.cniVersion,
BridgeName: bridgeName,
}
return bridgeConfig
}
// constructNetworkConfig takes in the config from the agent and constructs the configuration
// that's accepted by libcni
func (client *cniClient) constructNetworkConfig(cfg interface{}, plugin string) (*libcni.NetworkConfig, error) {
configBytes, err := json.Marshal(cfg)
if err != nil {
seelog.Errorf("[ECSCNI] Marshal configuration for plugin %s failed, error: %v", plugin, err)
return nil, err
}
networkConfig := &libcni.NetworkConfig{
Network: &cnitypes.NetConf{
Type: plugin,
CNIVersion: client.cniVersion,
},
Bytes: configBytes,
}
return networkConfig, nil
}
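// Illustrative note (added comment; the bridge name below is a made-up value): the
// create*NetworkConfig helpers in this file each build a plugin-specific struct and hand
// it to constructNetworkConfig, so e.g. createBridgeNetworkConfigWithoutIPAM(&Config{
// BridgeName: "ecs-br0"}) returns, among other values, a *libcni.NetworkConfig whose
// Bytes field is the JSON-marshaled BridgeConfig and whose Network field carries the
// plugin type and CNI version.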
func (client *cniClient) createENINetworkConfig(cfg *Config) (string, *libcni.NetworkConfig, error) {
eniConf := ENIConfig{
Type: ECSENIPluginName,
CNIVersion: client.cniVersion,
ENIID: cfg.ENIID,
IPV4Address: cfg.ENIIPV4Address,
IPV6Address: cfg.ENIIPV6Address,
MACAddress: cfg.ENIMACAddress,
BlockInstanceMetdata: cfg.BlockInstanceMetdata,
SubnetGatewayIPV4Address: cfg.SubnetGatewayIPV4Address,
}
networkConfig, err := client.constructNetworkConfig(eniConf, ECSENIPluginName)
if err != nil {
return "", nil, errors.Wrap(err, "createENINetworkConfig: construct the eni network configuration failed")
}
return defaultENIName, networkConfig, nil
}
func (client *cniClient) createAppMeshConfig(cfg *Config) (string, *libcni.NetworkConfig, error) {
appMeshConfig := AppMeshConfig{
Type: ECSAppMeshPluginName,
CNIVersion: client.cniVersion,
IgnoredUID: cfg.IgnoredUID,
IgnoredGID: cfg.IgnoredGID,
ProxyIngressPort: cfg.ProxyIngressPort,
ProxyEgressPort: cfg.ProxyEgressPort,
AppPorts: cfg.AppPorts,
EgressIgnoredPorts: cfg.EgressIgnoredPorts,
EgressIgnoredIPs: cfg.EgressIgnoredIPs,
}
networkConfig, err := client.constructNetworkConfig(appMeshConfig, ECSAppMeshPluginName)
if err != nil {
return "", nil, errors.Wrap(err, "createAppMeshNetworkConfig: construct the app mesh network configuration failed")
}
return defaultAppMeshIfName, networkConfig, nil
}
func (client *cniClient) createBranchENINetworkConfig(cfg *Config) (string, *libcni.NetworkConfig, error) {
// cfg.ENIIPV4Address does not have a prefix length while BranchIPAddress expects a prefix length
// cfg.SubnetGatewayIPV4Address has prefix length while BranchGatewayIPAddress does not expect the prefix length
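	// Worked example (illustrative values): with cfg.ENIIPV4Address "10.0.1.5" and
	// cfg.SubnetGatewayIPV4Address "10.0.1.1/24", the lines below produce
	// BranchIPAddress "10.0.1.5/24" and BranchGatewayIPAddress "10.0.1.1".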
stringSlice := strings.Split(cfg.SubnetGatewayIPV4Address, "/")
ENIIPV4Address := cfg.ENIIPV4Address + "/" + stringSlice[1]
BranchGatewayIPAddress := stringSlice[0]
eniConf := BranchENIConfig{
TrunkMACAddress: cfg.TrunkMACAddress,
Type: ECSBranchENIPluginName,
CNIVersion: client.cniVersion,
BranchVlanID: cfg.BranchVlanID,
BranchIPAddress: ENIIPV4Address,
BranchMACAddress: cfg.ENIMACAddress,
BlockInstanceMetdata: cfg.BlockInstanceMetdata,
BranchGatewayIPAddress: BranchGatewayIPAddress,
InterfaceType: vpcCNIPluginInterfaceType,
}
networkConfig, err := client.constructNetworkConfig(eniConf, ECSBranchENIPluginName)
if err != nil {
return "", nil, errors.Wrap(err, "createENINetworkConfig: construct the eni network configuration failed")
}
return defaultENIName, networkConfig, nil
}
// createIPAMNetworkConfig constructs the ipam configuration accepted by libcni
func (client *cniClient) createIPAMNetworkConfig(cfg *Config) (string, *libcni.NetworkConfig, error) {
ipamConfig, err := client.createIPAMConfig(cfg)
if err != nil {
return defaultVethName, nil, errors.Wrap(err, "createIPAMNetworkConfig: create ipam network configuration failed")
}
ipamNetworkConfig := IPAMNetworkConfig{
Name: ECSIPAMPluginName,
CNIVersion: client.cniVersion,
IPAM: ipamConfig,
}
networkConfig, err := client.constructNetworkConfig(ipamNetworkConfig, ECSIPAMPluginName)
if err != nil {
return "", nil, errors.Wrap(err, "createIPAMNetworkConfig: construct ipam network configuration failed")
}
return defaultVethName, networkConfig, nil
}
func (client *cniClient) createIPAMConfig(cfg *Config) (IPAMConfig, error) {
_, dst, err := net.ParseCIDR(TaskIAMRoleEndpoint)
if err != nil {
return IPAMConfig{}, err
}
routes := []*cnitypes.Route{
{
Dst: *dst,
},
}
for _, route := range cfg.AdditionalLocalRoutes {
seelog.Debugf("[ECSCNI] Adding an additional route for %s", route)
ipNetRoute := (net.IPNet)(route)
routes = append(routes, &cnitypes.Route{Dst: ipNetRoute})
}
ipamConfig := IPAMConfig{
Type: ECSIPAMPluginName,
CNIVersion: client.cniVersion,
IPV4Subnet: client.subnet,
IPV4Address: cfg.IPAMV4Address,
ID: cfg.ID,
IPV4Routes: routes,
}
return ipamConfig, nil
}
// Version returns the version of the plugin
func (client *cniClient) Version(name string) (string, error) {
file := filepath.Join(client.pluginsPath, name)
// Check if the plugin file exists before executing it
_, err := os.Stat(file)
if err != nil {
return "", err
}
cmd := exec.Command(file, versionCommand)
versionInfo, err := cmd.Output()
if err != nil {
return "", err
}
version := &cniPluginVersion{}
// versionInfo is of the format
// {"version":"2017.06.0","dirty":true,"gitShortHash":"226db36"}
// Unmarshal this
err = json.Unmarshal(versionInfo, version)
if err != nil {
return "", errors.Wrapf(err, "ecscni: unmarshal version from string: %s", versionInfo)
}
return version.str(), nil
}
// cniPluginVersion is used to convert the JSON output of the
// '--version' command into a string
type cniPluginVersion struct {
Version string `json:"version"`
Dirty bool `json:"dirty"`
Hash string `json:"gitShortHash"`
}
// str generates a string version of the CNI plugin version
// Example:
// {"version":"2017.06.0","dirty":true,"gitShortHash":"226db36"} => @226db36-2017.06.0
// {"version":"2017.06.0","dirty":false,"gitShortHash":"326db36"} => 326db36-2017.06.0
func (version *cniPluginVersion) str() string {
ver := ""
if version.Dirty {
ver = "@"
}
return ver + version.Hash + "-" + version.Version
}
// Capabilities returns the capabilities supported by a plugin
func (client *cniClient) Capabilities(name string) ([]string, error) {
file := filepath.Join(client.pluginsPath, name)
// Check if the plugin file exists before executing it
_, err := os.Stat(file)
if err != nil {
return nil, errors.Wrapf(err, "ecscni: unable to describe file info for '%s'", file)
}
cmd := exec.Command(file, capabilitiesCommand)
capabilitiesInfo, err := cmd.Output()
if err != nil {
return nil, errors.Wrapf(err, "ecscni: failed invoking capabilities command for '%s'", name)
}
capabilities := &struct {
Capabilities []string `json:"capabilities"`
}{}
err = json.Unmarshal(capabilitiesInfo, capabilities)
if err != nil {
return nil, errors.Wrapf(err, "ecscni: failed to unmarshal capabilities for '%s' from string: %s", name, capabilitiesInfo)
}
return capabilities.Capabilities, nil
}
| 1 | 22,853 | is type a free string too? | aws-amazon-ecs-agent | go |
@@ -27,15 +27,16 @@ class WrapFunction(nn.Module):
return self.wrapped_function(*args, **kwargs)
-def ort_validate(model, feats, onnx_io='tmp.onnx'):
+def ort_validate(model_func, feats, onnx_io='tmp.onnx'):
"""Validate the output of the onnxruntime backend is the same as the output
generated by torch.
Args:
- model (nn.Module): the model to be verified
+ model_func : the function of model to be verified
feats (list(torch.Tensor) | torch.Tensor): the input of model
onnx_io (str): the name of onnx output file
"""
+ model = WrapFunction(model_func)
model.cpu().eval()
with torch.no_grad():
torch.onnx.export( | 1 | import warnings
from os import path as osp
import numpy as np
import onnx
import onnxruntime as ort
import torch
import torch.nn as nn
ort_custom_op_path = ''
try:
from mmcv.ops import get_onnxruntime_op_path
ort_custom_op_path = get_onnxruntime_op_path()
except (ImportError, ModuleNotFoundError):
warnings.warn('If input model has custom op from mmcv, \
you may have to build mmcv with ONNXRuntime from source.')
class WrapFunction(nn.Module):
"""Wrap the function to be tested for torch.onnx.export tracking."""
def __init__(self, wrapped_function):
super(WrapFunction, self).__init__()
self.wrapped_function = wrapped_function
def forward(self, *args, **kwargs):
return self.wrapped_function(*args, **kwargs)
def ort_validate(model, feats, onnx_io='tmp.onnx'):
"""Validate the output of the onnxruntime backend is the same as the output
generated by torch.
Args:
model (nn.Module): the model to be verified
feats (list(torch.Tensor) | torch.Tensor): the input of model
onnx_io (str): the name of onnx output file
"""
model.cpu().eval()
with torch.no_grad():
torch.onnx.export(
model,
feats,
onnx_io,
export_params=True,
keep_initializers_as_inputs=True,
do_constant_folding=True,
verbose=False,
opset_version=11)
onnx_outputs = verify_model(feats)
torch_outputs = convert_result_list(model.forward(feats))
torch_outputs = [
torch_output.detach().numpy() for torch_output in torch_outputs
]
# match torch_outputs and onnx_outputs
for i in range(len(onnx_outputs)):
np.testing.assert_allclose(
torch_outputs[i], onnx_outputs[i], rtol=1e-03, atol=1e-05)
def verify_model(feat, onnx_io='tmp.onnx'):
"""Run the model in onnxruntime env.
Args:
feat (list[Tensor]): A list of tensors from torch.rand,
each is a 4D-tensor.
Returns:
list[np.array]: onnxruntime infer result, each is a np.array
"""
onnx_model = onnx.load(onnx_io)
onnx.checker.check_model(onnx_model)
session_options = ort.SessionOptions()
# register custom op for onnxruntime
if osp.exists(ort_custom_op_path):
session_options.register_custom_ops_library(ort_custom_op_path)
sess = ort.InferenceSession(onnx_io, session_options)
if isinstance(feat, torch.Tensor):
onnx_outputs = sess.run(None,
{sess.get_inputs()[0].name: feat.numpy()})
else:
onnx_outputs = sess.run(None, {
sess.get_inputs()[i].name: feat[i].numpy()
for i in range(len(feat))
})
return onnx_outputs
def convert_result_list(outputs):
"""Convert the torch forward outputs containing tuple or list to a list
only containing torch.Tensor.
Args:
        outputs (list(Tensor) | tuple(list(Tensor)) | ...): the outputs
in torch env, maybe containing nested structures such as list
or tuple.
Returns:
list(Tensor): a list only containing torch.Tensor
"""
# recursive end condition
if isinstance(outputs, torch.Tensor):
return [outputs]
ret = []
for sub in outputs:
ret += convert_result_list(sub)
return ret
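# Illustrative usage sketch (commented out; the doubling function and input shape are
# made-up values, not part of the utilities above):
#
#   func = lambda x: x * 2              # any traceable callable
#   wrapped_model = WrapFunction(func)  # gives it an nn.Module interface for export
#   feats = torch.rand(1, 3, 8, 8)
#   ort_validate(wrapped_model, feats)  # exports to ONNX and checks outputs match torch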
| 1 | 22,988 | here input could be a normal function or an instance of torch.nn.Module. | open-mmlab-mmdetection | py |
@@ -65,6 +65,7 @@ module Travis
sh.echo 'Please open any issues at https://github.com/travis-ci/travis-ci/issues/new and cc @domenkozar @garbas @matthewbauer @grahamc', ansi: :green
sh.cmd "nix-env --version"
+ sh.cmd "nix-instantiate --eval -E 'with import <nixpkgs> {}; lib.version or lib.nixpkgsVersion'"
end
def script | 1 | # Maintained by
# - Domen Kožar @domenkozar <[email protected]>
# - Rok Garbas @garbas <[email protected]>
# - Matthew Bauer @matthewbauer <[email protected]>
# - Graham Christensen @grahamc <[email protected]>
module Travis
module Build
class Script
class Nix < Script
DEFAULTS = {
nix: '2.0.4'
}
def export
super
# prevent curl from polluting logs but still show errors
sh.export 'NIX_CURL_FLAGS', '-sS'
end
def configure
super
sh.cmd "echo '-s' >> ~/.curlrc"
sh.cmd "echo '-S' >> ~/.curlrc"
sh.cmd "echo '--retry 3' >> ~/.curlrc"
# Nix needs to be able to exec on /tmp on Linux
# This will emit an error in the container but
# it's still needed for "trusty" Linux.
if config[:os] == 'linux'
sh.cmd "sudo mount -o remount,exec /run"
sh.cmd "sudo mount -o remount,exec /run/user"
sh.cmd "sudo mkdir -p -m 0755 /nix/"
sh.cmd "sudo chown $USER /nix/"
# Set nix config dir and make config Hydra compatible
sh.cmd "echo 'build-max-jobs = 4' | sudo tee /etc/nix/nix.conf > /dev/null"
end
end
def setup
super
version = config[:nix]
sh.fold 'nix.install' do
sh.cmd "wget --retry-connrefused --waitretry=1 -O /tmp/nix-install https://nixos.org/releases/nix/nix-#{version}/install"
sh.cmd "yes | sh /tmp/nix-install"
if config[:os] == 'linux'
# single-user install (linux)
sh.cmd 'source ${TRAVIS_HOME}/.nix-profile/etc/profile.d/nix.sh'
else
# multi-user install (macos)
sh.cmd 'source /nix/var/nix/profiles/default/etc/profile.d/nix-daemon.sh'
end
end
end
def announce
super
sh.echo 'Nix support for Travis CI is community maintained.', ansi: :green
sh.echo 'Please open any issues at https://github.com/travis-ci/travis-ci/issues/new and cc @domenkozar @garbas @matthewbauer @grahamc', ansi: :green
sh.cmd "nix-env --version"
end
def script
sh.cmd 'nix-build'
end
end
end
end
end
| 1 | 16,719 | Example output: "19.03.git.a7f4a860d0c" At some point `nixpkgsVersion` was renamed to `version` since someone may use an old channel we fallback to `nixpkgsVersion` for backwards compatibility. | travis-ci-travis-build | rb |
@@ -39,8 +39,6 @@ class LanguageTreeReadAction
/**
- * @IsGranted("SETTINGS_READ")
- *
* @SWG\Tag(name="Language")
* @SWG\Parameter(
* name="language", | 1 | <?php
/**
* Copyright © Bold Brand Commerce Sp. z o.o. All rights reserved.
* See LICENSE.txt for license details.
*/
declare(strict_types = 1);
namespace Ergonode\Core\Application\Controller\Api\LanguageTree;
use Ergonode\Api\Application\Response\SuccessResponse;
use Ergonode\Core\Domain\Repository\LanguageTreeRepositoryInterface;
use Ergonode\Core\Domain\ValueObject\Language;
use Sensio\Bundle\FrameworkExtraBundle\Configuration\IsGranted;
use Swagger\Annotations as SWG;
use Symfony\Component\HttpFoundation\Response;
use Symfony\Component\Routing\Annotation\Route;
/**
* @Route(
* path="/tree",
* methods={"GET"}
* )
*/
class LanguageTreeReadAction
{
/**
* @var LanguageTreeRepositoryInterface
*/
private LanguageTreeRepositoryInterface $repository;
/**
* @param LanguageTreeRepositoryInterface $repository
*/
public function __construct(LanguageTreeRepositoryInterface $repository)
{
$this->repository = $repository;
}
/**
* @IsGranted("SETTINGS_READ")
*
* @SWG\Tag(name="Language")
* @SWG\Parameter(
* name="language",
* in="path",
* type="string",
* required=true,
* default="en_GB",
* description="Language Code",
* )
* @SWG\Response(
* response=200,
* description="Returns Language tree"
* )
* @SWG\Response(
* response=404,
* description="Not found"
* )
*
* @param Language $language
*
* @return Response
*/
public function __invoke(Language $language): Response
{
$tree = $this->repository->load();
return new SuccessResponse($tree);
}
}
| 1 | 8,781 | Class import is therefore redundant I guess :) | ergonode-backend | php |
@@ -58,11 +58,13 @@ const (
infoTryingToStopNode = "Trying to stop the node..."
infoNodeSuccessfullyStopped = "The node was successfully stopped."
infoNodeStatus = "Last committed block: %d\nTime since last block: %s\nSync Time: %s\nLast consensus protocol: %s\nNext consensus protocol: %s\nRound for next consensus protocol: %d\nNext consensus protocol supported: %v"
+ warnNodeCreationIPFailure = "Parsing passed IP %v failed: need a valid IPv4 or IPv6 address with a specified port number. Acting as though IP was not supplied."
errorNodeNotDetected = "Algorand node does not appear to be running: %s"
errorNodeStatus = "Cannot contact Algorand node: %s."
errorNodeFailedToStart = "Algorand node failed to start: %s"
errorNodeRunning = "Node must be stopped before writing APIToken"
errorNodeFailGenToken = "Cannot generate API token: %s"
+ errorNodeCreation = "Error during node creation: %v"
errorKill = "Cannot kill node: %s"
errorCloningNode = "Error cloning the node: %s"
infoNodeCloned = "Node cloned successfully to: %s" | 1 | // Copyright (C) 2019 Algorand, Inc.
// This file is part of go-algorand
//
// go-algorand is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// go-algorand is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
package main
const (
// General
errorNoDataDirectory = "Data directory not specified. Please use -d or set $ALGORAND_DATA in your environment. Exiting."
errorOneDataDirSupported = "Only one data directory can be specified for this command."
errorRequestFail = "Error processing command: %s"
errorGenesisIDFail = "Error determining kmd folder (%s). Ensure the node is running in %s."
errorDirectoryNotExist = "Specified directory '%s' does not exist."
// Account
infoNoAccounts = "Did not find any account. Please import or create a new one."
infoRenamedAccount = "Renamed account '%s' to '%s'"
infoImportedKey = "Imported %s"
infoExportedKey = "Exported key for account %s: \"%s\""
infoImportedNKeys = "Imported %d key%s"
infoCreatedNewAccount = "Created new account with address %s"
errorNameAlreadyTaken = "The account name '%s' is already taken, please choose another."
errorNameDoesntExist = "An account named '%s' does not exist."
infoSetAccountToDefault = "Set account '%s' to be the default account"
errorSigningTX = "Couldn't sign tx with kmd: %s"
errorOnlineTX = "Couldn't sign tx: %s (for multisig accounts, write tx to file and sign manually)"
errorConstructingTX = "Couldn't construct tx: %s"
errorBroadcastingTX = "Couldn't broadcast tx with algod: %s"
warnMultisigDuplicatesDetected = "Warning: one or more duplicate addresses detected in multisig account creation. This will effectively give the duplicated address(es) extra signature weight. Continuing multisig account creation."
errLastRoundInvalid = "roundLastValid needs to be well after the current round (%d)"
errExistingPartKey = "Account already has a participation key valid at least until roundLastValid (%d) - current is %d"
errorSeedConversion = "Got private key for account %s, but was unable to convert to seed: %s"
errorMnemonicConversion = "Got seed for account %s, but was unable to convert to mnemonic: %s"
// KMD
infoKMDStopped = "Stopped kmd"
infoKMDAlreadyStarted = "kmd is already running"
infoKMDAlreadyStopped = "kmd doesn't appear to be running"
infoKMDStarted = "Successfully started kmd"
errorKMDFailedToStart = "Failed to start kmd: %s"
errorKMDFailedToStop = "Failed to stop kmd: %s"
// Node
infoNodeStart = "Algorand node successfully started!"
infoNodeAlreadyStarted = "Algorand node was already started!"
infoTryingToStopNode = "Trying to stop the node..."
infoNodeSuccessfullyStopped = "The node was successfully stopped."
infoNodeStatus = "Last committed block: %d\nTime since last block: %s\nSync Time: %s\nLast consensus protocol: %s\nNext consensus protocol: %s\nRound for next consensus protocol: %d\nNext consensus protocol supported: %v"
errorNodeNotDetected = "Algorand node does not appear to be running: %s"
errorNodeStatus = "Cannot contact Algorand node: %s."
errorNodeFailedToStart = "Algorand node failed to start: %s"
errorNodeRunning = "Node must be stopped before writing APIToken"
errorNodeFailGenToken = "Cannot generate API token: %s"
errorKill = "Cannot kill node: %s"
errorCloningNode = "Error cloning the node: %s"
infoNodeCloned = "Node cloned successfully to: %s"
infoNodeWroteToken = "Successfully wrote new API token: %s"
infoNodePendingTxnsDescription = "Pending Transactions (Truncated max=%d, Total in pool=%d): "
infoNodeNoPendingTxnsDescription = "None"
infoDataDir = "[Data Directory: %s]"
errLoadingConfig = "Error loading Config file from '%s': %v"
// Clerk
infoTxIssued = "Sent %d MicroAlgos from account %s to address %s, transaction ID: %s. Fee set to %d"
infoTxCommitted = "Transaction %s committed in round %d"
infoTxPending = "Transaction %s still pending as of round %d"
malformedNote = "Cannot base64-decode note %s: %s"
fileReadError = "Cannot read file %s: %s"
fileWriteError = "Cannot write file %s: %s"
txDecodeError = "Cannot decode transactions from %s: %s"
txDupError = "Duplicate transaction %s in %s"
txLengthError = "Transaction list length mismatch"
txMergeMismatch = "Cannot merge transactions: transaction IDs differ"
txMergeError = "Cannot merge signatures: %v"
txNoFilesError = "No input filenames specified"
soFlagError = "-s is not meaningful without -o"
infoRawTxIssued = "Raw transaction ID %s issued"
txPoolError = "Transaction %s kicked out of local node pool: %s"
infoAutoFeeSet = "Automatically set fee to %d MicroAlgos"
loggingNotConfigured = "Remote logging is not currently configured and won't be enabled"
loggingNotEnabled = "Remote logging is currently disabled"
loggingEnabled = "Remote logging is enabled. Node = %s, Guid = %s"
infoNetworkAlreadyExists = "Network Root Directory '%s' already exists"
errorCreateNetwork = "Error creating private network: %s"
infoNetworkCreated = "Network %s created under %s"
errorLoadingNetwork = "Error loading deployed network: %s"
errorStartingNetwork = "Error starting deployed network: %s"
infoNetworkStarted = "Network Started under %s"
infoNetworkStopped = "Network Stopped under %s"
infoNetworkDeleted = "Network Deleted under %s"
// Wallet
infoRecoveryPrompt = "Please type your recovery mnemonic below, and hit return when you are done: "
infoChoosePasswordPrompt = "Please choose a password for wallet '%s': "
infoPasswordConfirmation = "Please confirm the password: "
infoCreatingWallet = "Creating wallet..."
infoCreatedWallet = "Created wallet '%s'"
infoBackupExplanation = "Your new wallet has a backup phrase that can be used for recovery.\nKeeping this backup phrase safe is extremely important.\nWould you like to see it now? (Y/n): "
infoPrintedBackupPhrase = "Your backup phrase is printed below.\nKeep this information safe -- never share it with anyone!"
infoBackupPhrase = "\n\x1B[32m%s\033[0m"
infoNoWallets = "No wallets found. You can create a wallet with `goal wallet new`"
errorCouldntCreateWallet = "Couldn't create wallet: %s"
errorCouldntInitializeWallet = "Couldn't initialize wallet: %s"
errorCouldntExportMDK = "Couldn't export master derivation key: %s"
errorCouldntMakeMnemonic = "Couldn't make mnemonic: %s"
errorCouldntListWallets = "Couldn't list wallets: %s"
errorPasswordConfirmation = "Password confirmation did not match"
errorBadMnemonic = "Problem with mnemonic: %s"
errorBadRecoveredKey = "Recovered invalid key"
errorFailedToReadResponse = "Couldn't read response: %s"
errorFailedToReadPassword = "Couldn't read password: %s"
// Commands
infoPasswordPrompt = "Please enter the password for wallet '%s': "
infoSetWalletToDefault = "Set wallet '%s' to be the default wallet"
errCouldNotListWallets = "Couldn't list wallets: %s"
errNoWallets = "No wallets found. Create a new wallet with `goal wallet new [wallet name]`"
errNoDefaultWallet = "No default wallet found. Specify a wallet by name with -w, or set a default with `goal wallet -f [wallet name]`"
errFindingWallet = "Couldn't find wallet: %s"
errWalletNameAmbiguous = "More than one wallet named '%s' exists. Please remove any wallets with the same name from the kmd wallet directory"
errWalletIDDuplicate = "More than one wallet with ID '%s' exists. Please remove any wallets with the same ID from the kmd wallet directory"
errGettingWalletName = "Couldn't get wallet name from ID '%s': %s"
errWalletNotFound = "Wallet '%s' not found"
errDefaultWalletNotFound = "Wallet with ID '%s' not found. Was the default wallet deleted?"
errGettingToken = "Couldn't get token for wallet '%s' (ID: %s): %s"
)
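The review comment on this diff asks for a hard failure when the supplied IP cannot be parsed, rather than the patch's warnNodeCreationIPFailure fallback of acting as though no IP was supplied. Below is a minimal Go sketch of that stricter behaviour; validateAddress and the sample address are hypothetical and not part of the actual goal code:
package main

import (
	"fmt"
	"net"
	"os"
)

// validateAddress is a hypothetical helper illustrating the reviewer's request:
// reject a malformed IP:port outright instead of warning and continuing
// as though no IP had been supplied.
func validateAddress(addr string) error {
	host, _, err := net.SplitHostPort(addr)
	if err != nil {
		return fmt.Errorf("need a valid IPv4 or IPv6 address with a port number, got %q: %v", addr, err)
	}
	if net.ParseIP(host) == nil {
		return fmt.Errorf("%q is not a valid IPv4 or IPv6 address", host)
	}
	return nil
}

func main() {
	// 256.1.1.1 is not a valid IPv4 address, so this errors out instead of falling back.
	if err := validateAddress("256.1.1.1:4160"); err != nil {
		fmt.Fprintln(os.Stderr, "Error during node creation:", err)
		os.Exit(1)
	}
}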
| 1 | 35,673 | If parsing fails, don't act as no IP specified. Error out. | algorand-go-algorand | go |
@@ -187,7 +187,9 @@ public class OverviewFragment extends Fragment implements View.OnClickListener,
final Object updateSync = new Object();
- public enum CHARTTYPE {PRE,BAS, IOB, COB, DEV, SEN};
+ public enum CHARTTYPE {PRE, BAS, IOB, COB, DEV, SEN}
+
+ ;
private static final ScheduledExecutorService worker = Executors.newSingleThreadScheduledExecutor();
private static ScheduledFuture<?> scheduledUpdate = null;
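Note that the hunk above still leaves a lone ";" on its own line after the reformatted enum; it compiles (an empty declaration is legal at class-body level) but is leftover noise. A tiny illustrative Java sketch of the fully tidied form, assuming nothing intentionally relies on that stray separator:
// Hypothetical standalone snippet, not the real OverviewFragment: shows the enum
// declaration with the spacing fix applied and the stray ";" dropped entirely.
class ChartTypeSketch {
    public enum CHARTTYPE { PRE, BAS, IOB, COB, DEV, SEN }
}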
| 1 | package info.nightscout.androidaps.plugins.Overview;
import android.annotation.SuppressLint;
import android.app.Activity;
import android.app.NotificationManager;
import android.content.ActivityNotFoundException;
import android.content.Context;
import android.content.DialogInterface;
import android.content.Intent;
import android.content.pm.PackageManager;
import android.graphics.Color;
import android.graphics.Paint;
import android.os.Bundle;
import android.os.Handler;
import android.support.v4.app.Fragment;
import android.support.v4.app.FragmentActivity;
import android.support.v4.app.FragmentManager;
import android.support.v4.content.ContextCompat;
import android.support.v4.content.res.ResourcesCompat;
import android.support.v7.app.AlertDialog;
import android.support.v7.widget.CardView;
import android.support.v7.widget.LinearLayoutManager;
import android.support.v7.widget.PopupMenu;
import android.support.v7.widget.RecyclerView;
import android.text.SpannableString;
import android.text.style.ForegroundColorSpan;
import android.util.DisplayMetrics;
import android.util.TypedValue;
import android.view.ContextMenu;
import android.view.HapticFeedbackConstants;
import android.view.LayoutInflater;
import android.view.Menu;
import android.view.MenuItem;
import android.view.View;
import android.view.ViewGroup;
import android.widget.Button;
import android.widget.CheckBox;
import android.widget.CompoundButton;
import android.widget.ImageButton;
import android.widget.LinearLayout;
import android.widget.TextView;
import com.crashlytics.android.answers.CustomEvent;
import com.jjoe64.graphview.GraphView;
import com.squareup.otto.Subscribe;
import org.json.JSONException;
import org.json.JSONObject;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.text.DecimalFormat;
import java.util.Calendar;
import java.util.Date;
import java.util.List;
import java.util.Objects;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;
import info.nightscout.androidaps.Config;
import info.nightscout.androidaps.Constants;
import info.nightscout.androidaps.MainApp;
import info.nightscout.androidaps.R;
import info.nightscout.androidaps.data.DetailedBolusInfo;
import info.nightscout.androidaps.data.GlucoseStatus;
import info.nightscout.androidaps.data.IobTotal;
import info.nightscout.androidaps.data.Profile;
import info.nightscout.androidaps.data.QuickWizardEntry;
import info.nightscout.androidaps.db.BgReading;
import info.nightscout.androidaps.db.CareportalEvent;
import info.nightscout.androidaps.db.DatabaseHelper;
import info.nightscout.androidaps.db.ExtendedBolus;
import info.nightscout.androidaps.db.Source;
import info.nightscout.androidaps.db.TempTarget;
import info.nightscout.androidaps.db.TemporaryBasal;
import info.nightscout.androidaps.events.EventCareportalEventChange;
import info.nightscout.androidaps.events.EventExtendedBolusChange;
import info.nightscout.androidaps.events.EventInitializationChanged;
import info.nightscout.androidaps.events.EventPreferenceChange;
import info.nightscout.androidaps.events.EventPumpStatusChanged;
import info.nightscout.androidaps.events.EventRefreshOverview;
import info.nightscout.androidaps.events.EventTempBasalChange;
import info.nightscout.androidaps.events.EventTempTargetChange;
import info.nightscout.androidaps.events.EventTreatmentChange;
import info.nightscout.androidaps.interfaces.PluginBase;
import info.nightscout.androidaps.interfaces.PumpDescription;
import info.nightscout.androidaps.interfaces.PumpInterface;
import info.nightscout.androidaps.plugins.Careportal.CareportalFragment;
import info.nightscout.androidaps.plugins.Careportal.Dialogs.NewNSTreatmentDialog;
import info.nightscout.androidaps.plugins.Careportal.OptionsToShow;
import info.nightscout.androidaps.plugins.ConfigBuilder.ConfigBuilderPlugin;
import info.nightscout.androidaps.plugins.ConstraintsObjectives.ObjectivesPlugin;
import info.nightscout.androidaps.plugins.IobCobCalculator.AutosensData;
import info.nightscout.androidaps.plugins.IobCobCalculator.IobCobCalculatorPlugin;
import info.nightscout.androidaps.plugins.IobCobCalculator.events.EventAutosensCalculationFinished;
import info.nightscout.androidaps.plugins.Loop.LoopPlugin;
import info.nightscout.androidaps.plugins.Loop.events.EventNewOpenLoopNotification;
import info.nightscout.androidaps.plugins.NSClientInternal.broadcasts.BroadcastAckAlarm;
import info.nightscout.androidaps.plugins.NSClientInternal.data.NSDeviceStatus;
import info.nightscout.androidaps.plugins.Overview.Dialogs.CalibrationDialog;
import info.nightscout.androidaps.plugins.Overview.Dialogs.ErrorHelperActivity;
import info.nightscout.androidaps.plugins.Overview.Dialogs.NewCarbsDialog;
import info.nightscout.androidaps.plugins.Overview.Dialogs.NewInsulinDialog;
import info.nightscout.androidaps.plugins.Overview.Dialogs.NewTreatmentDialog;
import info.nightscout.androidaps.plugins.Overview.Dialogs.WizardDialog;
import info.nightscout.androidaps.plugins.Overview.activities.QuickWizardListActivity;
import info.nightscout.androidaps.plugins.Overview.events.EventDismissNotification;
import info.nightscout.androidaps.plugins.Overview.events.EventSetWakeLock;
import info.nightscout.androidaps.plugins.Overview.graphData.GraphData;
import info.nightscout.androidaps.plugins.Overview.notifications.Notification;
import info.nightscout.androidaps.plugins.Overview.notifications.NotificationStore;
import info.nightscout.androidaps.plugins.SourceDexcomG5.SourceDexcomG5Plugin;
import info.nightscout.androidaps.plugins.SourceXdrip.SourceXdripPlugin;
import info.nightscout.androidaps.plugins.Treatments.fragments.ProfileViewerDialog;
import info.nightscout.androidaps.queue.Callback;
import info.nightscout.utils.BolusWizard;
import info.nightscout.utils.DateUtil;
import info.nightscout.utils.DecimalFormatter;
import info.nightscout.utils.FabricPrivacy;
import info.nightscout.utils.NSUpload;
import info.nightscout.utils.OKDialog;
import info.nightscout.utils.Profiler;
import info.nightscout.utils.SP;
import info.nightscout.utils.SingleClickButton;
import info.nightscout.utils.ToastUtils;
public class OverviewFragment extends Fragment implements View.OnClickListener, View.OnLongClickListener {
private static Logger log = LoggerFactory.getLogger(OverviewFragment.class);
TextView timeView;
TextView bgView;
TextView arrowView;
TextView timeAgoView;
TextView deltaView;
TextView avgdeltaView;
TextView baseBasalView;
TextView extendedBolusView;
TextView activeProfileView;
TextView iobView;
TextView cobView;
TextView apsModeView;
TextView tempTargetView;
TextView pumpStatusView;
TextView pumpDeviceStatusView;
TextView openapsDeviceStatusView;
TextView uploaderDeviceStatusView;
LinearLayout loopStatusLayout;
LinearLayout pumpStatusLayout;
GraphView bgGraph;
GraphView iobGraph;
ImageButton chartButton;
TextView iage;
TextView cage;
TextView sage;
TextView pbage;
RecyclerView notificationsView;
LinearLayoutManager llm;
LinearLayout acceptTempLayout;
SingleClickButton acceptTempButton;
SingleClickButton treatmentButton;
SingleClickButton wizardButton;
SingleClickButton calibrationButton;
SingleClickButton insulinButton;
SingleClickButton carbsButton;
SingleClickButton cgmButton;
SingleClickButton quickWizardButton;
CheckBox lockScreen;
boolean smallWidth;
boolean smallHeight;
public static boolean shorttextmode = false;
private boolean accepted;
private int rangeToDisplay = 6; // for graph
Handler sLoopHandler = new Handler();
Runnable sRefreshLoop = null;
final Object updateSync = new Object();
public enum CHARTTYPE {PRE,BAS, IOB, COB, DEV, SEN};
private static final ScheduledExecutorService worker = Executors.newSingleThreadScheduledExecutor();
private static ScheduledFuture<?> scheduledUpdate = null;
public OverviewFragment() {
super();
}
@Override
public View onCreateView(LayoutInflater inflater, ViewGroup container,
Bundle savedInstanceState) {
try {
//check screen width
final DisplayMetrics dm = new DisplayMetrics();
getActivity().getWindowManager().getDefaultDisplay().getMetrics(dm);
int screen_width = dm.widthPixels;
int screen_height = dm.heightPixels;
smallWidth = screen_width <= Constants.SMALL_WIDTH;
smallHeight = screen_height <= Constants.SMALL_HEIGHT;
boolean landscape = screen_height < screen_width;
View view;
if (MainApp.sResources.getBoolean(R.bool.isTablet) && (Config.NSCLIENT || Config.G5UPLOADER)) {
view = inflater.inflate(R.layout.overview_fragment_nsclient_tablet, container, false);
} else if (Config.NSCLIENT || Config.G5UPLOADER) {
view = inflater.inflate(R.layout.overview_fragment_nsclient, container, false);
shorttextmode = true;
} else if (smallHeight || landscape) {
view = inflater.inflate(R.layout.overview_fragment_smallheight, container, false);
} else {
view = inflater.inflate(R.layout.overview_fragment, container, false);
}
timeView = (TextView) view.findViewById(R.id.overview_time);
bgView = (TextView) view.findViewById(R.id.overview_bg);
arrowView = (TextView) view.findViewById(R.id.overview_arrow);
if (smallWidth) {
arrowView.setTextSize(TypedValue.COMPLEX_UNIT_DIP, 35);
}
timeAgoView = (TextView) view.findViewById(R.id.overview_timeago);
deltaView = (TextView) view.findViewById(R.id.overview_delta);
avgdeltaView = (TextView) view.findViewById(R.id.overview_avgdelta);
baseBasalView = (TextView) view.findViewById(R.id.overview_basebasal);
extendedBolusView = (TextView) view.findViewById(R.id.overview_extendedbolus);
activeProfileView = (TextView) view.findViewById(R.id.overview_activeprofile);
pumpStatusView = (TextView) view.findViewById(R.id.overview_pumpstatus);
pumpDeviceStatusView = (TextView) view.findViewById(R.id.overview_pump);
openapsDeviceStatusView = (TextView) view.findViewById(R.id.overview_openaps);
uploaderDeviceStatusView = (TextView) view.findViewById(R.id.overview_uploader);
loopStatusLayout = (LinearLayout) view.findViewById(R.id.overview_looplayout);
pumpStatusLayout = (LinearLayout) view.findViewById(R.id.overview_pumpstatuslayout);
pumpStatusView.setBackgroundColor(MainApp.sResources.getColor(R.color.colorInitializingBorder));
iobView = (TextView) view.findViewById(R.id.overview_iob);
cobView = (TextView) view.findViewById(R.id.overview_cob);
apsModeView = (TextView) view.findViewById(R.id.overview_apsmode);
tempTargetView = (TextView) view.findViewById(R.id.overview_temptarget);
iage = (TextView) view.findViewById(R.id.careportal_insulinage);
cage = (TextView) view.findViewById(R.id.careportal_canulaage);
sage = (TextView) view.findViewById(R.id.careportal_sensorage);
pbage = (TextView) view.findViewById(R.id.careportal_pbage);
bgGraph = (GraphView) view.findViewById(R.id.overview_bggraph);
iobGraph = (GraphView) view.findViewById(R.id.overview_iobgraph);
treatmentButton = (SingleClickButton) view.findViewById(R.id.overview_treatmentbutton);
treatmentButton.setOnClickListener(this);
wizardButton = (SingleClickButton) view.findViewById(R.id.overview_wizardbutton);
wizardButton.setOnClickListener(this);
insulinButton = (SingleClickButton) view.findViewById(R.id.overview_insulinbutton);
if (insulinButton != null)
insulinButton.setOnClickListener(this);
carbsButton = (SingleClickButton) view.findViewById(R.id.overview_carbsbutton);
if (carbsButton != null)
carbsButton.setOnClickListener(this);
acceptTempButton = (SingleClickButton) view.findViewById(R.id.overview_accepttempbutton);
if (acceptTempButton != null)
acceptTempButton.setOnClickListener(this);
quickWizardButton = (SingleClickButton) view.findViewById(R.id.overview_quickwizardbutton);
quickWizardButton.setOnClickListener(this);
quickWizardButton.setOnLongClickListener(this);
calibrationButton = (SingleClickButton) view.findViewById(R.id.overview_calibrationbutton);
if (calibrationButton != null)
calibrationButton.setOnClickListener(this);
cgmButton = (SingleClickButton) view.findViewById(R.id.overview_cgmbutton);
if (cgmButton != null)
cgmButton.setOnClickListener(this);
acceptTempLayout = (LinearLayout) view.findViewById(R.id.overview_accepttemplayout);
notificationsView = (RecyclerView) view.findViewById(R.id.overview_notifications);
notificationsView.setHasFixedSize(true);
llm = new LinearLayoutManager(view.getContext());
notificationsView.setLayoutManager(llm);
bgGraph.getGridLabelRenderer().setGridColor(MainApp.sResources.getColor(R.color.graphgrid));
bgGraph.getGridLabelRenderer().reloadStyles();
iobGraph.getGridLabelRenderer().setGridColor(MainApp.sResources.getColor(R.color.graphgrid));
iobGraph.getGridLabelRenderer().reloadStyles();
iobGraph.getGridLabelRenderer().setHorizontalLabelsVisible(false);
bgGraph.getGridLabelRenderer().setLabelVerticalWidth(50);
iobGraph.getGridLabelRenderer().setLabelVerticalWidth(50);
iobGraph.getGridLabelRenderer().setNumVerticalLabels(5);
rangeToDisplay = SP.getInt(R.string.key_rangetodisplay, 6);
bgGraph.setOnLongClickListener(new View.OnLongClickListener() {
@Override
public boolean onLongClick(View v) {
rangeToDisplay += 6;
rangeToDisplay = rangeToDisplay > 24 ? 6 : rangeToDisplay;
SP.putInt(R.string.key_rangetodisplay, rangeToDisplay);
updateGUI("rangeChange");
return false;
}
});
setupChartMenu(view);
lockScreen = (CheckBox) view.findViewById(R.id.overview_lockscreen);
if (lockScreen != null) {
lockScreen.setChecked(SP.getBoolean("lockscreen", false));
lockScreen.setOnCheckedChangeListener(new CompoundButton.OnCheckedChangeListener() {
@Override
public void onCheckedChanged(CompoundButton buttonView, boolean isChecked) {
SP.putBoolean("lockscreen", isChecked);
MainApp.bus().post(new EventSetWakeLock(isChecked));
}
});
}
return view;
} catch (Exception e) {
FabricPrivacy.logException(e);
log.debug("Runtime Exception", e);
}
return null;
}
private void setupChartMenu(View view) {
chartButton = (ImageButton) view.findViewById(R.id.overview_chartMenuButton);
chartButton.setOnClickListener(new View.OnClickListener() {
@Override
public void onClick(View v) {
final LoopPlugin.LastRun finalLastRun = LoopPlugin.lastRun;
final boolean predictionsAvailable = finalLastRun != null && finalLastRun.request.hasPredictions;
MenuItem item;
CharSequence title;
SpannableString s;
PopupMenu popup = new PopupMenu(v.getContext(), v);
if(predictionsAvailable) {
item = popup.getMenu().add(Menu.NONE, CHARTTYPE.PRE.ordinal(), Menu.NONE, "Predictions");
title = item.getTitle();
s = new SpannableString(title);
s.setSpan(new ForegroundColorSpan(ResourcesCompat.getColor(getResources(), R.color.prediction, null)), 0, s.length(), 0);
item.setTitle(s);
item.setCheckable(true);
item.setChecked(SP.getBoolean("showprediction", true));
}
item = popup.getMenu().add(Menu.NONE, CHARTTYPE.BAS.ordinal(), Menu.NONE, MainApp.gs(R.string.overview_show_basals));
title = item.getTitle();
s = new SpannableString(title);
s.setSpan(new ForegroundColorSpan(ResourcesCompat.getColor(getResources(), R.color.basal, null)), 0, s.length(), 0);
item.setTitle(s);
item.setCheckable(true);
item.setChecked(SP.getBoolean("showbasals", true));
item = popup.getMenu().add(Menu.NONE, CHARTTYPE.IOB.ordinal(), Menu.NONE, MainApp.gs(R.string.overview_show_iob));
title = item.getTitle();
s = new SpannableString(title);
s.setSpan(new ForegroundColorSpan(ResourcesCompat.getColor(getResources(), R.color.iob, null)), 0, s.length(), 0);
item.setTitle(s);
item.setCheckable(true);
item.setChecked(SP.getBoolean("showiob", true));
item = popup.getMenu().add(Menu.NONE, CHARTTYPE.COB.ordinal(), Menu.NONE, MainApp.gs(R.string.overview_show_cob));
title = item.getTitle();
s = new SpannableString(title);
s.setSpan(new ForegroundColorSpan(ResourcesCompat.getColor(getResources(), R.color.cob, null)), 0, s.length(), 0);
item.setTitle(s);
item.setCheckable(true);
item.setChecked(SP.getBoolean("showcob", true));
item = popup.getMenu().add(Menu.NONE, CHARTTYPE.DEV.ordinal(), Menu.NONE, MainApp.gs(R.string.overview_show_deviations));
title = item.getTitle();
s = new SpannableString(title);
s.setSpan(new ForegroundColorSpan(ResourcesCompat.getColor(getResources(), R.color.deviations, null)), 0, s.length(), 0);
item.setTitle(s);
item.setCheckable(true);
item.setChecked(SP.getBoolean("showdeviations", false));
item = popup.getMenu().add(Menu.NONE, CHARTTYPE.SEN.ordinal(), Menu.NONE, MainApp.gs(R.string.overview_show_sensitivity));
title = item.getTitle();
s = new SpannableString(title);
s.setSpan(new ForegroundColorSpan(ResourcesCompat.getColor(getResources(), R.color.ratio, null)), 0, s.length(), 0);
item.setTitle(s);
item.setCheckable(true);
item.setChecked(SP.getBoolean("showratios", false));
popup.setOnMenuItemClickListener(new PopupMenu.OnMenuItemClickListener() {
@Override
public boolean onMenuItemClick(MenuItem item) {
if (item.getItemId() == CHARTTYPE.PRE.ordinal()) {
SP.putBoolean("showprediction", !item.isChecked());
} else if (item.getItemId() == CHARTTYPE.BAS.ordinal()) {
SP.putBoolean("showbasals", !item.isChecked());
} else if (item.getItemId() == CHARTTYPE.IOB.ordinal()) {
SP.putBoolean("showiob", !item.isChecked());
} else if (item.getItemId() == CHARTTYPE.COB.ordinal()) {
SP.putBoolean("showcob", !item.isChecked());
} else if (item.getItemId() == CHARTTYPE.DEV.ordinal()) {
SP.putBoolean("showdeviations", !item.isChecked());
} else if (item.getItemId() == CHARTTYPE.SEN.ordinal()) {
SP.putBoolean("showratios", !item.isChecked());
}
scheduleUpdateGUI("onGraphCheckboxesCheckedChanged");
return true;
}
});
chartButton.setImageResource(R.drawable.ic_arrow_drop_up_white_24dp);
popup.setOnDismissListener(new PopupMenu.OnDismissListener() {
@Override
public void onDismiss(PopupMenu menu) {
chartButton.setImageResource(R.drawable.ic_arrow_drop_down_white_24dp);
}
});
popup.show();
}
});
}
@Override
public void onCreateContextMenu(ContextMenu menu, View v, ContextMenu.ContextMenuInfo menuInfo) {
super.onCreateContextMenu(menu, v, menuInfo);
if (v == apsModeView) {
final LoopPlugin activeloop = ConfigBuilderPlugin.getActiveLoop();
final PumpDescription pumpDescription = ConfigBuilderPlugin.getActivePump().getPumpDescription();
if (activeloop == null)
return;
menu.setHeaderTitle(MainApp.sResources.getString(R.string.loop));
if (activeloop.isEnabled(PluginBase.LOOP)) {
menu.add(MainApp.sResources.getString(R.string.disableloop));
if (!activeloop.isSuspended()) {
menu.add(MainApp.sResources.getString(R.string.suspendloopfor1h));
menu.add(MainApp.sResources.getString(R.string.suspendloopfor2h));
menu.add(MainApp.sResources.getString(R.string.suspendloopfor3h));
menu.add(MainApp.sResources.getString(R.string.suspendloopfor10h));
if (pumpDescription.tempDurationStep15mAllowed)
menu.add(MainApp.sResources.getString(R.string.disconnectpumpfor15m));
if (pumpDescription.tempDurationStep30mAllowed)
menu.add(MainApp.sResources.getString(R.string.disconnectpumpfor30m));
menu.add(MainApp.sResources.getString(R.string.disconnectpumpfor1h));
menu.add(MainApp.sResources.getString(R.string.disconnectpumpfor2h));
menu.add(MainApp.sResources.getString(R.string.disconnectpumpfor3h));
} else {
menu.add(MainApp.sResources.getString(R.string.resume));
}
}
if (!activeloop.isEnabled(PluginBase.LOOP))
menu.add(MainApp.sResources.getString(R.string.enableloop));
} else if (v == activeProfileView) {
menu.setHeaderTitle(MainApp.sResources.getString(R.string.profile));
menu.add(MainApp.sResources.getString(R.string.danar_viewprofile));
menu.add(MainApp.sResources.getString(R.string.careportal_profileswitch));
}
}
@Override
public boolean onContextItemSelected(MenuItem item) {
final LoopPlugin activeloop = ConfigBuilderPlugin.getActiveLoop();
if (item.getTitle().equals(MainApp.sResources.getString(R.string.disableloop))) {
activeloop.setFragmentEnabled(PluginBase.LOOP, false);
activeloop.setFragmentVisible(PluginBase.LOOP, false);
MainApp.getConfigBuilder().storeSettings();
updateGUI("suspendmenu");
MainApp.getConfigBuilder().getCommandQueue().cancelTempBasal(true, new Callback() {
@Override
public void run() {
if (!result.success) {
ToastUtils.showToastInUiThread(MainApp.instance().getApplicationContext(), MainApp.sResources.getString(R.string.tempbasaldeliveryerror));
}
}
});
NSUpload.uploadOpenAPSOffline(24 * 60); // upload 24h, we don't know real duration
return true;
} else if (item.getTitle().equals(MainApp.sResources.getString(R.string.enableloop))) {
activeloop.setFragmentEnabled(PluginBase.LOOP, true);
activeloop.setFragmentVisible(PluginBase.LOOP, true);
MainApp.getConfigBuilder().storeSettings();
updateGUI("suspendmenu");
NSUpload.uploadOpenAPSOffline(0);
return true;
} else if (item.getTitle().equals(MainApp.sResources.getString(R.string.resume))) {
activeloop.suspendTo(0L);
updateGUI("suspendmenu");
MainApp.getConfigBuilder().getCommandQueue().cancelTempBasal(true, new Callback() {
@Override
public void run() {
if (!result.success) {
ToastUtils.showToastInUiThread(MainApp.instance().getApplicationContext(), MainApp.sResources.getString(R.string.tempbasaldeliveryerror));
}
}
});
NSUpload.uploadOpenAPSOffline(0);
return true;
} else if (item.getTitle().equals(MainApp.sResources.getString(R.string.suspendloopfor1h))) {
MainApp.getConfigBuilder().suspendLoop(60);
updateGUI("suspendmenu");
return true;
} else if (item.getTitle().equals(MainApp.sResources.getString(R.string.suspendloopfor2h))) {
MainApp.getConfigBuilder().suspendLoop(120);
updateGUI("suspendmenu");
return true;
} else if (item.getTitle().equals(MainApp.sResources.getString(R.string.suspendloopfor3h))) {
MainApp.getConfigBuilder().suspendLoop(180);
updateGUI("suspendmenu");
return true;
} else if (item.getTitle().equals(MainApp.sResources.getString(R.string.suspendloopfor10h))) {
MainApp.getConfigBuilder().suspendLoop(600);
updateGUI("suspendmenu");
return true;
} else if (item.getTitle().equals(MainApp.sResources.getString(R.string.disconnectpumpfor15m))) {
MainApp.getConfigBuilder().disconnectPump(15);
updateGUI("suspendmenu");
return true;
} else if (item.getTitle().equals(MainApp.sResources.getString(R.string.disconnectpumpfor30m))) {
MainApp.getConfigBuilder().disconnectPump(30);
updateGUI("suspendmenu");
return true;
} else if (item.getTitle().equals(MainApp.sResources.getString(R.string.disconnectpumpfor1h))) {
MainApp.getConfigBuilder().disconnectPump(60);
updateGUI("suspendmenu");
return true;
} else if (item.getTitle().equals(MainApp.sResources.getString(R.string.disconnectpumpfor2h))) {
MainApp.getConfigBuilder().disconnectPump(120);
updateGUI("suspendmenu");
return true;
} else if (item.getTitle().equals(MainApp.sResources.getString(R.string.disconnectpumpfor3h))) {
MainApp.getConfigBuilder().disconnectPump(180);
updateGUI("suspendmenu");
return true;
} else if (item.getTitle().equals(MainApp.sResources.getString(R.string.careportal_profileswitch))) {
NewNSTreatmentDialog newDialog = new NewNSTreatmentDialog();
final OptionsToShow profileswitch = CareportalFragment.PROFILESWITCHDIRECT;
profileswitch.executeProfileSwitch = true;
newDialog.setOptions(profileswitch, R.string.careportal_profileswitch);
newDialog.show(getFragmentManager(), "NewNSTreatmentDialog");
} else if (item.getTitle().equals(MainApp.sResources.getString(R.string.danar_viewprofile))) {
ProfileViewerDialog pvd = ProfileViewerDialog.newInstance(System.currentTimeMillis());
FragmentManager manager = getFragmentManager();
pvd.show(manager, "ProfileViewDialog");
}
return super.onContextItemSelected(item);
}
@Override
public void onClick(View v) {
boolean xdrip = MainApp.getSpecificPlugin(SourceXdripPlugin.class) != null && MainApp.getSpecificPlugin(SourceXdripPlugin.class).isEnabled(PluginBase.BGSOURCE);
boolean g5 = MainApp.getSpecificPlugin(SourceDexcomG5Plugin.class) != null && MainApp.getSpecificPlugin(SourceDexcomG5Plugin.class).isEnabled(PluginBase.BGSOURCE);
String units = MainApp.getConfigBuilder().getProfileUnits();
FragmentManager manager = getFragmentManager();
switch (v.getId()) {
case R.id.overview_accepttempbutton:
onClickAcceptTemp();
break;
case R.id.overview_quickwizardbutton:
onClickQuickwizard();
break;
case R.id.overview_wizardbutton:
WizardDialog wizardDialog = new WizardDialog();
wizardDialog.show(manager, "WizardDialog");
break;
case R.id.overview_calibrationbutton:
if (xdrip) {
CalibrationDialog calibrationDialog = new CalibrationDialog();
calibrationDialog.show(manager, "CalibrationDialog");
} else if (g5) {
try {
Intent i = new Intent("com.dexcom.cgm.activities.MeterEntryActivity");
startActivity(i);
} catch (ActivityNotFoundException e) {
ToastUtils.showToastInUiThread(getActivity(), MainApp.gs(R.string.g5appnotdetected));
}
}
break;
case R.id.overview_cgmbutton:
if (xdrip)
openCgmApp("com.eveningoutpost.dexdrip");
else if (g5 && units.equals(Constants.MGDL))
openCgmApp("com.dexcom.cgm.region5.mgdl");
else if (g5 && units.equals(Constants.MMOL))
openCgmApp("com.dexcom.cgm.region5.mmol");
break;
case R.id.overview_treatmentbutton:
NewTreatmentDialog treatmentDialogFragment = new NewTreatmentDialog();
treatmentDialogFragment.show(manager, "TreatmentDialog");
break;
case R.id.overview_insulinbutton:
new NewInsulinDialog().show(manager, "InsulinDialog");
break;
case R.id.overview_carbsbutton:
new NewCarbsDialog().show(manager, "CarbsDialog");
break;
case R.id.overview_pumpstatus:
if (ConfigBuilderPlugin.getActivePump().isSuspended() || !ConfigBuilderPlugin.getActivePump().isInitialized())
ConfigBuilderPlugin.getCommandQueue().readStatus("RefreshClicked", null);
break;
}
}
public boolean openCgmApp(String packageName) {
PackageManager packageManager = getContext().getPackageManager();
try {
Intent intent = packageManager.getLaunchIntentForPackage(packageName);
if (intent == null) {
throw new ActivityNotFoundException();
}
intent.addCategory(Intent.CATEGORY_LAUNCHER);
getContext().startActivity(intent);
return true;
} catch (ActivityNotFoundException e) {
new AlertDialog.Builder(getContext())
.setMessage(R.string.error_starting_cgm)
.setPositiveButton("OK", null)
.show();
return false;
}
}
@Override
public boolean onLongClick(View v) {
switch (v.getId()) {
case R.id.overview_quickwizardbutton:
Intent i = new Intent(v.getContext(), QuickWizardListActivity.class);
startActivity(i);
return true;
}
return false;
}
private void onClickAcceptTemp() {
Profile profile = MainApp.getConfigBuilder().getProfile();
if (ConfigBuilderPlugin.getActiveLoop() != null && profile != null) {
ConfigBuilderPlugin.getActiveLoop().invoke("Accept temp button", false);
final LoopPlugin.LastRun finalLastRun = LoopPlugin.lastRun;
if (finalLastRun != null && finalLastRun.lastAPSRun != null && finalLastRun.constraintsProcessed.isChangeRequested()) {
AlertDialog.Builder builder = new AlertDialog.Builder(getContext());
builder.setTitle(getContext().getString(R.string.confirmation));
builder.setMessage(getContext().getString(R.string.setbasalquestion) + "\n" + finalLastRun.constraintsProcessed);
builder.setPositiveButton(getContext().getString(R.string.ok), new DialogInterface.OnClickListener() {
public void onClick(DialogInterface dialog, int id) {
hideTempRecommendation();
clearNotification();
MainApp.getConfigBuilder().applyTBRRequest(finalLastRun.constraintsProcessed, profile, new Callback() {
@Override
public void run() {
if (result.enacted) {
finalLastRun.tbrSetByPump = result;
finalLastRun.lastEnact = new Date();
finalLastRun.lastOpenModeAccept = new Date();
NSUpload.uploadDeviceStatus();
ObjectivesPlugin objectivesPlugin = MainApp.getSpecificPlugin(ObjectivesPlugin.class);
if (objectivesPlugin != null) {
ObjectivesPlugin.manualEnacts++;
ObjectivesPlugin.saveProgress();
}
}
scheduleUpdateGUI("onClickAcceptTemp");
}
});
FabricPrivacy.getInstance().logCustom(new CustomEvent("AcceptTemp"));
}
});
builder.setNegativeButton(getContext().getString(R.string.cancel), null);
builder.show();
}
}
}
void onClickQuickwizard() {
final BgReading actualBg = DatabaseHelper.actualBg();
final Profile profile = MainApp.getConfigBuilder().getProfile();
final TempTarget tempTarget = MainApp.getConfigBuilder().getTempTargetFromHistory();
final QuickWizardEntry quickWizardEntry = OverviewPlugin.getPlugin().quickWizard.getActive();
if (quickWizardEntry != null && actualBg != null && profile != null) {
quickWizardButton.setVisibility(View.VISIBLE);
final BolusWizard wizard = quickWizardEntry.doCalc(profile, tempTarget, actualBg, true);
final JSONObject boluscalcJSON = new JSONObject();
try {
boluscalcJSON.put("eventTime", DateUtil.toISOString(new Date()));
boluscalcJSON.put("targetBGLow", wizard.targetBGLow);
boluscalcJSON.put("targetBGHigh", wizard.targetBGHigh);
boluscalcJSON.put("isf", wizard.sens);
boluscalcJSON.put("ic", wizard.ic);
boluscalcJSON.put("iob", -(wizard.insulingFromBolusIOB + wizard.insulingFromBasalsIOB));
boluscalcJSON.put("bolusiobused", true);
boluscalcJSON.put("basaliobused", true);
boluscalcJSON.put("bg", actualBg.valueToUnits(profile.getUnits()));
boluscalcJSON.put("insulinbg", wizard.insulinFromBG);
boluscalcJSON.put("insulinbgused", true);
boluscalcJSON.put("bgdiff", wizard.bgDiff);
boluscalcJSON.put("insulincarbs", wizard.insulinFromCarbs);
boluscalcJSON.put("carbs", quickWizardEntry.carbs());
boluscalcJSON.put("othercorrection", 0d);
boluscalcJSON.put("insulintrend", wizard.insulinFromTrend);
boluscalcJSON.put("insulin", wizard.calculatedTotalInsulin);
} catch (JSONException e) {
log.error("Unhandled exception", e);
}
if (wizard.calculatedTotalInsulin > 0d && quickWizardEntry.carbs() > 0d) {
DecimalFormat formatNumber2decimalplaces = new DecimalFormat("0.00");
String confirmMessage = getString(R.string.entertreatmentquestion);
Double insulinAfterConstraints = MainApp.getConfigBuilder().applyBolusConstraints(wizard.calculatedTotalInsulin);
Integer carbsAfterConstraints = MainApp.getConfigBuilder().applyCarbsConstraints(quickWizardEntry.carbs());
confirmMessage += "\n" + getString(R.string.bolus) + ": " + formatNumber2decimalplaces.format(insulinAfterConstraints) + "U";
confirmMessage += "\n" + getString(R.string.carbs) + ": " + carbsAfterConstraints + "g";
if (!insulinAfterConstraints.equals(wizard.calculatedTotalInsulin) || !carbsAfterConstraints.equals(quickWizardEntry.carbs())) {
AlertDialog.Builder builder = new AlertDialog.Builder(getContext());
builder.setTitle(MainApp.sResources.getString(R.string.treatmentdeliveryerror));
builder.setMessage(getString(R.string.constraints_violation) + "\n" + getString(R.string.changeyourinput));
builder.setPositiveButton(MainApp.sResources.getString(R.string.ok), null);
builder.show();
return;
}
final Double finalInsulinAfterConstraints = insulinAfterConstraints;
final Integer finalCarbsAfterConstraints = carbsAfterConstraints;
final Context context = getContext();
final AlertDialog.Builder builder = new AlertDialog.Builder(context);
accepted = false;
builder.setTitle(MainApp.sResources.getString(R.string.confirmation));
builder.setMessage(confirmMessage);
builder.setPositiveButton(getString(R.string.ok), new DialogInterface.OnClickListener() {
public void onClick(DialogInterface dialog, int id) {
synchronized (builder) {
if (accepted) {
log.debug("guarding: already accepted");
return;
}
accepted = true;
if (finalInsulinAfterConstraints > 0 || finalCarbsAfterConstraints > 0) {
if (wizard.superBolus) {
final LoopPlugin activeloop = ConfigBuilderPlugin.getActiveLoop();
if (activeloop != null) {
activeloop.superBolusTo(System.currentTimeMillis() + 2 * 60L * 60 * 1000);
MainApp.bus().post(new EventRefreshOverview("WizardDialog"));
}
ConfigBuilderPlugin.getCommandQueue().tempBasalPercent(0, 120, true, new Callback() {
@Override
public void run() {
if (!result.success) {
Intent i = new Intent(MainApp.instance(), ErrorHelperActivity.class);
i.putExtra("soundid", R.raw.boluserror);
i.putExtra("status", result.comment);
i.putExtra("title", MainApp.sResources.getString(R.string.tempbasaldeliveryerror));
i.addFlags(Intent.FLAG_ACTIVITY_NEW_TASK);
MainApp.instance().startActivity(i);
}
}
});
}
DetailedBolusInfo detailedBolusInfo = new DetailedBolusInfo();
detailedBolusInfo.eventType = CareportalEvent.BOLUSWIZARD;
detailedBolusInfo.insulin = finalInsulinAfterConstraints;
detailedBolusInfo.carbs = finalCarbsAfterConstraints;
detailedBolusInfo.context = context;
detailedBolusInfo.boluscalc = boluscalcJSON;
detailedBolusInfo.source = Source.USER;
ConfigBuilderPlugin.getCommandQueue().bolus(detailedBolusInfo, new Callback() {
@Override
public void run() {
if (!result.success) {
Intent i = new Intent(MainApp.instance(), ErrorHelperActivity.class);
i.putExtra("soundid", R.raw.boluserror);
i.putExtra("status", result.comment);
i.putExtra("title", MainApp.sResources.getString(R.string.treatmentdeliveryerror));
i.addFlags(Intent.FLAG_ACTIVITY_NEW_TASK);
MainApp.instance().startActivity(i);
}
}
});
FabricPrivacy.getInstance().logCustom(new CustomEvent("QuickWizard"));
}
}
}
});
builder.setNegativeButton(getString(R.string.cancel), null);
builder.show();
}
}
}
@Override
public void onPause() {
super.onPause();
MainApp.bus().unregister(this);
sLoopHandler.removeCallbacksAndMessages(null);
unregisterForContextMenu(apsModeView);
unregisterForContextMenu(activeProfileView);
}
@Override
public void onResume() {
super.onResume();
MainApp.bus().register(this);
sRefreshLoop = new Runnable() {
@Override
public void run() {
scheduleUpdateGUI("refreshLoop");
sLoopHandler.postDelayed(sRefreshLoop, 60 * 1000L);
}
};
sLoopHandler.postDelayed(sRefreshLoop, 60 * 1000L);
registerForContextMenu(apsModeView);
registerForContextMenu(activeProfileView);
updateGUI("onResume");
}
@Subscribe
public void onStatusEvent(final EventInitializationChanged ev) {
scheduleUpdateGUI("EventInitializationChanged");
}
@Subscribe
public void onStatusEvent(final EventPreferenceChange ev) {
scheduleUpdateGUI("EventPreferenceChange");
}
@Subscribe
public void onStatusEvent(final EventRefreshOverview ev) {
scheduleUpdateGUI(ev.from);
}
@Subscribe
public void onStatusEvent(final EventAutosensCalculationFinished ev) {
scheduleUpdateGUI("EventAutosensCalculationFinished");
}
@Subscribe
public void onStatusEvent(final EventTreatmentChange ev) {
scheduleUpdateGUI("EventTreatmentChange");
}
@Subscribe
public void onStatusEvent(final EventCareportalEventChange ev) {
scheduleUpdateGUI("EventCareportalEventChange");
}
@Subscribe
public void onStatusEvent(final EventTempBasalChange ev) {
scheduleUpdateGUI("EventTempBasalChange");
}
@Subscribe
public void onStatusEvent(final EventExtendedBolusChange ev) {
scheduleUpdateGUI("EventExtendedBolusChange");
}
// Handled by EventAutosensCalculationFinished
// @Subscribe
// public void onStatusEvent(final EventNewBG ev) {
// scheduleUpdateGUI("EventNewBG");
// }
@Subscribe
public void onStatusEvent(final EventNewOpenLoopNotification ev) {
scheduleUpdateGUI("EventNewOpenLoopNotification");
}
// Handled by EventAutosensCalculationFinished
// @Subscribe
// public void onStatusEvent(final EventNewBasalProfile ev) {
// scheduleUpdateGUI("EventNewBasalProfile");
// }
@Subscribe
public void onStatusEvent(final EventTempTargetChange ev) {
scheduleUpdateGUI("EventTempTargetChange");
}
@Subscribe
public void onStatusEvent(final EventPumpStatusChanged s) {
Activity activity = getActivity();
if (activity != null)
activity.runOnUiThread(new Runnable() {
@Override
public void run() {
updatePumpStatus(s.textStatus());
}
});
}
private void hideTempRecommendation() {
Activity activity = getActivity();
if (activity != null)
activity.runOnUiThread(new Runnable() {
@Override
public void run() {
if (acceptTempLayout != null)
acceptTempLayout.setVisibility(View.GONE);
}
});
}
private void clearNotification() {
NotificationManager notificationManager =
(NotificationManager) MainApp.instance().getSystemService(Context.NOTIFICATION_SERVICE);
notificationManager.cancel(Constants.notificationID);
}
private void updatePumpStatus(String status) {
if (!status.equals("")) {
pumpStatusView.setText(status);
pumpStatusLayout.setVisibility(View.VISIBLE);
loopStatusLayout.setVisibility(View.GONE);
} else {
pumpStatusLayout.setVisibility(View.GONE);
loopStatusLayout.setVisibility(View.VISIBLE);
}
}
public void scheduleUpdateGUI(final String from) {
class UpdateRunnable implements Runnable {
public void run() {
Activity activity = getActivity();
if (activity != null)
activity.runOnUiThread(new Runnable() {
@Override
public void run() {
updateGUI(from);
scheduledUpdate = null;
}
});
}
}
// prepare task for execution in 500 msec
// cancel waiting task to prevent multiple updates
if (scheduledUpdate != null)
scheduledUpdate.cancel(false);
Runnable task = new UpdateRunnable();
final int msec = 500;
scheduledUpdate = worker.schedule(task, msec, TimeUnit.MILLISECONDS);
}
@SuppressLint("SetTextI18n")
public void updateGUI(final String from) {
log.debug("updateGUI entered from: " + from);
final Date updateGUIStart = new Date();
if (getActivity() == null)
return;
if (timeView != null) { // may not exist in all layouts
timeView.setText(DateUtil.timeString(new Date()));
}
if (!MainApp.getConfigBuilder().isProfileValid("Overview")) {// app not initialized yet
pumpStatusView.setText(R.string.noprofileset);
pumpStatusLayout.setVisibility(View.VISIBLE);
loopStatusLayout.setVisibility(View.GONE);
return;
}
pumpStatusLayout.setVisibility(View.GONE);
loopStatusLayout.setVisibility(View.VISIBLE);
updateNotifications();
CareportalFragment.updateAge(getActivity(), sage, iage, cage, pbage);
BgReading actualBG = DatabaseHelper.actualBg();
BgReading lastBG = DatabaseHelper.lastBg();
final PumpInterface pump = ConfigBuilderPlugin.getActivePump();
final Profile profile = MainApp.getConfigBuilder().getProfile();
if (profile == null) {
pumpStatusView.setText(R.string.noprofileset);
pumpStatusLayout.setVisibility(View.VISIBLE);
loopStatusLayout.setVisibility(View.GONE);
return;
}
final String units = profile.getUnits();
final double lowLine = OverviewPlugin.getPlugin().determineLowLine(units);
final double highLine = OverviewPlugin.getPlugin().determineHighLine(units);
//Start with updating the BG as it is unaffected by loop.
// **** BG value ****
if (lastBG != null) {
int color = MainApp.sResources.getColor(R.color.inrange);
if (lastBG.valueToUnits(units) < lowLine)
color = MainApp.sResources.getColor(R.color.low);
else if (lastBG.valueToUnits(units) > highLine)
color = MainApp.sResources.getColor(R.color.high);
bgView.setText(lastBG.valueToUnitsToString(units));
arrowView.setText(lastBG.directionToSymbol());
bgView.setTextColor(color);
arrowView.setTextColor(color);
GlucoseStatus glucoseStatus = GlucoseStatus.getGlucoseStatusData();
if (glucoseStatus != null) {
deltaView.setText("Δ " + Profile.toUnitsString(glucoseStatus.delta, glucoseStatus.delta * Constants.MGDL_TO_MMOLL, units) + " " + units);
if (avgdeltaView != null)
avgdeltaView.setText("øΔ15m: " + Profile.toUnitsString(glucoseStatus.short_avgdelta, glucoseStatus.short_avgdelta * Constants.MGDL_TO_MMOLL, units) +
" øΔ40m: " + Profile.toUnitsString(glucoseStatus.long_avgdelta, glucoseStatus.long_avgdelta * Constants.MGDL_TO_MMOLL, units));
} else {
deltaView.setText("Δ " + MainApp.sResources.getString(R.string.notavailable));
if (avgdeltaView != null)
avgdeltaView.setText("");
}
}
// open loop mode
final LoopPlugin.LastRun finalLastRun = LoopPlugin.lastRun;
if (Config.APS && pump.getPumpDescription().isTempBasalCapable) {
apsModeView.setVisibility(View.VISIBLE);
apsModeView.setBackgroundColor(MainApp.sResources.getColor(R.color.loopenabled));
apsModeView.setTextColor(Color.BLACK);
final LoopPlugin activeloop = ConfigBuilderPlugin.getActiveLoop();
if (activeloop != null && activeloop.isEnabled(activeloop.getType()) && activeloop.isSuperBolus()) {
apsModeView.setBackgroundColor(MainApp.sResources.getColor(R.color.looppumpsuspended));
apsModeView.setText(String.format(MainApp.sResources.getString(R.string.loopsuperbolusfor), activeloop.minutesToEndOfSuspend()));
apsModeView.setTextColor(Color.WHITE);
} else if (activeloop != null && activeloop.isEnabled(activeloop.getType()) && activeloop.isSuspended()) {
apsModeView.setBackgroundColor(MainApp.sResources.getColor(R.color.looppumpsuspended));
apsModeView.setText(String.format(MainApp.sResources.getString(R.string.loopsuspendedfor), activeloop.minutesToEndOfSuspend()));
apsModeView.setTextColor(Color.WHITE);
} else if (pump.isSuspended()) {
apsModeView.setBackgroundColor(MainApp.sResources.getColor(R.color.looppumpsuspended));
apsModeView.setText(MainApp.sResources.getString(R.string.pumpsuspended));
apsModeView.setTextColor(Color.WHITE);
} else if (activeloop != null && activeloop.isEnabled(activeloop.getType())) {
if (MainApp.getConfigBuilder().isClosedModeEnabled()) {
apsModeView.setText(MainApp.sResources.getString(R.string.closedloop));
} else {
apsModeView.setText(MainApp.sResources.getString(R.string.openloop));
}
} else {
apsModeView.setBackgroundColor(MainApp.sResources.getColor(R.color.loopdisabled));
apsModeView.setText(MainApp.sResources.getString(R.string.disabledloop));
apsModeView.setTextColor(Color.WHITE);
}
} else {
apsModeView.setVisibility(View.GONE);
}
// temp target
TempTarget tempTarget = MainApp.getConfigBuilder().getTempTargetFromHistory();
if (tempTarget != null) {
tempTargetView.setTextColor(Color.BLACK);
tempTargetView.setBackgroundColor(MainApp.sResources.getColor(R.color.tempTargetBackground));
tempTargetView.setVisibility(View.VISIBLE);
tempTargetView.setText(Profile.toTargetRangeString(tempTarget.low, tempTarget.high, Constants.MGDL, units) + " " + DateUtil.untilString(tempTarget.end()));
} else {
tempTargetView.setTextColor(Color.WHITE);
tempTargetView.setBackgroundColor(MainApp.sResources.getColor(R.color.tempTargetDisabledBackground));
tempTargetView.setText(Profile.toTargetRangeString(profile.getTargetLow(), profile.getTargetHigh(), units, units));
tempTargetView.setVisibility(View.VISIBLE);
}
// **** Temp button ****
if (acceptTempLayout != null) {
boolean showAcceptButton = !MainApp.getConfigBuilder().isClosedModeEnabled(); // Open mode needed
showAcceptButton = showAcceptButton && finalLastRun != null && finalLastRun.lastAPSRun != null; // aps result must exist
showAcceptButton = showAcceptButton && (finalLastRun.lastOpenModeAccept == null || finalLastRun.lastOpenModeAccept.getTime() < finalLastRun.lastAPSRun.getTime()); // never accepted or before last result
showAcceptButton = showAcceptButton && finalLastRun.constraintsProcessed.isChangeRequested(); // change is requested
if (showAcceptButton && pump.isInitialized() && !pump.isSuspended() && ConfigBuilderPlugin.getActiveLoop() != null) {
acceptTempLayout.setVisibility(View.VISIBLE);
acceptTempButton.setText(getContext().getString(R.string.setbasalquestion) + "\n" + finalLastRun.constraintsProcessed);
} else {
acceptTempLayout.setVisibility(View.GONE);
}
}
// **** Calibration & CGM buttons ****
boolean xDripIsBgSource = MainApp.getSpecificPlugin(SourceXdripPlugin.class) != null && MainApp.getSpecificPlugin(SourceXdripPlugin.class).isEnabled(PluginBase.BGSOURCE);
boolean g5IsBgSource = MainApp.getSpecificPlugin(SourceDexcomG5Plugin.class) != null && MainApp.getSpecificPlugin(SourceDexcomG5Plugin.class).isEnabled(PluginBase.BGSOURCE);
boolean bgAvailable = DatabaseHelper.actualBg() != null;
if (calibrationButton != null) {
if ((xDripIsBgSource || g5IsBgSource) && bgAvailable && SP.getBoolean(R.string.key_show_calibration_button, true)) {
calibrationButton.setVisibility(View.VISIBLE);
} else {
calibrationButton.setVisibility(View.GONE);
}
}
if (cgmButton != null) {
if (xDripIsBgSource && SP.getBoolean(R.string.key_show_cgm_button, false)) {
cgmButton.setVisibility(View.VISIBLE);
} else if (g5IsBgSource && SP.getBoolean(R.string.key_show_cgm_button, false)) {
cgmButton.setVisibility(View.VISIBLE);
} else {
cgmButton.setVisibility(View.GONE);
}
}
final TemporaryBasal activeTemp = MainApp.getConfigBuilder().getTempBasalFromHistory(System.currentTimeMillis());
String basalText = "";
if (shorttextmode) {
if (activeTemp != null) {
basalText = "T: " + activeTemp.toStringVeryShort();
} else {
basalText = DecimalFormatter.to2Decimal(profile.getBasal()) + "U/h";
}
baseBasalView.setOnClickListener(new View.OnClickListener() {
@Override
public void onClick(View v) {
String fullText = MainApp.sResources.getString(R.string.pump_basebasalrate_label) + ": " + DecimalFormatter.to2Decimal(profile.getBasal()) + "U/h\n";
if (activeTemp != null) {
fullText += MainApp.sResources.getString(R.string.pump_tempbasal_label) + ": " + activeTemp.toStringFull();
}
OKDialog.show(getActivity(), MainApp.sResources.getString(R.string.basal), fullText, null);
}
});
} else {
if (activeTemp != null) {
basalText = activeTemp.toStringFull() + " ";
}
if (Config.NSCLIENT || Config.G5UPLOADER)
basalText += "(" + DecimalFormatter.to2Decimal(profile.getBasal()) + " U/h)";
else if (pump.getPumpDescription().isTempBasalCapable) {
basalText += "(" + DecimalFormatter.to2Decimal(pump.getBaseBasalRate()) + "U/h)";
}
}
if (activeTemp != null) {
baseBasalView.setTextColor(MainApp.sResources.getColor(R.color.basal));
} else {
baseBasalView.setTextColor(Color.WHITE);
}
baseBasalView.setText(basalText);
final ExtendedBolus extendedBolus = MainApp.getConfigBuilder().getExtendedBolusFromHistory(System.currentTimeMillis());
String extendedBolusText = "";
if (extendedBolusView != null) { // may not exist in all layouts
if (shorttextmode) {
if (extendedBolus != null && !pump.isFakingTempsByExtendedBoluses()) {
extendedBolusText = DecimalFormatter.to2Decimal(extendedBolus.absoluteRate()) + "U/h";
}
extendedBolusView.setText(extendedBolusText);
extendedBolusView.setOnClickListener(new View.OnClickListener() {
@Override
public void onClick(View v) {
OKDialog.show(getActivity(), MainApp.sResources.getString(R.string.extendedbolus), extendedBolus.toString(), null);
}
});
} else {
if (extendedBolus != null && !pump.isFakingTempsByExtendedBoluses()) {
extendedBolusText = extendedBolus.toString();
}
extendedBolusView.setText(extendedBolusText);
}
if (extendedBolusText.equals(""))
extendedBolusView.setVisibility(View.GONE);
else
extendedBolusView.setVisibility(View.VISIBLE);
}
activeProfileView.setText(MainApp.getConfigBuilder().getProfileName());
activeProfileView.setBackgroundColor(Color.GRAY);
tempTargetView.setOnLongClickListener(new View.OnLongClickListener() {
@Override
public boolean onLongClick(View view) {
view.performHapticFeedback(HapticFeedbackConstants.LONG_PRESS);
NewNSTreatmentDialog newTTDialog = new NewNSTreatmentDialog();
final OptionsToShow temptarget = CareportalFragment.TEMPTARGET;
temptarget.executeTempTarget = true;
newTTDialog.setOptions(temptarget, R.string.careportal_temporarytarget);
newTTDialog.show(getFragmentManager(), "NewNSTreatmentDialog");
return true;
}
});
tempTargetView.setLongClickable(true);
// QuickWizard button
QuickWizardEntry quickWizardEntry = OverviewPlugin.getPlugin().quickWizard.getActive();
if (quickWizardEntry != null && lastBG != null && pump.isInitialized() && !pump.isSuspended()) {
quickWizardButton.setVisibility(View.VISIBLE);
String text = quickWizardEntry.buttonText() + "\n" + DecimalFormatter.to0Decimal(quickWizardEntry.carbs()) + "g";
BolusWizard wizard = quickWizardEntry.doCalc(profile, tempTarget, lastBG, false);
text += " " + DecimalFormatter.to2Decimal(wizard.calculatedTotalInsulin) + "U";
quickWizardButton.setText(text);
if (wizard.calculatedTotalInsulin <= 0)
quickWizardButton.setVisibility(View.GONE);
} else
quickWizardButton.setVisibility(View.GONE);
// **** Various treatment buttons ****
if (carbsButton != null) {
if (SP.getBoolean(R.string.key_show_carbs_button, true)
&& (!ConfigBuilderPlugin.getActivePump().getPumpDescription().storesCarbInfo ||
(pump.isInitialized() && !pump.isSuspended()))) {
carbsButton.setVisibility(View.VISIBLE);
} else {
carbsButton.setVisibility(View.GONE);
}
}
if (pump.isInitialized() && !pump.isSuspended()) {
if (treatmentButton != null) {
if (SP.getBoolean(R.string.key_show_treatment_button, false)) {
treatmentButton.setVisibility(View.VISIBLE);
} else {
treatmentButton.setVisibility(View.GONE);
}
}
if (wizardButton != null) {
if (SP.getBoolean(R.string.key_show_wizard_button, true)) {
wizardButton.setVisibility(View.VISIBLE);
} else {
wizardButton.setVisibility(View.GONE);
}
}
if (insulinButton != null) {
if (SP.getBoolean(R.string.key_show_insulin_button, true)) {
insulinButton.setVisibility(View.VISIBLE);
} else {
insulinButton.setVisibility(View.GONE);
}
}
}
// **** BG value ****
if (lastBG == null) { //left this here as it seems you want to exit at this point if it is null...
return;
}
Integer flag = bgView.getPaintFlags();
if (actualBG == null) {
flag |= Paint.STRIKE_THRU_TEXT_FLAG;
} else
flag &= ~Paint.STRIKE_THRU_TEXT_FLAG;
bgView.setPaintFlags(flag);
Long agoMsec = System.currentTimeMillis() - lastBG.date;
int agoMin = (int) (agoMsec / 60d / 1000d);
timeAgoView.setText(String.format(MainApp.sResources.getString(R.string.minago), agoMin));
// iob
MainApp.getConfigBuilder().updateTotalIOBTreatments();
MainApp.getConfigBuilder().updateTotalIOBTempBasals();
final IobTotal bolusIob = MainApp.getConfigBuilder().getLastCalculationTreatments().round();
final IobTotal basalIob = MainApp.getConfigBuilder().getLastCalculationTempBasals().round();
if (shorttextmode) {
String iobtext = DecimalFormatter.to2Decimal(bolusIob.iob + basalIob.basaliob) + "U";
iobView.setText(iobtext);
iobView.setOnClickListener(new View.OnClickListener() {
@Override
public void onClick(View v) {
String iobtext = DecimalFormatter.to2Decimal(bolusIob.iob + basalIob.basaliob) + "U\n"
+ getString(R.string.bolus) + ": " + DecimalFormatter.to2Decimal(bolusIob.iob) + "U\n"
+ getString(R.string.basal) + ": " + DecimalFormatter.to2Decimal(basalIob.basaliob) + "U\n";
OKDialog.show(getActivity(), MainApp.sResources.getString(R.string.iob), iobtext, null);
}
});
} else if (MainApp.sResources.getBoolean(R.bool.isTablet)) {
String iobtext = DecimalFormatter.to2Decimal(bolusIob.iob + basalIob.basaliob) + "U ("
+ getString(R.string.bolus) + ": " + DecimalFormatter.to2Decimal(bolusIob.iob) + "U "
+ getString(R.string.basal) + ": " + DecimalFormatter.to2Decimal(basalIob.basaliob) + "U)";
iobView.setText(iobtext);
} else {
String iobtext = DecimalFormatter.to2Decimal(bolusIob.iob + basalIob.basaliob) + "U ("
+ DecimalFormatter.to2Decimal(bolusIob.iob) + "/"
+ DecimalFormatter.to2Decimal(basalIob.basaliob) + ")";
iobView.setText(iobtext);
}
// cob
if (cobView != null) { // view may not exist
String cobText = "";
AutosensData autosensData = IobCobCalculatorPlugin.getPlugin().getLastAutosensData("Overview COB");
if (autosensData != null)
cobText = (int) autosensData.cob + " g";
cobView.setText(cobText);
}
final boolean predictionsAvailable = finalLastRun != null && finalLastRun.request.hasPredictions;
// pump status from ns
if (pumpDeviceStatusView != null) {
pumpDeviceStatusView.setText(NSDeviceStatus.getInstance().getPumpStatus());
pumpDeviceStatusView.setOnClickListener(new View.OnClickListener() {
@Override
public void onClick(View v) {
OKDialog.show(getActivity(), MainApp.sResources.getString(R.string.pump), NSDeviceStatus.getInstance().getExtendedPumpStatus(), null);
}
});
}
// OpenAPS status from ns
if (openapsDeviceStatusView != null) {
openapsDeviceStatusView.setText(NSDeviceStatus.getInstance().getOpenApsStatus());
openapsDeviceStatusView.setOnClickListener(new View.OnClickListener() {
@Override
public void onClick(View v) {
OKDialog.show(getActivity(), MainApp.sResources.getString(R.string.openaps), NSDeviceStatus.getInstance().getExtendedOpenApsStatus(), null);
}
});
}
// Uploader status from ns
if (uploaderDeviceStatusView != null) {
uploaderDeviceStatusView.setText(NSDeviceStatus.getInstance().getUploaderStatus());
uploaderDeviceStatusView.setOnClickListener(new View.OnClickListener() {
@Override
public void onClick(View v) {
OKDialog.show(getActivity(), MainApp.sResources.getString(R.string.uploader), NSDeviceStatus.getInstance().getExtendedUploaderStatus(), null);
}
});
}
// ****** GRAPH *******
new Thread(new Runnable() {
@Override
public void run() {
// align to hours
Calendar calendar = Calendar.getInstance();
calendar.setTimeInMillis(System.currentTimeMillis());
calendar.set(Calendar.MILLISECOND, 0);
calendar.set(Calendar.SECOND, 0);
calendar.set(Calendar.MINUTE, 0);
calendar.add(Calendar.HOUR, 1);
int hoursToFetch;
final long toTime;
final long fromTime;
final long endTime;
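// split the visible range: when predictions are shown, up to two hours on the right of
// the graph are reserved for predicted values and the remainder is fetched as history;
// otherwise the whole range is history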
if (predictionsAvailable && SP.getBoolean("showprediction", false)) {
int predHours = (int) Math.ceil((finalLastRun.constraintsProcessed.getLatestPredictionsTime() - System.currentTimeMillis()) / (60.0 * 60 * 1000));
predHours = Math.min(2, predHours);
predHours = Math.max(0, predHours);
hoursToFetch = rangeToDisplay - predHours;
toTime = calendar.getTimeInMillis() + 100000; // little bit more to avoid wrong rounding - Graphview specific
fromTime = toTime - hoursToFetch * 60 * 60 * 1000L;
endTime = toTime + predHours * 60 * 60 * 1000L;
} else {
hoursToFetch = rangeToDisplay;
toTime = calendar.getTimeInMillis() + 100000; // little bit more to avoid wrong rounding - Graphview specific
fromTime = toTime - hoursToFetch * 60 * 60 * 1000L;
endTime = toTime;
}
final long now = System.currentTimeMillis();
// ------------------ 1st graph
Profiler.log(log, from + " - 1st graph - START", updateGUIStart);
final GraphData graphData = new GraphData(bgGraph, IobCobCalculatorPlugin.getPlugin());
// **** In range Area ****
graphData.addInRangeArea(fromTime, endTime, lowLine, highLine);
// **** BG ****
if (predictionsAvailable && SP.getBoolean("showprediction", false))
graphData.addBgReadings(fromTime, toTime, lowLine, highLine, finalLastRun.constraintsProcessed);
else
graphData.addBgReadings(fromTime, toTime, lowLine, highLine, null);
// set manual x bounds to have nice steps
graphData.formatAxis(fromTime, endTime);
// Treatments
graphData.addTreatments(fromTime, endTime);
// add basal data
if (pump.getPumpDescription().isTempBasalCapable && SP.getBoolean("showbasals", true)) {
graphData.addBasals(fromTime, now, lowLine / graphData.maxY / 1.2d);
}
// add target line
graphData.addTargetLine(fromTime, toTime, profile);
// **** NOW line ****
graphData.addNowLine(now);
// ------------------ 2nd graph
Profiler.log(log, from + " - 2nd graph - START", updateGUIStart);
final GraphData secondGraphData = new GraphData(iobGraph, IobCobCalculatorPlugin.getPlugin());
boolean useIobForScale = false;
boolean useCobForScale = false;
boolean useDevForScale = false;
boolean useRatioForScale = false;
boolean useDSForScale = false;
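// the first enabled overlay (IOB, COB, deviations, ratios, deviation slope)
// determines the scale of the second graph; the others are drawn relative to it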
if (SP.getBoolean("showiob", true)) {
useIobForScale = true;
} else if (SP.getBoolean("showcob", true)) {
useCobForScale = true;
} else if (SP.getBoolean("showdeviations", false)) {
useDevForScale = true;
} else if (SP.getBoolean("showratios", false)) {
useRatioForScale = true;
} else if (Config.displayDeviationSlope) {
useDSForScale = true;
}
if (SP.getBoolean("showiob", true))
secondGraphData.addIob(fromTime, now, useIobForScale, 1d);
if (SP.getBoolean("showcob", true))
secondGraphData.addCob(fromTime, now, useCobForScale, useCobForScale ? 1d : 0.5d);
if (SP.getBoolean("showdeviations", false))
secondGraphData.addDeviations(fromTime, now, useDevForScale, 1d);
if (SP.getBoolean("showratios", false))
secondGraphData.addRatio(fromTime, now, useRatioForScale, 1d);
if (Config.displayDeviationSlope)
secondGraphData.addDeviationSlope(fromTime, now, useDSForScale, 1d);
// **** NOW line ****
// set manual x bounds to have nice steps
secondGraphData.formatAxis(fromTime, endTime);
secondGraphData.addNowLine(now);
// do GUI update
FragmentActivity activity = getActivity();
if (activity != null) {
activity.runOnUiThread(new Runnable() {
@Override
public void run() {
if (SP.getBoolean("showiob", true) || SP.getBoolean("showcob", true) || SP.getBoolean("showdeviations", false) || SP.getBoolean("showratios", false) || Config.displayDeviationSlope) {
iobGraph.setVisibility(View.VISIBLE);
} else {
iobGraph.setVisibility(View.GONE);
}
// finally enforce drawing of graphs
graphData.performUpdate();
secondGraphData.performUpdate();
Profiler.log(log, from + " - onDataChanged", updateGUIStart);
}
});
}
}
}).start();
Profiler.log(log, from, updateGUIStart);
}
//Notifications
static class RecyclerViewAdapter extends RecyclerView.Adapter<RecyclerViewAdapter.NotificationsViewHolder> {
List<Notification> notificationsList;
RecyclerViewAdapter(List<Notification> notificationsList) {
this.notificationsList = notificationsList;
}
@Override
public NotificationsViewHolder onCreateViewHolder(ViewGroup viewGroup, int viewType) {
View v = LayoutInflater.from(viewGroup.getContext()).inflate(R.layout.overview_notification_item, viewGroup, false);
return new NotificationsViewHolder(v);
}
@Override
public void onBindViewHolder(NotificationsViewHolder holder, int position) {
Notification notification = notificationsList.get(position);
holder.dismiss.setTag(notification);
if (Objects.equals(notification.text, MainApp.sResources.getString(R.string.nsalarm_staledata)))
holder.dismiss.setText("snooze");
holder.text.setText(notification.text);
holder.time.setText(DateUtil.timeString(notification.date));
if (notification.level == Notification.URGENT)
holder.cv.setBackgroundColor(ContextCompat.getColor(MainApp.instance(), R.color.notificationUrgent));
else if (notification.level == Notification.NORMAL)
holder.cv.setBackgroundColor(ContextCompat.getColor(MainApp.instance(), R.color.notificationNormal));
else if (notification.level == Notification.LOW)
holder.cv.setBackgroundColor(ContextCompat.getColor(MainApp.instance(), R.color.notificationLow));
else if (notification.level == Notification.INFO)
holder.cv.setBackgroundColor(ContextCompat.getColor(MainApp.instance(), R.color.notificationInfo));
else if (notification.level == Notification.ANNOUNCEMENT)
holder.cv.setBackgroundColor(ContextCompat.getColor(MainApp.instance(), R.color.notificationAnnouncement));
}
@Override
public int getItemCount() {
return notificationsList.size();
}
@Override
public void onAttachedToRecyclerView(RecyclerView recyclerView) {
super.onAttachedToRecyclerView(recyclerView);
}
static class NotificationsViewHolder extends RecyclerView.ViewHolder implements View.OnClickListener {
CardView cv;
TextView time;
TextView text;
Button dismiss;
NotificationsViewHolder(View itemView) {
super(itemView);
cv = (CardView) itemView.findViewById(R.id.notification_cardview);
time = (TextView) itemView.findViewById(R.id.notification_time);
text = (TextView) itemView.findViewById(R.id.notification_text);
dismiss = (Button) itemView.findViewById(R.id.notification_dismiss);
dismiss.setOnClickListener(this);
}
@Override
public void onClick(View v) {
Notification notification = (Notification) v.getTag();
switch (v.getId()) {
case R.id.notification_dismiss:
MainApp.bus().post(new EventDismissNotification(notification.id));
if (notification.nsAlarm != null) {
BroadcastAckAlarm.handleClearAlarm(notification.nsAlarm, MainApp.instance().getApplicationContext(), 60 * 60 * 1000L);
}
// Adding current time to snooze if we got staleData
log.debug("Notification text is: " + notification.text);
if (notification.text.equals(MainApp.sResources.getString(R.string.nsalarm_staledata))) {
NotificationStore nstore = OverviewPlugin.getPlugin().notificationStore;
long msToSnooze = SP.getInt("nsalarm_staledatavalue", 15) * 60 * 1000L;
log.debug("snooze nsalarm_staledatavalue in minutes is " + SP.getInt("nsalarm_staledatavalue", 15) + "\n in ms is: " + msToSnooze + " currentTimeMillis is: " + System.currentTimeMillis());
nstore.snoozeTo(System.currentTimeMillis() + (SP.getInt("nsalarm_staledatavalue", 15) * 60 * 1000L));
}
break;
}
}
}
}
void updateNotifications() {
Activity activity = getActivity();
if (activity != null)
activity.runOnUiThread(new Runnable() {
@Override
public void run() {
NotificationStore nstore = OverviewPlugin.getPlugin().notificationStore;
nstore.removeExpired();
nstore.unSnooze();
if (nstore.store.size() > 0) {
RecyclerViewAdapter adapter = new RecyclerViewAdapter(nstore.store);
notificationsView.setAdapter(adapter);
notificationsView.setVisibility(View.VISIBLE);
} else {
notificationsView.setVisibility(View.GONE);
}
}
});
}
}
| 1 | 30,079 | ... here the semicolon wanted to run away from the enum ;) | MilosKozak-AndroidAPS | java |
@@ -76,12 +76,12 @@ public class ReplicateFromLeader {
}
log.info("Will start replication from leader with poll interval: {}", pollIntervalStr );
- NamedList<Object> slaveConfig = new NamedList<>();
- slaveConfig.add("fetchFromLeader", Boolean.TRUE);
- slaveConfig.add(ReplicationHandler.SKIP_COMMIT_ON_MASTER_VERSION_ZERO, switchTransactionLog);
- slaveConfig.add("pollInterval", pollIntervalStr);
+ NamedList<Object> secondaryConfig = new NamedList<>();
+ secondaryConfig.add("fetchFromLeader", Boolean.TRUE);
+ secondaryConfig.add(ReplicationHandler.SKIP_COMMIT_ON_MASTER_VERSION_ZERO, switchTransactionLog);
+ secondaryConfig.add("pollInterval", pollIntervalStr);
NamedList<Object> replicationConfig = new NamedList<>();
- replicationConfig.add("slave", slaveConfig);
+ replicationConfig.add("secondary", secondaryConfig);
String lastCommitVersion = getCommitVersion(core);
if (lastCommitVersion != null) { | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.solr.cloud;
import java.lang.invoke.MethodHandles;
import org.apache.lucene.index.IndexCommit;
import org.apache.solr.common.SolrException;
import org.apache.solr.common.params.ModifiableSolrParams;
import org.apache.solr.common.util.NamedList;
import org.apache.solr.core.CoreContainer;
import org.apache.solr.core.SolrConfig;
import org.apache.solr.core.SolrCore;
import org.apache.solr.handler.IndexFetcher;
import org.apache.solr.handler.ReplicationHandler;
import org.apache.solr.request.LocalSolrQueryRequest;
import org.apache.solr.request.SolrQueryRequest;
import org.apache.solr.update.CommitUpdateCommand;
import org.apache.solr.update.SolrIndexWriter;
import org.apache.solr.update.UpdateLog;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class ReplicateFromLeader {
private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
private final CoreContainer cc;
private final String coreName;
private volatile ReplicationHandler replicationProcess;
private volatile long lastVersion = 0;
public ReplicateFromLeader(CoreContainer cc, String coreName) {
this.cc = cc;
this.coreName = coreName;
}
/**
* Start a replication handler thread that will periodically pull indices from the shard leader
* @param switchTransactionLog if true, ReplicationHandler will rotate the transaction log once
* the replication is done
*/
public void startReplication(boolean switchTransactionLog) throws InterruptedException {
try (SolrCore core = cc.getCore(coreName)) {
if (core == null) {
if (cc.isShutDown()) {
return;
} else {
throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "SolrCore not found: " + coreName + " in " + cc.getLoadedCoreNames());
}
}
SolrConfig.UpdateHandlerInfo uinfo = core.getSolrConfig().getUpdateHandlerInfo();
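// poll at half the (soft) auto-commit interval when one is configured,
// otherwise every 3 seconds (1 second when running under Jetty test mode)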
String pollIntervalStr = "00:00:03";
if (System.getProperty("jetty.testMode") != null) {
pollIntervalStr = "00:00:01";
}
if (uinfo.autoCommmitMaxTime != -1) {
pollIntervalStr = toPollIntervalStr(uinfo.autoCommmitMaxTime/2);
} else if (uinfo.autoSoftCommmitMaxTime != -1) {
pollIntervalStr = toPollIntervalStr(uinfo.autoSoftCommmitMaxTime/2);
}
log.info("Will start replication from leader with poll interval: {}", pollIntervalStr );
NamedList<Object> slaveConfig = new NamedList<>();
slaveConfig.add("fetchFromLeader", Boolean.TRUE);
slaveConfig.add(ReplicationHandler.SKIP_COMMIT_ON_MASTER_VERSION_ZERO, switchTransactionLog);
slaveConfig.add("pollInterval", pollIntervalStr);
NamedList<Object> replicationConfig = new NamedList<>();
replicationConfig.add("slave", slaveConfig);
String lastCommitVersion = getCommitVersion(core);
if (lastCommitVersion != null) {
lastVersion = Long.parseLong(lastCommitVersion);
}
replicationProcess = new ReplicationHandler();
if (switchTransactionLog) {
replicationProcess.setPollListener((solrCore, fetchResult) -> {
if (fetchResult == IndexFetcher.IndexFetchResult.INDEX_FETCH_SUCCESS) {
String commitVersion = getCommitVersion(core);
if (commitVersion == null) return;
if (Long.parseLong(commitVersion) == lastVersion) return;
UpdateLog updateLog = solrCore.getUpdateHandler().getUpdateLog();
SolrQueryRequest req = new LocalSolrQueryRequest(core,
new ModifiableSolrParams());
CommitUpdateCommand cuc = new CommitUpdateCommand(req, false);
cuc.setVersion(Long.parseLong(commitVersion));
updateLog.commitAndSwitchToNewTlog(cuc);
lastVersion = Long.parseLong(commitVersion);
}
});
}
replicationProcess.init(replicationConfig);
replicationProcess.inform(core);
}
}
public static String getCommitVersion(SolrCore solrCore) {
IndexCommit commit = solrCore.getDeletionPolicy().getLatestCommit();
try {
String commitVersion = commit.getUserData().get(SolrIndexWriter.COMMIT_COMMAND_VERSION);
if (commitVersion == null) return null;
else return commitVersion;
} catch (Exception e) {
log.warn("Cannot get commit command version from index commit point ",e);
return null;
}
}
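// formats a millisecond interval as the H:M:S string used for ReplicationHandler's
// pollInterval, e.g. 15000 ms becomes "0:0:15" (fields are not zero-padded)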
private static String toPollIntervalStr(int ms) {
int sec = ms/1000;
int hour = sec / 3600;
sec = sec % 3600;
int min = sec / 60;
sec = sec % 60;
return hour + ":" + min + ":" + sec;
}
public void stopReplication() {
if (replicationProcess != null) {
replicationProcess.shutdown();
}
}
}
| 1 | 35,998 | Note that this is in the context of SolrCloud, so "secondary" doesn't apply and should be instead follower | apache-lucene-solr | java |
@@ -1578,11 +1578,11 @@ EOF
assert_response :success, "can't get changesets by closed-ness"
assert_changesets [3, 5, 6, 7, 8, 9]
- get :query, :closed => "true", :user => users(:normal_user).id
+ get :query, :closed => "true", :user => users(:normal_user)
assert_response :success, "can't get changesets by closed-ness and user"
assert_changesets [3, 6, 8]
- get :query, :closed => "true", :user => users(:public_user).id
+ get :query, :closed => "true", :user => users(:public_user)
assert_response :success, "can't get changesets by closed-ness and user"
assert_changesets [7]
| 1 | require "test_helper"
require "changeset_controller"
class ChangesetControllerTest < ActionController::TestCase
api_fixtures
##
# test all routes which lead to this controller
def test_routes
assert_routing(
{ :path => "/api/0.6/changeset/create", :method => :put },
{ :controller => "changeset", :action => "create" }
)
assert_routing(
{ :path => "/api/0.6/changeset/1/upload", :method => :post },
{ :controller => "changeset", :action => "upload", :id => "1" }
)
assert_routing(
{ :path => "/api/0.6/changeset/1/download", :method => :get },
{ :controller => "changeset", :action => "download", :id => "1" }
)
assert_routing(
{ :path => "/api/0.6/changeset/1/expand_bbox", :method => :post },
{ :controller => "changeset", :action => "expand_bbox", :id => "1" }
)
assert_routing(
{ :path => "/api/0.6/changeset/1", :method => :get },
{ :controller => "changeset", :action => "read", :id => "1" }
)
assert_routing(
{ :path => "/api/0.6/changeset/1/subscribe", :method => :post },
{ :controller => "changeset", :action => "subscribe", :id => "1" }
)
assert_routing(
{ :path => "/api/0.6/changeset/1/unsubscribe", :method => :post },
{ :controller => "changeset", :action => "unsubscribe", :id => "1" }
)
assert_routing(
{ :path => "/api/0.6/changeset/1", :method => :put },
{ :controller => "changeset", :action => "update", :id => "1" }
)
assert_routing(
{ :path => "/api/0.6/changeset/1/close", :method => :put },
{ :controller => "changeset", :action => "close", :id => "1" }
)
assert_routing(
{ :path => "/api/0.6/changeset/1/comment", :method => :post },
{ :controller => "changeset", :action => "comment", :id => "1" }
)
assert_routing(
{ :path => "/api/0.6/changeset/comment/1/hide", :method => :post },
{ :controller => "changeset", :action => "hide_comment", :id => "1" }
)
assert_routing(
{ :path => "/api/0.6/changeset/comment/1/unhide", :method => :post },
{ :controller => "changeset", :action => "unhide_comment", :id => "1" }
)
assert_routing(
{ :path => "/api/0.6/changesets", :method => :get },
{ :controller => "changeset", :action => "query" }
)
assert_routing(
{ :path => "/changeset/1/comments/feed", :method => :get },
{ :controller => "changeset", :action => "comments_feed", :id => "1", :format => "rss" }
)
assert_routing(
{ :path => "/user/name/history", :method => :get },
{ :controller => "changeset", :action => "list", :display_name => "name" }
)
assert_routing(
{ :path => "/user/name/history/feed", :method => :get },
{ :controller => "changeset", :action => "feed", :display_name => "name", :format => :atom }
)
assert_routing(
{ :path => "/history/friends", :method => :get },
{ :controller => "changeset", :action => "list", :friends => true, :format => :html }
)
assert_routing(
{ :path => "/history/nearby", :method => :get },
{ :controller => "changeset", :action => "list", :nearby => true, :format => :html }
)
assert_routing(
{ :path => "/history", :method => :get },
{ :controller => "changeset", :action => "list" }
)
assert_routing(
{ :path => "/history/feed", :method => :get },
{ :controller => "changeset", :action => "feed", :format => :atom }
)
assert_routing(
{ :path => "/history/comments/feed", :method => :get },
{ :controller => "changeset", :action => "comments_feed", :format => "rss" }
)
end
# -----------------------
# Test simple changeset creation
# -----------------------
def test_create
basic_authorization users(:normal_user).email, "test"
# Create the first user's changeset
content "<osm><changeset>" +
"<tag k='created_by' v='osm test suite checking changesets'/>" +
"</changeset></osm>"
put :create
assert_require_public_data
basic_authorization users(:public_user).email, "test"
# Create the first user's changeset
content "<osm><changeset>" +
"<tag k='created_by' v='osm test suite checking changesets'/>" +
"</changeset></osm>"
put :create
assert_response :success, "Creation of changeset did not return sucess status"
newid = @response.body.to_i
# check end time, should be an hour ahead of creation time
cs = Changeset.find(newid)
duration = cs.closed_at - cs.created_at
# the difference can either be a rational, or a floating point number
# of seconds, depending on the code path taken :-(
if duration.class == Rational
assert_equal Rational(1, 24), duration, "initial idle timeout should be an hour (#{cs.created_at} -> #{cs.closed_at})"
else
# must be number of seconds...
assert_equal 3600, duration.round, "initial idle timeout should be an hour (#{cs.created_at} -> #{cs.closed_at})"
end
# checks if uploader was subscribed
assert_equal 1, cs.subscribers.length
end
def test_create_invalid
basic_authorization users(:normal_user).email, "test"
content "<osm><changeset></osm>"
put :create
assert_require_public_data
## Try the public user
basic_authorization users(:public_user).email, "test"
content "<osm><changeset></osm>"
put :create
assert_response :bad_request, "creating a invalid changeset should fail"
end
def test_create_invalid_no_content
## First check with no auth
put :create
assert_response :unauthorized, "shouldn't be able to create a changeset with no auth"
## Now try to with the non-public user
basic_authorization users(:normal_user).email, "test"
put :create
assert_require_public_data
## Try the inactive user
basic_authorization users(:inactive_user).email, "test"
put :create
assert_inactive_user
## Now try to use the public user
basic_authorization users(:public_user).email, "test"
put :create
assert_response :bad_request, "creating a changeset with no content should fail"
end
def test_create_wrong_method
basic_authorization users(:public_user).email, "test"
get :create
assert_response :method_not_allowed
post :create
assert_response :method_not_allowed
end
##
# check that the changeset can be read and returns the correct
# document structure.
def test_read
changeset_id = changesets(:normal_user_first_change).id
get :read, :id => changeset_id
assert_response :success, "cannot get first changeset"
assert_select "osm[version='#{API_VERSION}'][generator='OpenStreetMap server']", 1
assert_select "osm>changeset[id='#{changeset_id}']", 1
assert_select "osm>changeset>discussion", 0
get :read, :id => changeset_id, :include_discussion => true
assert_response :success, "cannot get first changeset with comments"
assert_select "osm[version='#{API_VERSION}'][generator='OpenStreetMap server']", 1
assert_select "osm>changeset[id='#{changeset_id}']", 1
assert_select "osm>changeset>discussion", 1
assert_select "osm>changeset>discussion>comment", 0
changeset_id = changesets(:normal_user_closed_change).id
create_list(:changeset_comment, 3, :changeset_id => changeset_id)
get :read, :id => changeset_id, :include_discussion => true
assert_response :success, "cannot get closed changeset with comments"
assert_select "osm[version='#{API_VERSION}'][generator='OpenStreetMap server']", 1
assert_select "osm>changeset[id='#{changeset_id}']", 1
assert_select "osm>changeset>discussion", 1
assert_select "osm>changeset>discussion>comment", 3
end
##
# check that a changeset that doesn't exist returns an appropriate message
def test_read_not_found
[0, -32, 233455644, "afg", "213"].each do |id|
begin
get :read, :id => id
assert_response :not_found, "should get a not found"
rescue ActionController::UrlGenerationError => ex
assert_match /No route matches/, ex.to_s
end
end
end
##
# test that the user who opened a change can close it
def test_close
## Try without authentication
put :close, :id => changesets(:public_user_first_change).id
assert_response :unauthorized
## Try using the non-public user
basic_authorization users(:normal_user).email, "test"
put :close, :id => changesets(:normal_user_first_change).id
assert_require_public_data
## The try with the public user
basic_authorization users(:public_user).email, "test"
cs_id = changesets(:public_user_first_change).id
put :close, :id => cs_id
assert_response :success
# test that it really is closed now
cs = Changeset.find(cs_id)
assert(!cs.is_open?,
"changeset should be closed now (#{cs.closed_at} > #{Time.now.getutc}.")
end
##
# test that a different user can't close another user's changeset
def test_close_invalid
basic_authorization users(:public_user).email, "test"
put :close, :id => changesets(:normal_user_first_change).id
assert_response :conflict
assert_equal "The user doesn't own that changeset", @response.body
end
##
# test that you can't close using another method
def test_close_method_invalid
basic_authorization users(:public_user).email, "test"
cs_id = changesets(:public_user_first_change).id
get :close, :id => cs_id
assert_response :method_not_allowed
post :close, :id => cs_id
assert_response :method_not_allowed
end
##
# check that you can't close a changeset that isn't found
def test_close_not_found
cs_ids = [0, -132, "123"]
# First try to do it with no auth
cs_ids.each do |id|
begin
put :close, :id => id
assert_response :unauthorized, "Shouldn't be able close the non-existant changeset #{id}, when not authorized"
rescue ActionController::UrlGenerationError => ex
assert_match /No route matches/, ex.to_s
end
end
# Now try with auth
basic_authorization users(:public_user).email, "test"
cs_ids.each do |id|
begin
put :close, :id => id
assert_response :not_found, "The changeset #{id} doesn't exist, so can't be closed"
rescue ActionController::UrlGenerationError => ex
assert_match /No route matches/, ex.to_s
end
end
end
##
# upload something simple, but valid and check that it can
# be read back ok
# Also try without auth and another user.
def test_upload_simple_valid
## Try with no auth
changeset_id = changesets(:public_user_first_change).id
# simple diff to change a node, way and relation by removing
# their tags
diff = <<EOF
<osmChange>
<modify>
<node id='1' lon='0' lat='0' changeset='#{changeset_id}' version='1'/>
<way id='1' changeset='#{changeset_id}' version='1'>
<nd ref='3'/>
</way>
</modify>
<modify>
<relation id='1' changeset='#{changeset_id}' version='1'>
<member type='way' role='some' ref='3'/>
<member type='node' role='some' ref='5'/>
<member type='relation' role='some' ref='3'/>
</relation>
</modify>
</osmChange>
EOF
# upload it
content diff
post :upload, :id => changeset_id
assert_response :unauthorized,
"shouldnn't be able to upload a simple valid diff to changeset: #{@response.body}"
## Now try with a private user
basic_authorization users(:normal_user).email, "test"
changeset_id = changesets(:normal_user_first_change).id
# simple diff to change a node, way and relation by removing
# their tags
diff = <<EOF
<osmChange>
<modify>
<node id='1' lon='0' lat='0' changeset='#{changeset_id}' version='1'/>
<way id='1' changeset='#{changeset_id}' version='1'>
<nd ref='3'/>
</way>
</modify>
<modify>
<relation id='1' changeset='#{changeset_id}' version='1'>
<member type='way' role='some' ref='3'/>
<member type='node' role='some' ref='5'/>
<member type='relation' role='some' ref='3'/>
</relation>
</modify>
</osmChange>
EOF
# upload it
content diff
post :upload, :id => changeset_id
assert_response :forbidden,
"can't upload a simple valid diff to changeset: #{@response.body}"
## Now try with the public user
basic_authorization users(:public_user).email, "test"
changeset_id = changesets(:public_user_first_change).id
# simple diff to change a node, way and relation by removing
# their tags
diff = <<EOF
<osmChange>
<modify>
<node id='1' lon='0' lat='0' changeset='#{changeset_id}' version='1'/>
<way id='1' changeset='#{changeset_id}' version='1'>
<nd ref='3'/>
</way>
</modify>
<modify>
<relation id='1' changeset='#{changeset_id}' version='1'>
<member type='way' role='some' ref='3'/>
<member type='node' role='some' ref='5'/>
<member type='relation' role='some' ref='3'/>
</relation>
</modify>
</osmChange>
EOF
# upload it
content diff
post :upload, :id => changeset_id
assert_response :success,
"can't upload a simple valid diff to changeset: #{@response.body}"
# check that the changes made it into the database
assert_equal 0, Node.find(1).tags.size, "node 1 should now have no tags"
assert_equal 0, Way.find(1).tags.size, "way 1 should now have no tags"
assert_equal 0, Relation.find(1).tags.size, "relation 1 should now have no tags"
end
##
# upload something which creates new objects using placeholders
def test_upload_create_valid
basic_authorization users(:public_user).email, "test"
cs_id = changesets(:public_user_first_change).id
# simple diff to create a node way and relation using placeholders
diff = <<EOF
<osmChange>
<create>
<node id='-1' lon='0' lat='0' changeset='#{cs_id}'>
<tag k='foo' v='bar'/>
<tag k='baz' v='bat'/>
</node>
<way id='-1' changeset='#{cs_id}'>
<nd ref='3'/>
</way>
</create>
<create>
<relation id='-1' changeset='#{cs_id}'>
<member type='way' role='some' ref='3'/>
<member type='node' role='some' ref='5'/>
<member type='relation' role='some' ref='3'/>
</relation>
</create>
</osmChange>
EOF
# upload it
content diff
post :upload, :id => cs_id
assert_response :success,
"can't upload a simple valid creation to changeset: #{@response.body}"
# check the returned payload
assert_select "diffResult[version='#{API_VERSION}'][generator='OpenStreetMap server']", 1
assert_select "diffResult>node", 1
assert_select "diffResult>way", 1
assert_select "diffResult>relation", 1
# inspect the response to find out what the new element IDs are
doc = XML::Parser.string(@response.body).parse
new_node_id = doc.find("//diffResult/node").first["new_id"].to_i
new_way_id = doc.find("//diffResult/way").first["new_id"].to_i
new_rel_id = doc.find("//diffResult/relation").first["new_id"].to_i
# check the old IDs are all present and negative one
assert_equal -1, doc.find("//diffResult/node").first["old_id"].to_i
assert_equal -1, doc.find("//diffResult/way").first["old_id"].to_i
assert_equal -1, doc.find("//diffResult/relation").first["old_id"].to_i
# check the versions are present and equal one
assert_equal 1, doc.find("//diffResult/node").first["new_version"].to_i
assert_equal 1, doc.find("//diffResult/way").first["new_version"].to_i
assert_equal 1, doc.find("//diffResult/relation").first["new_version"].to_i
# check that the changes made it into the database
assert_equal 2, Node.find(new_node_id).tags.size, "new node should have two tags"
assert_equal 0, Way.find(new_way_id).tags.size, "new way should have no tags"
assert_equal 0, Relation.find(new_rel_id).tags.size, "new relation should have no tags"
end
##
# test a complex delete where we delete elements which rely on each other
# in the same transaction.
def test_upload_delete
basic_authorization users(:public_user).display_name, "test"
diff = XML::Document.new
diff.root = XML::Node.new "osmChange"
delete = XML::Node.new "delete"
diff.root << delete
delete << current_relations(:visible_relation).to_xml_node
delete << current_relations(:used_relation).to_xml_node
delete << current_ways(:used_way).to_xml_node
delete << current_nodes(:node_used_by_relationship).to_xml_node
# update the changeset to one that this user owns
changeset_id = changesets(:public_user_first_change).id
%w(node way relation).each do |type|
delete.find("//osmChange/delete/#{type}").each do |n|
n["changeset"] = changeset_id.to_s
end
end
# upload it
content diff
post :upload, :id => changeset_id
assert_response :success,
"can't upload a deletion diff to changeset: #{@response.body}"
# check the response is well-formed
assert_select "diffResult>node", 1
assert_select "diffResult>way", 1
assert_select "diffResult>relation", 2
# check that everything was deleted
assert_equal false, Node.find(current_nodes(:node_used_by_relationship).id).visible
assert_equal false, Way.find(current_ways(:used_way).id).visible
assert_equal false, Relation.find(current_relations(:visible_relation).id).visible
assert_equal false, Relation.find(current_relations(:used_relation).id).visible
end
##
# test uploading a delete with no lat/lon, as they are optional in
# the osmChange spec.
def test_upload_nolatlon_delete
basic_authorization users(:public_user).display_name, "test"
node = current_nodes(:public_visible_node)
cs = changesets(:public_user_first_change)
diff = "<osmChange><delete><node id='#{node.id}' version='#{node.version}' changeset='#{cs.id}'/></delete></osmChange>"
# upload it
content diff
post :upload, :id => cs.id
assert_response :success,
"can't upload a deletion diff to changeset: #{@response.body}"
# check the response is well-formed
assert_select "diffResult>node", 1
# check that everything was deleted
assert_equal false, Node.find(node.id).visible
end
def test_repeated_changeset_create
30.times do
basic_authorization users(:public_user).email, "test"
# create a temporary changeset
content "<osm><changeset>" +
"<tag k='created_by' v='osm test suite checking changesets'/>" +
"</changeset></osm>"
assert_difference "Changeset.count", 1 do
put :create
end
assert_response :success
end
end
def test_upload_large_changeset
basic_authorization users(:public_user).email, "test"
# create a changeset
content "<osm><changeset/></osm>"
put :create
assert_response :success, "Should be able to create a changeset: #{@response.body}"
changeset_id = @response.body.to_i
# upload some widely-spaced nodes, spiralling positive and negative to cause
# largest bbox over-expansion possible.
diff = <<EOF
<osmChange>
<create>
<node id='-1' lon='-20' lat='-10' changeset='#{changeset_id}'/>
<node id='-10' lon='20' lat='10' changeset='#{changeset_id}'/>
<node id='-2' lon='-40' lat='-20' changeset='#{changeset_id}'/>
<node id='-11' lon='40' lat='20' changeset='#{changeset_id}'/>
<node id='-3' lon='-60' lat='-30' changeset='#{changeset_id}'/>
<node id='-12' lon='60' lat='30' changeset='#{changeset_id}'/>
<node id='-4' lon='-80' lat='-40' changeset='#{changeset_id}'/>
<node id='-13' lon='80' lat='40' changeset='#{changeset_id}'/>
<node id='-5' lon='-100' lat='-50' changeset='#{changeset_id}'/>
<node id='-14' lon='100' lat='50' changeset='#{changeset_id}'/>
<node id='-6' lon='-120' lat='-60' changeset='#{changeset_id}'/>
<node id='-15' lon='120' lat='60' changeset='#{changeset_id}'/>
<node id='-7' lon='-140' lat='-70' changeset='#{changeset_id}'/>
<node id='-16' lon='140' lat='70' changeset='#{changeset_id}'/>
<node id='-8' lon='-160' lat='-80' changeset='#{changeset_id}'/>
<node id='-17' lon='160' lat='80' changeset='#{changeset_id}'/>
<node id='-9' lon='-179.9' lat='-89.9' changeset='#{changeset_id}'/>
<node id='-18' lon='179.9' lat='89.9' changeset='#{changeset_id}'/>
</create>
</osmChange>
EOF
# upload it, which used to cause an error like "PGError: ERROR:
# integer out of range" (bug #2152). but shouldn't any more.
content diff
post :upload, :id => changeset_id
assert_response :success,
"can't upload a spatially-large diff to changeset: #{@response.body}"
# check that the changeset bbox is within bounds
cs = Changeset.find(changeset_id)
assert cs.min_lon >= -180 * GeoRecord::SCALE, "Minimum longitude (#{cs.min_lon / GeoRecord::SCALE}) should be >= -180 to be valid."
assert cs.max_lon <= 180 * GeoRecord::SCALE, "Maximum longitude (#{cs.max_lon / GeoRecord::SCALE}) should be <= 180 to be valid."
assert cs.min_lat >= -90 * GeoRecord::SCALE, "Minimum latitude (#{cs.min_lat / GeoRecord::SCALE}) should be >= -90 to be valid."
assert cs.max_lat <= 90 * GeoRecord::SCALE, "Maximum latitude (#{cs.max_lat / GeoRecord::SCALE}) should be <= 90 to be valid."
end
##
# test that deleting stuff in a transaction doesn't bypass the checks
# to ensure that used elements are not deleted.
def test_upload_delete_invalid
basic_authorization users(:public_user).email, "test"
diff = XML::Document.new
diff.root = XML::Node.new "osmChange"
delete = XML::Node.new "delete"
diff.root << delete
delete << current_relations(:public_visible_relation).to_xml_node
delete << current_ways(:used_way).to_xml_node
delete << current_nodes(:node_used_by_relationship).to_xml_node
# upload it
content diff
post :upload, :id => 2
assert_response :precondition_failed,
"shouldn't be able to upload a invalid deletion diff: #{@response.body}"
assert_equal "Precondition failed: Way 3 is still used by relations 1.", @response.body
# check that nothing was, in fact, deleted
assert_equal true, Node.find(current_nodes(:node_used_by_relationship).id).visible
assert_equal true, Way.find(current_ways(:used_way).id).visible
assert_equal true, Relation.find(current_relations(:visible_relation).id).visible
end
##
# test that a conditional delete of an in use object works.
def test_upload_delete_if_unused
basic_authorization users(:public_user).email, "test"
diff = XML::Document.new
diff.root = XML::Node.new "osmChange"
delete = XML::Node.new "delete"
diff.root << delete
delete["if-unused"] = ""
delete << current_relations(:public_used_relation).to_xml_node
delete << current_ways(:used_way).to_xml_node
delete << current_nodes(:node_used_by_relationship).to_xml_node
# upload it
content diff
post :upload, :id => 2
assert_response :success,
"can't do a conditional delete of in use objects: #{@response.body}"
# check the returned payload
assert_select "diffResult[version='#{API_VERSION}'][generator='OpenStreetMap server']", 1
assert_select "diffResult>node", 1
assert_select "diffResult>way", 1
assert_select "diffResult>relation", 1
# parse the response
doc = XML::Parser.string(@response.body).parse
# check the old IDs are all present and what we expect
assert_equal current_nodes(:node_used_by_relationship).id, doc.find("//diffResult/node").first["old_id"].to_i
assert_equal current_ways(:used_way).id, doc.find("//diffResult/way").first["old_id"].to_i
assert_equal current_relations(:public_used_relation).id, doc.find("//diffResult/relation").first["old_id"].to_i
# check the new IDs are all present and unchanged
assert_equal current_nodes(:node_used_by_relationship).id, doc.find("//diffResult/node").first["new_id"].to_i
assert_equal current_ways(:used_way).id, doc.find("//diffResult/way").first["new_id"].to_i
assert_equal current_relations(:public_used_relation).id, doc.find("//diffResult/relation").first["new_id"].to_i
# check the new versions are all present and unchanged
assert_equal current_nodes(:node_used_by_relationship).version, doc.find("//diffResult/node").first["new_version"].to_i
assert_equal current_ways(:used_way).version, doc.find("//diffResult/way").first["new_version"].to_i
assert_equal current_relations(:public_used_relation).version, doc.find("//diffResult/relation").first["new_version"].to_i
# check that nothing was, in fact, deleted
assert_equal true, Node.find(current_nodes(:node_used_by_relationship).id).visible
assert_equal true, Way.find(current_ways(:used_way).id).visible
assert_equal true, Relation.find(current_relations(:public_used_relation).id).visible
end
##
# upload an element with a really long tag value
def test_upload_invalid_too_long_tag
basic_authorization users(:public_user).email, "test"
cs_id = changesets(:public_user_first_change).id
# simple diff to create a node way and relation using placeholders
diff = <<EOF
<osmChange>
<create>
<node id='-1' lon='0' lat='0' changeset='#{cs_id}'>
<tag k='foo' v='#{'x' * 256}'/>
</node>
</create>
</osmChange>
EOF
# upload it
content diff
post :upload, :id => cs_id
assert_response :bad_request,
"shoudln't be able to upload too long a tag to changeset: #{@response.body}"
end
##
# upload something which creates new objects and inserts them into
# existing containers using placeholders.
def test_upload_complex
basic_authorization users(:public_user).email, "test"
cs_id = changesets(:public_user_first_change).id
# simple diff to create a node way and relation using placeholders
diff = <<EOF
<osmChange>
<create>
<node id='-1' lon='0' lat='0' changeset='#{cs_id}'>
<tag k='foo' v='bar'/>
<tag k='baz' v='bat'/>
</node>
</create>
<modify>
<way id='1' changeset='#{cs_id}' version='1'>
<nd ref='-1'/>
<nd ref='3'/>
</way>
<relation id='1' changeset='#{cs_id}' version='1'>
<member type='way' role='some' ref='3'/>
<member type='node' role='some' ref='-1'/>
<member type='relation' role='some' ref='3'/>
</relation>
</modify>
</osmChange>
EOF
# upload it
content diff
post :upload, :id => cs_id
assert_response :success,
"can't upload a complex diff to changeset: #{@response.body}"
# check the returned payload
assert_select "diffResult[version='#{API_VERSION}'][generator='#{GENERATOR}']", 1
assert_select "diffResult>node", 1
assert_select "diffResult>way", 1
assert_select "diffResult>relation", 1
# inspect the response to find out what the new element IDs are
doc = XML::Parser.string(@response.body).parse
new_node_id = doc.find("//diffResult/node").first["new_id"].to_i
# check that the changes made it into the database
assert_equal 2, Node.find(new_node_id).tags.size, "new node should have two tags"
assert_equal [new_node_id, 3], Way.find(1).nds, "way nodes should match"
Relation.find(1).members.each do |type, id, _role|
if type == "node"
assert_equal new_node_id, id, "relation should contain new node"
end
end
end
##
# create a diff which references several changesets, which should cause
# a rollback and none of the diff gets committed
def test_upload_invalid_changesets
basic_authorization users(:public_user).email, "test"
cs_id = changesets(:public_user_first_change).id
# simple diff to create a node way and relation using placeholders
diff = <<EOF
<osmChange>
<modify>
<node id='1' lon='0' lat='0' changeset='#{cs_id}' version='1'/>
<way id='1' changeset='#{cs_id}' version='1'>
<nd ref='3'/>
</way>
</modify>
<modify>
<relation id='1' changeset='#{cs_id}' version='1'>
<member type='way' role='some' ref='3'/>
<member type='node' role='some' ref='5'/>
<member type='relation' role='some' ref='3'/>
</relation>
</modify>
<create>
<node id='-1' lon='0' lat='0' changeset='4'>
<tag k='foo' v='bar'/>
<tag k='baz' v='bat'/>
</node>
</create>
</osmChange>
EOF
# cache the objects before uploading them
node = current_nodes(:visible_node)
way = current_ways(:visible_way)
rel = current_relations(:visible_relation)
# upload it
content diff
post :upload, :id => cs_id
assert_response :conflict,
"uploading a diff with multiple changsets should have failed"
# check that objects are unmodified
assert_nodes_are_equal(node, Node.find(1))
assert_ways_are_equal(way, Way.find(1))
assert_relations_are_equal(rel, Relation.find(1))
end
##
# upload multiple versions of the same element in the same diff.
def test_upload_multiple_valid
basic_authorization users(:public_user).email, "test"
cs_id = changesets(:public_user_first_change).id
# change the location of a node multiple times, each time referencing
# the last version. doesn't this depend on version numbers being
# sequential?
diff = <<EOF
<osmChange>
<modify>
<node id='1' lon='0' lat='0' changeset='#{cs_id}' version='1'/>
<node id='1' lon='1' lat='0' changeset='#{cs_id}' version='2'/>
<node id='1' lon='1' lat='1' changeset='#{cs_id}' version='3'/>
<node id='1' lon='1' lat='2' changeset='#{cs_id}' version='4'/>
<node id='1' lon='2' lat='2' changeset='#{cs_id}' version='5'/>
<node id='1' lon='3' lat='2' changeset='#{cs_id}' version='6'/>
<node id='1' lon='3' lat='3' changeset='#{cs_id}' version='7'/>
<node id='1' lon='9' lat='9' changeset='#{cs_id}' version='8'/>
</modify>
</osmChange>
EOF
# upload it
content diff
post :upload, :id => cs_id
assert_response :success,
"can't upload multiple versions of an element in a diff: #{@response.body}"
# check the response is well-formed. it's counter-intuitive, but the
# API will return multiple elements with the same ID and different
# version numbers for each change we made.
assert_select "diffResult>node", 8
end
##
# upload multiple versions of the same element in the same diff, but
# keep the version numbers the same.
def test_upload_multiple_duplicate
basic_authorization users(:public_user).email, "test"
cs_id = changesets(:public_user_first_change).id
diff = <<EOF
<osmChange>
<modify>
<node id='1' lon='0' lat='0' changeset='#{cs_id}' version='1'/>
<node id='1' lon='1' lat='1' changeset='#{cs_id}' version='1'/>
</modify>
</osmChange>
EOF
# upload it
content diff
post :upload, :id => cs_id
assert_response :conflict,
"shouldn't be able to upload the same element twice in a diff: #{@response.body}"
end
##
# try to upload some elements without specifying the version
def test_upload_missing_version
basic_authorization users(:public_user).email, "test"
cs_id = changesets(:public_user_first_change).id
diff = <<EOF
<osmChange>
<modify>
<node id='1' lon='1' lat='1' changeset='#{cs_id}'/>
</modify>
</osmChange>
EOF
# upload it
content diff
post :upload, :id => cs_id
assert_response :bad_request,
"shouldn't be able to upload an element without version: #{@response.body}"
end
##
# try to upload with commands other than create, modify, or delete
def test_action_upload_invalid
basic_authorization users(:public_user).email, "test"
cs_id = changesets(:public_user_first_change).id
diff = <<EOF
<osmChange>
<ping>
<node id='1' lon='1' lat='1' changeset='#{cs_id}' />
</ping>
</osmChange>
EOF
content diff
post :upload, :id => cs_id
assert_response :bad_request, "Shouldn't be able to upload a diff with the action ping"
assert_equal @response.body, "Unknown action ping, choices are create, modify, delete"
end
##
# upload a valid changeset which has a mixture of whitespace
# to check a bug reported by ivansanchez (#1565).
def test_upload_whitespace_valid
basic_authorization users(:public_user).email, "test"
changeset_id = changesets(:public_user_first_change).id
diff = <<EOF
<osmChange>
<modify><node id='1' lon='0' lat='0' changeset='#{changeset_id}'
version='1'></node>
<node id='1' lon='1' lat='1' changeset='#{changeset_id}' version='2'><tag k='k' v='v'/></node></modify>
<modify>
<relation id='1' changeset='#{changeset_id}' version='1'><member
type='way' role='some' ref='3'/><member
type='node' role='some' ref='5'/>
<member type='relation' role='some' ref='3'/>
</relation>
</modify></osmChange>
EOF
# upload it
content diff
post :upload, :id => changeset_id
assert_response :success,
"can't upload a valid diff with whitespace variations to changeset: #{@response.body}"
# check the response is well-formed
assert_select "diffResult>node", 2
assert_select "diffResult>relation", 1
# check that the changes made it into the database
assert_equal 1, Node.find(1).tags.size, "node 1 should now have one tag"
assert_equal 0, Relation.find(1).tags.size, "relation 1 should now have no tags"
end
##
# upload a valid changeset which re-uses the same placeholder ID across
# create, modify and delete, to check a bug reported by ivansanchez.
def test_upload_reuse_placeholder_valid
basic_authorization users(:public_user).email, "test"
changeset_id = changesets(:public_user_first_change).id
diff = <<EOF
<osmChange>
<create>
<node id='-1' lon='0' lat='0' changeset='#{changeset_id}'>
<tag k="foo" v="bar"/>
</node>
</create>
<modify>
<node id='-1' lon='1' lat='1' changeset='#{changeset_id}' version='1'/>
</modify>
<delete>
<node id='-1' lon='2' lat='2' changeset='#{changeset_id}' version='2'/>
</delete>
</osmChange>
EOF
# upload it
content diff
post :upload, :id => changeset_id
assert_response :success,
"can't upload a valid diff with re-used placeholders to changeset: #{@response.body}"
# check the response is well-formed
assert_select "diffResult>node", 3
assert_select "diffResult>node[old_id='-1']", 3
end
##
# test what happens if a diff upload re-uses placeholder IDs in an
# illegal way.
def test_upload_placeholder_invalid
basic_authorization users(:public_user).email, "test"
changeset_id = changesets(:public_user_first_change).id
diff = <<EOF
<osmChange>
<create>
<node id='-1' lon='0' lat='0' changeset='#{changeset_id}' version='1'/>
<node id='-1' lon='1' lat='1' changeset='#{changeset_id}' version='1'/>
<node id='-1' lon='2' lat='2' changeset='#{changeset_id}' version='2'/>
</create>
</osmChange>
EOF
# upload it
content diff
post :upload, :id => changeset_id
assert_response :bad_request,
"shouldn't be able to re-use placeholder IDs"
end
##
# test that uploading a way referencing invalid placeholders gives a
# proper error, not a 500.
def test_upload_placeholder_invalid_way
basic_authorization users(:public_user).email, "test"
changeset_id = changesets(:public_user_first_change).id
diff = <<EOF
<osmChange>
<create>
<node id="-1" lon="0" lat="0" changeset="#{changeset_id}" version="1"/>
<node id="-2" lon="1" lat="1" changeset="#{changeset_id}" version="1"/>
<node id="-3" lon="2" lat="2" changeset="#{changeset_id}" version="1"/>
<way id="-1" changeset="#{changeset_id}" version="1">
<nd ref="-1"/>
<nd ref="-2"/>
<nd ref="-3"/>
<nd ref="-4"/>
</way>
</create>
</osmChange>
EOF
# upload it
content diff
post :upload, :id => changeset_id
assert_response :bad_request,
"shouldn't be able to use invalid placeholder IDs"
assert_equal "Placeholder node not found for reference -4 in way -1", @response.body
# the same again, but this time use an existing way
diff = <<EOF
<osmChange>
<create>
<node id="-1" lon="0" lat="0" changeset="#{changeset_id}" version="1"/>
<node id="-2" lon="1" lat="1" changeset="#{changeset_id}" version="1"/>
<node id="-3" lon="2" lat="2" changeset="#{changeset_id}" version="1"/>
<way id="1" changeset="#{changeset_id}" version="1">
<nd ref="-1"/>
<nd ref="-2"/>
<nd ref="-3"/>
<nd ref="-4"/>
</way>
</create>
</osmChange>
EOF
# upload it
content diff
post :upload, :id => changeset_id
assert_response :bad_request,
"shouldn't be able to use invalid placeholder IDs"
assert_equal "Placeholder node not found for reference -4 in way 1", @response.body
end
##
# test that uploading a relation referencing invalid placeholders gives a
# proper error, not a 500.
def test_upload_placeholder_invalid_relation
basic_authorization users(:public_user).email, "test"
changeset_id = changesets(:public_user_first_change).id
diff = <<EOF
<osmChange>
<create>
<node id="-1" lon="0" lat="0" changeset="#{changeset_id}" version="1"/>
<node id="-2" lon="1" lat="1" changeset="#{changeset_id}" version="1"/>
<node id="-3" lon="2" lat="2" changeset="#{changeset_id}" version="1"/>
<relation id="-1" changeset="#{changeset_id}" version="1">
<member type="node" role="foo" ref="-1"/>
<member type="node" role="foo" ref="-2"/>
<member type="node" role="foo" ref="-3"/>
<member type="node" role="foo" ref="-4"/>
</relation>
</create>
</osmChange>
EOF
# upload it
content diff
post :upload, :id => changeset_id
assert_response :bad_request,
"shouldn't be able to use invalid placeholder IDs"
assert_equal "Placeholder Node not found for reference -4 in relation -1.", @response.body
# the same again, but this time use an existing way
diff = <<EOF
<osmChange>
<create>
<node id="-1" lon="0" lat="0" changeset="#{changeset_id}" version="1"/>
<node id="-2" lon="1" lat="1" changeset="#{changeset_id}" version="1"/>
<node id="-3" lon="2" lat="2" changeset="#{changeset_id}" version="1"/>
<relation id="1" changeset="#{changeset_id}" version="1">
<member type="node" role="foo" ref="-1"/>
<member type="node" role="foo" ref="-2"/>
<member type="node" role="foo" ref="-3"/>
<member type="way" role="bar" ref="-1"/>
</relation>
</create>
</osmChange>
EOF
# upload it
content diff
post :upload, :id => changeset_id
assert_response :bad_request,
"shouldn't be able to use invalid placeholder IDs"
assert_equal "Placeholder Way not found for reference -1 in relation 1.", @response.body
end
##
# test what happens if a diff is uploaded containing only a node
# move.
def test_upload_node_move
basic_authorization users(:public_user).email, "test"
content "<osm><changeset>" +
"<tag k='created_by' v='osm test suite checking changesets'/>" +
"</changeset></osm>"
put :create
assert_response :success
changeset_id = @response.body.to_i
old_node = current_nodes(:visible_node)
diff = XML::Document.new
diff.root = XML::Node.new "osmChange"
modify = XML::Node.new "modify"
xml_old_node = old_node.to_xml_node
xml_old_node["lat"] = 2.0.to_s
xml_old_node["lon"] = 2.0.to_s
xml_old_node["changeset"] = changeset_id.to_s
modify << xml_old_node
diff.root << modify
# upload it
content diff
post :upload, :id => changeset_id
assert_response :success,
"diff should have uploaded OK"
# check the bbox
changeset = Changeset.find(changeset_id)
assert_equal 1 * GeoRecord::SCALE, changeset.min_lon, "min_lon should be 1 degree"
assert_equal 2 * GeoRecord::SCALE, changeset.max_lon, "max_lon should be 2 degrees"
assert_equal 1 * GeoRecord::SCALE, changeset.min_lat, "min_lat should be 1 degree"
assert_equal 2 * GeoRecord::SCALE, changeset.max_lat, "max_lat should be 2 degrees"
end
##
# test what happens if a diff is uploaded adding a node to a way.
def test_upload_way_extend
basic_authorization users(:public_user).email, "test"
content "<osm><changeset>" +
"<tag k='created_by' v='osm test suite checking changesets'/>" +
"</changeset></osm>"
put :create
assert_response :success
changeset_id = @response.body.to_i
old_way = current_ways(:visible_way)
diff = XML::Document.new
diff.root = XML::Node.new "osmChange"
modify = XML::Node.new "modify"
xml_old_way = old_way.to_xml_node
nd_ref = XML::Node.new "nd"
nd_ref["ref"] = current_nodes(:visible_node).id.to_s
xml_old_way << nd_ref
xml_old_way["changeset"] = changeset_id.to_s
modify << xml_old_way
diff.root << modify
# upload it
content diff
post :upload, :id => changeset_id
assert_response :success,
"diff should have uploaded OK"
# check the bbox
changeset = Changeset.find(changeset_id)
assert_equal 1 * GeoRecord::SCALE, changeset.min_lon, "min_lon should be 1 degree"
assert_equal 3 * GeoRecord::SCALE, changeset.max_lon, "max_lon should be 3 degrees"
assert_equal 1 * GeoRecord::SCALE, changeset.min_lat, "min_lat should be 1 degree"
assert_equal 3 * GeoRecord::SCALE, changeset.max_lat, "max_lat should be 3 degrees"
end
##
# test for more issues in #1568
def test_upload_empty_invalid
basic_authorization users(:public_user).email, "test"
["<osmChange/>",
"<osmChange></osmChange>",
"<osmChange><modify/></osmChange>",
"<osmChange><modify></modify></osmChange>"].each do |diff|
# upload it
content diff
post :upload, :id => changesets(:public_user_first_change).id
assert_response(:success, "should be able to upload " +
"empty changeset: " + diff)
end
end
##
# test that the X-Error-Format header works to request XML errors
def test_upload_xml_errors
basic_authorization users(:public_user).email, "test"
# try and delete a node that is in use
diff = XML::Document.new
diff.root = XML::Node.new "osmChange"
delete = XML::Node.new "delete"
diff.root << delete
delete << current_nodes(:node_used_by_relationship).to_xml_node
# upload it
content diff
error_format "xml"
post :upload, :id => 2
assert_response :success,
"failed to return error in XML format"
# check the returned payload
assert_select "osmError[version='#{API_VERSION}'][generator='OpenStreetMap server']", 1
assert_select "osmError>status", 1
assert_select "osmError>message", 1
end
##
# when we make some simple changes we get the same changes back from the
# diff download.
def test_diff_download_simple
## First try with the normal user, which should get a forbidden
basic_authorization(users(:normal_user).email, "test")
# create a temporary changeset
content "<osm><changeset>" +
"<tag k='created_by' v='osm test suite checking changesets'/>" +
"</changeset></osm>"
put :create
assert_response :forbidden
## Now try with the public user
basic_authorization(users(:public_user).email, "test")
# create a temporary changeset
content "<osm><changeset>" +
"<tag k='created_by' v='osm test suite checking changesets'/>" +
"</changeset></osm>"
put :create
assert_response :success
changeset_id = @response.body.to_i
# add a diff to it
diff = <<EOF
<osmChange>
<modify>
<node id='1' lon='0' lat='0' changeset='#{changeset_id}' version='1'/>
<node id='1' lon='1' lat='0' changeset='#{changeset_id}' version='2'/>
<node id='1' lon='1' lat='1' changeset='#{changeset_id}' version='3'/>
<node id='1' lon='1' lat='2' changeset='#{changeset_id}' version='4'/>
<node id='1' lon='2' lat='2' changeset='#{changeset_id}' version='5'/>
<node id='1' lon='3' lat='2' changeset='#{changeset_id}' version='6'/>
<node id='1' lon='3' lat='3' changeset='#{changeset_id}' version='7'/>
<node id='1' lon='9' lat='9' changeset='#{changeset_id}' version='8'/>
</modify>
</osmChange>
EOF
# upload it
content diff
post :upload, :id => changeset_id
assert_response :success,
"can't upload multiple versions of an element in a diff: #{@response.body}"
get :download, :id => changeset_id
assert_response :success
assert_select "osmChange", 1
assert_select "osmChange>modify", 8
assert_select "osmChange>modify>node", 8
end
##
# culled this from josm to ensure that nothing in the way that josm
# is formatting the request is causing it to fail.
#
# NOTE: the error turned out to be something else completely!
def test_josm_upload
basic_authorization(users(:public_user).email, "test")
# create a temporary changeset
content "<osm><changeset>" +
"<tag k='created_by' v='osm test suite checking changesets'/>" +
"</changeset></osm>"
put :create
assert_response :success
changeset_id = @response.body.to_i
diff = <<OSMFILE
<osmChange version="0.6" generator="JOSM">
<create version="0.6" generator="JOSM">
<node id='-1' visible='true' changeset='#{changeset_id}' lat='51.49619982187321' lon='-0.18722061869438314' />
<node id='-2' visible='true' changeset='#{changeset_id}' lat='51.496359883909605' lon='-0.18653093576241928' />
<node id='-3' visible='true' changeset='#{changeset_id}' lat='51.49598132358285' lon='-0.18719613290981638' />
<node id='-4' visible='true' changeset='#{changeset_id}' lat='51.4961591711078' lon='-0.18629015888084607' />
<node id='-5' visible='true' changeset='#{changeset_id}' lat='51.49582126021711' lon='-0.18708186591517145' />
<node id='-6' visible='true' changeset='#{changeset_id}' lat='51.49591018437858' lon='-0.1861432441734455' />
<node id='-7' visible='true' changeset='#{changeset_id}' lat='51.49560784152179' lon='-0.18694719410005425' />
<node id='-8' visible='true' changeset='#{changeset_id}' lat='51.49567389979617' lon='-0.1860289771788006' />
<node id='-9' visible='true' changeset='#{changeset_id}' lat='51.49543761398892' lon='-0.186820684213126' />
<way id='-10' action='modiy' visible='true' changeset='#{changeset_id}'>
<nd ref='-1' />
<nd ref='-2' />
<nd ref='-3' />
<nd ref='-4' />
<nd ref='-5' />
<nd ref='-6' />
<nd ref='-7' />
<nd ref='-8' />
<nd ref='-9' />
<tag k='highway' v='residential' />
<tag k='name' v='Foobar Street' />
</way>
</create>
</osmChange>
OSMFILE
# upload it
content diff
post :upload, :id => changeset_id
assert_response :success,
"can't upload a diff from JOSM: #{@response.body}"
get :download, :id => changeset_id
assert_response :success
assert_select "osmChange", 1
assert_select "osmChange>create>node", 9
assert_select "osmChange>create>way", 1
assert_select "osmChange>create>way>nd", 9
assert_select "osmChange>create>way>tag", 2
end
##
# when we make some complex changes we get the same changes back from the
# diff download.
def test_diff_download_complex
basic_authorization(users(:public_user).email, "test")
# create a temporary changeset
content "<osm><changeset>" +
"<tag k='created_by' v='osm test suite checking changesets'/>" +
"</changeset></osm>"
put :create
assert_response :success
changeset_id = @response.body.to_i
# add a diff to it
diff = <<EOF
<osmChange>
<delete>
<node id='1' lon='0' lat='0' changeset='#{changeset_id}' version='1'/>
</delete>
<create>
<node id='-1' lon='9' lat='9' changeset='#{changeset_id}' version='0'/>
<node id='-2' lon='8' lat='9' changeset='#{changeset_id}' version='0'/>
<node id='-3' lon='7' lat='9' changeset='#{changeset_id}' version='0'/>
</create>
<modify>
<node id='3' lon='20' lat='15' changeset='#{changeset_id}' version='1'/>
<way id='1' changeset='#{changeset_id}' version='1'>
<nd ref='3'/>
<nd ref='-1'/>
<nd ref='-2'/>
<nd ref='-3'/>
</way>
</modify>
</osmChange>
EOF
# upload it
content diff
post :upload, :id => changeset_id
assert_response :success,
"can't upload multiple versions of an element in a diff: #{@response.body}"
get :download, :id => changeset_id
assert_response :success
assert_select "osmChange", 1
assert_select "osmChange>create", 3
assert_select "osmChange>delete", 1
assert_select "osmChange>modify", 2
assert_select "osmChange>create>node", 3
assert_select "osmChange>delete>node", 1
assert_select "osmChange>modify>node", 1
assert_select "osmChange>modify>way", 1
end
def test_changeset_download
tag = create(:old_node_tag, :old_node => nodes(:used_node_2))
get :download, :id => changesets(:normal_user_first_change).id
assert_response :success
assert_template nil
# print @response.body
# FIXME: needs more assert_select tests
assert_select "osmChange[version='#{API_VERSION}'][generator='#{GENERATOR}']" do
assert_select "create", :count => 5
assert_select "create>node[id='#{nodes(:used_node_2).node_id}'][visible='#{nodes(:used_node_2).visible?}'][version='#{nodes(:used_node_2).version}']" do
assert_select "tag[k='#{tag.k}'][v='#{tag.v}']"
end
assert_select "create>node[id='#{nodes(:visible_node).node_id}']"
end
end
##
# check that the bounding box of a changeset gets updated correctly
# FIXME: This should really be moved to an integration test due to the with_controller
def test_changeset_bbox
basic_authorization users(:public_user).email, "test"
# create a new changeset
content "<osm><changeset/></osm>"
put :create
assert_response :success, "Creating of changeset failed."
changeset_id = @response.body.to_i
# add a single node to it
with_controller(NodeController.new) do
content "<osm><node lon='1' lat='2' changeset='#{changeset_id}'/></osm>"
put :create
assert_response :success, "Couldn't create node."
end
# get the bounding box back from the changeset
get :read, :id => changeset_id
assert_response :success, "Couldn't read back changeset."
assert_select "osm>changeset[min_lon='1.0']", 1
assert_select "osm>changeset[max_lon='1.0']", 1
assert_select "osm>changeset[min_lat='2.0']", 1
assert_select "osm>changeset[max_lat='2.0']", 1
# add another node to it
with_controller(NodeController.new) do
content "<osm><node lon='2' lat='1' changeset='#{changeset_id}'/></osm>"
put :create
assert_response :success, "Couldn't create second node."
end
# get the bounding box back from the changeset
get :read, :id => changeset_id
assert_response :success, "Couldn't read back changeset for the second time."
assert_select "osm>changeset[min_lon='1.0']", 1
assert_select "osm>changeset[max_lon='2.0']", 1
assert_select "osm>changeset[min_lat='1.0']", 1
assert_select "osm>changeset[max_lat='2.0']", 1
# add (delete) a way to it, which contains a point at (3,3)
with_controller(WayController.new) do
content update_changeset(current_ways(:visible_way).to_xml,
changeset_id)
put :delete, :id => current_ways(:visible_way).id
assert_response :success, "Couldn't delete a way."
end
# get the bounding box back from the changeset
get :read, :id => changeset_id
assert_response :success, "Couldn't read back changeset for the third time."
# note that the 3.1 here is because of the bbox overexpansion
assert_select "osm>changeset[min_lon='1.0']", 1
assert_select "osm>changeset[max_lon='3.1']", 1
assert_select "osm>changeset[min_lat='1.0']", 1
assert_select "osm>changeset[max_lat='3.1']", 1
end
##
# test that the changeset :include method works as it should
def test_changeset_include
basic_authorization users(:public_user).display_name, "test"
# create a new changeset
content "<osm><changeset/></osm>"
put :create
assert_response :success, "Creating of changeset failed."
changeset_id = @response.body.to_i
# NOTE: the include method doesn't over-expand, like inserting
# a real element does. This is because we expect the client to
# know what it is doing!
check_after_include(changeset_id, 1, 1, [1, 1, 1, 1])
check_after_include(changeset_id, 3, 3, [1, 1, 3, 3])
check_after_include(changeset_id, 4, 2, [1, 1, 4, 3])
check_after_include(changeset_id, 2, 2, [1, 1, 4, 3])
check_after_include(changeset_id, -1, -1, [-1, -1, 4, 3])
check_after_include(changeset_id, -2, 5, [-2, -1, 4, 5])
end
##
# test that not found and wrong method errors with the expand bbox work as expected
def test_changeset_expand_bbox_error
basic_authorization users(:public_user).display_name, "test"
# create a new changeset
content "<osm><changeset/></osm>"
put :create
assert_response :success, "Creating of changeset failed."
changeset_id = @response.body.to_i
lon = 58.2
lat = -0.45
# Try and put
content "<osm><node lon='#{lon}' lat='#{lat}'/></osm>"
put :expand_bbox, :id => changeset_id
assert_response :method_not_allowed, "shouldn't be able to put a bbox expand"
# Try to get the update
content "<osm><node lon='#{lon}' lat='#{lat}'/></osm>"
get :expand_bbox, :id => changeset_id
assert_response :method_not_allowed, "shouldn't be able to get a bbox expand"
# Try to use a hopefully missing changeset
content "<osm><node lon='#{lon}' lat='#{lat}'/></osm>"
post :expand_bbox, :id => changeset_id + 13245
assert_response :not_found, "shouldn't be able to do a bbox expand on a nonexistent changeset"
end
##
# test the query functionality of changesets
def test_query
get :query, :bbox => "-10,-10, 10, 10"
assert_response :success, "can't get changesets in bbox"
assert_changesets [1, 4, 6]
get :query, :bbox => "4.5,4.5,4.6,4.6"
assert_response :success, "can't get changesets in bbox"
assert_changesets [1]
# not found when looking for changesets of non-existing users
get :query, :user => User.maximum(:id) + 1
assert_response :not_found
get :query, :display_name => " "
assert_response :not_found
# can't get changesets of user 1 without authenticating
get :query, :user => users(:normal_user).id
assert_response :not_found, "shouldn't be able to get changesets by non-public user (ID)"
get :query, :display_name => users(:normal_user).display_name
assert_response :not_found, "shouldn't be able to get changesets by non-public user (name)"
# but this should work
basic_authorization "[email protected]", "test"
get :query, :user => users(:normal_user).id
assert_response :success, "can't get changesets by user ID"
assert_changesets [1, 3, 6, 8]
get :query, :display_name => users(:normal_user).display_name
assert_response :success, "can't get changesets by user name"
assert_changesets [1, 3, 6, 8]
# check that the correct error is given when we provide both UID and name
get :query, :user => users(:normal_user).id, :display_name => users(:normal_user).display_name
assert_response :bad_request, "should be a bad request to have both ID and name specified"
get :query, :user => users(:normal_user).id, :open => true
assert_response :success, "can't get changesets by user and open"
assert_changesets [1]
get :query, :time => "2007-12-31"
assert_response :success, "can't get changesets by time-since"
assert_changesets [1, 2, 4, 5, 6]
get :query, :time => "2008-01-01T12:34Z"
assert_response :success, "can't get changesets by time-since with hour"
assert_changesets [1, 2, 4, 5, 6]
get :query, :time => "2007-12-31T23:59Z,2008-01-01T00:01Z"
assert_response :success, "can't get changesets by time-range"
assert_changesets [1, 5, 6]
get :query, :open => "true"
assert_response :success, "can't get changesets by open-ness"
assert_changesets [1, 2, 4]
get :query, :closed => "true"
assert_response :success, "can't get changesets by closed-ness"
assert_changesets [3, 5, 6, 7, 8, 9]
get :query, :closed => "true", :user => users(:normal_user).id
assert_response :success, "can't get changesets by closed-ness and user"
assert_changesets [3, 6, 8]
get :query, :closed => "true", :user => users(:public_user).id
assert_response :success, "can't get changesets by closed-ness and user"
assert_changesets [7]
get :query, :changesets => "1,2,3"
assert_response :success, "can't get changesets by id (as comma-separated string)"
assert_changesets [1, 2, 3]
get :query, :changesets => ""
assert_response :bad_request, "should be a bad request since changesets is empty"
end
##
# check that errors are returned if garbage is inserted
# into query strings
def test_query_invalid
["abracadabra!",
"1,2,3,F",
";drop table users;"].each do |bbox|
get :query, :bbox => bbox
assert_response :bad_request, "'#{bbox}' isn't a bbox"
end
["now()",
"00-00-00",
";drop table users;",
",",
"-,-"].each do |time|
get :query, :time => time
assert_response :bad_request, "'#{time}' isn't a valid time range"
end
["me",
"foobar",
"-1",
"0"].each do |uid|
get :query, :user => uid
assert_response :bad_request, "'#{uid}' isn't a valid user ID"
end
end
##
# check updating tags on a changeset
def test_changeset_update
## First try with the non-public user
changeset = changesets(:normal_user_first_change)
new_changeset = changeset.to_xml
new_tag = XML::Node.new "tag"
new_tag["k"] = "tagtesting"
new_tag["v"] = "valuetesting"
new_changeset.find("//osm/changeset").first << new_tag
content new_changeset
# try without any authorization
put :update, :id => changeset.id
assert_response :unauthorized
# try with the wrong authorization
basic_authorization users(:public_user).email, "test"
put :update, :id => changeset.id
assert_response :conflict
# now this should fail because the user's data is not public
basic_authorization users(:normal_user).email, "test"
put :update, :id => changeset.id
assert_require_public_data "user with their data non-public, shouldn't be able to edit their changeset"
## Now try with the public user
changeset = changesets(:public_user_first_change)
create(:changeset_tag, :changeset => changeset)
new_changeset = changeset.to_xml
new_tag = XML::Node.new "tag"
new_tag["k"] = "tagtesting"
new_tag["v"] = "valuetesting"
new_changeset.find("//osm/changeset").first << new_tag
content new_changeset
# try without any authorization
@request.env["HTTP_AUTHORIZATION"] = nil
put :update, :id => changeset.id
assert_response :unauthorized
# try with the wrong authorization
basic_authorization users(:second_public_user).email, "test"
put :update, :id => changeset.id
assert_response :conflict
# now this should work...
basic_authorization users(:public_user).email, "test"
put :update, :id => changeset.id
assert_response :success
assert_select "osm>changeset[id='#{changeset.id}']", 1
assert_select "osm>changeset>tag", 2
assert_select "osm>changeset>tag[k='tagtesting'][v='valuetesting']", 1
end
##
# check that a user different from the one who opened the changeset
# can't modify it.
def test_changeset_update_invalid
basic_authorization users(:public_user).email, "test"
changeset = changesets(:normal_user_first_change)
new_changeset = changeset.to_xml
new_tag = XML::Node.new "tag"
new_tag["k"] = "testing"
new_tag["v"] = "testing"
new_changeset.find("//osm/changeset").first << new_tag
content new_changeset
put :update, :id => changeset.id
assert_response :conflict
end
##
# check that a changeset can contain a certain max number of changes.
## FIXME should be changed to an integration test due to the with_controller
def test_changeset_limits
basic_authorization users(:public_user).email, "test"
# open a new changeset
content "<osm><changeset/></osm>"
put :create
assert_response :success, "can't create a new changeset"
cs_id = @response.body.to_i
# start the counter just short of where the changeset should finish.
offset = 10
# alter the database to set the counter on the changeset directly,
# otherwise it takes about 6 minutes to fill all of them.
changeset = Changeset.find(cs_id)
changeset.num_changes = Changeset::MAX_ELEMENTS - offset
changeset.save!
with_controller(NodeController.new) do
# create a new node
content "<osm><node changeset='#{cs_id}' lat='0.0' lon='0.0'/></osm>"
put :create
assert_response :success, "can't create a new node"
node_id = @response.body.to_i
get :read, :id => node_id
assert_response :success, "can't read back new node"
node_doc = XML::Parser.string(@response.body).parse
node_xml = node_doc.find("//osm/node").first
# loop until we fill the changeset with nodes
offset.times do |i|
node_xml["lat"] = rand.to_s
node_xml["lon"] = rand.to_s
node_xml["version"] = (i + 1).to_s
content node_doc
put :update, :id => node_id
assert_response :success, "attempt #{i} should have succeeded"
end
# trying again should fail
node_xml["lat"] = rand.to_s
node_xml["lon"] = rand.to_s
node_xml["version"] = offset.to_s
content node_doc
put :update, :id => node_id
assert_response :conflict, "final attempt should have failed"
end
changeset = Changeset.find(cs_id)
assert_equal Changeset::MAX_ELEMENTS + 1, changeset.num_changes
# check that the changeset is now closed as well
assert(!changeset.is_open?,
"changeset should have been auto-closed by exceeding " +
"element limit.")
end
##
# This should display the last 20 changesets closed
def test_list
get :list, :format => "html"
assert_response :success
assert_template "history"
assert_template :layout => "map"
assert_select "h2", :text => "Changesets", :count => 1
xhr :get, :list, :format => "html", :list => "1"
assert_response :success
assert_template "list"
check_list_result(Changeset.all)
end
##
# This should display the last 20 changesets closed
def test_list_xhr
xhr :get, :list, :format => "html"
assert_response :success
assert_template "history"
assert_template :layout => "xhr"
assert_select "h2", :text => "Changesets", :count => 1
xhr :get, :list, :format => "html", :list => "1"
assert_response :success
assert_template "list"
check_list_result(Changeset.all)
end
##
# This should display the last 20 changesets closed in a specific area
def test_list_bbox
get :list, :format => "html", :bbox => "4.5,4.5,5.5,5.5"
assert_response :success
assert_template "history"
assert_template :layout => "map"
assert_select "h2", :text => "Changesets", :count => 1
xhr :get, :list, :format => "html", :bbox => "4.5,4.5,5.5,5.5", :list => "1"
assert_response :success
assert_template "list"
check_list_result(Changeset.where("min_lon < 55000000 and max_lon > 45000000 and min_lat < 55000000 and max_lat > 45000000"))
end
##
# Checks the display of the user changesets listing
def test_list_user
user = users(:public_user)
get :list, :format => "html", :display_name => user.display_name
assert_response :success
assert_template "history"
xhr :get, :list, :format => "html", :display_name => user.display_name, :list => "1"
assert_response :success
assert_template "list"
check_list_result(user.changesets)
end
##
# Checks the display of the user changesets listing for a private user
def test_list_private_user
user = users(:normal_user)
get :list, :format => "html", :display_name => user.display_name
assert_response :success
assert_template "history"
xhr :get, :list, :format => "html", :display_name => user.display_name, :list => "1"
assert_response :success
assert_template "list"
check_list_result(Changeset.none)
end
##
# Check the not found response for the user changesets list
def test_list_user_not_found
get :list, :format => "html", :display_name => "Some random user"
assert_response :not_found
assert_template "user/no_such_user"
xhr :get, :list, :format => "html", :display_name => "Some random user", :list => "1"
assert_response :not_found
assert_template "user/no_such_user"
end
##
# Checks the display of the friends changesets listing
def test_list_friends
user = users(:normal_user)
get :list, :friends => true
assert_response :redirect
assert_redirected_to :controller => :user, :action => :login, :referer => friend_changesets_path
session[:user] = user.id
get :list, :friends => true
assert_response :success
assert_template "history"
xhr :get, :list, :friends => true, :list => "1"
assert_response :success
assert_template "list"
check_list_result(Changeset.where(:user => user.friend_users.identifiable))
end
##
# Checks the display of the nearby user changesets listing
def test_list_nearby
user = users(:normal_user)
get :list, :nearby => true
assert_response :redirect
assert_redirected_to :controller => :user, :action => :login, :referer => nearby_changesets_path
session[:user] = user.id
get :list, :nearby => true
assert_response :success
assert_template "history"
xhr :get, :list, :nearby => true, :list => "1"
assert_response :success
assert_template "list"
check_list_result(Changeset.where(:user => user.nearby))
end
##
# Check that we can request later pages of the changesets list using max_id
def test_list_max_id
xhr :get, :list, :format => "html", :max_id => 4
assert_response :success
assert_template "history"
assert_template :layout => "xhr"
assert_select "h2", :text => "Changesets", :count => 1
xhr :get, :list, :format => "html", :list => "1", :max_id => 4
assert_response :success
assert_template "list"
check_list_result(Changeset.where("id <= 4"))
end
##
# This should display the last 20 changesets closed
def test_feed
get :feed, :format => :atom
assert_response :success
assert_template "list"
assert_equal "application/atom+xml", response.content_type
check_feed_result(Changeset.all)
end
##
# This should display the last 20 changesets closed in a specific area
def test_feed_bbox
get :feed, :format => :atom, :bbox => "4.5,4.5,5.5,5.5"
assert_response :success
assert_template "list"
assert_equal "application/atom+xml", response.content_type
check_feed_result(Changeset.where("min_lon < 55000000 and max_lon > 45000000 and min_lat < 55000000 and max_lat > 45000000"))
end
##
# Checks the display of the user changesets feed
def test_feed_user
user = users(:public_user)
get :feed, :format => :atom, :display_name => user.display_name
assert_response :success
assert_template "list"
assert_equal "application/atom+xml", response.content_type
check_feed_result(user.changesets)
end
##
# Check the not found response for the user changesets feed
def test_feed_user_not_found
get :feed, :format => "atom", :display_name => "Some random user"
assert_response :not_found
end
##
# Check that we can't request later pages of the changesets feed
def test_feed_max_id
get :feed, :format => "atom", :max_id => 100
assert_response :redirect
assert_redirected_to :action => :feed
end
##
# check that the changeset download for a changeset with a redacted
# element in it doesn't contain that element.
def test_diff_download_redacted
changeset_id = changesets(:public_user_first_change).id
get :download, :id => changeset_id
assert_response :success
assert_select "osmChange", 1
# this changeset contains node 17 in versions 1 & 2, but 1 should
# be hidden.
assert_select "osmChange node[id='17']", 1
assert_select "osmChange node[id='17'][version='1']", 0
end
##
# create comment success
def test_create_comment_success
basic_authorization(users(:public_user).email, "test")
assert_difference "ChangesetComment.count", 1 do
assert_no_difference "ActionMailer::Base.deliveries.size" do
post :comment, :id => changesets(:normal_user_closed_change).id, :text => "This is a comment"
end
end
assert_response :success
changeset = changesets(:normal_user_subscribed_change)
changeset.subscribers.push(users(:normal_user))
changeset.subscribers.push(users(:public_user))
changeset.subscribers.push(users(:suspended_user))
changeset.subscribers.push(users(:deleted_user))
assert_difference "ChangesetComment.count", 1 do
assert_difference "ActionMailer::Base.deliveries.size", 1 do
post :comment, :id => changeset.id, :text => "This is a comment"
end
end
assert_response :success
email = ActionMailer::Base.deliveries.first
assert_equal 1, email.to.length
assert_equal "[OpenStreetMap] test2 has commented on one of your changesets", email.subject
assert_equal "[email protected]", email.to.first
ActionMailer::Base.deliveries.clear
basic_authorization(users(:second_public_user).email, "test")
assert_difference "ChangesetComment.count", 1 do
assert_difference "ActionMailer::Base.deliveries.size", 2 do
post :comment, :id => changeset.id, :text => "This is a comment"
end
end
assert_response :success
email = ActionMailer::Base.deliveries.find { |e| e.to.first == "[email protected]" }
assert_not_nil email
assert_equal 1, email.to.length
assert_equal "[OpenStreetMap] pulibc_test2 has commented on one of your changesets", email.subject
email = ActionMailer::Base.deliveries.find { |e| e.to.first == "[email protected]" }
assert_not_nil email
assert_equal 1, email.to.length
assert_equal "[OpenStreetMap] pulibc_test2 has commented on a changeset you are interested in", email.subject
ActionMailer::Base.deliveries.clear
end
##
# create comment fail
def test_create_comment_fail
# unauthorized
post :comment, :id => changesets(:normal_user_closed_change).id, :text => "This is a comment"
assert_response :unauthorized
basic_authorization(users(:public_user).email, "test")
# bad changeset id
assert_no_difference "ChangesetComment.count" do
post :comment, :id => 999111, :text => "This is a comment"
end
assert_response :not_found
# not closed changeset
assert_no_difference "ChangesetComment.count" do
post :comment, :id => changesets(:normal_user_first_change).id, :text => "This is a comment"
end
assert_response :conflict
# no text
assert_no_difference "ChangesetComment.count" do
post :comment, :id => changesets(:normal_user_closed_change).id
end
assert_response :bad_request
# empty text
assert_no_difference "ChangesetComment.count" do
post :comment, :id => changesets(:normal_user_closed_change).id, :text => ""
end
assert_response :bad_request
end
##
# test subscribe success
def test_subscribe_success
basic_authorization(users(:public_user).email, "test")
changeset = changesets(:normal_user_closed_change)
assert_difference "changeset.subscribers.count", 1 do
post :subscribe, :id => changeset.id
end
assert_response :success
end
##
# test subscribe fail
def test_subscribe_fail
# unauthorized
changeset = changesets(:normal_user_closed_change)
assert_no_difference "changeset.subscribers.count" do
post :subscribe, :id => changeset.id
end
assert_response :unauthorized
basic_authorization(users(:public_user).email, "test")
# bad changeset id
assert_no_difference "changeset.subscribers.count" do
post :subscribe, :id => 999111
end
assert_response :not_found
# not closed changeset
changeset = changesets(:normal_user_first_change)
assert_no_difference "changeset.subscribers.count" do
post :subscribe, :id => changeset.id
end
assert_response :conflict
# trying to subscribe when already subscribed
changeset = changesets(:normal_user_subscribed_change)
changeset.subscribers.push(users(:public_user))
assert_no_difference "changeset.subscribers.count" do
post :subscribe, :id => changeset.id
end
assert_response :conflict
end
##
# test unsubscribe success
def test_unsubscribe_success
basic_authorization(users(:public_user).email, "test")
changeset = changesets(:normal_user_subscribed_change)
changeset.subscribers.push(users(:public_user))
assert_difference "changeset.subscribers.count", -1 do
post :unsubscribe, :id => changeset.id
end
assert_response :success
end
##
# test unsubscribe fail
def test_unsubscribe_fail
# unauthorized
changeset = changesets(:normal_user_closed_change)
assert_no_difference "changeset.subscribers.count" do
post :unsubscribe, :id => changeset.id
end
assert_response :unauthorized
basic_authorization(users(:public_user).email, "test")
# bad changeset id
assert_no_difference "changeset.subscribers.count" do
post :unsubscribe, :id => 999111
end
assert_response :not_found
# not closed changeset
changeset = changesets(:normal_user_first_change)
assert_no_difference "changeset.subscribers.count" do
post :unsubscribe, :id => changeset.id
end
assert_response :conflict
# trying to unsubscribe when not subscribed
changeset = changesets(:normal_user_closed_change)
assert_no_difference "changeset.subscribers.count" do
post :unsubscribe, :id => changeset.id
end
assert_response :not_found
end
##
# test hide comment fail
def test_hide_comment_fail
# unauthorized
comment = create(:changeset_comment)
assert_equal true, comment.visible
post :hide_comment, :id => comment.id
assert_response :unauthorized
assert_equal true, comment.reload.visible
basic_authorization(users(:public_user).email, "test")
# not a moderator
post :hide_comment, :id => comment.id
assert_response :forbidden
assert_equal true, comment.reload.visible
basic_authorization(users(:moderator_user).email, "test")
# bad comment id
post :hide_comment, :id => 999111
assert_response :not_found
assert_equal true, comment.reload.visible
end
##
# test hide comment success
def test_hide_comment_success
comment = create(:changeset_comment)
assert_equal true, comment.visible
basic_authorization(users(:moderator_user).email, "test")
post :hide_comment, :id => comment.id
assert_response :success
assert_equal false, comment.reload.visible
end
##
# test unhide comment fail
def test_unhide_comment_fail
# unauthorized
comment = create(:changeset_comment, :visible => false)
assert_equal false, comment.visible
post :unhide_comment, :id => comment.id
assert_response :unauthorized
assert_equal false, comment.reload.visible
basic_authorization(users(:public_user).email, "test")
# not a moderator
post :unhide_comment, :id => comment.id
assert_response :forbidden
assert_equal false, comment.reload.visible
basic_authorization(users(:moderator_user).email, "test")
# bad comment id
post :unhide_comment, :id => 999111
assert_response :not_found
assert_equal false, comment.reload.visible
end
##
# test unhide comment success
def test_unhide_comment_success
comment = create(:changeset_comment, :visible => false)
assert_equal false, comment.visible
basic_authorization(users(:moderator_user).email, "test")
post :unhide_comment, :id => comment.id
assert_response :success
assert_equal true, comment.reload.visible
end
##
# test comments feed
def test_comments_feed
create_list(:changeset_comment, 3, :changeset_id => changesets(:normal_user_closed_change).id)
get :comments_feed, :format => "rss"
assert_response :success
assert_equal "application/rss+xml", @response.content_type
assert_select "rss", :count => 1 do
assert_select "channel", :count => 1 do
assert_select "item", :count => 3
end
end
get :comments_feed, :format => "rss", :limit => 2
assert_response :success
assert_equal "application/rss+xml", @response.content_type
assert_select "rss", :count => 1 do
assert_select "channel", :count => 1 do
assert_select "item", :count => 2
end
end
get :comments_feed, :id => changesets(:normal_user_closed_change), :format => "rss"
assert_response :success
assert_equal "application/rss+xml", @response.content_type
assert_select "rss", :count => 1 do
assert_select "channel", :count => 1 do
assert_select "item", :count => 3
end
end
end
##
# test comments feed
def test_comments_feed_bad_limit
get :comments_feed, :format => "rss", :limit => 0
assert_response :bad_request
get :comments_feed, :format => "rss", :limit => 100001
assert_response :bad_request
end
private
##
# boilerplate for checking that certain changesets exist in the
# output.
def assert_changesets(ids)
assert_select "osm>changeset", ids.size
ids.each do |id|
assert_select "osm>changeset[id='#{id}']", 1
end
end
##
# call the include method and assert properties of the bbox
def check_after_include(changeset_id, lon, lat, bbox)
content "<osm><node lon='#{lon}' lat='#{lat}'/></osm>"
post :expand_bbox, :id => changeset_id
assert_response :success, "Setting include of changeset failed: #{@response.body}"
# check exactly one changeset
assert_select "osm>changeset", 1
assert_select "osm>changeset[id='#{changeset_id}']", 1
# check the bbox
doc = XML::Parser.string(@response.body).parse
changeset = doc.find("//osm/changeset").first
assert_equal bbox[0], changeset["min_lon"].to_f, "min lon"
assert_equal bbox[1], changeset["min_lat"].to_f, "min lat"
assert_equal bbox[2], changeset["max_lon"].to_f, "max lon"
assert_equal bbox[3], changeset["max_lat"].to_f, "max lat"
end
##
# update the changeset_id of a way element
def update_changeset(xml, changeset_id)
xml_attr_rewrite(xml, "changeset", changeset_id)
end
##
# update an attribute in a way element
def xml_attr_rewrite(xml, name, value)
xml.find("//osm/way").first[name] = value.to_s
xml
end
##
# check the result of a list
def check_list_result(changesets)
changesets = changesets.where("num_changes > 0")
.order(:created_at => :desc)
.limit(20)
assert changesets.size <= 20
assert_select "ol.changesets", :count => [changesets.size, 1].min do
assert_select "li", :count => changesets.size
changesets.each do |changeset|
assert_select "li#changeset_#{changeset.id}", :count => 1
end
end
end
##
# check the result of a feed
def check_feed_result(changesets)
changesets = changesets.where("num_changes > 0")
.order(:created_at => :desc)
.limit(20)
assert changesets.size <= 20
assert_select "feed", :count => [changesets.size, 1].min do
assert_select "> title", :count => 1, :text => /^Changesets/
assert_select "> entry", :count => changesets.size
changesets.each do |changeset|
assert_select "> entry > id", changeset_url(:id => changeset.id)
end
end
end
end
| 1 | 10,563 | This one is a query parameter and probably really should be an ID. In fact I'm mystified as to how this is working, because the controller code definitely wants a number for that parameter, so the test framework must be converting it back to an ID as best I can tell. | openstreetmap-openstreetmap-website | rb
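For illustration, a minimal hypothetical sketch of the behaviour described in the review comment above (it is not part of the test file): in a Rails functional test, passing a record where a numeric :id is expected still works because the framework serializes each parameter with #to_param, and an ActiveRecord record's to_param returns its id as a string. Passing the id explicitly makes that intent visible and avoids relying on the implicit conversion.

# Hypothetical variant of the comments_feed test, assuming the changesets fixture used above.
def test_comments_feed_explicit_id
  changeset = changesets(:normal_user_closed_change)
  # ":id => changeset" would also work, since changeset.to_param == changeset.id.to_s,
  # but the controller only ever sees the id string either way.
  get :comments_feed, :id => changeset.id, :format => "rss"
  assert_response :success
  assert_equal "application/rss+xml", @response.content_type
end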
@@ -4177,9 +4177,11 @@ RelExpr * FileScan::preCodeGen(Generator * generator,
TRUE);
if (isHiveTable())
- // assign individual files and blocks to each ESPs
- ((NodeMap *) getPartFunc()->getNodeMap())->assignScanInfos(hiveSearchKey_);
- generator->setProcessLOB(TRUE);
+ {
+ // assign individual files and blocks to each ESPs
+ ((NodeMap *) getPartFunc()->getNodeMap())->assignScanInfos(hiveSearchKey_);
+ generator->setProcessLOB(TRUE);
+ }
}
| 1 | /**********************************************************************
// @@@ START COPYRIGHT @@@
//
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
//
// @@@ END COPYRIGHT @@@
**********************************************************************/
/* -*-C++-*-
*****************************************************************************
*
* File: GenPreCode.C
* Description: Fixes up the query tree before code generation.
* This is the post-opt and pre-gen stage.
* Created: 4/15/95
* Language: C++
*
*
*****************************************************************************
*/
#define SQLPARSERGLOBALS_FLAGS // must precede all #include's
#define SQLPARSERGLOBALS_NADEFAULTS
#include "Platform.h"
#include <math.h>
#include "OperTypeEnum.h"
#include "Sqlcomp.h"
#include "GroupAttr.h"
#include "AllRelExpr.h"
#include "RelPackedRows.h"
#include "ReadTableDef.h"
#include "Generator.h"
#include "GenExpGenerator.h"
#include "dfs2rec.h"
#include "vegrewritepairs.h"
#include "exp_clause_derived.h"
#include "keycolumns.h"
#include "ValueDesc.h"
#include "BindWA.h"
#include "TriggerDB.h"
#include "Cost.h"
#include "CostMethod.h"
#include "ItmFlowControlFunction.h"
#include "UdfDllInteraction.h"
#include "StmtDDLNode.h"
#include "NATable.h"
#include "NumericType.h"
#include "CmpStatement.h"
#include "OptimizerSimulator.h"
#include "ItemFunc.h"
#include "ControlDB.h"
#include "CmpSeabaseDDL.h"
#include "NAExecTrans.h"
#include "exp_function.h"
#include "SqlParserGlobals.h" // must be last #include
extern ItemExpr * buildComparisonPred ( ItemExpr *, ItemExpr *, ItemExpr *,
OperatorTypeEnum,
NABoolean specialNulls=FALSE //++MV - Irena
);
// -----------------------------------------------------------------------
// generateKeyExpr()
//
// This method is used by the code generator for building expressions
// that are of the form <key column> = <value> for each key column.
//
// Parameters:
//
// const ValueIdSet & externalInputs
// IN : The set of values that are available here and can be
// used for replacing any wildcards that appear in the
// listOfKeyValues.
//
// const ValueIdList & listOfKeyColumns
// IN : A read-only reference to the list of key columns
// corresponding to which certain key values have
// been chosen.
//
// const ValueIdList & listOfKeyValues
// IN : A read-only reference to a list of key values that
// are chosen for the corresponding listOfKeyColumns.
// Values for missing key columns have already been
// computed and supplied in this list.
//
// ValueIdList & listOfKeyExpr
// OUT: An assignment expression of the form <key column> = <value>
// for each key column.
//
// -----------------------------------------------------------------------
static void generateKeyExpr(const ValueIdSet & externalInputs,
const ValueIdList & listOfKeyColumns,
const ValueIdList & listOfKeyValues,
ValueIdList & listOfKeyExpr,
Generator* generator,
NABoolean replicatePredicates = FALSE)
{
ItemExpr * keyExpr;
CollIndex keyCount = listOfKeyColumns.entries();
for (CollIndex keyNum = 0; keyNum < keyCount; keyNum++)
{
// Build the assignment expression.
ItemExpr *ieKeyVal = listOfKeyValues[keyNum].getItemExpr()
->replaceVEGExpressions(externalInputs, externalInputs,
FALSE, NULL, replicatePredicates);
ItemExpr *ieKeyCol = listOfKeyColumns[keyNum].getItemExpr();
ValueId KeyColId = ieKeyCol->getValueId();
keyExpr = new(generator->wHeap()) BiRelat(ITM_EQUAL,
ieKeyCol,
ieKeyVal);
// Synthesize its type and assign a ValueId to it.
keyExpr->synthTypeAndValueId();
// Insert it in the list of key expressions
listOfKeyExpr.insertAt(keyNum, keyExpr->getValueId());
} // end For Loop
} // static generateKeyExpr()
static NABoolean processConstHBaseKeys(Generator * generator,
RelExpr *relExpr,
const SearchKey *skey,
const IndexDesc *idesc,
const ValueIdSet &executorPreds,
NAList<HbaseSearchKey*> &mySearchKeys,
ListOfUniqueRows &listOfUpdUniqueRows,
ListOfRangeRows &listOfUpdSubsetRows)
{
if (! skey)
return TRUE;
// convert built-in search key to entries with constants, if possible
if (skey->areAllKeysConstants(TRUE))
{
ValueIdSet nonKeyColumnSet;
idesc->getNonKeyColumnSet(nonKeyColumnSet);
// seed keyPreds with only the full key predicate from skey
ValueIdSet keyPreds = skey->getFullKeyPredicates();
// include executorPreds and selection predicates
// but exclude the full key predicates.
ValueIdSet exePreds;
exePreds += executorPreds;
exePreds += relExpr->getSelectionPred();
exePreds.subtractSet(keyPreds);
HbaseSearchKey::makeHBaseSearchKeys(
skey,
skey->getIndexDesc()->getIndexKey(),
skey->getIndexDesc()->getOrderOfKeyValues(),
relExpr->getGroupAttr()->getCharacteristicInputs(),
TRUE, /* forward scan */
keyPreds,
nonKeyColumnSet,
idesc,
relExpr->getGroupAttr()->getCharacteristicOutputs(),
mySearchKeys);
// Include any remaining key predicates that have not been
// picked up (to be used as the HBase search keys).
exePreds += keyPreds;
TableDesc *tdesc = NULL;
if (mySearchKeys.entries()>0)
{
switch (relExpr->getOperatorType())
{
case REL_HBASE_ACCESS:
{
HbaseAccess *hba = static_cast<HbaseAccess *>(relExpr);
hba->setSearchKey(NULL);
hba->executorPred() = exePreds;
tdesc = hba->getTableDesc();
}
break;
case REL_HBASE_DELETE:
{
HbaseDelete *hbd = static_cast<HbaseDelete *>(relExpr);
hbd->setSearchKey(NULL);
hbd->beginKeyPred().clear();
hbd->endKeyPred().clear();
hbd->executorPred() = exePreds;
tdesc = hbd->getTableDesc();
}
break;
case REL_HBASE_UPDATE:
{
HbaseUpdate *hbu = static_cast<HbaseUpdate *>(relExpr);
hbu->setSearchKey(NULL);
hbu->beginKeyPred().clear();
hbu->endKeyPred().clear();
hbu->executorPred() = exePreds;
tdesc = hbu->getTableDesc();
}
break;
default:
CMPASSERT(tdesc); // unsupported operator type
break;
} // switch
relExpr->selectionPred().clear();
}
if (HbaseAccess::processSQHbaseKeyPreds(generator,
mySearchKeys,
listOfUpdUniqueRows,
listOfUpdSubsetRows))
return FALSE;
} // key uses all constants
return TRUE;
}
//
// replaceVEGExpressions1() - a helper routine for ItemExpr::replaceVEGExpressions()
//
// NOTE: The code in this routine came from the previous version of
// ItemExpr::replaceVEGExpressions(). It has been pulled out
// into a separate routine so that the C++ compiler will produce
// code that needs significantly less stack space for the
// recursive ItemExpr::replaceVEGExpressions() routine.
//
ItemExpr * ItemExpr::replaceVEGExpressions1( VEGRewritePairs* lookup )
{
// see if this expression is already in there
ValueId rewritten;
if (lookup->getRewritten(rewritten /* out */, getValueId()))
{
if (rewritten == NULL_VALUE_ID)
return NULL;
else
return rewritten.getItemExpr();
}
return (ItemExpr *)( (char *)(NULL) -1 ) ;
}
//
// replaceVEGExpressions2() - a helper routine for ItemExpr::replaceVEGExpressions()
//
// NOTE: The code in this routine came from the previous version of
// ItemExpr::replaceVEGExpressions(). It has been pulled out
// into a separate routine so that the C++ compiler will produce
// code that needs significantly less stack space for the
// recursive ItemExpr::replaceVEGExpressions() routine.
//
void ItemExpr::replaceVEGExpressions2( Int32 index
, const ValueIdSet& availableValues
, const ValueIdSet& inputValues
, ValueIdSet& currAvailableValues
, const GroupAttributes * left_ga
, const GroupAttributes * right_ga
)
{
// If we have asked that the EquiPredicate resolve
// each child of the equipred by available values from the
// respective input GAs, make sure we pick the right one.
// First we find out what GA covers the current EquiPred child
// we are processing (0 or 1), and pick the one that covers, unless
// both GAs do. If both GAs cover, then just make sure we pick a
// different one for each child. The hash join will later fix up
// the predicate expression to match its children.
// If none of the GAs covers, we have a problem...
// This fix was put in to solve solution: 10-100722-1962
ValueIdSet dummy;
NABoolean leftGaCovers = left_ga->covers(child(index)->getValueId(),
inputValues,
dummy);
NABoolean rightGaCovers = right_ga->covers(child(index)->getValueId(),
inputValues,
dummy);
if (leftGaCovers == FALSE && rightGaCovers == FALSE)
{
// for the moment it is assumed that this code is only
// executed for hash and merge joins, and in general each
// side of the expression should be covered by a child.
// So if we have neither, we have a problem ..
cout << "Unable to pick GA to use: " << getArity() << endl;
CMPASSERT(FALSE);
}
else
{
const GroupAttributes *coveringGa = NULL;
currAvailableValues.clear();
currAvailableValues += inputValues;
if (leftGaCovers && rightGaCovers)
coveringGa = (index == 0 ? left_ga : right_ga);
else
coveringGa = (leftGaCovers ? left_ga : right_ga);
currAvailableValues += coveringGa->getCharacteristicOutputs();
}
}
// -----------------------------------------------------------------------
// ItemExpr::replaceVEGExpressions()
// It performs a top-down, left-to-right tree walk in the ItemExpr tree
// and expands any wildcards (VEGReference or VEGPredicate expressions)
// by replacing them with an expression that belongs to the
// availableValues.
// IF isKeyPredicate is TRUE then the ItemExpr is a KeyPredicate:
// A KeyPredicate is of a restricted form. If we are here it is
// because the predicate is a KeyPredicate. Then, it must satisfy
// very specific characteristics (see Key::isAKeyPredicate(...))
// for instance, one of its sides must be a key column
// This method *guarantees* that a key predicate will be
// generated from the rewritten predicate (i.e. we avoid
// cases like VegRef{T1.A, 2} > 7 being generated like
// 2 > 7 when T1.A is a key column).
// -----------------------------------------------------------------------
ItemExpr * ItemExpr::replaceVEGExpressions
(const ValueIdSet& availableValues,
const ValueIdSet& inputValues,
NABoolean thisIsAnMdamKeyPredicate,
VEGRewritePairs* lookup,
NABoolean replicateExpression,
const ValueIdSet * joinInputAndPotentialOutput,
const IndexDesc * iDesc,
const GroupAttributes * left_ga,
const GroupAttributes * right_ga)
{
// ---------------------------------------------------------------------
// If this expression has already been resolved because it exists in
// availableValues, the replacement of VEGReferences is not required.
// ---------------------------------------------------------------------
if (availableValues.contains(getValueId()))
return this; // terminate processing
ItemExpr* iePtr = this;
if (lookup && replicateExpression) // if lookup table is present
{
ItemExpr* tmpIePtr = ItemExpr::replaceVEGExpressions1( lookup ) ;
if ( tmpIePtr != (ItemExpr *)( (char *)(NULL) -1 ) )
return tmpIePtr ;
};
if (replicateExpression)
iePtr = copyTopNode(0, CmpCommon::statementHeap());
// virtual copy constructor
// -----------------------------------------------------------------------
// In the case of mdam key predicates we need to be careful with
// binary operators whose child is a VegRef that contains both a
// key column and a constant because the rewrite logic for VEGRef
// favors the generation of constants over other ItemExprs. In
// MDAM we *need* to generate the key column and not the constant.
// With the gated logic below we ensure this.
// -----------------------------------------------------------------------
if (thisIsAnMdamKeyPredicate)
{
#if DEBUG
// at the moment it is assumed the left and right ga's are only
// used for hash/merge join equijoin predicates and with the
// mdamKeyPredicate flag turned off. If this assumption is no longer
// true we need to add some additional code in this "if" clause.
GENASSERT(left_ga == NULL && right_ga == NULL);
#endif
switch (getArity())
{
case 0: // const, VEGRef, and VEGPred have arity 0
break; // If it reached here it means that
// the ItemExpr does not need to do any special
// processing for this operator (i.e. a constant)
// VEG predicates should never reach here
case 1: // Example: T1.A IS NULL
{
ItemExpr *newChild;
// the child must be a key column:
newChild =
child(0)->replaceVEGExpressions(availableValues
,inputValues
,TRUE // no constants!
,lookup
,replicateExpression
,joinInputAndPotentialOutput
,iDesc
);
if (newChild != iePtr->child(0))
{
if (replicateExpression)
iePtr = iePtr->copyTopNode(NULL, CmpCommon::statementHeap());
iePtr->child(0) = newChild;
}
}
break;
case 2:
case 3:
{
// Rewrite children (one of them MUST be a key column, the
// other MUST be a constant or a host var)
ItemExpr
*leftChild = NULL,
*rightChild = NULL,
*thirdChild = NULL;
OperatorTypeEnum newOperType = getOperatorType();
if ((child(0)->getOperatorType() == ITM_VEG_REFERENCE)
OR
(child(1)->getOperatorType() == ITM_VEG_REFERENCE))
{
//---------------------------------------------------------
// Assume we have an expression of
// the form VegRef{T1.A, 2} > 7
//------------------------------------------------------
// Force the generation of a key column by
// telling replacevegexprs not to generate them:
leftChild =
child(0)->replaceVEGExpressions(availableValues
,inputValues
,TRUE // want key col
,lookup
,replicateExpression
,joinInputAndPotentialOutput
,iDesc
);
// generate a constant in this branch
rightChild =
child(1)->replaceVEGExpressions(availableValues
,inputValues
,FALSE // want constant
,lookup
,replicateExpression
,joinInputAndPotentialOutput
,iDesc
);
// However, the above will fail if the predicate is
// of the form
// 7 < VegRef{T1.A,2}, thus, if it failed, redrive with
// the roles reversed:
if (leftChild == NULL OR rightChild == NULL)
{
leftChild =
child(1)->replaceVEGExpressions(availableValues
,inputValues
,TRUE // want key col
,lookup
,replicateExpression
,joinInputAndPotentialOutput
,iDesc
);
rightChild =
child(0)->replaceVEGExpressions(availableValues
,inputValues
,FALSE // want constant
,lookup
,replicateExpression
,joinInputAndPotentialOutput
,iDesc
);
// We have reversed the operands, reverse
// the operator if it is a greater/eq BiRelat operator:
switch(getOperatorType())
{
case ITM_LESS:
case ITM_LESS_EQ:
case ITM_GREATER:
case ITM_GREATER_EQ:
// need to reverse!
newOperType =
((BiRelat*)iePtr)->getReverseOperatorType();
break;
}
} // if need to reverse operands
// now we must have succeeded!
CMPASSERT(leftChild != NULL && rightChild != NULL);
} // if one of the children of the operator is a reference
else
{
// No children are references, normal rewrite:
leftChild =
child(0)->replaceVEGExpressions(availableValues,
inputValues,
FALSE, // constants OK
lookup,
replicateExpression,
joinInputAndPotentialOutput,
iDesc);
rightChild =
child(1)->replaceVEGExpressions(availableValues,
inputValues,
FALSE, // constants OK
lookup,
replicateExpression,
joinInputAndPotentialOutput,
iDesc);
CMPASSERT(leftChild != NULL && rightChild != NULL);
}
if (getArity() == 3)
{ // rewrite the exclusion part of the PA key predicate:
thirdChild =
child(2)->replaceVEGExpressions(availableValues,
inputValues,
thisIsAnMdamKeyPredicate,
lookup,
replicateExpression,
joinInputAndPotentialOutput,
iDesc);
}
if (iePtr->child(0) != (void *)leftChild OR
iePtr->child(1) != (void *)rightChild OR
(thirdChild AND iePtr->child(2) != (void *)thirdChild) OR
iePtr->getOperatorType() != newOperType)
{
// we have to change data members, make a copy of the
// node if other users may share this node
if (replicateExpression)
iePtr = iePtr->copyTopNode(NULL, CmpCommon::statementHeap());
// Set the left and right children of the iePtr
// to their rewritten nodes:
// $$ What happens to all those nodes that were
// $$ replicated and the rewrite failed?
iePtr->child(0) = leftChild;
iePtr->child(1) = rightChild;
if (thirdChild)
iePtr->child(2) = thirdChild;
iePtr->setOperatorType(newOperType);
}
break;
} // case 2, case 3
default:
// $$ modify this when predicates of arity > 3 come into
// $$ existence
cout << "Invalid arity: " << getArity() << endl;
CMPASSERT(FALSE); // No predicates of arity > 3 (so far)
}
}
else // ItemExpr is not an mdam key predicate, go ahead with the rewrite:
for (Lng32 index = 0; index < getArity(); index++)
{
ValueIdSet currAvailableValues(availableValues);
if (left_ga != NULL &&
right_ga != NULL &&
getArity() == 2 )
{
ItemExpr::replaceVEGExpressions2( index
, availableValues
, inputValues
, currAvailableValues
, left_ga
, right_ga
) ;
}
ItemExpr *newChild = child(index)->replaceVEGExpressions(
currAvailableValues,
inputValues,
FALSE, // this is not a key predicate
lookup,
replicateExpression,
joinInputAndPotentialOutput,
iDesc);
if ( newChild->isPreCodeGenNATypeChanged())
iePtr->setpreCodeGenNATypeChangeStatus();
// is the result a different ItemExpr or does iePtr not point to
// the (possibly unchanged) result yet?
if (iePtr->child(index) != (void *)newChild)
{
if (iePtr == this AND replicateExpression)
{
// don't change "this" if it may be shared, make a
// copy instead and also copy the unchanged children
// so far
iePtr = iePtr->copyTopNode(NULL, CmpCommon::statementHeap());
for (Int32 j = 0; j < index; j++)
iePtr->child(j) = this->child(j);
}
iePtr->child(index) = newChild;
}
}
if(lookup && replicateExpression && iePtr != this)
{
iePtr->synthTypeAndValueId(FALSE);
lookup->insert(getValueId(), iePtr->getValueId());
}
return iePtr;
} // ItemExpr::replaceVEGExpressions()
// -----------------------------------------------------------------------
// ValueIdUnion::replaceVEGExpressions()
// The parameter replicateExpression is ignored because the
// ValueIdUnion implements a special policy for rewriting
// an ItemExpr, in that it manages three sets of values.
// -----------------------------------------------------------------------
ItemExpr * ValueIdUnion::replaceVEGExpressions
(const ValueIdSet& availableValues,
const ValueIdSet& inputValues,
NABoolean thisIsAnMdamKeyPredicate,
VEGRewritePairs* lookup,
NABoolean replicateExpression,
const ValueIdSet * joinInputAndPotentialOutput,
const IndexDesc * iDesc,
const GroupAttributes * left_ga,
const GroupAttributes * right_ga)
{
CMPASSERT(NOT thisIsAnMdamKeyPredicate); // sanity check
// we are ignoring the replicateExpression and
// joinInputAndPotentialOutput flags ..
ValueIdUnion * viduPtr = (ValueIdUnion *)this;
// ---------------------------------------------------------------------
// If this expression has already been resolved because it exists in
// availableValues, the replacement of VEGExpressions is not required.
// ---------------------------------------------------------------------
if (availableValues.contains(getValueId()) )
return this;
for(CollIndex i = 0; i < entries(); i++) {
viduPtr->
#pragma nowarn(1506) // warning elimination
setSource(i,
#pragma warn(1506) // warning elimination
#pragma nowarn(1506) // warning elimination
(viduPtr->getSource(i).getItemExpr()
#pragma warn(1506) // warning elimination
->replaceVEGExpressions(availableValues,inputValues,
thisIsAnMdamKeyPredicate,lookup,
FALSE, /* replicateExpression default */
NULL,/*joinInputAndPotentialOutput default*/
iDesc,
left_ga,
right_ga))
->getValueId());
}
// If the result is not this ValueIdUnion
if (viduPtr->getResult() != viduPtr->getValueId())
viduPtr->setResult((viduPtr->getResult().getItemExpr()
->replaceVEGExpressions(availableValues,
inputValues,
thisIsAnMdamKeyPredicate,
lookup,
FALSE,/*replicateExpression*/
NULL, /*joinInputAndPotentialOutput*/
iDesc,
left_ga,
right_ga))
->getValueId());
return this;
} // ValueIdUnion::replaceVEGExpressions()
// -----------------------------------------------------------------------
// VEGPredicate::replaceVEGExpressions()
// The parameter replicateExpression is ignored because the
// VEGPredicate implements a special policy for rewriting
// an ItemExpr. The policies are implemented by replaceVEGPredicate().
// -----------------------------------------------------------------------
ItemExpr * VEGPredicate::replaceVEGExpressions
(const ValueIdSet& availableValues,
const ValueIdSet& inputValues,
NABoolean /* thisIsAnMdamKeyPredicate*/,
VEGRewritePairs* lookup,
NABoolean /*replicateExpression*/,
const ValueIdSet * joinInputAndPotentialOutput,
const IndexDesc * iDesc,
const GroupAttributes * /* left_ga */,
const GroupAttributes * /* right_ga */)
{
// we ignore the thisIsAnMdamKeyPredicate flag, and so we also ignore the
// iDesc for VEGPredicates. No need to guarantee a keyColumn.
return replaceVEGPredicate(availableValues,inputValues,lookup,joinInputAndPotentialOutput);
} // VEGPredicate::replaceVEGExpressions()
// -----------------------------------------------------------------------
// VEGReference::replaceVEGExpressions()
// The parameter replicateExpression is ignored because the
// VEGReference implements a special policy for rewriting
// an ItemExpr. The policies are implemented by replaceVEGReference().
// -----------------------------------------------------------------------
ItemExpr * VEGReference::replaceVEGExpressions
(const ValueIdSet& availableValues,
const ValueIdSet& inputValues,
NABoolean thisIsAnMdamKeyPredicate,
VEGRewritePairs* /*lookup*/,
NABoolean /*replicateExpression*/,
const ValueIdSet * joinInputAndPotentialOutput,
const IndexDesc * iDesc,
const GroupAttributes * /* left_ga */ ,
const GroupAttributes * /* right_ga */ )
{
// we ignore the replicateExpression, lookup and
// joinInputAndPotentialOutput parameters.
return replaceVEGReference(availableValues,inputValues,
thisIsAnMdamKeyPredicate, iDesc);
} // VEGReference::replaceVEGExpressions()
// -----------------------------------------------------------------------
// ItemExpr::replaceOperandsOfInstantiateNull()
// This method is used by the code generator for replacing the
// operands of an ITM_INSTANTIATE_NULL with a value that belongs
// to availableValues.
// -----------------------------------------------------------------------
void ItemExpr::replaceOperandsOfInstantiateNull(
const ValueIdSet & availableValues,
const ValueIdSet & inputValues)
{
switch (getOperatorType())
{
case ITM_INSTANTIATE_NULL:
{
child(0) = child(0)->replaceVEGExpressions(availableValues,inputValues);
break;
}
default:
{
for (Lng32 i = 0; i < getArity(); i++)
{
child(i) = child(i)->replaceVEGExpressions(availableValues,
inputValues);
}
break;
}
}
} // ItemExpr::replaceOperandsOfInstantiateNull()
// -----------------------------------------------------------------------
// VEG::setBridgeValue()
// -----------------------------------------------------------------------
void VEG::setBridgeValue(const ValueId & bridgeValueId)
{
bridgeValues_ += bridgeValueId;
} // VEG::setBridgeValue()
// -----------------------------------------------------------------------
// VEG::markAsReferenced()
// Add a member of the set to the referenced values set to indicate
// that it has been used (at least once) in a "=" predicate that
// was generated by the code generator.
// -----------------------------------------------------------------------
void VEG::markAsReferenced(const ValueId & vid)
{
referencedValues_ += vid;
switch (vid.getItemExpr()->getOperatorType())
{
case ITM_INDEXCOLUMN:
// Also add the ValueId of the column from the base table, which is
// used as the key column for an index.
referencedValues_ += ((IndexColumn *)(vid.getItemExpr()))
->getDefinition();
break;
default:
break;
}
} // VEG::markAsReferenced()
// -----------------------------------------------------------------------
// VEGPredicate::replaceVEGPredicate
//
// This method is used by the code generator for replacing a
// reference to a VEGPredicate with a tree of equality predicates.
// Each equality predicate is between two values that belong to
// the VEG as well as to availableValues.
//
// Terminology :
// ***********
// VEG
// A ValueId Equality Group. It is a set of values such that its members
// have an equality predicate specified on them.
//
// availableValues
// This is the set of values that are available at the relational operator
// with which the VEGPredicate is associated. It is usually the set union
//    of the Characteristic Inputs of the operator with the Characteristic
// Outputs of each of its children.
//
// inputValues
// This is the set of values that is being provided to this node
// from above, and therefore is constant for each invocation of
// the operator when executing.
//    These are good values to use to build key predicates.
//
// bridgeValues
// This is a set of values for which "=" predicates MUST be generated
// for correctness as well as to guarantee that transitivity is upheld.
// For example, the following query:
//
// select ax, by, cx, dy
// from (select A.x, B.y from A join B on A.x = B.y) T1(ax,by)
// join (select C.x, D.y from C join D on C.x = D.y) T2(cx,dy)
// on T1.ax = T2.cx
//
// shows two "islands" (self-contained pool of rows) defined by the
// derived tables T1 and T2 respectively. It is possible to deduce
// that A.x = D.y only after the predicate A.x = C.x has been applied.
// The values A.x, C.x establish the transitivity between the two
// islands. Such values are called inter-island links or bridge values.
//
// referencedValues
// A subset of the members of the VEG. Each member in this set is
// referenced in at least one "=" predicate that was generated by
// a call to replaceVEGPredicate.
//
// unboundValues
// The unbound values of a VEG are those that require an "="
//    predicate to be generated between them. They are given by
// bridge values union available values intersect members of the VEG.
//
// Note that if the outputs of the join have already been resolved then
// joinInputAndPotentialOutput should really be joinInputAndOutputValues.
// All potential output values are no longer available, only the resolved
// values. Please see similar comment in Hashjoin::PrecodeGen.
// -----------------------------------------------------------------------
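// Illustrative sketch (added for clarity; the names are hypothetical):
// suppose the VEG has members {T1.a, T2.b, T3.c, 5} and all of them appear
// in availableValues. If the constant 5 is chosen as the invariant below,
// the generated tree is roughly
//   (T1.a = 5) AND (T2.b = 5) AND (T3.c = 5)
// each compared member is marked as referenced, and the invariant becomes
// the new bridge value. Which members are actually compared depends on the
// referenced/bridge bookkeeping described above.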
ItemExpr * VEGPredicate::replaceVEGPredicate(const ValueIdSet& origAvailableValues,
const ValueIdSet& origInputValues,
VEGRewritePairs* lookup,
const ValueIdSet * joinInputAndPotentialOutput)
{
// If we want processing to be idempotent, check to see if we have
// already rewritten this VEGPredicate. If so, return the rewritten
// result.
if (lookup) // if lookup table is present
{
// see if this expression is already in there
ValueId rewritten;
if (lookup->getRewritten(rewritten /* out */,getValueId()))
{
if (rewritten == NULL_VALUE_ID)
return NULL;
else
return rewritten.getItemExpr();
}
};
// We assume that inputValues is a (perhaps improper) subset of
// available values. Verify this.
ValueIdSet scratchPad;
scratchPad = origInputValues;
scratchPad -= origAvailableValues;
GenAssert(scratchPad.isEmpty(),"NOT scratchPad.isEmpty()");
// Replace VEGReferences in the members of this VEG.
// Copy values in the set and expand wild cards in the copy.
ValueIdSet vegMembers;
vegMembers.replaceVEGExpressionsAndCopy(getVEG()->getAllValues());
// Constants are not passed as input values but they are available.
// Have availableValues and availableInputs contain the VEG members
// that are constant values.
ValueIdSet availableValues = origAvailableValues;
ValueIdSet inputValues = origInputValues;
ValueIdSet vegConstants;
vegMembers.getConstants(vegConstants);
availableValues += vegConstants;
inputValues += vegConstants;
// If each member of this VEG is referenced in at least one "=" predicate
// that was generated here and there is only one "unbound" value remaining,
// then we are done. Terminate the generation of more "=" predicates.
if ( (vegMembers == getVEG()->getReferencedValues())
AND (getVEG()->getBridgeValues().entries() < 2) )
return NULL;
ItemExpr * rootPtr = NULL;
// We can only bind those values that are available here.
ValueIdSet valuesToBeBound = vegMembers;
valuesToBeBound.intersectSet(availableValues);
ValueIdSet unReferencedValues = vegMembers;
unReferencedValues -= getVEG()->getReferencedValues();
// Compute the set of values that are available, but
// are already referenced and are not a bridge value.
scratchPad = valuesToBeBound;
scratchPad -= unReferencedValues;
scratchPad -= getVEG()->getBridgeValues();
valuesToBeBound -= scratchPad;
// look for an invariant among the input values
ValueIdSet vegInputs = valuesToBeBound;
vegInputs.intersectSet(inputValues);
// If we didn't have any input values that were a member of the
// VEG then pick the invariant from the bridge Values
if (vegInputs.isEmpty())
{
vegInputs = valuesToBeBound;
vegInputs.intersectSet(getVEG()->getBridgeValues());
}
// If no input values are part of the VEG and there are
// no available bridge value then just pick any of the
// remaining (unreferenced) values
if (vegInputs.isEmpty())
{
vegInputs = valuesToBeBound;
}
// look for an invariant value
ValueId iterExprId, invariantExprId;
NABoolean invariantChosen = FALSE;
if (NOT vegInputs.isEmpty())
{
for (invariantExprId = vegInputs.init();
vegInputs.next(invariantExprId);
vegInputs.advance(invariantExprId))
{
// check if the item expr is a non-strict constant.
// A strict constant is something like cos(1),
// whereas cos(?p) can be considered a constant
// in the non-strict definition since it remains
// constant for a given execution of a query - Solution 10-020912-1647
if (invariantExprId.getItemExpr()->doesExprEvaluateToConstant(FALSE))
{
invariantChosen = TRUE;
break;
}
} // endfor
// if invariantExprId does not contain the ValueId of a constant value,
// then it must be initialized to contain any one value from
// the input values.
if (NOT invariantChosen)
{
if (vegInputs.entries() <= 1)
vegInputs.getFirst(invariantExprId);
else {
// The EXISTS query reported in case 10-091027-8459, soln
// 10-091028-5770 exposed a flaw in this code that used to
// implicitly assume that the first element of vegInputs is
// always a valid choice for an invariantExprId. When replacing
// a semijoin's VEGPredicate, the invariantExprId must be a
// member of that semijoin's characteristic output. Otherwise,
// *Join::preCodeGen hjp.replaceVEGExpressions() will silently
// delete that equijoin predicate and incorrectly generate a
// cartesian product.
scratchPad = vegInputs;
if (joinInputAndPotentialOutput) {
// for an outer join, joinInputAndPotentialOutput will have
// instantiate_null wrappers. intersectSetDeep digs into
// those wrappers.
scratchPad.intersectSetDeep(*joinInputAndPotentialOutput);
}
#ifdef _DEBUG
// we want to GenAssert here but regress/core/test027 raises
// a false alarm. So, for now, we don't.
// GenAssert(!scratchPad.isEmpty(),"vegInputs.isEmpty()");
#endif
if (scratchPad.isEmpty())
vegInputs.getFirst(invariantExprId);
else
scratchPad.getFirst(invariantExprId);
}
}
// remove it from further consideration
valuesToBeBound -= invariantExprId;
} // endif (NOT vegInputs.isEmpty())
else // have no values
{
// The predicate pushdown logic places predicates on those
// operators where it knows that values will be available
// for evaluating the predicate.
// If you have reached this point because of a bug,
// ****************************************************************
// DO NOT EVEN CONSIDER COMMENTING OUT THE FOLLOWING ASSERT.
// ****************************************************************
GenAssert(NOT valuesToBeBound.isEmpty(),"valuesToBeBound.isEmpty()");
// ****************************************************************
// YOU WILL BE DELIBERATELY MASKING OUT A SERIOUS BUG IF YOU
// DISABLE THE ASSERT STATEMENT ABOVE. DON'T TOUCH IT!
// ****************************************************************
}
if (valuesToBeBound.entries() >= 1)
{
// Replace this reference to the VEG with a tree of '=' predicates.
for (iterExprId = valuesToBeBound.init();
valuesToBeBound.next(iterExprId);
valuesToBeBound.advance(iterExprId))
{
rootPtr = buildComparisonPred
( rootPtr, iterExprId.getItemExpr(),
invariantExprId.getItemExpr(), ITM_EQUAL,
getSpecialNulls() //++MV - Irena
);
getVEG()->markAsReferenced(iterExprId);
}
}
else
{
// We have only the invariant. Generate an IS NOT NULL if it
// is nullable and has not been compared with someone else.
// MVs:
// If specialNulls option is set, nulls are values (null=null)
// and ITM_IS_NOT_NULL filters out some valid rows also.
// For more info on specialNulls -- see <ItemOther.h>
if (NOT getVEG()->getReferencedValues().contains(invariantExprId) &&
invariantExprId.getType().supportsSQLnull() &&
NOT getVEG()->getVEGPredicate()->getSpecialNulls() // ++MV - Irena
)
{
rootPtr = new(CmpCommon::statementHeap())
UnLogic(ITM_IS_NOT_NULL, invariantExprId.getItemExpr());
}
}
// mark as referenced the invariant. Make it the Bridge value
getVEG()->markAsReferenced(invariantExprId);
getVEG()->removeBridgeValues(valuesToBeBound);
getVEG()->setBridgeValue(invariantExprId);
// Assign a ValueId to the "=" and synthesize the type for the expression.
if (rootPtr != NULL) {
rootPtr->synthTypeAndValueId();
// If there is a lookup table, enter the rewritten tree in the table
if (lookup)
{
if (rootPtr)
lookup->insert(getValueId(),rootPtr->getValueId());
else
lookup->insert(getValueId(),NULL_VALUE_ID);
}
}
// Return the tree of '=' predicates (or NULL)
return rootPtr;
} // VEGPredicate::replaceVEGPredicate()
// -----------------------------------------------------------------------
// VEGReference::replaceVEGReference
// This method is used by the code generator for replacing a
// VEGReference with one of its candidate values.
// thisIsAnMdamKeyPredicate is FALSE by default. However, when
// key predicates are being rewritten, it should be set to TRUE
// when we need to guarantee that a key column must be generated by
// the veg reference.
// In this case, bridge values MUST NOT be used because we need to pick
// either a constant or a key column, depending on the child we are
// working on (see ItemExpr::replaceVEGExpressions(...)).
// -----------------------------------------------------------------------
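// Illustrative sketch (added for clarity; the names are hypothetical): for
// a VEGReference over {T1.a, T2.b, 5} with T2.b among the available values
// and thisIsAnMdamKeyPredicate == FALSE, the code below typically returns
// the item expression of the constant 5, since non-strict constants are
// preferred; otherwise it returns an input or bridge member such as T2.b.
// When thisIsAnMdamKeyPredicate is TRUE, it returns the first key column of
// iDesc that is among the values to be bound, or NULL if there is none.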
ItemExpr *
VEGReference::replaceVEGReference(const ValueIdSet &origAvailableValues,
const ValueIdSet &origInputValues,
NABoolean thisIsAnMdamKeyPredicate,
const IndexDesc *iDesc)
{
ItemExpr *result = NULL;
#ifndef _DEBUG
const NABoolean VEG_DEBUG = FALSE;
#else
NABoolean VEG_DEBUG = getenv("VEG_DEBUG") != NULL;
#endif
// We assume that inputValues is a (perhaps improper) subset of
// available values. Verify it.
ValueIdSet scratchPad;
scratchPad = origInputValues;
scratchPad -= origAvailableValues;
GenAssert(scratchPad.isEmpty(),"NOT scratchPad.isEmpty()");
// Copy values in the set and expand wild cards in the copy.
ValueIdSet valuesToBeBound;
valuesToBeBound.replaceVEGExpressionsAndCopy(getVEG()->getAllValues());
// Constants are not passed as input values but they are available
// Have availableValues and availableInputs contain the VEG members
// that are constant values
ValueIdSet availableValues = origAvailableValues;
ValueIdSet inputValues = origInputValues;
// --------------------------------------------------------------------
// Don't add constants if the caller doesn't want them to be generated
// from this vegref (i.e. when thisIsAnMdamKeyPredicate is TRUE)
// --------------------------------------------------------------------
ValueIdSet vegConstants;
valuesToBeBound.getConstants(vegConstants);
if (NOT thisIsAnMdamKeyPredicate)
{
availableValues += vegConstants;
inputValues += vegConstants;
}
if (VEG_DEBUG)
{
NAString av,iv,vb;
availableValues.unparse(av);
inputValues.unparse(iv);
valuesToBeBound.unparse(vb);
cout << endl;
cout << "VEGReference " << getValueId() << " :" << endl;
cout << "AV: " << av << endl;
cout << "IV: " << iv << endl;
cout << "VB: " << vb << endl;
}
// -----------------------------------------------------------------------
//
// The commented out code implements a different resolution strategies
// for VEGReference. Inputs are no longer favored. This is in order to
// handle peculiar scenario where a predicate is not pushed down to the
// right hand side of a NJ even if it's covered because of the special
// semantics of the NJ itself (left join). The inputs from the operators
// in the right leg of the NJ shouldn't be used to resolve the output
// values since the VEGPred which relates the two hasn't been evaluated.
//
// This support is not ready yet for FCS, and therefore the code has been
// commented out.
// -----------------------------------------------------------------------
#if 0
// non-input available values:
ValueIdSet nonInputAvailableValues = availableValues;
nonInputAvailableValues -= inputValues;
#endif
// We can only bind those values that are available here.
valuesToBeBound.intersectSet(availableValues);
#if 0
// try using nonInputAvailableValues first.
ValueIdSet nonInputValuesToBeBound = valuesToBeBound;
nonInputValuesToBeBound.intersectSet(nonInputAvailableValues);
// try not to use input values since some predicate might not have
// be evaluated yet.
if ( (NOT thisIsAnMdamKeyPredicate) AND
(NOT nonInputValuesToBeBound.isEmpty()) )
{
// Try to pick a bridge value.
ValueIdSet candidateValues = nonInputValuesToBeBound;
candidateValues.intersectSet(getVEG()->getBridgeValues());
// If unsuccessful, try to pick any of the remaining unreferenced.
if (candidateValues.isEmpty())
{
candidateValues = nonInputValuesToBeBound;
}
CMPASSERT(NOT candidateValues.isEmpty());
ValueId resultVid;
candidateValues.getFirst(resultVid);
return resultVid.getItemExpr();
}
#endif
if (thisIsAnMdamKeyPredicate )
{
GenAssert(iDesc != NULL,"VEGReference::replaceVEGReference: Mdam KeyPredicates flag requires an iDesc to go with");
if (iDesc != NULL)
{
ValueIdSet keyCols = iDesc->getIndexKey();
for (ValueId exprId = keyCols.init();
keyCols.next(exprId);
keyCols.advance(exprId))
{
// pick the first value - assuming it is the key column..
if (valuesToBeBound.contains(exprId))
{
result = exprId.getItemExpr();
break;
}
}
}
if (result && NOT (result->getValueId().getType() == getValueId().getType()) )
result->setpreCodeGenNATypeChangeStatus();
return result; // A null is fine here.
}
// look for an invariant among the input values
ValueIdSet vegInputs = valuesToBeBound;
vegInputs.intersectSet(inputValues);
// If we didn't have any input values that were a member of the
// VEG then pick the invariant from the bridge Values
// Do not use bridge values for key predicates:
if ((NOT thisIsAnMdamKeyPredicate) && vegInputs.isEmpty())
{
vegInputs = valuesToBeBound;
vegInputs.intersectSet(getVEG()->getBridgeValues());
if (VEG_DEBUG)
{
NAString vb,br;
valuesToBeBound.unparse(vb);
// Stupid, ValueIdSet::unparse should be declared const;
// for now, just cast away constness...
ValueIdSet(getVEG()->getBridgeValues()).unparse(br);
cout << "VB: " << vb << endl;
cout << "BR: " << br << endl;
}
}
// If no input values are part of the VEG and there are
// no available bridge value then just pick any of the
// remaining (unreferenced) values
if (vegInputs.isEmpty())
{
vegInputs = valuesToBeBound;
}
// look for a constant value
ValueId invariantExprId;
NABoolean invariantChosen = FALSE;
if (NOT vegInputs.isEmpty())
{
for (invariantExprId = vegInputs.init();
vegInputs.next(invariantExprId);
vegInputs.advance(invariantExprId))
{
// check if the item expr is a non-strict constant.
// A strict constant is something like cos(1),
// whereas cos(?p) can be considered a constant
// in the non-strict definition since it remains
// constant for a given execution of a query - Solution 10-020912-1647
if (invariantExprId.getItemExpr()->doesExprEvaluateToConstant(FALSE))
{
invariantChosen = TRUE;
break;
}
} // endfor
// if invariantExprId does not contain the ValueId of a constant value,
// then it must be initialized to contain any one value from
// the input values.
if (NOT invariantChosen)
{
vegInputs.getFirst(invariantExprId);
}
// we found the invariant assign it!
result = invariantExprId.getItemExpr();
CMPASSERT(result != NULL);
} // endif (NOT vegInputs.isEmpty())
else // have no values
{
// It is ok for an MDAM key pred to not have valuesToBeBound because
// this is how ItemExpr::replaceVEGExpressions guarantees the generation of
// key predicates. It expects a NULL pointer sometimes
if (NOT thisIsAnMdamKeyPredicate)
{
// If there is a VEGReference to the value then a member of
// the VEG should be available.
GenAssert(NOT valuesToBeBound.isEmpty(),"valuesToBeBound.isEmpty()");
}
}
// result can be NULL only if thisIsAnMdamKeyPredicate is TRUE (see note above)
if (NOT thisIsAnMdamKeyPredicate)
{
CMPASSERT(result);
}
if (VEG_DEBUG)
{
// coverity cid 10004 thinks result may be null but we know it is not.
// coverity[var_deref_model]
cout << "Result: " << result->getValueId() << endl;
}
// see if NAType has changed, if so need to rebind it
if (result && NOT (result->getValueId().getType() == getValueId().getType()) )
{
result->setpreCodeGenNATypeChangeStatus();
}
return result;
} // VEGReference::replaceVEGReference()
// -----------------------------------------------------------------------
// RelExpr::getOutputValuesOfMyChildren()
// Accumulates the characteristic outputs of all my children for
// operators that have one or more children. Returns the
// potential output values for operators that can have no children.
// -----------------------------------------------------------------------
void RelExpr::getOutputValuesOfMyChildren(ValueIdSet & vs) const
{
ValueIdSet valueMask;
Lng32 nc = getArity();
if (nc > 0)
{
for (Lng32 i = 0; i < nc; i++)
{
valueMask += child(i)->getGroupAttr()->getCharacteristicOutputs();
}
}
else // if leaf operators, use all available values
{
getPotentialOutputValues(valueMask);
}
// Copy values in the set and expand wild cards in the copy.
vs.clear();
vs.replaceVEGExpressionsAndCopy(valueMask);
} // RelExpr::getOutputValuesOfMyChildren()
// -----------------------------------------------------------------------
// RelExpr::getInputValuesFromParentAndChildren()
// Uses getOutputValuesOfMyChildren() to collect the output values
// and adds the characteristic input values of this operator to them.
// -----------------------------------------------------------------------
void RelExpr::getInputValuesFromParentAndChildren(ValueIdSet & vs) const
{
getOutputValuesOfMyChildren(vs);
vs += getGroupAttr()->getCharacteristicInputs();
} // RelExpr::getInputValuesFromParentAndChildren()
// -----------------------------------------------------------------------
// RelExpr::getInputAndPotentialOutputValues()
// Uses getPotentialOutputs() to collect the output values
// and adds the characteristic input values of this operator to them.
// -----------------------------------------------------------------------
void RelExpr::getInputAndPotentialOutputValues(ValueIdSet & vs) const
{
ValueIdSet potentialOutputValues;
getPotentialOutputValues(potentialOutputValues);
potentialOutputValues += getGroupAttr()->getCharacteristicInputs();
vs.clear();
vs.replaceVEGExpressionsAndCopy(potentialOutputValues);
} // RelExpr::getInputAndPotentialOutputValues()
// -----------------------------------------------------------------------
// GenericUpdate::getInputValuesFromParentAndChildren()
// -----------------------------------------------------------------------
void GenericUpdate::getInputValuesFromParentAndChildren(ValueIdSet & vs) const
{
ValueIdSet updTableCols;
ValueIdSet vs2;
updTableCols.insertList (getIndexDesc()->getIndexColumns());
// updTableCols.insertList(getTableDesc()->getColumnVEGList());
vs2.replaceVEGExpressionsAndCopy(updTableCols);
getOutputValuesOfMyChildren(vs);
vs += getGroupAttr()->getCharacteristicInputs();
vs += vs2;
} // GenericUpdate::getInputValuesFromParentAndChildren()
// -----------------------------------------------------------------------
// HbaseDelete::getInputValuesFromParentAndChildren()
// -----------------------------------------------------------------------
void HbaseDelete::getInputValuesFromParentAndChildren(ValueIdSet & vs) const
{
// Do not include IndexColumn as the input values. Otherwise, we will
// have duplicated predicates in Executor predicate in HbaseDelete.
getOutputValuesOfMyChildren(vs);
vs += getGroupAttr()->getCharacteristicInputs();
} // HbaseDelete::getInputValuesFromParentAndChildren()
// -----------------------------------------------------------------------
// RelExpr::preCodeGen()
//
// RelExpr * result
// OUT: a node that calls preCodeGen for its child should replace
// that child with the result value. This allows preCodeGen
// to transform the RelExpr tree. Examples for such trans-
// formations are additional exchange nodes for repartitioning.
// Generator * generator
// INOUT: a global work area with useful helper methods
// const ValueIdSet & externalInputs
// IN: a value id set with values that already have been
// replaced such that they don't contain VEGies any more.
// Use this set to replace VEGies for expressions that depend
// on the characteristic inputs of the node.
// ValueIdSet & pulledNewInputs
// OUT: a set of value ids that the node wants to add to its
// characteristic inputs ("pull" from its parent). There are
// several cases in which we need to add value ids to
// characteristic inputs during preCodeGen:
// a) partition input variables for parallel execution,
// b) the COMMON datetime function which needs to be generated
// by the root node,
// c) an "open cursor timestamp" that helps a materialize node
// to decide whether it can reuse its materialized table.
// -----------------------------------------------------------------------
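// A minimal sketch of the calling pattern implied by this contract (added
// for clarity): a parent node typically does
//
//   ValueIdSet childPulledInputs;
//   child(i) = child(i)->preCodeGen(generator, externalInputs,
//                                   childPulledInputs);
//   getGroupAttr()->addCharacteristicInputs(childPulledInputs);
//   pulledNewInputs += childPulledInputs;
//
// which is what the default implementation below does for each child.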
RelExpr * RelExpr::preCodeGen(Generator * generator,
const ValueIdSet & externalInputs,
ValueIdSet &pulledNewInputs)
{
if (nodeIsPreCodeGenned())
return this;
// Check if the pivs of this operator and its child are the same.
// If they are not, make them the same.
replacePivs();
// Resolve the VEGReferences and VEGPredicates, if any, that appear
// in the Characteristic Inputs, in terms of the externalInputs.
getGroupAttr()->resolveCharacteristicInputs(externalInputs);
// My Characteristic Inputs become the external inputs for my children.
Int32 nc = getArity();
for (Int32 index = 0; index < nc; index++)
{
ValueIdSet childPulledInputs;
child(index) = child(index)->preCodeGen(generator,
externalInputs,
childPulledInputs);
if (! child(index).getPtr())
return NULL;
// process additional input value ids the child wants
getGroupAttr()->addCharacteristicInputs(childPulledInputs);
pulledNewInputs += childPulledInputs;
}
// The VEG expressions in the selection predicates and the characteristic
// outputs can reference any expression that is either a potential output
// or a characteristic input for this RelExpr. Supply these values for
// rewriting the VEG expressions.
ValueIdSet availableValues;
getInputAndPotentialOutputValues(availableValues);
// Rewrite the selection predicates.
NABoolean replicatePredicates = TRUE;
selectionPred().replaceVEGExpressions
(availableValues,
getGroupAttr()->getCharacteristicInputs(),
FALSE, // no need to generate key predicates here
0 /* no need for idempotence here */,
replicatePredicates
);
getGroupAttr()->resolveCharacteristicOutputs
(availableValues,
getGroupAttr()->getCharacteristicInputs());
markAsPreCodeGenned();
return this;
} // RelExpr::preCodeGen
//
// Recursively call the method on each RelExpr node, accumulating
// # of rows from each node.
//
void RelExpr::prepareDopReduction(Generator* generator)
{
pcgEspFragment* currentEspFragmentPCG = generator->getCurrentEspFragmentPCG();
if ( currentEspFragmentPCG )
currentEspFragmentPCG->accumulateRows(getEstRowsUsed());
Int32 nc = getArity();
for (Lng32 i = 0; i < nc; i++)
{
child(i)->prepareDopReduction(generator);
}
}
void Exchange::prepareDopReduction(Generator* generator)
{
pcgEspFragment* parentEspFragPCG = generator->getCurrentEspFragmentPCG();
//
// Save the current pcg fragment and add myself as the child to it.
//
if ( parentEspFragPCG ) {
parentEspFragPCG->accumulateRows(getEstRowsUsed());
parentEspFragPCG->addChild(this);
}
//
// Let the global pointer point at my pcg esp fragment (for the
// fragment rooted at me). Do this only for above-DP2 Exchanges.
// Note a PA is represented by an Exchange with "execute in Master or ESP"
// as location. So a PA exchange with a SCAN as a child will have an empty
// childPcgEsp array.
//
generator->setCurrentEspFragmentPCG
(
(getPhysicalProperty()->getPlanExecutionLocation() != EXECUTE_IN_DP2)
? getEspFragPCG() : NULL
);
child(0)->prepareDopReduction(generator);
//
// Restore the pcg esp fragment
//
generator->setCurrentEspFragmentPCG(parentEspFragPCG);
// Try to reduce the dop and if it fails, invalidate any proposed
// dop reductions for this.
//
if ( parentEspFragPCG &&
parentEspFragPCG ->tryToReduceDoP() == FALSE )
parentEspFragPCG->invalidate();
}
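// Added note: DoP reduction is a two-pass process. prepareDopReduction()
// walks the tree and accumulates estimated row counts into the current ESP
// fragment; when an Exchange finishes with its subtree it calls
// tryToReduceDoP() on the parent fragment to propose a lower degree of
// parallelism, and invalidate()s the proposal if that attempt fails.
// RelRoot::preCodeGen (below) drives this and then calls
// RelExpr::doDopReduction() to apply the surviving proposals.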
RelExpr * RelRoot::preCodeGen(Generator * generator,
const ValueIdSet & /* externalInputs */,
ValueIdSet &pulledNewInputs)
{
if (nodeIsPreCodeGenned())
return this;
// For all the inputVars, if one has an UNKNOWN data type, make it a
// varchar type. This is an SQL/MP extension. Example query:
// select ?p1 from any-table;
if (isTrueRoot()) {
CollIndex i;
ValueId vid;
ValueIdList vidList = inputVars();
for ( i=0; i < vidList.entries(); i++ )
if ((vid=vidList[i]).getType().getTypeQualifier() == NA_UNKNOWN_TYPE) {
vid.coerceType(NA_CHARACTER_TYPE);
}
}
// if root has GET_N indication set, insert a FirstN node.
// Usually this transformation is done in the binder, but in
// some special cases it is not.
// For example, if there is an 'order by' in the query, then
// the Sort node is added by the optimizer. In this case, we
// want to add the FirstN node on top of the Sort node and not
// below it. If we add the FirstN node in the binder, the optimizer
// will add the Sort node on top of the FirstN node. Maybe we
// can teach the optimizer to do this.
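// Hypothetical example (added for clarity): for a query such as
//   select [first 10] * from t order by a;
// the optimizer adds the Sort node for the ORDER BY, and the FirstN node
// created below is placed on top of that Sort, so only the first 10 rows
// of the sorted result are returned.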
if ((getFirstNRows() != -1) || (getFirstNRowsParam()))
{
RelExpr * firstn = new(generator->wHeap()) FirstN(child(0),
getFirstNRows(),
getFirstNRowsParam());
// move my child's attributes to the firstN node.
// Estimated rows will be mine.
firstn->setEstRowsUsed(getEstRowsUsed());
firstn->setMaxCardEst(getMaxCardEst());
firstn->setInputCardinality(child(0)->getInputCardinality());
firstn->setPhysicalProperty(child(0)->getPhysicalProperty());
firstn->setGroupAttr(child(0)->getGroupAttr());
// 10-060516-6532 -Begin
// When the FIRSTN node is created after the optimization phase, the cost
// of that node does not matter. But display_explain and explain
// show zero operator cost and rollup cost, which confuses the user.
// Also, the VQP crashes when the cost tab for the FIRSTN node is selected.
// So, creating a cost object will fix this.
// The operator cost is zero and the rollup cost is the same as its child's.
Cost* firstnNodecost = new HEAP Cost();
firstn->setOperatorCost(firstnNodecost);
Cost* rollupcost = (Cost *)(child(0)->getRollUpCost());
*rollupcost += *firstnNodecost;
firstn->setRollUpCost(rollupcost);
//10-060516-6532 -End
setChild(0, firstn);
// reset firstN indication in the root node.
setFirstNRows(-1);
setFirstNRowsParam(NULL);
}
if (isTrueRoot())
{
// Set the internal format to use for the plan being generated ...
// Checks the CQD COMPRESSED_INTERNAL_FORMAT to decide whether to use
// SQLARK_EXPLODED_FORMAT or SQLMX_ALIGNED_FORMAT as the internal
// data format.
// When the CIF CQD is set to SYSTEM, we decide whether to use aligned or
// exploded format as the tuple format for the whole query. In preCodeGen
// we visit all the copy operators (hash join, hash group by, exchange and
// sort) in the query tree and keep a count of the nodes that are in favor
// of aligned format and those that are in favor of exploded format.
// The final decision about the tuple format for the whole query depends on
// those two numbers: if the number of nodes in favor of aligned format is
// greater than the number in favor of exploded format, aligned format is
// selected; otherwise exploded format is selected.
// The function that determines the format for each of the copy operators
// plus the RelRoot is determineInternalFormat(..); it is called in the
// preCodeGen of the copy operators.
generator->initNNodes();
isCIFOn_ = FALSE;
if ((CmpCommon::getDefault(COMPRESSED_INTERNAL_FORMAT) == DF_ON )||
generator->isFastExtract() ||
generator->containsFastExtract())
{
isCIFOn_ = TRUE;
generator->setCompressedInternalFormat();
}
else
if ( CmpCommon::getDefault(COMPRESSED_INTERNAL_FORMAT) == DF_OFF )
{
generator->setExplodedInternalFormat();
}
else
{
NABoolean resize = FALSE;
NABoolean considerBufferDefrag = FALSE;
ValueIdSet vidSet = child(0)->getGroupAttr()->getCharacteristicOutputs();
ExpTupleDesc::TupleDataFormat tupleFormat =
generator->determineInternalFormat(
vidSet,
this,
resize,
RelExpr::CIF_SYSTEM,
FALSE,
considerBufferDefrag);
if (tupleFormat == ExpTupleDesc::SQLMX_ALIGNED_FORMAT)
{
generator->incNCIFNodes();
}
else
{
generator->decNCIFNodes();
}
}
//generator->setInternalFormat();
// Some operators will revert the internal format back to exploded format
// when they are directly under the root node - such as the top level ESPs,
// Sort, and HJ operators.
// This is so there is no bottleneck in the master flipping the data back
// to exploded format (required for bulk move out).
child(0)->setParentIsRoot( TRUE );
// create a list of NATypes corresponding to each entry in the
// userColumnList_ in RETDesc. Used by generator to convert to
// this type during output expr code gen.
// The value ids in userColumnList_ cannot be used as the type
// corresponding to that value id may change due to VEG transformation
// in the preCodeGen phase.
if (getRETDesc()->createNATypeForUserColumnList(CmpCommon::statementHeap()))
{
// error case.
GenAssert(FALSE, "error from createNATypeForUserColumnList.");
}
if ( (child(0)->getOperatorType() == REL_EXCHANGE) &&
(child(0)->child(0)->getOperatorType() == REL_COMPOUND_STMT) )
{
((Exchange *)((RelExpr *)child(0)))->setDP2TransactionIndicator( TRUE );
}
}
unsigned short prevNumBMOs = 0;
CostScalar prevBMOsMemoryUsage;
if (isTrueRoot())
{
if (oltOptInfo().oltAnyOpt())
{
if (treeContainsEspExchange())
{
// turn off oltQueryOptimization if the query plan contains an
// esp_exchange.
// 10-070316-3325: childOperType_ = REL_UNARY_DELETE
// 10-080118-9942: select query contains esp_exchange that is
// not directly under root.
oltOptInfo().setOltOpt(FALSE);
}
else if (childOperType() == REL_SCAN)
{
// if this was a scan query to start with but is no longer
// a scan query(which means it got transformed to join, etc),
// then turn off oltQueryOptimization.
RelExpr *childExpr = child(0)->castToRelExpr();
if (childExpr->getOperatorType() == REL_FIRST_N)
childExpr = childExpr->child(0)->castToRelExpr();
if ((childExpr->getOperatorType() != REL_EXCHANGE) &&
(childExpr->getOperatorType() != REL_HBASE_ACCESS))
oltOptInfo().setOltCliOpt(FALSE);
}
} // oltAnyOpt
*generator->oltOptInfo() = oltOptInfo();
if (generator->oltOptInfo()->oltAnyOpt())
{
// Also, PubSub streams' STREAM_TIMEOUT not handled by opt'd root
if (getGroupAttr()->isStream())
{
generator->oltOptInfo()->setOltCliOpt(FALSE);
}
if (CmpCommon::getDefault(EID_SPACE_USAGE_OPT) == DF_ON)
{
generator->setDoEidSpaceUsageOpt(TRUE);
}
else
{
generator->setDoEidSpaceUsageOpt(FALSE);
}
// olt opt not chosen if ALL stats are being collected.
// We may support this case later.
// In case of operator stats, don't disable OLT optimization
// But, when the query is OLT optimized, switch it to pertable stats
if ((generator->computeStats()) &&
((generator->collectStatsType() == ComTdb::ALL_STATS)))
generator->oltOptInfo()->setOltOpt(FALSE);
if (CmpCommon::getDefault(OLT_QUERY_OPT) == DF_OFF)
generator->oltOptInfo()->setOltOpt(FALSE);
// In the case of an embedded insert,
// do not execute the query OLT optimized.
if (getGroupAttr()->isEmbeddedInsert())
generator->oltOptInfo()->setOltMsgOpt(FALSE);
#ifdef _DEBUG
if (getenv("NO_OLT_QUERY_OPT"))
generator->oltOptInfo()->setOltOpt(FALSE);
#endif
if (generator->oltOptInfo()->oltEidOpt())
{
generator->oltOptInfo()->setOltEidLeanOpt(FALSE);
if (generator->doEidSpaceUsageOpt())
{
generator->oltOptInfo()->setOltEidLeanOpt(TRUE);
}
}
if (CmpCommon::getDefault(OLT_QUERY_OPT_LEAN) == DF_OFF)
generator->oltOptInfo()->setOltEidLeanOpt(FALSE);
} // oltAnyOpt
// mark exchange operator for maxOneRow optimization.
RelExpr *childExpr = child(0)->castToRelExpr();
NABoolean doMaxOneRowOpt = TRUE;
NABoolean doMaxOneInputRowOpt = FALSE;
NABoolean firstN = FALSE;
RelExpr *exchExpr = NULL;
if (NOT generator->doEidSpaceUsageOpt())
{
doMaxOneRowOpt = FALSE;
doMaxOneInputRowOpt = FALSE;
}
else
{
doMaxOneRowOpt = TRUE;
doMaxOneInputRowOpt = TRUE;
}
if (childExpr->getOperatorType() == REL_FIRST_N)
{
firstN = TRUE;
if (((FirstN *)childExpr)->getFirstNRows() != 1)
doMaxOneRowOpt = FALSE;
childExpr = childExpr->child(0)->castToRelExpr();
}
if ((childExpr->getOperatorType() != REL_EXCHANGE) ||
(childExpr->child(0)->castToRelExpr()->
getPhysicalProperty()->getPlanExecutionLocation() != EXECUTE_IN_DP2))
{
doMaxOneRowOpt = FALSE;
doMaxOneInputRowOpt = FALSE;
}
else
{
exchExpr = childExpr;
childExpr = childExpr->child(0)->castToRelExpr();
if (NOT childExpr->getOperator().match(REL_FORCE_ANY_SCAN))
{
doMaxOneInputRowOpt = FALSE;
}
else if (childExpr->getOperatorType() == REL_FILE_SCAN)
{
FileScan * s = (FileScan *)childExpr;
if (NOT firstN)
doMaxOneRowOpt = FALSE;
if ((s->getGroupAttr()->isStream()) ||
(s->accessOptions().accessType() == SKIP_CONFLICT_))
{
//doMaxOneInputRowOpt = FALSE;
//doMaxOneRowOpt = FALSE;
}
}
}
if (doMaxOneRowOpt)
{
exchExpr->oltOptInfo().setMaxOneRowReturned(TRUE);
}
if (doMaxOneInputRowOpt)
{
exchExpr->oltOptInfo().setMaxOneInputRow(TRUE);
}
generator->setUpdErrorInternalOnError(FALSE);
if (rollbackOnError())
generator->setUpdErrorOnError(FALSE);
else
generator->setUpdErrorOnError(TRUE);
if (CmpCommon::getDefault(UPD_ABORT_ON_ERROR) == DF_ON)
generator->setUpdAbortOnError(TRUE);
else
generator->setUpdAbortOnError(FALSE);
if (CmpCommon::getDefault(UPD_PARTIAL_ON_ERROR) == DF_ON)
generator->setUpdPartialOnError(TRUE);
else
generator->setUpdPartialOnError(FALSE);
if (CmpCommon::getDefault(UPD_SAVEPOINT_ON_ERROR) == DF_ON)
generator->setUpdSavepointOnError(TRUE);
else
generator->setUpdSavepointOnError(FALSE);
generator->setSkipUnavailablePartition(FALSE);
if ((childOperType() == REL_SCAN) &&
(CmpCommon::getDefault(SKIP_UNAVAILABLE_PARTITION) == DF_ON))
generator->setSkipUnavailablePartition(TRUE);
if (avoidHalloween_)
{
// At beginning of preCodeGen, assume DP2Locks will be
// used. The NestedJoin::preCodeGen will change this
// if its left child is a sort.
generator->setHalloweenProtection(Generator::DP2LOCKS);
}
if (generator->getBindWA()->getUdrStoiList().entries () > 0)
generator->setAqrEnabled(FALSE);
// Reset the accumulated # of BMOs and memory usages in
// the generator
prevNumBMOs = generator->replaceNumBMOs(0);
prevBMOsMemoryUsage = generator->replaceBMOsMemoryUsage(0);
} // true root
// propagate the need to return top sorted N rows to all sort
// nodes in the query.
if (needFirstSortedRows() == TRUE)
{
needSortedNRows(TRUE);
}
// Delete any VEGReferences that appear in the Characteristic Inputs.
// The Characteristic Inputs of the root of the execution plan MUST
// only contain external dataflow inputs that are provided by the
// user. The VEGReferences may have been introduced as a side-effect
// of predicate pushdown. They are redundant in the Characteristic
// Inputs of the root.
ValueIdSet availableValues;
for (ValueId exprId = getGroupAttr()->getCharacteristicInputs().init();
getGroupAttr()->getCharacteristicInputs().next(exprId);
getGroupAttr()->getCharacteristicInputs().advance(exprId) )
{
if (exprId.getItemExpr()->getOperatorType() != ITM_VEG_REFERENCE)
availableValues += exprId;
}
getGroupAttr()->setCharacteristicInputs(availableValues);
// If this is the root for a parallel extract producer query then
// there should be an Exchange node immediately below and we need to
// set a flag in that Exchange.
if (numExtractStreams_ > 0)
{
if (child(0)->getOperatorType() == REL_EXCHANGE)
{
Exchange *e = (Exchange *) child(0)->castToRelExpr();
e->setExtractProducerFlag();
}
// fix for soln 10-090506-1407: parallel extract for a union distinct
// can sometimes have root->mapvalueids->exchange. It should be OK.
else if (child(0)->getOperatorType() == REL_MAP_VALUEIDS &&
child(0)->child(0)->getOperatorType() == REL_EXCHANGE)
{
Exchange *e = (Exchange *) child(0)->child(0)->castToRelExpr();
e->setExtractProducerFlag();
}
}
//
// If there is no hard requirement on #ESPs, reduce the dop based on
// the total # of rows processed per ESP. The reduction can modify
// the number of partitions attribute of the partition function stored
// in the synthesized physical property of an Exchange operator.
//
// CQD DOP_REDUCTION_ROWCOUNT_THRESHOLD set to 0.0 will disable the
// feature.
float threshold;
ActiveSchemaDB()->
getDefaults().getFloat(DOP_REDUCTION_ROWCOUNT_THRESHOLD, threshold);
if ( threshold > 0.0 && CURRSTMT_OPTDEFAULTS->getRequiredESPs() <= 0 ) {
generator->setCurrentEspFragmentPCG(NULL); // reset the 'global'
// to the current esp frag.
RelExpr::prepareDopReduction(generator);
RelExpr::doDopReduction();
}
// Now walk through the execution plan and initialize it for code generation.
child(0) = child(0)->preCodeGen(generator,
getGroupAttr()->getCharacteristicInputs(),
pulledNewInputs);
if (! child(0).getPtr())
return NULL;
if (! RelExpr::preCodeGen(
generator,
getGroupAttr()->getCharacteristicInputs(),
pulledNewInputs))
return NULL;
if ( isTrueRoot() && CmpCommon::getDefault(COMPRESSED_INTERNAL_FORMAT) == DF_SYSTEM)
{
if (generator->getNCIFNodes()>0)
{
isCIFOn_ = TRUE;
generator->setCompressedInternalFormat();
}
else
{
generator->setExplodedInternalFormat();
isCIFOn_ = FALSE;
}
}
// If the RelRoot is marked as a parallel extract producer then the
// root's child must be an Exchange and the child must also be
// marked for parallel extract. Even though we checked the type of
// the child a few lines above, we do it again here because the call
// to RelExpr::preCodeGen can potentially eliminate Exchange nodes.
NABoolean extractPlanLooksOK = TRUE;
if (numExtractStreams_ > 0)
{
if (child(0)->getOperatorType() == REL_EXCHANGE)
{
Exchange *e = (Exchange *) child(0)->castToRelExpr();
if (!e->getExtractProducerFlag())
extractPlanLooksOK = FALSE;
}
// fix for soln 10-090506-1407: parallel extract for a union distinct
// can sometimes have root->mapvalueids->exchange. It should be OK.
else if (child(0)->getOperatorType() == REL_MAP_VALUEIDS &&
child(0)->child(0)->getOperatorType() == REL_EXCHANGE)
{
Exchange *e = (Exchange *) child(0)->child(0)->castToRelExpr();
if (!e->getExtractProducerFlag())
extractPlanLooksOK = FALSE;
}
else
{
extractPlanLooksOK = FALSE;
}
if (!extractPlanLooksOK)
{
*CmpCommon::diags() << DgSqlCode(-7004);
GenExit();
return NULL;
}
}
// Accumulate the values that are provided as inputs by my parent
// together with the values that are produced as outputs by my
// children. Use these values for rewriting the VEG expressions.
getInputValuesFromParentAndChildren(availableValues);
// Rebuild the computable expressions using a bridge value, if possible
compExpr().replaceVEGExpressions
(availableValues, getGroupAttr()->getCharacteristicInputs());
// Rebuild the required order
reqdOrder().replaceVEGExpressions
(availableValues, getGroupAttr()->getCharacteristicInputs());
// Rebuild the pkey list
pkeyList().replaceVEGExpressions
(availableValues, getGroupAttr()->getCharacteristicInputs());
// add internally generated inputs to the input vars and make sure that
// the root isn't left with "pulled" input values that aren't "internal"
// inputs (the assert will most likely fire for leftover partition input
// variables)
inputVars().insertSet(generator->getInternalInputs());
pulledNewInputs -= (ValueIdSet) inputVars();
GenAssert(pulledNewInputs.isEmpty(),"root can't produce these values");
// propagate the need to return top sorted N rows to all sort
// nodes in the query.
if (needFirstSortedRows() == TRUE)
{
needSortedNRows(TRUE);
}
// Do not rollback on error for INTERNAL REFRESH commands.
if (isRootOfInternalRefresh())
{
generator->setUpdErrorInternalOnError(TRUE);
generator->setUpdAbortOnError(FALSE);
generator->setUpdPartialOnError(FALSE);
generator->setUpdSavepointOnError(FALSE);
}
// do not abort transaction for internal compiles, even if abort
// is needed for this statement.
// Catman depends on no abort for individual IUD stmts.
// It aborts the transaction when it gets an error from cli.
if ( ( CmpCommon::context()->internalCompile() == CmpContext::INTERNAL_MODULENAME ) ||
( CmpCommon::statement()->isSMDRecompile() )
)
{
generator->setUpdErrorInternalOnError(TRUE);
generator->setUpdAbortOnError(FALSE);
generator->setUpdPartialOnError(FALSE);
generator->setUpdSavepointOnError(FALSE);
}
oltOptInfo().setOltCliOpt(generator->oltOptInfo()->oltCliOpt());
if ((isTrueRoot()) &&
(CmpCommon::getDefault(LAST0_MODE) == DF_ON) &&
(child(0)))
{
OperatorTypeEnum op = child(0)->getOperatorType();
if (op != REL_DESCRIBE &&
op != REL_EXPLAIN &&
op != REL_DDL &&
op != REL_LOCK &&
op != REL_UNLOCK &&
op != REL_SET_TIMEOUT &&
op != REL_STATISTICS &&
op != REL_TRANSACTION &&
op != REL_EXE_UTIL)
{
// do not return any rows at runtime.
// Setting of -2 tells executor to simulate [last 0]
// without having to put [last 0] in the query.
setFirstNRows(-2);
}
}
if (isTrueRoot())
{
// if warnings 6008 or 6011 were raised, set missingStats indication.
if (CmpCommon::diags()->containsWarning(SINGLE_COLUMN_STATS_NEEDED) ||
CmpCommon::diags()->containsWarning(SINGLE_COLUMN_STATS_NEEDED_AUTO))
{
generator->compilerStatsInfo().setMissingStats(TRUE);
}
// change the following number(16) to whatever is considered 'large'.
//#define LARGE_NUMBER_OF_JOINS 16
//if (generator->compilerStatsInfo().totalJoins() > LARGE_NUMBER_OF_JOINS)
//generator->compilerStatsInfo().setLargeNumOfJoins(TRUE);
// set mandatoryXP indication in generator.
if (hasMandatoryXP())
generator->compilerStatsInfo().setMandatoryCrossProduct(TRUE);
// Remember # of BMOs that children's preCodeGen found for my fragment.
setNumBMOs( generator->replaceNumBMOs(prevNumBMOs) );
setBMOsMemoryUsage( generator->replaceBMOsMemoryUsage(prevBMOsMemoryUsage) );
// Compute the total available memory quota for BMOs
NADefaults &defs = ActiveSchemaDB()->getDefaults();
// total per CPU
double m = defs.getAsDouble(EXE_MEMORY_LIMIT_PER_CPU) * (1024*1024);
// total memory usage for all nBMOs
double m1 = (generator->getTotalNBMOsMemoryPerCPU()).value();
// total memory limit for all BMOs
double m2 = m-m1;
double ratio =
defs.getAsDouble(EXE_MEMORY_LIMIT_NONBMOS_PERCENT) / 100;
if ( m2 < 0 ) {
// EXE_MEMORY_LIMIT_PER_CPU is set too small, set the total
// memory limit for BMOs to zero. When the memory quota for
// each BMO is computed (via method RelExpr::computeMemoryQuota()),
// the lower-bound for each BMO will kick in and each will receive
// a quota equal to the lower-bound value.
m2 = 0;
} else {
// If nBMOs use more memory than their portion, adjust m2 to
// (1-ratio)*m.
if (m1 > m*ratio )
m2 = m*(1-ratio);
}
generator->setBMOsMemoryLimitPerCPU(m2);
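// Worked example of the arithmetic above (hypothetical CQD settings, added
// for clarity; the code works in bytes, MB are used here for readability):
// with EXE_MEMORY_LIMIT_PER_CPU = 2048 (MB) and
// EXE_MEMORY_LIMIT_NONBMOS_PERCENT = 25, m is 2048 MB and ratio is 0.25.
// If the nBMOs use m1 = 300 MB, then m2 = m - m1 = 1748 MB (no adjustment,
// since m1 <= m * ratio = 512 MB). If instead m1 = 700 MB (> 512 MB), m2 is
// set to (1 - 0.25) * m = 1536 MB. If m1 exceeded m, m2 would be clamped to
// 0 and each BMO would fall back to its lower-bound quota.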
}
if (isTrueRoot())
{
if (generator->isAqrWnrInsert())
{
ExeUtilWnrInsert * wi = new(generator->getBindWA()->wHeap())
ExeUtilWnrInsert(generator->utilInsertTable(),
child(0)->castToRelExpr());
child(0)->markAsBound();
wi->bindNode(generator->getBindWA());
if (generator->getBindWA()->errStatus())
return NULL;
// Use the same characteristic inputs and outputs as my child
wi->setGroupAttr(new(generator->wHeap())
GroupAttributes(*(child(0)->getGroupAttr())));
//pass along some of the estimates
wi->setEstRowsUsed(child(0)->getEstRowsUsed());
wi->setMaxCardEst(child(0)->getMaxCardEst());
wi->setInputCardinality(child(0)->getInputCardinality());
wi->setPhysicalProperty(child(0)->getPhysicalProperty());
wi->setOperatorCost(0);
wi->setRollUpCost(child(0)->getRollUpCost());
if (! wi->preCodeGen(generator,
getGroupAttr()->getCharacteristicInputs(),
pulledNewInputs))
return NULL;
child(0) = wi;
}
}
// if blob values are being selected out, retrieve them and return them either in file
// or as a stream
if (isTrueRoot())
{
RETDesc * rd = getRETDesc();
const ColumnDescList * cdl = rd->getColumnList();
for (CollIndex i = 0; i < compExpr().entries(); i++)
{
ValueId val_id = compExpr()[i];
ItemExpr * expr = val_id.getItemExpr();
if ((val_id.getType().isLob()))/* &&
((expr->getOperatorType() == ITM_BASECOLUMN) ||
(expr->getOperatorType() == ITM_INDEXCOLUMN)))*/
{
LOBconvertHandle * lc = new(generator->wHeap())
LOBconvertHandle(val_id.getItemExpr(), LOBoper::STRING_);
lc->bindNode(generator->getBindWA());
lc->preCodeGen(generator);
compExpr().removeAt(i);
compExpr().insertAt(i, lc->getValueId());
ColumnDesc *cd = (*cdl)[i];
NAColumn * col = cd->getValueId().getNAColumn(TRUE);
if (col)
{
lc->lobNum() = col->lobNum();
lc->lobStorageType() = col->lobStorageType();
lc->lobStorageLocation() = col->lobStorageLocation();
}
cd->setValueId(lc->getValueId());
rd->changeNATypeForUserColumnList(i, &lc->getValueId().getType());
}
} // for
if (getPredExprTree())
{
getPredExprTree()->preCodeGen(generator);
}
} // isTrueRoot
setHdfsAccess(generator->hdfsAccess());
markAsPreCodeGenned();
#ifdef _DEBUG
if(getenv("SHOW_PLAN"))
{
NAString plan;
unparse(plan);
printf("PLAN: %s\n",convertNAString(plan,generator->wHeap()));
}
#endif
return this;
} // RelRoot::preCodeGen
RelExpr * Join::preCodeGen(Generator * generator,
const ValueIdSet & externalInputs,
ValueIdSet &pulledNewInputs)
{
if (nodeIsPreCodeGenned())
return this;
// Check if the pivs of this operator and its child are the same.
// If they are not, make them the same.
replacePivs();
// In the case of an embedded insert with a selection predicate,
// we need to retrieve the stored available outputs
// from the GenericUpdate group attributes.
ValueIdSet availableGUOutputs;
// clear any prefix sort key
generator->clearPrefixSortKey();
if (getGroupAttr()->isEmbeddedInsert() &&
!selectionPred().isEmpty() &&
getArity() > 1)
{
if (child(1)->getArity() > 0)
child(1)->child(0)->getInputAndPotentialOutputValues(availableGUOutputs);
}
NABoolean isALeftJoin = (getOperator().match(REL_ANY_LEFT_JOIN));
NABoolean isARightJoin = (getOperator().match(REL_ANY_RIGHT_JOIN));
ValueIdSet availableValues;
ValueIdSet childPulledInputs;
if (isALeftJoin)
{
ValueId instNullId, exprId, vid;
// Prune the nullInstantiatedOutput list. Retain only those values
// that are either:
// 1) The external dataflow inputs to the Join.
// 2) The Characteristic Outputs of the Join.
// 3) The Characteristic Outputs of the first child of the Join.
// 4) Values required for evaluating the selection expression
// on the Join.
// Discard all other values.
availableValues = getGroupAttr()->getCharacteristicInputs();
availableValues += child(0)->getGroupAttr()->getCharacteristicOutputs();
ValueIdSet discardSet;
CollIndex ne = nullInstantiatedOutput().entries();
for (CollIndex j = 0; j < ne; j++) // NT_PORT FIX SK 07/16/96
{
instNullId = nullInstantiatedOutput_[j];
GenAssert(instNullId.getItemExpr()->getOperatorType() == ITM_INSTANTIATE_NULL,"NOT instNullId.getItemExpr()->getOperatorType() == ITM_INSTANTIATE_NULL");
// Access the operand of the InstantiateNull
exprId = (((InstantiateNull *)(instNullId.getItemExpr()))
->getExpr()->getValueId());
if ( (NOT availableValues.contains(exprId)) AND
(NOT getGroupAttr()->getCharacteristicOutputs()
.referencesTheGivenValue(instNullId, vid)) AND
(NOT selectionPred().referencesTheGivenValue(instNullId, vid)) )
{
discardSet += nullInstantiatedOutput_[j];
}
}
// Delete all those elements that do not require null instantiation.
for (exprId = discardSet.init(); discardSet.next(exprId);
discardSet.advance(exprId))
{
nullInstantiatedOutput_.remove(exprId);
}
} // endif (getOperator().match(REL_ANY_LEFT_JOIN))
else // Null Instantiation will not be necessary.
nullInstantiatedOutput().clear(); // clear in case a LJ was transformed to an IJ
if (isARightJoin)
{
ValueId instNullIdForRightJoin, exprIdForRightJoin, vidForRightJoin;
ValueIdSet discardSetForRightJoin;
// Prune the nullInstantiatedForRightJoinOutput list. Retain only those values
// that are either:
// 1) The external dataflow inputs to the Join.
// 2) The Characteristic Outputs of the Join.
// 3) The Characteristic Outputs of the second child of the Join.
// 4) Values required for evaluating the selection expression
// on the Join.
// Discard all other values.
availableValues = getGroupAttr()->getCharacteristicInputs();
availableValues += child(1)->getGroupAttr()->getCharacteristicOutputs();
CollIndex neR = nullInstantiatedForRightJoinOutput().entries();
for (CollIndex j = 0; j < neR; j++) // NT_PORT FIX SK 07/16/96
{
instNullIdForRightJoin = nullInstantiatedForRightJoinOutput_[j];
GenAssert(instNullIdForRightJoin.getItemExpr()->getOperatorType() == ITM_INSTANTIATE_NULL,"NOT instNullId.getItemExpr()->getOperatorType() == ITM_INSTANTIATE_NULL");
// Access the operand of the InstantiateNull
exprIdForRightJoin = (((InstantiateNull *)(instNullIdForRightJoin.getItemExpr()))
->getExpr()->getValueId());
if ( (NOT availableValues.contains(exprIdForRightJoin)) AND
(NOT getGroupAttr()->getCharacteristicOutputs()
.referencesTheGivenValue(instNullIdForRightJoin,
vidForRightJoin)) AND
(NOT selectionPred().referencesTheGivenValue(instNullIdForRightJoin,
vidForRightJoin)) )
{
discardSetForRightJoin += nullInstantiatedForRightJoinOutput_[j];
}
}
// Delete all those elements that do not require null instantiation.
for (exprIdForRightJoin = discardSetForRightJoin.init();
discardSetForRightJoin.next(exprIdForRightJoin);
discardSetForRightJoin.advance(exprIdForRightJoin))
{
nullInstantiatedForRightJoinOutput_.remove(exprIdForRightJoin);
}
} // endif (getOperator().match(REL_ANY_RIGHT_JOIN))
else // Null Instantiation will not be necessary.
nullInstantiatedForRightJoinOutput().clear(); // clear in case a LJ was transformed to an IJ
// Resolve the VEGReferences and VEGPredicates, if any, that appear
// in the Characteristic Inputs, in terms of the externalInputs.
getGroupAttr()->resolveCharacteristicInputs(externalInputs);
availableValues = getGroupAttr()->getCharacteristicInputs();
bool precodeHalloweenLHSofTSJ = false;
bool savePrecodeHalloweenLHSofTSJ = false;
if ((getHalloweenForceSort() != NO_SELF_REFERENCE) &&
(generator->getR251HalloweenPrecode()))
{
savePrecodeHalloweenLHSofTSJ =
generator->setPrecodeHalloweenLHSofTSJ(true);
precodeHalloweenLHSofTSJ = true;
if (getHalloweenForceSort() == FORCED)
generator->setHalloweenSortForced();
}
NABoolean savedOltMsgOpt = generator->oltOptInfo()->oltMsgOpt();
// My Characteristic Inputs become the external inputs for my left child.
child(0) = child(0)->preCodeGen(generator,availableValues,childPulledInputs);
if (! child(0).getPtr())
return NULL;
// For HashJoin Min/Max optimization
if (isHashJoin())
{
HashJoin *hj = (HashJoin *)this;
for(CollIndex i = hj->getStartMinMaxIndex(); i < hj->getEndMinMaxIndex(); i++)
{
// A scan may have decided to use the min/max values that
// belong to this join; remove them from the
// childPulledInputs. We do not need to pull them from the
// parent as this Hash Join will generate them.
if(generator->getWillUseMinMaxKeys()[i] != NULL_VALUE_ID) {
childPulledInputs -= generator->getMinVals()[i];
childPulledInputs -= generator->getMaxVals()[i];
}
// Clear the candidate values generated by this HashJoin. We
// are done with the left child, so no one else can use
// these values.
generator->getMinMaxKeys()[i] = NULL_VALUE_ID;
generator->getMinVals()[i] = NULL_VALUE_ID;
generator->getMaxVals()[i] = NULL_VALUE_ID;
}
}
if (precodeHalloweenLHSofTSJ)
{
generator->setPrecodeHalloweenLHSofTSJ(savePrecodeHalloweenLHSofTSJ);
if (generator->getUnblockedHalloweenScans() == 0)
{
// Turn off DP2_LOCKS for codeGen, using either the FORCED_SORT
// or PASSIVE values.
if (getHalloweenForceSort() == FORCED)
{
generator->setHalloweenProtection(Generator::FORCED_SORT);
}
else
generator->setHalloweenProtection(Generator::PASSIVE);
}
else if (updateSelectValueIdMap() && updateTableDesc() &&
(NOT updateTableDesc()->getNATable()->getClusteringIndex()->hasSyskey()))
{
// if the key columns of the table being inserted into are
// equal to constants or inputs then no sort is required
// to enforce Halloween blocking. Example statements are
// update tt set a = 1 ;(a is the primary key for table tt)
// insert into tt select * from tt where a = 1 ;
ValueIdList reqdOrder ;
updateSelectValueIdMap()->rewriteValueIdListDown(
updateTableDesc()->getClusteringIndex()->getOrderOfKeyValues(),
reqdOrder);
reqdOrder.removeCoveredExprs(
getGroupAttr()->getCharacteristicInputs());
if (reqdOrder.isEmpty())
{
generator->setHalloweenProtection(Generator::PASSIVE);
}
}
}
NABoolean leftMultipleRowsReturned =
generator->oltOptInfo()->multipleRowsReturned();
// If this is a nested join and the left child could return multiple rows,
// disable olt msg opt for the right child. This is done since
// olt msg opt can only handle input and output of at most 1 row.
if ((getOperatorType() == REL_NESTED_JOIN) ||
(getOperatorType() == REL_LEFT_NESTED_JOIN) ||
(getOperatorType() == REL_NESTED_SEMIJOIN) ||
(getOperatorType() == REL_NESTED_ANTI_SEMIJOIN) ||
(getOperatorType() == REL_NESTED_JOIN_FLOW))
{
if (generator->oltOptInfo()->multipleRowsReturned())
{
generator->oltOptInfo()->setOltMsgOpt(FALSE);
}
}
// process additional input value ids the child wants
// (see RelExpr::preCodeGen())
getGroupAttr()->addCharacteristicInputs(childPulledInputs);
pulledNewInputs += childPulledInputs;
availableValues += childPulledInputs;
childPulledInputs.clear();
// If this is a tuple substitution join that is implemented by the nested join
// method, then the values produced as output by my left child can be used as
// "external" inputs by my right child.
NABoolean replicatePredicates = TRUE;
ValueIdSet joinInputAndPotentialOutput;
getInputAndPotentialOutputValues(joinInputAndPotentialOutput);
if (isTSJ())
{
availableValues += child(0)->getGroupAttr()->getCharacteristicOutputs();
// For a TSJ the joinPred() is a predicate between the inputs
// and the first child that could not be pushed down to the first
// child because it is either a left join or an anti-semi-join
// Rebuild the join predicate tree now
joinPred().replaceVEGExpressions
(availableValues,
getGroupAttr()->getCharacteristicInputs(),
FALSE, // no key predicates here
0 /* no need for idempotence here */,
replicatePredicates,
NULL /* not a groupByAgg */,
&joinInputAndPotentialOutput
);
}
bool didSetRHS = false;
bool saveSetRHS = false;
if (generator->getPrecodeHalloweenLHSofTSJ() &&
isNestedJoin())
{
saveSetRHS = generator->setPrecodeRHSofNJ(true);
didSetRHS = true;
}
// Process the right child
child(1) = child(1)->preCodeGen(generator,availableValues,childPulledInputs);
if (! child(1).getPtr())
return NULL;
if (didSetRHS)
generator->setPrecodeRHSofNJ(saveSetRHS);
NABoolean rightMultipleRowsReturned =
generator->oltOptInfo()->multipleRowsReturned();
if (leftMultipleRowsReturned || rightMultipleRowsReturned)
generator->oltOptInfo()->setMultipleRowsReturned(TRUE);
// process additional input value ids the child wants
// (see RelExpr::preCodeGen())
getGroupAttr()->addCharacteristicInputs(childPulledInputs);
pulledNewInputs += childPulledInputs;
// Accumulate the values that are provided as inputs by my parent
// together with the values that are produced as outputs by my
// children. Use these values for rewriting the VEG expressions.
getInputValuesFromParentAndChildren(availableValues);
// Rebuild the join predicate tree
if (! isTSJ())
joinPred().replaceVEGExpressions
(availableValues,
getGroupAttr()->getCharacteristicInputs(),
FALSE, // no key predicates here
0 /* no need for idempotence here */,
replicatePredicates,
NULL /* not a groupByAgg */,
&joinInputAndPotentialOutput
);
if (isALeftJoin)
{
// Replace the operands of the ITM_INSTANTIATE_NULL with values from
// the Characteristic Outputs of the right child.
// The following values are available for resolving the nullInstantiatedOutput
// 1) The external dataflow inputs to the Join.
// 2) The Characteristic Outputs of the second (right) child of the Join.
// 3) The Characteristic Outputs of the first (left) child of the Join.
// Needed when nested_join plan is chosen.
availableValues = getGroupAttr()->getCharacteristicInputs();
availableValues += child(1)->getGroupAttr()->getCharacteristicOutputs();
availableValues += child(0)->getGroupAttr()->getCharacteristicOutputs();
nullInstantiatedOutput_.replaceOperandsOfInstantiateNull
(availableValues,
getGroupAttr()->getCharacteristicInputs());
}
if (isARightJoin)
{
// Replace the operands of the ITM_INSTANTIATE_NULL with values from
// the Characteristic Outputs of the left child.
// The following values are available for resolving the nullInstantiatedForRightJoinOutput
// 1) The external dataflow inputs to the Join.
// 2) The Characteristic Outputs of the first (left) child of the Join.
availableValues = getGroupAttr()->getCharacteristicInputs();
availableValues += child(0)->getGroupAttr()->getCharacteristicOutputs();
nullInstantiatedForRightJoinOutput_.replaceOperandsOfInstantiateNull
(availableValues,
getGroupAttr()->getCharacteristicInputs());
}
// The VEG expressions in the selection predicates and the characteristic
// outputs can reference any expression that is either a potential output
// or a characteristic input for this RelExpr. Supply these values for
// rewriting the VEG expressions.
getInputAndPotentialOutputValues(availableValues);
// If this is an embedded insert, with a selection predicate,
// add in the characteristic outputs from the generic update RelExpr
if (getGroupAttr()->isEmbeddedInsert() &&
!selectionPred().isEmpty())
{
availableValues += availableGUOutputs;
}
// Rebuild the selection predicate tree.
selectionPred().replaceVEGExpressions
(availableValues,
getGroupAttr()->getCharacteristicInputs(),
FALSE, // no need for key predicates here
0 /* no need for idempotence here */,
replicatePredicates
);
// New code was added to avoid the following situation:
//
// Query: select max(t1.a) from t1,t2 where t1.a = t2.b;
// Plan:  shortcut_groupby
//              |
//         esp_exchange
//              |
//         merge_join in parallel 4 ways on
//           |       |
//       scan t2   scan T1
//
// By the time we get to preCodeGen, merge_join has an orderby
// on VEG(a,b) and characteristic output VEG(a,b).
// Because scan T2 gets preCodeGen'd first, it resolves its
// orderby VEG(a,b) to T2.b; this also changes the orderby VEG
// in merge_join, and thereafter, to T2.b. When merge_join then
// resolves its characteristic output, it resolves it to T1.a, because
// T1 is first in the FROM clause and T1.a has a smaller value id,
// so the combined set of T1's and T2's characteristic outputs has T1.a
// in front of T2.b. At code generation time, esp_exchange expects
// T2.b to be a characteristic output of its child, because it needs to
// merge the sorted streams on its orderby value, which is T2.b.
// This causes an assertion failure because merge_join produces T1.a.
// The following code counters that by making sure that, if the sort key
// is part of the available values, the characteristic outputs are first
// resolved using the sort key and only then using the rest of the
// available values.
//
ValueIdSet sortKey = getPhysicalProperty()->getSortKey();
sortKey = sortKey.simplifyOrderExpr();
sortKey.intersectSet(availableValues);
if(sortKey.entries())
{
ValueIdSet reqOutput = getGroupAttr()->getCharacteristicOutputs();
ValueIdSet copyOfSet(reqOutput);
ValueIdSet inputValues;
ValueIdSet newExpr;
ItemExpr * iePtr;
// ---------------------------------------------------------------------
// Iterate over the values in the characteristic outputs.
// ---------------------------------------------------------------------
for (ValueId exprId = copyOfSet.init();
copyOfSet.next(exprId); copyOfSet.advance(exprId))
{
// -----------------------------------------------------------------
// Walk through the item expression tree and replace any
// VEGPredicates or VEGReferences that are found.
// -----------------------------------------------------------------
iePtr = exprId.getItemExpr()->replaceVEGExpressions(availableValues,
inputValues,
FALSE,
NULL,
FALSE);
if (iePtr) // expression was not discarded
{
iePtr->synthTypeAndValueId(TRUE); // redrive type synthesis
if (iePtr != exprId.getItemExpr()) // a replacement was done
{
reqOutput.subtractElement(exprId); // remove existing ValueId
reqOutput += iePtr->getValueId(); // replace with a new one
}
}
} // loop over characteristic outputs
getGroupAttr()->setCharacteristicOutputs(reqOutput);
}
// Rewrite the Characteristic Outputs.
getGroupAttr()->resolveCharacteristicOutputs
(availableValues,
getGroupAttr()->getCharacteristicInputs());
// propagate the children's OLT settings, unless this is an NLJ pushed down to DP2 that is used for MV logging
if ( !getPhysicalProperty()->executeInDP2() OR
!(generator->getBindWA()->getTopRoot()->getInliningInfo()).isUsedForMvLogging()
)
{
generator->oltOptInfo()->setOltMsgOpt(savedOltMsgOpt);
}
// In the case of an embedded insert,
// set the generator is embedded insert flag to TRUE.
if (getGroupAttr()->isEmbeddedInsert())
generator->setEmbeddedInsert(TRUE) ;
markAsPreCodeGenned();
// Done.
return this;
} // Join::preCodeGen()
RelExpr * GenericUtilExpr::preCodeGen(Generator * generator,
const ValueIdSet & externalInputs,
ValueIdSet &pulledNewInputs)
{
if (nodeIsPreCodeGenned())
return this;
if (! RelExpr::preCodeGen(generator,externalInputs,pulledNewInputs))
return NULL;
if (xnNeeded())
{
generator->setUpdSavepointOnError(FALSE);
generator->setUpdPartialOnError(FALSE);
}
markAsPreCodeGenned();
// Done.
return this;
}
RelExpr * ExeUtilExpr::preCodeGen(Generator * generator,
const ValueIdSet & externalInputs,
ValueIdSet &pulledNewInputs)
{
if (nodeIsPreCodeGenned())
return this;
if (! GenericUtilExpr::preCodeGen(generator,externalInputs,pulledNewInputs))
return NULL;
if (NOT aqrSupported())
generator->setAqrEnabled(FALSE);
markAsPreCodeGenned();
// Done.
return this;
}
// xnCanBeStarted is set to TRUE if the whole DDL operation can run in one transaction.
// If it is set to FALSE, the DDL implementation methods manage the transaction themselves.
short DDLExpr::ddlXnsInfo(NABoolean &isDDLxn, NABoolean &xnCanBeStarted)
{
ExprNode * ddlNode = getDDLNode();
xnCanBeStarted = TRUE;
// When the DDL transaction is not turned on via CQD
if (NOT ddlXns())
{
if ((dropHbase()) ||
(purgedataHbase()) ||
(initHbase()) ||
(createMDViews()) ||
(dropMDViews()) ||
(initAuthorization()) ||
(dropAuthorization()) ||
(addSeqTable()) ||
(createRepos()) ||
(dropRepos()) ||
(upgradeRepos()) ||
(addSchemaObjects()) ||
(updateVersion()))
{
// transaction will be started and committed in called methods.
xnCanBeStarted = FALSE;
}
if (((ddlNode) && (ddlNode->castToStmtDDLNode()) &&
(NOT ddlNode->castToStmtDDLNode()->ddlXns())) &&
((ddlNode->getOperatorType() == DDL_DROP_SCHEMA) ||
(ddlNode->getOperatorType() == DDL_CLEANUP_OBJECTS) ||
(ddlNode->getOperatorType() == DDL_ALTER_TABLE_ADD_CONSTRAINT_PRIMARY_KEY) ||
(ddlNode->getOperatorType() == DDL_ALTER_TABLE_ALTER_COLUMN_SET_SG_OPTION) ||
(ddlNode->getOperatorType() == DDL_CREATE_INDEX) ||
(ddlNode->getOperatorType() == DDL_POPULATE_INDEX) ||
(ddlNode->getOperatorType() == DDL_CREATE_TABLE) ||
(ddlNode->getOperatorType() == DDL_ALTER_TABLE_DROP_COLUMN) ||
(ddlNode->getOperatorType() == DDL_ALTER_TABLE_ALTER_COLUMN_DATATYPE) ||
(ddlNode->getOperatorType() == DDL_DROP_TABLE)))
{
// transaction will be started and committed in called methods.
xnCanBeStarted = FALSE;
}
isDDLxn = FALSE;
}
else // When the DDL transaction is turned on
{
isDDLxn = FALSE;
if (ddlNode && ddlNode->castToStmtDDLNode() &&
ddlNode->castToStmtDDLNode()->ddlXns())
isDDLxn = TRUE;
if (purgedataHbase() || upgradeRepos())
// transaction will be started and committed in called methods.
xnCanBeStarted = FALSE;
if ((ddlNode && ddlNode->castToStmtDDLNode() &&
ddlNode->castToStmtDDLNode()->ddlXns()) &&
((ddlNode->getOperatorType() == DDL_CLEANUP_OBJECTS) ||
(ddlNode->getOperatorType() == DDL_ALTER_TABLE_DROP_COLUMN) ||
(ddlNode->getOperatorType() == DDL_ALTER_SCHEMA) ||
(ddlNode->getOperatorType() == DDL_CREATE_INDEX) ||
(ddlNode->getOperatorType() == DDL_POPULATE_INDEX) ||
(ddlNode->getOperatorType() == DDL_ALTER_TABLE_ALTER_COLUMN_DATATYPE) ||
(ddlNode->getOperatorType() == DDL_ALTER_TABLE_ALTER_HBASE_OPTIONS) ||
(ddlNode->getOperatorType() == DDL_ALTER_INDEX_ALTER_HBASE_OPTIONS) ||
(ddlNode->getOperatorType() == DDL_ALTER_TABLE_RENAME)))
{
// transaction will be started and committed in called methods.
xnCanBeStarted = FALSE;
}
}
return 0;
}
RelExpr * DDLExpr::preCodeGen(Generator * generator,
const ValueIdSet & externalInputs,
ValueIdSet &pulledNewInputs)
{
if (nodeIsPreCodeGenned())
return this;
if (! GenericUtilExpr::preCodeGen(generator,externalInputs,pulledNewInputs))
return NULL;
if ((specialDDL()) ||
(initHbase_))
{
generator->setAqrEnabled(FALSE);
}
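// Decide whether the generator needs to start a transaction for this DDL
// request: only when DDL transactions are in effect for this statement and
// ddlXnsInfo() reports that the whole operation can run inside a single
// transaction started here (otherwise the DDL methods manage it themselves).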
NABoolean startXn = FALSE;
NABoolean ddlXns = FALSE;
if (ddlXnsInfo(ddlXns, startXn))
return NULL;
if (ddlXns && startXn)
xnNeeded() = TRUE;
else
xnNeeded() = FALSE;
markAsPreCodeGenned();
// Done.
return this;
}
RelExpr * NestedJoinFlow::preCodeGen(Generator * generator,
const ValueIdSet &externalInputs,
ValueIdSet &pulledNewInputs)
{
if (nodeIsPreCodeGenned())
return this;
/* child(0) = child(0)->preCodeGen(
generator,
externalInputs,
pulledNewInputs);
if (! child(0).getPtr())
return NULL;
*/
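// The left child, the right child and everything else are handled by
// NestedJoin::preCodeGen(), to which this flow node simply delegates.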
RelExpr * nj =
NestedJoin::preCodeGen(generator, externalInputs, pulledNewInputs);
if (nj == NULL)
return NULL;
return nj;
}
RelExpr * NestedJoin::preCodeGen(Generator * generator,
const ValueIdSet &externalInputs,
ValueIdSet &pulledNewInputs)
{
if (nodeIsPreCodeGenned())
return this;
NABoolean espExchangeWithMerge = FALSE;
NABoolean childIsBlocking = FALSE;
if ((getHalloweenForceSort() != NO_SELF_REFERENCE) &&
(!generator->getR251HalloweenPrecode()))
{
GenAssert(Generator::NOT_SELF_REF != generator->getHalloweenProtection(),
"Inconsistency in Generator and NestedJoin.");
// Look for either of two patterns on the left hand side:
// sort or exchange+sort.
if (child(0)->getOperatorType() == REL_SORT)
childIsBlocking = TRUE;
else
if ((child(0)->getOperatorType() == REL_EXCHANGE) &&
(child(0)->child(0)->getOperatorType() == REL_SORT))
{
childIsBlocking = TRUE;
// The espExchangeWithMerge flag is used to conditionally
// assert that the exchange will merge. The assertion
// is deferred until after preCodeGen on the left subtree,
// because the Exchange::doesMerge() method should not be
// called until Exchange::preCodeGen is finished.
espExchangeWithMerge = TRUE;
}
if (childIsBlocking)
{
if (getHalloweenForceSort() == FORCED)
{
if (espExchangeWithMerge)
((Sort *)(child(0)->child(0).getPtr()))->
markAsHalloweenProtection();
else
((Sort *)(child(0).getPtr()))->markAsHalloweenProtection();
generator->setHalloweenProtection(Generator::FORCED_SORT);
}
else
generator->setHalloweenProtection(Generator::PASSIVE);
}
else if (updateSelectValueIdMap() && updateTableDesc() &&
(NOT updateTableDesc()->getNATable()->getClusteringIndex()->hasSyskey()))
{
// if the key columns of the table being inserted into are
// equal to constants or inputs then no sort is required
// to enforce Halloween blocking. Example statements are
// update tt set a = 1 ;(a is the primary key for table tt)
// insert into tt select * from tt where a = 1 ;
ValueIdList reqdOrder ;
updateSelectValueIdMap()->rewriteValueIdListDown(
updateTableDesc()->getClusteringIndex()->getOrderOfKeyValues(),
reqdOrder);
reqdOrder.removeCoveredExprs(
getGroupAttr()->getCharacteristicInputs());
if (reqdOrder.isEmpty())
{
generator->setHalloweenProtection(Generator::PASSIVE);
}
}
}
// Insert a probe cache above the inner table if applicable
if ( isProbeCacheApplicable(
castToRelExpr()->getPhysicalProperty()->getPlanExecutionLocation()
)
)
{
ProbeCache *probeCache =
new (generator->wHeap()) ProbeCache(
child(1), getDefault(GEN_PROBE_CACHE_NUM_ENTRIES),
generator->wHeap());
// look for an aggregate right child node
RelExpr *rightChildExpr = child(1).getPtr();
GroupByAgg *rightChildGrby = NULL;
RelExpr *rightChildExch = NULL;
MapValueIds *rightChildMvi = NULL;
ValueIdMap *optionalMap = NULL;
NABoolean done = FALSE;
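// Walk down the right leg, looking through at most one Exchange and at most
// one MapValueIds node, to find a GroupByAgg from which predicates could
// potentially be pulled up into the ProbeCache.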
while (!done)
{
if (rightChildExpr->getOperator().match(REL_ANY_GROUP))
{
rightChildGrby = (GroupByAgg *) rightChildExpr;
done = TRUE;
}
else if (rightChildExpr->getOperator() == REL_EXCHANGE)
{
if (rightChildExch == NULL)
rightChildExch = rightChildExpr;
else
done = TRUE; // can't handle more than one exchange
}
else if (rightChildExpr->getOperator() == REL_MAP_VALUEIDS)
{
if (rightChildMvi == NULL)
{
rightChildMvi = (MapValueIds *) rightChildExpr;
optionalMap = &rightChildMvi->getMap();
}
else
done = TRUE; // can't handle more than one MVI
}
else
done = TRUE;
if (!done)
rightChildExpr = rightChildExpr->child(0);
}
// Among other things, this will give the probeCache
// the characteristic inputs and outputs of the
// inner table.
probeCache->setGroupAttr(new(generator->wHeap())
GroupAttributes(*(child(1)->getGroupAttr())));
// Try to pull up predicates from the child, if that reduces
// the char. inputs sent to the child. We only try this right
// now if the child is an aggregate or groupby.
if (rightChildGrby &&
CmpCommon::getDefault(NESTED_JOIN_CACHE_PREDS) != DF_OFF &&
(// if right child exchange exists, it must have same char inputs
rightChildExch == NULL ||
rightChildExch->getGroupAttr()->getCharacteristicInputs() ==
rightChildGrby->getGroupAttr()->getCharacteristicInputs()) &&
(rightChildMvi == NULL ||
rightChildMvi->getGroupAttr()->getCharacteristicInputs() ==
rightChildGrby->getGroupAttr()->getCharacteristicInputs()))
{
ValueIdSet pcAvailableInputs(
probeCache->getGroupAttr()->getCharacteristicInputs());
// predicates can refer to both char. inputs and outputs
pcAvailableInputs +=
probeCache->getGroupAttr()->getCharacteristicOutputs();
// note that this will overwrite the ProbeCache's selection preds
rightChildGrby->tryToPullUpPredicatesInPreCodeGen(
pcAvailableInputs,
probeCache->selectionPred(),
optionalMap);
// adjust char. inputs of intervening nodes - this is not
// exactly good style, just overwriting the char. inputs, but
// hopefully we'll get away with it at this stage in the
// processing
if (rightChildExch)
rightChildExch->getGroupAttr()->setCharacteristicInputs(
rightChildGrby->getGroupAttr()->getCharacteristicInputs());
if (rightChildMvi)
rightChildMvi->getGroupAttr()->setCharacteristicInputs(
rightChildGrby->getGroupAttr()->getCharacteristicInputs());
}
// propagate estimates, physical properties, and costings
// from the child to the ProbeCache:
probeCache->setEstRowsUsed(child(1)->getEstRowsUsed());
probeCache->setMaxCardEst(child(1)->getMaxCardEst());
probeCache->setInputCardinality(child(1)->getInputCardinality());
probeCache->setPhysicalProperty(child(1)->getPhysicalProperty());
probeCache->setOperatorCost(0);
probeCache->setRollUpCost(child(1)->getRollUpCost());
// Glue the ProbeCache to the NestedJoin's right leg.
child(1) = probeCache;
}
if (isTSJForUndo())
{
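// For a TSJ used for undo processing, put a Sort on the left leg. The sort
// key is a constant literal, so the Sort does not establish any particular
// order; it effectively just buffers (blocks) the rows from the left child.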
Sort *sortNode = new(generator->wHeap()) Sort(child(0));
ItemExpr *sk = new (generator->wHeap()) SystemLiteral(1);
sk->synthTypeAndValueId(TRUE);
ValueIdList skey;
skey.insert(sk->getValueId());
sortNode->getSortKey() = skey;
// Use the same characteristic inputs and outputs as the left child
sortNode->setGroupAttr(new(generator->wHeap())
GroupAttributes(*(child(0)->getGroupAttr())));
//pass along some of the estimates
sortNode->setEstRowsUsed(child(0)->getEstRowsUsed());
sortNode->setMaxCardEst(child(0)->getMaxCardEst());
sortNode->setInputCardinality(child(0)->getInputCardinality());
sortNode->setPhysicalProperty(child(0)->getPhysicalProperty());
sortNode->setCollectNFErrors();
sortNode->setOperatorCost(0);
sortNode->setRollUpCost(child(0)->getRollUpCost());
child(0) = sortNode;
}
if ( childIsBlocking &&
generator->preCodeGenParallelOperator() )
{
if (espExchangeWithMerge == FALSE)
{
// A "halloween sort" needs to ensure that if it is parallel, but executes
// in the same ESP as the generic update's TSJ flow node, then the Sort
// will block until all scans are finished.
((Sort *)(child(0).getPtr()))->doCheckAccessToSelfRefTable();
}
else
{
// An ESP Exchange can be eliminated in its preCodeGen method if it is
// redundant. If this happens, then the Sort will be executing in the
// same ESP as the TSJ after all. So we set this flag now, so that the
// Exchange preCodeGen will call doCheckAccessToSelfRefTable() for the
// Sort before eliminating itself. This is part of the fix for Sol
// 10-090310-9876.
((Exchange *)(child(0).getPtr()))->markHalloweenSortIsMyChild();
}
}
RelExpr * re =
Join::preCodeGen(generator, externalInputs, pulledNewInputs);
if ( espExchangeWithMerge &&
(child(0)->getOperatorType() == REL_EXCHANGE))
GenAssert(((Exchange *)((RelExpr *)child(0)))->doesMerge(),
"Exchange operator does not block for Halloween problem.");
generator->compilerStatsInfo().nj()++;
return re;
}
RelExpr * MergeJoin::preCodeGen(Generator * generator,
const ValueIdSet & externalInputs,
ValueIdSet &pulledNewInputs)
{
if (nodeIsPreCodeGenned())
return this;
if (! Join::preCodeGen(generator, externalInputs, pulledNewInputs))
return 0;
// Accumulate the values that are provided as inputs by my parent
// together with the values that are produced as outputs by my
// children. Use these values for rewriting the VEG expressions.
ValueIdSet availableValues;
getInputValuesFromParentAndChildren(availableValues);
// find out if the left child and/or the right child will have at most
// one matching row. If so, a faster merge join implementation
// will be used at runtime.
ValueIdSet vidSet = getOrderedMJPreds();
ValueIdSet valuesUsedForPredicates;
computeValuesReqdForPredicates(vidSet,
valuesUsedForPredicates);
leftUnique() =
child(0)->getGroupAttr()->isUnique(valuesUsedForPredicates);
rightUnique() =
child(1)->getGroupAttr()->isUnique(valuesUsedForPredicates);
ValueIdList mjp(getOrderedMJPreds());
NABoolean replicatePredicates = TRUE;
/* For merge join the characteristic outputs have already been resolved
by the time the equijoin preds are resolved below. The outputs are
resolved at the very end of Join::precodegen, which was called a few
lines above. Therefore when we resolve the equijoin preds we have
only the actually resolved output values available. We do not have
all the potential output values available.
*/
ValueIdSet joinInputAndOutputValues;
joinInputAndOutputValues = getGroupAttr()->getCharacteristicInputs();
joinInputAndOutputValues += getGroupAttr()->getCharacteristicOutputs();
// Pass in the children GAs so that the equipreds can have one side
// resolved to one child and the other side resolved to the other child.
// solution 10-100722-1962
mjp.replaceVEGExpressions
(availableValues,
getGroupAttr()->getCharacteristicInputs(),
FALSE, // no key predicates here
0 /* no need for idempotence here */,
replicatePredicates,
NULL /* not a groupByAgg */,
&joinInputAndOutputValues,
NULL /* no indexDesc since we have no key preds*/,
child(0)->getGroupAttr(),
child(1)->getGroupAttr());
// must have at least 1 merge join predicate
GenAssert(!mjp.isEmpty(),"mjp.isEmpty()");
// The generator expects the merge join predicates to be in the form
// leftcol = rightcol where leftcol references a column from the left
// table and rightcol references a column from the right table. Switch
// the expression if it is the other way around. Also handle rare cases
// where a VEGPred is resolved into two equalities connected by an AND.
//
ValueIdSet dummy1;
ValueIdList newJoinPreds;
ValueIdList newLeftOrder;
ValueIdList newRightOrder;
CollIndex ne = (CollIndex)(mjp.entries());
NABoolean isANewJoinPred ;
for (CollIndex i = 0; i < ne; i++)
{
// Will store all the conjuncts under the pred mjp[i] being considered.
ValueIdSet conjuncts;
conjuncts.clear();
conjuncts.insert(mjp[i]);
ValueIdSet finerConjuncts;
do
{
finerConjuncts.clear();
// Go through the set of conjuncts, breaking down any AND seen into
// finer conjuncts.
//
for (ValueId vid = conjuncts.init();
conjuncts.next(vid);
conjuncts.advance(vid))
{
ItemExpr *pred = vid.getItemExpr();
if (pred->getOperatorType() == ITM_AND)
{
// Found another AND, break it down into finer conjuncts. Store
// them in finerConjuncts so that we can return to them later.
//
finerConjuncts.insert(pred->child(0)->getValueId());
finerConjuncts.insert(pred->child(1)->getValueId());
}
else
{
// This is the "finest" conjunct - cannot be broken down further.
// Make sure it's in the form of (leftCol = rightCol). Add the
// equality predicate to the final list of MJ predicates. leftOrder
// and rightOrder are set up correspondingly so that they match up
// with the predicates.
//
GenAssert(pred->getOperatorType() == ITM_EQUAL,
"pred->getOperatorType() != ITM_EQUAL");
ItemExpr *left = pred->child(0)->castToItemExpr();
ItemExpr *right = pred->child(1)->castToItemExpr();
isANewJoinPred = TRUE;
NABoolean child0Covered = child(0).getGroupAttr()->covers(left->getValueId(),
getGroupAttr()->getCharacteristicInputs(),
dummy1) ;
NABoolean child1Covered = child(1).getGroupAttr()->covers(right->getValueId(),
getGroupAttr()->getCharacteristicInputs(),
dummy1) ;
if (NOT (child0Covered && child1Covered))
{
//++MV - Irena
// save the pred's specialNulls_ flag before replacing the pred
BiRelat *biRelat = new(generator->wHeap()) BiRelat(ITM_EQUAL, right, left);
// restore specialNulls_
biRelat->setSpecialNulls(((BiRelat*)pred)->getSpecialNulls());
biRelat->bindNode(generator->getBindWA());
pred = biRelat;
//--MV - Irena
child0Covered = child(0).getGroupAttr()->covers(right->getValueId(),
getGroupAttr()->getCharacteristicInputs(),
dummy1) ;
child1Covered = child(1).getGroupAttr()->covers(left->getValueId(),
getGroupAttr()->getCharacteristicInputs(),
dummy1) ;
if(!(child0Covered && child1Covered))
{
if (isInnerNonSemiJoin())
{
selectionPred() += pred->getValueId();
}
else
{
// for an outer or semi join, the ON clause is stored in "joinPred"
// while the WHERE clause is stored in "selectionPred".
joinPred() += pred->getValueId();
}
isANewJoinPred = FALSE;
}
}
if (isANewJoinPred)
{
// Store the finest conjuncts in the final list of MJ predicates.
// Make sure the list is matched up with corresponding leftOrder
// and rightOrder.
//
newJoinPreds.insert(pred->getValueId());
newLeftOrder.insert(getLeftSortOrder()[i]);
newRightOrder.insert(getRightSortOrder()[i]);
}
}
} // for over conjuncts.
// Come back to process the new set of broken-down conjuncts if the set
// is non-empty.
//
conjuncts = finerConjuncts;
} while (NOT conjuncts.isEmpty());
} // for over mjp.
if (ne > 0)
GenAssert(NOT newJoinPreds.isEmpty(), "MergeJoin::PreCodeGen has no resolved join predicates");
// Count merge join as a Big Memory Operator (BMO) if use of BMO quota
// is enabled for merge join.
if (CmpCommon::getDefaultLong(MJ_BMO_QUOTA_PERCENT) != 0)
{
generator->incrNumBMOs();
}
setOrderedMJPreds(newJoinPreds);
setLeftSortOrder(newLeftOrder);
setRightSortOrder(newRightOrder);
generator->compilerStatsInfo().mj()++;
markAsPreCodeGenned();
return this;
} // MergeJoin::preCodeGen()
RelExpr * HashJoin::preCodeGen(Generator * generator,
const ValueIdSet & externalInputs,
ValueIdSet &pulledNewInputs)
{
if (nodeIsPreCodeGenned())
return this;
if ( CmpCommon::getDefault(COMPRESSED_INTERNAL_FORMAT) == DF_SYSTEM)
{
NABoolean resize = FALSE;
NABoolean defrag = FALSE;
ValueIdSet vidSet0 = child(0)->getGroupAttr()->getCharacteristicOutputs();
ValueIdSet vidSet1 = child(1)->getGroupAttr()->getCharacteristicOutputs();
ExpTupleDesc::TupleDataFormat tupleFormat =
determineInternalFormat( vidSet1,
vidSet0,
this,
resize,
generator,
FALSE,
defrag);
cacheTupleFormatAndResizeFlag(tupleFormat, resize, defrag);
if (tupleFormat == ExpTupleDesc::SQLMX_ALIGNED_FORMAT)
{
generator->incNCIFNodes();
}
else
{
generator->decNCIFNodes();
}
}
// Determine if we should attempt to use the HashJoin min/max optimization.
NABoolean useMinMaxOpt =
((CmpCommon::getDefault(GEN_HSHJ_MIN_MAX_OPT) == DF_ON) &&
! getEquiJoinPredicates().isEmpty() &&
! isLeftJoin() &&
! isRightJoin() &&
! isAntiSemiJoin());
// These indexes define the subset of min max values which belong to this HashJoin.
CollIndex startMinMaxIndex = 0;
CollIndex endMinMaxIndex = 0;
// If min/max opt is used, these lists hold local copies of the
// generator's min and max values. These are the min and max values
// generated by HashJoins that may be used by scans.
ValueIdList myMinVals;
ValueIdList myMaxVals;
// If min/max opt is used, this list holds a local copy
// of the generator's minMaxKeys. These are the values for which min
// and max values are available.
ValueIdList myMinMaxKeys;
if (useMinMaxOpt) {
// This HashJoin will append to the end of the generator lists.
//
startMinMaxIndex = generator->getMinMaxKeys().entries();
// Find the candidate values from the right hand side of the join.
// For now, only consider VEGPreds.
for (ValueId valId = getEquiJoinPredicates().init();
getEquiJoinPredicates().next(valId);
getEquiJoinPredicates().advance(valId)) {
ItemExpr * itemExpr = valId.getItemExpr();
NAType *mmType = NULL;
if (itemExpr->getOperatorType() == ITM_VEG_PREDICATE) {
VEGPredicate *vPred = (VEGPredicate *)itemExpr;
VEGReference *vRef = vPred->getVEG()->getVEGReference();
mmType = vRef->getValueId().getType().newCopy(generator->wHeap());
}
// mmType is the type of the VEGRef relating a left and right value.
// We will compute the Min and Max using this type
if(mmType) {
// Min/Max are typed as nullable.
mmType->setNullable(true);
// Construct the host vars which will represent the min and
// max values for this join key.
char name[80];
sprintf(name, "_sys_MinVal%d", generator->getMinMaxKeys().entries());
ItemExpr *minVal = new(generator->wHeap())
HostVar(name,
mmType,
TRUE);
sprintf(name, "_sys_MaxVal%d", generator->getMinMaxKeys().entries());
ItemExpr *maxVal = new(generator->wHeap())
HostVar(name,
mmType,
TRUE);
minVal->synthTypeAndValueId();
maxVal->synthTypeAndValueId();
// Insert the value and its min and max into the generator's lists to
// make them available to scans as key predicates.
generator->getMinMaxKeys().insert(itemExpr->getValueId());
generator->getMinVals().insert(minVal->getValueId());
generator->getMaxVals().insert(maxVal->getValueId());
// Initialize the 'will use' list to a NULL_VALUE_ID. A scan
// that decides to use the min max values will change this
// entry to be the same as the corresponding entry in the
// minMaxKeys list.
generator->getWillUseMinMaxKeys().insert(NULL_VALUE_ID);
}
}
// This is the end index (exclusive) for this HashJoin.
endMinMaxIndex = generator->getMinMaxKeys().entries();
// Keep local copies of the generator's lists.
myMinVals = generator->getMinVals();
myMaxVals = generator->getMaxVals();
myMinMaxKeys = generator->getMinMaxKeys();
}
// Register the start and end indexes for this Hash Join
// (Join::preCodeGen() needs to have access to the indexes)
setStartMinMaxIndex(startMinMaxIndex);
setEndMinMaxIndex(endMinMaxIndex);
if (! Join::preCodeGen(generator, externalInputs, pulledNewInputs))
return NULL;
// List for min and max values that will be used by a scan and which
// will be generated by this HashJoin
minMaxVals_.clear();
minMaxCols_.clear();
{
// For each min/max value belonging to this HashJoin, check to see
// if any scan decided to use it. If so, add the min and max
// values to the list. Also, clear the 'will use' entry as no
// other HashJoin can supply this value.
for (CollIndex i = startMinMaxIndex; i < endMinMaxIndex; i++)
{
if (generator->getWillUseMinMaxKeys()[i] != NULL_VALUE_ID)
{
minMaxVals_.insert(myMinVals[i]);
minMaxVals_.insert(myMaxVals[i]);
VEGPredicate *vPred = (VEGPredicate *)myMinMaxKeys[i].getItemExpr();
VEGReference *vRef = vPred->getVEG()->getVEGReference();
minMaxCols_.insert(vRef->getValueId());
generator->getWillUseMinMaxKeys()[i] = NULL_VALUE_ID;
}
}
// If we have some minMaxCols, then replace any VEGReferences.
if (minMaxCols_.entries())
{
ValueIdSet availForMinMax;
availForMinMax += child(1)->getGroupAttr()->getCharacteristicOutputs();
availForMinMax += getGroupAttr()->getCharacteristicInputs();
minMaxCols_.replaceVEGExpressions(availForMinMax,
getGroupAttr()->getCharacteristicInputs());
}
}
// Accumulate the values that are provided as inputs by my parent
// together with the values that are produced as outputs by my
// children. Use these values for rewriting the VEG expressions.
ValueIdSet availableValues;
getInputValuesFromParentAndChildren(availableValues);
ValueIdSet hjp(getEquiJoinPredicates());
NABoolean replicatePredicates = TRUE;
/* For hash join the characteristic outputs have already been resolved
by the time the equijoin preds are resolved below. The outputs are
resolved at the very end of Join::precodegen, which was called a few
lines above. Therefore when we resolve the equijoin preds we have
only the actually resolved output values available. We do not have
all the potential output values available.
*/
ValueIdSet joinInputAndOutputValues;
joinInputAndOutputValues = getGroupAttr()->getCharacteristicInputs();
joinInputAndOutputValues += getGroupAttr()->getCharacteristicOutputs();
// Pass in the children GAs so that the equipreds can have one side
// resolved to one child and the other side resolved to the other child.
// solution 10-100722-1962
hjp.replaceVEGExpressions
(availableValues,
getGroupAttr()->getCharacteristicInputs(),
FALSE, // no key predicates here
0 /* no need for idempotence here */,
replicatePredicates,
NULL /* not a groupByAgg */,
&joinInputAndOutputValues,
NULL /* no indexDesc since we have no key preds*/,
child(0)->getGroupAttr(),
child(1)->getGroupAttr());
// Will store the rewritten hjp's which comply with the format
// leftCol = rightCol.
//
ValueIdSet newJoinPreds;
if (hjp.isEmpty())
{
}
else
{
// The generator expects the hash join predicates to be in the form
// leftcol = rightcol where leftcol references a column from the left
// table and rightcol references a column from the right table. Switch
// the expression if it is the other way around. Also handle rare cases
// where a VEGPred is resolved into two equalities connected by an AND.
//
ValueIdSet dummy1;
NABoolean isANewJoinPred ;
do
{
ValueIdSet finerConjuncts;
finerConjuncts.clear();
for (ValueId vid = hjp.init();
hjp.next(vid);
hjp.advance(vid))
{
ItemExpr *pred = vid.getItemExpr();
// Break this up into finer conjuncts. Store them in a separate
// set so that we can return to them later.
if (pred->getOperatorType() == ITM_AND)
{
finerConjuncts.insert(pred->child(0)->getValueId());
finerConjuncts.insert(pred->child(1)->getValueId());
}
else
{
GenAssert(pred->getOperatorType() == ITM_EQUAL,
"pred->getOperatorType() != ITM_EQUAL");
ItemExpr *left = pred->child(0)->castToItemExpr();
ItemExpr *right = pred->child(1)->castToItemExpr();
isANewJoinPred = TRUE;
NABoolean child0Covered = child(0).getGroupAttr()->covers(left->getValueId(),
getGroupAttr()->getCharacteristicInputs(),
dummy1) ;
NABoolean child1Covered = child(1).getGroupAttr()->covers(right->getValueId(),
getGroupAttr()->getCharacteristicInputs(),
dummy1) ;
if (NOT (child0Covered && child1Covered))
{
//++MV - Irena
// save the pred's specialNulls_ flag before replacing the pred
BiRelat *biRelat = new(generator->wHeap()) BiRelat(ITM_EQUAL, right, left);
// restore specialNulls_
biRelat->setSpecialNulls(((BiRelat*)pred)->getSpecialNulls());
biRelat->bindNode(generator->getBindWA());
pred = biRelat;
//--MV - Irena
child0Covered = child(0).getGroupAttr()->covers(right->getValueId(),
getGroupAttr()->getCharacteristicInputs(),
dummy1) ;
child1Covered = child(1).getGroupAttr()->covers(left->getValueId(),
getGroupAttr()->getCharacteristicInputs(),
dummy1) ;
if(!(child0Covered && child1Covered))
{
if (isInnerNonSemiJoin())
{
selectionPred() += pred->getValueId();
}
else
{
// for an outer or semi join, the ON clause is stored in "joinPred"
// while the WHERE clause is stored in "selectionPred".
joinPred() += pred->getValueId();
}
isANewJoinPred = FALSE;
}
}
if (isANewJoinPred)
newJoinPreds.insert(pred->getValueId());
}
} // for over hjp.
// Come back to process the new set of broken-down conjuncts if the set
// is non-empty.
//
hjp = finerConjuncts;
} while (NOT hjp.isEmpty());
GenAssert(NOT newJoinPreds.isEmpty(), "HashJoin::PreCodeGen has no resolved join predicates");
}
// Value IDs given to the right/inner child
ValueIdSet valuesGivenToRightChild =
child(1)->getGroupAttr()->getCharacteristicInputs();
if ( ! valuesGivenToRightChild.isEmpty() ) {
// Accumulate the values that are provided as inputs by my parent
// together with the values that are produced as outputs by my
// children. Use these values for rewriting the VEG expressions.
ValueIdSet availableValues;
const ValueIdSet & HJInputs = getGroupAttr()->getCharacteristicInputs();
getInputValuesFromParentAndChildren(availableValues);
valuesGivenToRightChild.replaceVEGExpressions(availableValues, HJInputs);
}
// before computing the move and check expressions, add one more
// value to "valuesGivenToRightChild": a statement execution count
// that will cause re-hashing each time the statement is
// re-executed. It is not legal to keep a hash table across
// statement executions (and possibly transactions).
ValueId execCount = generator->getOrAddStatementExecutionCount();
valuesGivenToRightChild += execCount;
pulledNewInputs += execCount;
getGroupAttr()->addCharacteristicInputs(pulledNewInputs);
// add move and search expressions
for (ValueId val_id = valuesGivenToRightChild.init();
valuesGivenToRightChild.next(val_id);
valuesGivenToRightChild.advance(val_id)) {
ItemExpr * item_expr = val_id.getItemExpr();
// add this converted value to the map table.
Convert * conv_node = new(generator->wHeap()) Convert (item_expr);
// bind/type propagate the new node
conv_node->bindNode(generator->getBindWA());
moveInputValues().insert(conv_node->getValueId());
// add the search condition
BiRelat * bi_relat = new(generator->wHeap())
BiRelat(ITM_EQUAL, item_expr, conv_node);
bi_relat->allocValueId();
checkInputValues().insert(bi_relat->getValueId());
} // for val_id
// Count this BMO and add its needed memory to the total needed
generator->incrNumBMOs();
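// If a per-CPU executor memory limit is configured, also account for this
// hash join's estimated run-time memory usage in the BMO memory total.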
if ((ActiveSchemaDB()->getDefaults()).getAsDouble(EXE_MEMORY_LIMIT_PER_CPU) > 0)
generator->incrBMOsMemory(getEstimatedRunTimeMemoryUsage(TRUE));
// store the transformed predicates back into the hash join node
storeEquiJoinPredicates(newJoinPreds);
generator->compilerStatsInfo().hj()++;
//
// case of hash anti semi join optimization (NOT IN):
// add/build expressions to detect inner and outer nulls,
// checkOuterNullExpr_ and checkInnerNullExpr_.
addCheckNullExpressions(generator->wHeap());
markAsPreCodeGenned();
return this;
} // HashJoin::preCodeGen()
RelExpr * FileScan::preCodeGen(Generator * generator,
const ValueIdSet & externalInputs,
ValueIdSet &pulledNewInputs)
{
if (nodeIsPreCodeGenned())
return this;
const PartitioningFunction* myPartFunc = getPartFunc();
NABoolean usePartKeyPreds =
(isHbaseTable() &&
myPartFunc &&
myPartFunc->isPartitioned() &&
!myPartFunc->isAReplicationPartitioningFunction());
if (isRewrittenMV())
generator->setNonCacheableMVQRplan(TRUE);
if (usePartKeyPreds)
{
// partition key predicates will be applied to this file scan,
// "pull" the partition input values from the parent
pulledNewInputs += myPartFunc->getPartitionInputValues();
getGroupAttr()->addCharacteristicInputs(myPartFunc->getPartitionInputValues());
}
// Resolve the VEGReferences and VEGPredicates, if any, that appear
// in the Characteristic Inputs, in terms of the externalInputs.
getGroupAttr()->resolveCharacteristicInputs(externalInputs);
// The VEG expressions in the selection predicates and the characteristic
// outputs can reference any expression that is either a potential output
// or a characteristic input for this RelExpr. Supply these values for
// rewriting the VEG expressions.
ValueIdSet availableValues;
getInputAndPotentialOutputValues(availableValues);
sampledColumns().replaceVEGExpressions
(availableValues, getGroupAttr()->getCharacteristicInputs());
// Rewrite the partitioning function in terms of the available values.
if (getIndexDesc()->isPartitioned())
getIndexDesc()->getPartitioningFunction()->preCodeGen(availableValues);
// VEGPredicates that are key predicates but are also replicated in
// the executor predicates must be replaced with the same expression
// in both the places after they are rewritten. The VEGRewritePairs
// data structure, when passed to replaceVEGExpressions(), causes
// replaceVEGExpressions() to be idempotent.
VEGRewritePairs vegPairs(generator->wHeap());
ValueIdSet partKeyPredsHBase;
if (usePartKeyPreds)
{
// add the partitioning key predicates to this scan node,
// to make sure that each ESP reads only the part of the
// data that it is supposed to process
ValueId saltCol;
if (myPartFunc->isATableHashPartitioningFunction())
{
// find the _SALT_ column and make a partitioning key
// predicate for it
const ValueIdList &keyCols = getIndexDesc()->getIndexKey();
// the first salt column we find in the key is the one
// we are looking for
for (CollIndex i=0; i<keyCols.entries(); i++)
if (keyCols[i].isSaltColumn())
{
saltCol = keyCols[i];
break;
}
if (saltCol != NULL_VALUE_ID)
((TableHashPartitioningFunction *) myPartFunc)->
createPartitioningKeyPredicatesForSaltedTable(saltCol);
}
partKeyPredsHBase = myPartFunc->getPartitioningKeyPredicates();
}
if (getMdamKeyPtr() != NULL)
{
NABoolean replicatePredicates = TRUE;
// mdamKeyPtr()->print(); // for debugging purposes
ValueIdSet executorPredicates;
ValueIdSet augmentedPreds = getSelectionPredicates();
const LogPhysPartitioningFunction *logPhysPartFunc =
getPartFunc()->castToLogPhysPartitioningFunction();
if (!partKeyPredsHBase.isEmpty())
{
augmentedPreds += partKeyPredsHBase;
mdamKeyPtr()->setNoExePred(FALSE);
}
augmentedPreds += getComputedPredicates();
if ( logPhysPartFunc != NULL )
{
LogPhysPartitioningFunction::logPartType logPartType =
logPhysPartFunc->getLogPartType();
if ( logPartType == LogPhysPartitioningFunction::LOGICAL_SUBPARTITIONING
OR logPartType == LogPhysPartitioningFunction::HORIZONTAL_PARTITION_SLICING
)
augmentedPreds += logPhysPartFunc->getPartitioningKeyPredicates();
}
mdamKeyPtr()->preCodeGen(executorPredicates,
augmentedPreds,
availableValues,
getGroupAttr()->getCharacteristicInputs(),
&vegPairs,
replicatePredicates,
!partKeyPredsHBase.isEmpty());
setExecutorPredicates(executorPredicates);
// mdamKeyPtr()->print(); // for debugging purposes
}
else if (! isHiveTable() &&
(getSearchKey() || !partKeyPredsHBase.isEmpty()))
{
// ---------------------------------------------------
// --------------------- Rewrite preds for search key:
// ---------------------------------------------------
if (!partKeyPredsHBase.isEmpty())
{
// These predicates can compete with other key predicates;
// decide which of them to use as key preds and which as
// executor preds:
// - No search key: Use part key preds as search key
// - Search key with non-unique preds: Replace it with
// a new search key with part key preds
// - Search key with unique preds (unlikely, this shouldn't
// have been a parallel query): add part key preds as
// executor preds
ValueIdSet combinedInputs(externalInputs);
combinedInputs += pulledNewInputs;
ValueIdSet existingKeyPreds;
if (getSearchKey())
existingKeyPreds += getSearchKey()->getKeyPredicates();
// create a new search key that has the partitioning key preds
SearchKey * partKeySearchKey =
myPartFunc->createSearchKey(getIndexDesc(),
combinedInputs,
existingKeyPreds);
ValueIdSet exePreds(partKeySearchKey->getExecutorPredicates());
NABoolean replaceSearchKey = !(getSearchKey() &&
getSearchKey()->isUnique());
if (getSearchKey())
exePreds += getSearchKey()->getExecutorPredicates();
// pick one search key and add the remaining
// predicates (if any) to exePreds
if (replaceSearchKey)
setSearchKey(partKeySearchKey);
else
exePreds += partKeySearchKey->getKeyPredicates();
searchKey()->setExecutorPredicates(exePreds);
}
NABoolean replicatePredicates = TRUE;
setExecutorPredicates(searchKey()->getExecutorPredicates());
// Rebuild the search key expressions
ValueIdSet& keyPred = searchKey()->keyPredicates();
keyPred.replaceVEGExpressions
(availableValues,
getGroupAttr()->getCharacteristicInputs(),
FALSE, // no need for key predicate generation here
&vegPairs,
replicatePredicates);
// Rebuild the executor predicate tree
executorPred().replaceVEGExpressions
(availableValues,
getGroupAttr()->getCharacteristicInputs(),
FALSE, // no need for key predicate generation here
&vegPairs,
replicatePredicates
);
// Generate the begin and end keys.
if ( getDoUseSearchKey() ) {
generateKeyExpr(getGroupAttr()->getCharacteristicInputs(),
getIndexDesc()->getIndexKey(),
getSearchKey()->getBeginKeyValues(),
beginKeyPred_,
generator,
replicatePredicates);
generateKeyExpr(getGroupAttr()->getCharacteristicInputs(),
getIndexDesc()->getIndexKey(),
getSearchKey()->getEndKeyValues(),
endKeyPred_,
generator,
replicatePredicates);
}
// Check to see if there are any MIN/MAX values coming from a
// HashJoin which could be used as begin/end key values for the
// leading key of this scan. Don't consider doing this if this
// is a unique scan (can't improve on that) or if the leading
// key is already unique or if both the begin and end key are
// exclusive (min max are inclusive and no easy way to mix
// them).
if (generator->getMinMaxKeys().entries() &&
(getSearchKey()->getBeginKeyValues()[0] !=
getSearchKey()->getEndKeyValues()[0]) &&
(!getSearchKey()->isBeginKeyExclusive() ||
!getSearchKey()->isEndKeyExclusive())) {
// The keys of the scan.
const ValueIdList &keys = getIndexDesc()->getIndexKey();
ValueId minMaxKeyCol = keys[0];
IndexColumn *ixCol = (IndexColumn *) (minMaxKeyCol.getItemExpr());
BaseColumn *baseCol = NULL;
ValueId underlyingCol;
NABoolean needToComputeActualMinMax = FALSE;
ItemExpr *computedColExpr = NULL;
// The candidate values for min and max.
const ValueIdList &minMaxKeys = generator->getMinMaxKeys();
CollIndex keyIdx = NULL_COLL_INDEX;
// Determine how min/max relate to begin/end. This depends
// on the ordering (ASC vs DESC) and the scan direction (forward vs
// reverse).
NABoolean ascKey =
getIndexDesc()->getNAFileSet()->getIndexKeyColumns().isAscending(0);
if(getReverseScan())
ascKey = !ascKey;
// If the leading key column is a divisioning column, then
// look for min/max values of an underlying column
GenAssert(ixCol->getOperatorType() == ITM_INDEXCOLUMN,
"unexpected object type");
baseCol =
(BaseColumn *) (((IndexColumn *) ixCol)->getDefinition().getItemExpr());
GenAssert(baseCol->getOperatorType() == ITM_BASECOLUMN,
"unexpected object type");
if (baseCol->getNAColumn()->isDivisioningColumn()) {
ValueIdSet underlyingCols;
baseCol->getUnderlyingColumnsForCC(underlyingCols);
if (underlyingCols.entries() == 1) {
// We have a leading division column that's computed from
// 1 base column, now get the underlying column and the
// divisioning expression
needToComputeActualMinMax = TRUE;
underlyingCols.getFirst(minMaxKeyCol);
computedColExpr = baseCol->getComputedColumnExpr().getItemExpr();
BaseColumn *underlyingBaseCol =
(BaseColumn *) minMaxKeyCol.getItemExpr();
GenAssert(underlyingBaseCol->getOperatorType() == ITM_BASECOLUMN,
"unexpected object type");
// the computed column expression has been rewritten to use
// VEGRefs, so get the corresponding VEGRef for the underlying column
underlyingCol = underlyingBaseCol->getTableDesc()->
getColumnVEGList()[underlyingBaseCol->getColNumber()];
}
}
// Check all the candidate values. If any one of them matches
// the leading key of this scan, then select it for use in the
// begin/end key value of the leading key.
// Scalar min/max functions cause an exponential growth when
// combined with each other, see ItmScalarMinMax::codeGen()
Int32 limitItems = 3 ; // use at most 3
for(CollIndex i = 0; i < minMaxKeys.entries() && limitItems; i++) {
ValueId mmKeyId = minMaxKeys[i];
if(mmKeyId != NULL_VALUE_ID) {
ItemExpr *mmItem = mmKeyId.getItemExpr();
if (mmItem->getOperatorType() == ITM_VEG_PREDICATE) {
VEGPredicate *vPred = (VEGPredicate *)mmItem;
const ValueIdSet &members = vPred->getVEG()->getAllValues();
if (members.contains(minMaxKeyCol)) {
// some other operator is producing min/max values
// for our leading key column, now check whether we
// can use them
keyIdx = i;
// Indicate in the 'will use' list that we will use these
// min/max values. This will indicate to the HashJoin that
// it should produce these values.
generator->getWillUseMinMaxKeys()[keyIdx] =
generator->getMinMaxKeys()[keyIdx];
addMinMaxHJColumn(baseCol->getValueId());
limitItems-- ; // one more is used
// If we can use a min/max value for the begin key, do so...
if(!getSearchKey()->isBeginKeyExclusive()) {
ItemExpr *keyPred = getBeginKeyPred()[0].getItemExpr();
ItemExpr *currentBeg = keyPred->child(1);
// Get the proper begin key (min or max) that came from
// the HashJoin
ValueId hashJoinBeg = (ascKey ?
generator->getMinVals()[keyIdx] :
generator->getMaxVals()[keyIdx]);
// Construct an expression which determines at runtime
// which BK to use. Either the existing one or the one
// coming from HashJoin whichever is larger (smaller).
//
ItemExpr *newBeg = hashJoinBeg.getItemExpr();
if (needToComputeActualMinMax) {
ValueIdMap divExprMap;
ValueId computedBeg;
// If hashJoinBeg is :sysHV1 and the computed column
// expression is A/100, then the begin value for
// the computed column is :sysHV1/100. Do this
// rewrite by using a ValueIdMap
divExprMap.addMapEntry(underlyingCol, hashJoinBeg);
divExprMap.rewriteValueIdDown(computedColExpr->getValueId(),
computedBeg);
newBeg = computedBeg.getItemExpr();
}
newBeg = new (generator->wHeap())
ItmScalarMinMax((ascKey ? ITM_SCALAR_MAX : ITM_SCALAR_MIN),
currentBeg,
newBeg);
newBeg->synthTypeAndValueId();
// Replace the RHS of the key pred.
keyPred->child(1) = newBeg->getValueId();
// The value coming from the HashJoin must be in our inputs.
getGroupAttr()->addCharacteristicInputs(hashJoinBeg);
// And we must pull those values from the HashJoin.
pulledNewInputs += hashJoinBeg;
availableValues += hashJoinBeg;
}
// If we can use a min/max value for the end key, do so...
if(!getSearchKey()->isEndKeyExclusive()) {
ItemExpr *keyPred = getEndKeyPred()[0].getItemExpr();
ItemExpr *currentEnd = keyPred->child(1);
// Get the proper end key (max or min) that came from
// the HashJoin
ValueId hashJoinEnd = (ascKey ?
generator->getMaxVals()[keyIdx] :
generator->getMinVals()[keyIdx]);
// Construct an expression which determines at runtime
// which EK to use. Either the existing one or the one
// coming from HashJoin whichever is smaller (larger).
//
ItemExpr *newEnd = hashJoinEnd.getItemExpr();
if (needToComputeActualMinMax) {
ValueIdMap divExprMap;
ValueId computedEnd;
divExprMap.addMapEntry(underlyingCol, hashJoinEnd);
divExprMap.rewriteValueIdDown(computedColExpr->getValueId(),
computedEnd);
newEnd = computedEnd.getItemExpr();
}
newEnd = new (generator->wHeap())
ItmScalarMinMax((ascKey ? ITM_SCALAR_MIN : ITM_SCALAR_MAX),
currentEnd,
newEnd);
newEnd->synthTypeAndValueId();
// Replace the RHS of the key pred.
keyPred->child(1) = newEnd->getValueId();
// The value coming from the HashJoin must be in our inputs.
getGroupAttr()->addCharacteristicInputs(hashJoinEnd);
// And we must pull those values from the HashJoin.
pulledNewInputs += hashJoinEnd;
availableValues += hashJoinEnd;
}
}
}
}
}
}
}
else
{
// Hive table scan (HBase scan has executor preds set up already)
if (isHiveTable())
setExecutorPredicates(selectionPred());
// Rebuild the executor predicate tree
executorPred().replaceVEGExpressions
(availableValues,
getGroupAttr()->getCharacteristicInputs(),
FALSE, // no need for key predicate generation here
&vegPairs,
TRUE);
if (isHiveTable())
// assign individual files and blocks to each ESP
((NodeMap *) getPartFunc()->getNodeMap())->assignScanInfos(hiveSearchKey_);
generator->setProcessLOB(TRUE);
}
// Selection predicates are not needed anymore:
selectionPred().clear();
// Add the sampled columns to the set of available values. This is
// basically a kluge to get the GroupAttributes right.
availableValues += sampledColumns();
// This call also rewrites predicates
// $$$ Does it need vegPairs too? $$$
getGroupAttr()->resolveCharacteristicOutputs
(availableValues,
getGroupAttr()->getCharacteristicInputs());
generator->oltOptInfo()->mayDisableOperStats(&oltOptInfo());
markAsPreCodeGenned();
return this;
} // FileScan::preCodeGen()
RelExpr * GenericUpdate::preCodeGen(Generator * generator,
const ValueIdSet & externalInputs,
ValueIdSet &pulledNewInputs)
{
if (nodeIsPreCodeGenned())
return this;
if (! RelExpr::preCodeGen(generator,externalInputs,pulledNewInputs))
return NULL;
// Determine whether OLT optimization must be avoided.
if (getGroupAttr()->isEmbeddedUpdateOrDelete())
{
generator->oltOptInfo()->setOltMsgOpt(FALSE);
generator->setUpdAbortOnError(TRUE);
generator->setUpdSavepointOnError(FALSE);
generator->setUpdPartialOnError(FALSE);
generator->setUpdErrorOnError(FALSE);
}
if ((accessOptions().accessType() == SKIP_CONFLICT_) ||
(getGroupAttr()->isStream()) ||
(newRecBeforeExprArray().entries() > 0)) // set on rollback
{
generator->oltOptInfo()->setOltEidOpt(FALSE);
oltOptInfo().setOltEidOpt(FALSE);
setExpandShortRows(FALSE);
generator->setUpdAbortOnError(TRUE);
generator->setUpdSavepointOnError(FALSE);
generator->setUpdPartialOnError(FALSE);
generator->setUpdErrorOnError(FALSE);
}
// If RI, IM, MV or triggers are being used, abort on error.
// This is because internal data consistency
// cannot be guaranteed for these cases.
if ((getInliningInfo().hasInlinedActions()) ||
(getInliningInfo().isEffectiveGU()))
{
// cannot do partial updates.
generator->setUpdPartialOnError(FALSE);
if (CmpCommon::getDefault(COMP_BOOL_206) == DF_ON)
{
if (NOT ((getInliningInfo().hasTriggers()) ||
(getInliningInfo().hasRI()) ||
(getInliningInfo().hasIM()) ||
(getInliningInfo().isMVLoggingInlined())))
{
generator->setUpdAbortOnError(TRUE);
generator->setUpdSavepointOnError(FALSE);
generator->setUpdErrorOnError(FALSE);
}
else
generator->setUpdErrorOnError(FALSE);
}
else
{
// abort on error for non-IM cases(RI,MV,Trig).
if ((NOT getInliningInfo().hasIM()) ||
(getInliningInfo().hasRI()))
{
generator->setUpdAbortOnError(TRUE);
generator->setUpdSavepointOnError(FALSE);
generator->setUpdErrorOnError(FALSE);
}
else
generator->setUpdErrorOnError(FALSE);
}
}
// If RI, MV or triggers are being used, turn off the lean optimization for
// the complete plan; all other optimizations will still apply.
if ( generator->oltOptInfo()->oltEidLeanOpt() &&
( getInliningInfo().hasTriggers() ||
getInliningInfo().hasRI() ||
getInliningInfo().isMVLoggingInlined() ) )
{
generator->oltOptInfo()->setOltEidLeanOpt(FALSE);
oltOptInfo().setOltEidLeanOpt(FALSE);
}
// Accumulate the values that are provided as inputs by my parent
// together with the values that are produced as outputs by my
// children. Use these values for rewriting the VEG expressions.
ValueIdSet availableValues;
getInputValuesFromParentAndChildren(availableValues);
newRecExpr_.replaceVEGExpressions
(availableValues,
getGroupAttr()->getCharacteristicInputs());
newRecBeforeExpr_.replaceVEGExpressions
(availableValues,
getGroupAttr()->getCharacteristicInputs());
executorPred().replaceVEGExpressions
(availableValues,
getGroupAttr()->getCharacteristicInputs());
// VEGPredicates that are key predicates but are also replicated
// in the executor predicates must be replaced with the same
// expression in both places after they are rewritten.
// Therefore, we want replaceVEGExpressions() processing to be
// idempotent. By passing the VEGRewritePairs data structure
// to replaceVEGExpressions(), we get idempotence.
VEGRewritePairs lookup(generator->wHeap()); // so replaceVEGExpressions will be idempotent
if (getSearchKey() == NULL)
{
// Begin and end key preds may already be available.
beginKeyPred_.replaceVEGExpressions
(availableValues,
getGroupAttr()->getCharacteristicInputs(),
FALSE, // no need for key predicate generation here
&lookup);
endKeyPred_.replaceVEGExpressions
(availableValues,
getGroupAttr()->getCharacteristicInputs(),
FALSE, // no need for key predicate generation here
&lookup);
// In the case of an embedded insert from VALUES,
// any predicates need to have their VEGreferences resolved.
if (getGroupAttr()->isEmbeddedInsert())
{
NABoolean replicatePredicates = TRUE;
// Rebuild the executor predicate tree
executorPred().replaceVEGExpressions
(availableValues,
getGroupAttr()->getCharacteristicInputs(),
FALSE, // no need for key predicate generation
&lookup,
replicatePredicates
);
}
}
else
{
// Build begin and end key predicates from the search key structure.
//## It *might* be a good idea to add here:
//## CMPASSERT(beginKeyPred_.isEmpty() && endKeyPred_.isEmpty());
//## as that *seems* to be the assumption here.
//## (But I haven't the time to make the change and test it.)
ValueIdSet& keyPred = getSearchKey()->keyPredicates();
NABoolean replicatePredicates = TRUE;
// Rebuild the search key expressions
keyPred.replaceVEGExpressions
(availableValues,
getGroupAttr()->getCharacteristicInputs(),
FALSE, // no need for key predicate generation
&lookup,
replicatePredicates);
// Rebuild the executor predicate tree
executorPred().replaceVEGExpressions
(availableValues,
getGroupAttr()->getCharacteristicInputs(),
FALSE, // no need for key predicate generation
&lookup,
replicatePredicates
);
// Generate the begin and end keys.
generateKeyExpr(getGroupAttr()->getCharacteristicInputs(),
getIndexDesc()->getIndexKey(),
getSearchKey()->getBeginKeyValues(),
beginKeyPred_,
generator);
generateKeyExpr(getGroupAttr()->getCharacteristicInputs(),
getIndexDesc()->getIndexKey(),
getSearchKey()->getEndKeyValues(),
endKeyPred_,
generator);
}
// ---------------------------------------------------------------------
// Rewrite the check constraint expressions.
// ---------------------------------------------------------------------
checkConstraints().replaceVEGExpressions
(availableValues,
getGroupAttr()->getCharacteristicInputs());
generator->setFoundAnUpdate(TRUE);
generator->setPartnAccessChildIUD();
#ifdef _DEBUG
// Compile in the index maintenance ... just for testing
//
if(getenv("IM_COMPILE"))
generator->imUpdateRel() = this;
#endif
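// Lean OLT optimization cannot be used when this statement is inside a
// compound (block) statement or when the table has added columns or
// varchar columns; turn it off in those cases.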
if (oltOptLean() &&
((isinBlockStmt()) ||
(getTableDesc()->getNATable()->hasAddedColumn()) ||
(getTableDesc()->getNATable()->hasVarcharColumn())))
{
oltOptInfo().setOltEidLeanOpt(FALSE);
}
generator->setSkipUnavailablePartition(FALSE);
if (isMtsStatement())
generator->setEmbeddedIUDWithLast1(TRUE) ;
if (isMerge())
{
// Accumulate the values that are provided as inputs by my parent
// together with the values that are produced as outputs by my
// children. Use these values for rewriting the VEG expressions.
ValueIdSet availableValues;
getInputValuesFromParentAndChildren(availableValues);
mergeInsertRecExpr().replaceVEGExpressions
(availableValues,
getGroupAttr()->getCharacteristicInputs());
mergeUpdatePred().replaceVEGExpressions
(availableValues,
getGroupAttr()->getCharacteristicInputs());
ValueIdList tempVIDlist;
getTableDesc()->getIdentityColumn(tempVIDlist);
NAColumn *identityCol = NULL;
if (tempVIDlist.entries() > 0)
{
ValueId valId = tempVIDlist[0];
identityCol = valId.getNAColumn();
}
if (((getOperatorType() == REL_HBASE_DELETE) ||
(getOperatorType() == REL_HBASE_UPDATE)) &&
(getTableDesc()->getNATable()->getClusteringIndex()->hasSyskey()))
{
*CmpCommon::diags() << DgSqlCode(-3241)
<< DgString0(" SYSKEY not allowed.");
GenExit();
}
if ((getOperatorType() != REL_HBASE_UPDATE) &&
(mergeInsertRecExpr().entries() > 0) &&
(CmpCommon::getDefault(COMP_BOOL_175) == DF_OFF))
{
// MERGE with INSERT is limited to HBase updates unless
// the CQD is on
*CmpCommon::diags() << DgSqlCode(-3241)
<< DgString0(" This MERGE is not allowed with INSERT.");
GenExit();
}
if (oltOpt())
{
// if no update expr and only insert expr is specified for
// this MERGE stmt, turn off olt opt.
//
if (newRecExprArray().entries() == 0)
oltOptInfo().setOltEidOpt(FALSE);
oltOptInfo().setOltEidLeanOpt(FALSE);
}
generator->setUpdErrorOnError(FALSE);
generator->setUpdSavepointOnError(FALSE);
} // isMerge
generator->oltOptInfo()->mayDisableOperStats(&oltOptInfo());
// Part of the fix for Soln 10-100425-9755. Don't AQR a
// positioned update/delete, because part of the recovery
// for the error that triggers the AQR is to roll back the transaction,
// and this causes the referenced cursor to be closed. The other
// part of the fix is in the compiler cache: positioned updates/deletes
// will not be cached, and this should reduce the need to handle
// errors with AQR, e.g., timestamp mismatch errors.
if (updateCurrentOf())
generator->setAqrEnabled(FALSE);
if (getTableDesc()->getNATable()->hasLobColumn())
{
oltOptInfo().setOltOpt(FALSE);
generator->oltOptInfo()->setOltOpt(FALSE);
generator->setAqrEnabled(FALSE);
generator->setUpdAbortOnError(TRUE);
generator->setUpdSavepointOnError(FALSE);
}
if ((isNoRollback()) ||
(generator->getTransMode()->getRollbackMode() == TransMode::NO_ROLLBACK_))
{
generator->setWithNoRollbackUsed(isNoRollback());
if (CmpCommon::getDefault(AQR_WNR) == DF_OFF)
generator->setAqrEnabled(FALSE);
}
if (((getInliningInfo().hasInlinedActions()) ||
(getInliningInfo().isEffectiveGU())) &&
(getInliningInfo().hasRI()))
{
generator->setRIinliningForTrafIUD(TRUE);
}
if (precondition_.entries() > 0)
{
ValueIdSet availableValues;
getInputValuesFromParentAndChildren(availableValues);
precondition_.
replaceVEGExpressions(availableValues,
getGroupAttr()->getCharacteristicInputs());
}
markAsPreCodeGenned();
return this;
} // GenericUpdate::preCodeGen()
RelExpr * Update::preCodeGen(Generator * generator,
const ValueIdSet & externalInputs,
ValueIdSet &pulledNewInputs)
{
if (nodeIsPreCodeGenned())
return this;
if (! GenericUpdate::preCodeGen(generator,externalInputs,pulledNewInputs))
return NULL;
markAsPreCodeGenned();
return this;
}
RelExpr * MergeUpdate::preCodeGen(Generator * generator,
const ValueIdSet & externalInputs,
ValueIdSet &pulledNewInputs)
{
if (nodeIsPreCodeGenned())
return this;
if (! Update::preCodeGen(generator,externalInputs,pulledNewInputs))
return NULL;
markAsPreCodeGenned();
return this;
}
RelExpr * UpdateCursor::preCodeGen(Generator * generator,
const ValueIdSet & externalInputs,
ValueIdSet &pulledNewInputs)
{
if (nodeIsPreCodeGenned())
return this;
if (! Update::preCodeGen(generator,externalInputs,pulledNewInputs))
return NULL;
// primary key columns cannot be updated, yet. After RI support
// is in, they could be updated.
const NAColumnArray & key_column_array =
getTableDesc()->getNATable()->getClusteringIndex()->getIndexKeyColumns();
ValueIdSet& val_id_set = newRecExpr();
ValueId val_id;
for (val_id = val_id_set.init(); val_id_set.next(val_id); val_id_set.advance(val_id))
{
ItemExpr * item_expr = val_id.getItemExpr();
for (short i = 0; i < getTableDesc()->getNATable()->getKeyCount(); i++)
{
const char * key_colname = key_column_array[i]->getColName();
const char * upd_colname = ((BaseColumn *)
(item_expr->child(0)->castToItemExpr()))->
getColName();
if ((strcmp(key_colname, upd_colname) == 0) &&
(item_expr->getOperatorType() == ITM_ASSIGN) &&
(((Assign*)item_expr)->isUserSpecified()))
{
*CmpCommon::diags() << DgSqlCode(-4033)
<< DgColumnName(key_colname);
GenExit();
}
}
}
generator->oltOptInfo()->mayDisableOperStats(&oltOptInfo());
markAsPreCodeGenned();
return this;
} // UpdateCursor::preCodeGen()
RelExpr * Delete::preCodeGen(Generator * generator,
const ValueIdSet & externalInputs,
ValueIdSet &pulledNewInputs)
{
if (nodeIsPreCodeGenned())
return this;
if (! GenericUpdate::preCodeGen(generator,externalInputs,pulledNewInputs))
return NULL;
markAsPreCodeGenned();
return this;
}
RelExpr * MergeDelete::preCodeGen(Generator * generator,
const ValueIdSet & externalInputs,
ValueIdSet &pulledNewInputs)
{
if (nodeIsPreCodeGenned())
return this;
if (! Delete::preCodeGen(generator,externalInputs,pulledNewInputs))
return NULL;
markAsPreCodeGenned();
return this;
}
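// Helper: returns TRUE if the item expression tree contains a base column,
// index column, or column reference anywhere in it.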
static NABoolean hasColReference(ItemExpr * ie)
{
if (! ie)
return FALSE;
if ((ie->getOperatorType() == ITM_BASECOLUMN) ||
(ie->getOperatorType() == ITM_INDEXCOLUMN) ||
(ie->getOperatorType() == ITM_REFERENCE))
return TRUE;
for (Lng32 i = 0; i < ie->getArity(); i++)
{
if (hasColReference(ie->child(i)))
return TRUE;
}
return FALSE;
}
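// Recursively collect ValueIds from an item expression tree into colRefVIDset:
// column references (base column, index column, ColReference) when addCol is TRUE,
// and HBASE_TIMESTAMP / HBASE_VERSION functions when addHBF is TRUE.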
void HbaseAccess::addReferenceFromItemExprTree(ItemExpr * ie,
NABoolean addCol, NABoolean addHBF,
ValueIdSet &colRefVIDset)
{
if (! ie)
return;
if ((ie->getOperatorType() == ITM_BASECOLUMN) ||
(ie->getOperatorType() == ITM_INDEXCOLUMN) ||
(ie->getOperatorType() == ITM_REFERENCE))
{
if (addCol)
colRefVIDset.insert(ie->getValueId());
return;
}
if (ie->getOperatorType() == ITM_HBASE_TIMESTAMP)
{
if (addHBF)
{
colRefVIDset.insert(ie->getValueId());
}
return;
}
if (ie->getOperatorType() == ITM_HBASE_VERSION)
{
if (addHBF)
{
colRefVIDset.insert(ie->getValueId());
}
return;
}
for (Lng32 i = 0; i < ie->getArity(); i++)
{
addReferenceFromItemExprTree(ie->child(i), addCol, addHBF, colRefVIDset);
}
return;
}
void HbaseAccess::addColReferenceFromVIDlist(const ValueIdList &exprList,
ValueIdSet &colRefVIDset)
{
for (CollIndex i = 0; i < exprList.entries(); i++)
{
addReferenceFromItemExprTree(exprList[i].getItemExpr(),
TRUE, FALSE, colRefVIDset);
}
}
void HbaseAccess::addReferenceFromVIDset(const ValueIdSet &exprList,
NABoolean addCol, NABoolean addHBF,
ValueIdSet &colRefVIDset)
{
for (ValueId v = exprList.init(); exprList.next(v); exprList.advance(v))
{
addReferenceFromItemExprTree(v.getItemExpr(), addCol, addHBF, colRefVIDset);
}
}
void HbaseAccess::addColReferenceFromRightChildOfVIDarray(ValueIdArray &exprList,
ValueIdSet &colRefVIDset)
{
for (CollIndex i = 0; i < exprList.entries(); i++)
{
addReferenceFromItemExprTree(exprList[i].getItemExpr()->child(1),
TRUE, FALSE, colRefVIDset);
}
}
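// Returns TRUE if 'ie' is an equality predicate of the form <colName> = <value>
// (in either operand order) where <value> is a constant or a cached parameter and
// contains no column references. On success, 'vid' is set to the value's ValueId
// and 'isConstParam' indicates a cached parameter.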
static NABoolean isEqGetExpr(ItemExpr * ie, ValueId &vid, NABoolean &isConstParam,
const char * colName)
{
NABoolean found = FALSE;
isConstParam = FALSE;
if (ie && ie->getOperatorType() == ITM_EQUAL)
{
ItemExpr * child0 = ie->child(0)->castToItemExpr();
ItemExpr * child1 = ie->child(1)->castToItemExpr();
if ((ie->child(0)->getOperatorType() == ITM_BASECOLUMN) &&
(((BaseColumn*)ie->child(0)->castToItemExpr())->getNAColumn()->getColName() == colName) &&
(NOT hasColReference(ie->child(1))))
{
if (ie->child(1)->getOperatorType() == ITM_CONSTANT)
{
found = TRUE;
vid = ie->child(1)->getValueId();
}
else if (ie->child(1)->getOperatorType() == ITM_CACHE_PARAM)
{
found = TRUE;
isConstParam = TRUE;
vid = ie->child(1)->getValueId();
}
}
else if ((ie->child(1)->getOperatorType() == ITM_BASECOLUMN) &&
(((BaseColumn*)ie->child(1)->castToItemExpr())->getNAColumn()->getColName() == colName) &&
(NOT hasColReference(ie->child(0))))
{
if (ie->child(0)->getOperatorType() == ITM_CONSTANT)
{
found = TRUE;
vid = ie->child(0)->getValueId();
}
else if (ie->child(0)->getOperatorType() == ITM_CACHE_PARAM)
{
found = TRUE;
isConstParam = TRUE;
vid = ie->child(0)->getValueId();
}
}
else if ((ie->child(0)->getOperatorType() == ITM_INDEXCOLUMN) &&
(((IndexColumn*)ie->child(0)->castToItemExpr())->getNAColumn()->getColName() == colName) &&
(NOT hasColReference(ie->child(1))))
{
if (ie->child(1)->getOperatorType() == ITM_CONSTANT)
{
found = TRUE;
vid = ie->child(1)->getValueId();
}
else if (ie->child(1)->getOperatorType() == ITM_CACHE_PARAM)
{
found = TRUE;
isConstParam = TRUE;
vid = ie->child(1)->getValueId();
}
}
else if ((ie->child(1)->getOperatorType() == ITM_INDEXCOLUMN) &&
(((IndexColumn*)ie->child(1)->castToItemExpr())->getNAColumn()->getColName() == colName) &&
(NOT hasColReference(ie->child(0))))
{
if (ie->child(0)->getOperatorType() == ITM_CONSTANT)
{
found = TRUE;
vid = ie->child(0)->getValueId();
}
else if (ie->child(0)->getOperatorType() == ITM_CACHE_PARAM)
{
found = TRUE;
isConstParam = TRUE;
vid = ie->child(0)->getValueId();
}
}
else if ((ie->child(0)->getOperatorType() == ITM_REFERENCE) &&
(((ColReference*)ie->child(0)->castToItemExpr())->getCorrNameObj().getQualifiedNameObj().getObjectName() == colName) &&
(NOT hasColReference(ie->child(1))))
{
if (ie->child(1)->getOperatorType() == ITM_CONSTANT)
{
found = TRUE;
vid = ie->child(1)->getValueId();
}
else if (ie->child(1)->getOperatorType() == ITM_CACHE_PARAM)
{
found = TRUE;
isConstParam = TRUE;
vid = ie->child(1)->getValueId();
}
}
else if ((ie->child(1)->getOperatorType() == ITM_REFERENCE) &&
(((ColReference*)ie->child(1)->castToItemExpr())->getCorrNameObj().getQualifiedNameObj().getObjectName() == colName) &&
(NOT hasColReference(ie->child(0))))
{
if (ie->child(0)->getOperatorType() == ITM_CONSTANT)
{
found = TRUE;
vid = ie->child(0)->getValueId();
}
else if (ie->child(0)->getOperatorType() == ITM_CACHE_PARAM)
{
found = TRUE;
isConstParam = TRUE;
vid = ie->child(0)->getValueId();
}
}
}
return found;
}
RelExpr * HbaseDelete::preCodeGen(Generator * generator,
const ValueIdSet & externalInputs,
ValueIdSet &pulledNewInputs)
{
if (nodeIsPreCodeGenned())
return this;
// if a column list is specified, make sure all column names are of valid hbase
// column name format ("ColFam:ColName")
if (csl())
{
for (Lng32 i = 0; i < csl()->entries(); i++)
{
const NAString * nas = (*csl())[i];
std::string colFam;
std::string colName;
if (nas)
{
ExFunctionHbaseColumnLookup::extractColFamilyAndName(
nas->data(), -1, FALSE, colFam, colName);
}
if (colFam.empty())
{
*CmpCommon::diags() << DgSqlCode(-1426)
<< DgString0(nas->data());
GenExit();
}
} // for
} // if
if (!processConstHBaseKeys(
generator,
this,
getSearchKey(),
getIndexDesc(),
executorPred(),
getHbaseSearchKeys(),
listOfDelUniqueRows_,
listOfDelSubsetRows_))
return NULL;
if (! Delete::preCodeGen(generator, externalInputs, pulledNewInputs))
return NULL;
if (((getTableDesc()->getNATable()->isHbaseRowTable()) ||
(getTableDesc()->getNATable()->isHbaseCellTable())) &&
(producesOutputs()))
{
*CmpCommon::diags() << DgSqlCode(-1425)
<< DgTableName(getTableDesc()->getNATable()->getTableName().
getQualifiedNameAsAnsiString())
<< DgString0("Reason: Cannot return values from an hbase insert, update or delete.");
GenExit();
}
NABoolean isAlignedFormat = getTableDesc()->getNATable()->isAlignedFormat(getIndexDesc());
if (producesOutputs())
{
retColRefSet_ = getIndexDesc()->getIndexColumns();
}
else
{
ValueIdSet colRefSet;
// create the list of columns that need to be retrieved from hbase.
// first add all columns referenced in the executor pred.
HbaseAccess::addReferenceFromVIDset(executorPred(), TRUE, TRUE, colRefSet);
if ((getTableDesc()->getNATable()->getExtendedQualName().getSpecialType() == ExtendedQualName::INDEX_TABLE))
{
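// For index table access, mark equality predicates with 'special nulls'
// semantics so that NULL values compare as equal.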
for (ValueId valId = executorPred().init();
executorPred().next(valId);
executorPred().advance(valId))
{
ItemExpr * ie = valId.getItemExpr();
if (ie->getOperatorType() == ITM_EQUAL)
{
BiRelat * br = (BiRelat*)ie;
br->setSpecialNulls(TRUE);
}
}
} // index_table
if ((getTableDesc()->getNATable()->isHbaseRowTable()) ||
(getTableDesc()->getNATable()->isHbaseCellTable()) ||
isAlignedFormat)
{
for (Lng32 i = 0; i < getIndexDesc()->getIndexColumns().entries(); i++)
{
retColRefSet_.insert(getIndexDesc()->getIndexColumns()[i]);
}
}
for (ValueId valId = colRefSet.init();
colRefSet.next(valId);
colRefSet.advance(valId))
{
ValueId dummyValId;
if (NOT getGroupAttr()->getCharacteristicInputs().referencesTheGivenValue(valId, dummyValId))
{
if ((valId.getItemExpr()->getOperatorType() == ITM_HBASE_TIMESTAMP) ||
(valId.getItemExpr()->getOperatorType() == ITM_HBASE_VERSION))
{
*CmpCommon::diags() << DgSqlCode(-3242)
<< DgString0("Illegal use of Hbase Timestamp or Hbase Version function.");
GenExit();
}
retColRefSet_.insert(valId);
}
}
if (NOT ((getTableDesc()->getNATable()->isHbaseRowTable()) ||
(getTableDesc()->getNATable()->isHbaseCellTable()) ||
(isAlignedFormat)))
{
// add all the key columns. If values are missing in hbase, then at least the key
// value is needed to retrieve a row.
HbaseAccess::addColReferenceFromVIDlist(getIndexDesc()->getIndexKey(), retColRefSet_);
}
if (getTableDesc()->getNATable()->hasLobColumn())
{
for (Lng32 i = 0; i < getIndexDesc()->getIndexColumns().entries(); i++)
{
const ValueId vid = getIndexDesc()->getIndexColumns()[i];
retColRefSet_.insert(vid);
}
}
}
NABoolean inlinedActions = FALSE;
if ((getInliningInfo().hasInlinedActions()) ||
(getInliningInfo().isEffectiveGU()))
inlinedActions = TRUE;
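// Determine whether this delete affects at most one row: either the search key
// is unique and no unique-row list was built, or the unique-row list has exactly
// one entry with a single rowId.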
NABoolean isUnique = FALSE;
if (listOfDelSubsetRows_.entries() == 0)
{
if ((getSearchKey() && getSearchKey()->isUnique()) &&
(listOfDelUniqueRows_.entries() == 0))
isUnique = TRUE;
else if ((NOT (getSearchKey() && getSearchKey()->isUnique())) &&
(listOfDelUniqueRows_.entries() == 1) &&
(listOfDelUniqueRows_[0].rowIds_.entries() == 1))
isUnique = TRUE;
}
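// The rowset VSBB optimization is driven by a CQD and is disabled for native
// HBase map, row, and cell tables.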
NABoolean hbaseRowsetVSBBopt =
(CmpCommon::getDefault(HBASE_ROWSET_VSBB_OPT) == DF_ON);
if ((getTableDesc()->getNATable()->isHbaseMapTable()) ||
(getTableDesc()->getNATable()->isHbaseRowTable()) ||
(getTableDesc()->getNATable()->isHbaseCellTable()))
hbaseRowsetVSBBopt = FALSE;
if (getInliningInfo().isIMGU()) {
// There is no need to do checkAndDelete for IM
canDoCheckAndUpdel() = FALSE;
uniqueHbaseOper() = FALSE;
if ((generator->oltOptInfo()->multipleRowsReturned()) &&
(hbaseRowsetVSBBopt) &&
(NOT generator->isRIinliningForTrafIUD()) &&
(NOT getTableDesc()->getNATable()->hasLobColumn()))
uniqueRowsetHbaseOper() = TRUE;
}
else
if (isUnique)
{
// If this unique delete is not part of a rowset operation,
// don't allow it to be cancelled.
if (!generator->oltOptInfo()->multipleRowsReturned())
generator->setMayNotCancel(TRUE);
uniqueHbaseOper() = TRUE;
canDoCheckAndUpdel() = FALSE;
if ((NOT producesOutputs()) &&
(NOT inlinedActions) &&
(executorPred().isEmpty()))
{
if ((generator->oltOptInfo()->multipleRowsReturned()) &&
(hbaseRowsetVSBBopt) &&
(NOT generator->isRIinliningForTrafIUD()) &&
(NOT getTableDesc()->getNATable()->hasLobColumn()))
uniqueRowsetHbaseOper() = TRUE;
else if ((NOT generator->oltOptInfo()->multipleRowsReturned()) &&
(listOfDelUniqueRows_.entries() == 0))
{
if ((CmpCommon::getDefault(HBASE_CHECK_AND_UPDEL_OPT) == DF_ON) &&
(CmpCommon::getDefault(HBASE_SQL_IUD_SEMANTICS) == DF_ON) &&
(NOT isAlignedFormat))
canDoCheckAndUpdel() = TRUE;
}
}
}
if ((producesOutputs()) &&
((NOT isUnique) || (getUpdateCKorUniqueIndexKey())))
{
// Cannot do olt msg opt if:
// -- values are to be returned and unique operation is not being used.
// -- or this delete was transformed from an update of pkey/index key
// set an indication that multiple rows will be returned.
generator->oltOptInfo()->setMultipleRowsReturned(TRUE);
generator->oltOptInfo()->setOltCliOpt(FALSE);
}
if (getTableDesc()->getNATable()->hasLobColumn())
{
canDoCheckAndUpdel() = FALSE;
uniqueRowsetHbaseOper() = FALSE;
}
generator->setUpdSavepointOnError(FALSE);
generator->setUpdPartialOnError(FALSE);
// if unique oper with no index maintenance and autocommit is on, then
// do not require a transaction.
// Use hbase or region transactions.
// Hbase guarantees single row consistency.
Int64 transId = -1;
if (CmpCommon::getDefault(TRAF_NO_DTM_XN) == DF_ON)
{
// no transaction needed
noDTMxn() = TRUE;
}
else if ((uniqueHbaseOper()) &&
(NOT cursorHbaseOper()) &&
(NOT uniqueRowsetHbaseOper()) &&
(NOT inlinedActions) &&
(generator->getTransMode()->getAutoCommit() == TransMode::ON_) &&
(! NAExecTrans(0, transId)) &&
(NOT generator->oltOptInfo()->multipleRowsReturned()))
{
// no DTM transaction needed
useRegionXn() = FALSE;
if (CmpCommon::getDefault(TRAF_USE_REGION_XN) == DF_ON)
useRegionXn() = TRUE;
}
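// Otherwise a DTM transaction is needed. For non-unique, cursor, rowset,
// inlined, or multi-row cases, also request abort-on-error handling.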
else
{
generator->setTransactionFlag(TRUE);
if ((NOT uniqueHbaseOper()) ||
(cursorHbaseOper()) ||
(uniqueRowsetHbaseOper()) ||
(inlinedActions) ||
(generator->oltOptInfo()->multipleRowsReturned()))
generator->setUpdAbortOnError(TRUE);
}
// flag for hbase tables
generator->setHdfsAccess(TRUE);
markAsPreCodeGenned();
return this;
}
RelExpr * HbaseUpdate::preCodeGen(Generator * generator,
const ValueIdSet & externalInputs,
ValueIdSet &pulledNewInputs)
{
if (nodeIsPreCodeGenned())
return this;
if (getTableDesc()->getNATable()->isHbaseMapTable())
{
*CmpCommon::diags() << DgSqlCode(-1425)
<< DgTableName(getTableDesc()->getNATable()->getTableName().
getQualifiedNameAsAnsiString())
<< DgString0("Reason: update not yet supported.");
GenExit();
}
if (!processConstHBaseKeys(
generator,
this,
getSearchKey(),
getIndexDesc(),
executorPred(),
getHbaseSearchKeys(),
listOfUpdUniqueRows_,
listOfUpdSubsetRows_))
return NULL;
// if (! GenericUpdate::preCodeGen(generator, externalInputs, pulledNewInputs))
// return NULL;
if (! UpdateCursor::preCodeGen(generator, externalInputs, pulledNewInputs))
return NULL;
CollIndex totalColCount = getTableDesc()->getColumnList().entries();
NABoolean isAlignedFormat = getTableDesc()->getNATable()->isAlignedFormat(getIndexDesc());
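// For aligned format tables, if only a subset of the columns is being updated,
// fill the gaps in the new record expression array with Assigns that copy each
// remaining column from its corresponding index column, so that every column
// position of the row is covered.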
if (isAlignedFormat &&
(newRecExprArray().entries() > 0) &&
(newRecExprArray().entries() < totalColCount))
{
ValueIdArray holeyArray(totalColCount);
Lng32 i;
for (i = 0; i < newRecExprArray().entries(); i++)
{
ItemExpr * assign = newRecExprArray()[i].getItemExpr();
const NAColumn *nacol = assign->child(0).getNAColumn();
Lng32 colPos = nacol->getPosition();
holeyArray.insertAt(colPos, assign->getValueId());
} // for
for (i = 0; i < totalColCount; i++)
{
if (! (holeyArray.used(i)))
{
BaseColumn * bc = (BaseColumn*)getTableDesc()->getColumnList()[i].getItemExpr();
CMPASSERT(bc->getOperatorType() == ITM_BASECOLUMN);
ValueId srcId = getIndexDesc()->getIndexColumns()[i];
ItemExpr * an =
new(generator->wHeap()) Assign(bc, srcId.getItemExpr(), FALSE);
an->bindNode(generator->getBindWA());
holeyArray.insertAt(i, an->getValueId());
} // if
} // for
newRecExprArray().clear();
newRecExprArray() = holeyArray;
} // if aligned
if ((isMerge()) &&
(mergeInsertRecExpr().entries() > 0))
{
if ((listOfUpdSubsetRows_.entries() > 0) ||
(getSearchKey() && (NOT getSearchKey()->isUnique())))
{
*CmpCommon::diags() << DgSqlCode(-3241)
<< DgString0(" Non-unique ON clause not allowed with INSERT.");
GenExit();
}
}
if (((getTableDesc()->getNATable()->isHbaseRowTable()) ||
(getTableDesc()->getNATable()->isHbaseCellTable())) &&
(producesOutputs()))
{
*CmpCommon::diags() << DgSqlCode(-1425)
<< DgTableName(getTableDesc()->getNATable()->getTableName().
getQualifiedNameAsAnsiString())
<< DgString0("Reason: Cannot return values from an hbase insert, update or delete.");
GenExit();
}
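// needToGetCols is set below if columns must be fetched from HBase before the
// update (e.g. they are referenced by the executor predicate, the right-hand
// side of the SET clauses, or a merge predicate); in that case the rowset and
// check-and-update optimizations are disabled.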
NABoolean canDoRowsetOper = TRUE;
NABoolean canDoCheckAndUpdate = TRUE;
NABoolean needToGetCols = FALSE;
if (producesOutputs())
{
retColRefSet_ = getIndexDesc()->getIndexColumns();
}
else
{
ValueIdSet colRefSet;
// create the list of columns that need to be retrieved from hbase.
// first add all columns referenced in the executor pred.
HbaseAccess::addReferenceFromVIDset(executorPred(), TRUE, TRUE, colRefSet);
if ((getTableDesc()->getNATable()->getExtendedQualName().getSpecialType() == ExtendedQualName::INDEX_TABLE))
{
for (ValueId valId = executorPred().init();
executorPred().next(valId);
executorPred().advance(valId))
{
ItemExpr * ie = valId.getItemExpr();
if (ie->getOperatorType() == ITM_EQUAL)
{
BiRelat * br = (BiRelat*)ie;
br->setSpecialNulls(TRUE);
}
}
}
// add all columns referenced in the right side of the update expr.
HbaseAccess::addColReferenceFromRightChildOfVIDarray(newRecExprArray(), colRefSet);
if (isMerge())
HbaseAccess::addReferenceFromVIDset(mergeUpdatePred(), TRUE, FALSE, colRefSet);
if ((getTableDesc()->getNATable()->isHbaseRowTable()) ||
(getTableDesc()->getNATable()->isHbaseCellTable()) ||
(isAlignedFormat))
{
for (Lng32 i = 0; i < getIndexDesc()->getIndexColumns().entries(); i++)
{
retColRefSet_.insert(getIndexDesc()->getIndexColumns()[i]);
}
}
else
{
for (ValueId valId = colRefSet.init();
colRefSet.next(valId);
colRefSet.advance(valId))
{
ValueId dummyValId;
if (NOT getGroupAttr()->getCharacteristicInputs().referencesTheGivenValue(valId, dummyValId))
{
if ((valId.getItemExpr()->getOperatorType() == ITM_HBASE_TIMESTAMP) ||
(valId.getItemExpr()->getOperatorType() == ITM_HBASE_VERSION))
{
*CmpCommon::diags() << DgSqlCode(-3242)
<< DgString0("Illegal use of Hbase Timestamp or Hbase Version function.");
GenExit();
}
retColRefSet_.insert(valId);
}
}
}
if (retColRefSet_.entries() > 0)
{
needToGetCols = TRUE;
canDoRowsetOper = FALSE;
canDoCheckAndUpdate = FALSE;
}
// nullable and added columns in the row may be missing. That will cause
// a row to not be returned if those are the only columns that are being
// retrieved.
// To make sure that a row is always returned, add the key columns. These are
// guaranteed to be present in an hbase row.
HbaseAccess::addColReferenceFromVIDlist(getIndexDesc()->getIndexKey(), retColRefSet_);
}
NABoolean inlinedActions = FALSE;
if ((getInliningInfo().hasInlinedActions()) ||
(getInliningInfo().isEffectiveGU()))
inlinedActions = TRUE;
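// Determine whether this update affects at most one row, using the same
// criteria as for a unique delete.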
NABoolean isUnique = FALSE;
if (listOfUpdSubsetRows_.entries() == 0)
{
if ((getSearchKey() && getSearchKey()->isUnique()) &&
(listOfUpdUniqueRows_.entries() == 0))
isUnique = TRUE;
else if ((NOT (getSearchKey() && getSearchKey()->isUnique())) &&
(listOfUpdUniqueRows_.entries() == 1) &&
(listOfUpdUniqueRows_[0].rowIds_.entries() == 1))
isUnique = TRUE;
}
if (getInliningInfo().isIMGU()) {
// There is no need to checkAndPut for IM
canDoCheckAndUpdel() = FALSE;
uniqueHbaseOper() = FALSE;
if ((generator->oltOptInfo()->multipleRowsReturned()) &&
(CmpCommon::getDefault(HBASE_ROWSET_VSBB_OPT) == DF_ON) &&
(NOT generator->isRIinliningForTrafIUD()))
uniqueRowsetHbaseOper() = TRUE;
}
else
if (isUnique)
{
// If this unique update is not part of a rowset operation,
// don't allow it to be cancelled.
if (!generator->oltOptInfo()->multipleRowsReturned())
generator->setMayNotCancel(TRUE);
uniqueHbaseOper() = TRUE;
canDoCheckAndUpdel() = FALSE;
if ((NOT isMerge()) &&
(NOT producesOutputs()) &&
(executorPred().isEmpty()) &&
(NOT needToGetCols) &&
(NOT inlinedActions))
{
if ((generator->oltOptInfo()->multipleRowsReturned()) &&
(CmpCommon::getDefault(HBASE_ROWSET_VSBB_OPT) == DF_ON) &&
(NOT generator->isRIinliningForTrafIUD()))
uniqueRowsetHbaseOper() = TRUE;
else if ((NOT generator->oltOptInfo()->multipleRowsReturned()) &&
(listOfUpdUniqueRows_.entries() == 0))
{
if ((CmpCommon::getDefault(HBASE_CHECK_AND_UPDEL_OPT) == DF_ON) &&
(NOT isAlignedFormat))
canDoCheckAndUpdel() = TRUE;
}
}
}
else if (producesOutputs())
{
// Cannot do olt msg opt if:
// -- values are to be returned and unique operation is not being used.
// set an indication that multiple rows will be returned.
generator->oltOptInfo()->setMultipleRowsReturned(TRUE);
generator->oltOptInfo()->setOltCliOpt(FALSE);
}
generator->setUpdSavepointOnError(FALSE);
generator->setUpdPartialOnError(FALSE);
// if unique oper with no index maintenance and autocommit is on, then
// do not require a transaction.
// Use hbase or region transactions.
// Hbase guarantees single row consistency.
Int64 transId = -1;
if (CmpCommon::getDefault(TRAF_NO_DTM_XN) == DF_ON)
{
// no transaction needed
noDTMxn() = TRUE;
}
else if ((uniqueHbaseOper()) &&
(NOT isMerge()) &&
(NOT cursorHbaseOper()) &&
(NOT uniqueRowsetHbaseOper()) &&
(NOT inlinedActions) &&
(generator->getTransMode()->getAutoCommit() == TransMode::ON_) &&
(! NAExecTrans(0, transId)) &&
(NOT generator->oltOptInfo()->multipleRowsReturned()))
{
// no DTM transaction needed
useRegionXn() = FALSE;
if (CmpCommon::getDefault(TRAF_USE_REGION_XN) == DF_ON)
useRegionXn() = TRUE;
}
else
{
generator->setTransactionFlag(TRUE);
if ((NOT uniqueHbaseOper()) ||
(isMerge()) ||
(cursorHbaseOper()) ||
(uniqueRowsetHbaseOper()) ||
(inlinedActions) ||
(generator->oltOptInfo()->multipleRowsReturned()))
generator->setUpdAbortOnError(TRUE);
}
// flag for hbase tables
generator->setHdfsAccess(TRUE);
if (getTableDesc()->getNATable()->hasLobColumn())
{
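// Propagate table and column attributes (object UID, quoted schema name, LOB
// size, LOB number, storage type and location) from the metadata into each
// LOBupdate expression; a storage type mismatch raises error 1432.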
for (CollIndex i = 0; i < newRecExprArray().entries(); i++)
{
NAColumn * col =
newRecExprArray()[i].getItemExpr()->child(0)->castToItemExpr()->
getValueId().getNAColumn(TRUE);
ItemExpr * val =
newRecExprArray()[i].getItemExpr()->child(1)->castToItemExpr();
if ((col->getType()->isLob()) &&
(val->getOperatorType() == ITM_LOBUPDATE))
{
LOBupdate * lu = (LOBupdate*)val;
lu->updatedTableObjectUID() =
getIndexDesc()->getPrimaryTableDesc()->
getNATable()->objectUid().castToInt64();
lu->updatedTableSchemaName() = "\"";
lu->updatedTableSchemaName() +=
getTableDesc()->getNATable()->
getTableName().getCatalogName();
lu->updatedTableSchemaName().append("\".\"");
lu->updatedTableSchemaName().
append(getTableDesc()->getNATable()->
getTableName().getSchemaName());
lu->updatedTableSchemaName() += "\"";
lu->lobSize() = col->getType()->getPrecision();
lu->lobNum() = col->lobNum();
if (lu->lobStorageType() == Lob_Empty)
{
lu->lobStorageType() = col->lobStorageType();
}
if (lu->lobStorageType() != col->lobStorageType())
{
*CmpCommon::diags() << DgSqlCode(-1432)
<< DgInt0((Int32)lu->lobStorageType())
<< DgInt1((Int32)col->lobStorageType())
<< DgString0(col->getColName());
GenExit();
}
lu->lobStorageLocation() = col->lobStorageLocation();
}
} // for
} // if
markAsPreCodeGenned();
return this;
}
RelExpr * HiveInsert::preCodeGen(Generator * generator,
const ValueIdSet & externalInputs,
ValueIdSet &pulledNewInputs)
{
if (nodeIsPreCodeGenned())
return this;
generator->setHiveAccess(TRUE);
generator->setProcessLOB(TRUE);
return GenericUpdate::preCodeGen(generator, externalInputs, pulledNewInputs);
}
RelExpr * HbaseInsert::preCodeGen(Generator * generator,
const ValueIdSet & externalInputs,
ValueIdSet &pulledNewInputs)
{
if (nodeIsPreCodeGenned())
return this;
// Characteristic outputs are sometimes set to empty in RelExpr::genPreCode,
// after a call to resolveCharOutputs. We need to remember whether a returnRow
// tdb flag should be set, even if no output columns are required.
if (getIsTrafLoadPrep() && !getGroupAttr()->getCharacteristicOutputs().isEmpty())
setReturnRow(TRUE);
if (! GenericUpdate::preCodeGen(generator, externalInputs, pulledNewInputs))
return NULL;
NABoolean inlinedActions = FALSE;
if ((getInliningInfo().hasInlinedActions()) ||
(getInliningInfo().isEffectiveGU()))
inlinedActions = TRUE;
if (((getTableDesc()->getNATable()->isHbaseRowTable()) ||
(getTableDesc()->getNATable()->isHbaseCellTable())) &&
(producesOutputs()))
{
*CmpCommon::diags() << DgSqlCode(-1425)
<< DgTableName(getTableDesc()->getNATable()->getTableName().
getQualifiedNameAsAnsiString())
<< DgString0("Reason: Cannot return values from an hbase insert, update or delete.");
GenExit();
}
if ((isUpsert()) &&
((getInsertType() == Insert::VSBB_INSERT_USER) ||
(getInsertType() == Insert::UPSERT_LOAD)))
{
if ((inlinedActions || producesOutputs())&& !getIsTrafLoadPrep())
setInsertType(Insert::SIMPLE_INSERT);
}
// if there are LOB columns, use simple inserts.
if ( getTableDesc()->getNATable()->hasLobColumn())
{
setInsertType(Insert::SIMPLE_INSERT);
NAColumnArray colArray;
NAColumn *col;
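// Wrap each LOB source value in a LOBinsert (or adjust an existing one) that
// carries the target table's object UID, quoted schema name, LOB number, size,
// fs type and storage attributes. A storage type mismatch raises error 1432,
// and a changed fs type forces a new LOBinsert node to be created and bound.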
for (CollIndex ii = 0;
ii < newRecExprArray().entries(); ii++)
{
ItemExpr *assignExpr =
newRecExprArray()[ii].getItemExpr();
ValueId tgtValueId =
assignExpr->child(0)->castToItemExpr()->getValueId();
ValueId srcValueId =
assignExpr->child(1)->castToItemExpr()->getValueId();
col = tgtValueId.getNAColumn( TRUE );
ItemExpr * child1Expr = assignExpr->child(1);
if (srcValueId.getType().isLob())
{
LOBinsert * li = NULL;
if ((child1Expr->getOperatorType() != ITM_LOBINSERT) &&
(child1Expr->getOperatorType() != ITM_LOBUPDATE))
{
li = new(generator->wHeap())
LOBinsert(child1Expr, NULL, LOBoper::LOB_);
li->insertedTableObjectUID() =
getIndexDesc()->getPrimaryTableDesc()->
getNATable()->objectUid().castToInt64();
li->insertedTableSchemaName() = "\"";
li->insertedTableSchemaName() +=
getTableDesc()->getNATable()->
getTableName().getCatalogName();
li->insertedTableSchemaName().append("\".\"");
li->insertedTableSchemaName().
append(getTableDesc()->getNATable()->
getTableName().getSchemaName());
li->insertedTableSchemaName() += "\"";
li->lobSize() = tgtValueId.getType().getPrecision();
li->lobFsType() = tgtValueId.getType().getFSDatatype();
li->lobNum() = col->lobNum();
if ((child1Expr->getOperatorType() == ITM_CONSTANT) &&
!(((ConstValue *)child1Expr)->isNull()))
if (li->lobStorageType() != col->lobStorageType())
{
*CmpCommon::diags() << DgSqlCode(-1432)
<< DgInt0((Int32)li->lobStorageType())
<< DgInt1((Int32)col->lobStorageType())
<< DgString0(col->getColName());
GenExit();
}
li->lobStorageLocation() = col->lobStorageLocation();
li->bindNode(generator->getBindWA());
child1Expr = li;
assignExpr->child(1) = child1Expr;
}
else if (child1Expr->getOperatorType() == ITM_LOBINSERT)
{
li = (LOBinsert*)child1Expr;
li->insertedTableObjectUID() =
getIndexDesc()->getPrimaryTableDesc()->
getNATable()->objectUid().castToInt64();
li->insertedTableSchemaName() = "\"";
li->insertedTableSchemaName() +=
getTableDesc()->getNATable()->
getTableName().getCatalogName();
li->insertedTableSchemaName().append("\".\"");
li->insertedTableSchemaName().
append(getTableDesc()->getNATable()->
getTableName().getSchemaName());
li->insertedTableSchemaName() += "\"";
li->lobNum() = col->lobNum();
// If we are initializing an empty_lob, assume the storage
// type of the underlying column.
if (li->lobStorageType() == Lob_Empty)
{
li->lobStorageType() = col->lobStorageType();
}
if (li->lobStorageType() != col->lobStorageType())
{
*CmpCommon::diags() << DgSqlCode(-1432)
<< DgInt0((Int32)li->lobStorageType())
<< DgInt1((Int32)col->lobStorageType())
<< DgString0(col->getColName());
GenExit();
}
li->lobStorageLocation() = col->lobStorageLocation();
li->lobSize() = tgtValueId.getType().getPrecision();
if (li->lobFsType() != tgtValueId.getType().getFSDatatype())
{
// create a new LOBinsert node since fsType has changed.
ItemExpr * liChild = li->child(0);
ItemExpr * liChild1 = li->child(1);
li = new(generator->wHeap())
LOBinsert(liChild, liChild1, li->getObj());
li->insertedTableObjectUID() =
getIndexDesc()->getPrimaryTableDesc()->
getNATable()->objectUid().castToInt64();
li->insertedTableSchemaName() = "\"";
li->insertedTableSchemaName() +=
getTableDesc()->getNATable()->
getTableName().getCatalogName();
li->insertedTableSchemaName().append("\".\"");
li->insertedTableSchemaName().
append(getTableDesc()->getNATable()->
getTableName().getSchemaName());
li->insertedTableSchemaName() += "\"";
//li->lobSize() = srcValueId.getType().getPrecision();
li->lobSize() = tgtValueId.getType().getPrecision();
li->lobFsType() = tgtValueId.getType().getFSDatatype();
li->lobNum() = col->lobNum();
li->lobStorageLocation() = col->lobStorageLocation();
li->bindNode(generator->getBindWA());
assignExpr->child(1) = li;
}
} // lobinsert
GenAssert(li, "must have a LobInsert node");
} // lob
}
}
if ((getInsertType() == Insert::SIMPLE_INSERT) &&
(NOT getTableDesc()->getNATable()->hasLobColumn()))
uniqueHbaseOper() = TRUE;
generator->setUpdSavepointOnError(FALSE);
generator->setUpdPartialOnError(FALSE);
// if unique oper with no index maintenance and autocommit is on, then
// do not require a transaction.
// Use hbase or region transactions.
// Hbase guarantees single row consistency.
Int64 transId = -1;
if ((CmpCommon::getDefault(TRAF_NO_DTM_XN) == DF_ON) ||
(isNoRollback()) ||
((isUpsert()) && (insertType_ == UPSERT_LOAD)))
{
// no transaction needed
noDTMxn() = TRUE;
}
else if ((uniqueHbaseOper()) &&
(NOT uniqueRowsetHbaseOper()) &&
(NOT inlinedActions) &&
(generator->getTransMode()->getAutoCommit() == TransMode::ON_) &&
(! NAExecTrans(0, transId)) &&
(NOT generator->oltOptInfo()->multipleRowsReturned()))
{
// no DTM transaction needed
useRegionXn() = FALSE;
if (CmpCommon::getDefault(TRAF_USE_REGION_XN) == DF_ON)
useRegionXn() = TRUE;
}
else
{
generator->setTransactionFlag(TRUE);
if ((NOT uniqueHbaseOper()) ||
(uniqueRowsetHbaseOper()) ||
(inlinedActions) ||
(generator->oltOptInfo()->multipleRowsReturned()))
generator->setUpdAbortOnError(TRUE);
}
return this;
}
RelExpr * ExeUtilFastDelete::preCodeGen(Generator * generator,
const ValueIdSet & externalInputs,
ValueIdSet &pulledNewInputs)
{
if (nodeIsPreCodeGenned())
return this;
return ExeUtilExpr::preCodeGen(generator,externalInputs,pulledNewInputs);
}
RelExpr * ExeUtilHiveTruncate::preCodeGen(Generator * generator,
const ValueIdSet & externalInputs,
ValueIdSet &pulledNewInputs)
{
if (nodeIsPreCodeGenned())
return this;
return ExeUtilExpr::preCodeGen(generator,externalInputs,pulledNewInputs);
}
RelExpr * ExeUtilLobExtract::preCodeGen(Generator * generator,
const ValueIdSet & externalInputs,
ValueIdSet &pulledNewInputs)
{
if (nodeIsPreCodeGenned())
return this;
if (! ExeUtilExpr::preCodeGen(generator,externalInputs,pulledNewInputs))
return NULL;
ValueIdSet availableValues;
for (ValueId exprId = getGroupAttr()->getCharacteristicInputs().init();
getGroupAttr()->getCharacteristicInputs().next(exprId);
getGroupAttr()->getCharacteristicInputs().advance(exprId) )
{
if (exprId.getItemExpr()->getOperatorType() != ITM_VEG_REFERENCE)
availableValues += exprId;
}
getGroupAttr()->setCharacteristicInputs(availableValues);
getInputValuesFromParentAndChildren(availableValues);
if (handle_)
handle_->replaceVEGExpressions
(availableValues, getGroupAttr()->getCharacteristicInputs());
markAsPreCodeGenned();
// Done.
return this;
}
RelExpr * ExeUtilLobUpdate::preCodeGen(Generator * generator,
const ValueIdSet & externalInputs,
ValueIdSet &pulledNewInputs)
{
if (nodeIsPreCodeGenned())
return this;
if (! ExeUtilExpr::preCodeGen(generator,externalInputs,pulledNewInputs))
return NULL;
ValueIdSet availableValues;
for (ValueId exprId = getGroupAttr()->getCharacteristicInputs().init();
getGroupAttr()->getCharacteristicInputs().next(exprId);
getGroupAttr()->getCharacteristicInputs().advance(exprId) )
{
if (exprId.getItemExpr()->getOperatorType() != ITM_VEG_REFERENCE)
availableValues += exprId;
}
getGroupAttr()->setCharacteristicInputs(availableValues);
getInputValuesFromParentAndChildren(availableValues);
if (handle_)
handle_->replaceVEGExpressions
(availableValues, getGroupAttr()->getCharacteristicInputs());
xnNeeded() = TRUE;
markAsPreCodeGenned();
// Done.
return this;
}
RelExpr * HashGroupBy::preCodeGen(Generator * generator,
const ValueIdSet & externalInputs,
ValueIdSet &pulledNewInputs)
{
if (nodeIsPreCodeGenned())
return this;
if ( CmpCommon::getDefault(COMPRESSED_INTERNAL_FORMAT) == DF_SYSTEM)
{
NABoolean resize = FALSE;
NABoolean defrag = FALSE;
ValueIdSet vidSet = child(0)->getGroupAttr()->getCharacteristicOutputs();
ExpTupleDesc::TupleDataFormat tupleFormat =
determineInternalFormat( vidSet,
this,
resize,
generator,
FALSE,
defrag);
cacheTupleFormatAndResizeFlag(tupleFormat, resize, defrag);
if (tupleFormat == ExpTupleDesc::SQLMX_ALIGNED_FORMAT)
{
generator->incNCIFNodes();
}
else
{
generator->decNCIFNodes();
}
}
return GroupByAgg::preCodeGen(generator, externalInputs, pulledNewInputs);
}
RelExpr * GroupByAgg::preCodeGen(Generator * generator,
const ValueIdSet & externalInputs,
ValueIdSet &pulledNewInputs)
{
if (nodeIsPreCodeGenned())
return this;
// Check if the pivs of this operator and its child are the same.
// If they are not, make them the same.
replacePivs();
generator->clearPrefixSortKey();
// Resolve the VEGReferences and VEGPredicates, if any, that appear
// in the Characteristic Inputs, in terms of the externalInputs.
getGroupAttr()->resolveCharacteristicInputs(externalInputs);
// My Characteristic Inputs become the external inputs for my child.
child(0) = child(0)->preCodeGen(generator,
getGroupAttr()->getCharacteristicInputs(),
pulledNewInputs);
if (! child(0).getPtr())
return NULL;
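// For a shortcut group by that requires only the first row, interpose a
// FirstN node between this group by and its child and preCodeGen it.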
if ((getOperatorType() == REL_SHORTCUT_GROUPBY)
&& (getFirstNRows() == 1))
{
RelExpr * firstnNode = new(generator->wHeap()) FirstN(child(0),
getFirstNRows());
firstnNode->setEstRowsUsed(getEstRowsUsed());
firstnNode->setMaxCardEst(getMaxCardEst());
firstnNode->setInputCardinality(child(0)->getInputCardinality());
firstnNode->setPhysicalProperty(child(0)->getPhysicalProperty());
firstnNode->setGroupAttr(child(0)->getGroupAttr());
//10-060516-6532 -Begin
// When a FIRSTN node is created after the optimization phase, the cost
// of that node does not matter. But display_explain and explain
// show zero operator cost and rollup cost, which confuses the user.
// Also, the VQP crashes when the cost tab for the FIRSTN node is selected.
// Creating a cost object fixes this.
// The operator cost is zero and the rollup cost is the same as its child's.
Cost* firstnNodecost = new HEAP Cost();
firstnNode->setOperatorCost(firstnNodecost);
Cost* rollupcost = (Cost *)(child(0)->getRollUpCost());
*rollupcost += *firstnNodecost;
firstnNode->setRollUpCost(rollupcost);
//10-060516-6532 -End
firstnNode =
firstnNode->preCodeGen(generator,
getGroupAttr()->getCharacteristicInputs(),
pulledNewInputs);
if (! firstnNode)
return NULL;
setChild(0, firstnNode);
}
getGroupAttr()->addCharacteristicInputs(pulledNewInputs);
// Accumulate the values that are provided as inputs by my parent
// together with the values that are produced as outputs by my
// children. Use these values for rewriting the VEG expressions.
ValueIdSet availableValues;
getInputValuesFromParentAndChildren(availableValues);
NABoolean replicatePredicates = TRUE;
// Rebuild the grouping expressions tree. Use bridge values, if possible
groupExpr().replaceVEGExpressions
(availableValues,
getGroupAttr()->getCharacteristicInputs(),
FALSE, // No key predicates need to be generated here
NULL,
replicatePredicates,
&getGroupAttr()->getCharacteristicOutputs());
// Rebuild the rollup grouping expressions tree. Use bridge values, if possible
rollupGroupExprList().replaceVEGExpressions
(availableValues,
getGroupAttr()->getCharacteristicInputs(),
FALSE, // No key predicates need to be generated here
NULL,
replicatePredicates,
&getGroupAttr()->getCharacteristicOutputs());
// Rebuild the aggregate expressions tree
aggregateExpr().replaceVEGExpressions
(availableValues,
getGroupAttr()->getCharacteristicInputs());
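// If COMP_BOOL_211 is ON, remove constant expressions from the grouping
// expression; if every grouping expression is a constant, keep one of them so
// that a grouping column remains.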
if (CmpCommon::getDefault(COMP_BOOL_211) == DF_ON)
{
ValueIdSet constantsInGroupExpr ;
groupExpr().getConstantExprs(constantsInGroupExpr,FALSE);
if (constantsInGroupExpr.entries() > 0)
{
if (constantsInGroupExpr.entries() == groupExpr().entries())
{
ValueId vid ;
constantsInGroupExpr.getFirst(vid);
constantsInGroupExpr.remove(vid);
}
groupExpr() -= constantsInGroupExpr ;
}
}
// The VEG expressions in the selection predicates and the characteristic
// outputs can reference any expression that is either a potential output
// or a characteristic input for this RelExpr. Supply these values for
// rewriting the VEG expressions.
getInputAndPotentialOutputValues(availableValues);
selectionPred().replaceVEGExpressions
(availableValues,
getGroupAttr()->getCharacteristicInputs(),
FALSE, // No key predicates need to be generated here
NULL,
replicatePredicates);
getGroupAttr()->resolveCharacteristicOutputs
(availableValues,
getGroupAttr()->getCharacteristicInputs());
// If the grouping is executed in DP2, we don't do overflow
// handling. This also means that it is a partial group by.
// Do not do overflow handling for any partial group by.
//
NABoolean isPartialGroupBy = (isAPartialGroupByNonLeaf() ||
isAPartialGroupByLeaf());
// The old way, only groupbys in DP2 are considered partial
//
if (CmpCommon::getDefault(COMP_BOOL_152) == DF_ON) {
isPartialGroupBy = executeInDP2();
}
if ((getOperatorType() == REL_HASHED_GROUPBY) && !isPartialGroupBy) {
// Count this BMO and add its needed memory to the total needed
generator->incrNumBMOs();
if ((ActiveSchemaDB()->getDefaults()).getAsDouble(EXE_MEMORY_LIMIT_PER_CPU) > 0)
generator->incrBMOsMemory(getEstimatedRunTimeMemoryUsage(TRUE));
}
markAsPreCodeGenned();
// Done.
return this;
} // GroupByAgg::preCodeGen()
RelExpr * MergeUnion::preCodeGen(Generator * generator,
const ValueIdSet & externalInputs,
ValueIdSet &pulledNewInputs)
{
if (nodeIsPreCodeGenned())
return this;
// A temporary union (blocked union introduced for inlining after trigger)
// should not get here. Should be removed in optimization phase.
GenAssert(!getIsTemporary(), "Expecting this blocked union to be removed by this phase");
// Check if the pivs of this operator and its child are the same.
// If they are not, make them the same.
replacePivs();
// clear any prefix sort key in generator work area
generator->clearPrefixSortKey();
// Resolve the VEGReferences and VEGPredicates, if any, that appear
// in the Characteristic Inputs, in terms of the externalInputs.
getGroupAttr()->resolveCharacteristicInputs(externalInputs);
// Predicate pushdown causes the Characteristic Inputs and Outputs
// of the union to be set precisely to those values that are
// required by one of its descendants or by one of its ancestors,
// respectively. However, the colMapTable_ contains all the values
// that the MergeUnion is capable of producing. The colMapTable_
// is rebuilt here to contain exactly those values that appear in
// the Characteristic Outputs.
//
// The output of the union is defined by the ValueIdUnion
// expressions that are maintained in the colMapTable_.
//
ValueIdSet charOutputs = getGroupAttr()->getCharacteristicOutputs();
colMapTable().clear();
for (ValueId v = charOutputs.init();
charOutputs.next(v); charOutputs.advance(v))
{
if (v.getItemExpr()->getOperatorType() != ITM_VALUEIDUNION)
{
// "other" available values besides the value being considered.
ValueIdSet availableValues = charOutputs;
availableValues -= v;
// -------------------------------------------------------------------
// see whether the value being considered is covered by the remaining
// values, that is, whether it is an expression in terms of the
// other vid unions.
// -------------------------------------------------------------------
ValueIdSet outputId;
outputId.insert(v);
outputId.removeUnCoveredExprs(availableValues);
// -------------------------------------------------------------------
// v was removed from outputId. That means it's not covered by the remaining
// vid unions. Add the vid unions that v is expressed in terms of to colMapTable;
// the node needs to produce them. Instead of producing the expression,
// change the node to produce just the vid union; the expression can
// be evaluated at the parent.
// -------------------------------------------------------------------
if (outputId.isEmpty())
{
#pragma nowarn(1506) // warning elimination
Int32 leftIndex = getLeftMap().getTopValues().index(v);
#pragma warn(1506) // warning elimination
#pragma nowarn(1506) // warning elimination
Int32 rightIndex = getRightMap().getTopValues().index(v);
#pragma warn(1506) // warning elimination
CMPASSERT((leftIndex != NULL_COLL_INDEX) &&
(rightIndex != NULL_COLL_INDEX));
ItemExpr *ptr = new(CmpCommon::statementHeap())
ValueIdUnion(getLeftMap().getBottomValues()[leftIndex],
getRightMap().getBottomValues()[rightIndex],v);
v.replaceItemExpr(ptr);
colMapTable().insert(v);
}
}
else
colMapTable().insert(v);
}
// My Characteristic Inputs become the external inputs for my children.
Lng32 nc = (Lng32)getArity();
const ValueIdSet & inputs = getGroupAttr()->getCharacteristicInputs();
for (Lng32 index = 0; index < nc; index++)
{
ValueIdSet pulledInputs;
child(index) = child(index)->preCodeGen(generator,inputs,pulledInputs);
if (child(index).getPtr() == NULL) return NULL;
pulledNewInputs += pulledInputs;
}
// Accumulate the values that are provided as inputs by my parent
// together with the values that are produced as outputs by my
// children. Use these values for rewriting the VEG expressions.
ValueIdSet availableValues;
getInputValuesFromParentAndChildren(availableValues);
// Rebuild the colMapTable
colMapTable().replaceVEGExpressions(availableValues,inputs);
// Rebuild the sortOrder.
sortOrder_.replaceVEGExpressions(availableValues,inputs);
// Rebuild the merge expression
if (mergeExpr_)
{
mergeExpr_ = mergeExpr_->replaceVEGExpressions(availableValues,inputs);
//10-061219-1283: Set the second argument to TRUE to redrive type synthesis of the children.
mergeExpr_->synthTypeAndValueId(TRUE,TRUE);
}
// The VEG expressions in the selection predicates and the characteristic
// outputs can reference any expression that is either a potential output
// or a characteristic input for this RelExpr. Supply these values for
// rewriting the VEG expressions.
getInputAndPotentialOutputValues(availableValues);
// Rebuild the selection predicate tree.
selectionPred().replaceVEGExpressions(availableValues,inputs);
getGroupAttr()->resolveCharacteristicOutputs(availableValues,inputs);
// Rebuild the conditional expression.
condExpr().replaceVEGExpressions(availableValues,
getGroupAttr()->getCharacteristicInputs());
if (!getUnionForIF() && !getInliningInfo().isIMUnion())
generator->oltOptInfo()->setMultipleRowsReturned(TRUE);
markAsPreCodeGenned();
return this;
} // MergeUnion::preCodeGen()
RelExpr * MapValueIds::preCodeGen(Generator * generator,
const ValueIdSet & externalInputs,
ValueIdSet &pulledNewInputs)
{
const ValueIdList &upperValues = map_.getTopValues();
const ValueIdList &lowerValues = map_.getBottomValues();
if (nodeIsPreCodeGenned())
return this;
// Check if the pivs of this operator and its child are the same.
// If they are not, make them the same.
replacePivs();
// Resolve the VEGReferences and VEGPredicates, if any, that appear
// in the Characteristic Inputs, in terms of the externalInputs.
getGroupAttr()->resolveCharacteristicInputs(externalInputs);
// My Characteristic Inputs become the external inputs for my children.
child(0) = child(0)->preCodeGen(
generator,
getGroupAttr()->getCharacteristicInputs(),
pulledNewInputs);
if (child(0).getPtr() == NULL)
return NULL;
getGroupAttr()->addCharacteristicInputs(pulledNewInputs);
if (cseRef_)
{
// -------------------------------------------------------------
// This MapValueIds represents a common subexpression.
//
// We need to take some actions here to help with VEG rewrite,
// since we eliminated some nodes from the tree, while the
// VEGies still contain all equated values, including those that
// got eliminated. Furthermore, the one tree that was chosen for
// materialization got moved and we need to make sure that the
// place where we scan the temp table produces the same ValueIds
// that were marked as "Bridge Values" when we processed the
// insert into temp statement.
// -------------------------------------------------------------
ValueIdSet cseVEGPreds;
const ValueIdList &vegCols(cseRef_->getColumnList());
ValueIdSet nonVegCols(cseRef_->getNonVEGColumns());
NABoolean isAnalyzingConsumer =
(CmpCommon::statement()->getCSEInfo(cseRef_->getName())->
getIdOfAnalyzingConsumer() == cseRef_->getId());
ValueIdSet availableValues(
getGroupAttr()->getCharacteristicInputs());
valuesNeededForVEGRewrite_ += cseRef_->getNonVEGColumns();
availableValues += valuesNeededForVEGRewrite_;
// find all the VEG predicates of the original columns that this
// common subexpression represents...
for (CollIndex v=0; v<vegCols.entries(); v++)
if (vegCols[v].getItemExpr()->getOperatorType() == ITM_VEG_REFERENCE)
{
// look at one particular VEG that is produced by this
// query tree
VEG *veg =
static_cast<VEGReference *>(vegCols[v].getItemExpr())->getVEG();
if (isAnalyzingConsumer && veg->getBridgeValues().entries() > 0)
{
// If we are looking at the analyzing consumer, then
// its child tree "C" got transformed into an
// "insert overwrite table "temp" select * from "C".
// This insert into temp statement chose some VEG
// member(s) as the "bridge value(s)". Find these bridge
// values and choose one to represent the VEG here.
const ValueIdSet &vegMembers(veg->getAllValues());
// collect all VEG members produced and subtract them
// from the values to be used for VEG rewrite
ValueIdSet subtractions(cseRef_->getNonVEGColumns());
// then add back only the bridge value
ValueIdSet additions;
// get the VEG members produced by child C
subtractions.intersectSet(vegMembers);
// augment the base columns with their index columns,
// the bridge value is likely an index column
for (ValueId v=subtractions.init();
subtractions.next(v);
subtractions.advance(v))
if (v.getItemExpr()->getOperatorType() == ITM_BASECOLUMN)
{
subtractions +=
static_cast<BaseColumn *>(v.getItemExpr())->getEIC();
}
// now find a bridge value (or values) that we can
// produce
additions = subtractions;
additions.intersectSet(veg->getBridgeValues());
// if we found it, then adjust availableValues
if (additions.entries() > 0)
{
availableValues -= subtractions;
availableValues += additions;
// do the same for valuesNeededForVEGRewrite_,
// which will be used for rewriting the char.
// outputs
valuesNeededForVEGRewrite_ -= subtractions;
valuesNeededForVEGRewrite_ += additions;
}
}
cseVEGPreds += veg->getVEGPredicate()->getValueId();
} // a VEGRef
// Replace the VEGPredicates, pretending that we still have
// the original tree below us, not the materialized temp
// table. This will hopefully keep the bookkeeping in the
// VEGies correct by setting the right referenced values
// and choosing the right bridge values.
cseVEGPreds.replaceVEGExpressions(
availableValues,
getGroupAttr()->getCharacteristicInputs());
} // this MapValueIds is for a common subexpression
// ---------------------------------------------------------------------
// The MapValueIds node describes a mapping between expressions used
// by its child tree and expressions used by its parent tree. The
// generator will make sure that the output values of the child tree
// and the input values from the parent get passed in the correct
// buffers.
// ---------------------------------------------------------------------
// ---------------------------------------------------------------------
// Replacing VEGReferences in those mapped expressions is not possible
// in all cases; we have to restrict the kind of mappings that can
// be done for expressions involving VEGs. This method assumes that
// references to VEGs do not get altered during the rewrite, in other
// words it assumes mappings of the kind
//
// a) sum(VEGRef(a,b,c)) <----> VEGRef(a,b,c)
//
// and it disallows mappings of the kind
//
// b) count(VEGRef(a,b,c)) <-----> 1
// c) VEGRef(a,b,c) <-----> VEGRef(d,e,f)
//
// Mappings of type b) will still work, as long as the VEGRef is contained
// in some other mapped expression. A possible extension is to store
// in the MapValueIds node which element(s) of which VEGRef should
// be replaced in this step, but this information is hard to get
// during optimization, unless we are looking at a scan node.
// ---------------------------------------------------------------------
// ---------------------------------------------------------------------
// The map contains many mappings, not all of which will have to
// be evaluated by the generator. Only those values that are either
// characteristic output values or are referenced by characteristic
// output values will actually be mapped at execution time. Therefore
// we first determine the actually needed mappings with the coverTest
// method.
// ---------------------------------------------------------------------
GroupAttributes emptyGA;
ValueIdSet coveredExpr;
ValueIdSet referencedUpperValues;
ValueIdMap newMap;
emptyGA.setCharacteristicInputs(getGroupAttr()->getCharacteristicInputs());
emptyGA.coverTest(
getGroupAttr()->getCharacteristicOutputs(), // the expressions needed
upperValues, // offer the upper values as extra inputs
coveredExpr, // doesn't matter which outputs are covered
referencedUpperValues); // get those upper values needed by the outputs
// Compute the values that are available here.
ValueIdSet lowerAvailableValues;
getOutputValuesOfMyChildren(lowerAvailableValues);
lowerAvailableValues += getGroupAttr()->getCharacteristicInputs();
// The VEGReferences that are resolved can appear as leaves of the
// expressions contained in lowerAvailableValues. These values are
// required for remapping the upperValues.
ValueIdSet leafValues;
ValueId x;
for (x = lowerAvailableValues.init();
lowerAvailableValues.next(x);
lowerAvailableValues.advance(x))
x.getItemExpr()->getLeafValueIds(leafValues);
lowerAvailableValues += leafValues;
ValueIdSet upperAvailableValues(valuesNeededForVEGRewrite_);
// The addition of the lower available values is only necessary to
// avoid an assertion failure in VEGReference::replaceVEGReference().
upperAvailableValues += lowerAvailableValues;
// ---------------------------------------------------------------------
// now walk through each needed mapping and replace wildcards in both its
// upper and lower expressions
// ---------------------------------------------------------------------
for (CollIndex i = 0; i < upperValues.entries(); i++)
{
if (referencedUpperValues.contains(upperValues[i]))
{
ItemExpr *newUpper;
ItemExpr *newLower;
// This mapping is actually required, expand wild cards for it
// We used to resolve the upper values using the
// upperAvailableValues. Note that these available values
// might not actually be available to this node. This could
// sometimes cause problems if the VEGRef was resolved to the
// 'wrong' value and the value is in a VEGPRed above. This
// would cause VEGPRed to be resolved incorrectly and
// possibly drop some join predicates.
// Don't need to replace the VEGgies in the upper since they
// will never be codeGen'ed. Just need to replace them with
// a suitable substitute.
// If it is a VEG_REF, then replace it with a surrogate
// (NATypeToItem) otherwise leave it as is. (Don't use the
// surrogate for all upper values because there are some
// MVIds that have BaseColumns in the upper values. These
// MVIds are introduced by Triggers. And these BaseColumns
// are used in other operators in other parts of the tree
// where they are expected to always be BaseColumns. So
// mapping them here will cause problems elsewhere). In any
// case, all we need to do here is to get rid of the
// VEGRefs.
//
newLower = lowerValues[i]
.getItemExpr()
->replaceVEGExpressions
(lowerAvailableValues,
getGroupAttr()->getCharacteristicInputs());
newUpper = upperValues[i].getItemExpr();
if (upperValues[i] != lowerValues[i])
{
if (newUpper->getOperatorType() == ITM_VEG_REFERENCE)
{
if (valuesNeededForVEGRewrite_.entries() > 0)
// If this node is used to map the outputs of one
// table to those of another, upperAvailableValues
// has been constructed to contain the base column a
// vegref should map to, so we use that instead of a
// created surrogate.
newUpper = newUpper->replaceVEGExpressions
(upperAvailableValues,
getGroupAttr()->getCharacteristicInputs());
else {
NAType *mapType =
upperValues[i].getType().newCopy(generator->wHeap());
// Create replacement for VEGRef
//
ItemExpr *mapping =
new(generator->wHeap()) NATypeToItem(mapType);
ValueId id = upperValues[i];
// Replace in ValueDescArray. All instances of this ID
// will now map to the surrogate.
//
id.replaceItemExpr(mapping);
newUpper = upperValues[i].getItemExpr();
}
}
} else {
// since they are the same, make upper equal to lower..
newUpper = newLower;
}
// add the mapping that may have been rewritten to the new map
newMap.addMapEntry(newUpper->getValueId(),newLower->getValueId());
}
}
// now replace the map with the recomputed mappings
map_ = newMap;
// The selectionPred() on a MapValueId should have been pushed down
// by the optimizer.
GenAssert(selectionPred().isEmpty(),"NOT selectionPred().isEmpty()");
// The VEG expressions in the selection predicates and the characteristic
// outputs can reference any expression that is either a potential output
// or a characteristic input for this RelExpr. Supply these values for
// rewriting the VEG expressions.
// Be thrifty. Reuse coveredExpr for gathering the input and output values.
getInputAndPotentialOutputValues(coveredExpr);
// Add the value that is being fabricated by the MapValueIds to the values
// that are produced by its child and flow through the MapValueIds.
lowerAvailableValues += coveredExpr;
getGroupAttr()->resolveCharacteristicOutputs
(lowerAvailableValues,
getGroupAttr()->getCharacteristicInputs());
markAsPreCodeGenned();
return this;
} // MapValueIds::preCodeGen()
RelExpr * Sort::preCodeGen(Generator * generator,
const ValueIdSet & externalInputs,
ValueIdSet &pulledNewInputs)
{
if (nodeIsPreCodeGenned())
return this;
//else
// cerr << "Possible error..."
// Check if the pivs of this operator and its child are the same.
// If they are not, make them the same.
replacePivs();
// Resolve the VEGReferences and VEGPredicates, if any, that appear
// in the Characteristic Inputs, in terms of the externalInputs.
getGroupAttr()->resolveCharacteristicInputs(externalInputs);
// if doing Partial Sorting, store partial sort key in generator work area
// if the split-top node is providing this underneath, protect the order
// else clear the partial sort key
ValueIdList prefixSortKey = getPrefixSortKey();
generator->clearPrefixSortKey();
if (!prefixSortKey.isEmpty())
generator->setPrefixSortKey(prefixSortKey);
PhysicalProperty* unPreCodeGendPP = NULL;
// Protect against scan of self-referencing table partitions
// completing asynchronously, thus allowing the various instances
// of SORT to start returning rows before all scans are complete.
// Let the PartitionAccess::preCodeGen and Exchange::preCodeGen
// work together to detect this. Part of the fix for solution
// 10-071204-9253.
bool doCheckUnsycHalloweenScans = false;
// solution 10-100310-8659
bool fixSolution8659 = false;
int numUnblockedHalloweenScansBefore =
generator->getUnblockedHalloweenScans();
bool needToRestoreLSH = false;
bool saveLSH = generator->getPrecodeHalloweenLHSofTSJ();
// This is the pre-R2.5.1 test that triggers the check for unblocked access.
// Note that it indirectly depends on COMP_BOOL_166 OFF.
if (checkAccessToSelfRefTable_)
doCheckUnsycHalloweenScans = true;
// This is the R2.5.1 way -- see solution 10-100310-8659.
if ((generator->getPrecodeHalloweenLHSofTSJ()) &&
(!generator->getPrecodeRHSofNJ()))
{
if (generator->getHalloweenSortForced())
markAsHalloweenProtection();
if (generator->preCodeGenParallelOperator() &&
!generator->getHalloweenESPonLHS())
{
doCheckUnsycHalloweenScans = true;
fixSolution8659 = true;
}
else
{
// This serial sort is enough to block the
// scan of the target table. No need for further
// checking. Notice this serial vs. parallel sort test
// was made in NestedJoin::preCodeGen before the fix
// for 10-100310-8659.
doCheckUnsycHalloweenScans = false;
// More for 10-100310-8659 - don't call incUnblockedHalloweenScans
// below this node.
generator->setPrecodeHalloweenLHSofTSJ(false);
needToRestoreLSH = true;
GenAssert(generator->unsyncdSortFound() == FALSE,
"Unknown operator set unsyncdSortFound.");
}
}
if (doCheckUnsycHalloweenScans)
{
generator->setCheckUnsyncdSort(TRUE);
// Preserve a copy of the child's physical properties
// as it is before preCodeGen is called for the child.
// Also, in this copy of the physical properties, use
// a copy of the child's partitioning function. This
// will be used in case we need to insert an ESP for
// halloween protection.
unPreCodeGendPP = new (CmpCommon::statementHeap())
PhysicalProperty(*child(0)->getPhysicalProperty(),
child(0)->getPhysicalProperty()->getSortKey(),
child(0)->getPhysicalProperty()->getSortOrderType(),
child(0)->getPhysicalProperty()->getDp2SortOrderPartFunc(),
child(0)->getPhysicalProperty()->
getPartitioningFunction()->copy()
);
}
if ( CmpCommon::getDefault(COMPRESSED_INTERNAL_FORMAT) == DF_SYSTEM)
{
NABoolean resize = FALSE;
NABoolean defrag = FALSE;
// get the char outputs and not the child's
ValueIdSet vidSet = getGroupAttr()->getCharacteristicOutputs();
ExpTupleDesc::TupleDataFormat tupleFormat =
determineInternalFormat( vidSet,
this,
resize,
generator,
FALSE,
defrag);
cacheTupleFormatAndResizeFlag(tupleFormat, resize, defrag);
if (tupleFormat == ExpTupleDesc::SQLMX_ALIGNED_FORMAT)
{
generator->incNCIFNodes();
}
else
{
generator->decNCIFNodes();
}
}
// My Characteristic Inputs become the external inputs for my child.
child(0) = child(0)->preCodeGen(generator,
getGroupAttr()->getCharacteristicInputs(),
pulledNewInputs);
generator->clearPrefixSortKey();
if (! child(0).getPtr())
return NULL;
if (needToRestoreLSH)
generator->setPrecodeHalloweenLHSofTSJ(saveLSH);
getGroupAttr()->addCharacteristicInputs(pulledNewInputs);
// Accumulate the values that are provided as inputs by my parent
// together with the values that are produced as outputs by my
// children. Use these values for rewriting the VEG expressions.
ValueIdSet availableValues;
getInputValuesFromParentAndChildren(availableValues);
// ----------------------------------------------------------------------
// Replace VEGReferences in the order by list
// Bugfix: sol# 10-020909-1555/56: the last argument, if not explicitly
// stated, defaults to FALSE, and causes a shallow copy of the tree.
// ----------------------------------------------------------------------
sortKey_.replaceVEGExpressions
(availableValues,
getGroupAttr()->getCharacteristicInputs(),
FALSE, // default
NULL, // default
TRUE); // bugfix
// The VEG expressions in the selection predicates and the characteristic
// outputs can reference any expression that is either a potential output
// or a characteristic input for this RelExpr. Supply these values for
// rewriting the VEG expressions.
getInputAndPotentialOutputValues(availableValues);
getGroupAttr()->resolveCharacteristicOutputs
(availableValues,
getGroupAttr()->getCharacteristicInputs());
  // Consider Sort as a BMO memory participant if it is not doing a partial sort.
if (prefixSortKey.entries() == 0 ||
CmpCommon::getDefault(COMP_BOOL_84) == DF_ON)
{
if (CmpCommon::getDefault(SORT_MEMORY_QUOTA_SYSTEM) != DF_OFF) {
generator->incrNumBMOs();
if ((ActiveSchemaDB()->getDefaults()).getAsDouble(EXE_MEMORY_LIMIT_PER_CPU) > 0)
generator->incrBMOsMemory(getEstimatedRunTimeMemoryUsage(TRUE));
}
}
markAsPreCodeGenned();
// Part of the fix for solution 10-071204-9253.
// Modified for 10-100310-8659
if (doCheckUnsycHalloweenScans && generator->unsyncdSortFound())
{
RelExpr *newChild = generator->insertEspExchange(child(0),
unPreCodeGendPP);
((Exchange *)newChild)->markAsHalloweenProtection();
newChild =
newChild->preCodeGen(generator, externalInputs, pulledNewInputs);
GenAssert(newChild->getOperatorType() == REL_EXCHANGE,
"Exchange eliminated despite our best efforts.");
child(0) = newChild;
// Now that an ESP is inserted above the scans, this sort operator
// does block the scans, so we can discount them.
if (fixSolution8659)
{
generator->setUnsyncdSortFound(FALSE);
generator->setUnblockedHalloweenScans(
numUnblockedHalloweenScansBefore);
}
}
return this;
} // Sort::preCodeGen()
RelExpr * SortFromTop::preCodeGen(Generator * generator,
const ValueIdSet & externalInputs,
ValueIdSet &pulledNewInputs)
{
return Sort::preCodeGen(generator, externalInputs, pulledNewInputs);
}
RelExpr *ProbeCache::preCodeGen(Generator * generator,
const ValueIdSet & externalInputs,
ValueIdSet &pulledNewInputs)
{
if (nodeIsPreCodeGenned())
return this;
  // Check if the pivs of this operator and its child are the same.
// If they are not, make them the same.
replacePivs();
// Resolve the VEGReferences and VEGPredicates, if any, that appear
// in the Characteristic Inputs, in terms of the externalInputs.
getGroupAttr()->resolveCharacteristicInputs(externalInputs);
// My Characteristic Inputs become the external inputs for my child.
child(0) = child(0)->preCodeGen(generator,
getGroupAttr()->getCharacteristicInputs(),
pulledNewInputs);
if (! child(0).getPtr())
return NULL;
// add one more value to "valuesGivenToChild_": a statement execution
// count that will invalidate cache each time the statement is
// re-executed. It would be incorrect to cache across
// statement executions (and possibly transactions).
ValueId execCount = generator->getOrAddStatementExecutionCount();
pulledNewInputs += execCount;
getGroupAttr()->addCharacteristicInputs(pulledNewInputs);
// Accumulate the values that are provided as inputs by my parent
// together with the values that are produced as outputs by my
// children. Use these values for rewriting the VEG expressions.
ValueIdSet availableValues;
getInputValuesFromParentAndChildren(availableValues);
// Rewrite the selection predicates.
NABoolean replicatePredicates = TRUE;
selectionPred().replaceVEGExpressions
(availableValues,
getGroupAttr()->getCharacteristicInputs(),
FALSE, // no need to generate key predicates here
0 /* no need for idempotence here */,
replicatePredicates
);
getGroupAttr()->resolveCharacteristicOutputs
(availableValues,
getGroupAttr()->getCharacteristicInputs());
/*
TBD - maybe ProbeCache as BMO memory participant??
if(CmpCommon::getDefault(PROBE_CACHE_MEMORY_QUOTA_SYSTEM) != DF_OFF)
generator->incrNumBMOs();
*/
if ((ActiveSchemaDB()->getDefaults()).getAsDouble(EXE_MEMORY_LIMIT_PER_CPU) > 0)
generator->incrNBMOsMemoryPerCPU(getEstimatedRunTimeMemoryUsage(TRUE));
markAsPreCodeGenned();
return this;
} // ProbeCache::preCodeGen()
RelExpr * Exchange::preCodeGen(Generator * generator,
const ValueIdSet & externalInputs,
ValueIdSet &pulledNewInputs)
{
if (nodeIsPreCodeGenned())
return this;
// Set a flag if this is a parallel extract consumer. The flag for
// extract producer queries gets set earlier in RelRoot::codeGen()
if (child(0)->getOperatorType() == REL_EXTRACT_SOURCE)
{
isExtractConsumer_ = TRUE;
GenAssert(!isExtractProducer_,
"One extact query cannot be both producer and consumer");
}
const PhysicalProperty* sppOfChild = child(0)->getPhysicalProperty();
NABoolean PivsReplaced = FALSE;
if (sppOfChild->getPlanExecutionLocation() == EXECUTE_IN_DP2) {
// If this is not an ESP exchange, then check if the pivs of this op
    // and its child are the same. If they are not, make them the same.
// We don't do this for an ESP exchange because an ESP exchange
// denotes an ESP process boundary and the child's pivs
// do not have to be the same as the parent and in fact should
// not be the same.
replacePivs();
PivsReplaced = TRUE;
}
RelExpr *result = this;
// ---------------------------------------------------------------------
// copy important info from the properties into data members
// ---------------------------------------------------------------------
storePhysPropertiesInNode(generator->getPrefixSortKey());
// If this is a parallel extract producer query:
// - do a few checks to make sure the plan is valid
// - store a copy of the root's select list
if (isExtractProducer_)
{
RelRoot *root = generator->getBindWA()->getTopRoot();
    // The plan is valid if this is an ESP exchange and the number of
    // bottom partitions matches the number of requested streams.
ComUInt32 numRequestedStreams = root->getNumExtractStreams();
ComUInt32 numBottomEsps = (ComUInt32)
getBottomPartitioningFunction()->getCountOfPartitions();
if (!isEspExchange() || (numRequestedStreams != numBottomEsps))
{
*CmpCommon::diags() << DgSqlCode(-7004);
GenExit();
return NULL;
}
// Make a copy of the root's select list
extractSelectList_ = new (generator->wHeap())
ValueIdList(root->compExpr());
    // Do a coverage test to find values in the select list that
    // this operator cannot already provide.
ValueIdSet valuesIDontHave(*extractSelectList_);
ValueIdSet coveredExpr;
ValueIdSet referencedUpperValues;
getGroupAttr()->coverTest(valuesIDontHave, // expressions needed
externalInputs, // extra inputs
coveredExpr, // covered exprs
referencedUpperValues); // new values needed
// Add the needed values to characteristic inputs
pulledNewInputs += referencedUpperValues;
getGroupAttr()->addCharacteristicInputs(referencedUpperValues);
}
// ---------------------------------------------------------------------
// Resolve the VEGReferences and VEGPredicates, if any, that appear
// in the Characteristic Inputs, in terms of the externalInputs.
// ---------------------------------------------------------------------
ValueIdSet saveCharInputs = getGroupAttr()->getCharacteristicInputs();
getGroupAttr()->resolveCharacteristicInputs(externalInputs);
// variables that store the result of the major decisions:
//
// makeThisExchangeAPapa: if this is a PAPA node, then make this
// node the PAPA (and add a PA below it)
// eliminateThisExchange: get rid of this node either because
// it represents a sole PA or because it is
// a redundant ESP exchange
// topPartFunc_: the partitioning function produced by
// this node after we're done with preCodeGen
// bottomPartFunc_: the partitioning function produced by
// the child of this node
// paPartFunc: the partitioning function produced by the
// PA node inserted below
  //     lppf:                    LogPhysPartitioningFunction of the child,
// if the child has such a part. function
NABoolean makeThisExchangeAPapa = FALSE;
NABoolean eliminateThisExchange = FALSE;
const PartitioningFunction *paPartFunc = topPartFunc_;
const LogPhysPartitioningFunction *lppf = NULL;
if (isDP2Exchange() AND
bottomPartFunc_->isALogPhysPartitioningFunction())
{
lppf = bottomPartFunc_->castToLogPhysPartitioningFunction();
if (lppf->getUsePapa() || getGroupAttr()->isEmbeddedUpdateOrDelete())
{
// Will a merge of sorted streams need to be done?
if (NOT sortKeyForMyOutput_.isEmpty())
{
Lng32 maxPartsPerGroup;
// Since a merge of sorted streams is needed, we must
// ensure that there is one PA for every partition in every
// process. The optimizer should already have set this up
// correctly, but sometimes, due to plan stealing, the value
// can be wrong. This code is really a patch for the plan
// stealing problem. We could try to fix the plan stealing
// problem, but that would adversely affect compile time.
// To set the number of clients (i.e. PAs) we must cast away
// the const-ness, sorry.
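          // For example (illustrative numbers only): with 4 top (ESP)
          // partitions, each a grouping of at most 8 DP2 partitions,
          // numOfClients becomes 8 * 4 = 32; if the top function is not
          // a grouping of the bottom one, the fallback below uses
          // bottomPartitions * topPartitions instead.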
if (topPartFunc_->isAGroupingOf(*bottomPartFunc_,
&maxPartsPerGroup))
{
((LogPhysPartitioningFunction*)lppf)->setNumOfClients(
maxPartsPerGroup * topPartFunc_->getCountOfPartitions());
}
else
{
((LogPhysPartitioningFunction*)lppf)->setNumOfClients(
bottomPartFunc_->getCountOfPartitions() *
topPartFunc_->getCountOfPartitions());
}
}
// Keep this exchange and make it the PAPA node. The PA
// nodes below the PAPA will actually produce a partitioning
// scheme that is identical to that of the DP2 operator below,
// since the PAPA splits its requests into smaller ones that
// do not span DP2 partition boundaries.
makeThisExchangeAPapa = TRUE;
paPartFunc = bottomPartFunc_;
}
}
if (!PivsReplaced && isRedundant_)
replacePivs();
// flag to decide whether to use the characteristic inputs or outputs
  // as input to the CIF determineInternalFormat function:
  // if the child is an insert or update then we consider the char. inputs,
  // otherwise we use the char. outputs
NABoolean useCharInputs = FALSE;
// ---------------------------------------------------------------------
// If the child of this Exchange executes in DP2, then allocate a
// PartitionAccess operator. It should have the same Group Attributes
// as its child.
// ---------------------------------------------------------------------
NABoolean savedOltMsgOpt = generator->oltOptInfo()->oltMsgOpt();
NABoolean inputOltMsgOpt = generator->oltOptInfo()->oltMsgOpt();
unsigned short prevNumBMOs = generator->replaceNumBMOs(0);
CostScalar prevBMOsMemoryUsage = generator->replaceBMOsMemoryUsage(0);
// These are used to fix solution 10-071204-9253 and for
// solution 10-100310-8659.
bool needToRestoreParallel = false;
NABoolean savedParallelSetting = FALSE;
bool needToRestoreCheckUnsync = false;
NABoolean savedCheckUnsyncdSort = FALSE;
bool needToRestoreLHS = false;
bool halloweenLHSofTSJ = generator->getPrecodeHalloweenLHSofTSJ();
bool needToRestoreESP = false;
bool halloweenESPonLHS = generator->getHalloweenESPonLHS();
if (isEspExchange() && getBottomPartitioningFunction()->isPartitioned())
{
// Tell any child NJ that its Halloween blocking operator (SORT)
// is operating in parallel.
savedParallelSetting = generator->preCodeGenParallelOperator();
generator->setPreCodeGenParallelOperator(TRUE);
needToRestoreParallel = true;
}
if (isEspExchange() && halloweenLHSofTSJ)
{
if ( !isRedundant_ )
{
// Tell any parallel SORT below that it doesn't have to check
// unsyncd access.
needToRestoreESP = true;
generator->setHalloweenESPonLHS(true);
}
savedCheckUnsyncdSort = generator->checkUnsyncdSort();
if (savedCheckUnsyncdSort == TRUE)
{
// savedCheckUnsyncdSort tells me there is a parallel SORT above this
// exchange. This ESP guarantees that all instances of the SORT will
// block until all instances of this ESP finish. So tell any child
        // PARTITION ACCESS that its scan of a self-referencing table is sync'd.
generator->setCheckUnsyncdSort(FALSE);
needToRestoreCheckUnsync = true;
// More for 10-100310-8659 - don't call incUnblockedHalloweenScans
// below this node.
halloweenLHSofTSJ = generator->setPrecodeHalloweenLHSofTSJ(false);
needToRestoreLHS = true;
}
}
else if (isEspExchange() &&
// this isPartitioned() condition is probably a bug, but
// to be safe I am not fixing it now.
getBottomPartitioningFunction()->isPartitioned())
{
// Tell any child PARTITION ACCESS that its scan of a self-referencing
// table is synchronized by an ESP exchange. That is, any blocking
// SORT operator above this exchange will not get any rows until all
// scans have finished.
savedCheckUnsyncdSort = generator->checkUnsyncdSort();
generator->setCheckUnsyncdSort(FALSE);
needToRestoreCheckUnsync = true;
}
if (halloweenSortIsMyChild_ && isRedundant_)
{
// Before eliminating itself, and before preCodeGen'ing the child
// tree, this Exchange will tell its child (a Sort) that it needs to
// check for unsynchronized access to the target table of a
// self-referencing update. This is part of the fix for
// solution 10-090310-9876.
((Sort *)(child(0).getPtr()))->doCheckAccessToSelfRefTable();
// Note for solution 10-100310-8659 -- the halloweenSortIsMyChild_
// flag will only be set when the COMP_BOOL_166 is used to revert
// to pre-bugfix behavior. With the fix for 10-100310-8659, the
// Sort uses the Generator's flags (precodeHalloweenLHSofTSJ and
// precodeRHSofNJ) to know if it needs check access to the target
// table. In other words, unless COMP_BOOL_166 is used, this
// is dead code.
}
if ( CmpCommon::getDefault(COMPRESSED_INTERNAL_FORMAT) == DF_SYSTEM)
{
NABoolean resize = FALSE;
NABoolean defrag = FALSE;
ValueIdSet vidSet;
if (!useCharInputs)
{
vidSet = child(0)->getGroupAttr()->getCharacteristicOutputs();
}
else
{
vidSet = saveCharInputs;
}
ExpTupleDesc::TupleDataFormat tupleFormat =
determineInternalFormat( vidSet,
this,
resize,
generator,
FALSE,
defrag);
cacheTupleFormatAndResizeFlag(tupleFormat, resize, defrag);
if (tupleFormat == ExpTupleDesc::SQLMX_ALIGNED_FORMAT)
{
generator->incNCIFNodes();
}
else
{
generator->decNCIFNodes();
}
}
// For HashJoin MIN/MAX optimization. If this is an ESP Exchange,
// block all candidate values for min/max optimization from going
// below this Exchange. Restore them upon return from
// preCodeGen'ing the child.
ValueIdList minMaxKeys, minVals, maxVals, willUseMinMaxKeys;
if(isEspExchange()) {
// Save the current values.
minMaxKeys = generator->getMinMaxKeys();
minVals = generator->getMinVals();
maxVals = generator->getMaxVals();
willUseMinMaxKeys = generator->getWillUseMinMaxKeys();
// Clear the current values.
generator->getMinMaxKeys().clear();
generator->getMinVals().clear();
generator->getMaxVals().clear();
generator->getWillUseMinMaxKeys().clear();
}
// ---------------------------------------------------------------------
// Perform preCodeGen on the child (including PA node if we created it)
// ---------------------------------------------------------------------
child(0) = child(0)->preCodeGen(
generator,
getGroupAttr()->getCharacteristicInputs(),
pulledNewInputs);
// For HashJoin MIN/MAX optimization.
if(isEspExchange()) {
// Restore the saved values.
generator->getMinMaxKeys() = minMaxKeys;
generator->getMinVals() = minVals;
generator->getMaxVals() = maxVals;
generator->getWillUseMinMaxKeys() = willUseMinMaxKeys;
}
if (needToRestoreParallel)
generator->setPreCodeGenParallelOperator(savedParallelSetting);
if (needToRestoreCheckUnsync)
generator->setCheckUnsyncdSort(savedCheckUnsyncdSort);
if (needToRestoreLHS)
generator->setPrecodeHalloweenLHSofTSJ(halloweenLHSofTSJ);
if (needToRestoreESP)
generator->setHalloweenESPonLHS(halloweenESPonLHS);
setNumBMOs( generator->replaceNumBMOs(prevNumBMOs) );
setBMOsMemoryUsage( generator->replaceBMOsMemoryUsage(prevBMOsMemoryUsage) );
if (! child(0).getPtr())
return NULL;
generator->oltOptInfo()->setOltMsgOpt(savedOltMsgOpt);
// Decide whether this Exchange should try to eliminate itself.
if (child(0)->castToRelExpr()->getOperatorType() == REL_EXE_UTIL)
{
// No, the REL_EXE_UTIL must execute in an ESP.
}
else if (skipRedundancyCheck_)
{
// No, the ESP was inserted just to force blocking of
// data from SORT instances, to help prevent Halloween
// problem -- see Soln 10-071204-9253.
}
else
{
// Yes, perform the redundancy check.
eliminateThisExchange = (isRedundant_ OR
(isDP2Exchange() AND NOT makeThisExchangeAPapa));
}
// ---------------------------------------------------------------------
// Determine which partition input values need to be supplied by our
// parent and which are produced by this exchange node. PA or PAPA
// exchange nodes (DP2 exchange nodes) do not produce any partition
  // input values themselves; they just ask the parent to produce the PIVs
// needed by the child. ESP exchanges produce the PIVs for their bottom
// partition function, and this is also true for added repartitioning
// exchanges.
// ---------------------------------------------------------------------
if (isEspExchange())
{
pulledNewInputs -= bottomPartFunc_->getPartitionInputValues();
setBottomPartitionInputValues(
bottomPartFunc_->getPartitionInputValuesLayout());
}
getGroupAttr()->addCharacteristicInputs(pulledNewInputs);
// ---------------------------------------------------------------------
// The VEG expressions in the selection predicates and the characteristic
// outputs can reference any expression that is either a potential output
// or a characteristic input for this RelExpr. Supply these values for
// rewriting the VEG expressions.
// ---------------------------------------------------------------------
ValueIdSet availableValues;
getInputAndPotentialOutputValues(availableValues);
// Accumulate the values that are provided as inputs by my parent
// together with the values that are produced as outputs by my
// children. Use these values for rewriting the VEG expressions.
// ---------------------------------------------------------------------
// Rewrite the copy of the sort key which will be used for merging
  // rows. The VEGRef on the column being sorted may be preceded by
// an InverseOrder itemExpr (in case the shortcut_grby rule has fired)
// The InverseOrder itemExpr will not perform a copy of the sortKey
// before replacing VEGExpressions unless replicateExpression is set
// to TRUE below. This avoids inverse(VEGRef_60(T1.a = T2.a)) being
// resolved to T1.a in two different exchange nodes, even though T1.a
// is not available at the second exchange node.
// ---------------------------------------------------------------------
NABoolean replicateExpression = TRUE;
sortKeyForMyOutput_.replaceVEGExpressions
(availableValues,
getGroupAttr()->getCharacteristicInputs(),
FALSE, // no key predicates here
0 /* no need for idempotence here */,
replicateExpression
);
// ---------------------------------------------------------------------
// Rewrite the partitioning expression, if the repartitioning function
// contains one. A ReplicationPartitioningFunction does not contain
  // a partitioning expression because it uses a broadcast for
// replicating rows to its consumers.
// ---------------------------------------------------------------------
if (isEspExchange())
{
PartitioningFunction * rpf;
// need to cast away const-ness to create partitioning expr, sorry
rpf = (PartitioningFunction *) topPartFunc_;
rpf->createPartitioningExpression();
rpf->preCodeGen(availableValues);
}
// ---------------------------------------------------------------------
// For a parallel extract producer query, rewrite our copy of the
// root's select list
// ---------------------------------------------------------------------
if (isExtractProducer_)
{
extractSelectList_->
replaceVEGExpressions(availableValues,
getGroupAttr()->getCharacteristicInputs());
}
// ---------------------------------------------------------------------
// Resolve characteristic outputs.
// ---------------------------------------------------------------------
getGroupAttr()->resolveCharacteristicOutputs
(availableValues,
getGroupAttr()->getCharacteristicInputs());
generator->oltOptInfo()->mayDisableOperStats(&oltOptInfo());
// ---------------------------------------------------------------------
// From here on we add or remove exchange nodes, but this node is
// ready and does not need to be processed again should we call
// preCodeGen for it again.
// ---------------------------------------------------------------------
markAsPreCodeGenned();
// ---------------------------------------------------------------------
// Eliminate this exchange if it simply represented the PA node or
// if it is redundant. Do not eliminate the exchange if it is a
// parallel extract producer or consumer.
// ---------------------------------------------------------------------
if (isExtractProducer_ || isExtractConsumer_)
eliminateThisExchange = FALSE;
if (eliminateThisExchange)
{
result = child(0).getPtr();
// transfer the # of BMOs and their memory usages to generator as
// this exchange node is to be discarded.
generator->incrBMOsMemoryPerFrag(getBMOsMemoryUsage());
generator->incrNumBMOsPerFrag(getNumBMOs());
}
if ((isEspExchange()) &&
(NOT eliminateThisExchange))
{
// generator->setUpdAbortOnError(TRUE);
generator->setUpdSavepointOnError(FALSE);
generator->setUpdErrorOnError(FALSE);
generator->compilerStatsInfo().exchangeOps()++;
generator->compilerStatsInfo().dop() =
(UInt16)MAXOF(generator->compilerStatsInfo().dop(),
getBottomPartitioningFunction()->getCountOfPartitions());
if ( getNumBMOs() > 0 )
generator->incTotalESPs();
// If the exchange uses SeaMonster, set a flag in the generator
// to indicate that some part of the query does use SeaMonster
if (thisExchangeCanUseSM(generator->getBindWA()))
generator->setQueryUsesSM(TRUE);
} // isEspExchange() && !eliminateThisExchange
if ((ActiveSchemaDB()->getDefaults()).getAsDouble(EXE_MEMORY_LIMIT_PER_CPU) > 0)
generator->incrNBMOsMemoryPerCPU(getEstimatedRunTimeMemoryUsage(TRUE));
return result;
} // Exchange::preCodeGen()
RelExpr * Tuple::preCodeGen(Generator * generator,
const ValueIdSet & externalInputs,
ValueIdSet &pulledNewInputs)
{
if (nodeIsPreCodeGenned())
return this;
if (! RelExpr::preCodeGen(generator,externalInputs,pulledNewInputs))
return NULL;
// Accumulate the values that are provided as inputs by my parent
// together with the values that are produced as outputs by my
// children. Use these values for rewriting the VEG expressions.
ValueIdSet availableValues = getGroupAttr()->getCharacteristicInputs();
tupleExpr().replaceVEGExpressions
(availableValues,
getGroupAttr()->getCharacteristicInputs());
return this;
}
ItemExpr * BuiltinFunction::preCodeGen(Generator * generator)
{
ItemExpr * retExpr = NULL;
if (nodeIsPreCodeGenned())
return getReplacementExpr();
if (CmpCommon::getDefault(OR_PRED_KEEP_CAST_VC_UCS2) == DF_ON) {
// part of temporary workaround to yotta dp2 killer problem:
// keep cast for upper(cast name as varchar(n) char set ucs2)
switch (getOperatorType()) {
case ITM_UPPER:
case ITM_LOWER:
case ITM_SUBSTR:
case ITM_TRIM:
if (child(0)->getOperatorType() == ITM_CAST) {
Cast *arg = (Cast*)child(0)->castToItemExpr();
const NAType& typ = arg->getValueId().getType();
if (arg->matchChildType() &&
arg->child(0)->getValueId().getType() == typ &&
typ.getTypeQualifier() == NA_CHARACTER_TYPE &&
typ.isVaryingLen() &&
((CharType*)(&typ))->getCharSet() == CharInfo::UCS2) {
// don't skip codegen for the cast of
// "upper(cast name as varchar(n) char set ucs2) IN <inlist>"
arg->setMatchChildType(FALSE);
}
}
}
}
if (! ItemExpr::preCodeGen(generator))
return NULL;
switch (getOperatorType())
{
case ITM_QUERYID_EXTRACT:
{
// convert arguments to ISO88591 character set
if (child(0)->castToItemExpr()->getValueId().getType().getTypeQualifier() ==
NA_CHARACTER_TYPE)
{
const CharType &typ0 =
(const CharType &) (child(0)->castToItemExpr()->getValueId().getType());
if (typ0.getCharSet() != CharInfo::ISO88591)
{
// the executor method assumes an ASCII string for the query id, so
// convert the value to a fixed char type in the ISO88591 char set
SQLChar * newTyp0 = new(generator->wHeap())
SQLChar(typ0.getCharLimitInUCS2or4chars(),
typ0.supportsSQLnullLogical(),
typ0.isUpshifted(),
typ0.isCaseinsensitive(),
typ0.isVaryingLen(),
CharInfo::ISO88591);
child(0) = new (generator->wHeap()) Cast(child(0), newTyp0);
child(0)->bindNode(generator->getBindWA());
child(0) = child(0)->preCodeGen(generator);
}
}
if (child(1)->castToItemExpr()->getValueId().getType().getTypeQualifier() ==
NA_CHARACTER_TYPE)
{
const CharType &typ1 =
(const CharType &) (child(1)->castToItemExpr()->getValueId().getType());
if (typ1.getCharSet() != CharInfo::ISO88591)
{
// the executor method assumes an ASCII string for the query id, so
// convert the value to a fixed char type in the ISO88591 char set
SQLChar * newTyp1 = new(generator->wHeap())
SQLChar(typ1.getCharLimitInUCS2or4chars(),
typ1.supportsSQLnullLogical(),
typ1.isUpshifted(),
typ1.isCaseinsensitive(),
typ1.isVaryingLen(),
CharInfo::ISO88591);
child(1) = new (generator->wHeap()) Cast(child(1), newTyp1);
child(1)->bindNode(generator->getBindWA());
child(1) = child(1)->preCodeGen(generator);
}
}
}
retExpr = this;
break;
default:
{
retExpr = this;
}
break;
} // switch
setReplacementExpr(retExpr);
markAsPreCodeGenned();
return retExpr;
}
/*
ItemExpr * Abs::preCodeGen(Generator * generator)
{
// The ABS function has the distinction of being the sole BuiltinFunction
// that a) generates a new replacementExpr tree
// and b) can appear in the select-list (compExpr).
//
// What happens is that code is generated for the ABS replacement CASE
// TWICE, once in PartitionAccess eid, once in RelRoot generateOutputExpr:
// the latter fails with a GenMapTable assert failing to find info for
// the column in "SELECT ABS(col) FROM t;"
// ("SELECT ABS(-1) FROM t;" and "SELECT ABS(col),col FROM T;" work fine --
// but of course they generate twice as much code as necessary,
// however harmless/idempotent it may be...)
//
// We therefore cannot handle this one discrepant case neatly in
// preCodeGen/codeGen -- it is fixed instead by having the Binder
// upstream rewrite an ABS as the equivalent CASE.
//
// Genesis 10-980112-5942.
//
GenAssert(FALSE, "Abs::preCodeGen should be unreachable code!");
return NULL;
//if (nodeIsPreCodeGenned())
// return getReplacementExpr();
//
//ItemExpr * newExpr =
// generator->getExpGenerator()->createExprTree(
// "CASE WHEN @A1 < 0 THEN - @A1 ELSE @A1 END", 0, 1, child(0));
//
//newExpr->bindNode(generator->getBindWA());
//setReplacementExpr(newExpr->preCodeGen(generator));
//markAsPreCodeGenned();
//return getReplacementExpr();
}
*/
ItemExpr * Abs::preCodeGen(Generator * generator)
{
if (nodeIsPreCodeGenned())
return getReplacementExpr();
if (! ItemExpr::preCodeGen(generator))
return NULL;
NAType * result_type = (NAType *)(&(getValueId().getType()));
NAType * type_op1 = (NAType *)(&(child(0)->castToItemExpr()->getValueId().getType()));
if (! (*result_type == *type_op1))
{
// Insert a cast node to convert child to a result type.
child(0) = new (generator->wHeap())
Cast(child(0), result_type);
child(0)->bindNode(generator->getBindWA());
child(0) = child(0)->preCodeGen(generator);
if (! child(0).getPtr())
return NULL;
}
markAsPreCodeGenned();
return this;
} // Abs::preCodeGen()
ItemExpr * AggrMinMax::preCodeGen(Generator * generator)
{
if (nodeIsPreCodeGenned())
return getReplacementExpr();
if (! ItemExpr::preCodeGen(generator))
return NULL;
// if my child's attributes EXCEPT for nullability are not the
// same as mine, do a conversion.
NABoolean doConversion = FALSE;
const NAType &myType = getValueId().getType();
const NAType &childType = child(0)->castToItemExpr()->getValueId().getType();
if (NOT (myType == childType)) // something is different
{
if ((myType.supportsSQLnull() &&
childType.supportsSQLnull()) ||
((NOT myType.supportsSQLnull()) &&
(NOT childType.supportsSQLnull())))
doConversion = TRUE; // both nullable or not nullable,
// something else is different
else if (myType.supportsSQLnull() &&
NOT childType.supportsSQLnull())
{
// create a new my type with the same null attr as child.
NAType * newType = myType.newCopy(generator->wHeap());
newType->resetSQLnullFlag();
if (NOT(*newType == childType))
doConversion = TRUE;
delete newType;
}
else
{
      // Fix for solution ID 10-031121-1505.
      // I don't think the following assert is correct.
// During VEG resolution a MIN/MAX() function can have a
// NON-NULLABLE child replaced by a nullable child, consider
// as an example the following query where i2 is not null:
//
// SELECT MIN(T0.i2)
// FROM D12 T0
// WHERE
// ?pa2 = T0.i2
// GROUP BY T0.i1;
//
// In the above case i2 will be replaced by ?pa2 when the VEG
// (i2, ?pa2) is resolved. Therefore it is possible to get a
// nullable child for a non-nullable aggregate. In the above
// case the aggregate is non-nullable because i2 is non-nullable.
// In such a case MIN(?pa2) would never be executed if ?pa2 is NULL
// because predicate '?pa2 = T0.i2' will not select any rows when
// ?pa2 is NULL (I am not sure how a parameter is set to NULL, for host
// vars we can use the NULL indicator, not sure how we pass in NULL using
// parameters).
//
// Assert on the following condition
// The condition where I am not nullable and my child is nullable,
// is an error case.
//GenAssert(0, "AggrMinMax::preCodeGen::Should not reach here.");
doConversion = TRUE;
}
}
if (doConversion)
{
// Insert a cast node to convert child to a result type.
child(0) = new (generator->wHeap()) Cast(child(0), &myType);
child(0)->bindNode(generator->getBindWA());
child(0) = child(0)->preCodeGen(generator);
if (! child(0).getPtr())
return NULL;
}
markAsPreCodeGenned();
return this;
} // AggrMinMax::preCodeGen()
ItemExpr * Between::preCodeGen(Generator * generator)
{
if (nodeIsPreCodeGenned())
return getReplacementExpr();
// transform "A BETWEEN B AND C" to "A >= B AND A <= C"
ItemExpr * newExpr =
generator->getExpGenerator()->createExprTree(
"@A1 >= @A2 AND @A1 <= @A3", 0, 3, child(0), child(1), child(2));
newExpr->bindNode(generator->getBindWA());
setReplacementExpr(newExpr->preCodeGen(generator));
markAsPreCodeGenned();
return getReplacementExpr();
}
// BiArithCount::preCodeGen
//
// The BiArithCount executor clause requires that all of the operands
// be of the same type. preCodeGen introduces cast operators on the
// input operands if necessary to enforce this requirement.
//
ItemExpr * BiArithCount::preCodeGen(Generator * generator)
{
if (nodeIsPreCodeGenned())
return this;
// Get a local handle on common generator objects.
//
CollHeap *wHeap = generator->wHeap();
const NAType &resultType = getValueId().getType();
const NAType &op1Type = child(0)->castToItemExpr()->getValueId().getType();
const NAType &op2Type = child(1)->castToItemExpr()->getValueId().getType();
// If the first operand type does not match that of the result,
// cast it to the result type.
//
if(!(op1Type == resultType))
{
child(0) = new(wHeap) Cast(child(0)->castToItemExpr(),
resultType.newCopy(wHeap),
ITM_CAST);
child(0)->synthTypeAndValueId();
}
// Ditto for the second operand.
//
if(!(op2Type == resultType))
{
child(1) = new(wHeap) Cast(child(1)->castToItemExpr(),
resultType.newCopy(wHeap),
ITM_CAST);
child(1)->synthTypeAndValueId();
}
return BiArith::preCodeGen(generator);
}
// BiArithSum::preCodeGen
//
// The BiArithSum executor clause requires that all of the operands
// be of the same type. preCodeGen introduces cast operators on the
// input operands if necessary to enforce this requirement.
//
ItemExpr * BiArithSum::preCodeGen(Generator * generator)
{
if (nodeIsPreCodeGenned())
return this;
// Get a local handle on common generator objects.
//
CollHeap *wHeap = generator->wHeap();
// Get a handle on the operand types.
//
const NAType &resultType = getValueId().getType();
const NAType &op1Type = child(0)->castToItemExpr()->getValueId().getType();
const NAType &op2Type = child(1)->castToItemExpr()->getValueId().getType();
// If the first operand type does not match that of the result,
// cast it to the result type.
//
if(!(op1Type == resultType))
{
child(0) = new(wHeap) Cast(child(0)->castToItemExpr(),
resultType.newCopy(wHeap),
ITM_CAST);
child(0)->synthTypeAndValueId();
}
// Ditto for the second operand.
//
if(!(op2Type == resultType))
{
child(1) = new(wHeap) Cast(child(1)->castToItemExpr(),
resultType.newCopy(wHeap),
ITM_CAST);
child(1)->synthTypeAndValueId();
}
ItemExpr *result = BiArith::preCodeGen(generator);
if (! result)
return NULL;
ItemExpr *outExpr = NULL;
Lng32 rc = generator->getExpGenerator()->foldConstants(child(0), &outExpr);
if ((rc == 0) &&
(outExpr))
{
child(0) = outExpr->preCodeGen(generator);
}
rc = generator->getExpGenerator()->foldConstants(child(1), &outExpr);
if ((rc == 0) &&
(outExpr))
{
child(1) = outExpr->preCodeGen(generator);
}
return this;
}
ItemExpr * BiArith::preCodeGen(Generator * generator)
{
if (nodeIsPreCodeGenned())
return this;
if (! ItemExpr::preCodeGen(generator))
return NULL;
NAType * result_type = (NAType *)(&(getValueId().getType()));
NAType * type_op1 = (NAType *)(&(child(0)->castToItemExpr()->getValueId().getType()));
NAType * type_op2 = (NAType *)(&(child(1)->castToItemExpr()->getValueId().getType()));
if (result_type->isComplexType())
{
if ((getOperatorType() == ITM_PLUS) ||
(getOperatorType() == ITM_MINUS))
{
child(0) = generator->getExpGenerator()->matchScales(child(0)->getValueId(),
*result_type);
child(1) = generator->getExpGenerator()->matchScales(child(1)->getValueId(),
*result_type);
}
else
if (getOperatorType() == ITM_DIVIDE)
{
// before doing the division, the numerator has to be upscaled.
// Lets find out how much.
// NS = numerator scale
// DS = denominator scale
// RS = result scale
// Upscale = (RS - NS) + DS
// Newscale = NS + Upscale = RS + DS
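        // For example (illustrative scales only): with NS = 2, DS = 4 and
        // RS = 6, Upscale = (6 - 2) + 4 = 8 and Newscale = 2 + 8 = 10,
        // i.e. the numerator is brought to scale RS + DS = 10 before the
        // divide so the quotient comes out at the result scale of 6.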
Lng32 newscale
= ((NumericType *)result_type)->getScale() +
((NumericType *)type_op2)->getScale();
if (newscale != ((NumericType *)type_op1)->getScale()) {
NAType * new_type = result_type->newCopy(generator->wHeap());
((NumericType *)new_type)->setScale(newscale);
child(0) = generator->getExpGenerator()->matchScales(
child(0)->getValueId(),
*new_type);
}
}
type_op1 = (NAType *)(&(child(0)->getValueId().getType()));
type_op2 = (NAType *)(&(child(1)->getValueId().getType()));
if (result_type->getFSDatatype() == type_op1->getFSDatatype())
{
if (((getOperatorType() == ITM_PLUS) ||
(getOperatorType() == ITM_MINUS) ||
(getOperatorType() == ITM_DIVIDE)) &&
(result_type->getNominalSize() != type_op1->getNominalSize()))
{
child(0) = new(generator->wHeap()) Cast(child(0), result_type);
}
}
else
{
if ((getOperatorType() == ITM_PLUS) ||
(getOperatorType() == ITM_MINUS) ||
(getOperatorType() == ITM_DIVIDE))
{
child(0) = new(generator->wHeap()) Cast(child(0), result_type);
}
else
{
child(0) =
new(generator->wHeap()) Cast(child(0),
result_type->synthesizeType(SYNTH_RULE_PASS_THRU_NUM,
*type_op1,
*result_type,
generator->wHeap()));
}
}
if (result_type->getFSDatatype() == type_op2->getFSDatatype())
{
if (((getOperatorType() == ITM_PLUS) ||
(getOperatorType() == ITM_MINUS)) &&
(result_type->getNominalSize() != type_op2->getNominalSize()))
{
child(1) = new(generator->wHeap()) Cast(child(1), result_type);
}
}
else
{
if ((getOperatorType() == ITM_PLUS) ||
(getOperatorType() == ITM_MINUS))
{
child(1) = new(generator->wHeap()) Cast(child(1), result_type);
}
else
{
child(1) =
new(generator->wHeap()) Cast(child(1),
result_type->synthesizeType(SYNTH_RULE_PASS_THRU_NUM,
*type_op2,
*result_type,
generator->wHeap()));
}
}
child(0)->bindNode(generator->getBindWA());
child(1)->bindNode(generator->getBindWA());
child(0) = child(0)->preCodeGen(generator);
if (! child(0).getPtr())
return NULL;
child(1) = child(1)->preCodeGen(generator);
if (! child(1).getPtr())
return NULL;
markAsPreCodeGenned();
return this;
}
// following is for simple types.
SimpleType * attr_result = (SimpleType *)
(ExpGenerator::convertNATypeToAttributes
(getValueId().getType(), generator->wHeap()));
SimpleType * attr_op1 = (SimpleType *)
(ExpGenerator::convertNATypeToAttributes
(child(0)->getValueId().getType(), generator->wHeap()));
SimpleType * attr_op2 = (SimpleType *)
(ExpGenerator::convertNATypeToAttributes
(child(1)->getValueId().getType(), generator->wHeap()));
// see if conversion needed before arithmetic operation could be done.
Int32 matchScale = 0;
if (result_type->getTypeQualifier() == NA_NUMERIC_TYPE)
{
// match scales
if ((getOperatorType() == ITM_PLUS) ||
(getOperatorType() == ITM_MINUS))
{
child(0) = generator->getExpGenerator()->matchScales(child(0)->getValueId(),
*result_type);
child(1) = generator->getExpGenerator()->matchScales(child(1)->getValueId(),
*result_type);
}
else
if (getOperatorType() == ITM_DIVIDE)
{
// before doing the division, the numerator has to be upscaled.
// Lets find out how much.
// NS = numerator scale
// DS = denominator scale
// RS = result scale
// Upscale = (RS - NS) + DS
// Newscale = NS + Upscale = RS + DS
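      // For example (illustrative scales only): with NS = 2, DS = 4 and
      // RS = 6, the numerator is upscaled by (6 - 2) + 4 = 8 digits to
      // scale 10 (= RS + DS), so dividing by the scale-4 denominator
      // yields the desired result scale of 6.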
Lng32 newscale
= ((NumericType *)result_type)->getScale() +
((NumericType *)type_op2)->getScale();
if (newscale != ((NumericType *)type_op1)->getScale()) {
NAType * new_type = result_type->newCopy(generator->wHeap());
((NumericType *)new_type)->setScale(newscale);
child(0) = generator->getExpGenerator()->matchScales(
child(0)->getValueId(),
*new_type);
matchScale = 1;
}
}
}
else if (result_type->getTypeQualifier() == NA_INTERVAL_TYPE) {
switch (getOperatorType()) {
case ITM_PLUS:
case ITM_MINUS:
if (type_op1->getTypeQualifier() == NA_DATETIME_TYPE) {
#pragma nowarn(1506) // warning elimination
Lng32 fp1 = ((DatetimeType *) type_op1)->getFractionPrecision();
#pragma warn(1506) // warning elimination
#pragma nowarn(1506) // warning elimination
Lng32 fp2 = ((DatetimeType *) type_op2)->getFractionPrecision();
#pragma warn(1506) // warning elimination
if (fp1 < fp2) {
child(0) = new(generator->wHeap()) Cast(child(0), type_op2);
child(0)->bindNode(generator->getBindWA());
} else if (fp1 > fp2) {
child(1) = new(generator->wHeap()) Cast(child(1), type_op1);
child(1)->bindNode(generator->getBindWA());
}
} else {
child(0) = generator->getExpGenerator()->matchIntervalEndFields(
child(0)->getValueId(),
*result_type);
child(1) = generator->getExpGenerator()->matchIntervalEndFields(
child(1)->getValueId(),
*result_type);
child(0) = generator->getExpGenerator()->matchScales(
child(0)->getValueId(),
*result_type);
child(1) = generator->getExpGenerator()->matchScales(
child(1)->getValueId(),
*result_type);
type_op1 = (NAType *)(&(child(0)->getValueId().getType()));
type_op2 = (NAType *)(&(child(1)->getValueId().getType()));
if (result_type->getNominalSize() != type_op1->getNominalSize()) {
child(0) = new(generator->wHeap()) Cast(child(0), result_type);
child(0)->bindNode(generator->getBindWA());
}
if (result_type->getNominalSize() != type_op2->getNominalSize()) {
child(1) = new(generator->wHeap()) Cast(child(1), result_type);
child(1)->bindNode(generator->getBindWA());
}
}
break;
case ITM_TIMES: {
//
// Unfortunately, the multiply node may be the root ItemExpr node, and
// we can't change the root ItemExpr node since its ValueId has already
// been stored away in the parent RelExpr's ValueIdLists. We'll have to
// move the expression down, e.g.
//
// * <-- same root --> *
// / \ / \
// I N becomes I 1
// |
// *
// / \
// N N
// |
// I
//
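    // Concretely (as done in the code below): the interval operand is
    // converted to numeric, the multiply is rebuilt as a numeric subtree
    // under child(0) and converted back to the interval result type, and
    // child(1) is replaced by the constant 1 (written as "001" so it is
    // not typed as a tinyint). This root node therefore keeps its
    // original ValueId and simply computes interval * 1.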
if (type_op1->getTypeQualifier() == NA_INTERVAL_TYPE)
child(0) = generator->getExpGenerator()->convertIntervalToNumeric(
child(0)->getValueId());
else
child(1) = generator->getExpGenerator()->convertIntervalToNumeric(
child(1)->getValueId());
char str[20];
strcpy(str, "@A1 * @A2");
child(0) = generator->getExpGenerator()->createExprTree(str, 0,
2,
child(0),
child(1));
child(0)->bindNode(generator->getBindWA());
child(0) = generator->getExpGenerator()->convertNumericToInterval(
child(0)->getValueId(),
*result_type);
strcpy(str, "001"); // to make sure it is not a tinyint
child(1) = generator->getExpGenerator()->createExprTree(str, CharInfo::ISO88591);
child(1)->bindNode(generator->getBindWA());
type_op2 = (NAType *)(&(child(1)->getValueId().getType()));
if ((result_type->getNominalSize() != type_op2->getNominalSize()) ||
(type_op2->getFSDatatype() != REC_BIN16_SIGNED)) {
IntervalType *interval = (IntervalType *) result_type;
const Int16 DisAmbiguate = 0;
child(1) = new(generator->wHeap()) Cast(child(1),
new(generator->wHeap()) SQLNumeric(TRUE, /* signed */
#pragma nowarn(1506) // warning elimination
interval->getTotalPrecision(),
0,
DisAmbiguate, // added for 64bit proj.
interval->supportsSQLnull()));
#pragma warn(1506) // warning elimination
child(1)->bindNode(generator->getBindWA());
}
break;
}
case ITM_DIVIDE: {
//
// Unfortunately, the divide node may be the root ItemExpr node, and
// we can't change the root ItemExpr node since its ValueId has already
// been stored away in the parent RelExpr's ValueIdLists. We'll have to
// move the expression down, e.g.
//
// div <-- same root --> div
// / \ / \
// I N becomes I 1
// |
// div
// / \
// N N
// |
// I
//
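    // Concretely, as with ITM_TIMES above: the interval numerator is
    // converted to numeric, the divide is rebuilt as a numeric subtree
    // under child(0) and converted back to the interval result type, and
    // child(1) is replaced by the constant 1, so this root node keeps its
    // original ValueId and computes interval / 1.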
child(0) = generator->getExpGenerator()->convertIntervalToNumeric(
child(0)->getValueId());
char str[20];
strcpy(str, "@A1 / @A2");
child(0) = generator->getExpGenerator()->createExprTree(str, 0,
2,
child(0),
child(1));
child(0)->bindNode(generator->getBindWA());
child(0) = generator->getExpGenerator()->convertNumericToInterval(
child(0)->getValueId(),
*result_type);
strcpy(str, "001"); // to make sure it is not a tinyint
child(1) = generator->getExpGenerator()->createExprTree(str, CharInfo::ISO88591);
child(1)->bindNode(generator->getBindWA());
type_op2 = (NAType *)(&(child(1)->getValueId().getType()));
if ((result_type->getNominalSize() != type_op2->getNominalSize()) ||
(type_op2->getFSDatatype() != REC_BIN16_SIGNED)) {
IntervalType *interval = (IntervalType *) result_type;
const Int16 DisAmbiguate = 0;
child(1) = new(generator->wHeap()) Cast(child(1),
new(generator->wHeap()) SQLNumeric(TRUE, /* signed */
#pragma nowarn(1506) // warning elimination
interval->getTotalPrecision(),
0,
DisAmbiguate, // added for 64bit proj.
interval->supportsSQLnull()));
#pragma warn(1506) // warning elimination
child(1)->bindNode(generator->getBindWA());
}
break;
}
default:
break;
}
} else if (result_type->getTypeQualifier() == NA_DATETIME_TYPE) {
switch (getOperatorType()) {
case ITM_PLUS:
case ITM_MINUS: {
if ((type_op1->getTypeQualifier() == NA_INTERVAL_TYPE) &&
(((IntervalType*) type_op1)->getEndField() == REC_DATE_SECOND)) {
#pragma nowarn(1506) // warning elimination
Lng32 sourceScale = ((IntervalType *) type_op1)->getFractionPrecision();
Lng32 targetScale = ((DatetimeType *) type_op2)->getFractionPrecision();
#pragma warn(1506) // warning elimination
child(0) = generator->getExpGenerator()->scaleBy10x(
child(0)->getValueId(),
targetScale - sourceScale);
} else if ((type_op2->getTypeQualifier() == NA_INTERVAL_TYPE) &&
(((IntervalType*) type_op2)->getEndField() == REC_DATE_SECOND)) {
#pragma nowarn(1506) // warning elimination
Lng32 targetScale = ((DatetimeType *) type_op1)->getFractionPrecision();
Lng32 sourceScale = ((IntervalType *) type_op2)->getFractionPrecision();
#pragma warn(1506) // warning elimination
child(1) = generator->getExpGenerator()->scaleBy10x(
child(1)->getValueId(),
targetScale - sourceScale);
}
// Extend the datetime to contain a YEAR field if needed. The
// value will need to be extended if it contains a DAY field but
// does not already contain a YEAR field. This is necessary
// since with the introduction of non-standard SQL/MP datetime
// types, it is possible to have a datetime value which has a
// DAY field but not a YEAR or not a MONTH field. In this
// situation, it is not possible to define a meaningful way to
// do the operation. Does the DAY field wrap at 30, 31, 28, or
      // 29?  So, to make this operation meaningful, the value is
// extended to the current timestamp.
//
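      // For example (illustrative type only): a SQL/MP style
      // DATETIME MONTH TO DAY operand would be cast below to an extended
      // YEAR TO DAY subtype (same fraction precision), with the missing
      // leading fields coming from the current timestamp, before the
      // interval arithmetic is performed.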
if (type_op1->getTypeQualifier() == NA_DATETIME_TYPE) {
if(((DatetimeType *) type_op1)->containsField(REC_DATE_DAY) &&
! ((DatetimeType *) type_op1)->containsField(REC_DATE_YEAR)) {
// Need to extend the given datetime value in order to be
// able to do the operation. Extend the value out to the
// YEAR field.
//
DatetimeType *extendedType =
DatetimeType::constructSubtype(type_op1->supportsSQLnull(),
REC_DATE_YEAR,
((DatetimeType *)type_op1)->getEndField(),
((DatetimeType *)type_op1)->getFractionPrecision(),
generator->wHeap());
// Cast the given value to the extended type.
//
child(0) = new (generator->wHeap()) Cast(child(0), extendedType);
child(0)->bindNode(generator->getBindWA());
}
} else {
if(((DatetimeType *) type_op2)->containsField(REC_DATE_DAY) &&
! ((DatetimeType *) type_op2)->containsField(REC_DATE_YEAR)) {
// Need to extend the given datetime value in order to be
// able to do the operation. Extend the value out to the
// YEAR field.
//
DatetimeType *extendedType =
DatetimeType::constructSubtype(type_op2->supportsSQLnull(),
REC_DATE_YEAR,
((DatetimeType *)type_op2)->getEndField(),
((DatetimeType *)type_op2)->getFractionPrecision(),
generator->wHeap());
// Cast the given value to the extended type.
//
child(1) = new (generator->wHeap()) Cast(child(1), extendedType);
child(1)->bindNode(generator->getBindWA());
}
}
break;
}
default:
break;
}
}
// NABoolean convertRoundedDivResult = FALSE;
// If this arith operation is supported at runtime, then no
// conversion is needed. Done for result numeric type only.
if (result_type->getTypeQualifier() == NA_NUMERIC_TYPE)
{
child(0) = child(0)->preCodeGen(generator);
if (! child(0).getPtr())
return NULL;
child(1) = child(1)->preCodeGen(generator);
if (! child(1).getPtr())
return NULL;
attr_result = (SimpleType *)
(ExpGenerator::convertNATypeToAttributes
(getValueId().getType(), generator->wHeap()));
attr_op1 = (SimpleType *)
(ExpGenerator::convertNATypeToAttributes
(child(0)->getValueId().getType(), generator->wHeap()));
attr_op2 = (SimpleType *)
(ExpGenerator::convertNATypeToAttributes
(child(1)->getValueId().getType(), generator->wHeap()));
ex_arith_clause temp_clause(getOperatorType(), NULL, NULL,
getRoundingMode(), getDivToDownscale());
if (temp_clause.isArithSupported(getOperatorType(),
attr_op1,
attr_op2,
attr_result
))
{
markAsPreCodeGenned();
return this;
}
}
// if the datatype or lengths of child and this don't match, then
// conversion is needed.
type_op1 = (NAType *)(&(child(0)->getValueId().getType()));
type_op2 = (NAType *)(&(child(1)->getValueId().getType()));
if ((result_type->getTypeQualifier() != NA_INTERVAL_TYPE) &&
(result_type->getTypeQualifier() != NA_DATETIME_TYPE) &&
((result_type->getFSDatatype() != type_op1->getFSDatatype()) ||
(result_type->getNominalSize() != type_op1->getNominalSize())))
{
// If the result type is not a float, make sure that the following
// Cast does not scale (for floats we have do do scaling). This is
// done by using the result type but changing the scale to the scale
// of the operand
NAType * new_type = result_type->newCopy(generator->wHeap());
if ((result_type->getFSDatatype() < REC_MIN_FLOAT) ||
(result_type->getFSDatatype() > REC_MAX_FLOAT)) {
((NumericType *)new_type)->
setScale(((NumericType *)type_op1)->getScale());
};
child(0) = new(generator->wHeap()) Cast(child(0), new_type,
ITM_CAST, FALSE);
}
if ((result_type->getTypeQualifier() != NA_INTERVAL_TYPE) &&
(result_type->getTypeQualifier() != NA_DATETIME_TYPE) &&
((result_type->getFSDatatype() != type_op2->getFSDatatype()) ||
(result_type->getNominalSize() != type_op2->getNominalSize())))
{
NAType * new_type = result_type->newCopy(generator->wHeap());
if ((result_type->getFSDatatype() < REC_MIN_FLOAT) ||
(result_type->getFSDatatype() > REC_MAX_FLOAT) ||
matchScale)
{
((NumericType *)new_type)->
setScale(((NumericType *)type_op2)->getScale());
};
child(1) = new(generator->wHeap()) Cast(child(1), new_type,
ITM_CAST, FALSE);
}
child(0)->bindNode(generator->getBindWA());
child(1)->bindNode(generator->getBindWA());
child(0) = child(0)->preCodeGen(generator);
if (! child(0).getPtr())
return NULL;
child(1) = child(1)->preCodeGen(generator);
if (! child(1).getPtr())
return NULL;
markAsPreCodeGenned();
return this;
} // BiArith::preCodeGen()
ItemExpr * UnArith::preCodeGen(Generator * generator)
{
if (nodeIsPreCodeGenned())
return this;
if (! ItemExpr::preCodeGen(generator))
return NULL;
return this;
}
ItemExpr * BiLogic::preCodeGen(Generator * generator)
{
if (nodeIsPreCodeGenned())
return this;
if (! ItemExpr::preCodeGen(generator))
return NULL;
ItemExpr *result = this;
ItemExpr *INlhs = NULL;
if (CmpCommon::getDefault(OR_PRED_ADD_BLOCK_TO_IN_LIST) == DF_ON
&& createdFromINlist() && (INlhs=getINlhs())!=NULL)
{
// ItmBlockFunction serves like the "C/C++ comma" expression that
// 1) evaluates its 1st operand, 2nd operand, and
// 2) returns its 2nd operand as value of that expression.
// ItmBlockFunction also has the codegen property that
// its 1st operand is evaluated (codegen'ed) only once
// even if 1st operand occurs multiple times in 2nd operand.
// So, given "UPPER(n) IN ('a', 'b')" that has been converted to
// ItmBlockFunction
// / \
// U OR
// / \
// = =
// / \ / \
// U a U b
// "UPPER(n)", represented as U, is evaluated once even if
// it's used multiple times in the OR expression.
      // Trying to add ItmBlockFunction early in the parser (i.e., in
      // sqlparseraux.cpp convertINvaluesToOR()) causes a lot of grief
// especially in cardinality estimation code. So, we resort to
// doing it late, here in precodegen.
result = new(generator->wHeap()) ItmBlockFunction(INlhs, result);
result->synthTypeAndValueId();
result->markAsPreCodeGenned();
return result;
}
markAsPreCodeGenned();
return result;
}
ItemExpr * BiRelat::preCodeGen(Generator * generator)
{
if (nodeIsPreCodeGenned())
return this;
// transform multivalue predicates to single-value comparisons.
ItemExpr * newNode = transformMultiValuePredicate();
if (newNode)
{
#ifdef _DEBUG
// NAString unp;
// unparse(unp);
// cerr << "BiRelat::preCodeGen - " << unp << " needed to be transformed!"
// << endl;
// I don't think we should ever have an untransformed MVP at this stage!
#endif
// transformMultiValuePredicate() cannot do synthTypeAndValue()
// because it is also called from the normalizer in places
// where it needs to postpone it.
newNode->synthTypeAndValueId();
return newNode->preCodeGen(generator);
}
if (! ItemExpr::preCodeGen(generator))
return NULL;
NAType * type_op1 = (NAType *)(&(child(0)->getValueId().getType()));
NAType * type_op2 = (NAType *)(&(child(1)->getValueId().getType()));
if ((type_op1->isComplexType()) || (type_op2->isComplexType()))
{
// find the 'super' type
const NAType *result_type = type_op1->synthesizeType(SYNTH_RULE_UNION,
*type_op1,
*type_op2,
generator->wHeap());
CMPASSERT(result_type);
if (result_type->getTypeQualifier() == NA_NUMERIC_TYPE)
{
// match scales
child(0) = generator->getExpGenerator()->matchScales(child(0)->getValueId(),
*result_type);
child(1) = generator->getExpGenerator()->matchScales(child(1)->getValueId(),
*result_type);
}
type_op1 = (NAType *)(&(child(0)->getValueId().getType()));
type_op2 = (NAType *)(&(child(1)->getValueId().getType()));
if ((result_type->getFSDatatype() != type_op1->getFSDatatype()) ||
(result_type->getNominalSize() != type_op1->getNominalSize()))
{
child(0) = new(generator->wHeap()) Cast(child(0), result_type);
}
if ((result_type->getFSDatatype() != type_op2->getFSDatatype()) ||
(result_type->getNominalSize() != type_op2->getNominalSize()))
{
child(1) = new(generator->wHeap()) Cast(child(1), result_type);
}
child(0)->bindNode(generator->getBindWA());
child(1)->bindNode(generator->getBindWA());
child(0) = child(0)->preCodeGen(generator);
if (! child(0).getPtr())
return NULL;
child(1) = child(1)->preCodeGen(generator);
if (! child(1).getPtr())
return NULL;
markAsPreCodeGenned();
return this;
}
const NAType &type1A =
child(0)->castToItemExpr()->getValueId().getType();
const NAType &type2A =
child(1)->castToItemExpr()->getValueId().getType();
if ((type1A.getTypeQualifier() == NA_CHARACTER_TYPE) &&
(type2A.getTypeQualifier() == NA_CHARACTER_TYPE))
{
const CharType &cType1A = (CharType&)type1A;
const CharType &cType2A = (CharType&)type2A;
CharInfo::Collation cType1A_coll = cType1A.getCollation();
CharInfo::Collation cType2A_coll = cType2A.getCollation();
//
// When Implicit Casting And Translation feature is enabled, it is
      // possible for the binder to allow a comparison between an ISO88591-type
// value and a UCS2-type value to be passed through to the generator.
// If that happens, we throw in a Translate node at this point.
//
CharInfo::CharSet cType1A_CS = cType1A.getCharSet() ;
CharInfo::CharSet cType2A_CS = cType2A.getCharSet() ;
if ( ( cType1A_CS != cType2A_CS ) &&
( cType1A_CS != CharInfo::UnknownCharSet ) &&
( cType2A_CS != CharInfo::UnknownCharSet ) )
{
Int32 chld_to_trans = 0;
if ( cType1A_CS != CharInfo::ISO88591 )
{
if ( (cType1A_CS == CharInfo::UNICODE) ) chld_to_trans = 1;
if ( (cType1A_CS == CharInfo::UTF8) && (cType2A_CS != CharInfo::UNICODE) ) chld_to_trans = 1;
if ( (cType1A_CS == CharInfo::SJIS) && (cType2A_CS == CharInfo::ISO88591) ) chld_to_trans = 1;
}
Int32 tran_type = Translate::UNKNOWN_TRANSLATION;
if ( chld_to_trans == 0 )
tran_type = find_translate_type( cType1A_CS, cType2A_CS );
else
tran_type = find_translate_type( cType2A_CS, cType1A_CS );
ItemExpr * newChild = NULL;
newChild = new (generator->wHeap()) Translate(child(chld_to_trans), tran_type);
newChild = newChild->bindNode(generator->getBindWA());
newChild = newChild->preCodeGen(generator);
if (! newChild)
return NULL;
setChild(chld_to_trans, newChild);
}
else if ( cType1A_coll != cType2A_coll &&
cType1A_CS == CharInfo::ISO88591 &&
cType1A_CS == cType2A_CS &&
child(1)->getOperatorType() == ITM_CONSTANT &&
CollationInfo::isSystemCollation(cType1A_coll))
{
ItemExpr * pNewChild2 = NULL;
NAType * pNewType2 = cType2A.newCopy(generator->wHeap());
CharType * pNewCType2 = NULL;
if (pNewType2 != NULL)
pNewCType2 = (CharType*)pNewType2;
if (pNewCType2 != NULL)
pNewCType2->setCollation(cType1A_coll);
pNewChild2 = new (generator->wHeap()) Cast(child(1), pNewCType2);
pNewChild2 = pNewChild2->bindNode(generator->getBindWA());
pNewChild2 = pNewChild2->preCodeGen(generator);
if (pNewChild2 == NULL)
return NULL;
setChild(1, pNewChild2);
}
// Regenerate the types...before we continue with rest of code
type_op1 = (NAType *)(&(child(0)->getValueId().getType()));
type_op2 = (NAType *)(&(child(1)->getValueId().getType()));
      ItemExpr * pChild1 = child(0)->castToItemExpr();
const NAType &type1 = pChild1->getValueId().getType();
const CharType &cType1 = (CharType&)type1;
ItemExpr * pChild2 = child(1)->castToItemExpr();
const NAType &type2 = pChild2->getValueId().getType();
const CharType &cType2 = (CharType&)type2;
CharInfo::Collation coll1 = cType1.getCollation();
CharInfo::Collation coll2 = cType2.getCollation();
CMPASSERT(coll1==coll2);
//LCOV_EXCL_START : cnu - Should not count in Code Coverage until we support non-binary collation in SQ
if (CollationInfo::isSystemCollation(coll1))
{
setCollationEncodeComp(TRUE);
{
ItemExpr * newIe1 = child(0);
ItemExpr * newIe2 = child(1);
if (! (cType1 == cType2))
{
NAType *resultType ;
Lng32 len = MAXOF(cType1.getMaxLenInBytesOrNAWChars(), cType2.getMaxLenInBytesOrNAWChars());
Lng32 Prec= MAXOF(cType1.getStrCharLimit(), cType2.getStrCharLimit());
if (len != cType1.getMaxLenInBytesOrNAWChars())
{
if (DFS2REC::isAnyVarChar(cType1.getFSDatatype()))
{
resultType = new (generator->wHeap())
SQLVarChar( CharLenInfo(Prec, len),
cType1.supportsSQLnull(),
cType1.isUpshifted(),
cType1.isCaseinsensitive(),
cType1.getCharSet(),
cType1.getCollation(),
cType1.getCoercibility()
);
}
else
{
resultType = new (generator->wHeap())
SQLChar( CharLenInfo(Prec, len),
cType1.supportsSQLnull(),
cType1.isUpshifted(),
cType1.isCaseinsensitive(),
FALSE,
cType1.getCharSet(),
cType1.getCollation(),
cType1.getCoercibility()
);
}
newIe1 = new(generator->wHeap()) Cast(newIe1,resultType);
}
if (len != cType2.getMaxLenInBytesOrNAWChars())
{
if (DFS2REC::isAnyVarChar(cType2.getFSDatatype()))
{
resultType = new (generator->wHeap())
SQLVarChar( CharLenInfo(Prec, len),
cType2.supportsSQLnull(),
cType2.isUpshifted(),
cType2.isCaseinsensitive(),
cType2.getCharSet(),
cType2.getCollation(),
cType2.getCoercibility()
);
}
else
{
resultType = new (generator->wHeap())
SQLChar( CharLenInfo(Prec, len),
cType2.supportsSQLnull(),
cType2.isUpshifted(),
cType2.isCaseinsensitive(),
FALSE,
cType2.getCharSet(),
cType2.getCollation(),
cType2.getCoercibility()
);
}
newIe2 = new(generator->wHeap()) Cast(newIe2,resultType);
}
}
ItemExpr * newEncode;
newEncode =
new(generator->wHeap())
CompEncode(newIe1,FALSE, -1, CollationInfo::Compare);
newEncode->bindNode(generator->getBindWA());
newEncode = newEncode->preCodeGen(generator);
if (!newEncode)
return NULL;
setChild(0, newEncode);
newEncode =
new(generator->wHeap())
CompEncode(newIe2, FALSE, -1,CollationInfo::Compare);
newEncode->bindNode(generator->getBindWA());
newEncode = newEncode->preCodeGen(generator);
if (!newEncode)
return NULL;
setChild(1, newEncode);
}
}
//LCOV_EXCL_STOP : cnu - Should not count in Code Coverage until we support non-binary collation in SQ
else
{
      // update both operands if case-insensitive comparisons
// are to be done.
NABoolean doCIcomp =
((cType1.isCaseinsensitive()) && (cType2.isCaseinsensitive()));
ItemExpr * newChild = NULL;
if ((doCIcomp) &&
(NOT cType1.isUpshifted()))
{
newChild = child(0);
// Add UPPER except if it is NULL constant value.
if (newChild->getOperatorType() != ITM_CONSTANT || !((ConstValue *)newChild)->isNull())
newChild = new (generator->wHeap()) Upper(newChild);
newChild = newChild->bindNode(generator->getBindWA());
if (! newChild || generator->getBindWA()->errStatus())
return NULL;
newChild = newChild->preCodeGen(generator);
if (! newChild)
return NULL;
setChild(0, newChild);
}
if ((doCIcomp) &&
(NOT cType2.isUpshifted()))
{
newChild = child(1);
// Add UPPER except if it is NULL constant value.
if (newChild->getOperatorType() != ITM_CONSTANT || !((ConstValue *)newChild)->isNull())
newChild = new (generator->wHeap()) Upper(newChild);
newChild = newChild->bindNode(generator->getBindWA());
if (! newChild || generator->getBindWA()->errStatus())
return NULL;
newChild = newChild->preCodeGen(generator);
if (! newChild)
return NULL;
setChild(1, newChild);
}
}
}
// following is for simple types.
const NAType &type1B =
child(0)->castToItemExpr()->getValueId().getType();
const NAType &type2B =
child(1)->castToItemExpr()->getValueId().getType();
SimpleType * attr_op1 = (SimpleType *)
(ExpGenerator::convertNATypeToAttributes(type1B, generator->wHeap()));
SimpleType * attr_op2 = (SimpleType *)
(ExpGenerator::convertNATypeToAttributes(type2B, generator->wHeap()));
ex_comp_clause temp_clause;
temp_clause.setInstruction(getOperatorType(),
attr_op1,
attr_op2
);
if ((temp_clause.getInstruction() == COMP_NOT_SUPPORTED) &&
(type1B.getTypeQualifier() == NA_NUMERIC_TYPE) &&
(type2B.getTypeQualifier() == NA_NUMERIC_TYPE))
{
const NumericType &numOp1 = (NumericType&)type1B;
const NumericType &numOp2 = (NumericType&)type2B;
if ((numOp1.isExact() && numOp2.isExact()) &&
((numOp1.getFSDatatype() == REC_BIN64_UNSIGNED) ||
(numOp2.getFSDatatype() == REC_BIN64_UNSIGNED)))
{
if (numOp1.getFSDatatype() == REC_BIN64_UNSIGNED)
{
// add a Cast node to convert op2 to sqllargeint.
ItemExpr * newOp2 =
new (generator->wHeap())
Cast(child(1),
new (generator->wHeap())
SQLLargeInt(numOp2.isSigned(),
numOp2.supportsSQLnull()));
newOp2 = newOp2->bindNode(generator->getBindWA());
newOp2 = newOp2->preCodeGen(generator);
if (! newOp2)
return NULL;
setChild(1, newOp2);
attr_op2 = (SimpleType *)
(ExpGenerator::convertNATypeToAttributes(
newOp2->getValueId().getType(), generator->wHeap()));
}
else
{
// add a Cast node to convert op1 to sqllargeint.
ItemExpr * newOp1 =
new (generator->wHeap())
Cast(child(0),
new (generator->wHeap())
SQLLargeInt(numOp1.isSigned(),
numOp1.supportsSQLnull()));
newOp1 = newOp1->bindNode(generator->getBindWA());
newOp1 = newOp1->preCodeGen(generator);
if (! newOp1)
return NULL;
setChild(0, newOp1);
attr_op1 = (SimpleType *)
(ExpGenerator::convertNATypeToAttributes(
newOp1->getValueId().getType(), generator->wHeap()));
}
temp_clause.setInstruction(getOperatorType(),
attr_op1,
attr_op2
);
} // convert
}
if (temp_clause.getInstruction() != COMP_NOT_SUPPORTED)
{
NABoolean doConstFolding = FALSE;
if ((temp_clause.getInstruction() == ASCII_COMP) &&
(CmpCommon::getDefault(CONSTANT_FOLDING) == DF_ON))
{
      // Fold only when exactly one side is a constant and both operands
      // are fixed-length ASCII.
      if ((((child(0)->getOperatorType() == ITM_CONSTANT) &&
            (child(1)->getOperatorType() != ITM_CONSTANT)) ||
           ((child(1)->getOperatorType() == ITM_CONSTANT) &&
            (child(0)->getOperatorType() != ITM_CONSTANT))) &&
          (type_op1->getFSDatatype() == REC_BYTE_F_ASCII) &&
          (type_op2->getFSDatatype() == REC_BYTE_F_ASCII))
{
if (((child(0)->getOperatorType() == ITM_CONSTANT) &&
(type_op1->getNominalSize() < type_op2->getNominalSize())) ||
((child(1)->getOperatorType() == ITM_CONSTANT) &&
(type_op2->getNominalSize() < type_op1->getNominalSize())))
{
doConstFolding = TRUE;
}
}
}
if (NOT doConstFolding)
{
markAsPreCodeGenned();
return this;
}
}
// conversion needed before comparison could be done.
// find the 'super' type
UInt32 flags =
((CmpCommon::getDefault(LIMIT_MAX_NUMERIC_PRECISION) == DF_ON)
? NAType::LIMIT_MAX_NUMERIC_PRECISION : 0);
if (CmpCommon::getDefault(ALLOW_INCOMPATIBLE_OPERATIONS) == DF_ON)
{
flags |= NAType::ALLOW_INCOMP_OPER;
}
const NAType *result_type = type_op1->synthesizeType(SYNTH_RULE_UNION,
*type_op1,
*type_op2,
generator->wHeap(),
&flags);
CMPASSERT(result_type);
if (result_type->getTypeQualifier() == NA_NUMERIC_TYPE)
{
// match scales
child(0) = generator->getExpGenerator()->matchScales(child(0)->getValueId(),
*result_type);
child(1) = generator->getExpGenerator()->matchScales(child(1)->getValueId(),
*result_type);
}
else if (result_type->getTypeQualifier() == NA_DATETIME_TYPE) {
#pragma nowarn(1506) // warning elimination
Lng32 fp1 = ((DatetimeType *) type_op1)->getFractionPrecision();
#pragma warn(1506) // warning elimination
#pragma nowarn(1506) // warning elimination
Lng32 fp2 = ((DatetimeType *) type_op2)->getFractionPrecision();
#pragma warn(1506) // warning elimination
#pragma nowarn(1506) // warning elimination
Lng32 fpResult = ((DatetimeType *) result_type)->getFractionPrecision();
#pragma warn(1506) // warning elimination
if (fp1 != fpResult) {
child(0) = new(generator->wHeap()) Cast(child(0), result_type,
ITM_CAST, FALSE);
child(0)->bindNode(generator->getBindWA());
}
if (fp2 != fpResult) {
child(1) = new(generator->wHeap()) Cast(child(1), result_type,
ITM_CAST, FALSE);
child(1)->bindNode(generator->getBindWA());
}
} else if (result_type->getTypeQualifier() == NA_INTERVAL_TYPE) {
child(0) = generator->getExpGenerator()->matchIntervalEndFields(
child(0)->getValueId(),
*result_type);
child(1) = generator->getExpGenerator()->matchIntervalEndFields(
child(1)->getValueId(),
*result_type);
child(0) = generator->getExpGenerator()->matchScales(
child(0)->getValueId(),
*result_type);
child(1) = generator->getExpGenerator()->matchScales(
child(1)->getValueId(),
*result_type);
type_op1 = (NAType *)(&(child(0)->getValueId().getType()));
type_op2 = (NAType *)(&(child(1)->getValueId().getType()));
if (result_type->getNominalSize() != type_op1->getNominalSize()) {
child(0) = new(generator->wHeap()) Cast(child(0), result_type,
ITM_CAST, FALSE);
child(0)->bindNode(generator->getBindWA());
}
if (result_type->getNominalSize() != type_op2->getNominalSize()) {
child(1) = new(generator->wHeap()) Cast(child(1), result_type,
ITM_CAST, FALSE);
child(1)->bindNode(generator->getBindWA());
}
}
// if the datatype or lengths of child and this don't match, then
// conversion is needed.
type_op1 = (NAType *)(&(child(0)->getValueId().getType()));
type_op2 = (NAType *)(&(child(1)->getValueId().getType()));
if ((result_type->getTypeQualifier() != NA_INTERVAL_TYPE) &&
((result_type->getFSDatatype() != type_op1->getFSDatatype()) ||
(result_type->getNominalSize() != type_op1->getNominalSize())))
{
child(0) = new(generator->wHeap()) Cast(child(0), result_type,
ITM_CAST, FALSE);
}
if ((result_type->getTypeQualifier() != NA_INTERVAL_TYPE) &&
((result_type->getFSDatatype() != type_op2->getFSDatatype()) ||
(result_type->getNominalSize() != type_op2->getNominalSize())))
{
child(1) = new(generator->wHeap()) Cast(child(1), result_type,
ITM_CAST, FALSE);
}
// bind/type propagate the new nodes
child(0)->bindNode(generator->getBindWA());
child(1)->bindNode(generator->getBindWA());
child(0) = child(0)->preCodeGen(generator);
if (! child(0).getPtr())
return NULL;
child(1) = child(1)->preCodeGen(generator);
if (! child(1).getPtr())
return NULL;
ItemExpr *outExpr = NULL;
Lng32 rc = generator->getExpGenerator()->foldConstants(child(0), &outExpr);
if ((rc == 0) &&
(outExpr))
{
child(0) = outExpr->preCodeGen(generator);
}
rc = generator->getExpGenerator()->foldConstants(child(1), &outExpr);
if ((rc == 0) &&
(outExpr))
{
child(1) = outExpr->preCodeGen(generator);
}
markAsPreCodeGenned();
return this;
} // BiRelat::preCodeGen()
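// Assign::preCodeGen() matches the interval end fields and the scale of the
// source value (child 1) to the target's type, then rebinds and recursively
// preCodeGens the adjusted source.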
ItemExpr * Assign::preCodeGen(Generator * generator)
{
if (nodeIsPreCodeGenned())
return this;
child(1) = generator->getExpGenerator()->matchIntervalEndFields(
child(1)->getValueId(),
getValueId().getType());
child(1) = generator->getExpGenerator()->matchScales(child(1)->getValueId(),
getValueId().getType());
child(1)->bindNode(generator->getBindWA());
child(1) = child(1)->preCodeGen(generator);
if (! child(1).getPtr())
return NULL;
markAsPreCodeGenned();
return this;
} // Assign::preCodeGen()
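// BaseColumn::preCodeGen() only needs to convert columns of external-only
// types to their internal equivalents (see ItemExpr::convertExternalType()).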
ItemExpr * BaseColumn::preCodeGen(Generator * generator)
{
if (nodeIsPreCodeGenned())
return this;
ItemExpr * i = convertExternalType(generator);
if (i == NULL)
return NULL;
return i;
}
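// BitOperFunc::preCodeGen(): for BITEXTRACT, cast the 2nd and 3rd operands to
// 32-bit unsigned int; for all other bit operators, cast each operand to the
// result type if it does not already match.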
ItemExpr * BitOperFunc::preCodeGen(Generator * generator)
{
if (nodeIsPreCodeGenned())
return this;
if (getOperatorType() == ITM_BITEXTRACT)
{
      // convert 2nd and 3rd operands to Int32 unsigned.
for (Int32 i = 1; i < getArity(); i++)
{
const NAType &typ = child(i)->getValueId().getType();
if (typ.getFSDatatype() != REC_BIN32_UNSIGNED)
{
ItemExpr * newChild =
new (generator->wHeap())
Cast(child(i),
new (generator->wHeap()) SQLInt(FALSE,
typ.supportsSQLnullLogical()));
setChild(i, newChild);
child(i)->bindNode(generator->getBindWA());
child(i) = child(i)->preCodeGen(generator);
if (! child(i).getPtr())
return NULL;
} // if
} // for
}
else
{
for (Int32 i = 0; i < getArity(); i++)
{
const NAType &typ = child(i)->getValueId().getType();
if (NOT (getValueId().getType() == typ))
{
NAType *resultType =
getValueId().getType().newCopy(generator->wHeap());
ItemExpr * newChild =
new (generator->wHeap()) Cast(child(i), resultType);
setChild(i, newChild);
}
child(i)->bindNode(generator->getBindWA());
child(i) = child(i)->preCodeGen(generator);
if (! child(i).getPtr())
return NULL;
}
}
markAsPreCodeGenned();
return this;
} // BitOperFunc::preCodeGen()
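// Cast::preCodeGen() handles the special conversion cases that the executor
// cannot do directly: it drops redundant casts, turns a NARROW into a plain
// CAST when no conversion error is possible, goes through LARGEINT for
// numeric<->datetime and approximate-numeric->interval conversions, extends a
// datetime value with leading fields from the current timestamp when the
// target starts at an earlier field, matches scales, uses a BigNum
// intermediate for a numeric NARROW that needs scaling, and wraps character
// sources in LOBconvertHandle when the target is a BLOB/CLOB.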
ItemExpr * Cast::preCodeGen(Generator * generator)
{
if (nodeIsPreCodeGenned())
return this;
child(0)->bindNode(generator->getBindWA());
child(0) = child(0)->preCodeGen(generator);
if (! child(0).getPtr())
return NULL;
// if a special cast node, see if my child's data attributes are
// the same as my data attributes. If they are, return pointer to
// my child.
if ((matchChildType()) &&
(child(0)->getValueId().getType() == getValueId().getType()))
{
markAsPreCodeGenned();
return child(0);
}
NABuiltInTypeEnum sourceTypeQual =
child(0)->getValueId().getType().getTypeQualifier();
NABuiltInTypeEnum targetTypeQual =
getValueId().getType().getTypeQualifier();
// If this is a NARROW operation, but it is not possible to result
// in an error, no reason to use NARROW. Convert the NARROW to the
// equivalent CAST.
if (getOperatorType() == ITM_NARROW)
{
const NAType * sourceType = &(child(0)->getValueId().getType());
const NAType * targetType = &(getValueId().getType());
if (!sourceType->errorsCanOccur(*targetType))
{
ItemExpr *c = new(generator->wHeap()) Cast(child(0), targetType);
c->bindNode(generator->getBindWA());
return c->preCodeGen(generator);
}
}
if (generator->getExpGenerator()->handleUnsupportedCast(this))
return NULL;
const NAType &srcNAType = child(0)->getValueId().getType();
const NAType &tgtNAType = getValueId().getType();
short srcFsType = srcNAType.getFSDatatype();
short tgtFsType = tgtNAType.getFSDatatype();
if ((sourceTypeQual == NA_NUMERIC_TYPE) &&
(targetTypeQual == NA_DATETIME_TYPE))
{
// binder has already verified that this is a valid conversion
// in special1 mode.
NumericType &sourceType =
(NumericType &)(child(0)->getValueId().getType());
DatetimeType &targetType =
(DatetimeType &)(getValueId().getType());
if (sourceType.getFSDatatype() != REC_BIN64_SIGNED)
{
// doing a numeric to date conversion
// convert source to largeint.
ItemExpr * newChild =
new (generator->wHeap())
Cast(child(0),
new (generator->wHeap())
SQLLargeInt(TRUE,
child(0)->castToItemExpr()->
getValueId().getType().supportsSQLnull()));
newChild = newChild->bindNode(generator->getBindWA());
newChild = newChild->preCodeGen(generator);
if (! newChild)
return NULL;
setChild(0, newChild);
}
}
if ((sourceTypeQual == NA_DATETIME_TYPE) &&
(targetTypeQual == NA_NUMERIC_TYPE))
{
// binder has already verified that this is a valid conversion
// in special1 mode.
DatetimeType &sourceType =
(DatetimeType &)(child(0)->getValueId().getType());
NumericType &targetType =
(NumericType &)(getValueId().getType());
if (targetType.getFSDatatype() != REC_BIN64_SIGNED)
{
// doing a date to numeric conversion.
// convert source to largeint.
ItemExpr * newChild =
new (generator->wHeap())
Cast(child(0),
new (generator->wHeap())
SQLLargeInt(TRUE,
child(0)->castToItemExpr()->
getValueId().getType().supportsSQLnull()));
newChild = newChild->bindNode(generator->getBindWA());
newChild = newChild->preCodeGen(generator);
if (! newChild)
return NULL;
setChild(0, newChild);
}
  } // datetime to numeric conversion
if ((CmpCommon::getDefault(ALLOW_INCOMPATIBLE_OPERATIONS) == DF_ON) &&
(sourceTypeQual == NA_NUMERIC_TYPE) &&
(targetTypeQual == NA_INTERVAL_TYPE))
{
NumericType &sourceType =
(NumericType &)(child(0)->getValueId().getType());
if (NOT sourceType.isExact())
{
// doing a float numeric to interval conversion.
// convert source to corresponding exact numeric (largeint).
// This is the largest interval type that is supported.
ItemExpr * newChild =
new (generator->wHeap())
Cast(child(0),
new (generator->wHeap())
SQLLargeInt(TRUE,
child(0)->castToItemExpr()->
getValueId().getType().supportsSQLnull()));
newChild = newChild->bindNode(generator->getBindWA());
newChild = newChild->preCodeGen(generator);
if (! newChild)
return NULL;
setChild(0, newChild);
}
  } // numeric to interval conversion
if ((sourceTypeQual == NA_DATETIME_TYPE) &&
(targetTypeQual == NA_DATETIME_TYPE)) {
DatetimeType &sourceType =
(DatetimeType &)(child(0)->getValueId().getType());
DatetimeType &targetType =
(DatetimeType &)(getValueId().getType());
if (targetType.getStartField() < sourceType.getStartField()) {
// Must provide some fields from the current time stamp
//
// The following code generates the current timestamp as a
// string and extracts the needed leading fields and appends to
// this the given value (child(0)) as a string. The result is a
// string which contains the given datetime value extended to
// the YEAR field with the current timestamp.
//
// Buffer to hold new expression string.
//
char str[200];
// Offset (in bytes) from the start of the current timestamp
// (represented as a char. string) to the first field needed in
// the extension.
//
// - Subtract 1 from the start field to make the value zero based.
//
      // - Each field has at least 3 bytes (2 for the value and 1 for the
// delimiter)
//
// - Add 1, since the substring function is 1 based.
//
Int32 leadFieldsOffset = ((targetType.getStartField() - 1) * 3) + 1;
      // - Add 2 extra for the year field if it is being skipped over
// since it has 4 bytes of value.
//
if (leadFieldsOffset > 1)
leadFieldsOffset += 2;
// Size (in bytes) of the leading fields represented as a
// character string taken from the current timestamp
//
// - Subtract 1 from the start field to make the value zero based.
//
      // - Each field has at least 3 bytes (2 for the value and 1 for the
// delimiter)
//
// - Add 2 extra for the year field (which will always be one of
// the extended fields) since it has 4 bytes of value.
//
// - Subtract the leadFieldsOffset ( - 1 to make it zero based).
//
Int32 leadFieldsSize = ((((sourceType.getStartField() - 1) * 3) + 2)
- (leadFieldsOffset - 1));
// Size (in bytes) of the source value represented as a
// character string.
//
#pragma nowarn(1506) // warning elimination
Int32 sourceFieldsSize = sourceType.getDisplayLength();
#pragma warn(1506) // warning elimination
      // Construct an expression (string) to concatenate the given
// value with the required fields from the current timestamp as
// a string, then cast this string as a datetime value, that can
// be cast to the desired result.
//
// Example :
//
// cast(DATETIME 'dd hh:mm:ss' DAY TO SECOND as DATETIME MONTH to MINUTE)
//
// current timestamp (as string) | "YYYY-MM-DD HH:MM:SS.FFFFFF"
// |
// leadFieldsOffset = ((2-1)*3)+1 +2 = | --6--^
// |
// leadFieldsSize = (((3-1)*3)+2) - 5 =| ^3^
// |
      // result of substring(cts from 6 for 3)| "MM-"
// |
// value to be extended (as string) | "dd hh:mm:ss"
// |
// result of string concat. (as string)| "MM-dd hh:mm:ss"
// |
// Cast to a datetime MONTH TO SECOND | Mdhms
// |
// Original (this) cast to result | Mdhm
//
str_sprintf(str,
"CAST((SUBSTRING(CAST(CURRENT AS CHAR(19)) "
"FROM %d FOR %d) || CAST(@A1 AS CHAR(%d))) "
"AS DATETIME %s TO %s)",
leadFieldsOffset,
leadFieldsSize,
sourceFieldsSize,
targetType.getFieldName(targetType.getStartField()),
((sourceType.getEndField() == REC_DATE_SECOND)
? "FRACTION(6)"
: sourceType.getFieldName(sourceType.getEndField())));
GenAssert(str_len(str) < 199,"Internal Datetime Error Cast::preCodeGen");
ItemExpr * newExpr =
generator->getExpGenerator()->createExprTree(str, 0,
1, child(0));
newExpr->bindNode(generator->getBindWA());
child(0) = newExpr->preCodeGen(generator);
}
}
// Call matchScales only if both datatypes aren't intervals.
// (We make the exception for intervals because Cast is able
// to match the scales of intervals itself.)
// Also, we suppress the call to matchScales() for a narrow.
// This is because narrow will handle the scaling differently.
// Conversions from float to bignum are also not scaled here. Scaling
// is done in BigNum::castFrom method.
if (NOT ((getOperatorType() == ITM_NARROW) ||
((sourceTypeQual == NA_INTERVAL_TYPE) &&
(targetTypeQual == NA_INTERVAL_TYPE)) ||
((DFS2REC::isFloat(srcFsType)) &&
(DFS2REC::isBigNum(tgtFsType)))))
{
child(0) = generator->getExpGenerator()->matchScales(
child(0)->getValueId(),
getValueId().getType());
child(0) = child(0)->preCodeGen(generator);
if (! child(0).getPtr())
return NULL;
}
// For a numeric NARROW, check if scaling is needed.
if (targetTypeQual == NA_NUMERIC_TYPE &&
getOperatorType() == ITM_NARROW)
{
GenAssert(sourceTypeQual == NA_NUMERIC_TYPE,
"source type and target type incompatible in NARROW");
const NumericType * sourceNumType
= (const NumericType *)(&child(0)->getValueId().getType());
const NumericType * targetNumType
= (const NumericType *)(&getValueId().getType());
if (sourceNumType->getScale() != targetNumType->getScale())
{
// We need to scale the value. We don't want to use the
// usual scaling method of simply multiplying or dividing
// the result because we need to capture truncations
// and overflows at run time. The Narrow operator supports
// scaling for the BigNum-to-any-numeric type case.
// Therefore, we first cast the value to BigNum,
// then narrow it down.
// Soln 10-041105-1519
          // Don't introduce the CAST operator if the target is already a BigNum
// because NARROW does not support scaling for the BigNum-to-BigNum
// case. Use the usual scaling method instead.
if (targetNumType->isBigNum())
{
child(0) = generator->getExpGenerator()->matchScales(
child(0)->getValueId(), *targetNumType);
}
else
{
Lng32 intermediatePrecision = sourceNumType->getPrecision();
Lng32 intermediateScale = sourceNumType->getScale();
// SQLBigNum takes decimal precision, so if the source
// has binary precision, we need to adjust.
if (sourceNumType->binaryPrecision())
{
// Can fit three binary digits in the space of one
// decimal digit. The '+5' in the precision calculation
// allows for an extra digit before and after the
// radix point.
intermediatePrecision = (intermediatePrecision+5)/3;
}
// If we need to cast an approximate, increase the length
// and scale so that the number can be represented now that
// it won't have an exponent.
// In each of the cases below, the formula used to calculate
// precision is:
//
// intermediatePrecision = 2 * <max exponent>
// + <# significant digits in mantissa> + 1
//
// We use 2 * <max exponent> to take into account the
// maximum positive exponent as well as the maximum
// negative exponent.
//
// The formula used to calculate scale is:
//
// intermediateScale = <max exponent> +
// <# significant digits in mantissa> - 1
//
// Here the exponent and digits are understood to be decimal,
// not binary.
//
// For the various kinds of floats we have:
//
// Kind Max exponent Decimal digits in Mantissa
// ----------- ------------ --------------------------
// IEEE 32 bit 38 7
// IEEE 64 bit 308 17
if (sourceNumType->getFSDatatype() == REC_IEEE_FLOAT32)
{
intermediatePrecision = 84; // (2 x 38) + 7 + 1 = 84
intermediateScale = 44; // 38 + 7 - 1 = 44
}
else if (sourceNumType->getFSDatatype() == REC_IEEE_FLOAT64)
{
intermediatePrecision = 634; // (2 x 308) + 17 + 1 = 634
intermediateScale = 324; // 308 + 17 - 1 = 324
}
NAType * intermediateType =
new(generator->wHeap())
SQLBigNum(intermediatePrecision,
intermediateScale,
(sourceNumType->isBigNum() &&
((SQLBigNum*)sourceNumType)->isARealBigNum()),
TRUE, // make it signed
sourceNumType->supportsSQLnull(),
NULL);
child(0) = new(generator->wHeap()) Cast(child(0),intermediateType);
child(0)->bindNode(generator->getBindWA());
if (generator->getExpGenerator()->handleUnsupportedCast((Cast*)child(0)->castToItemExpr()))
return NULL;
// To suppress insertion of multiplying/dividing, mark Cast as
// already pre-code-genned.
child(0)->markAsPreCodeGenned();
}
}
}
if ((sourceTypeQual == NA_CHARACTER_TYPE) &&
((tgtFsType == REC_BLOB) ||
(tgtFsType == REC_CLOB)))
{
LOBconvertHandle * lc = new(generator->wHeap())
LOBconvertHandle(child(0), LOBoper::LOB_);
lc->bindNode(generator->getBindWA());
lc->preCodeGen(generator);
child(0) = lc;
}
if (getArity() > 1)
{
child(1)->bindNode(generator->getBindWA());
child(1) = child(1)->preCodeGen(generator);
if (! child(1).getPtr())
return NULL;
}
ItemExpr *result = this;
markAsPreCodeGenned();
return result;
} // Cast::preCodeGen()
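// CharFunc::preCodeGen() casts its operand (the code point value) to an
// unsigned INT before code generation.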
ItemExpr * CharFunc::preCodeGen(Generator * generator)
{
if (nodeIsPreCodeGenned())
return this;
const NAType &typ1 = child(0)->getValueId().getType();
  // Insert a cast node to convert child to an unsigned INT.
child(0) = new (generator->wHeap())
Cast(child(0), new (generator->wHeap()) SQLInt(FALSE,
typ1.supportsSQLnullLogical()));
child(0)->bindNode(generator->getBindWA());
child(0) = child(0)->preCodeGen(generator);
if (! child(0).getPtr())
return NULL;
markAsPreCodeGenned();
return this;
} // CharFunc::preCodeGen()
ItemExpr * CompEncode::preCodeGen(Generator * generator)
{
if (nodeIsPreCodeGenned())
return this;
  // during key encode expr generation, no need to convert external
  // column types (like Tandem floats) to their internal
  // equivalents (IEEE floats). Avoid doing preCodeGen in these cases.
// Do this only for child leaf nodes (columns, hostvar, params, literals).
//
if (NOT (child(0)->getValueId().getType().isExternalType() &&
child(0)->getArity() == 0)) {
child(0) = child(0)->preCodeGen(generator);
}
markAsPreCodeGenned();
return this;
} // CompEncode::preCodeGen()
ItemExpr * CompDecode::preCodeGen(Generator * generator)
{
if (nodeIsPreCodeGenned())
return this;
return CompEncode::preCodeGen(generator);
} // CompDecode::preCodeGen()
ItemExpr * Convert::preCodeGen(Generator * generator)
{
if (nodeIsPreCodeGenned())
return this;
// Since this CONVERT will convert its child to the original
// ExternalType, no need to ask it to first be cast to an internal
// type. So, do not call precodegen in these cases.
// Do this only for child leaf nodes (columns, hostvar, params, literals).
//
if (NOT (child(0)->getValueId().getType().isExternalType() &&
child(0)->getArity() == 0)) {
child(0) = child(0)->preCodeGen(generator);
}
markAsPreCodeGenned();
return this;
} // Convert::preCodeGen()
ItemExpr * ConvertTimestamp::preCodeGen(Generator * generator)
{
if (nodeIsPreCodeGenned())
return this;
//
// If the operand is not a largeint with a scale of 0, convert it to one.
//
NumericType *numeric = (NumericType *)(&(child(0)->getValueId().getType()));
if ((numeric->getFSDatatype() != REC_BIN64_SIGNED) ||
(numeric->getScale() != 0)) {
child(0) = new(generator->wHeap())
Cast(child(0),
new(generator->wHeap())
SQLLargeInt(TRUE, numeric->supportsSQLnull()));
child(0)->bindNode(generator->getBindWA());
}
child(0) = child(0)->preCodeGen(generator);
if (! child(0).getPtr())
return NULL;
markAsPreCodeGenned();
return this;
} // ConvertTimestamp::preCodeGen()
ItemExpr * Extract::preCodeGen(Generator * generator)
{
if (nodeIsPreCodeGenned())
return this;
//
// If the operand is an interval and the extract field is not the end field,
// convert the interval to the units of the extract field.
// Set the dataconversionerror param to Cast so conversion error
// (truncation) could be ignored at runtime.
//
NAType * type_op1 = (NAType *)(&(child(0)->getValueId().getType()));
if ((type_op1->getTypeQualifier() == NA_INTERVAL_TYPE) &&
(getExtractField() < ((IntervalType *) type_op1)->getEndField())) {
IntervalType *interval = (IntervalType *) type_op1;
ItemExpr *dataConvError = new(generator->wHeap()) ConstValue(1234567890);
child(0) = new(generator->wHeap())
Cast(child(0), dataConvError,
new(generator->wHeap())
SQLInterval(interval->supportsSQLnull(),
interval->getStartField(),
interval->getLeadingPrecision(),
getExtractField()),
ITM_NARROW);
child(0)->bindNode(generator->getBindWA());
}
child(0) = child(0)->preCodeGen(generator);
if (! child(0).getPtr())
return NULL;
markAsPreCodeGenned();
return this;
} // Extract::preCodeGen()
ItemExpr * Format::preCodeGen(Generator * generator)
{
return BuiltinFunction::preCodeGen(generator);
}
ItemExpr * JulianTimestamp::preCodeGen(Generator * generator)
{
if (nodeIsPreCodeGenned())
return this;
//
// If the operand is not a timestamp with a fractional precision of 6,
// convert it to one.
//
DatetimeType *dt = (DatetimeType *)(&(child(0)->getValueId().getType()));
if ((dt->getSubtype() != DatetimeType::SUBTYPE_SQLTimestamp) ||
(dt->getFractionPrecision() != 6)) {
child(0) = new(generator->wHeap())
Cast(child(0),
new(generator->wHeap())
SQLTimestamp(dt->supportsSQLnull(), 6));
child(0)->bindNode(generator->getBindWA());
}
child(0) = child(0)->preCodeGen(generator);
if (! child(0).getPtr())
return NULL;
markAsPreCodeGenned();
return this;
} // JulianTimestamp::preCodeGen()
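// Hash::preCodeGen() rewrites a multi-valued hash into a chain of Hash and
// HashComb operators, encodes operands that use a system collation with
// CompEncode, and upshifts case-insensitive character operands when a
// case-sensitive hash is not required.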
ItemExpr * Hash::preCodeGen(Generator * generator)
{
if (nodeIsPreCodeGenned())
return getReplacementExpr();
ItemExpr *result = this;
// ---------------------------------------------------------------------
// In the optimizer, a hash function accepts a comma-separated list
// of columns. In the executor, replace this with the HashComb of the hash
// functions of the individual list elements. NOTE: once error handling
// is in place we need to make sure that no errors are generated from
// this.
// ---------------------------------------------------------------------
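  // For example, Hash(a, b, c) effectively becomes
  //   HashComb(HashComb(Hash(a), Hash(b)), Hash(c))
  // (a sketch of the tree built below).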
if (child(0)->getOperatorType() == ITM_ITEM_LIST)
{
// child is a multi-valued expression, transform into multiple
// hash expressions
ExprValueId treePtr = child(0);
ItemExprTreeAsList hashValues(&treePtr,
ITM_ITEM_LIST,
LEFT_LINEAR_TREE);
// this expression becomes the hash operator for the first
// hash value
child(0) = hashValues[0];
const NAType &childType = child(0)->getValueId().getType();
if(childType.getTypeQualifier() == NA_CHARACTER_TYPE) {
const CharType &chType = (CharType&)childType;
CharInfo::Collation coll = chType.getCollation();
//LCOV_EXCL_START : cnu - Should not count in Code Coverage until we support non-binary collation in SQ
if (CollationInfo::isSystemCollation(coll))
{
child(0) = new(generator->wHeap())
CompEncode(child(0),FALSE, -1, CollationInfo::Compare);
child(0) = child(0)->bindNode(generator->getBindWA());
}
//LCOV_EXCL_STOP : cnu - Should not count in Code Coverage until we support non-binary collation in SQ
else
{
//--------------------------
if ((chType.isCaseinsensitive()) &&
(NOT casesensitiveHash()) &&
(NOT chType.isUpshifted())) {
child(0) = new (generator->wHeap()) Upper(child(0));
child(0) = child(0)->bindNode(generator->getBindWA());
}
}
}
// add hash expressions for all other hash values and HashComb
// them together
CollIndex nc = hashValues.entries();
for (CollIndex i = 1; i < nc; i++)
{
ItemExpr *hi = hashValues[i];
const NAType &childType = hi->getValueId().getType();
if(childType.getTypeQualifier() == NA_CHARACTER_TYPE) {
const CharType &chType = (CharType&)childType;
CharInfo::Collation coll = chType.getCollation();
//LCOV_EXCL_START : cnu - Should not count in Code Coverage until we support non-binary collation in SQ
if (CollationInfo::isSystemCollation(coll))
{
hi = new(generator->wHeap())
CompEncode(hi,FALSE, -1, CollationInfo::Compare);
hi = hi->bindNode(generator->getBindWA());
}
//LCOV_EXCL_STOP : cnu - Should not count in Code Coverage until we support non-binary collation in SQ
else
{
//-----------------------------
if ((chType.isCaseinsensitive()) &&
(NOT casesensitiveHash()) &&
(NOT chType.isUpshifted())) {
hi = new (generator->wHeap()) Upper(hi);
hi = hi->bindNode(generator->getBindWA());
}
//-----------------------
}
}
ItemExpr *hv = new(generator->wHeap()) Hash(hi);
result = new(generator->wHeap()) HashComb(result,hv);
}
result->bindNode(generator->getBindWA());
} else {
const NAType &childType = child(0)->getValueId().getType();
if(childType.getTypeQualifier() == NA_CHARACTER_TYPE) {
const CharType &chType = (CharType&)childType;
CharInfo::Collation coll = chType.getCollation();
//LCOV_EXCL_START : cnu - Should not count in Code Coverage until we support non-binary collation in SQ
if (CollationInfo::isSystemCollation(coll))
{
child(0) = new (generator->wHeap())
CompEncode(child(0), FALSE, -1, CollationInfo::Compare);
child(0) = child(0)->bindNode(generator->getBindWA());
}
//LCOV_EXCL_STOP : cnu - Should not count in Code Coverage until we support non-binary collation in SQ
else
{
if ((chType.isCaseinsensitive()) &&
(NOT casesensitiveHash()) &&
(NOT chType.isUpshifted())) {
child(0) = new (generator->wHeap()) Upper(child(0));
child(0) = child(0)->bindNode(generator->getBindWA());
}
}
}
}
// do generic tasks for pre-code generation (e.g. recurse to the children)
setReplacementExpr(result->ItemExpr::preCodeGen(generator));
markAsPreCodeGenned();
return getReplacementExpr();
} // Hash::preCodeGen()
ItemExpr * HiveHash::preCodeGen(Generator * generator)
{
if (nodeIsPreCodeGenned())
return getReplacementExpr();
ItemExpr *result = this;
// ---------------------------------------------------------------------
// In the optimizer, a hash function accepts a comma-separated list
// of columns. In the executor, replace this with the HashComb of the hash
// functions of the individual list elements. NOTE: once error handling
// is in place we need to make sure that no errors are generated from
// this.
// ---------------------------------------------------------------------
if (child(0)->getOperatorType() == ITM_ITEM_LIST)
{
// child is a multi-valued expression, transform into multiple
// hash expressions
ExprValueId treePtr = child(0);
ItemExprTreeAsList hivehashValues(&treePtr,
ITM_ITEM_LIST,
LEFT_LINEAR_TREE);
// this expression becomes the hash operator for the first
// hash value
child(0) = hivehashValues[0];
const NAType &childType = child(0)->getValueId().getType();
if(childType.getTypeQualifier() == NA_CHARACTER_TYPE) {
const CharType &chType = (CharType&)childType;
CharInfo::Collation coll = chType.getCollation();
//LCOV_EXCL_START : cnu - Should not count in Code Coverage until we support non-binary collation in SQ
if (CollationInfo::isSystemCollation(coll))
{
child(0) = new(generator->wHeap())
CompEncode(child(0),FALSE, -1, CollationInfo::Compare);
child(0) = child(0)->bindNode(generator->getBindWA());
}
//LCOV_EXCL_STOP : cnu - Should not count in Code Coverage until we support non-binary collation in SQ
else
{
//--------------------------
if ((chType.isCaseinsensitive()) &&
(NOT casesensitiveHash()) &&
(NOT chType.isUpshifted())) {
child(0) = new (generator->wHeap()) Upper(child(0));
child(0) = child(0)->bindNode(generator->getBindWA());
}
}
}
// add hash expressions for all other hash values and HiveHashComb
// them together
CollIndex nc = hivehashValues.entries();
for (CollIndex i = 1; i < nc; i++)
{
ItemExpr *hi = hivehashValues[i];
const NAType &childType = hi->getValueId().getType();
if(childType.getTypeQualifier() == NA_CHARACTER_TYPE) {
const CharType &chType = (CharType&)childType;
CharInfo::Collation coll = chType.getCollation();
//LCOV_EXCL_START : cnu - Should not count in Code Coverage until we support non-binary collation in SQ
if (CollationInfo::isSystemCollation(coll))
{
hi = new(generator->wHeap())
CompEncode(hi,FALSE, -1, CollationInfo::Compare);
hi = hi->bindNode(generator->getBindWA());
}
//LCOV_EXCL_STOP : cnu - Should not count in Code Coverage until we support non-binary collation in SQ
else
{
//-----------------------------
if ((chType.isCaseinsensitive()) &&
(NOT casesensitiveHash()) &&
(NOT chType.isUpshifted())) {
hi = new (generator->wHeap()) Upper(hi);
hi = hi->bindNode(generator->getBindWA());
}
//-----------------------
}
}
ItemExpr *hv = new(generator->wHeap()) HiveHash(hi);
result = new(generator->wHeap()) HiveHashComb(result,hv);
}
result->bindNode(generator->getBindWA());
} else {
const NAType &childType = child(0)->getValueId().getType();
if(childType.getTypeQualifier() == NA_CHARACTER_TYPE) {
const CharType &chType = (CharType&)childType;
CharInfo::Collation coll = chType.getCollation();
//LCOV_EXCL_START : cnu - Should not count in Code Coverage until we support non-binary collation in SQ
if (CollationInfo::isSystemCollation(coll))
{
child(0) = new (generator->wHeap())
CompEncode(child(0), FALSE, -1, CollationInfo::Compare);
child(0) = child(0)->bindNode(generator->getBindWA());
}
//LCOV_EXCL_STOP : cnu - Should not count in Code Coverage until we support non-binary collation in SQ
else
{
if ((chType.isCaseinsensitive()) &&
(NOT casesensitiveHash()) &&
(NOT chType.isUpshifted())) {
child(0) = new (generator->wHeap()) Upper(child(0));
child(0) = child(0)->bindNode(generator->getBindWA());
}
}
}
}
// do generic tasks for pre-code generation (e.g. recurse to the children)
setReplacementExpr(result->ItemExpr::preCodeGen(generator));
markAsPreCodeGenned();
return getReplacementExpr();
} // HiveHash::preCodeGen()
// --------------------------------------------------------------
// member functions for HashDistPartHash operator
// Hash Function used by Hash Partitioning. This function cannot change
// once Hash Partitioning is released! Defined for all data types,
// returns a 32 bit non-nullable hash value for the data item.
//--------------------------------------------------------------
ItemExpr * HashDistPartHash::preCodeGen(Generator * generator)
{
if (nodeIsPreCodeGenned())
return getReplacementExpr();
ItemExpr *result = this;
// ---------------------------------------------------------------------
// In the optimizer, a hash function accepts a comma-separated list
// of columns. Replace this with the HashComb of the hash functions
// of the individual list elements.
// ---------------------------------------------------------------------
if (child(0)->getOperatorType() == ITM_ITEM_LIST)
{
// child is a multi-valued expression, transform into multiple
// hash expressions
ExprValueId treePtr = child(0);
ItemExprTreeAsList hashValues(&treePtr,
ITM_ITEM_LIST,
LEFT_LINEAR_TREE);
// this expression becomes the hash operator for the first
// hash value
child(0) = hashValues[0];
const NAType &childType = child(0)->getValueId().getType();
if(childType.getTypeQualifier() == NA_CHARACTER_TYPE) {
const CharType &chType = (CharType&)childType;
CharInfo::Collation coll = chType.getCollation();
//LCOV_EXCL_START : cnu - Should not count in Code Coverage until we support non-binary collation in SQ
if (CollationInfo::isSystemCollation(coll))
{
if (child(0)->getOperatorType() == ITM_NARROW)
{
ItemExpr* narrowsChild = child(0)->child(0);
const NAType &narrowsChildType= narrowsChild->getValueId().getType();
CMPASSERT(narrowsChildType.getTypeQualifier() == NA_CHARACTER_TYPE);
NAType *newType= narrowsChildType.newCopy(generator->wHeap());
CharType * newCharType = (CharType *) newType;
newCharType->setDataStorageSize(chType.getDataStorageSize());
child(0)->getValueId().changeType(newCharType);
}
child(0) = new(generator->wHeap())
CompEncode(child(0),FALSE, -1, CollationInfo::Compare);
child(0) = child(0)->bindNode(generator->getBindWA());
}
//LCOV_EXCL_STOP : cnu - Should not count in Code Coverage until we support non-binary collation in SQ
else
{
if ((chType.isCaseinsensitive()) &&
(NOT chType.isUpshifted())) {
child(0) = new (generator->wHeap()) Upper(child(0));
child(0) = child(0)->bindNode(generator->getBindWA());
}
}
}
// add hash expressions for all other hash values and HashComb
// them together
CollIndex nc = hashValues.entries();
for (CollIndex i = 1; i < nc; i++)
{
ItemExpr *hi = hashValues[i];
const NAType &childType = hi->getValueId().getType();
if(childType.getTypeQualifier() == NA_CHARACTER_TYPE) {
const CharType &chType = (CharType&)childType;
CharInfo::Collation coll = chType.getCollation();
//LCOV_EXCL_START : cnu - Should not count in Code Coverage until we support non-binary collation in SQ
if (CollationInfo::isSystemCollation(coll))
{
//Solution 10-081216-8006
if (hi->getOperatorType() == ITM_NARROW)
{
ItemExpr* narrowsChild = hi->child(0);
const NAType &narrowsChildType= narrowsChild->getValueId().getType();
CMPASSERT(narrowsChildType.getTypeQualifier() == NA_CHARACTER_TYPE);
NAType *newType= narrowsChildType.newCopy(generator->wHeap());
CharType * newCharType = (CharType *) newType;
newCharType->setDataStorageSize(chType.getDataStorageSize());
hi->getValueId().changeType(newCharType);
}
hi = new(generator->wHeap())
CompEncode(hi,FALSE, -1, CollationInfo::Compare);
hi = hi->bindNode(generator->getBindWA());
}
//LCOV_EXCL_STOP : cnu - Should not count in Code Coverage until we support non-binary collation in SQ
else
{
if ((chType.isCaseinsensitive()) &&
(NOT chType.isUpshifted())) {
hi = new (generator->wHeap()) Upper(hi);
hi = hi->bindNode(generator->getBindWA());
}
}
}
ItemExpr *hv =
new(generator->wHeap()) HashDistPartHash(hi);
result = new(generator->wHeap())
HashDistPartHashComb(result,hv);
}
result->bindNode(generator->getBindWA());
} else {
const NAType &childType = child(0)->getValueId().getType();
if(childType.getTypeQualifier() == NA_CHARACTER_TYPE) {
const CharType &chType = (CharType&)childType;
CharInfo::Collation coll = chType.getCollation();
//LCOV_EXCL_START : cnu - Should not count in Code Coverage until we support non-binary collation in SQ
if (CollationInfo::isSystemCollation(coll))
{
//Solution 10-081216-8006
if (child(0)->getOperatorType() == ITM_NARROW)
{
ItemExpr* narrowsChild = child(0)->child(0);
const NAType &narrowsChildType= narrowsChild->getValueId().getType();
CMPASSERT(narrowsChildType.getTypeQualifier() == NA_CHARACTER_TYPE);
NAType *newType= narrowsChildType.newCopy(generator->wHeap());
CharType * newCharType = (CharType *) newType;
newCharType->setDataStorageSize(chType.getDataStorageSize());
child(0)->getValueId().changeType(newCharType);
}
child(0) = new(generator->wHeap())
CompEncode(child(0),FALSE, -1, CollationInfo::Compare);
child(0) = child(0)->bindNode(generator->getBindWA());
}
//LCOV_EXCL_STOP : cnu - Should not count in Code Coverage until we support non-binary collation in SQ
else
{
if ((chType.isCaseinsensitive()) &&
(NOT chType.isUpshifted())) {
child(0) = new (generator->wHeap()) Upper(child(0));
child(0) = child(0)->bindNode(generator->getBindWA());
}
}
}
}
// do generic tasks for pre-code generation (e.g. recurse to the children)
setReplacementExpr(result->ItemExpr::preCodeGen(generator));
markAsPreCodeGenned();
return getReplacementExpr();
} // HashDistPartHash::preCodeGen()
ItemExpr * HostVar::preCodeGen(Generator * generator)
{
if (nodeIsPreCodeGenned())
return this;
ItemExpr * i = convertExternalType(generator);
if (i == NULL)
return NULL;
return i;
}
ItemExpr * IndexColumn::preCodeGen(Generator * generator)
{
if (nodeIsPreCodeGenned())
return this;
ItemExpr * i = convertExternalType(generator);
if (i == NULL)
return NULL;
return i;
}
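// Generator::addCompDecodeForDerialization() walks an expression tree and
// wraps base/index columns that need deserialization (non-aligned HBase row
// format) in a CompDecode node.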
ItemExpr * Generator::addCompDecodeForDerialization(ItemExpr * ie, NABoolean isAlignedFormat)
{
if (!ie)
return NULL;
if ((ie->getOperatorType() == ITM_BASECOLUMN) ||
(ie->getOperatorType() == ITM_INDEXCOLUMN))
{
if (! isAlignedFormat && HbaseAccess::isEncodingNeededForSerialization(ie))
{
ItemExpr * newNode = new(wHeap()) CompDecode
(ie, &ie->getValueId().getType(), FALSE, TRUE);
newNode->bindNode(getBindWA());
if (getBindWA()->errStatus())
return NULL;
newNode = newNode->preCodeGen(this);
if (! newNode)
return NULL;
return newNode;
}
else
return ie;
}
for (Lng32 i = 0; i < ie->getArity(); i++)
{
ItemExpr * nie = addCompDecodeForDerialization(ie->child(i), isAlignedFormat);
if (nie)
ie->setChild(i, nie);
}
return ie;
}
ItemExpr * HbaseTimestamp::preCodeGen(Generator * generator)
{
if (nodeIsPreCodeGenned())
return getReplacementExpr();
if (! ItemExpr::preCodeGen(generator))
return NULL;
markAsPreCodeGenned();
return this;
}
ItemExpr * HbaseVersion::preCodeGen(Generator * generator)
{
if (nodeIsPreCodeGenned())
return getReplacementExpr();
if (! ItemExpr::preCodeGen(generator))
return NULL;
markAsPreCodeGenned();
return this;
}
ItemExpr * LOBoper::preCodeGen(Generator * generator)
{
generator->setProcessLOB(TRUE);
return BuiltinFunction::preCodeGen(generator);
}
ItemExpr * LOBconvert::preCodeGen(Generator * generator)
{
NAColumn * col = child(0)->getValueId().getNAColumn(TRUE);
if (col)
{
lobNum() = col->lobNum();
lobStorageType() = col->lobStorageType();
lobStorageLocation() = col->lobStorageLocation();
}
return LOBoper::preCodeGen(generator);
}
ItemExpr * LOBupdate::preCodeGen(Generator * generator)
{
return LOBoper::preCodeGen(generator);
}
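// MathFunc::preCodeGen() casts every operand to DOUBLE PRECISION, the type
// the math function clauses operate on.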
ItemExpr * MathFunc::preCodeGen(Generator * generator)
{
if (nodeIsPreCodeGenned())
return this;
for (Int32 i = 0; i < getArity(); i++)
{
const NAType &typ = child(i)->getValueId().getType();
// Insert a cast node to convert child to a double precision.
child(i) = new (generator->wHeap())
Cast(child(i),
new (generator->wHeap()) SQLDoublePrecision(
typ.supportsSQLnullLogical()));
child(i)->bindNode(generator->getBindWA());
child(i) = child(i)->preCodeGen(generator);
if (! child(i).getPtr())
return NULL;
}
markAsPreCodeGenned();
return this;
} // MathFunc::preCodeGen()
ItemExpr * Modulus::preCodeGen(Generator * generator)
{
if (nodeIsPreCodeGenned())
return this;
for (Int32 i = 0; i < 2; i++)
{
const NumericType &typ
= (NumericType&)child(i)->getValueId().getType();
if (typ.isDecimal())
{
	  // Insert a cast node to convert child to a LARGEINT.
child(i) = new (generator->wHeap())
Cast(child(i), new (generator->wHeap()) SQLLargeInt(TRUE,
typ.supportsSQLnullLogical()));
}
child(i)->bindNode(generator->getBindWA());
child(i) = child(i)->preCodeGen(generator);
if (! child(i).getPtr())
return NULL;
}
markAsPreCodeGenned();
return this;
} // Modulus::preCodeGen()
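// ItemExpr::convertExternalType() wraps 'this' in a Cast to the equivalent
// internal type when the value's type is not supported internally.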
ItemExpr * ItemExpr::convertExternalType(Generator * generator)
{
BindWA * bindWA = generator->getBindWA();
if (getValueId().getType().isExternalType())
{
// this type is not supported internally.
// Convert it to an equivalent internal type.
ItemExpr * c = new (bindWA->wHeap())
Cast(this, getValueId().getType().equivalentType(bindWA->wHeap()));
c->synthTypeAndValueId();
// mark 'this' as precodegenned so we don't go thru
// this path again.
markAsPreCodeGenned();
c = c->preCodeGen(generator);
unmarkAsPreCodeGenned();
return c;
}
else
return this;
}
ItemExpr * Parameter::preCodeGen(Generator * generator)
{
if (nodeIsPreCodeGenned())
return this;
ItemExpr * i = convertExternalType(generator);
if (i == NULL)
return NULL;
return i;
}
ItemExpr * PivotGroup::preCodeGen(Generator * generator)
{
if (nodeIsPreCodeGenned())
return getReplacementExpr();
if (! ItemExpr::preCodeGen(generator))
return NULL;
ItemExpr * childExpr = child(0)->castToItemExpr();
const NAType &type1 =
childExpr->getValueId().getType();
if (type1.getTypeQualifier() != NA_CHARACTER_TYPE)
{
Lng32 displayLen = type1.getDisplayLength(
type1.getFSDatatype(),
type1.getNominalSize(),
type1.getPrecision(),
type1.getScale(),
0);
NAType * newType = new(generator->getBindWA()->wHeap())
SQLVarChar(displayLen, type1.supportsSQLnull());
childExpr = new (generator->getBindWA()->wHeap()) Cast(childExpr, newType);
childExpr = childExpr->bindNode(generator->getBindWA());
if (! childExpr || generator->getBindWA()->errStatus())
return NULL;
childExpr = childExpr->preCodeGen(generator);
if (! childExpr)
return NULL;
child(0) = childExpr;
}
markAsPreCodeGenned();
return this;
}
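// RandomNum::preCodeGen() casts the optional seed operand to an unsigned INT.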
ItemExpr * RandomNum::preCodeGen(Generator * generator)
{
if (nodeIsPreCodeGenned())
return this;
if (child(0))
{
const NAType &typ1 = child(0)->getValueId().getType();
      // Insert a cast node to convert child to an unsigned INT.
child(0) = new (generator->wHeap())
Cast(child(0), new (generator->wHeap()) SQLInt(FALSE,
typ1.supportsSQLnullLogical()));
child(0)->bindNode(generator->getBindWA());
child(0) = child(0)->preCodeGen(generator);
if (! child(0).getPtr())
return NULL;
}
markAsPreCodeGenned();
return this;
} // RandomNum::preCodeGen()
ItemExpr * Repeat::preCodeGen(Generator * generator)
{
if (nodeIsPreCodeGenned())
return this;
const NAType &typ2 = child(1)->getValueId().getType();
  // Insert a cast node to convert child 2 to an unsigned INT.
child(1) = new (generator->wHeap())
Cast(child(1), new (generator->wHeap()) SQLInt(FALSE,
typ2.supportsSQLnullLogical()));
child(1)->bindNode(generator->getBindWA());
for (Int32 i = 0; i < getArity(); i++)
{
if (child(i))
{
child(i) = child(i)->preCodeGen(generator);
if (! child(i).getPtr())
return NULL;
}
}
markAsPreCodeGenned();
return this;
} // Repeat::preCodeGen()
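// ReplaceNull::preCodeGen() casts children 1 and 2 to the result type; the
// type used for child 1 is made nullable when children 0 and 1 are the same
// expression.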
ItemExpr *ReplaceNull::preCodeGen(Generator *generator)
{
if (nodeIsPreCodeGenned())
return getReplacementExpr();
NAType *dstAType = getValueId().getType().newCopy(generator->wHeap());
const NAType& dstBType = getValueId().getType();
if(child(0) == child(1))
{
dstAType->setNullable(TRUE);
}
child(1) = new(generator->wHeap()) Cast(child(1), dstAType);
child(2) = new(generator->wHeap()) Cast(child(2), &dstBType);
child(1)->bindNode(generator->getBindWA());
child(2)->bindNode(generator->getBindWA());
setReplacementExpr(ItemExpr::preCodeGen(generator));
markAsPreCodeGenned();
return getReplacementExpr();
}
ItemExpr * TriRelational::preCodeGen(Generator * generator)
{
if (nodeIsPreCodeGenned())
return getReplacementExpr();
// ---------------------------------------------------------------------
// The executor does not handle tri-relational operators. It either
  // handles key exclusion expressions if the operator is part of a key
// predicate, or the tri-relational operator gets converted into
// a case statement (see comment in file ItemFunc.h).
// ---------------------------------------------------------------------
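  // For example, a LESS_OR_LE operator is rewritten as (a sketch of the tree
  // built below):
  //   CASE WHEN <child(2)> THEN child(0) < child(1)
  //        ELSE child(0) <= child(1) END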
NABoolean lessOrLe = (getOperatorType() == ITM_LESS_OR_LE);
BiRelat *exclusive = new(generator->wHeap()) BiRelat(
(IFX lessOrLe THENX ITM_LESS ELSEX ITM_GREATER),
child(0),
child(1));
BiRelat *inclusive = new(generator->wHeap()) BiRelat(
(IFX lessOrLe THENX ITM_LESS_EQ ELSEX ITM_GREATER_EQ),
child(0),
child(1));
exclusive->setSpecialNulls(getSpecialNulls());
inclusive->setSpecialNulls(getSpecialNulls());
ItemExpr * result = new(generator->wHeap()) Case(
NULL,
new(generator->wHeap()) IfThenElse(
child(2),
exclusive,
inclusive));
result->bindNode(generator->getBindWA());
// do generic tasks for pre-code generation (e.g. recurse to the children)
setReplacementExpr(result->preCodeGen(generator));
markAsPreCodeGenned();
return getReplacementExpr();
} // TriRelational::preCodeGen()
ItemExpr *
HashDistrib::preCodeGen(Generator * generator)
{
if (nodeIsPreCodeGenned())
return this;
if (! ItemExpr::preCodeGen(generator))
return NULL;
// Assert that the operands are unsigned int.
//
NumericType *numeric = (NumericType *)(&(child(0)->getValueId().getType()));
GenAssert(numeric->getFSDatatype()==REC_BIN32_UNSIGNED &&
numeric->getScale()==0,
"invalid first operand type to function HashDistrib");
numeric = (NumericType *)(&(child(1)->getValueId().getType()));
GenAssert(numeric->getFSDatatype()==REC_BIN32_UNSIGNED &&
numeric->getScale()==0,
"invalid second operand type to function HashDistrib");
markAsPreCodeGenned();
return this;
}
ItemExpr * ProgDistribKey::preCodeGen(Generator * generator)
{
if (nodeIsPreCodeGenned())
return this;
// Assert that all operands are of type unsigned int.
//
for (Int32 i=0; i<3; i++)
{
NumericType *numeric =
(NumericType *)(&(child(i)->getValueId().getType()));
GenAssert(numeric->getFSDatatype()==REC_BIN32_UNSIGNED &&
numeric->getScale()==0,
"invalid operand type to function ProgDistribKey");
}
markAsPreCodeGenned();
return this;
}
ItemExpr *
PAGroup::preCodeGen(Generator * generator)
{
if (nodeIsPreCodeGenned())
return this;
if (! ItemExpr::preCodeGen(generator))
return NULL;
// Assert that the operands are unsigned int.
//
NumericType *numeric = (NumericType *)(&(child(0)->getValueId().getType()));
GenAssert(numeric->getFSDatatype()==REC_BIN32_UNSIGNED &&
numeric->getScale()==0,
"invalid first operand type to function PAGroup");
numeric = (NumericType *)(&(child(1)->getValueId().getType()));
GenAssert(numeric->getFSDatatype()==REC_BIN32_UNSIGNED &&
numeric->getScale()==0,
"invalid second operand type to function PAGroup");
numeric = (NumericType *)(&(child(2)->getValueId().getType()));
GenAssert(numeric->getFSDatatype()==REC_BIN32_UNSIGNED &&
numeric->getScale()==0,
"invalid third operand type to function PAGroup");
markAsPreCodeGenned();
return this;
}
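// ScalarVariance::preCodeGen() only validates its inputs: the result and all
// three operands must be DOUBLE PRECISION floats.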
ItemExpr *
ScalarVariance::preCodeGen(Generator *generator)
{
if (nodeIsPreCodeGenned())
return this;
if (! ItemExpr::preCodeGen(generator))
return NULL;
NumericType *result_type =
(NumericType *)(&(getValueId().getType()));
NumericType *type_op1 =
(NumericType *)(&(child(0)->castToItemExpr()->getValueId().getType()));
NumericType *type_op2 =
(NumericType *)(&(child(1)->castToItemExpr()->getValueId().getType()));
  NumericType *type_op3 =
    (NumericType *)(&(child(2)->castToItemExpr()->getValueId().getType()));
GenAssert(result_type->getTypeQualifier() == NA_NUMERIC_TYPE &&
type_op1->getTypeQualifier() == NA_NUMERIC_TYPE &&
type_op2->getTypeQualifier() == NA_NUMERIC_TYPE &&
type_op3->getTypeQualifier() == NA_NUMERIC_TYPE &&
!result_type->isExact() &&
!type_op1->isExact() &&
!type_op2->isExact() &&
!type_op3->isExact() &&
result_type->getBinaryPrecision() == SQL_DOUBLE_PRECISION &&
type_op1->getBinaryPrecision() == SQL_DOUBLE_PRECISION &&
type_op2->getBinaryPrecision() == SQL_DOUBLE_PRECISION &&
type_op3->getBinaryPrecision() == SQL_DOUBLE_PRECISION,
"ScalarVariance: Invalid Inputs");
markAsPreCodeGenned();
return this;
}
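// Substring::preCodeGen() casts the start position and length operands to a
// signed INT.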
ItemExpr * Substring::preCodeGen(Generator * generator)
{
if (nodeIsPreCodeGenned())
return this;
child(0) = child(0)->preCodeGen(generator);
if (! child(0).getPtr())
return NULL;
for (Int32 i = 1; i < getArity(); i++)
{
if (child(i))
{
const NAType &typ1 = child(i)->getValueId().getType();
// Insert a cast node to convert child to an INT.
child(i) = new (generator->wHeap())
Cast(child(i), new (generator->wHeap()) SQLInt(TRUE,
typ1.supportsSQLnullLogical()));
child(i)->bindNode(generator->getBindWA());
child(i) = child(i)->preCodeGen(generator);
if (! child(i).getPtr())
return NULL;
}
}
markAsPreCodeGenned();
return this;
} // Substring::preCodeGen()
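// Default ItemExpr::preCodeGen(): recursively preCodeGen all children.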
ItemExpr * ItemExpr::preCodeGen(Generator * generator)
{
if (nodeIsPreCodeGenned())
return this;
Lng32 nc = (Lng32)getArity();
for (Lng32 index = 0; index < nc; index++)
{
child(index) = child(index)->preCodeGen(generator);
if (! child(index).getPtr())
return NULL;
}
markAsPreCodeGenned();
return this;
} // ItemExpr::preCodeGen()
// ---------------------------------------------------------
// Methods for class VEGRewritePairs
// ---------------------------------------------------------
VEGRewritePairs::VEGRewritePairs(CollHeap* heap)
: heap_(heap),
vegRewritePairs_(&valueIdHashFunc, 1009, TRUE, heap)
{
}
ULng32 VEGRewritePairs::valueIdHashFunc(const CollIndex & v)
{
return (ULng32)v;
}
const VEGRewritePairs::VEGRewritePair *
VEGRewritePairs::getPair( const ValueId& original) const
{
CollIndex k(original);
return vegRewritePairs_.getFirstValue(&k);
} // getPair(..)
NABoolean VEGRewritePairs::
getRewritten(ValueId& rewritten, const ValueId& original) const
{
NABoolean found = FALSE;
const VEGRewritePairs::VEGRewritePair * vrPairPtr = NULL;
  if ((vrPairPtr = getPair(original)) != NULL){
rewritten = vrPairPtr->getRewritten();
found = TRUE;
}
return found;
} // getRewritten
VEGRewritePairs::~VEGRewritePairs()
{
clear();
} // VEGRewritePairs::~VEGRewritePairs()
void
VEGRewritePairs::insert(const ValueId& original,
const ValueId& rewritten)
{
// Precondition:
// original must have not been rewritten before:
CMPASSERT(getPair(original) == NULL);
VEGRewritePairs::VEGRewritePair * vrPairPtr =
new (heap_) VEGRewritePairs::VEGRewritePair(original,rewritten);
CMPASSERT(vrPairPtr != NULL);
CollIndex* key = (CollIndex*) new (heap_) CollIndex(original);
vegRewritePairs_.insert(key, vrPairPtr);
}
void VEGRewritePairs::VEGRewritePair::print(FILE *ofd) const
{
#pragma nowarn(1506) // warning elimination
Lng32 orId = CollIndex(original_),
#pragma warn(1506) // warning elimination
#pragma nowarn(1506) // warning elimination
reId = CollIndex(rewritten_);
#pragma warn(1506) // warning elimination
fprintf(ofd,"<%d, %d>",orId,reId);
}
void VEGRewritePairs::print( FILE* ofd,
const char* indent,
const char* title) const
{
#pragma nowarn(1506) // warning elimination
BUMP_INDENT(indent);
#pragma warn(1506) // warning elimination
fprintf(ofd,"%s %s\n%s",NEW_INDENT,title,NEW_INDENT);
CollIndex *key;
VEGRewritePair *value;
NAHashDictionaryIterator<CollIndex, VEGRewritePair> iter(vegRewritePairs_);
for (CollIndex i=0; i < iter.entries(); i++)
{
iter.getNext(key, value);
value->print(ofd);
}
}
// PhysTranspose::preCodeGen() -------------------------------------------
// Perform local query rewrites such as for the creation and
// population of intermediate tables, for accessing partitioned
// data. Rewrite the value expressions after minimizing the dataflow
// using the transitive closure of equality predicates.
//
// PhysTranspose::preCodeGen() - is basically the same as the RelExpr::
// preCodeGen() except that here we replace the VEG references in the
// transUnionVals() as well as the selectionPred().
//
// Parameters:
//
// Generator *generator
// IN/OUT : A pointer to the generator object which contains the state,
// and tools (e.g. expression generator) to generate code for
// this node.
//
// ValueIdSet &externalInputs
// IN : The set of external Inputs available to this node.
//
//
RelExpr * PhysTranspose::preCodeGen(Generator * generator,
const ValueIdSet &externalInputs,
ValueIdSet &pulledNewInputs)
{
if (nodeIsPreCodeGenned())
return this;
// Check if the pivs of this operator and its child are the same.
// If they are not, make them the same.
replacePivs();
// Resolve the VEGReferences and VEGPredicates, if any, that appear
// in the Characteristic Inputs, in terms of the externalInputs.
//
getGroupAttr()->resolveCharacteristicInputs(externalInputs);
// My Characteristic Inputs become the external inputs for my children.
//
Int32 nc = getArity();
for (Int32 index = 0; index < nc; index++)
{
ValueIdSet childPulledInputs;
child(index) = child(index)->preCodeGen(generator,
externalInputs,
pulledNewInputs);
if (! child(index).getPtr())
return NULL;
// process additional input value ids the child wants
getGroupAttr()->addCharacteristicInputs(childPulledInputs);
pulledNewInputs += childPulledInputs;
}
// The VEG expressions in the selection predicates and the characteristic
// outputs can reference any expression that is either a potential output
// or a characteristic input for this RelExpr. Supply these values for
// rewriting the VEG expressions.
//
ValueIdSet availableValues;
getInputValuesFromParentAndChildren(availableValues);
// The transUnionVals have access to only the Input Values.
// These can come from the parent or be the outputs of the child.
//
for(CollIndex v = 0; v < transUnionVectorSize(); v++) {
ValueIdList valIdList = transUnionVector()[v];
valIdList.replaceVEGExpressions
(availableValues,
getGroupAttr()->getCharacteristicInputs());
}
// The selectionPred has access to the output values generated by transpose.
// as well as any input values from the parent or child.
//
getInputAndPotentialOutputValues(availableValues);
// Rewrite the selection predicates.
//
NABoolean replicatePredicates = TRUE;
selectionPred().replaceVEGExpressions
(availableValues,
getGroupAttr()->getCharacteristicInputs(),
FALSE, // no key predicates here
0 /* no need for idempotence here */,
replicatePredicates
);
// Replace VEG references in the outputs and remove redundant
// outputs.
//
getGroupAttr()->resolveCharacteristicOutputs
(availableValues,
getGroupAttr()->getCharacteristicInputs());
generator->oltOptInfo()->setMultipleRowsReturned(TRUE);
markAsPreCodeGenned();
return this;
} // PhysTranspose::preCodeGen
// -----------------------------------------------------------------------
// PhyPack::preCodeGen() is basically the same as RelExpr::preCodeGen().
// It replaces the VEG's in its packingExpr_ as well as selectionPred_.
// -----------------------------------------------------------------------
RelExpr* PhyPack::preCodeGen(Generator* generator,
const ValueIdSet& externalInputs,
ValueIdSet &pulledNewInputs)
{
if (nodeIsPreCodeGenned())
return this;
// Check if the pivs of this operator and its child are the same.
// If they are not, make them the same.
replacePivs();
// Resolve the VEGReferences and VEGPredicates, if any, that appear
// in the Characteristic Inputs, in terms of the externalInputs.
//
getGroupAttr()->resolveCharacteristicInputs(externalInputs);
// My Characteristic Inputs become the external inputs for my children.
//
Int32 nc = getArity();
for(Int32 index = 0; index < nc; index++)
{
ValueIdSet childPulledInputs;
child(index) = child(index)->preCodeGen(generator,
externalInputs,
pulledNewInputs);
if(! child(index).getPtr()) return NULL;
// process additional input value ids the child wants
getGroupAttr()->addCharacteristicInputs(childPulledInputs);
pulledNewInputs += childPulledInputs;
}
if (getFirstNRows() != -1)
{
RelExpr * firstn = new(generator->wHeap()) FirstN(child(0),
getFirstNRows());
// move my child's attributes to the firstN node.
// Estimated rows will be mine.
firstn->setEstRowsUsed(getEstRowsUsed());
firstn->setMaxCardEst(getMaxCardEst());
firstn->setInputCardinality(child(0)->getInputCardinality());
firstn->setPhysicalProperty(child(0)->getPhysicalProperty());
firstn->setGroupAttr(child(0)->getGroupAttr());
//10-060516-6532 -Begin
//When the FIRSTN node is created after the optimization phase, the cost
//of that node does not matter. But display_explain and explain
//show zero operator costs and rollup cost, which confuses the user.
//Also, the VQP crashes when the cost tab for the FIRSTN node is selected.
//So, creating a cost object will fix this.
//The operator cost is zero and the rollup cost is the same as its child's.
Cost* firstnNodecost = new HEAP Cost();
firstn->setOperatorCost(firstnNodecost);
Cost* rollupcost = (Cost *)(child(0)->getRollUpCost());
*rollupcost += *firstnNodecost;
firstn->setRollUpCost(rollupcost);
//10-060516-6532 -End
firstn =
firstn->preCodeGen(generator,
getGroupAttr()->getCharacteristicInputs(),
pulledNewInputs);
if (! firstn)
return NULL;
setChild(0, firstn);
}
// The VEG expressions in the selection predicates and the characteristic
// outputs can reference any expression that is either a potential output
// or a characteristic input for this RelExpr. Supply these values for
// rewriting the VEG expressions.
//
ValueIdSet availableValues;
getInputValuesFromParentAndChildren(availableValues);
const ValueIdSet& inputValues = getGroupAttr()->getCharacteristicInputs();
// Replace VEG's in both the packing expression and the packing factor.
//
packingFactor().replaceVEGExpressions(availableValues,inputValues);
packingExpr().replaceVEGExpressions(availableValues,inputValues);
// The selectionPred has access to the output values generated by Pack.
//
getInputAndPotentialOutputValues(availableValues);
// Rewrite the selection predicates.
//
NABoolean replicatePredicates = TRUE;
selectionPred().replaceVEGExpressions(availableValues,
inputValues,
FALSE, // no key predicates here
0 /* no need for idempotence here */,
replicatePredicates
);
// Replace VEG references in the outputs and remove redundant outputs.
//
getGroupAttr()->resolveCharacteristicOutputs(availableValues,inputValues);
markAsPreCodeGenned();
return this;
} // PhyPack::preCodeGen()
//
//PreCodeGen method for class PhysicalTuple list.
//This was put in as a fix for cr 10-010327-1947.
//Before the fix, the RelExpr was getting to the generator
//with a VEGRef still in it, because the VEGRef from the
//tupleExpr had not been removed and resolved correctly.
RelExpr * PhysicalTuple::preCodeGen(Generator * generator,
const ValueIdSet& externalInputs,
ValueIdSet& pulledNewInputs_)
{
ValueIdSet availableValues = externalInputs;
tupleExpr().replaceVEGExpressions
(availableValues, externalInputs);
return (RelExpr::preCodeGen(generator, availableValues, pulledNewInputs_));
} // PhysicalTuple::preCodeGen()
//
RelExpr * PhysicalTupleList::preCodeGen(Generator * generator,
const ValueIdSet& externalInputs,
ValueIdSet& pulledNewInputs_)
{
ValueIdSet availableValues = externalInputs;
tupleExpr().replaceVEGExpressions
(availableValues, externalInputs);
generator->oltOptInfo()->setMultipleRowsReturned(TRUE);
return (RelExpr::preCodeGen(generator, availableValues, pulledNewInputs_));
} // PhysicalTupleList::preCodeGen()
RelExpr * CompoundStmt::preCodeGen(Generator * generator,
const ValueIdSet & externalInputs,
ValueIdSet &pulledNewInputs)
{
if (nodeIsPreCodeGenned())
return this;
// Check if the pivs of this operator and its child are the same.
// If they are not, make them the same.
replacePivs();
ValueIdSet availableValues;
ValueIdSet childPulledInputs;
// Resolve the VEGReferences and VEGPredicates, if any, that appear
// in the Characteristic Inputs, in terms of the externalInputs.
getGroupAttr()->resolveCharacteristicInputs(externalInputs);
availableValues = getGroupAttr()->getCharacteristicInputs();
// This is similar to what is done in Join::precodeGen when we have a TSJ.
// A compound statement node behaves in a similar way to a TSJ node since
// it flows values from left to right.
// My Characteristic Inputs become the external inputs for my left child.
child(0) = child(0)->preCodeGen(generator,availableValues,childPulledInputs);
if (! child(0).getPtr())
return NULL;
// process additional input value ids the child wants
// (see RelExpr::preCodeGen())
getGroupAttr()->addCharacteristicInputs(childPulledInputs);
pulledNewInputs += childPulledInputs;
availableValues += childPulledInputs;
childPulledInputs.clear();
// The values produced as output by my left child can be used as
// "external" inputs by my right child.
availableValues += child(0)->getGroupAttr()->getCharacteristicOutputs();
// Process the right child
child(1) = child(1)->preCodeGen(generator,availableValues,childPulledInputs);
if (! child(1).getPtr())
return NULL;
// process additional input value ids the child wants
// (see RelExpr::preCodeGen())
getGroupAttr()->addCharacteristicInputs(childPulledInputs);
pulledNewInputs += childPulledInputs;
// Accumulate the values that are provided as inputs by my parent
// together with the values that are produced as outputs by my
// children. Use these values for rewriting the VEG expressions.
getInputValuesFromParentAndChildren(availableValues);
// The VEG expressions in the selection predicates and the characteristic
// outputs can reference any expression that is either a potential output
// or a characteristic input for this RelExpr. Supply these values for
// rewriting the VEG expressions.
getInputAndPotentialOutputValues(availableValues);
// Rewrite the selection predicates.
NABoolean replicatePredicates = TRUE;
selectionPred().replaceVEGExpressions
(availableValues,
getGroupAttr()->getCharacteristicInputs(),
FALSE, // no need to generate key predicates here
0 /* no need for idempotence here */,
replicatePredicates
);
getGroupAttr()->resolveCharacteristicOutputs
(availableValues,
getGroupAttr()->getCharacteristicInputs());
// Xn will be aborted if there is any IUD stmt within this CS and
// an error occurs at runtime.
if (generator->foundAnUpdate())
{
//generator->setUpdAbortOnError(TRUE);
generator->setUpdSavepointOnError(FALSE);
generator->setUpdErrorOnError(FALSE);
//generator->setUpdPartialOnError(FALSE);
}
generator->setAqrEnabled(FALSE);
markAsPreCodeGenned();
return this;
} // CompoundStmt::preCodeGen
RelExpr * FirstN::preCodeGen(Generator * generator,
const ValueIdSet & externalInputs,
ValueIdSet &pulledNewInputs)
{
if (nodeIsPreCodeGenned())
return this;
if (! RelExpr::preCodeGen(generator,externalInputs,pulledNewInputs))
return NULL;
markAsPreCodeGenned();
return this;
} // FirstN::preCodeGen
RelExpr * RelRoutine::preCodeGen (Generator * generator,
const ValueIdSet &externalInputs,
ValueIdSet &pulledNewInputs)
{
if (nodeIsPreCodeGenned())
return this;
if (!RelExpr::preCodeGen(generator,externalInputs,pulledNewInputs))
return NULL;
// The VEG expressions in the selection predicates and the characteristic
// outputs can reference any expression that is either a potential output
// or a characteristic input for this RelExpr. Supply these values for
// rewriting the VEG expressions.
//
ValueIdSet availableValues;
availableValues = getGroupAttr()->getCharacteristicInputs();
const ValueIdSet &inputValues = getGroupAttr()->getCharacteristicInputs();
getProcInputParamsVids().replaceVEGExpressions(availableValues, inputValues);
generator->setAqrEnabled(FALSE);
markAsPreCodeGenned();
return this;
}
RelExpr * IsolatedNonTableUDR::preCodeGen (Generator * generator,
const ValueIdSet &externalInputs,
ValueIdSet &pulledNewInputs)
{
if (nodeIsPreCodeGenned())
return this;
if (!RelRoutine::preCodeGen(generator,externalInputs,pulledNewInputs))
return NULL;
// The VEG expressions in the selection predicates and the characteristic
// outputs can reference any expression that is either a potential output
// or a characteristic input for this RelExpr. Supply these values for
// rewriting the VEG expressions.
//
ValueIdSet availableValues;
availableValues = getGroupAttr()->getCharacteristicInputs();
const ValueIdSet &inputValues = getGroupAttr()->getCharacteristicInputs();
getNeededValueIds().replaceVEGExpressions(availableValues, inputValues);
markAsPreCodeGenned();
return this;
}
RelExpr * PhysicalTableMappingUDF::preCodeGen(Generator * generator,
const ValueIdSet &externalInputs,
ValueIdSet &pulledNewInputs)
{
if (nodeIsPreCodeGenned())
return this;
if (!RelRoutine::preCodeGen(generator,externalInputs,pulledNewInputs))
return NULL;
ValueIdSet availableValues;
getInputValuesFromParentAndChildren(availableValues);
for(Int32 i = 0; i < getArity(); i++)
{
ValueIdList &childOutputs(getChildInfo(i)->getOutputIds());
ValueIdList origChildOutputs(childOutputs);
childOutputs.replaceVEGExpressions(
availableValues,
getGroupAttr()->getCharacteristicInputs());
for (CollIndex j=0; j<childOutputs.entries(); j++)
if (NOT(childOutputs[j].getType() == origChildOutputs[j].getType()))
{
// VEG rewrite changed the type.
// Since we recorded the original type of the input
// column and exposed this type to the UDF writer, don't
// change the type now. Instead, add a cast back to the
// original type.
ItemExpr *castToOrigType = new(CmpCommon::statementHeap())
Cast(childOutputs[j].getItemExpr(),
origChildOutputs[j].getType().newCopy());
castToOrigType->synthTypeAndValueId();
childOutputs[j] = castToOrigType->getValueId();
}
}
planInfo_ = getPhysicalProperty()->getUDRPlanInfo();
if (!getDllInteraction()->finalizePlan(this, planInfo_))
return NULL;
markAsPreCodeGenned();
return this;
}
RelExpr * PhysicalFastExtract::preCodeGen (Generator * generator,
const ValueIdSet &externalInputs,
ValueIdSet &pulledNewInputs)
{
if (nodeIsPreCodeGenned())
return this;
if (getIsMainQueryOperator())
generator->setIsFastExtract(TRUE);
else
generator->setContainsFastExtract(TRUE);
if (!RelExpr::preCodeGen(generator,externalInputs,pulledNewInputs))
return NULL;
ValueIdSet availableValues;
getInputValuesFromParentAndChildren(availableValues);
getSelectListIds().replaceVEGExpressions(availableValues, externalInputs);
if (isAppend())
generator->setAqrEnabled(FALSE);
// This relation is a linear fit to cpu consumption data observed during a
// performance run, while extracting data from the LINEITEM table. CPU usage
// can go from 0% to 50% according to this relation. CPU usage is determined
// by 2 factors: (a) bytes of data extracted and (b) % of non-character
// (termed numeric below) columns in each row (computed based on total max
// row size and total non-char column size). Both factors have equal weight,
// i.e. they can contribute at most 25% towards CPU usage. For up to 50 GB of
// extracted data, the bytes of extracted data increase the usage linearly from
// 0% to 25%. After 50 GB (total volume across all partitions), the contribution
// to cpu usage from bytes extracted does not increase. Similarly, a table with
// all non-char columns can contribute up to 25% towards cpu usage. The numeric
// factor is also weighted by the volume of data extracted.
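//
// Illustrative worked example (hypothetical numbers, not from the performance
// run): extracting 25 GB where half of each row consists of non-character
// columns gives
//   bytesExtractedRatio   = 25 / 50 = 0.5
//   numericRowLengthRatio = 0.5
//   maxCpuUsage = 50 * 0.5 * (0.5 + 0.5 * 0.5) = 18 (18.75 truncated to Int32)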
const Int32 plateauTabSizeInGB = 50;
const float weightOfBaseExtract = 0.5;
const float weightOfNumericExpressionEval = 0.5;
const Int32 maxPossibleCpuUsage = 50 ; // in percentage units
Int32 rowLength = child(0).getGroupAttr()->getCharacteristicOutputs().getRowLength();
Int32 numericRowLength = child(0).getGroupAttr()->
getCharacteristicOutputs().getRowLengthOfNumericCols();
float numericRowLengthRatio = ((float) numericRowLength)/rowLength ;
double bytesExtractedInGB = (getEstRowsUsed().value()*rowLength)/(1024*1024*1024);
double bytesExtractedRatio = bytesExtractedInGB/plateauTabSizeInGB ;
if (bytesExtractedRatio > 1)
bytesExtractedRatio = 1;
Int32 maxCpuUsage = (Int32) (maxPossibleCpuUsage*bytesExtractedRatio*(weightOfBaseExtract +
(weightOfNumericExpressionEval*numericRowLengthRatio)));
generator->setMaxCpuUsage(maxCpuUsage);
markAsPreCodeGenned();
return this;
}
RelExpr * RelLock::preCodeGen (Generator * generator,
const ValueIdSet &externalInputs,
ValueIdSet &pulledNewInputs)
{
if (nodeIsPreCodeGenned())
return this;
// Since the newExch node is added as the parent
// to the SequenceGenerator node, this method gets
// called again during the preCodeGen of the
// newExch.
if(parallelExecution_)
{
// Add an exchange node here so this could be executed in ESP.
RelExpr * exchange = new(generator->wHeap()) Exchange (this);
exchange->setPhysicalProperty(this->getPhysicalProperty());
exchange->setGroupAttr(this->getGroupAttr());
markAsPreCodeGenned();
exchange = exchange->preCodeGen(generator, externalInputs, pulledNewInputs);
// Done.
return exchange;
/*
RelExpr *newExch =
generator->insertEspExchange(this, getPhysicalProperty());
((Exchange *)newExch)->makeAnESPAccess();
markAsPreCodeGenned();
RelExpr * exch =
newExch->preCodeGen(generator, externalInputs, pulledNewInputs);
return exch;
*/
}
if (!RelExpr::preCodeGen(generator,externalInputs,pulledNewInputs))
return NULL;
markAsPreCodeGenned();
return this;
}
RelExpr * StatisticsFunc::preCodeGen(Generator * generator,
const ValueIdSet & externalInputs,
ValueIdSet &pulledNewInputs)
{
if (nodeIsPreCodeGenned())
return this;
if (! RelExpr::preCodeGen(generator,externalInputs,pulledNewInputs))
return NULL;
// don't collect stats for stats func itself
generator->setComputeStats(FALSE);
markAsPreCodeGenned();
// Done.
return this;
}
RelExpr * ExeUtilGetStatistics::preCodeGen(Generator * generator,
const ValueIdSet & externalInputs,
ValueIdSet &pulledNewInputs)
{
if (nodeIsPreCodeGenned())
return this;
if (! ExeUtilExpr::preCodeGen(generator,externalInputs,pulledNewInputs))
return NULL;
// don't collect stats for stats func itself
generator->setComputeStats(FALSE);
markAsPreCodeGenned();
// Done.
return this;
}
RelExpr * ExeUtilWnrInsert::preCodeGen(Generator * generator,
const ValueIdSet & externalInputs,
ValueIdSet &pulledNewInputs)
{
if (nodeIsPreCodeGenned())
return this;
if (! ExeUtilExpr::preCodeGen(generator,externalInputs,pulledNewInputs))
return NULL;
markAsPreCodeGenned();
return this;
}
ItemExpr * PositionFunc::preCodeGen(Generator * generator)
{
if (nodeIsPreCodeGenned())
return this;
if (! BuiltinFunction::preCodeGen(generator))
return NULL;
const NAType &type1 =
child(0)->castToItemExpr()->getValueId().getType();
const NAType &type2 =
child(1)->castToItemExpr()->getValueId().getType();
CMPASSERT(
(type1.getTypeQualifier() == NA_CHARACTER_TYPE) &&
(type2.getTypeQualifier() == NA_CHARACTER_TYPE))
const CharType &cType1 = (CharType&)type1;
const CharType &cType2 = (CharType&)type2;
CharInfo::Collation coll1 = cType1.getCollation();
CharInfo::Collation coll2 = cType2.getCollation();
CMPASSERT(coll1==coll2);
setCollation(coll1);
if (CollationInfo::isSystemCollation(coll1))
{
{
ItemExpr * newEncode =
new(generator->wHeap())
CompEncode(child(0),FALSE, -1, CollationInfo::Search);
newEncode = newEncode->bindNode(generator->getBindWA());
newEncode = newEncode->preCodeGen(generator);
if (!newEncode)
return NULL;
setChild(0, newEncode);
newEncode =
new(generator->wHeap())
CompEncode(child(1), FALSE, -1,CollationInfo::Search);
newEncode->bindNode(generator->getBindWA());
newEncode = newEncode->preCodeGen(generator);
if (!newEncode)
return NULL;
setChild(1, newEncode);
}
}
markAsPreCodeGenned();
return this;
} // PositionFunc::preCodeGen()
ItemExpr * Trim::preCodeGen(Generator * generator)
{
if (nodeIsPreCodeGenned())
return this;
if (! BuiltinFunction::preCodeGen(generator))
return NULL;
const NAType &type1 =
child(0)->castToItemExpr()->getValueId().getType();
const NAType &type2 =
child(1)->castToItemExpr()->getValueId().getType();
CMPASSERT(
(type1.getTypeQualifier() == NA_CHARACTER_TYPE) &&
(type2.getTypeQualifier() == NA_CHARACTER_TYPE))
const CharType &cType1 = (CharType&)type1;
const CharType &cType2 = (CharType&)type2;
CharInfo::Collation coll1 = cType1.getCollation();
CharInfo::Collation coll2 = cType2.getCollation();
CMPASSERT(coll1==coll2);
setCollation(coll1);
markAsPreCodeGenned();
return this;
} // Trim::preCodeGen()
ItemExpr * NotIn::preCodeGen(Generator * generator)
{
if (child(0)->getOperatorType() == ITM_ITEM_LIST)
{//Multicolumn NotIn should not reach this far
GenAssert(FALSE,"Multicolumn NotIn should not have reached this far");
return NULL;
}
if (nodeIsPreCodeGenned())
{
return getReplacementExpr();
}
// if single column NOT IN reaches pre-code generation, then replace it with
// non equi-predicate form (NE)
// An example of a case where NotIn reaches this far is a query like
// select * from ta where (select sum(a2) from ta) not in (select b2 from tb);
// where the NotIn predicate gets pushed down and is not caught at optimization
// time
ValueId vid = createEquivNonEquiPredicate();
ItemExpr * newPred = vid.getItemExpr();
setReplacementExpr(newPred->preCodeGen(generator));
markAsPreCodeGenned();
return getReplacementExpr();
} // NotIn::preCodeGen()
short HbaseAccess::processSQHbaseKeyPreds(Generator * generator,
NAList<HbaseSearchKey*>& searchKeys,
ListOfUniqueRows &listOfUniqueRows,
ListOfRangeRows &listOfRangeRows)
{
Int32 ct = 0;
HbaseUniqueRows getSpec;
getSpec.rowTS_ = -1;
for (CollIndex i = 0; i<searchKeys.entries(); i++ )
{
HbaseSearchKey* searchKey = searchKeys[i];
ValueIdSet newSelectionPreds;
if ( searchKey->isUnique() )
{
// Since we fill one rowId per entry, we will be using getRow() form of Get.
if ( (ct=searchKey->getCoveredLeadingKeys()) > 0 )
{
NAString result;
ValueIdList keyValues = searchKey->getBeginKeyValues();
keyValues.convertToTextKey(searchKey->getKeyColumns(), result);
getSpec.rowIds_.insert(result);
}
// getSpec.addColumnNames(searchKey->getRequiredOutputColumns());
}
else
{
// Multiple rows. Do Scan
HbaseRangeRows scanSpec;
scanSpec.beginKeyExclusive_ = FALSE;
scanSpec.endKeyExclusive_ = FALSE;
scanSpec.rowTS_ = -1;
if ( !searchKey->areAllBeginKeysMissing() )
{
if ( (ct=searchKey->getCoveredLeadingKeys()) > 0 )
{
ValueIdList beginKeyValues = searchKey->getBeginKeyValues();
beginKeyValues.convertToTextKey(searchKey->getKeyColumns(), scanSpec.beginRowId_);
scanSpec.beginKeyExclusive_ = searchKey->isBeginKeyExclusive();
}
}
if ( !searchKey->areAllEndKeysMissing() )
{
if ( (ct=searchKey->getCoveredLeadingKeys()) )
{
ValueIdList endKeyValues = searchKey->getEndKeyValues();
endKeyValues.convertToTextKey(searchKey->getKeyColumns(), scanSpec.endRowId_);
scanSpec.endKeyExclusive_ = searchKey->isEndKeyExclusive();
}
}
// scanSpec.addColumnNames(searchKey->getRequiredOutputColumns());
listOfRangeRows.insertAt(listOfRangeRows.entries(), scanSpec);
}
} // for
if (getSpec.rowIds_.entries() > 0)
listOfUniqueRows.insert(getSpec);
return 0;
}
short HbaseAccess::processNonSQHbaseKeyPreds(Generator * generator,
ValueIdSet &preds,
ListOfUniqueRows &listOfUniqueRows,
ListOfRangeRows &listOfRangeRows)
{
ValueId vid;
ValueId eqRowIdValVid;
ValueId eqColNameValVid;
ItemExpr * ie = NULL;
NABoolean rowIdFound = FALSE;
NABoolean colNameFound = FALSE;
NABoolean isConstParam = FALSE;
ValueIdList newPredList;
NABoolean addToNewPredList;
HbaseUniqueRows hg;
HbaseRangeRows hs;
for (vid = preds.init();
(preds.next(vid));
preds.advance(vid))
{
ie = vid.getItemExpr();
addToNewPredList = TRUE;
ConstValue * constVal = NULL;
if ((NOT rowIdFound) && (isEqGetExpr(ie, eqRowIdValVid, isConstParam,
"ROW_ID")))
{
rowIdFound = TRUE;
if (isConstParam)
{
ConstantParameter*cp = (ConstantParameter*)eqRowIdValVid.getItemExpr();
constVal = cp->getConstVal();
}
else
constVal = (ConstValue*)eqRowIdValVid.getItemExpr();
NAString rid = *constVal->getRawText();
hg.rowIds_.insert(rid);
hg.rowTS_ = -1;
addToNewPredList = FALSE;
}
if (isEqGetExpr(ie, eqColNameValVid, isConstParam, "COL_NAME"))
{
colNameFound = TRUE;
if (isConstParam)
{
ConstantParameter*cp = (ConstantParameter*)eqColNameValVid.getItemExpr();
constVal = cp->getConstVal();
}
else
constVal = (ConstValue*)eqColNameValVid.getItemExpr();
NAString col = *constVal->getRawText();
hg.colNames_.insert(col);
hs.colNames_.insert(col);
addToNewPredList = FALSE;
}
if (addToNewPredList)
newPredList.insert(vid);
} // for
if ((rowIdFound) || (colNameFound))
{
preds.clear();
preds.insertList(newPredList);
}
if (rowIdFound)
{
listOfUniqueRows.insert(hg);
}
else
{
hs.rowTS_ = -1;
listOfRangeRows.insert(hs);
}
// markAsPreCodeGenned();
// Done.
return 0;
}
////////////////////////////////////////////////////////////////////////////
// To push down, the predicate must have the following form:
// <column> <op> <value-expr>
//
// and all of the following conditions must be met:
//
// <column>: a base table or index column which can be serialized.
// serialized: either the column doesn't need encoding, like
// an unsigned integer, or the column
// was declared with the SERIALIZED option.
// <op>: eq, ne, gt, ge, lt, le
// <value-expr>: an expression that only contains const or param values, and
// <value-expr>'s datatype is not a superset of <column>'s datatype.
//
/////////////////////////////////////////////////////////////////////////////
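// Illustrative examples (not from the original source): a predicate such as
//   SALARY >= 100000
// on a serialized column qualifies for pushdown, since the value expression is
// a constant, whereas
//   SALARY >= BONUS
// does not qualify, because the value expression references another column
// (hasColReference() returns TRUE) and therefore cannot be evaluated inside hbase.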
NABoolean HbaseAccess::isHbaseFilterPred(Generator * generator, ItemExpr * ie,
ValueId &colVID, ValueId &valueVID,
NAString &op,
NABoolean &removeFromOrigList)
{
NABoolean found = FALSE;
removeFromOrigList = FALSE;
NABoolean hbaseLookupPred = FALSE;
NABoolean flipOp = FALSE; // set to TRUE when column is child(1)
if (ie &&
((ie->getOperatorType() >= ITM_EQUAL) &&
(ie->getOperatorType() <= ITM_GREATER_EQ)))
{
ItemExpr * child0 = ie->child(0)->castToItemExpr();
ItemExpr * child1 = ie->child(1)->castToItemExpr();
if ((ie->child(0)->getOperatorType() == ITM_BASECOLUMN) &&
(NOT hasColReference(ie->child(1))))
{
found = TRUE;
colVID = ie->child(0)->getValueId();
valueVID = ie->child(1)->getValueId();
}
else if ((ie->child(1)->getOperatorType() == ITM_BASECOLUMN) &&
(NOT hasColReference(ie->child(0))))
{
found = TRUE;
flipOp = TRUE;
colVID = ie->child(1)->getValueId();
valueVID = ie->child(0)->getValueId();
}
else if ((ie->child(0)->getOperatorType() == ITM_INDEXCOLUMN) &&
(NOT hasColReference(ie->child(1))))
{
found = TRUE;
colVID = ie->child(0)->getValueId();
valueVID = ie->child(1)->getValueId();
}
else if ((ie->child(1)->getOperatorType() == ITM_INDEXCOLUMN) &&
(NOT hasColReference(ie->child(0))))
{
found = TRUE;
flipOp = TRUE;
colVID = ie->child(1)->getValueId();
valueVID = ie->child(0)->getValueId();
}
else if ((ie->child(0)->getOperatorType() == ITM_REFERENCE) &&
(NOT hasColReference(ie->child(1))))
{
found = TRUE;
colVID = ie->child(0)->getValueId();
valueVID = ie->child(1)->getValueId();
}
else if ((ie->child(1)->getOperatorType() == ITM_REFERENCE) &&
(NOT hasColReference(ie->child(0))))
{
found = TRUE;
flipOp = TRUE;
colVID = ie->child(1)->getValueId();
valueVID = ie->child(0)->getValueId();
}
else if ((ie->child(0)->getOperatorType() == ITM_HBASE_COLUMN_LOOKUP) &&
(NOT hasColReference(ie->child(1))))
{
HbaseColumnLookup * hcl = (HbaseColumnLookup*)ie->child(0)->castToItemExpr();
if (hcl->getValueId().getType().getTypeQualifier() == NA_CHARACTER_TYPE)
{
hbaseLookupPred = TRUE;
ItemExpr * newCV = new(generator->wHeap()) ConstValue(hcl->hbaseCol());
newCV = newCV->bindNode(generator->getBindWA());
newCV = newCV->preCodeGen(generator);
found = TRUE;
colVID = newCV->getValueId();
valueVID = ie->child(1)->getValueId();
}
}
else if ((ie->child(1)->getOperatorType() == ITM_HBASE_COLUMN_LOOKUP) &&
(NOT hasColReference(ie->child(0))))
{
HbaseColumnLookup * hcl = (HbaseColumnLookup*)ie->child(1)->castToItemExpr();
if (hcl->getValueId().getType().getTypeQualifier() == NA_CHARACTER_TYPE)
{
hbaseLookupPred = TRUE;
ItemExpr * newCV = new(generator->wHeap()) ConstValue(hcl->hbaseCol());
newCV = newCV->bindNode(generator->getBindWA());
newCV = newCV->preCodeGen(generator);
found = TRUE;
flipOp = TRUE;
colVID = newCV->getValueId();
valueVID = ie->child(0)->getValueId();
}
}
}
if (found)
{
const NAType &colType = colVID.getType();
const NAType &valueType = valueVID.getType();
NABoolean generateNarrow = FALSE;
if (NOT hbaseLookupPred)
{
generateNarrow = valueType.errorsCanOccur(colType);
if ((generateNarrow) || // value not a superset of column
(NOT columnEnabledForSerialization(colVID.getItemExpr())))
found = FALSE;
}
if (found)
{
if (colType.getTypeQualifier() == NA_CHARACTER_TYPE)
{
const CharType &charColType = (CharType&)colType;
const CharType &charValType = (CharType&)valueType;
if ((charColType.isCaseinsensitive() || charValType.isCaseinsensitive()) ||
(charColType.isUpshifted() || charValType.isUpshifted()))
found = FALSE;
}
else if (colType.getTypeQualifier() == NA_NUMERIC_TYPE)
{
const NumericType &numType = (NumericType&)colType;
const NumericType &valType = (NumericType&)valueType;
if (numType.isBigNum() || valType.isBigNum())
found = FALSE;
}
}
if (found)
{
if ((ie) && (((BiRelat*)ie)->addedForLikePred()) &&
(valueVID.getItemExpr()->getOperatorType() == ITM_CONSTANT))
{
// remove trailing '\0' characters since this is being pushed down to hbase.
ConstValue * cv = (ConstValue*)(valueVID.getItemExpr());
char * cvv = (char*)cv->getConstValue();
Lng32 len = cv->getStorageSize() - 1;
while ((len > 0) && (cvv[len] == '\0'))
len--;
NAString newCVV(cvv, len+1);
ItemExpr * newCV = new(generator->wHeap()) ConstValue(newCVV);
newCV = newCV->bindNode(generator->getBindWA());
newCV = newCV->preCodeGen(generator);
valueVID = newCV->getValueId();
}
ItemExpr * castValue = NULL;
if (NOT hbaseLookupPred)
castValue = new(generator->wHeap()) Cast(valueVID.getItemExpr(), &colType);
else
{
castValue = new(generator->wHeap()) Cast(valueVID.getItemExpr(), &valueVID.getType());
}
if ((NOT hbaseLookupPred) &&
(isEncodingNeededForSerialization(colVID.getItemExpr())))
{
castValue = new(generator->wHeap()) CompEncode
(castValue, FALSE, -1, CollationInfo::Sort, TRUE, FALSE);
}
castValue = castValue->bindNode(generator->getBindWA());
castValue = castValue->preCodeGen(generator);
valueVID = castValue->getValueId();
// hbase pred evaluation compares the column byte string with the
// value byte string. It doesn't have a notion of nullability.
// For a nullable value stored in the database, the first byte indicates
// whether the value is a null value.
// During pred evaluation in hbase, a null value could either get filtered
// out due to the byte string comparison, or it may get returned back.
// For example, <col> <gt> <value>
// will return TRUE if the first byte of <col> is a null value.
// Similarly, <col> <lt> <value>
// will return FALSE if the first byte of <col> is a null value.
// If the null value gets filtered out, then that is correct semantics.
// But if the null value gets returned to the executor, then it still needs to be
// filtered out. To do that, the predicate needs to be evaluated in the executor
// with proper null semantics.
//
// Long story short, do not remove the original pred if the col or value is
// nullable.
//
if ((colType.supportsSQLnull()) ||
(valueType.supportsSQLnull()))
{
removeFromOrigList = FALSE;
}
else
{
removeFromOrigList = TRUE;
}
if (ie->getOperatorType() == ITM_EQUAL)
op = "EQUAL";
else if (ie->getOperatorType() == ITM_NOT_EQUAL)
op = "NOT_EQUAL";
else if (ie->getOperatorType() == ITM_LESS)
{
if (flipOp)
op = "GREATER";
else
op = "LESS";
}
else if (ie->getOperatorType() == ITM_LESS_EQ)
{
if (flipOp)
op = "GREATER_OR_EQUAL";
else
op = "LESS_OR_EQUAL";
}
else if (ie->getOperatorType() == ITM_GREATER)
{
if (flipOp)
op = "LESS";
else
op = "GREATER";
}
else if (ie->getOperatorType() == ITM_GREATER_EQ)
{
if (flipOp)
op = "LESS_OR_EQUAL";
else
op = "GREATER_OR_EQUAL";
}
else
op = "NO_OP";
}
}
return found;
}
short HbaseAccess::extractHbaseFilterPreds(Generator * generator,
ValueIdSet &preds, ValueIdSet &newExePreds)
{
if (CmpCommon::getDefault(HBASE_FILTER_PREDS) == DF_OFF)
return 0;
// cannot push preds for aligned format row
NABoolean isAlignedFormat = getTableDesc()->getNATable()->isAlignedFormat(getIndexDesc());
if (isAlignedFormat)
return 0;
for (ValueId vid = preds.init();
(preds.next(vid));
preds.advance(vid))
{
ItemExpr * ie = vid.getItemExpr();
ValueId colVID;
ValueId valueVID;
NABoolean removeFromOrigList = FALSE;
NAString op;
NABoolean isHFP =
isHbaseFilterPred(generator, ie, colVID, valueVID, op, removeFromOrigList);
if (isHFP)
{
hbaseFilterColVIDlist_.insert(colVID);
hbaseFilterValueVIDlist_.insert(valueVID);
opList_.insert(op);
if (NOT removeFromOrigList)
newExePreds.insert(vid);
}
else
{
newExePreds.insert(vid);
}
} // end for
return 0;
}
////////////////////////////////////////////////////////////////////////////
// To push down, the predicate must have the following form:
// xp:= <column> <op> <value-expr>
// xp:= <column> is not null (no support for hbase lookup)
// xp:= <column> is null (no support for hbase lookup)
// (xp:=<column> like <value-expr> not yet implemented)
// xp:=<xp> OR <xp> (not evaluated in isHbaseFilterPredV2, but by extractHbaseFilterPredV2)
// xp:=<xp> AND <xp>(not evaluated in isHbaseFilterPredV2, but by extractHbaseFilterPredV2)
//
// and all of the following conditions must be met:
//
// <column>: a base table or index column which can be serialized and belong to the table being scanned.
// serialized: either the column doesn't need encoding, like
// an unsigned integer, or the column
// was declared with the SERIALIZED option.
// It also must not be an added column with a non-null default.
// <op>: eq, ne, gt, ge, lt, le
// <value-expr>: an expression that only contains const or param values, and
// <value-expr>'s datatype is not a superset of <column>'s datatype.
//
// colVID, valueVID and op are output parameters.
/////////////////////////////////////////////////////////////////////////////
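// Illustrative example (not from the original source): for a predicate tree
//   (A > 1 OR B < 5) AND C IS NOT NULL
// the OR node is pushed only if both A > 1 and B < 5 are individually pushable,
// and the AND node is pushed only if both of its children are pushable;
// otherwise the whole AND (or OR) predicate is left in newExePreds for
// executor evaluation. See extractHbaseFilterPredsV2() for the recursive walk.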
NABoolean HbaseAccess::isHbaseFilterPredV2(Generator * generator, ItemExpr * ie,
ValueId &colVID, ValueId &valueVID,
NAString &op)
{
NABoolean foundBinary = FALSE;
NABoolean foundUnary = FALSE;
NABoolean hbaseLookupPred = FALSE;
NABoolean flipOp = FALSE; // set to TRUE when column is child(1)
if (ie &&
((ie->getOperatorType() >= ITM_EQUAL) &&
(ie->getOperatorType() <= ITM_GREATER_EQ))) //binary operator case
{//begin expression
ItemExpr * child0 = ie->child(0)->castToItemExpr();
ItemExpr * child1 = ie->child(1)->castToItemExpr();
if ((ie->child(0)->getOperatorType() == ITM_BASECOLUMN) &&
(NOT hasColReference(ie->child(1))))
{
foundBinary = TRUE;
colVID = ie->child(0)->getValueId();
valueVID = ie->child(1)->getValueId();
}
else if ((ie->child(1)->getOperatorType() == ITM_BASECOLUMN) &&
(NOT hasColReference(ie->child(0))))
{
foundBinary = TRUE;
flipOp = TRUE;
colVID = ie->child(1)->getValueId();
valueVID = ie->child(0)->getValueId();
}
else if ((ie->child(0)->getOperatorType() == ITM_INDEXCOLUMN) &&
(NOT hasColReference(ie->child(1))))
{
foundBinary = TRUE;
colVID = ie->child(0)->getValueId();
valueVID = ie->child(1)->getValueId();
}
else if ((ie->child(1)->getOperatorType() == ITM_INDEXCOLUMN) &&
(NOT hasColReference(ie->child(0))))
{
foundBinary = TRUE;
flipOp = TRUE;
colVID = ie->child(1)->getValueId();
valueVID = ie->child(0)->getValueId();
}
else if ((ie->child(0)->getOperatorType() == ITM_HBASE_COLUMN_LOOKUP) &&
(NOT hasColReference(ie->child(1))))
{
HbaseColumnLookup * hcl = (HbaseColumnLookup*)ie->child(0)->castToItemExpr();
if (hcl->getValueId().getType().getTypeQualifier() == NA_CHARACTER_TYPE)
{
hbaseLookupPred = TRUE;
ItemExpr * newCV = new(generator->wHeap()) ConstValue(hcl->hbaseCol());
newCV = newCV->bindNode(generator->getBindWA());
newCV = newCV->preCodeGen(generator);
foundBinary = TRUE;
colVID = newCV->getValueId();
valueVID = ie->child(1)->getValueId();
}
}
else if ((ie->child(1)->getOperatorType() == ITM_HBASE_COLUMN_LOOKUP) &&
(NOT hasColReference(ie->child(0))))
{
HbaseColumnLookup * hcl = (HbaseColumnLookup*)ie->child(1)->castToItemExpr();
if (hcl->getValueId().getType().getTypeQualifier() == NA_CHARACTER_TYPE)
{
hbaseLookupPred = TRUE;
ItemExpr * newCV = new(generator->wHeap()) ConstValue(hcl->hbaseCol());
newCV = newCV->bindNode(generator->getBindWA());
newCV = newCV->preCodeGen(generator);
foundBinary = TRUE;
flipOp = TRUE;
colVID = newCV->getValueId();
valueVID = ie->child(0)->getValueId();
}
}
}//end binary operators
else if (ie && ((ie->getOperatorType() == ITM_IS_NULL)||(ie->getOperatorType() == ITM_IS_NOT_NULL))){//check for unary operators
ItemExpr * child0 = ie->child(0)->castToItemExpr();
if ((ie->child(0)->getOperatorType() == ITM_BASECOLUMN) ||
(ie->child(0)->getOperatorType() == ITM_INDEXCOLUMN)){
foundUnary = TRUE;
colVID = ie->child(0)->getValueId();
valueVID = NULL_VALUE_ID;
}
}//end unary operators
//check that the found column belongs to the table being scanned (i.e., it is not an input to the scan node)
if (foundBinary || foundUnary){
ValueId dummyValueId;
if (getGroupAttr()->getCharacteristicInputs().referencesTheGivenValue(colVID,dummyValueId)){
foundBinary=FALSE;
foundUnary=FALSE;
}
}
//check that it is not an added column with a non-null default
if ((foundBinary || foundUnary)&& (NOT hbaseLookupPred)){
if (colVID.isColumnWithNonNullNonCurrentDefault()){
foundBinary=FALSE;
foundUnary=FALSE;
}
}
if (foundBinary)
{
const NAType &colType = colVID.getType();
const NAType &valueType = valueVID.getType();
NABoolean generateNarrow = FALSE;
if (NOT hbaseLookupPred)
{
generateNarrow = valueType.errorsCanOccur(colType);
if ((generateNarrow) || // value not a superset of column
(NOT columnEnabledForSerialization(colVID.getItemExpr())))
foundBinary = FALSE;
}
if (foundBinary)
{
if (colType.getTypeQualifier() == NA_CHARACTER_TYPE)
{
const CharType &charColType = (CharType&)colType;
const CharType &charValType = (CharType&)valueType;
if ((charColType.isCaseinsensitive() || charValType.isCaseinsensitive()) ||
(charColType.isUpshifted() || charValType.isUpshifted()))
foundBinary = FALSE;
}
else if (colType.getTypeQualifier() == NA_NUMERIC_TYPE)
{
const NumericType &numType = (NumericType&)colType;
const NumericType &valType = (NumericType&)valueType;
if (numType.isBigNum() || valType.isBigNum())
foundBinary = FALSE;
}
}
if (foundBinary)
{
if ((ie) && (((BiRelat*)ie)->addedForLikePred()) &&
(valueVID.getItemExpr()->getOperatorType() == ITM_CONSTANT))
{
// remove trailing '\0' characters since this is being pushed down to hbase.
ConstValue * cv = (ConstValue*)(valueVID.getItemExpr());
char * cvv = (char*)cv->getConstValue();
Lng32 len = cv->getStorageSize() - 1;
while ((len > 0) && (cvv[len] == '\0'))
len--;
NAString newCVV(cvv, len+1);
ItemExpr * newCV = new(generator->wHeap()) ConstValue(newCVV);
newCV = newCV->bindNode(generator->getBindWA());
newCV = newCV->preCodeGen(generator);
valueVID = newCV->getValueId();
}
ItemExpr * castValue = NULL;
if (NOT hbaseLookupPred)
castValue = new(generator->wHeap()) Cast(valueVID.getItemExpr(), &colType);
else
{
castValue = new(generator->wHeap()) Cast(valueVID.getItemExpr(), &valueVID.getType());
}
if ((NOT hbaseLookupPred) &&
(isEncodingNeededForSerialization(colVID.getItemExpr())))
{
castValue = new(generator->wHeap()) CompEncode
(castValue, FALSE, -1, CollationInfo::Sort, TRUE, FALSE);
}
castValue = castValue->bindNode(generator->getBindWA());
castValue = castValue->preCodeGen(generator);
valueVID = castValue->getValueId();
NAString nullType;
if ((colType.supportsSQLnull()) ||
(valueType.supportsSQLnull()))
{
nullType = "_NULL";
}
else
{
nullType = "";
}
// append _NULL to the operator to signal to the java code that generates pushdown filters that NULL semantics must be handled
if (ie->getOperatorType() == ITM_EQUAL)
op = "EQUAL"+nullType;
else if (ie->getOperatorType() == ITM_NOT_EQUAL)
op = "NOT_EQUAL"+nullType;
else if (ie->getOperatorType() == ITM_LESS){
if (flipOp)
op = "GREATER"+nullType;
else
op = "LESS"+nullType;
}
else if (ie->getOperatorType() == ITM_LESS_EQ){
if (flipOp)
op = "GREATER_OR_EQUAL"+nullType;
else
op = "LESS_OR_EQUAL"+nullType;
}else if (ie->getOperatorType() == ITM_GREATER){
if (flipOp)
op = "LESS"+nullType;
else
op = "GREATER"+nullType;
}else if (ie->getOperatorType() == ITM_GREATER_EQ){
if (flipOp)
op = "LESS_OR_EQUAL"+nullType;
else
op = "GREATER_OR_EQUAL"+nullType;
}else
op = "NO_OP"+nullType;
}
}
if (foundUnary){
const NAType &colType = colVID.getType();
NAString nullType;
if (colType.supportsSQLnull())
{
nullType = "_NULL";
}
else
{
nullType = "";
}
if (ie->getOperatorType() == ITM_IS_NULL)
op = "IS_NULL"+nullType;
else if (ie->getOperatorType() == ITM_IS_NOT_NULL)
op = "IS_NOT_NULL"+nullType;
}
return foundBinary || foundUnary;
}
short HbaseAccess::extractHbaseFilterPredsVX(Generator * generator,
ValueIdSet &preds, ValueIdSet &newExePreds){
//this wrapper holds the code that should not be part of the recursive function
if (CmpCommon::getDefault(HBASE_FILTER_PREDS) == DF_OFF)
return 0;
// check if initial (version 1) implementation
if (CmpCommon::getDefault(HBASE_FILTER_PREDS) == DF_MINIMUM)
return extractHbaseFilterPreds(generator,preds,newExePreds);
// if here, we are DF_MEDIUM
// cannot push preds for aligned format row
NABoolean isAlignedFormat = getTableDesc()->getNATable()->isAlignedFormat(getIndexDesc());
if (isAlignedFormat)
return 0;
//recursive function call
opList_.insert("V2");//to instruct the java side that we are dealing with predicate pushdown V2 semantic, add "V2" marker
extractHbaseFilterPredsV2(generator,preds,newExePreds,FALSE);
return 0;
}
// return true if the node was successfully pushed down
NABoolean HbaseAccess::extractHbaseFilterPredsV2(Generator * generator,
ValueIdSet &preds, ValueIdSet &newExePreds, NABoolean checkOnly)
{
// the isFirstAndLayer flag is used to allow detecting top level predicates that can still be pushed to the executor
int addedNode=0;
for (ValueId vid = preds.init();
(preds.next(vid));
preds.advance(vid))
{
ItemExpr * ie = vid.getItemExpr();
// if it is AND operation, recurse through left and right children
if (ie->getOperatorType() == ITM_AND){
ValueIdSet leftPreds;
ValueIdSet rightPreds;
leftPreds += ie->child(0)->castToItemExpr()->getValueId();
rightPreds += ie->child(1)->castToItemExpr()->getValueId();
//cannot be first AND layer, both left and right must be pushable to get anything pushed
if(extractHbaseFilterPredsV2(generator, leftPreds, newExePreds, TRUE)&&
extractHbaseFilterPredsV2(generator, rightPreds, newExePreds, TRUE)){// both left and right child must match
if(!checkOnly){
extractHbaseFilterPredsV2(generator, leftPreds, newExePreds, FALSE);//generate tree
extractHbaseFilterPredsV2(generator, rightPreds, newExePreds, FALSE);//generate tree
opList_.insert("AND");
}
if (preds.entries()==1)
return TRUE;
}
else{
if(!checkOnly){
newExePreds.insert(vid);
}
if (preds.entries()==1)
return FALSE;
}
continue;
// the OR case is easier, as we don't have the case of a top level expression that can still be pushed to the executor
}//end if AND
else if(ie->getOperatorType() == ITM_OR){
ValueIdSet leftPreds;
ValueIdSet rightPreds;
leftPreds += ie->child(0)->castToItemExpr()->getValueId();
rightPreds += ie->child(1)->castToItemExpr()->getValueId();
//both left and right must be pushable to get anything pushed
if(extractHbaseFilterPredsV2(generator, leftPreds, newExePreds, TRUE)&&
extractHbaseFilterPredsV2(generator, rightPreds, newExePreds, TRUE)){// both left and right child must match
if(!checkOnly){
extractHbaseFilterPredsV2(generator, leftPreds, newExePreds, FALSE);//generate tree
extractHbaseFilterPredsV2(generator, rightPreds, newExePreds, FALSE);//generate tree
opList_.insert("OR");
if (addedNode>0)opList_.insert("AND"); // if it is not the first node added to the push down, AND it with the rest
addedNode++; // we just pushed it down, so increase the node count pushed down.
}
if (preds.entries()==1)
return TRUE;
}
else{// if predicate cannot be pushed down
if(!checkOnly){
newExePreds.insert(vid);
}
if (preds.entries()==1)
return FALSE;
}
continue;
}//end if OR
ValueId colVID;
ValueId valueVID;
NAString op;
NABoolean isHFP =
isHbaseFilterPredV2(generator, ie, colVID, valueVID, op);
if (isHFP && !checkOnly){// if pushable, push it
hbaseFilterColVIDlist_.insert(colVID);
if (valueVID != NULL_VALUE_ID) hbaseFilterValueVIDlist_.insert(valueVID);// don't insert valueID for unary operators.
opList_.insert(op);
if (addedNode>0)opList_.insert("AND"); // if it is not the first node added to the push down, AND it with the rest
addedNode++; // we just pushed it down, so increase the node count pushed down.
}else if (!checkOnly){//if not pushable, pass it for executor evaluation.
newExePreds.insert(vid);
}
if (preds.entries()==1){
return isHFP; // if we are not on the first call level, where we can have multiple preds, exit returning the pushability
}
} // end for
return TRUE;//don't really care, means we are top level.
}
void HbaseAccess::computeRetrievedCols()
{
GroupAttributes fakeGA;
ValueIdSet requiredValueIds(getGroupAttr()->
getCharacteristicOutputs());
ValueIdSet coveredExprs;
// ---------------------------------------------------------------------
// Make fake group attributes with all inputs that are available to
// the file scan node and with no "native" values.
// Then call the "coverTest" method, offering it all the index columns
// as additional inputs. "coverTest" will mark those index columns that
// it actually needs to satisfy the required value ids, and that is
// what we actually want. The actual cover test should always succeed,
// otherwise the FileScan node would have been inconsistent.
// ---------------------------------------------------------------------
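//
// Illustrative sketch (hypothetical columns, not from the original source):
// for an index with columns (C1, C2, C3) where the characteristic outputs and
// predicates only reference C1 and C3, coverTest() places C1 and C3 into
// retrievedCols() and leaves C2 out, so only those columns need to be fetched
// from the storage engine.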
fakeGA.addCharacteristicInputs(getGroupAttr()->getCharacteristicInputs());
requiredValueIds += selectionPred();
requiredValueIds += executorPred();
fakeGA.coverTest(requiredValueIds, // char outputs + preds
getIndexDesc()->getIndexColumns(), // all index columns
coveredExprs, // dummy parameter
retrievedCols()); // needed index cols
//
// *** This CMPASSERT goes off sometimes, indicating an actual problem.
// Hans has agreed to look into it (10/18/96) but I (brass) am
// commenting it out for now, for the sake of my time in doing a checkin.
//
// CMPASSERT(coveredExprs == requiredValueIds);
}
RelExpr * HbaseAccess::preCodeGen(Generator * generator,
const ValueIdSet & externalInputs,
ValueIdSet &pulledNewInputs)
{
if (nodeIsPreCodeGenned())
return this;
const PartitioningFunction* myPartFunc = getPartFunc();
// use const HBase keys only if we don't have to add
// partitioning key predicates
if ( myPartFunc == NULL ||
!myPartFunc->isPartitioned() ||
myPartFunc->isAReplicationPartitioningFunction())
if (!processConstHBaseKeys(
generator,
this,
getSearchKey(),
getIndexDesc(),
executorPred(),
getHbaseSearchKeys(),
listOfUniqueRows_,
listOfRangeRows_))
return NULL;
if (! FileScan::preCodeGen(generator,externalInputs,pulledNewInputs))
return NULL;
//compute isUnique:
NABoolean isUnique = FALSE;
if (listOfRangeRows_.entries() == 0)
{
if ((searchKey() && searchKey()->isUnique()) &&
(listOfUniqueRows_.entries() == 0))
isUnique = TRUE;
else if ((NOT (searchKey() && searchKey()->isUnique())) &&
(listOfUniqueRows_.entries() == 1) &&
(listOfUniqueRows_[0].rowIds_.entries() == 1))
isUnique = TRUE;
}
// executorPred() contains an ANDed list of predicates.
// if hbase filter preds are enabled, then extract those preds from executorPred()
// that could be pushed down to hbase.
// Do this only for non-unique scan access.
ValueIdSet newExePreds;
ValueIdSet* originExePreds = new (generator->wHeap())ValueIdSet(executorPred()) ;//saved for future nullable column check
if (CmpCommon::getDefault(HBASE_FILTER_PREDS) != DF_MINIMUM){ // the check for V2 and above is moved up before calculating retrieved columns
if ((NOT isUnique) &&
(extractHbaseFilterPredsVX(generator, executorPred(), newExePreds)))
return this;
// if some filter preds were found, then initialize executor preds with new exe preds.
// newExePreds may be empty which means that all predicates were changed into
// hbase preds. In this case, nuke existing exe preds.
if (hbaseFilterColVIDlist_.entries() > 0)
setExecutorPredicates(newExePreds);
}
ValueIdSet colRefSet;
computeRetrievedCols();
for (ValueId valId = retrievedCols().init();
retrievedCols().next(valId);
retrievedCols().advance(valId))
{
ValueId dummyValId;
if ((valId.getItemExpr()->getOperatorType() != ITM_CONSTANT) &&
(getGroupAttr()->getCharacteristicOutputs().referencesTheGivenValue(valId, dummyValId)))
colRefSet.insert(valId);
}
if (getTableDesc()->getNATable()->isHbaseCellTable())
{
for (Lng32 i = 0; i < getIndexDesc()->getIndexColumns().entries(); i++)
{
// retColRefSet_.insert(getIndexDesc()->getIndexColumns()[i]);
}
}
else if (getTableDesc()->getNATable()->isHbaseRowTable())
{
NASet<NAString> * hbaseColNameSet =
generator->getBindWA()->hbaseColUsageInfo()->hbaseColNameSet
((QualifiedName*)&getTableDesc()->getNATable()->getTableName());
NABoolean starFound = FALSE;
for (Lng32 ij = 0; ij < hbaseColNameSet->entries(); ij++)
{
NAString &colName = (*hbaseColNameSet)[ij];
retHbaseColRefSet_.insert(colName);
if (colName == "*")
starFound = TRUE;
}
if (starFound)
retHbaseColRefSet_.clear();
}
else
{
// create the list of columns that need to be retrieved from hbase.
// first add all columns referenced in the executor pred.
HbaseAccess::addReferenceFromVIDset(executorPred(), TRUE, TRUE, colRefSet);
HbaseAccess::addReferenceFromVIDset
(getGroupAttr()->getCharacteristicOutputs(), TRUE, TRUE, colRefSet);
for (ValueId valId = colRefSet.init();
colRefSet.next(valId);
colRefSet.advance(valId))
{
ValueId dummyValId;
if (NOT getGroupAttr()->getCharacteristicInputs().referencesTheGivenValue(valId, dummyValId))
{
retColRefSet_.insert(valId);
if (valId.getItemExpr()->getOperatorType() == ITM_HBASE_TIMESTAMP)
{
Lng32 colNumber = ((BaseColumn*)((HbaseTimestamp*)valId.getItemExpr())->col())->getColNumber();
ValueId colVID = getIndexDesc()->getIndexColumns()[colNumber];
retColRefSet_.insert(colVID);
}
if (valId.getItemExpr()->getOperatorType() == ITM_HBASE_VERSION)
{
Lng32 colNumber = ((BaseColumn*)((HbaseVersion*)valId.getItemExpr())->col())->getColNumber();
ValueId colVID = getIndexDesc()->getIndexColumns()[colNumber];
retColRefSet_.insert(colVID);
}
}
}
// add key columns. If values are missing in hbase, then at least the key
// value is needed to retrieve a row.
// Only if needed: if the set already contains a non-nullable column that is
// not an added column with a non-null default, we should not need to add
// any other columns.
if (CmpCommon::getDefault(HBASE_FILTER_PREDS) == DF_MEDIUM && getMdamKeyPtr() == NULL){ //only enable column retrieval optimization with DF_MEDIUM and not for MDAM scan
bool needAddingNonNullableColumn = true; //assume we need to add one non nullable column
for (ValueId vid = retColRefSet_.init();// look at each column in the result set to see if one matches the criteria: non-nullable and not an added column with a non-null default
retColRefSet_.next(vid);
retColRefSet_.advance(vid))
{
if (originExePreds->isNotNullable(vid)){// it is non nullable
OperatorTypeEnum operatorType = vid.getItemExpr()->getOperatorType();
if ((operatorType == ITM_BASECOLUMN || operatorType == ITM_INDEXCOLUMN) && !vid.isColumnWithNonNullNonCurrentDefault()){//exclude added columns with a non-null or non-current default... not good candidates
needAddingNonNullableColumn = false; // we found one column meeting all criteria
break;
}
}
}
if (needAddingNonNullableColumn){ // ok now we need to add one key column that is not nullable
bool foundAtLeastOneKeyColumnNotNullable = false;
for(int i=getIndexDesc()->getIndexKey().entries()-1; i>=0;i--)// doing a reverse search makes sure we try to avoid using the _SALT_ column
// because _SALT_ is physically the last column, therefore we don't skip columns optimally if using the _SALT_ column
{
ValueId vaId = getIndexDesc()->getIndexKey()[i];
if ( (vaId.getItemExpr()->getOperatorType() == ITM_BASECOLUMN && !((BaseColumn*)vaId.getItemExpr())->getNAColumn()->getType()->supportsSQLnullPhysical())||
(vaId.getItemExpr()->getOperatorType() == ITM_INDEXCOLUMN && !((IndexColumn*)vaId.getItemExpr())->getNAColumn()->getType()->supportsSQLnullPhysical())
){ //found good key column candidate?
HbaseAccess::addReferenceFromItemExprTree(vaId.getItemExpr(),TRUE,FALSE,retColRefSet_); // add it
foundAtLeastOneKeyColumnNotNullable = true; //tag we found it
break; // no need to look further
}
}
if (!foundAtLeastOneKeyColumnNotNullable){//oh well, did not find any non-nullable key column, let's add all key columns
HbaseAccess::addColReferenceFromVIDlist(getIndexDesc()->getIndexKey(), retColRefSet_);
}
}
}else //end if DF_MEDIUM
HbaseAccess::addColReferenceFromVIDlist(getIndexDesc()->getIndexKey(), retColRefSet_);
}
if ((getMdamKeyPtr()) &&
((listOfRangeRows_.entries() > 0) ||
(listOfUniqueRows_.entries() > 0)))
{
GenAssert(0, "listOfRange/Unique cannot be used if mdam is chosen.");
return NULL;
}
// flag for both hive and hbase tables
generator->setHdfsAccess(TRUE);
if (!isUnique)
generator->oltOptInfo()->setMultipleRowsReturned(TRUE) ;
// Do not allow cancel of unique queries but allow cancel of queries
// that are part of a rowset operation.
if ((isUnique) &&
(NOT generator->oltOptInfo()->multipleRowsReturned()))
{
generator->setMayNotCancel(TRUE);
uniqueHbaseOper() = TRUE;
}
else
{
generator->oltOptInfo()->setOltCliOpt(FALSE);
if (isUnique)
{
if ((CmpCommon::getDefault(HBASE_ROWSET_VSBB_OPT) == DF_ON) &&
(NOT generator->isRIinliningForTrafIUD()) &&
(searchKey() && searchKey()->isUnique()))
{
uniqueRowsetHbaseOper() = TRUE;
}
}
}
// executorPred() contains an ANDed list of predicates.
// if hbase filter preds are enabled, then extract those preds from executorPred()
// that could be pushed down to hbase.
// Do this only for non-unique scan access.
if (CmpCommon::getDefault(HBASE_FILTER_PREDS) == DF_MINIMUM){ //keep the check for pushdown after column retrieval for pushdown V1.
if ((NOT isUnique) &&
(extractHbaseFilterPreds(generator, executorPred(), newExePreds)))
return this;
// if some filter preds were found, then initialize executor preds with new exe preds.
// newExePreds may be empty which means that all predicates were changed into
// hbase preds. In this case, nuke existing exe preds.
if (hbaseFilterColVIDlist_.entries() > 0)
setExecutorPredicates(newExePreds);
}//DF_MINIMUM
snpType_ = SNP_NONE;
DefaultToken tok = CmpCommon::getDefault(TRAF_TABLE_SNAPSHOT_SCAN);
if (tok == DF_LATEST)
//latest snapshot -- new way used with scan independent from bulk unload
snpType_= SNP_LATEST;
else if (tok == DF_SUFFIX)
//the existing way, where snapshot scan is used with bulk unload
snpType_ = SNP_SUFFIX;
markAsPreCodeGenned();
// Done.
return this;
}
RelExpr * HbaseAccessCoProcAggr::preCodeGen(Generator * generator,
const ValueIdSet & externalInputs,
ValueIdSet &pulledNewInputs)
{
if (nodeIsPreCodeGenned())
return this;
if (! HbaseAccess::preCodeGen(generator,externalInputs,pulledNewInputs))
return NULL;
// Rebuild the aggregate expressions tree
ValueIdSet availableValues;
getInputValuesFromParentAndChildren(availableValues);
aggregateExpr().replaceVEGExpressions
(availableValues,
getGroupAttr()->getCharacteristicInputs());
markAsPreCodeGenned();
// Done.
return this;
}
RelExpr * ExeUtilHbaseCoProcAggr::preCodeGen(Generator * generator,
const ValueIdSet & externalInputs,
ValueIdSet &pulledNewInputs)
{
if (nodeIsPreCodeGenned())
return this;
if (! ExeUtilExpr::preCodeGen(generator,externalInputs,pulledNewInputs))
return NULL;
// Rebuild the aggregate expressions tree
ValueIdSet availableValues;
getInputValuesFromParentAndChildren(availableValues);
aggregateExpr().replaceVEGExpressions
(availableValues,
getGroupAttr()->getCharacteristicInputs());
markAsPreCodeGenned();
// Done.
return this;
}
RelExpr * ExeUtilOrcFastAggr::preCodeGen(Generator * generator,
const ValueIdSet & externalInputs,
ValueIdSet &pulledNewInputs)
{
if (nodeIsPreCodeGenned())
return this;
if (! ExeUtilExpr::preCodeGen(generator,externalInputs,pulledNewInputs))
return NULL;
// Rebuild the aggregate expressions tree
ValueIdSet availableValues;
getInputValuesFromParentAndChildren(availableValues);
aggregateExpr().replaceVEGExpressions
(availableValues,
getGroupAttr()->getCharacteristicInputs());
markAsPreCodeGenned();
// Done.
return this;
}
| 1 | 15,981 | Need to ensure this is set to TRUE for LOB datatype access too, not just for Hive. | apache-trafodion | cpp |
@@ -1772,7 +1772,11 @@ class Series(_Frame, IndexOpsMixin):
# ----------------------------------------------------------------------
- def _reduce_for_stat_function(self, sfun):
+ def _reduce_for_stat_function(self, sfun, numeric_only=None):
+ """
+ :param sfun: the stats function to be used for aggregation
+ :param numeric_only: not used by this implementation, but passed down by stats functions
+ """
from inspect import signature
num_args = len(signature(sfun).parameters)
col_sdf = self._scol | 1 | #
# Copyright (C) 2019 Databricks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
A wrapper class for Spark Column to behave similarly to pandas Series.
"""
import re
import inspect
from functools import partial, wraps
from typing import Any, Optional, List, Union
import numpy as np
import pandas as pd
from pandas.core.accessor import CachedAccessor
from pyspark import sql as spark
from pyspark.sql import functions as F
from pyspark.sql.types import BooleanType, StructType
from databricks import koalas as ks # For running doctests and reference resolution in PyCharm.
from databricks.koalas.base import IndexOpsMixin
from databricks.koalas.frame import DataFrame
from databricks.koalas.generic import _Frame, max_display_count
from databricks.koalas.metadata import Metadata
from databricks.koalas.missing.series import _MissingPandasLikeSeries
from databricks.koalas.utils import validate_arguments_and_invoke_function
from databricks.koalas.datetimes import DatetimeMethods
# This regular expression pattern is compiled and defined here to avoid compiling the same
# pattern every time it is used in _repr_ in Series.
# This pattern basically seeks the footer string from Pandas'
REPR_PATTERN = re.compile(r"Length: (?P<length>[0-9]+)")
_flex_doc_SERIES = """
Return {desc} of series and other, element-wise (binary operator `{op_name}`).
Equivalent to ``{equiv}``, but with support to substitute a fill_value for
missing data in one of the inputs.
Parameters
----------
other : Series or scalar value
fill_value : None or float value, default None (NaN)
Fill existing missing (NaN) values, and any new element needed for
successful Series alignment, with this value before computation.
If data in both corresponding Series locations is missing
the result will be missing.
level : int or name
Broadcast across a level, matching Index values on the
passed MultiIndex level.
Returns
-------
Series
The result of the operation.
See Also
--------
Series.{reverse}
{series_examples}
"""
_add_example_SERIES = """
Examples
--------
>>> df = ks.DataFrame({'a': [1, 1, 1, np.nan],
... 'b': [1, np.nan, 1, np.nan]},
... index=['a', 'b', 'c', 'd'], columns=['a', 'b'])
>>> df
a b
a 1.0 1.0
b 1.0 NaN
c 1.0 1.0
d NaN NaN
>>> df.a.add(df.b)
a 2.0
b NaN
c 2.0
d NaN
Name: a, dtype: float64
"""
_sub_example_SERIES = """
Examples
--------
>>> df = ks.DataFrame({'a': [1, 1, 1, np.nan],
... 'b': [1, np.nan, 1, np.nan]},
... index=['a', 'b', 'c', 'd'], columns=['a', 'b'])
>>> df
a b
a 1.0 1.0
b 1.0 NaN
c 1.0 1.0
d NaN NaN
>>> df.a.subtract(df.b)
a 0.0
b NaN
c 0.0
d NaN
Name: a, dtype: float64
"""
_mul_example_SERIES = """
Examples
--------
>>> df = ks.DataFrame({'a': [2, 2, 4, np.nan],
... 'b': [2, np.nan, 2, np.nan]},
... index=['a', 'b', 'c', 'd'], columns=['a', 'b'])
>>> df
a b
a 2.0 2.0
b 2.0 NaN
c 4.0 2.0
d NaN NaN
>>> df.a.multiply(df.b)
a 4.0
b NaN
c 8.0
d NaN
Name: a, dtype: float64
"""
_div_example_SERIES = """
Examples
--------
>>> df = ks.DataFrame({'a': [2, 2, 4, np.nan],
... 'b': [2, np.nan, 2, np.nan]},
... index=['a', 'b', 'c', 'd'], columns=['a', 'b'])
>>> df
a b
a 2.0 2.0
b 2.0 NaN
c 4.0 2.0
d NaN NaN
>>> df.a.divide(df.b)
a 1.0
b NaN
c 2.0
d NaN
Name: a, dtype: float64
"""
class Series(_Frame, IndexOpsMixin):
"""
Koalas Series that corresponds to Pandas Series logically. This holds Spark Column
internally.
:ivar _scol: Spark Column instance
:type _scol: pyspark.Column
:ivar _kdf: Parent's Koalas DataFrame
:type _kdf: ks.DataFrame
:ivar _index_map: Each pair holds the index field name which exists in Spark fields,
and the index name.
Parameters
----------
data : array-like, dict, or scalar value, Pandas Series or Spark Column
Contains data stored in Series
If data is a dict, argument order is maintained for Python 3.6
and later.
Note that if `data` is a Pandas Series, other arguments should not be used.
If `data` is a Spark Column, all other arguments except `index` should not be used.
index : array-like or Index (1d)
Values must be hashable and have the same length as `data`.
Non-unique index values are allowed. Will default to
RangeIndex (0, 1, 2, ..., n) if not provided. If both a dict and index
sequence are used, the index will override the keys found in the
dict.
If `data` is a Spark DataFrame, `index` is expected to be `Metadata`s `index_map`.
dtype : numpy.dtype or None
If None, dtype will be inferred
copy : boolean, default False
Copy input data
"""
def __init__(self, data=None, index=None, dtype=None, name=None, copy=False, fastpath=False,
anchor=None):
if isinstance(data, pd.Series):
assert index is None
assert dtype is None
assert name is None
assert not copy
assert anchor is None
assert not fastpath
self._init_from_pandas(data)
elif isinstance(data, spark.Column):
assert dtype is None
assert name is None
assert not copy
assert not fastpath
self._init_from_spark(data, anchor, index)
else:
s = pd.Series(
data=data, index=index, dtype=dtype, name=name, copy=copy, fastpath=fastpath)
self._init_from_pandas(s)
def _init_from_pandas(self, s):
"""
Creates Koalas Series from Pandas Series.
:param s: Pandas Series
"""
kdf = DataFrame(pd.DataFrame(s))
self._init_from_spark(kdf._sdf[kdf._metadata.data_columns[0]],
kdf, kdf._metadata.index_map)
def _init_from_spark(self, scol, kdf, index_map):
"""
Creates Koalas Series from Spark Column.
:param scol: Spark Column
:param kdf: Koalas DataFrame that should have the `scol`.
:param index_map: index information of this Series.
"""
assert index_map is not None
assert kdf is not None
assert isinstance(kdf, ks.DataFrame), type(kdf)
self._scol = scol
self._kdf = kdf
self._index_map = index_map
def _with_new_scol(self, scol: spark.Column) -> 'Series':
"""
Copy Koalas Series with the new Spark Column.
:param scol: the new Spark Column
:return: the copied Series
"""
return Series(scol, anchor=self._kdf, index=self._index_map)
@property
def dtypes(self):
"""Return the dtype object of the underlying data.
>>> s = ks.Series(list('abc'))
>>> s.dtype == s.dtypes
True
"""
return self.dtype
@property
def spark_type(self):
""" Returns the data type as defined by Spark, as a Spark DataType object."""
return self.schema.fields[-1].dataType
# Arithmetic Operators
def add(self, other):
return (self + other).rename(self.name)
add.__doc__ = _flex_doc_SERIES.format(
desc='Addition',
op_name="+",
equiv="series + other",
reverse='radd',
series_examples=_add_example_SERIES)
def radd(self, other):
return (other + self).rename(self.name)
radd.__doc__ = _flex_doc_SERIES.format(
desc='Addition',
op_name="+",
equiv="other + series",
reverse='add',
series_examples=_add_example_SERIES)
def div(self, other):
return (self / other).rename(self.name)
div.__doc__ = _flex_doc_SERIES.format(
desc='Floating division',
op_name="/",
equiv="series / other",
reverse='rdiv',
series_examples=_div_example_SERIES)
divide = div
def rdiv(self, other):
return (other / self).rename(self.name)
rdiv.__doc__ = _flex_doc_SERIES.format(
desc='Floating division',
op_name="/",
equiv="other / series",
reverse='div',
series_examples=_div_example_SERIES)
def truediv(self, other):
return (self / other).rename(self.name)
truediv.__doc__ = _flex_doc_SERIES.format(
desc='Floating division',
op_name="/",
equiv="series / other",
reverse='rtruediv',
series_examples=_div_example_SERIES)
def rtruediv(self, other):
return (other / self).rename(self.name)
rtruediv.__doc__ = _flex_doc_SERIES.format(
desc='Floating division',
op_name="/",
equiv="other / series",
reverse='truediv',
series_examples=_div_example_SERIES)
def mul(self, other):
return (self * other).rename(self.name)
mul.__doc__ = _flex_doc_SERIES.format(
desc='Multiplication',
op_name="*",
equiv="series * other",
reverse='rmul',
series_examples=_mul_example_SERIES)
multiply = mul
def rmul(self, other):
return (other * self).rename(self.name)
rmul.__doc__ = _flex_doc_SERIES.format(
desc='Multiplication',
op_name="*",
equiv="other * series",
reverse='mul',
series_examples=_mul_example_SERIES)
def sub(self, other):
return (self - other).rename(self.name)
sub.__doc__ = _flex_doc_SERIES.format(
desc='Subtraction',
op_name="-",
equiv="series - other",
reverse='rsub',
series_examples=_sub_example_SERIES)
subtract = sub
def rsub(self, other):
return (other - self).rename(self.name)
rsub.__doc__ = _flex_doc_SERIES.format(
desc='Subtraction',
op_name="-",
equiv="other - series",
reverse='sub',
series_examples=_sub_example_SERIES)
# TODO: arg should support Series
# TODO: NaN and None
def map(self, arg):
"""
Map values of Series according to input correspondence.
Used for substituting each value in a Series with another value,
that may be derived from a function, a ``dict``.
.. note:: make sure the size of the dictionary is not huge because it could
downgrade the performance or throw OutOfMemoryError due to a huge
expression within Spark. Consider using a function as an
alternative instead in this case.
Parameters
----------
arg : function or dict
Mapping correspondence.
Returns
-------
Series
Same index as caller.
See Also
--------
Series.apply : For applying more complex functions on a Series.
DataFrame.applymap : Apply a function elementwise on a whole DataFrame.
Notes
-----
When ``arg`` is a dictionary, values in Series that are not in the
dictionary (as keys) are converted to ``None``. However, if the
dictionary is a ``dict`` subclass that defines ``__missing__`` (i.e.
provides a method for default values), then this default is used
rather than ``None``.
Examples
--------
>>> s = ks.Series(['cat', 'dog', None, 'rabbit'])
>>> s
0 cat
1 dog
2 None
3 rabbit
Name: 0, dtype: object
``map`` accepts a ``dict``. Values that are not found
in the ``dict`` are converted to ``None``, unless the dict has a default
value (e.g. ``defaultdict``):
>>> s.map({'cat': 'kitten', 'dog': 'puppy'})
0 kitten
1 puppy
2 None
3 None
Name: 0, dtype: object
It also accepts a function:
>>> def format(x) -> str:
... return 'I am a {}'.format(x)
>>> s.map(format)
0 I am a cat
1 I am a dog
2 I am a None
3 I am a rabbit
Name: 0, dtype: object
"""
if isinstance(arg, dict):
is_start = True
# In case dictionary is empty.
current = F.when(F.lit(False), F.lit(None).cast(self.spark_type))
for to_replace, value in arg.items():
if is_start:
current = F.when(self._scol == F.lit(to_replace), value)
is_start = False
else:
current = current.when(self._scol == F.lit(to_replace), value)
if hasattr(arg, "__missing__"):
tmp_val = arg[np._NoValue]
del arg[np._NoValue] # Remove in case it's set in defaultdict.
current = current.otherwise(F.lit(tmp_val))
else:
current = current.otherwise(F.lit(None).cast(self.spark_type))
return Series(current, anchor=self._kdf, index=self._index_map).rename(self.name)
else:
return self.apply(arg)
def astype(self, dtype) -> 'Series':
"""
Cast a Koalas object to a specified dtype ``dtype``.
Parameters
----------
dtype : data type
Use a numpy.dtype or Python type to cast entire pandas object to
the same type.
Returns
-------
casted : same type as caller
See Also
--------
to_datetime : Convert argument to datetime.
Examples
--------
>>> ser = ks.Series([1, 2], dtype='int32')
>>> ser
0 1
1 2
Name: 0, dtype: int32
>>> ser.astype('int64')
0 1
1 2
Name: 0, dtype: int64
"""
from databricks.koalas.typedef import as_spark_type
spark_type = as_spark_type(dtype)
if not spark_type:
raise ValueError("Type {} not understood".format(dtype))
return Series(self._scol.cast(spark_type), anchor=self._kdf, index=self._index_map)
def getField(self, name):
if not isinstance(self.schema, StructType):
raise AttributeError("Not a struct: {}".format(self.schema))
else:
fnames = self.schema.fieldNames()
if name not in fnames:
raise AttributeError(
"Field {} not found, possible values are {}".format(name, ", ".join(fnames)))
return Series(self._scol.getField(name), anchor=self._kdf, index=self._index_map)
def alias(self, name):
"""An alias for :meth:`Series.rename`."""
return self.rename(name)
@property
def schema(self) -> StructType:
"""Return the underlying Spark DataFrame's schema."""
return self.to_dataframe()._sdf.schema
@property
def shape(self):
"""Return a tuple of the shape of the underlying data."""
return len(self),
@property
def name(self) -> str:
"""Return name of the Series."""
return self._metadata.data_columns[0]
@name.setter
def name(self, name):
self.rename(name, inplace=True)
# TODO: Functionality and documentation should be matched. Currently, changing index labels
# taking dictionary and function to change index are not supported.
def rename(self, index=None, **kwargs):
"""
Alter Series name.
Parameters
----------
index : scalar
Scalar will alter the ``Series.name`` attribute.
inplace : bool, default False
Whether to return a new Series. If True then value of copy is
ignored.
Returns
-------
Series
Series with name altered.
Examples
--------
>>> s = ks.Series([1, 2, 3])
>>> s
0 1
1 2
2 3
Name: 0, dtype: int64
>>> s.rename("my_name") # scalar, changes Series.name
0 1
1 2
2 3
Name: my_name, dtype: int64
"""
if index is None:
return self
scol = self._scol.alias(index)
if kwargs.get('inplace', False):
self._scol = scol
return self
else:
return Series(scol, anchor=self._kdf, index=self._index_map)
@property
def _metadata(self):
return self.to_dataframe()._metadata
@property
def index(self):
"""The index (axis labels) Column of the Series.
Currently not supported when the DataFrame has no index.
See Also
--------
Index
"""
return self._kdf.index
@property
def is_unique(self):
"""
Return boolean if values in the object are unique
Returns
-------
is_unique : boolean
>>> ks.Series([1, 2, 3]).is_unique
True
>>> ks.Series([1, 2, 2]).is_unique
False
>>> ks.Series([1, 2, 3, None]).is_unique
True
"""
sdf = self._kdf._sdf.select(self._scol)
col = self._scol
# Here we check:
# 1. the distinct count without nulls and count without nulls for non-null values
# 2. count null values and see if null is a distinct value.
#
# This workaround is in order to calculate the distinct count including nulls in
# single pass. Note that COUNT(DISTINCT expr) in Spark is designed to ignore nulls.
return sdf.select(
(F.count(col) == F.countDistinct(col)) &
(F.count(F.when(col.isNull(), 1).otherwise(None)) <= 1)
).collect()[0][0]
def reset_index(self, level=None, drop=False, name=None, inplace=False):
"""
Generate a new DataFrame or Series with the index reset.
This is useful when the index needs to be treated as a column,
or when the index is meaningless and needs to be reset
to the default before another operation.
Parameters
----------
level : int, str, tuple, or list, default optional
For a Series with a MultiIndex, only remove the specified levels from the index.
Removes all levels by default.
drop : bool, default False
Just reset the index, without inserting it as a column in the new DataFrame.
name : object, optional
The name to use for the column containing the original Series values.
Uses self.name by default. This argument is ignored when drop is True.
inplace : bool, default False
Modify the Series in place (do not create a new object).
Returns
-------
Series or DataFrame
When `drop` is False (the default), a DataFrame is returned.
The newly created columns will come first in the DataFrame,
followed by the original Series values.
When `drop` is True, a `Series` is returned.
In either case, if ``inplace=True``, no value is returned.
Examples
--------
>>> s = ks.Series([1, 2, 3, 4], name='foo',
... index=pd.Index(['a', 'b', 'c', 'd'], name='idx'))
Generate a DataFrame with default index.
>>> s.reset_index()
idx foo
0 a 1
1 b 2
2 c 3
3 d 4
To specify the name of the new column use `name`.
>>> s.reset_index(name='values')
idx values
0 a 1
1 b 2
2 c 3
3 d 4
To generate a new Series with the default set `drop` to True.
>>> s.reset_index(drop=True)
0 1
1 2
2 3
3 4
Name: foo, dtype: int64
To update the Series in place, without generating a new one
set `inplace` to True. Note that it also requires ``drop=True``.
>>> s.reset_index(inplace=True, drop=True)
>>> s
0 1
1 2
2 3
3 4
Name: foo, dtype: int64
"""
if inplace and not drop:
raise TypeError('Cannot reset_index inplace on a Series to create a DataFrame')
if name is not None:
kdf = self.rename(name).to_dataframe()
else:
kdf = self.to_dataframe()
kdf = kdf.reset_index(level=level, drop=drop)
if drop:
s = _col(kdf)
if inplace:
self._kdf = kdf
self._scol = s._scol
self._index_map = s._index_map
else:
return s
else:
return kdf
def to_dataframe(self) -> spark.DataFrame:
sdf = self._kdf._sdf.select([field for field, _ in self._index_map] + [self._scol])
metadata = Metadata(data_columns=[sdf.schema[-1].name], index_map=self._index_map)
return DataFrame(sdf, metadata)
def to_string(self, buf=None, na_rep='NaN', float_format=None, header=True,
index=True, length=False, dtype=False, name=False,
max_rows=None):
"""
Render a string representation of the Series.
.. note:: This method should only be used if the resulting Pandas object is expected
to be small, as all the data is loaded into the driver's memory. If the input
is large, set max_rows parameter.
Parameters
----------
buf : StringIO-like, optional
buffer to write to
na_rep : string, optional
string representation of NAN to use, default 'NaN'
float_format : one-parameter function, optional
formatter function to apply to columns' elements if they are floats
default None
header : boolean, default True
Add the Series header (index name)
index : bool, optional
Add index (row) labels, default True
length : boolean, default False
Add the Series length
dtype : boolean, default False
Add the Series dtype
name : boolean, default False
Add the Series name if not None
max_rows : int, optional
Maximum number of rows to show before truncating. If None, show
all.
Returns
-------
formatted : string (if not buffer passed)
Examples
--------
>>> df = ks.DataFrame([(.2, .3), (.0, .6), (.6, .0), (.2, .1)], columns=['dogs', 'cats'])
>>> print(df['dogs'].to_string())
0 0.2
1 0.0
2 0.6
3 0.2
>>> print(df['dogs'].to_string(max_rows=2))
0 0.2
1 0.0
"""
# Make sure locals() call is at the top of the function so we don't capture local variables.
args = locals()
if max_rows is not None:
kseries = self.head(max_rows)
else:
kseries = self
return validate_arguments_and_invoke_function(
kseries.to_pandas(), self.to_string, pd.Series.to_string, args)
def to_clipboard(self, excel=True, sep=None, **kwargs):
# Docstring defined below by reusing DataFrame.to_clipboard's.
args = locals()
kseries = self
return validate_arguments_and_invoke_function(
kseries.to_pandas(), self.to_clipboard, pd.Series.to_clipboard, args)
to_clipboard.__doc__ = DataFrame.to_clipboard.__doc__
def to_dict(self, into=dict):
"""
Convert Series to {label -> value} dict or dict-like object.
.. note:: This method should only be used if the resulting Pandas DataFrame is expected
to be small, as all the data is loaded into the driver's memory.
Parameters
----------
into : class, default dict
The collections.abc.Mapping subclass to use as the return
object. Can be the actual class or an empty
instance of the mapping type you want. If you want a
collections.defaultdict, you must pass it initialized.
Returns
-------
collections.abc.Mapping
Key-value representation of Series.
Examples
--------
>>> s = ks.Series([1, 2, 3, 4])
>>> s_dict = s.to_dict()
>>> sorted(s_dict.items())
[(0, 1), (1, 2), (2, 3), (3, 4)]
>>> from collections import OrderedDict, defaultdict
>>> s.to_dict(OrderedDict)
OrderedDict([(0, 1), (1, 2), (2, 3), (3, 4)])
>>> dd = defaultdict(list)
>>> s.to_dict(dd) # doctest: +ELLIPSIS
defaultdict(<class 'list'>, {...})
"""
# Make sure locals() call is at the top of the function so we don't capture local variables.
args = locals()
kseries = self
return validate_arguments_and_invoke_function(
kseries.to_pandas(), self.to_dict, pd.Series.to_dict, args)
def to_latex(self, buf=None, columns=None, col_space=None, header=True, index=True,
na_rep='NaN', formatters=None, float_format=None, sparsify=None, index_names=True,
bold_rows=False, column_format=None, longtable=None, escape=None, encoding=None,
decimal='.', multicolumn=None, multicolumn_format=None, multirow=None):
args = locals()
kseries = self
return validate_arguments_and_invoke_function(
kseries.to_pandas(), self.to_latex, pd.Series.to_latex, args)
to_latex.__doc__ = DataFrame.to_latex.__doc__
def to_pandas(self):
"""
Return a pandas Series.
.. note:: This method should only be used if the resulting Pandas object is expected
to be small, as all the data is loaded into the driver's memory. If the input
is large, set max_rows parameter.
Examples
--------
>>> df = ks.DataFrame([(.2, .3), (.0, .6), (.6, .0), (.2, .1)], columns=['dogs', 'cats'])
>>> df['dogs'].to_pandas()
0 0.2
1 0.0
2 0.6
3 0.2
Name: dogs, dtype: float64
"""
return _col(self.to_dataframe().toPandas())
# Alias to maintain backward compatibility with Spark
toPandas = to_pandas
def to_list(self):
"""
Return a list of the values.
These are each a scalar type, which is a Python scalar
(for str, int, float) or a pandas scalar
(for Timestamp/Timedelta/Interval/Period)
.. note:: This method should only be used if the resulting list is expected
to be small, as all the data is loaded into the driver's memory.
"""
return self.to_pandas().to_list()
tolist = to_list
def fillna(self, value=None, axis=None, inplace=False):
"""Fill NA/NaN values.
Parameters
----------
value : scalar
Value to use to fill holes.
axis : {0 or `index`}
1 and `columns` are not supported.
inplace : boolean, default False
Fill in place (do not create a new object)
Returns
-------
Series
Series with NA entries filled.
Examples
--------
>>> s = ks.Series([np.nan, 2, 3, 4, np.nan, 6], name='x')
>>> s
0 NaN
1 2.0
2 3.0
3 4.0
4 NaN
5 6.0
Name: x, dtype: float64
Replace all NaN elements with 0s.
>>> s.fillna(0)
0 0.0
1 2.0
2 3.0
3 4.0
4 0.0
5 6.0
Name: x, dtype: float64
"""
ks = _col(self.to_dataframe().fillna(value=value, axis=axis, inplace=False))
if inplace:
self._kdf = ks._kdf
self._scol = ks._scol
else:
return ks
def dropna(self, axis=0, inplace=False, **kwargs):
"""
Return a new Series with missing values removed.
Parameters
----------
axis : {0 or 'index'}, default 0
There is only one axis to drop values from.
inplace : bool, default False
If True, do operation inplace and return None.
**kwargs
Not in use.
Returns
-------
Series
Series with NA entries dropped from it.
Examples
--------
>>> ser = ks.Series([1., 2., np.nan])
>>> ser
0 1.0
1 2.0
2 NaN
Name: 0, dtype: float64
Drop NA values from a Series.
>>> ser.dropna()
0 1.0
1 2.0
Name: 0, dtype: float64
Keep the Series with valid entries in the same variable.
>>> ser.dropna(inplace=True)
>>> ser
0 1.0
1 2.0
Name: 0, dtype: float64
"""
# TODO: last two examples from Pandas produce different results.
kser = _col(self.to_dataframe().dropna(axis=axis, inplace=False))
if inplace:
self._kdf = kser._kdf
self._scol = kser._scol
else:
return kser
def clip(self, lower: Union[float, int] = None, upper: Union[float, int] = None) -> 'Series':
"""
Trim values at input threshold(s).
Assigns values outside boundary to boundary values.
Parameters
----------
lower : float or int, default None
Minimum threshold value. All values below this threshold will be set to it.
upper : float or int, default None
Maximum threshold value. All values above this threshold will be set to it.
Returns
-------
Series
Series with the values outside the clip boundaries replaced
Examples
--------
>>> ks.Series([0, 2, 4]).clip(1, 3)
0 1
1 2
2 3
Name: 0, dtype: int64
Notes
-----
One difference between this implementation and pandas is that running
`pd.Series(['a', 'b']).clip(0, 1)` will crash with "TypeError: '<=' not supported between
instances of 'str' and 'int'" while `ks.Series(['a', 'b']).clip(0, 1)` will output the
original Series, simply ignoring the incompatible types.
"""
return _col(self.to_dataframe().clip(lower, upper))
def head(self, n=5):
"""
Return the first n rows.
This function returns the first n rows for the object based on position.
It is useful for quickly testing if your object has the right type of data in it.
Parameters
----------
n : Integer, default = 5
Returns
-------
The first n rows of the caller object.
Examples
--------
>>> df = ks.DataFrame({'animal':['alligator', 'bee', 'falcon', 'lion']})
>>> df.animal.head(2) # doctest: +NORMALIZE_WHITESPACE
0 alligator
1 bee
Name: animal, dtype: object
"""
return _col(self.to_dataframe().head(n))
# TODO: Categorical type isn't supported (due to PySpark's limitation) and
# some doctests related with timestamps were not added.
def unique(self):
"""
Return unique values of Series object.
Uniques are returned in order of appearance. Hash table-based unique,
therefore does NOT sort.
.. note:: This method returns a newly created Series whereas Pandas returns
the unique values as a NumPy array.
Returns
-------
Returns the unique values as a Series.
See Examples section.
Examples
--------
>>> ks.Series([2, 1, 3, 3], name='A').unique()
0 1
1 3
2 2
Name: A, dtype: int64
>>> ks.Series([pd.Timestamp('2016-01-01') for _ in range(3)]).unique()
0 2016-01-01
Name: 0, dtype: datetime64[ns]
"""
sdf = self.to_dataframe()._sdf
return _col(DataFrame(sdf.select(self._scol).distinct()))
def nunique(self, dropna: bool = True, approx: bool = False, rsd: float = 0.05) -> int:
"""
Return number of unique elements in the object.
Excludes NA values by default.
Parameters
----------
dropna : bool, default True
Don’t include NaN in the count.
approx: bool, default False
If False, will use the exact algorithm and return the exact number of unique.
If True, it uses the HyperLogLog approximate algorithm, which is significantly faster
for large amount of data.
Note: This parameter is specific to Koalas and is not found in pandas.
rsd: float, default 0.05
Maximum estimation error allowed in the HyperLogLog algorithm.
Note: Just like ``approx`` this parameter is specific to Koalas.
Returns
-------
The number of unique values as an int.
Examples
--------
>>> ks.Series([1, 2, 3, np.nan]).nunique()
3
>>> ks.Series([1, 2, 3, np.nan]).nunique(dropna=False)
4
On big data, we recommend using the approximate algorithm to speed up this function.
The result will be very close to the exact unique count.
>>> ks.Series([1, 2, 3, np.nan]).nunique(approx=True)
3
"""
return self.to_dataframe().nunique(dropna=dropna, approx=approx, rsd=rsd).iloc[0]
# TODO: Update Documentation for Bins Parameter when its supported
def value_counts(self, normalize=False, sort=True, ascending=False, bins=None, dropna=True):
"""
Return a Series containing counts of unique values.
The resulting object will be in descending order so that the
first element is the most frequently-occurring element.
Excludes NA values by default.
Parameters
----------
normalize : boolean, default False
If True then the object returned will contain the relative
frequencies of the unique values.
sort : boolean, default True
Sort by values.
ascending : boolean, default False
Sort in ascending order.
bins : Not Yet Supported
dropna : boolean, default True
Don't include counts of NaN.
Returns
-------
counts : Series
See Also
--------
Series.count: Number of non-NA elements in a Series.
Examples
--------
>>> df = ks.DataFrame({'x':[0, 0, 1, 1, 1, np.nan]})
>>> df.x.value_counts() # doctest: +NORMALIZE_WHITESPACE
1.0 3
0.0 2
Name: x, dtype: int64
With `normalize` set to `True`, returns the relative frequency by
dividing all values by the sum of values.
>>> df.x.value_counts(normalize=True) # doctest: +NORMALIZE_WHITESPACE
1.0 0.6
0.0 0.4
Name: x, dtype: float64
**dropna**
With `dropna` set to `False` we can also see NaN index values.
>>> df.x.value_counts(dropna=False) # doctest: +NORMALIZE_WHITESPACE
1.0 3
0.0 2
NaN 1
Name: x, dtype: int64
"""
if bins is not None:
raise NotImplementedError("value_counts currently does not support bins")
if dropna:
sdf_dropna = self._kdf._sdf.filter(self.notna()._scol)
else:
sdf_dropna = self._kdf._sdf
sdf = sdf_dropna.groupby(self._scol).count()
if sort:
if ascending:
sdf = sdf.orderBy(F.col('count'))
else:
sdf = sdf.orderBy(F.col('count').desc())
if normalize:
sum = sdf_dropna.count()
sdf = sdf.withColumn('count', F.col('count') / F.lit(sum))
index_name = 'index' if self.name != 'index' else 'level_0'
kdf = DataFrame(sdf)
kdf.columns = [index_name, self.name]
kdf._metadata = Metadata(data_columns=[self.name], index_map=[(index_name, None)])
return _col(kdf)
def sort_values(self, ascending: bool = True, inplace: bool = False,
na_position: str = 'last') -> Union['Series', None]:
"""
Sort by the values.
Sort a Series in ascending or descending order by some criterion.
Parameters
----------
ascending : bool or list of bool, default True
Sort ascending vs. descending. Specify list for multiple sort
orders. If this is a list of bools, must match the length of
the by.
inplace : bool, default False
if True, perform operation in-place
na_position : {'first', 'last'}, default 'last'
`first` puts NaNs at the beginning, `last` puts NaNs at the end
Returns
-------
sorted_obj : Series ordered by values.
Examples
--------
>>> s = ks.Series([np.nan, 1, 3, 10, 5])
>>> s
0 NaN
1 1.0
2 3.0
3 10.0
4 5.0
Name: 0, dtype: float64
Sort values ascending order (default behaviour)
>>> s.sort_values(ascending=True)
1 1.0
2 3.0
4 5.0
3 10.0
0 NaN
Name: 0, dtype: float64
Sort values descending order
>>> s.sort_values(ascending=False)
3 10.0
4 5.0
2 3.0
1 1.0
0 NaN
Name: 0, dtype: float64
Sort values inplace
>>> s.sort_values(ascending=False, inplace=True)
>>> s
3 10.0
4 5.0
2 3.0
1 1.0
0 NaN
Name: 0, dtype: float64
Sort values putting NAs first
>>> s.sort_values(na_position='first')
0 NaN
1 1.0
2 3.0
4 5.0
3 10.0
Name: 0, dtype: float64
Sort a series of strings
>>> s = ks.Series(['z', 'b', 'd', 'a', 'c'])
>>> s
0 z
1 b
2 d
3 a
4 c
Name: 0, dtype: object
>>> s.sort_values()
3 a
1 b
4 c
2 d
0 z
Name: 0, dtype: object
"""
ks_ = _col(self.to_dataframe().sort_values(by=self.name, ascending=ascending,
na_position=na_position))
if inplace:
self._kdf = ks_.to_dataframe()
self._scol = ks_._scol
self._index_map = ks_._index_map
return None
else:
return ks_
def sort_index(self, axis: int = 0, level: int = None, ascending: bool = True,
inplace: bool = False, kind: str = None, na_position: str = 'last') \
-> Optional['Series']:
"""
Sort object by labels (along an axis)
Parameters
----------
axis : index, columns to direct sorting. Currently, only axis = 0 is supported.
level : int or level name or list of ints or list of level names
if not None, sort on values in specified index level(s)
ascending : boolean, default True
Sort ascending vs. descending
inplace : bool, default False
if True, perform operation in-place
kind : str, default None
Koalas does not allow specifying the sorting algorithm at the moment, default None
na_position : {‘first’, ‘last’}, default ‘last’
first puts NaNs at the beginning, last puts NaNs at the end. Not implemented for
MultiIndex.
Returns
-------
sorted_obj : Series
Examples
--------
>>> df = ks.Series([2, 1, np.nan], index=['b', 'a', np.nan])
>>> df.sort_index()
a 1.0
b 2.0
NaN NaN
Name: 0, dtype: float64
>>> df.sort_index(ascending=False)
b 2.0
a 1.0
NaN NaN
Name: 0, dtype: float64
>>> df.sort_index(na_position='first')
NaN NaN
a 1.0
b 2.0
Name: 0, dtype: float64
>>> df.sort_index(inplace=True)
>>> df
a 1.0
b 2.0
NaN NaN
Name: 0, dtype: float64
>>> ks.Series(range(4), index=[['b', 'b', 'a', 'a'], [1, 0, 1, 0]], name='0').sort_index()
a 0 3
1 2
b 0 1
1 0
Name: 0, dtype: int64
"""
if axis != 0:
raise ValueError("No other axes than 0 are supported at the moment")
if level is not None:
raise ValueError("The 'level' argument is not supported at the moment")
if kind is not None:
raise ValueError("Specifying the sorting algorithm is not supported at the moment.")
ks_ = _col(self.to_dataframe().sort_values(by=self._metadata.index_columns,
ascending=ascending, na_position=na_position))
if inplace:
self._kdf = ks_.to_dataframe()
self._scol = ks_._scol
self._index_map = ks_._index_map
return None
else:
return ks_
def add_prefix(self, prefix):
"""
Prefix labels with string `prefix`.
For Series, the row labels are prefixed.
For DataFrame, the column labels are prefixed.
Parameters
----------
prefix : str
The string to add before each label.
Returns
-------
Series
New Series with updated labels.
See Also
--------
Series.add_suffix: Suffix column labels with string `suffix`.
DataFrame.add_suffix: Suffix column labels with string `suffix`.
DataFrame.add_prefix: Prefix column labels with string `prefix`.
Examples
--------
>>> s = ks.Series([1, 2, 3, 4])
>>> s
0 1
1 2
2 3
3 4
Name: 0, dtype: int64
>>> s.add_prefix('item_')
item_0 1
item_1 2
item_2 3
item_3 4
Name: 0, dtype: int64
"""
assert isinstance(prefix, str)
kdf = self.to_dataframe()
metadata = kdf._metadata
sdf = kdf._sdf
kdf._sdf = sdf.select([F.concat(F.lit(prefix), sdf[index_column]).alias(index_column)
for index_column in metadata.index_columns] + metadata.data_columns)
return Series(self._scol, anchor=kdf, index=self._index_map)
def add_suffix(self, suffix):
"""
Suffix labels with string suffix.
For Series, the row labels are suffixed.
For DataFrame, the column labels are suffixed.
Parameters
----------
suffix : str
The string to add after each label.
Returns
-------
Series
New Series with updated labels.
See Also
--------
Series.add_prefix: Prefix row labels with string `prefix`.
DataFrame.add_prefix: Prefix column labels with string `prefix`.
DataFrame.add_suffix: Suffix column labels with string `suffix`.
Examples
--------
>>> s = ks.Series([1, 2, 3, 4])
>>> s
0 1
1 2
2 3
3 4
Name: 0, dtype: int64
>>> s.add_suffix('_item')
0_item 1
1_item 2
2_item 3
3_item 4
Name: 0, dtype: int64
"""
assert isinstance(suffix, str)
kdf = self.to_dataframe()
metadata = kdf._metadata
sdf = kdf._sdf
kdf._sdf = sdf.select([F.concat(sdf[index_column], F.lit(suffix)).alias(index_column)
for index_column in metadata.index_columns] + metadata.data_columns)
return Series(self._scol, anchor=kdf, index=self._index_map)
def corr(self, other, method='pearson'):
"""
Compute correlation with `other` Series, excluding missing values.
Parameters
----------
other : Series
method : {'pearson', 'spearman'}
* pearson : standard correlation coefficient
* spearman : Spearman rank correlation
Returns
-------
correlation : float
Examples
--------
>>> df = ks.DataFrame({'s1': [.2, .0, .6, .2],
... 's2': [.3, .6, .0, .1]})
>>> s1 = df.s1
>>> s2 = df.s2
>>> s1.corr(s2, method='pearson') # doctest: +ELLIPSIS
-0.851064...
>>> s1.corr(s2, method='spearman') # doctest: +ELLIPSIS
-0.948683...
Notes
-----
There are behavior differences between Koalas and pandas.
* the `method` argument only accepts 'pearson', 'spearman'
* the data should not contain NaNs. Koalas will return an error.
* Koalas doesn't support the following argument(s).
* `min_periods` argument is not supported
"""
# This implementation is suboptimal because it computes more than necessary,
# but it should be a start
df = self._kdf.assign(corr_arg1=self, corr_arg2=other)[["corr_arg1", "corr_arg2"]]
c = df.corr(method=method)
return c.loc["corr_arg1", "corr_arg2"]
def nsmallest(self, n: int = 5) -> 'Series':
"""
Return the smallest `n` elements.
Parameters
----------
n : int, default 5
Return this many ascending sorted values.
Returns
-------
Series
The `n` smallest values in the Series, sorted in increasing order.
See Also
--------
Series.nlargest: Get the `n` largest elements.
Series.sort_values: Sort Series by values.
Series.head: Return the first `n` rows.
Notes
-----
Faster than ``.sort_values().head(n)`` for small `n` relative to
the size of the ``Series`` object.
In Koalas, thanks to Spark's lazy execution and query optimizer,
the two would have same performance.
Examples
--------
>>> data = [1, 2, 3, 4, np.nan ,6, 7, 8]
>>> s = ks.Series(data)
>>> s
0 1.0
1 2.0
2 3.0
3 4.0
4 NaN
5 6.0
6 7.0
7 8.0
Name: 0, dtype: float64
The `n` largest elements where ``n=5`` by default.
>>> s.nsmallest()
0 1.0
1 2.0
2 3.0
3 4.0
5 6.0
Name: 0, dtype: float64
>>> s.nsmallest(3)
0 1.0
1 2.0
2 3.0
Name: 0, dtype: float64
"""
return _col(self._kdf.nsmallest(n=n, columns=self.name))
def nlargest(self, n: int = 5) -> 'Series':
"""
Return the largest `n` elements.
Parameters
----------
n : int, default 5
Returns
-------
Series
The `n` largest values in the Series, sorted in decreasing order.
See Also
--------
Series.nsmallest: Get the `n` smallest elements.
Series.sort_values: Sort Series by values.
Series.head: Return the first `n` rows.
Notes
-----
Faster than ``.sort_values(ascending=False).head(n)`` for small `n`
relative to the size of the ``Series`` object.
In Koalas, thanks to Spark's lazy execution and query optimizer,
the two would have same performance.
Examples
--------
>>> data = [1, 2, 3, 4, np.nan ,6, 7, 8]
>>> s = ks.Series(data)
>>> s
0 1.0
1 2.0
2 3.0
3 4.0
4 NaN
5 6.0
6 7.0
7 8.0
Name: 0, dtype: float64
The `n` largest elements where ``n=5`` by default.
>>> s.nlargest()
7 8.0
6 7.0
5 6.0
3 4.0
2 3.0
Name: 0, dtype: float64
>>> s.nlargest(n=3)
7 8.0
6 7.0
5 6.0
Name: 0, dtype: float64
"""
return _col(self._kdf.nlargest(n=n, columns=self.name))
def count(self):
"""
Return number of non-NA/null observations in the Series.
Returns
-------
nobs : int
Examples
--------
Constructing DataFrame from a dictionary:
>>> df = ks.DataFrame({"Person":
... ["John", "Myla", "Lewis", "John", "Myla"],
... "Age": [24., np.nan, 21., 33, 26]})
Notice the uncounted NA values:
>>> df['Person'].count()
5
>>> df['Age'].count()
4
"""
return self._reduce_for_stat_function(_Frame._count_expr)
def sample(self, n: Optional[int] = None, frac: Optional[float] = None, replace: bool = False,
random_state: Optional[int] = None) -> 'Series':
return _col(self.to_dataframe().sample(
n=n, frac=frac, replace=replace, random_state=random_state))
sample.__doc__ = DataFrame.sample.__doc__
def apply(self, func, args=(), **kwds):
"""
Invoke function on values of Series.
Can be a Python function that only works on the Series.
.. note:: unlike pandas, it is required for `func` to specify return type hint.
Parameters
----------
func : function
Python function to apply. Note that type hint for return type is required.
args : tuple
Positional arguments passed to func after the series value.
**kwds
Additional keyword arguments passed to func.
Returns
-------
Series
Examples
--------
Create a Series with typical summer temperatures for each city.
>>> s = ks.Series([20, 21, 12],
... index=['London', 'New York', 'Helsinki'])
>>> s
London 20
New York 21
Helsinki 12
Name: 0, dtype: int64
Square the values by defining a function and passing it as an
argument to ``apply()``.
>>> def square(x) -> np.int64:
... return x ** 2
>>> s.apply(square)
London 400
New York 441
Helsinki 144
Name: 0, dtype: int64
Define a custom function that needs additional positional
arguments and pass these additional arguments using the
``args`` keyword
>>> def subtract_custom_value(x, custom_value) -> np.int64:
... return x - custom_value
>>> s.apply(subtract_custom_value, args=(5,))
London 15
New York 16
Helsinki 7
Name: 0, dtype: int64
Define a custom function that takes keyword arguments
and pass these arguments to ``apply``
>>> def add_custom_values(x, **kwargs) -> np.int64:
... for month in kwargs:
... x += kwargs[month]
... return x
>>> s.apply(add_custom_values, june=30, july=20, august=25)
London 95
New York 96
Helsinki 87
Name: 0, dtype: int64
Use a function from the Numpy library
>>> def numpy_log(col) -> np.float64:
... return np.log(col)
>>> s.apply(numpy_log)
London 2.995732
New York 3.044522
Helsinki 2.484907
Name: 0, dtype: float64
"""
assert callable(func), "the first argument should be a callable function."
spec = inspect.getfullargspec(func)
return_sig = spec.annotations.get("return", None)
if return_sig is None:
raise ValueError("Given function must have return type hint; however, not found.")
apply_each = wraps(func)(lambda s, *a, **k: s.apply(func, args=a, **k))
wrapped = ks.pandas_wraps(return_col=return_sig)(apply_each)
return wrapped(self, *args, **kwds).rename(self.name)
def describe(self, percentiles: Optional[List[float]] = None) -> 'Series':
return _col(self.to_dataframe().describe(percentiles))
describe.__doc__ = DataFrame.describe.__doc__
# ----------------------------------------------------------------------
# Accessor Methods
# ----------------------------------------------------------------------
dt = CachedAccessor("dt", DatetimeMethods)
# ----------------------------------------------------------------------
def _reduce_for_stat_function(self, sfun):
from inspect import signature
num_args = len(signature(sfun).parameters)
col_sdf = self._scol
col_type = self.schema[self.name].dataType
if isinstance(col_type, BooleanType) and sfun.__name__ not in ('min', 'max'):
# Stat functions cannot be used with boolean values by default
# Thus, cast to integer (true to 1 and false to 0)
# Exclude the min and max methods though since those work with booleans
col_sdf = col_sdf.cast('integer')
if num_args == 1:
# Only pass in the column if sfun accepts only one arg
col_sdf = sfun(col_sdf)
else: # must be 2
assert num_args == 2
# Pass in both the column and its data type if sfun accepts two args
col_sdf = sfun(col_sdf, col_type)
return _unpack_scalar(self._kdf._sdf.select(col_sdf))
def __len__(self):
return len(self.to_dataframe())
def __getitem__(self, key):
return Series(self._scol.__getitem__(key), anchor=self._kdf, index=self._index_map)
def __getattr__(self, item: str) -> Any:
if item.startswith("__") or item.startswith("_pandas_") or item.startswith("_spark_"):
raise AttributeError(item)
if hasattr(_MissingPandasLikeSeries, item):
property_or_func = getattr(_MissingPandasLikeSeries, item)
if isinstance(property_or_func, property):
return property_or_func.fget(self) # type: ignore
else:
return partial(property_or_func, self)
return self.getField(item)
def __str__(self):
return self._pandas_orig_repr()
def __repr__(self):
pser = self.head(max_display_count + 1).to_pandas()
pser_length = len(pser)
repr_string = repr(pser.iloc[:max_display_count])
if pser_length > max_display_count:
rest, prev_footer = repr_string.rsplit("\n", 1)
match = REPR_PATTERN.search(prev_footer)
if match is not None:
length = match.group("length")
footer = ("\n{prev_footer}\nShowing only the first {length}"
.format(length=length, prev_footer=prev_footer))
return rest + footer
return repr_string
def __dir__(self):
if not isinstance(self.schema, StructType):
fields = []
else:
fields = [f for f in self.schema.fieldNames() if ' ' not in f]
return super(Series, self).__dir__() + fields
def _pandas_orig_repr(self):
# TODO: figure out how to reuse the original one.
return 'Column<%s>' % self._scol._jc.toString().encode('utf8')
def _unpack_scalar(sdf):
"""
Takes a dataframe that is supposed to contain a single row with a single scalar value,
and returns this value.
"""
l = sdf.head(2)
assert len(l) == 1, (sdf, l)
row = l[0]
l2 = list(row.asDict().values())
assert len(l2) == 1, (row, l2)
return l2[0]
def _col(df):
assert isinstance(df, (DataFrame, pd.DataFrame)), type(df)
return df[df.columns[0]]
| 1 | 9,872 | You're using the Sphinx docstring style here (maybe because it's the default in PyCharm?). Instead, you should use the NumPy style (that you've also used in other places) to be more consistent with the rest of the project. | databricks-koalas | py |
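For illustration, a minimal sketch of the docstring rewritten in the NumPy style the reviewer asks for. It reuses the two parameter descriptions from the patch verbatim; the method body is unchanged and elided here:

def _reduce_for_stat_function(self, sfun, numeric_only=None):
    """
    Parameters
    ----------
    sfun : the stats function to be used for aggregation
    numeric_only : not used by this implementation, but passed down by stats functions
    """
    # body unchanged; elided in this sketch
    ...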
@@ -142,8 +142,16 @@ namespace Microsoft.AspNetCore.Server.Kestrel
}
else if (!hasListenOptions && !hasServerAddresses)
{
- _logger.LogDebug($"No listening endpoints were configured. Binding to {Constants.DefaultIPEndPoint} by default.");
- listenOptions.Add(new ListenOptions(Constants.DefaultIPEndPoint));
+ _logger.LogDebug($"No listening endpoints were configured. Binding to {Constants.DefaultServerAddress} by default.");
+
+ // "localhost" for both IPv4 and IPv6 can't be represented as an IPEndPoint.
+ StartLocalhost(engine, ServerAddress.FromUrl(Constants.DefaultServerAddress));
+
+ // If StartLocalhost doesn't throw, there is at least one listener.
+ // The port cannot change for "localhost".
+ _serverAddresses.Addresses.Add(Constants.DefaultServerAddress);
+
+ return;
}
else if (!hasListenOptions)
{ | 1 | // Copyright (c) .NET Foundation. All rights reserved.
// Licensed under the Apache License, Version 2.0. See License.txt in the project root for license information.
using System;
using System.Collections.Generic;
using System.IO;
using System.Linq;
using System.Net;
using System.Reflection;
using Microsoft.AspNetCore.Hosting;
using Microsoft.AspNetCore.Hosting.Server;
using Microsoft.AspNetCore.Hosting.Server.Features;
using Microsoft.AspNetCore.Http.Features;
using Microsoft.AspNetCore.Server.Kestrel.Internal;
using Microsoft.AspNetCore.Server.Kestrel.Internal.Http;
using Microsoft.AspNetCore.Server.Kestrel.Internal.Infrastructure;
using Microsoft.AspNetCore.Server.Kestrel.Internal.Networking;
using Microsoft.Extensions.Logging;
using Microsoft.Extensions.Options;
namespace Microsoft.AspNetCore.Server.Kestrel
{
public class KestrelServer : IServer
{
private Stack<IDisposable> _disposables;
private readonly IApplicationLifetime _applicationLifetime;
private readonly ILogger _logger;
private readonly IServerAddressesFeature _serverAddresses;
public KestrelServer(IOptions<KestrelServerOptions> options, IApplicationLifetime applicationLifetime, ILoggerFactory loggerFactory)
{
if (options == null)
{
throw new ArgumentNullException(nameof(options));
}
if (applicationLifetime == null)
{
throw new ArgumentNullException(nameof(applicationLifetime));
}
if (loggerFactory == null)
{
throw new ArgumentNullException(nameof(loggerFactory));
}
Options = options.Value ?? new KestrelServerOptions();
InternalOptions = new InternalKestrelServerOptions();
_applicationLifetime = applicationLifetime;
_logger = loggerFactory.CreateLogger(typeof(KestrelServer).GetTypeInfo().Namespace);
Features = new FeatureCollection();
_serverAddresses = new ServerAddressesFeature();
Features.Set<IServerAddressesFeature>(_serverAddresses);
Features.Set(InternalOptions);
}
public IFeatureCollection Features { get; }
public KestrelServerOptions Options { get; }
private InternalKestrelServerOptions InternalOptions { get; }
public void Start<TContext>(IHttpApplication<TContext> application)
{
try
{
if (!BitConverter.IsLittleEndian)
{
throw new PlatformNotSupportedException("Kestrel does not support big-endian architectures.");
}
ValidateOptions();
if (_disposables != null)
{
// The server has already started and/or has not been cleaned up yet
throw new InvalidOperationException("Server has already started.");
}
_disposables = new Stack<IDisposable>();
var dateHeaderValueManager = new DateHeaderValueManager();
var trace = new KestrelTrace(_logger);
IThreadPool threadPool;
if (InternalOptions.ThreadPoolDispatching)
{
threadPool = new LoggingThreadPool(trace);
}
else
{
threadPool = new InlineLoggingThreadPool(trace);
}
var engine = new KestrelEngine(new ServiceContext
{
FrameFactory = context =>
{
return new Frame<TContext>(application, context);
},
AppLifetime = _applicationLifetime,
Log = trace,
HttpParserFactory = frame => new KestrelHttpParser(frame.ConnectionContext.ListenerContext.ServiceContext.Log),
ThreadPool = threadPool,
DateHeaderValueManager = dateHeaderValueManager,
ServerOptions = Options
});
_disposables.Push(engine);
_disposables.Push(dateHeaderValueManager);
var threadCount = Options.ThreadCount;
if (threadCount <= 0)
{
throw new ArgumentOutOfRangeException(nameof(threadCount),
threadCount,
"ThreadCount must be positive.");
}
if (!Constants.ECONNRESET.HasValue)
{
_logger.LogWarning("Unable to determine ECONNRESET value on this platform.");
}
if (!Constants.EADDRINUSE.HasValue)
{
_logger.LogWarning("Unable to determine EADDRINUSE value on this platform.");
}
engine.Start(threadCount);
var listenOptions = Options.ListenOptions;
var hasListenOptions = listenOptions.Any();
var hasServerAddresses = _serverAddresses.Addresses.Any();
if (hasListenOptions && hasServerAddresses)
{
var joined = string.Join(", ", _serverAddresses.Addresses);
_logger.LogWarning($"Overriding address(es) '{joined}'. Binding to endpoints defined in {nameof(WebHostBuilderKestrelExtensions.UseKestrel)}() instead.");
_serverAddresses.Addresses.Clear();
}
else if (!hasListenOptions && !hasServerAddresses)
{
_logger.LogDebug($"No listening endpoints were configured. Binding to {Constants.DefaultIPEndPoint} by default.");
listenOptions.Add(new ListenOptions(Constants.DefaultIPEndPoint));
}
else if (!hasListenOptions)
{
// If no endpoints are configured directly using KestrelServerOptions, use those configured via the IServerAddressesFeature.
var copiedAddresses = _serverAddresses.Addresses.ToArray();
_serverAddresses.Addresses.Clear();
foreach (var address in copiedAddresses)
{
var parsedAddress = ServerAddress.FromUrl(address);
if (parsedAddress.IsUnixPipe)
{
listenOptions.Add(new ListenOptions(parsedAddress.UnixPipePath)
{
Scheme = parsedAddress.Scheme,
PathBase = parsedAddress.PathBase
});
}
else
{
if (string.Equals(parsedAddress.Host, "localhost", StringComparison.OrdinalIgnoreCase))
{
// "localhost" for both IPv4 and IPv6 can't be represented as an IPEndPoint.
StartLocalhost(engine, parsedAddress);
// If StartLocalhost doesn't throw, there is at least one listener.
// The port cannot change for "localhost".
_serverAddresses.Addresses.Add(parsedAddress.ToString());
}
else
{
// These endPoints will be added later to _serverAddresses.Addresses
listenOptions.Add(new ListenOptions(CreateIPEndPoint(parsedAddress))
{
Scheme = parsedAddress.Scheme,
PathBase = parsedAddress.PathBase
});
}
}
}
}
foreach (var endPoint in listenOptions)
{
try
{
_disposables.Push(engine.CreateServer(endPoint));
}
catch (AggregateException ex)
{
if ((ex.InnerException as UvException)?.StatusCode == Constants.EADDRINUSE)
{
throw new IOException($"Failed to bind to address {endPoint}: address already in use.", ex);
}
throw;
}
// If requested port was "0", replace with assigned dynamic port.
_serverAddresses.Addresses.Add(endPoint.ToString());
}
}
catch (Exception ex)
{
_logger.LogCritical(0, ex, "Unable to start Kestrel.");
Dispose();
throw;
}
}
public void Dispose()
{
if (_disposables != null)
{
while (_disposables.Count > 0)
{
_disposables.Pop().Dispose();
}
_disposables = null;
}
}
private void ValidateOptions()
{
if (Options.Limits.MaxRequestBufferSize.HasValue &&
Options.Limits.MaxRequestBufferSize < Options.Limits.MaxRequestLineSize)
{
throw new InvalidOperationException(
$"Maximum request buffer size ({Options.Limits.MaxRequestBufferSize.Value}) must be greater than or equal to maximum request line size ({Options.Limits.MaxRequestLineSize}).");
}
}
private void StartLocalhost(KestrelEngine engine, ServerAddress parsedAddress)
{
if (parsedAddress.Port == 0)
{
throw new InvalidOperationException("Dynamic port binding is not supported when binding to localhost. You must either bind to 127.0.0.1:0 or [::1]:0, or both.");
}
var exceptions = new List<Exception>();
try
{
var ipv4ListenOptions = new ListenOptions(new IPEndPoint(IPAddress.Loopback, parsedAddress.Port))
{
Scheme = parsedAddress.Scheme,
PathBase = parsedAddress.PathBase
};
_disposables.Push(engine.CreateServer(ipv4ListenOptions));
}
catch (AggregateException ex) when (ex.InnerException is UvException)
{
var uvEx = (UvException)ex.InnerException;
if (uvEx.StatusCode == Constants.EADDRINUSE)
{
throw new IOException($"Failed to bind to address {parsedAddress} on the IPv4 loopback interface: port already in use.", ex);
}
else
{
_logger.LogWarning(0, $"Unable to bind to {parsedAddress} on the IPv4 loopback interface: ({uvEx.Message})");
exceptions.Add(uvEx);
}
}
try
{
var ipv6ListenOptions = new ListenOptions(new IPEndPoint(IPAddress.IPv6Loopback, parsedAddress.Port))
{
Scheme = parsedAddress.Scheme,
PathBase = parsedAddress.PathBase
};
_disposables.Push(engine.CreateServer(ipv6ListenOptions));
}
catch (AggregateException ex) when (ex.InnerException is UvException)
{
var uvEx = (UvException)ex.InnerException;
if (uvEx.StatusCode == Constants.EADDRINUSE)
{
throw new IOException($"Failed to bind to address {parsedAddress} on the IPv6 loopback interface: port already in use.", ex);
}
else
{
_logger.LogWarning(0, $"Unable to bind to {parsedAddress} on the IPv6 loopback interface: ({uvEx.Message})");
exceptions.Add(uvEx);
}
}
if (exceptions.Count == 2)
{
throw new IOException($"Failed to bind to address {parsedAddress}.", new AggregateException(exceptions));
}
}
/// <summary>
/// Returns an <see cref="IPEndPoint"/> for the given host and port.
/// If the host parameter isn't "localhost" or an IP address, use IPAddress.Any.
/// </summary>
internal static IPEndPoint CreateIPEndPoint(ServerAddress address)
{
IPAddress ip;
if (!IPAddress.TryParse(address.Host, out ip))
{
ip = IPAddress.IPv6Any;
}
return new IPEndPoint(ip, address.Port);
}
}
}
| 1 | 11,837 | Don't really need this since the rest of the method will no-op but it's also safe to return early. | aspnet-KestrelHttpServer | .cs |
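To make the reviewer's point concrete, a hedged sketch of the same branch without the added early return: in this branch hasListenOptions is false and nothing is appended to listenOptions, so the foreach over listenOptions at the end of Start iterates zero times and the method effectively ends here anyway.

else if (!hasListenOptions && !hasServerAddresses)
{
    _logger.LogDebug($"No listening endpoints were configured. Binding to {Constants.DefaultServerAddress} by default.");

    // "localhost" for both IPv4 and IPv6 can't be represented as an IPEndPoint.
    StartLocalhost(engine, ServerAddress.FromUrl(Constants.DefaultServerAddress));

    // If StartLocalhost doesn't throw, there is at least one listener.
    // The port cannot change for "localhost".
    _serverAddresses.Addresses.Add(Constants.DefaultServerAddress);

    // No early return needed: listenOptions is still empty, so the
    // foreach over listenOptions below is a no-op.
}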
@@ -19,11 +19,13 @@
#include <fastdds/rtps/messages/RTPSMessageCreator.h>
#include <fastdds/rtps/messages/CDRMessage.h>
-#include <fastrtps/qos/ParameterList.h>
#include <fastdds/dds/log/Log.hpp>
+#include "../../fastdds/core/policy/ParameterList.hpp"
+
using namespace eprosima::fastrtps;
+using ParameterList = eprosima::fastdds::dds::ParameterList;
namespace eprosima {
namespace fastrtps { | 1 | // Copyright 2016 Proyectos y Sistemas de Mantenimiento SL (eProsima).
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/*
 * @file RTPSMessageCreator.cpp
*
*/
#include <fastdds/rtps/messages/RTPSMessageCreator.h>
#include <fastdds/rtps/messages/CDRMessage.h>
#include <fastrtps/qos/ParameterList.h>
#include <fastdds/dds/log/Log.hpp>
using namespace eprosima::fastrtps;
namespace eprosima {
namespace fastrtps {
namespace rtps {
bool RTPSMessageCreator::addHeader(CDRMessage_t*msg, const GuidPrefix_t& guidPrefix,
const ProtocolVersion_t& version,const VendorId_t& vendorId)
{
CDRMessage::addOctet(msg,'R');
CDRMessage::addOctet(msg,'T');
CDRMessage::addOctet(msg,'P');
CDRMessage::addOctet(msg,'S');
CDRMessage::addOctet(msg,version.m_major);
CDRMessage::addOctet(msg,version.m_minor);
CDRMessage::addOctet(msg,vendorId[0]);
CDRMessage::addOctet(msg,vendorId[1]);
CDRMessage::addData(msg,guidPrefix.value, 12);
msg->length = msg->pos;
return true;
}
bool RTPSMessageCreator::addHeader(CDRMessage_t*msg, const GuidPrefix_t& guidPrefix)
{
return RTPSMessageCreator::addHeader(msg,guidPrefix, c_ProtocolVersion,c_VendorId_eProsima);
}
bool RTPSMessageCreator::addCustomContent(CDRMessage_t*msg, const octet* content, const size_t contentSize)
{
CDRMessage::addData(msg, content, static_cast<uint32_t>(contentSize));
msg->length = msg->pos;
return true;
}
bool RTPSMessageCreator::addSubmessageHeader(
CDRMessage_t* msg,
octet id,
octet flags,
uint16_t size)
{
CDRMessage::addOctet(msg,id);
CDRMessage::addOctet(msg,flags);
CDRMessage::addUInt16(msg, size);
msg->length = msg->pos;
return true;
}
bool RTPSMessageCreator::addSubmessageInfoTS(
CDRMessage_t* msg,
const Time_t &time,
bool invalidateFlag)
{
octet flags = 0x0;
uint16_t size = 8;
#if __BIG_ENDIAN__
msg->msg_endian = BIGEND;
#else
flags = flags | BIT(0);
msg->msg_endian = LITTLEEND;
#endif
if(invalidateFlag)
{
flags = flags | BIT(1);
size = 0;
}
CDRMessage::addOctet(msg,INFO_TS);
CDRMessage::addOctet(msg,flags);
CDRMessage::addUInt16(msg, size);
if(!invalidateFlag)
{
CDRMessage::addInt32(msg, time.seconds());
CDRMessage::addUInt32(msg, time.fraction());
}
return true;
}
bool RTPSMessageCreator::addSubmessageInfoSRC(CDRMessage_t* msg, const ProtocolVersion_t& version,
const VendorId_t& vendorId, const GuidPrefix_t& guidPrefix)
{
octet flags = 0x0;
uint16_t size = 20;
#if __BIG_ENDIAN__
msg->msg_endian = BIGEND;
#else
flags = flags | BIT(0);
msg->msg_endian = LITTLEEND;
#endif
CDRMessage::addOctet(msg, INFO_SRC);
CDRMessage::addOctet(msg, flags);
CDRMessage::addUInt16(msg, size);
CDRMessage::addUInt32(msg, 0);
CDRMessage::addOctet(msg, version.m_major);
CDRMessage::addOctet(msg, version.m_minor);
CDRMessage::addOctet(msg, vendorId[0]);
CDRMessage::addOctet(msg, vendorId[1]);
CDRMessage::addData(msg, guidPrefix.value, 12);
return true;
}
bool RTPSMessageCreator::addSubmessageInfoDST(CDRMessage_t* msg, const GuidPrefix_t& guidP)
{
octet flags = 0x0;
uint16_t size = 12;
#if __BIG_ENDIAN__
msg->msg_endian = BIGEND;
#else
flags = flags | BIT(0);
msg->msg_endian = LITTLEEND;
#endif
CDRMessage::addOctet(msg,INFO_DST);
CDRMessage::addOctet(msg,flags);
CDRMessage::addUInt16(msg, size);
CDRMessage::addData(msg,guidP.value,12);
return true;
}
bool RTPSMessageCreator::addSubmessageInfoTS_Now(CDRMessage_t* msg,bool invalidateFlag)
{
Time_t time_now;
Time_t::now(time_now);
return RTPSMessageCreator::addSubmessageInfoTS(msg,time_now,invalidateFlag);
}
} /* namespace rtps */
} /* namespace fastrtps */
} /* namespace eprosima */
#include <rtps/messages/submessages/DataMsg.hpp>
#include <rtps/messages/submessages/HeartbeatMsg.hpp>
#include <rtps/messages/submessages/AckNackMsg.hpp>
#include <rtps/messages/submessages/GapMsg.hpp>
| 1 | 18,475 | Don't use relative paths. The src directory is already on the include search path. | eProsima-Fast-DDS | cpp
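A sketch of what the reviewer suggests, assuming the header sits at fastdds/core/policy/ParameterList.hpp under the src directory (inferred from the relative path used in the patch):

// src is on the include search path, so no relative path is needed.
#include <fastdds/core/policy/ParameterList.hpp>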
@@ -0,0 +1,5 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+// See the LICENSE file in the project root for more information.
+
+// [assembly: BenchmarkDotNet.Attributes.AspNetCoreBenchmark] | 1 | 1 | 11,315 | this file can be removed | dotnet-performance | .cs |