Columns: unique_id (int64, 13 to 189k), target (int64, 0 to 1), code (string, lengths 20 to 241k), __index_level_0__ (int64, 0 to 18.9k)
172,196
0
bool socket_listen(const socket_t *socket, port_t port) { assert(socket != NULL); struct sockaddr_in addr; addr.sin_family = AF_INET; addr.sin_addr.s_addr = htonl(LOCALHOST_); addr.sin_port = htons(port); if (bind(socket->fd, (struct sockaddr *)&addr, sizeof(addr)) == -1) { LOG_ERROR("%s unable to bind socket to port %u: %s", __func__, port, strerror(errno)); return false; } if (listen(socket->fd, 10) == -1) { LOG_ERROR("%s unable to listen on port %u: %s", __func__, port, strerror(errno)); return false; } return true; }
14,600
59,712
0
R_API int r_bin_dwarf_parse_info_raw(Sdb *s, RBinDwarfDebugAbbrev *da, const ut8 *obuf, size_t len, const ut8 *debug_str, size_t debug_str_len, int mode) { const ut8 *buf = obuf, *buf_end = obuf + len; size_t k, offset = 0; int curr_unit = 0; RBinDwarfDebugInfo di = {0}; RBinDwarfDebugInfo *inf = &di; bool ret = true; if (!da || !s || !obuf) { return false; } if (r_bin_dwarf_init_debug_info (inf) < 0) { ret = false; goto out; } while (buf < buf_end) { if (inf->length >= inf->capacity) break; if (r_bin_dwarf_init_comp_unit (&inf->comp_units[curr_unit]) < 0) { ret = false; curr_unit--; goto out_debug_info; } inf->comp_units[curr_unit].offset = buf - obuf; inf->comp_units[curr_unit].hdr.pointer_size = 0; inf->comp_units[curr_unit].hdr.abbrev_offset = 0; inf->comp_units[curr_unit].hdr.length = READ (buf, ut32); inf->comp_units[curr_unit].hdr.version = READ (buf, ut16); if (inf->comp_units[curr_unit].hdr.version != 2) { ret = false; goto out_debug_info; } if (inf->comp_units[curr_unit].hdr.length > len) { ret = false; goto out_debug_info; } inf->comp_units[curr_unit].hdr.abbrev_offset = READ (buf, ut32); inf->comp_units[curr_unit].hdr.pointer_size = READ (buf, ut8); inf->length++; /* Linear search FIXME */ if (da->decls->length >= da->capacity) { eprintf ("WARNING: malformed dwarf have not enough buckets for decls.\n"); } const int k_max = R_MIN (da->capacity, da->decls->length); for (k = 0; k < k_max; k++) { if (da->decls[k].offset == inf->comp_units[curr_unit].hdr.abbrev_offset) { offset = k; break; } } buf = r_bin_dwarf_parse_comp_unit (s, buf, &inf->comp_units[curr_unit], da, offset, debug_str, debug_str_len); if (!buf) { ret = false; goto out_debug_info; } curr_unit++; } if (mode == R_CORE_BIN_PRINT) { r_bin_dwarf_dump_debug_info (NULL, inf); } out_debug_info: for (; curr_unit >= 0; curr_unit--) { r_bin_dwarf_free_comp_unit (&inf->comp_units[curr_unit]); } r_bin_dwarf_free_debug_info (inf); out: return ret; }
14,601
120,948
0
int SocketStream::DoSecureProxyConnect() { DCHECK(factory_); SSLClientSocketContext ssl_context; ssl_context.cert_verifier = context_->cert_verifier(); ssl_context.transport_security_state = context_->transport_security_state(); ssl_context.server_bound_cert_service = context_->server_bound_cert_service(); socket_.reset(factory_->CreateSSLClientSocket( socket_.release(), proxy_info_.proxy_server().host_port_pair(), proxy_ssl_config_, ssl_context)); next_state_ = STATE_SECURE_PROXY_CONNECT_COMPLETE; metrics_->OnCountConnectionType(SocketStreamMetrics::SECURE_PROXY_CONNECTION); return socket_->Connect(io_callback_); }
14,602
99,373
0
void GetPrinterHelper(HANDLE printer, int level, scoped_array<uint8>* buffer) { DWORD buf_size = 0; GetPrinter(printer, level, NULL, 0, &buf_size); if (buf_size) { buffer->reset(new uint8[buf_size]); memset(buffer->get(), 0, buf_size); if (!GetPrinter(printer, level, buffer->get(), buf_size, &buf_size)) { buffer->reset(); } } }
14,603
46,213
0
struct super_block *user_get_super(dev_t dev) { struct super_block *sb; spin_lock(&sb_lock); rescan: list_for_each_entry(sb, &super_blocks, s_list) { if (hlist_unhashed(&sb->s_instances)) continue; if (sb->s_dev == dev) { sb->s_count++; spin_unlock(&sb_lock); down_read(&sb->s_umount); /* still alive? */ if (sb->s_root && (sb->s_flags & MS_BORN)) return sb; up_read(&sb->s_umount); /* nope, got unmounted */ spin_lock(&sb_lock); __put_super(sb); goto rescan; } } spin_unlock(&sb_lock); return NULL; }
14,604
26,642
0
perf_event_nmi_handler(struct notifier_block *self, unsigned long cmd, void *__args) { struct die_args *args = __args; unsigned int this_nmi; int handled; if (!atomic_read(&active_events)) return NOTIFY_DONE; switch (cmd) { case DIE_NMI: break; case DIE_NMIUNKNOWN: this_nmi = percpu_read(irq_stat.__nmi_count); if (this_nmi != __this_cpu_read(pmu_nmi.marked)) /* let the kernel handle the unknown nmi */ return NOTIFY_DONE; /* * This one is a PMU back-to-back nmi. Two events * trigger 'simultaneously' raising two back-to-back * NMIs. If the first NMI handles both, the latter * will be empty and daze the CPU. So, we drop it to * avoid false-positive 'unknown nmi' messages. */ return NOTIFY_STOP; default: return NOTIFY_DONE; } apic_write(APIC_LVTPC, APIC_DM_NMI); handled = x86_pmu.handle_irq(args->regs); if (!handled) return NOTIFY_DONE; this_nmi = percpu_read(irq_stat.__nmi_count); if ((handled > 1) || /* the next nmi could be a back-to-back nmi */ ((__this_cpu_read(pmu_nmi.marked) == this_nmi) && (__this_cpu_read(pmu_nmi.handled) > 1))) { /* * We could have two subsequent back-to-back nmis: The * first handles more than one counter, the 2nd * handles only one counter and the 3rd handles no * counter. * * This is the 2nd nmi because the previous was * handling more than one counter. We will mark the * next (3rd) and then drop it if unhandled. */ __this_cpu_write(pmu_nmi.marked, this_nmi + 1); __this_cpu_write(pmu_nmi.handled, handled); } return NOTIFY_STOP; }
14,605
34,415
0
static long btrfs_ioctl_ino_to_path(struct btrfs_root *root, void __user *arg) { int ret = 0; int i; u64 rel_ptr; int size; struct btrfs_ioctl_ino_path_args *ipa = NULL; struct inode_fs_paths *ipath = NULL; struct btrfs_path *path; if (!capable(CAP_SYS_ADMIN)) return -EPERM; path = btrfs_alloc_path(); if (!path) { ret = -ENOMEM; goto out; } ipa = memdup_user(arg, sizeof(*ipa)); if (IS_ERR(ipa)) { ret = PTR_ERR(ipa); ipa = NULL; goto out; } size = min_t(u32, ipa->size, 4096); ipath = init_ipath(size, root, path); if (IS_ERR(ipath)) { ret = PTR_ERR(ipath); ipath = NULL; goto out; } ret = paths_from_inode(ipa->inum, ipath); if (ret < 0) goto out; for (i = 0; i < ipath->fspath->elem_cnt; ++i) { rel_ptr = ipath->fspath->val[i] - (u64)(unsigned long)ipath->fspath->val; ipath->fspath->val[i] = rel_ptr; } ret = copy_to_user((void *)(unsigned long)ipa->fspath, (void *)(unsigned long)ipath->fspath, size); if (ret) { ret = -EFAULT; goto out; } out: btrfs_free_path(path); free_ipath(ipath); kfree(ipa); return ret; }
14,606
15,247
0
PHP_FUNCTION(linkinfo) { char *link; char *dirname; int link_len, dir_len; struct stat sb; int ret; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "p", &link, &link_len) == FAILURE) { return; } dirname = estrndup(link, link_len); dir_len = php_dirname(dirname, link_len); if (php_check_open_basedir(dirname TSRMLS_CC)) { efree(dirname); RETURN_FALSE; } ret = VCWD_LSTAT(link, &sb); if (ret == -1) { php_error_docref(NULL TSRMLS_CC, E_WARNING, "%s", strerror(errno)); efree(dirname); RETURN_LONG(-1L); } efree(dirname); RETURN_LONG((long) sb.st_dev); }
14,607
163,914
0
void ImageCapture::UpdateMediaTrackCapabilities( media::mojom::blink::PhotoStatePtr photo_state) { if (!photo_state) return; WTF::Vector<WTF::String> supported_white_balance_modes; supported_white_balance_modes.ReserveInitialCapacity( photo_state->supported_white_balance_modes.size()); for (const auto& supported_mode : photo_state->supported_white_balance_modes) supported_white_balance_modes.push_back(ToString(supported_mode)); if (!supported_white_balance_modes.IsEmpty()) { capabilities_.setWhiteBalanceMode(std::move(supported_white_balance_modes)); settings_.setWhiteBalanceMode( ToString(photo_state->current_white_balance_mode)); } WTF::Vector<WTF::String> supported_exposure_modes; supported_exposure_modes.ReserveInitialCapacity( photo_state->supported_exposure_modes.size()); for (const auto& supported_mode : photo_state->supported_exposure_modes) supported_exposure_modes.push_back(ToString(supported_mode)); if (!supported_exposure_modes.IsEmpty()) { capabilities_.setExposureMode(std::move(supported_exposure_modes)); settings_.setExposureMode(ToString(photo_state->current_exposure_mode)); } WTF::Vector<WTF::String> supported_focus_modes; supported_focus_modes.ReserveInitialCapacity( photo_state->supported_focus_modes.size()); for (const auto& supported_mode : photo_state->supported_focus_modes) supported_focus_modes.push_back(ToString(supported_mode)); if (!supported_focus_modes.IsEmpty()) { capabilities_.setFocusMode(std::move(supported_focus_modes)); settings_.setFocusMode(ToString(photo_state->current_focus_mode)); } HeapVector<Point2D> current_points_of_interest; if (!photo_state->points_of_interest.IsEmpty()) { for (const auto& point : photo_state->points_of_interest) { Point2D web_point; web_point.setX(point->x); web_point.setY(point->y); current_points_of_interest.push_back(mojo::Clone(web_point)); } } settings_.setPointsOfInterest(std::move(current_points_of_interest)); if (photo_state->exposure_compensation->max != photo_state->exposure_compensation->min) { capabilities_.setExposureCompensation( MediaSettingsRange::Create(*photo_state->exposure_compensation)); settings_.setExposureCompensation( photo_state->exposure_compensation->current); } if (photo_state->color_temperature->max != photo_state->color_temperature->min) { capabilities_.setColorTemperature( MediaSettingsRange::Create(*photo_state->color_temperature)); settings_.setColorTemperature(photo_state->color_temperature->current); } if (photo_state->iso->max != photo_state->iso->min) { capabilities_.setIso(MediaSettingsRange::Create(*photo_state->iso)); settings_.setIso(photo_state->iso->current); } if (photo_state->brightness->max != photo_state->brightness->min) { capabilities_.setBrightness( MediaSettingsRange::Create(*photo_state->brightness)); settings_.setBrightness(photo_state->brightness->current); } if (photo_state->contrast->max != photo_state->contrast->min) { capabilities_.setContrast( MediaSettingsRange::Create(*photo_state->contrast)); settings_.setContrast(photo_state->contrast->current); } if (photo_state->saturation->max != photo_state->saturation->min) { capabilities_.setSaturation( MediaSettingsRange::Create(*photo_state->saturation)); settings_.setSaturation(photo_state->saturation->current); } if (photo_state->sharpness->max != photo_state->sharpness->min) { capabilities_.setSharpness( MediaSettingsRange::Create(*photo_state->sharpness)); settings_.setSharpness(photo_state->sharpness->current); } if (photo_state->zoom->max != photo_state->zoom->min) { 
capabilities_.setZoom(MediaSettingsRange::Create(*photo_state->zoom)); settings_.setZoom(photo_state->zoom->current); } if (photo_state->supports_torch) capabilities_.setTorch(photo_state->supports_torch); if (photo_state->supports_torch) settings_.setTorch(photo_state->torch); }
14,608
161,659
0
void AudioNode::disconnect(AudioParam* destination_param, ExceptionState& exception_state) { DCHECK(IsMainThread()); BaseAudioContext::GraphAutoLocker locker(context()); unsigned number_of_disconnections = 0; for (unsigned output_index = 0; output_index < Handler().NumberOfOutputs(); ++output_index) { if (DisconnectFromOutputIfConnected(output_index, *destination_param)) number_of_disconnections++; } if (number_of_disconnections == 0) { exception_state.ThrowDOMException(kInvalidAccessError, "the given AudioParam is not connected."); return; } }
14,609
185,068
1
RenderWidgetHostViewAndroid::RenderWidgetHostViewAndroid( RenderWidgetHostImpl* widget_host, ContentViewCoreImpl* content_view_core) : host_(widget_host), is_layer_attached_(true), content_view_core_(NULL), ime_adapter_android_(ALLOW_THIS_IN_INITIALIZER_LIST(this)), cached_background_color_(SK_ColorWHITE), texture_id_in_layer_(0) { if (CompositorImpl::UsesDirectGL()) { surface_texture_transport_.reset(new SurfaceTextureTransportClient()); layer_ = surface_texture_transport_->Initialize(); } else { texture_layer_ = cc::TextureLayer::create(0); layer_ = texture_layer_; } layer_->setContentsOpaque(true); layer_->setIsDrawable(true); host_->SetView(this); SetContentViewCore(content_view_core); }
14,610
171,686
0
static inline pthread_t create_thread(void *(*start_routine)(void *), void * arg){ APPL_TRACE_DEBUG("create_thread: entered"); pthread_attr_t thread_attr; pthread_attr_init(&thread_attr); pthread_attr_setdetachstate(&thread_attr, PTHREAD_CREATE_JOINABLE); pthread_t thread_id = -1; if ( pthread_create(&thread_id, &thread_attr, start_routine, arg)!=0 ) { APPL_TRACE_ERROR("pthread_create : %s", strerror(errno)); return -1; } APPL_TRACE_DEBUG("create_thread: thread created successfully"); return thread_id; }
14,611
15,238
0
static void php_pgsql_get_link_info(INTERNAL_FUNCTION_PARAMETERS, int entry_type) { zval *pgsql_link = NULL; int id = -1, argc = ZEND_NUM_ARGS(); PGconn *pgsql; char *msgbuf; if (zend_parse_parameters(argc TSRMLS_CC, "|r", &pgsql_link) == FAILURE) { return; } if (argc == 0) { id = PGG(default_link); CHECK_DEFAULT_LINK(id); } if (pgsql_link == NULL && id == -1) { RETURN_FALSE; } ZEND_FETCH_RESOURCE2(pgsql, PGconn *, &pgsql_link, id, "PostgreSQL link", le_link, le_plink); switch(entry_type) { case PHP_PG_DBNAME: Z_STRVAL_P(return_value) = PQdb(pgsql); break; case PHP_PG_ERROR_MESSAGE: RETURN_STRING(PQErrorMessageTrim(pgsql, &msgbuf), 0); return; case PHP_PG_OPTIONS: Z_STRVAL_P(return_value) = PQoptions(pgsql); break; case PHP_PG_PORT: Z_STRVAL_P(return_value) = PQport(pgsql); break; case PHP_PG_TTY: Z_STRVAL_P(return_value) = PQtty(pgsql); break; case PHP_PG_HOST: Z_STRVAL_P(return_value) = PQhost(pgsql); break; case PHP_PG_VERSION: array_init(return_value); add_assoc_string(return_value, "client", PG_VERSION, 1); #if HAVE_PQPROTOCOLVERSION add_assoc_long(return_value, "protocol", PQprotocolVersion(pgsql)); #if HAVE_PQPARAMETERSTATUS if (PQprotocolVersion(pgsql) >= 3) { add_assoc_string(return_value, "server", (char*)PQparameterStatus(pgsql, "server_version"), 1); } #endif #endif return; default: RETURN_FALSE; } if (Z_STRVAL_P(return_value)) { Z_STRLEN_P(return_value) = strlen(Z_STRVAL_P(return_value)); Z_STRVAL_P(return_value) = (char *) estrdup(Z_STRVAL_P(return_value)); } else { Z_STRLEN_P(return_value) = 0; Z_STRVAL_P(return_value) = (char *) estrdup(""); } Z_TYPE_P(return_value) = IS_STRING; }
14,612
81,493
0
tracing_read_pipe(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) { struct trace_iterator *iter = filp->private_data; ssize_t sret; /* * Avoid more than one consumer on a single file descriptor * This is just a matter of traces coherency, the ring buffer itself * is protected. */ mutex_lock(&iter->mutex); /* return any leftover data */ sret = trace_seq_to_user(&iter->seq, ubuf, cnt); if (sret != -EBUSY) goto out; trace_seq_init(&iter->seq); if (iter->trace->read) { sret = iter->trace->read(iter, filp, ubuf, cnt, ppos); if (sret) goto out; } waitagain: sret = tracing_wait_pipe(filp); if (sret <= 0) goto out; /* stop when tracing is finished */ if (trace_empty(iter)) { sret = 0; goto out; } if (cnt >= PAGE_SIZE) cnt = PAGE_SIZE - 1; /* reset all but tr, trace, and overruns */ memset(&iter->seq, 0, sizeof(struct trace_iterator) - offsetof(struct trace_iterator, seq)); cpumask_clear(iter->started); iter->pos = -1; trace_event_read_lock(); trace_access_lock(iter->cpu_file); while (trace_find_next_entry_inc(iter) != NULL) { enum print_line_t ret; int save_len = iter->seq.seq.len; ret = print_trace_line(iter); if (ret == TRACE_TYPE_PARTIAL_LINE) { /* don't print partial lines */ iter->seq.seq.len = save_len; break; } if (ret != TRACE_TYPE_NO_CONSUME) trace_consume(iter); if (trace_seq_used(&iter->seq) >= cnt) break; /* * Setting the full flag means we reached the trace_seq buffer * size and we should leave by partial output condition above. * One of the trace_seq_* functions is not used properly. */ WARN_ONCE(iter->seq.full, "full flag set for trace type %d", iter->ent->type); } trace_access_unlock(iter->cpu_file); trace_event_read_unlock(); /* Now copy what we have to the user */ sret = trace_seq_to_user(&iter->seq, ubuf, cnt); if (iter->seq.seq.readpos >= trace_seq_used(&iter->seq)) trace_seq_init(&iter->seq); /* * If there was nothing to send to user, in spite of consuming trace * entries, go back to wait for more entries. */ if (sret == -EBUSY) goto waitagain; out: mutex_unlock(&iter->mutex); return sret; }
14,613
174,402
0
FLAC__bool read_frame_(FLAC__StreamDecoder *decoder, FLAC__bool *got_a_frame, FLAC__bool do_full_decode) { unsigned channel; unsigned i; FLAC__int32 mid, side; unsigned frame_crc; /* the one we calculate from the input stream */ FLAC__uint32 x; *got_a_frame = false; /* init the CRC */ frame_crc = 0; frame_crc = FLAC__CRC16_UPDATE(decoder->private_->header_warmup[0], frame_crc); frame_crc = FLAC__CRC16_UPDATE(decoder->private_->header_warmup[1], frame_crc); FLAC__bitreader_reset_read_crc16(decoder->private_->input, (FLAC__uint16)frame_crc); if(!read_frame_header_(decoder)) return false; if(decoder->protected_->state == FLAC__STREAM_DECODER_SEARCH_FOR_FRAME_SYNC) /* means we didn't sync on a valid header */ return true; if(!allocate_output_(decoder, decoder->private_->frame.header.blocksize, decoder->private_->frame.header.channels)) return false; for(channel = 0; channel < decoder->private_->frame.header.channels; channel++) { /* * first figure the correct bits-per-sample of the subframe */ unsigned bps = decoder->private_->frame.header.bits_per_sample; switch(decoder->private_->frame.header.channel_assignment) { case FLAC__CHANNEL_ASSIGNMENT_INDEPENDENT: /* no adjustment needed */ break; case FLAC__CHANNEL_ASSIGNMENT_LEFT_SIDE: FLAC__ASSERT(decoder->private_->frame.header.channels == 2); if(channel == 1) bps++; break; case FLAC__CHANNEL_ASSIGNMENT_RIGHT_SIDE: FLAC__ASSERT(decoder->private_->frame.header.channels == 2); if(channel == 0) bps++; break; case FLAC__CHANNEL_ASSIGNMENT_MID_SIDE: FLAC__ASSERT(decoder->private_->frame.header.channels == 2); if(channel == 1) bps++; break; default: FLAC__ASSERT(0); } /* * now read it */ if(!read_subframe_(decoder, channel, bps, do_full_decode)) return false; if(decoder->protected_->state == FLAC__STREAM_DECODER_SEARCH_FOR_FRAME_SYNC) /* means bad sync or got corruption */ return true; } if(!read_zero_padding_(decoder)) return false; if(decoder->protected_->state == FLAC__STREAM_DECODER_SEARCH_FOR_FRAME_SYNC) /* means bad sync or got corruption (i.e. "zero bits" were not all zeroes) */ return true; /* * Read the frame CRC-16 from the footer and check */ frame_crc = FLAC__bitreader_get_read_crc16(decoder->private_->input); if(!FLAC__bitreader_read_raw_uint32(decoder->private_->input, &x, FLAC__FRAME_FOOTER_CRC_LEN)) return false; /* read_callback_ sets the state for us */ if(frame_crc == x) { if(do_full_decode) { /* Undo any special channel coding */ switch(decoder->private_->frame.header.channel_assignment) { case FLAC__CHANNEL_ASSIGNMENT_INDEPENDENT: /* do nothing */ break; case FLAC__CHANNEL_ASSIGNMENT_LEFT_SIDE: FLAC__ASSERT(decoder->private_->frame.header.channels == 2); for(i = 0; i < decoder->private_->frame.header.blocksize; i++) decoder->private_->output[1][i] = decoder->private_->output[0][i] - decoder->private_->output[1][i]; break; case FLAC__CHANNEL_ASSIGNMENT_RIGHT_SIDE: FLAC__ASSERT(decoder->private_->frame.header.channels == 2); for(i = 0; i < decoder->private_->frame.header.blocksize; i++) decoder->private_->output[0][i] += decoder->private_->output[1][i]; break; case FLAC__CHANNEL_ASSIGNMENT_MID_SIDE: FLAC__ASSERT(decoder->private_->frame.header.channels == 2); for(i = 0; i < decoder->private_->frame.header.blocksize; i++) { #if 1 mid = decoder->private_->output[0][i]; side = decoder->private_->output[1][i]; mid <<= 1; mid |= (side & 1); /* i.e. if 'side' is odd... 
*/ decoder->private_->output[0][i] = (mid + side) >> 1; decoder->private_->output[1][i] = (mid - side) >> 1; #else /* OPT: without 'side' temp variable */ mid = (decoder->private_->output[0][i] << 1) | (decoder->private_->output[1][i] & 1); /* i.e. if 'side' is odd... */ decoder->private_->output[0][i] = (mid + decoder->private_->output[1][i]) >> 1; decoder->private_->output[1][i] = (mid - decoder->private_->output[1][i]) >> 1; #endif } break; default: FLAC__ASSERT(0); break; } } } else { /* Bad frame, emit error and zero the output signal */ send_error_to_client_(decoder, FLAC__STREAM_DECODER_ERROR_STATUS_FRAME_CRC_MISMATCH); if(do_full_decode) { for(channel = 0; channel < decoder->private_->frame.header.channels; channel++) { memset(decoder->private_->output[channel], 0, sizeof(FLAC__int32) * decoder->private_->frame.header.blocksize); } } } *got_a_frame = true; /* we wait to update fixed_block_size until here, when we're sure we've got a proper frame and hence a correct blocksize */ if(decoder->private_->next_fixed_block_size) decoder->private_->fixed_block_size = decoder->private_->next_fixed_block_size; /* put the latest values into the public section of the decoder instance */ decoder->protected_->channels = decoder->private_->frame.header.channels; decoder->protected_->channel_assignment = decoder->private_->frame.header.channel_assignment; decoder->protected_->bits_per_sample = decoder->private_->frame.header.bits_per_sample; decoder->protected_->sample_rate = decoder->private_->frame.header.sample_rate; decoder->protected_->blocksize = decoder->private_->frame.header.blocksize; FLAC__ASSERT(decoder->private_->frame.header.number_type == FLAC__FRAME_NUMBER_TYPE_SAMPLE_NUMBER); decoder->private_->samples_decoded = decoder->private_->frame.header.number.sample_number + decoder->private_->frame.header.blocksize; /* write it */ if(do_full_decode) { if(write_audio_frame_to_client_(decoder, &decoder->private_->frame, (const FLAC__int32 * const *)decoder->private_->output) != FLAC__STREAM_DECODER_WRITE_STATUS_CONTINUE) return false; } decoder->protected_->state = FLAC__STREAM_DECODER_SEARCH_FOR_FRAME_SYNC; return true; }
14,614
80,882
0
GF_Err trgr_dump(GF_Box *a, FILE * trace) { GF_TrackGroupBox *ptr = (GF_TrackGroupBox *) a; gf_isom_box_dump_start(a, "TrackGroupBox", trace); fprintf(trace, ">\n"); gf_isom_box_array_dump(ptr->groups, trace); gf_isom_box_dump_done("TrackGroupBox", a, trace); return GF_OK; }
14,615
101,709
0
void Browser::NotifyTabOfFullscreenExitIfNecessary() { if (fullscreened_tab_) fullscreened_tab_->ExitFullscreenMode(); fullscreened_tab_ = NULL; tab_caused_fullscreen_ = false; }
14,616
89,028
0
MagickExport MagickBooleanType PerceptibleImage(Image *image, const double epsilon) { MagickBooleanType status; status=PerceptibleImageChannel(image,DefaultChannels,epsilon); return(status); }
14,617
138,398
0
Document::Document(const DocumentInit& initializer, DocumentClassFlags documentClasses) : ContainerNode(0, CreateDocument) , TreeScope(*this) , m_hasNodesWithPlaceholderStyle(false) , m_evaluateMediaQueriesOnStyleRecalc(false) , m_pendingSheetLayout(NoLayoutWithPendingSheets) , m_frame(initializer.frame()) , m_domWindow(m_frame ? m_frame->localDOMWindow() : 0) , m_importsController(initializer.importsController()) , m_activeParserCount(0) , m_contextFeatures(ContextFeatures::defaultSwitch()) , m_wellFormed(false) , m_printing(false) , m_paginatedForScreen(false) , m_compatibilityMode(NoQuirksMode) , m_compatibilityModeLocked(false) , m_executeScriptsWaitingForResourcesTimer(this, &Document::executeScriptsWaitingForResourcesTimerFired) , m_hasAutofocused(false) , m_clearFocusedElementTimer(this, &Document::clearFocusedElementTimerFired) , m_domTreeVersion(++s_globalTreeVersion) , m_styleVersion(0) , m_listenerTypes(0) , m_mutationObserverTypes(0) , m_visitedLinkState(VisitedLinkState::create(*this)) , m_visuallyOrdered(false) , m_readyState(Complete) , m_parsingState(FinishedParsing) , m_gotoAnchorNeededAfterStylesheetsLoad(false) , m_containsValidityStyleRules(false) , m_updateFocusAppearanceRestoresSelection(false) , m_containsPlugins(false) , m_ignoreDestructiveWriteCount(0) , m_markers(adoptPtrWillBeNoop(new DocumentMarkerController)) , m_updateFocusAppearanceTimer(this, &Document::updateFocusAppearanceTimerFired) , m_cssTarget(nullptr) , m_loadEventProgress(LoadEventNotRun) , m_startTime(currentTime()) , m_scriptRunner(ScriptRunner::create(this)) , m_xmlVersion("1.0") , m_xmlStandalone(StandaloneUnspecified) , m_hasXMLDeclaration(0) , m_designMode(false) , m_hasAnnotatedRegions(false) , m_annotatedRegionsDirty(false) , m_useSecureKeyboardEntryWhenActive(false) , m_documentClasses(documentClasses) , m_isViewSource(false) , m_sawElementsInKnownNamespaces(false) , m_isSrcdocDocument(false) , m_isMobileDocument(false) , m_layoutView(0) #if !ENABLE(OILPAN) , m_weakFactory(this) #endif , m_contextDocument(initializer.contextDocument()) , m_hasFullscreenSupplement(false) , m_loadEventDelayCount(0) , m_loadEventDelayTimer(this, &Document::loadEventDelayTimerFired) , m_pluginLoadingTimer(this, &Document::pluginLoadingTimerFired) , m_writeRecursionIsTooDeep(false) , m_writeRecursionDepth(0) , m_taskRunner(MainThreadTaskRunner::create(this)) , m_registrationContext(initializer.registrationContext(this)) , m_elementDataCacheClearTimer(this, &Document::elementDataCacheClearTimerFired) , m_timeline(AnimationTimeline::create(this)) , m_templateDocumentHost(nullptr) , m_didAssociateFormControlsTimer(this, &Document::didAssociateFormControlsTimerFired) , m_hasViewportUnits(false) , m_styleRecalcElementCounter(0) , m_parserSyncPolicy(AllowAsynchronousParsing) { if (m_frame) { ASSERT(m_frame->page()); provideContextFeaturesToDocumentFrom(*this, *m_frame->page()); m_fetcher = m_frame->loader().documentLoader()->fetcher(); FrameFetchContext::provideDocumentToContext(m_fetcher->context(), this); } else if (m_importsController) { m_fetcher = FrameFetchContext::createContextAndFetcher(nullptr); FrameFetchContext::provideDocumentToContext(m_fetcher->context(), this); } else { m_fetcher = ResourceFetcher::create(nullptr); } if (initializer.shouldSetURL()) setURL(initializer.url()); initSecurityContext(initializer); initDNSPrefetch(); #if !ENABLE(OILPAN) for (unsigned i = 0; i < WTF_ARRAY_LENGTH(m_nodeListCounts); ++i) m_nodeListCounts[i] = 0; #endif 
InspectorCounters::incrementCounter(InspectorCounters::DocumentCounter); m_lifecycle.advanceTo(DocumentLifecycle::Inactive); m_styleEngine = StyleEngine::create(*this); ASSERT(!parentDocument() || !parentDocument()->activeDOMObjectsAreSuspended()); #ifndef NDEBUG liveDocumentSet().add(this); #endif }
14,618
172,924
0
static int dexOptMkdir(const char* path, int mode) { #ifdef _WIN32 return mkdir(path); #else return mkdir(path, mode); #endif }
14,619
167,384
0
void VisitAndAddFavicon(const GURL& page_url) { history_service_->AddPage(page_url, base::Time::Now(), nullptr, 0, GURL(), history::RedirectList(), ui::PAGE_TRANSITION_LINK, history::SOURCE_BROWSED, false); SkBitmap bitmap; bitmap.allocN32Pixels(gfx::kFaviconSize, gfx::kFaviconSize); bitmap.eraseColor(SK_ColorBLUE); favicon_service_->SetFavicons({page_url}, page_url, favicon_base::IconType::kFavicon, gfx::Image::CreateFrom1xBitmap(bitmap)); }
14,620
74,043
0
MagickExport unsigned int GetMagickSignature(const StringInfo *nonce) { register unsigned char *p; StringInfo *version; unsigned int signature; version=AcquireStringInfo(MagickPathExtent); p=GetStringInfoDatum(version); signature=MAGICKCORE_QUANTUM_DEPTH; (void) memcpy(p,&signature,sizeof(signature)); p+=sizeof(signature); signature=MAGICKCORE_HDRI_ENABLE; (void) memcpy(p,&signature,sizeof(signature)); p+=sizeof(signature); signature=MagickLibInterface; (void) memcpy(p,&signature,sizeof(signature)); p+=sizeof(signature); signature=1; /* endianess */ (void) memcpy(p,&signature,sizeof(signature)); p+=sizeof(signature); SetStringInfoLength(version,p-GetStringInfoDatum(version)); if (nonce != (const StringInfo *) NULL) ConcatenateStringInfo(version,nonce); signature=CRC32(GetStringInfoDatum(version),GetStringInfoLength(version)); version=DestroyStringInfo(version); return(signature); }
14,621
43,323
0
int CLASS minolta_z2() { int i, nz; char tail[424]; fseek (ifp, -sizeof tail, SEEK_END); fread (tail, 1, sizeof tail, ifp); for (nz=i=0; i < (int) sizeof tail; i++) if (tail[i]) nz++; return nz > 20; }
14,622
178,772
1
static int hfsplus_readdir(struct file *filp, void *dirent, filldir_t filldir) { struct inode *inode = filp->f_path.dentry->d_inode; struct super_block *sb = inode->i_sb; int len, err; char strbuf[HFSPLUS_MAX_STRLEN + 1]; hfsplus_cat_entry entry; struct hfs_find_data fd; struct hfsplus_readdir_data *rd; u16 type; if (filp->f_pos >= inode->i_size) return 0; err = hfs_find_init(HFSPLUS_SB(sb)->cat_tree, &fd); if (err) return err; hfsplus_cat_build_key(sb, fd.search_key, inode->i_ino, NULL); err = hfs_brec_find(&fd); if (err) goto out; switch ((u32)filp->f_pos) { case 0: /* This is completely artificial... */ if (filldir(dirent, ".", 1, 0, inode->i_ino, DT_DIR)) goto out; filp->f_pos++; /* fall through */ case 1: hfs_bnode_read(fd.bnode, &entry, fd.entryoffset, fd.entrylength); if (be16_to_cpu(entry.type) != HFSPLUS_FOLDER_THREAD) { printk(KERN_ERR "hfs: bad catalog folder thread\n"); err = -EIO; goto out; } if (fd.entrylength < HFSPLUS_MIN_THREAD_SZ) { printk(KERN_ERR "hfs: truncated catalog thread\n"); err = -EIO; goto out; } if (filldir(dirent, "..", 2, 1, be32_to_cpu(entry.thread.parentID), DT_DIR)) goto out; filp->f_pos++; /* fall through */ default: if (filp->f_pos >= inode->i_size) goto out; err = hfs_brec_goto(&fd, filp->f_pos - 1); if (err) goto out; } for (;;) { if (be32_to_cpu(fd.key->cat.parent) != inode->i_ino) { printk(KERN_ERR "hfs: walked past end of dir\n"); err = -EIO; goto out; } hfs_bnode_read(fd.bnode, &entry, fd.entryoffset, fd.entrylength); type = be16_to_cpu(entry.type); len = HFSPLUS_MAX_STRLEN; err = hfsplus_uni2asc(sb, &fd.key->cat.name, strbuf, &len); if (err) goto out; if (type == HFSPLUS_FOLDER) { if (fd.entrylength < sizeof(struct hfsplus_cat_folder)) { printk(KERN_ERR "hfs: small dir entry\n"); err = -EIO; goto out; } if (HFSPLUS_SB(sb)->hidden_dir && HFSPLUS_SB(sb)->hidden_dir->i_ino == be32_to_cpu(entry.folder.id)) goto next; if (filldir(dirent, strbuf, len, filp->f_pos, be32_to_cpu(entry.folder.id), DT_DIR)) break; } else if (type == HFSPLUS_FILE) { if (fd.entrylength < sizeof(struct hfsplus_cat_file)) { printk(KERN_ERR "hfs: small file entry\n"); err = -EIO; goto out; } if (filldir(dirent, strbuf, len, filp->f_pos, be32_to_cpu(entry.file.id), DT_REG)) break; } else { printk(KERN_ERR "hfs: bad catalog entry type\n"); err = -EIO; goto out; } next: filp->f_pos++; if (filp->f_pos >= inode->i_size) goto out; err = hfs_brec_goto(&fd, 1); if (err) goto out; } rd = filp->private_data; if (!rd) { rd = kmalloc(sizeof(struct hfsplus_readdir_data), GFP_KERNEL); if (!rd) { err = -ENOMEM; goto out; } filp->private_data = rd; rd->file = filp; list_add(&rd->list, &HFSPLUS_I(inode)->open_dir_list); } memcpy(&rd->key, fd.key, sizeof(struct hfsplus_cat_key)); out: hfs_find_exit(&fd); return err; }
14,623
2,670
0
ssize_t vfs_pwrite_data(struct smb_request *req, files_struct *fsp, const char *buffer, size_t N, off_t offset) { size_t total=0; ssize_t ret; if (req && req->unread_bytes) { int sockfd = req->xconn->transport.sock; SMB_ASSERT(req->unread_bytes == N); /* VFS_RECVFILE must drain the socket * before returning. */ req->unread_bytes = 0; /* * Leave the socket non-blocking and * use SMB_VFS_RECVFILE. If it returns * EAGAIN || EWOULDBLOCK temporarily set * the socket blocking and retry * the RECVFILE. */ while (total < N) { ret = SMB_VFS_RECVFILE(sockfd, fsp, offset + total, N - total); if (ret == 0 || (ret == -1 && (errno == EAGAIN || errno == EWOULDBLOCK))) { int old_flags; /* Ensure the socket is blocking. */ old_flags = fcntl(sockfd, F_GETFL, 0); if (set_blocking(sockfd, true) == -1) { return (ssize_t)-1; } ret = SMB_VFS_RECVFILE(sockfd, fsp, offset + total, N - total); if (fcntl(sockfd, F_SETFL, old_flags) == -1) { return (ssize_t)-1; } if (ret == -1) { return (ssize_t)-1; } total += ret; return (ssize_t)total; } /* Any other error case. */ if (ret == -1) { return ret; } total += ret; } return (ssize_t)total; } while (total < N) { ret = SMB_VFS_PWRITE(fsp, buffer + total, N - total, offset + total); if (ret == -1) return -1; if (ret == 0) return total; total += ret; } return (ssize_t)total; }
14,624
140,727
0
error::Error GLES2DecoderImpl::DoCompressedTexImage2D( GLenum target, GLint level, GLenum internal_format, GLsizei width, GLsizei height, GLint border, GLsizei image_size, const void* data) { if (!validators_->texture_target.IsValid(target)) { LOCAL_SET_GL_ERROR_INVALID_ENUM( "glCompressedTexImage2D", target, "target"); return error::kNoError; } if (!validators_->compressed_texture_format.IsValid( internal_format)) { LOCAL_SET_GL_ERROR_INVALID_ENUM( "glCompressedTexImage2D", internal_format, "internal_format"); return error::kNoError; } if (!texture_manager()->ValidForTarget(target, level, width, height, 1) || border != 0) { LOCAL_SET_GL_ERROR( GL_INVALID_VALUE, "glCompressedTexImage2D", "dimensions out of range"); return error::kNoError; } TextureRef* texture_ref = texture_manager()->GetTextureInfoForTarget( &state_, target); if (!texture_ref) { LOCAL_SET_GL_ERROR( GL_INVALID_VALUE, "glCompressedTexImage2D", "unknown texture target"); return error::kNoError; } Texture* texture = texture_ref->texture(); if (texture->IsImmutable()) { LOCAL_SET_GL_ERROR( GL_INVALID_OPERATION, "glCompressedTexImage2D", "texture is immutable"); return error::kNoError; } if (!ValidateCompressedTexDimensions( "glCompressedTexImage2D", level, width, height, internal_format) || !ValidateCompressedTexFuncData( "glCompressedTexImage2D", width, height, internal_format, image_size)) { return error::kNoError; } if (!EnsureGPUMemoryAvailable(image_size)) { LOCAL_SET_GL_ERROR( GL_OUT_OF_MEMORY, "glCompressedTexImage2D", "out of memory"); return error::kNoError; } if (texture->IsAttachedToFramebuffer()) { framebuffer_state_.clear_state_dirty = true; } scoped_ptr<int8[]> zero; if (!data) { zero.reset(new int8[image_size]); memset(zero.get(), 0, image_size); data = zero.get(); } LOCAL_COPY_REAL_GL_ERRORS_TO_WRAPPER("glCompressedTexImage2D"); glCompressedTexImage2D( target, level, internal_format, width, height, border, image_size, data); GLenum error = LOCAL_PEEK_GL_ERROR("glCompressedTexImage2D"); if (error == GL_NO_ERROR) { texture_manager()->SetLevelInfo( texture_ref, target, level, internal_format, width, height, 1, border, 0, 0, true); } ExitCommandProcessingEarly(); return error::kNoError; }
14,625
11,188
0
static zend_object *phar_rename_archive(phar_archive_data **sphar, char *ext, zend_bool compress) /* {{{ */ { const char *oldname = NULL; phar_archive_data *phar = *sphar; char *oldpath = NULL; char *basename = NULL, *basepath = NULL; char *newname = NULL, *newpath = NULL; zval ret, arg1; zend_class_entry *ce; char *error; const char *pcr_error; int ext_len = ext ? strlen(ext) : 0; int oldname_len; phar_archive_data *pphar = NULL; php_stream_statbuf ssb; if (!ext) { if (phar->is_zip) { if (phar->is_data) { ext = "zip"; } else { ext = "phar.zip"; } } else if (phar->is_tar) { switch (phar->flags) { case PHAR_FILE_COMPRESSED_GZ: if (phar->is_data) { ext = "tar.gz"; } else { ext = "phar.tar.gz"; } break; case PHAR_FILE_COMPRESSED_BZ2: if (phar->is_data) { ext = "tar.bz2"; } else { ext = "phar.tar.bz2"; } break; default: if (phar->is_data) { ext = "tar"; } else { ext = "phar.tar"; } } } else { switch (phar->flags) { case PHAR_FILE_COMPRESSED_GZ: ext = "phar.gz"; break; case PHAR_FILE_COMPRESSED_BZ2: ext = "phar.bz2"; break; default: ext = "phar"; } } } else if (phar_path_check(&ext, &ext_len, &pcr_error) > pcr_is_ok) { if (phar->is_data) { zend_throw_exception_ex(spl_ce_BadMethodCallException, 0, "data phar converted from \"%s\" has invalid extension %s", phar->fname, ext); } else { zend_throw_exception_ex(spl_ce_BadMethodCallException, 0, "phar converted from \"%s\" has invalid extension %s", phar->fname, ext); } return NULL; } if (ext[0] == '.') { ++ext; } oldpath = estrndup(phar->fname, phar->fname_len); if ((oldname = zend_memrchr(phar->fname, '/', phar->fname_len))) { ++oldname; } else { oldname = phar->fname; } oldname_len = strlen(oldname); basename = estrndup(oldname, oldname_len); spprintf(&newname, 0, "%s.%s", strtok(basename, "."), ext); efree(basename); basepath = estrndup(oldpath, (strlen(oldpath) - oldname_len)); phar->fname_len = spprintf(&newpath, 0, "%s%s", basepath, newname); phar->fname = newpath; phar->ext = newpath + phar->fname_len - strlen(ext) - 1; efree(basepath); efree(newname); if (PHAR_G(manifest_cached) && NULL != (pphar = zend_hash_str_find_ptr(&cached_phars, newpath, phar->fname_len))) { efree(oldpath); zend_throw_exception_ex(spl_ce_BadMethodCallException, 0, "Unable to add newly converted phar \"%s\" to the list of phars, new phar name is in phar.cache_list", phar->fname); return NULL; } if (NULL != (pphar = zend_hash_str_find_ptr(&(PHAR_G(phar_fname_map)), newpath, phar->fname_len))) { if (pphar->fname_len == phar->fname_len && !memcmp(pphar->fname, phar->fname, phar->fname_len)) { if (!zend_hash_num_elements(&phar->manifest)) { pphar->is_tar = phar->is_tar; pphar->is_zip = phar->is_zip; pphar->is_data = phar->is_data; pphar->flags = phar->flags; pphar->fp = phar->fp; phar->fp = NULL; phar_destroy_phar_data(phar); *sphar = NULL; phar = pphar; phar->refcount++; newpath = oldpath; goto its_ok; } } efree(oldpath); zend_throw_exception_ex(spl_ce_BadMethodCallException, 0, "Unable to add newly converted phar \"%s\" to the list of phars, a phar with that name already exists", phar->fname); return NULL; } its_ok: if (SUCCESS == php_stream_stat_path(newpath, &ssb)) { zend_throw_exception_ex(spl_ce_BadMethodCallException, 0, "phar \"%s\" exists and must be unlinked prior to conversion", newpath); efree(oldpath); return NULL; } if (!phar->is_data) { if (SUCCESS != phar_detect_phar_fname_ext(newpath, phar->fname_len, (const char **) &(phar->ext), &(phar->ext_len), 1, 1, 1)) { efree(oldpath); zend_throw_exception_ex(spl_ce_BadMethodCallException, 0, "phar \"%s\" has 
invalid extension %s", phar->fname, ext); return NULL; } if (phar->alias) { if (phar->is_temporary_alias) { phar->alias = NULL; phar->alias_len = 0; } else { phar->alias = estrndup(newpath, strlen(newpath)); phar->alias_len = strlen(newpath); phar->is_temporary_alias = 1; zend_hash_str_update_ptr(&(PHAR_G(phar_alias_map)), newpath, phar->fname_len, phar); } } } else { if (SUCCESS != phar_detect_phar_fname_ext(newpath, phar->fname_len, (const char **) &(phar->ext), &(phar->ext_len), 0, 1, 1)) { efree(oldpath); zend_throw_exception_ex(spl_ce_BadMethodCallException, 0, "data phar \"%s\" has invalid extension %s", phar->fname, ext); return NULL; } phar->alias = NULL; phar->alias_len = 0; } if ((!pphar || phar == pphar) && NULL == zend_hash_str_update_ptr(&(PHAR_G(phar_fname_map)), newpath, phar->fname_len, phar)) { efree(oldpath); zend_throw_exception_ex(spl_ce_BadMethodCallException, 0, "Unable to add newly converted phar \"%s\" to the list of phars", phar->fname); return NULL; } phar_flush(phar, 0, 0, 1, &error); if (error) { zend_throw_exception_ex(spl_ce_BadMethodCallException, 0, "%s", error); efree(error); efree(oldpath); return NULL; } efree(oldpath); if (phar->is_data) { ce = phar_ce_data; } else { ce = phar_ce_archive; } ZVAL_NULL(&ret); if (SUCCESS != object_init_ex(&ret, ce)) { zval_dtor(&ret); zend_throw_exception_ex(spl_ce_BadMethodCallException, 0, "Unable to instantiate phar object when converting archive \"%s\"", phar->fname); return NULL; } ZVAL_STRINGL(&arg1, phar->fname, phar->fname_len); zend_call_method_with_1_params(&ret, ce, &ce->constructor, "__construct", NULL, &arg1); zval_ptr_dtor(&arg1); return Z_OBJ(ret); } /* }}} */
14,626
34,686
0
static void do_unflock(struct file *file, struct file_lock *fl) { struct gfs2_file *fp = file->private_data; struct gfs2_holder *fl_gh = &fp->f_fl_gh; mutex_lock(&fp->f_fl_mutex); flock_lock_file_wait(file, fl); if (fl_gh->gh_gl) { gfs2_glock_dq_wait(fl_gh); gfs2_holder_uninit(fl_gh); } mutex_unlock(&fp->f_fl_mutex); }
14,627
106,693
0
PassOwnPtr<DrawingAreaProxy> WebView::createDrawingAreaProxy() { if (useNewDrawingArea()) return DrawingAreaProxyImpl::create(m_page.get()); return ChunkedUpdateDrawingAreaProxy::create(this, m_page.get()); }
14,628
2,032
0
static int check_setuid(void) { if (geteuid()) { fprintf(stderr, "This program is not installed setuid root - " " \"user\" CIFS mounts not supported.\n"); return EX_USAGE; } #if CIFS_DISABLE_SETUID_CAPABILITY if (getuid() && !geteuid()) { printf("This mount.cifs program has been built with the " "ability to run as a setuid root program disabled.\n"); return EX_USAGE; } #endif /* CIFS_DISABLE_SETUID_CAPABILITY */ return 0; }
14,629
54,928
0
static int in_bitmapped_pack(struct object_list *roots) { while (roots) { struct object *object = roots->item; roots = roots->next; if (find_pack_entry_one(object->oid.hash, bitmap_git.pack) > 0) return 1; } return 0; }
14,630
98,243
0
String WebFrame::counterValue(JSObjectRef element) { if (!toJS(element)->inherits(&JSElement::s_info)) return String(); return counterValueForElement(static_cast<JSElement*>(toJS(element))->impl()); }
14,631
173,184
0
png_col_from_pass_col(png_uint_32 xIn, int pass) { /* By examination of the array: */ switch (pass) { case 0: return xIn * 8; case 1: return xIn * 8 + 4; case 2: return xIn * 4; case 3: return xIn * 4 + 2; case 4: return xIn * 2; case 5: return xIn * 2 + 1; case 6: return xIn; default: break; } return 0xff; /* bad pass number */ }
14,632
94,625
0
int X509_CRL_sign_ctx(X509_CRL *x, EVP_MD_CTX *ctx) { x->crl->enc.modified = 1; return ASN1_item_sign_ctx(ASN1_ITEM_rptr(X509_CRL_INFO), x->crl->sig_alg, x->sig_alg, x->signature, x->crl, ctx); }
14,633
122,094
0
thunk::PPB_Buffer_API* Buffer::AsPPB_Buffer_API() { return this; }
14,634
113,389
0
void WebProcessProxy::didReceiveMessageOnConnectionWorkQueue(CoreIPC::Connection* connection, CoreIPC::MessageID messageID, CoreIPC::MessageDecoder& decoder, bool& didHandleMessage) { if (decoder.messageReceiverName() == Messages::WebProcessProxy::messageReceiverName()) didReceiveWebProcessProxyMessageOnConnectionWorkQueue(connection, messageID, decoder, didHandleMessage); }
14,635
14,699
0
Goffset JBIG2Stream::getPos() { if (pageBitmap == NULL) { return 0; } return dataPtr - pageBitmap->getDataPtr(); }
14,636
86,381
0
static int __init hugetlb_nrpages_setup(char *s) { unsigned long *mhp; static unsigned long *last_mhp; if (!parsed_valid_hugepagesz) { pr_warn("hugepages = %s preceded by " "an unsupported hugepagesz, ignoring\n", s); parsed_valid_hugepagesz = true; return 1; } /* * !hugetlb_max_hstate means we haven't parsed a hugepagesz= parameter yet, * so this hugepages= parameter goes to the "default hstate". */ else if (!hugetlb_max_hstate) mhp = &default_hstate_max_huge_pages; else mhp = &parsed_hstate->max_huge_pages; if (mhp == last_mhp) { pr_warn("hugepages= specified twice without interleaving hugepagesz=, ignoring\n"); return 1; } if (sscanf(s, "%lu", mhp) <= 0) *mhp = 0; /* * Global state is always initialized later in hugetlb_init. * But we need to allocate >= MAX_ORDER hstates here early to still * use the bootmem allocator. */ if (hugetlb_max_hstate && parsed_hstate->order >= MAX_ORDER) hugetlb_hstate_alloc_pages(parsed_hstate); last_mhp = mhp; return 1; }
14,637
130,579
0
static void activityLoggedAttrGetter1AttributeSetterCallback(v8::Local<v8::String>, v8::Local<v8::Value> jsValue, const v8::PropertyCallbackInfo<void>& info) { TRACE_EVENT_SET_SAMPLING_STATE("Blink", "DOMSetter"); TestObjectV8Internal::activityLoggedAttrGetter1AttributeSetter(jsValue, info); TRACE_EVENT_SET_SAMPLING_STATE("V8", "V8Execution"); }
14,638
71,646
0
static ssize_t DecodePSDPixels(const size_t number_compact_pixels, const unsigned char *compact_pixels,const ssize_t depth, const size_t number_pixels,unsigned char *pixels) { int pixel; register ssize_t i, j; size_t length; ssize_t packets; packets=(ssize_t) number_compact_pixels; for (i=0; (packets > 1) && (i < (ssize_t) number_pixels); ) { length=(size_t) (*compact_pixels++); packets--; if (length == 128) continue; if (length > 128) { length=256-length+1; if (((ssize_t) length+i) > (ssize_t) number_pixels) length=number_pixels-(size_t) i; pixel=(*compact_pixels++); packets--; for (j=0; j < (ssize_t) length; j++) { switch (depth) { case 1: { *pixels++=(pixel >> 7) & 0x01 ? 0U : 255U; *pixels++=(pixel >> 6) & 0x01 ? 0U : 255U; *pixels++=(pixel >> 5) & 0x01 ? 0U : 255U; *pixels++=(pixel >> 4) & 0x01 ? 0U : 255U; *pixels++=(pixel >> 3) & 0x01 ? 0U : 255U; *pixels++=(pixel >> 2) & 0x01 ? 0U : 255U; *pixels++=(pixel >> 1) & 0x01 ? 0U : 255U; *pixels++=(pixel >> 0) & 0x01 ? 0U : 255U; i+=8; break; } case 4: { *pixels++=(unsigned char) ((pixel >> 4) & 0xff); *pixels++=(unsigned char) ((pixel & 0x0f) & 0xff); i+=2; break; } case 2: { *pixels++=(unsigned char) ((pixel >> 6) & 0x03); *pixels++=(unsigned char) ((pixel >> 4) & 0x03); *pixels++=(unsigned char) ((pixel >> 2) & 0x03); *pixels++=(unsigned char) ((pixel & 0x03) & 0x03); i+=4; break; } default: { *pixels++=(unsigned char) pixel; i++; break; } } } continue; } length++; if (((ssize_t) length+i) > (ssize_t) number_pixels) length=number_pixels-(size_t) i; for (j=0; j < (ssize_t) length; j++) { switch (depth) { case 1: { *pixels++=(*compact_pixels >> 7) & 0x01 ? 0U : 255U; *pixels++=(*compact_pixels >> 6) & 0x01 ? 0U : 255U; *pixels++=(*compact_pixels >> 5) & 0x01 ? 0U : 255U; *pixels++=(*compact_pixels >> 4) & 0x01 ? 0U : 255U; *pixels++=(*compact_pixels >> 3) & 0x01 ? 0U : 255U; *pixels++=(*compact_pixels >> 2) & 0x01 ? 0U : 255U; *pixels++=(*compact_pixels >> 1) & 0x01 ? 0U : 255U; *pixels++=(*compact_pixels >> 0) & 0x01 ? 0U : 255U; i+=8; break; } case 4: { *pixels++=(*compact_pixels >> 4) & 0xff; *pixels++=(*compact_pixels & 0x0f) & 0xff; i+=2; break; } case 2: { *pixels++=(*compact_pixels >> 6) & 0x03; *pixels++=(*compact_pixels >> 4) & 0x03; *pixels++=(*compact_pixels >> 2) & 0x03; *pixels++=(*compact_pixels & 0x03) & 0x03; i+=4; break; } default: { *pixels++=(*compact_pixels); i++; break; } } compact_pixels++; } } return(i); }
14,639
62,671
0
static ssize_t WriteCALSRecord(Image *image,const char *data) { char pad[128]; register const char *p; register ssize_t i; ssize_t count; i=0; count=0; if (data != (const char *) NULL) { p=data; for (i=0; (i < 128) && (p[i] != '\0'); i++); count=WriteBlob(image,(size_t) i,(const unsigned char *) data); } if (i < 128) { i=128-i; (void) ResetMagickMemory(pad,' ',(size_t) i); count=WriteBlob(image,(size_t) i,(const unsigned char *) pad); } return(count); }
14,640
101,181
0
FilePathWatcherImpl::FilePathWatcherImpl() : delegate_(NULL) { }
14,641
116,156
0
void ResourceDispatcherHostImpl::OnUploadProgressACK(int request_id) { int child_id = filter_->child_id(); PendingRequestList::iterator i = pending_requests_.find( GlobalRequestID(child_id, request_id)); if (i == pending_requests_.end()) return; ResourceRequestInfoImpl* info = ResourceRequestInfoImpl::ForRequest(i->second); info->set_waiting_for_upload_progress_ack(false); }
14,642
79,910
0
xfs_attr3_leaf_compact( struct xfs_da_args *args, struct xfs_attr3_icleaf_hdr *ichdr_dst, struct xfs_buf *bp) { struct xfs_attr_leafblock *leaf_src; struct xfs_attr_leafblock *leaf_dst; struct xfs_attr3_icleaf_hdr ichdr_src; struct xfs_trans *trans = args->trans; char *tmpbuffer; trace_xfs_attr_leaf_compact(args); tmpbuffer = kmem_alloc(args->geo->blksize, KM_SLEEP); memcpy(tmpbuffer, bp->b_addr, args->geo->blksize); memset(bp->b_addr, 0, args->geo->blksize); leaf_src = (xfs_attr_leafblock_t *)tmpbuffer; leaf_dst = bp->b_addr; /* * Copy the on-disk header back into the destination buffer to ensure * all the information in the header that is not part of the incore * header structure is preserved. */ memcpy(bp->b_addr, tmpbuffer, xfs_attr3_leaf_hdr_size(leaf_src)); /* Initialise the incore headers */ ichdr_src = *ichdr_dst; /* struct copy */ ichdr_dst->firstused = args->geo->blksize; ichdr_dst->usedbytes = 0; ichdr_dst->count = 0; ichdr_dst->holes = 0; ichdr_dst->freemap[0].base = xfs_attr3_leaf_hdr_size(leaf_src); ichdr_dst->freemap[0].size = ichdr_dst->firstused - ichdr_dst->freemap[0].base; /* write the header back to initialise the underlying buffer */ xfs_attr3_leaf_hdr_to_disk(args->geo, leaf_dst, ichdr_dst); /* * Copy all entry's in the same (sorted) order, * but allocate name/value pairs packed and in sequence. */ xfs_attr3_leaf_moveents(args, leaf_src, &ichdr_src, 0, leaf_dst, ichdr_dst, 0, ichdr_src.count); /* * this logs the entire buffer, but the caller must write the header * back to the buffer when it is finished modifying it. */ xfs_trans_log_buf(trans, bp, 0, args->geo->blksize - 1); kmem_free(tmpbuffer); }
14,643
99,095
0
void ResourceDispatcherHost::OnCancelRequest(int request_id) { CancelRequest(receiver_->id(), request_id, true, true); }
14,644
132,715
0
protocol::CursorShapeStub* ChromotingInstance::GetCursorShapeStub() { return &empty_cursor_filter_; }
14,645
188,045
1
static void copyStereo8( short *dst, const int *const *src, unsigned nSamples, unsigned /* nChannels */) { for (unsigned i = 0; i < nSamples; ++i) { *dst++ = src[0][i] << 8; *dst++ = src[1][i] << 8; } }
14,646
168,506
0
static v8::Local<v8::Function> CreateFunction( ScriptState* script_state, ReadableStreamBytesConsumer* consumer) { return (new OnFulfilled(script_state, consumer))->BindToV8Function(); }
14,647
61,773
0
static int cine_read_packet(AVFormatContext *avctx, AVPacket *pkt) { CineDemuxContext *cine = avctx->priv_data; AVStream *st = avctx->streams[0]; AVIOContext *pb = avctx->pb; int n, size, ret; if (cine->pts >= st->duration) return AVERROR_EOF; avio_seek(pb, st->index_entries[cine->pts].pos, SEEK_SET); n = avio_rl32(pb); if (n < 8) return AVERROR_INVALIDDATA; avio_skip(pb, n - 8); size = avio_rl32(pb); ret = av_get_packet(pb, pkt, size); if (ret < 0) return ret; pkt->pts = cine->pts++; pkt->stream_index = 0; pkt->flags |= AV_PKT_FLAG_KEY; return 0; }
14,648
48,948
0
__be16 skb_network_protocol(struct sk_buff *skb, int *depth) { __be16 type = skb->protocol; /* Tunnel gso handlers can set protocol to ethernet. */ if (type == htons(ETH_P_TEB)) { struct ethhdr *eth; if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr)))) return 0; eth = (struct ethhdr *)skb_mac_header(skb); type = eth->h_proto; } return __vlan_get_protocol(skb, type, depth); }
14,649
84,046
0
void merge_all_config(GF_AVCConfig *avc_cfg, GF_HEVCConfig *hevc_cfg, GF_MediaBox *mdia) { u32 i; GF_TrackReferenceTypeBox *scal = NULL; Track_FindRef(mdia->mediaTrack, GF_ISOM_REF_SCAL, &scal); if (!scal) return; for (i=0; i<scal->trackIDCount; i++) { GF_TrackBox *a_track = GetTrackbyID(mdia->mediaTrack->moov, scal->trackIDs[i]); GF_MPEGVisualSampleEntryBox *an_entry = NULL; if (a_track && a_track->Media && a_track->Media->information && a_track->Media->information->sampleTable && a_track->Media->information->sampleTable->SampleDescription) an_entry = (GF_MPEGVisualSampleEntryBox*)gf_list_get(a_track->Media->information->sampleTable->SampleDescription->other_boxes, 0); if (!an_entry) continue; if (avc_cfg && an_entry->svc_config && an_entry->svc_config->config) merge_avc_config(avc_cfg, an_entry->svc_config->config); if (avc_cfg && an_entry->mvc_config && an_entry->mvc_config->config) merge_avc_config(avc_cfg, an_entry->mvc_config->config); if (avc_cfg && an_entry->avc_config && an_entry->avc_config->config) merge_avc_config(avc_cfg, an_entry->avc_config->config); if (hevc_cfg && an_entry->lhvc_config && an_entry->lhvc_config->config) merge_hevc_config(hevc_cfg, an_entry->lhvc_config->config, GF_TRUE); if (hevc_cfg && an_entry->hevc_config && an_entry->hevc_config->config) merge_hevc_config(hevc_cfg, an_entry->hevc_config->config, GF_TRUE); } if (hevc_cfg) hevc_cfg->is_lhvc = GF_FALSE; }
14,650
45,176
0
int ssl3_get_client_certificate(SSL *s) { int i,ok,al,ret= -1; X509 *x=NULL; unsigned long l,nc,llen,n; const unsigned char *p,*q; unsigned char *d; STACK_OF(X509) *sk=NULL; n=s->method->ssl_get_message(s, SSL3_ST_SR_CERT_A, SSL3_ST_SR_CERT_B, -1, s->max_cert_list, &ok); if (!ok) return((int)n); if (s->s3->tmp.message_type == SSL3_MT_CLIENT_KEY_EXCHANGE) { if ( (s->verify_mode & SSL_VERIFY_PEER) && (s->verify_mode & SSL_VERIFY_FAIL_IF_NO_PEER_CERT)) { SSLerr(SSL_F_SSL3_GET_CLIENT_CERTIFICATE,SSL_R_PEER_DID_NOT_RETURN_A_CERTIFICATE); al=SSL_AD_HANDSHAKE_FAILURE; goto f_err; } /* If tls asked for a client cert, the client must return a 0 list */ if ((s->version > SSL3_VERSION) && s->s3->tmp.cert_request) { SSLerr(SSL_F_SSL3_GET_CLIENT_CERTIFICATE,SSL_R_TLS_PEER_DID_NOT_RESPOND_WITH_CERTIFICATE_LIST); al=SSL_AD_UNEXPECTED_MESSAGE; goto f_err; } s->s3->tmp.reuse_message=1; return(1); } if (s->s3->tmp.message_type != SSL3_MT_CERTIFICATE) { al=SSL_AD_UNEXPECTED_MESSAGE; SSLerr(SSL_F_SSL3_GET_CLIENT_CERTIFICATE,SSL_R_WRONG_MESSAGE_TYPE); goto f_err; } p=d=(unsigned char *)s->init_msg; if ((sk=sk_X509_new_null()) == NULL) { SSLerr(SSL_F_SSL3_GET_CLIENT_CERTIFICATE,ERR_R_MALLOC_FAILURE); goto err; } n2l3(p,llen); if (llen+3 != n) { al=SSL_AD_DECODE_ERROR; SSLerr(SSL_F_SSL3_GET_CLIENT_CERTIFICATE,SSL_R_LENGTH_MISMATCH); goto f_err; } for (nc=0; nc<llen; ) { n2l3(p,l); if ((l+nc+3) > llen) { al=SSL_AD_DECODE_ERROR; SSLerr(SSL_F_SSL3_GET_CLIENT_CERTIFICATE,SSL_R_CERT_LENGTH_MISMATCH); goto f_err; } q=p; x=d2i_X509(NULL,&p,l); if (x == NULL) { SSLerr(SSL_F_SSL3_GET_CLIENT_CERTIFICATE,ERR_R_ASN1_LIB); goto err; } if (p != (q+l)) { al=SSL_AD_DECODE_ERROR; SSLerr(SSL_F_SSL3_GET_CLIENT_CERTIFICATE,SSL_R_CERT_LENGTH_MISMATCH); goto f_err; } if (!sk_X509_push(sk,x)) { SSLerr(SSL_F_SSL3_GET_CLIENT_CERTIFICATE,ERR_R_MALLOC_FAILURE); goto err; } x=NULL; nc+=l+3; } if (sk_X509_num(sk) <= 0) { /* TLS does not mind 0 certs returned */ if (s->version == SSL3_VERSION) { al=SSL_AD_HANDSHAKE_FAILURE; SSLerr(SSL_F_SSL3_GET_CLIENT_CERTIFICATE,SSL_R_NO_CERTIFICATES_RETURNED); goto f_err; } /* Fail for TLS only if we required a certificate */ else if ((s->verify_mode & SSL_VERIFY_PEER) && (s->verify_mode & SSL_VERIFY_FAIL_IF_NO_PEER_CERT)) { SSLerr(SSL_F_SSL3_GET_CLIENT_CERTIFICATE,SSL_R_PEER_DID_NOT_RETURN_A_CERTIFICATE); al=SSL_AD_HANDSHAKE_FAILURE; goto f_err; } /* No client certificate so digest cached records */ if (s->s3->handshake_buffer && !ssl3_digest_cached_records(s)) { al=SSL_AD_INTERNAL_ERROR; goto f_err; } } else { EVP_PKEY *pkey; i=ssl_verify_cert_chain(s,sk); if (i <= 0) { al=ssl_verify_alarm_type(s->verify_result); SSLerr(SSL_F_SSL3_GET_CLIENT_CERTIFICATE,SSL_R_CERTIFICATE_VERIFY_FAILED); goto f_err; } if (i > 1) { SSLerr(SSL_F_SSL3_GET_CLIENT_CERTIFICATE, i); al = SSL_AD_HANDSHAKE_FAILURE; goto f_err; } pkey = X509_get_pubkey(sk_X509_value(sk, 0)); if (pkey == NULL) { al=SSL3_AD_HANDSHAKE_FAILURE; SSLerr(SSL_F_SSL3_GET_CLIENT_CERTIFICATE, SSL_R_UNKNOWN_CERTIFICATE_TYPE); goto f_err; } EVP_PKEY_free(pkey); } if (s->session->peer != NULL) /* This should not be needed */ X509_free(s->session->peer); s->session->peer=sk_X509_shift(sk); s->session->verify_result = s->verify_result; /* With the current implementation, sess_cert will always be NULL * when we arrive here. 
*/ if (s->session->sess_cert == NULL) { s->session->sess_cert = ssl_sess_cert_new(); if (s->session->sess_cert == NULL) { SSLerr(SSL_F_SSL3_GET_CLIENT_CERTIFICATE, ERR_R_MALLOC_FAILURE); goto err; } } if (s->session->sess_cert->cert_chain != NULL) sk_X509_pop_free(s->session->sess_cert->cert_chain, X509_free); s->session->sess_cert->cert_chain=sk; /* Inconsistency alert: cert_chain does *not* include the * peer's own certificate, while we do include it in s3_clnt.c */ sk=NULL; ret=1; if (0) { f_err: ssl3_send_alert(s,SSL3_AL_FATAL,al); } err: if (x != NULL) X509_free(x); if (sk != NULL) sk_X509_pop_free(sk,X509_free); return(ret); }
14,651
175,936
0
void smp_proc_sec_req(tSMP_CB* p_cb, tSMP_INT_DATA* p_data) { tBTM_LE_AUTH_REQ auth_req = *(tBTM_LE_AUTH_REQ*)p_data; tBTM_BLE_SEC_REQ_ACT sec_req_act; SMP_TRACE_DEBUG("%s: auth_req=0x%x", __func__, auth_req); p_cb->cb_evt = 0; btm_ble_link_sec_check(p_cb->pairing_bda, auth_req, &sec_req_act); SMP_TRACE_DEBUG("%s: sec_req_act=0x%x", __func__, sec_req_act); switch (sec_req_act) { case BTM_BLE_SEC_REQ_ACT_ENCRYPT: SMP_TRACE_DEBUG("%s: BTM_BLE_SEC_REQ_ACT_ENCRYPT", __func__); smp_sm_event(p_cb, SMP_ENC_REQ_EVT, NULL); break; case BTM_BLE_SEC_REQ_ACT_PAIR: p_cb->secure_connections_only_mode_required = (btm_cb.security_mode == BTM_SEC_MODE_SC) ? true : false; /* respond to non SC pairing request as failure in SC only mode */ if (p_cb->secure_connections_only_mode_required && (auth_req & SMP_SC_SUPPORT_BIT) == 0) { tSMP_INT_DATA smp_int_data; smp_int_data.status = SMP_PAIR_AUTH_FAIL; smp_sm_event(p_cb, SMP_AUTH_CMPL_EVT, &smp_int_data); } else { /* initialize local i/r key to be default keys */ p_cb->peer_auth_req = auth_req; p_cb->local_r_key = p_cb->local_i_key = SMP_SEC_DEFAULT_KEY; p_cb->cb_evt = SMP_SEC_REQUEST_EVT; } break; case BTM_BLE_SEC_REQ_ACT_DISCARD: p_cb->discard_sec_req = true; break; default: /* do nothing */ break; } }
14,652
121,229
0
void HTMLInputElement::reportMemoryUsage(MemoryObjectInfo* memoryObjectInfo) const
{
    MemoryClassInfo info(memoryObjectInfo, this, WebCoreMemoryTypes::DOM);
    HTMLTextFormControlElement::reportMemoryUsage(memoryObjectInfo);
    info.addMember(m_name, "name");
    info.addMember(m_valueIfDirty, "valueIfDirty");
    info.addMember(m_suggestedValue, "suggestedValue");
    info.addMember(m_inputType, "inputType");
    info.addMember(m_listAttributeTargetObserver, "listAttributeTargetObserver");
}
14,653
146,141
0
void WebGL2RenderingContextBase::texImage3D(GLenum target,
                                            GLint level,
                                            GLint internalformat,
                                            GLsizei width,
                                            GLsizei height,
                                            GLsizei depth,
                                            GLint border,
                                            GLenum format,
                                            GLenum type,
                                            ImageData* pixels) {
  DCHECK(pixels);
  IntRect source_image_rect;
  source_image_rect.SetLocation(
      IntPoint(unpack_skip_pixels_, unpack_skip_rows_));
  source_image_rect.SetSize(IntSize(width, height));
  TexImageHelperImageData(kTexImage3D, target, level, internalformat, 0,
                          format, type, depth, 0, 0, 0, pixels,
                          source_image_rect, unpack_image_height_);
}
14,654
9,959
0
bool PSIR_FileWriter::GetImgRsrc ( XMP_Uns16 id, ImgRsrcInfo* info ) const
{
    InternalRsrcMap::const_iterator rsrcPos = this->imgRsrcs.find ( id );
    if ( rsrcPos == this->imgRsrcs.end() ) return false;

    const InternalRsrcInfo & rsrcInfo = rsrcPos->second;

    if ( info != 0 ) {
        info->id = rsrcInfo.id;
        info->dataLen = rsrcInfo.dataLen;
        info->dataPtr = rsrcInfo.dataPtr;
        info->origOffset = rsrcInfo.origOffset;
    }

    return true;

} // PSIR_FileWriter::GetImgRsrc
14,655
47,378
0
sha512_transform(u64 *state, const u8 *input) { u64 a, b, c, d, e, f, g, h, t1, t2; int i; u64 W[16]; /* load the state into our registers */ a=state[0]; b=state[1]; c=state[2]; d=state[3]; e=state[4]; f=state[5]; g=state[6]; h=state[7]; /* now iterate */ for (i=0; i<80; i+=8) { if (!(i & 8)) { int j; if (i < 16) { /* load the input */ for (j = 0; j < 16; j++) LOAD_OP(i + j, W, input); } else { for (j = 0; j < 16; j++) { BLEND_OP(i + j, W); } } } t1 = h + e1(e) + Ch(e,f,g) + sha512_K[i ] + W[(i & 15)]; t2 = e0(a) + Maj(a,b,c); d+=t1; h=t1+t2; t1 = g + e1(d) + Ch(d,e,f) + sha512_K[i+1] + W[(i & 15) + 1]; t2 = e0(h) + Maj(h,a,b); c+=t1; g=t1+t2; t1 = f + e1(c) + Ch(c,d,e) + sha512_K[i+2] + W[(i & 15) + 2]; t2 = e0(g) + Maj(g,h,a); b+=t1; f=t1+t2; t1 = e + e1(b) + Ch(b,c,d) + sha512_K[i+3] + W[(i & 15) + 3]; t2 = e0(f) + Maj(f,g,h); a+=t1; e=t1+t2; t1 = d + e1(a) + Ch(a,b,c) + sha512_K[i+4] + W[(i & 15) + 4]; t2 = e0(e) + Maj(e,f,g); h+=t1; d=t1+t2; t1 = c + e1(h) + Ch(h,a,b) + sha512_K[i+5] + W[(i & 15) + 5]; t2 = e0(d) + Maj(d,e,f); g+=t1; c=t1+t2; t1 = b + e1(g) + Ch(g,h,a) + sha512_K[i+6] + W[(i & 15) + 6]; t2 = e0(c) + Maj(c,d,e); f+=t1; b=t1+t2; t1 = a + e1(f) + Ch(f,g,h) + sha512_K[i+7] + W[(i & 15) + 7]; t2 = e0(b) + Maj(b,c,d); e+=t1; a=t1+t2; } state[0] += a; state[1] += b; state[2] += c; state[3] += d; state[4] += e; state[5] += f; state[6] += g; state[7] += h; /* erase our data */ a = b = c = d = e = f = g = h = t1 = t2 = 0; }
14,656
74,187
0
free_config_phone( config_tree *ptree ) { FREE_STRING_FIFO(ptree->phone); }
14,657
60,433
0
static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev) { struct sock *sk; struct packet_sock *po; struct sockaddr_ll *sll; union tpacket_uhdr h; u8 *skb_head = skb->data; int skb_len = skb->len; unsigned int snaplen, res; unsigned long status = TP_STATUS_USER; unsigned short macoff, netoff, hdrlen; struct sk_buff *copy_skb = NULL; struct timespec ts; __u32 ts_status; bool is_drop_n_account = false; bool do_vnet = false; /* struct tpacket{2,3}_hdr is aligned to a multiple of TPACKET_ALIGNMENT. * We may add members to them until current aligned size without forcing * userspace to call getsockopt(..., PACKET_HDRLEN, ...). */ BUILD_BUG_ON(TPACKET_ALIGN(sizeof(*h.h2)) != 32); BUILD_BUG_ON(TPACKET_ALIGN(sizeof(*h.h3)) != 48); if (skb->pkt_type == PACKET_LOOPBACK) goto drop; sk = pt->af_packet_priv; po = pkt_sk(sk); if (!net_eq(dev_net(dev), sock_net(sk))) goto drop; if (dev->header_ops) { if (sk->sk_type != SOCK_DGRAM) skb_push(skb, skb->data - skb_mac_header(skb)); else if (skb->pkt_type == PACKET_OUTGOING) { /* Special case: outgoing packets have ll header at head */ skb_pull(skb, skb_network_offset(skb)); } } snaplen = skb->len; res = run_filter(skb, sk, snaplen); if (!res) goto drop_n_restore; if (skb->ip_summed == CHECKSUM_PARTIAL) status |= TP_STATUS_CSUMNOTREADY; else if (skb->pkt_type != PACKET_OUTGOING && (skb->ip_summed == CHECKSUM_COMPLETE || skb_csum_unnecessary(skb))) status |= TP_STATUS_CSUM_VALID; if (snaplen > res) snaplen = res; if (sk->sk_type == SOCK_DGRAM) { macoff = netoff = TPACKET_ALIGN(po->tp_hdrlen) + 16 + po->tp_reserve; } else { unsigned int maclen = skb_network_offset(skb); netoff = TPACKET_ALIGN(po->tp_hdrlen + (maclen < 16 ? 16 : maclen)) + po->tp_reserve; if (po->has_vnet_hdr) { netoff += sizeof(struct virtio_net_hdr); do_vnet = true; } macoff = netoff - maclen; } if (po->tp_version <= TPACKET_V2) { if (macoff + snaplen > po->rx_ring.frame_size) { if (po->copy_thresh && atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) { if (skb_shared(skb)) { copy_skb = skb_clone(skb, GFP_ATOMIC); } else { copy_skb = skb_get(skb); skb_head = skb->data; } if (copy_skb) skb_set_owner_r(copy_skb, sk); } snaplen = po->rx_ring.frame_size - macoff; if ((int)snaplen < 0) { snaplen = 0; do_vnet = false; } } } else if (unlikely(macoff + snaplen > GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len)) { u32 nval; nval = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len - macoff; pr_err_once("tpacket_rcv: packet too big, clamped from %u to %u. macoff=%u\n", snaplen, nval, macoff); snaplen = nval; if (unlikely((int)snaplen < 0)) { snaplen = 0; macoff = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len; do_vnet = false; } } spin_lock(&sk->sk_receive_queue.lock); h.raw = packet_current_rx_frame(po, skb, TP_STATUS_KERNEL, (macoff+snaplen)); if (!h.raw) goto drop_n_account; if (po->tp_version <= TPACKET_V2) { packet_increment_rx_head(po, &po->rx_ring); /* * LOSING will be reported till you read the stats, * because it's COR - Clear On Read. * Anyways, moving it for V1/V2 only as V3 doesn't need this * at packet level. 
*/ if (po->stats.stats1.tp_drops) status |= TP_STATUS_LOSING; } po->stats.stats1.tp_packets++; if (copy_skb) { status |= TP_STATUS_COPY; __skb_queue_tail(&sk->sk_receive_queue, copy_skb); } spin_unlock(&sk->sk_receive_queue.lock); if (do_vnet) { if (virtio_net_hdr_from_skb(skb, h.raw + macoff - sizeof(struct virtio_net_hdr), vio_le(), true)) { spin_lock(&sk->sk_receive_queue.lock); goto drop_n_account; } } skb_copy_bits(skb, 0, h.raw + macoff, snaplen); if (!(ts_status = tpacket_get_timestamp(skb, &ts, po->tp_tstamp))) getnstimeofday(&ts); status |= ts_status; switch (po->tp_version) { case TPACKET_V1: h.h1->tp_len = skb->len; h.h1->tp_snaplen = snaplen; h.h1->tp_mac = macoff; h.h1->tp_net = netoff; h.h1->tp_sec = ts.tv_sec; h.h1->tp_usec = ts.tv_nsec / NSEC_PER_USEC; hdrlen = sizeof(*h.h1); break; case TPACKET_V2: h.h2->tp_len = skb->len; h.h2->tp_snaplen = snaplen; h.h2->tp_mac = macoff; h.h2->tp_net = netoff; h.h2->tp_sec = ts.tv_sec; h.h2->tp_nsec = ts.tv_nsec; if (skb_vlan_tag_present(skb)) { h.h2->tp_vlan_tci = skb_vlan_tag_get(skb); h.h2->tp_vlan_tpid = ntohs(skb->vlan_proto); status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID; } else { h.h2->tp_vlan_tci = 0; h.h2->tp_vlan_tpid = 0; } memset(h.h2->tp_padding, 0, sizeof(h.h2->tp_padding)); hdrlen = sizeof(*h.h2); break; case TPACKET_V3: /* tp_nxt_offset,vlan are already populated above. * So DONT clear those fields here */ h.h3->tp_status |= status; h.h3->tp_len = skb->len; h.h3->tp_snaplen = snaplen; h.h3->tp_mac = macoff; h.h3->tp_net = netoff; h.h3->tp_sec = ts.tv_sec; h.h3->tp_nsec = ts.tv_nsec; memset(h.h3->tp_padding, 0, sizeof(h.h3->tp_padding)); hdrlen = sizeof(*h.h3); break; default: BUG(); } sll = h.raw + TPACKET_ALIGN(hdrlen); sll->sll_halen = dev_parse_header(skb, sll->sll_addr); sll->sll_family = AF_PACKET; sll->sll_hatype = dev->type; sll->sll_protocol = skb->protocol; sll->sll_pkttype = skb->pkt_type; if (unlikely(po->origdev)) sll->sll_ifindex = orig_dev->ifindex; else sll->sll_ifindex = dev->ifindex; smp_mb(); #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1 if (po->tp_version <= TPACKET_V2) { u8 *start, *end; end = (u8 *) PAGE_ALIGN((unsigned long) h.raw + macoff + snaplen); for (start = h.raw; start < end; start += PAGE_SIZE) flush_dcache_page(pgv_to_page(start)); } smp_wmb(); #endif if (po->tp_version <= TPACKET_V2) { __packet_set_status(po, h.raw, status); sk->sk_data_ready(sk); } else { prb_clear_blk_fill_status(&po->rx_ring); } drop_n_restore: if (skb_head != skb->data && skb_shared(skb)) { skb->data = skb_head; skb->len = skb_len; } drop: if (!is_drop_n_account) consume_skb(skb); else kfree_skb(skb); return 0; drop_n_account: is_drop_n_account = true; po->stats.stats1.tp_drops++; spin_unlock(&sk->sk_receive_queue.lock); sk->sk_data_ready(sk); kfree_skb(copy_skb); goto drop_n_restore; }
14,658
33,120
0
static int verify_newpolicy_info(struct xfrm_userpolicy_info *p)
{
    switch (p->share) {
    case XFRM_SHARE_ANY:
    case XFRM_SHARE_SESSION:
    case XFRM_SHARE_USER:
    case XFRM_SHARE_UNIQUE:
        break;

    default:
        return -EINVAL;
    }

    switch (p->action) {
    case XFRM_POLICY_ALLOW:
    case XFRM_POLICY_BLOCK:
        break;

    default:
        return -EINVAL;
    }

    switch (p->sel.family) {
    case AF_INET:
        break;

    case AF_INET6:
#if IS_ENABLED(CONFIG_IPV6)
        break;
#else
        return -EAFNOSUPPORT;
#endif

    default:
        return -EINVAL;
    }

    return verify_policy_dir(p->dir);
}
14,659
8,507
0
static void c_write_stderr(int trusted, const char *buf, int len)
{
    int i;
    for (i = 0; i < len; i++)
        if (buf[i] != '\r' && (trusted || buf[i] == '\n' || (buf[i] & 0x60)))
            fputc(buf[i], stderr);
}
14,660
39,636
0
static long futex_wait_restart(struct restart_block *restart)
{
    u32 __user *uaddr = (u32 __user *)restart->futex.uaddr;
    int fshared = 0;
    ktime_t t, *tp = NULL;

    if (restart->futex.flags & FLAGS_HAS_TIMEOUT) {
        t.tv64 = restart->futex.time;
        tp = &t;
    }
    restart->fn = do_no_restart_syscall;
    if (restart->futex.flags & FLAGS_SHARED)
        fshared = 1;
    return (long)futex_wait(uaddr, fshared, restart->futex.val, tp,
                            restart->futex.bitset,
                            restart->futex.flags & FLAGS_CLOCKRT);
}
14,661
50,286
0
static void print_version() { fprintf(stderr, PROGRAM_NAME " " PROGRAM_VERSION "\n"); }
14,662
85,867
0
void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors)
{
    struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
    unsigned bi_size = bio->bi_iter.bi_size >> SECTOR_SHIFT;

    BUG_ON(bio->bi_opf & REQ_PREFLUSH);
    BUG_ON(bi_size > *tio->len_ptr);
    BUG_ON(n_sectors > bi_size);

    *tio->len_ptr -= bi_size - n_sectors;
    bio->bi_iter.bi_size = n_sectors << SECTOR_SHIFT;
}
14,663
151,098
0
WebContents* DevToolsWindow::OpenURLFromTab(
    WebContents* source,
    const content::OpenURLParams& params) {
  DCHECK(source == main_web_contents_);
  if (!params.url.SchemeIs(content::kChromeDevToolsScheme)) {
    WebContents* inspected_web_contents = GetInspectedWebContents();
    return inspected_web_contents ?
        inspected_web_contents->OpenURL(params) : NULL;
  }
  bindings_->Reload();
  return main_web_contents_;
}
14,664
170,223
0
content::RenderFrameHost* GetMostVisitedIframe(content::WebContents* tab) {
  for (content::RenderFrameHost* frame : tab->GetAllFrames()) {
    if (frame->GetFrameName() == "mv-single")
      return frame;
  }
  return nullptr;
}
14,665
174,020
0
bool VideoTrack::VetEntry(const BlockEntry* pBlockEntry) const { return Track::VetEntry(pBlockEntry) && pBlockEntry->GetBlock()->IsKey(); }
14,666
92,580
0
static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
{
    struct sched_entity *se = &p->se;
    struct cfs_rq *cfs_rq = cfs_rq_of(se);

    SCHED_WARN_ON(task_rq(p) != rq);

    if (rq->cfs.h_nr_running > 1) {
        u64 slice = sched_slice(cfs_rq, se);
        u64 ran = se->sum_exec_runtime - se->prev_sum_exec_runtime;
        s64 delta = slice - ran;

        if (delta < 0) {
            if (rq->curr == p)
                resched_curr(rq);
            return;
        }
        hrtick_start(rq, delta);
    }
}
14,667
112,877
0
FilePath GDataCache::GetCacheRootPath(Profile* profile) { FilePath cache_base_path; chrome::GetUserCacheDirectory(profile->GetPath(), &cache_base_path); FilePath cache_root_path = cache_base_path.Append(chrome::kGDataCacheDirname); return cache_root_path.Append(kGDataCacheVersionDir); }
14,668
72,601
0
unsigned ring_buffer_event_length(struct ring_buffer_event *event)
{
    unsigned length;

    if (event->type_len == RINGBUF_TYPE_TIME_EXTEND)
        event = skip_time_extend(event);

    length = rb_event_length(event);
    if (event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
        return length;
    length -= RB_EVNT_HDR_SIZE;
    if (length > RB_MAX_SMALL_DATA + sizeof(event->array[0]))
        length -= sizeof(event->array[0]);
    return length;
}
14,669
113,341
0
void Initialize() { InitializeWithConfig(config_); }
14,670
40,742
0
static void unix_release_sock(struct sock *sk, int embrion) { struct unix_sock *u = unix_sk(sk); struct path path; struct sock *skpair; struct sk_buff *skb; int state; unix_remove_socket(sk); /* Clear state */ unix_state_lock(sk); sock_orphan(sk); sk->sk_shutdown = SHUTDOWN_MASK; path = u->path; u->path.dentry = NULL; u->path.mnt = NULL; state = sk->sk_state; sk->sk_state = TCP_CLOSE; unix_state_unlock(sk); wake_up_interruptible_all(&u->peer_wait); skpair = unix_peer(sk); if (skpair != NULL) { if (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) { unix_state_lock(skpair); /* No more writes */ skpair->sk_shutdown = SHUTDOWN_MASK; if (!skb_queue_empty(&sk->sk_receive_queue) || embrion) skpair->sk_err = ECONNRESET; unix_state_unlock(skpair); skpair->sk_state_change(skpair); sk_wake_async(skpair, SOCK_WAKE_WAITD, POLL_HUP); } sock_put(skpair); /* It may now die */ unix_peer(sk) = NULL; } /* Try to flush out this socket. Throw out buffers at least */ while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) { if (state == TCP_LISTEN) unix_release_sock(skb->sk, 1); /* passed fds are erased in the kfree_skb hook */ kfree_skb(skb); } if (path.dentry) path_put(&path); sock_put(sk); /* ---- Socket is dead now and most probably destroyed ---- */ /* * Fixme: BSD difference: In BSD all sockets connected to us get * ECONNRESET and we die on the spot. In Linux we behave * like files and pipes do and wait for the last * dereference. * * Can't we simply set sock->err? * * What the above comment does talk about? --ANK(980817) */ if (unix_tot_inflight) unix_gc(); /* Garbage collect fds */ }
14,671
42,570
0
static void sync_super(struct mddev *mddev, struct md_rdev *rdev) { if (mddev->sync_super) { mddev->sync_super(mddev, rdev); return; } BUG_ON(mddev->major_version >= ARRAY_SIZE(super_types)); super_types[mddev->major_version].sync_super(mddev, rdev); }
14,672
73,348
0
ASS_Renderer *ass_renderer_init(ASS_Library *library) { int error; FT_Library ft; ASS_Renderer *priv = 0; int vmajor, vminor, vpatch; error = FT_Init_FreeType(&ft); if (error) { ass_msg(library, MSGL_FATAL, "%s failed", "FT_Init_FreeType"); goto ass_init_exit; } FT_Library_Version(ft, &vmajor, &vminor, &vpatch); ass_msg(library, MSGL_V, "Raster: FreeType %d.%d.%d", vmajor, vminor, vpatch); priv = calloc(1, sizeof(ASS_Renderer)); if (!priv) { FT_Done_FreeType(ft); goto ass_init_exit; } priv->library = library; priv->ftlibrary = ft; #if (defined(__i386__) || defined(__x86_64__)) && CONFIG_ASM if (has_avx2()) priv->engine = &ass_bitmap_engine_avx2; else if (has_sse2()) priv->engine = &ass_bitmap_engine_sse2; else priv->engine = &ass_bitmap_engine_c; #else priv->engine = &ass_bitmap_engine_c; #endif #if CONFIG_RASTERIZER rasterizer_init(&priv->rasterizer, 16); #endif priv->cache.font_cache = ass_font_cache_create(); priv->cache.bitmap_cache = ass_bitmap_cache_create(); priv->cache.composite_cache = ass_composite_cache_create(); priv->cache.outline_cache = ass_outline_cache_create(); priv->cache.glyph_max = GLYPH_CACHE_MAX; priv->cache.bitmap_max_size = BITMAP_CACHE_MAX_SIZE; priv->cache.composite_max_size = COMPOSITE_CACHE_MAX_SIZE; priv->text_info.max_bitmaps = MAX_BITMAPS_INITIAL; priv->text_info.max_glyphs = MAX_GLYPHS_INITIAL; priv->text_info.max_lines = MAX_LINES_INITIAL; priv->text_info.n_bitmaps = 0; priv->text_info.combined_bitmaps = calloc(MAX_BITMAPS_INITIAL, sizeof(CombinedBitmapInfo)); priv->text_info.glyphs = calloc(MAX_GLYPHS_INITIAL, sizeof(GlyphInfo)); priv->text_info.lines = calloc(MAX_LINES_INITIAL, sizeof(LineInfo)); priv->settings.font_size_coeff = 1.; priv->settings.selective_style_overrides = ASS_OVERRIDE_BIT_SELECTIVE_FONT_SCALE; priv->shaper = ass_shaper_new(0); ass_shaper_info(library); #ifdef CONFIG_HARFBUZZ priv->settings.shaper = ASS_SHAPING_COMPLEX; #else priv->settings.shaper = ASS_SHAPING_SIMPLE; #endif ass_init_exit: if (priv) ass_msg(library, MSGL_V, "Initialized"); else ass_msg(library, MSGL_ERR, "Initialization failed"); return priv; }
14,673
165,631
0
void Location::setPathname(LocalDOMWindow* current_window,
                           LocalDOMWindow* entered_window,
                           const String& pathname,
                           ExceptionState& exception_state) {
  KURL url = GetDocument()->Url();
  url.SetPath(pathname);
  SetLocation(url.GetString(), current_window, entered_window,
              &exception_state);
}
14,674
17,905
0
void vnc_job_push(VncJob *job)
{
    vnc_lock_queue(queue);
    if (queue->exit || QLIST_EMPTY(&job->rectangles)) {
        g_free(job);
    } else {
        QTAILQ_INSERT_TAIL(&queue->jobs, job, next);
        qemu_cond_broadcast(&queue->cond);
    }
    vnc_unlock_queue(queue);
}
14,675
54,749
0
static void free_urb_and_buffer(struct snd_usb_midi *umidi, struct urb *urb,
                                unsigned int buffer_length)
{
    usb_free_coherent(umidi->dev, buffer_length,
                      urb->transfer_buffer, urb->transfer_dma);
    usb_free_urb(urb);
}
14,676
109,181
0
void RenderViewImpl::zoomLevelChanged() {
  bool remember = !webview()->mainFrame()->document().isPluginDocument();
  float zoom_level = webview()->zoomLevel();

  FOR_EACH_OBSERVER(RenderViewObserver, observers_, ZoomLevelChanged());

  Send(new ViewHostMsg_DidZoomURL(
      routing_id_, zoom_level, remember,
      GURL(webview()->mainFrame()->document().url())));
}
14,677
78,196
0
authentic_manage_sdo_generate(struct sc_card *card, struct sc_authentic_sdo *sdo)
{
    struct sc_context *ctx = card->ctx;
    struct sc_apdu apdu;
    unsigned char rbuf[0x400];
    unsigned char *data = NULL;
    size_t data_len = 0;
    int rv;

    LOG_FUNC_CALLED(ctx);
    sc_log(ctx, "Generate SDO(mech:%X,id:%X)", sdo->docp.mech, sdo->docp.id);

    rv = authentic_manage_sdo_encode(card, sdo, SC_CARDCTL_AUTHENTIC_SDO_GENERATE, &data, &data_len);
    LOG_TEST_RET(ctx, rv, "Cannot encode SDO data");
    sc_log(ctx, "encoded SDO length %"SC_FORMAT_LEN_SIZE_T"u", data_len);

    sc_format_apdu(card, &apdu, SC_APDU_CASE_4_SHORT, 0x47, 0x00, 0x00);
    apdu.data = data;
    apdu.datalen = data_len;
    apdu.lc = data_len;
    apdu.resp = rbuf;
    apdu.resplen = sizeof(rbuf);
    apdu.le = 0x100;

    rv = sc_transmit_apdu(card, &apdu);
    LOG_TEST_RET(ctx, rv, "APDU transmit failed");
    rv = sc_check_sw(card, apdu.sw1, apdu.sw2);
    LOG_TEST_RET(ctx, rv, "authentic_sdo_create() SDO put data error");

    rv = authentic_decode_pubkey_rsa(ctx, apdu.resp, apdu.resplen, &sdo->data.prvkey);
    SC_TEST_RET(card->ctx, SC_LOG_DEBUG_NORMAL, rv, "cannot decode public key");

    free(data);
    LOG_FUNC_RETURN(ctx, rv);
}
14,678
176,468
0
void vpx_img_flip(vpx_image_t *img) {
  /* Note: In the calculation pointer adjustment calculation, we want the
   * rhs to be promoted to a signed type. Section 6.3.1.8 of the ISO C99
   * standard indicates that if the adjustment parameter is unsigned, the
   * stride parameter will be promoted to unsigned, causing errors when
   * the lhs is a larger type than the rhs.
   */
  img->planes[VPX_PLANE_Y] +=
      (signed)(img->d_h - 1) * img->stride[VPX_PLANE_Y];
  img->stride[VPX_PLANE_Y] = -img->stride[VPX_PLANE_Y];

  img->planes[VPX_PLANE_U] +=
      (signed)((img->d_h >> img->y_chroma_shift) - 1) * img->stride[VPX_PLANE_U];
  img->stride[VPX_PLANE_U] = -img->stride[VPX_PLANE_U];

  img->planes[VPX_PLANE_V] +=
      (signed)((img->d_h >> img->y_chroma_shift) - 1) * img->stride[VPX_PLANE_V];
  img->stride[VPX_PLANE_V] = -img->stride[VPX_PLANE_V];

  img->planes[VPX_PLANE_ALPHA] +=
      (signed)(img->d_h - 1) * img->stride[VPX_PLANE_ALPHA];
  img->stride[VPX_PLANE_ALPHA] = -img->stride[VPX_PLANE_ALPHA];
}
14,679
60,579
0
static int deliver_to_subscribers(struct snd_seq_client *client, struct snd_seq_event *event, int atomic, int hop) { struct snd_seq_subscribers *subs; int err, result = 0, num_ev = 0; struct snd_seq_event event_saved; struct snd_seq_client_port *src_port; struct snd_seq_port_subs_info *grp; src_port = snd_seq_port_use_ptr(client, event->source.port); if (src_port == NULL) return -EINVAL; /* invalid source port */ /* save original event record */ event_saved = *event; grp = &src_port->c_src; /* lock list */ if (atomic) read_lock(&grp->list_lock); else down_read(&grp->list_mutex); list_for_each_entry(subs, &grp->list_head, src_list) { /* both ports ready? */ if (atomic_read(&subs->ref_count) != 2) continue; event->dest = subs->info.dest; if (subs->info.flags & SNDRV_SEQ_PORT_SUBS_TIMESTAMP) /* convert time according to flag with subscription */ update_timestamp_of_queue(event, subs->info.queue, subs->info.flags & SNDRV_SEQ_PORT_SUBS_TIME_REAL); err = snd_seq_deliver_single_event(client, event, 0, atomic, hop); if (err < 0) { /* save first error that occurs and continue */ if (!result) result = err; continue; } num_ev++; /* restore original event record */ *event = event_saved; } if (atomic) read_unlock(&grp->list_lock); else up_read(&grp->list_mutex); *event = event_saved; /* restore */ snd_seq_port_unlock(src_port); return (result < 0) ? result : num_ev; }
14,680
106,752
0
bool WebView::registerWebViewWindowClass()
{
    static bool haveRegisteredWindowClass = false;
    if (haveRegisteredWindowClass)
        return true;
    haveRegisteredWindowClass = true;

    WNDCLASSEX wcex;

    wcex.cbSize = sizeof(WNDCLASSEX);
    wcex.style = CS_DBLCLKS;
    wcex.lpfnWndProc = WebView::WebViewWndProc;
    wcex.cbClsExtra = 0;
    wcex.cbWndExtra = sizeof(WebView*);
    wcex.hInstance = instanceHandle();
    wcex.hIcon = 0;
    wcex.hCursor = ::LoadCursor(0, IDC_ARROW);
    wcex.hbrBackground = 0;
    wcex.lpszMenuName = 0;
    wcex.lpszClassName = kWebKit2WebViewWindowClassName;
    wcex.hIconSm = 0;

    return !!::RegisterClassEx(&wcex);
}
14,681
46,348
0
int ramfs_nommu_expand_for_mapping(struct inode *inode, size_t newsize) { unsigned long npages, xpages, loop; struct page *pages; unsigned order; void *data; int ret; /* make various checks */ order = get_order(newsize); if (unlikely(order >= MAX_ORDER)) return -EFBIG; ret = inode_newsize_ok(inode, newsize); if (ret) return ret; i_size_write(inode, newsize); /* allocate enough contiguous pages to be able to satisfy the * request */ pages = alloc_pages(mapping_gfp_mask(inode->i_mapping), order); if (!pages) return -ENOMEM; /* split the high-order page into an array of single pages */ xpages = 1UL << order; npages = (newsize + PAGE_SIZE - 1) >> PAGE_SHIFT; split_page(pages, order); /* trim off any pages we don't actually require */ for (loop = npages; loop < xpages; loop++) __free_page(pages + loop); /* clear the memory we allocated */ newsize = PAGE_SIZE * npages; data = page_address(pages); memset(data, 0, newsize); /* attach all the pages to the inode's address space */ for (loop = 0; loop < npages; loop++) { struct page *page = pages + loop; ret = add_to_page_cache_lru(page, inode->i_mapping, loop, GFP_KERNEL); if (ret < 0) goto add_error; /* prevent the page from being discarded on memory pressure */ SetPageDirty(page); SetPageUptodate(page); unlock_page(page); put_page(page); } return 0; add_error: while (loop < npages) __free_page(pages + loop++); return ret; }
14,682
39,669
0
static inline int check_sticky(struct inode *dir, struct inode *inode)
{
    uid_t fsuid = current_fsuid();

    if (!(dir->i_mode & S_ISVTX))
        return 0;
    if (inode->i_uid == fsuid)
        return 0;
    if (dir->i_uid == fsuid)
        return 0;
    return !capable(CAP_FOWNER);
}
14,683
37,252
0
static int vmx_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata) { u64 data; struct shared_msr_entry *msr; if (!pdata) { printk(KERN_ERR "BUG: get_msr called with NULL pdata\n"); return -EINVAL; } switch (msr_index) { #ifdef CONFIG_X86_64 case MSR_FS_BASE: data = vmcs_readl(GUEST_FS_BASE); break; case MSR_GS_BASE: data = vmcs_readl(GUEST_GS_BASE); break; case MSR_KERNEL_GS_BASE: vmx_load_host_state(to_vmx(vcpu)); data = to_vmx(vcpu)->msr_guest_kernel_gs_base; break; #endif case MSR_EFER: return kvm_get_msr_common(vcpu, msr_index, pdata); case MSR_IA32_TSC: data = guest_read_tsc(); break; case MSR_IA32_SYSENTER_CS: data = vmcs_read32(GUEST_SYSENTER_CS); break; case MSR_IA32_SYSENTER_EIP: data = vmcs_readl(GUEST_SYSENTER_EIP); break; case MSR_IA32_SYSENTER_ESP: data = vmcs_readl(GUEST_SYSENTER_ESP); break; case MSR_IA32_BNDCFGS: if (!vmx_mpx_supported()) return 1; data = vmcs_read64(GUEST_BNDCFGS); break; case MSR_IA32_FEATURE_CONTROL: if (!nested_vmx_allowed(vcpu)) return 1; data = to_vmx(vcpu)->nested.msr_ia32_feature_control; break; case MSR_IA32_VMX_BASIC ... MSR_IA32_VMX_VMFUNC: if (!nested_vmx_allowed(vcpu)) return 1; return vmx_get_vmx_msr(vcpu, msr_index, pdata); case MSR_TSC_AUX: if (!to_vmx(vcpu)->rdtscp_enabled) return 1; /* Otherwise falls through */ default: msr = find_msr_entry(to_vmx(vcpu), msr_index); if (msr) { data = msr->data; break; } return kvm_get_msr_common(vcpu, msr_index, pdata); } *pdata = data; return 0; }
14,684
61,331
0
static void ffprobe_show_program_version(WriterContext *w)
{
    AVBPrint pbuf;
    av_bprint_init(&pbuf, 1, AV_BPRINT_SIZE_UNLIMITED);

    writer_print_section_header(w, SECTION_ID_PROGRAM_VERSION);
    print_str("version", FFMPEG_VERSION);
    print_fmt("copyright", "Copyright (c) %d-%d the FFmpeg developers",
              program_birth_year, CONFIG_THIS_YEAR);
    print_str("compiler_ident", CC_IDENT);
    print_str("configuration", FFMPEG_CONFIGURATION);
    writer_print_section_footer(w);

    av_bprint_finalize(&pbuf, NULL);
}
14,685
164,863
0
void DownloadTestObserverNotInProgress::StartObserving() { started_observing_ = true; }
14,686
160,470
0
void RenderFrameHostImpl::OnRunJavaScriptDialog(
    const base::string16& message,
    const base::string16& default_prompt,
    const GURL& frame_url,
    JavaScriptDialogType dialog_type,
    IPC::Message* reply_msg) {
  if (dialog_type == JavaScriptDialogType::JAVASCRIPT_DIALOG_TYPE_ALERT)
    GetFrameResourceCoordinator()->OnAlertFired();

  if (IsWaitingForUnloadACK()) {
    SendJavaScriptDialogReply(reply_msg, true, base::string16());
    return;
  }

  int32_t message_length = static_cast<int32_t>(message.length());
  if (GetParent()) {
    UMA_HISTOGRAM_COUNTS("JSDialogs.CharacterCount.Subframe", message_length);
  } else {
    UMA_HISTOGRAM_COUNTS("JSDialogs.CharacterCount.MainFrame", message_length);
  }

  GetProcess()->SetIgnoreInputEvents(true);
  delegate_->RunJavaScriptDialog(this, message, default_prompt, frame_url,
                                 dialog_type, reply_msg);
}
14,687
37,349
0
sctp_disposition_t sctp_sf_eat_fwd_tsn(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) { struct sctp_chunk *chunk = arg; struct sctp_fwdtsn_hdr *fwdtsn_hdr; struct sctp_fwdtsn_skip *skip; __u16 len; __u32 tsn; if (!sctp_vtag_verify(chunk, asoc)) { sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG, SCTP_NULL()); return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); } /* Make sure that the FORWARD_TSN chunk has valid length. */ if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_fwdtsn_chunk))) return sctp_sf_violation_chunklen(net, ep, asoc, type, arg, commands); fwdtsn_hdr = (struct sctp_fwdtsn_hdr *)chunk->skb->data; chunk->subh.fwdtsn_hdr = fwdtsn_hdr; len = ntohs(chunk->chunk_hdr->length); len -= sizeof(struct sctp_chunkhdr); skb_pull(chunk->skb, len); tsn = ntohl(fwdtsn_hdr->new_cum_tsn); pr_debug("%s: TSN 0x%x\n", __func__, tsn); /* The TSN is too high--silently discard the chunk and count on it * getting retransmitted later. */ if (sctp_tsnmap_check(&asoc->peer.tsn_map, tsn) < 0) goto discard_noforce; /* Silently discard the chunk if stream-id is not valid */ sctp_walk_fwdtsn(skip, chunk) { if (ntohs(skip->stream) >= asoc->c.sinit_max_instreams) goto discard_noforce; } sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_FWDTSN, SCTP_U32(tsn)); if (len > sizeof(struct sctp_fwdtsn_hdr)) sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_FWDTSN, SCTP_CHUNK(chunk)); /* Count this as receiving DATA. */ if (asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE]) { sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART, SCTP_TO(SCTP_EVENT_TIMEOUT_AUTOCLOSE)); } /* FIXME: For now send a SACK, but DATA processing may * send another. */ sctp_add_cmd_sf(commands, SCTP_CMD_GEN_SACK, SCTP_NOFORCE()); return SCTP_DISPOSITION_CONSUME; discard_noforce: return SCTP_DISPOSITION_DISCARD; }
14,688
147,785
0
static void ReflectedIdAttributeSetter(
    v8::Local<v8::Value> v8_value,
    const v8::FunctionCallbackInfo<v8::Value>& info) {
  v8::Isolate* isolate = info.GetIsolate();
  ALLOW_UNUSED_LOCAL(isolate);

  v8::Local<v8::Object> holder = info.Holder();
  ALLOW_UNUSED_LOCAL(holder);

  TestObject* impl = V8TestObject::ToImpl(holder);

  V0CustomElementProcessingStack::CallbackDeliveryScope delivery_scope;

  V8StringResource<> cpp_value = v8_value;
  if (!cpp_value.Prepare())
    return;

  impl->setAttribute(html_names::kIdAttr, cpp_value);
}
14,689
51,037
0
void page_put_link(void *arg) { put_page(arg); }
14,690
152,179
0
static void WaitForDeletion(
    scoped_refptr<ServiceWorkerRegistration> registration,
    base::OnceClosure callback) {
  DCHECK(!registration->is_deleted());
  registration->AddListener(
      new RegistrationDeletionListener(registration, std::move(callback)));
}
14,691
169,063
0
void OfflinePageModelImpl::GetPagesSupportedByDownloads(
    const MultipleOfflinePageItemCallback& callback) {
  OfflinePageModelQueryBuilder builder;
  builder.RequireSupportedByDownload(
      OfflinePageModelQuery::Requirement::INCLUDE_MATCHING);
  RunWhenLoaded(
      base::Bind(&OfflinePageModelImpl::GetPagesMatchingQueryWhenLoadDone,
                 weak_ptr_factory_.GetWeakPtr(),
                 base::Passed(builder.Build(GetPolicyController())),
                 callback));
}
14,692
15,623
0
static bool is_version_0 (void *opaque, int version_id) { return version_id == 0; }
14,693
126,844
0
void BrowserView::RotatePaneFocus(bool forwards) { std::vector<views::AccessiblePaneView*> accessible_panes; GetAccessiblePanes(&accessible_panes); int pane_count = static_cast<int>(accessible_panes.size()); int special_index = -1; std::vector<views::View*> accessible_views( accessible_panes.begin(), accessible_panes.end()); accessible_views.push_back(GetTabContentsContainerView()); if (devtools_container_->visible()) accessible_views.push_back(devtools_container_); int count = static_cast<int>(accessible_views.size()); const views::View* focused_view = GetFocusManager()->GetFocusedView(); int index = -1; if (focused_view) { for (int i = 0; i < count; ++i) { if (accessible_views[i] == focused_view || accessible_views[i]->Contains(focused_view)) { index = i; break; } } } if (focused_view && index >= pane_count) GetFocusManager()->StoreFocusedView(); #if defined(OS_CHROMEOS) && defined(USE_AURA) special_index = count; ++count; #endif for (;;) { if (forwards) index = (index + 1) % count; else index = ((index - 1) + count) % count; if (index == special_index) { #if defined(USE_ASH) ash::Shell::GetInstance()->RotateFocus( forwards ? ash::Shell::FORWARD : ash::Shell::BACKWARD); #endif break; } else if (index < pane_count) { if (accessible_panes[index]->SetPaneFocusAndFocusDefault()) break; } else { accessible_views[index]->RequestFocus(); break; } } }
14,694
112,597
0
const SecurityOrigin* Document::topOrigin() const { return topDocument()->securityOrigin(); }
14,695
104,260
0
PassRefPtr<RTCVoidRequestImpl> RTCVoidRequestImpl::create(ScriptExecutionContext* context, PassRefPtr<VoidCallback> successCallback, PassRefPtr<RTCErrorCallback> errorCallback) { RefPtr<RTCVoidRequestImpl> request = adoptRef(new RTCVoidRequestImpl(context, successCallback, errorCallback)); request->suspendIfNeeded(); return request.release(); }
14,696
57,779
0
static inline bool kvm_vcpu_running(struct kvm_vcpu *vcpu) { return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE && !vcpu->arch.apf.halted); }
14,697
176,037
0
void bta_av_rc_vendor_cmd(tBTA_AV_CB* p_cb, tBTA_AV_DATA* p_data) {
  tBTA_AV_RCB* p_rcb;
  if ((p_cb->features & (BTA_AV_FEAT_RCCT | BTA_AV_FEAT_VENDOR)) ==
      (BTA_AV_FEAT_RCCT | BTA_AV_FEAT_VENDOR)) {
    if (p_data->hdr.layer_specific < BTA_AV_NUM_RCB) {
      p_rcb = &p_cb->rcb[p_data->hdr.layer_specific];
      AVRC_VendorCmd(p_rcb->handle, p_data->api_vendor.label,
                     &p_data->api_vendor.msg);
    }
  }
}
14,698
124,035
0
ProfileKeyedAPIFactory<BookmarksAPI>* BookmarksAPI::GetFactoryInstance() { return &g_factory.Get(); }
14,699