idx
int64 | func
string | target
int64 |
|---|---|---|
413,347
|
/*
 * get_properties handler for the SNMP object.
 *
 * Iterates the module-global php_snmp_properties table of property
 * handlers and, for each registered property, reads its current value
 * (via the handler's read_func) into the object's standard property
 * HashTable so that introspection (var_dump(), get_object_vars(), ...)
 * sees live values.
 */
static HashTable *php_snmp_get_properties(zval *object TSRMLS_DC)
{
php_snmp_object *obj;
php_snmp_prop_handler *hnd;
HashTable *props;
zval *val;
char *key;
uint key_len;
HashPosition pos;
ulong num_key;
obj = (php_snmp_object *)zend_objects_get_address(object TSRMLS_CC);
/* Standard properties table of this object; updated in-place below. */
props = zend_std_get_properties(object TSRMLS_CC);
zend_hash_internal_pointer_reset_ex(&php_snmp_properties, &pos);
while (zend_hash_get_current_data_ex(&php_snmp_properties, (void**)&hnd, &pos) == SUCCESS) {
zend_hash_get_current_key_ex(&php_snmp_properties, &key, &key_len, &num_key, 0, &pos);
/* No reader, or the reader failed: expose the uninitialized zval
 * (addref'd because the hash table takes ownership of a reference). */
if (!hnd->read_func || hnd->read_func(obj, &val TSRMLS_CC) != SUCCESS) {
val = EG(uninitialized_zval_ptr);
Z_ADDREF_P(val);
}
zend_hash_update(props, key, key_len, (void *)&val, sizeof(zval *), NULL);
zend_hash_move_forward_ex(&php_snmp_properties, &pos);
}
/* NOTE(review): returns obj->zo.properties rather than the local `props`;
 * zend_std_get_properties() returns the same table, so these should be
 * identical — confirm against the engine version in use. */
return obj->zo.properties;
}
| 0
|
387,738
|
// Returns the JVMTI cached class file data for this klass, or NULL when
// the cached pointer is not usable at runtime.
JvmtiCachedClassFileData* InstanceKlass::get_archived_class_data() {
  // While dumping the shared archive the cached pointer is taken as-is.
  if (DumpSharedSpaces) {
    return _cached_class_file;
  }

  // At runtime this klass must come from the shared archive, and the cached
  // data is only trusted if it still resides in shared metaspace.
  assert(this->is_shared(), "class should be shared");
  if (!MetaspaceShared::is_in_shared_metaspace(_cached_class_file)) {
    return NULL;
  }
  return _cached_class_file;
}
| 0
|
359,579
|
peer_ebgp_multihop_unset_vty (struct vty *vty, const char *ip_str)
{
struct peer *peer;
peer = peer_and_group_lookup_vty (vty, ip_str);
if (! peer)
return CMD_WARNING;
peer_ebgp_multihop_unset (peer);
return CMD_SUCCESS;
}
| 0
|
387,813
|
// Returns true when method m has the requested signature and is not
// filtered out by any of the skipping_* flags.
static bool method_matches(const Method* m,
                           const Symbol* signature,
                           bool skipping_overpass,
                           bool skipping_static,
                           bool skipping_private) {
  if (m->signature() != signature) {
    return false;
  }
  if (skipping_overpass && m->is_overpass()) {
    return false;
  }
  if (skipping_static && m->is_static()) {
    return false;
  }
  return !(skipping_private && m->is_private());
}
| 0
|
508,331
|
/*
  Open a table and, if it is marked as crashed, attempt to repair it.

  @param thd         Thread handler.
  @param table_list  Table to open/repair.

  @return TRUE on failure (could not allocate, open, or repair),
          FALSE when the table was opened (and repaired if needed).

  NOTE(review): the TABLE is allocated with plain my_malloc (no zerofill);
  this presumably relies on open_table_from_share() initializing `entry`
  even on failure before `entry->file` is inspected — confirm.
*/
static bool auto_repair_table(THD *thd, TABLE_LIST *table_list)
{
TABLE_SHARE *share;
TABLE *entry;
bool result= TRUE;
thd->clear_error();
if (!(entry= (TABLE*)my_malloc(sizeof(TABLE), MYF(MY_WME))))
return result;
/* Acquire the table definition from the table definition cache. */
if (!(share= tdc_acquire_share(thd, table_list, GTS_TABLE)))
goto end_free;
DBUG_ASSERT(! share->is_view);
/* Open for repair; if the engine reports a crash, try check-and-repair. */
if (open_table_from_share(thd, share, table_list->alias,
HA_OPEN_KEYFILE | HA_TRY_READ_ONLY,
EXTRA_RECORD,
ha_open_options | HA_OPEN_FOR_REPAIR,
entry, FALSE) || ! entry->file ||
(entry->file->is_crashed() && entry->file->ha_check_and_repair(thd)))
{
/* Give right error message */
thd->clear_error();
my_error(ER_NOT_KEYFILE, MYF(0), share->table_name.str);
sql_print_error("Couldn't repair table: %s.%s", share->db.str,
share->table_name.str);
if (entry->file)
closefrm(entry);
}
else
{
thd->clear_error(); // Clear error message
closefrm(entry);
result= FALSE;
}
tdc_release_share(share);
/* Remove the repaired share from the table cache. */
tdc_remove_table(thd, TDC_RT_REMOVE_ALL,
table_list->db, table_list->table_name,
FALSE);
end_free:
my_free(entry);
return result;
}
| 0
|
463,189
|
/*
 * Canonicalise an annotation value in-place according to the attribute
 * type.
 *
 * Returns 0 on success (value possibly rewritten to canonical form) or
 * IMAP_ANNOTATION_BADVALUE when the value does not conform to the type.
 *
 * Cleanup: the original ended with a dead
 * `if (whatever || uwhatever) return 0; return 0;` branch ("filthy
 * compiler magic") whose only purpose was to silence set-but-unused
 * warnings; the numeric results are now simply discarded with (void)
 * casts instead, since only `p` and `errno` are inspected.
 */
static int annotate_canon_value(struct buf *value, int type)
{
    char *p = NULL;

    /* check for NIL */
    if (value->s == NULL)
        return 0;

    switch (type) {
    case ATTRIB_TYPE_STRING:
        /* free form */
        break;

    case ATTRIB_TYPE_BOOLEAN:
        /* make sure it's "true" or "false" (canonicalised to lowercase) */
        if (value->len == 4 && !strncasecmp(value->s, "true", 4)) {
            buf_reset(value);
            buf_appendcstr(value, "true");
            buf_cstring(value);
        }
        else if (value->len == 5 && !strncasecmp(value->s, "false", 5)) {
            buf_reset(value);
            buf_appendcstr(value, "false");
            buf_cstring(value);
        }
        else return IMAP_ANNOTATION_BADVALUE;
        break;

    case ATTRIB_TYPE_UINT:
        /* make sure it's a valid ulong ( >= 0 ) */
        errno = 0;
        buf_cstring(value);
        /* result intentionally unused: only p and errno are checked */
        (void)strtoul(value->s, &p, 10);
        if ((p == value->s)                 /* no value */
            || (*p != '\0')                 /* illegal char */
            || (unsigned)(p - value->s) != value->len
                                            /* embedded NUL */
            || errno                        /* overflow */
            || strchr(value->s, '-')) {     /* negative number */
            return IMAP_ANNOTATION_BADVALUE;
        }
        break;

    case ATTRIB_TYPE_INT:
        /* make sure it's a valid long */
        errno = 0;
        buf_cstring(value);
        /* result intentionally unused: only p and errno are checked */
        (void)strtol(value->s, &p, 10);
        if ((p == value->s)                 /* no value */
            || (*p != '\0')                 /* illegal char */
            || (unsigned)(p - value->s) != value->len
                                            /* embedded NUL */
            || errno) {                     /* underflow/overflow */
            return IMAP_ANNOTATION_BADVALUE;
        }
        break;

    default:
        /* unknown type */
        return IMAP_ANNOTATION_BADVALUE;
    }

    return 0;
}
| 0
|
274,718
|
static const char *screen_units_str(void)
{
/* NOTE: in order of gerbv_gui_unit_t */
const char *units_str[] = {N_("mil"), N_("mm"), N_("in")};
return _(units_str[screen.unit]);
}
| 0
|
198,170
|
// Evaluates the SVDF op. Dispatches on the weights_feature tensor type:
// float32 -> reference float kernel; uint8/int8 with float input -> hybrid
// (dequantized) kernel; int8 -> full-integer kernel. Returns kTfLiteError
// for unsupported types.
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
  auto* params = reinterpret_cast<TfLiteSVDFParams*>(node->builtin_data);
  OpData* op_data = reinterpret_cast<OpData*>(node->user_data);
  const TfLiteTensor* input;
  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
  const TfLiteTensor* weights_feature;
  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kWeightsFeatureTensor,
                                          &weights_feature));
  const TfLiteTensor* weights_time;
  TF_LITE_ENSURE_OK(
      context, GetInputSafe(context, node, kWeightsTimeTensor, &weights_time));
  const TfLiteTensor* bias = GetOptionalInputTensor(context, node, kBiasTensor);
  TfLiteTensor* scratch;
  TF_LITE_ENSURE_OK(context,
                    GetTemporarySafe(context, node, /*index=*/0, &scratch));
  TfLiteTensor* state = GetVariableInput(context, node, kStateTensor);
  // BUGFIX: GetVariableInput() returns nullptr when the state tensor is
  // missing or not variable; it was dereferenced unconditionally below,
  // which crashes on malformed models. Fail gracefully instead.
  TF_LITE_ENSURE(context, state != nullptr);
  TfLiteTensor* output;
  TF_LITE_ENSURE_OK(context,
                    GetOutputSafe(context, node, kOutputTensor, &output));
  switch (weights_feature->type) {
    case kTfLiteFloat32: {
      reference_ops::EvalFloatSVDF(
          params, GetTensorShape(input), GetTensorData<float>(input),
          GetTensorShape(weights_feature),
          GetTensorData<float>(weights_feature), GetTensorShape(weights_time),
          GetTensorData<float>(weights_time), GetTensorShape(bias),
          GetTensorData<float>(bias), GetTensorData<float>(scratch),
          GetTensorData<float>(state), GetTensorShape(output),
          GetTensorData<float>(output));
      return kTfLiteOk;
    }
    case kTfLiteUInt8:
    case kTfLiteInt8: {
      if (input->type == kTfLiteFloat32) {
        // Hybrid path: float input with quantized weights.
        TfLiteTensor* input_quantized;
        TF_LITE_ENSURE_OK(context, GetTemporarySafe(context, node, /*index=*/1,
                                                    &input_quantized));
        TfLiteTensor* scaling_factors;
        TF_LITE_ENSURE_OK(context, GetTemporarySafe(context, node, /*index=*/2,
                                                    &scaling_factors));
        TfLiteTensor* float_weights_time;
        TF_LITE_ENSURE_OK(context, GetTemporarySafe(context, node, /*index=*/3,
                                                    &float_weights_time));
        TfLiteTensor* zero_points;
        TF_LITE_ENSURE_OK(context, GetTemporarySafe(context, node, /*index=*/4,
                                                    &zero_points));
        TfLiteTensor* row_sums;
        TF_LITE_ENSURE_OK(
            context, GetTemporarySafe(context, node, /*index=*/5, &row_sums));
        // Dequantize weights time.
        // TODO(alanchiao): this dequantization initialization only needs to
        // happen once per model and should theoretically be placed in either
        // Init or Prepare. However, TFLite doesn't allocate float_weights_time
        // until the Eval function.
        // TODO(alanchiao): refactor logic out into dequantize function.
        if (!op_data->float_weights_time_initialized) {
          const float dequantization_scale = weights_time->params.scale;
          const int8_t* weights_time_ptr = GetTensorData<int8_t>(weights_time);
          float* float_weights_time_ptr =
              GetTensorData<float>(float_weights_time);
          for (int i = 0; i < NumElements(float_weights_time); ++i) {
            float_weights_time_ptr[i] =
                weights_time_ptr[i] * dequantization_scale;
          }
          op_data->float_weights_time_initialized = true;
        }
        int32_t* zero_points_ptr = nullptr;
        int32_t* row_sums_ptr = nullptr;
        if (params->asymmetric_quantize_inputs && row_sums != nullptr) {
          zero_points_ptr = GetTensorData<int32_t>(zero_points);
          row_sums_ptr = GetTensorData<int32_t>(row_sums);
        }
        reference_ops::EvalHybridSVDF(
            params, GetTensorShape(input), GetTensorData<float>(input),
            GetTensorShape(weights_feature),
            GetTensorData<int8_t>(weights_feature),
            weights_feature->params.scale, GetTensorShape(float_weights_time),
            GetTensorData<float>(float_weights_time), GetTensorShape(bias),
            GetTensorData<float>(bias), GetTensorData<float>(scratch),
            GetTensorData<float>(scaling_factors),
            GetTensorData<int8_t>(input_quantized), GetTensorData<float>(state),
            GetTensorShape(output), GetTensorData<float>(output),
            zero_points_ptr, row_sums_ptr, &op_data->compute_row_sums);
        return kTfLiteOk;
      }
      // Full-integer path.
      auto* input_params = reinterpret_cast<TfLiteAffineQuantization*>(
          input->quantization.params);
      auto* output_params = reinterpret_cast<TfLiteAffineQuantization*>(
          output->quantization.params);
      TfLiteTensor* output_temp;
      TF_LITE_ENSURE_OK(
          context, GetTemporarySafe(context, node, /*index=*/1, &output_temp));
      // Currently supports only ReLU.
      // TODO(jianlijianli): support other activations.
      TF_LITE_ENSURE_EQ(context, params->activation, kTfLiteActRelu);
      reference_ops::EvalIntegerSVDF(
          params, GetTensorShape(input), GetTensorData<int8_t>(input),
          GetTensorShape(weights_feature),
          GetTensorData<int8_t>(weights_feature), GetTensorShape(weights_time),
          GetTensorData<int16_t>(weights_time), GetTensorShape(bias),
          GetTensorData<int32_t>(bias), GetTensorData<int16_t>(state),
          GetTensorShape(output), GetTensorData<int8_t>(output),
          GetTensorData<int32_t>(scratch), GetTensorData<int32_t>(output_temp),
          op_data->effective_scale_1_a, op_data->effective_scale_1_b,
          op_data->effective_scale_2_a, op_data->effective_scale_2_b,
          input_params->zero_point->data[0],
          output_params->zero_point->data[0]);
      return kTfLiteOk;
    }
    default:
      context->ReportError(context, "Type %s not currently supported.",
                           TfLiteTypeGetName(weights_feature->type));
      return kTfLiteError;
  }
}
| 1
|
481,788
|
/*
 * I/O broker callback for the query handler subsystem.
 *
 * When invoked on the listening socket it accepts a new connection,
 * creates an iocache for it and registers the new fd with this same
 * callback. On an already-accepted connection it reads the request,
 * parses '[@|#]<handler>[<SP>][<query>]\0' and dispatches to the
 * registered handler.
 *
 * Fixes:
 *  - the return value of accept() was never checked, so a negative fd
 *    was handed to nsock_printf()/iobroker_register();
 *  - `handler` was left NULL for requests without an '@'/'#' prefix,
 *    although the documented protocol makes the prefix optional, leading
 *    to a NULL pointer reaching qh_find_handler() and the "%s" in the
 *    404 message.
 */
static int qh_input(int sd, int events, void *ioc_)
{
	iocache *ioc = (iocache *)ioc_;
	int result = 0;

	/*
	 input on main socket, so accept one
	 this is when a worker initially connects
	 we create the iocache and then register
	 that to a new socket descriptor and this function
	 so that ioc_ != NULL next time
	*/
	if (sd == qh_listen_sock) {
		struct sockaddr sa;
		socklen_t slen = 0;
		int nsd = 0;

		/* shut valgrind up */
		memset(&sa, 0, sizeof(sa));
		nsd = accept(sd, &sa, &slen);
		/* BUGFIX: accept() can fail; don't use a negative fd below */
		if (nsd < 0) {
			logit(NSLOG_RUNTIME_ERROR, TRUE, "qh: Failed to accept inbound connection: %s\n", strerror(errno));
			return 0;
		}
		if (qh_max_running && qh_running >= qh_max_running) {
			nsock_printf(nsd, "503: Server full");
			close(nsd);
			return 0;
		}
		ioc = iocache_create(16384);
		if (ioc == NULL) {
			logit(NSLOG_RUNTIME_ERROR, TRUE, "qh: Failed to create iocache for inbound request\n");
			nsock_printf(nsd, "500: Internal server error");
			close(nsd);
			return 0;
		}
		/*
		 * @todo: Stash the iocache and the socket in some
		 * addressable list so we can release them on deinit
		 */
		result = iobroker_register(nagios_iobs, nsd, ioc, qh_input);
		if (result < 0) {
			logit(NSLOG_RUNTIME_ERROR, TRUE, "qh: Failed to register input socket %d with I/O broker: %s\n", nsd, strerror(errno));
			iocache_destroy(ioc);
			close(nsd);
			return 0;
		}
		/* make it non-blocking, but leave kernel buffers unchanged */
		worker_set_sockopts(nsd, 0);
		qh_running++;
		return 0;
	}
	/*
	 this is when an existing connection
	 sends more data after they've already made
	 the connection
	*/
	else {
		unsigned long len = 0;
		unsigned int query_len = 0;
		struct query_handler *qh = NULL;
		char *buf = NULL;
		char *space = NULL;
		char *handler = NULL;
		char *query = NULL;

		result = iocache_read(ioc, sd);
		/* disconnect? */
		if (result == 0 || (result < 0 && errno == EPIPE)) {
			iocache_destroy(ioc);
			iobroker_close(nagios_iobs, sd);
			qh_running--;
			return 0;
		}
		/*
		 * A request looks like this: '[@|#]<qh>[<SP>][<query>]\0'.
		 * That is, optional '#' (oneshot) or '@' (keepalive),
		 * followed by the name of a registered handler, followed by
		 * an optional space and an optional query. If the handler
		 * has no "default" handler, a query is required or an error
		 * will be thrown.
		 */
		/* Use data up to the first nul byte */
		buf = iocache_use_delim(ioc, "\0", 1, &len);
		if (buf == NULL) {
			return 0;
		}
		/* Identify handler part and any magic query bytes.
		 * BUGFIX: the '@'/'#' prefix is optional, so the handler name
		 * defaults to the start of the buffer. */
		handler = buf;
		if (*buf == '@' || *buf == '#') {
			handler = buf + 1;
		}
		/* Locate query (if any) */
		space = strchr(buf, ' ');
		if (space != NULL) {
			*space = 0;
			query = space + 1;
			query_len = len - (unsigned long)(query - buf);
		}
		/* locate the handler */
		qh = qh_find_handler(handler);
		/* not found. that's a 404 */
		if (qh == NULL) {
			nsock_printf(sd, "404: %s: No such handler", handler);
			iobroker_close(nagios_iobs, sd);
			iocache_destroy(ioc);
			return 0;
		}
		/* strip trailing newlines */
		while (query_len > 0
		       && (query[query_len - 1] == 0 || query[query_len - 1] == '\n')) {
			query[--query_len] = 0;
		}
		/* now pass the query to the handler */
		result = qh->handler(sd, query, query_len);
		if (result >= 100) {
			nsock_printf_nul(sd, "%d: %s", result, qh_strerror(result));
		}
		/* error code or one-shot query */
		if (result >= 300 || *buf == '#') {
			iobroker_close(nagios_iobs, sd);
			iocache_destroy(ioc);
			return 0;
		}
		/* check for magic handler codes */
		switch (result) {
		/* oneshot handler */
		case QH_CLOSE:
		/* general error */
		case -1:
			iobroker_close(nagios_iobs, sd);
			/* fallthrough */
		/* handler takes over */
		case QH_TAKEOVER:
		/* switch protocol (takeover + message) */
		case 101:
			iocache_destroy(ioc);
			break;
		}
	}
	return 0;
}
| 0
|
509,491
|
/*
  End an index scan: reset the active index marker, remove any pushed-down
  index condition from the Maria handler, and close the disk-sweep MRR
  scan state. Always returns 0.
*/
int ha_maria::index_end()
{
active_index=MAX_KEY;
/* Detach the pushed index condition function from the storage engine. */
ma_set_index_cond_func(file, NULL, 0);
in_range_check_pushed_down= FALSE;
ds_mrr.dsmrr_close();
return 0;
}
| 0
|
409,489
|
/*
 * Return the RGB value for "color". In the terminal GUI a guicolor_T
 * already holds the RGB value directly, so this is the identity.
 * NOTE(review): the return type is declared on the preceding line
 * (Vim's function style) outside this view.
 */
termgui_mch_get_rgb(guicolor_T color)
{
return color;
}
| 0
|
238,604
|
/*
 * Resolve a ldimm64 instruction that carries a pseudo BTF id (a kernel or
 * module ksym reference): look up the variable's address via kallsyms,
 * patch the 64-bit address into the two-instruction ldimm64 pair, record
 * the resulting register type in @aux, and track the BTF object (taking a
 * module reference when the BTF belongs to a module) in env->used_btfs.
 *
 * Returns 0 on success or a negative errno; on error the BTF reference
 * taken at the top is dropped via the err_put label.
 */
static int check_pseudo_btf_id(struct bpf_verifier_env *env,
struct bpf_insn *insn,
struct bpf_insn_aux_data *aux)
{
const struct btf_var_secinfo *vsi;
const struct btf_type *datasec;
struct btf_mod_pair *btf_mod;
const struct btf_type *t;
const char *sym_name;
bool percpu = false;
u32 type, id = insn->imm;
struct btf *btf;
s32 datasec_id;
u64 addr;
int i, btf_fd, err;
/* insn[1].imm carries an optional FD of a module BTF object. */
btf_fd = insn[1].imm;
if (btf_fd) {
btf = btf_get_by_fd(btf_fd);
if (IS_ERR(btf)) {
verbose(env, "invalid module BTF object FD specified.\n");
return -EINVAL;
}
} else {
/* No FD: fall back to vmlinux BTF, taking our own reference. */
if (!btf_vmlinux) {
verbose(env, "kernel is missing BTF, make sure CONFIG_DEBUG_INFO_BTF=y is specified in Kconfig.\n");
return -EINVAL;
}
btf = btf_vmlinux;
btf_get(btf);
}
t = btf_type_by_id(btf, id);
if (!t) {
verbose(env, "ldimm64 insn specifies invalid btf_id %d.\n", id);
err = -ENOENT;
goto err_put;
}
if (!btf_type_is_var(t)) {
verbose(env, "pseudo btf_id %d in ldimm64 isn't KIND_VAR.\n", id);
err = -EINVAL;
goto err_put;
}
/* Resolve the symbol's runtime address through kallsyms. */
sym_name = btf_name_by_offset(btf, t->name_off);
addr = kallsyms_lookup_name(sym_name);
if (!addr) {
verbose(env, "ldimm64 failed to find the address for kernel symbol '%s'.\n",
sym_name);
err = -ENOENT;
goto err_put;
}
/* A variable listed in the .data..percpu DATASEC is a per-CPU ksym. */
datasec_id = find_btf_percpu_datasec(btf);
if (datasec_id > 0) {
datasec = btf_type_by_id(btf, datasec_id);
for_each_vsi(i, datasec, vsi) {
if (vsi->type == id) {
percpu = true;
break;
}
}
}
/* Patch the 64-bit address into the ldimm64 instruction pair. */
insn[0].imm = (u32)addr;
insn[1].imm = addr >> 32;
type = t->type;
t = btf_type_skip_modifiers(btf, type, NULL);
if (percpu) {
aux->btf_var.reg_type = PTR_TO_PERCPU_BTF_ID;
aux->btf_var.btf = btf;
aux->btf_var.btf_id = type;
} else if (!btf_type_is_struct(t)) {
const struct btf_type *ret;
const char *tname;
u32 tsize;
/* resolve the type size of ksym. */
ret = btf_resolve_size(btf, t, &tsize);
if (IS_ERR(ret)) {
tname = btf_name_by_offset(btf, t->name_off);
verbose(env, "ldimm64 unable to resolve the size of type '%s': %ld\n",
tname, PTR_ERR(ret));
err = -EINVAL;
goto err_put;
}
aux->btf_var.reg_type = PTR_TO_MEM | MEM_RDONLY;
aux->btf_var.mem_size = tsize;
} else {
aux->btf_var.reg_type = PTR_TO_BTF_ID;
aux->btf_var.btf = btf;
aux->btf_var.btf_id = type;
}
/* check whether we recorded this BTF (and maybe module) already */
for (i = 0; i < env->used_btf_cnt; i++) {
if (env->used_btfs[i].btf == btf) {
/* Already tracked: drop the extra reference taken above. */
btf_put(btf);
return 0;
}
}
if (env->used_btf_cnt >= MAX_USED_BTFS) {
err = -E2BIG;
goto err_put;
}
btf_mod = &env->used_btfs[env->used_btf_cnt];
btf_mod->btf = btf;
btf_mod->module = NULL;
/* if we reference variables from kernel module, bump its refcount */
if (btf_is_module(btf)) {
btf_mod->module = btf_try_get_module(btf);
if (!btf_mod->module) {
err = -ENXIO;
goto err_put;
}
}
env->used_btf_cnt++;
return 0;
err_put:
btf_put(btf);
return err;
}
| 0
|
247,591
|
// Verifies that a TLS handshake fails with SSLV3_ALERT_CERTIFICATE_EXPIRED
// when the client presents an expired certificate and the server is
// configured to reject expired certs (allow_expired_cert = false).
TEST_P(SslSocketTest, FailedClientCertificateExpirationVerification) {
envoy::config::listener::v3::Listener listener;
envoy::extensions::transport_sockets::tls::v3::UpstreamTlsContext client;
OptionalServerConfig server_config;
server_config.allow_expired_cert = false;
configureServerAndExpiredClientCertificate(listener, client, server_config);
// Expect handshake failure (third argument false) for this IP version.
TestUtilOptionsV2 test_options(listener, client, false, GetParam());
testUtilV2(test_options.setExpectedClientCertUri("spiffe://lyft.com/test-team")
.setExpectedTransportFailureReasonContains("SSLV3_ALERT_CERTIFICATE_EXPIRED"));
}
| 0
|
195,720
|
/*
 * Pull newly-derived ciphers out of the server handshake layer and install
 * them on the connection/read codec, advancing the QUIC handshake state.
 * Installation order and the conditional checks are significant: 1-RTT
 * write keys also trigger transport-parameter negotiation, and the 1-RTT
 * read keys clear the early-data writable-bytes limit.
 */
void updateHandshakeState(QuicServerConnectionState& conn) {
// Zero RTT read cipher is available after chlo is processed with the
// condition that early data attempt is accepted.
auto handshakeLayer = conn.serverHandshakeLayer;
auto zeroRttReadCipher = handshakeLayer->getZeroRttReadCipher();
auto zeroRttHeaderCipher = handshakeLayer->getZeroRttReadHeaderCipher();
// One RTT write cipher is available at Fizz layer after chlo is processed.
// However, the cipher is only exported to QUIC if early data attempt is
// accepted. Otherwise, the cipher will be available after cfin is
// processed.
auto oneRttWriteCipher = handshakeLayer->getOneRttWriteCipher();
// One RTT read cipher is available after cfin is processed.
auto oneRttReadCipher = handshakeLayer->getOneRttReadCipher();
auto oneRttWriteHeaderCipher = handshakeLayer->getOneRttWriteHeaderCipher();
auto oneRttReadHeaderCipher = handshakeLayer->getOneRttReadHeaderCipher();
if (zeroRttReadCipher) {
if (conn.qLogger) {
conn.qLogger->addTransportStateUpdate(kDerivedZeroRttReadCipher);
}
QUIC_TRACE(fst_trace, conn, "derived 0-rtt read cipher");
conn.readCodec->setZeroRttReadCipher(std::move(zeroRttReadCipher));
}
if (zeroRttHeaderCipher) {
conn.readCodec->setZeroRttHeaderCipher(std::move(zeroRttHeaderCipher));
}
if (oneRttWriteHeaderCipher) {
conn.oneRttWriteHeaderCipher = std::move(oneRttWriteHeaderCipher);
}
if (oneRttReadHeaderCipher) {
conn.readCodec->setOneRttHeaderCipher(std::move(oneRttReadHeaderCipher));
}
if (oneRttWriteCipher) {
if (conn.qLogger) {
conn.qLogger->addTransportStateUpdate(kDerivedOneRttWriteCipher);
}
QUIC_TRACE(fst_trace, conn, "derived 1-rtt write cipher");
// The 1-RTT write cipher must only ever be installed once.
CHECK(!conn.oneRttWriteCipher.get());
conn.oneRttWriteCipher = std::move(oneRttWriteCipher);
updatePacingOnKeyEstablished(conn);
// We negotiate the transport parameters whenever we have the 1-RTT write
// keys available.
auto clientParams = handshakeLayer->getClientTransportParams();
if (!clientParams) {
throw QuicTransportException(
"No client transport params",
TransportErrorCode::TRANSPORT_PARAMETER_ERROR);
}
processClientInitialParams(conn, std::move(*clientParams));
}
if (oneRttReadCipher) {
if (conn.qLogger) {
conn.qLogger->addTransportStateUpdate(kDerivedOneRttReadCipher);
}
QUIC_TRACE(fst_trace, conn, "derived 1-rtt read cipher");
// Clear limit because CFIN is received at this point
conn.writableBytesLimit = folly::none;
conn.readCodec->setOneRttReadCipher(std::move(oneRttReadCipher));
}
auto handshakeReadCipher = handshakeLayer->getHandshakeReadCipher();
auto handshakeReadHeaderCipher =
handshakeLayer->getHandshakeReadHeaderCipher();
if (handshakeReadCipher) {
// Packet and header ciphers are derived together; both must exist.
CHECK(handshakeReadHeaderCipher);
conn.readCodec->setHandshakeReadCipher(std::move(handshakeReadCipher));
conn.readCodec->setHandshakeHeaderCipher(
std::move(handshakeReadHeaderCipher));
}
if (handshakeLayer->isHandshakeDone()) {
CHECK(conn.oneRttWriteCipher);
// HANDSHAKE_DONE is sent once, and not for the MVFST_D24 draft version.
if (conn.version != QuicVersion::MVFST_D24 && !conn.sentHandshakeDone) {
sendSimpleFrame(conn, HandshakeDoneFrame());
conn.sentHandshakeDone = true;
}
}
}
| 1
|
252,298
|
size_t SaveEXRImageToMemory(const EXRImage *exr_image,
const EXRHeader *exr_header,
unsigned char **memory_out, const char **err) {
if (exr_image == NULL || memory_out == NULL ||
exr_header->compression_type < 0) {
tinyexr::SetErrorMessage("Invalid argument for SaveEXRImageToMemory", err);
return 0;
}
#if !TINYEXR_USE_PIZ
if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {
tinyexr::SetErrorMessage("PIZ compression is not supported in this build",
err);
return 0;
}
#endif
#if !TINYEXR_USE_ZFP
if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
tinyexr::SetErrorMessage("ZFP compression is not supported in this build",
err);
return 0;
}
#endif
#if TINYEXR_USE_ZFP
for (size_t i = 0; i < static_cast<size_t>(exr_header->num_channels); i++) {
if (exr_header->requested_pixel_types[i] != TINYEXR_PIXELTYPE_FLOAT) {
tinyexr::SetErrorMessage("Pixel type must be FLOAT for ZFP compression",
err);
return 0;
}
}
#endif
std::vector<unsigned char> memory;
// Header
{
const char header[] = {0x76, 0x2f, 0x31, 0x01};
memory.insert(memory.end(), header, header + 4);
}
// Version, scanline.
{
char marker[] = {2, 0, 0, 0};
/* @todo
if (exr_header->tiled) {
marker[1] |= 0x2;
}
if (exr_header->long_name) {
marker[1] |= 0x4;
}
if (exr_header->non_image) {
marker[1] |= 0x8;
}
if (exr_header->multipart) {
marker[1] |= 0x10;
}
*/
memory.insert(memory.end(), marker, marker + 4);
}
int num_scanlines = 1;
if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) {
num_scanlines = 16;
} else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {
num_scanlines = 32;
} else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
num_scanlines = 16;
}
// Write attributes.
std::vector<tinyexr::ChannelInfo> channels;
{
std::vector<unsigned char> data;
for (int c = 0; c < exr_header->num_channels; c++) {
tinyexr::ChannelInfo info;
info.p_linear = 0;
info.pixel_type = exr_header->requested_pixel_types[c];
info.x_sampling = 1;
info.y_sampling = 1;
info.name = std::string(exr_header->channels[c].name);
channels.push_back(info);
}
tinyexr::WriteChannelInfo(data, channels);
tinyexr::WriteAttributeToMemory(&memory, "channels", "chlist", &data.at(0),
static_cast<int>(data.size()));
}
{
int comp = exr_header->compression_type;
tinyexr::swap4(reinterpret_cast<unsigned int *>(&comp));
tinyexr::WriteAttributeToMemory(
&memory, "compression", "compression",
reinterpret_cast<const unsigned char *>(&comp), 1);
}
{
int data[4] = {0, 0, exr_image->width - 1, exr_image->height - 1};
tinyexr::swap4(reinterpret_cast<unsigned int *>(&data[0]));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&data[1]));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&data[2]));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&data[3]));
tinyexr::WriteAttributeToMemory(
&memory, "dataWindow", "box2i",
reinterpret_cast<const unsigned char *>(data), sizeof(int) * 4);
tinyexr::WriteAttributeToMemory(
&memory, "displayWindow", "box2i",
reinterpret_cast<const unsigned char *>(data), sizeof(int) * 4);
}
{
unsigned char line_order = 0; // @fixme { read line_order from EXRHeader }
tinyexr::WriteAttributeToMemory(&memory, "lineOrder", "lineOrder",
&line_order, 1);
}
{
float aspectRatio = 1.0f;
tinyexr::swap4(reinterpret_cast<unsigned int *>(&aspectRatio));
tinyexr::WriteAttributeToMemory(
&memory, "pixelAspectRatio", "float",
reinterpret_cast<const unsigned char *>(&aspectRatio), sizeof(float));
}
{
float center[2] = {0.0f, 0.0f};
tinyexr::swap4(reinterpret_cast<unsigned int *>(¢er[0]));
tinyexr::swap4(reinterpret_cast<unsigned int *>(¢er[1]));
tinyexr::WriteAttributeToMemory(
&memory, "screenWindowCenter", "v2f",
reinterpret_cast<const unsigned char *>(center), 2 * sizeof(float));
}
{
float w = static_cast<float>(exr_image->width);
tinyexr::swap4(reinterpret_cast<unsigned int *>(&w));
tinyexr::WriteAttributeToMemory(&memory, "screenWindowWidth", "float",
reinterpret_cast<const unsigned char *>(&w),
sizeof(float));
}
// Custom attributes
if (exr_header->num_custom_attributes > 0) {
for (int i = 0; i < exr_header->num_custom_attributes; i++) {
tinyexr::WriteAttributeToMemory(
&memory, exr_header->custom_attributes[i].name,
exr_header->custom_attributes[i].type,
reinterpret_cast<const unsigned char *>(
exr_header->custom_attributes[i].value),
exr_header->custom_attributes[i].size);
}
}
{ // end of header
unsigned char e = 0;
memory.push_back(e);
}
int num_blocks = exr_image->height / num_scanlines;
if (num_blocks * num_scanlines < exr_image->height) {
num_blocks++;
}
std::vector<tinyexr::tinyexr_uint64> offsets(static_cast<size_t>(num_blocks));
size_t headerSize = memory.size();
tinyexr::tinyexr_uint64 offset =
headerSize +
static_cast<size_t>(num_blocks) *
sizeof(
tinyexr::tinyexr_int64); // sizeof(header) + sizeof(offsetTable)
std::vector<std::vector<unsigned char> > data_list(
static_cast<size_t>(num_blocks));
std::vector<size_t> channel_offset_list(
static_cast<size_t>(exr_header->num_channels));
int pixel_data_size = 0;
size_t channel_offset = 0;
for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) {
channel_offset_list[c] = channel_offset;
if (exr_header->requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
pixel_data_size += sizeof(unsigned short);
channel_offset += sizeof(unsigned short);
} else if (exr_header->requested_pixel_types[c] ==
TINYEXR_PIXELTYPE_FLOAT) {
pixel_data_size += sizeof(float);
channel_offset += sizeof(float);
} else if (exr_header->requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT) {
pixel_data_size += sizeof(unsigned int);
channel_offset += sizeof(unsigned int);
} else {
assert(0);
}
}
#if TINYEXR_USE_ZFP
tinyexr::ZFPCompressionParam zfp_compression_param;
// Use ZFP compression parameter from custom attributes(if such a parameter
// exists)
{
bool ret = tinyexr::FindZFPCompressionParam(
&zfp_compression_param, exr_header->custom_attributes,
exr_header->num_custom_attributes);
if (!ret) {
// Use predefined compression parameter.
zfp_compression_param.type = 0;
zfp_compression_param.rate = 2;
}
}
#endif
// Use signed int since some OpenMP compiler doesn't allow unsigned type for
// `parallel for`
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (int i = 0; i < num_blocks; i++) {
size_t ii = static_cast<size_t>(i);
int start_y = num_scanlines * i;
int endY = (std::min)(num_scanlines * (i + 1), exr_image->height);
int h = endY - start_y;
std::vector<unsigned char> buf(
static_cast<size_t>(exr_image->width * h * pixel_data_size));
for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) {
if (exr_header->pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
if (exr_header->requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) {
for (int y = 0; y < h; y++) {
// Assume increasing Y
float *line_ptr = reinterpret_cast<float *>(&buf.at(
static_cast<size_t>(pixel_data_size * y * exr_image->width) +
channel_offset_list[c] *
static_cast<size_t>(exr_image->width)));
for (int x = 0; x < exr_image->width; x++) {
tinyexr::FP16 h16;
h16.u = reinterpret_cast<unsigned short **>(
exr_image->images)[c][(y + start_y) * exr_image->width + x];
tinyexr::FP32 f32 = half_to_float(h16);
tinyexr::swap4(reinterpret_cast<unsigned int *>(&f32.f));
// line_ptr[x] = f32.f;
tinyexr::cpy4(line_ptr + x, &(f32.f));
}
}
} else if (exr_header->requested_pixel_types[c] ==
TINYEXR_PIXELTYPE_HALF) {
for (int y = 0; y < h; y++) {
// Assume increasing Y
unsigned short *line_ptr = reinterpret_cast<unsigned short *>(
&buf.at(static_cast<size_t>(pixel_data_size * y *
exr_image->width) +
channel_offset_list[c] *
static_cast<size_t>(exr_image->width)));
for (int x = 0; x < exr_image->width; x++) {
unsigned short val = reinterpret_cast<unsigned short **>(
exr_image->images)[c][(y + start_y) * exr_image->width + x];
tinyexr::swap2(&val);
// line_ptr[x] = val;
tinyexr::cpy2(line_ptr + x, &val);
}
}
} else {
assert(0);
}
} else if (exr_header->pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) {
if (exr_header->requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
for (int y = 0; y < h; y++) {
// Assume increasing Y
unsigned short *line_ptr = reinterpret_cast<unsigned short *>(
&buf.at(static_cast<size_t>(pixel_data_size * y *
exr_image->width) +
channel_offset_list[c] *
static_cast<size_t>(exr_image->width)));
for (int x = 0; x < exr_image->width; x++) {
tinyexr::FP32 f32;
f32.f = reinterpret_cast<float **>(
exr_image->images)[c][(y + start_y) * exr_image->width + x];
tinyexr::FP16 h16;
h16 = float_to_half_full(f32);
tinyexr::swap2(reinterpret_cast<unsigned short *>(&h16.u));
// line_ptr[x] = h16.u;
tinyexr::cpy2(line_ptr + x, &(h16.u));
}
}
} else if (exr_header->requested_pixel_types[c] ==
TINYEXR_PIXELTYPE_FLOAT) {
for (int y = 0; y < h; y++) {
// Assume increasing Y
float *line_ptr = reinterpret_cast<float *>(&buf.at(
static_cast<size_t>(pixel_data_size * y * exr_image->width) +
channel_offset_list[c] *
static_cast<size_t>(exr_image->width)));
for (int x = 0; x < exr_image->width; x++) {
float val = reinterpret_cast<float **>(
exr_image->images)[c][(y + start_y) * exr_image->width + x];
tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));
// line_ptr[x] = val;
tinyexr::cpy4(line_ptr + x, &val);
}
}
} else {
assert(0);
}
} else if (exr_header->pixel_types[c] == TINYEXR_PIXELTYPE_UINT) {
for (int y = 0; y < h; y++) {
// Assume increasing Y
unsigned int *line_ptr = reinterpret_cast<unsigned int *>(&buf.at(
static_cast<size_t>(pixel_data_size * y * exr_image->width) +
channel_offset_list[c] * static_cast<size_t>(exr_image->width)));
for (int x = 0; x < exr_image->width; x++) {
unsigned int val = reinterpret_cast<unsigned int **>(
exr_image->images)[c][(y + start_y) * exr_image->width + x];
tinyexr::swap4(&val);
// line_ptr[x] = val;
tinyexr::cpy4(line_ptr + x, &val);
}
}
}
}
if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_NONE) {
// 4 byte: scan line
// 4 byte: data size
// ~ : pixel data(uncompressed)
std::vector<unsigned char> header(8);
unsigned int data_len = static_cast<unsigned int>(buf.size());
memcpy(&header.at(0), &start_y, sizeof(int));
memcpy(&header.at(4), &data_len, sizeof(unsigned int));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(0)));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(4)));
data_list[ii].insert(data_list[ii].end(), header.begin(), header.end());
data_list[ii].insert(data_list[ii].end(), buf.begin(),
buf.begin() + data_len);
} else if ((exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS) ||
(exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIP)) {
#if TINYEXR_USE_MINIZ
std::vector<unsigned char> block(tinyexr::miniz::mz_compressBound(
static_cast<unsigned long>(buf.size())));
#else
std::vector<unsigned char> block(
compressBound(static_cast<uLong>(buf.size())));
#endif
tinyexr::tinyexr_uint64 outSize = block.size();
tinyexr::CompressZip(&block.at(0), outSize,
reinterpret_cast<const unsigned char *>(&buf.at(0)),
static_cast<unsigned long>(buf.size()));
// 4 byte: scan line
// 4 byte: data size
// ~ : pixel data(compressed)
std::vector<unsigned char> header(8);
unsigned int data_len = static_cast<unsigned int>(outSize); // truncate
memcpy(&header.at(0), &start_y, sizeof(int));
memcpy(&header.at(4), &data_len, sizeof(unsigned int));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(0)));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(4)));
data_list[ii].insert(data_list[ii].end(), header.begin(), header.end());
data_list[ii].insert(data_list[ii].end(), block.begin(),
block.begin() + data_len);
} else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_RLE) {
// (buf.size() * 3) / 2 would be enough.
std::vector<unsigned char> block((buf.size() * 3) / 2);
tinyexr::tinyexr_uint64 outSize = block.size();
tinyexr::CompressRle(&block.at(0), outSize,
reinterpret_cast<const unsigned char *>(&buf.at(0)),
static_cast<unsigned long>(buf.size()));
// 4 byte: scan line
// 4 byte: data size
// ~ : pixel data(compressed)
std::vector<unsigned char> header(8);
unsigned int data_len = static_cast<unsigned int>(outSize); // truncate
memcpy(&header.at(0), &start_y, sizeof(int));
memcpy(&header.at(4), &data_len, sizeof(unsigned int));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(0)));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(4)));
data_list[ii].insert(data_list[ii].end(), header.begin(), header.end());
data_list[ii].insert(data_list[ii].end(), block.begin(),
block.begin() + data_len);
} else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {
#if TINYEXR_USE_PIZ
unsigned int bufLen =
8192 + static_cast<unsigned int>(
2 * static_cast<unsigned int>(
buf.size())); // @fixme { compute good bound. }
std::vector<unsigned char> block(bufLen);
unsigned int outSize = static_cast<unsigned int>(block.size());
CompressPiz(&block.at(0), &outSize,
reinterpret_cast<const unsigned char *>(&buf.at(0)),
buf.size(), channels, exr_image->width, h);
// 4 byte: scan line
// 4 byte: data size
// ~ : pixel data(compressed)
std::vector<unsigned char> header(8);
unsigned int data_len = outSize;
memcpy(&header.at(0), &start_y, sizeof(int));
memcpy(&header.at(4), &data_len, sizeof(unsigned int));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(0)));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(4)));
data_list[ii].insert(data_list[ii].end(), header.begin(), header.end());
data_list[ii].insert(data_list[ii].end(), block.begin(),
block.begin() + data_len);
#else
assert(0);
#endif
} else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
#if TINYEXR_USE_ZFP
std::vector<unsigned char> block;
unsigned int outSize;
tinyexr::CompressZfp(
&block, &outSize, reinterpret_cast<const float *>(&buf.at(0)),
exr_image->width, h, exr_header->num_channels, zfp_compression_param);
// 4 byte: scan line
// 4 byte: data size
// ~ : pixel data(compressed)
std::vector<unsigned char> header(8);
unsigned int data_len = outSize;
memcpy(&header.at(0), &start_y, sizeof(int));
memcpy(&header.at(4), &data_len, sizeof(unsigned int));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(0)));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(4)));
data_list[ii].insert(data_list[ii].end(), header.begin(), header.end());
data_list[ii].insert(data_list[ii].end(), block.begin(),
block.begin() + data_len);
#else
assert(0);
#endif
} else {
assert(0);
}
} // omp parallel
for (size_t i = 0; i < static_cast<size_t>(num_blocks); i++) {
offsets[i] = offset;
tinyexr::swap8(reinterpret_cast<tinyexr::tinyexr_uint64 *>(&offsets[i]));
offset += data_list[i].size();
}
size_t totalSize = static_cast<size_t>(offset);
{
memory.insert(
memory.end(), reinterpret_cast<unsigned char *>(&offsets.at(0)),
reinterpret_cast<unsigned char *>(&offsets.at(0)) +
sizeof(tinyexr::tinyexr_uint64) * static_cast<size_t>(num_blocks));
}
if (memory.size() == 0) {
tinyexr::SetErrorMessage("Output memory size is zero", err);
return 0;
}
(*memory_out) = static_cast<unsigned char *>(malloc(totalSize));
memcpy((*memory_out), &memory.at(0), memory.size());
unsigned char *memory_ptr = *memory_out + memory.size();
for (size_t i = 0; i < static_cast<size_t>(num_blocks); i++) {
memcpy(memory_ptr, &data_list[i].at(0), data_list[i].size());
memory_ptr += data_list[i].size();
}
return totalSize; // OK
}
| 0
|
455,327
|
bash_default_completion (text, start, end, qc, compflags)
const char *text;
int start, end, qc, compflags;
{
char **matches, *t;
matches = (char **)NULL;
/* New posix-style command substitution or variable name? */
if (*text == '$')
{
if (qc != '\'' && text[1] == '(') /* ) */
matches = rl_completion_matches (text, command_subst_completion_function);
else
{
matches = rl_completion_matches (text, variable_completion_function);
/* If a single match, see if it expands to a directory name and append
a slash if it does. This requires us to expand the variable name,
so we don't want to display errors if the variable is unset. This
can happen with dynamic variables whose value has never been
requested. */
if (matches && matches[0] && matches[1] == 0)
{
t = savestring (matches[0]);
bash_filename_stat_hook (&t);
/* doesn't use test_for_directory because that performs tilde
expansion */
if (file_isdir (t))
rl_completion_append_character = '/';
free (t);
}
}
}
/* If the word starts in `~', and there is no slash in the word, then
try completing this word as a username. */
if (matches == 0 && *text == '~' && mbschr (text, '/') == 0)
matches = rl_completion_matches (text, rl_username_completion_function);
/* Another one. Why not? If the word starts in '@', then look through
the world of known hostnames for completion first. */
if (matches == 0 && perform_hostname_completion && *text == '@')
matches = rl_completion_matches (text, hostname_completion_function);
/* And last, (but not least) if this word is in a command position, then
complete over possible command names, including aliases, functions,
and command names. */
if (matches == 0 && (compflags & DEFCOMP_CMDPOS))
{
/* If END == START and text[0] == 0, we are trying to complete an empty
command word. */
if (no_empty_command_completion && end == start && text[0] == '\0')
{
matches = (char **)NULL;
rl_ignore_some_completions_function = bash_ignore_everything;
}
else
{
#define CMD_IS_DIR(x) (absolute_pathname(x) == 0 && absolute_program(x) == 0 && *(x) != '~' && test_for_directory (x))
dot_in_path = 0;
matches = rl_completion_matches (text, command_word_completion_function);
/* If we are attempting command completion and nothing matches, we
do not want readline to perform filename completion for us. We
still want to be able to complete partial pathnames, so set the
completion ignore function to something which will remove
filenames and leave directories in the match list. */
if (matches == (char **)NULL)
rl_ignore_some_completions_function = bash_ignore_filenames;
else if (matches[1] == 0 && CMD_IS_DIR(matches[0]) && dot_in_path == 0)
/* If we found a single match, without looking in the current
directory (because it's not in $PATH), but the found name is
also a command in the current directory, suppress appending any
terminating character, since it's ambiguous. */
{
rl_completion_suppress_append = 1;
rl_filename_completion_desired = 0;
}
else if (matches[0] && matches[1] && STREQ (matches[0], matches[1]) && CMD_IS_DIR (matches[0]))
/* There are multiple instances of the same match (duplicate
completions haven't yet been removed). In this case, all of
the matches will be the same, and the duplicate removal code
will distill them all down to one. We turn on
rl_completion_suppress_append for the same reason as above.
Remember: we only care if there's eventually a single unique
completion. If there are multiple completions this won't
make a difference and the problem won't occur. */
{
rl_completion_suppress_append = 1;
rl_filename_completion_desired = 0;
}
}
}
/* This could be a globbing pattern, so try to expand it using pathname
expansion. */
if (!matches && completion_glob_pattern ((char *)text))
{
matches = rl_completion_matches (text, glob_complete_word);
/* A glob expression that matches more than one filename is problematic.
If we match more than one filename, punt. */
if (matches && matches[1] && rl_completion_type == TAB)
{
strvec_dispose (matches);
matches = (char **)0;
}
else if (matches && matches[1] && rl_completion_type == '!')
{
rl_completion_suppress_append = 1;
rl_filename_completion_desired = 0;
}
}
return (matches);
}
| 0
|
328,860
|
R_API void r_bin_java_get_method_json_definitions(RBinJavaObj *bin, PJ *pj) {
r_return_if_fail (pj);
RBinJavaField *fm_type = NULL;
RListIter *iter = NULL;
pj_ka (pj, "methods");
if (!bin) {
pj_end (pj);
return;
}
r_list_foreach (bin->methods_list, iter, fm_type) {
r_bin_java_get_method_json_definition (bin, fm_type, pj);
}
pj_end (pj);
}
| 0
|
282,876
|
static int rsi_send_w9116_features(struct rsi_common *common)
{
struct rsi_wlan_9116_features *w9116_features;
u16 frame_len = sizeof(struct rsi_wlan_9116_features);
struct sk_buff *skb;
rsi_dbg(MGMT_TX_ZONE,
"%s: Sending wlan 9116 features\n", __func__);
skb = dev_alloc_skb(frame_len);
if (!skb)
return -ENOMEM;
memset(skb->data, 0, frame_len);
w9116_features = (struct rsi_wlan_9116_features *)skb->data;
w9116_features->pll_mode = common->w9116_features.pll_mode;
w9116_features->rf_type = common->w9116_features.rf_type;
w9116_features->wireless_mode = common->w9116_features.wireless_mode;
w9116_features->enable_ppe = common->w9116_features.enable_ppe;
w9116_features->afe_type = common->w9116_features.afe_type;
if (common->w9116_features.dpd)
w9116_features->feature_enable |= cpu_to_le32(RSI_DPD);
if (common->w9116_features.sifs_tx_enable)
w9116_features->feature_enable |=
cpu_to_le32(RSI_SIFS_TX_ENABLE);
if (common->w9116_features.ps_options & RSI_DUTY_CYCLING)
w9116_features->feature_enable |= cpu_to_le32(RSI_DUTY_CYCLING);
if (common->w9116_features.ps_options & RSI_END_OF_FRAME)
w9116_features->feature_enable |= cpu_to_le32(RSI_END_OF_FRAME);
w9116_features->feature_enable |=
cpu_to_le32((common->w9116_features.ps_options & ~0x3) << 2);
rsi_set_len_qno(&w9116_features->desc.desc_dword0.len_qno,
frame_len - FRAME_DESC_SZ, RSI_WIFI_MGMT_Q);
w9116_features->desc.desc_dword0.frame_type = FEATURES_ENABLE;
skb_put(skb, frame_len);
return rsi_send_internal_mgmt_frame(common, skb);
}
| 0
|
300,830
|
void tipc_sk_mcast_rcv(struct net *net, struct sk_buff_head *arrvq,
struct sk_buff_head *inputq)
{
u32 self = tipc_own_addr(net);
struct sk_buff *skb, *_skb;
u32 portid, onode;
struct sk_buff_head tmpq;
struct list_head dports;
struct tipc_msg *hdr;
struct tipc_uaddr ua;
int user, mtyp, hlen;
__skb_queue_head_init(&tmpq);
INIT_LIST_HEAD(&dports);
ua.addrtype = TIPC_SERVICE_RANGE;
/* tipc_skb_peek() increments the head skb's reference counter */
skb = tipc_skb_peek(arrvq, &inputq->lock);
for (; skb; skb = tipc_skb_peek(arrvq, &inputq->lock)) {
hdr = buf_msg(skb);
user = msg_user(hdr);
mtyp = msg_type(hdr);
hlen = skb_headroom(skb) + msg_hdr_sz(hdr);
onode = msg_orignode(hdr);
ua.sr.type = msg_nametype(hdr);
ua.sr.lower = msg_namelower(hdr);
ua.sr.upper = msg_nameupper(hdr);
if (onode == self)
ua.scope = TIPC_ANY_SCOPE;
else
ua.scope = TIPC_CLUSTER_SCOPE;
if (mtyp == TIPC_GRP_UCAST_MSG || user == GROUP_PROTOCOL) {
spin_lock_bh(&inputq->lock);
if (skb_peek(arrvq) == skb) {
__skb_dequeue(arrvq);
__skb_queue_tail(inputq, skb);
}
kfree_skb(skb);
spin_unlock_bh(&inputq->lock);
continue;
}
/* Group messages require exact scope match */
if (msg_in_group(hdr)) {
ua.sr.lower = 0;
ua.sr.upper = ~0;
ua.scope = msg_lookup_scope(hdr);
}
/* Create destination port list: */
tipc_nametbl_lookup_mcast_sockets(net, &ua, &dports);
/* Clone message per destination */
while (tipc_dest_pop(&dports, NULL, &portid)) {
_skb = __pskb_copy(skb, hlen, GFP_ATOMIC);
if (_skb) {
msg_set_destport(buf_msg(_skb), portid);
__skb_queue_tail(&tmpq, _skb);
continue;
}
pr_warn("Failed to clone mcast rcv buffer\n");
}
/* Append clones to inputq only if skb is still head of arrvq */
spin_lock_bh(&inputq->lock);
if (skb_peek(arrvq) == skb) {
skb_queue_splice_tail_init(&tmpq, inputq);
/* Decrement the skb's refcnt */
kfree_skb(__skb_dequeue(arrvq));
}
spin_unlock_bh(&inputq->lock);
__skb_queue_purge(&tmpq);
kfree_skb(skb);
}
tipc_sk_rcv(net, inputq);
}
| 0
|
430,352
|
void seq_escape_mem(struct seq_file *m, const char *src, size_t len,
unsigned int flags, const char *esc)
{
char *buf;
size_t size = seq_get_buf(m, &buf);
int ret;
ret = string_escape_mem(src, len, buf, size, flags, esc);
seq_commit(m, ret < size ? ret : -1);
}
| 0
|
204,425
|
bgp_capability_msg_parse (struct peer *peer, u_char *pnt, bgp_size_t length)
{
u_char *end;
struct capability cap;
u_char action;
struct bgp *bgp;
afi_t afi;
safi_t safi;
bgp = peer->bgp;
end = pnt + length;
while (pnt < end)
{
/* We need at least action, capability code and capability length. */
if (pnt + 3 > end)
{
zlog_info ("%s Capability length error", peer->host);
bgp_notify_send (peer, BGP_NOTIFY_CEASE, 0);
return -1;
}
action = *pnt;
/* Fetch structure to the byte stream. */
memcpy (&cap, pnt + 1, sizeof (struct capability));
/* Action value check. */
if (action != CAPABILITY_ACTION_SET
&& action != CAPABILITY_ACTION_UNSET)
{
zlog_info ("%s Capability Action Value error %d",
peer->host, action);
bgp_notify_send (peer, BGP_NOTIFY_CEASE, 0);
return -1;
}
if (BGP_DEBUG (normal, NORMAL))
zlog_debug ("%s CAPABILITY has action: %d, code: %u, length %u",
peer->host, action, cap.code, cap.length);
/* Capability length check. */
if (pnt + (cap.length + 3) > end)
{
zlog_info ("%s Capability length error", peer->host);
bgp_notify_send (peer, BGP_NOTIFY_CEASE, 0);
return -1;
}
/* We know MP Capability Code. */
if (cap.code == CAPABILITY_CODE_MP)
{
afi = ntohs (cap.mpc.afi);
safi = cap.mpc.safi;
/* Ignore capability when override-capability is set. */
if (CHECK_FLAG (peer->flags, PEER_FLAG_OVERRIDE_CAPABILITY))
continue;
/* Address family check. */
if ((afi == AFI_IP
|| afi == AFI_IP6)
&& (safi == SAFI_UNICAST
|| safi == SAFI_MULTICAST
|| safi == BGP_SAFI_VPNV4))
{
if (BGP_DEBUG (normal, NORMAL))
zlog_debug ("%s CAPABILITY has %s MP_EXT CAP for afi/safi: %u/%u",
peer->host,
action == CAPABILITY_ACTION_SET
? "Advertising" : "Removing",
ntohs(cap.mpc.afi) , cap.mpc.safi);
/* Adjust safi code. */
if (safi == BGP_SAFI_VPNV4)
safi = SAFI_MPLS_VPN;
if (action == CAPABILITY_ACTION_SET)
{
peer->afc_recv[afi][safi] = 1;
if (peer->afc[afi][safi])
{
peer->afc_nego[afi][safi] = 1;
bgp_announce_route (peer, afi, safi);
}
}
else
{
peer->afc_recv[afi][safi] = 0;
peer->afc_nego[afi][safi] = 0;
if (peer_active_nego (peer))
bgp_clear_route (peer, afi, safi);
else
BGP_EVENT_ADD (peer, BGP_Stop);
}
}
}
else
{
zlog_warn ("%s unrecognized capability code: %d - ignored",
peer->host, cap.code);
}
pnt += cap.length + 3;
}
return 0;
}
| 1
|
474,035
|
onigenc_unicode_apply_all_case_fold(OnigCaseFoldType flag,
OnigApplyAllCaseFoldFunc f, void* arg,
OnigEncoding enc ARG_UNUSED)
{
const CaseUnfold_11_Type* p11;
OnigCodePoint code;
int i, j, k, r;
/* if (CaseFoldInited == 0) init_case_fold_table(); */
for (i = 0; i < numberof(CaseUnfold_11); i++) {
p11 = &CaseUnfold_11[i];
for (j = 0; j < p11->to.n; j++) {
code = p11->from;
r = (*f)(p11->to.code[j], &code, 1, arg);
if (r != 0) return r;
code = p11->to.code[j];
r = (*f)(p11->from, &code, 1, arg);
if (r != 0) return r;
for (k = 0; k < j; k++) {
r = (*f)(p11->to.code[j], (OnigCodePoint* )(&p11->to.code[k]), 1, arg);
if (r != 0) return r;
r = (*f)(p11->to.code[k], (OnigCodePoint* )(&p11->to.code[j]), 1, arg);
if (r != 0) return r;
}
}
}
#ifdef USE_UNICODE_CASE_FOLD_TURKISH_AZERI
if ((flag & ONIGENC_CASE_FOLD_TURKISH_AZERI) != 0) {
code = 0x0131;
r = (*f)(0x0049, &code, 1, arg);
if (r != 0) return r;
code = 0x0049;
r = (*f)(0x0131, &code, 1, arg);
if (r != 0) return r;
code = 0x0130;
r = (*f)(0x0069, &code, 1, arg);
if (r != 0) return r;
code = 0x0069;
r = (*f)(0x0130, &code, 1, arg);
if (r != 0) return r;
}
else {
#endif
for (i = 0; i < numberof(CaseUnfold_11_Locale); i++) {
p11 = &CaseUnfold_11_Locale[i];
for (j = 0; j < p11->to.n; j++) {
code = p11->from;
r = (*f)(p11->to.code[j], &code, 1, arg);
if (r != 0) return r;
code = p11->to.code[j];
r = (*f)(p11->from, &code, 1, arg);
if (r != 0) return r;
for (k = 0; k < j; k++) {
r = (*f)(p11->to.code[j], (OnigCodePoint* )(&p11->to.code[k]),
1, arg);
if (r != 0) return r;
r = (*f)(p11->to.code[k], (OnigCodePoint* )(&p11->to.code[j]),
1, arg);
if (r != 0) return r;
}
}
}
#ifdef USE_UNICODE_CASE_FOLD_TURKISH_AZERI
}
#endif
if ((flag & INTERNAL_ONIGENC_CASE_FOLD_MULTI_CHAR) != 0) {
for (i = 0; i < numberof(CaseUnfold_12); i++) {
for (j = 0; j < CaseUnfold_12[i].to.n; j++) {
r = (*f)(CaseUnfold_12[i].to.code[j],
(OnigCodePoint* )CaseUnfold_12[i].from, 2, arg);
if (r != 0) return r;
for (k = 0; k < CaseUnfold_12[i].to.n; k++) {
if (k == j) continue;
r = (*f)(CaseUnfold_12[i].to.code[j],
(OnigCodePoint* )(&CaseUnfold_12[i].to.code[k]), 1, arg);
if (r != 0) return r;
}
}
}
#ifdef USE_UNICODE_CASE_FOLD_TURKISH_AZERI
if ((flag & ONIGENC_CASE_FOLD_TURKISH_AZERI) == 0) {
#endif
for (i = 0; i < numberof(CaseUnfold_12_Locale); i++) {
for (j = 0; j < CaseUnfold_12_Locale[i].to.n; j++) {
r = (*f)(CaseUnfold_12_Locale[i].to.code[j],
(OnigCodePoint* )CaseUnfold_12_Locale[i].from, 2, arg);
if (r != 0) return r;
for (k = 0; k < CaseUnfold_12_Locale[i].to.n; k++) {
if (k == j) continue;
r = (*f)(CaseUnfold_12_Locale[i].to.code[j],
(OnigCodePoint* )(&CaseUnfold_12_Locale[i].to.code[k]),
1, arg);
if (r != 0) return r;
}
}
}
#ifdef USE_UNICODE_CASE_FOLD_TURKISH_AZERI
}
#endif
for (i = 0; i < numberof(CaseUnfold_13); i++) {
for (j = 0; j < CaseUnfold_13[i].to.n; j++) {
r = (*f)(CaseUnfold_13[i].to.code[j],
(OnigCodePoint* )CaseUnfold_13[i].from, 3, arg);
if (r != 0) return r;
for (k = 0; k < CaseUnfold_13[i].to.n; k++) {
if (k == j) continue;
r = (*f)(CaseUnfold_13[i].to.code[j],
(OnigCodePoint* )(&CaseUnfold_13[i].to.code[k]), 1, arg);
if (r != 0) return r;
}
}
}
}
return 0;
}
| 0
|
225,690
|
GF_Err maxr_box_write(GF_Box *s, GF_BitStream *bs)
{
GF_Err e;
GF_MAXRBox *ptr = (GF_MAXRBox *)s;
if (ptr == NULL) return GF_BAD_PARAM;
e = gf_isom_box_write_header(s, bs);
if (e) return e;
gf_bs_write_u32(bs, ptr->granularity);
gf_bs_write_u32(bs, ptr->maxDataRate);
return GF_OK;
}
| 0
|
389,697
|
tv_get_string(typval_T *varp)
{
static char_u mybuf[NUMBUFLEN];
return tv_get_string_buf(varp, mybuf);
}
| 0
|
232,928
|
static void deflate_close_writer(struct Curl_easy *data,
struct contenc_writer *writer)
{
struct zlib_params *zp = (struct zlib_params *) &writer->params;
z_stream *z = &zp->z; /* zlib state structure */
exit_zlib(data, z, &zp->zlib_init, CURLE_OK);
}
| 0
|
257,695
|
static void mod_wstunnel_merge_config_cpv(plugin_config * const pconf, const config_plugin_value_t * const cpv) {
switch (cpv->k_id) { /* index into static config_plugin_keys_t cpk[] */
case 0: /* wstunnel.server */
if (cpv->vtype == T_CONFIG_LOCAL) {
gw_plugin_config * const gw = cpv->v.v;
pconf->gw.exts = gw->exts;
pconf->gw.exts_auth = gw->exts_auth;
pconf->gw.exts_resp = gw->exts_resp;
}
break;
case 1: /* wstunnel.balance */
/*if (cpv->vtype == T_CONFIG_LOCAL)*//*always true here for this param*/
pconf->gw.balance = (int)cpv->v.u;
break;
case 2: /* wstunnel.debug */
pconf->gw.debug = (int)cpv->v.u;
break;
case 3: /* wstunnel.map-extensions */
pconf->gw.ext_mapping = cpv->v.a;
break;
case 4: /* wstunnel.frame-type */
pconf->frame_type = cpv->v.u;
break;
case 5: /* wstunnel.origins */
pconf->origins = cpv->v.a;
break;
case 6: /* wstunnel.ping-interval */
pconf->ping_interval = cpv->v.shrt;
break;
default:/* should not happen */
return;
}
}
| 0
|
225,414
|
void v4l2_ctrl_handler_free(struct v4l2_ctrl_handler *hdl)
{
}
| 0
|
343,318
|
static int ul_handle_data(ULHandler * const ulhandler, off_t * const uploaded,
const double ts_start)
{
ssize_t readnb;
double required_sleep = 0.0;
int pollret;
int ret;
if (ulhandler->max_filesize >= (off_t) 0 &&
ulhandler->total_uploaded > ulhandler->max_filesize) {
addreply(552, MSG_ABORTED " (quota)");
return -2;
}
if (ulhandler->chunk_size > (off_t) ulhandler->sizeof_buf) {
ulhandler->chunk_size = ulhandler->max_chunk_size =
ulhandler->sizeof_buf;
}
if (ulhandler->tls_fd != NULL) {
#ifdef WITH_TLS
readnb = SSL_read(ulhandler->tls_fd, ulhandler->buf,
ulhandler->chunk_size);
#else
abort();
#endif
} else {
readnb = read(ulhandler->xferfd, ulhandler->buf,
ulhandler->chunk_size);
}
if (readnb == (ssize_t) 0) {
return 2;
}
if (readnb < (ssize_t) 0) {
if (errno == EAGAIN || errno == EINTR) {
return 0;
}
addreply_noformat(451, MSG_DATA_READ_FAILED);
return -1;
}
if (ul_dowrite(ulhandler, ulhandler->buf, readnb, uploaded) != 0) {
addreply_noformat(452, MSG_WRITE_FAILED);
return -1;
}
ulhandler->cur_pos += *uploaded;
#ifdef FTPWHO
if (shm_data_cur != NULL) {
shm_data_cur->download_current_size =
shm_data_cur->download_total_size = ulhandler->cur_pos;
}
#endif
ulhandler->total_uploaded += *uploaded;
if (ulhandler->bandwidth > 0UL) {
ulhandler_throttle(ulhandler, *uploaded, ts_start, &required_sleep);
if (required_sleep > 0.0) {
repoll:
ulhandler->pfds_command.revents = 0;
pollret = poll(&ulhandler->pfds_command, 1, required_sleep * 1000.0);
if (pollret == 0) {
return 0;
}
if (pollret < 0) {
if (errno == EINTR) {
goto repoll;
}
return -1;
}
if ((ulhandler->pfds_command.revents &
(POLLERR | POLLHUP | POLLNVAL)) != 0) {
return -1;
}
if ((ulhandler->pfds_command.revents & (POLLIN | POLLPRI)) != 0) {
ret = ulhandler_handle_commands(ulhandler);
if (ret != 0) {
return ret;
}
goto repoll;
}
}
}
return 0;
}
| 0
|
226,346
|
GF_Err iods_box_size(GF_Box *s)
{
GF_ObjectDescriptorBox *ptr = (GF_ObjectDescriptorBox *)s;
ptr->size += gf_odf_desc_size(ptr->descriptor);
return GF_OK;
}
| 0
|
264,371
|
inline const string* const* TensorProtoData<tstring>(const TensorProto& t) {
static_assert(SaveTypeTraits<tstring>::supported,
"Specified type tstring not supported for Restore");
return t.string_val().data();
}
| 0
|
294,671
|
valid_commercial_sub(int argc, VALUE *argv, VALUE klass, int need_jd)
{
VALUE nth, y;
int w, d, ry, rw, rd;
double sg;
y = argv[0];
w = NUM2INT(argv[1]);
d = NUM2INT(argv[2]);
sg = NUM2DBL(argv[3]);
valid_sg(sg);
{
int rjd, ns;
VALUE rjd2;
if (!valid_commercial_p(y, w, d, sg,
&nth, &ry,
&rw, &rd, &rjd,
&ns))
return Qnil;
if (!need_jd)
return INT2FIX(0); /* dummy */
encode_jd(nth, rjd, &rjd2);
return rjd2;
}
}
| 0
|
310,174
|
main(int argc, char *argv[])
{
int n;
int r_run, t_run, n_run;
char *old_term = getenv("TERM");
int r_opt = 1;
char *t_opt = 0;
int len_names = 0; /* cur # of items in all_names[] */
int use_names = 10; /* max # of items in all_names[] */
char **all_names = typeCalloc(char *, use_names);
int all_parms[10]; /* workspace for "-a" option */
int len_terms = 0; /* cur # of items in all_terms[] */
int use_terms = 10; /* max # of items in all_terms[] */
char **all_terms = typeCalloc(char *, use_terms);
int len_parms = 0; /* cur # of items in num_parms[], str_parms[] */
int use_parms = argc + 10; /* max # of items in num_parms[], str_parms[] */
int *num_parms = typeCalloc(int, use_parms);
char **str_parms = typeCalloc(char *, use_parms);
if (all_names == 0 || all_terms == 0 || num_parms == 0 || str_parms == 0)
failed("no memory");
while ((n = getopt(argc, argv, "T:ar:v")) != -1) {
switch (n) {
case 'T':
t_opt = optarg;
break;
case 'a':
++a_opt;
break;
case 'r':
r_opt = atoi(optarg);
break;
case 'v':
++v_opt;
break;
default:
usage();
break;
}
}
/*
* If there is a nonnumeric parameter after the options, use that as the
* capability name.
*/
if (optind < argc) {
if (!isNumeric(argv[optind])) {
all_names[len_names++] = strdup(argv[optind++]);
}
}
/*
* Any remaining arguments must be possible parameter values. If numeric,
* and "-a" is not set, use those as the maximum values within which the
* test parameters should vary.
*/
while (optind < argc) {
if (isNumeric(argv[optind])) {
char *dummy = 0;
long value = strtol(argv[optind], &dummy, 0);
num_parms[len_parms] = (int) value;
}
str_parms[len_parms] = argv[optind];
++optind;
++len_parms;
}
for (n = len_parms; n < use_parms; ++n) {
static char dummy[1];
str_parms[n] = dummy;
}
if (v_opt) {
printf("%d parameter%s%s\n", PLURAL(len_parms), COLONS(len_parms));
for (n = 0; n < len_parms; ++n) {
printf(" %d: %d (%s)\n", n + 1, num_parms[n], str_parms[n]);
}
}
/*
* Make a list of values for $TERM. Accept "-" for standard input to
* simplify scripting a check of the whole database.
*/
old_term = strdup((old_term == 0) ? "unknown" : old_term);
if (t_opt != 0) {
if (!strcmp(t_opt, "-")) {
char buffer[BUFSIZ];
while (fgets(buffer, sizeof(buffer) - 1, stdin) != 0) {
char *s = buffer;
char *t;
while (isspace(UChar(s[0])))
++s;
t = s + strlen(s);
while (t != s && isspace(UChar(t[-1])))
*--t = '\0';
s = strdup(s);
if (len_terms + 2 >= use_terms) {
use_terms *= 2;
all_terms = typeRealloc(char *, use_terms, all_terms);
if (all_terms == 0)
failed("no memory: all_terms");
}
all_terms[len_terms++] = s;
}
} else {
char *s = t_opt;
char *t;
while ((t = strtok(s, ",")) != 0) {
s = 0;
if (len_terms + 2 >= use_terms) {
use_terms *= 2;
all_terms = typeRealloc(char *, use_terms, all_terms);
if (all_terms == 0)
failed("no memory: all_terms");
}
all_terms[len_terms++] = strdup(t);
}
}
} else {
all_terms[len_terms++] = strdup(old_term);
}
all_terms[len_terms] = 0;
if (v_opt) {
printf("%d term%s:\n", PLURAL(len_terms));
for (n = 0; n < len_terms; ++n) {
printf(" %d: %s\n", n + 1, all_terms[n]);
}
}
/*
* If no capability name was selected, use the predefined list of string
* capabilities.
*
* TODO: To address the "other" systems which do not follow SVr4,
* just use the output from infocmp on $TERM.
*/
if (len_names == 0) {
#if defined(HAVE_CURSES_DATA_BOOLNAMES) || defined(DECL_CURSES_DATA_BOOLNAMES)
for (n = 0; strnames[n] != 0; ++n) {
if (len_names + 2 >= use_names) {
use_names *= 2;
all_names = typeRealloc(char *, use_names, all_names);
if (all_names == 0) {
failed("no memory: all_names");
}
}
all_names[len_names++] = strdup(strnames[n]);
}
#else
all_names[len_names++] = strdup("cup");
all_names[len_names++] = strdup("sgr");
#endif
}
all_names[len_names] = 0;
if (v_opt) {
printf("%d name%s%s\n", PLURAL(len_names), COLONS(len_names));
for (n = 0; n < len_names; ++n) {
printf(" %d: %s\n", n + 1, all_names[n]);
}
}
if (r_opt <= 0)
r_opt = 1;
for (r_run = 0; r_run < r_opt; ++r_run) {
for (t_run = 0; t_run < len_terms; ++t_run) {
int errs;
if (setupterm(all_terms[t_run], fileno(stdout), &errs) != OK) {
printf("** skipping %s (errs:%d)\n", all_terms[t_run], errs);
}
if (v_opt)
printf("** testing %s\n", all_terms[t_run]);
if (len_names == 1) {
if (a_opt) {
/* for each combination of values */
memset(all_parms, 0, sizeof(all_parms));
do {
test_tparm(all_names[0], all_parms);
}
while (increment(all_parms, num_parms, len_parms, 0));
} else {
/* for the given values */
test_tparm(all_names[0], num_parms);
}
} else {
for (n_run = 0; n_run < len_names; ++n_run) {
test_tparm(all_names[n_run], num_parms);
}
}
if (cur_term != 0) {
del_curterm(cur_term);
} else {
printf("? no cur_term\n");
}
}
}
#if NO_LEAKS
for (n = 0; n < len_names; ++n) {
free(all_names[n]);
}
free(all_names);
free(old_term);
for (n = 0; n < len_terms; ++n) {
free(all_terms[n]);
}
free(all_terms);
free(num_parms);
free(str_parms);
#endif
ExitProgram(EXIT_SUCCESS);
}
| 0
|
387,804
|
template <class T> void do_oop_work(T* p) {
oop obj = RawAccess<>::oop_load(p);
if (!oopDesc::is_oop_or_null(obj)) {
tty->print_cr("Failed: " PTR_FORMAT " -> " PTR_FORMAT, p2i(p), p2i(obj));
Universe::print_on(tty);
guarantee(false, "boom");
}
}
| 0
|
247,290
|
void Compute(OpKernelContext* ctx) override {
const Tensor& input = ctx->input(kInputTensorIndex);
const Tensor& input_min = ctx->input(kInputMinIndex);
const Tensor& input_max = ctx->input(kInputMaxIndex);
const size_t depth = input_max.NumElements();
OP_REQUIRES(
ctx, input_min.dim_size(0) == depth,
errors::InvalidArgument("input_min has incorrect size, expected ",
depth, " was ", input_min.dim_size(0)));
OP_REQUIRES(
ctx, input_max.dim_size(0) == depth,
errors::InvalidArgument("input_max has incorrect size, expected ",
depth, " was ", input_max.dim_size(0)));
OP_REQUIRES(
ctx, input_min.NumElements() == depth,
errors::InvalidArgument("input_min must have the same number of "
"elements as input_max, got ",
input_min.NumElements(), " and ", depth));
OP_REQUIRES(ctx, input.NumElements() > 0,
errors::InvalidArgument("input must not be empty"));
OP_REQUIRES(ctx, input.dims() == 4,
errors::InvalidArgument("input must be in NHWC format"));
OP_REQUIRES(
ctx, input.dim_size(3) == depth,
errors::InvalidArgument(
"input must have same number of channels as length of input_min: ",
input.dim_size(3), " vs ", depth));
const float* input_min_data = input_min.flat<float>().data();
const float* input_max_data = input_max.flat<float>().data();
std::vector<float> ranges(depth);
bool is_non_negative = true;
Eigen::array<int, 2> shuffling({1, 0});
auto input_matrix = input.flat_inner_dims<qint32>();
// TODO: verify performance of not transposing and finding the min max
// directly from input_matrix vs the one presented below of transposing and
// using the transposed matrix as the transposing operation in itself might
// be more costly.
// Note that this operation is a calibration step for quantization and will
// cease to exist in the final inference graph(will exist as a const node).
auto transposed_input = input_matrix.shuffle(shuffling);
// Find the ranges of each channel in parallel.
float out_min_max = std::numeric_limits<float>::min();
#ifdef ENABLE_ONEDNN_OPENMP
#ifdef _MSC_VER
#pragma omp parallel for
#else
#pragma omp parallel for reduction(max : out_min_max)
#endif
#endif // ENABLE_ONEDNN_OPENMP
// TODO: Add eigen parallel_for
for (int64_t i = 0; i < depth; ++i) {
Eigen::Tensor<qint32, 0, Eigen::RowMajor> min =
transposed_input.chip<0>(i).minimum();
Eigen::Tensor<qint32, 0, Eigen::RowMajor> max =
transposed_input.chip<0>(i).maximum();
const int32_t min_per_channel = min();
const int32_t max_per_channel = max();
const int32_t abs_max =
std::max(std::abs(min_per_channel), std::abs(max_per_channel));
float scale =
std::max(std::abs(input_min_data[i]), std::abs(input_max_data[i]));
ranges[i] =
scale * static_cast<float>(abs_max) / static_cast<float>(1L << 31);
if (min_per_channel < 0) is_non_negative = false;
// Thread-local out_min_max.
out_min_max = std::max(out_min_max, ranges[i]);
}
// All local out_min_max gets max-reduced into one global out_min_max at
// the end of the loop by specifying reduction(max:out_min_max) along with
// omp parallel for.
// Fixing max to clip_value_max_ (example 6.0 to support relu6)
if (out_min_max > clip_value_max_) out_min_max = clip_value_max_;
Tensor* output_min = nullptr;
Tensor* output_max = nullptr;
OP_REQUIRES_OK(ctx, ctx->allocate_output(kOutputMinIndex, {}, &output_min));
OP_REQUIRES_OK(ctx, ctx->allocate_output(kOutputMaxIndex, {}, &output_max));
output_min->flat<float>()(0) = is_non_negative ? 0.0f : -out_min_max;
output_max->flat<float>()(0) = out_min_max;
}
| 0
|
244,209
|
void void_box_del(GF_Box *s)
{
gf_free(s);
}
| 0
|
332,376
|
is_ex_cmdchar(cmdarg_T *cap)
{
return cap->cmdchar == ':'
|| cap->cmdchar == K_COMMAND
|| cap->cmdchar == K_SCRIPT_COMMAND;
}
| 0
|
369,166
|
static int __io_sq_thread(struct io_ring_ctx *ctx, bool cap_entries)
{
unsigned int to_submit;
int ret = 0;
to_submit = io_sqring_entries(ctx);
/* if we're handling multiple rings, cap submit size for fairness */
if (cap_entries && to_submit > IORING_SQPOLL_CAP_ENTRIES_VALUE)
to_submit = IORING_SQPOLL_CAP_ENTRIES_VALUE;
if (!wq_list_empty(&ctx->iopoll_list) || to_submit) {
const struct cred *creds = NULL;
if (ctx->sq_creds != current_cred())
creds = override_creds(ctx->sq_creds);
mutex_lock(&ctx->uring_lock);
if (!wq_list_empty(&ctx->iopoll_list))
io_do_iopoll(ctx, true);
/*
* Don't submit if refs are dying, good for io_uring_register(),
* but also it is relied upon by io_ring_exit_work()
*/
if (to_submit && likely(!percpu_ref_is_dying(&ctx->refs)) &&
!(ctx->flags & IORING_SETUP_R_DISABLED))
ret = io_submit_sqes(ctx, to_submit);
mutex_unlock(&ctx->uring_lock);
if (to_submit && wq_has_sleeper(&ctx->sqo_sq_wait))
wake_up(&ctx->sqo_sq_wait);
if (creds)
revert_creds(creds);
}
return ret;
| 0
|
390,541
|
CheckKeySyms( ClientPtr client,
XkbDescPtr xkb,
xkbSetMapReq * req,
int nTypes,
CARD8 * mapWidths,
CARD16 * symsPerKey,
xkbSymMapWireDesc ** wireRtrn,
int * errorRtrn)
{
register unsigned i;
XkbSymMapPtr map;
xkbSymMapWireDesc* wire = *wireRtrn;
if (!(XkbKeySymsMask&req->present))
return 1;
CHK_REQ_KEY_RANGE2(0x11,req->firstKeySym,req->nKeySyms,req,(*errorRtrn),0);
map = &xkb->map->key_sym_map[xkb->min_key_code];
for (i=xkb->min_key_code;i<(unsigned)req->firstKeySym;i++,map++) {
register int g,ng,w;
ng= XkbNumGroups(map->group_info);
for (w=g=0;g<ng;g++) {
if (map->kt_index[g]>=(unsigned)nTypes) {
*errorRtrn = _XkbErrCode4(0x13,i,g,map->kt_index[g]);
return 0;
}
if (mapWidths[map->kt_index[g]]>w)
w= mapWidths[map->kt_index[g]];
}
symsPerKey[i] = w*ng;
}
for (i=0;i<req->nKeySyms;i++) {
KeySym *pSyms;
register unsigned nG;
if (client->swapped) {
swaps(&wire->nSyms,nG);
}
nG = XkbNumGroups(wire->groupInfo);
if (nG>XkbNumKbdGroups) {
*errorRtrn = _XkbErrCode3(0x14,i+req->firstKeySym,nG);
return 0;
}
if (nG>0) {
register int g,w;
for (g=w=0;g<nG;g++) {
if (wire->ktIndex[g]>=(unsigned)nTypes) {
*errorRtrn= _XkbErrCode4(0x15,i+req->firstKeySym,g,
wire->ktIndex[g]);
return 0;
}
if (mapWidths[wire->ktIndex[g]]>w)
w= mapWidths[wire->ktIndex[g]];
}
if (wire->width!=w) {
*errorRtrn= _XkbErrCode3(0x16,i+req->firstKeySym,wire->width);
return 0;
}
w*= nG;
symsPerKey[i+req->firstKeySym] = w;
if (w!=wire->nSyms) {
*errorRtrn=_XkbErrCode4(0x16,i+req->firstKeySym,wire->nSyms,w);
return 0;
}
}
else if (wire->nSyms!=0) {
*errorRtrn = _XkbErrCode3(0x17,i+req->firstKeySym,wire->nSyms);
return 0;
}
pSyms = (KeySym *)&wire[1];
wire = (xkbSymMapWireDesc *)&pSyms[wire->nSyms];
}
map = &xkb->map->key_sym_map[i];
for (;i<=(unsigned)xkb->max_key_code;i++,map++) {
register int g,nG,w;
nG= XkbKeyNumGroups(xkb,i);
for (w=g=0;g<nG;g++) {
if (map->kt_index[g]>=(unsigned)nTypes) {
*errorRtrn = _XkbErrCode4(0x18,i,g,map->kt_index[g]);
return 0;
}
if (mapWidths[map->kt_index[g]]>w)
w= mapWidths[map->kt_index[g]];
}
symsPerKey[i] = w*nG;
}
*wireRtrn = wire;
return 1;
}
| 0
|
387,747
|
// Returns true if this class and k are members of the same nest, i.e. both
// resolve to the same nest-host class. Resolution/validation exceptions
// propagate to the caller via TRAPS (CHECK_false returns false with the
// exception pending).
bool InstanceKlass::has_nestmate_access_to(InstanceKlass* k, TRAPS) {
  assert(this != k, "this should be handled by higher-level code");
  // Per JVMS 5.4.4 we first resolve and validate the current class, then
  // the target class k. Resolution exceptions will be passed on by upper
  // layers. IncompatibleClassChangeErrors from membership validation failures
  // will also be passed through.
  Symbol* icce = vmSymbols::java_lang_IncompatibleClassChangeError();
  InstanceKlass* cur_host = nest_host(icce, CHECK_false);
  if (cur_host == NULL) {
    return false;
  }
  Klass* k_nest_host = k->nest_host(icce, CHECK_false);
  if (k_nest_host == NULL) {
    return false;
  }
  // Nestmate access is granted iff both classes share the same nest host.
  bool access = (cur_host == k_nest_host);
  if (log_is_enabled(Trace, class, nestmates)) {
    ResourceMark rm(THREAD);
    log_trace(class, nestmates)("Class %s does %shave nestmate access to %s",
                                this->external_name(),
                                access ? "" : "NOT ",
                                k->external_name());
  }
  return access;
}
| 0
|
222,838
|
// Hash functor for DimId: hash the node pointer and additively combine it
// with the port and dimension indices (same combination as before).
std::size_t operator()(const DimId& dim) const {
  const std::size_t node_hash = std::hash<const NodeDef*>{}(dim.node);
  return node_hash + dim.port_id + dim.dim_index;
}
| 0
|
412,094
|
/* Fill the server half of a dnscrypt nonce: an 8-byte big-endian
 * timestamp-plus-random value followed by a 4-byte random suffix, written
 * starting at nonce + crypto_box_HALF_NONCEBYTES (12 bytes total). */
add_server_nonce(uint8_t *nonce)
{
    uint64_t ts;
    uint64_t tsn;
    uint32_t suffix;
    ts = dnscrypt_hrtime();
    // TODO? dnscrypt-wrapper does some logic with context->nonce_ts_last
    // unclear if we really need it, so skipping it for now.
    /* Upper 54 bits: timestamp; lower 10 bits: random, so two calls within
     * the same clock tick still differ. */
    tsn = (ts << 10) | (randombytes_random() & 0x3ff);
#if (BYTE_ORDER == LITTLE_ENDIAN)
    /* Swap to big-endian byte order on little-endian hosts before storing. */
    tsn =
        (((uint64_t)htonl((uint32_t)tsn)) << 32) | htonl((uint32_t)(tsn >> 32));
#endif
    memcpy(nonce + crypto_box_HALF_NONCEBYTES, &tsn, 8);
    suffix = randombytes_random();
    memcpy(nonce + crypto_box_HALF_NONCEBYTES + 8, &suffix, 4);
}
| 0
|
427,723
|
/* Reverse the byte order of a 64-bit value (little-endian conversion on a
 * big-endian host). Equivalent to the unrolled per-byte swap. */
_cdf_tole8(uint64_t sv)
{
	uint64_t rv;
	uint8_t *s = RCAST(uint8_t *, RCAST(void *, &sv));
	uint8_t *d = RCAST(uint8_t *, RCAST(void *, &rv));
	int i;

	for (i = 0; i < 8; i++)
		d[i] = s[7 - i];
	return rv;
}
| 0
|
336,665
|
/* Install pthread-based locking callbacks for OpenSSL, unless some other
 * component already installed a locking callback. Allocates one mutex per
 * OpenSSL lock slot into the file-scope lock_cs array. */
static void openssl_thread_setup(void)
{
    int i;
    int num_locks;

    /* Somebody else already setup threading for OpenSSL,
     * don't do it twice to avoid possible races.
     */
    if (CRYPTO_get_locking_callback() != NULL) {
        red_dump_openssl_errors();
        return;
    }

    num_locks = CRYPTO_num_locks();
    lock_cs = (pthread_mutex_t*) OPENSSL_malloc(num_locks * sizeof(pthread_mutex_t));
    if (lock_cs == NULL) {
        /* Fix: the allocation result was previously used unchecked; on OOM
         * the loop below would dereference NULL. Leave OpenSSL without a
         * locking callback instead. */
        red_dump_openssl_errors();
        return;
    }
    for (i = 0; i < num_locks; i++) {
        pthread_mutex_init(&(lock_cs[i]), NULL);
    }
    CRYPTO_THREADID_set_callback(pthreads_thread_id);
    CRYPTO_set_locking_callback(pthreads_locking_callback);
}
| 0
|
246,732
|
/* Print the MP4Box encryption/decryption help section: a static header
 * followed by one entry per option in the m4b_crypt_args table (which is
 * terminated by a NULL name). */
void PrintEncryptUsage()
{
	u32 i=0;
	gf_sys_format_help(helpout, help_flags, "# Encryption/Decryption Options\n"
		"MP4Box supports encryption and decryption of ISMA, OMA and CENC content, see [encryption filter `gpac -h cecrypt`](cecrypt).\n"
		"It requires a specific XML file called `CryptFile`, whose syntax is available at https://wiki.gpac.io/Common-Encryption\n"
		"Image files (HEIF) can also be crypted / decrypted, using CENC only.\n"
		" \n"
		"Options:\n"
	);
	while (m4b_crypt_args[i].name) {
		GF_GPACArg *arg = (GF_GPACArg *) &m4b_crypt_args[i];
		i++;
		gf_sys_print_arg(helpout, help_flags, arg, "mp4box-crypt");
	}
}
| 0
|
445,955
|
/* Launch `app` on every file in `file_list` (as URIs), using an app launch
 * context bound to the window's display/screen. Shows an error dialog if
 * the launch fails. No-op while another archive operation is in progress. */
fr_window_open_files_with_application (FrWindow *window,
				       GList    *file_list,
				       GAppInfo *app)
{
	GList *uris;
	GList *scan;
	GdkAppLaunchContext *context;
	GError *error = NULL;

	/* Refuse while an operation is running on this window. */
	if (window->priv->activity_ref > 0)
		return;

	/* Build the URI list (prepend reverses order; launch order is not
	 * significant here). */
	uris = NULL;
	for (scan = file_list; scan; scan = scan->next)
		uris = g_list_prepend (uris, g_file_get_uri (G_FILE (scan->data)));

	context = gdk_display_get_app_launch_context (gtk_widget_get_display (GTK_WIDGET (window)));
	gdk_app_launch_context_set_screen (context, gtk_widget_get_screen (GTK_WIDGET (window)));
	gdk_app_launch_context_set_timestamp (context, 0);
	if (! g_app_info_launch_uris (app, uris, G_APP_LAUNCH_CONTEXT (context), &error)) {
		_gtk_error_dialog_run (GTK_WINDOW (window),
				       _("Could not perform the operation"),
				       "%s",
				       error->message);
		g_clear_error (&error);
	}

	g_object_unref (context);
	_g_string_list_free (uris);
}
| 0
|
484,803
|
/* ndo_set_features hook: when scatter-gather is being turned off, clamp
 * the MTU back to the standard Ethernet payload size, since large frames
 * need SG support. Always reports success. */
static int xennet_set_features(struct net_device *dev,
	netdev_features_t features)
{
	if (features & NETIF_F_SG)
		return 0;
	if (dev->mtu <= ETH_DATA_LEN)
		return 0;

	netdev_info(dev, "Reducing MTU because no SG offload");
	dev->mtu = ETH_DATA_LEN;
	return 0;
}
| 0
|
475,983
|
/* Allocate a fresh LZW decoding context and store it in *ctx.
 * Returns LZW_NO_MEM on allocation failure, LZW_OK otherwise. */
lzw_result lzw_context_create(struct lzw_ctx **ctx)
{
	struct lzw_ctx *instance = malloc(sizeof *instance);

	if (instance == NULL)
		return LZW_NO_MEM;

	*ctx = instance;
	return LZW_OK;
}
| 0
|
242,978
|
/*
 * Parse and validate the (D)TLS record header at buf/len into `rec`:
 * content type, protocol version, sequence number, optional DTLS CID,
 * and payload length/offset. Does not consume input.
 *
 * Returns 0 on success; MBEDTLS_ERR_SSL_INVALID_RECORD for malformed
 * headers; for DTLS, MBEDTLS_ERR_SSL_UNEXPECTED_RECORD for records from
 * another epoch or replays, and MBEDTLS_ERR_SSL_EARLY_MESSAGE for
 * records from the next epoch that may be buffered.
 */
static int ssl_parse_record_header( mbedtls_ssl_context const *ssl,
                                    unsigned char *buf,
                                    size_t len,
                                    mbedtls_record *rec )
{
    int major_ver, minor_ver;

    size_t const rec_hdr_type_offset    = 0;
    size_t const rec_hdr_type_len       = 1;

    size_t const rec_hdr_version_offset = rec_hdr_type_offset +
                                          rec_hdr_type_len;
    size_t const rec_hdr_version_len    = 2;

    size_t const rec_hdr_ctr_len        = 8;
#if defined(MBEDTLS_SSL_PROTO_DTLS)
    uint32_t     rec_epoch;
    size_t const rec_hdr_ctr_offset     = rec_hdr_version_offset +
                                          rec_hdr_version_len;

#if defined(MBEDTLS_SSL_DTLS_CONNECTION_ID)
    size_t const rec_hdr_cid_offset     = rec_hdr_ctr_offset +
                                          rec_hdr_ctr_len;
    size_t       rec_hdr_cid_len        = 0;
#endif /* MBEDTLS_SSL_DTLS_CONNECTION_ID */
#endif /* MBEDTLS_SSL_PROTO_DTLS */

    size_t       rec_hdr_len_offset; /* To be determined */
    size_t const rec_hdr_len_len    = 2;

    /*
     * Check minimum lengths for record header.
     */
#if defined(MBEDTLS_SSL_PROTO_DTLS)
    if( ssl->conf->transport == MBEDTLS_SSL_TRANSPORT_DATAGRAM )
    {
        rec_hdr_len_offset = rec_hdr_ctr_offset + rec_hdr_ctr_len;
    }
    else
#endif /* MBEDTLS_SSL_PROTO_DTLS */
    {
        rec_hdr_len_offset = rec_hdr_version_offset + rec_hdr_version_len;
    }

    if( len < rec_hdr_len_offset + rec_hdr_len_len )
    {
        /* Fix: the reported header length previously used
         * rec_hdr_len_len + rec_hdr_len_len (a typo); it is the
         * offset of the length field plus the length field size. */
        MBEDTLS_SSL_DEBUG_MSG( 1, ( "datagram of length %u too small to hold DTLS record header of length %u",
                                    (unsigned) len,
                                    (unsigned)( rec_hdr_len_offset + rec_hdr_len_len ) ) );
        return( MBEDTLS_ERR_SSL_INVALID_RECORD );
    }

    /*
     * Parse and validate record content type
     */

    rec->type = buf[ rec_hdr_type_offset ];

    /* Check record content type */
#if defined(MBEDTLS_SSL_DTLS_CONNECTION_ID)
    rec->cid_len = 0;

    if( ssl->conf->transport == MBEDTLS_SSL_TRANSPORT_DATAGRAM &&
        ssl->conf->cid_len != 0 &&
        rec->type == MBEDTLS_SSL_MSG_CID )
    {
        /* Shift pointers to account for record header including CID
         * struct {
         *   ContentType special_type = tls12_cid;
         *   ProtocolVersion version;
         *   uint16 epoch;
         *   uint48 sequence_number;
         *   opaque cid[cid_length]; // Additional field compared to
         *                           // default DTLS record format
         *   uint16 length;
         *   opaque enc_content[DTLSCiphertext.length];
         * } DTLSCiphertext;
         */

        /* So far, we only support static CID lengths
         * fixed in the configuration. */
        rec_hdr_cid_len = ssl->conf->cid_len;
        rec_hdr_len_offset += rec_hdr_cid_len;

        if( len < rec_hdr_len_offset + rec_hdr_len_len )
        {
            MBEDTLS_SSL_DEBUG_MSG( 1, ( "datagram of length %u too small to hold DTLS record header including CID, length %u",
                                        (unsigned) len,
                                        (unsigned)( rec_hdr_len_offset + rec_hdr_len_len ) ) );
            return( MBEDTLS_ERR_SSL_INVALID_RECORD );
        }

        /* configured CID len is guaranteed at most 255, see
         * MBEDTLS_SSL_CID_OUT_LEN_MAX in check_config.h */
        rec->cid_len = (uint8_t) rec_hdr_cid_len;
        memcpy( rec->cid, buf + rec_hdr_cid_offset, rec_hdr_cid_len );
    }
    else
#endif /* MBEDTLS_SSL_DTLS_CONNECTION_ID */
    {
        if( ssl_check_record_type( rec->type ) )
        {
            MBEDTLS_SSL_DEBUG_MSG( 1, ( "unknown record type %u",
                                        (unsigned) rec->type ) );
            return( MBEDTLS_ERR_SSL_INVALID_RECORD );
        }
    }

    /*
     * Parse and validate record version
     */
    rec->ver[0] = buf[ rec_hdr_version_offset + 0 ];
    rec->ver[1] = buf[ rec_hdr_version_offset + 1 ];
    mbedtls_ssl_read_version( &major_ver, &minor_ver,
                              ssl->conf->transport,
                              &rec->ver[0] );

    if( major_ver != ssl->major_ver )
    {
        MBEDTLS_SSL_DEBUG_MSG( 1, ( "major version mismatch: got %u, expected %u",
                                    (unsigned) major_ver,
                                    (unsigned) ssl->major_ver ) );
        return( MBEDTLS_ERR_SSL_INVALID_RECORD );
    }

    if( minor_ver > ssl->conf->max_minor_ver )
    {
        MBEDTLS_SSL_DEBUG_MSG( 1, ( "minor version mismatch: got %u, expected max %u",
                                    (unsigned) minor_ver,
                                    (unsigned) ssl->conf->max_minor_ver ) );
        return( MBEDTLS_ERR_SSL_INVALID_RECORD );
    }

    /*
     * Parse/Copy record sequence number.
     */

#if defined(MBEDTLS_SSL_PROTO_DTLS)
    if( ssl->conf->transport == MBEDTLS_SSL_TRANSPORT_DATAGRAM )
    {
        /* Copy explicit record sequence number from input buffer. */
        memcpy( &rec->ctr[0], buf + rec_hdr_ctr_offset,
                rec_hdr_ctr_len );
    }
    else
#endif /* MBEDTLS_SSL_PROTO_DTLS */
    {
        /* Copy implicit record sequence number from SSL context structure. */
        memcpy( &rec->ctr[0], ssl->in_ctr, rec_hdr_ctr_len );
    }

    /*
     * Parse record length.
     */

    rec->data_offset = rec_hdr_len_offset + rec_hdr_len_len;
    rec->data_len    = ( (size_t) buf[ rec_hdr_len_offset + 0 ] << 8 ) |
                       ( (size_t) buf[ rec_hdr_len_offset + 1 ] << 0 );
    MBEDTLS_SSL_DEBUG_BUF( 4, "input record header", buf, rec->data_offset );

    MBEDTLS_SSL_DEBUG_MSG( 3, ( "input record: msgtype = %u, "
                                "version = [%d:%d], msglen = %" MBEDTLS_PRINTF_SIZET,
                                rec->type,
                                major_ver, minor_ver, rec->data_len ) );

    rec->buf     = buf;
    rec->buf_len = rec->data_offset + rec->data_len;

    if( rec->data_len == 0 )
        return( MBEDTLS_ERR_SSL_INVALID_RECORD );

    /*
     * DTLS-related tests.
     * Check epoch before checking length constraint because
     * the latter varies with the epoch. E.g., if a ChangeCipherSpec
     * message gets duplicated before the corresponding Finished message,
     * the second ChangeCipherSpec should be discarded because it belongs
     * to an old epoch, but not because its length is shorter than
     * the minimum record length for packets using the new record transform.
     * Note that these two kinds of failures are handled differently,
     * as an unexpected record is silently skipped but an invalid
     * record leads to the entire datagram being dropped.
     */
#if defined(MBEDTLS_SSL_PROTO_DTLS)
    if( ssl->conf->transport == MBEDTLS_SSL_TRANSPORT_DATAGRAM )
    {
        rec_epoch = ( rec->ctr[0] << 8 ) | rec->ctr[1];

        /* Check that the datagram is large enough to contain a record
         * of the advertised length. */
        if( len < rec->data_offset + rec->data_len )
        {
            MBEDTLS_SSL_DEBUG_MSG( 1, ( "Datagram of length %u too small to contain record of advertised length %u.",
                                        (unsigned) len,
                                        (unsigned)( rec->data_offset + rec->data_len ) ) );
            return( MBEDTLS_ERR_SSL_INVALID_RECORD );
        }

        /* Records from other, non-matching epochs are silently discarded.
         * (The case of same-port Client reconnects must be considered in
         *  the caller). */
        if( rec_epoch != ssl->in_epoch )
        {
            MBEDTLS_SSL_DEBUG_MSG( 1, ( "record from another epoch: "
                                        "expected %u, received %lu",
                                        ssl->in_epoch, (unsigned long) rec_epoch ) );

            /* Records from the next epoch are considered for buffering
             * (concretely: early Finished messages). */
            if( rec_epoch == (unsigned) ssl->in_epoch + 1 )
            {
                MBEDTLS_SSL_DEBUG_MSG( 2, ( "Consider record for buffering" ) );
                return( MBEDTLS_ERR_SSL_EARLY_MESSAGE );
            }

            return( MBEDTLS_ERR_SSL_UNEXPECTED_RECORD );
        }
#if defined(MBEDTLS_SSL_DTLS_ANTI_REPLAY)
        /* For records from the correct epoch, check whether their
         * sequence number has been seen before. */
        else if( mbedtls_ssl_dtls_record_replay_check( (mbedtls_ssl_context *) ssl,
                                                       &rec->ctr[0] ) != 0 )
        {
            MBEDTLS_SSL_DEBUG_MSG( 1, ( "replayed record" ) );
            return( MBEDTLS_ERR_SSL_UNEXPECTED_RECORD );
        }
#endif
    }
#endif /* MBEDTLS_SSL_PROTO_DTLS */

    return( 0 );
}
| 0
|
262,078
|
// Key identifying an (instance, feature dimension) pair; members are
// initialized directly from the constructor arguments.
InstanceFeatureDimKey(const int32_t instance, const int32_t feature_dim)
    : instance(instance), feature_dim(feature_dim) {}
| 0
|
219,005
|
// If `node` is a Squeeze whose input shape is fully known with every
// dimension larger than 1 (so there is provably nothing to squeeze),
// replace the op with Identity so later passes can prune it.
void ConstantFolding::SimplifySqueeze(const GraphProperties& properties,
                                      bool use_shape_info,
                                      GraphDef* optimized_graph,
                                      NodeDef* node) {
  if (use_shape_info && IsSqueeze(*node) &&
      !properties.GetInputProperties(node->name()).empty()) {
    // https://www.tensorflow.org/api_docs/python/tf/squeeze mentions it's
    // error to squeeze a dimension that is not 1, so we only need to check
    // whether the input has > 1 size for each dimension.
    const auto& shape = properties.GetInputProperties(node->name())[0].shape();
    // The node is replaceable iff
    // unknown_rank == false && (dim_size == 0 || all dims have size > 1)
    bool replaceable = !shape.unknown_rank();
    for (int j = 0; replaceable && j < shape.dim_size(); ++j) {
      // Unknown dimensions (size -1) also fail this test, keeping the
      // transformation conservative.
      replaceable &= shape.dim(j).size() > 1;
    }
    if (replaceable) {
      ReplaceOperationWithIdentity(0, properties, node, optimized_graph);
    }
  }
}
| 0
|
232,953
|
/* Initialize the "identity" content-encoding writer. There is no state to
 * set up; just require that a downstream writer is attached. */
static CURLcode identity_init_writer(struct Curl_easy *data,
                                     struct contenc_writer *writer)
{
  (void) data;
  if(writer->downstream)
    return CURLE_OK;
  return CURLE_WRITE_ERROR;
}
| 0
|
343,302
|
/* Write `size_` bytes from `buf_` to the upload file. In ASCII mode, CR
 * bytes are stripped into a temporary buffer before writing. On success,
 * *uploaded receives the number of bytes actually written (after any CR
 * stripping) and 0 is returned; on failure *uploaded is 0 and -1 is
 * returned. */
static int ul_dowrite(ULHandler * const ulhandler, const unsigned char *buf_,
                      const size_t size_, off_t * const uploaded)
{
    size_t size = size_;
    ssize_t written;
    const unsigned char *buf = buf_;
    unsigned char *unasciibuf = NULL;
    int ret = 0;

    if (size_ <= (size_t) 0U) {
        *uploaded = 0;
        return -1;
    }
#ifndef WITHOUT_ASCII
    if (ulhandler->ascii_mode > 0) {
        unsigned char *unasciibufpnt;
        size_t z = (size_t) 0U;

        /* Refuse chunks larger than the configured chunk size, and fail
         * if the scratch buffer cannot be allocated. */
        if (size > (size_t) ulhandler->chunk_size ||
            (unasciibuf = ALLOCA((size_t) ulhandler->chunk_size)) == NULL) {
            return -1;
        }
        unasciibufpnt = unasciibuf;
        /* Copy all bytes except carriage returns. */
        do {
            if (buf_[z] != (unsigned char) '\r') {
                *unasciibufpnt++ = buf_[z];
            }
            z++;
        } while (z < size);
        buf = unasciibuf;
        size = (size_t) (unasciibufpnt - unasciibuf);
    }
#endif
    written = safe_write(ulhandler->f, buf, size, -1);
    /* ret is 0 on a complete write, -1 on a short or failed write. */
    ret = - (written != (ssize_t) size);
    if (unasciibuf != NULL) {
        ALLOCA_FREE(unasciibuf);
    }
    if (ret < 0) {
        *uploaded = 0;
    } else {
        *uploaded = size;
    }
    return ret;
}
| 0
|
512,701
|
// HAVING-pushdown transformer hook: field items need no rewriting,
// so return this item unchanged.
Item *field_transformer_for_having_pushdown(THD *thd, uchar *arg)
{ return this; }
| 0
|
226,223
|
/* Serialize the FLAC configuration box: write the full-box header, then
 * the raw FLAC metadata payload bytes. */
GF_Err dfla_box_write(GF_Box *s, GF_BitStream *bs)
{
	GF_Err e;
	GF_FLACConfigBox *ptr = (GF_FLACConfigBox *) s;
	e = gf_isom_full_box_write(s, bs);
	if (e) return e;
	gf_bs_write_data(bs, ptr->data, ptr->dataSize);
	return GF_OK;
|
225,597
|
/* Account for the LASER configuration header bytes in the box size. */
GF_Err lsrc_box_size(GF_Box *s)
{
	GF_LASERConfigurationBox *ptr = (GF_LASERConfigurationBox *)s;
	ptr->size += ptr->hdr_size;
	return GF_OK;
|
222,891
|
bool IsDequeue(const NodeDef& n) {
return (n.op().find("Dequeue") != string::npos &&
n.op().find("DequeueMany") == string::npos);
}
| 0
|
352,982
|
/* Substring matching for the octetString syntax: verify that `value`
 * starts with sub->sa_initial, ends with sub->sa_final, and contains the
 * sub->sa_any[] components in order in between. Sets *matchp to 0 on a
 * match and non-zero otherwise; returns an LDAP result code. The running
 * `inlen` tracks the remaining asserted bytes so over-long assertions
 * fail fast before any memcmp. */
octetStringSubstringsMatch(
	int *matchp,
	slap_mask_t flags,
	Syntax *syntax,
	MatchingRule *mr,
	struct berval *value,
	void *assertedValue )
{
	int match = 0;
	SubstringsAssertion *sub = assertedValue;
	struct berval left = *value;
	int i;
	ber_len_t inlen = 0;

	/* Add up asserted input length */
	if ( !BER_BVISNULL( &sub->sa_initial ) ) {
		inlen += sub->sa_initial.bv_len;
	}
	if ( sub->sa_any ) {
		for ( i = 0; !BER_BVISNULL( &sub->sa_any[i] ); i++ ) {
			inlen += sub->sa_any[i].bv_len;
		}
	}
	if ( !BER_BVISNULL( &sub->sa_final ) ) {
		inlen += sub->sa_final.bv_len;
	}

	if ( !BER_BVISNULL( &sub->sa_initial ) ) {
		if ( inlen > left.bv_len ) {
			match = 1;
			goto done;
		}
		match = memcmp( sub->sa_initial.bv_val, left.bv_val,
			sub->sa_initial.bv_len );
		if ( match != 0 ) {
			goto done;
		}
		/* Consume the matched prefix. */
		left.bv_val += sub->sa_initial.bv_len;
		left.bv_len -= sub->sa_initial.bv_len;
		inlen -= sub->sa_initial.bv_len;
	}

	if ( !BER_BVISNULL( &sub->sa_final ) ) {
		if ( inlen > left.bv_len ) {
			match = 1;
			goto done;
		}
		match = memcmp( sub->sa_final.bv_val,
			&left.bv_val[left.bv_len - sub->sa_final.bv_len],
			sub->sa_final.bv_len );
		if ( match != 0 ) {
			goto done;
		}
		left.bv_len -= sub->sa_final.bv_len;
		inlen -= sub->sa_final.bv_len;
	}

	if ( sub->sa_any ) {
		for ( i = 0; !BER_BVISNULL( &sub->sa_any[i] ); i++ ) {
			ber_len_t idx;
			char *p;

retry:
			if ( inlen > left.bv_len ) {
				/* not enough length */
				match = 1;
				goto done;
			}

			if ( BER_BVISEMPTY( &sub->sa_any[i] ) ) {
				continue;
			}

			/* Find the first byte of the component, then compare the
			 * whole component there; on mismatch advance one byte
			 * and retry. */
			p = memchr( left.bv_val, *sub->sa_any[i].bv_val, left.bv_len );
			if( p == NULL ) {
				match = 1;
				goto done;
			}

			idx = p - left.bv_val;

			if ( idx >= left.bv_len ) {
				/* this shouldn't happen */
				return LDAP_OTHER;
			}

			left.bv_val = p;
			left.bv_len -= idx;

			if ( sub->sa_any[i].bv_len > left.bv_len ) {
				/* not enough left */
				match = 1;
				goto done;
			}

			match = memcmp( left.bv_val,
				sub->sa_any[i].bv_val,
				sub->sa_any[i].bv_len );
			if ( match != 0 ) {
				left.bv_val++;
				left.bv_len--;
				goto retry;
			}

			left.bv_val += sub->sa_any[i].bv_len;
			left.bv_len -= sub->sa_any[i].bv_len;
			inlen -= sub->sa_any[i].bv_len;
		}
	}

done:
	*matchp = match;
	return LDAP_SUCCESS;
}
| 0
|
229,246
|
// Handle a CQL REGISTER request: parse the requested event type names,
// subscribe this connection to each via the server's notifier, mark the
// connection ready, and reply with a READY response.
cql_server::connection::process_register(uint16_t stream, request_reader in, service::client_state& client_state,
        tracing::trace_state_ptr trace_state) {
    ++_server._stats.register_requests;
    std::vector<sstring> event_types;
    in.read_string_list(event_types);
    for (auto&& event_type : event_types) {
        auto et = parse_event_type(event_type);
        _server._notifier->register_event(et, this);
    }
    _ready = true;
    return make_ready_future<std::unique_ptr<cql_server::response>>(make_ready(stream, std::move(trace_state)));
}
| 0
|
359,453
|
/* CLI command "neighbor <addr> port <0-65535>": configure the TCP port
 * used for the BGP session with the given neighbor. Delegates to
 * peer_port_vty() (IPv4 address family). */
DEFUN (neighbor_port,
       neighbor_port_cmd,
       NEIGHBOR_CMD "port <0-65535>",
       NEIGHBOR_STR
       NEIGHBOR_ADDR_STR
       "Neighbor's BGP port\n"
       "TCP port number\n")
{
  return peer_port_vty (vty, argv[0], AFI_IP, argv[1]);
}
| 0
|
254,753
|
/* TypedArray.prototype.sort(): sort the array in place, either with a
 * type-specific native comparator or, when a compare function is given,
 * through njs_typed_array_generic_compare on a scratch copy of the
 * elements (copied back only if the underlying buffer was not replaced
 * by the user callback). Returns NJS_OK/NJS_ERROR. */
njs_typed_array_prototype_sort(njs_vm_t *vm, njs_value_t *args,
    njs_uint_t nargs, njs_index_t unused)
{
    u_char                      *base, *orig;
    int64_t                     length;
    uint32_t                    element_size;
    njs_value_t                 *this, *comparefn;
    njs_typed_array_t           *array;
    njs_array_buffer_t          *buffer;
    njs_typed_array_cmp_t       cmp;
    njs_typed_array_sort_ctx_t  ctx;

    this = njs_argument(args, 0);
    if (njs_slow_path(!njs_is_typed_array(this))) {
        njs_type_error(vm, "this is not a typed array");
        return NJS_ERROR;
    }

    array = njs_typed_array(this);

    if (njs_slow_path(njs_is_detached_buffer(array->buffer))) {
        njs_type_error(vm, "detached buffer");
        return NJS_ERROR;
    }

    ctx.vm = vm;
    ctx.buffer = array->buffer;
    ctx.exception = 0;

    comparefn = njs_arg(args, nargs, 1);
    if (njs_is_defined(comparefn)) {
        if (njs_slow_path(!njs_is_function(comparefn))) {
            njs_type_error(vm, "comparefn must be callable or undefined");
            return NJS_ERROR;
        }

        ctx.function = njs_function(comparefn);

    } else {
        ctx.function = NULL;
    }

    /* Pick the native comparator and element getter for the element type. */
    switch (array->type) {
    case NJS_OBJ_TYPE_UINT8_ARRAY:
    case NJS_OBJ_TYPE_UINT8_CLAMPED_ARRAY:
        cmp = njs_typed_array_compare_u8;
        ctx.get = njs_typed_array_get_u8;
        break;

    case NJS_OBJ_TYPE_INT8_ARRAY:
        cmp = njs_typed_array_compare_i8;
        ctx.get = njs_typed_array_get_i8;
        break;

    case NJS_OBJ_TYPE_UINT16_ARRAY:
        cmp = njs_typed_array_compare_u16;
        ctx.get = njs_typed_array_get_u16;
        break;

    case NJS_OBJ_TYPE_INT16_ARRAY:
        cmp = njs_typed_array_compare_i16;
        ctx.get = njs_typed_array_get_i16;
        break;

    case NJS_OBJ_TYPE_UINT32_ARRAY:
        cmp = njs_typed_array_compare_u32;
        ctx.get = njs_typed_array_get_u32;
        break;

    case NJS_OBJ_TYPE_INT32_ARRAY:
        cmp = njs_typed_array_compare_i32;
        ctx.get = njs_typed_array_get_i32;
        break;

    case NJS_OBJ_TYPE_FLOAT32_ARRAY:
        cmp = njs_typed_array_compare_f32;
        ctx.get = njs_typed_array_get_f32;
        break;

    default:
        /* NJS_OBJ_TYPE_FLOAT64_ARRAY. */
        cmp = njs_typed_array_compare_f64;
        ctx.get = njs_typed_array_get_f64;
        break;
    }

    buffer = njs_typed_array_writable(vm, array);
    if (njs_slow_path(buffer == NULL)) {
        return NJS_ERROR;
    }

    length = njs_typed_array_length(array);
    element_size = njs_typed_array_element_size(array->type);
    base = &buffer->u.u8[array->offset * element_size];
    orig = base;

    if (ctx.function != NULL) {
        /* User comparator: sort a scratch copy, since the callback can
         * observe or detach the buffer mid-sort. */
        cmp = njs_typed_array_generic_compare;
        base = njs_mp_alloc(vm->mem_pool, length * element_size);
        if (njs_slow_path(base == NULL)) {
            njs_memory_error(vm);
            return NJS_ERROR;
        }

        memcpy(base, &buffer->u.u8[array->offset * element_size],
               length * element_size);
    }

    njs_qsort(base, length, element_size, cmp, &ctx);

    if (ctx.function != NULL) {
        /* Copy back only if the array still points at the original data. */
        if (&buffer->u.u8[array->offset * element_size] == orig) {
            memcpy(orig, base, length * element_size);
        }

        njs_mp_free(vm->mem_pool, base);
    }

    if (njs_slow_path(ctx.exception)) {
        return NJS_ERROR;
    }

    njs_set_typed_array(&vm->retval, array);

    return NJS_OK;
}
| 0
|
387,832
|
// Run the bytecode verifier over this class. The caller chooses whether a
// verification failure raises VerifyError or is reported via the return
// value only.
bool InstanceKlass::verify_code(bool throw_verifyerror, TRAPS) {
  // 1) Verify the bytecodes
  Verifier::Mode mode = Verifier::NoException;
  if (throw_verifyerror) {
    mode = Verifier::ThrowException;
  }
  return Verifier::verify(this, mode, should_verify_class(), THREAD);
}
| 0
|
409,418
|
/* Record that a terminal request has just been sent: stamp the send time
 * and move the request into the "sent" state. */
termrequest_sent(termrequest_T *status)
{
    status->tr_start = time(NULL);
    status->tr_progress = STATUS_SENT;
}
| 0
|
359,243
|
/* CLI command "no neighbor <addr> remove-private-AS": clear the per-AF
 * flag that strips private AS numbers from outbound updates. */
DEFUN (no_neighbor_remove_private_as,
       no_neighbor_remove_private_as_cmd,
       NO_NEIGHBOR_CMD2 "remove-private-AS",
       NO_STR
       NEIGHBOR_STR
       NEIGHBOR_ADDR_STR2
       "Remove private AS number from outbound updates\n")
{
  return peer_af_flag_unset_vty (vty, argv[0], bgp_node_afi (vty),
				 bgp_node_safi (vty),
				 PEER_FLAG_REMOVE_PRIVATE_AS);
}
| 0
|
238,772
|
/*
 * Save the last search pattern, the last pattern index and the
 * 'hlsearch' suppression state so they can be restored later.
 * A call-depth counter makes nested save calls a no-op.
 */
save_last_search_pattern(void)
{
    if (++did_save_last_search_spat != 1)
	// nested call, nothing to do
	return;
    saved_last_search_spat = spats[RE_SEARCH];
    if (spats[RE_SEARCH].pat != NULL)
	// The struct copy above only copied the pointer; duplicate the
	// pattern string so the saved copy owns its own memory.
	saved_last_search_spat.pat = vim_strsave(spats[RE_SEARCH].pat);
    saved_last_idx = last_idx;
    saved_no_hlsearch = no_hlsearch;
}
| 0
|
247,649
|
// Verify that when the server requires a client certificate, the peer
// certificate details (serial number, issuer/subject DNs and the full PEM
// certificate) are exposed on the connection info.
TEST_P(SslSocketTest, GetPeerCert) {
  const std::string client_ctx_yaml = R"EOF(
  common_tls_context:
    tls_certificates:
      certificate_chain:
        filename: "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/no_san_cert.pem"
      private_key:
        filename: "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/no_san_key.pem"
)EOF";

  const std::string server_ctx_yaml = R"EOF(
  common_tls_context:
    tls_certificates:
      certificate_chain:
        filename: "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_uri_cert.pem"
      private_key:
        filename: "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_uri_key.pem"
    validation_context:
      trusted_ca:
        filename: "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/ca_cert.pem"
  require_client_certificate: true
)EOF";

  TestUtilOptions test_options(client_ctx_yaml, server_ctx_yaml, true, GetParam());
  std::string expected_peer_cert =
      TestEnvironment::readFileToStringForTest(TestEnvironment::substitute(
          "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/no_san_cert.pem"));
  testUtil(test_options.setExpectedSerialNumber(TEST_NO_SAN_CERT_SERIAL)
               .setExpectedPeerIssuer(
                   "CN=Test CA,OU=Lyft Engineering,O=Lyft,L=San Francisco,ST=California,C=US")
               .setExpectedPeerSubject(
                   "CN=Test Server,OU=Lyft Engineering,O=Lyft,L=San Francisco,ST=California,C=US")
               .setExpectedLocalSubject(
                   "CN=Test Server,OU=Lyft Engineering,O=Lyft,L=San Francisco,ST=California,C=US")
               .setExpectedPeerCert(expected_peer_cert));
}
| 0
|
247,362
|
/* Verify an OpenPGP signature `sig` against the data already digested in
 * `hashctx`. If `key` is supplied the signature is verified
 * cryptographically; otherwise only the quick 16-bit digest check is done
 * and RPMRC_NOKEY is returned on success. Returns RPMRC_OK, RPMRC_NOKEY
 * or RPMRC_FAIL. */
rpmRC pgpVerifySignature(pgpDigParams key, pgpDigParams sig, DIGEST_CTX hashctx)
{
    DIGEST_CTX ctx = rpmDigestDup(hashctx);
    uint8_t *hash = NULL;
    size_t hashlen = 0;
    rpmRC res = RPMRC_FAIL; /* assume failure */

    if (sig == NULL || ctx == NULL)
	goto exit;

    /* Fold the hashed portion of the signature packet into the digest. */
    if (sig->hash != NULL)
	rpmDigestUpdate(ctx, sig->hash, sig->hashlen);

    if (sig->version == 4) {
	/* V4 trailer is six octets long (rfc4880) */
	uint8_t trailer[6];
	uint32_t nb = sig->hashlen;
	nb = htonl(nb);
	trailer[0] = sig->version;
	trailer[1] = 0xff;
	memcpy(trailer+2, &nb, 4);
	rpmDigestUpdate(ctx, trailer, sizeof(trailer));
    }

    rpmDigestFinal(ctx, (void **)&hash, &hashlen, 0);

    /* Compare leading 16 bits of digest for quick check. */
    if (hash == NULL || memcmp(hash, sig->signhash16, 2) != 0)
	goto exit;

    /*
     * If we have a key, verify the signature for real. Otherwise we've
     * done all we can, return NOKEY to indicate "looks okay but dunno."
     */
    if (key && key->alg) {
	pgpDigAlg sa = sig->alg;
	pgpDigAlg ka = key->alg;
	if (sa && sa->verify) {
	    if (sa->verify(ka, sa, hash, hashlen, sig->hash_algo) == 0) {
		res = RPMRC_OK;
	    }
	}
    } else {
	res = RPMRC_NOKEY;
    }

exit:
    free(hash);
    return res;
}
| 0
|
359,205
|
/* Release a BPF ring buffer map: free the underlying ring buffer, then
 * the containing map structure itself. */
static void ringbuf_map_free(struct bpf_map *map)
{
	struct bpf_ringbuf_map *rb_map =
		container_of(map, struct bpf_ringbuf_map, map);

	bpf_ringbuf_free(rb_map->rb);
	kfree(rb_map);
}
| 0
|
218,789
|
/* Read one PSD layer's pixel data into layer_info->image: set up the
 * layer image (compose operator, colorspace, psd:* artifacts), read each
 * channel, apply the layer opacity and, if present, the layer mask.
 * Returns MagickTrue on success. */
static MagickBooleanType ReadPSDLayer(Image *image,const ImageInfo *image_info,
  const PSDInfo *psd_info,LayerInfo* layer_info,ExceptionInfo *exception)
{
  char
    message[MaxTextExtent];

  MagickBooleanType
    status;

  PSDCompressionType
    compression;

  ssize_t
    j;

  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      "    setting up new layer image");
  if (psd_info->mode != IndexedMode)
    (void) SetImageBackgroundColor(layer_info->image);
  layer_info->image->compose=PSDBlendModeToCompositeOperator(
    layer_info->blendkey);
  if (layer_info->visible == MagickFalse)
    {
      layer_info->image->compose=NoCompositeOp;
      (void) SetImageArtifact(layer_info->image,"psd:layer.invisible","true");
    }
  if (psd_info->mode == CMYKMode)
    (void) SetImageColorspace(layer_info->image,CMYKColorspace);
  else if ((psd_info->mode == BitmapMode) || (psd_info->mode == DuotoneMode) ||
           (psd_info->mode == GrayscaleMode))
    (void) SetImageColorspace(layer_info->image,GRAYColorspace);
  /*
    Set up some hidden attributes for folks that need them.
  */
  (void) FormatLocaleString(message,MaxTextExtent,"%.20g",
    (double) layer_info->page.x);
  (void) SetImageArtifact(layer_info->image,"psd:layer.x",message);
  (void) FormatLocaleString(message,MaxTextExtent,"%.20g",
    (double) layer_info->page.y);
  (void) SetImageArtifact(layer_info->image,"psd:layer.y",message);
  (void) FormatLocaleString(message,MaxTextExtent,"%.20g",(double)
    layer_info->opacity);
  (void) SetImageArtifact(layer_info->image,"psd:layer.opacity",message);
  (void) SetImageProperty(layer_info->image,"label",(char *) layer_info->name);
  status=MagickTrue;
  /* Each channel carries its own compression marker before the data. */
  for (j=0; j < (ssize_t) layer_info->channels; j++)
  {
    if (image->debug != MagickFalse)
      (void) LogMagickEvent(CoderEvent,GetMagickModule(),
        "    reading data for channel %.20g",(double) j);
    compression=(PSDCompressionType) ReadBlobShort(layer_info->image);
    if ((compression == ZipWithPrediction) && (image->depth == 32))
      {
        (void) ThrowMagickException(exception,GetMagickModule(),
          TypeError,"CompressionNotSupported","ZipWithPrediction(32 bit)");
        return(MagickFalse);
      }
    layer_info->image->compression=ConvertPSDCompression(compression);
    /* Channel type -1 is the transparency (alpha) channel. */
    if (layer_info->channel_info[j].type == -1)
      layer_info->image->matte=MagickTrue;
    status=ReadPSDChannel(layer_info->image,image_info,psd_info,layer_info,
      (size_t) j,compression,exception);
    InheritException(exception,&layer_info->image->exception);
    if (status == MagickFalse)
      break;
  }
  if (status != MagickFalse)
    status=ApplyPSDLayerOpacity(layer_info->image,layer_info->opacity,
      MagickFalse,exception);
  if ((status != MagickFalse) &&
      (layer_info->image->colorspace == CMYKColorspace))
    status=NegateImage(layer_info->image,MagickFalse);
  if ((status != MagickFalse) && (layer_info->mask.image != (Image *) NULL))
    {
      const char
        *option;

      layer_info->mask.image->page.x=layer_info->mask.page.x;
      layer_info->mask.image->page.y=layer_info->mask.page.y;
      /* Do not composite the mask when it is disabled */
      if ((layer_info->mask.flags & 0x02) == 0x02)
        layer_info->mask.image->compose=NoCompositeOp;
      else
        status=ApplyPSDOpacityMask(layer_info->image,layer_info->mask.image,
          layer_info->mask.background == 0 ? 0 : QuantumRange,MagickFalse,
          exception);
      option=GetImageOption(image_info,"psd:preserve-opacity-mask");
      if (IsStringTrue(option) != MagickFalse)
        PreservePSDOpacityMask(image,layer_info,exception);
      layer_info->mask.image=DestroyImage(layer_info->mask.image);
    }
  return(status);
}
| 0
|
344,789
|
/* Open a tun/tap tunnel device. `tun` is an explicit unit number or
 * SSH_TUNID_ANY to probe for a free one; `mode` selects layer-3 (tun)
 * vs layer-2 (tap). On success returns the open fd, brings the interface
 * up if needed, and (optionally) stores the interface name in *ifname
 * (caller frees). Returns -1 on failure. */
tun_open(int tun, int mode, char **ifname)
{
#if defined(CUSTOM_SYS_TUN_OPEN)
	return (sys_tun_open(tun, mode, ifname));
#elif defined(SSH_TUN_OPENBSD)
	struct ifreq ifr;
	char name[100];
	int fd = -1, sock;
	const char *tunbase = "tun";

	if (ifname != NULL)
		*ifname = NULL;

	if (mode == SSH_TUNMODE_ETHERNET)
		tunbase = "tap";

	/* Open the tunnel device */
	if (tun <= SSH_TUNID_MAX) {
		snprintf(name, sizeof(name), "/dev/%s%d", tunbase, tun);
		fd = open(name, O_RDWR);
	} else if (tun == SSH_TUNID_ANY) {
		/* Probe units downward until one opens. */
		for (tun = 100; tun >= 0; tun--) {
			snprintf(name, sizeof(name), "/dev/%s%d",
			    tunbase, tun);
			if ((fd = open(name, O_RDWR)) >= 0)
				break;
		}
	} else {
		debug_f("invalid tunnel %u", tun);
		return -1;
	}

	if (fd == -1) {
		debug_f("%s open: %s", name, strerror(errno));
		return -1;
	}

	debug_f("%s mode %d fd %d", name, mode, fd);

	/* Bring interface up if it is not already */
	snprintf(ifr.ifr_name, sizeof(ifr.ifr_name), "%s%d", tunbase, tun);
	if ((sock = socket(PF_UNIX, SOCK_STREAM, 0)) == -1)
		goto failed;

	if (ioctl(sock, SIOCGIFFLAGS, &ifr) == -1) {
		debug_f("get interface %s flags: %s", ifr.ifr_name,
		    strerror(errno));
		goto failed;
	}

	if (!(ifr.ifr_flags & IFF_UP)) {
		ifr.ifr_flags |= IFF_UP;
		if (ioctl(sock, SIOCSIFFLAGS, &ifr) == -1) {
			debug_f("activate interface %s: %s", ifr.ifr_name,
			    strerror(errno));
			goto failed;
		}
	}

	if (ifname != NULL)
		*ifname = xstrdup(ifr.ifr_name);

	close(sock);
	return fd;

 failed:
	if (fd >= 0)
		close(fd);
	if (sock >= 0)
		close(sock);
	return -1;
#else
	error("Tunnel interfaces are not supported on this platform");
	return (-1);
#endif
}
| 0
|
229,240
|
// Assemble the wire-format message for this response: optionally compress
// the body in place, then emit the frame header followed by the body
// fragments (fragments are referenced via append_static, not copied, so
// the response must outlive the message).
scattered_message<char> cql_server::response::make_message(uint8_t version, cql_compression compression) {
    if (compression != cql_compression::none) {
        compress(compression);
    }
    scattered_message<char> msg;
    auto frame = make_frame(version, _body.size());
    msg.append(std::move(frame));
    for (auto&& fragment : _body.fragments()) {
        msg.append_static(reinterpret_cast<const char*>(fragment.data()), fragment.size());
    }
    return msg;
}
| 0
|
473,880
|
/* Optimizer helper: return 1 if nodes x and y can provably never match
 * the same input (they are mutually exclusive), 0 otherwise. Returning 0
 * is always safe; 1 enables stronger optimizations. Handles the
 * ctype/cclass/string node combinations, swapping x and y where needed
 * so each pair is analyzed once. */
is_not_included(Node* x, Node* y, regex_t* reg)
{
  int i;
  OnigDistance len;
  OnigCodePoint code;
  UChar *p, c;
  int ytype;

 retry:
  ytype = NTYPE(y);
  switch (NTYPE(x)) {
  case NT_CTYPE:
    {
      switch (ytype) {
      case NT_CTYPE:
        /* Same ctype with opposite negation can never both match. */
        if (NCTYPE(y)->ctype == NCTYPE(x)->ctype &&
            NCTYPE(y)->not  != NCTYPE(x)->not)
          return 1;
        else
          return 0;
        break;

      case NT_CCLASS:
      swap:
        {
          Node* tmp;
          tmp = x; x = y; y = tmp;
          goto retry;
        }
        break;

      case NT_STR:
        goto swap;
        break;

      default:
        break;
      }
    }
    break;

  case NT_CCLASS:
    {
      CClassNode* xc = NCCLASS(x);
      switch (ytype) {
      case NT_CTYPE:
        switch (NCTYPE(y)->ctype) {
        case ONIGENC_CTYPE_WORD:
          if (NCTYPE(y)->not == 0) {
            /* y matches word chars: exclusive iff the class contains
             * no single-byte word characters (and no multibyte part). */
            if (IS_NULL(xc->mbuf) && !IS_NCCLASS_NOT(xc)) {
              for (i = 0; i < SINGLE_BYTE_SIZE; i++) {
                if (BITSET_AT(xc->bs, i)) {
                  if (IS_CODE_SB_WORD(reg->enc, i)) return 0;
                }
              }
              return 1;
            }
            return 0;
          }
          else {
            /* y matches non-word chars: exclusive iff the class
             * contains no single-byte non-word characters. */
            for (i = 0; i < SINGLE_BYTE_SIZE; i++) {
              if (! IS_CODE_SB_WORD(reg->enc, i)) {
                if (!IS_NCCLASS_NOT(xc)) {
                  if (BITSET_AT(xc->bs, i))
                    return 0;
                }
                else {
                  if (! BITSET_AT(xc->bs, i))
                    return 0;
                }
              }
            }
            return 1;
          }
          break;

        default:
          break;
        }
        break;

      case NT_CCLASS:
        {
          int v;
          CClassNode* yc = NCCLASS(y);

          /* Exclusive iff no single-byte code is accepted by both
           * classes and neither has a multibyte part in the way. */
          for (i = 0; i < SINGLE_BYTE_SIZE; i++) {
            v = BITSET_AT(xc->bs, i);
            if ((v != 0 && !IS_NCCLASS_NOT(xc)) ||
                (v == 0 && IS_NCCLASS_NOT(xc))) {
              v = BITSET_AT(yc->bs, i);
              if ((v != 0 && !IS_NCCLASS_NOT(yc)) ||
                  (v == 0 && IS_NCCLASS_NOT(yc)))
                return 0;
            }
          }
          if ((IS_NULL(xc->mbuf) && !IS_NCCLASS_NOT(xc)) ||
              (IS_NULL(yc->mbuf) && !IS_NCCLASS_NOT(yc)))
            return 1;
          return 0;
        }
        break;

      case NT_STR:
        goto swap;
        break;

      default:
        break;
      }
    }
    break;

  case NT_STR:
    {
      StrNode* xs = NSTR(x);
      if (NSTRING_LEN(x) == 0)
        break;

      c = *(xs->s);
      switch (ytype) {
      case NT_CTYPE:
        switch (NCTYPE(y)->ctype) {
        case ONIGENC_CTYPE_WORD:
          if (ONIGENC_IS_MBC_WORD(reg->enc, xs->s, xs->end))
            return NCTYPE(y)->not;
          else
            return !(NCTYPE(y)->not);
          break;
        default:
          break;
        }
        break;

      case NT_CCLASS:
        {
          CClassNode* cc = NCCLASS(y);

          code = ONIGENC_MBC_TO_CODE(reg->enc, xs->s,
                                     xs->s + ONIGENC_MBC_MAXLEN(reg->enc));
          return (onig_is_code_in_cc(reg->enc, code, cc) != 0 ? 0 : 1);
        }
        break;

      case NT_STR:
        {
          UChar *q;
          StrNode* ys = NSTR(y);
          len = NSTRING_LEN(x);
          if (len > NSTRING_LEN(y)) len = NSTRING_LEN(y);
          if (NSTRING_IS_AMBIG(x) || NSTRING_IS_AMBIG(y)) {
            /* tiny version */
            return 0;
          }
          else {
            /* Exclusive iff the common prefixes differ. */
            for (i = 0, p = ys->s, q = xs->s; (OnigDistance)i < len; i++, p++, q++) {
              if (*p != *q) return 1;
            }
          }
        }
        break;

      default:
        break;
      }
    }
    break;

  default:
    break;
  }

  return 0;
}
| 0
|
449,321
|
/* Read a UTF-16LE string of `len` bytes at file offset `ptr` and convert
 * it (crudely, by keeping every other byte) to a NUL-terminated ASCII
 * string. Length is capped at 400 bytes. Returns a cli_malloc'd string
 * the caller must free, or NULL on error. */
static char *getsistring(FILE *f, uint32_t ptr, uint32_t len) {
  char *name;
  uint32_t i;

  if (!len) return NULL;
  /* Cap length to keep allocations and reads bounded. */
  if (len>400) len=400;
  name = cli_malloc(len+1);
  if (!name) {
    cli_dbgmsg("SIS: OOM\n");
    return NULL;
  }
  /* Fix: check the seek result; previously a failed fseek went unnoticed
   * and fread would read from an undefined file position. */
  if (fseek(f, ptr, SEEK_SET) == -1) {
    cli_dbgmsg("SIS: Unable to seek to string\n");
    free(name);
    return NULL;
  }
  if (fread(name, len, 1, f)!=1) {
    cli_dbgmsg("SIS: Unable to read string\n");
    free(name);
    return NULL;
  }
  /* UTF-16LE -> ASCII: keep the low byte of each 16-bit unit. */
  for (i = 0 ; i < len; i+=2) name[i/2] = name[i];
  name[i/2]='\0';
  return name;
}
| 0
|
232,937
|
/* Tear down the gzip content-decoding writer by releasing its zlib
 * stream state. */
static void gzip_close_writer(struct Curl_easy *data,
                              struct contenc_writer *writer)
{
  struct zlib_params *zp = (struct zlib_params *) &writer->params;

  exit_zlib(data, &zp->z, &zp->zlib_init, CURLE_OK);
}
| 0
|
491,898
|
/*
 * Submit a fully assembled readpages request for the given file.
 * pos/count cover the contiguous page range gathered in req->pages.
 * With async reads the request completes via fuse_readpages_end();
 * otherwise it is sent synchronously and finished right here.
 */
static void fuse_send_readpages(struct fuse_req *req, struct file *file)
{
	struct fuse_file *ff = file->private_data;
	struct fuse_conn *fc = ff->fc;
	loff_t pos = page_offset(req->pages[0]);
	/* total byte count of the gathered pages */
	size_t count = req->num_pages << PAGE_CACHE_SHIFT;

	req->out.argpages = 1;
	req->out.page_zeroing = 1;
	fuse_read_fill(req, file, pos, count, FUSE_READ);
	/* snapshot the attribute version so short reads can be validated */
	req->misc.read.attr_ver = fuse_get_attr_version(fc);
	if (fc->async_read) {
		/* the completion callback owns this extra file reference */
		req->ff = fuse_file_get(ff);
		req->end = fuse_readpages_end;
		fuse_request_send_background(fc, req);
	} else {
		fuse_request_send(fc, req);
		fuse_readpages_end(fc, req);
		fuse_put_request(fc, req);
	}
}
| 0
|
384,896
|
/*
 * Translate character "c" into its printable representation for the
 * current buffer; thin wrapper around transchar_buf() using the global
 * "curbuf".
 */
transchar(int c)
{
    return transchar_buf(curbuf, c);
}
| 0
|
101,659
|
void WebProcessProxy::assumeReadAccessToBaseURL(const String& urlString)
{
KURL url(KURL(), urlString);
if (!url.isLocalFile())
return;
KURL baseURL(KURL(), url.baseAsString());
m_localPathsWithAssumedReadAccess.add(baseURL.fileSystemPath());
}
| 0
|
261,897
|
/*
 * Base64-encode src into dst using the given 64-entry alphabet "basis".
 * When "padding" is set the output is padded with '=' to a multiple of
 * four characters.  dst->start must be large enough; dst->length is set
 * to the number of bytes actually written.
 */
njs_encode_base64_core(njs_str_t *dst, const njs_str_t *src,
    const u_char *basis, njs_bool_t padding)
{
    u_char  *out, *in, b0, b1, b2;
    size_t  remaining;

    in = src->start;
    out = dst->start;
    remaining = src->length;

    /* Full 3-byte groups map to 4 output characters. */
    while (remaining > 2) {
        b0 = in[0];
        b1 = in[1];
        b2 = in[2];

        *out++ = basis[b0 >> 2];
        *out++ = basis[((b0 & 0x03) << 4) | (b1 >> 4)];
        *out++ = basis[((b1 & 0x0f) << 2) | (b2 >> 6)];
        *out++ = basis[b2 & 0x3f];

        in += 3;
        remaining -= 3;
    }

    /* Trailing 1 or 2 bytes. */
    if (remaining > 0) {
        b0 = in[0];
        *out++ = basis[b0 >> 2];

        if (remaining == 1) {
            *out++ = basis[(b0 & 0x03) << 4];

            if (padding) {
                *out++ = '=';
                *out++ = '=';
            }

        } else {
            b1 = in[1];

            *out++ = basis[((b0 & 0x03) << 4) | (b1 >> 4)];
            *out++ = basis[(b1 & 0x0f) << 2];

            if (padding) {
                *out++ = '=';
            }
        }
    }

    dst->length = out - dst->start;
}
| 0
|
476,112
|
/*
 * Unlink and free a configuration from a composite device.  If the
 * configuration is currently active it is reset first; the spinlock
 * only guards the active-config check, the actual teardown in
 * remove_config() runs unlocked.
 */
void usb_remove_config(struct usb_composite_dev *cdev,
		      struct usb_configuration *config)
{
	unsigned long flags;

	spin_lock_irqsave(&cdev->lock, flags);

	if (cdev->config == config)
		reset_config(cdev);

	spin_unlock_irqrestore(&cdev->lock, flags);

	remove_config(cdev, config);
}
| 0
|
359,540
|
/*
 * "no neighbor <X> dont-capability-negotiate": clear the peer flag that
 * suppresses BGP capability negotiation for the given peer/peer-group.
 */
DEFUN (no_neighbor_dont_capability_negotiate,
       no_neighbor_dont_capability_negotiate_cmd,
       NO_NEIGHBOR_CMD2 "dont-capability-negotiate",
       NO_STR
       NEIGHBOR_STR
       NEIGHBOR_ADDR_STR2
       "Do not perform capability negotiation\n")
{
  return peer_flag_unset_vty (vty, argv[0], PEER_FLAG_DONT_CAPABILITY);
}
| 0
|
409,521
|
/*
 * Stop termcap mode: leave "keypad transmit", bracketed-paste and raw
 * modes and restore the terminal to its normal state.  Pending terminal
 * responses (T_CRV, T_U7, T_RBG) are drained first so that an external
 * program does not receive them.  No-op when termcap is not active.
 */
    stoptermcap(void)
{
    screen_stop_highlight();
    reset_cterm_colors();
    if (termcap_active)
    {
#ifdef FEAT_TERMRESPONSE
# ifdef FEAT_GUI
	if (!gui.in_use && !gui.starting)
# endif
	{
	    // May need to discard T_CRV, T_U7 or T_RBG response.
	    if (termrequest_any_pending())
	    {
# ifdef UNIX
		// Give the terminal a chance to respond.
		mch_delay(100L, 0);
# endif
# ifdef TCIFLUSH
		// Discard data received but not read.
		if (exiting)
		    tcflush(fileno(stdin), TCIFLUSH);
# endif
	    }
	    // Check for termcodes first, otherwise an external program may
	    // get them.
	    check_for_codes_from_term();
	}
#endif
	MAY_WANT_TO_LOG_THIS;
#if defined(UNIX) || defined(VMS)
	// Disable xterm's focus reporting mode if 'esckeys' is set.
	if (p_ek && *T_FD != NUL)
	    out_str(T_FD);
#endif
	out_str(T_BD);		// disable bracketed paste mode
	out_str(T_KE);		// stop "keypad transmit" mode
	out_flush();
	termcap_active = FALSE;
	cursor_on();		// just in case it is still off
	out_str(T_CTE);		// stop "raw" mode
	out_str(T_TE);		// stop termcap mode
	screen_start();		// don't know where cursor is now
	out_flush();
    }
}
| 0
|
459,211
|
/* Mark a classifier flag word as offloaded in hardware, bumping the
 * per-block offload counter exactly once per filter. */
static void tcf_block_offload_inc(struct tcf_block *block, u32 *flags)
{
	if (!(*flags & TCA_CLS_FLAGS_IN_HW)) {
		*flags |= TCA_CLS_FLAGS_IN_HW;
		atomic_inc(&block->offloadcnt);
	}
}
| 0
|
215,549
|
/*
 * Decompress an LZW (compress .Z) stream from file descriptor `in` to
 * file descriptor `out`, using the global input buffer state (inbuf,
 * insize, inptr) set up by the caller.  Returns OK on success or ERROR
 * for unusable parameters; corrupt input aborts via gzip_error().
 */
int unlzw(in, out)
    int in, out;    /* input and output file descriptors */
{
    REG2   char_type  *stackp;
    REG3   code_int   code;
    REG4   int        finchar;
    REG5   code_int   oldcode;
    REG6   code_int   incode;
    REG7   long       inbits;
    REG8   long       posbits;
    REG9   int        outpos;
/*  REG10  int        insize; (global) */
    REG11  unsigned   bitmask;
    REG12  code_int   free_ent;
    REG13  code_int   maxcode;
    REG14  code_int   maxmaxcode;
    REG15  int        n_bits;
    REG16  int        rsize;

#ifdef MAXSEG_64K
    tab_prefix[0] = tab_prefix0;
    tab_prefix[1] = tab_prefix1;
#endif
    maxbits = get_byte();
    block_mode = maxbits & BLOCK_MODE;
    if ((maxbits & LZW_RESERVED) != 0) {
	WARN((stderr, "\n%s: %s: warning, unknown flags 0x%x\n",
	      program_name, ifname, maxbits & LZW_RESERVED));
    }
    maxbits &= BIT_MASK;
    maxmaxcode = MAXCODE(maxbits);

    if (maxbits > BITS) {
	fprintf(stderr,
		"\n%s: %s: compressed with %d bits, can only handle %d bits\n",
		program_name, ifname, maxbits, BITS);
	exit_code = ERROR;
	return ERROR;
    }
    rsize = insize;
    maxcode = MAXCODE(n_bits = INIT_BITS)-1;
    bitmask = (1<<n_bits)-1;
    oldcode = -1;
    finchar = 0;
    outpos = 0;
    posbits = inptr<<3;

    free_ent = ((block_mode) ? FIRST : 256);

    clear_tab_prefixof();	/* Initialize the first 256 entries in the table. */

    for (code = 255 ; code >= 0 ; --code) {
	tab_suffixof(code) = (char_type)code;
    }
    do {
	REG1 	int i;
	int  	e;
	int  	o;

    resetbuf:
	/* Shift the unread tail of the input buffer to the front.
	 * posbits may legitimately point past the bytes read so far
	 * (after a code-width change rounds it up), so clamp the
	 * remaining count at zero instead of letting it go negative:
	 * a negative value here underflowed the buffer copy on
	 * crafted archives (CVE-2010-0001). */
	o = posbits >> 3;
	e = o <= insize ? insize - o : 0;

	for (i = 0 ; i < e ; ++i) {
	    inbuf[i] = inbuf[i+o];
	}
	insize = e;
	posbits = 0;

	if (insize < INBUF_EXTRA) {
	    rsize = read_buffer (in, (char *) inbuf + insize, INBUFSIZ);
	    if (rsize == -1) {
		read_error();
	    }
	    insize += rsize;
	    bytes_in += (off_t)rsize;
	}
	/* Only whole codes are consumed while more input may follow. */
	inbits = ((rsize != 0) ? ((long)insize - insize%n_bits)<<3 :
		  ((long)insize<<3)-(n_bits-1));

	while (inbits > posbits) {
	    if (free_ent > maxcode) {
		/* Code width grows: round posbits up to a code boundary. */
		posbits = ((posbits-1) +
			   ((n_bits<<3)-(posbits-1+(n_bits<<3))%(n_bits<<3)));
		++n_bits;
		if (n_bits == maxbits) {
		    maxcode = maxmaxcode;
		} else {
		    maxcode = MAXCODE(n_bits)-1;
		}
		bitmask = (1<<n_bits)-1;
		goto resetbuf;
	    }
	    input(inbuf,posbits,code,n_bits,bitmask);
	    Tracev((stderr, "%d ", code));

	    if (oldcode == -1) {
		if (256 <= code)
		    gzip_error ("corrupt input.");
		outbuf[outpos++] = (char_type)(finchar = (int)(oldcode=code));
		continue;
	    }
	    if (code == CLEAR && block_mode) {
		clear_tab_prefixof();
		free_ent = FIRST - 1;
		posbits = ((posbits-1) +
			   ((n_bits<<3)-(posbits-1+(n_bits<<3))%(n_bits<<3)));
		maxcode = MAXCODE(n_bits = INIT_BITS)-1;
		bitmask = (1<<n_bits)-1;
		goto resetbuf;
	    }
	    incode = code;
	    stackp = de_stack;

	    if (code >= free_ent) { /* Special case for KwKwK string. */
		if (code > free_ent) {
#ifdef DEBUG
		    char_type *p;

		    posbits -= n_bits;
		    p = &inbuf[posbits>>3];
		    fprintf(stderr,
			    "code:%ld free_ent:%ld n_bits:%d insize:%u\n",
			    code, free_ent, n_bits, insize);
		    fprintf(stderr,
			    "posbits:%ld inbuf:%02X %02X %02X %02X %02X\n",
			    posbits, p[-1],p[0],p[1],p[2],p[3]);
#endif
		    if (!test && outpos > 0) {
			write_buf(out, (char*)outbuf, outpos);
			bytes_out += (off_t)outpos;
		    }
		    gzip_error (to_stdout
				? "corrupt input."
				: "corrupt input. Use zcat to recover some data.");
		}
		*--stackp = (char_type)finchar;
		code = oldcode;
	    }

	    while ((cmp_code_int)code >= (cmp_code_int)256) {
		/* Generate output characters in reverse order */
		*--stackp = tab_suffixof(code);
		code = tab_prefixof(code);
	    }
	    *--stackp = (char_type)(finchar = tab_suffixof(code));

	    /* And put them out in forward order */
	    {
		REG1 int i;

		if (outpos+(i = (de_stack-stackp)) >= OUTBUFSIZ) {
		    do {
			if (i > OUTBUFSIZ-outpos) i = OUTBUFSIZ-outpos;

			if (i > 0) {
			    memcpy(outbuf+outpos, stackp, i);
			    outpos += i;
			}
			if (outpos >= OUTBUFSIZ) {
			    if (!test) {
				write_buf(out, (char*)outbuf, outpos);
				bytes_out += (off_t)outpos;
			    }
			    outpos = 0;
			}
			stackp+= i;
		    } while ((i = (de_stack-stackp)) > 0);
		} else {
		    memcpy(outbuf+outpos, stackp, i);
		    outpos += i;
		}
	    }

	    if ((code = free_ent) < maxmaxcode) { /* Generate the new entry. */
		tab_prefixof(code) = (unsigned short)oldcode;
		tab_suffixof(code) = (char_type)finchar;
		free_ent = code+1;
	    }
	    oldcode = incode;	/* Remember previous code. */
	}
    } while (rsize != 0);

    if (!test && outpos > 0) {
	write_buf(out, (char*)outbuf, outpos);
	bytes_out += (off_t)outpos;
    }
    return OK;
}
| 1
|
301,397
|
/*
 * POSIX backend for SMB_VFS_OPEN: open smb_fname->base_name with the
 * given flags/mode.  Named streams are not supported by this backend,
 * so any stream name fails with ENOENT.  Returns the new fd or -1.
 */
static int vfswrap_open(vfs_handle_struct *handle,
			struct smb_filename *smb_fname,
			files_struct *fsp, int flags, mode_t mode)
{
	int result = -1;

	START_PROFILE(syscall_open);

	if (smb_fname->stream_name) {
		/* this backend has no stream support */
		errno = ENOENT;
		goto out;
	}

	result = open(smb_fname->base_name, flags, mode);
 out:
	END_PROFILE(syscall_open);
	return result;
}
| 0
|
522,329
|
/*
 * Open a libMeshb mesh/solution file for reading or writing.
 *
 * mod == GmfRead : varargs are (int *version, int *dimension), filled in
 *                  from the file header; the keyword table is scanned.
 * mod == GmfWrite: varargs are (int version, int dimension); the header
 *                  is written immediately.
 *
 * The file type (ascii/binary, mesh/solution) is inferred from the
 * extension.  Returns an opaque mesh handle (the GmfMshSct pointer cast
 * to int64_t) or 0 on any error.  Errors unwind via longjmp to the
 * setjmp below, which closes any open descriptor and frees the struct.
 */
int64_t GmfOpenMesh(const char *FilNam, int mod, ...)
{
   int KwdCod, res, *PtrVer, *PtrDim, err;
   int64_t MshIdx;
   char str[ GmfStrSiz ];
   va_list VarArg;
   GmfMshSct *msh;

   /*---------------------*/
   /* MESH STRUCTURE INIT */
   /*---------------------*/

   if(!(msh = calloc(1, sizeof(GmfMshSct))))
      return(0);

   MshIdx = (int64_t)msh;

   // Save the current stack environment for longjmp
   if( (err = setjmp(msh->err)) != 0)
   {
#ifdef GMFDEBUG
      printf("libMeshb : mesh %p : error %d\n", msh, err);
#endif
      if(msh->hdl != NULL)
         fclose(msh->hdl);

      if(msh->FilDes != 0)
#ifdef GMF_WINDOWS
         _close(msh->FilDes);
#else
         close(msh->FilDes);
#endif

      free(msh);
      return(0);
   }

   // Copy the FilNam into the structure
   if(strlen(FilNam) + 7 >= GmfStrSiz)
      longjmp(msh->err, -4);

   strcpy(msh->FilNam, FilNam);

   // Store the opening mod (read or write) and guess
   // the filetype (binary or ascii) depending on the extension
   msh->mod = mod;
   msh->buf = (void *)msh->DblBuf;
   msh->FltBuf = (void *)msh->DblBuf;
   msh->IntBuf = (void *)msh->DblBuf;

   if(strstr(msh->FilNam, ".meshb"))
      msh->typ |= (Bin | MshFil);
   else if(strstr(msh->FilNam, ".mesh"))
      msh->typ |= (Asc | MshFil);
   else if(strstr(msh->FilNam, ".solb"))
      msh->typ |= (Bin | SolFil);
   else if(strstr(msh->FilNam, ".sol"))
      msh->typ |= (Asc | SolFil);
   else
      longjmp(msh->err, -5);

   // Open the file in the required mod and initialize the mesh structure
   if(msh->mod == GmfRead)
   {
      /*-----------------------*/
      /* OPEN FILE FOR READING */
      /*-----------------------*/

      va_start(VarArg, mod);
      PtrVer = va_arg(VarArg, int *);
      PtrDim = va_arg(VarArg, int *);
      va_end(VarArg);

      // Read the endian coding tag, the mesh version
      // and the mesh dimension (mandatory kwd)
      if(msh->typ & Bin)
      {
         // Create the name string and open the file
#ifdef WITH_GMF_AIO
         // [Bruno] added binary flag (necessary under Windows)
         msh->FilDes = open(msh->FilNam, OPEN_READ_FLAGS, OPEN_READ_MODE);

         if(msh->FilDes <= 0)
            longjmp(msh->err, -6);

         // Read the endian coding tag
         if(read(msh->FilDes, &msh->cod, WrdSiz) != WrdSiz)
            longjmp(msh->err, -7);
#else
         // [Bruno] added binary flag (necessary under Windows)
         if(!(msh->hdl = fopen(msh->FilNam, "rb")))
            longjmp(msh->err, -8);

         // Read the endian coding tag
         safe_fread(&msh->cod, WrdSiz, 1, msh->hdl, msh->err);
#endif

         // Read the mesh version and the mesh dimension (mandatory kwd)
         if( (msh->cod != 1) && (msh->cod != 16777216) )
            longjmp(msh->err, -9);

         ScaWrd(msh, (unsigned char *)&msh->ver);

         if( (msh->ver < 1) || (msh->ver > 4) )
            longjmp(msh->err, -10);

         if( (msh->ver >= 3) && (sizeof(int64_t) != 8) )
            longjmp(msh->err, -11);

         ScaWrd(msh, (unsigned char *)&KwdCod);

         if(KwdCod != GmfDimension)
            longjmp(msh->err, -12);

         GetPos(msh);
         ScaWrd(msh, (unsigned char *)&msh->dim);
      }
      else
      {
         // Create the name string and open the file
         if(!(msh->hdl = fopen(msh->FilNam, "rb")))
            longjmp(msh->err, -13);

         do
         {
            res = fscanf(msh->hdl, "%100s", str);
         }while( (res != EOF) && strcmp(str, "MeshVersionFormatted") );

         if(res == EOF)
            longjmp(msh->err, -14);

         safe_fscanf(msh->hdl, "%d", &msh->ver, msh->err);

         if( (msh->ver < 1) || (msh->ver > 4) )
            longjmp(msh->err, -15);

         do
         {
            res = fscanf(msh->hdl, "%100s", str);
         }while( (res != EOF) && strcmp(str, "Dimension") );

         if(res == EOF)
            longjmp(msh->err, -16);

         safe_fscanf(msh->hdl, "%d", &msh->dim, msh->err);
      }

      if( (msh->dim != 2) && (msh->dim != 3) )
         longjmp(msh->err, -17);

      (*PtrVer) = msh->ver;
      (*PtrDim) = msh->dim;

      // Set default real numbers size
      if(msh->ver == 1)
         msh->FltSiz = 32;
      else
         msh->FltSiz = 64;

      /*------------*/
      /* KW READING */
      /*------------*/

      // Read the list of kw present in the file
      if(!ScaKwdTab(msh))
         return(0);

      return(MshIdx);
   }
   else if(msh->mod == GmfWrite)
   {
      /*-----------------------*/
      /* OPEN FILE FOR WRITING */
      /*-----------------------*/

      msh->cod = 1;

      // Check if the user provided a valid version number and dimension
      va_start(VarArg, mod);
      msh->ver = va_arg(VarArg, int);
      msh->dim = va_arg(VarArg, int);
      va_end(VarArg);

      if( (msh->ver < 1) || (msh->ver > 4) )
         longjmp(msh->err, -18);

      if( (msh->ver >= 3) && (sizeof(int64_t) != 8) )
         longjmp(msh->err, -19);

      if( (msh->dim != 2) && (msh->dim != 3) )
         longjmp(msh->err, -20);

      // Set default real numbers size
      if(msh->ver == 1)
         msh->FltSiz = 32;
      else
         msh->FltSiz = 64;

      // Create the mesh file
      if(msh->typ & Bin)
      {
         /*
          * [Bruno] replaced previous call to creat():
          * with a call to open(), because Windows needs the
          * binary flag to be specified.
          */
#ifdef WITH_GMF_AIO
         msh->FilDes = open(msh->FilNam, OPEN_WRITE_FLAGS, OPEN_WRITE_MODE);

         if(msh->FilDes <= 0)
            longjmp(msh->err, -21);
#else
         if(!(msh->hdl = fopen(msh->FilNam, "wb")))
            longjmp(msh->err, -22);
#endif
      }
      else if(!(msh->hdl = fopen(msh->FilNam, "wb")))
         longjmp(msh->err, -23);

      /*------------*/
      /* KW WRITING */
      /*------------*/

      // Write the mesh version and dimension
      if(msh->typ & Asc)
      {
         fprintf(msh->hdl, "%s %d\n\n",
                 GmfKwdFmt[ GmfVersionFormatted ][0], msh->ver);
         fprintf(msh->hdl, "%s %d\n",
                 GmfKwdFmt[ GmfDimension ][0], msh->dim);
      }
      else
      {
         RecWrd(msh, (unsigned char *)&msh->cod);
         RecWrd(msh, (unsigned char *)&msh->ver);
         GmfSetKwd(MshIdx, GmfDimension, 0);
         RecWrd(msh, (unsigned char *)&msh->dim);
      }

      return(MshIdx);
   }
   else
   {
      // Unknown open mode: nothing was opened yet, just release the struct.
      free(msh);
      return(0);
   }
}
| 0
|
338,172
|
// Abort parsing with an error unless we are currently inside a function body.
void WasmBinaryBuilder::requireFunctionContext(const char* error) {
  if (currFunction) {
    return;
  }
  throwError(std::string("in a non-function context: ") + error);
}
| 0
|
90,778
|
// Asynchronously collects the origins of the given storage type whose data
// was modified after |modified_since| and invokes |callback| with them.
// The task keeps itself alive via ref-counting until completion.
void QuotaManager::GetOriginsModifiedSince(
    StorageType type,
    base::Time modified_since,
    GetOriginsCallback* callback) {
  LazyInitialize();
  make_scoped_refptr(new GetModifiedSinceTask(
      this, type, modified_since, callback))->Start();
}
| 0
|
338,162
|
// One-time setup before emitting the binary: gather the module's heap
// types (and their indices) and build the import information table.
void WasmBinaryWriter::prepare() {
  // Collect function types and their frequencies. Collect information in each
  // function in parallel, then merge.
  ModuleUtils::collectHeapTypes(*wasm, types, typeIndices);
  importInfo = wasm::make_unique<ImportInfo>(*wasm);
}
| 0
|
336,138
|
/*
 * netlink changelink handler for ip6gre devices: apply new tunnel
 * parameters to an existing tunnel.  The fallback device cannot be
 * reconfigured, and the new parameters must not collide with a
 * different existing tunnel.
 */
static int ip6gre_changelink(struct net_device *dev, struct nlattr *tb[],
			     struct nlattr *data[])
{
	struct ip6_tnl *t, *nt = netdev_priv(dev);
	struct net *net = nt->net;
	struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
	struct __ip6_tnl_parm p;
	struct ip_tunnel_encap ipencap;

	if (dev == ign->fb_tunnel_dev)
		return -EINVAL;

	if (ip6gre_netlink_encap_parms(data, &ipencap)) {
		int err = ip6_tnl_encap_setup(nt, &ipencap);

		if (err < 0)
			return err;
	}

	ip6gre_netlink_parms(data, &p);

	t = ip6gre_tunnel_locate(net, &p, 0);

	if (t) {
		/* the parameters belong to another tunnel -> reject */
		if (t->dev != dev)
			return -EEXIST;
	} else {
		t = nt;
	}

	/* re-link so the tunnel is hashed under its new parameters */
	ip6gre_tunnel_unlink(ign, t);
	ip6gre_tnl_change(t, &p, !tb[IFLA_MTU]);
	ip6gre_tunnel_link(ign, t);
	return 0;
}
| 0
|
234,808
|
/*
 * Ask a running balance operation to pause and wait until it has
 * actually stopped running.  Returns 0 on success, or -ENOTCONN when no
 * balance is configured or running.
 */
int btrfs_pause_balance(struct btrfs_fs_info *fs_info)
{
	int ret = 0;

	mutex_lock(&fs_info->balance_mutex);
	if (!fs_info->balance_ctl) {
		mutex_unlock(&fs_info->balance_mutex);
		return -ENOTCONN;
	}

	if (test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)) {
		atomic_inc(&fs_info->balance_pause_req);
		/* drop the mutex so the balance thread can observe the request */
		mutex_unlock(&fs_info->balance_mutex);

		wait_event(fs_info->balance_wait_q,
			   !test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));

		mutex_lock(&fs_info->balance_mutex);
		/* we are good with balance_ctl ripped off from under us */
		BUG_ON(test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
		atomic_dec(&fs_info->balance_pause_req);
	} else {
		ret = -ENOTCONN;
	}

	mutex_unlock(&fs_info->balance_mutex);
	return ret;
}
| 0
|
473,834
|
/*
 * Append the exact-string info of "add" to "to": copy as many whole
 * (possibly multibyte) characters as fit in OPT_EXACT_MAXLEN, merge the
 * ignore-case flag, and recompute reach_end / anchor info for the
 * concatenated string.
 */
concat_opt_exact_info(OptExactInfo* to, OptExactInfo* add, OnigEncoding enc)
{
  int i, j, len;
  UChar *p, *end;
  OptAncInfo tanc;

  if (! to->ignore_case && add->ignore_case) {
    if (to->len >= add->len) return ;  /* avoid */

    to->ignore_case = 1;
  }

  p = add->s;
  end = p + add->len;
  for (i = to->len; p < end; ) {
    len = enclen(enc, p, end);
    if (i + len > OPT_EXACT_MAXLEN) break;  /* never split a character */
    for (j = 0; j < len && p < end; j++)
      to->s[i++] = *p++;
  }

  to->len = i;
  /* result only reaches the string end if all of "add" was copied */
  to->reach_end = (p == end ? add->reach_end : 0);

  concat_opt_anc_info(&tanc, &to->anc, &add->anc, 1, 1);
  if (! to->reach_end) tanc.right_anchor = 0;
  copy_opt_anc_info(&to->anc, &tanc);
}
| 0
|
197,110
|
void Compute(OpKernelContext* c) override {
core::RefCountPtr<Var> v;
OP_REQUIRES_OK(c, LookupResource(c, HandleFromInput(c, 0), &v));
OP_REQUIRES_OK(c, EnsureSparseVariableAccess<Device, T>(c, v.get()));
// NOTE: We hold the lock for the whole gather operation instead
// of increasing the reference count of v->tensor() to avoid a
// situation where a write to the same variable will see a
// reference count greater than one and make a copy of the
// (potentially very large) tensor buffer.
tf_shared_lock ml(*v->mu());
const Tensor& params = *v->tensor();
const Tensor& indices = c->input(1);
OP_REQUIRES(
c, TensorShapeUtils::IsVectorOrHigher(params.shape()),
errors::InvalidArgument("params must be at least 1 dimensional"));
// Check that we have enough index space
const int64_t N = indices.NumElements();
OP_REQUIRES(
c, params.dim_size(0) <= std::numeric_limits<Index>::max(),
errors::InvalidArgument("params.shape[0] too large for ",
DataTypeString(DataTypeToEnum<Index>::v()),
" indexing: ", params.dim_size(0), " > ",
std::numeric_limits<Index>::max()));
// The result shape is params.shape[:batch_dims] +
// indices.shape[batch_dims:] + params.shape[batch_dims+1:].
TensorShape result_shape;
for (int i = 0; i < batch_dims_; ++i) {
result_shape.AddDim(params.dim_size(i));
}
for (int i = batch_dims_; i < indices.dims(); ++i) {
result_shape.AddDim(indices.dim_size(i));
}
for (int i = batch_dims_ + 1; i < params.dims(); ++i) {
result_shape.AddDim(params.dim_size(i));
}
Tensor* out = nullptr;
Tensor tmp;
if (params.dtype() == DT_VARIANT) {
tmp = Tensor(DT_VARIANT, result_shape);
c->set_output(0, tmp);
out = &tmp;
} else {
OP_REQUIRES_OK(c, c->allocate_output(0, result_shape, &out));
}
if (N > 0) {
Tensor tmp_indices;
// Points to the original or updated (if batch_dims is set) indices.
const Tensor* op_indices = &indices;
if (batch_dims_ > 0) {
OP_REQUIRES_OK(c, c->allocate_temp(indices.dtype(), indices.shape(),
&tmp_indices));
functor::DenseUpdate<Device, Index, ASSIGN> copy_functor;
copy_functor(c->eigen_device<Device>(), tmp_indices.flat<Index>(),
indices.flat<Index>());
AddBatchOffsets(&tmp_indices, params);
op_indices = &tmp_indices;
}
int64_t gather_dim_size = 1;
for (int idx = 0; idx <= batch_dims_; ++idx) {
gather_dim_size *= params.dim_size(idx);
}
int64_t inner_size = 1;
for (int i = batch_dims_ + 1; i < params.dims(); ++i) {
inner_size *= params.dim_size(i);
}
auto params_flat = params.shaped<T, 3>({1, gather_dim_size, inner_size});
const auto indices_flat = op_indices->flat<Index>();
auto out_flat = out->shaped<T, 3>({1, N, out->NumElements() / N});
functor::GatherFunctor<Device, T, Index> functor;
int64_t bad_i = functor(c, params_flat, indices_flat, out_flat);
OP_REQUIRES(
c, bad_i < 0,
errors::InvalidArgument(
"indices", SliceDebugString(indices.shape(), bad_i), " = ",
indices_flat(bad_i), " is not in [0, ", params.dim_size(0), ")"));
}
}
| 1
|
269,521
|
/*
 * Shift the a* and b* channels of a Lab image from a signed range
 * centered on 0.5 to an unsigned representation by wrapping negative
 * values, row by row, prior to encoding.  Returns MagickTrue on
 * success, MagickFalse if any row could not be read or synced.
 */
static MagickBooleanType EncodeLabImage(Image *image,ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    status;

  ssize_t
    y;

  status=MagickTrue;
  image_view=AcquireAuthenticCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    Quantum
      *magick_restrict q;

    ssize_t
      x;

    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        break;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        a,
        b;

      /* re-center and wrap negative chroma values into [0,1) */
      a=QuantumScale*GetPixela(image,q)-0.5;
      if (a < 0.0)
        a+=1.0;
      b=QuantumScale*GetPixelb(image,q)-0.5;
      if (b < 0.0)
        b+=1.0;
      SetPixela(image,QuantumRange*a,q);
      SetPixelb(image,QuantumRange*b,q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      {
        status=MagickFalse;
        break;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
| 0
|
314,495
|
/*
 * Public API: print the SDP session description into "buf" (at most
 * "size" bytes).  Delegates entirely to print_session(); see that
 * function for the exact return convention.
 */
PJ_DEF(int) pjmedia_sdp_print( const pjmedia_sdp_session *desc,
			       char *buf, pj_size_t size)
{
    return print_session(desc, buf, size);
}
| 0
|
229,232
|
// Handle a client AUTH_RESPONSE frame: feed the raw SASL payload to a
// fresh challenge object from the configured authenticator.  If the SASL
// exchange is complete, log the user in (enforcing login permission and
// refreshing per-service-level parameters) and reply AUTH_SUCCESS;
// otherwise reply AUTH_CHALLENGE with the server's next challenge.
future<std::unique_ptr<cql_server::response>> cql_server::connection::process_auth_response(uint16_t stream, request_reader in, service::client_state& client_state,
        tracing::trace_state_ptr trace_state) {
    ++_server._stats.auth_responses;
    auto sasl_challenge = client_state.get_auth_service()->underlying_authenticator().new_sasl_challenge();
    auto buf = in.read_raw_bytes_view(in.bytes_left());
    auto challenge = sasl_challenge->evaluate_response(buf);
    if (sasl_challenge->is_complete()) {
        // sasl_challenge is captured by value to keep it alive across the
        // asynchronous user lookup.
        return sasl_challenge->get_authenticated_user().then([this, sasl_challenge, stream, &client_state, challenge = std::move(challenge), trace_state](auth::authenticated_user user) mutable {
            client_state.set_login(std::move(user));
            auto f = client_state.check_user_can_login();
            f = f.then([&client_state] {
                return client_state.maybe_update_per_service_level_params();
            });
            return f.then([this, stream, &client_state, challenge = std::move(challenge), trace_state]() mutable {
                return make_ready_future<std::unique_ptr<cql_server::response>>(make_auth_success(stream, std::move(challenge), trace_state));
            });
        });
    }
    return make_ready_future<std::unique_ptr<cql_server::response>>(make_auth_challenge(stream, std::move(challenge), trace_state));
}
| 0
|
427,225
|
/*
 * Close the scope of all active local variables down to level 'tolevel',
 * recording each one's end-of-scope pc in its debug information.
 */
static void removevars (FuncState *fs, int tolevel) {
  fs->ls->dyd->actvar.n -= (fs->nactvar - tolevel);
  while (fs->nactvar > tolevel) {
    LocVar *var = localdebuginfo(fs, --fs->nactvar);
    if (var)  /* does it have debug information? */
      var->endpc = fs->pc;
  }
}
| 0
|
458,301
|
/*
 * Persist the rotation state table to "stateFilename".  The state is
 * written to a sibling ".tmp" file that inherits the original file's
 * owner, mode, ACL and security context (with the world-readable bit
 * stripped to prevent lock abuse), then atomically renamed into place.
 * Entries unused for more than a year are dropped.  Returns 0 on
 * success, non-zero on error.
 */
static int writeState(const char *stateFilename)
{
    struct logState *p;
    FILE *f;
    char *chptr;
    unsigned int i = 0;
    int error = 0;
    int bytes = 0;
    int fdcurr;
    int fdsave;
    struct stat sb;
    char *tmpFilename = NULL;
    struct tm now;
    time_t now_time, last_time;
    char *prevCtx;
    int force_mode = 0;

    if (!strcmp(stateFilename, "/dev/null"))
        /* explicitly asked not to write the state file */
        return 0;

    localtime_r(&nowSecs, &now);

    tmpFilename = malloc(strlen(stateFilename) + 5 );
    if (tmpFilename == NULL) {
        message_OOM();
        return 1;
    }
    strcpy(tmpFilename, stateFilename);
    strcat(tmpFilename, ".tmp");
    /* Remove possible tmp state file from previous run */
    error = unlink(tmpFilename);
    if (error == -1 && errno != ENOENT) {
        message(MESS_ERROR, "error removing old temporary state file %s: %s\n",
                tmpFilename, strerror(errno));
        free(tmpFilename);
        return 1;
    }
    error = 0;

    fdcurr = open(stateFilename, O_RDONLY);
    if (fdcurr == -1) {
        /* the statefile should exist, lockState() already created an empty
         * state file in case it did not exist initially */
        message(MESS_ERROR, "error opening state file %s: %s\n",
                stateFilename, strerror(errno));
        free(tmpFilename);
        return 1;
    }

    /* get attributes, to assign them to the new state file */

    if (setSecCtx(fdcurr, stateFilename, &prevCtx) != 0) {
        /* error msg already printed */
        free(tmpFilename);
        close(fdcurr);
        return 1;
    }

#ifdef WITH_ACL
    if ((prev_acl = acl_get_fd(fdcurr)) == NULL) {
        if (is_acl_well_supported(errno)) {
            message(MESS_ERROR, "getting file ACL %s: %s\n",
                    stateFilename, strerror(errno));
            restoreSecCtx(&prevCtx);
            free(tmpFilename);
            close(fdcurr);
            return 1;
        }
    }
#endif

    if (fstat(fdcurr, &sb) == -1) {
        message(MESS_ERROR, "error stating %s: %s\n", stateFilename, strerror(errno));
        restoreSecCtx(&prevCtx);
        free(tmpFilename);
#ifdef WITH_ACL
        if (prev_acl) {
            acl_free(prev_acl);
            prev_acl = NULL;
        }
#endif
        return 1;
    }

    close(fdcurr);

    if (sb.st_mode & (mode_t)S_IROTH) {
        /* drop world-readable flag to prevent others from locking */
        sb.st_mode &= ~(mode_t)S_IROTH;
        force_mode = 1;
    }

    fdsave = createOutputFile(tmpFilename, O_RDWR, &sb, prev_acl, force_mode);
#ifdef WITH_ACL
    if (prev_acl) {
        acl_free(prev_acl);
        prev_acl = NULL;
    }
#endif
    restoreSecCtx(&prevCtx);

    if (fdsave < 0) {
        free(tmpFilename);
        return 1;
    }

    f = fdopen(fdsave, "w");
    if (!f) {
        message(MESS_ERROR, "error creating temp state file %s: %s\n",
                tmpFilename, strerror(errno));
        free(tmpFilename);
        return 1;
    }

    bytes = fprintf(f, "logrotate state -- version 2\n");
    if (bytes < 0)
        error = bytes;

    /*
     * Time in seconds it takes earth to go around sun.  The value is
     * astronomical measurement (solar year) rather than something derived from
     * a convention (calendar year).
     */
#define SECONDS_IN_YEAR 31556926

    for (i = 0; i < hashSize && error == 0; i++) {
        for (p = states[i]->head.lh_first; p != NULL && error == 0;
                p = p->list.le_next) {

            /* Skip states which are not used for more than a year. */
            now_time = mktime(&now);
            last_time = mktime(&p->lastRotated);
            if (!p->isUsed && difftime(now_time, last_time) > SECONDS_IN_YEAR) {
                message(MESS_DEBUG, "Removing %s from state file, "
                        "because it does not exist and has not been rotated for one year\n",
                        p->fn);
                continue;
            }

            /* Quote the filename, escaping '"', '\' and newlines. */
            error = fputc('"', f) == EOF;
            for (chptr = p->fn; *chptr && error == 0; chptr++) {
                switch (*chptr) {
                    case '"':
                    case '\\':
                        error = fputc('\\', f) == EOF;
                        break;
                    case '\n':
                        error = fputc('\\', f) == EOF;
                        if (error == 0) {
                            error = fputc('n', f) == EOF;
                        }
                        continue;
                    default:
                        break;
                }
                if (error == 0 && fputc(*chptr, f) == EOF) {
                    error = 1;
                }
            }

            if (error == 0 && fputc('"', f) == EOF)
                error = 1;

            if (error == 0) {
                bytes = fprintf(f, " %d-%d-%d-%d:%d:%d\n",
                                p->lastRotated.tm_year + 1900,
                                p->lastRotated.tm_mon + 1,
                                p->lastRotated.tm_mday,
                                p->lastRotated.tm_hour,
                                p->lastRotated.tm_min,
                                p->lastRotated.tm_sec);
                if (bytes < 0)
                    error = bytes;
            }
        }
    }

    if (error == 0)
        error = fflush(f);

    if (error == 0)
        error = fsync(fdsave);

    if (error == 0)
        error = fclose(f);
    else
        fclose(f);

    if (error == 0) {
        /* atomic replacement of the previous state file */
        if (rename(tmpFilename, stateFilename)) {
            message(MESS_ERROR, "error renaming temp state file %s to %s: %s\n",
                    tmpFilename, stateFilename, strerror(errno));
            unlink(tmpFilename);
            error = 1;
        }
    }
    else {
        if (errno)
            message(MESS_ERROR, "error creating temp state file %s: %s\n",
                    tmpFilename, strerror(errno));
        else
            message(MESS_ERROR, "error creating temp state file %s%s\n",
                    tmpFilename, error == ENOMEM ?
                    ": Insufficient storage space is available." : "" );
        unlink(tmpFilename);
    }
    free(tmpFilename);
    return error;
}
| 0
|
481,264
|
/*
 * Poll up to "budget" completions from the connection CQ.  If the budget
 * is exhausted there may still be pending CQEs, so processing is
 * rescheduled on the tasklet instead of re-arming the CQ.
 */
static inline void mlx5_fpga_conn_cqes(struct mlx5_fpga_conn *conn,
				       unsigned int budget)
{
	struct mlx5_cqe64 *cqe;

	while (budget) {
		cqe = mlx5_cqwq_get_cqe(&conn->cq.wq);
		if (!cqe)
			break;

		budget--;
		mlx5_cqwq_pop(&conn->cq.wq);
		mlx5_fpga_conn_handle_cqe(conn, cqe);
		mlx5_cqwq_update_db_record(&conn->cq.wq);
	}

	if (!budget) {
		/* ran out of budget with work possibly remaining: poll later */
		tasklet_schedule(&conn->cq.tasklet);
		return;
	}

	mlx5_fpga_dbg(conn->fdev, "Re-arming CQ with cc# %u\n", conn->cq.wq.cc);
	/* ensure cq space is freed before enabling more cqes */
	wmb();
	mlx5_fpga_conn_arm_cq(conn);
}
| 0
|
331,755
|
// Initialize the extended paint-engine private state: the dash stroker
// wraps the basic stroker; no stroke handler or active stroker is set
// until a pen is applied.
QPaintEngineExPrivate::QPaintEngineExPrivate()
    : dasher(&stroker),
      strokeHandler(nullptr),
      activeStroker(nullptr),
      strokerPen(Qt::NoPen)
{
}
| 0
|
418,779
|
/*
 * Handle a mouse event received while in Insert mode: remove the
 * displayed '$', perform the click via do_mouse() and restart Insert
 * mode at the new position, taking care to leave Insert mode cleanly in
 * the previous window if the click moved focus to another one.
 */
    ins_mouse(int c)
{
    pos_T	tpos;
    win_T	*old_curwin = curwin;

# ifdef FEAT_GUI
    // When GUI is active, also move/paste when 'mouse' is empty
    if (!gui.in_use)
# endif
	if (!mouse_has(MOUSE_INSERT))
	    return;

    undisplay_dollar();
    tpos = curwin->w_cursor;
    if (do_mouse(NULL, c, BACKWARD, 1L, 0))
    {
	win_T	*new_curwin = curwin;

	if (curwin != old_curwin && win_valid(old_curwin))
	{
	    // Mouse took us to another window.  We need to go back to the
	    // previous one to stop insert there properly.
	    curwin = old_curwin;
	    curbuf = curwin->w_buffer;
#ifdef FEAT_JOB_CHANNEL
	    if (bt_prompt(curbuf))
		// Restart Insert mode when re-entering the prompt buffer.
		curbuf->b_prompt_insert = 'A';
#endif
	}
	start_arrow(curwin == old_curwin ? &tpos : NULL);
	if (curwin != new_curwin && win_valid(new_curwin))
	{
	    curwin = new_curwin;
	    curbuf = curwin->w_buffer;
	}
	set_can_cindent(TRUE);
    }

    // redraw status lines (in case another window became active)
    redraw_statuslines();
}
| 0
|
253,573
|
/* Reset the per-command SMB2 sent/failed counters for a tree connection. */
smb2_clear_stats(struct cifs_tcon *tcon)
{
	int cmd;

	for (cmd = 0; cmd < NUMBER_OF_SMB2_COMMANDS; cmd++) {
		atomic_set(&tcon->stats.smb2_stats.smb2_com_sent[cmd], 0);
		atomic_set(&tcon->stats.smb2_stats.smb2_com_failed[cmd], 0);
	}
}
| 0
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.