| idx (int64) | func (string) | target (int64) |
|---|---|---|
261,007
|
onig_st_lookup_strend(hash_table_type* table, const UChar* str_key,
const UChar* end_key, hash_data_type *value)
{
st_str_end_key key;
key.s = (UChar* )str_key;
key.end = (UChar* )end_key;
return onig_st_lookup(table, (st_data_t )(&key), value);
}
| 0
|
36,881
|
vips_foreign_load_gif_close( VipsForeignLoadGif *gif )
{
#ifdef HAVE_GIFLIB_5
if( gif->file ) {
int error;
if( DGifCloseFile( gif->file, &error ) == GIF_ERROR )
vips_foreign_load_gif_error_vips( gif, error );
gif->file = NULL;
}
#else
if( gif->file ) {
if( DGifCloseFile( gif->file ) == GIF_ERROR )
vips_foreign_load_gif_error_vips( gif, GifLastError() );
gif->file = NULL;
}
#endif
}
| 0
|
391,025
|
static int ntop_stats_insert_hour_sampling(lua_State *vm) {
char *sampling;
time_t rawtime;
int ifid;
NetworkInterface* iface;
StatsManager *sm;
ntop->getTrace()->traceEvent(TRACE_INFO, "%s() called", __FUNCTION__);
if(ntop_lua_check(vm, __FUNCTION__, 1, LUA_TNUMBER)) return(CONST_LUA_ERROR);
ifid = lua_tointeger(vm, 1);
if(ifid < 0)
return(CONST_LUA_ERROR);
if(ntop_lua_check(vm, __FUNCTION__, 2, LUA_TSTRING)) return(CONST_LUA_ERROR);
if((sampling = (char*)lua_tostring(vm, 2)) == NULL) return(CONST_LUA_PARAM_ERROR);
if(!(iface = ntop->getInterfaceById(ifid)) ||
!(sm = iface->getStatsManager()))
return (CONST_LUA_ERROR);
time(&rawtime);
rawtime -= (rawtime % 60);
if(sm->insertHourSampling(rawtime, sampling))
return(CONST_LUA_ERROR);
return(CONST_LUA_OK);
}
| 0
|
42,338
|
print_unix_command_map ()
{
Keymap save, cmd_xmap;
save = rl_get_keymap ();
cmd_xmap = get_cmd_xmap_from_keymap (save);
rl_set_keymap (cmd_xmap);
rl_macro_dumper (1);
rl_set_keymap (save);
return 0;
}
| 0
|
60,492
|
static int encode_public_key(RSA *rsa, u8 *key, size_t *keysize)
{
u8 buf[1024], *p = buf;
u8 bnbuf[256];
int base = 0;
int r;
const BIGNUM *rsa_n, *rsa_e;
switch (RSA_bits(rsa)) {
case 512:
base = 32;
break;
case 768:
base = 48;
break;
case 1024:
base = 64;
break;
case 2048:
base = 128;
break;
}
if (base == 0) {
fprintf(stderr, "Key length invalid.\n");
return 2;
}
*p++ = (5 * base + 7) >> 8;
*p++ = (5 * base + 7) & 0xFF;
*p++ = opt_key_num;
RSA_get0_key(rsa, &rsa_n, &rsa_e, NULL);
r = bn2cf(rsa_n, bnbuf);
if (r != 2*base) {
fprintf(stderr, "Invalid public key.\n");
return 2;
}
memcpy(p, bnbuf, 2*base);
p += 2*base;
memset(p, 0, base);
p += base;
memset(bnbuf, 0, 2*base);
memcpy(p, bnbuf, 2*base);
p += 2*base;
r = bn2cf(rsa_e, bnbuf);
memcpy(p, bnbuf, 4);
p += 4;
memcpy(key, buf, p - buf);
*keysize = p - buf;
return 0;
}
| 0
|
42,313
|
event_name2nr(char_u *start, char_u **end)
{
char_u *p;
int i;
int len;
// the event name ends with end of line, '|', a blank or a comma
for (p = start; *p && !VIM_ISWHITE(*p) && *p != ',' && *p != '|'; ++p)
;
for (i = 0; event_names[i].name != NULL; ++i)
{
len = (int)STRLEN(event_names[i].name);
if (len == p - start && STRNICMP(event_names[i].name, start, len) == 0)
break;
}
if (*p == ',')
++p;
*end = p;
if (event_names[i].name == NULL)
return NUM_EVENTS;
return event_names[i].event;
}
| 0
|
164,066
|
void _php_curl_verify_handlers(php_curl *ch, int reporterror) /* {{{ */
{
php_stream *stream;
if (!ch || !ch->handlers) {
return;
}
if (!Z_ISUNDEF(ch->handlers->std_err)) {
stream = zend_fetch_resource(&ch->handlers->std_err, -1, NULL, NULL, 2, php_file_le_stream(), php_file_le_pstream());
if (stream == NULL) {
if (reporterror) {
php_error_docref(NULL, E_WARNING, "CURLOPT_STDERR resource has gone away, resetting to stderr");
}
zval_ptr_dtor(&ch->handlers->std_err);
ZVAL_UNDEF(&ch->handlers->std_err);
curl_easy_setopt(ch->cp, CURLOPT_STDERR, stderr);
}
}
if (ch->handlers->read && !Z_ISUNDEF(ch->handlers->read->stream)) {
stream = zend_fetch_resource(&ch->handlers->read->stream, -1, NULL, NULL, 2, php_file_le_stream(), php_file_le_pstream());
if (stream == NULL) {
if (reporterror) {
php_error_docref(NULL, E_WARNING, "CURLOPT_INFILE resource has gone away, resetting to default");
}
zval_ptr_dtor(&ch->handlers->read->stream);
ZVAL_UNDEF(&ch->handlers->read->stream);
ch->handlers->read->res = NULL;
ch->handlers->read->fp = 0;
curl_easy_setopt(ch->cp, CURLOPT_INFILE, (void *) ch);
}
}
if (ch->handlers->write_header && !Z_ISUNDEF(ch->handlers->write_header->stream)) {
stream = zend_fetch_resource(&ch->handlers->write_header->stream, -1, NULL, NULL, 2, php_file_le_stream(), php_file_le_pstream());
if (stream == NULL) {
if (reporterror) {
php_error_docref(NULL, E_WARNING, "CURLOPT_WRITEHEADER resource has gone away, resetting to default");
}
zval_ptr_dtor(&ch->handlers->write_header->stream);
ZVAL_UNDEF(&ch->handlers->write_header->stream);
ch->handlers->write_header->fp = 0;
ch->handlers->write_header->method = PHP_CURL_IGNORE;
curl_easy_setopt(ch->cp, CURLOPT_WRITEHEADER, (void *) ch);
}
}
if (ch->handlers->write && !Z_ISUNDEF(ch->handlers->write->stream)) {
stream = zend_fetch_resource(&ch->handlers->write->stream, -1, NULL, NULL, 2, php_file_le_stream(), php_file_le_pstream());
if (stream == NULL) {
if (reporterror) {
php_error_docref(NULL, E_WARNING, "CURLOPT_FILE resource has gone away, resetting to default");
}
zval_ptr_dtor(&ch->handlers->write->stream);
ZVAL_UNDEF(&ch->handlers->write->stream);
ch->handlers->write->fp = 0;
ch->handlers->write->method = PHP_CURL_STDOUT;
curl_easy_setopt(ch->cp, CURLOPT_FILE, (void *) ch);
}
}
return ;
}
/* }}} */
| 0
|
20,402
|
static int php_uwsgi_startup(sapi_module_struct *sapi_module) {
	if (php_module_startup(&uwsgi_sapi_module, &uwsgi_module_entry, 1) == FAILURE) {
		return FAILURE;
	}
	else {
		return SUCCESS;
	}
}
| 0
|
477,859
|
CImg<T>& load_pnm(std::FILE *const file) {
return _load_pnm(file,0);
}
| 0
|
463,361
|
static int rtw_set_beacon(struct net_device *dev, struct ieee_param *param, int len)
{
int ret = 0;
struct adapter *padapter = rtw_netdev_priv(dev);
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct sta_priv *pstapriv = &padapter->stapriv;
unsigned char *pbuf = param->u.bcn_ie.buf;
DBG_88E("%s, len =%d\n", __func__, len);
if (!check_fwstate(pmlmepriv, WIFI_AP_STATE))
return -EINVAL;
memcpy(&pstapriv->max_num_sta, param->u.bcn_ie.reserved, 2);
if ((pstapriv->max_num_sta > NUM_STA) || (pstapriv->max_num_sta <= 0))
pstapriv->max_num_sta = NUM_STA;
if (rtw_check_beacon_data(padapter, pbuf, len - 12 - 2) == _SUCCESS) /* 12 = param header, 2:no packed */
ret = 0;
else
ret = -EINVAL;
return ret;
}
| 0
|
318,511
|
int ff_lock_avcodec(AVCodecContext *log_ctx, const AVCodec *codec)
{
_Bool exp = 0;
if (codec->caps_internal & FF_CODEC_CAP_INIT_THREADSAFE || !codec->init)
return 0;
if (lockmgr_cb) {
if ((*lockmgr_cb)(&codec_mutex, AV_LOCK_OBTAIN))
return -1;
}
if (atomic_fetch_add(&entangled_thread_counter, 1)) {
av_log(log_ctx, AV_LOG_ERROR,
"Insufficient thread locking. At least %d threads are "
"calling avcodec_open2() at the same time right now.\n",
atomic_load(&entangled_thread_counter));
if (!lockmgr_cb)
av_log(log_ctx, AV_LOG_ERROR, "No lock manager is set, please see av_lockmgr_register()\n");
atomic_store(&ff_avcodec_locked, 1);
ff_unlock_avcodec(codec);
return AVERROR(EINVAL);
}
av_assert0(atomic_compare_exchange_strong(&ff_avcodec_locked, &exp, 1));
return 0;
}
| 0
|
376,626
|
void HGraphBuilder::GenerateMathSqrt(CallRuntime* call) {
return Bailout("inlined runtime function: MathSqrt");
}
| 0
|
514,587
|
void MemoryInfo(MemoryTracker* tracker) const override {
tracker->TrackField("paths", paths);
}
| 0
|
425,774
|
apr_status_t h2_mplx_stream_cleanup(h2_mplx *m, h2_stream *stream)
{
H2_MPLX_ENTER(m);
ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c,
H2_STRM_MSG(stream, "cleanup"));
stream_cleanup(m, stream);
H2_MPLX_LEAVE(m);
return APR_SUCCESS;
}
| 0
|
409,212
|
static js_Ast *primary(js_State *J)
{
js_Ast *a;
if (J->lookahead == TK_IDENTIFIER) {
a = jsP_newstrnode(J, EXP_IDENTIFIER, J->text);
jsP_next(J);
return a;
}
if (J->lookahead == TK_STRING) {
a = jsP_newstrnode(J, EXP_STRING, J->text);
jsP_next(J);
return a;
}
if (J->lookahead == TK_REGEXP) {
a = jsP_newstrnode(J, EXP_REGEXP, J->text);
a->number = J->number;
jsP_next(J);
return a;
}
if (J->lookahead == TK_NUMBER) {
a = jsP_newnumnode(J, EXP_NUMBER, J->number);
jsP_next(J);
return a;
}
if (jsP_accept(J, TK_THIS)) return EXP0(THIS);
if (jsP_accept(J, TK_NULL)) return EXP0(NULL);
if (jsP_accept(J, TK_TRUE)) return EXP0(TRUE);
if (jsP_accept(J, TK_FALSE)) return EXP0(FALSE);
if (jsP_accept(J, '{')) { a = EXP1(OBJECT, objectliteral(J)); jsP_expect(J, '}'); return a; }
if (jsP_accept(J, '[')) { a = EXP1(ARRAY, arrayliteral(J)); jsP_expect(J, ']'); return a; }
if (jsP_accept(J, '(')) { a = expression(J, 0); jsP_expect(J, ')'); return a; }
jsP_error(J, "unexpected token in expression: %s", jsY_tokenstring(J->lookahead));
}
| 0
|
309,715
|
RenderText* SimplifiedBackwardsTextIterator::handleFirstLetter(int& startOffset, int& offsetInNode)
{
RenderText* renderer = toRenderText(m_node->renderer());
startOffset = (m_node == m_startNode) ? m_startOffset : 0;
if (!renderer->isTextFragment()) {
offsetInNode = 0;
return renderer;
}
RenderTextFragment* fragment = toRenderTextFragment(renderer);
int offsetAfterFirstLetter = fragment->start();
if (startOffset >= offsetAfterFirstLetter) {
ASSERT(!m_shouldHandleFirstLetter);
offsetInNode = offsetAfterFirstLetter;
return renderer;
}
if (!m_shouldHandleFirstLetter && offsetAfterFirstLetter < m_offset) {
m_shouldHandleFirstLetter = true;
offsetInNode = offsetAfterFirstLetter;
return renderer;
}
m_shouldHandleFirstLetter = false;
offsetInNode = 0;
return firstRenderTextInFirstLetter(fragment->firstLetter());
}
| 0
|
146,991
|
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
const TfLiteTensor* size;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kSizeTensor, &size));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
// TODO(ahentz): Our current implementations rely on the inputs being 4D.
TF_LITE_ENSURE_EQ(context, NumDimensions(input), 4);
TF_LITE_ENSURE_EQ(context, NumDimensions(size), 1);
TF_LITE_ENSURE_EQ(context, size->type, kTfLiteInt32);
// ResizeBilinear creates a float tensor even when the input is made of
// integers.
output->type = input->type;
if (!IsConstantTensor(size)) {
SetTensorToDynamic(output);
return kTfLiteOk;
}
// Ensure params are valid.
auto* params =
reinterpret_cast<TfLiteResizeBilinearParams*>(node->builtin_data);
if (params->half_pixel_centers && params->align_corners) {
context->ReportError(
context, "If half_pixel_centers is True, align_corners must be False.");
return kTfLiteError;
}
return ResizeOutputTensor(context, input, size, output);
}
| 0
|
16,258
|
void mime_field_name_value_set(HdrHeap *heap, MIMEHdrImpl *mh, MIMEField *field, int16_t name_wks_idx_or_neg1,
                               const char *name, int name_length, const char *value, int value_length,
                               int n_v_raw_printable, int n_v_raw_length, bool must_copy_strings) {
  unsigned int n_v_raw_pad = n_v_raw_length - (name_length + value_length);
  ink_assert(field->m_readiness == MIME_FIELD_SLOT_READINESS_DETACHED);
  if (must_copy_strings) {
    mime_field_name_set(heap, mh, field, name_wks_idx_or_neg1, name, name_length, true);
    mime_field_value_set(heap, mh, field, value, value_length, true);
  }
  else {
    field->m_wks_idx = name_wks_idx_or_neg1;
    field->m_ptr_name = name;
    field->m_ptr_value = value;
    field->m_len_name = name_length;
    field->m_len_value = value_length;
    if (n_v_raw_printable && (n_v_raw_pad <= 7)) {
      field->m_n_v_raw_printable = n_v_raw_printable;
      field->m_n_v_raw_printable_pad = n_v_raw_pad;
    }
    else {
      field->m_n_v_raw_printable = 0;
    }
    if ((name_wks_idx_or_neg1 == MIME_WKSIDX_CACHE_CONTROL) || (name_wks_idx_or_neg1 == MIME_WKSIDX_PRAGMA)) {
      field->m_flags |= MIME_FIELD_SLOT_FLAGS_COOKED;
    }
    if (field->is_live() && field->is_cooked()) {
      mh->recompute_cooked_stuff(field);
    }
  }
}
| 0
|
463,094
|
void doCheckAuthorization(OperationContext* opCtx) const final {
AuthorizationSession* authSession = AuthorizationSession::get(opCtx->getClient());
uassert(ErrorCodes::Unauthorized,
"Unauthorized",
authSession->isAuthorizedToParseNamespaceElement(_request.body.firstElement()));
const auto hasTerm = _request.body.hasField(kTermField);
uassertStatusOK(authSession->checkAuthForFind(
AutoGetCollection::resolveNamespaceStringOrUUID(
opCtx, CommandHelpers::parseNsOrUUID(_dbName, _request.body)),
hasTerm));
}
| 0
|
10,677
|
PHP_FUNCTION(locale_get_all_variants)
{
const char* loc_name = NULL;
int loc_name_len = 0;
int result = 0;
char* token = NULL;
char* variant = NULL;
char* saved_ptr = NULL;
intl_error_reset( NULL TSRMLS_CC );
if(zend_parse_parameters( ZEND_NUM_ARGS() TSRMLS_CC, "s",
&loc_name, &loc_name_len ) == FAILURE)
{
intl_error_set( NULL, U_ILLEGAL_ARGUMENT_ERROR,
"locale_parse: unable to parse input params", 0 TSRMLS_CC );
RETURN_FALSE;
}
if(loc_name_len == 0) {
loc_name = intl_locale_get_default(TSRMLS_C);
}
array_init( return_value );
/* If the locale is grandfathered, stop, no variants */
if( findOffset( LOC_GRANDFATHERED , loc_name ) >= 0 ){
/* ("Grandfathered Tag. No variants."); */
}
else {
/* Call ICU variant */
variant = get_icu_value_internal( loc_name , LOC_VARIANT_TAG , &result ,0);
if( result > 0 && variant){
/* Tokenize on the "_" or "-" */
token = php_strtok_r( variant , DELIMITER , &saved_ptr);
add_next_index_stringl( return_value, token , strlen(token) ,TRUE );
/* tokenize on the "_" or "-" and stop at singleton if any */
while( (token = php_strtok_r(NULL , DELIMITER, &saved_ptr)) && (strlen(token)>1) ){
add_next_index_stringl( return_value, token , strlen(token) ,TRUE );
}
}
if( variant ){
efree( variant );
}
}
}
| 1
|
173,027
|
generate_row(png_bytep row, size_t rowbytes, unsigned int y, int color_type,
int bit_depth, png_const_bytep gamma_table, double conv,
unsigned int *colors, int small)
{
int filters = 0; /* file *MASK*, 0 means the default, not NONE */
png_uint_32 size_max =
image_size_of_type(color_type, bit_depth, colors, small)-1;
png_uint_32 depth_max = (1U << bit_depth)-1; /* up to 65536 */
if (colors[0] == 0) if (small)
{
unsigned int pixel_depth = pixel_depth_of_type(color_type, bit_depth);
/* For pixel depths less than 16 generate a single row containing all the
* possible pixel values. For 16 generate all 65536 byte pair
* combinations in a 256x256 pixel array.
*/
switch (pixel_depth)
{
case 1:
assert(y == 0 && rowbytes == 1 && size_max == 1);
row[0] = 0x6CU; /* binary: 01101100, only top 2 bits used */
filters = PNG_FILTER_NONE;
break;
case 2:
assert(y == 0 && rowbytes == 1 && size_max == 3);
row[0] = 0x1BU; /* binary 00011011, all bits used */
filters = PNG_FILTER_NONE;
break;
case 4:
assert(y == 0 && rowbytes == 8 && size_max == 15);
row[0] = 0x01U;
row[1] = 0x23U; /* SUB gives 0x22U for all following bytes */
row[2] = 0x45U;
row[3] = 0x67U;
row[4] = 0x89U;
row[5] = 0xABU;
row[6] = 0xCDU;
row[7] = 0xEFU;
filters = PNG_FILTER_SUB;
break;
case 8:
/* The row will have all the pixel values in order starting with
* '1', the SUB filter will change every byte into '1' (including
* the last, which generates pixel value '0'). Since the SUB filter
* has value 1 this should result in maximum compression.
*/
assert(y == 0 && rowbytes == 256 && size_max == 255);
for (;;)
{
row[size_max] = 0xFFU & (size_max+1);
if (size_max == 0)
break;
--size_max;
}
filters = PNG_FILTER_SUB;
break;
case 16:
/* Rows are generated such that each row has a constant difference
* between the first and second byte of each pixel and so that the
* difference increases by 1 at each row. The rows start with the
* first byte value of 0 and the value increases to 255 across the
* row.
*
* The difference starts at 1, so the first row is:
*
* 0 1 1 2 2 3 3 4 ... 254 255 255 0
*
* This means that running the SUB filter on the first row produces:
*
* [SUB==1] 0 1 0 1 0 1...
*
* Then the difference is 2 on the next row, giving:
*
* 0 2 1 3 2 4 3 5 ... 254 0 255 1
*
* When the UP filter is run on this libpng produces:
*
* [UP ==2] 0 1 0 1 0 1...
*
* And so on for all the remaining rows to the final two rows:
*
* row 254: 0 255 1 0 2 1 3 2 4 3 ... 254 253 255 254
* row 255: 0 0 1 1 2 2 3 3 4 4 ... 254 254 255 255
*/
assert(rowbytes == 512 && size_max == 255);
for (;;)
{
row[2*size_max ] = 0xFFU & size_max;
row[2*size_max+1] = 0xFFU & (size_max+y+1);
if (size_max == 0)
break;
--size_max;
}
/* The first row must include PNG_FILTER_UP so that libpng knows we
* need to keep it for the following row:
*/
filters = (y == 0 ? PNG_FILTER_SUB+PNG_FILTER_UP : PNG_FILTER_UP);
break;
case 24:
case 32:
case 48:
case 64:
/* The rows are filled by an algorithm similar to the above, in the
* first row pixel bytes are all equal, increasing from 0 by 1 for
* each pixel. In the second row the bytes within a pixel are
* incremented 1,3,5,7,... from the previous row byte. Using an odd
* number ensures all the possible byte values are used.
*/
assert(size_max == 255 && rowbytes == 256*(pixel_depth>>3));
pixel_depth >>= 3; /* now in bytes */
while (rowbytes > 0)
{
const size_t pixel_index = --rowbytes/pixel_depth;
if (y == 0)
row[rowbytes] = 0xFFU & pixel_index;
else
{
const size_t byte_offset =
rowbytes - pixel_index * pixel_depth;
row[rowbytes] =
0xFFU & (pixel_index + (byte_offset * 2*y) + 1);
}
}
filters = (y == 0 ? PNG_FILTER_SUB+PNG_FILTER_UP : PNG_FILTER_UP);
break;
default:
assert(0/*NOT REACHED*/);
}
}
else switch (channels_of_type(color_type))
{
/* 1 channel: a square image with a diamond, the least luminous colors are on
* the edge of the image, the most luminous in the center.
*/
case 1:
{
png_uint_32 x;
png_uint_32 base = 2*size_max - abs(2*y-size_max);
for (x=0; x<=size_max; ++x)
{
png_uint_32 luma = base - abs(2*x-size_max);
/* 'luma' is now in the range 0..2*size_max, we need
* 0..depth_max
*/
luma = (luma*depth_max + size_max) / (2*size_max);
set_value(row, rowbytes, x, bit_depth, luma, gamma_table, conv);
}
}
break;
/* 2 channels: the color channel increases in luminosity from top to bottom,
* the alpha channel increases in opacity from left to right.
*/
case 2:
{
png_uint_32 alpha = (depth_max * y * 2 + size_max) / (2 * size_max);
png_uint_32 x;
for (x=0; x<=size_max; ++x)
{
set_value(row, rowbytes, 2*x, bit_depth,
(depth_max * x * 2 + size_max) / (2 * size_max), gamma_table,
conv);
set_value(row, rowbytes, 2*x+1, bit_depth, alpha, gamma_table,
conv);
}
}
break;
/* 3 channels: linear combinations of, from the top-left corner clockwise,
* black, green, white, red.
*/
case 3:
{
/* x0: the black->red scale (the value of the red component) at the
* start of the row (blue and green are 0).
* x1: the green->white scale (the value of the red and blue
* components at the end of the row; green is depth_max).
*/
png_uint_32 Y = (depth_max * y * 2 + size_max) / (2 * size_max);
png_uint_32 x;
/* Interpolate x/depth_max from start to end:
*
* start end difference
* red: Y Y 0
* green: 0 depth_max depth_max
* blue: 0 Y Y
*/
for (x=0; x<=size_max; ++x)
{
set_value(row, rowbytes, 3*x+0, bit_depth, /* red */ Y,
gamma_table, conv);
set_value(row, rowbytes, 3*x+1, bit_depth, /* green */
(depth_max * x * 2 + size_max) / (2 * size_max),
gamma_table, conv);
set_value(row, rowbytes, 3*x+2, bit_depth, /* blue */
(Y * x * 2 + size_max) / (2 * size_max),
gamma_table, conv);
}
}
break;
/* 4 channels: linear combinations of, from the top-left corner clockwise,
* transparent, red, green, blue.
*/
case 4:
{
/* x0: the transparent->blue scale (the value of the blue and alpha
* components) at the start of the row (red and green are 0).
* x1: the red->green scale (the value of the red and green
* components at the end of the row; blue is 0 and alpha is
* depth_max).
*/
png_uint_32 Y = (depth_max * y * 2 + size_max) / (2 * size_max);
png_uint_32 x;
/* Interpolate x/depth_max from start to end:
*
* start end difference
* red: 0 depth_max-Y depth_max-Y
* green: 0 Y Y
* blue: Y 0 -Y
* alpha: Y depth_max depth_max-Y
*/
for (x=0; x<=size_max; ++x)
{
set_value(row, rowbytes, 4*x+0, bit_depth, /* red */
((depth_max-Y) * x * 2 + size_max) / (2 * size_max),
gamma_table, conv);
set_value(row, rowbytes, 4*x+1, bit_depth, /* green */
(Y * x * 2 + size_max) / (2 * size_max),
gamma_table, conv);
set_value(row, rowbytes, 4*x+2, bit_depth, /* blue */
Y - (Y * x * 2 + size_max) / (2 * size_max),
gamma_table, conv);
set_value(row, rowbytes, 4*x+3, bit_depth, /* alpha */
Y + ((depth_max-Y) * x * 2 + size_max) / (2 * size_max),
gamma_table, conv);
}
}
break;
default:
fprintf(stderr, "makepng: internal bad channel count\n");
exit(2);
}
else if (color_type & PNG_COLOR_MASK_PALETTE)
{
/* Palette with fixed color: the image rows are all 0 and the image width
* is 16.
*/
memset(row, 0, rowbytes);
}
else if (colors[0] == channels_of_type(color_type))
switch (channels_of_type(color_type))
{
case 1:
{
const png_uint_32 luma = colors[1];
png_uint_32 x;
for (x=0; x<=size_max; ++x)
set_value(row, rowbytes, x, bit_depth, luma, gamma_table,
conv);
}
break;
case 2:
{
const png_uint_32 luma = colors[1];
const png_uint_32 alpha = colors[2];
png_uint_32 x;
for (x=0; x<size_max; ++x)
{
set_value(row, rowbytes, 2*x, bit_depth, luma, gamma_table,
conv);
set_value(row, rowbytes, 2*x+1, bit_depth, alpha, gamma_table,
conv);
}
}
break;
case 3:
{
const png_uint_32 red = colors[1];
const png_uint_32 green = colors[2];
const png_uint_32 blue = colors[3];
png_uint_32 x;
for (x=0; x<=size_max; ++x)
{
set_value(row, rowbytes, 3*x+0, bit_depth, red, gamma_table,
conv);
set_value(row, rowbytes, 3*x+1, bit_depth, green, gamma_table,
conv);
set_value(row, rowbytes, 3*x+2, bit_depth, blue, gamma_table,
conv);
}
}
break;
case 4:
{
const png_uint_32 red = colors[1];
const png_uint_32 green = colors[2];
const png_uint_32 blue = colors[3];
const png_uint_32 alpha = colors[4];
png_uint_32 x;
for (x=0; x<=size_max; ++x)
{
set_value(row, rowbytes, 4*x+0, bit_depth, red, gamma_table,
conv);
set_value(row, rowbytes, 4*x+1, bit_depth, green, gamma_table,
conv);
set_value(row, rowbytes, 4*x+2, bit_depth, blue, gamma_table,
conv);
set_value(row, rowbytes, 4*x+3, bit_depth, alpha, gamma_table,
conv);
}
}
break;
default:
fprintf(stderr, "makepng: internal bad channel count\n");
exit(2);
}
else
{
fprintf(stderr,
"makepng: --color: count(%u) does not match channels(%u)\n",
colors[0], channels_of_type(color_type));
exit(1);
}
return filters;
}
| 0
|
515,718
|
get_baudrate(TERMINAL *termp)
{
int my_ospeed;
int result;
if (GET_TTY(termp->Filedes, &termp->Nttyb) == OK) {
#ifdef TERMIOS
termp->Nttyb.c_oflag &= (unsigned) (~OFLAGS_TABS);
#else
termp->Nttyb.sg_flags &= (unsigned) (~XTABS);
#endif
}
#ifdef USE_OLD_TTY
result = (int) cfgetospeed(&(termp->Nttyb));
my_ospeed = (NCURSES_OSPEED) _nc_ospeed(result);
#else /* !USE_OLD_TTY */
#ifdef TERMIOS
my_ospeed = (NCURSES_OSPEED) cfgetospeed(&(termp->Nttyb));
#else
my_ospeed = (NCURSES_OSPEED) termp->Nttyb.sg_ospeed;
#endif
result = _nc_baudrate(my_ospeed);
#endif
termp->_baudrate = result;
ospeed = (NCURSES_OSPEED) my_ospeed;
}
| 0
|
310,335
|
int ProfileChooserView::GetDiceSigninPromoShowCount() const {
return browser_->profile()->GetPrefs()->GetInteger(
prefs::kDiceSigninUserMenuPromoCount);
}
| 0
|
153,578
|
int do_journal_get_write_access(handle_t *handle,
struct buffer_head *bh)
{
int dirty = buffer_dirty(bh);
int ret;
if (!buffer_mapped(bh) || buffer_freed(bh))
return 0;
/*
* __block_write_begin() could have dirtied some buffers. Clean
* the dirty bit as jbd2_journal_get_write_access() could complain
* otherwise about fs integrity issues. Setting of the dirty bit
* by __block_write_begin() isn't a real problem here as we clear
* the bit before releasing a page lock and thus writeback cannot
* ever write the buffer.
*/
if (dirty)
clear_buffer_dirty(bh);
BUFFER_TRACE(bh, "get write access");
ret = ext4_journal_get_write_access(handle, bh);
if (!ret && dirty)
ret = ext4_handle_dirty_metadata(handle, NULL, bh);
return ret;
}
| 0
|
328,056
|
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
target_ulong vaddr)
{
phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
}
| 0
|
25,221
|
static void useNonBlockingConnectTimeout(socket_handle_t sock) {
    int res_snd;
    int res_rcv;
#ifdef _WIN32
    const DWORD socket_timeout = SOCKET_RW_TIMEOUT_MS;
    unsigned long non_blocking = 1;
    res_snd = setsockopt(sock, SOL_SOCKET, SO_SNDTIMEO, (const char *)&socket_timeout, sizeof(socket_timeout));
    res_rcv = setsockopt(sock, SOL_SOCKET, SO_RCVTIMEO, (const char *)&socket_timeout, sizeof(socket_timeout));
    ioctlsocket(sock, FIONBIO, &non_blocking);
#else
    const struct timeval socket_timeout = {
        .tv_sec = SOCKET_RW_TIMEOUT_MS / 1000,
        .tv_usec = (SOCKET_RW_TIMEOUT_MS % 1000) * 1000
    };
    res_snd = setsockopt(sock, SOL_SOCKET, SO_SNDTIMEO, &socket_timeout, sizeof(socket_timeout));
    res_rcv = setsockopt(sock, SOL_SOCKET, SO_RCVTIMEO, &socket_timeout, sizeof(socket_timeout));
#endif
    if (res_snd != 0) g_debug("Can't set socket timeout, using default");
    if (res_rcv != 0) g_debug("Can't set socket timeout, using default");
}
| 0
|
131,131
|
static int lxc_cgroupfs_enter(struct cgroup_process_info *info, pid_t pid, bool enter_sub)
{
char pid_buf[32];
char *cgroup_tasks_fn;
int r;
struct cgroup_process_info *info_ptr;
snprintf(pid_buf, 32, "%lu", (unsigned long)pid);
for (info_ptr = info; info_ptr; info_ptr = info_ptr->next) {
char *cgroup_path = (enter_sub && info_ptr->cgroup_path_sub) ?
info_ptr->cgroup_path_sub :
info_ptr->cgroup_path;
if (!info_ptr->designated_mount_point) {
info_ptr->designated_mount_point = lxc_cgroup_find_mount_point(info_ptr->hierarchy, cgroup_path, true);
if (!info_ptr->designated_mount_point) {
SYSERROR("Could not add pid %lu to cgroup %s: internal error (couldn't find any writable mountpoint to cgroup filesystem)", (unsigned long)pid, cgroup_path);
return -1;
}
}
cgroup_tasks_fn = cgroup_to_absolute_path(info_ptr->designated_mount_point, cgroup_path, "/tasks");
if (!cgroup_tasks_fn) {
SYSERROR("Could not add pid %lu to cgroup %s: internal error", (unsigned long)pid, cgroup_path);
return -1;
}
r = lxc_write_to_file(cgroup_tasks_fn, pid_buf, strlen(pid_buf), false);
free(cgroup_tasks_fn);
if (r < 0) {
SYSERROR("Could not add pid %lu to cgroup %s: internal error", (unsigned long)pid, cgroup_path);
return -1;
}
}
return 0;
}
| 0
|
515,834
|
lookup_user_capability(const char *name)
{
struct user_table_entry const *result = 0;
if (*name != 'k') {
result = _nc_find_user_entry(name);
}
return result;
}
| 0
|
327,687
|
static int mov_write_source_reference_tag(AVIOContext *pb, MOVTrack *track, const char *reel_name){
int64_t pos = avio_tell(pb);
avio_wb32(pb, 0); /* size */
ffio_wfourcc(pb, "name"); /* Data format */
avio_wb16(pb, strlen(reel_name)); /* string size */
avio_wb16(pb, track->language); /* langcode */
avio_write(pb, reel_name, strlen(reel_name)); /* reel name */
return update_size(pb,pos);
}
| 0
|
402,298
|
static void xhci_reset_streams(XHCIEPContext *epctx)
{
unsigned int i;
for (i = 0; i < epctx->nr_pstreams; i++) {
epctx->pstreams[i].sct = -1;
}
}
| 0
|
422,232
|
ews_backend_sync_deleted_folders (EEwsBackend *backend,
GSList *list)
{
GSList *link;
for (link = list; link != NULL; link = g_slist_next (link)) {
const gchar *folder_id = link->data;
ESource *source = NULL;
if (folder_id != NULL)
source = ews_backend_folders_lookup (
backend, folder_id);
if (source == NULL)
continue;
/* This will trigger a "child-removed" signal and
* our handler will remove the hash table entry. */
e_source_remove_sync (source, NULL, NULL);
g_object_unref (source);
}
}
| 0
|
177,609
|
DataPipeConsumerDispatcher::Deserialize(const void* data,
size_t num_bytes,
const ports::PortName* ports,
size_t num_ports,
PlatformHandle* handles,
size_t num_handles) {
if (num_ports != 1 || num_handles != 1 ||
num_bytes != sizeof(SerializedState)) {
return nullptr;
}
const SerializedState* state = static_cast<const SerializedState*>(data);
if (!state->options.capacity_num_bytes || !state->options.element_num_bytes ||
state->options.capacity_num_bytes < state->options.element_num_bytes ||
state->read_offset >= state->options.capacity_num_bytes ||
state->bytes_available > state->options.capacity_num_bytes) {
return nullptr;
}
NodeController* node_controller = Core::Get()->GetNodeController();
ports::PortRef port;
if (node_controller->node()->GetPort(ports[0], &port) != ports::OK)
return nullptr;
auto region_handle = CreateSharedMemoryRegionHandleFromPlatformHandles(
std::move(handles[0]), PlatformHandle());
auto region = base::subtle::PlatformSharedMemoryRegion::Take(
std::move(region_handle),
base::subtle::PlatformSharedMemoryRegion::Mode::kUnsafe,
state->options.capacity_num_bytes,
base::UnguessableToken::Deserialize(state->buffer_guid_high,
state->buffer_guid_low));
auto ring_buffer =
base::UnsafeSharedMemoryRegion::Deserialize(std::move(region));
if (!ring_buffer.IsValid()) {
DLOG(ERROR) << "Failed to deserialize shared buffer handle.";
return nullptr;
}
scoped_refptr<DataPipeConsumerDispatcher> dispatcher =
new DataPipeConsumerDispatcher(node_controller, port,
std::move(ring_buffer), state->options,
state->pipe_id);
{
base::AutoLock lock(dispatcher->lock_);
dispatcher->read_offset_ = state->read_offset;
dispatcher->bytes_available_ = state->bytes_available;
dispatcher->new_data_available_ = state->bytes_available > 0;
dispatcher->peer_closed_ = state->flags & kFlagPeerClosed;
if (!dispatcher->InitializeNoLock())
return nullptr;
if (state->options.capacity_num_bytes >
dispatcher->ring_buffer_mapping_.mapped_size()) {
return nullptr;
}
dispatcher->UpdateSignalsStateNoLock();
}
return dispatcher;
}
| 0
|
228,157
|
static void __mem_cgroup_clear_mc(void)
{
struct mem_cgroup *from = mc.from;
struct mem_cgroup *to = mc.to;
/* we must uncharge all the leftover precharges from mc.to */
if (mc.precharge) {
__mem_cgroup_cancel_charge(mc.to, mc.precharge);
mc.precharge = 0;
}
/*
* we didn't uncharge from mc.from at mem_cgroup_move_account(), so
* we must uncharge here.
*/
if (mc.moved_charge) {
__mem_cgroup_cancel_charge(mc.from, mc.moved_charge);
mc.moved_charge = 0;
}
/* we must fixup refcnts and charges */
if (mc.moved_swap) {
/* uncharge swap account from the old cgroup */
if (!mem_cgroup_is_root(mc.from))
res_counter_uncharge(&mc.from->memsw,
PAGE_SIZE * mc.moved_swap);
__mem_cgroup_put(mc.from, mc.moved_swap);
if (!mem_cgroup_is_root(mc.to)) {
/*
* we charged both to->res and to->memsw, so we should
* uncharge to->res.
*/
res_counter_uncharge(&mc.to->res,
PAGE_SIZE * mc.moved_swap);
}
/* we've already done mem_cgroup_get(mc.to) */
mc.moved_swap = 0;
}
memcg_oom_recover(from);
memcg_oom_recover(to);
wake_up_all(&mc.waitq);
}
| 0
|
40,204
|
Status OpLevelCostEstimator::PredictNaryOp(const OpContext& op_context,
NodeCosts* node_costs) const {
const auto& op_info = op_context.op_info;
bool found_unknown_shapes = false;
// Calculate the largest known tensor size across all inputs and output.
int64_t op_count = CalculateLargestInputCount(op_info, &found_unknown_shapes);
// If output shape is available, try to use the element count calculated from
// that.
if (op_info.outputs_size() > 0) {
op_count = std::max(
op_count,
CalculateTensorElementCount(op_info.outputs(0), &found_unknown_shapes));
}
// Also calculate the output shape possibly resulting from broadcasting.
// Note that some Nary ops (such as AddN) do not support broadcasting,
// but we're including this here for completeness.
if (op_info.inputs_size() >= 2) {
op_count = std::max(op_count, CwiseOutputElementCount(op_info));
}
// Nary ops perform one operation for every element in every input tensor.
op_count *= op_info.inputs_size() - 1;
const auto sum_cost = Eigen::internal::functor_traits<
Eigen::internal::scalar_sum_op<float>>::Cost;
return PredictDefaultNodeCosts(op_count * sum_cost, op_context,
&found_unknown_shapes, node_costs);
}
| 0
|
321,575
|
void do_interrupt(CPUARMState *env)
{
uint32_t addr;
uint32_t mask;
int new_mode;
uint32_t offset;
if (IS_M(env)) {
do_interrupt_v7m(env);
return;
}
/* TODO: Vectored interrupt controller. */
switch (env->exception_index) {
case EXCP_UDEF:
new_mode = ARM_CPU_MODE_UND;
addr = 0x04;
mask = CPSR_I;
if (env->thumb)
offset = 2;
else
offset = 4;
break;
case EXCP_SWI:
if (semihosting_enabled) {
/* Check for semihosting interrupt. */
if (env->thumb) {
mask = lduw_code(env->regs[15] - 2) & 0xff;
} else {
mask = ldl_code(env->regs[15] - 4) & 0xffffff;
}
/* Only intercept calls from privileged modes, to provide some
semblance of security. */
if (((mask == 0x123456 && !env->thumb)
|| (mask == 0xab && env->thumb))
&& (env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR) {
env->regs[0] = do_arm_semihosting(env);
return;
}
}
new_mode = ARM_CPU_MODE_SVC;
addr = 0x08;
mask = CPSR_I;
/* The PC already points to the next instruction. */
offset = 0;
break;
case EXCP_BKPT:
/* See if this is a semihosting syscall. */
if (env->thumb && semihosting_enabled) {
mask = lduw_code(env->regs[15]) & 0xff;
if (mask == 0xab
&& (env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR) {
env->regs[15] += 2;
env->regs[0] = do_arm_semihosting(env);
return;
}
}
env->cp15.c5_insn = 2;
/* Fall through to prefetch abort. */
case EXCP_PREFETCH_ABORT:
new_mode = ARM_CPU_MODE_ABT;
addr = 0x0c;
mask = CPSR_A | CPSR_I;
offset = 4;
break;
case EXCP_DATA_ABORT:
new_mode = ARM_CPU_MODE_ABT;
addr = 0x10;
mask = CPSR_A | CPSR_I;
offset = 8;
break;
case EXCP_IRQ:
new_mode = ARM_CPU_MODE_IRQ;
addr = 0x18;
/* Disable IRQ and imprecise data aborts. */
mask = CPSR_A | CPSR_I;
offset = 4;
break;
case EXCP_FIQ:
new_mode = ARM_CPU_MODE_FIQ;
addr = 0x1c;
/* Disable FIQ, IRQ and imprecise data aborts. */
mask = CPSR_A | CPSR_I | CPSR_F;
offset = 4;
break;
default:
cpu_abort(env, "Unhandled exception 0x%x\n", env->exception_index);
return; /* Never happens. Keep compiler happy. */
}
/* High vectors. */
if (env->cp15.c1_sys & (1 << 13)) {
addr += 0xffff0000;
}
switch_mode (env, new_mode);
env->spsr = cpsr_read(env);
/* Clear IT bits. */
env->condexec_bits = 0;
/* Switch to the new mode, and to the correct instruction set. */
env->uncached_cpsr = (env->uncached_cpsr & ~CPSR_M) | new_mode;
env->uncached_cpsr |= mask;
/* this is a lie, as there was no c1_sys on V4T/V5, but who cares
* and we should just guard the thumb mode on V4 */
if (arm_feature(env, ARM_FEATURE_V4T)) {
env->thumb = (env->cp15.c1_sys & (1 << 30)) != 0;
}
env->regs[14] = env->regs[15] + offset;
env->regs[15] = addr;
env->interrupt_request |= CPU_INTERRUPT_EXITTB;
}
| 1
|
146,661
|
aff_check_number(int spinval, int affval, char *name)
{
if (spinval != 0 && spinval != affval)
smsg(_("%s value differs from what is used in another .aff file"), name);
}
| 0
|
393,885
|
dissect_rpcap_open_reply (tvbuff_t *tvb, packet_info *pinfo _U_,
proto_tree *parent_tree, gint offset)
{
proto_tree *tree;
proto_item *ti;
ti = proto_tree_add_item (parent_tree, hf_open_reply, tvb, offset, -1, ENC_NA);
tree = proto_item_add_subtree (ti, ett_open_reply);
linktype = tvb_get_ntohl (tvb, offset);
proto_tree_add_item (tree, hf_linktype, tvb, offset, 4, ENC_BIG_ENDIAN);
offset += 4;
proto_tree_add_item (tree, hf_tzoff, tvb, offset, 4, ENC_BIG_ENDIAN);
}
| 0
|
388,989
|
struct file_list *flist_new(int flags, char *msg)
{
struct file_list *flist;
if (!(flist = new0(struct file_list)))
out_of_memory(msg);
if (flags & FLIST_TEMP) {
if (!(flist->file_pool = pool_create(SMALL_EXTENT, 0,
out_of_memory,
POOL_INTERN)))
out_of_memory(msg);
} else {
/* This is a doubly linked list with prev looping back to
* the end of the list, but the last next pointer is NULL. */
if (!first_flist) {
flist->file_pool = pool_create(NORMAL_EXTENT, 0,
out_of_memory,
POOL_INTERN);
if (!flist->file_pool)
out_of_memory(msg);
flist->ndx_start = flist->flist_num = inc_recurse ? 1 : 0;
first_flist = cur_flist = flist->prev = flist;
} else {
struct file_list *prev = first_flist->prev;
flist->file_pool = first_flist->file_pool;
flist->ndx_start = prev->ndx_start + prev->used + 1;
flist->flist_num = prev->flist_num + 1;
flist->prev = prev;
prev->next = first_flist->prev = flist;
}
flist->pool_boundary = pool_boundary(flist->file_pool, 0);
flist_cnt++;
}
return flist;
}
| 0
|
204,231
|
void RunLoop::AfterRun() {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
running_ = false;
auto& active_run_loops_ = delegate_->active_run_loops_;
DCHECK_EQ(active_run_loops_.top(), this);
active_run_loops_.pop();
RunLoop* previous_run_loop =
active_run_loops_.empty() ? nullptr : active_run_loops_.top();
if (previous_run_loop && previous_run_loop->quit_called_)
delegate_->Quit();
}
| 0
|
453,686
|
bootParameterValidate(
Syntax *syntax,
struct berval *val )
{
char *p, *e;
if ( BER_BVISEMPTY( val ) ) {
return LDAP_INVALID_SYNTAX;
}
p = (char *)val->bv_val;
e = p + val->bv_len;
/* key */
for (; ( p < e ) && ( *p != '=' ); p++ ) {
if ( !AD_CHAR( *p ) ) {
return LDAP_INVALID_SYNTAX;
}
}
if ( *p != '=' ) {
return LDAP_INVALID_SYNTAX;
}
/* server */
for ( p++; ( p < e ) && ( *p != ':' ); p++ ) {
if ( !AD_CHAR( *p ) ) {
return LDAP_INVALID_SYNTAX;
}
}
if ( *p != ':' ) {
return LDAP_INVALID_SYNTAX;
}
/* path */
for ( p++; p < e; p++ ) {
if ( !SLAP_PRINTABLE( *p ) ) {
return LDAP_INVALID_SYNTAX;
}
}
return LDAP_SUCCESS;
}
| 0
|
265,804
|
term_write_session(FILE *fd, win_T *wp)
{
term_T *term = wp->w_buffer->b_term;
/* Create the terminal and run the command. This is not without
* risk, but let's assume the user only creates a session when this
* will be OK. */
if (fprintf(fd, "terminal ++curwin ++cols=%d ++rows=%d ",
term->tl_cols, term->tl_rows) < 0)
return FAIL;
if (term->tl_command != NULL && fputs((char *)term->tl_command, fd) < 0)
return FAIL;
return put_eol(fd);
}
| 0
|
242,798
|
static void btif_in_split_uuids_string_to_list(char *str, bt_uuid_t *p_uuid,
uint32_t *p_num_uuid)
{
char buf[64];
char *p_start = str;
char *p_needle;
uint32_t num = 0;
do
{
p_needle = strchr(p_start, ' ');
if (p_needle < p_start) break;
memset(buf, 0, sizeof(buf));
strncpy(buf, p_start, (p_needle-p_start));
string_to_uuid(buf, p_uuid + num);
num++;
p_start = ++p_needle;
} while (*p_start != 0);
*p_num_uuid = num;
}
| 0
|
377,234
|
static int coroutine_fn bdrv_co_io_em(BlockDriverState *bs, int64_t sector_num,
int nb_sectors, QEMUIOVector *iov,
bool is_write)
{
CoroutineIOCompletion co = {
.coroutine = qemu_coroutine_self(),
};
BlockDriverAIOCB *acb;
if (is_write) {
acb = bs->drv->bdrv_aio_writev(bs, sector_num, iov, nb_sectors,
bdrv_co_io_em_complete, &co);
} else {
acb = bs->drv->bdrv_aio_readv(bs, sector_num, iov, nb_sectors,
bdrv_co_io_em_complete, &co);
}
trace_bdrv_co_io_em(bs, sector_num, nb_sectors, is_write, acb);
if (!acb) {
return -EIO;
}
qemu_coroutine_yield();
return co.ret;
}
| 0
|
508,199
|
static OCSP_RESPONSE *query_responder(BIO *err, BIO *cbio, char *path,
STACK_OF(CONF_VALUE) *headers,
OCSP_REQUEST *req, int req_timeout)
{
int fd;
int rv;
int i;
OCSP_REQ_CTX *ctx = NULL;
OCSP_RESPONSE *rsp = NULL;
fd_set confds;
struct timeval tv;
if (req_timeout != -1)
BIO_set_nbio(cbio, 1);
rv = BIO_do_connect(cbio);
if ((rv <= 0) && ((req_timeout == -1) || !BIO_should_retry(cbio)))
{
BIO_puts(err, "Error connecting BIO\n");
return NULL;
}
if (BIO_get_fd(cbio, &fd) <= 0)
{
BIO_puts(err, "Can't get connection fd\n");
goto err;
}
if (req_timeout != -1 && rv <= 0)
{
FD_ZERO(&confds);
openssl_fdset(fd, &confds);
tv.tv_usec = 0;
tv.tv_sec = req_timeout;
rv = select(fd + 1, NULL, (void *)&confds, NULL, &tv);
if (rv == 0)
{
BIO_puts(err, "Timeout on connect\n");
return NULL;
}
}
ctx = OCSP_sendreq_new(cbio, path, NULL, -1);
if (!ctx)
return NULL;
for (i = 0; i < sk_CONF_VALUE_num(headers); i++)
{
CONF_VALUE *hdr = sk_CONF_VALUE_value(headers, i);
if (!OCSP_REQ_CTX_add1_header(ctx, hdr->name, hdr->value))
goto err;
}
if (!OCSP_REQ_CTX_set1_req(ctx, req))
goto err;
for (;;)
{
rv = OCSP_sendreq_nbio(&rsp, ctx);
if (rv != -1)
break;
if (req_timeout == -1)
continue;
FD_ZERO(&confds);
openssl_fdset(fd, &confds);
tv.tv_usec = 0;
tv.tv_sec = req_timeout;
if (BIO_should_read(cbio))
rv = select(fd + 1, (void *)&confds, NULL, NULL, &tv);
else if (BIO_should_write(cbio))
rv = select(fd + 1, NULL, (void *)&confds, NULL, &tv);
else
{
BIO_puts(err, "Unexpected retry condition\n");
goto err;
}
if (rv == 0)
{
BIO_puts(err, "Timeout on request\n");
break;
}
if (rv == -1)
{
BIO_puts(err, "Select error\n");
break;
}
}
err:
if (ctx)
OCSP_REQ_CTX_free(ctx);
return rsp;
}
| 0
|
164,262
|
PrintPreviewUI::~PrintPreviewUI() {
print_preview_data_service()->RemoveEntry(id_);
g_print_preview_request_id_map.Get().Erase(id_);
g_print_preview_ui_id_map.Get().Remove(id_);
}
| 0
|
371,036
|
int LibRaw::adjust_maximum()
{
int i;
ushort real_max;
float auto_threshold;
if(O.adjust_maximum_thr < 0.00001)
return LIBRAW_SUCCESS;
else if (O.adjust_maximum_thr > 0.99999)
auto_threshold = LIBRAW_DEFAULT_ADJUST_MAXIMUM_THRESHOLD;
else
auto_threshold = O.adjust_maximum_thr;
real_max = C.channel_maximum[0];
for(i = 1; i< 4; i++)
if(real_max < C.channel_maximum[i])
real_max = C.channel_maximum[i];
if (real_max > 0 && real_max < C.maximum && real_max > C.maximum* auto_threshold)
{
C.maximum = real_max;
}
return LIBRAW_SUCCESS;
}
| 0
|
346,260
|
static inline void Process_ipfix_option_templates(exporter_ipfix_domain_t *exporter, void *option_template_flowset, FlowSource_t *fs) {
void *DataPtr;
uint32_t size_left, size_required, i;
// uint32_t nr_scopes, nr_options;
uint16_t id, field_count, scope_field_count, offset, sampler_id_length;
uint16_t offset_sampler_id, offset_sampler_mode, offset_sampler_interval, found_sampler;
uint16_t offset_std_sampler_interval, offset_std_sampler_algorithm, found_std_sampling;
i = 0; // keep compiler happy
size_left = GET_FLOWSET_LENGTH(option_template_flowset) - 4; // -4 for flowset header -> id and length
if ( size_left < 6 ) {
syslog(LOG_ERR, "Process_ipfix: [%u] option template length error: size left %u too small for an options template",
exporter->info.id, size_left);
return;
}
DataPtr = option_template_flowset + 4;
id = GET_OPTION_TEMPLATE_ID(DataPtr);
field_count = GET_OPTION_TEMPLATE_FIELD_COUNT(DataPtr);
scope_field_count = GET_OPTION_TEMPLATE_SCOPE_FIELD_COUNT(DataPtr);
DataPtr += 6;
size_left -= 6;
if ( scope_field_count == 0 ) {
syslog(LOG_ERR, "Process_ipfx: [%u] scope field count error: length must not be zero",
exporter->info.id);
dbg_printf("scope field count error: length must not be zero\n");
return;
}
size_required = field_count * 2 * sizeof(uint16_t);
dbg_printf("Size left: %u, size required: %u\n", size_left, size_required);
if ( size_left < size_required ) {
syslog(LOG_ERR, "Process_ipfix: [%u] option template length error: size left %u too small for %u scopes length and %u options length",
exporter->info.id, size_left, field_count, scope_field_count);
dbg_printf("option template length error: size left %u too small for field_count %u\n",
size_left, field_count);
return;
}
dbg_printf("Decode Option Template. id: %u, field count: %u, scope field count: %u\n",
id, field_count, scope_field_count);
if ( scope_field_count == 0 ) {
syslog(LOG_ERR, "Process_ipfxi: [%u] scope field count error: length must not be zero",
exporter->info.id);
return;
}
for ( i=0; i<scope_field_count; i++ ) {
uint32_t enterprise_value;
uint16_t id, length;
int Enterprise;
id = Get_val16(DataPtr); DataPtr += 2;
length = Get_val16(DataPtr); DataPtr += 2;
Enterprise = id & 0x8000 ? 1 : 0;
if ( Enterprise ) {
size_required += 4;
dbg_printf("Adjusted: Size left: %u, size required: %u\n", size_left, size_required);
if ( size_left < size_required ) {
syslog(LOG_ERR, "Process_ipfix: [%u] option template length error: size left %u too small for %u scopes length and %u options length",
exporter->info.id, size_left, field_count, scope_field_count);
dbg_printf("option template length error: size left %u too small for field_count %u\n",
size_left, field_count);
return;
}
enterprise_value = Get_val32(DataPtr);
DataPtr += 4;
dbg_printf(" [%i] Enterprise: 1, scope id: %u, scope length %u enterprise value: %u\n",
i, id, length, enterprise_value);
} else {
dbg_printf(" [%i] Enterprise: 0, scope id: %u, scope length %u\n", i, id, length);
}
}
for ( ;i<field_count; i++ ) {
uint32_t enterprise_value;
uint16_t id, length;
int Enterprise;
id = Get_val16(DataPtr); DataPtr += 2;
length = Get_val16(DataPtr); DataPtr += 2;
Enterprise = id & 0x8000 ? 1 : 0;
if ( Enterprise ) {
size_required += 4;
dbg_printf("Adjusted: Size left: %u, size required: %u\n", size_left, size_required);
if ( size_left < size_required ) {
syslog(LOG_ERR, "Process_ipfix: [%u] option template length error: size left %u too small for %u scopes length and %u options length",
exporter->info.id, size_left, field_count, scope_field_count);
dbg_printf("option template length error: size left %u too small for field_count %u\n",
size_left, field_count);
return;
}
enterprise_value = Get_val32(DataPtr);
DataPtr += 4;
dbg_printf(" [%i] Enterprise: 1, option id: %u, option length %u enterprise value: %u\n",
i, id, length, enterprise_value);
} else {
dbg_printf(" [%i] Enterprise: 0, option id: %u, option length %u\n", i, id, length);
}
}
sampler_id_length = 0;
offset_sampler_id = 0;
offset_sampler_mode = 0;
offset_sampler_interval = 0;
offset_std_sampler_interval = 0;
offset_std_sampler_algorithm = 0;
found_sampler = 0;
found_std_sampling = 0;
offset = 0;
/* XXX
XXX Sampling for IPFIX not yet implemented due to lack of data and information
switch (type) {
// general sampling
case NF9_SAMPLING_INTERVAL:
offset_std_sampler_interval = offset;
found_std_sampling++;
break;
case NF9_SAMPLING_ALGORITHM:
offset_std_sampler_algorithm = offset;
found_std_sampling++;
break;
// individual samplers
case NF9_FLOW_SAMPLER_ID:
offset_sampler_id = offset;
sampler_id_length = length;
found_sampler++;
break;
case FLOW_SAMPLER_MODE:
offset_sampler_mode = offset;
found_sampler++;
break;
case NF9_FLOW_SAMPLER_RANDOM_INTERVAL:
offset_sampler_interval = offset;
found_sampler++;
break;
}
offset += length;
if ( found_sampler == 3 ) { // need all three tags
dbg_printf("[%u] Sampling information found\n", exporter->info.id);
InsertSamplerOffset(fs, id, offset_sampler_id, sampler_id_length, offset_sampler_mode, offset_sampler_interval);
} else if ( found_std_sampling == 2 ) { // need all two tags
dbg_printf("[%u] Std sampling information found\n", exporter->info.id);
InsertStdSamplerOffset(fs, id, offset_std_sampler_interval, offset_std_sampler_algorithm);
} else {
dbg_printf("[%u] No Sampling information found\n", exporter->info.id);
}
*/
dbg_printf("\n");
processed_records++;
} // End of Process_ipfix_option_templates
| 1
|
309,439
|
void NavigationControllerImpl::Reload(ReloadType reload_type,
bool check_for_repost) {
if (transient_entry_index_ != -1) {
NavigationEntryImpl* transient_entry = GetTransientEntry();
if (!transient_entry)
return;
LoadURL(transient_entry->GetURL(),
Referrer(),
ui::PAGE_TRANSITION_RELOAD,
transient_entry->extra_headers());
return;
}
NavigationEntryImpl* entry = NULL;
int current_index = -1;
if (IsInitialNavigation() && pending_entry_) {
entry = pending_entry_;
current_index = pending_entry_index_;
} else {
DiscardNonCommittedEntriesInternal();
current_index = GetCurrentEntryIndex();
if (current_index != -1) {
entry = GetEntryAtIndex(current_index);
}
}
if (!entry)
return;
if (last_committed_reload_type_ != ReloadType::NONE) {
DCHECK(!last_committed_reload_time_.is_null());
base::Time now =
time_smoother_.GetSmoothedTime(get_timestamp_callback_.Run());
DCHECK_GT(now, last_committed_reload_time_);
if (!last_committed_reload_time_.is_null() &&
now > last_committed_reload_time_) {
base::TimeDelta delta = now - last_committed_reload_time_;
UMA_HISTOGRAM_MEDIUM_TIMES("Navigation.Reload.ReloadToReloadDuration",
delta);
if (last_committed_reload_type_ == ReloadType::NORMAL) {
UMA_HISTOGRAM_MEDIUM_TIMES(
"Navigation.Reload.ReloadMainResourceToReloadDuration", delta);
}
}
}
entry->set_reload_type(reload_type);
if (g_check_for_repost && check_for_repost &&
entry->GetHasPostData()) {
delegate_->NotifyBeforeFormRepostWarningShow();
pending_reload_ = reload_type;
delegate_->ActivateAndShowRepostFormWarningDialog();
} else {
if (!IsInitialNavigation())
DiscardNonCommittedEntriesInternal();
SiteInstanceImpl* site_instance = entry->site_instance();
bool is_for_guests_only = site_instance && site_instance->HasProcess() &&
site_instance->GetProcess()->IsForGuestsOnly();
if (!is_for_guests_only && site_instance &&
site_instance->HasWrongProcessForURL(entry->GetURL())) {
NavigationEntryImpl* nav_entry = NavigationEntryImpl::FromNavigationEntry(
CreateNavigationEntry(
entry->GetURL(), entry->GetReferrer(), entry->GetTransitionType(),
false, entry->extra_headers(), browser_context_).release());
reload_type = ReloadType::NONE;
nav_entry->set_should_replace_entry(true);
pending_entry_ = nav_entry;
DCHECK_EQ(-1, pending_entry_index_);
} else {
pending_entry_ = entry;
pending_entry_index_ = current_index;
pending_entry_->SetTitle(base::string16());
pending_entry_->SetTransitionType(ui::PAGE_TRANSITION_RELOAD);
}
NavigateToPendingEntry(reload_type);
}
}
| 0
|
401,863
|
ZEND_VM_COLD_CONST_HANDLER(14, ZEND_BOOL_NOT, CONST|TMPVAR|CV, ANY)
{
USE_OPLINE
zval *val;
zend_free_op free_op1;
val = GET_OP1_ZVAL_PTR_UNDEF(BP_VAR_R);
if (Z_TYPE_INFO_P(val) == IS_TRUE) {
ZVAL_FALSE(EX_VAR(opline->result.var));
} else if (EXPECTED(Z_TYPE_INFO_P(val) <= IS_TRUE)) {
/* The result and op1 can be the same cv zval */
const uint32_t orig_val_type = Z_TYPE_INFO_P(val);
ZVAL_TRUE(EX_VAR(opline->result.var));
if (OP1_TYPE == IS_CV && UNEXPECTED(orig_val_type == IS_UNDEF)) {
SAVE_OPLINE();
ZVAL_UNDEFINED_OP1();
ZEND_VM_NEXT_OPCODE_CHECK_EXCEPTION();
}
} else {
SAVE_OPLINE();
ZVAL_BOOL(EX_VAR(opline->result.var), !i_zend_is_true(val));
FREE_OP1();
ZEND_VM_NEXT_OPCODE_CHECK_EXCEPTION();
}
ZEND_VM_NEXT_OPCODE();
}
| 0
|
244,535
|
void InputConnectionImpl::StartStateUpdateTimer() {
state_update_timer_.Start(
FROM_HERE, kStateUpdateTimeout,
base::BindOnce(&InputConnectionImpl::UpdateTextInputState,
base::Unretained(this),
true /* is_input_state_update_requested */));
}
| 0
|
203,332
|
void GLES2DecoderImpl::DoCompressedTexSubImage2D(
GLenum target,
GLint level,
GLint xoffset,
GLint yoffset,
GLsizei width,
GLsizei height,
GLenum format,
GLsizei image_size,
const void * data) {
TextureManager::TextureInfo* info = GetTextureInfoForTarget(target);
if (!info) {
SetGLError(GL_INVALID_OPERATION,
"glCompressedTexSubImage2D: unknown texture for target");
return;
}
GLenum type = 0;
GLenum internal_format = 0;
if (!info->GetLevelType(target, level, &type, &internal_format)) {
SetGLError(
GL_INVALID_OPERATION,
"glCompressdTexSubImage2D: level does not exist.");
return;
}
if (internal_format != format) {
SetGLError(
GL_INVALID_OPERATION,
"glCompressdTexSubImage2D: format does not match internal format.");
return;
}
if (!info->ValidForTexture(
target, level, xoffset, yoffset, width, height, format, type)) {
SetGLError(GL_INVALID_VALUE,
"glCompressdTexSubImage2D: bad dimensions.");
return;
}
glCompressedTexSubImage2D(
target, level, xoffset, yoffset, width, height, format, image_size, data);
}
| 0
|
143,716
|
SegmentBTreeRoot * JavascriptArray::GetSegmentMap() const
{
return (HasSegmentMap() ? segmentUnion.segmentBTreeRoot : nullptr);
}
| 0
|
385,620
|
PHP_FUNCTION(pcntl_getpriority)
{
long who = PRIO_PROCESS;
long pid = getpid();
int pri;
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "|ll", &pid, &who) == FAILURE) {
RETURN_FALSE;
}
/* needs to be cleared, since any returned value is valid */
errno = 0;
pri = getpriority(who, pid);
if (errno) {
PCNTL_G(last_error) = errno;
switch (errno) {
case ESRCH:
php_error_docref(NULL TSRMLS_CC, E_WARNING, "Error %d: No process was located using the given parameters", errno);
break;
case EINVAL:
php_error_docref(NULL TSRMLS_CC, E_WARNING, "Error %d: Invalid identifier flag", errno);
break;
default:
php_error_docref(NULL TSRMLS_CC, E_WARNING, "Unknown error %d has occurred", errno);
break;
}
RETURN_FALSE;
}
RETURN_LONG(pri);
}
| 0
|
165,081
|
findCursorHideCount(ClientPtr pClient, ScreenPtr pScreen)
{
CursorScreenPtr cs = GetCursorScreen(pScreen);
CursorHideCountPtr pChc;
for (pChc = cs->pCursorHideCounts; pChc != NULL; pChc = pChc->pNext) {
if (pChc->pClient == pClient) {
return pChc;
}
}
return NULL;
}
| 0
|
199,955
|
gfx::Size WebContentsImpl::GetSizeForNewRenderView(bool is_main_frame) {
gfx::Size size;
if (is_main_frame)
size = device_emulation_size_;
if (size.IsEmpty() && delegate_)
size = delegate_->GetSizeForNewRenderView(this);
if (size.IsEmpty())
size = GetContainerBounds().size();
return size;
}
| 0
|
34,827
|
static int use_db(char *database)
{
if (mysql_get_server_version(sock) >= FIRST_INFORMATION_SCHEMA_VERSION &&
!my_strcasecmp(&my_charset_latin1, database, INFORMATION_SCHEMA_DB_NAME))
return 1;
if (mysql_get_server_version(sock) >= FIRST_PERFORMANCE_SCHEMA_VERSION &&
!my_strcasecmp(&my_charset_latin1, database, PERFORMANCE_SCHEMA_DB_NAME))
return 1;
if (mysql_select_db(sock, database))
{
DBerror(sock, "when selecting the database");
return 1;
}
return 0;
} /* use_db */
| 0
|
295,759
|
static int nfs4_open_recover_helper(struct nfs4_opendata *opendata, fmode_t fmode, struct nfs4_state **res)
{
struct nfs4_state *newstate;
int ret;
opendata->o_arg.open_flags = 0;
opendata->o_arg.fmode = fmode;
memset(&opendata->o_res, 0, sizeof(opendata->o_res));
memset(&opendata->c_res, 0, sizeof(opendata->c_res));
nfs4_init_opendata_res(opendata);
ret = _nfs4_proc_open(opendata);
if (ret != 0)
return ret;
newstate = nfs4_opendata_to_nfs4_state(opendata);
if (IS_ERR(newstate))
return PTR_ERR(newstate);
nfs4_close_state(&opendata->path, newstate, fmode);
*res = newstate;
return 0;
}
| 0
|
318,414
|
av_cold void ff_af_queue_init(AVCodecContext *avctx, AudioFrameQueue *afq)
{
afq->avctx = avctx;
afq->next_pts = AV_NOPTS_VALUE;
afq->remaining_delay = avctx->delay;
afq->remaining_samples = avctx->delay;
afq->frame_queue = NULL;
}
| 0
|
67,461
|
static int pkcs7_decrypt_rinfo(unsigned char **pek, int *peklen,
PKCS7_RECIP_INFO *ri, EVP_PKEY *pkey)
{
EVP_PKEY_CTX *pctx = NULL;
unsigned char *ek = NULL;
size_t eklen;
int ret = -1;
pctx = EVP_PKEY_CTX_new(pkey, NULL);
if (!pctx)
return -1;
if (EVP_PKEY_decrypt_init(pctx) <= 0)
goto err;
if (EVP_PKEY_CTX_ctrl(pctx, -1, EVP_PKEY_OP_DECRYPT,
EVP_PKEY_CTRL_PKCS7_DECRYPT, 0, ri) <= 0) {
PKCS7err(PKCS7_F_PKCS7_DECRYPT_RINFO, PKCS7_R_CTRL_ERROR);
goto err;
}
if (EVP_PKEY_decrypt(pctx, NULL, &eklen,
ri->enc_key->data, ri->enc_key->length) <= 0)
goto err;
ek = OPENSSL_malloc(eklen);
if (ek == NULL) {
PKCS7err(PKCS7_F_PKCS7_DECRYPT_RINFO, ERR_R_MALLOC_FAILURE);
goto err;
}
if (EVP_PKEY_decrypt(pctx, ek, &eklen,
ri->enc_key->data, ri->enc_key->length) <= 0) {
ret = 0;
PKCS7err(PKCS7_F_PKCS7_DECRYPT_RINFO, ERR_R_EVP_LIB);
goto err;
}
ret = 1;
OPENSSL_clear_free(*pek, *peklen);
*pek = ek;
*peklen = eklen;
err:
EVP_PKEY_CTX_free(pctx);
if (!ret)
OPENSSL_free(ek);
return ret;
}
| 0
|
434,204
|
number_format(int value)
{
const char *result = "%d";
if ((outform != F_TERMCAP) && (value > 255)) {
unsigned long lv = (unsigned long) value;
unsigned long mm;
int bits = sizeof(unsigned long) * 8;
int nn;
for (nn = 8; nn < bits; ++nn) {
mm = 1UL << nn;
if ((mm - 16) <= lv && (mm + 16) > lv) {
result = "%#x";
break;
}
}
}
return result;
}
| 0
|
338,718
|
int swr_convert_frame(SwrContext *s,
AVFrame *out, const AVFrame *in)
{
int ret, setup = 0;
if (!swr_is_initialized(s)) {
if ((ret = swr_config_frame(s, out, in)) < 0)
return ret;
if ((ret = swr_init(s)) < 0)
return ret;
setup = 1;
} else {
// return as is or reconfigure for input changes?
if ((ret = config_changed(s, out, in)))
return ret;
}
if (out) {
if (!out->linesize[0]) {
out->nb_samples = swr_get_delay(s, s->out_sample_rate)
+ in->nb_samples*(int64_t)s->out_sample_rate / s->in_sample_rate
+ 3;
if ((ret = av_frame_get_buffer(out, 0)) < 0) {
if (setup)
swr_close(s);
return ret;
}
} else {
if (!out->nb_samples)
out->nb_samples = available_samples(out);
}
}
return convert_frame(s, out, in);
}
| 1
|
214,065
|
void Dispatcher::OnCancelSuspend(const std::string& extension_id) {
DispatchEvent(extension_id, kOnSuspendCanceledEvent);
}
| 0
|
378,622
|
static ssize_t vfswrap_listxattr(struct vfs_handle_struct *handle, const char *path, char *list, size_t size)
{
return listxattr(path, list, size);
}
| 0
|
333,138
|
static gsize calc_float_string_storage(double value)
{
int whole_value = value;
gsize i = 0;
do {
i++;
} while (whole_value /= 10);
return i + 2 + FLOAT_STRING_PRECISION;
}
| 1
|
101,900
|
static int pnm_gethdr(jas_stream_t *in, pnm_hdr_t *hdr)
{
int_fast32_t maxval;
int_fast32_t width;
int_fast32_t height;
int type;
if (pnm_getint16(in, &hdr->magic) || pnm_getsintstr(in, &width) ||
pnm_getsintstr(in, &height)) {
return -1;
}
hdr->width = width;
hdr->height = height;
if ((type = pnm_type(hdr->magic)) == PNM_TYPE_INVALID) {
return -1;
}
if (type != PNM_TYPE_PBM) {
if (pnm_getsintstr(in, &maxval)) {
return -1;
}
} else {
maxval = 1;
}
if (maxval < 0) {
hdr->maxval = -maxval;
hdr->sgnd = true;
} else {
hdr->maxval = maxval;
hdr->sgnd = false;
}
switch (type) {
case PNM_TYPE_PBM:
case PNM_TYPE_PGM:
hdr->numcmpts = 1;
break;
case PNM_TYPE_PPM:
hdr->numcmpts = 3;
break;
default:
abort();
break;
}
return 0;
}
| 0
|
173,960
|
bool NetworkThrottleManagerImpl::ThrottleImpl::IsBlocked() const {
return state_ == State::BLOCKED;
}
| 0
|
507,457
|
ASN1_INTEGER *TS_ACCURACY_get_micros(TS_ACCURACY *a)
{
return a->micros;
}
| 0
|
414,072
|
int wmi_set_mgmt_retry(struct wil6210_priv *wil, u8 retry_short)
{
int rc;
struct wmi_set_mgmt_retry_limit_cmd cmd = {
.mgmt_retry_limit = retry_short,
};
struct {
struct wmi_cmd_hdr wmi;
struct wmi_set_mgmt_retry_limit_event evt;
} __packed reply;
wil_dbg_wmi(wil, "Setting mgmt retry short %d\n", retry_short);
if (!test_bit(WMI_FW_CAPABILITY_MGMT_RETRY_LIMIT, wil->fw_capabilities))
return -ENOTSUPP;
reply.evt.status = WMI_FW_STATUS_FAILURE;
rc = wmi_call(wil, WMI_SET_MGMT_RETRY_LIMIT_CMDID, &cmd, sizeof(cmd),
WMI_SET_MGMT_RETRY_LIMIT_EVENTID, &reply, sizeof(reply),
100);
if (rc)
return rc;
if (reply.evt.status != WMI_FW_STATUS_SUCCESS) {
wil_err(wil, "set mgmt retry limit failed with status %d\n",
reply.evt.status);
rc = -EINVAL;
}
return rc;
}
| 0
|
400,861
|
ref_param_begin_read_collection(gs_param_list * plist, gs_param_name pkey,
gs_param_dict * pvalue,
gs_param_collection_type_t coll_type)
{
iparam_list *const iplist = (iparam_list *) plist;
iparam_loc loc;
bool int_keys = coll_type != 0;
int code = ref_param_read(iplist, pkey, &loc, -1);
dict_param_list *dlist;
if (code != 0)
return code;
dlist = (dict_param_list *)
gs_alloc_bytes(plist->memory, size_of(dict_param_list),
"ref_param_begin_read_collection");
if (dlist == 0)
return_error(gs_error_VMerror);
if (r_has_type(loc.pvalue, t_dictionary)) {
code = dict_param_list_read(dlist, loc.pvalue, NULL, false,
iplist->ref_memory);
dlist->int_keys = int_keys;
if (code >= 0)
pvalue->size = dict_length(loc.pvalue);
} else if (int_keys && r_is_array(loc.pvalue)) {
code = array_indexed_param_list_read(dlist, loc.pvalue, NULL, false,
iplist->ref_memory);
if (code >= 0)
pvalue->size = r_size(loc.pvalue);
} else
code = gs_note_error(gs_error_typecheck);
if (code < 0) {
gs_free_object(plist->memory, dlist, "ref_param_begin_write_collection");
return iparam_note_error(loc, code);
}
pvalue->list = (gs_param_list *) dlist;
return 0;
}
| 0
|
46,246
|
START_TEST(single_quote_string)
{
int i;
struct {
const char *encoded;
const char *decoded;
} test_cases[] = {
{ "'hello world'", "hello world" },
{ "'the quick brown fox \\' jumped over the fence'",
"the quick brown fox ' jumped over the fence" },
{}
};
for (i = 0; test_cases[i].encoded; i++) {
QObject *obj;
QString *str;
obj = qobject_from_json(test_cases[i].encoded);
fail_unless(obj != NULL);
fail_unless(qobject_type(obj) == QTYPE_QSTRING);
str = qobject_to_qstring(obj);
fail_unless(strcmp(qstring_get_str(str), test_cases[i].decoded) == 0);
QDECREF(str);
}
}
| 0
|
413,708
|
allocateHeader(FileInfo *nested, TranslationTableHeader **table) {
/* Allocate memory for the table header and a guess on the number of
* rules */
const TranslationTableOffset startSize = 2 * sizeof(**table);
if (*table) return 1;
tableUsed = sizeof(**table) + OFFSETSIZE; /* So no offset is ever zero */
if (!(*table = malloc(startSize))) {
compileError(nested, "Not enough memory");
if (*table != NULL) free(*table);
*table = NULL;
_lou_outOfMemory();
}
memset(*table, 0, startSize);
tableSize = startSize;
return 1;
}
| 0
|
139,436
|
static int is_non_fatal(int lib_error_code) {
return lib_error_code < 0 && lib_error_code > NGHTTP2_ERR_FATAL;
}
| 0
|
200,556
|
void WebPage::popupClosed()
{
ASSERT(d->m_selectPopup);
d->m_selectPopup = 0;
}
| 0
|
233,927
|
http_process(struct vtclog *vl, const char *spec, int sock, int *sfd)
{
struct http *hp;
char *s, *q;
int retval;
(void)sfd;
ALLOC_OBJ(hp, HTTP_MAGIC);
AN(hp);
hp->fd = sock;
hp->timeout = 15000;
hp->nrxbuf = 2048*1024;
hp->vsb = VSB_new_auto();
hp->rxbuf = malloc(hp->nrxbuf); /* XXX */
hp->sfd = sfd;
hp->vl = vl;
hp->gziplevel = 0;
hp->gzipresidual = -1;
AN(hp->rxbuf);
AN(hp->vsb);
s = strdup(spec);
q = strchr(s, '\0');
assert(q > s);
AN(s);
parse_string(s, http_cmds, hp, vl);
retval = hp->fd;
VSB_delete(hp->vsb);
free(hp->rxbuf);
free(hp);
return (retval);
}
| 0
|
31,753
|
static int ext4_alloc_file_blocks(struct file *file, ext4_lblk_t offset,
ext4_lblk_t len, loff_t new_size,
int flags, int mode)
{
struct inode *inode = file_inode(file);
handle_t *handle;
int ret = 0;
int ret2 = 0;
int retries = 0;
int depth = 0;
struct ext4_map_blocks map;
unsigned int credits;
loff_t epos;
map.m_lblk = offset;
map.m_len = len;
/*
* Don't normalize the request if it can fit in one extent so
* that it doesn't get unnecessarily split into multiple
* extents.
*/
if (len <= EXT_UNWRITTEN_MAX_LEN)
flags |= EXT4_GET_BLOCKS_NO_NORMALIZE;
/* Wait all existing dio workers, newcomers will block on i_mutex */
ext4_inode_block_unlocked_dio(inode);
inode_dio_wait(inode);
/*
* credits to insert 1 extent into extent tree
*/
credits = ext4_chunk_trans_blocks(inode, len);
/*
* We can only call ext_depth() on extent based inodes
*/
if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
depth = ext_depth(inode);
else
depth = -1;
retry:
while (ret >= 0 && len) {
/*
* Recalculate credits when extent tree depth changes.
*/
if (depth >= 0 && depth != ext_depth(inode)) {
credits = ext4_chunk_trans_blocks(inode, len);
depth = ext_depth(inode);
}
handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS,
credits);
if (IS_ERR(handle)) {
ret = PTR_ERR(handle);
break;
}
ret = ext4_map_blocks(handle, inode, &map, flags);
if (ret <= 0) {
ext4_debug("inode #%lu: block %u: len %u: "
"ext4_ext_map_blocks returned %d",
inode->i_ino, map.m_lblk,
map.m_len, ret);
ext4_mark_inode_dirty(handle, inode);
ret2 = ext4_journal_stop(handle);
break;
}
map.m_lblk += ret;
map.m_len = len = len - ret;
epos = (loff_t)map.m_lblk << inode->i_blkbits;
inode->i_ctime = ext4_current_time(inode);
if (new_size) {
if (epos > new_size)
epos = new_size;
if (ext4_update_inode_size(inode, epos) & 0x1)
inode->i_mtime = inode->i_ctime;
} else {
if (epos > inode->i_size)
ext4_set_inode_flag(inode,
EXT4_INODE_EOFBLOCKS);
}
ext4_mark_inode_dirty(handle, inode);
ret2 = ext4_journal_stop(handle);
if (ret2)
break;
}
if (ret == -ENOSPC &&
ext4_should_retry_alloc(inode->i_sb, &retries)) {
ret = 0;
goto retry;
}
ext4_inode_resume_unlocked_dio(inode);
return ret > 0 ? ret2 : ret;
}
| 0
|
403,568
|
acpi_parse_x2apic(struct acpi_subtable_header *header, const unsigned long end)
{
struct acpi_madt_local_x2apic *processor = NULL;
int apic_id;
u8 enabled;
processor = (struct acpi_madt_local_x2apic *)header;
if (BAD_MADT_ENTRY(processor, end))
return -EINVAL;
acpi_table_print_madt_entry(header);
apic_id = processor->local_apic_id;
enabled = processor->lapic_flags & ACPI_MADT_ENABLED;
#ifdef CONFIG_X86_X2APIC
/*
* We need to register disabled CPU as well to permit
* counting disabled CPUs. This allows us to size
* cpus_possible_map more accurately, to permit
* to not preallocating memory for all NR_CPUS
* when we use CPU hotplug.
*/
if (!apic->apic_id_valid(apic_id) && enabled)
printk(KERN_WARNING PREFIX "x2apic entry ignored\n");
else
acpi_register_lapic(apic_id, processor->uid, enabled);
#else
printk(KERN_WARNING PREFIX "x2apic entry ignored\n");
#endif
return 0;
}
| 0
|
11,794
|
aspath_put (struct stream *s, struct aspath *as, int use32bit )
{
struct assegment *seg = as->segments;
size_t bytes = 0;
if (!seg || seg->length == 0)
return 0;
if (seg)
{
/*
* Hey, what do we do when we have > STREAM_WRITABLE(s) here?
* At the moment, we would write out a partial aspath, and our peer
* will complain and drop the session :-/
*
* The general assumption here is that many things tested will
* never happen. And, in real live, up to now, they have not.
*/
while (seg && (ASSEGMENT_LEN(seg, use32bit) <= STREAM_WRITEABLE(s)))
{
struct assegment *next = seg->next;
int written = 0;
int asns_packed = 0;
size_t lenp;
/* Overlength segments have to be split up */
while ( (seg->length - written) > AS_SEGMENT_MAX)
{
assegment_header_put (s, seg->type, AS_SEGMENT_MAX);
assegment_data_put (s, seg->as, AS_SEGMENT_MAX, use32bit);
written += AS_SEGMENT_MAX;
bytes += ASSEGMENT_SIZE (written, use32bit);
}
/* write the final segment, probably is also the first */
lenp = assegment_header_put (s, seg->type, seg->length - written);
assegment_data_put (s, (seg->as + written), seg->length - written,
use32bit);
/* Sequence-type segments can be 'packed' together
* Case of a segment which was overlength and split up
* will be missed here, but that doesn't matter.
*/
while (next && ASSEGMENTS_PACKABLE (seg, next))
{
/* NB: We should never normally get here given we
* normalise aspath data when parse them. However, better
* safe than sorry. We potentially could call
* assegment_normalise here instead, but it's cheaper and
* easier to do it on the fly here rather than go through
* the segment list twice every time we write out
* aspath's.
*/
/* Next segment's data can fit in this one */
assegment_data_put (s, next->as, next->length, use32bit);
/* update the length of the segment header */
stream_putc_at (s, lenp, seg->length - written + next->length);
asns_packed += next->length;
next = next->next;
}
bytes += ASSEGMENT_SIZE (seg->length - written + asns_packed,
use32bit);
seg = next;
}
}
return bytes;
}
| 1
|
265,993
|
trigger_set_xy(int trigger_index, int x, int y)
{
struct map_trigger* trigger;
trigger = vector_get(s_map->triggers, trigger_index);
trigger->x = x;
trigger->y = y;
}
| 0
|
259,296
|
int SPIFFEValidator::initializeSslContexts(std::vector<SSL_CTX*>, bool) {
return SSL_VERIFY_PEER | SSL_VERIFY_FAIL_IF_NO_PEER_CERT;
}
| 0
|
334,897
|
static void pflash_timer (void *opaque)
{
pflash_t *pfl = opaque;
DPRINTF("%s: command %02x done\n", __func__, pfl->cmd);
/* Reset flash */
pfl->status ^= 0x80;
if (pfl->bypass) {
pfl->wcycle = 2;
} else {
memory_region_rom_device_set_readable(&pfl->mem, true);
pfl->wcycle = 0;
}
pfl->cmd = 0;
}
| 0
|
134,033
|
static int virtio_rpmsg_trysendto(struct rpmsg_endpoint *ept, void *data,
int len, u32 dst)
{
struct rpmsg_device *rpdev = ept->rpdev;
u32 src = ept->addr;
return rpmsg_send_offchannel_raw(rpdev, src, dst, data, len, false);
}
| 0
|
268,762
|
struct tcp_md5sig_key *tcp_md5_do_lookup(const struct sock *sk,
const union tcp_md5_addr *addr,
int family)
{
const struct tcp_sock *tp = tcp_sk(sk);
struct tcp_md5sig_key *key;
unsigned int size = sizeof(struct in_addr);
const struct tcp_md5sig_info *md5sig;
/* caller either holds rcu_read_lock() or socket lock */
md5sig = rcu_dereference_check(tp->md5sig_info,
lockdep_sock_is_held(sk));
if (!md5sig)
return NULL;
#if IS_ENABLED(CONFIG_IPV6)
if (family == AF_INET6)
size = sizeof(struct in6_addr);
#endif
hlist_for_each_entry_rcu(key, &md5sig->head, node) {
if (key->family != family)
continue;
if (!memcmp(&key->addr, addr, size))
return key;
}
return NULL;
}
| 0
|
458,124
|
static OPJ_BOOL opj_j2k_write_all_tile_parts(opj_j2k_t *p_j2k,
OPJ_BYTE * p_data,
OPJ_UINT32 * p_data_written,
OPJ_UINT32 total_data_size,
opj_stream_private_t *p_stream,
struct opj_event_mgr * p_manager
)
{
OPJ_UINT32 tilepartno = 0;
OPJ_UINT32 l_nb_bytes_written = 0;
OPJ_UINT32 l_current_nb_bytes_written;
OPJ_UINT32 l_part_tile_size;
OPJ_UINT32 tot_num_tp;
OPJ_UINT32 pino;
OPJ_BYTE * l_begin_data;
opj_tcp_t *l_tcp = 00;
opj_tcd_t * l_tcd = 00;
opj_cp_t * l_cp = 00;
l_tcd = p_j2k->m_tcd;
l_cp = &(p_j2k->m_cp);
l_tcp = l_cp->tcps + p_j2k->m_current_tile_number;
/*Get number of tile parts*/
tot_num_tp = opj_j2k_get_num_tp(l_cp, 0, p_j2k->m_current_tile_number);
/* start writing remaining tile parts */
++p_j2k->m_specific_param.m_encoder.m_current_tile_part_number;
for (tilepartno = 1; tilepartno < tot_num_tp ; ++tilepartno) {
p_j2k->m_specific_param.m_encoder.m_current_poc_tile_part_number = tilepartno;
l_current_nb_bytes_written = 0;
l_part_tile_size = 0;
l_begin_data = p_data;
if (! opj_j2k_write_sot(p_j2k, p_data,
total_data_size,
&l_current_nb_bytes_written,
p_stream,
p_manager)) {
return OPJ_FALSE;
}
l_nb_bytes_written += l_current_nb_bytes_written;
p_data += l_current_nb_bytes_written;
total_data_size -= l_current_nb_bytes_written;
l_part_tile_size += l_current_nb_bytes_written;
l_current_nb_bytes_written = 0;
if (! opj_j2k_write_sod(p_j2k, l_tcd, p_data, &l_current_nb_bytes_written,
total_data_size, p_stream, p_manager)) {
return OPJ_FALSE;
}
p_data += l_current_nb_bytes_written;
l_nb_bytes_written += l_current_nb_bytes_written;
total_data_size -= l_current_nb_bytes_written;
l_part_tile_size += l_current_nb_bytes_written;
/* Writing Psot in SOT marker */
opj_write_bytes(l_begin_data + 6, l_part_tile_size,
4); /* PSOT */
if (OPJ_IS_CINEMA(l_cp->rsiz) || OPJ_IS_IMF(l_cp->rsiz)) {
opj_j2k_update_tlm(p_j2k, l_part_tile_size);
}
++p_j2k->m_specific_param.m_encoder.m_current_tile_part_number;
}
for (pino = 1; pino <= l_tcp->numpocs; ++pino) {
l_tcd->cur_pino = pino;
/*Get number of tile parts*/
tot_num_tp = opj_j2k_get_num_tp(l_cp, pino, p_j2k->m_current_tile_number);
for (tilepartno = 0; tilepartno < tot_num_tp ; ++tilepartno) {
p_j2k->m_specific_param.m_encoder.m_current_poc_tile_part_number = tilepartno;
l_current_nb_bytes_written = 0;
l_part_tile_size = 0;
l_begin_data = p_data;
if (! opj_j2k_write_sot(p_j2k, p_data,
total_data_size,
&l_current_nb_bytes_written, p_stream,
p_manager)) {
return OPJ_FALSE;
}
l_nb_bytes_written += l_current_nb_bytes_written;
p_data += l_current_nb_bytes_written;
total_data_size -= l_current_nb_bytes_written;
l_part_tile_size += l_current_nb_bytes_written;
l_current_nb_bytes_written = 0;
if (! opj_j2k_write_sod(p_j2k, l_tcd, p_data, &l_current_nb_bytes_written,
total_data_size, p_stream, p_manager)) {
return OPJ_FALSE;
}
l_nb_bytes_written += l_current_nb_bytes_written;
p_data += l_current_nb_bytes_written;
total_data_size -= l_current_nb_bytes_written;
l_part_tile_size += l_current_nb_bytes_written;
/* Writing Psot in SOT marker */
opj_write_bytes(l_begin_data + 6, l_part_tile_size,
4); /* PSOT */
if (OPJ_IS_CINEMA(l_cp->rsiz) || OPJ_IS_IMF(l_cp->rsiz)) {
opj_j2k_update_tlm(p_j2k, l_part_tile_size);
}
++p_j2k->m_specific_param.m_encoder.m_current_tile_part_number;
}
}
*p_data_written = l_nb_bytes_written;
return OPJ_TRUE;
}
| 0
|
407,500
|
static int theme_read(THEME_REC *theme, const char *path)
{
CONFIG_REC *config;
THEME_READ_REC rec;
char *str;
config = config_open(path, -1) ;
if (config == NULL) {
/* didn't exist or no access? */
str = g_strdup_printf("Error reading theme file %s: %s",
path, g_strerror(errno));
read_error(str);
g_free(str);
return FALSE;
}
if (path == NULL)
config_parse_data(config, default_theme, "internal");
else
config_parse(config);
if (config_last_error(config) != NULL) {
str = g_strdup_printf("Ignored errors in theme %s:\n%s",
theme->name, config_last_error(config));
read_error(str);
g_free(str);
}
theme->default_color =
config_get_int(config, NULL, "default_color", -1);
theme->info_eol = config_get_bool(config, NULL, "info_eol", FALSE);
theme_read_replaces(config, theme);
if (path != NULL)
theme_copy_abstracts(theme, internal_theme);
theme_read_abstracts(config, theme);
rec.theme = theme;
rec.config = config;
g_hash_table_foreach(default_formats,
(GHFunc) theme_read_modules, &rec);
config_close(config);
return TRUE;
}
| 0
|
316,515
|
void FoFiType1C::getFontMatrix(double *mat) {
int i;
if (topDict.firstOp == 0x0c1e && privateDicts[0].hasFontMatrix) {
if (topDict.hasFontMatrix) {
mat[0] = topDict.fontMatrix[0] * privateDicts[0].fontMatrix[0] +
topDict.fontMatrix[1] * privateDicts[0].fontMatrix[2];
mat[1] = topDict.fontMatrix[0] * privateDicts[0].fontMatrix[1] +
topDict.fontMatrix[1] * privateDicts[0].fontMatrix[3];
mat[2] = topDict.fontMatrix[2] * privateDicts[0].fontMatrix[0] +
topDict.fontMatrix[3] * privateDicts[0].fontMatrix[2];
mat[3] = topDict.fontMatrix[2] * privateDicts[0].fontMatrix[1] +
topDict.fontMatrix[3] * privateDicts[0].fontMatrix[3];
mat[4] = topDict.fontMatrix[4] * privateDicts[0].fontMatrix[0] +
topDict.fontMatrix[5] * privateDicts[0].fontMatrix[2];
mat[5] = topDict.fontMatrix[4] * privateDicts[0].fontMatrix[1] +
topDict.fontMatrix[5] * privateDicts[0].fontMatrix[3];
} else {
for (i = 0; i < 6; ++i) {
mat[i] = privateDicts[0].fontMatrix[i];
}
}
} else {
for (i = 0; i < 6; ++i) {
mat[i] = topDict.fontMatrix[i];
}
}
}
| 0
|
126,425
|
void AxoGluonArc(double *args)
{
SetLineWidth(axolinewidth + args[7]);
if ( args[9] ) { /* Clockwise */
double a = args[3]; args[3] = args[4]; args[4] = a;
}
if ( args[8] ) { /* Dashes */
args[7] = args[8];
DashGluonArc(args);
}
else {
GluonArc(args);
}
}
| 0
|
348,081
|
static int iccdomain(i_ctx_t * i_ctx_p, ref *space, float *ptr)
{
int components, i, code = 0;
ref *tempref, ICCdict, valref;
code = array_get(imemory, space, 1, &ICCdict);
if (code < 0)
return code;
code = dict_find_string(&ICCdict, "N", &tempref);
if (code < 0)
return code;
if (code == 0)
return gs_note_error(gs_error_undefined);
components = tempref->value.intval;
code = dict_find_string(&ICCdict, "Range", &tempref);
if (code > 0 && !r_has_type(tempref, t_null)) {
for (i=0;i<components * 2;i++) {
code = array_get(imemory, tempref, i, &valref);
if (code < 0)
return code;
if (r_has_type(&valref, t_integer))
ptr[i * 2] = (float)valref.value.intval;
else
ptr[i * 2] = valref.value.realval;
}
} else {
for (i=0;i<components;i++) {
ptr[i * 2] = 0;
ptr[(i * 2) + 1] = 1;
}
}
return 0;
}
| 1
|
402,821
|
poolGrow(STRING_POOL *pool)
{
if (pool->freeBlocks) {
if (pool->start == 0) {
pool->blocks = pool->freeBlocks;
pool->freeBlocks = pool->freeBlocks->next;
pool->blocks->next = NULL;
pool->start = pool->blocks->s;
pool->end = pool->start + pool->blocks->size;
pool->ptr = pool->start;
return XML_TRUE;
}
if (pool->end - pool->start < pool->freeBlocks->size) {
BLOCK *tem = pool->freeBlocks->next;
pool->freeBlocks->next = pool->blocks;
pool->blocks = pool->freeBlocks;
pool->freeBlocks = tem;
memcpy(pool->blocks->s, pool->start,
(pool->end - pool->start) * sizeof(XML_Char));
pool->ptr = pool->blocks->s + (pool->ptr - pool->start);
pool->start = pool->blocks->s;
pool->end = pool->start + pool->blocks->size;
return XML_TRUE;
}
}
if (pool->blocks && pool->start == pool->blocks->s) {
BLOCK *temp;
int blockSize = (int)((unsigned)(pool->end - pool->start)*2U);
size_t bytesToAllocate;
if (blockSize < 0)
return XML_FALSE;
bytesToAllocate = poolBytesToAllocateFor(blockSize);
if (bytesToAllocate == 0)
return XML_FALSE;
temp = (BLOCK *)
pool->mem->realloc_fcn(pool->blocks, (unsigned)bytesToAllocate);
if (temp == NULL)
return XML_FALSE;
pool->blocks = temp;
pool->blocks->size = blockSize;
pool->ptr = pool->blocks->s + (pool->ptr - pool->start);
pool->start = pool->blocks->s;
pool->end = pool->start + blockSize;
}
else {
BLOCK *tem;
int blockSize = (int)(pool->end - pool->start);
size_t bytesToAllocate;
if (blockSize < 0)
return XML_FALSE;
if (blockSize < INIT_BLOCK_SIZE)
blockSize = INIT_BLOCK_SIZE;
else {
/* Detect overflow, avoiding _signed_ overflow undefined behavior */
if ((int)((unsigned)blockSize * 2U) < 0) {
return XML_FALSE;
}
blockSize *= 2;
}
bytesToAllocate = poolBytesToAllocateFor(blockSize);
if (bytesToAllocate == 0)
return XML_FALSE;
tem = (BLOCK *)pool->mem->malloc_fcn(bytesToAllocate);
if (!tem)
return XML_FALSE;
tem->size = blockSize;
tem->next = pool->blocks;
pool->blocks = tem;
if (pool->ptr != pool->start)
memcpy(tem->s, pool->start,
(pool->ptr - pool->start) * sizeof(XML_Char));
pool->ptr = tem->s + (pool->ptr - pool->start);
pool->start = tem->s;
pool->end = tem->s + blockSize;
}
return XML_TRUE;
}
| 0
|
313,915
|
void CL_ServerStatusResponse( netadr_t from, msg_t *msg ) {
char *s;
char info[MAX_INFO_STRING];
int i, l, score, ping;
int len;
serverStatus_t *serverStatus;
serverStatus = NULL;
for ( i = 0; i < MAX_SERVERSTATUSREQUESTS; i++ ) {
if ( NET_CompareAdr( from, cl_serverStatusList[i].address ) ) {
serverStatus = &cl_serverStatusList[i];
break;
}
}
if ( !serverStatus ) {
return;
}
s = MSG_ReadStringLine( msg );
len = 0;
Com_sprintf( &serverStatus->string[len], sizeof( serverStatus->string ) - len, "%s", s );
if ( serverStatus->print ) {
Com_Printf( "Server settings:\n" );
while ( *s ) {
for ( i = 0; i < 2 && *s; i++ ) {
if ( *s == '\\' ) {
s++;
}
l = 0;
while ( *s ) {
info[l++] = *s;
if ( l >= MAX_INFO_STRING - 1 ) {
break;
}
s++;
if ( *s == '\\' ) {
break;
}
}
info[l] = '\0';
if ( i ) {
Com_Printf( "%s\n", info );
} else {
Com_Printf( "%-24s", info );
}
}
}
}
len = strlen( serverStatus->string );
Com_sprintf( &serverStatus->string[len], sizeof( serverStatus->string ) - len, "\\" );
if ( serverStatus->print ) {
Com_Printf( "\nPlayers:\n" );
Com_Printf( "num: score: ping: name:\n" );
}
for ( i = 0, s = MSG_ReadStringLine( msg ); *s; s = MSG_ReadStringLine( msg ), i++ ) {
len = strlen( serverStatus->string );
Com_sprintf( &serverStatus->string[len], sizeof( serverStatus->string ) - len, "\\%s", s );
if ( serverStatus->print ) {
score = ping = 0;
sscanf( s, "%d %d", &score, &ping );
s = strchr( s, ' ' );
if ( s ) {
s = strchr( s + 1, ' ' );
}
if ( s ) {
s++;
} else {
s = "unknown";
}
Com_Printf( "%-2d %-3d %-3d %s\n", i, score, ping, s );
}
}
len = strlen( serverStatus->string );
Com_sprintf( &serverStatus->string[len], sizeof( serverStatus->string ) - len, "\\" );
serverStatus->time = Com_Milliseconds();
serverStatus->address = from;
serverStatus->pending = qfalse;
if (serverStatus->print) {
serverStatus->retrieved = qtrue;
}
}
| 0
|
136,710
|
static VALUE cState_aref(VALUE self, VALUE name)
{
name = rb_funcall(name, i_to_s, 0);
if (RTEST(rb_funcall(self, i_respond_to_p, 1, name))) {
return rb_funcall(self, i_send, 1, name);
} else {
return rb_ivar_get(self, rb_intern_str(rb_str_concat(rb_str_new2("@"), name)));
}
}
| 0
|
46,503
|
bool Jsi_ValueIsUndef(Jsi_Interp *interp, Jsi_Value *pv)
{
return (pv->vt == JSI_VT_UNDEF);
}
| 0
|
20,946
|
static void read_intra_frame_mode_info(VP9_COMMON *const cm, MACROBLOCKD *const xd,
                                       int mi_row, int mi_col, vp9_reader *r) {
  MODE_INFO *const mi = xd->mi[0].src_mi;
  MB_MODE_INFO *const mbmi = &mi->mbmi;
  const MODE_INFO *above_mi = xd->mi[-cm->mi_stride].src_mi;
  const MODE_INFO *left_mi = xd->left_available ? xd->mi[-1].src_mi : NULL;
  const BLOCK_SIZE bsize = mbmi->sb_type;
  int i;
  mbmi->segment_id = read_intra_segment_id(cm, xd, mi_row, mi_col, r);
  mbmi->skip = read_skip(cm, xd, mbmi->segment_id, r);
  mbmi->tx_size = read_tx_size(cm, xd, cm->tx_mode, bsize, 1, r);
  mbmi->ref_frame[0] = INTRA_FRAME;
  mbmi->ref_frame[1] = NONE;
  switch (bsize) {
    case BLOCK_4X4:
      for (i = 0; i < 4; ++i)
        mi->bmi[i].as_mode = read_intra_mode(r, get_y_mode_probs(mi, above_mi, left_mi, i));
      mbmi->mode = mi->bmi[3].as_mode;
      break;
    case BLOCK_4X8:
      mi->bmi[0].as_mode = mi->bmi[2].as_mode = read_intra_mode(r, get_y_mode_probs(mi, above_mi, left_mi, 0));
      mi->bmi[1].as_mode = mi->bmi[3].as_mode = mbmi->mode = read_intra_mode(r, get_y_mode_probs(mi, above_mi, left_mi, 1));
      break;
    case BLOCK_8X4:
      mi->bmi[0].as_mode = mi->bmi[1].as_mode = read_intra_mode(r, get_y_mode_probs(mi, above_mi, left_mi, 0));
      mi->bmi[2].as_mode = mi->bmi[3].as_mode = mbmi->mode = read_intra_mode(r, get_y_mode_probs(mi, above_mi, left_mi, 2));
      break;
    default:
      mbmi->mode = read_intra_mode(r, get_y_mode_probs(mi, above_mi, left_mi, 0));
  }
  mbmi->uv_mode = read_intra_mode(r, vp9_kf_uv_mode_prob[mbmi->mode]);
}
| 0
|
43,352
|
static pj_status_t parse_rr(pj_dns_parsed_rr *rr, pj_pool_t *pool,
const pj_uint8_t *pkt,
const pj_uint8_t *start, const pj_uint8_t *max,
int *parsed_len)
{
const pj_uint8_t *p = start;
int name_len, name_part_len;
pj_status_t status;
/* Get the length of the name */
status = get_name_len(0, pkt, start, max, &name_part_len, &name_len);
if (status != PJ_SUCCESS)
return status;
/* Allocate memory for the name */
rr->name.ptr = (char*) pj_pool_alloc(pool, name_len+4);
rr->name.slen = 0;
/* Get the name */
status = get_name(0, pkt, start, max, &rr->name);
if (status != PJ_SUCCESS)
return status;
p = (start + name_part_len);
/* Check the size can accommodate next few fields. */
if (p+10 > max)
return PJLIB_UTIL_EDNSINSIZE;
/* Get the type */
pj_memcpy(&rr->type, p, 2);
rr->type = pj_ntohs(rr->type);
p += 2;
/* Get the class */
pj_memcpy(&rr->dnsclass, p, 2);
rr->dnsclass = pj_ntohs(rr->dnsclass);
p += 2;
/* Class MUST be IN */
if (rr->dnsclass != 1) {
/* Class is not IN, return error only if type is known (see #1889) */
if (rr->type == PJ_DNS_TYPE_A || rr->type == PJ_DNS_TYPE_AAAA ||
rr->type == PJ_DNS_TYPE_CNAME || rr->type == PJ_DNS_TYPE_NS ||
rr->type == PJ_DNS_TYPE_PTR || rr->type == PJ_DNS_TYPE_SRV)
{
return PJLIB_UTIL_EDNSINCLASS;
}
}
/* Get TTL */
pj_memcpy(&rr->ttl, p, 4);
rr->ttl = pj_ntohl(rr->ttl);
p += 4;
/* Get rdlength */
pj_memcpy(&rr->rdlength, p, 2);
rr->rdlength = pj_ntohs(rr->rdlength);
p += 2;
/* Check that length is valid */
if (p + rr->rdlength > max)
return PJLIB_UTIL_EDNSINSIZE;
/* Parse some well known records */
if (rr->type == PJ_DNS_TYPE_A) {
pj_memcpy(&rr->rdata.a.ip_addr, p, 4);
p += 4;
} else if (rr->type == PJ_DNS_TYPE_AAAA) {
pj_memcpy(&rr->rdata.aaaa.ip_addr, p, 16);
p += 16;
} else if (rr->type == PJ_DNS_TYPE_CNAME ||
rr->type == PJ_DNS_TYPE_NS ||
rr->type == PJ_DNS_TYPE_PTR)
{
/* Get the length of the target name */
status = get_name_len(0, pkt, p, max, &name_part_len, &name_len);
if (status != PJ_SUCCESS)
return status;
/* Allocate memory for the name */
rr->rdata.cname.name.ptr = (char*) pj_pool_alloc(pool, name_len);
rr->rdata.cname.name.slen = 0;
/* Get the name */
status = get_name(0, pkt, p, max, &rr->rdata.cname.name);
if (status != PJ_SUCCESS)
return status;
p += name_part_len;
} else if (rr->type == PJ_DNS_TYPE_SRV) {
/* Priority */
pj_memcpy(&rr->rdata.srv.prio, p, 2);
rr->rdata.srv.prio = pj_ntohs(rr->rdata.srv.prio);
p += 2;
/* Weight */
pj_memcpy(&rr->rdata.srv.weight, p, 2);
rr->rdata.srv.weight = pj_ntohs(rr->rdata.srv.weight);
p += 2;
/* Port */
pj_memcpy(&rr->rdata.srv.port, p, 2);
rr->rdata.srv.port = pj_ntohs(rr->rdata.srv.port);
p += 2;
/* Get the length of the target name */
status = get_name_len(0, pkt, p, max, &name_part_len, &name_len);
if (status != PJ_SUCCESS)
return status;
/* Allocate memory for the name */
rr->rdata.srv.target.ptr = (char*) pj_pool_alloc(pool, name_len);
rr->rdata.srv.target.slen = 0;
/* Get the name */
status = get_name(0, pkt, p, max, &rr->rdata.srv.target);
if (status != PJ_SUCCESS)
return status;
p += name_part_len;
} else {
/* Copy the raw data */
rr->data = pj_pool_alloc(pool, rr->rdlength);
pj_memcpy(rr->data, p, rr->rdlength);
p += rr->rdlength;
}
*parsed_len = (int)(p - start);
return PJ_SUCCESS;
}
| 0
|
347,372
|
int ZipStreamBuf::readFromDevice(char* buffer, std::streamsize length)
{
if (!_ptrBuf) return 0; // directory entry
_ptrBuf->read(buffer, length);
int cnt = static_cast<int>(_ptrBuf->gcount());
if (cnt > 0)
{
_crc32.update(buffer, cnt);
}
else
{
if (_crc32.checksum() != _expectedCrc32)
{
if (_checkCRC)
throw ZipException("CRC failure");
else
{
// the CRC value is written directly after the data block
// parse it directly from the input stream
ZipDataInfo nfo(*_pIstr, false);
// now push back the header to the stream, so that the ZipLocalFileHeader can read it
Poco::Int32 size = static_cast<Poco::Int32>(nfo.getFullHeaderSize());
_expectedCrc32 = nfo.getCRC32();
const char* rawHeader = nfo.getRawHeader();
_pIstr->seekg(-size, std::ios::cur);
if (!_pIstr->good()) throw Poco::IOException("Failed to seek on input stream");
if (!crcValid())
throw ZipException("CRC failure");
}
}
}
return cnt;
}
| 1
|
440,596
|
static struct rtable *geneve_get_v4_rt(struct sk_buff *skb,
struct net_device *dev,
struct geneve_sock *gs4,
struct flowi4 *fl4,
const struct ip_tunnel_info *info)
{
bool use_cache = ip_tunnel_dst_cache_usable(skb, info);
struct geneve_dev *geneve = netdev_priv(dev);
struct dst_cache *dst_cache;
struct rtable *rt = NULL;
__u8 tos;
if (!gs4)
return ERR_PTR(-EIO);
memset(fl4, 0, sizeof(*fl4));
fl4->flowi4_mark = skb->mark;
fl4->flowi4_proto = IPPROTO_UDP;
fl4->daddr = info->key.u.ipv4.dst;
fl4->saddr = info->key.u.ipv4.src;
tos = info->key.tos;
if ((tos == 1) && !geneve->collect_md) {
tos = ip_tunnel_get_dsfield(ip_hdr(skb), skb);
use_cache = false;
}
fl4->flowi4_tos = RT_TOS(tos);
dst_cache = (struct dst_cache *)&info->dst_cache;
if (use_cache) {
rt = dst_cache_get_ip4(dst_cache, &fl4->saddr);
if (rt)
return rt;
}
rt = ip_route_output_key(geneve->net, fl4);
if (IS_ERR(rt)) {
netdev_dbg(dev, "no route to %pI4\n", &fl4->daddr);
return ERR_PTR(-ENETUNREACH);
}
if (rt->dst.dev == dev) { /* is this necessary? */
netdev_dbg(dev, "circular route to %pI4\n", &fl4->daddr);
ip_rt_put(rt);
return ERR_PTR(-ELOOP);
}
if (use_cache)
dst_cache_set_ip4(dst_cache, &rt->dst, fl4->saddr);
return rt;
}
| 0
|
286,188
|
static void copy_asoundrc(void) {
char *src = RUN_ASOUNDRC_FILE ;
char *dest;
if (asprintf(&dest, "%s/.asoundrc", cfg.homedir) == -1)
errExit("asprintf");
if (is_link(dest)) {
fprintf(stderr, "Error: %s is a symbolic link\n", dest);
exit(1);
}
copy_file_as_user(src, dest, getuid(), getgid(), S_IRUSR | S_IWUSR);
fs_logger2("clone", dest);
unlink(src);
}
| 0
|
284,464
|
construct_le_tlv(struct sc_apdu *apdu, unsigned char *apdu_buf, size_t data_tlv_len,
unsigned char *le_tlv, size_t * le_tlv_len, const unsigned char key_type)
{
size_t block_size = (KEY_TYPE_AES == key_type ? 16 : 8);
*(apdu_buf + block_size + data_tlv_len) = 0x97;
if (apdu->le > 0x7F) {
/* Le' > 0x7E, use extended APDU */
*(apdu_buf + block_size + data_tlv_len + 1) = 2;
*(apdu_buf + block_size + data_tlv_len + 2) = (unsigned char)(apdu->le / 0x100);
*(apdu_buf + block_size + data_tlv_len + 3) = (unsigned char)(apdu->le % 0x100);
memcpy(le_tlv, apdu_buf + block_size + data_tlv_len, 4);
*le_tlv_len = 4;
}
else {
*(apdu_buf + block_size + data_tlv_len + 1) = 1;
*(apdu_buf + block_size + data_tlv_len + 2) = (unsigned char)apdu->le;
memcpy(le_tlv, apdu_buf + block_size + data_tlv_len, 3);
*le_tlv_len = 3;
}
return 0;
}
| 0
|
238,958
|
static bool isCandidateForOpaquenessTest(RenderBox* childBox)
{
RenderStyle* childStyle = childBox->style();
if (childStyle->position() != StaticPosition && childBox->containingBlock() != childBox->parent())
return false;
if (childStyle->visibility() != VISIBLE || childStyle->shapeOutside())
return false;
if (!childBox->width() || !childBox->height())
return false;
if (RenderLayer* childLayer = childBox->layer()) {
if (childLayer->compositingState() != NotComposited)
return false;
if (!childStyle->hasAutoZIndex())
return false;
if (childLayer->hasTransform() || childLayer->isTransparent() || childLayer->hasFilter())
return false;
if (childBox->hasOverflowClip() && childStyle->hasBorderRadius())
return false;
}
return true;
}
| 0
|
455,710
|
TEST_F(QueryPlannerTest, MultikeyDoubleDottedElemMatch) {
// true means multikey
addIndex(BSON("a.b.x" << 1 << "a.b.y" << 1), true);
runQuery(fromjson("{a: {$elemMatch: {b: {$elemMatch: {x: 1, y: 1}}}}}"));
assertNumSolutions(2U);
assertSolutionExists("{cscan: {dir: 1}}");
assertSolutionExists(
"{fetch: {node: {ixscan: {pattern: {'a.b.x':1,'a.b.y':1}, bounds: "
"{'a.b.x': [[1,1,true,true]], "
" 'a.b.y': [[1,1,true,true]]}}}}}");
}
| 0
|
490,695
|
storageVolDelete(virStorageVolPtr vol,
unsigned int flags)
{
virStoragePoolObj *obj;
virStorageBackend *backend;
virStorageVolDef *voldef = NULL;
int ret = -1;
if (!(voldef = virStorageVolDefFromVol(vol, &obj, &backend)))
return -1;
if (virStorageVolDeleteEnsureACL(vol->conn, virStoragePoolObjGetDef(obj),
voldef) < 0)
goto cleanup;
if (voldef->in_use) {
virReportError(VIR_ERR_OPERATION_INVALID,
_("volume '%s' is still in use."),
voldef->name);
goto cleanup;
}
if (voldef->building) {
virReportError(VIR_ERR_OPERATION_INVALID,
_("volume '%s' is still being allocated."),
voldef->name);
goto cleanup;
}
if (storageVolDeleteInternal(backend, obj, voldef, flags, true) < 0)
goto cleanup;
ret = 0;
cleanup:
virStoragePoolObjEndAPI(&obj);
return ret;
}
| 0
|