| idx (int64) | func (string) | target (int64) |
|---|---|---|
521,376
|
uchar *get_plugin_hash_key(const uchar *buff, size_t *length,
my_bool not_used __attribute__((unused)))
{
struct st_plugin_int *plugin= (st_plugin_int *)buff;
*length= (uint)plugin->name.length;
return((uchar *)plugin->name.str);
}
| 0
|
306,206
|
CotpConnection_getLocalRef(CotpConnection* self)
{
return self->localRef;
}
| 0
|
271,582
|
TIFFLastDirectory(TIFF* tif)
{
return (tif->tif_nextdiroff == 0);
}
| 0
|
505,763
|
void ssl3_free(SSL *s)
{
if(s == NULL)
return;
#ifdef TLSEXT_TYPE_opaque_prf_input
if (s->s3->client_opaque_prf_input != NULL)
OPENSSL_free(s->s3->client_opaque_prf_input);
if (s->s3->server_opaque_prf_input != NULL)
OPENSSL_free(s->s3->server_opaque_prf_input);
#endif
ssl3_cleanup_key_block(s);
if (s->s3->rbuf.buf != NULL)
ssl3_release_read_buffer(s);
if (s->s3->wbuf.buf != NULL)
ssl3_release_write_buffer(s);
if (s->s3->rrec.comp != NULL)
OPENSSL_free(s->s3->rrec.comp);
#ifndef OPENSSL_NO_DH
if (s->s3->tmp.dh != NULL)
DH_free(s->s3->tmp.dh);
#endif
#ifndef OPENSSL_NO_ECDH
if (s->s3->tmp.ecdh != NULL)
EC_KEY_free(s->s3->tmp.ecdh);
#endif
if (s->s3->tmp.ca_names != NULL)
sk_X509_NAME_pop_free(s->s3->tmp.ca_names,X509_NAME_free);
if (s->s3->handshake_buffer) {
BIO_free(s->s3->handshake_buffer);
}
if (s->s3->handshake_dgst) ssl3_free_digest_list(s);
#ifndef OPENSSL_NO_SRP
SSL_SRP_CTX_free(s);
#endif
OPENSSL_cleanse(s->s3,sizeof *s->s3);
OPENSSL_free(s->s3);
s->s3=NULL;
}
| 0
|
410,901
|
bool WriteUint8(std::uint8_t val, std::FILE* fileptr) {
if (fileptr == nullptr)
return false;
return (std::fputc(val, fileptr) == val);
}
| 0
|
98,778
|
_zip_cdir_free(zip_cdir_t *cd)
{
zip_uint64_t i;
if (!cd)
return;
for (i=0; i<cd->nentry; i++)
_zip_entry_finalize(cd->entry+i);
free(cd->entry);
_zip_string_free(cd->comment);
free(cd);
}
| 0
|
339,707
|
static av_always_inline void idct_internal(uint8_t *dst, DCTELEM *block, int stride, int block_stride, int shift, int add){
int i;
uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
block[0] += 1<<(shift-1);
for(i=0; i<4; i++){
const int z0= block[0 + block_stride*i] + block[2 + block_stride*i];
const int z1= block[0 + block_stride*i] - block[2 + block_stride*i];
const int z2= (block[1 + block_stride*i]>>1) - block[3 + block_stride*i];
const int z3= block[1 + block_stride*i] + (block[3 + block_stride*i]>>1);
block[0 + block_stride*i]= z0 + z3;
block[1 + block_stride*i]= z1 + z2;
block[2 + block_stride*i]= z1 - z2;
block[3 + block_stride*i]= z0 - z3;
}
for(i=0; i<4; i++){
const int z0= block[i + block_stride*0] + block[i + block_stride*2];
const int z1= block[i + block_stride*0] - block[i + block_stride*2];
const int z2= (block[i + block_stride*1]>>1) - block[i + block_stride*3];
const int z3= block[i + block_stride*1] + (block[i + block_stride*3]>>1);
dst[i + 0*stride]= cm[ add*dst[i + 0*stride] + ((z0 + z3) >> shift) ];
dst[i + 1*stride]= cm[ add*dst[i + 1*stride] + ((z1 + z2) >> shift) ];
dst[i + 2*stride]= cm[ add*dst[i + 2*stride] + ((z1 - z2) >> shift) ];
dst[i + 3*stride]= cm[ add*dst[i + 3*stride] + ((z0 - z3) >> shift) ];
}
}
| 0
|
228,063
|
const std::string Extension::VersionString() const {
return version()->GetString();
}
| 0
|
211,432
|
void HttpProxyClientSocket::OnIOComplete(int result) {
DCHECK_NE(STATE_NONE, next_state_);
DCHECK_NE(STATE_DONE, next_state_);
int rv = DoLoop(result);
if (rv != ERR_IO_PENDING)
DoCallback(rv);
}
| 0
|
351,826
|
Reprog *regcompx(void *(*alloc)(void *ctx, void *p, int n), void *ctx,
const char *pattern, int cflags, const char **errorp)
{
struct cstate g;
Renode *node;
Reinst *split, *jump;
int i, n;
g.pstart = NULL;
g.prog = NULL;
if (setjmp(g.kaboom)) {
if (errorp) *errorp = g.error;
alloc(ctx, g.pstart, 0);
alloc(ctx, g.prog, 0);
return NULL;
}
g.prog = alloc(ctx, NULL, sizeof (Reprog));
if (!g.prog)
die(&g, "cannot allocate regular expression");
n = strlen(pattern) * 2;
if (n > REG_MAXPROG)
die(&g, "program too large");
if (n > 0) {
g.pstart = g.pend = alloc(ctx, NULL, sizeof (Renode) * n);
if (!g.pstart)
die(&g, "cannot allocate regular expression parse list");
}
g.source = pattern;
g.ncclass = 0;
g.nsub = 1;
for (i = 0; i < REG_MAXSUB; ++i)
g.sub[i] = 0;
g.prog->flags = cflags;
next(&g);
node = parsealt(&g);
if (g.lookahead == ')')
die(&g, "unmatched ')'");
if (g.lookahead != EOF)
die(&g, "syntax error");
#ifdef TEST
dumpnode(node);
putchar('\n');
#endif
n = 6 + count(&g, node);
if (n < 0 || n > REG_MAXPROG)
die(&g, "program too large");
g.prog->nsub = g.nsub;
g.prog->start = g.prog->end = alloc(ctx, NULL, n * sizeof (Reinst));
if (!g.prog->start)
die(&g, "cannot allocate regular expression instruction list");
split = emit(g.prog, I_SPLIT);
split->x = split + 3;
split->y = split + 1;
emit(g.prog, I_ANYNL);
jump = emit(g.prog, I_JUMP);
jump->x = split;
emit(g.prog, I_LPAR);
compile(g.prog, node);
emit(g.prog, I_RPAR);
emit(g.prog, I_END);
#ifdef TEST
dumpprog(g.prog);
#endif
alloc(ctx, g.pstart, 0);
if (errorp) *errorp = NULL;
return g.prog;
}
| 1
|
362,182
|
event_help( void )
{
grEvent dummy_event;
FTDemo_Display_Clear( display );
grGotoxy( 0, 0 );
grSetMargin( 2, 1 );
grGotobitmap( display->bitmap );
grWriteln( "FreeType Glyph Viewer - part of the FreeType test suite" );
grLn();
grWriteln( "This program is used to display all glyphs from one or" );
grWriteln( "several font files, with the FreeType library." );
grLn();
grWriteln( "Use the following keys:" );
grLn();
grWriteln( " F1, ? display this help screen" );
grLn();
grWriteln( " a toggle anti-aliasing" );
grWriteln( " b toggle embedded bitmaps" );
grWriteln( " c toggle between cache modes" );
grWriteln( " f toggle forced auto-hinting" );
grWriteln( " h toggle outline hinting" );
grWriteln( " l toggle low precision rendering" );
grLn();
grWriteln( " L cycle through LCD modes" );
grWriteln( " space cycle through rendering modes" );
grWriteln( " 1-6 select rendering mode" );
grLn();
grWriteln( " e, E adjust emboldening" );
grWriteln( " s, S adjust slanting" );
grLn();
grWriteln( " F toggle custom LCD filter mode" );
grWriteln( " [, ] select custom LCD filter weight" );
grWriteln( " -, +(=) adjust selected custom LCD filter weight" );
grLn();
grWriteln( " G show gamma ramp" );
grWriteln( " g, v adjust gamma value" );
grLn();
grWriteln( " p, n select previous/next font" );
grLn();
grWriteln( " Up, Down adjust pointsize by 1 unit" );
grWriteln( " PgUp, PgDn adjust pointsize by 10 units" );
grLn();
grWriteln( " Left, Right adjust index by 1" );
grWriteln( " F7, F8 adjust index by 10" );
grWriteln( " F9, F10 adjust index by 100" );
grWriteln( " F11, F12 adjust index by 1000" );
grLn();
grWriteln( "press any key to exit this help screen" );
grRefreshSurface( display->surface );
grListenSurface( display->surface, gr_event_key, &dummy_event );
}
| 0
|
107,429
|
static void bond_setup_by_slave(struct net_device *bond_dev,
struct net_device *slave_dev)
{
struct bonding *bond = netdev_priv(bond_dev);
bond_dev->header_ops = slave_dev->header_ops;
bond_dev->type = slave_dev->type;
bond_dev->hard_header_len = slave_dev->hard_header_len;
bond_dev->addr_len = slave_dev->addr_len;
memcpy(bond_dev->broadcast, slave_dev->broadcast,
slave_dev->addr_len);
bond->setup_by_slave = 1;
}
| 0
|
378,912
|
int usage_advanced()
{
DISPLAY( "\nAdvanced options :\n");
DISPLAY( " -c# : test only compression function # [1-%i]\n", NB_COMPRESSION_ALGORITHMS);
DISPLAY( " -d# : test only decompression function # [1-%i]\n", NB_DECOMPRESSION_ALGORITHMS);
DISPLAY( " -i# : iteration loops [1-9](default : %i)\n", NBLOOPS);
DISPLAY( " -B# : Block size [4-7](default : 7)\n");
//DISPLAY( " -BD : Block dependency (improve compression ratio)\n");
return 0;
}
| 0
|
162,528
|
ContainerChunk::ContainerChunk( ContainerChunk* parent, XMP_Uns32 id, XMP_Uns32 containerType ) : Chunk( NULL /* !! */, chunk_CONTAINER, id )
{
XMP_Enforce( parent != NULL );
this->containerType = containerType;
this->newSize = 12;
this->parent = parent;
chunkVect* siblings = &parent->children;
siblings->push_back( this );
}
| 0
|
279,007
|
int num_decoded_frames() { return num_decoded_frames_; }
| 0
|
415,928
|
gx_device_set_media_from_hwsize(gx_device *dev)
{
int rot = (dev->LeadingEdge & 1);
double x = dev->width * 72.0 / dev->HWResolution[0];
double y = dev->height * 72.0 / dev->HWResolution[1];
if (rot) {
dev->MediaSize[1] = x;
dev->MediaSize[0] = y;
} else {
dev->MediaSize[0] = x;
dev->MediaSize[1] = y;
}
}
| 0
|
277,911
|
void RenderWidgetHostViewGtk::AccessibilityDoDefaultAction(int acc_obj_id) {
if (!host_)
return;
host_->AccessibilityDoDefaultAction(acc_obj_id);
}
| 0
|
153,429
|
void sqlite3AlterBeginAddColumn(Parse *pParse, SrcList *pSrc){
Table *pNew;
Table *pTab;
int iDb;
int i;
int nAlloc;
sqlite3 *db = pParse->db;
/* Look up the table being altered. */
assert( pParse->pNewTable==0 );
assert( sqlite3BtreeHoldsAllMutexes(db) );
if( db->mallocFailed ) goto exit_begin_add_column;
pTab = sqlite3LocateTableItem(pParse, 0, &pSrc->a[0]);
if( !pTab ) goto exit_begin_add_column;
#ifndef SQLITE_OMIT_VIRTUALTABLE
if( IsVirtual(pTab) ){
sqlite3ErrorMsg(pParse, "virtual tables may not be altered");
goto exit_begin_add_column;
}
#endif
/* Make sure this is not an attempt to ALTER a view. */
if( pTab->pSelect ){
sqlite3ErrorMsg(pParse, "Cannot add a column to a view");
goto exit_begin_add_column;
}
if( SQLITE_OK!=isAlterableTable(pParse, pTab) ){
goto exit_begin_add_column;
}
sqlite3MayAbort(pParse);
assert( pTab->addColOffset>0 );
iDb = sqlite3SchemaToIndex(db, pTab->pSchema);
/* Put a copy of the Table struct in Parse.pNewTable for the
** sqlite3AddColumn() function and friends to modify. But modify
** the name by adding an "sqlite_altertab_" prefix. By adding this
** prefix, we insure that the name will not collide with an existing
** table because user table are not allowed to have the "sqlite_"
** prefix on their name.
*/
pNew = (Table*)sqlite3DbMallocZero(db, sizeof(Table));
if( !pNew ) goto exit_begin_add_column;
pParse->pNewTable = pNew;
pNew->nTabRef = 1;
pNew->nCol = pTab->nCol;
assert( pNew->nCol>0 );
nAlloc = (((pNew->nCol-1)/8)*8)+8;
assert( nAlloc>=pNew->nCol && nAlloc%8==0 && nAlloc-pNew->nCol<8 );
pNew->aCol = (Column*)sqlite3DbMallocZero(db, sizeof(Column)*nAlloc);
pNew->zName = sqlite3MPrintf(db, "sqlite_altertab_%s", pTab->zName);
if( !pNew->aCol || !pNew->zName ){
assert( db->mallocFailed );
goto exit_begin_add_column;
}
memcpy(pNew->aCol, pTab->aCol, sizeof(Column)*pNew->nCol);
for(i=0; i<pNew->nCol; i++){
Column *pCol = &pNew->aCol[i];
pCol->zName = sqlite3DbStrDup(db, pCol->zName);
pCol->zColl = 0;
pCol->pDflt = 0;
}
pNew->pSchema = db->aDb[iDb].pSchema;
pNew->addColOffset = pTab->addColOffset;
pNew->nTabRef = 1;
exit_begin_add_column:
sqlite3SrcListDelete(db, pSrc);
return;
}
| 0
|
416,340
|
int hidp_connection_del(struct hidp_conndel_req *req)
{
u32 valid_flags = BIT(HIDP_VIRTUAL_CABLE_UNPLUG);
struct hidp_session *session;
if (req->flags & ~valid_flags)
return -EINVAL;
session = hidp_session_find(&req->bdaddr);
if (!session)
return -ENOENT;
if (req->flags & BIT(HIDP_VIRTUAL_CABLE_UNPLUG))
hidp_send_ctrl_message(session,
HIDP_TRANS_HID_CONTROL |
HIDP_CTRL_VIRTUAL_CABLE_UNPLUG,
NULL, 0);
else
l2cap_unregister_user(session->conn, &session->user);
hidp_session_put(session);
return 0;
}
| 0
|
170,205
|
static void VoidMethodArrayBufferViewArgMethod(const v8::FunctionCallbackInfo<v8::Value>& info) {
ExceptionState exception_state(info.GetIsolate(), ExceptionState::kExecutionContext, "TestObject", "voidMethodArrayBufferViewArg");
TestObject* impl = V8TestObject::ToImpl(info.Holder());
if (UNLIKELY(info.Length() < 1)) {
exception_state.ThrowTypeError(ExceptionMessages::NotEnoughArguments(1, info.Length()));
return;
}
NotShared<TestArrayBufferView> array_buffer_view_arg;
array_buffer_view_arg = ToNotShared<NotShared<TestArrayBufferView>>(info.GetIsolate(), info[0], exception_state);
if (exception_state.HadException())
return;
if (!array_buffer_view_arg) {
exception_state.ThrowTypeError(ExceptionMessages::ArgumentNotOfType(0, "ArrayBufferView"));
return;
}
impl->voidMethodArrayBufferViewArg(array_buffer_view_arg);
}
| 0
|
352,887
|
__libc_res_nquery(res_state statp,
const char *name, /* domain name */
int class, int type, /* class and type of query */
u_char *answer, /* buffer to put answer */
int anslen, /* size of answer buffer */
u_char **answerp, /* if buffer needs to be enlarged */
u_char **answerp2,
int *nanswerp2,
int *resplen2,
int *answerp2_malloced)
{
HEADER *hp = (HEADER *) answer;
HEADER *hp2;
int n, use_malloc = 0;
u_int oflags = statp->_flags;
size_t bufsize = (type == T_UNSPEC ? 2 : 1) * QUERYSIZE;
u_char *buf = alloca (bufsize);
u_char *query1 = buf;
int nquery1 = -1;
u_char *query2 = NULL;
int nquery2 = 0;
again:
hp->rcode = NOERROR; /* default */
#ifdef DEBUG
if (statp->options & RES_DEBUG)
printf(";; res_query(%s, %d, %d)\n", name, class, type);
#endif
if (type == T_UNSPEC)
{
n = res_nmkquery(statp, QUERY, name, class, T_A, NULL, 0, NULL,
query1, bufsize);
if (n > 0)
{
if ((oflags & RES_F_EDNS0ERR) == 0
&& (statp->options & (RES_USE_EDNS0|RES_USE_DNSSEC)) != 0)
{
n = __res_nopt(statp, n, query1, bufsize, anslen / 2);
if (n < 0)
goto unspec_nomem;
}
nquery1 = n;
/* Align the buffer. */
int npad = ((nquery1 + __alignof__ (HEADER) - 1)
& ~(__alignof__ (HEADER) - 1)) - nquery1;
if (n > bufsize - npad)
{
n = -1;
goto unspec_nomem;
}
int nused = n + npad;
query2 = buf + nused;
n = res_nmkquery(statp, QUERY, name, class, T_AAAA, NULL, 0,
NULL, query2, bufsize - nused);
if (n > 0
&& (oflags & RES_F_EDNS0ERR) == 0
&& (statp->options & (RES_USE_EDNS0|RES_USE_DNSSEC)) != 0)
n = __res_nopt(statp, n, query2, bufsize - nused - n,
anslen / 2);
nquery2 = n;
}
unspec_nomem:;
}
else
{
n = res_nmkquery(statp, QUERY, name, class, type, NULL, 0, NULL,
query1, bufsize);
if (n > 0
&& (oflags & RES_F_EDNS0ERR) == 0
&& (statp->options & (RES_USE_EDNS0|RES_USE_DNSSEC)) != 0)
n = __res_nopt(statp, n, query1, bufsize, anslen);
nquery1 = n;
}
if (__builtin_expect (n <= 0, 0) && !use_malloc) {
/* Retry just in case res_nmkquery failed because of too
short buffer. Shouldn't happen. */
bufsize = (type == T_UNSPEC ? 2 : 1) * MAXPACKET;
buf = malloc (bufsize);
if (buf != NULL) {
query1 = buf;
use_malloc = 1;
goto again;
}
}
if (__glibc_unlikely (n <= 0)) {
/* If the query choked with EDNS0, retry without EDNS0. */
if ((statp->options & (RES_USE_EDNS0|RES_USE_DNSSEC)) != 0
&& ((oflags ^ statp->_flags) & RES_F_EDNS0ERR) != 0) {
statp->_flags |= RES_F_EDNS0ERR;
#ifdef DEBUG
if (statp->options & RES_DEBUG)
printf(";; res_nquery: retry without EDNS0\n");
#endif
goto again;
}
#ifdef DEBUG
if (statp->options & RES_DEBUG)
printf(";; res_query: mkquery failed\n");
#endif
RES_SET_H_ERRNO(statp, NO_RECOVERY);
if (use_malloc)
free (buf);
return (n);
}
assert (answerp == NULL || (void *) *answerp == (void *) answer);
n = __libc_res_nsend(statp, query1, nquery1, query2, nquery2, answer,
anslen, answerp, answerp2, nanswerp2, resplen2,
answerp2_malloced);
if (use_malloc)
free (buf);
if (n < 0) {
#ifdef DEBUG
if (statp->options & RES_DEBUG)
printf(";; res_query: send error\n");
#endif
RES_SET_H_ERRNO(statp, TRY_AGAIN);
return (n);
}
if (answerp != NULL)
/* __libc_res_nsend might have reallocated the buffer. */
hp = (HEADER *) *answerp;
/* We simplify the following tests by assigning HP to HP2 or
vice versa. It is easy to verify that this is the same as
ignoring all tests of HP or HP2. */
if (answerp2 == NULL || *resplen2 < (int) sizeof (HEADER))
{
hp2 = hp;
}
else
{
hp2 = (HEADER *) *answerp2;
if (n < (int) sizeof (HEADER))
{
hp = hp2;
}
}
/* Make sure both hp and hp2 are defined */
assert((hp != NULL) && (hp2 != NULL));
if ((hp->rcode != NOERROR || ntohs(hp->ancount) == 0)
&& (hp2->rcode != NOERROR || ntohs(hp2->ancount) == 0)) {
#ifdef DEBUG
if (statp->options & RES_DEBUG) {
printf(";; rcode = %d, ancount=%d\n", hp->rcode,
ntohs(hp->ancount));
if (hp != hp2)
printf(";; rcode2 = %d, ancount2=%d\n", hp2->rcode,
ntohs(hp2->ancount));
}
#endif
switch (hp->rcode == NOERROR ? hp2->rcode : hp->rcode) {
case NXDOMAIN:
if ((hp->rcode == NOERROR && ntohs (hp->ancount) != 0)
|| (hp2->rcode == NOERROR
&& ntohs (hp2->ancount) != 0))
goto success;
RES_SET_H_ERRNO(statp, HOST_NOT_FOUND);
break;
case SERVFAIL:
RES_SET_H_ERRNO(statp, TRY_AGAIN);
break;
case NOERROR:
if (ntohs (hp->ancount) != 0
|| ntohs (hp2->ancount) != 0)
goto success;
RES_SET_H_ERRNO(statp, NO_DATA);
break;
case FORMERR:
case NOTIMP:
/* Servers must not reply to AAAA queries with
NOTIMP etc but some of them do. */
if ((hp->rcode == NOERROR && ntohs (hp->ancount) != 0)
|| (hp2->rcode == NOERROR
&& ntohs (hp2->ancount) != 0))
goto success;
/* FALLTHROUGH */
case REFUSED:
default:
RES_SET_H_ERRNO(statp, NO_RECOVERY);
break;
}
return (-1);
}
success:
return (n);
}
| 1
|
73,334
|
static bool hwsim_chans_compat(struct ieee80211_channel *c1,
struct ieee80211_channel *c2)
{
if (!c1 || !c2)
return false;
return c1->center_freq == c2->center_freq;
}
| 0
|
100,822
|
std::shared_ptr<Wasm> createThreadLocalWasm(Wasm& base_wasm, absl::string_view configuration,
Event::Dispatcher& dispatcher) {
auto wasm = std::make_shared<Wasm>(base_wasm, dispatcher);
Context* root_context = wasm->start();
if (!wasm->configure(root_context, configuration)) {
throw WasmException("Failed to configure WASM code");
}
if (!wasm->vm_id().empty()) {
local_wasms[wasm->vm_id()] = wasm;
}
return wasm;
}
| 0
|
134,143
|
void test_nghttp2_session_set_option(void) {
nghttp2_session *session;
nghttp2_session_callbacks callbacks;
nghttp2_option *option;
nghttp2_hd_deflater *deflater;
int rv;
memset(&callbacks, 0, sizeof(nghttp2_session_callbacks));
callbacks.send_callback = null_send_callback;
/* Test for nghttp2_option_set_no_auto_window_update */
nghttp2_option_new(&option);
nghttp2_option_set_no_auto_window_update(option, 1);
nghttp2_session_client_new2(&session, &callbacks, NULL, option);
CU_ASSERT(session->opt_flags & NGHTTP2_OPTMASK_NO_AUTO_WINDOW_UPDATE);
nghttp2_session_del(session);
nghttp2_option_del(option);
/* Test for nghttp2_option_set_peer_max_concurrent_streams */
nghttp2_option_new(&option);
nghttp2_option_set_peer_max_concurrent_streams(option, 100);
nghttp2_session_client_new2(&session, &callbacks, NULL, option);
CU_ASSERT(100 == session->remote_settings.max_concurrent_streams);
nghttp2_session_del(session);
nghttp2_option_del(option);
/* Test for nghttp2_option_set_max_reserved_remote_streams */
nghttp2_option_new(&option);
nghttp2_option_set_max_reserved_remote_streams(option, 99);
nghttp2_session_client_new2(&session, &callbacks, NULL, option);
CU_ASSERT(99 == session->max_incoming_reserved_streams);
nghttp2_session_del(session);
nghttp2_option_del(option);
/* Test for nghttp2_option_set_no_auto_ping_ack */
nghttp2_option_new(&option);
nghttp2_option_set_no_auto_ping_ack(option, 1);
nghttp2_session_client_new2(&session, &callbacks, NULL, option);
CU_ASSERT(session->opt_flags & NGHTTP2_OPTMASK_NO_AUTO_PING_ACK);
nghttp2_session_del(session);
nghttp2_option_del(option);
/* Test for nghttp2_option_set_max_deflate_dynamic_table_size */
nghttp2_option_new(&option);
nghttp2_option_set_max_deflate_dynamic_table_size(option, 0);
nghttp2_session_client_new2(&session, &callbacks, NULL, option);
deflater = &session->hd_deflater;
rv = nghttp2_submit_request(session, NULL, reqnv, ARRLEN(reqnv), NULL, NULL);
CU_ASSERT(1 == rv);
rv = nghttp2_session_send(session);
CU_ASSERT(0 == rv);
CU_ASSERT(0 == deflater->deflate_hd_table_bufsize_max);
CU_ASSERT(0 == deflater->ctx.hd_table_bufsize);
nghttp2_session_del(session);
nghttp2_option_del(option);
}
| 0
|
42,543
|
add_bwrap_wrapper (FlatpakBwrap *bwrap,
const char *app_info_path,
GError **error)
{
glnx_autofd int app_info_fd = -1;
g_auto(GLnxDirFdIterator) dir_iter = { 0 };
struct dirent *dent;
g_autofree char *user_runtime_dir = flatpak_get_real_xdg_runtime_dir ();
g_autofree char *proxy_socket_dir = g_build_filename (user_runtime_dir, ".dbus-proxy/", NULL);
app_info_fd = open (app_info_path, O_RDONLY | O_CLOEXEC);
if (app_info_fd == -1)
return glnx_throw_errno_prefix (error, _("Failed to open app info file"));
if (!glnx_dirfd_iterator_init_at (AT_FDCWD, "/", FALSE, &dir_iter, error))
return FALSE;
flatpak_bwrap_add_arg (bwrap, flatpak_get_bwrap ());
while (TRUE)
{
glnx_autofd int o_path_fd = -1;
struct statfs stfs;
if (!glnx_dirfd_iterator_next_dent_ensure_dtype (&dir_iter, &dent, NULL, error))
return FALSE;
if (dent == NULL)
break;
if (strcmp (dent->d_name, ".flatpak-info") == 0)
continue;
/* O_PATH + fstatfs is the magic that we need to statfs without automounting the target */
o_path_fd = openat (dir_iter.fd, dent->d_name, O_PATH | O_NOFOLLOW | O_CLOEXEC);
if (o_path_fd == -1 || fstatfs (o_path_fd, &stfs) != 0 || stfs.f_type == AUTOFS_SUPER_MAGIC)
continue; /* AUTOFS mounts are risky and can cause us to block (see issue #1633), so ignore it. Its unlikely the proxy needs such a directory. */
if (dent->d_type == DT_DIR)
{
if (strcmp (dent->d_name, "tmp") == 0 ||
strcmp (dent->d_name, "var") == 0 ||
strcmp (dent->d_name, "run") == 0)
flatpak_bwrap_add_arg (bwrap, "--bind");
else
flatpak_bwrap_add_arg (bwrap, "--ro-bind");
flatpak_bwrap_add_arg_printf (bwrap, "/%s", dent->d_name);
flatpak_bwrap_add_arg_printf (bwrap, "/%s", dent->d_name);
}
else if (dent->d_type == DT_LNK)
{
g_autofree gchar *target = NULL;
target = glnx_readlinkat_malloc (dir_iter.fd, dent->d_name,
NULL, error);
if (target == NULL)
return FALSE;
flatpak_bwrap_add_args (bwrap, "--symlink", target, NULL);
flatpak_bwrap_add_arg_printf (bwrap, "/%s", dent->d_name);
}
}
flatpak_bwrap_add_args (bwrap, "--bind", proxy_socket_dir, proxy_socket_dir, NULL);
/* This is a file rather than a bind mount, because it will then
not be unmounted from the namespace when the namespace dies. */
flatpak_bwrap_add_args_data_fd (bwrap, "--file", glnx_steal_fd (&app_info_fd), "/.flatpak-info");
if (!flatpak_bwrap_bundle_args (bwrap, 1, -1, FALSE, error))
return FALSE;
return TRUE;
}
| 0
|
37,492
|
TEST_F(RouterTest, RetryUpstream5xx) {
NiceMock<Http::MockRequestEncoder> encoder1;
Http::ResponseDecoder* response_decoder = nullptr;
expectNewStreamWithImmediateEncoder(encoder1, &response_decoder, Http::Protocol::Http10);
expectResponseTimerCreate();
Http::TestRequestHeaderMapImpl headers{{"x-envoy-retry-on", "5xx"}, {"x-envoy-internal", "true"}};
HttpTestUtility::addDefaultHeaders(headers);
router_.decodeHeaders(headers, true);
EXPECT_EQ(1U,
callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());
// 5xx response.
router_.retry_state_->expectHeadersRetry();
Http::ResponseHeaderMapPtr response_headers1(
new Http::TestResponseHeaderMapImpl{{":status", "503"}});
EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_.host_->outlier_detector_,
putHttpResponseCode(503));
response_decoder->decodeHeaders(std::move(response_headers1), true);
EXPECT_TRUE(verifyHostUpstreamStats(0, 1));
// We expect the 5xx response to kick off a new request.
EXPECT_CALL(encoder1.stream_, resetStream(_)).Times(0);
NiceMock<Http::MockRequestEncoder> encoder2;
expectNewStreamWithImmediateEncoder(encoder2, &response_decoder, Http::Protocol::Http10);
router_.retry_state_->callback_();
EXPECT_EQ(2U,
callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());
// Normal response.
EXPECT_CALL(*router_.retry_state_, shouldRetryHeaders(_, _, _)).WillOnce(Return(RetryStatus::No));
EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_.host_->health_checker_, setUnhealthy(_))
.Times(0);
Http::ResponseHeaderMapPtr response_headers2(
new Http::TestResponseHeaderMapImpl{{":status", "200"}});
EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_.host_->outlier_detector_,
putHttpResponseCode(200));
response_decoder->decodeHeaders(std::move(response_headers2), true);
EXPECT_TRUE(verifyHostUpstreamStats(1, 1));
}
| 0
|
186,152
|
static inline void unmap_mapping_range_tree(struct prio_tree_root *root,
struct zap_details *details)
{
struct vm_area_struct *vma;
struct prio_tree_iter iter;
pgoff_t vba, vea, zba, zea;
vma_prio_tree_foreach(vma, &iter, root,
details->first_index, details->last_index) {
vba = vma->vm_pgoff;
vea = vba + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) - 1;
/* Assume for now that PAGE_CACHE_SHIFT == PAGE_SHIFT */
zba = details->first_index;
if (zba < vba)
zba = vba;
zea = details->last_index;
if (zea > vea)
zea = vea;
unmap_mapping_range_vma(vma,
((zba - vba) << PAGE_SHIFT) + vma->vm_start,
((zea - vba + 1) << PAGE_SHIFT) + vma->vm_start,
details);
}
}
| 0
|
38,847
|
R_API RBinFile *r_bin_file_find_by_id(RBin *bin, ut32 binfile_id) {
RBinFile *binfile = NULL;
RListIter *iter = NULL;
r_list_foreach (bin->binfiles, iter, binfile) {
if (binfile->id == binfile_id) {
break;
}
binfile = NULL;
}
return binfile;
}
| 0
|
508,792
|
discrete_component_transfer_func (gint C, RsvgNodeComponentTransferFunc * user_data)
{
gint k;
if (!user_data->nbTableValues)
return C;
k = (C * user_data->nbTableValues) / 255;
return user_data->tableValues[CLAMP (k, 0, user_data->nbTableValues)];
}
| 0
|
20,474
|
int fill_schema_table_privileges(THD *thd, TABLE_LIST *tables, COND *cond)
{
#ifndef NO_EMBEDDED_ACCESS_CHECKS
int error = 0;
uint index;
char buff[100];
TABLE *table = tables->table;
bool no_global_access = check_access(thd, SELECT_ACL, "mysql", 0, 1, 1, 0);
char *curr_host = thd->security_ctx->priv_host_name();
DBUG_ENTER("fill_schema_table_privileges");
rw_rdlock(&LOCK_grant);
for (index = 0; index < column_priv_hash.records; index++) {
const char *user, *host, *is_grantable = "YES";
GRANT_TABLE *grant_table = (GRANT_TABLE *) hash_element(&column_priv_hash, index);
if (!(user = grant_table->user)) user = "";
if (!(host = grant_table->host.hostname)) host = "";
if (no_global_access && (strcmp(thd->security_ctx->priv_user, user) || my_strcasecmp(system_charset_info, curr_host, host))) continue;
ulong table_access = grant_table->privs;
if (table_access) {
ulong test_access = table_access & ~GRANT_ACL;
if (!test_access && grant_table->cols) continue;
if (!(table_access & GRANT_ACL)) is_grantable = "NO";
strxmov(buff, "'", user, "'@'", host, "'", NullS);
if (!test_access) {
if (update_schema_privilege(thd, table, buff, grant_table->db, grant_table->tname, 0, 0, STRING_WITH_LEN("USAGE"), is_grantable)) {
error = 1;
goto err;
}
}
else {
ulong j;
int cnt;
for (cnt = 0, j = SELECT_ACL; j <= TABLE_ACLS; cnt++, j <<= 1) {
if (test_access & j) {
if (update_schema_privilege(thd, table, buff, grant_table->db, grant_table->tname, 0, 0, command_array[cnt], command_lengths[cnt], is_grantable)) {
error = 1;
goto err;
}
}
}
}
}
}
err:
rw_unlock(&LOCK_grant);
DBUG_RETURN(error);
#else
return (0);
#endif
}
| 0
|
192,897
|
GrGLInterface* WebGraphicsContext3DCommandBufferImpl::onCreateGrGLInterface() {
return webkit_glue::CreateCommandBufferSkiaGLBinding();
}
| 0
|
208,276
|
void Element::willModifyAttribute(const QualifiedName& name, const AtomicString& oldValue, const AtomicString& newValue)
{
if (isIdAttributeName(name))
updateId(oldValue, newValue);
else if (name == HTMLNames::nameAttr)
updateName(oldValue, newValue);
else if (name == HTMLNames::forAttr && hasTagName(labelTag)) {
TreeScope* scope = treeScope();
if (scope->shouldCacheLabelsByForAttribute())
updateLabel(scope, oldValue, newValue);
}
if (oldValue != newValue) {
if (attached() && document()->styleResolver() && document()->styleResolver()->hasSelectorForAttribute(name.localName()))
setNeedsStyleRecalc();
}
if (OwnPtr<MutationObserverInterestGroup> recipients = MutationObserverInterestGroup::createForAttributesMutation(this, name))
recipients->enqueueMutationRecord(MutationRecord::createAttributes(this, name, oldValue));
InspectorInstrumentation::willModifyDOMAttr(document(), this, oldValue, newValue);
}
| 0
|
167,556
|
nm_ip4_config_get_ifindex (const NMIP4Config *config)
{
return NM_IP4_CONFIG_GET_PRIVATE (config)->ifindex;
}
| 0
|
347,385
|
static int automount_dispatch_io(sd_event_source *s, int fd, uint32_t events, void *userdata) {
_cleanup_(sd_bus_error_free) sd_bus_error error = SD_BUS_ERROR_NULL;
union autofs_v5_packet_union packet;
Automount *a = AUTOMOUNT(userdata);
struct stat st;
Unit *trigger;
int r;
assert(a);
assert(fd == a->pipe_fd);
if (events != EPOLLIN) {
log_unit_error(UNIT(a), "Got invalid poll event %"PRIu32" on pipe (fd=%d)", events, fd);
goto fail;
}
r = loop_read_exact(a->pipe_fd, &packet, sizeof(packet), true);
if (r < 0) {
log_unit_error_errno(UNIT(a), r, "Invalid read from pipe: %m");
goto fail;
}
switch (packet.hdr.type) {
case autofs_ptype_missing_direct:
if (packet.v5_packet.pid > 0) {
_cleanup_free_ char *p = NULL;
get_process_comm(packet.v5_packet.pid, &p);
log_unit_info(UNIT(a), "Got automount request for %s, triggered by %"PRIu32" (%s)", a->where, packet.v5_packet.pid, strna(p));
} else
log_unit_debug(UNIT(a), "Got direct mount request on %s", a->where);
r = set_ensure_allocated(&a->tokens, NULL);
if (r < 0) {
log_unit_error(UNIT(a), "Failed to allocate token set.");
goto fail;
}
r = set_put(a->tokens, UINT_TO_PTR(packet.v5_packet.wait_queue_token));
if (r < 0) {
log_unit_error_errno(UNIT(a), r, "Failed to remember token: %m");
goto fail;
}
automount_enter_runnning(a);
break;
case autofs_ptype_expire_direct:
log_unit_debug(UNIT(a), "Got direct umount request on %s", a->where);
automount_stop_expire(a);
r = set_ensure_allocated(&a->expire_tokens, NULL);
if (r < 0) {
log_unit_error(UNIT(a), "Failed to allocate token set.");
goto fail;
}
r = set_put(a->expire_tokens, UINT_TO_PTR(packet.v5_packet.wait_queue_token));
if (r < 0) {
log_unit_error_errno(UNIT(a), r, "Failed to remember token: %m");
goto fail;
}
/* Before we do anything, let's see if somebody is playing games with us? */
if (lstat(a->where, &st) < 0) {
log_unit_warning_errno(UNIT(a), errno, "Failed to stat automount point: %m");
goto fail;
}
if (!S_ISDIR(st.st_mode) || st.st_dev == a->dev_id) {
log_unit_info(UNIT(a), "Automount point already unmounted?");
automount_send_ready(a, a->expire_tokens, 0);
break;
}
trigger = UNIT_TRIGGER(UNIT(a));
if (!trigger) {
log_unit_error(UNIT(a), "Unit to trigger vanished.");
goto fail;
}
r = manager_add_job(UNIT(a)->manager, JOB_STOP, trigger, JOB_REPLACE, &error, NULL);
if (r < 0) {
log_unit_warning(UNIT(a), "Failed to queue umount startup job: %s", bus_error_message(&error, r));
goto fail;
}
break;
default:
log_unit_error(UNIT(a), "Received unknown automount request %i", packet.hdr.type);
break;
}
return 0;
fail:
automount_enter_dead(a, AUTOMOUNT_FAILURE_RESOURCES);
return 0;
}
| 1
|
347,050
|
secret (gcry_mpi_t output, gcry_mpi_t input, RSA_secret_key *skey )
{
/* Remove superfluous leading zeroes from INPUT. */
mpi_normalize (input);
if (!skey->p || !skey->q || !skey->u)
{
mpi_powm (output, input, skey->d, skey->n);
}
else
{
gcry_mpi_t m1 = mpi_alloc_secure( mpi_get_nlimbs(skey->n)+1 );
gcry_mpi_t m2 = mpi_alloc_secure( mpi_get_nlimbs(skey->n)+1 );
gcry_mpi_t h = mpi_alloc_secure( mpi_get_nlimbs(skey->n)+1 );
/* m1 = c ^ (d mod (p-1)) mod p */
mpi_sub_ui( h, skey->p, 1 );
mpi_fdiv_r( h, skey->d, h );
mpi_powm( m1, input, h, skey->p );
/* m2 = c ^ (d mod (q-1)) mod q */
mpi_sub_ui( h, skey->q, 1 );
mpi_fdiv_r( h, skey->d, h );
mpi_powm( m2, input, h, skey->q );
/* h = u * ( m2 - m1 ) mod q */
mpi_sub( h, m2, m1 );
if ( mpi_has_sign ( h ) )
mpi_add ( h, h, skey->q );
mpi_mulm( h, skey->u, h, skey->q );
/* m = m1 + h * p */
mpi_mul ( h, h, skey->p );
mpi_add ( output, m1, h );
mpi_free ( h );
mpi_free ( m1 );
mpi_free ( m2 );
}
}
| 1
|
454,876
|
TEST_F(QueryPlannerTest, SortSkipLimit) {
runQuerySortProjSkipNToReturn(BSONObj(), fromjson("{a: 1}"), BSONObj(), 2, -3);
assertNumSolutions(1U);
// Limit in sort node should be adjusted by skip count
assertSolutionExists(
"{skip: {n: 2, node: "
"{sort: {pattern: {a: 1}, limit: 5, node: {sortKeyGen: "
"{node: {cscan: {dir: 1}}}}}}}}");
}
| 0
|
84,207
|
static struct fib6_node * fib6_lookup_1(struct fib6_node *root,
struct lookup_args *args)
{
struct fib6_node *fn;
__be32 dir;
if (unlikely(args->offset == 0))
return NULL;
/*
* Descend on a tree
*/
fn = root;
for (;;) {
struct fib6_node *next;
dir = addr_bit_set(args->addr, fn->fn_bit);
next = dir ? fn->right : fn->left;
if (next) {
fn = next;
continue;
}
break;
}
while (fn) {
if (FIB6_SUBTREE(fn) || fn->fn_flags & RTN_RTINFO) {
struct rt6key *key;
key = (struct rt6key *) ((u8 *) fn->leaf +
args->offset);
if (ipv6_prefix_equal(&key->addr, args->addr, key->plen)) {
#ifdef CONFIG_IPV6_SUBTREES
if (fn->subtree) {
struct fib6_node *sfn;
sfn = fib6_lookup_1(fn->subtree,
args + 1);
if (!sfn)
goto backtrack;
fn = sfn;
}
#endif
if (fn->fn_flags & RTN_RTINFO)
return fn;
}
}
#ifdef CONFIG_IPV6_SUBTREES
backtrack:
#endif
if (fn->fn_flags & RTN_ROOT)
break;
fn = fn->parent;
}
return NULL;
}
| 0
|
91,108
|
static void tlb_flush_mmu_free(struct mmu_gather *tlb)
{
struct mmu_gather_batch *batch;
for (batch = &tlb->local; batch && batch->nr; batch = batch->next) {
free_pages_and_swap_cache(batch->pages, batch->nr);
batch->nr = 0;
}
tlb->active = &tlb->local;
}
| 0
|
310,658
|
static void voidMethodArrayTestInterfaceEmptyArgMethod(const v8::FunctionCallbackInfo<v8::Value>& info)
{
if (UNLIKELY(info.Length() < 1)) {
throwTypeError(ExceptionMessages::failedToExecute("voidMethodArrayTestInterfaceEmptyArg", "TestObjectPython", ExceptionMessages::notEnoughArguments(1, info.Length())), info.GetIsolate());
return;
}
TestObjectPython* imp = V8TestObjectPython::toNative(info.Holder());
V8TRYCATCH_VOID(Vector<RefPtr<TestInterfaceEmpty> >, arrayTestInterfaceEmptyArg, (toRefPtrNativeArray<TestInterfaceEmpty, V8TestInterfaceEmpty>(info[0], 1, info.GetIsolate())));
imp->voidMethodArrayTestInterfaceEmptyArg(arrayTestInterfaceEmptyArg);
}
| 0
|
59,792
|
hts_idx_t *bcf_index_load3(const char *fn, const char *fnidx, int flags)
{
return hts_idx_load3(fn, fnidx, HTS_FMT_CSI, flags);
}
| 0
|
311,747
|
void BrowserViewRenderer::CalculateTileMemoryPolicy() {
base::CommandLine* cl = base::CommandLine::ForCurrentProcess();
bool client_hard_limit_bytes_overridden =
cl->HasSwitch(switches::kForceGpuMemAvailableMb);
if (client_hard_limit_bytes_overridden) {
base::StringToUint64(
base::CommandLine::ForCurrentProcess()->GetSwitchValueASCII(
switches::kForceGpuMemAvailableMb),
&g_memory_override_in_bytes);
g_memory_override_in_bytes *= 1024 * 1024;
}
}
| 0
|
427,441
|
deliver_local(address_item *addr, BOOL shadowing)
{
BOOL use_initgroups;
uid_t uid;
gid_t gid;
int status, len, rc;
int pfd[2];
pid_t pid;
uschar *working_directory;
address_item *addr2;
transport_instance *tp = addr->transport;
/* Set up the return path from the errors or sender address. If the transport
has its own return path setting, expand it and replace the existing value. */
if(addr->prop.errors_address)
return_path = addr->prop.errors_address;
#ifdef EXPERIMENTAL_SRS
else if (addr->prop.srs_sender)
return_path = addr->prop.srs_sender;
#endif
else
return_path = sender_address;
if (tp->return_path)
{
uschar *new_return_path = expand_string(tp->return_path);
if (!new_return_path)
{
if (!expand_string_forcedfail)
{
common_error(TRUE, addr, ERRNO_EXPANDFAIL,
US"Failed to expand return path \"%s\" in %s transport: %s",
tp->return_path, tp->name, expand_string_message);
return;
}
}
else return_path = new_return_path;
}
/* For local deliveries, one at a time, the value used for logging can just be
set directly, once and for all. */
used_return_path = return_path;
/* Sort out the uid, gid, and initgroups flag. If an error occurs, the message
gets put into the address(es), and the expansions are unset, so we can just
return. */
if (!findugid(addr, tp, &uid, &gid, &use_initgroups)) return;
/* See if either the transport or the address specifies a home directory. A
home directory set in the address may already be expanded; a flag is set to
indicate that. In other cases we must expand it. */
if ( (deliver_home = tp->home_dir) /* Set in transport, or */
|| ( (deliver_home = addr->home_dir) /* Set in address and */
&& !testflag(addr, af_home_expanded) /* not expanded */
) )
{
uschar *rawhome = deliver_home;
deliver_home = NULL; /* in case it contains $home */
if (!(deliver_home = expand_string(rawhome)))
{
common_error(TRUE, addr, ERRNO_EXPANDFAIL, US"home directory \"%s\" failed "
"to expand for %s transport: %s", rawhome, tp->name,
expand_string_message);
return;
}
if (*deliver_home != '/')
{
common_error(TRUE, addr, ERRNO_NOTABSOLUTE, US"home directory path \"%s\" "
"is not absolute for %s transport", deliver_home, tp->name);
return;
}
}
/* See if either the transport or the address specifies a current directory,
and if so, expand it. If nothing is set, use the home directory, unless it is
also unset in which case use "/", which is assumed to be a directory to which
all users have access. It is necessary to be in a visible directory for some
operating systems when running pipes, as some commands (e.g. "rm" under Solaris
2.5) require this. */
working_directory = tp->current_dir ? tp->current_dir : addr->current_dir;
if (working_directory)
{
uschar *raw = working_directory;
if (!(working_directory = expand_string(raw)))
{
common_error(TRUE, addr, ERRNO_EXPANDFAIL, US"current directory \"%s\" "
"failed to expand for %s transport: %s", raw, tp->name,
expand_string_message);
return;
}
if (*working_directory != '/')
{
common_error(TRUE, addr, ERRNO_NOTABSOLUTE, US"current directory path "
"\"%s\" is not absolute for %s transport", working_directory, tp->name);
return;
}
}
else working_directory = deliver_home ? deliver_home : US"/";
/* If one of the return_output flags is set on the transport, create and open a
file in the message log directory for the transport to write its output onto.
This is mainly used by pipe transports. The file needs to be unique to the
address. This feature is not available for shadow transports. */
if ( !shadowing
&& ( tp->return_output || tp->return_fail_output
|| tp->log_output || tp->log_fail_output || tp->log_defer_output
) )
{
uschar * error;
addr->return_filename =
spool_fname(US"msglog", message_subdir, message_id,
string_sprintf("-%d-%d", getpid(), return_count++));
if ((addr->return_file = open_msglog_file(addr->return_filename, 0400, &error)) < 0)
{
common_error(TRUE, addr, errno, US"Unable to %s file for %s transport "
"to return message: %s", error, tp->name, strerror(errno));
return;
}
}
/* Create the pipe for inter-process communication. */
if (pipe(pfd) != 0)
{
common_error(TRUE, addr, ERRNO_PIPEFAIL, US"Creation of pipe failed: %s",
strerror(errno));
return;
}
/* Now fork the process to do the real work in the subprocess, but first
ensure that all cached resources are freed so that the subprocess starts with
a clean slate and doesn't interfere with the parent process. */
search_tidyup();
if ((pid = fork()) == 0)
{
BOOL replicate = TRUE;
/* Prevent core dumps, as we don't want them in users' home directories.
HP-UX doesn't have RLIMIT_CORE; I don't know how to do this in that
system. Some experimental/developing systems (e.g. GNU/Hurd) may define
RLIMIT_CORE but not support it in setrlimit(). For such systems, do not
complain if the error is "not supported".
There are two scenarios where changing the max limit has an effect. In one,
the user is using a .forward and invoking a command of their choice via pipe;
for these, we do need the max limit to be 0 unless the admin chooses to
permit an increased limit. In the other, the command is invoked directly by
the transport and is under administrator control, thus being able to raise
the limit aids in debugging. So there's no general always-right answer.
Thus we inhibit core-dumps completely but let individual transports, while
still root, re-raise the limits back up to aid debugging. We make the
default be no core-dumps -- few enough people can use core dumps in
diagnosis that it's reasonable to make them something that has to be explicitly requested.
*/
#ifdef RLIMIT_CORE
struct rlimit rl;
rl.rlim_cur = 0;
rl.rlim_max = 0;
if (setrlimit(RLIMIT_CORE, &rl) < 0)
{
# ifdef SETRLIMIT_NOT_SUPPORTED
if (errno != ENOSYS && errno != ENOTSUP)
# endif
log_write(0, LOG_MAIN|LOG_PANIC, "setrlimit(RLIMIT_CORE) failed: %s",
strerror(errno));
}
#endif
/* Reset the random number generator, so different processes don't all
have the same sequence. */
random_seed = 0;
/* If the transport has a setup entry, call this first, while still
privileged. (Appendfile uses this to expand quota, for example, while
able to read private files.) */
if (addr->transport->setup)
switch((addr->transport->setup)(addr->transport, addr, NULL, uid, gid,
&(addr->message)))
{
case DEFER:
addr->transport_return = DEFER;
goto PASS_BACK;
case FAIL:
addr->transport_return = PANIC;
goto PASS_BACK;
}
/* Ignore SIGINT and SIGTERM during delivery. Also ignore SIGUSR1, as
when the process becomes unprivileged, it won't be able to write to the
process log. SIGHUP is ignored throughout exim, except when it is being
run as a daemon. */
signal(SIGINT, SIG_IGN);
signal(SIGTERM, SIG_IGN);
signal(SIGUSR1, SIG_IGN);
/* Close the unwanted half of the pipe, and set close-on-exec for the other
half - for transports that exec things (e.g. pipe). Then set the required
gid/uid. */
(void)close(pfd[pipe_read]);
(void)fcntl(pfd[pipe_write], F_SETFD, fcntl(pfd[pipe_write], F_GETFD) |
FD_CLOEXEC);
exim_setugid(uid, gid, use_initgroups,
string_sprintf("local delivery to %s <%s> transport=%s", addr->local_part,
addr->address, addr->transport->name));
DEBUG(D_deliver)
{
address_item *batched;
debug_printf(" home=%s current=%s\n", deliver_home, working_directory);
for (batched = addr->next; batched; batched = batched->next)
debug_printf("additional batched address: %s\n", batched->address);
}
/* Set an appropriate working directory. */
if (Uchdir(working_directory) < 0)
{
addr->transport_return = DEFER;
addr->basic_errno = errno;
addr->message = string_sprintf("failed to chdir to %s", working_directory);
}
/* If successful, call the transport */
else
{
BOOL ok = TRUE;
set_process_info("delivering %s to %s using %s", message_id,
addr->local_part, addr->transport->name);
/* Setting this global in the subprocess means we need never clear it */
transport_name = addr->transport->name;
/* If a transport filter has been specified, set up its argument list.
Any errors will get put into the address, and FALSE yielded. */
if (addr->transport->filter_command)
{
ok = transport_set_up_command(&transport_filter_argv,
addr->transport->filter_command,
TRUE, PANIC, addr, US"transport filter", NULL);
transport_filter_timeout = addr->transport->filter_timeout;
}
else transport_filter_argv = NULL;
if (ok)
{
debug_print_string(addr->transport->debug_string);
replicate = !(addr->transport->info->code)(addr->transport, addr);
}
}
/* Pass the results back down the pipe. If necessary, first replicate the
status in the top address to the others in the batch. The label is the
subject of a goto when a call to the transport's setup function fails. We
pass the pointer to the transport back in case it got changed as a result of
file_format in appendfile. */
PASS_BACK:
if (replicate) replicate_status(addr);
for (addr2 = addr; addr2; addr2 = addr2->next)
{
int i;
int local_part_length = Ustrlen(addr2->local_part);
uschar *s;
int ret;
if( (ret = write(pfd[pipe_write], &addr2->transport_return, sizeof(int))) != sizeof(int)
|| (ret = write(pfd[pipe_write], &transport_count, sizeof(transport_count))) != sizeof(transport_count)
|| (ret = write(pfd[pipe_write], &addr2->flags, sizeof(addr2->flags))) != sizeof(addr2->flags)
|| (ret = write(pfd[pipe_write], &addr2->basic_errno, sizeof(int))) != sizeof(int)
|| (ret = write(pfd[pipe_write], &addr2->more_errno, sizeof(int))) != sizeof(int)
|| (ret = write(pfd[pipe_write], &addr2->delivery_usec, sizeof(int))) != sizeof(int)
|| (ret = write(pfd[pipe_write], &addr2->special_action, sizeof(int))) != sizeof(int)
|| (ret = write(pfd[pipe_write], &addr2->transport,
sizeof(transport_instance *))) != sizeof(transport_instance *)
/* For a file delivery, pass back the local part, in case the original
was only part of the final delivery path. This gives more complete
logging. */
|| (testflag(addr2, af_file)
&& ( (ret = write(pfd[pipe_write], &local_part_length, sizeof(int))) != sizeof(int)
|| (ret = write(pfd[pipe_write], addr2->local_part, local_part_length)) != local_part_length
)
)
)
log_write(0, LOG_MAIN|LOG_PANIC, "Failed writing transport results to pipe: %s",
ret == -1 ? strerror(errno) : "short write");
/* Now any messages */
for (i = 0, s = addr2->message; i < 2; i++, s = addr2->user_message)
{
int message_length = s ? Ustrlen(s) + 1 : 0;
if( (ret = write(pfd[pipe_write], &message_length, sizeof(int))) != sizeof(int)
|| message_length > 0 && (ret = write(pfd[pipe_write], s, message_length)) != message_length
)
log_write(0, LOG_MAIN|LOG_PANIC, "Failed writing transport results to pipe: %s",
ret == -1 ? strerror(errno) : "short write");
}
}
/* OK, this process is now done. Free any cached resources that it opened,
and close the pipe we were writing down before exiting. */
(void)close(pfd[pipe_write]);
search_tidyup();
exit(EXIT_SUCCESS);
}
/* Back in the main process: panic if the fork did not succeed. This seems
better than returning an error - if forking is failing it is probably best
not to try other deliveries for this message. */
if (pid < 0)
log_write(0, LOG_MAIN|LOG_PANIC_DIE, "Fork failed for local delivery to %s",
addr->address);
/* Read the pipe to get the delivery status codes and error messages. Our copy
of the writing end must be closed first, as otherwise read() won't return zero
on an empty pipe. We check that a status exists for each address before
overwriting the address structure. If data is missing, the default DEFER status
will remain. Afterwards, close the reading end. */
(void)close(pfd[pipe_write]);
for (addr2 = addr; addr2; addr2 = addr2->next)
{
if ((len = read(pfd[pipe_read], &status, sizeof(int))) > 0)
{
int i;
uschar **sptr;
addr2->transport_return = status;
len = read(pfd[pipe_read], &transport_count,
sizeof(transport_count));
len = read(pfd[pipe_read], &addr2->flags, sizeof(addr2->flags));
len = read(pfd[pipe_read], &addr2->basic_errno, sizeof(int));
len = read(pfd[pipe_read], &addr2->more_errno, sizeof(int));
len = read(pfd[pipe_read], &addr2->delivery_usec, sizeof(int));
len = read(pfd[pipe_read], &addr2->special_action, sizeof(int));
len = read(pfd[pipe_read], &addr2->transport,
sizeof(transport_instance *));
if (testflag(addr2, af_file))
{
int llen;
if ( read(pfd[pipe_read], &llen, sizeof(int)) != sizeof(int)
|| llen > 64*4 /* limit from rfc 5821, times I18N factor */
)
{
log_write(0, LOG_MAIN|LOG_PANIC, "bad local_part length read"
" from delivery subprocess");
break;
}
/* sanity-checked llen so disable the Coverity error */
/* coverity[tainted_data] */
if (read(pfd[pipe_read], big_buffer, llen) != llen)
{
log_write(0, LOG_MAIN|LOG_PANIC, "bad local_part read"
" from delivery subprocess");
break;
}
big_buffer[llen] = 0;
addr2->local_part = string_copy(big_buffer);
}
for (i = 0, sptr = &addr2->message; i < 2; i++, sptr = &addr2->user_message)
{
int message_length;
len = read(pfd[pipe_read], &message_length, sizeof(int));
if (message_length > 0)
{
len = read(pfd[pipe_read], big_buffer, message_length);
big_buffer[big_buffer_size-1] = '\0'; /* guard byte */
if (len > 0) *sptr = string_copy(big_buffer);
}
}
}
else
{
log_write(0, LOG_MAIN|LOG_PANIC, "failed to read delivery status for %s "
"from delivery subprocess", addr2->unique);
break;
}
}
(void)close(pfd[pipe_read]);
/* Unless shadowing, write all successful addresses immediately to the journal
file, to ensure they are recorded asap. For homonymic addresses, use the base
address plus the transport name. Failure to write the journal is panic-worthy,
but don't stop, as it may prove possible subsequently to update the spool file
in order to record the delivery. */
if (!shadowing)
{
for (addr2 = addr; addr2; addr2 = addr2->next)
if (addr2->transport_return == OK)
{
if (testflag(addr2, af_homonym))
sprintf(CS big_buffer, "%.500s/%s\n", addr2->unique + 3, tp->name);
else
sprintf(CS big_buffer, "%.500s\n", addr2->unique);
/* In the test harness, wait just a bit to let the subprocess finish off
any debug output etc first. */
if (running_in_test_harness) millisleep(300);
DEBUG(D_deliver) debug_printf("journalling %s", big_buffer);
len = Ustrlen(big_buffer);
if (write(journal_fd, big_buffer, len) != len)
log_write(0, LOG_MAIN|LOG_PANIC, "failed to update journal for %s: %s",
big_buffer, strerror(errno));
}
/* Ensure the journal file is pushed out to disk. */
if (EXIMfsync(journal_fd) < 0)
log_write(0, LOG_MAIN|LOG_PANIC, "failed to fsync journal: %s",
strerror(errno));
}
/* Wait for the process to finish. If it terminates with a non-zero code,
freeze the message (except for SIGTERM, SIGKILL and SIGQUIT), but leave the
status values of all the addresses as they are. Take care to handle the case
when the subprocess doesn't seem to exist. This has been seen on one system
when Exim was called from an MUA that set SIGCHLD to SIG_IGN. When that
happens, wait() doesn't recognize the termination of child processes. Exim now
resets SIGCHLD to SIG_DFL, but this code should still be robust. */
while ((rc = wait(&status)) != pid)
if (rc < 0 && errno == ECHILD) /* Process has vanished */
{
log_write(0, LOG_MAIN, "%s transport process vanished unexpectedly",
addr->transport->driver_name);
status = 0;
break;
}
if ((status & 0xffff) != 0)
{
int msb = (status >> 8) & 255;
int lsb = status & 255;
int code = (msb == 0)? (lsb & 0x7f) : msb;
if (msb != 0 || (code != SIGTERM && code != SIGKILL && code != SIGQUIT))
addr->special_action = SPECIAL_FREEZE;
log_write(0, LOG_MAIN|LOG_PANIC, "%s transport process returned non-zero "
"status 0x%04x: %s %d",
addr->transport->driver_name,
status,
msb == 0 ? "terminated by signal" : "exit code",
code);
}
/* If SPECIAL_WARN is set in the top address, send a warning message. */
if (addr->special_action == SPECIAL_WARN && addr->transport->warn_message)
{
int fd;
uschar *warn_message;
pid_t pid;
DEBUG(D_deliver) debug_printf("Warning message requested by transport\n");
if (!(warn_message = expand_string(addr->transport->warn_message)))
log_write(0, LOG_MAIN|LOG_PANIC, "Failed to expand \"%s\" (warning "
"message for %s transport): %s", addr->transport->warn_message,
addr->transport->name, expand_string_message);
else if ((pid = child_open_exim(&fd)) > 0)
{
FILE *f = fdopen(fd, "wb");
if (errors_reply_to && !contains_header(US"Reply-To", warn_message))
fprintf(f, "Reply-To: %s\n", errors_reply_to);
fprintf(f, "Auto-Submitted: auto-replied\n");
if (!contains_header(US"From", warn_message))
moan_write_from(f);
fprintf(f, "%s", CS warn_message);
/* Close and wait for child process to complete, without a timeout. */
(void)fclose(f);
(void)child_close(pid, 0);
}
addr->special_action = SPECIAL_NONE;
}
}
| 0
|
154,333
|
termgui_mch_get_color(char_u *name)
{
return gui_get_color_cmn(name);
}
| 0
|
172,818
|
void Gfx::opSetFlat(Object args[], int numArgs) {
state->setFlatness((int)args[0].getNum());
out->updateFlatness(state);
}
| 0
|
464,238
|
ChangeLedFeedback(ClientPtr client, DeviceIntPtr dev, long unsigned int mask,
LedFeedbackPtr l, xLedFeedbackCtl * f)
{
LedCtrl lctrl; /* might get BadValue part way through */
if (client->swapped) {
swaps(&f->length);
swapl(&f->led_values);
swapl(&f->led_mask);
}
f->led_mask &= l->ctrl.led_mask; /* set only supported leds */
f->led_values &= l->ctrl.led_mask; /* set only supported leds */
if (mask & DvLed) {
lctrl.led_mask = f->led_mask;
lctrl.led_values = f->led_values;
(*l->CtrlProc) (dev, &lctrl);
l->ctrl.led_values &= ~(f->led_mask); /* zero changed leds */
l->ctrl.led_values |= (f->led_mask & f->led_values); /* OR in set leds */
}
return Success;
}
| 0
|
151,863
|
static int decode_stateid(struct xdr_stream *xdr, nfs4_stateid *stateid)
{
return decode_opaque_fixed(xdr, stateid->data, NFS4_STATEID_SIZE);
}
| 0
|
302,450
|
GPMF_ERR GPMF_Validate(GPMF_stream *ms, GPMF_LEVELS recurse)
{
if (ms)
{
uint32_t currpos = ms->pos;
int32_t nestsize = (int32_t)ms->nest_size[ms->nest_level];
if (nestsize == 0 && ms->nest_level == 0)
nestsize = ms->buffer_size_longs;
while (ms->pos+1 < ms->buffer_size_longs && nestsize > 0)
{
uint32_t key = ms->buffer[ms->pos];
if (ms->nest_level == 0 && key != GPMF_KEY_DEVICE && ms->device_count == 0 && ms->pos == 0)
{
DBG_MSG("ERROR: uninitized -- GPMF_ERROR_BAD_STRUCTURE\n");
return GPMF_ERROR_BAD_STRUCTURE;
}
if (GPMF_VALID_FOURCC(key))
{
uint32_t type_size_repeat = ms->buffer[ms->pos + 1];
int32_t size = GPMF_DATA_SIZE(type_size_repeat) >> 2;
uint8_t type = GPMF_SAMPLE_TYPE(type_size_repeat);
if (size + 2 > nestsize)
{
DBG_MSG("ERROR: nest size too small within %c%c%c%c-- GPMF_ERROR_BAD_STRUCTURE\n", PRINTF_4CC(key));
return GPMF_ERROR_BAD_STRUCTURE;
}
if (!GPMF_VALID_FOURCC(key))
{
DBG_MSG("ERROR: invalid 4CC -- GPMF_ERROR_BAD_STRUCTURE\n");
return GPMF_ERROR_BAD_STRUCTURE;
}
if (type == GPMF_TYPE_NEST && recurse == GPMF_RECURSE_LEVELS)
{
uint32_t validnest;
ms->pos += 2;
ms->nest_level++;
if (ms->nest_level > GPMF_NEST_LIMIT)
{
DBG_MSG("ERROR: nest level within %c%c%c%c too deep -- GPMF_ERROR_BAD_STRUCTURE\n", PRINTF_4CC(key));
return GPMF_ERROR_BAD_STRUCTURE;
}
ms->nest_size[ms->nest_level] = size;
validnest = GPMF_Validate(ms, recurse);
ms->nest_level--;
if (GPMF_OK != validnest)
{
DBG_MSG("ERROR: invalid nest within %c%c%c%c -- GPMF_ERROR_BAD_STRUCTURE\n", PRINTF_4CC(key));
return GPMF_ERROR_BAD_STRUCTURE;
}
else
{
if (ms->nest_level == 0)
ms->device_count++;
}
ms->pos += size;
nestsize -= 2 + size;
while (ms->pos < ms->buffer_size_longs && nestsize > 0 && ms->buffer[ms->pos] == GPMF_KEY_END)
{
ms->pos++;
nestsize--;
}
}
else
{
ms->pos += 2 + size;
nestsize -= 2 + size;
}
if (ms->pos == ms->buffer_size_longs)
{
ms->pos = currpos;
return GPMF_OK;
}
}
else
{
if (key == GPMF_KEY_END)
{
do
{
ms->pos++;
nestsize--;
} while (ms->pos < ms->buffer_size_longs && nestsize > 0 && ms->buffer[ms->pos] == 0);
}
else if (ms->nest_level == 0 && ms->device_count > 0)
{
ms->pos = currpos;
return GPMF_OK;
}
else
{
DBG_MSG("ERROR: bad struct within %c%c%c%c -- GPMF_ERROR_BAD_STRUCTURE\n", PRINTF_4CC(key));
return GPMF_ERROR_BAD_STRUCTURE;
}
}
}
ms->pos = currpos;
return GPMF_OK;
}
else
{
DBG_MSG("ERROR: Invalid handle -- GPMF_ERROR_MEMORY\n");
return GPMF_ERROR_MEMORY;
}
}
| 0
|
102,115
|
irda_queue_t *hashbin_get_next( hashbin_t *hashbin)
{
irda_queue_t* entry;
int bin;
int i;
IRDA_ASSERT( hashbin != NULL, return NULL;);
IRDA_ASSERT( hashbin->magic == HB_MAGIC, return NULL;);
if ( hashbin->hb_current == NULL) {
IRDA_ASSERT( hashbin->hb_current != NULL, return NULL;);
return NULL;
}
entry = hashbin->hb_current->q_next;
bin = GET_HASHBIN( entry->q_hash);
/*
* Make sure that we are not back at the beginning of the queue
* again
*/
if ( entry != hashbin->hb_queue[ bin ]) {
hashbin->hb_current = entry;
return entry;
}
/*
* Check that this is not the last queue in hashbin
*/
if ( bin >= HASHBIN_SIZE)
return NULL;
/*
* Move to next queue in hashbin
*/
bin++;
for ( i = bin; i < HASHBIN_SIZE; i++ ) {
entry = hashbin->hb_queue[ i];
if ( entry) {
hashbin->hb_current = entry;
return entry;
}
}
return NULL;
}
| 0
|
325,250
|
static void net_socket_send(void *opaque)
{
NetSocketState *s = opaque;
int l, size, err;
uint8_t buf1[4096];
const uint8_t *buf;
size = recv(s->fd, buf1, sizeof(buf1), 0);
if (size < 0) {
err = socket_error();
if (err != EWOULDBLOCK)
goto eoc;
} else if (size == 0) {
/* end of connection */
eoc:
qemu_set_fd_handler(s->fd, NULL, NULL, NULL);
closesocket(s->fd);
return;
}
buf = buf1;
while (size > 0) {
/* reassemble a packet from the network */
switch(s->state) {
case 0:
l = 4 - s->index;
if (l > size)
l = size;
memcpy(s->buf + s->index, buf, l);
buf += l;
size -= l;
s->index += l;
if (s->index == 4) {
/* got length */
s->packet_len = ntohl(*(uint32_t *)s->buf);
s->index = 0;
s->state = 1;
}
break;
case 1:
l = s->packet_len - s->index;
if (l > size)
l = size;
memcpy(s->buf + s->index, buf, l);
s->index += l;
buf += l;
size -= l;
if (s->index >= s->packet_len) {
qemu_send_packet(s->vc, s->buf, s->packet_len);
s->index = 0;
s->state = 0;
}
break;
}
}
}
| 1
|
320,047
|
static float quantize_band_cost(struct AACEncContext *s, const float *in,
const float *scaled, int size, int scale_idx,
int cb, const float lambda, const float uplim,
int *bits)
{
const float IQ = ff_aac_pow2sf_tab[200 + scale_idx - SCALE_ONE_POS + SCALE_DIV_512];
const float Q = ff_aac_pow2sf_tab[200 - scale_idx + SCALE_ONE_POS - SCALE_DIV_512];
const float CLIPPED_ESCAPE = 165140.0f*IQ;
int i, j, k;
float cost = 0;
const int dim = cb < FIRST_PAIR_BT ? 4 : 2;
int resbits = 0;
#ifndef USE_REALLY_FULL_SEARCH
const float Q34 = sqrtf(Q * sqrtf(Q));
const int range = aac_cb_range[cb];
const int maxval = aac_cb_maxval[cb];
int offs[4];
#endif /* USE_REALLY_FULL_SEARCH */
if (!cb) {
for (i = 0; i < size; i++)
cost += in[i]*in[i]*lambda;
if (bits)
*bits = 0;
return cost;
}
#ifndef USE_REALLY_FULL_SEARCH
offs[0] = 1;
for (i = 1; i < dim; i++)
offs[i] = offs[i-1]*range;
quantize_bands(s->qcoefs, in, scaled, size, Q34, !IS_CODEBOOK_UNSIGNED(cb), maxval);
#endif /* USE_REALLY_FULL_SEARCH */
for (i = 0; i < size; i += dim) {
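        /* for each group of 'dim' coefficients, pick the codebook entry minimizing weighted distortion plus bit cost */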
float mincost;
int minidx = 0;
int minbits = 0;
const float *vec;
#ifndef USE_REALLY_FULL_SEARCH
int (*quants)[2] = &s->qcoefs[i];
mincost = 0.0f;
for (j = 0; j < dim; j++)
mincost += in[i+j]*in[i+j]*lambda;
minidx = IS_CODEBOOK_UNSIGNED(cb) ? 0 : 40;
minbits = ff_aac_spectral_bits[cb-1][minidx];
mincost += minbits;
for (j = 0; j < (1<<dim); j++) {
float rd = 0.0f;
int curbits;
int curidx = IS_CODEBOOK_UNSIGNED(cb) ? 0 : 40;
int same = 0;
for (k = 0; k < dim; k++) {
if ((j & (1 << k)) && quants[k][0] == quants[k][1]) {
same = 1;
break;
}
}
if (same)
continue;
for (k = 0; k < dim; k++)
curidx += quants[k][!!(j & (1 << k))] * offs[dim - 1 - k];
curbits = ff_aac_spectral_bits[cb-1][curidx];
vec = &ff_aac_codebook_vectors[cb-1][curidx*dim];
#else
mincost = INFINITY;
vec = ff_aac_codebook_vectors[cb-1];
for (j = 0; j < ff_aac_spectral_sizes[cb-1]; j++, vec += dim) {
float rd = 0.0f;
int curbits = ff_aac_spectral_bits[cb-1][j];
#endif /* USE_REALLY_FULL_SEARCH */
if (IS_CODEBOOK_UNSIGNED(cb)) {
for (k = 0; k < dim; k++) {
float t = fabsf(in[i+k]);
float di;
//do not code with escape sequence small values
if (vec[k] == 64.0f && t < 39.0f*IQ) {
rd = INFINITY;
break;
}
if (vec[k] == 64.0f) { //FIXME: slow
if (t >= CLIPPED_ESCAPE) {
di = t - CLIPPED_ESCAPE;
curbits += 21;
} else {
int c = av_clip(quant(t, Q), 0, 8191);
di = t - c*cbrt(c)*IQ;
curbits += av_log2(c)*2 - 4 + 1;
}
} else {
di = t - vec[k]*IQ;
}
if (vec[k] != 0.0f)
curbits++;
rd += di*di*lambda;
}
} else {
for (k = 0; k < dim; k++) {
float di = in[i+k] - vec[k]*IQ;
rd += di*di*lambda;
}
}
rd += curbits;
if (rd < mincost) {
mincost = rd;
minidx = j;
minbits = curbits;
}
}
cost += mincost;
resbits += minbits;
if (cost >= uplim)
return uplim;
}
if (bits)
*bits = resbits;
return cost;
}
| 0
|
209,914
|
static float lite_font_stringwidth( wmfAPI* API, wmfFont* font, char* str)
{
#if 0
wmf_magick_t
*ddata = WMF_MAGICK_GetData(API);
Image
*image = ddata->image;
DrawInfo
*draw_info;
TypeMetric
metrics;
float
stringwidth = 0;
double
orig_x_resolution,
orig_y_resolution;
ResolutionType
orig_resolution_units;
orig_x_resolution = image->x_resolution;
orig_y_resolution = image->y_resolution;
orig_resolution_units = image->units;
draw_info=ddata->draw_info;
if (draw_info == (const DrawInfo *) NULL)
return 0;
draw_info->font=WMF_FONT_PSNAME(font);
draw_info->pointsize=12;
draw_info->text=str;
image->x_resolution = 72;
image->y_resolution = 72;
image->units = PixelsPerInchResolution;
if (GetTypeMetrics(image, draw_info, &metrics) != MagickFalse)
stringwidth = ((metrics.width * 72)/(image->x_resolution * draw_info->pointsize)); /* *0.916348; */
draw_info->font=NULL;
draw_info->text=NULL;
#if 0
printf("\nlite_font_stringwidth\n");
printf("string = \"%s\"\n", str);
printf("WMF_FONT_NAME = \"%s\"\n", WMF_FONT_NAME(font));
printf("WMF_FONT_PSNAME = \"%s\"\n", WMF_FONT_PSNAME(font));
printf("stringwidth = %g\n", stringwidth);
/* printf("WMF_FONT_HEIGHT = %i\n", (int)WMF_FONT_HEIGHT(font)); */
/* printf("WMF_FONT_WIDTH = %i\n", (int)WMF_FONT_WIDTH(font)); */
fflush(stdout);
#endif
image->x_resolution = orig_x_resolution;
image->y_resolution = orig_y_resolution;
image->units = orig_resolution_units;
return stringwidth;
#else
(void) API;
(void) font;
(void) str;
return 0;
#endif
}
| 0
|
296,971
|
static int rawsock_connect(struct socket *sock, struct sockaddr *_addr,
int len, int flags)
{
struct sock *sk = sock->sk;
struct sockaddr_nfc *addr = (struct sockaddr_nfc *)_addr;
struct nfc_dev *dev;
int rc = 0;
pr_debug("sock=%p sk=%p flags=%d\n", sock, sk, flags);
if (!addr || len < sizeof(struct sockaddr_nfc) ||
addr->sa_family != AF_NFC)
return -EINVAL;
pr_debug("addr dev_idx=%u target_idx=%u protocol=%u\n",
addr->dev_idx, addr->target_idx, addr->nfc_protocol);
lock_sock(sk);
if (sock->state == SS_CONNECTED) {
rc = -EISCONN;
goto error;
}
dev = nfc_get_device(addr->dev_idx);
if (!dev) {
rc = -ENODEV;
goto error;
}
if (addr->target_idx > dev->target_next_idx - 1 ||
addr->target_idx < dev->target_next_idx - dev->n_targets) {
rc = -EINVAL;
goto error;
}
rc = nfc_activate_target(dev, addr->target_idx, addr->nfc_protocol);
if (rc)
goto put_dev;
nfc_rawsock(sk)->dev = dev;
nfc_rawsock(sk)->target_idx = addr->target_idx;
sock->state = SS_CONNECTED;
sk->sk_state = TCP_ESTABLISHED;
sk->sk_state_change(sk);
release_sock(sk);
return 0;
put_dev:
nfc_put_device(dev);
error:
release_sock(sk);
return rc;
}
| 0
|
416,934
|
static int l2tp_eth_create(struct net *net, struct l2tp_tunnel *tunnel,
u32 session_id, u32 peer_session_id,
struct l2tp_session_cfg *cfg)
{
unsigned char name_assign_type;
struct net_device *dev;
char name[IFNAMSIZ];
struct l2tp_session *session;
struct l2tp_eth *priv;
struct l2tp_eth_sess *spriv;
int rc;
struct l2tp_eth_net *pn;
if (cfg->ifname) {
strlcpy(name, cfg->ifname, IFNAMSIZ);
name_assign_type = NET_NAME_USER;
} else {
strcpy(name, L2TP_ETH_DEV_NAME);
name_assign_type = NET_NAME_ENUM;
}
session = l2tp_session_create(sizeof(*spriv), tunnel, session_id,
peer_session_id, cfg);
if (IS_ERR(session)) {
rc = PTR_ERR(session);
goto out;
}
dev = alloc_netdev(sizeof(*priv), name, name_assign_type,
l2tp_eth_dev_setup);
if (!dev) {
rc = -ENOMEM;
goto out_del_session;
}
dev_net_set(dev, net);
dev->min_mtu = 0;
dev->max_mtu = ETH_MAX_MTU;
l2tp_eth_adjust_mtu(tunnel, session, dev);
priv = netdev_priv(dev);
priv->dev = dev;
priv->session = session;
INIT_LIST_HEAD(&priv->list);
priv->tunnel_sock = tunnel->sock;
session->recv_skb = l2tp_eth_dev_recv;
session->session_close = l2tp_eth_delete;
#if IS_ENABLED(CONFIG_L2TP_DEBUGFS)
session->show = l2tp_eth_show;
#endif
spriv = l2tp_session_priv(session);
spriv->dev = dev;
rc = register_netdev(dev);
if (rc < 0)
goto out_del_dev;
__module_get(THIS_MODULE);
/* Must be done after register_netdev() */
strlcpy(session->ifname, dev->name, IFNAMSIZ);
dev_hold(dev);
pn = l2tp_eth_pernet(dev_net(dev));
spin_lock(&pn->l2tp_eth_lock);
list_add(&priv->list, &pn->l2tp_eth_dev_list);
spin_unlock(&pn->l2tp_eth_lock);
return 0;
out_del_dev:
free_netdev(dev);
spriv->dev = NULL;
out_del_session:
l2tp_session_delete(session);
out:
return rc;
}
| 0
|
16,613
|
static int dsa_pub_encode(X509_PUBKEY *pk, const EVP_PKEY *pkey)
{
    DSA *dsa;
    int ptype;
    unsigned char *penc = NULL;
    int penclen;
    ASN1_STRING *str = NULL;
    ASN1_INTEGER *pubint = NULL;
    dsa = pkey->pkey.dsa;
    if (pkey->save_parameters && dsa->p && dsa->q && dsa->g) {
        str = ASN1_STRING_new();
        if (str == NULL) {
            DSAerr(DSA_F_DSA_PUB_ENCODE, ERR_R_MALLOC_FAILURE);
            goto err;
        }
        str->length = i2d_DSAparams(dsa, &str->data);
        if (str->length <= 0) {
            DSAerr(DSA_F_DSA_PUB_ENCODE, ERR_R_MALLOC_FAILURE);
            goto err;
        }
        ptype = V_ASN1_SEQUENCE;
    } else
        ptype = V_ASN1_UNDEF;
    pubint = BN_to_ASN1_INTEGER(dsa->pub_key, NULL);
    if (pubint == NULL) {
        DSAerr(DSA_F_DSA_PUB_ENCODE, ERR_R_MALLOC_FAILURE);
        goto err;
    }
    penclen = i2d_ASN1_INTEGER(pubint, &penc);
    ASN1_INTEGER_free(pubint);
    if (penclen <= 0) {
        DSAerr(DSA_F_DSA_PUB_ENCODE, ERR_R_MALLOC_FAILURE);
        goto err;
    }
    if (X509_PUBKEY_set0_param(pk, OBJ_nid2obj(EVP_PKEY_DSA), ptype, str, penc, penclen))
        return 1;
err:
    OPENSSL_free(penc);
    ASN1_STRING_free(str);
    return 0;
}
| 0
|
438,606
|
static u32 *gen12_emit_fini_breadcrumb(struct i915_request *request, u32 *cs)
{
cs = gen8_emit_ggtt_write(cs,
request->fence.seqno,
i915_request_active_timeline(request)->hwsp_offset,
0);
return gen12_emit_fini_breadcrumb_footer(request, cs);
}
| 0
|
206,218
|
GLint GLES2Implementation::GetAttribLocation(GLuint program, const char* name) {
GPU_CLIENT_SINGLE_THREAD_CHECK();
GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glGetAttribLocation(" << program
<< ", " << name << ")");
TRACE_EVENT0("gpu", "GLES2::GetAttribLocation");
GLint loc = share_group_->program_info_manager()->GetAttribLocation(
this, program, name);
GPU_CLIENT_LOG("returned " << loc);
CheckGLError();
return loc;
}
| 0
|
351,319
|
int ax25_fwd_ioctl(unsigned int cmd, struct ax25_fwd_struct *fwd)
{
ax25_dev *ax25_dev, *fwd_dev;
if ((ax25_dev = ax25_addr_ax25dev(&fwd->port_from)) == NULL)
return -EINVAL;
switch (cmd) {
case SIOCAX25ADDFWD:
if ((fwd_dev = ax25_addr_ax25dev(&fwd->port_to)) == NULL)
return -EINVAL;
if (ax25_dev->forward != NULL)
return -EINVAL;
ax25_dev->forward = fwd_dev->dev;
ax25_dev_put(fwd_dev);
break;
case SIOCAX25DELFWD:
if (ax25_dev->forward == NULL)
return -EINVAL;
ax25_dev->forward = NULL;
break;
default:
return -EINVAL;
}
ax25_dev_put(ax25_dev);
return 0;
}
| 1
|
375,904
|
void usb_device_handle_attach(USBDevice *dev)
{
USBDeviceClass *klass = USB_DEVICE_GET_CLASS(dev);
if (klass->handle_attach) {
klass->handle_attach(dev);
}
}
| 0
|
328,292
|
static void vnc_connect(VncDisplay *vd, int csock,
bool skipauth, bool websocket)
{
VncState *vs = g_malloc0(sizeof(VncState));
int i;
vs->csock = csock;
vs->vd = vd;
if (skipauth) {
vs->auth = VNC_AUTH_NONE;
vs->subauth = VNC_AUTH_INVALID;
} else {
if (websocket) {
vs->auth = vd->ws_auth;
vs->subauth = VNC_AUTH_INVALID;
} else {
vs->auth = vd->auth;
vs->subauth = vd->subauth;
}
}
VNC_DEBUG("Client sock=%d ws=%d auth=%d subauth=%d\n",
csock, websocket, vs->auth, vs->subauth);
vs->lossy_rect = g_malloc0(VNC_STAT_ROWS * sizeof (*vs->lossy_rect));
for (i = 0; i < VNC_STAT_ROWS; ++i) {
vs->lossy_rect[i] = g_malloc0(VNC_STAT_COLS * sizeof (uint8_t));
}
VNC_DEBUG("New client on socket %d\n", csock);
update_displaychangelistener(&vd->dcl, VNC_REFRESH_INTERVAL_BASE);
qemu_set_nonblock(vs->csock);
#ifdef CONFIG_VNC_WS
if (websocket) {
vs->websocket = 1;
#ifdef CONFIG_VNC_TLS
if (vd->ws_tls) {
qemu_set_fd_handler(vs->csock, vncws_tls_handshake_io, NULL, vs);
} else
#endif /* CONFIG_VNC_TLS */
{
qemu_set_fd_handler(vs->csock, vncws_handshake_read, NULL, vs);
}
} else
#endif /* CONFIG_VNC_WS */
{
qemu_set_fd_handler(vs->csock, vnc_client_read, NULL, vs);
}
vnc_client_cache_addr(vs);
vnc_qmp_event(vs, QAPI_EVENT_VNC_CONNECTED);
vnc_set_share_mode(vs, VNC_SHARE_MODE_CONNECTING);
#ifdef CONFIG_VNC_WS
if (!vs->websocket)
#endif
{
vnc_init_state(vs);
}
if (vd->num_connecting > vd->connections_limit) {
QTAILQ_FOREACH(vs, &vd->clients, next) {
if (vs->share_mode == VNC_SHARE_MODE_CONNECTING) {
vnc_disconnect_start(vs);
return;
}
}
}
}
| 0
|
154,813
|
ssize_t blk_mq_tag_sysfs_show(struct blk_mq_tags *tags, char *page)
{
char *orig_page = page;
unsigned int free, res;
if (!tags)
return 0;
page += sprintf(page, "nr_tags=%u, reserved_tags=%u, "
"bits_per_word=%u\n",
tags->nr_tags, tags->nr_reserved_tags,
tags->bitmap_tags.bits_per_word);
free = bt_unused_tags(&tags->bitmap_tags);
res = bt_unused_tags(&tags->breserved_tags);
page += sprintf(page, "nr_free=%u, nr_reserved=%u\n", free, res);
page += sprintf(page, "active_queues=%u\n", atomic_read(&tags->active_queues));
return page - orig_page;
}
| 0
|
138,877
|
pushval_asis(Datum opaque, TSQueryParserState state, char *strval, int lenval,
int16 weight, bool prefix)
{
pushValue(state, strval, lenval, weight, prefix);
}
| 0
|
194,589
|
GLint GLES2Implementation::GetProgramResourceLocation(
GLuint program,
GLenum program_interface,
const char* name) {
GPU_CLIENT_SINGLE_THREAD_CHECK();
GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glGetProgramResourceLocation("
<< program << ", " << program_interface << ", " << name
<< ")");
TRACE_EVENT0("gpu", "GLES2::GetProgramResourceLocation");
GLint location =
share_group_->program_info_manager()->GetProgramResourceLocation(
this, program, program_interface, name);
GPU_CLIENT_LOG("returned " << location);
CheckGLError();
return location;
}
| 0
|
8,824
|
cib_recv_plaintext(int sock)
{
char *buf = NULL;
ssize_t rc = 0;
ssize_t len = 0;
ssize_t chunk_size = 512;
buf = calloc(1, chunk_size);
while (1) {
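        /* read in chunk_size pieces, growing the buffer until a NUL-terminated message has arrived */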
errno = 0;
rc = read(sock, buf + len, chunk_size);
crm_trace("Got %d more bytes. errno=%d", (int)rc, errno);
if (errno == EINTR || errno == EAGAIN) {
crm_trace("Retry: %d", (int)rc);
if (rc > 0) {
len += rc;
buf = realloc(buf, len + chunk_size);
CRM_ASSERT(buf != NULL);
}
} else if (rc < 0) {
crm_perror(LOG_ERR, "Error receiving message: %d", (int)rc);
goto bail;
} else if (rc == chunk_size) {
len += rc;
chunk_size *= 2;
buf = realloc(buf, len + chunk_size);
crm_trace("Retry with %d more bytes", (int)chunk_size);
CRM_ASSERT(buf != NULL);
} else if (buf[len + rc - 1] != 0) {
crm_trace("Last char is %d '%c'", buf[len + rc - 1], buf[len + rc - 1]);
crm_trace("Retry with %d more bytes", (int)chunk_size);
len += rc;
buf = realloc(buf, len + chunk_size);
CRM_ASSERT(buf != NULL);
} else {
return buf;
}
}
bail:
free(buf);
return NULL;
}
| 1
|
99,635
|
Variant c_SimpleXMLElementIterator::t_key() {
if (m_iter1) {
return m_iter1->first();
}
return uninit_null();
}
| 0
|
140,710
|
void CL_DemoFilename( char *buf, int bufSize ) {
time_t rawtime;
char timeStr[32] = {0}; // should really only reach ~19 chars
time( &rawtime );
strftime( timeStr, sizeof( timeStr ), "%Y-%m-%d_%H-%M-%S", localtime( &rawtime ) ); // or gmtime
Com_sprintf( buf, bufSize, "demo%s", timeStr );
}
| 0
|
394,399
|
static void cli_connect_nb_done(struct tevent_req *subreq)
{
struct tevent_req *req = tevent_req_callback_data(
subreq, struct tevent_req);
struct cli_connect_nb_state *state = tevent_req_data(
req, struct cli_connect_nb_state);
NTSTATUS status;
int fd = 0;
uint16_t port;
status = cli_connect_sock_recv(subreq, &fd, &port);
TALLOC_FREE(subreq);
if (tevent_req_nterror(req, status)) {
return;
}
state->cli = cli_state_create(state, fd, state->desthost, NULL,
state->signing_state, state->flags);
if (tevent_req_nomem(state->cli, req)) {
close(fd);
return;
}
tevent_req_done(req);
}
| 0
|
61,643
|
cmsMLU* CMSEXPORT cmsMLUdup(const cmsMLU* mlu)
{
cmsMLU* NewMlu = NULL;
// Duplicating a NULL obtains a NULL
if (mlu == NULL) return NULL;
NewMlu = cmsMLUalloc(mlu ->ContextID, mlu ->UsedEntries);
if (NewMlu == NULL) return NULL;
// Should never happen
if (NewMlu ->AllocatedEntries < mlu ->UsedEntries)
goto Error;
// Sanitize...
if (NewMlu ->Entries == NULL || mlu ->Entries == NULL) goto Error;
memmove(NewMlu ->Entries, mlu ->Entries, mlu ->UsedEntries * sizeof(_cmsMLUentry));
NewMlu ->UsedEntries = mlu ->UsedEntries;
// The MLU may be empty
if (mlu ->PoolUsed == 0) {
NewMlu ->MemPool = NULL;
}
else {
// It is not empty
NewMlu ->MemPool = _cmsMalloc(mlu ->ContextID, mlu ->PoolUsed);
if (NewMlu ->MemPool == NULL) goto Error;
}
NewMlu ->PoolSize = mlu ->PoolUsed;
if (NewMlu ->MemPool == NULL || mlu ->MemPool == NULL) goto Error;
memmove(NewMlu ->MemPool, mlu->MemPool, mlu ->PoolUsed);
NewMlu ->PoolUsed = mlu ->PoolUsed;
return NewMlu;
Error:
if (NewMlu != NULL) cmsMLUfree(NewMlu);
return NULL;
}
| 0
|
119,775
|
ARN::ARN(const rgw_bucket& b)
: partition(Partition::aws),
service(Service::s3),
region(),
account(b.tenant),
resource(b.name) { }
| 0
|
478,711
|
static int format(const short val) { return (int)val; }
| 0
|
516,391
|
void X509Certificate::Fingerprint512(const FunctionCallbackInfo<Value>& args) {
Environment* env = Environment::GetCurrent(args);
X509Certificate* cert;
ASSIGN_OR_RETURN_UNWRAP(&cert, args.Holder());
Local<Value> ret;
if (GetFingerprintDigest(env, EVP_sha512(), cert->get()).ToLocal(&ret))
args.GetReturnValue().Set(ret);
}
| 0
|
459,718
|
dissect_kafka_create_partitions_response(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree, int offset,
kafka_api_version_t api_version)
{
proto_item *subti;
proto_tree *subtree;
offset = dissect_kafka_throttle_time(tvb, pinfo, tree, offset);
subtree = proto_tree_add_subtree(tree, tvb, offset, -1,
ett_kafka_topics,
&subti, "Topics");
offset = dissect_kafka_array(subtree, tvb, pinfo, offset, api_version >= 2, api_version,
&dissect_kafka_create_partitions_response_topic, NULL);
proto_item_set_end(subti, tvb, offset);
if (api_version >= 2) {
offset = dissect_kafka_tagged_fields(tvb, pinfo, tree, offset, 0);
}
return offset;
}
| 0
|
380,479
|
evbuffer_invoke_callbacks(struct evbuffer *buffer)
{
if (TAILQ_EMPTY(&buffer->callbacks)) {
buffer->n_add_for_cb = buffer->n_del_for_cb = 0;
return;
}
if (buffer->deferred_cbs) {
if (buffer->deferred.queued)
return;
_evbuffer_incref_and_lock(buffer);
if (buffer->parent)
bufferevent_incref(buffer->parent);
EVBUFFER_UNLOCK(buffer);
event_deferred_cb_schedule(buffer->cb_queue, &buffer->deferred);
}
evbuffer_run_callbacks(buffer, 0);
}
| 0
|
394,558
|
write_iso9660_data(struct archive_write *a, const void *buff, size_t s)
{
struct iso9660 *iso9660 = a->format_data;
size_t ws;
if (iso9660->temp_fd < 0) {
archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC,
"Couldn't create temporary file");
return (ARCHIVE_FATAL);
}
ws = s;
if (iso9660->need_multi_extent &&
(iso9660->cur_file->cur_content->size + ws) >=
(MULTI_EXTENT_SIZE - LOGICAL_BLOCK_SIZE)) {
struct content *con;
size_t ts;
ts = (size_t)(MULTI_EXTENT_SIZE - LOGICAL_BLOCK_SIZE -
iso9660->cur_file->cur_content->size);
if (iso9660->zisofs.detect_magic)
zisofs_detect_magic(a, buff, ts);
if (iso9660->zisofs.making) {
if (zisofs_write_to_temp(a, buff, ts) != ARCHIVE_OK)
return (ARCHIVE_FATAL);
} else {
if (wb_write_to_temp(a, buff, ts) != ARCHIVE_OK)
return (ARCHIVE_FATAL);
iso9660->cur_file->cur_content->size += ts;
}
/* Write padding. */
if (wb_write_padding_to_temp(a,
iso9660->cur_file->cur_content->size) != ARCHIVE_OK)
return (ARCHIVE_FATAL);
/* Compute the logical block number. */
iso9660->cur_file->cur_content->blocks = (int)
((iso9660->cur_file->cur_content->size
+ LOGICAL_BLOCK_SIZE -1) >> LOGICAL_BLOCK_BITS);
/*
* Make next extent.
*/
ws -= ts;
buff = (const void *)(((const unsigned char *)buff) + ts);
/* Make a content for next extent. */
con = calloc(1, sizeof(*con));
if (con == NULL) {
archive_set_error(&a->archive, ENOMEM,
"Can't allocate content data");
return (ARCHIVE_FATAL);
}
con->offset_of_temp = wb_offset(a);
iso9660->cur_file->cur_content->next = con;
iso9660->cur_file->cur_content = con;
#ifdef HAVE_ZLIB_H
iso9660->zisofs.block_offset = 0;
#endif
}
if (iso9660->zisofs.detect_magic)
zisofs_detect_magic(a, buff, ws);
if (iso9660->zisofs.making) {
if (zisofs_write_to_temp(a, buff, ws) != ARCHIVE_OK)
return (ARCHIVE_FATAL);
} else {
if (wb_write_to_temp(a, buff, ws) != ARCHIVE_OK)
return (ARCHIVE_FATAL);
iso9660->cur_file->cur_content->size += ws;
}
return (s);
}
| 0
|
19,334
|
static void guestfwd_read(void *opaque, const uint8_t *buf, int size)
{
    struct GuestFwd *fwd = opaque;
    slirp_socket_recv(fwd->slirp, fwd->server, fwd->port, buf, size);
}
| 0
|
81,028
|
static int thread_cpu_nsleep(const clockid_t which_clock, int flags,
struct timespec *rqtp, struct timespec __user *rmtp)
{
return -EINVAL;
}
| 0
|
488,907
|
static Bool ctxload_process_event(GF_Filter *filter, const GF_FilterEvent *com)
{
u32 count, i;
CTXLoadPriv *priv = gf_filter_get_udta(filter);
//check for scene attach
switch (com->base.type) {
case GF_FEVT_PLAY:
//cancel play event, we work with full file
//TODO: animation stream in BT
priv->is_playing = GF_TRUE;
return GF_TRUE;
case GF_FEVT_ATTACH_SCENE:
break;
case GF_FEVT_RESET_SCENE:
gf_sm_load_done(&priv->load);
if (priv->ctx) gf_sm_del(priv->ctx);
priv->ctx = NULL;
priv->load_flags = 3;
return GF_FALSE;
default:
return GF_FALSE;
}
if (!com->attach_scene.on_pid) return GF_TRUE;
count = gf_filter_get_ipid_count(filter);
for (i=0; i<count; i++) {
GF_FilterPid *ipid = gf_filter_get_ipid(filter, i);
GF_FilterPid *opid = gf_filter_pid_get_udta(ipid);
//we found our pid, set it up
if (opid == com->attach_scene.on_pid) {
if (!priv->scene) {
GF_ObjectManager *odm = com->attach_scene.object_manager;
priv->scene = odm->subscene ? odm->subscene : odm->parentscene;
gf_sg_set_node_callback(priv->scene->graph, CTXLoad_NodeCallback);
priv->service_url = odm->scene_ns->url;
if (!priv->ctx) CTXLoad_Setup(filter, priv);
}
return GF_TRUE;
}
}
return GF_FALSE;
}
| 0
|
389,442
|
void cgit_print_docstart(void)
{
if (ctx.cfg.embedded) {
if (ctx.cfg.header)
html_include(ctx.cfg.header);
return;
}
char *host = cgit_hosturl();
html(cgit_doctype);
html("<html xmlns='http://www.w3.org/1999/xhtml' xml:lang='en' lang='en'>\n");
html("<head>\n");
html("<title>");
html_txt(ctx.page.title);
html("</title>\n");
htmlf("<meta name='generator' content='cgit %s'/>\n", cgit_version);
if (ctx.cfg.robots && *ctx.cfg.robots)
htmlf("<meta name='robots' content='%s'/>\n", ctx.cfg.robots);
html("<link rel='stylesheet' type='text/css' href='");
html_attr(ctx.cfg.css);
html("'/>\n");
if (ctx.cfg.favicon) {
html("<link rel='shortcut icon' href='");
html_attr(ctx.cfg.favicon);
html("'/>\n");
}
if (host && ctx.repo && ctx.qry.head) {
char *fileurl;
struct strbuf sb = STRBUF_INIT;
strbuf_addf(&sb, "h=%s", ctx.qry.head);
html("<link rel='alternate' title='Atom feed' href='");
html(cgit_httpscheme());
html_attr(host);
fileurl = cgit_fileurl(ctx.repo->url, "atom", ctx.qry.vpath,
sb.buf);
html_attr(fileurl);
html("' type='application/atom+xml'/>\n");
strbuf_release(&sb);
free(fileurl);
}
if (ctx.repo)
cgit_add_clone_urls(print_rel_vcs_link);
if (ctx.cfg.head_include)
html_include(ctx.cfg.head_include);
html("</head>\n");
html("<body>\n");
if (ctx.cfg.header)
html_include(ctx.cfg.header);
free(host);
}
| 0
|
28,737
|
static void e1000e_set_eitr(E1000ECore *core, int index, uint32_t val)
{
    uint32_t interval = val & 0xffff;
    uint32_t eitr_num = index - EITR;
    trace_e1000e_irq_eitr_set(eitr_num, val);
    core->eitr_guest_value[eitr_num] = interval;
    core->mac[index] = MAX(interval, E1000E_MIN_XITR);
}
| 0
|
32,415
|
create_schema(MYSQL *mysql, const char *db, statement *stmt,
option_string *engine_stmt)
{
char query[HUGE_STRING_LENGTH];
statement *ptr;
statement *after_create;
int len;
ulonglong count;
DBUG_ENTER("create_schema");
len= snprintf(query, HUGE_STRING_LENGTH, "CREATE SCHEMA `%s`", db);
if (verbose >= 2)
printf("Loading Pre-data\n");
if (run_query(mysql, query, len))
{
fprintf(stderr,"%s: Cannot create schema %s : %s\n", my_progname, db,
mysql_error(mysql));
exit(1);
}
if (opt_only_print)
{
printf("use %s;\n", db);
}
else
{
if (verbose >= 3)
printf("%s;\n", query);
if (mysql_select_db(mysql, db))
{
fprintf(stderr,"%s: Cannot select schema '%s': %s\n",my_progname, db,
mysql_error(mysql));
exit(1);
}
}
if (engine_stmt)
{
len= snprintf(query, HUGE_STRING_LENGTH, "set storage_engine=`%s`",
engine_stmt->string);
if (run_query(mysql, query, len))
{
fprintf(stderr,"%s: Cannot set default engine: %s\n", my_progname,
mysql_error(mysql));
exit(1);
}
}
count= 0;
after_create= stmt;
limit_not_met:
for (ptr= after_create; ptr && ptr->length; ptr= ptr->next, count++)
{
if (auto_generate_sql && ( auto_generate_sql_number == count))
break;
if (engine_stmt && engine_stmt->option && ptr->type == CREATE_TABLE_TYPE)
{
char buffer[HUGE_STRING_LENGTH];
snprintf(buffer, HUGE_STRING_LENGTH, "%s %s", ptr->string,
engine_stmt->option);
if (run_query(mysql, buffer, strlen(buffer)))
{
fprintf(stderr,"%s: Cannot run query %.*s ERROR : %s\n",
my_progname, (uint)ptr->length, ptr->string, mysql_error(mysql));
exit(1);
}
}
else
{
if (run_query(mysql, ptr->string, ptr->length))
{
fprintf(stderr,"%s: Cannot run query %.*s ERROR : %s\n",
my_progname, (uint)ptr->length, ptr->string, mysql_error(mysql));
exit(1);
}
}
}
if (auto_generate_sql && (auto_generate_sql_number > count ))
{
/* Special case for auto create, we don't want to create tables twice */
after_create= stmt->next;
goto limit_not_met;
}
DBUG_RETURN(0);
}
| 0
|
488,263
|
njs_object_prop_define(njs_vm_t *vm, njs_value_t *object,
njs_value_t *name, njs_value_t *value, njs_object_prop_define_t type)
{
uint32_t length;
njs_int_t ret;
njs_array_t *array;
njs_object_prop_t *prop, *prev;
njs_property_query_t pq;
static const njs_str_t length_key = njs_str("length");
if (njs_slow_path(!njs_is_key(name))) {
ret = njs_value_to_key(vm, name, name);
if (njs_slow_path(ret != NJS_OK)) {
return ret;
}
}
again:
njs_property_query_init(&pq, NJS_PROPERTY_QUERY_SET, 1);
ret = njs_property_query(vm, &pq, object, name);
if (njs_slow_path(ret == NJS_ERROR)) {
return ret;
}
prop = njs_object_prop_alloc(vm, name, &njs_value_invalid,
NJS_ATTRIBUTE_UNSET);
if (njs_slow_path(prop == NULL)) {
return NJS_ERROR;
}
switch (type) {
case NJS_OBJECT_PROP_DESCRIPTOR:
if (njs_descriptor_prop(vm, prop, value) != NJS_OK) {
return NJS_ERROR;
}
break;
case NJS_OBJECT_PROP_GETTER:
prop->getter = *value;
njs_set_invalid(&prop->setter);
prop->enumerable = NJS_ATTRIBUTE_TRUE;
prop->configurable = NJS_ATTRIBUTE_TRUE;
break;
case NJS_OBJECT_PROP_SETTER:
prop->setter = *value;
njs_set_invalid(&prop->getter);
prop->enumerable = NJS_ATTRIBUTE_TRUE;
prop->configurable = NJS_ATTRIBUTE_TRUE;
break;
}
if (njs_fast_path(ret == NJS_DECLINED)) {
set_prop:
if (!njs_object(object)->extensible) {
njs_key_string_get(vm, &pq.key, &pq.lhq.key);
njs_type_error(vm, "Cannot add property \"%V\", "
"object is not extensible", &pq.lhq.key);
return NJS_ERROR;
}
if (njs_slow_path(njs_is_typed_array(object)
&& njs_is_string(name)))
{
/* Integer-Indexed Exotic Objects [[DefineOwnProperty]]. */
if (!isnan(njs_string_to_index(name))) {
njs_type_error(vm, "Invalid typed array index");
return NJS_ERROR;
}
}
/* 6.2.5.6 CompletePropertyDescriptor */
if (njs_is_accessor_descriptor(prop)) {
if (!njs_is_valid(&prop->getter)) {
njs_set_undefined(&prop->getter);
}
if (!njs_is_valid(&prop->setter)) {
njs_set_undefined(&prop->setter);
}
} else {
if (prop->writable == NJS_ATTRIBUTE_UNSET) {
prop->writable = 0;
}
if (!njs_is_valid(&prop->value)) {
njs_set_undefined(&prop->value);
}
}
if (prop->enumerable == NJS_ATTRIBUTE_UNSET) {
prop->enumerable = 0;
}
if (prop->configurable == NJS_ATTRIBUTE_UNSET) {
prop->configurable = 0;
}
if (njs_slow_path(pq.lhq.value != NULL)) {
prev = pq.lhq.value;
if (njs_slow_path(prev->type == NJS_WHITEOUT)) {
/* Previously deleted property. */
*prev = *prop;
}
} else {
pq.lhq.value = prop;
pq.lhq.replace = 0;
pq.lhq.pool = vm->mem_pool;
ret = njs_lvlhsh_insert(njs_object_hash(object), &pq.lhq);
if (njs_slow_path(ret != NJS_OK)) {
njs_internal_error(vm, "lvlhsh insert failed");
return NJS_ERROR;
}
}
return NJS_OK;
}
/* Updating existing prop. */
prev = pq.lhq.value;
switch (prev->type) {
case NJS_PROPERTY:
case NJS_PROPERTY_HANDLER:
break;
case NJS_PROPERTY_REF:
if (njs_is_accessor_descriptor(prop)
|| prop->configurable == NJS_ATTRIBUTE_FALSE
|| prop->enumerable == NJS_ATTRIBUTE_FALSE
|| prop->writable == NJS_ATTRIBUTE_FALSE)
{
array = njs_array(object);
length = array->length;
ret = njs_array_convert_to_slow_array(vm, array);
if (njs_slow_path(ret != NJS_OK)) {
return ret;
}
ret = njs_array_length_redefine(vm, object, length);
if (njs_slow_path(ret != NJS_OK)) {
return ret;
}
goto again;
}
if (njs_is_valid(&prop->value)) {
*prev->value.data.u.value = prop->value;
} else {
njs_set_undefined(prev->value.data.u.value);
}
return NJS_OK;
case NJS_PROPERTY_TYPED_ARRAY_REF:
if (njs_is_accessor_descriptor(prop)) {
goto exception;
}
if (prop->configurable == NJS_ATTRIBUTE_TRUE ||
prop->enumerable == NJS_ATTRIBUTE_FALSE ||
prop->writable == NJS_ATTRIBUTE_FALSE)
{
goto exception;
}
if (njs_is_valid(&prop->value)) {
return njs_typed_array_set_value(vm, njs_typed_array(&prev->value),
prev->value.data.magic32,
&prop->value);
}
return NJS_OK;
default:
njs_internal_error(vm, "unexpected property type \"%s\" "
"while defining property",
njs_prop_type_string(prev->type));
return NJS_ERROR;
}
/* 9.1.6.3 ValidateAndApplyPropertyDescriptor */
if (!prev->configurable) {
if (prop->configurable == NJS_ATTRIBUTE_TRUE) {
goto exception;
}
if (prop->enumerable != NJS_ATTRIBUTE_UNSET
&& prev->enumerable != prop->enumerable)
{
goto exception;
}
}
if (njs_is_generic_descriptor(prop)) {
goto done;
}
if (njs_is_data_descriptor(prev) != njs_is_data_descriptor(prop)) {
if (!prev->configurable) {
goto exception;
}
/*
* 6.b-c Preserve the existing values of the converted property's
* [[Configurable]] and [[Enumerable]] attributes and set the rest of
* the property's attributes to their default values.
*/
if (pq.temp) {
pq.lhq.value = NULL;
prop->configurable = prev->configurable;
prop->enumerable = prev->enumerable;
goto set_prop;
}
prev->type = prop->type;
if (njs_is_data_descriptor(prev)) {
njs_set_undefined(&prev->getter);
njs_set_undefined(&prev->setter);
njs_set_invalid(&prev->value);
prev->writable = NJS_ATTRIBUTE_UNSET;
} else {
njs_set_undefined(&prev->value);
prev->writable = NJS_ATTRIBUTE_FALSE;
njs_set_invalid(&prev->getter);
njs_set_invalid(&prev->setter);
}
} else if (njs_is_data_descriptor(prev)
&& njs_is_data_descriptor(prop))
{
if (!prev->configurable && !prev->writable) {
if (prop->writable == NJS_ATTRIBUTE_TRUE) {
goto exception;
}
if (njs_is_valid(&prop->value)
&& prev->type != NJS_PROPERTY_HANDLER
&& !njs_values_same(&prop->value, &prev->value))
{
goto exception;
}
}
} else {
if (!prev->configurable) {
if (njs_is_valid(&prop->getter)
&& !njs_values_strict_equal(&prop->getter, &prev->getter))
{
goto exception;
}
if (njs_is_valid(&prop->setter)
&& !njs_values_strict_equal(&prop->setter, &prev->setter))
{
goto exception;
}
}
}
done:
if (njs_is_valid(&prop->value)) {
if (prev->type == NJS_PROPERTY_HANDLER) {
if (prev->writable) {
ret = prev->value.data.u.prop_handler(vm, prev, object,
&prop->value,
&vm->retval);
if (njs_slow_path(ret == NJS_ERROR)) {
return ret;
}
if (ret == NJS_DECLINED) {
pq.lhq.value = NULL;
goto set_prop;
}
}
} else {
if (njs_slow_path(pq.lhq.key_hash == NJS_LENGTH_HASH)) {
if (njs_strstr_eq(&pq.lhq.key, &length_key)) {
ret = njs_array_length_set(vm, object, prev, &prop->value);
if (ret != NJS_DECLINED) {
return ret;
}
}
}
prev->value = prop->value;
}
}
/*
* 9. For each field of Desc that is present, set the corresponding
* attribute of the property named P of object O to the value of the field.
*/
if (njs_is_valid(&prop->getter)) {
prev->getter = prop->getter;
}
if (njs_is_valid(&prop->setter)) {
prev->setter = prop->setter;
}
if (prop->writable != NJS_ATTRIBUTE_UNSET) {
prev->writable = prop->writable;
}
if (prop->enumerable != NJS_ATTRIBUTE_UNSET) {
prev->enumerable = prop->enumerable;
}
if (prop->configurable != NJS_ATTRIBUTE_UNSET) {
prev->configurable = prop->configurable;
}
return NJS_OK;
exception:
njs_key_string_get(vm, &pq.key, &pq.lhq.key);
njs_type_error(vm, "Cannot redefine property: \"%V\"", &pq.lhq.key);
return NJS_ERROR;
}
| 0
|
165,005
|
AcpiNsExecModuleCode (
ACPI_OPERAND_OBJECT *MethodObj,
ACPI_EVALUATE_INFO *Info)
{
ACPI_OPERAND_OBJECT *ParentObj;
ACPI_NAMESPACE_NODE *ParentNode;
ACPI_OBJECT_TYPE Type;
ACPI_STATUS Status;
ACPI_FUNCTION_TRACE (NsExecModuleCode);
/*
* Get the parent node. We cheat by using the NextObject field
* of the method object descriptor.
*/
ParentNode = ACPI_CAST_PTR (
ACPI_NAMESPACE_NODE, MethodObj->Method.NextObject);
Type = AcpiNsGetType (ParentNode);
/*
* Get the region handler and save it in the method object. We may need
* this if an operation region declaration causes a _REG method to be run.
*
* We can't do this in AcpiPsLinkModuleCode because
* AcpiGbl_RootNode->Object is NULL at PASS1.
*/
if ((Type == ACPI_TYPE_DEVICE) && ParentNode->Object)
{
MethodObj->Method.Dispatch.Handler =
ParentNode->Object->Device.Handler;
}
/* Must clear NextObject (AcpiNsAttachObject needs the field) */
MethodObj->Method.NextObject = NULL;
/* Initialize the evaluation information block */
memset (Info, 0, sizeof (ACPI_EVALUATE_INFO));
Info->PrefixNode = ParentNode;
/*
* Get the currently attached parent object. Add a reference,
* because the ref count will be decreased when the method object
* is installed to the parent node.
*/
ParentObj = AcpiNsGetAttachedObject (ParentNode);
if (ParentObj)
{
AcpiUtAddReference (ParentObj);
}
/* Install the method (module-level code) in the parent node */
Status = AcpiNsAttachObject (ParentNode, MethodObj, ACPI_TYPE_METHOD);
if (ACPI_FAILURE (Status))
{
goto Exit;
}
/* Execute the parent node as a control method */
Status = AcpiNsEvaluate (Info);
ACPI_DEBUG_PRINT ((ACPI_DB_INIT_NAMES,
"Executed module-level code at %p\n",
MethodObj->Method.AmlStart));
/* Delete a possible implicit return value (in slack mode) */
if (Info->ReturnObject)
{
AcpiUtRemoveReference (Info->ReturnObject);
}
/* Detach the temporary method object */
AcpiNsDetachObject (ParentNode);
/* Restore the original parent object */
if (ParentObj)
{
Status = AcpiNsAttachObject (ParentNode, ParentObj, Type);
}
else
{
ParentNode->Type = (UINT8) Type;
}
Exit:
if (ParentObj)
{
AcpiUtRemoveReference (ParentObj);
}
return_VOID;
}
| 0
|
94,624
|
static int usb_internal_control_msg(struct usb_device *usb_dev,
unsigned int pipe,
struct usb_ctrlrequest *cmd,
void *data, int len, int timeout)
{
struct urb *urb;
int retv;
int length;
urb = usb_alloc_urb(0, GFP_NOIO);
if (!urb)
return -ENOMEM;
usb_fill_control_urb(urb, usb_dev, pipe, (unsigned char *)cmd, data,
len, usb_api_blocking_completion, NULL);
retv = usb_start_wait_urb(urb, timeout, &length);
if (retv < 0)
return retv;
else
return length;
}
| 0
|
100,786
|
svcauth_gss_set_svc_name(gss_name_t name)
{
OM_uint32 maj_stat, min_stat;
log_debug("in svcauth_gss_set_svc_name()");
if (svcauth_gss_name != NULL) {
maj_stat = gss_release_name(&min_stat, &svcauth_gss_name);
if (maj_stat != GSS_S_COMPLETE) {
log_status("gss_release_name", maj_stat, min_stat);
return (FALSE);
}
svcauth_gss_name = NULL;
}
if (svcauth_gss_name == GSS_C_NO_NAME)
return (TRUE);
maj_stat = gss_duplicate_name(&min_stat, name, &svcauth_gss_name);
if (maj_stat != GSS_S_COMPLETE) {
log_status("gss_duplicate_name", maj_stat, min_stat);
return (FALSE);
}
return (TRUE);
}
| 0
|
377,733
|
static void audit_buffer_free(struct audit_buffer *ab)
{
unsigned long flags;
if (!ab)
return;
if (ab->skb)
kfree_skb(ab->skb);
spin_lock_irqsave(&audit_freelist_lock, flags);
if (audit_freelist_count > AUDIT_MAXFREE)
kfree(ab);
else {
audit_freelist_count++;
list_add(&ab->list, &audit_freelist);
}
spin_unlock_irqrestore(&audit_freelist_lock, flags);
}
| 0
|
237,378
|
static bool ExecuteMoveUpAndModifySelection(LocalFrame& frame,
Event*,
EditorCommandSource,
const String&) {
frame.Selection().Modify(SelectionModifyAlteration::kExtend,
SelectionModifyDirection::kBackward,
TextGranularity::kLine, SetSelectionBy::kUser);
return true;
}
| 0
|
96,204
|
bool isSpace(char ch) const {
return ch == ' ' || ch == '\n' || ch == '\t' || ch == '\f';
}
| 0
|
121,796
|
static pyc_object *get_int_object(RBuffer *buffer) {
bool error = false;
pyc_object *ret = NULL;
st32 i = get_st32 (buffer, &error);
if (error) {
return NULL;
}
ret = R_NEW0 (pyc_object);
if (!ret) {
return NULL;
}
ret->type = TYPE_INT;
ret->data = r_str_newf ("%d", i);
if (!ret->data) {
R_FREE (ret);
}
return ret;
}
| 0
|
259,309
|
void mg_mgr_free(struct mg_mgr *mgr) {
struct mg_connection *c;
for (c = mgr->conns; c != NULL; c = c->next) c->is_closing = 1;
mg_mgr_poll(mgr, 0);
#if MG_ARCH == MG_ARCH_FREERTOS_TCP
FreeRTOS_DeleteSocketSet(mgr->ss);
#endif
LOG(LL_INFO, ("All connections closed"));
}
| 0
|
325,228
|
static int bdrv_inherited_flags(int flags)
{
/* Enable protocol handling, disable format probing for bs->file */
flags |= BDRV_O_PROTOCOL;
/* Our block drivers take care to send flushes and respect unmap policy,
* so we can enable both unconditionally on lower layers. */
flags |= BDRV_O_CACHE_WB | BDRV_O_UNMAP;
/* The backing file of a temporary snapshot is read-only */
if (flags & BDRV_O_SNAPSHOT) {
flags &= ~BDRV_O_RDWR;
}
/* Clear flags that only apply to the top layer */
flags &= ~(BDRV_O_SNAPSHOT | BDRV_O_NO_BACKING);
return flags;
}
| 0
|
29,657
|
void config_nic_rules(config_tree *ptree)
{
    nic_rule_node *curr_node;
    sockaddr_u addr;
    nic_rule_match match_type;
    nic_rule_action action;
    char *if_name;
    char *pchSlash;
    int prefixlen;
    int addrbits;
    curr_node = HEAD_PFIFO(ptree->nic_rules);
    if (curr_node != NULL && (HAVE_OPT(NOVIRTUALIPS) || HAVE_OPT(INTERFACE))) {
        msyslog(LOG_ERR,
                "interface/nic rules are not allowed with --interface (-I) or --novirtualips (-L)%s",
                (input_from_file) ? ", exiting" : "");
        if (input_from_file)
            exit(1);
        else
            return;
    }
    for (; curr_node != NULL; curr_node = curr_node->link) {
        prefixlen = -1;
        if_name = curr_node->if_name;
        if (if_name != NULL)
            if_name = estrdup(if_name);
        switch (curr_node->match_class) {
        default:
            match_type = MATCH_ALL;
            NTP_INSIST(0);
            break;
        case 0:
            NTP_INSIST(if_name != NULL);
            pchSlash = strchr(if_name, '/');
            if (pchSlash != NULL)
                *pchSlash = '\0';
            if (is_ip_address(if_name, AF_UNSPEC, &addr)) {
                match_type = MATCH_IFADDR;
                if (pchSlash != NULL) {
                    sscanf(pchSlash + 1, "%d", &prefixlen);
                    addrbits = 8 * SIZEOF_INADDR(AF(&addr));
                    prefixlen = max(-1, prefixlen);
                    prefixlen = min(prefixlen, addrbits);
                }
            } else {
                match_type = MATCH_IFNAME;
                if (pchSlash != NULL)
                    *pchSlash = '/';
            }
            break;
        case T_All:
            match_type = MATCH_ALL;
            break;
        case T_Ipv4:
            match_type = MATCH_IPV4;
            break;
        case T_Ipv6:
            match_type = MATCH_IPV6;
            break;
        case T_Wildcard:
            match_type = MATCH_WILDCARD;
            break;
        }
        switch (curr_node->action) {
        default:
            action = ACTION_LISTEN;
            NTP_INSIST(0);
            break;
        case T_Listen:
            action = ACTION_LISTEN;
            break;
        case T_Ignore:
            action = ACTION_IGNORE;
            break;
        case T_Drop:
            action = ACTION_DROP;
            break;
        }
        add_nic_rule(match_type, if_name, prefixlen, action);
        timer_interfacetimeout(current_time + 2);
        if (if_name != NULL)
            free(if_name);
    }
}
| 0
|
115,213
|
ext4_xattr_create_cache(void)
{
return mb2_cache_create(HASH_BUCKET_BITS);
}
| 0
|
252,124
|
void CL_Disconnect_f( void ) {
SCR_StopCinematic();
Cvar_Set( "savegame_loading", "0" );
Cvar_Set( "g_reloading", "0" );
if ( clc.state != CA_DISCONNECTED && clc.state != CA_CINEMATIC ) {
Com_Error( ERR_DISCONNECT, "Disconnected from server" );
}
}
| 0
|
81,611
|
static void dump_esp_combs(struct sk_buff *skb, const struct xfrm_tmpl *t)
{
struct sadb_prop *p;
int i, k;
p = (struct sadb_prop*)skb_put(skb, sizeof(struct sadb_prop));
p->sadb_prop_len = sizeof(struct sadb_prop)/8;
p->sadb_prop_exttype = SADB_EXT_PROPOSAL;
p->sadb_prop_replay = 32;
memset(p->sadb_prop_reserved, 0, sizeof(p->sadb_prop_reserved));
for (i=0; ; i++) {
const struct xfrm_algo_desc *ealg = xfrm_ealg_get_byidx(i);
if (!ealg)
break;
if (!ealg->pfkey_supported)
continue;
if (!(ealg_tmpl_set(t, ealg) && ealg->available))
continue;
for (k = 1; ; k++) {
struct sadb_comb *c;
const struct xfrm_algo_desc *aalg = xfrm_aalg_get_byidx(k);
if (!aalg)
break;
if (!aalg->pfkey_supported)
continue;
if (!(aalg_tmpl_set(t, aalg) && aalg->available))
continue;
c = (struct sadb_comb*)skb_put(skb, sizeof(struct sadb_comb));
memset(c, 0, sizeof(*c));
p->sadb_prop_len += sizeof(struct sadb_comb)/8;
c->sadb_comb_auth = aalg->desc.sadb_alg_id;
c->sadb_comb_auth_minbits = aalg->desc.sadb_alg_minbits;
c->sadb_comb_auth_maxbits = aalg->desc.sadb_alg_maxbits;
c->sadb_comb_encrypt = ealg->desc.sadb_alg_id;
c->sadb_comb_encrypt_minbits = ealg->desc.sadb_alg_minbits;
c->sadb_comb_encrypt_maxbits = ealg->desc.sadb_alg_maxbits;
c->sadb_comb_hard_addtime = 24*60*60;
c->sadb_comb_soft_addtime = 20*60*60;
c->sadb_comb_hard_usetime = 8*60*60;
c->sadb_comb_soft_usetime = 7*60*60;
}
}
}
| 0
|
47,196
|
rfbBool TextChatOpen(rfbClient* client)
{
rfbTextChatMsg chat;
if (!SupportsClient2Server(client, rfbTextChat)) return TRUE;
chat.type = rfbTextChat;
chat.pad1 = 0;
chat.pad2 = 0;
chat.length = rfbClientSwap32IfLE(rfbTextChatOpen);
return (WriteToRFBServer(client, (char *)&chat, sz_rfbTextChatMsg) ? TRUE : FALSE);
}
| 0
|
152,146
|
static int ipgre_tunnel_validate(struct nlattr *tb[], struct nlattr *data[])
{
__be16 flags;
if (!data)
return 0;
flags = 0;
if (data[IFLA_GRE_IFLAGS])
flags |= nla_get_be16(data[IFLA_GRE_IFLAGS]);
if (data[IFLA_GRE_OFLAGS])
flags |= nla_get_be16(data[IFLA_GRE_OFLAGS]);
if (flags & (GRE_VERSION|GRE_ROUTING))
return -EINVAL;
return 0;
}
| 0
|
488,359
|
static void mld_send_initial_cr(struct inet6_dev *idev)
{
struct sk_buff *skb;
struct ifmcaddr6 *pmc;
int type;
if (mld_in_v1_mode(idev))
return;
skb = NULL;
for_each_mc_mclock(idev, pmc) {
if (pmc->mca_sfcount[MCAST_EXCLUDE])
type = MLD2_CHANGE_TO_EXCLUDE;
else
type = MLD2_ALLOW_NEW_SOURCES;
skb = add_grec(skb, pmc, type, 0, 0, 1);
}
if (skb)
mld_sendpack(skb);
}
| 0
|
101,187
|
static int http_buf_read(URLContext *h, uint8_t *buf, int size)
{
HTTPContext *s = h->priv_data;
int len;
/* read bytes from input buffer first */
len = s->buf_end - s->buf_ptr;
if (len > 0) {
if (len > size)
len = size;
memcpy(buf, s->buf_ptr, len);
s->buf_ptr += len;
} else {
uint64_t target_end = s->end_off ? s->end_off : s->filesize;
if ((!s->willclose || s->chunksize == UINT64_MAX) && s->off >= target_end)
return AVERROR_EOF;
len = ffurl_read(s->hd, buf, size);
if (!len && (!s->willclose || s->chunksize == UINT64_MAX) && s->off < target_end) {
av_log(h, AV_LOG_ERROR,
"Stream ends prematurely at %"PRIu64", should be %"PRIu64"\n",
s->off, target_end
);
return AVERROR(EIO);
}
}
if (len > 0) {
s->off += len;
if (s->chunksize > 0)
s->chunksize -= len;
}
return len;
}
| 0
|
226,549
|
status_t OMXNodeInstance::storeMetaDataInBuffers(
OMX_U32 portIndex, OMX_BOOL enable, MetadataBufferType *type) {
Mutex::Autolock autolock(mLock);
CLOG_CONFIG(storeMetaDataInBuffers, "%s:%u en:%d", portString(portIndex), portIndex, enable);
return storeMetaDataInBuffers_l(portIndex, enable, type);
}
| 0
|
184,776
|
void BrowserWindowGtk::UpdateDevToolsSplitPosition() {
if (!window_has_shown_)
return;
GtkAllocation contents_rect;
gtk_widget_get_allocation(contents_vsplit_, &contents_rect);
if (devtools_window_->dock_side() == DEVTOOLS_DOCK_SIDE_RIGHT) {
int split_offset = contents_rect.width -
devtools_window_->GetWidth(contents_rect.width);
gtk_paned_set_position(GTK_PANED(contents_hsplit_), split_offset);
} else {
int split_offset = contents_rect.height -
devtools_window_->GetHeight(contents_rect.height);
gtk_paned_set_position(GTK_PANED(contents_vsplit_), split_offset);
}
}
| 0
|
350,242
|
static bool io_uring_cancel_files(struct io_ring_ctx *ctx,
struct files_struct *files)
{
if (list_empty_careful(&ctx->inflight_list))
return false;
io_cancel_defer_files(ctx, files);
/* cancel all at once, should be faster than doing it one by one*/
io_wq_cancel_cb(ctx->io_wq, io_wq_files_match, files, true);
while (!list_empty_careful(&ctx->inflight_list)) {
struct io_kiocb *cancel_req = NULL, *req;
DEFINE_WAIT(wait);
spin_lock_irq(&ctx->inflight_lock);
list_for_each_entry(req, &ctx->inflight_list, inflight_entry) {
if (req->work.files != files)
continue;
/* req is being completed, ignore */
if (!refcount_inc_not_zero(&req->refs))
continue;
cancel_req = req;
break;
}
if (cancel_req)
prepare_to_wait(&ctx->inflight_wait, &wait,
TASK_UNINTERRUPTIBLE);
spin_unlock_irq(&ctx->inflight_lock);
/* We need to keep going until we don't find a matching req */
if (!cancel_req)
break;
/* cancel this request, or head link requests */
io_attempt_cancel(ctx, cancel_req);
io_put_req(cancel_req);
/* cancellations _may_ trigger task work */
io_run_task_work();
schedule();
finish_wait(&ctx->inflight_wait, &wait);
}
return true;
}
| 1
|