| idx (int64) | func (string) | target (int64) |
|---|---|---|
168,939
|
static void float32ArrayMethodMethod(const v8::FunctionCallbackInfo<v8::Value>& info)
{
TestObjectPython* imp = V8TestObjectPython::toNative(info.Holder());
v8SetReturnValue(info, imp->float32ArrayMethod());
}
| 0
|
230,011
|
void DelegatedFrameHost::DidNotProduceFrame(const viz::BeginFrameAck& ack) {
DCHECK(!ack.has_damage);
support_->DidNotProduceFrame(ack);
}
| 0
|
410,635
|
static NETJOIN_REC *netjoin_find(IRC_SERVER_REC *server, const char *nick)
{
NETJOIN_SERVER_REC *srec;
GSList *tmp;
g_return_val_if_fail(server != NULL, NULL);
g_return_val_if_fail(nick != NULL, NULL);
srec = netjoin_find_server(server);
if (srec == NULL) return NULL;
for (tmp = srec->netjoins; tmp != NULL; tmp = tmp->next) {
NETJOIN_REC *rec = tmp->data;
if (g_ascii_strcasecmp(rec->nick, nick) == 0)
return rec;
}
return NULL;
}
| 0
|
75,817
|
R_API RList *r_bin_get_mem(RBin *bin) {
RBinObject *o = r_bin_cur_object (bin);
return o? o->mem: NULL;
}
| 0
|
100,484
|
mariadb_field_attr(MARIADB_CONST_STRING *attr,
const MYSQL_FIELD *field,
enum mariadb_field_attr_t type)
{
MA_FIELD_EXTENSION *ext= (MA_FIELD_EXTENSION*) field->extension;
if (!ext || type > MARIADB_FIELD_ATTR_LAST)
{
*attr= null_const_string;
return 1;
}
*attr= ext->metadata[type];
return 0;
}
| 0
|
216,898
|
void LoginDisplayHostWebUI::OnUserSwitchAnimationFinished() {
ShutdownDisplayHost();
}
| 0
|
320,315
|
static gboolean nbd_negotiate_continue(QIOChannel *ioc,
GIOCondition condition,
void *opaque)
{
qemu_coroutine_enter(opaque, NULL);
return TRUE;
}
| 1
|
111,392
|
static avifBool avifParseItemPropertiesBox(avifMeta * meta, const uint8_t * raw, size_t rawLen)
{
BEGIN_STREAM(s, raw, rawLen);
avifBoxHeader ipcoHeader;
CHECK(avifROStreamReadBoxHeader(&s, &ipcoHeader));
if (memcmp(ipcoHeader.type, "ipco", 4) != 0) {
return AVIF_FALSE;
}
// Read all item properties inside of ItemPropertyContainerBox
CHECK(avifParseItemPropertyContainerBox(&meta->properties, avifROStreamCurrent(&s), ipcoHeader.size));
CHECK(avifROStreamSkip(&s, ipcoHeader.size));
// Now read all ItemPropertyAssociation until the end of the box, and make associations
while (avifROStreamHasBytesLeft(&s, 1)) {
avifBoxHeader ipmaHeader;
CHECK(avifROStreamReadBoxHeader(&s, &ipmaHeader));
if (!memcmp(ipmaHeader.type, "ipma", 4)) {
CHECK(avifParseItemPropertyAssociation(meta, avifROStreamCurrent(&s), ipmaHeader.size));
} else {
// These must all be type ipma
return AVIF_FALSE;
}
CHECK(avifROStreamSkip(&s, ipmaHeader.size));
}
return AVIF_TRUE;
}
| 0
|
371,013
|
guestfs___check_linux_root (guestfs_h *g, struct inspect_fs *fs)
{
int r;
fs->type = OS_TYPE_LINUX;
if (guestfs_exists (g, "/etc/lsb-release") > 0) {
r = parse_lsb_release (g, fs);
if (r == -1) /* error */
return -1;
if (r == 1) /* ok - detected the release from this file */
goto skip_release_checks;
}
if (guestfs_exists (g, "/etc/redhat-release") > 0) {
fs->distro = OS_DISTRO_REDHAT_BASED; /* Something generic Red Hat-like. */
if (parse_release_file (g, fs, "/etc/redhat-release") == -1)
return -1;
char *major, *minor;
if ((major = match1 (g, fs->product_name, re_fedora)) != NULL) {
fs->distro = OS_DISTRO_FEDORA;
fs->major_version = guestfs___parse_unsigned_int (g, major);
free (major);
if (fs->major_version == -1)
return -1;
}
else if (match2 (g, fs->product_name, re_rhel_old, &major, &minor) ||
match2 (g, fs->product_name, re_rhel, &major, &minor)) {
fs->distro = OS_DISTRO_RHEL;
fs->major_version = guestfs___parse_unsigned_int (g, major);
free (major);
if (fs->major_version == -1) {
free (minor);
return -1;
}
fs->minor_version = guestfs___parse_unsigned_int (g, minor);
free (minor);
if (fs->minor_version == -1)
return -1;
}
else if ((major = match1 (g, fs->product_name, re_rhel_no_minor)) != NULL) {
fs->distro = OS_DISTRO_RHEL;
fs->major_version = guestfs___parse_unsigned_int (g, major);
free (major);
if (fs->major_version == -1)
return -1;
fs->minor_version = 0;
}
else if (match2 (g, fs->product_name, re_centos_old, &major, &minor) ||
match2 (g, fs->product_name, re_centos, &major, &minor)) {
fs->distro = OS_DISTRO_CENTOS;
fs->major_version = guestfs___parse_unsigned_int (g, major);
free (major);
if (fs->major_version == -1) {
free (minor);
return -1;
}
fs->minor_version = guestfs___parse_unsigned_int (g, minor);
free (minor);
if (fs->minor_version == -1)
return -1;
}
else if ((major = match1 (g, fs->product_name, re_centos_no_minor)) != NULL) {
fs->distro = OS_DISTRO_CENTOS;
fs->major_version = guestfs___parse_unsigned_int (g, major);
free (major);
if (fs->major_version == -1)
return -1;
fs->minor_version = 0;
}
else if (match2 (g, fs->product_name, re_scientific_linux_old, &major, &minor) ||
match2 (g, fs->product_name, re_scientific_linux, &major, &minor)) {
fs->distro = OS_DISTRO_SCIENTIFIC_LINUX;
fs->major_version = guestfs___parse_unsigned_int (g, major);
free (major);
if (fs->major_version == -1) {
free (minor);
return -1;
}
fs->minor_version = guestfs___parse_unsigned_int (g, minor);
free (minor);
if (fs->minor_version == -1)
return -1;
}
else if ((major = match1 (g, fs->product_name, re_scientific_linux_no_minor)) != NULL) {
fs->distro = OS_DISTRO_SCIENTIFIC_LINUX;
fs->major_version = guestfs___parse_unsigned_int (g, major);
free (major);
if (fs->major_version == -1)
return -1;
fs->minor_version = 0;
}
}
else if (guestfs_exists (g, "/etc/debian_version") > 0) {
fs->distro = OS_DISTRO_DEBIAN;
if (parse_release_file (g, fs, "/etc/debian_version") == -1)
return -1;
if (guestfs___parse_major_minor (g, fs) == -1)
return -1;
}
else if (guestfs_exists (g, "/etc/pardus-release") > 0) {
fs->distro = OS_DISTRO_PARDUS;
if (parse_release_file (g, fs, "/etc/pardus-release") == -1)
return -1;
if (guestfs___parse_major_minor (g, fs) == -1)
return -1;
}
else if (guestfs_exists (g, "/etc/arch-release") > 0) {
fs->distro = OS_DISTRO_ARCHLINUX;
/* /etc/arch-release file is empty and I can't see a way to
* determine the actual release or product string.
*/
}
else if (guestfs_exists (g, "/etc/gentoo-release") > 0) {
fs->distro = OS_DISTRO_GENTOO;
if (parse_release_file (g, fs, "/etc/gentoo-release") == -1)
return -1;
if (guestfs___parse_major_minor (g, fs) == -1)
return -1;
}
else if (guestfs_exists (g, "/etc/meego-release") > 0) {
fs->distro = OS_DISTRO_MEEGO;
if (parse_release_file (g, fs, "/etc/meego-release") == -1)
return -1;
if (guestfs___parse_major_minor (g, fs) == -1)
return -1;
}
else if (guestfs_exists (g, "/etc/slackware-version") > 0) {
fs->distro = OS_DISTRO_SLACKWARE;
if (parse_release_file (g, fs, "/etc/slackware-version") == -1)
return -1;
if (guestfs___parse_major_minor (g, fs) == -1)
return -1;
}
else if (guestfs_exists (g, "/etc/ttylinux-target") > 0) {
fs->distro = OS_DISTRO_TTYLINUX;
if (parse_release_file (g, fs, "/etc/ttylinux-target") == -1)
return -1;
if (guestfs___parse_major_minor (g, fs) == -1)
return -1;
}
else if (guestfs_exists (g, "/etc/SuSE-release") > 0) {
fs->distro = OS_DISTRO_SUSE_BASED;
if (parse_suse_release (g, fs, "/etc/SuSE-release") == -1)
return -1;
}
/* Buildroot (http://buildroot.net) is an embedded Linux distro
* toolkit. It is used by specific distros such as Cirros.
*/
else if (guestfs_exists (g, "/etc/br-version") > 0) {
if (guestfs_exists (g, "/usr/share/cirros/logo") > 0)
fs->distro = OS_DISTRO_CIRROS;
else
fs->distro = OS_DISTRO_BUILDROOT;
/* /etc/br-version has the format YYYY.MM[-git/hg/svn release] */
if (parse_release_file (g, fs, "/etc/br-version") == -1)
return -1;
if (guestfs___parse_major_minor (g, fs) == -1)
return -1;
}
skip_release_checks:;
/* Determine the architecture. */
check_architecture (g, fs);
/* We already know /etc/fstab exists because it's part of the test
* for Linux root above. We must now parse this file to determine
* which filesystems are used by the operating system and how they
* are mounted.
*/
const char *configfiles[] = { "/etc/fstab", "/etc/mdadm.conf", NULL };
if (inspect_with_augeas (g, fs, configfiles, check_fstab) == -1)
return -1;
/* Determine hostname. */
if (check_hostname_unix (g, fs) == -1)
return -1;
return 0;
}
| 0
|
432,152
|
static void line6_unlink_audio_urbs(struct snd_line6_pcm *line6pcm,
struct line6_pcm_stream *pcms)
{
int i;
for (i = 0; i < line6pcm->line6->iso_buffers; i++) {
if (test_bit(i, &pcms->active_urbs)) {
if (!test_and_set_bit(i, &pcms->unlink_urbs))
usb_unlink_urb(pcms->urbs[i]);
}
}
}
| 0
|
352,150
|
ecma_date_to_string_format (ecma_number_t datetime_number, /**< datetime */
const char *format_p) /**< format buffer */
{
const uint32_t date_buffer_length = 37;
JERRY_VLA (lit_utf8_byte_t, date_buffer, date_buffer_length);
lit_utf8_byte_t *dest_p = date_buffer;
while (*format_p != LIT_CHAR_NULL)
{
if (*format_p != LIT_CHAR_DOLLAR_SIGN)
{
*dest_p++ = (lit_utf8_byte_t) *format_p++;
continue;
}
format_p++;
const char *str_p = NULL;
int32_t number = 0;
int32_t number_length = 0;
switch (*format_p)
{
case LIT_CHAR_UPPERCASE_Y: /* Year. */
{
number = ecma_date_year_from_time (datetime_number);
if (number >= 100000 || number <= -100000)
{
number_length = 6;
}
else if (number >= 10000 || number <= -10000)
{
number_length = 5;
}
else
{
number_length = 4;
}
break;
}
case LIT_CHAR_LOWERCASE_Y: /* ISO Year: -000001, 0000, 0001, 9999, +012345 */
{
number = ecma_date_year_from_time (datetime_number);
if (0 <= number && number <= 9999)
{
number_length = 4;
}
else
{
number_length = 6;
}
break;
}
case LIT_CHAR_UPPERCASE_M: /* Month. */
{
int32_t month = ecma_date_month_from_time (datetime_number);
JERRY_ASSERT (month >= 0 && month <= 11);
str_p = month_names_p[month];
break;
}
case LIT_CHAR_UPPERCASE_O: /* Month as number. */
{
/* The 'ecma_date_month_from_time' (ECMA 262 v5, 15.9.1.4) returns a
* number from 0 to 11, but we have to print the month from 1 to 12
* for ISO 8601 standard (ECMA 262 v5, 15.9.1.15). */
number = ecma_date_month_from_time (datetime_number) + 1;
number_length = 2;
break;
}
case LIT_CHAR_UPPERCASE_D: /* Day. */
{
number = ecma_date_date_from_time (datetime_number);
number_length = 2;
break;
}
case LIT_CHAR_UPPERCASE_W: /* Day of week. */
{
int32_t day = ecma_date_week_day (datetime_number);
JERRY_ASSERT (day >= 0 && day <= 6);
str_p = day_names_p[day];
break;
}
case LIT_CHAR_LOWERCASE_H: /* Hour. */
{
number = ecma_date_hour_from_time (datetime_number);
number_length = 2;
break;
}
case LIT_CHAR_LOWERCASE_M: /* Minutes. */
{
number = ecma_date_min_from_time (datetime_number);
number_length = 2;
break;
}
case LIT_CHAR_LOWERCASE_S: /* Seconds. */
{
number = ecma_date_sec_from_time (datetime_number);
number_length = 2;
break;
}
case LIT_CHAR_LOWERCASE_I: /* Milliseconds. */
{
number = ecma_date_ms_from_time (datetime_number);
number_length = 3;
break;
}
case LIT_CHAR_LOWERCASE_Z: /* Time zone hours part. */
{
int32_t time_zone = (int32_t) ecma_date_local_time_zone_adjustment (datetime_number);
if (time_zone >= 0)
{
*dest_p++ = LIT_CHAR_PLUS;
}
else
{
*dest_p++ = LIT_CHAR_MINUS;
time_zone = -time_zone;
}
number = time_zone / ECMA_DATE_MS_PER_HOUR;
number_length = 2;
break;
}
default:
{
JERRY_ASSERT (*format_p == LIT_CHAR_UPPERCASE_Z); /* Time zone minutes part. */
int32_t time_zone = (int32_t) ecma_date_local_time_zone_adjustment (datetime_number);
if (time_zone < 0)
{
time_zone = -time_zone;
}
number = (time_zone % ECMA_DATE_MS_PER_HOUR) / ECMA_DATE_MS_PER_MINUTE;
number_length = 2;
break;
}
}
format_p++;
if (str_p != NULL)
{
/* Print string values: month or day name which is always 3 characters */
memcpy (dest_p, str_p, 3);
dest_p += 3;
continue;
}
/* Print right aligned number values. */
JERRY_ASSERT (number_length > 0);
if (number < 0)
{
number = -number;
*dest_p++ = '-';
}
else if (*(format_p - 1) == LIT_CHAR_LOWERCASE_Y && number_length == 6)
{
/* positive sign is compulsory for extended years */
*dest_p++ = '+';
}
dest_p += number_length;
lit_utf8_byte_t *buffer_p = dest_p;
do
{
buffer_p--;
*buffer_p = (lit_utf8_byte_t) ((number % 10) + (int32_t) LIT_CHAR_0);
number /= 10;
}
while (--number_length);
}
JERRY_ASSERT (dest_p <= date_buffer + date_buffer_length);
return ecma_make_string_value (ecma_new_ecma_string_from_utf8 (date_buffer,
(lit_utf8_size_t) (dest_p - date_buffer)));
} /* ecma_date_to_string_format */
| 1
|
241,696
|
bool xmp_has_property(XmpPtr xmp, const char *schema, const char *name)
{
CHECK_PTR(xmp, false);
RESET_ERROR;
bool ret = true;
auto txmp = reinterpret_cast<const SXMPMeta *>(xmp);
try {
ret = txmp->DoesPropertyExist(schema, name);
}
catch (const XMP_Error &e) {
set_error(e);
ret = false;
}
catch (...) {
ret = false;
}
return ret;
}
| 0
|
109,354
|
static int processTLSBlock(struct ndpi_detection_module_struct *ndpi_struct,
struct ndpi_flow_struct *flow) {
struct ndpi_packet_struct *packet = &flow->packet;
switch(packet->payload[0] /* block type */) {
case 0x01: /* Client Hello */
case 0x02: /* Server Hello */
processClientServerHello(ndpi_struct, flow);
flow->l4.tcp.tls.hello_processed = 1;
ndpi_int_tls_add_connection(ndpi_struct, flow, NDPI_PROTOCOL_TLS);
break;
case 0x0b: /* Certificate */
/* Important: populate the tls union fields only after
* ndpi_int_tls_add_connection has been called */
if(flow->l4.tcp.tls.hello_processed) {
processCertificate(ndpi_struct, flow);
flow->l4.tcp.tls.certificate_processed = 1;
}
break;
default:
return(-1);
}
return(0);
}
| 0
|
169,886
|
sc_get_driver(void)
{
struct sc_card_driver *iso_drv;
iso_drv = sc_get_iso7816_driver();
iso_ops = iso_drv->ops;
gpk_ops = *iso_ops;
gpk_ops.match_card = gpk_match_card;
gpk_ops.init = gpk_init;
gpk_ops.finish = gpk_finish;
gpk_ops.select_file = gpk_select_file;
gpk_ops.read_binary = gpk_read_binary;
gpk_ops.write_binary = gpk_write_binary;
gpk_ops.update_binary = gpk_update_binary;
gpk_ops.create_file = gpk_create_file;
/* gpk_ops.check_sw = gpk_check_sw; */
gpk_ops.card_ctl = gpk_card_ctl;
gpk_ops.set_security_env= gpk_set_security_env;
gpk_ops.restore_security_env= gpk_restore_security_env;
gpk_ops.compute_signature= gpk_compute_signature;
gpk_ops.decipher = gpk_decipher;
gpk_ops.pin_cmd = gpk_pin_cmd;
return &gpk_drv;
}
| 0
|
503,736
|
void PacketBypassCallback(Packet *p)
{
/* Don't try to bypass if flow is already out or
* if we have failed to do it once */
int state = SC_ATOMIC_GET(p->flow->flow_state);
if ((state == FLOW_STATE_LOCAL_BYPASSED) ||
(state == FLOW_STATE_CAPTURE_BYPASSED)) {
return;
}
if (p->BypassPacketsFlow && p->BypassPacketsFlow(p)) {
FlowUpdateState(p->flow, FLOW_STATE_CAPTURE_BYPASSED);
} else {
FlowUpdateState(p->flow, FLOW_STATE_LOCAL_BYPASSED);
}
}
| 0
|
439,588
|
__u32 skb_get_hash_perturb(const struct sk_buff *skb,
const siphash_key_t *perturb)
{
struct flow_keys keys;
return ___skb_get_hash(skb, &keys, perturb);
}
| 0
|
53,967
|
int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
struct ext4_map_blocks *map, int flags)
{
struct ext4_ext_path *path = NULL;
struct ext4_extent newex, *ex, *ex2;
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
ext4_fsblk_t newblock = 0;
int free_on_err = 0, err = 0, depth, ret;
unsigned int allocated = 0, offset = 0;
unsigned int allocated_clusters = 0;
struct ext4_allocation_request ar;
ext4_io_end_t *io = ext4_inode_aio(inode);
ext4_lblk_t cluster_offset;
int set_unwritten = 0;
bool map_from_cluster = false;
ext_debug("blocks %u/%u requested for inode %lu\n",
map->m_lblk, map->m_len, inode->i_ino);
trace_ext4_ext_map_blocks_enter(inode, map->m_lblk, map->m_len, flags);
/* find extent for this block */
path = ext4_find_extent(inode, map->m_lblk, NULL, 0);
if (IS_ERR(path)) {
err = PTR_ERR(path);
path = NULL;
goto out2;
}
depth = ext_depth(inode);
/*
* consistent leaf must not be empty;
* this situation is possible, though, _during_ tree modification;
* this is why assert can't be put in ext4_find_extent()
*/
if (unlikely(path[depth].p_ext == NULL && depth != 0)) {
EXT4_ERROR_INODE(inode, "bad extent address "
"lblock: %lu, depth: %d pblock %lld",
(unsigned long) map->m_lblk, depth,
path[depth].p_block);
err = -EIO;
goto out2;
}
ex = path[depth].p_ext;
if (ex) {
ext4_lblk_t ee_block = le32_to_cpu(ex->ee_block);
ext4_fsblk_t ee_start = ext4_ext_pblock(ex);
unsigned short ee_len;
/*
* unwritten extents are treated as holes, except that
* we split out initialized portions during a write.
*/
ee_len = ext4_ext_get_actual_len(ex);
trace_ext4_ext_show_extent(inode, ee_block, ee_start, ee_len);
/* if found extent covers block, simply return it */
if (in_range(map->m_lblk, ee_block, ee_len)) {
newblock = map->m_lblk - ee_block + ee_start;
/* number of remaining blocks in the extent */
allocated = ee_len - (map->m_lblk - ee_block);
ext_debug("%u fit into %u:%d -> %llu\n", map->m_lblk,
ee_block, ee_len, newblock);
/*
* If the extent is initialized check whether the
* caller wants to convert it to unwritten.
*/
if ((!ext4_ext_is_unwritten(ex)) &&
(flags & EXT4_GET_BLOCKS_CONVERT_UNWRITTEN)) {
allocated = convert_initialized_extent(
handle, inode, map, &path,
flags, allocated, newblock);
goto out2;
} else if (!ext4_ext_is_unwritten(ex))
goto out;
ret = ext4_ext_handle_unwritten_extents(
handle, inode, map, &path, flags,
allocated, newblock);
if (ret < 0)
err = ret;
else
allocated = ret;
goto out2;
}
}
/*
* requested block isn't allocated yet;
* we couldn't try to create block if create flag is zero
*/
if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
/*
* put just found gap into cache to speed up
* subsequent requests
*/
ext4_ext_put_gap_in_cache(inode, path, map->m_lblk);
goto out2;
}
/*
* Okay, we need to do block allocation.
*/
newex.ee_block = cpu_to_le32(map->m_lblk);
cluster_offset = EXT4_LBLK_COFF(sbi, map->m_lblk);
/*
* If we are doing bigalloc, check to see if the extent returned
* by ext4_find_extent() implies a cluster we can use.
*/
if (cluster_offset && ex &&
get_implied_cluster_alloc(inode->i_sb, map, ex, path)) {
ar.len = allocated = map->m_len;
newblock = map->m_pblk;
map_from_cluster = true;
goto got_allocated_blocks;
}
/* find neighbour allocated blocks */
ar.lleft = map->m_lblk;
err = ext4_ext_search_left(inode, path, &ar.lleft, &ar.pleft);
if (err)
goto out2;
ar.lright = map->m_lblk;
ex2 = NULL;
err = ext4_ext_search_right(inode, path, &ar.lright, &ar.pright, &ex2);
if (err)
goto out2;
/* Check if the extent after searching to the right implies a
* cluster we can use. */
if ((sbi->s_cluster_ratio > 1) && ex2 &&
get_implied_cluster_alloc(inode->i_sb, map, ex2, path)) {
ar.len = allocated = map->m_len;
newblock = map->m_pblk;
map_from_cluster = true;
goto got_allocated_blocks;
}
/*
* See if request is beyond maximum number of blocks we can have in
* a single extent. For an initialized extent this limit is
* EXT_INIT_MAX_LEN and for an unwritten extent this limit is
* EXT_UNWRITTEN_MAX_LEN.
*/
if (map->m_len > EXT_INIT_MAX_LEN &&
!(flags & EXT4_GET_BLOCKS_UNWRIT_EXT))
map->m_len = EXT_INIT_MAX_LEN;
else if (map->m_len > EXT_UNWRITTEN_MAX_LEN &&
(flags & EXT4_GET_BLOCKS_UNWRIT_EXT))
map->m_len = EXT_UNWRITTEN_MAX_LEN;
/* Check if we can really insert (m_lblk)::(m_lblk + m_len) extent */
newex.ee_len = cpu_to_le16(map->m_len);
err = ext4_ext_check_overlap(sbi, inode, &newex, path);
if (err)
allocated = ext4_ext_get_actual_len(&newex);
else
allocated = map->m_len;
/* allocate new block */
ar.inode = inode;
ar.goal = ext4_ext_find_goal(inode, path, map->m_lblk);
ar.logical = map->m_lblk;
/*
* We calculate the offset from the beginning of the cluster
* for the logical block number, since when we allocate a
* physical cluster, the physical block should start at the
* same offset from the beginning of the cluster. This is
* needed so that future calls to get_implied_cluster_alloc()
* work correctly.
*/
offset = EXT4_LBLK_COFF(sbi, map->m_lblk);
ar.len = EXT4_NUM_B2C(sbi, offset+allocated);
ar.goal -= offset;
ar.logical -= offset;
if (S_ISREG(inode->i_mode))
ar.flags = EXT4_MB_HINT_DATA;
else
/* disable in-core preallocation for non-regular files */
ar.flags = 0;
if (flags & EXT4_GET_BLOCKS_NO_NORMALIZE)
ar.flags |= EXT4_MB_HINT_NOPREALLOC;
if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
ar.flags |= EXT4_MB_DELALLOC_RESERVED;
newblock = ext4_mb_new_blocks(handle, &ar, &err);
if (!newblock)
goto out2;
ext_debug("allocate new block: goal %llu, found %llu/%u\n",
ar.goal, newblock, allocated);
free_on_err = 1;
allocated_clusters = ar.len;
ar.len = EXT4_C2B(sbi, ar.len) - offset;
if (ar.len > allocated)
ar.len = allocated;
got_allocated_blocks:
/* try to insert new extent into found leaf and return */
ext4_ext_store_pblock(&newex, newblock + offset);
newex.ee_len = cpu_to_le16(ar.len);
/* Mark unwritten */
if (flags & EXT4_GET_BLOCKS_UNWRIT_EXT){
ext4_ext_mark_unwritten(&newex);
map->m_flags |= EXT4_MAP_UNWRITTEN;
/*
* io_end structure was created for every IO write to an
* unwritten extent. To avoid unnecessary conversion,
* here we flag the IO that really needs the conversion.
* For non asycn direct IO case, flag the inode state
* that we need to perform conversion when IO is done.
*/
if (flags & EXT4_GET_BLOCKS_PRE_IO)
set_unwritten = 1;
}
err = 0;
if ((flags & EXT4_GET_BLOCKS_KEEP_SIZE) == 0)
err = check_eofblocks_fl(handle, inode, map->m_lblk,
path, ar.len);
if (!err)
err = ext4_ext_insert_extent(handle, inode, &path,
&newex, flags);
if (!err && set_unwritten) {
if (io)
ext4_set_io_unwritten_flag(inode, io);
else
ext4_set_inode_state(inode,
EXT4_STATE_DIO_UNWRITTEN);
}
if (err && free_on_err) {
int fb_flags = flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE ?
EXT4_FREE_BLOCKS_NO_QUOT_UPDATE : 0;
/* free data blocks we just allocated */
/* not a good idea to call discard here directly,
* but otherwise we'd need to call it every free() */
ext4_discard_preallocations(inode);
ext4_free_blocks(handle, inode, NULL, newblock,
EXT4_C2B(sbi, allocated_clusters), fb_flags);
goto out2;
}
/* previous routine could use block we allocated */
newblock = ext4_ext_pblock(&newex);
allocated = ext4_ext_get_actual_len(&newex);
if (allocated > map->m_len)
allocated = map->m_len;
map->m_flags |= EXT4_MAP_NEW;
/*
* Update reserved blocks/metadata blocks after successful
* block allocation which had been deferred till now.
*/
if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) {
unsigned int reserved_clusters;
/*
* Check how many clusters we had reserved this allocated range
*/
reserved_clusters = get_reserved_cluster_alloc(inode,
map->m_lblk, allocated);
if (map_from_cluster) {
if (reserved_clusters) {
/*
* We have clusters reserved for this range.
* But since we are not doing actual allocation
* and are simply using blocks from previously
* allocated cluster, we should release the
* reservation and not claim quota.
*/
ext4_da_update_reserve_space(inode,
reserved_clusters, 0);
}
} else {
BUG_ON(allocated_clusters < reserved_clusters);
if (reserved_clusters < allocated_clusters) {
struct ext4_inode_info *ei = EXT4_I(inode);
int reservation = allocated_clusters -
reserved_clusters;
/*
* It seems we claimed few clusters outside of
* the range of this allocation. We should give
* it back to the reservation pool. This can
* happen in the following case:
*
* * Suppose s_cluster_ratio is 4 (i.e., each
* cluster has 4 blocks. Thus, the clusters
* are [0-3],[4-7],[8-11]...
* * First comes delayed allocation write for
* logical blocks 10 & 11. Since there were no
* previous delayed allocated blocks in the
* range [8-11], we would reserve 1 cluster
* for this write.
* * Next comes write for logical blocks 3 to 8.
* In this case, we will reserve 2 clusters
* (for [0-3] and [4-7]; and not for [8-11] as
* that range has a delayed allocated blocks.
* Thus total reserved clusters now becomes 3.
* * Now, during the delayed allocation writeout
* time, we will first write blocks [3-8] and
* allocate 3 clusters for writing these
* blocks. Also, we would claim all these
* three clusters above.
* * Now when we come here to writeout the
* blocks [10-11], we would expect to claim
* the reservation of 1 cluster we had made
* (and we would claim it since there are no
* more delayed allocated blocks in the range
* [8-11]. But our reserved cluster count had
* already gone to 0.
*
* Thus, at the step 4 above when we determine
* that there are still some unwritten delayed
* allocated blocks outside of our current
* block range, we should increment the
* reserved clusters count so that when the
* remaining blocks finally gets written, we
* could claim them.
*/
dquot_reserve_block(inode,
EXT4_C2B(sbi, reservation));
spin_lock(&ei->i_block_reservation_lock);
ei->i_reserved_data_blocks += reservation;
spin_unlock(&ei->i_block_reservation_lock);
}
/*
* We will claim quota for all newly allocated blocks.
* We're updating the reserved space *after* the
* correction above so we do not accidentally free
* all the metadata reservation because we might
* actually need it later on.
*/
ext4_da_update_reserve_space(inode, allocated_clusters,
1);
}
}
/*
* Cache the extent and update transaction to commit on fdatasync only
* when it is _not_ an unwritten extent.
*/
if ((flags & EXT4_GET_BLOCKS_UNWRIT_EXT) == 0)
ext4_update_inode_fsync_trans(handle, inode, 1);
else
ext4_update_inode_fsync_trans(handle, inode, 0);
out:
if (allocated > map->m_len)
allocated = map->m_len;
ext4_ext_show_leaf(inode, path);
map->m_flags |= EXT4_MAP_MAPPED;
map->m_pblk = newblock;
map->m_len = allocated;
out2:
ext4_ext_drop_refs(path);
kfree(path);
trace_ext4_ext_map_blocks_exit(inode, flags, map,
err ? err : allocated);
return err ? err : allocated;
}
| 0
|
440,414
|
PHP_METHOD(Phar, webPhar)
{
zval *mimeoverride = NULL, *rewrite = NULL;
char *alias = NULL, *error, *index_php = NULL, *f404 = NULL, *ru = NULL;
size_t alias_len = 0, f404_len = 0, free_pathinfo = 0;
int ru_len = 0;
char *fname, *path_info, *mime_type = NULL, *entry, *pt;
const char *basename;
size_t fname_len, index_php_len = 0;
int entry_len, code, not_cgi;
phar_archive_data *phar = NULL;
phar_entry_info *info = NULL;
size_t sapi_mod_name_len = strlen(sapi_module.name);
if (zend_parse_parameters(ZEND_NUM_ARGS(), "|s!s!saz", &alias, &alias_len, &index_php, &index_php_len, &f404, &f404_len, &mimeoverride, &rewrite) == FAILURE) {
return;
}
phar_request_initialize();
fname = (char*)zend_get_executed_filename();
fname_len = strlen(fname);
if (ZEND_SIZE_T_INT_OVFL(alias_len)
|| ZEND_SIZE_T_INT_OVFL(f404_len) || ZEND_SIZE_T_INT_OVFL(index_php_len)) {
RETURN_FALSE;
}
if (phar_open_executed_filename(alias, (int)alias_len, &error) != SUCCESS) {
if (error) {
zend_throw_exception_ex(phar_ce_PharException, 0, "%s", error);
efree(error);
}
return;
}
/* retrieve requested file within phar */
if (!(SG(request_info).request_method
&& SG(request_info).request_uri
&& (!strcmp(SG(request_info).request_method, "GET")
|| !strcmp(SG(request_info).request_method, "POST")
|| !strcmp(SG(request_info).request_method, "DELETE")
|| !strcmp(SG(request_info).request_method, "HEAD")
|| !strcmp(SG(request_info).request_method, "OPTIONS")
|| !strcmp(SG(request_info).request_method, "PATCH")
|| !strcmp(SG(request_info).request_method, "PUT")
)
)
) {
return;
}
#ifdef PHP_WIN32
fname = estrndup(fname, fname_len);
phar_unixify_path_separators(fname, fname_len);
#endif
basename = zend_memrchr(fname, '/', fname_len);
if (!basename) {
basename = fname;
} else {
++basename;
}
if ((sapi_mod_name_len == sizeof("cgi-fcgi") - 1 && !strncmp(sapi_module.name, "cgi-fcgi", sizeof("cgi-fcgi") - 1))
|| (sapi_mod_name_len == sizeof("fpm-fcgi") - 1 && !strncmp(sapi_module.name, "fpm-fcgi", sizeof("fpm-fcgi") - 1))
|| (sapi_mod_name_len == sizeof("cgi") - 1 && !strncmp(sapi_module.name, "cgi", sizeof("cgi") - 1))) {
if (Z_TYPE(PG(http_globals)[TRACK_VARS_SERVER]) != IS_UNDEF) {
HashTable *_server = Z_ARRVAL(PG(http_globals)[TRACK_VARS_SERVER]);
zval *z_script_name, *z_path_info;
if (NULL == (z_script_name = zend_hash_str_find(_server, "SCRIPT_NAME", sizeof("SCRIPT_NAME")-1)) ||
IS_STRING != Z_TYPE_P(z_script_name) ||
!strstr(Z_STRVAL_P(z_script_name), basename)) {
return;
}
if (NULL != (z_path_info = zend_hash_str_find(_server, "PATH_INFO", sizeof("PATH_INFO")-1)) &&
IS_STRING == Z_TYPE_P(z_path_info)) {
entry_len = (int)Z_STRLEN_P(z_path_info);
entry = estrndup(Z_STRVAL_P(z_path_info), entry_len);
path_info = emalloc(Z_STRLEN_P(z_script_name) + entry_len + 1);
memcpy(path_info, Z_STRVAL_P(z_script_name), Z_STRLEN_P(z_script_name));
memcpy(path_info + Z_STRLEN_P(z_script_name), entry, entry_len + 1);
free_pathinfo = 1;
} else {
entry_len = 0;
entry = estrndup("", 0);
path_info = Z_STRVAL_P(z_script_name);
}
pt = estrndup(Z_STRVAL_P(z_script_name), Z_STRLEN_P(z_script_name));
} else {
char *testit;
testit = sapi_getenv("SCRIPT_NAME", sizeof("SCRIPT_NAME")-1);
if (!(pt = strstr(testit, basename))) {
efree(testit);
return;
}
path_info = sapi_getenv("PATH_INFO", sizeof("PATH_INFO")-1);
if (path_info) {
entry = path_info;
entry_len = (int)strlen(entry);
spprintf(&path_info, 0, "%s%s", testit, path_info);
free_pathinfo = 1;
} else {
path_info = testit;
free_pathinfo = 1;
entry = estrndup("", 0);
entry_len = 0;
}
pt = estrndup(testit, (pt - testit) + (fname_len - (basename - fname)));
}
not_cgi = 0;
} else {
path_info = SG(request_info).request_uri;
if (!(pt = strstr(path_info, basename))) {
/* this can happen with rewrite rules - and we have no idea what to do then, so return */
return;
}
entry_len = (int)strlen(path_info);
entry_len -= (pt - path_info) + (fname_len - (basename - fname));
entry = estrndup(pt + (fname_len - (basename - fname)), entry_len);
pt = estrndup(path_info, (pt - path_info) + (fname_len - (basename - fname)));
not_cgi = 1;
}
if (rewrite) {
zend_fcall_info fci;
zend_fcall_info_cache fcc;
zval params, retval;
ZVAL_STRINGL(&params, entry, entry_len);
if (FAILURE == zend_fcall_info_init(rewrite, 0, &fci, &fcc, NULL, NULL)) {
zend_throw_exception_ex(phar_ce_PharException, 0, "phar error: invalid rewrite callback");
if (free_pathinfo) {
efree(path_info);
}
efree(pt);
return;
}
fci.param_count = 1;
fci.params = &params;
Z_ADDREF(params);
fci.retval = &retval;
if (FAILURE == zend_call_function(&fci, &fcc)) {
if (!EG(exception)) {
zend_throw_exception_ex(phar_ce_PharException, 0, "phar error: failed to call rewrite callback");
}
if (free_pathinfo) {
efree(path_info);
}
efree(pt);
return;
}
if (Z_TYPE_P(fci.retval) == IS_UNDEF || Z_TYPE(retval) == IS_UNDEF) {
if (free_pathinfo) {
efree(path_info);
}
zend_throw_exception_ex(phar_ce_PharException, 0, "phar error: rewrite callback must return a string or false");
efree(pt);
return;
}
switch (Z_TYPE(retval)) {
case IS_STRING:
efree(entry);
if (ZEND_SIZE_T_INT_OVFL(Z_STRLEN_P(fci.retval))) {
zend_throw_exception_ex(phar_ce_PharException, 0, "phar error: rewrite callback returned oversized value");
return;
}
entry = estrndup(Z_STRVAL_P(fci.retval), Z_STRLEN_P(fci.retval));
entry_len = (int)Z_STRLEN_P(fci.retval);
break;
case IS_TRUE:
case IS_FALSE:
phar_do_403(entry, entry_len);
if (free_pathinfo) {
efree(path_info);
}
efree(pt);
zend_bailout();
return;
default:
if (free_pathinfo) {
efree(path_info);
}
efree(pt);
zend_throw_exception_ex(phar_ce_PharException, 0, "phar error: rewrite callback must return a string or false");
return;
}
}
if (entry_len) {
phar_postprocess_ru_web(fname, (int)fname_len, &entry, &entry_len, &ru, &ru_len);
}
if (!entry_len || (entry_len == 1 && entry[0] == '/')) {
efree(entry);
/* direct request */
if (index_php_len) {
entry = index_php;
entry_len = (int)index_php_len;
if (entry[0] != '/') {
spprintf(&entry, 0, "/%s", index_php);
++entry_len;
}
} else {
/* assume "index.php" is starting point */
entry = estrndup("/index.php", sizeof("/index.php"));
entry_len = sizeof("/index.php")-1;
}
if (FAILURE == phar_get_archive(&phar, fname, (int)fname_len, NULL, 0, NULL) ||
(info = phar_get_entry_info(phar, entry, entry_len, NULL, 0)) == NULL) {
phar_do_404(phar, fname, (int)fname_len, f404, (int)f404_len, entry, entry_len);
if (free_pathinfo) {
efree(path_info);
}
zend_bailout();
} else {
char *tmp = NULL, sa = '\0';
sapi_header_line ctr = {0};
ctr.response_code = 301;
ctr.line_len = sizeof("HTTP/1.1 301 Moved Permanently")-1;
ctr.line = "HTTP/1.1 301 Moved Permanently";
sapi_header_op(SAPI_HEADER_REPLACE, &ctr);
if (not_cgi) {
tmp = strstr(path_info, basename) + fname_len;
sa = *tmp;
*tmp = '\0';
}
ctr.response_code = 0;
if (path_info[strlen(path_info)-1] == '/') {
ctr.line_len = spprintf(&(ctr.line), 4096, "Location: %s%s", path_info, entry + 1);
} else {
ctr.line_len = spprintf(&(ctr.line), 4096, "Location: %s%s", path_info, entry);
}
if (not_cgi) {
*tmp = sa;
}
if (free_pathinfo) {
efree(path_info);
}
sapi_header_op(SAPI_HEADER_REPLACE, &ctr);
sapi_send_headers();
efree(ctr.line);
zend_bailout();
}
}
if (FAILURE == phar_get_archive(&phar, fname, (int)fname_len, NULL, 0, NULL) ||
(info = phar_get_entry_info(phar, entry, entry_len, NULL, 0)) == NULL) {
phar_do_404(phar, fname, (int)fname_len, f404, (int)f404_len, entry, entry_len);
#ifdef PHP_WIN32
efree(fname);
#endif
zend_bailout();
}
if (mimeoverride && zend_hash_num_elements(Z_ARRVAL_P(mimeoverride))) {
const char *ext = zend_memrchr(entry, '.', entry_len);
zval *val;
if (ext) {
++ext;
if (NULL != (val = zend_hash_str_find(Z_ARRVAL_P(mimeoverride), ext, strlen(ext)))) {
switch (Z_TYPE_P(val)) {
case IS_LONG:
if (Z_LVAL_P(val) == PHAR_MIME_PHP || Z_LVAL_P(val) == PHAR_MIME_PHPS) {
mime_type = "";
code = (int)Z_LVAL_P(val);
} else {
zend_throw_exception_ex(phar_ce_PharException, 0, "Unknown mime type specifier used, only Phar::PHP, Phar::PHPS and a mime type string are allowed");
if (free_pathinfo) {
efree(path_info);
}
efree(pt);
efree(entry);
#ifdef PHP_WIN32
efree(fname);
#endif
RETURN_FALSE;
}
break;
case IS_STRING:
mime_type = Z_STRVAL_P(val);
code = PHAR_MIME_OTHER;
break;
default:
zend_throw_exception_ex(phar_ce_PharException, 0, "Unknown mime type specifier used (not a string or int), only Phar::PHP, Phar::PHPS and a mime type string are allowed");
if (free_pathinfo) {
efree(path_info);
}
efree(pt);
efree(entry);
#ifdef PHP_WIN32
efree(fname);
#endif
RETURN_FALSE;
}
}
}
}
if (!mime_type) {
code = phar_file_type(&PHAR_G(mime_types), entry, &mime_type);
}
phar_file_action(phar, info, mime_type, code, entry, entry_len, fname, pt, ru, ru_len);
}
| 0
|
187,755
|
bool TopSitesImpl::IsKnownURL(const GURL& url) {
return loaded_ && cache_->IsKnownURL(url);
}
| 0
|
433,160
|
static int rds_release(struct socket *sock)
{
struct sock *sk = sock->sk;
struct rds_sock *rs;
if (!sk)
goto out;
rs = rds_sk_to_rs(sk);
sock_orphan(sk);
/* Note - rds_clear_recv_queue grabs rs_recv_lock, so
* that ensures the recv path has completed messing
* with the socket. */
rds_clear_recv_queue(rs);
rds_cong_remove_socket(rs);
rds_remove_bound(rs);
rds_send_drop_to(rs, NULL);
rds_rdma_drop_keys(rs);
rds_notify_queue_get(rs, NULL);
spin_lock_bh(&rds_sock_lock);
list_del_init(&rs->rs_item);
rds_sock_count--;
spin_unlock_bh(&rds_sock_lock);
rds_trans_put(rs->rs_transport);
sock->sk = NULL;
sock_put(sk);
out:
return 0;
}
| 0
|
244,338
|
void ReportPrintSettingsStats(const DictionaryValue& settings) {
bool landscape;
if (settings.GetBoolean(printing::kSettingLandscape, &landscape))
ReportPrintSettingHistogram(landscape ? LANDSCAPE : PORTRAIT);
bool collate;
if (settings.GetBoolean(printing::kSettingCollate, &collate) && collate)
ReportPrintSettingHistogram(COLLATE);
int duplex_mode;
if (settings.GetInteger(printing::kSettingDuplexMode, &duplex_mode))
ReportPrintSettingHistogram(duplex_mode ? DUPLEX : SIMPLEX);
int color_mode;
if (settings.GetInteger(printing::kSettingColor, &color_mode)) {
ReportPrintSettingHistogram(
printing::isColorModelSelected(color_mode) ? COLOR : BLACK_AND_WHITE);
}
}
| 0
|
306,844
|
static inline pg_data_t *page_pgdat(const struct page *page)
{
return NODE_DATA(page_to_nid(page));
}
| 0
|
279
|
static _Bool have_gcrypt (void) {
static _Bool result = 0;
static _Bool need_init = 1;
if (!need_init) return (result);
need_init = 0;
#if HAVE_LIBGCRYPT
#if GCRYPT_VERSION_NUMBER < 0x010600
gcry_control (GCRYCTL_SET_THREAD_CBS, &gcry_threads_pthread);
#endif
if (!gcry_check_version (GCRYPT_VERSION)) return (0);
gcry_control (GCRYCTL_INIT_SECMEM, 32768, 0);
gcry_control (GCRYCTL_INITIALIZATION_FINISHED, 0);
result = 1;
return (1);
#else
return (0);
#endif
}
| 1
|
70,201
|
void __perf_event_task_sched_in(struct task_struct *prev,
struct task_struct *task)
{
struct perf_event_context *ctx;
int ctxn;
for_each_task_context_nr(ctxn) {
ctx = task->perf_event_ctxp[ctxn];
if (likely(!ctx))
continue;
perf_event_context_sched_in(ctx, task);
}
/*
* if cgroup events exist on this CPU, then we need
* to check if we have to switch in PMU state.
* cgroup event are system-wide mode only
*/
if (atomic_read(&__get_cpu_var(perf_cgroup_events)))
perf_cgroup_sched_in(prev, task);
/* check for system-wide branch_stack events */
if (atomic_read(&__get_cpu_var(perf_branch_stack_events)))
perf_branch_stack_sched_in(prev, task);
}
| 0
|
517,810
|
my_decimal *Item::val_decimal_from_date(my_decimal *decimal_value)
{
DBUG_ASSERT(fixed == 1);
MYSQL_TIME ltime;
if (get_temporal_with_sql_mode(&ltime))
{
my_decimal_set_zero(decimal_value);
null_value= 1; // set NULL, stop processing
return 0;
}
return date2my_decimal(&ltime, decimal_value);
}
| 0
|
10,587
|
method_invocation_get_uid (GDBusMethodInvocation *context)
{
const gchar *sender;
PolkitSubject *busname;
PolkitSubject *process;
uid_t uid;
sender = g_dbus_method_invocation_get_sender (context);
busname = polkit_system_bus_name_new (sender);
process = polkit_system_bus_name_get_process_sync (POLKIT_SYSTEM_BUS_NAME (busname), NULL, NULL);
uid = polkit_unix_process_get_uid (POLKIT_UNIX_PROCESS (process));
g_object_unref (busname);
g_object_unref (process);
return uid;
}
| 1
|
113,220
|
static int l2tp_ip_recvmsg(struct sock *sk, struct msghdr *msg,
size_t len, int noblock, int flags, int *addr_len)
{
struct inet_sock *inet = inet_sk(sk);
size_t copied = 0;
int err = -EOPNOTSUPP;
DECLARE_SOCKADDR(struct sockaddr_in *, sin, msg->msg_name);
struct sk_buff *skb;
if (flags & MSG_OOB)
goto out;
skb = skb_recv_datagram(sk, flags, noblock, &err);
if (!skb)
goto out;
copied = skb->len;
if (len < copied) {
msg->msg_flags |= MSG_TRUNC;
copied = len;
}
err = skb_copy_datagram_msg(skb, 0, msg, copied);
if (err)
goto done;
sock_recv_timestamp(msg, sk, skb);
/* Copy the address. */
if (sin) {
sin->sin_family = AF_INET;
sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
sin->sin_port = 0;
memset(&sin->sin_zero, 0, sizeof(sin->sin_zero));
*addr_len = sizeof(*sin);
}
if (inet->cmsg_flags)
ip_cmsg_recv(msg, skb);
if (flags & MSG_TRUNC)
copied = skb->len;
done:
skb_free_datagram(sk, skb);
out:
return err ? err : copied;
}
| 0
|
368,654
|
dir_networkstatus_download_failed(smartlist_t *failed, int status_code)
{
if (status_code == 503)
return;
SMARTLIST_FOREACH(failed, const char *, fp,
{
char digest[DIGEST_LEN];
trusted_dir_server_t *dir;
if (base16_decode(digest, DIGEST_LEN, fp, strlen(fp))<0) {
log_warn(LD_BUG, "Called with bad fingerprint in list: %s",
escaped(fp));
continue;
}
dir = router_get_trusteddirserver_by_digest(digest);
if (dir)
download_status_failed(&dir->v2_ns_dl_status, status_code);
});
}
| 0
|
286,327
|
static int compareNumbersForQSort(const void* a, const void* b)
{
double da = static_cast<const JSValue*>(a)->uncheckedGetNumber();
double db = static_cast<const JSValue*>(b)->uncheckedGetNumber();
return (da > db) - (da < db);
}
| 0
|
372,535
|
int endBlockingModDir(request_rec *r) {
RequestNote *note = getRequestNote(r);
if (note != 0 && hasModDir()) {
r->finfo.filetype = note->oldFileType;
}
return DECLINED;
}
| 0
|
224,357
|
ScriptValue WebGL2RenderingContextBase::getParameter(ScriptState* script_state,
GLenum pname) {
if (isContextLost())
return ScriptValue::CreateNull(script_state);
switch (pname) {
case GL_SHADING_LANGUAGE_VERSION: {
return WebGLAny(
script_state,
"WebGL GLSL ES 3.00 (" +
String(ContextGL()->GetString(GL_SHADING_LANGUAGE_VERSION)) +
")");
}
case GL_VERSION:
return WebGLAny(
script_state,
"WebGL 2.0 (" + String(ContextGL()->GetString(GL_VERSION)) + ")");
case GL_COPY_READ_BUFFER_BINDING:
return WebGLAny(script_state, bound_copy_read_buffer_.Get());
case GL_COPY_WRITE_BUFFER_BINDING:
return WebGLAny(script_state, bound_copy_write_buffer_.Get());
case GL_DRAW_FRAMEBUFFER_BINDING:
return WebGLAny(script_state, framebuffer_binding_.Get());
case GL_FRAGMENT_SHADER_DERIVATIVE_HINT:
return GetUnsignedIntParameter(script_state, pname);
case GL_MAX_3D_TEXTURE_SIZE:
return GetIntParameter(script_state, pname);
case GL_MAX_ARRAY_TEXTURE_LAYERS:
return GetIntParameter(script_state, pname);
case GC3D_MAX_CLIENT_WAIT_TIMEOUT_WEBGL:
return WebGLAny(script_state, kMaxClientWaitTimeout);
case GL_MAX_COLOR_ATTACHMENTS:
return GetIntParameter(script_state, pname);
case GL_MAX_COMBINED_FRAGMENT_UNIFORM_COMPONENTS:
return GetInt64Parameter(script_state, pname);
case GL_MAX_COMBINED_UNIFORM_BLOCKS:
return GetIntParameter(script_state, pname);
case GL_MAX_COMBINED_VERTEX_UNIFORM_COMPONENTS:
return GetInt64Parameter(script_state, pname);
case GL_MAX_DRAW_BUFFERS:
return GetIntParameter(script_state, pname);
case GL_MAX_ELEMENT_INDEX:
return GetInt64Parameter(script_state, pname);
case GL_MAX_ELEMENTS_INDICES:
return GetIntParameter(script_state, pname);
case GL_MAX_ELEMENTS_VERTICES:
return GetIntParameter(script_state, pname);
case GL_MAX_FRAGMENT_INPUT_COMPONENTS:
return GetIntParameter(script_state, pname);
case GL_MAX_FRAGMENT_UNIFORM_BLOCKS:
return GetIntParameter(script_state, pname);
case GL_MAX_FRAGMENT_UNIFORM_COMPONENTS:
return GetIntParameter(script_state, pname);
case GL_MAX_PROGRAM_TEXEL_OFFSET:
return GetIntParameter(script_state, pname);
case GL_MAX_SAMPLES:
return GetIntParameter(script_state, pname);
case GL_MAX_SERVER_WAIT_TIMEOUT:
return GetInt64Parameter(script_state, pname);
case GL_MAX_TEXTURE_LOD_BIAS:
return GetFloatParameter(script_state, pname);
case GL_MAX_TRANSFORM_FEEDBACK_INTERLEAVED_COMPONENTS:
return GetIntParameter(script_state, pname);
case GL_MAX_TRANSFORM_FEEDBACK_SEPARATE_ATTRIBS:
return GetIntParameter(script_state, pname);
case GL_MAX_TRANSFORM_FEEDBACK_SEPARATE_COMPONENTS:
return GetIntParameter(script_state, pname);
case GL_MAX_UNIFORM_BLOCK_SIZE:
return GetInt64Parameter(script_state, pname);
case GL_MAX_UNIFORM_BUFFER_BINDINGS:
return GetIntParameter(script_state, pname);
case GL_MAX_VARYING_COMPONENTS:
return GetIntParameter(script_state, pname);
case GL_MAX_VERTEX_OUTPUT_COMPONENTS:
return GetIntParameter(script_state, pname);
case GL_MAX_VERTEX_UNIFORM_BLOCKS:
return GetIntParameter(script_state, pname);
case GL_MAX_VERTEX_UNIFORM_COMPONENTS:
return GetIntParameter(script_state, pname);
case GL_MIN_PROGRAM_TEXEL_OFFSET:
return GetIntParameter(script_state, pname);
case GL_PACK_ROW_LENGTH:
return GetIntParameter(script_state, pname);
case GL_PACK_SKIP_PIXELS:
return GetIntParameter(script_state, pname);
case GL_PACK_SKIP_ROWS:
return GetIntParameter(script_state, pname);
case GL_PIXEL_PACK_BUFFER_BINDING:
return WebGLAny(script_state, bound_pixel_pack_buffer_.Get());
case GL_PIXEL_UNPACK_BUFFER_BINDING:
return WebGLAny(script_state, bound_pixel_unpack_buffer_.Get());
case GL_RASTERIZER_DISCARD:
return GetBooleanParameter(script_state, pname);
case GL_READ_BUFFER: {
GLenum value = 0;
if (!isContextLost()) {
WebGLFramebuffer* read_framebuffer_binding =
GetFramebufferBinding(GL_READ_FRAMEBUFFER);
if (!read_framebuffer_binding)
value = read_buffer_of_default_framebuffer_;
else
value = read_framebuffer_binding->GetReadBuffer();
}
return WebGLAny(script_state, value);
}
case GL_READ_FRAMEBUFFER_BINDING:
return WebGLAny(script_state, read_framebuffer_binding_.Get());
case GL_SAMPLER_BINDING:
return WebGLAny(script_state, sampler_units_[active_texture_unit_].Get());
case GL_TEXTURE_BINDING_2D_ARRAY:
return WebGLAny(
script_state,
texture_units_[active_texture_unit_].texture2d_array_binding_.Get());
case GL_TEXTURE_BINDING_3D:
return WebGLAny(
script_state,
texture_units_[active_texture_unit_].texture3d_binding_.Get());
case GL_TRANSFORM_FEEDBACK_ACTIVE:
return GetBooleanParameter(script_state, pname);
case GL_TRANSFORM_FEEDBACK_BUFFER_BINDING:
return WebGLAny(
script_state,
transform_feedback_binding_->GetBoundTransformFeedbackBuffer());
case GL_TRANSFORM_FEEDBACK_BINDING:
if (!transform_feedback_binding_->IsDefaultObject()) {
return WebGLAny(script_state, transform_feedback_binding_.Get());
}
return ScriptValue::CreateNull(script_state);
case GL_TRANSFORM_FEEDBACK_PAUSED:
return GetBooleanParameter(script_state, pname);
case GL_UNIFORM_BUFFER_BINDING:
return WebGLAny(script_state, bound_uniform_buffer_.Get());
case GL_UNIFORM_BUFFER_OFFSET_ALIGNMENT:
return GetIntParameter(script_state, pname);
case GL_UNPACK_IMAGE_HEIGHT:
return GetIntParameter(script_state, pname);
case GL_UNPACK_ROW_LENGTH:
return GetIntParameter(script_state, pname);
case GL_UNPACK_SKIP_IMAGES:
return GetIntParameter(script_state, pname);
case GL_UNPACK_SKIP_PIXELS:
return GetIntParameter(script_state, pname);
case GL_UNPACK_SKIP_ROWS:
return GetIntParameter(script_state, pname);
case GL_TIMESTAMP_EXT:
if (ExtensionEnabled(kEXTDisjointTimerQueryWebGL2Name)) {
return WebGLAny(script_state, 0);
}
SynthesizeGLError(GL_INVALID_ENUM, "getParameter",
"invalid parameter name, "
"EXT_disjoint_timer_query_webgl2 not enabled");
return ScriptValue::CreateNull(script_state);
case GL_GPU_DISJOINT_EXT:
if (ExtensionEnabled(kEXTDisjointTimerQueryWebGL2Name)) {
return GetBooleanParameter(script_state, GL_GPU_DISJOINT_EXT);
}
SynthesizeGLError(GL_INVALID_ENUM, "getParameter",
"invalid parameter name, "
"EXT_disjoint_timer_query_webgl2 not enabled");
return ScriptValue::CreateNull(script_state);
default:
return WebGLRenderingContextBase::getParameter(script_state, pname);
}
}
| 0
|
340,039
|
static int h264_mp4toannexb_filter(AVBitStreamFilterContext *bsfc,
AVCodecContext *avctx, const char *args,
uint8_t **poutbuf, int *poutbuf_size,
const uint8_t *buf, int buf_size,
int keyframe) {
H264BSFContext *ctx = bsfc->priv_data;
uint8_t unit_type;
uint32_t nal_size, cumul_size = 0;
/* nothing to filter */
if (!avctx->extradata || avctx->extradata_size < 6) {
*poutbuf = (uint8_t*) buf;
*poutbuf_size = buf_size;
return 0;
}
/* retrieve sps and pps NAL units from extradata */
if (!ctx->sps_pps_data) {
uint16_t unit_size;
uint32_t total_size = 0;
uint8_t *out = NULL, unit_nb, sps_done = 0;
const uint8_t *extradata = avctx->extradata+4;
static const uint8_t nalu_header[4] = {0, 0, 0, 1};
/* retrieve length coded size */
ctx->length_size = (*extradata++ & 0x3) + 1;
if (ctx->length_size == 3)
return AVERROR(EINVAL);
/* retrieve sps and pps unit(s) */
unit_nb = *extradata++ & 0x1f; /* number of sps unit(s) */
if (!unit_nb) {
unit_nb = *extradata++; /* number of pps unit(s) */
sps_done++;
}
while (unit_nb--) {
unit_size = AV_RB16(extradata);
total_size += unit_size+4;
if (extradata+2+unit_size > avctx->extradata+avctx->extradata_size) {
av_free(out);
return AVERROR(EINVAL);
}
out = av_realloc(out, total_size);
if (!out)
return AVERROR(ENOMEM);
memcpy(out+total_size-unit_size-4, nalu_header, 4);
memcpy(out+total_size-unit_size, extradata+2, unit_size);
extradata += 2+unit_size;
if (!unit_nb && !sps_done++)
unit_nb = *extradata++; /* number of pps unit(s) */
}
ctx->sps_pps_data = out;
ctx->size = total_size;
ctx->first_idr = 1;
}
*poutbuf_size = 0;
*poutbuf = NULL;
do {
if (ctx->length_size == 1)
nal_size = buf[0];
else if (ctx->length_size == 2)
nal_size = AV_RB16(buf);
else
nal_size = AV_RB32(buf);
buf += ctx->length_size;
unit_type = *buf & 0x1f;
/* prepend only to the first type 5 NAL unit of an IDR picture */
if (ctx->first_idr && unit_type == 5) {
alloc_and_copy(poutbuf, poutbuf_size,
ctx->sps_pps_data, ctx->size,
buf, nal_size);
ctx->first_idr = 0;
}
else {
alloc_and_copy(poutbuf, poutbuf_size,
NULL, 0,
buf, nal_size);
if (!ctx->first_idr && unit_type == 1)
ctx->first_idr = 1;
}
buf += nal_size;
cumul_size += nal_size + ctx->length_size;
} while (cumul_size < buf_size);
return 1;
}
| 0
|
14,102
|
void SafeBrowsingBlockingPage::DontProceed() {
DCHECK(action_taken() != DONT_PROCEED_ACTION);
if (action_taken() == PROCEED_ACTION) {
InterstitialPage::DontProceed();
return;
}
RecordUserAction(DONT_PROCEED);
FinishMalwareDetails(0); // No delay
NotifySafeBrowsingService(sb_service_, unsafe_resources_, false);
UnsafeResourceMap* unsafe_resource_map = GetUnsafeResourcesMap();
UnsafeResourceMap::iterator iter = unsafe_resource_map->find(tab());
if (iter != unsafe_resource_map->end() && !iter->second.empty()) {
NotifySafeBrowsingService(sb_service_, iter->second, false);
unsafe_resource_map->erase(iter);
}
if (navigation_entry_index_to_remove_ != -1 && !tab()->is_being_destroyed()) {
tab()->controller().RemoveEntryAtIndex(navigation_entry_index_to_remove_,
GURL(chrome::kChromeUINewTabURL));
navigation_entry_index_to_remove_ = -1;
}
InterstitialPage::DontProceed();
}
| 1
|
233,570
|
void HTMLFormElement::associate(FormAssociatedElement& e) {
m_associatedElementsAreDirty = true;
m_associatedElements.clear();
if (toHTMLElement(e).fastHasAttribute(formAttr))
m_hasElementsAssociatedByFormAttribute = true;
}
| 0
|
65,632
|
static int copy_sighand(unsigned long clone_flags, struct task_struct *tsk)
{
struct sighand_struct *sig;
if (clone_flags & (CLONE_SIGHAND | CLONE_THREAD)) {
atomic_inc(&current->sighand->count);
return 0;
}
sig = kmem_cache_alloc(sighand_cachep, GFP_KERNEL);
rcu_assign_pointer(tsk->sighand, sig);
if (!sig)
return -ENOMEM;
atomic_set(&sig->count, 1);
memcpy(sig->action, current->sighand->action, sizeof(sig->action));
return 0;
}
| 0
|
386,710
|
static gboolean parse_rle_data(TGAContext *ctx, GError **err)
{
guint rows = 0;
guint count = 0;
guint bytes_done_before = ctx->pbuf_bytes_done;
if (ctx->hdr->type == TGA_TYPE_RLE_PSEUDOCOLOR)
count = parse_rle_data_pseudocolor(ctx);
else if (ctx->hdr->type == TGA_TYPE_RLE_TRUECOLOR)
count = parse_rle_data_truecolor(ctx);
else if (ctx->hdr->type == TGA_TYPE_RLE_GRAYSCALE)
count = parse_rle_data_grayscale(ctx);
if (ctx->hdr->flags & TGA_ORIGIN_RIGHT) {
guchar *row = ctx->pbuf->pixels + (bytes_done_before / ctx->pbuf->rowstride) * ctx->pbuf->rowstride;
guchar *row_after = ctx->pbuf->pixels + (ctx->pbuf_bytes_done / ctx->pbuf->rowstride) * ctx->pbuf->rowstride;
for (; row < row_after; row += ctx->pbuf->rowstride)
pixbuf_flip_row (ctx->pbuf, row);
}
ctx->in = io_buffer_free_segment(ctx->in, count, err);
if (!ctx->in)
return FALSE;
if (ctx->done) {
/* FIXME doing the vertical flipping afterwards is not
* perfect, but doing it during the rle decoding in place
* is considerably more work.
*/
if (!(ctx->hdr->flags & TGA_ORIGIN_UPPER)) {
pixbuf_flip_vertically (ctx->pbuf);
ctx->hdr->flags |= TGA_ORIGIN_UPPER;
}
}
rows = ctx->pbuf_bytes_done / ctx->pbuf->rowstride - bytes_done_before / ctx->pbuf->rowstride;
if (ctx->ufunc)
(*ctx->ufunc) (ctx->pbuf, 0, bytes_done_before / ctx->pbuf->rowstride,
ctx->pbuf->width, rows,
ctx->udata);
return TRUE;
}
| 0
|
114,430
|
struct inode *ovl_new_inode(struct super_block *sb, umode_t mode,
struct ovl_entry *oe)
{
struct inode *inode;
inode = new_inode(sb);
if (!inode)
return NULL;
mode &= S_IFMT;
inode->i_ino = get_next_ino();
inode->i_mode = mode;
inode->i_flags |= S_NOATIME | S_NOCMTIME;
switch (mode) {
case S_IFDIR:
inode->i_private = oe;
inode->i_op = &ovl_dir_inode_operations;
inode->i_fop = &ovl_dir_operations;
break;
case S_IFLNK:
inode->i_op = &ovl_symlink_inode_operations;
break;
case S_IFREG:
case S_IFSOCK:
case S_IFBLK:
case S_IFCHR:
case S_IFIFO:
inode->i_op = &ovl_file_inode_operations;
break;
default:
WARN(1, "illegal file type: %i\n", mode);
iput(inode);
inode = NULL;
}
return inode;
}
| 0
|
5,758
|
static int kvm_ioctl_create_device(struct kvm *kvm,
struct kvm_create_device *cd)
{
struct kvm_device_ops *ops = NULL;
struct kvm_device *dev;
bool test = cd->flags & KVM_CREATE_DEVICE_TEST;
int ret;
if (cd->type >= ARRAY_SIZE(kvm_device_ops_table))
return -ENODEV;
ops = kvm_device_ops_table[cd->type];
if (ops == NULL)
return -ENODEV;
if (test)
return 0;
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (!dev)
return -ENOMEM;
dev->ops = ops;
dev->kvm = kvm;
mutex_lock(&kvm->lock);
ret = ops->create(dev, cd->type);
if (ret < 0) {
mutex_unlock(&kvm->lock);
kfree(dev);
return ret;
}
list_add(&dev->vm_node, &kvm->devices);
mutex_unlock(&kvm->lock);
if (ops->init)
ops->init(dev);
ret = anon_inode_getfd(ops->name, &kvm_device_fops, dev, O_RDWR | O_CLOEXEC);
if (ret < 0) {
mutex_lock(&kvm->lock);
list_del(&dev->vm_node);
mutex_unlock(&kvm->lock);
ops->destroy(dev);
return ret;
}
kvm_get_kvm(kvm);
cd->fd = ret;
return 0;
}
| 1
|
111,509
|
static int initialize_context_compression(
blosc2_context* context, const void* src, int32_t srcsize, void* dest,
int32_t destsize, int clevel, uint8_t const *filters,
uint8_t const *filters_meta, int32_t typesize, int compressor,
int32_t blocksize, int new_nthreads, int nthreads, blosc2_schunk* schunk) {
/* Set parameters */
context->do_compress = 1;
context->src = (const uint8_t*)src;
context->srcsize = srcsize;
context->dest = (uint8_t*)dest;
context->output_bytes = 0;
context->destsize = destsize;
context->sourcesize = srcsize;
context->typesize = (int32_t)typesize;
context->filter_flags = filters_to_flags(filters);
for (int i = 0; i < BLOSC2_MAX_FILTERS; i++) {
context->filters[i] = filters[i];
context->filters_meta[i] = filters_meta[i];
}
context->compcode = compressor;
context->nthreads = nthreads;
context->new_nthreads = new_nthreads;
context->end_threads = 0;
context->clevel = clevel;
context->schunk = schunk;
/* Tune some compression parameters */
context->blocksize = (int32_t)blocksize;
if (context->btune != NULL) {
btune_next_cparams(context);
} else {
btune_next_blocksize(context);
}
char* envvar = getenv("BLOSC_WARN");
int warnlvl = 0;
if (envvar != NULL) {
warnlvl = strtol(envvar, NULL, 10);
}
/* Check buffer size limits */
if (srcsize > BLOSC_MAX_BUFFERSIZE) {
if (warnlvl > 0) {
fprintf(stderr, "Input buffer size cannot exceed %d bytes\n",
BLOSC_MAX_BUFFERSIZE);
}
return 0;
}
if (destsize < BLOSC_MAX_OVERHEAD) {
if (warnlvl > 0) {
fprintf(stderr, "Output buffer size should be larger than %d bytes\n",
BLOSC_MAX_OVERHEAD);
}
return 0;
}
if (destsize < BLOSC_MAX_OVERHEAD) {
if (warnlvl > 0) {
fprintf(stderr, "Output buffer size should be larger than %d bytes\n",
BLOSC_MAX_OVERHEAD);
}
return -2;
}
if (destsize < BLOSC_MAX_OVERHEAD) {
fprintf(stderr, "Output buffer size should be larger than %d bytes\n",
BLOSC_MAX_OVERHEAD);
return -1;
}
/* Compression level */
if (clevel < 0 || clevel > 9) {
/* If clevel not in 0..9, print an error */
fprintf(stderr, "`clevel` parameter must be between 0 and 9!\n");
return -10;
}
/* Check typesize limits */
if (context->typesize > BLOSC_MAX_TYPESIZE) {
/* If typesize is too large, treat buffer as an 1-byte stream. */
context->typesize = 1;
}
/* Compute number of blocks in buffer */
context->nblocks = context->sourcesize / context->blocksize;
context->leftover = context->sourcesize % context->blocksize;
context->nblocks = (context->leftover > 0) ?
(context->nblocks + 1) : context->nblocks;
return 1;
}
| 0
|
338,885
|
static void mxf_read_pixel_layout(ByteIOContext *pb, MXFDescriptor *descriptor)
{
int code, value, ofs = 0;
char layout[16] = {};
do {
code = get_byte(pb);
value = get_byte(pb);
dprintf(NULL, "pixel layout: code %#x\n", code);
if (ofs < 16) {
layout[ofs++] = code;
layout[ofs++] = value;
}
} while (code != 0); /* SMPTE 377M E.2.46 */
ff_mxf_decode_pixel_layout(layout, &descriptor->pix_fmt);
}
| 0
|
188,913
|
bool ChromeNetworkDelegate::OnCanGetCookies(
const net::URLRequest& request,
const net::CookieList& cookie_list) {
if (!cookie_settings_.get())
return true;
bool allow = cookie_settings_->IsReadingCookieAllowed(
request.url(), request.first_party_for_cookies());
int render_process_id = -1;
int render_view_id = -1;
if (content::ResourceRequestInfo::GetRenderViewForRequest(
&request, &render_process_id, &render_view_id)) {
BrowserThread::PostTask(
BrowserThread::UI, FROM_HERE,
base::Bind(&TabSpecificContentSettings::CookiesRead,
render_process_id, render_view_id,
request.url(), request.first_party_for_cookies(),
cookie_list, !allow));
}
return allow;
}
| 0
|
43,704
|
void PrintGeneralUsage()
{
u32 i=0;
gf_sys_format_help(helpout, help_flags, "# General Options\n"
"MP4Box is a multimedia packager, with a vast number of functionalities: conversion, splitting, hinting, dumping, DASH-ing, encryption, transcoding and others.\n"
"MP4Box provides a large set of options, classified by categories (see [-h]()). These options do not follow any particular ordering.\n"
" \n"
"By default, MP4Box rewrites the input file. You can change this behavior by using the [-out]() option.\n"
"MP4Box stores by default the file with 0.5 second interleaving and meta-data (`moov` ...) at the beginning, making it suitable for HTTP download-and-play. This may however takes longer to store the file, use [-flat]() to change this behavior.\n"
" \n"
"MP4Box usually generates a temporary file when creating a new IsoMedia file. The location of this temporary file is OS-dependent, and it may happen that the drive/partition the temporary file is created on has not enough space or no write access. In such a case, you can specify a temporary file location with [-tmp]().\n"
" \n"
"Option values:\n"
"Unless specified otherwise, an option of type `integer` expects a trackID value following it."
"An option of type `boolean` expects no following value."
"Note: Track operations identify tracks through their ID (usually referred to as tkID in the help), not their order.\n"
" \n"
);
while (m4b_gen_args[i].name) {
GF_GPACArg *arg = (GF_GPACArg *) &m4b_gen_args[i];
i++;
gf_sys_print_arg(helpout, help_flags, arg, "mp4box-gen");
}
}
| 0
|
106,724
|
static void cmd_anal_aad(RCore *core, const char *input) {
RListIter *iter;
RAnalRef *ref;
RList *list = r_list_newf (NULL);
r_anal_xrefs_from (core->anal, list, "xref", R_ANAL_REF_TYPE_DATA, UT64_MAX);
r_list_foreach (list, iter, ref) {
if (r_io_is_valid_offset (core->io, ref->addr, false)) {
r_core_anal_fcn (core, ref->at, ref->addr, R_ANAL_REF_TYPE_NULL, 1);
}
}
r_list_free (list);
}
| 0
|
186,212
|
void GLES2Implementation::DeleteSamplersStub(GLsizei n,
const GLuint* samplers) {
helper_->DeleteSamplersImmediate(n, samplers);
}
| 0
|
509,376
|
EC_POINT *EC_POINT_dup(const EC_POINT *a, const EC_GROUP *group)
{
EC_POINT *t;
int r;
if (a == NULL) return NULL;
t = EC_POINT_new(group);
if (t == NULL) return(NULL);
r = EC_POINT_copy(t, a);
if (!r)
{
EC_POINT_free(t);
return NULL;
}
else return t;
}
| 0
|
420,502
|
sparse_scan_file (struct tar_sparse_file *file)
{
/* always check for completely sparse files */
if (sparse_scan_file_wholesparse (file))
return true;
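  /* the file has data somewhere; pick a hole-detection strategy */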
switch (hole_detection)
{
case HOLE_DETECTION_DEFAULT:
case HOLE_DETECTION_SEEK:
#ifdef SEEK_HOLE
if (sparse_scan_file_seek (file))
return true;
#else
if (hole_detection == HOLE_DETECTION_SEEK)
WARN((0, 0,
_("\"seek\" hole detection is not supported, using \"raw\".")));
/* fall back to "raw" for this and all other files */
hole_detection = HOLE_DETECTION_RAW;
#endif
FALLTHROUGH;
case HOLE_DETECTION_RAW:
if (sparse_scan_file_raw (file))
return true;
}
return false;
}
| 0
|
144,736
|
txInteger fxGetArrayBufferLength(txMachine* the, txSlot* slot)
{
txSlot* instance = fxCheckArrayBufferInstance(the, slot);
txSlot* arrayBuffer = instance->next;
txSlot* bufferInfo = arrayBuffer->next;
return bufferInfo->value.bufferInfo.length;
}
| 0
|
128,313
|
check_for_string_or_number_or_list_arg(typval_T *args, int idx)
{
if (args[idx].v_type != VAR_STRING
&& args[idx].v_type != VAR_NUMBER
&& args[idx].v_type != VAR_LIST)
{
semsg(_(e_string_number_or_list_required_for_argument_nr), idx + 1);
return FAIL;
}
return OK;
}
| 0
|
97,665
|
static int io_shutdown(struct io_kiocb *req, unsigned int issue_flags)
{
#if defined(CONFIG_NET)
struct socket *sock;
int ret;
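	/* shutdown may block, so punt non-blocking submissions back for a blocking retry */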
if (issue_flags & IO_URING_F_NONBLOCK)
return -EAGAIN;
sock = sock_from_file(req->file);
if (unlikely(!sock))
return -ENOTSOCK;
ret = __sys_shutdown_sock(sock, req->shutdown.how);
if (ret < 0)
req_set_fail(req);
io_req_complete(req, ret);
return 0;
#else
return -EOPNOTSUPP;
#endif
}
| 0
|
50,801
|
QPDFWriter::writeStandard()
{
if (this->deterministic_id)
{
pushMD5Pipeline();
}
// Start writing
writeHeader();
writeString(this->extra_header_text);
if (this->preserve_unreferenced_objects)
{
QTC::TC("qpdf", "QPDFWriter preserve unreferenced standard");
std::vector<QPDFObjectHandle> all = this->pdf.getAllObjects();
for (std::vector<QPDFObjectHandle>::iterator iter = all.begin();
iter != all.end(); ++iter)
{
enqueueObject(*iter);
}
}
// Put root first on queue.
QPDFObjectHandle trailer = getTrimmedTrailer();
enqueueObject(trailer.getKey("/Root"));
// Next place any other objects referenced from the trailer
// dictionary into the queue, handling direct objects recursively.
// Root is already there, so enqueuing it a second time is a
// no-op.
std::set<std::string> keys = trailer.getKeys();
for (std::set<std::string>::iterator iter = keys.begin();
iter != keys.end(); ++iter)
{
enqueueObject(trailer.getKey(*iter));
}
// Now start walking queue, output each object
while (this->object_queue.size())
{
QPDFObjectHandle cur_object = this->object_queue.front();
this->object_queue.pop_front();
writeObject(cur_object);
}
// Write out the encryption dictionary, if any
if (this->encrypted)
{
writeEncryptionDictionary();
}
// Now write out xref. next_objid is now the number of objects.
qpdf_offset_t xref_offset = this->pipeline->getCount();
if (this->object_stream_to_objects.empty())
{
// Write regular cross-reference table
writeXRefTable(t_normal, 0, this->next_objid - 1, this->next_objid);
}
else
{
// Write cross-reference stream.
int xref_id = this->next_objid++;
writeXRefStream(xref_id, xref_id, xref_offset, t_normal,
0, this->next_objid - 1, this->next_objid);
}
writeString("startxref\n");
writeString(QUtil::int_to_string(xref_offset));
writeString("\n%%EOF\n");
if (this->deterministic_id)
{
QTC::TC("qpdf", "QPDFWriter standard deterministic ID",
this->object_stream_to_objects.empty() ? 0 : 1);
popPipelineStack();
assert(this->md5_pipeline == 0);
}
}
| 0
|
65,543
|
TRIO_PUBLIC_STRING char* trio_string_index_last TRIO_ARGS2((self, character), trio_string_t* self,
int character)
{
assert(self);
return trio_index_last(self->content, character);
}
| 0
|
473,150
|
tmx_m_of(union DateData *x)
{
return m_of(x);
}
| 0
|
88,699
|
NodeDebugInfo::NodeDebugInfo(const Node& n) : NodeDebugInfo(n.def()) {}
| 0
|
226,575
|
static ZEND_RESULT_CODE parse_idn2(struct parse_state *state, size_t prev_len)
{
char *idn = NULL;
int rv = -1;
TSRMLS_FETCH_FROM_CTX(state->ts);
if (state->flags & PHP_HTTP_URL_PARSE_MBUTF8) {
rv = idn2_lookup_u8((const unsigned char *) state->url.host, (unsigned char **) &idn, IDN2_NFC_INPUT);
}
# ifdef PHP_HTTP_HAVE_WCHAR
else if (state->flags & PHP_HTTP_URL_PARSE_MBLOC) {
rv = idn2_lookup_ul(state->url.host, &idn, 0);
}
# endif
if (rv != IDN2_OK) {
php_error_docref(NULL TSRMLS_CC, E_WARNING, "Failed to parse IDN; %s", idn2_strerror(rv));
return FAILURE;
} else {
size_t idnlen = strlen(idn);
memcpy(state->url.host, idn, idnlen + 1);
free(idn);
state->offset += idnlen - prev_len;
return SUCCESS;
}
}
| 0
|
426,621
|
static void locking_callback(int mode, int n, const char *file, int line) {
if(mode & CRYPTO_LOCK)
pni_mutex_lock(&locks[n]);
else
pni_mutex_unlock(&locks[n]);
}
| 0
|
381,894
|
ExecGetTriggerResultRel(EState *estate, Oid relid)
{
ResultRelInfo *rInfo;
int nr;
ListCell *l;
Relation rel;
MemoryContext oldcontext;
/* First, search through the query result relations */
rInfo = estate->es_result_relations;
nr = estate->es_num_result_relations;
while (nr > 0)
{
if (RelationGetRelid(rInfo->ri_RelationDesc) == relid)
return rInfo;
rInfo++;
nr--;
}
/* Nope, but maybe we already made an extra ResultRelInfo for it */
foreach(l, estate->es_trig_target_relations)
{
rInfo = (ResultRelInfo *) lfirst(l);
if (RelationGetRelid(rInfo->ri_RelationDesc) == relid)
return rInfo;
}
/* Nope, so we need a new one */
/*
* Open the target relation's relcache entry. We assume that an
* appropriate lock is still held by the backend from whenever the trigger
* event got queued, so we need take no new lock here. Also, we need not
* recheck the relkind, so no need for CheckValidResultRel.
*/
rel = heap_open(relid, NoLock);
/*
* Make the new entry in the right context.
*/
oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
rInfo = makeNode(ResultRelInfo);
InitResultRelInfo(rInfo,
rel,
0, /* dummy rangetable index */
estate->es_instrument);
estate->es_trig_target_relations =
lappend(estate->es_trig_target_relations, rInfo);
MemoryContextSwitchTo(oldcontext);
/*
* Currently, we don't need any index information in ResultRelInfos used
* only for triggers, so no need to call ExecOpenIndices.
*/
return rInfo;
}
| 0
|
319,601
|
static CharDriverState *qemu_chr_open_udp_fd(int fd)
{
CharDriverState *chr = NULL;
NetCharDriver *s = NULL;
chr = g_malloc0(sizeof(CharDriverState));
s = g_malloc0(sizeof(NetCharDriver));
s->fd = fd;
s->chan = io_channel_from_socket(s->fd);
s->bufcnt = 0;
s->bufptr = 0;
chr->opaque = s;
chr->chr_write = udp_chr_write;
chr->chr_update_read_handler = udp_chr_update_read_handler;
chr->chr_close = udp_chr_close;
/* be isn't opened until we get a connection */
chr->explicit_be_open = true;
return chr;
}
| 0
|
80,284
|
void operator()(OpKernelContext* context, const Tensor& x,
const Tensor& scale, const Tensor& offset,
const Tensor& estimated_mean,
const Tensor& estimated_variance, const Tensor* side_input,
U epsilon, U exponential_avg_factor,
FusedBatchNormActivationMode activation_mode, Tensor* y,
Tensor* batch_mean, Tensor* batch_var, Tensor* saved_mean,
Tensor* saved_inv_var, TensorFormat tensor_format,
bool use_reserved_space) {
auto* stream = context->op_device_context()->stream();
OP_REQUIRES(context, stream, errors::Internal("No GPU stream available"));
const int64_t batch_size = GetTensorDim(x, tensor_format, 'N');
const int64_t channels = GetTensorDim(x, tensor_format, 'C');
const int64_t height = GetTensorDim(x, tensor_format, 'H');
const int64_t width = GetTensorDim(x, tensor_format, 'W');
// If use_reserved_space we have reserve_space_3 output (only in
// FusedBatchNormV3 op).
#if GOOGLE_CUDA
// Check if cuDNN batch normalization has a fast NHWC implementation:
// (1) In inference mode it's always fast.
    //   (2) TensorFlow enabled batchnorm spatial persistence and we are
    //       called from FusedBatchNormV3, i.e. use_reserved_space is true.
const bool fast_nhwc_batch_norm =
!is_training ||
(BatchnormSpatialPersistentEnabled() &&
DataTypeToEnum<T>::value == DT_HALF && use_reserved_space);
#else
// fast NHWC implementation is a CUDA only feature
const bool fast_nhwc_batch_norm = false;
#endif
// If input tensor is in NHWC format, and we have a fast cuDNN
// implementation, there is no need to do data format conversion.
TensorFormat compute_format =
fast_nhwc_batch_norm && tensor_format == FORMAT_NHWC ? FORMAT_NHWC
: FORMAT_NCHW;
VLOG(2) << "FusedBatchNorm:"
<< " batch_size: " << batch_size << " channels: " << channels
<< " height: " << height << " width:" << width
<< " x shape: " << x.shape().DebugString()
<< " scale shape: " << scale.shape().DebugString()
<< " offset shape: " << offset.shape().DebugString()
<< " activation mode: " << ToString(activation_mode)
<< " tensor format: " << ToString(tensor_format)
<< " compute format: " << ToString(compute_format);
auto maybe_make_dummy_output = [context, use_reserved_space]() -> Status {
if (use_reserved_space) {
Tensor* dummy_reserve_space = nullptr;
return context->allocate_output(5, {}, &dummy_reserve_space);
}
return Status::OK();
};
// If input is empty, return NaN mean/variance
if (x.shape().num_elements() == 0) {
OP_REQUIRES_OK(context, maybe_make_dummy_output());
functor::SetNanFunctor<GPUDevice, U> f;
f(context->eigen_device<GPUDevice>(), batch_mean->flat<U>());
f(context->eigen_device<GPUDevice>(), batch_var->flat<U>());
return;
}
// In inference mode we use custom CUDA kernel, because cuDNN does not
// support side input and activations for inference.
const bool has_side_input = side_input != nullptr;
const bool has_activation =
activation_mode != FusedBatchNormActivationMode::kIdentity;
if (!is_training && (has_side_input || has_activation)) {
OP_REQUIRES_OK(context, maybe_make_dummy_output());
FusedBatchNormInferenceFunctor<GPUDevice, T, U> inference_functor;
if (has_side_input) {
inference_functor(context, tensor_format, x.tensor<T, 4>(),
scale.vec<U>(), offset.vec<U>(),
estimated_mean.vec<U>(), estimated_variance.vec<U>(),
side_input->tensor<T, 4>(), epsilon, activation_mode,
y->tensor<T, 4>());
} else {
typename TTypes<T, 4>::ConstTensor empty_tensor(nullptr, 0, 0, 0, 0);
inference_functor(context, tensor_format, x.tensor<T, 4>(),
scale.vec<U>(), offset.vec<U>(),
estimated_mean.vec<U>(), estimated_variance.vec<U>(),
empty_tensor, epsilon, activation_mode,
y->tensor<T, 4>());
}
return;
}
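    // Past this point we use cuDNN; convert NHWC data to NCHW when the fast
    // NHWC kernel is not available.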
Tensor x_maybe_transformed = x;
Tensor x_transformed;
Tensor y_transformed;
se::DeviceMemory<T> y_ptr;
if (tensor_format == compute_format) {
y_ptr = StreamExecutorUtil::AsDeviceMemory<T>(*y);
} else if (tensor_format == FORMAT_NHWC && compute_format == FORMAT_NCHW) {
OP_REQUIRES_OK(context, context->allocate_temp(
DataTypeToEnum<T>::value,
ShapeFromFormat(compute_format, batch_size,
height, width, channels),
&x_transformed));
functor::NHWCToNCHW<GPUDevice, T, 4>()(
context->eigen_device<GPUDevice>(),
const_cast<const Tensor&>(x_maybe_transformed).tensor<T, 4>(),
x_transformed.tensor<T, 4>());
x_maybe_transformed = x_transformed;
OP_REQUIRES_OK(context, context->allocate_temp(
DataTypeToEnum<T>::value,
ShapeFromFormat(compute_format, batch_size,
height, width, channels),
&y_transformed));
y_ptr = StreamExecutorUtil::AsDeviceMemory<T>(y_transformed);
} else {
context->SetStatus(errors::Internal(
"Unsupported tensor format: ", ToString(tensor_format),
" and compute format: ", ToString(compute_format)));
return;
}
const se::dnn::DataLayout data_layout =
compute_format == FORMAT_NHWC ? se::dnn::DataLayout::kBatchYXDepth
: se::dnn::DataLayout::kBatchDepthYX;
se::dnn::BatchDescriptor x_desc;
x_desc.set_count(batch_size)
.set_feature_map_count(channels)
.set_height(height)
.set_width(width)
.set_layout(data_layout);
se::dnn::BatchDescriptor scale_offset_desc;
scale_offset_desc.set_count(1)
.set_feature_map_count(channels)
.set_height(1)
.set_width(1)
.set_layout(se::dnn::DataLayout::kBatchDepthYX);
auto x_ptr = StreamExecutorUtil::AsDeviceMemory<T>(x_maybe_transformed);
auto scale_ptr = StreamExecutorUtil::AsDeviceMemory<U>(scale);
auto offset_ptr = StreamExecutorUtil::AsDeviceMemory<U>(offset);
auto estimated_mean_ptr =
StreamExecutorUtil::AsDeviceMemory<U>(estimated_mean);
auto estimated_variance_ptr =
StreamExecutorUtil::AsDeviceMemory<U>(estimated_variance);
auto side_input_ptr =
side_input != nullptr
? StreamExecutorUtil::AsDeviceMemory<T>(*side_input)
: se::DeviceMemory<T>();
auto batch_mean_ptr = StreamExecutorUtil::AsDeviceMemory<U>(*batch_mean);
auto batch_var_ptr = StreamExecutorUtil::AsDeviceMemory<U>(*batch_var);
auto saved_mean_ptr = StreamExecutorUtil::AsDeviceMemory<U>(*saved_mean);
auto saved_inv_var_ptr =
StreamExecutorUtil::AsDeviceMemory<U>(*saved_inv_var);
std::unique_ptr<functor::CudnnBatchNormAllocatorInOutput<U>>
reserve_space_allocator;
std::unique_ptr<functor::CudnnBatchNormAllocatorInTemp<uint8>>
workspace_allocator;
if (use_reserved_space) {
reserve_space_allocator.reset(
new functor::CudnnBatchNormAllocatorInOutput<U>(context, 5));
workspace_allocator.reset(
new functor::CudnnBatchNormAllocatorInTemp<uint8>(context));
}
if (!batch_mean->SharesBufferWith(estimated_mean) &&
exponential_avg_factor != 1.0f) {
OP_REQUIRES(
context,
stream
->ThenMemcpyD2D(&batch_mean_ptr, estimated_mean_ptr,
estimated_mean.NumElements() * sizeof(U))
.ok(),
errors::Internal("MatrixTriangularSolveOp: failed to copy rhs "
"from device"));
}
if (!batch_var->SharesBufferWith(estimated_variance) &&
exponential_avg_factor != 1.0f) {
OP_REQUIRES(
context,
stream
->ThenMemcpyD2D(&batch_var_ptr, estimated_variance_ptr,
estimated_variance.NumElements() * sizeof(U))
.ok(),
errors::Internal("MatrixTriangularSolveOp: failed to copy rhs "
"from device"));
}
bool cudnn_launch_status =
stream
->ThenBatchNormalizationForward(
x_ptr, scale_ptr, offset_ptr, estimated_mean_ptr,
estimated_variance_ptr, side_input_ptr, x_desc,
scale_offset_desc, static_cast<double>(epsilon),
static_cast<double>(exponential_avg_factor),
AsDnnActivationMode(activation_mode), &y_ptr, &batch_mean_ptr,
&batch_var_ptr, &saved_mean_ptr, &saved_inv_var_ptr,
is_training, reserve_space_allocator.get(),
workspace_allocator.get())
.ok();
if (!cudnn_launch_status) {
context->SetStatus(
errors::Internal("cuDNN launch failure : input shape (",
x.shape().DebugString(), ")"));
return;
}
if (tensor_format == FORMAT_NHWC && compute_format == FORMAT_NCHW) {
functor::NCHWToNHWC<GPUDevice, T, 4>()(
context->eigen_device<GPUDevice>(),
const_cast<const Tensor&>(y_transformed).tensor<T, 4>(),
y->tensor<T, 4>());
}
}
| 0
|
18,371
|
static int selinux_tun_dev_alloc_security(void **security)
{
	struct tun_security_struct *tunsec;
	tunsec = kzalloc(sizeof(*tunsec), GFP_KERNEL);
	if (!tunsec)
		return -ENOMEM;
	tunsec->sid = current_sid();
	*security = tunsec;
	return 0;
}
| 0
|
360,107
|
nautilus_file_get_activation_uri (NautilusFile *file)
{
g_return_val_if_fail (NAUTILUS_IS_FILE (file), NULL);
if (file->details->activation_location != NULL) {
return g_file_get_uri (file->details->activation_location);
}
return nautilus_file_get_uri (file);
}
| 0
|
357,532
|
bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr, fmode_t has_write_perm)
{
struct request_queue *q = bd->queue;
struct request *rq, *next_rq = NULL;
int ret, rw;
unsigned int dxfer_len;
void *dxferp = NULL;
dprintk("map hdr %llx/%u %llx/%u\n", (unsigned long long) hdr->dout_xferp,
hdr->dout_xfer_len, (unsigned long long) hdr->din_xferp,
hdr->din_xfer_len);
ret = bsg_validate_sgv4_hdr(q, hdr, &rw);
if (ret)
return ERR_PTR(ret);
/*
	 * map scatter-gather elements separately and string them to request
*/
rq = blk_get_request(q, rw, GFP_KERNEL);
if (!rq)
return ERR_PTR(-ENOMEM);
ret = blk_fill_sgv4_hdr_rq(q, rq, hdr, bd, has_write_perm);
if (ret)
goto out;
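	/* a WRITE request that also has din_xfer_len is bidirectional: allocate a second request for the inbound data */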
if (rw == WRITE && hdr->din_xfer_len) {
if (!test_bit(QUEUE_FLAG_BIDI, &q->queue_flags)) {
ret = -EOPNOTSUPP;
goto out;
}
next_rq = blk_get_request(q, READ, GFP_KERNEL);
if (!next_rq) {
ret = -ENOMEM;
goto out;
}
rq->next_rq = next_rq;
next_rq->cmd_type = rq->cmd_type;
dxferp = (void*)(unsigned long)hdr->din_xferp;
ret = blk_rq_map_user(q, next_rq, NULL, dxferp,
hdr->din_xfer_len, GFP_KERNEL);
if (ret)
goto out;
}
if (hdr->dout_xfer_len) {
dxfer_len = hdr->dout_xfer_len;
dxferp = (void*)(unsigned long)hdr->dout_xferp;
} else if (hdr->din_xfer_len) {
dxfer_len = hdr->din_xfer_len;
dxferp = (void*)(unsigned long)hdr->din_xferp;
} else
dxfer_len = 0;
if (dxfer_len) {
ret = blk_rq_map_user(q, rq, NULL, dxferp, dxfer_len,
GFP_KERNEL);
if (ret)
goto out;
}
return rq;
out:
if (rq->cmd != rq->__cmd)
kfree(rq->cmd);
blk_put_request(rq);
if (next_rq) {
blk_rq_unmap_user(next_rq->bio);
blk_put_request(next_rq);
}
return ERR_PTR(ret);
}
| 0
|
506,453
|
const char *SSL_state_string(const SSL *s)
{
const char *str;
switch (s->state)
{
case SSL_ST_BEFORE: str="PINIT "; break;
case SSL_ST_ACCEPT: str="AINIT "; break;
case SSL_ST_CONNECT: str="CINIT "; break;
case SSL_ST_OK: str="SSLOK "; break;
#ifndef OPENSSL_NO_SSL2
case SSL2_ST_CLIENT_START_ENCRYPTION: str="2CSENC"; break;
case SSL2_ST_SERVER_START_ENCRYPTION: str="2SSENC"; break;
case SSL2_ST_SEND_CLIENT_HELLO_A: str="2SCH_A"; break;
case SSL2_ST_SEND_CLIENT_HELLO_B: str="2SCH_B"; break;
case SSL2_ST_GET_SERVER_HELLO_A: str="2GSH_A"; break;
case SSL2_ST_GET_SERVER_HELLO_B: str="2GSH_B"; break;
case SSL2_ST_SEND_CLIENT_MASTER_KEY_A: str="2SCMKA"; break;
case SSL2_ST_SEND_CLIENT_MASTER_KEY_B: str="2SCMKB"; break;
case SSL2_ST_SEND_CLIENT_FINISHED_A: str="2SCF_A"; break;
case SSL2_ST_SEND_CLIENT_FINISHED_B: str="2SCF_B"; break;
case SSL2_ST_SEND_CLIENT_CERTIFICATE_A: str="2SCC_A"; break;
case SSL2_ST_SEND_CLIENT_CERTIFICATE_B: str="2SCC_B"; break;
case SSL2_ST_SEND_CLIENT_CERTIFICATE_C: str="2SCC_C"; break;
case SSL2_ST_SEND_CLIENT_CERTIFICATE_D: str="2SCC_D"; break;
case SSL2_ST_GET_SERVER_VERIFY_A: str="2GSV_A"; break;
case SSL2_ST_GET_SERVER_VERIFY_B: str="2GSV_B"; break;
case SSL2_ST_GET_SERVER_FINISHED_A: str="2GSF_A"; break;
case SSL2_ST_GET_SERVER_FINISHED_B: str="2GSF_B"; break;
case SSL2_ST_GET_CLIENT_HELLO_A: str="2GCH_A"; break;
case SSL2_ST_GET_CLIENT_HELLO_B: str="2GCH_B"; break;
case SSL2_ST_GET_CLIENT_HELLO_C: str="2GCH_C"; break;
case SSL2_ST_SEND_SERVER_HELLO_A: str="2SSH_A"; break;
case SSL2_ST_SEND_SERVER_HELLO_B: str="2SSH_B"; break;
case SSL2_ST_GET_CLIENT_MASTER_KEY_A: str="2GCMKA"; break;
case SSL2_ST_GET_CLIENT_MASTER_KEY_B: str="2GCMKB"; break;
case SSL2_ST_SEND_SERVER_VERIFY_A: str="2SSV_A"; break;
case SSL2_ST_SEND_SERVER_VERIFY_B: str="2SSV_B"; break;
case SSL2_ST_SEND_SERVER_VERIFY_C: str="2SSV_C"; break;
case SSL2_ST_GET_CLIENT_FINISHED_A: str="2GCF_A"; break;
case SSL2_ST_GET_CLIENT_FINISHED_B: str="2GCF_B"; break;
case SSL2_ST_SEND_SERVER_FINISHED_A: str="2SSF_A"; break;
case SSL2_ST_SEND_SERVER_FINISHED_B: str="2SSF_B"; break;
case SSL2_ST_SEND_REQUEST_CERTIFICATE_A: str="2SRC_A"; break;
case SSL2_ST_SEND_REQUEST_CERTIFICATE_B: str="2SRC_B"; break;
case SSL2_ST_SEND_REQUEST_CERTIFICATE_C: str="2SRC_C"; break;
case SSL2_ST_SEND_REQUEST_CERTIFICATE_D: str="2SRC_D"; break;
case SSL2_ST_X509_GET_SERVER_CERTIFICATE: str="2X9GSC"; break;
case SSL2_ST_X509_GET_CLIENT_CERTIFICATE: str="2X9GCC"; break;
#endif
#ifndef OPENSSL_NO_SSL3
/* SSLv3 additions */
case SSL3_ST_SW_FLUSH:
case SSL3_ST_CW_FLUSH: str="3FLUSH"; break;
case SSL3_ST_CW_CLNT_HELLO_A: str="3WCH_A"; break;
case SSL3_ST_CW_CLNT_HELLO_B: str="3WCH_B"; break;
case SSL3_ST_CR_SRVR_HELLO_A: str="3RSH_A"; break;
case SSL3_ST_CR_SRVR_HELLO_B: str="3RSH_B"; break;
case SSL3_ST_CR_CERT_A: str="3RSC_A"; break;
case SSL3_ST_CR_CERT_B: str="3RSC_B"; break;
case SSL3_ST_CR_KEY_EXCH_A: str="3RSKEA"; break;
case SSL3_ST_CR_KEY_EXCH_B: str="3RSKEB"; break;
case SSL3_ST_CR_CERT_REQ_A: str="3RCR_A"; break;
case SSL3_ST_CR_CERT_REQ_B: str="3RCR_B"; break;
case SSL3_ST_CR_SRVR_DONE_A: str="3RSD_A"; break;
case SSL3_ST_CR_SRVR_DONE_B: str="3RSD_B"; break;
case SSL3_ST_CW_CERT_A: str="3WCC_A"; break;
case SSL3_ST_CW_CERT_B: str="3WCC_B"; break;
case SSL3_ST_CW_CERT_C: str="3WCC_C"; break;
case SSL3_ST_CW_CERT_D: str="3WCC_D"; break;
case SSL3_ST_CW_KEY_EXCH_A: str="3WCKEA"; break;
case SSL3_ST_CW_KEY_EXCH_B: str="3WCKEB"; break;
case SSL3_ST_CW_CERT_VRFY_A: str="3WCV_A"; break;
case SSL3_ST_CW_CERT_VRFY_B: str="3WCV_B"; break;
case SSL3_ST_SW_CHANGE_A:
case SSL3_ST_CW_CHANGE_A: str="3WCCSA"; break;
case SSL3_ST_SW_CHANGE_B:
case SSL3_ST_CW_CHANGE_B: str="3WCCSB"; break;
case SSL3_ST_SW_FINISHED_A:
case SSL3_ST_CW_FINISHED_A: str="3WFINA"; break;
case SSL3_ST_SW_FINISHED_B:
case SSL3_ST_CW_FINISHED_B: str="3WFINB"; break;
case SSL3_ST_SR_CHANGE_A:
case SSL3_ST_CR_CHANGE_A: str="3RCCSA"; break;
case SSL3_ST_SR_CHANGE_B:
case SSL3_ST_CR_CHANGE_B: str="3RCCSB"; break;
case SSL3_ST_SR_FINISHED_A:
case SSL3_ST_CR_FINISHED_A: str="3RFINA"; break;
case SSL3_ST_SR_FINISHED_B:
case SSL3_ST_CR_FINISHED_B: str="3RFINB"; break;
case SSL3_ST_SW_HELLO_REQ_A: str="3WHR_A"; break;
case SSL3_ST_SW_HELLO_REQ_B: str="3WHR_B"; break;
case SSL3_ST_SW_HELLO_REQ_C: str="3WHR_C"; break;
case SSL3_ST_SR_CLNT_HELLO_A: str="3RCH_A"; break;
case SSL3_ST_SR_CLNT_HELLO_B: str="3RCH_B"; break;
case SSL3_ST_SR_CLNT_HELLO_C: str="3RCH_C"; break;
case SSL3_ST_SW_SRVR_HELLO_A: str="3WSH_A"; break;
case SSL3_ST_SW_SRVR_HELLO_B: str="3WSH_B"; break;
case SSL3_ST_SW_CERT_A: str="3WSC_A"; break;
case SSL3_ST_SW_CERT_B: str="3WSC_B"; break;
case SSL3_ST_SW_KEY_EXCH_A: str="3WSKEA"; break;
case SSL3_ST_SW_KEY_EXCH_B: str="3WSKEB"; break;
case SSL3_ST_SW_CERT_REQ_A: str="3WCR_A"; break;
case SSL3_ST_SW_CERT_REQ_B: str="3WCR_B"; break;
case SSL3_ST_SW_SRVR_DONE_A: str="3WSD_A"; break;
case SSL3_ST_SW_SRVR_DONE_B: str="3WSD_B"; break;
case SSL3_ST_SR_CERT_A: str="3RCC_A"; break;
case SSL3_ST_SR_CERT_B: str="3RCC_B"; break;
case SSL3_ST_SR_KEY_EXCH_A: str="3RCKEA"; break;
case SSL3_ST_SR_KEY_EXCH_B: str="3RCKEB"; break;
case SSL3_ST_SR_CERT_VRFY_A: str="3RCV_A"; break;
case SSL3_ST_SR_CERT_VRFY_B: str="3RCV_B"; break;
#endif
#if !defined(OPENSSL_NO_SSL2) && !defined(OPENSSL_NO_SSL3)
/* SSLv2/v3 compatibility states */
/* client */
case SSL23_ST_CW_CLNT_HELLO_A: str="23WCHA"; break;
case SSL23_ST_CW_CLNT_HELLO_B: str="23WCHB"; break;
case SSL23_ST_CR_SRVR_HELLO_A: str="23RSHA"; break;
case SSL23_ST_CR_SRVR_HELLO_B: str="23RSHB"; break;
/* server */
case SSL23_ST_SR_CLNT_HELLO_A: str="23RCHA"; break;
case SSL23_ST_SR_CLNT_HELLO_B: str="23RCHB"; break;
#endif
/* DTLS */
case DTLS1_ST_CR_HELLO_VERIFY_REQUEST_A: str="DRCHVA"; break;
case DTLS1_ST_CR_HELLO_VERIFY_REQUEST_B: str="DRCHVB"; break;
case DTLS1_ST_SW_HELLO_VERIFY_REQUEST_A: str="DWCHVA"; break;
case DTLS1_ST_SW_HELLO_VERIFY_REQUEST_B: str="DWCHVB"; break;
default: str="UNKWN "; break;
}
return(str);
}
| 0
|
65,559
|
static int ipxitf_rcv(struct ipx_interface *intrfc, struct sk_buff *skb)
{
struct ipxhdr *ipx = ipx_hdr(skb);
int rc = 0;
ipxitf_hold(intrfc);
/* See if we should update our network number */
if (!intrfc->if_netnum) /* net number of intrfc not known yet */
ipxitf_discover_netnum(intrfc, skb);
IPX_SKB_CB(skb)->last_hop.index = -1;
if (ipx->ipx_type == IPX_TYPE_PPROP) {
rc = ipxitf_pprop(intrfc, skb);
if (rc)
goto out_free_skb;
}
/* local processing follows */
if (!IPX_SKB_CB(skb)->ipx_dest_net)
IPX_SKB_CB(skb)->ipx_dest_net = intrfc->if_netnum;
if (!IPX_SKB_CB(skb)->ipx_source_net)
IPX_SKB_CB(skb)->ipx_source_net = intrfc->if_netnum;
/* it doesn't make sense to route a pprop packet, there's no meaning
* in the ipx_dest_net for such packets */
if (ipx->ipx_type != IPX_TYPE_PPROP &&
intrfc->if_netnum != IPX_SKB_CB(skb)->ipx_dest_net) {
/* We only route point-to-point packets. */
if (skb->pkt_type == PACKET_HOST) {
skb = skb_unshare(skb, GFP_ATOMIC);
if (skb)
rc = ipxrtr_route_skb(skb);
goto out_intrfc;
}
goto out_free_skb;
}
/* see if we should keep it */
if (!memcmp(ipx_broadcast_node, ipx->ipx_dest.node, IPX_NODE_LEN) ||
!memcmp(intrfc->if_node, ipx->ipx_dest.node, IPX_NODE_LEN)) {
rc = ipxitf_demux_socket(intrfc, skb, 0);
goto out_intrfc;
}
/* we couldn't pawn it off so unload it */
out_free_skb:
kfree_skb(skb);
out_intrfc:
ipxitf_put(intrfc);
return rc;
}
| 0
|
86,966
|
static TEE_Result load_elf(const TEE_UUID *uuid, struct user_ta_ctx *utc)
{
TEE_Result res;
const struct user_ta_store_ops *op = NULL;
SCATTERED_ARRAY_FOREACH(op, ta_stores, struct user_ta_store_ops) {
DMSG("Lookup user TA ELF %pUl (%s)", (void *)uuid,
op->description);
res = load_elf_from_store(uuid, op, utc);
if (res == TEE_ERROR_ITEM_NOT_FOUND)
continue;
if (res) {
DMSG("res=0x%x", res);
continue;
}
return res;
}
return TEE_ERROR_ITEM_NOT_FOUND;
}
| 0
|
510,740
|
int ha_partition::index_prev(uchar * buf)
{
DBUG_ENTER("ha_partition::index_prev");
decrement_statistics(&SSV::ha_read_prev_count);
/* TODO: read comment in index_next */
DBUG_ASSERT(m_index_scan_type != partition_index_first);
DBUG_RETURN(handle_ordered_prev(buf));
}
| 0
|
166,657
|
gfx::NativeViewId RenderWidgetHostViewAura::GetNativeViewId() const {
#if defined(OS_WIN)
aura::WindowEventDispatcher* dispatcher = window_->GetDispatcher();
if (dispatcher)
return reinterpret_cast<gfx::NativeViewId>(
dispatcher->host()->GetAcceleratedWidget());
#endif
return static_cast<gfx::NativeViewId>(NULL);
}
| 0
|
3,080
|
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
OpData* op_data = static_cast<OpData*>(node->user_data);
TF_LITE_ENSURE_EQ(context, node->outputs->size, 1);
  // Logic for determining whether this is a regular LSTM or a layer-norm LSTM:
  //   number of inputs | forget_gate_layer_norm_tensor | is_layer_norm?
  //   20               | N/A                           | No
  //   24               | null                          | No
  //   24               | not null                      | Yes
  // 20-input LSTMs are deprecated and are only kept here for backward
  // compatibility.
if (node->inputs->size == 24) {
const TfLiteTensor* forget_layer_norm_coefficients = GetOptionalInputTensor(
context, node, kForgetLayerNormCoefficientsTensor);
if (forget_layer_norm_coefficients == nullptr) {
op_data->use_layer_norm = false;
} else {
op_data->use_layer_norm = true;
}
} else if (node->inputs->size == 20) {
// This is deprecated and is only kept here for backward compatibility.
op_data->use_layer_norm = false;
} else {
context->ReportError(
context, "The LSTM Full kernel expects 20 or 24 inputs. Got %d inputs",
node->inputs->size);
return kTfLiteError;
}
const bool use_layer_norm = op_data->use_layer_norm;
// Inferring batch size, number of outputs and number of cells from the
// input tensors.
const TfLiteTensor* input = GetInput(context, node, kInputTensor);
const bool is_integer = input->type == kTfLiteInt8;
TF_LITE_ENSURE(context, input->dims->size > 1);
const int n_batch = input->dims->data[0];
const int n_input = input->dims->data[1];
const TfLiteTensor* input_to_output_weights =
GetInput(context, node, kInputToOutputWeightsTensor);
const int n_cell = input_to_output_weights->dims->data[0];
TF_LITE_ENSURE_EQ(context, input_to_output_weights->dims->size, 2);
TF_LITE_ENSURE_EQ(context, input_to_output_weights->dims->data[1], n_input);
const TfLiteTensor* recurrent_to_output_weights =
GetInput(context, node, kRecurrentToOutputWeightsTensor);
TF_LITE_ENSURE_EQ(context, recurrent_to_output_weights->dims->size, 2);
TF_LITE_ENSURE_EQ(context, recurrent_to_output_weights->dims->data[0],
n_cell);
const int n_output = recurrent_to_output_weights->dims->data[1];
// Check that input tensor dimensions matches with each other.
TF_LITE_ENSURE_OK(
context, CheckInputTensorDimensions(context, node, n_input, n_output,
n_cell, use_layer_norm, is_integer));
// Get the pointer to output, output_state and cell_state tensors.
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
TfLiteTensor* output_state =
GetVariableInput(context, node, kOutputStateTensor);
TF_LITE_ENSURE(context, output_state != nullptr);
TfLiteTensor* cell_state = GetVariableInput(context, node, kCellStateTensor);
TF_LITE_ENSURE(context, cell_state != nullptr);
// Check the shape of input state tensors.
  // These tensors may be 1D or 2D. It's fine as long as the total size is
// correct.
TF_LITE_ENSURE_EQ(context, NumElements(output_state), n_batch * n_output);
TF_LITE_ENSURE_EQ(context, NumElements(cell_state), n_batch * n_cell);
// Resize the output tensors.
TfLiteIntArray* output_size = TfLiteIntArrayCreate(2);
output_size->data[0] = n_batch;
output_size->data[1] = n_output;
TF_LITE_ENSURE_OK(context,
context->ResizeTensor(context, output, output_size));
// The weights are of consistent type, so it suffices to check one.
const bool is_hybrid_op = IsHybridOp(input, input_to_output_weights);
const bool is_sparse_op = (input_to_output_weights->sparsity != nullptr);
// The type of Integer LSTM.
const int num_intermediate_tensors = node->intermediates->size;
if (is_integer) {
TF_LITE_ENSURE(context, num_intermediate_tensors == 5 ||
num_intermediate_tensors == 12);
}
  // We use the number of intermediate tensors to distinguish between the
  // 8-bit matmul output version and the 16-bit matmul output version.
const bool is_8x8_16 = num_intermediate_tensors == 5;
TfLiteIntArrayFree(node->temporaries);
if (is_hybrid_op) {
if (is_sparse_op) {
node->temporaries =
TfLiteIntArrayCreate(kNumHybridTemporaryTensors + kLedgersToAdd);
} else {
node->temporaries = TfLiteIntArrayCreate(kNumHybridTemporaryTensors);
}
} else if (is_integer) {
if (is_8x8_16) {
node->temporaries = TfLiteIntArrayCreate(6);
} else {
node->temporaries = TfLiteIntArrayCreate(8);
}
} else {
node->temporaries = TfLiteIntArrayCreate(1);
}
// Create a scratch buffer tensor for float case and hybrid case.
// TODO(b/152066492): Create a is_float boolean and reorganize the temporary
// buffer allocation logic.
if (!is_integer) {
node->temporaries->data[kScratchBuffer] =
op_data->scratch_tensor_index + kScratchBuffer;
TfLiteTensor* scratch_buffer = GetTemporary(context, node, kScratchBuffer);
scratch_buffer->type = input->type;
scratch_buffer->allocation_type = kTfLiteArenaRw;
const TfLiteTensor* input_to_input_weights =
GetOptionalInputTensor(context, node, kInputToInputWeightsTensor);
const bool use_cifg = (input_to_input_weights == nullptr);
TfLiteIntArray* scratch_buffer_size = TfLiteIntArrayCreate(2);
scratch_buffer_size->data[0] = n_batch;
if (use_cifg) {
// Reserving space for Cell, Forget, Output gates
scratch_buffer_size->data[1] = n_cell * 3;
} else {
// Reserving space for Input, Cell, Forget, Output gates
scratch_buffer_size->data[1] = n_cell * 4;
}
TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, scratch_buffer,
scratch_buffer_size));
}
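  // Hybrid ops quantize the float inputs on the fly, so they need the extra
  // temporaries below for the quantized copies, scaling factors and zero points.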
if (is_hybrid_op) {
if (!is_sparse_op) {
op_data->compute_row_sums = true;
}
// Allocate temporary tensors to store quantized values of input,
// output_state and cell_state tensors.
node->temporaries->data[kInputQuantized] =
op_data->scratch_tensor_index + kInputQuantized;
TfLiteTensor* input_quantized =
GetTemporary(context, node, kInputQuantized);
input_quantized->type = input_to_output_weights->type;
input_quantized->allocation_type = kTfLiteArenaRw;
if (!TfLiteIntArrayEqual(input_quantized->dims, input->dims)) {
TfLiteIntArray* input_quantized_size = TfLiteIntArrayCopy(input->dims);
TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, input_quantized,
input_quantized_size));
}
node->temporaries->data[kOutputStateQuantized] =
op_data->scratch_tensor_index + kOutputStateQuantized;
TfLiteTensor* output_state_quantized =
GetTemporary(context, node, kOutputStateQuantized);
output_state_quantized->type = input_to_output_weights->type;
output_state_quantized->allocation_type = kTfLiteArenaRw;
if (!TfLiteIntArrayEqual(output_state_quantized->dims,
output_state->dims)) {
TfLiteIntArray* output_state_quantized_size =
TfLiteIntArrayCopy(output_state->dims);
TF_LITE_ENSURE_OK(context,
context->ResizeTensor(context, output_state_quantized,
output_state_quantized_size));
}
node->temporaries->data[kCellStateQuantized] =
op_data->scratch_tensor_index + kCellStateQuantized;
TfLiteTensor* cell_state_quantized =
GetTemporary(context, node, kCellStateQuantized);
cell_state_quantized->type = input_to_output_weights->type;
cell_state_quantized->allocation_type = kTfLiteArenaRw;
if (!TfLiteIntArrayEqual(cell_state_quantized->dims, cell_state->dims)) {
TfLiteIntArray* cell_state_quantized_size =
TfLiteIntArrayCopy(cell_state->dims);
TF_LITE_ENSURE_OK(context,
context->ResizeTensor(context, cell_state_quantized,
cell_state_quantized_size));
}
// Allocate temporary tensors to store scaling factors and product scaling
// factors. The latter is a convenience storage which allows to quantize
// a vector once (which produces the scaling factors) and multiply it with
// different matrices (which requires multiplying the scaling factors with
// the scaling factor of the matrix).
node->temporaries->data[kInputScalingFactors] =
op_data->scratch_tensor_index + kInputScalingFactors;
TfLiteTensor* input_sf = GetTemporary(context, node, kInputScalingFactors);
input_sf->type = kTfLiteFloat32;
input_sf->allocation_type = kTfLiteArenaRw;
int scaling_dims[1] = {n_batch};
if (!TfLiteIntArrayEqualsArray(input_sf->dims, 1, scaling_dims)) {
TfLiteIntArray* input_sf_size = TfLiteIntArrayCreate(1);
input_sf_size->data[0] = n_batch;
TF_LITE_ENSURE_OK(
context, context->ResizeTensor(context, input_sf, input_sf_size));
}
node->temporaries->data[kOutputStateScalingFactors] =
op_data->scratch_tensor_index + kOutputStateScalingFactors;
TfLiteTensor* output_state_sf =
GetTemporary(context, node, kOutputStateScalingFactors);
output_state_sf->type = kTfLiteFloat32;
output_state_sf->allocation_type = kTfLiteArenaRw;
if (!TfLiteIntArrayEqualsArray(output_state_sf->dims, 1, scaling_dims)) {
TfLiteIntArray* output_state_sf_size = TfLiteIntArrayCreate(1);
output_state_sf_size->data[0] = n_batch;
TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, output_state_sf,
output_state_sf_size));
}
node->temporaries->data[kProductScalingFactors] =
op_data->scratch_tensor_index + kProductScalingFactors;
TfLiteTensor* prod_scaling_factors =
GetTemporary(context, node, kProductScalingFactors);
prod_scaling_factors->type = kTfLiteFloat32;
prod_scaling_factors->allocation_type = kTfLiteArenaRw;
if (!TfLiteIntArrayEqualsArray(prod_scaling_factors->dims, 1,
scaling_dims)) {
TfLiteIntArray* prod_scaling_factors_size = TfLiteIntArrayCreate(1);
prod_scaling_factors_size->data[0] = n_batch;
TF_LITE_ENSURE_OK(context,
context->ResizeTensor(context, prod_scaling_factors,
prod_scaling_factors_size));
}
// Allocate a temporary tensor to store the recovered cell weights. Since
// this is used for diagonal matrices, only need to store n_cell values.
node->temporaries->data[kRecoveredCellWeights] =
op_data->scratch_tensor_index + kRecoveredCellWeights;
TfLiteTensor* recovered_cell_weights =
GetTemporary(context, node, kRecoveredCellWeights);
recovered_cell_weights->type = kTfLiteFloat32;
recovered_cell_weights->allocation_type = kTfLiteArenaRw;
int recovered_cell_dims[1] = {n_cell};
if (!TfLiteIntArrayEqualsArray(recovered_cell_weights->dims, 1,
recovered_cell_dims)) {
TfLiteIntArray* recovered_cell_weights_size = TfLiteIntArrayCreate(1);
recovered_cell_weights_size->data[0] = n_cell;
TF_LITE_ENSURE_OK(context,
context->ResizeTensor(context, recovered_cell_weights,
recovered_cell_weights_size));
}
    // Allocate a temporary tensor to store accumulated values from the matrix
    // multiplication before they are multiplied by the scaling factor.
node->temporaries->data[kAccumScratch] =
op_data->scratch_tensor_index + kAccumScratch;
TfLiteTensor* accum_scratch = GetTemporary(context, node, kAccumScratch);
accum_scratch->type = kTfLiteInt32;
accum_scratch->allocation_type = kTfLiteArenaRw;
int accum_scratch_dims[2] = {n_cell, n_batch};
if (!TfLiteIntArrayEqualsArray(accum_scratch->dims, 2,
accum_scratch_dims)) {
TfLiteIntArray* accum_size = TfLiteIntArrayCreate(2);
accum_size->data[0] = n_cell;
accum_size->data[1] = n_batch;
TF_LITE_ENSURE_OK(
context, context->ResizeTensor(context, accum_scratch, accum_size));
}
node->temporaries->data[kInputZeroPoints] =
op_data->scratch_tensor_index + kInputZeroPoints;
TfLiteTensor* input_zp = GetTemporary(context, node, kInputZeroPoints);
input_zp->type = kTfLiteFloat32;
input_zp->allocation_type = kTfLiteArenaRw;
if (!TfLiteIntArrayEqualsArray(input_zp->dims, 1, scaling_dims)) {
TfLiteIntArray* input_zp_size = TfLiteIntArrayCreate(1);
input_zp_size->data[0] = n_batch;
TF_LITE_ENSURE_OK(
context, context->ResizeTensor(context, input_zp, input_zp_size));
}
node->temporaries->data[kOutputStateZeroPoints] =
op_data->scratch_tensor_index + kOutputStateZeroPoints;
TfLiteTensor* output_state_zp =
GetTemporary(context, node, kOutputStateZeroPoints);
output_state_zp->type = kTfLiteFloat32;
output_state_zp->allocation_type = kTfLiteArenaRw;
if (!TfLiteIntArrayEqualsArray(output_state_zp->dims, 1, scaling_dims)) {
TfLiteIntArray* output_state_zp_size = TfLiteIntArrayCreate(1);
output_state_zp_size->data[0] = n_batch;
TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, output_state_zp,
output_state_zp_size));
}
node->temporaries->data[kRowSums] =
op_data->scratch_tensor_index + kRowSums;
const TfLiteTensor* input_to_input_weights =
GetOptionalInputTensor(context, node, kInputToInputWeightsTensor);
const bool use_cifg = (input_to_input_weights == nullptr);
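    // Without the input gate (CIFG) two of the eight gate weight matrices are
    // absent, so only six row-sum rows are needed.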
int row_sums_rows = use_cifg ? 6 : 8;
const TfLiteTensor* projection_weights =
GetOptionalInputTensor(context, node, kProjectionWeightsTensor);
if (projection_weights != nullptr) {
row_sums_rows += ceil(static_cast<float>(n_output) / n_cell);
}
TfLiteTensor* row_sums = GetTemporary(context, node, kRowSums);
row_sums->type = kTfLiteInt32;
row_sums->allocation_type = kTfLiteArenaRwPersistent;
const int row_sums_dims[2] = {row_sums_rows, n_cell};
if (!TfLiteIntArrayEqualsArray(row_sums->dims, 2, row_sums_dims)) {
TfLiteIntArray* row_sums_size = TfLiteIntArrayCreate(2);
row_sums_size->data[0] = row_sums_dims[0];
row_sums_size->data[1] = row_sums_dims[1];
TF_LITE_ENSURE_OK(
context, context->ResizeTensor(context, row_sums, row_sums_size));
}
if (is_sparse_op) {
op_data->ledger_initialized = false;
int offset = kNumHybridTemporaryTensors;
{
node->temporaries->data[offset + kInputToInputWeightsLedgerOffset] =
op_data->ledger_index + kInputToInputWeightsLedgerOffset;
const TfLiteTensor* input_to_input_weights =
GetOptionalInputTensor(context, node, kInputToInputWeightsTensor);
TfLiteTensor* input_to_input_weights_ledger =
&context->tensors[op_data->ledger_index +
kInputToInputWeightsLedgerOffset];
auto status = make_ledger(input_to_input_weights == nullptr
? nullptr
: input_to_input_weights->sparsity,
context, input_to_input_weights_ledger);
if (status != kTfLiteOk) return status;
}
{
node->temporaries->data[offset + kInputToForgetWeightsLedgerOffset] =
op_data->ledger_index + kInputToForgetWeightsLedgerOffset;
const TfLiteTensor* input_to_forget_weights =
GetInput(context, node, kInputToForgetWeightsTensor);
TfLiteTensor* input_to_forget_weights_ledger =
&context->tensors[op_data->ledger_index +
kInputToForgetWeightsLedgerOffset];
auto status = make_ledger(input_to_forget_weights->sparsity, context,
input_to_forget_weights_ledger);
if (status != kTfLiteOk) return status;
}
{
node->temporaries->data[offset + kInputToCellWeightsLedgerOffset] =
op_data->ledger_index + kInputToCellWeightsLedgerOffset;
const TfLiteTensor* input_to_cell_weights =
GetInput(context, node, kInputToCellWeightsTensor);
TfLiteTensor* input_to_cell_weights_ledger =
&context->tensors[op_data->ledger_index +
kInputToCellWeightsLedgerOffset];
auto status = make_ledger(input_to_cell_weights->sparsity, context,
input_to_cell_weights_ledger);
if (status != kTfLiteOk) return status;
}
{
node->temporaries->data[offset + kInputToOutputWeightsLedgerOffset] =
op_data->ledger_index + kInputToOutputWeightsLedgerOffset;
const TfLiteTensor* input_to_output_weights =
GetInput(context, node, kInputToOutputWeightsTensor);
TfLiteTensor* input_to_output_weights_ledger =
&context->tensors[op_data->ledger_index +
kInputToOutputWeightsLedgerOffset];
auto status = make_ledger(input_to_output_weights->sparsity, context,
input_to_output_weights_ledger);
if (status != kTfLiteOk) return status;
}
{
node->temporaries->data[offset + kRecurrentToInputWeightsLedgerOffset] =
op_data->ledger_index + kRecurrentToInputWeightsLedgerOffset;
const TfLiteTensor* recurrent_to_input_weights = GetOptionalInputTensor(
context, node, kRecurrentToInputWeightsTensor);
TfLiteTensor* recurrent_to_input_weights_ledger =
&context->tensors[op_data->ledger_index +
kRecurrentToInputWeightsLedgerOffset];
auto status = make_ledger(recurrent_to_input_weights == nullptr
? nullptr
: recurrent_to_input_weights->sparsity,
context, recurrent_to_input_weights_ledger);
if (status != kTfLiteOk) return status;
}
{
node->temporaries
->data[offset + kRecurrentToForgetWeightsLedgerOffset] =
op_data->ledger_index + kRecurrentToForgetWeightsLedgerOffset;
const TfLiteTensor* recurrent_to_forget_weights =
GetInput(context, node, kRecurrentToForgetWeightsTensor);
TfLiteTensor* recurrent_to_forget_weights_ledger =
&context->tensors[op_data->ledger_index +
kRecurrentToForgetWeightsLedgerOffset];
auto status = make_ledger(recurrent_to_forget_weights->sparsity,
context, recurrent_to_forget_weights_ledger);
if (status != kTfLiteOk) return status;
}
{
node->temporaries->data[offset + kRecurrentToCellWeightsLedgerOffset] =
op_data->ledger_index + kRecurrentToCellWeightsLedgerOffset;
const TfLiteTensor* recurrent_to_cell_weights =
GetInput(context, node, kRecurrentToCellWeightsTensor);
TfLiteTensor* recurrent_to_cell_weights_ledger =
&context->tensors[op_data->ledger_index +
kRecurrentToCellWeightsLedgerOffset];
auto status = make_ledger(recurrent_to_cell_weights->sparsity, context,
recurrent_to_cell_weights_ledger);
if (status != kTfLiteOk) return status;
}
{
node->temporaries
->data[offset + kRecurrentToOutputWeightsLedgerOffset] =
op_data->ledger_index + kRecurrentToOutputWeightsLedgerOffset;
const TfLiteTensor* recurrent_to_output_weights =
GetInput(context, node, kRecurrentToOutputWeightsTensor);
TfLiteTensor* recurrent_to_output_weights_ledger =
&context->tensors[op_data->ledger_index +
kRecurrentToOutputWeightsLedgerOffset];
auto status = make_ledger(recurrent_to_output_weights->sparsity,
context, recurrent_to_output_weights_ledger);
if (status != kTfLiteOk) return status;
}
{
node->temporaries->data[offset + kProjectionWeightsLedgerOffset] =
op_data->ledger_index + kProjectionWeightsLedgerOffset;
const TfLiteTensor* projection_weights =
GetInput(context, node, kProjectionWeightsTensor);
TfLiteTensor* projection_weights_ledger =
&context->tensors[op_data->ledger_index +
kProjectionWeightsLedgerOffset];
auto status = make_ledger(projection_weights->sparsity, context,
projection_weights_ledger);
if (status != kTfLiteOk) return status;
}
}
}
if (is_integer) {
if (is_8x8_16) {
// Integer LSTM prepare function for 8x8->16.
// This code path needs 5 intermediate tensors per Op.
// Populate quantization parameters.
PopulateQuantizedLstmParams8x8_16(context, node,
&op_data->integer_lstm_param);
      // Allocate scratch buffers. We need 6 16-bit buffers of size
      // n_batch * n_cell, 1 8-bit buffer of size n_batch * n_cell, and
      // 1 32-bit buffer of size n_batch * n_cell.
//
// Handle cifg case as well, which might save one buffer.
for (int scratch_index = 0; scratch_index < 6; ++scratch_index) {
node->temporaries->data[scratch_index] =
op_data->scratch_tensor_index + scratch_index;
TfLiteTensor* scratch_tensor =
GetTemporary(context, node, scratch_index);
scratch_tensor->type = kTfLiteInt16;
if (scratch_index == 4) {
scratch_tensor->type = kTfLiteInt8;
} else if (scratch_index == 5) {
scratch_tensor->type = kTfLiteInt32;
}
scratch_tensor->allocation_type = kTfLiteArenaRw;
const int scratch_dimension[2] = {n_batch, n_cell};
if (!TfLiteIntArrayEqualsArray(scratch_tensor->dims, 2,
scratch_dimension)) {
TfLiteIntArray* scratch_buffer_size = TfLiteIntArrayCreate(2);
scratch_buffer_size->data[0] = n_batch;
scratch_buffer_size->data[1] = n_cell;
TF_LITE_ENSURE_OK(context,
context->ResizeTensor(context, scratch_tensor,
scratch_buffer_size));
}
}
// Populate precomputed zp * weight.
TF_LITE_ENSURE_OK(context, PopulatePrecomputedZPTimesWeightsWithBias(
context, op_data, node));
} else {
// Integer LSTM prepare function for 8x8->8.
// This code path needs 12 intermediate tensors per Op.
PopulateQuantizedLstmParams8x8_8(context, node,
&op_data->integer_lstm_param);
      // Allocate scratch buffers. We need 6 16-bit buffers of size
      // n_batch * n_cell and 2 8-bit buffers of size n_batch * n_cell.
//
// Handle cifg case as well, which might save one buffer.
for (int scratch_index = 0; scratch_index < 8; ++scratch_index) {
node->temporaries->data[scratch_index] =
op_data->scratch_tensor_index + scratch_index;
TfLiteTensor* scratch_tensor =
GetTemporary(context, node, scratch_index);
if (scratch_index == 0 || scratch_index == 1) {
scratch_tensor->type = kTfLiteInt8;
} else {
scratch_tensor->type = kTfLiteInt16;
}
scratch_tensor->allocation_type = kTfLiteArenaRw;
const int scratch_dimension[2] = {n_batch, n_cell};
if (!TfLiteIntArrayEqualsArray(scratch_tensor->dims, 2,
scratch_dimension)) {
TfLiteIntArray* scratch_buffer_size = TfLiteIntArrayCreate(2);
scratch_buffer_size->data[0] = n_batch;
scratch_buffer_size->data[1] = n_cell;
TF_LITE_ENSURE_OK(context,
context->ResizeTensor(context, scratch_tensor,
scratch_buffer_size));
}
}
}
}
return kTfLiteOk;
}
| 1
|
131,071
|
static void voice_link(struct VOICE_S *p_voice)
{
struct VOICE_S *p_voice2;
p_voice2 = first_voice;
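	/* stop if p_voice is already linked, otherwise walk to the tail and append it */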
for (;;) {
if (p_voice2 == p_voice)
return;
if (!p_voice2->next)
break;
p_voice2 = p_voice2->next;
}
p_voice2->next = p_voice;
}
| 0
|
272,626
|
static void move_back(compiler_common *common, jump_list **backtracks, BOOL must_be_valid)
{
/* Goes one character back. Affects STR_PTR and TMP1. If must_be_valid is TRUE,
TMP2 is not used. Otherwise TMP2 must contain the start of the subject buffer,
and it is destroyed. Does not modify STR_PTR for invalid character sequences. */
DEFINE_COMPILER;
#if defined SUPPORT_UNICODE && PCRE2_CODE_UNIT_WIDTH != 32
struct sljit_jump *jump;
#endif
#ifdef SUPPORT_UNICODE
#if PCRE2_CODE_UNIT_WIDTH == 8
struct sljit_label *label;
if (common->utf)
{
if (!must_be_valid && common->invalid_utf)
{
OP1(MOV_UCHAR, TMP1, 0, SLJIT_MEM1(STR_PTR), -IN_UCHARS(1));
OP2(SLJIT_SUB, STR_PTR, 0, STR_PTR, 0, SLJIT_IMM, IN_UCHARS(1));
jump = CMP(SLJIT_LESS, TMP1, 0, SLJIT_IMM, 0x80);
add_jump(compiler, &common->utfmoveback_invalid, JUMP(SLJIT_FAST_CALL));
if (backtracks != NULL)
add_jump(compiler, backtracks, CMP(SLJIT_EQUAL, TMP1, 0, SLJIT_IMM, 0));
JUMPHERE(jump);
return;
}
label = LABEL();
OP1(MOV_UCHAR, TMP1, 0, SLJIT_MEM1(STR_PTR), -IN_UCHARS(1));
OP2(SLJIT_SUB, STR_PTR, 0, STR_PTR, 0, SLJIT_IMM, IN_UCHARS(1));
OP2(SLJIT_AND, TMP1, 0, TMP1, 0, SLJIT_IMM, 0xc0);
CMPTO(SLJIT_EQUAL, TMP1, 0, SLJIT_IMM, 0x80, label);
return;
}
#elif PCRE2_CODE_UNIT_WIDTH == 16
if (common->utf)
{
OP1(MOV_UCHAR, TMP1, 0, SLJIT_MEM1(STR_PTR), -IN_UCHARS(1));
OP2(SLJIT_SUB, STR_PTR, 0, STR_PTR, 0, SLJIT_IMM, IN_UCHARS(1));
if (!must_be_valid && common->invalid_utf)
{
OP2(SLJIT_SUB, TMP1, 0, TMP1, 0, SLJIT_IMM, 0xd800);
jump = CMP(SLJIT_GREATER_EQUAL, TMP1, 0, SLJIT_IMM, 0xe000 - 0xd800);
add_jump(compiler, &common->utfmoveback_invalid, JUMP(SLJIT_FAST_CALL));
if (backtracks != NULL)
add_jump(compiler, backtracks, CMP(SLJIT_EQUAL, TMP1, 0, SLJIT_IMM, 0));
JUMPHERE(jump);
return;
}
/* Skip low surrogate if necessary. */
OP2(SLJIT_AND, TMP1, 0, TMP1, 0, SLJIT_IMM, 0xfc00);
OP2U(SLJIT_SUB | SLJIT_SET_Z, TMP1, 0, SLJIT_IMM, 0xdc00);
OP_FLAGS(SLJIT_MOV, TMP1, 0, SLJIT_EQUAL);
OP2(SLJIT_SHL, TMP1, 0, TMP1, 0, SLJIT_IMM, UCHAR_SHIFT);
OP2(SLJIT_SUB, STR_PTR, 0, STR_PTR, 0, TMP1, 0);
return;
}
#elif PCRE2_CODE_UNIT_WIDTH == 32
if (common->invalid_utf && !must_be_valid)
{
OP1(MOV_UCHAR, TMP1, 0, SLJIT_MEM1(STR_PTR), -IN_UCHARS(1));
if (backtracks != NULL)
{
add_jump(compiler, backtracks, CMP(SLJIT_GREATER_EQUAL, TMP1, 0, SLJIT_IMM, 0x110000));
OP2(SLJIT_SUB, STR_PTR, 0, STR_PTR, 0, SLJIT_IMM, IN_UCHARS(1));
return;
}
OP2U(SLJIT_SUB | SLJIT_SET_LESS, TMP1, 0, SLJIT_IMM, 0x110000);
OP_FLAGS(SLJIT_MOV, TMP1, 0, SLJIT_LESS);
OP2(SLJIT_SHL, TMP1, 0, TMP1, 0, SLJIT_IMM, UCHAR_SHIFT);
OP2(SLJIT_SUB, STR_PTR, 0, STR_PTR, 0, TMP1, 0);
return;
}
#endif /* PCRE2_CODE_UNIT_WIDTH == [8|16|32] */
#endif /* SUPPORT_UNICODE */
SLJIT_UNUSED_ARG(backtracks);
SLJIT_UNUSED_ARG(must_be_valid);
OP2(SLJIT_SUB, STR_PTR, 0, STR_PTR, 0, SLJIT_IMM, IN_UCHARS(1));
}
| 0
|
322,763
|
static void create_cps(MaltaState *s, const char *cpu_model,
qemu_irq *cbus_irq, qemu_irq *i8259_irq)
{
Error *err = NULL;
s->cps = g_new0(MIPSCPSState, 1);
object_initialize(s->cps, sizeof(MIPSCPSState), TYPE_MIPS_CPS);
qdev_set_parent_bus(DEVICE(s->cps), sysbus_get_default());
object_property_set_str(OBJECT(s->cps), cpu_model, "cpu-model", &err);
object_property_set_int(OBJECT(s->cps), smp_cpus, "num-vp", &err);
object_property_set_bool(OBJECT(s->cps), true, "realized", &err);
if (err != NULL) {
error_report("%s", error_get_pretty(err));
exit(1);
}
sysbus_mmio_map_overlap(SYS_BUS_DEVICE(s->cps), 0, 0, 1);
/* FIXME: When GIC is present then we should use GIC's IRQ 3.
Until then CPS exposes CPU's IRQs thus use the default IRQ 2. */
*i8259_irq = get_cps_irq(s->cps, 2);
*cbus_irq = NULL;
}
| 0
|
258,324
|
static char *ReadBlobStringWithLongSize(Image *image, char *string, size_t max,
  ExceptionInfo *exception)
{
  int c;
  MagickOffsetType offset;
  register ssize_t i;
  size_t length;
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(max != 0);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent, GetMagickModule(), "%s", image->filename);
  length = ReadBlobMSBLong(image);
  for (i = 0; i < (ssize_t) MagickMin(length, max - 1); i++)
  {
    c = ReadBlobByte(image);
    if (c == EOF)
      return ((char *) NULL);
    string[i] = (char) c;
  }
  string[i] = '\0';
  offset = SeekBlob(image, (MagickOffsetType) (length - i), SEEK_CUR);
  if (offset < 0)
    (void) ThrowMagickException(exception, GetMagickModule(), CorruptImageError,
      "ImproperImageHeader", "`%s'", image->filename);
  return (string);
}
| 0
|
203,117
|
bool PDFiumEngine::OnKeyUp(const pp::KeyboardInputEvent& event) {
if (last_page_mouse_down_ == -1)
return false;
return !!FORM_OnKeyUp(
form_, pages_[last_page_mouse_down_]->GetPage(),
event.GetKeyCode(), event.GetModifiers());
}
| 0
|
446,417
|
virDomainDiskSourceFormatPrivateData(virBufferPtr buf,
virStorageSourcePtr src,
unsigned int flags,
virDomainXMLOptionPtr xmlopt)
{
g_auto(virBuffer) childBuf = VIR_BUFFER_INIT_CHILD(buf);
if (!(flags & VIR_DOMAIN_DEF_FORMAT_STATUS) ||
!xmlopt || !xmlopt->privateData.storageFormat)
return 0;
if (xmlopt->privateData.storageFormat(src, &childBuf) < 0)
return -1;
virXMLFormatElement(buf, "privateData", NULL, &childBuf);
return 0;
}
| 0
|
209,982
|
JSRetainPtr<JSStringRef> AccessibilityUIElement::description()
{
if (!m_element || !ATK_IS_OBJECT(m_element))
return JSStringCreateWithCharacters(0, 0);
const gchar* description = atk_object_get_description(ATK_OBJECT(m_element));
if (!description)
return JSStringCreateWithCharacters(0, 0);
GOwnPtr<gchar> axDesc(g_strdup_printf("AXDescription: %s", description));
return JSStringCreateWithUTF8CString(axDesc.get());
}
| 0
|
321,736
|
static inline int vec_reg_offset(int regno, int element, TCGMemOp size)
{
int offs = offsetof(CPUARMState, vfp.regs[regno * 2]);
#ifdef HOST_WORDS_BIGENDIAN
/* This is complicated slightly because vfp.regs[2n] is
* still the low half and vfp.regs[2n+1] the high half
* of the 128 bit vector, even on big endian systems.
* Calculate the offset assuming a fully bigendian 128 bits,
* then XOR to account for the order of the two 64 bit halves.
*/
offs += (16 - ((element + 1) * (1 << size)));
offs ^= 8;
#else
offs += element * (1 << size);
#endif
return offs;
}
| 0
|
415,285
|
vpnc_cleanup (NMVPNCPlugin *self, gboolean killit)
{
NMVPNCPluginPrivate *priv = NM_VPNC_PLUGIN_GET_PRIVATE (self);
if (priv->infd >= 0) {
close (priv->infd);
priv->infd = -1;
}
pipe_cleanup (&priv->out);
pipe_cleanup (&priv->err);
g_string_truncate (priv->server_message, 0);
priv->server_message_done = FALSE;
if (priv->watch_id) {
g_source_remove (priv->watch_id);
priv->watch_id = 0;
}
if (priv->pid) {
if (killit) {
/* Try giving it some time to disconnect cleanly */
if (kill (priv->pid, SIGTERM) == 0)
g_timeout_add (2000, ensure_killed, GINT_TO_POINTER (priv->pid));
_LOGI ("Terminated vpnc daemon with PID %d.", priv->pid);
} else {
/* Already quit, just reap the child */
waitpid (priv->pid, NULL, WNOHANG);
}
priv->pid = 0;
}
}
| 0
|
345,544
|
get_one_option(int optid, const struct my_option *opt,
char *argument)
{
my_bool add_option= TRUE;
switch (optid) {
case '?':
printf("%s Ver %s Distrib %s, for %s (%s)\n",
my_progname, VER, MYSQL_SERVER_VERSION, SYSTEM_TYPE, MACHINE_TYPE);
puts(ORACLE_WELCOME_COPYRIGHT_NOTICE("2000"));
puts("MySQL utility for upgrading databases to new MySQL versions.\n");
my_print_help(my_long_options);
exit(0);
break;
case '#':
DBUG_PUSH(argument ? argument : default_dbug_option);
add_option= FALSE;
debug_check_flag= 1;
break;
case 'p':
if (argument == disabled_my_option)
argument= (char*) ""; /* Don't require password */
tty_password= 1;
add_option= FALSE;
if (argument)
{
/* Add password to ds_args before overwriting the arg with x's */
add_one_option(&ds_args, opt, argument);
while (*argument)
*argument++= 'x'; /* Destroy argument */
tty_password= 0;
}
break;
case 't':
strnmov(opt_tmpdir, argument, sizeof(opt_tmpdir));
add_option= FALSE;
break;
case 'b': /* --basedir */
case 'd': /* --datadir */
fprintf(stderr, "%s: the '--%s' option is always ignored\n",
my_progname, optid == 'b' ? "basedir" : "datadir");
/* FALLTHROUGH */
case 'k': /* --version-check */
case 'v': /* --verbose */
case 'f': /* --force */
case 's': /* --upgrade-system-tables */
case OPT_WRITE_BINLOG: /* --write-binlog */
add_option= FALSE;
break;
case 'h': /* --host */
case 'W': /* --pipe */
case 'P': /* --port */
case 'S': /* --socket */
case OPT_MYSQL_PROTOCOL: /* --protocol */
case OPT_SHARED_MEMORY_BASE_NAME: /* --shared-memory-base-name */
case OPT_PLUGIN_DIR: /* --plugin-dir */
case OPT_DEFAULT_AUTH: /* --default-auth */
add_one_option(&conn_args, opt, argument);
break;
}
if (add_option)
{
/*
      This is an option that is accepted by mysql_upgrade just so
it can be passed on to "mysql" and "mysqlcheck"
Save it in the ds_args string
*/
add_one_option(&ds_args, opt, argument);
}
return 0;
}
| 1
|
428,536
|
replace_contents_open_callback (GObject *obj,
GAsyncResult *open_res,
gpointer user_data)
{
GFile *file = G_FILE (obj);
GFileOutputStream *stream;
ReplaceContentsData *data = user_data;
GError *error = NULL;
stream = g_file_replace_finish (file, open_res, &error);
if (stream)
{
const gchar *content;
gsize length;
content = g_bytes_get_data (data->content, &length);
g_output_stream_write_async (G_OUTPUT_STREAM (stream),
content + data->pos,
length - data->pos,
0,
g_task_get_cancellable (data->task),
replace_contents_write_callback,
data);
}
else
{
g_task_return_error (data->task, error);
g_object_unref (data->task);
}
}
| 0
|
413,823
|
Mixin_Call_Obj Parser::parse_include_directive()
{
// lex identifier into `lexed` var
lex_identifier(); // may error out
// normalize underscores to hyphens
std::string name(Util::normalize_underscores(lexed));
// create the initial mixin call object
Mixin_Call_Obj call = SASS_MEMORY_NEW(Mixin_Call, pstate, name, {}, {});
// parse mandatory arguments
call->arguments(parse_arguments());
// parse optional block
if (peek < exactly <'{'> >()) {
call->block(parse_block());
}
// return ast node
return call.detach();
}
| 0
|
51,544
|
inline void StringData::checkStack() const {
assertx(uintptr_t(this) - s_stackLimit >= s_stackSize);
}
| 0
|
308,716
|
SplashError Splash::drawImage(SplashImageSource src, void *srcData,
SplashColorMode srcMode, GBool srcAlpha,
int w, int h, SplashCoord *mat) {
SplashPipe pipe;
GBool ok, rot;
SplashCoord xScale, yScale, xShear, yShear, yShear1;
int tx, tx2, ty, ty2, scaledWidth, scaledHeight, xSign, ySign;
int ulx, uly, llx, lly, urx, ury, lrx, lry;
int ulx1, uly1, llx1, lly1, urx1, ury1, lrx1, lry1;
int xMin, xMax, yMin, yMax;
SplashClipResult clipRes, clipRes2;
int yp, yq, yt, yStep, lastYStep;
int xp, xq, xt, xStep, xSrc;
int k1, spanXMin, spanXMax, spanY;
SplashColorPtr colorBuf, p;
SplashColor pix;
Guchar *alphaBuf, *q;
#if SPLASH_CMYK
int pixAcc0, pixAcc1, pixAcc2, pixAcc3;
#else
int pixAcc0, pixAcc1, pixAcc2;
#endif
int alphaAcc;
SplashCoord pixMul, alphaMul, alpha;
int x, y, x1, x2, y2;
SplashCoord y1;
int nComps, n, m, i, j;
if (debugMode) {
printf("drawImage: srcMode=%d srcAlpha=%d w=%d h=%d mat=[%.2f %.2f %.2f %.2f %.2f %.2f]\n",
srcMode, srcAlpha, w, h, (double)mat[0], (double)mat[1], (double)mat[2],
(double)mat[3], (double)mat[4], (double)mat[5]);
}
ok = gFalse; // make gcc happy
nComps = 0; // make gcc happy
switch (bitmap->mode) {
case splashModeMono1:
case splashModeMono8:
ok = srcMode == splashModeMono8;
nComps = 1;
break;
case splashModeRGB8:
ok = srcMode == splashModeRGB8;
nComps = 3;
break;
case splashModeXBGR8:
ok = srcMode == splashModeXBGR8;
nComps = 4;
break;
case splashModeBGR8:
ok = srcMode == splashModeBGR8;
nComps = 3;
break;
#if SPLASH_CMYK
case splashModeCMYK8:
ok = srcMode == splashModeCMYK8;
nComps = 4;
break;
#endif
}
if (!ok) {
return splashErrModeMismatch;
}
if (splashAbs(mat[0] * mat[3] - mat[1] * mat[2]) < 0.000001) {
return splashErrSingularMatrix;
}
rot = splashAbs(mat[1]) > splashAbs(mat[0]);
if (rot) {
xScale = -mat[1];
yScale = mat[2] - (mat[0] * mat[3]) / mat[1];
xShear = -mat[3] / yScale;
yShear = -mat[0] / mat[1];
} else {
xScale = mat[0];
yScale = mat[3] - (mat[1] * mat[2]) / mat[0];
xShear = mat[2] / yScale;
yShear = mat[1] / mat[0];
}
if (xScale >= 0) {
tx = splashFloor(mat[4] - 0.01);
tx2 = splashFloor(mat[4] + xScale + 0.01);
} else {
tx = splashFloor(mat[4] + 0.01);
tx2 = splashFloor(mat[4] + xScale - 0.01);
}
scaledWidth = abs(tx2 - tx) + 1;
if (yScale >= 0) {
ty = splashFloor(mat[5] - 0.01);
ty2 = splashFloor(mat[5] + yScale + 0.01);
} else {
ty = splashFloor(mat[5] + 0.01);
ty2 = splashFloor(mat[5] + yScale - 0.01);
}
scaledHeight = abs(ty2 - ty) + 1;
xSign = (xScale < 0) ? -1 : 1;
ySign = (yScale < 0) ? -1 : 1;
yShear1 = (SplashCoord)xSign * yShear;
ulx1 = 0;
uly1 = 0;
urx1 = xSign * (scaledWidth - 1);
ury1 = (int)(yShear * urx1);
llx1 = splashRound(xShear * ySign * (scaledHeight - 1));
lly1 = ySign * (scaledHeight - 1) + (int)(yShear * llx1);
lrx1 = xSign * (scaledWidth - 1) +
splashRound(xShear * ySign * (scaledHeight - 1));
lry1 = ySign * (scaledHeight - 1) + (int)(yShear * lrx1);
if (rot) {
ulx = tx + uly1; uly = ty - ulx1;
urx = tx + ury1; ury = ty - urx1;
llx = tx + lly1; lly = ty - llx1;
lrx = tx + lry1; lry = ty - lrx1;
} else {
ulx = tx + ulx1; uly = ty + uly1;
urx = tx + urx1; ury = ty + ury1;
llx = tx + llx1; lly = ty + lly1;
lrx = tx + lrx1; lry = ty + lry1;
}
xMin = (ulx < urx) ? (ulx < llx) ? (ulx < lrx) ? ulx : lrx
: (llx < lrx) ? llx : lrx
: (urx < llx) ? (urx < lrx) ? urx : lrx
: (llx < lrx) ? llx : lrx;
xMax = (ulx > urx) ? (ulx > llx) ? (ulx > lrx) ? ulx : lrx
: (llx > lrx) ? llx : lrx
: (urx > llx) ? (urx > lrx) ? urx : lrx
: (llx > lrx) ? llx : lrx;
yMin = (uly < ury) ? (uly < lly) ? (uly < lry) ? uly : lry
: (lly < lry) ? lly : lry
: (ury < lly) ? (ury < lry) ? ury : lry
: (lly < lry) ? lly : lry;
yMax = (uly > ury) ? (uly > lly) ? (uly > lry) ? uly : lry
: (lly > lry) ? lly : lry
: (ury > lly) ? (ury > lry) ? ury : lry
: (lly > lry) ? lly : lry;
clipRes = state->clip->testRect(xMin, yMin, xMax, yMax);
opClipRes = clipRes;
if (clipRes == splashClipAllOutside) {
return splashOk;
}
yp = h / scaledHeight;
yq = h % scaledHeight;
xp = w / scaledWidth;
xq = w % scaledWidth;
colorBuf = (SplashColorPtr)gmallocn3((yp + 1), w, nComps);
if (srcAlpha) {
alphaBuf = (Guchar *)gmallocn((yp + 1), w);
} else {
alphaBuf = NULL;
}
pixAcc0 = pixAcc1 = pixAcc2 = 0; // make gcc happy
#if SPLASH_CMYK
pixAcc3 = 0; // make gcc happy
#endif
pipeInit(&pipe, 0, 0, NULL, pix, state->fillAlpha,
srcAlpha || (vectorAntialias && clipRes != splashClipAllInside),
gFalse);
if (vectorAntialias) {
drawAAPixelInit();
}
if (srcAlpha) {
yt = 0;
lastYStep = 1;
for (y = 0; y < scaledHeight; ++y) {
yStep = yp;
yt += yq;
if (yt >= scaledHeight) {
yt -= scaledHeight;
++yStep;
}
n = (yp > 0) ? yStep : lastYStep;
if (n > 0) {
p = colorBuf;
q = alphaBuf;
for (i = 0; i < n; ++i) {
(*src)(srcData, p, q);
p += w * nComps;
q += w;
}
}
lastYStep = yStep;
k1 = splashRound(xShear * ySign * y);
if (clipRes != splashClipAllInside &&
!rot &&
(int)(yShear * k1) ==
(int)(yShear * (xSign * (scaledWidth - 1) + k1))) {
if (xSign > 0) {
spanXMin = tx + k1;
spanXMax = spanXMin + (scaledWidth - 1);
} else {
spanXMax = tx + k1;
spanXMin = spanXMax - (scaledWidth - 1);
}
spanY = ty + ySign * y + (int)(yShear * k1);
clipRes2 = state->clip->testSpan(spanXMin, spanXMax, spanY);
if (clipRes2 == splashClipAllOutside) {
continue;
}
} else {
clipRes2 = clipRes;
}
xt = 0;
xSrc = 0;
x1 = k1;
y1 = (SplashCoord)ySign * y + yShear * x1;
if (yShear1 < 0) {
y1 += 0.999;
}
n = yStep > 0 ? yStep : 1;
switch (srcMode) {
case splashModeMono1:
case splashModeMono8:
for (x = 0; x < scaledWidth; ++x) {
xStep = xp;
xt += xq;
if (xt >= scaledWidth) {
xt -= scaledWidth;
++xStep;
}
if (rot) {
x2 = (int)y1;
y2 = -x1;
} else {
x2 = x1;
y2 = (int)y1;
}
m = xStep > 0 ? xStep : 1;
alphaAcc = 0;
p = colorBuf + xSrc;
q = alphaBuf + xSrc;
pixAcc0 = 0;
for (i = 0; i < n; ++i) {
for (j = 0; j < m; ++j) {
pixAcc0 += *p++;
alphaAcc += *q++;
}
p += w - m;
q += w - m;
}
pixMul = (SplashCoord)1 / (SplashCoord)(n * m);
alphaMul = pixMul * (1.0 / 255.0);
alpha = (SplashCoord)alphaAcc * alphaMul;
if (alpha > 0) {
pix[0] = (int)((SplashCoord)pixAcc0 * pixMul);
pipe.shape = alpha;
if (vectorAntialias && clipRes != splashClipAllInside) {
drawAAPixel(&pipe, tx + x2, ty + y2);
} else {
drawPixel(&pipe, tx + x2, ty + y2,
clipRes2 == splashClipAllInside);
}
}
xSrc += xStep;
x1 += xSign;
y1 += yShear1;
}
break;
case splashModeRGB8:
case splashModeBGR8:
for (x = 0; x < scaledWidth; ++x) {
xStep = xp;
xt += xq;
if (xt >= scaledWidth) {
xt -= scaledWidth;
++xStep;
}
if (rot) {
x2 = (int)y1;
y2 = -x1;
} else {
x2 = x1;
y2 = (int)y1;
}
m = xStep > 0 ? xStep : 1;
alphaAcc = 0;
p = colorBuf + xSrc * 3;
q = alphaBuf + xSrc;
pixAcc0 = pixAcc1 = pixAcc2 = 0;
for (i = 0; i < n; ++i) {
for (j = 0; j < m; ++j) {
pixAcc0 += *p++;
pixAcc1 += *p++;
pixAcc2 += *p++;
alphaAcc += *q++;
}
p += 3 * (w - m);
q += w - m;
}
pixMul = (SplashCoord)1 / (SplashCoord)(n * m);
alphaMul = pixMul * (1.0 / 255.0);
alpha = (SplashCoord)alphaAcc * alphaMul;
if (alpha > 0) {
pix[0] = (int)((SplashCoord)pixAcc0 * pixMul);
pix[1] = (int)((SplashCoord)pixAcc1 * pixMul);
pix[2] = (int)((SplashCoord)pixAcc2 * pixMul);
pipe.shape = alpha;
if (vectorAntialias && clipRes != splashClipAllInside) {
drawAAPixel(&pipe, tx + x2, ty + y2);
} else {
drawPixel(&pipe, tx + x2, ty + y2,
clipRes2 == splashClipAllInside);
}
}
xSrc += xStep;
x1 += xSign;
y1 += yShear1;
}
break;
case splashModeXBGR8:
for (x = 0; x < scaledWidth; ++x) {
xStep = xp;
xt += xq;
if (xt >= scaledWidth) {
xt -= scaledWidth;
++xStep;
}
if (rot) {
x2 = (int)y1;
y2 = -x1;
} else {
x2 = x1;
y2 = (int)y1;
}
m = xStep > 0 ? xStep : 1;
alphaAcc = 0;
p = colorBuf + xSrc * 4;
q = alphaBuf + xSrc;
pixAcc0 = pixAcc1 = pixAcc2 = 0;
for (i = 0; i < n; ++i) {
for (j = 0; j < m; ++j) {
pixAcc0 += *p++;
pixAcc1 += *p++;
pixAcc2 += *p++;
*p++;
alphaAcc += *q++;
}
p += 4 * (w - m);
q += w - m;
}
pixMul = (SplashCoord)1 / (SplashCoord)(n * m);
alphaMul = pixMul * (1.0 / 255.0);
alpha = (SplashCoord)alphaAcc * alphaMul;
if (alpha > 0) {
pix[0] = (int)((SplashCoord)pixAcc0 * pixMul);
pix[1] = (int)((SplashCoord)pixAcc1 * pixMul);
pix[2] = (int)((SplashCoord)pixAcc2 * pixMul);
pix[3] = 255;
pipe.shape = alpha;
if (vectorAntialias && clipRes != splashClipAllInside) {
drawAAPixel(&pipe, tx + x2, ty + y2);
} else {
drawPixel(&pipe, tx + x2, ty + y2,
clipRes2 == splashClipAllInside);
}
}
xSrc += xStep;
x1 += xSign;
y1 += yShear1;
}
break;
#if SPLASH_CMYK
case splashModeCMYK8:
for (x = 0; x < scaledWidth; ++x) {
xStep = xp;
xt += xq;
if (xt >= scaledWidth) {
xt -= scaledWidth;
++xStep;
}
if (rot) {
x2 = (int)y1;
y2 = -x1;
} else {
x2 = x1;
y2 = (int)y1;
}
m = xStep > 0 ? xStep : 1;
alphaAcc = 0;
p = colorBuf + xSrc * 4;
q = alphaBuf + xSrc;
pixAcc0 = pixAcc1 = pixAcc2 = pixAcc3 = 0;
for (i = 0; i < n; ++i) {
for (j = 0; j < m; ++j) {
pixAcc0 += *p++;
pixAcc1 += *p++;
pixAcc2 += *p++;
pixAcc3 += *p++;
alphaAcc += *q++;
}
p += 4 * (w - m);
q += w - m;
}
pixMul = (SplashCoord)1 / (SplashCoord)(n * m);
alphaMul = pixMul * (1.0 / 255.0);
alpha = (SplashCoord)alphaAcc * alphaMul;
if (alpha > 0) {
pix[0] = (int)((SplashCoord)pixAcc0 * pixMul);
pix[1] = (int)((SplashCoord)pixAcc1 * pixMul);
pix[2] = (int)((SplashCoord)pixAcc2 * pixMul);
pix[3] = (int)((SplashCoord)pixAcc3 * pixMul);
pipe.shape = alpha;
if (vectorAntialias && clipRes != splashClipAllInside) {
drawAAPixel(&pipe, tx + x2, ty + y2);
} else {
drawPixel(&pipe, tx + x2, ty + y2,
clipRes2 == splashClipAllInside);
}
}
xSrc += xStep;
x1 += xSign;
y1 += yShear1;
}
break;
#endif // SPLASH_CMYK
}
}
} else {
yt = 0;
lastYStep = 1;
for (y = 0; y < scaledHeight; ++y) {
yStep = yp;
yt += yq;
if (yt >= scaledHeight) {
yt -= scaledHeight;
++yStep;
}
n = (yp > 0) ? yStep : lastYStep;
if (n > 0) {
p = colorBuf;
for (i = 0; i < n; ++i) {
(*src)(srcData, p, NULL);
p += w * nComps;
}
}
lastYStep = yStep;
k1 = splashRound(xShear * ySign * y);
if (clipRes != splashClipAllInside &&
!rot &&
(int)(yShear * k1) ==
(int)(yShear * (xSign * (scaledWidth - 1) + k1))) {
if (xSign > 0) {
spanXMin = tx + k1;
spanXMax = spanXMin + (scaledWidth - 1);
} else {
spanXMax = tx + k1;
spanXMin = spanXMax - (scaledWidth - 1);
}
spanY = ty + ySign * y + (int)(yShear * k1);
clipRes2 = state->clip->testSpan(spanXMin, spanXMax, spanY);
if (clipRes2 == splashClipAllOutside) {
continue;
}
} else {
clipRes2 = clipRes;
}
xt = 0;
xSrc = 0;
x1 = k1;
y1 = (SplashCoord)ySign * y + yShear * x1;
if (yShear1 < 0) {
y1 += 0.999;
}
n = yStep > 0 ? yStep : 1;
switch (srcMode) {
case splashModeMono1:
case splashModeMono8:
for (x = 0; x < scaledWidth; ++x) {
xStep = xp;
xt += xq;
if (xt >= scaledWidth) {
xt -= scaledWidth;
++xStep;
}
if (rot) {
x2 = (int)y1;
y2 = -x1;
} else {
x2 = x1;
y2 = (int)y1;
}
m = xStep > 0 ? xStep : 1;
p = colorBuf + xSrc;
pixAcc0 = 0;
for (i = 0; i < n; ++i) {
for (j = 0; j < m; ++j) {
pixAcc0 += *p++;
}
p += w - m;
}
pixMul = (SplashCoord)1 / (SplashCoord)(n * m);
pix[0] = (int)((SplashCoord)pixAcc0 * pixMul);
if (vectorAntialias && clipRes != splashClipAllInside) {
pipe.shape = (SplashCoord)1;
drawAAPixel(&pipe, tx + x2, ty + y2);
} else {
drawPixel(&pipe, tx + x2, ty + y2,
clipRes2 == splashClipAllInside);
}
xSrc += xStep;
x1 += xSign;
y1 += yShear1;
}
break;
case splashModeRGB8:
case splashModeBGR8:
for (x = 0; x < scaledWidth; ++x) {
xStep = xp;
xt += xq;
if (xt >= scaledWidth) {
xt -= scaledWidth;
++xStep;
}
if (rot) {
x2 = (int)y1;
y2 = -x1;
} else {
x2 = x1;
y2 = (int)y1;
}
m = xStep > 0 ? xStep : 1;
p = colorBuf + xSrc * 3;
pixAcc0 = pixAcc1 = pixAcc2 = 0;
for (i = 0; i < n; ++i) {
for (j = 0; j < m; ++j) {
pixAcc0 += *p++;
pixAcc1 += *p++;
pixAcc2 += *p++;
}
p += 3 * (w - m);
}
pixMul = (SplashCoord)1 / (SplashCoord)(n * m);
pix[0] = (int)((SplashCoord)pixAcc0 * pixMul);
pix[1] = (int)((SplashCoord)pixAcc1 * pixMul);
pix[2] = (int)((SplashCoord)pixAcc2 * pixMul);
if (vectorAntialias && clipRes != splashClipAllInside) {
pipe.shape = (SplashCoord)1;
drawAAPixel(&pipe, tx + x2, ty + y2);
} else {
drawPixel(&pipe, tx + x2, ty + y2,
clipRes2 == splashClipAllInside);
}
xSrc += xStep;
x1 += xSign;
y1 += yShear1;
}
break;
case splashModeXBGR8:
for (x = 0; x < scaledWidth; ++x) {
xStep = xp;
xt += xq;
if (xt >= scaledWidth) {
xt -= scaledWidth;
++xStep;
}
if (rot) {
x2 = (int)y1;
y2 = -x1;
} else {
x2 = x1;
y2 = (int)y1;
}
m = xStep > 0 ? xStep : 1;
p = colorBuf + xSrc * 4;
pixAcc0 = pixAcc1 = pixAcc2 = 0;
for (i = 0; i < n; ++i) {
for (j = 0; j < m; ++j) {
pixAcc0 += *p++;
pixAcc1 += *p++;
pixAcc2 += *p++;
*p++;
}
p += 4 * (w - m);
}
pixMul = (SplashCoord)1 / (SplashCoord)(n * m);
pix[0] = (int)((SplashCoord)pixAcc0 * pixMul);
pix[1] = (int)((SplashCoord)pixAcc1 * pixMul);
pix[2] = (int)((SplashCoord)pixAcc2 * pixMul);
pix[3] = 255;
if (vectorAntialias && clipRes != splashClipAllInside) {
pipe.shape = (SplashCoord)1;
drawAAPixel(&pipe, tx + x2, ty + y2);
} else {
drawPixel(&pipe, tx + x2, ty + y2,
clipRes2 == splashClipAllInside);
}
xSrc += xStep;
x1 += xSign;
y1 += yShear1;
}
break;
#if SPLASH_CMYK
case splashModeCMYK8:
for (x = 0; x < scaledWidth; ++x) {
xStep = xp;
xt += xq;
if (xt >= scaledWidth) {
xt -= scaledWidth;
++xStep;
}
if (rot) {
x2 = (int)y1;
y2 = -x1;
} else {
x2 = x1;
y2 = (int)y1;
}
m = xStep > 0 ? xStep : 1;
p = colorBuf + xSrc * 4;
pixAcc0 = pixAcc1 = pixAcc2 = pixAcc3 = 0;
for (i = 0; i < n; ++i) {
for (j = 0; j < m; ++j) {
pixAcc0 += *p++;
pixAcc1 += *p++;
pixAcc2 += *p++;
pixAcc3 += *p++;
}
p += 4 * (w - m);
}
pixMul = (SplashCoord)1 / (SplashCoord)(n * m);
pix[0] = (int)((SplashCoord)pixAcc0 * pixMul);
pix[1] = (int)((SplashCoord)pixAcc1 * pixMul);
pix[2] = (int)((SplashCoord)pixAcc2 * pixMul);
pix[3] = (int)((SplashCoord)pixAcc3 * pixMul);
if (vectorAntialias && clipRes != splashClipAllInside) {
pipe.shape = (SplashCoord)1;
drawAAPixel(&pipe, tx + x2, ty + y2);
} else {
drawPixel(&pipe, tx + x2, ty + y2,
clipRes2 == splashClipAllInside);
}
xSrc += xStep;
x1 += xSign;
y1 += yShear1;
}
break;
#endif // SPLASH_CMYK
}
}
}
gfree(colorBuf);
gfree(alphaBuf);
return splashOk;
}
| 0
|
173,007
|
IHEVCD_ERROR_T ihevcd_get_tile_pos(pps_t *ps_pps,
sps_t *ps_sps,
WORD32 ctb_x,
WORD32 ctb_y,
WORD32 *pi4_ctb_tile_x,
WORD32 *pi4_ctb_tile_y,
WORD32 *pi4_tile_idx)
{
tile_t *ps_tile_tmp;
WORD32 i;
WORD32 tile_row, tile_col;
if(ctb_x < 0 || ctb_y < 0)
{
*pi4_ctb_tile_x = 0;
*pi4_ctb_tile_y = 0;
*pi4_tile_idx = 0;
return (IHEVCD_ERROR_T)IHEVCD_SUCCESS;
}
tile_row = 0;
tile_col = 0;
ps_tile_tmp = ps_pps->ps_tile;
if(0 == ps_pps->i1_tiles_enabled_flag)
{
*pi4_ctb_tile_x = ctb_x;
*pi4_ctb_tile_y = ctb_y;
*pi4_tile_idx = 0;
}
else
{
for(i = 0; i < ps_pps->i1_num_tile_columns; i++)
{
WORD16 next_tile_ctb_x;
ps_tile_tmp = ps_pps->ps_tile + i; //* ps_pps->i1_num_tile_rows;
if((ps_pps->i1_num_tile_columns - 1) == i)
{
next_tile_ctb_x = ps_sps->i2_pic_wd_in_ctb;
}
else
{
tile_t *ps_tile_next_tmp;
ps_tile_next_tmp = ps_pps->ps_tile + i + 1;
next_tile_ctb_x = ps_tile_next_tmp->u1_pos_x;
}
if((ctb_x >= ps_tile_tmp->u1_pos_x) && (ctb_x < next_tile_ctb_x))
{
tile_col = i;
break;
}
}
*pi4_ctb_tile_x = ctb_x - ps_tile_tmp->u1_pos_x;
for(i = 0; i < ps_pps->i1_num_tile_rows; i++)
{
WORD16 next_tile_ctb_y;
ps_tile_tmp = ps_pps->ps_tile + i * ps_pps->i1_num_tile_columns;
if((ps_pps->i1_num_tile_rows - 1) == i)
{
next_tile_ctb_y = ps_sps->i2_pic_ht_in_ctb;
}
else
{
tile_t *ps_tile_next_tmp;
ps_tile_next_tmp = ps_pps->ps_tile + ((i + 1) * ps_pps->i1_num_tile_columns);
next_tile_ctb_y = ps_tile_next_tmp->u1_pos_y;
}
if((ctb_y >= ps_tile_tmp->u1_pos_y) && (ctb_y < next_tile_ctb_y))
{
tile_row = i;
break;
}
}
*pi4_ctb_tile_y = ctb_y - ps_tile_tmp->u1_pos_y;
*pi4_tile_idx = tile_row * ps_pps->i1_num_tile_columns
+ tile_col;
}
return (IHEVCD_ERROR_T)IHEVCD_SUCCESS;
}
| 0
|
264,688
|
static void qemu_init_child_watch(void)
{
struct sigaction act;
sigchld_bh = qemu_bh_new(sigchld_bh_handler, NULL);
memset(&act, 0, sizeof(act));
act.sa_handler = sigchld_handler;
act.sa_flags = SA_NOCLDSTOP;
sigaction(SIGCHLD, &act, NULL);
}
| 0
|
51,790
|
purgeline(struct html_feed_environ *h_env)
{
char *p, *q;
Str tmp;
if (h_env->buf == NULL || h_env->blank_lines == 0)
return;
p = rpopTextLine(h_env->buf)->line->ptr;
tmp = Strnew();
while (*p) {
q = p;
if (sloppy_parse_line(&p)) {
Strcat_charp_n(tmp, q, p - q);
}
}
appendTextLine(h_env->buf, tmp, 0);
h_env->blank_lines--;
}
| 0
|
318,425
|
static int config_props(AVFilterLink *inlink)
{
AVFilterContext *ctx = inlink->dst;
LutContext *lut = ctx->priv;
const AVPixFmtDescriptor *desc = &av_pix_fmt_descriptors[inlink->format];
int min[4], max[4];
int val, comp, ret;
lut->hsub = desc->log2_chroma_w;
lut->vsub = desc->log2_chroma_h;
lut->var_values[VAR_W] = inlink->w;
lut->var_values[VAR_H] = inlink->h;
switch (inlink->format) {
case PIX_FMT_YUV410P:
case PIX_FMT_YUV411P:
case PIX_FMT_YUV420P:
case PIX_FMT_YUV422P:
case PIX_FMT_YUV440P:
case PIX_FMT_YUV444P:
case PIX_FMT_YUVA420P:
min[Y] = min[U] = min[V] = 16;
max[Y] = 235;
max[U] = max[V] = 240;
min[A] = 0; max[A] = 255;
break;
default:
min[0] = min[1] = min[2] = min[3] = 0;
max[0] = max[1] = max[2] = max[3] = 255;
}
lut->is_yuv = lut->is_rgb = 0;
if (ff_fmt_is_in(inlink->format, yuv_pix_fmts)) lut->is_yuv = 1;
else if (ff_fmt_is_in(inlink->format, rgb_pix_fmts)) lut->is_rgb = 1;
if (lut->is_rgb) {
switch (inlink->format) {
case PIX_FMT_ARGB: lut->rgba_map[A] = 0; lut->rgba_map[R] = 1; lut->rgba_map[G] = 2; lut->rgba_map[B] = 3; break;
case PIX_FMT_ABGR: lut->rgba_map[A] = 0; lut->rgba_map[B] = 1; lut->rgba_map[G] = 2; lut->rgba_map[R] = 3; break;
case PIX_FMT_RGBA:
case PIX_FMT_RGB24: lut->rgba_map[R] = 0; lut->rgba_map[G] = 1; lut->rgba_map[B] = 2; lut->rgba_map[A] = 3; break;
case PIX_FMT_BGRA:
case PIX_FMT_BGR24: lut->rgba_map[B] = 0; lut->rgba_map[G] = 1; lut->rgba_map[R] = 2; lut->rgba_map[A] = 3; break;
}
lut->step = av_get_bits_per_pixel(desc) >> 3;
}
for (comp = 0; comp < desc->nb_components; comp++) {
double res;
/* create the parsed expression */
ret = av_expr_parse(&lut->comp_expr[comp], lut->comp_expr_str[comp],
var_names, funcs1_names, funcs1, NULL, NULL, 0, ctx);
if (ret < 0) {
av_log(ctx, AV_LOG_ERROR,
"Error when parsing the expression '%s' for the component %d.\n",
lut->comp_expr_str[comp], comp);
return AVERROR(EINVAL);
}
/* compute the lut */
lut->var_values[VAR_MAXVAL] = max[comp];
lut->var_values[VAR_MINVAL] = min[comp];
for (val = 0; val < 256; val++) {
lut->var_values[VAR_VAL] = val;
lut->var_values[VAR_CLIPVAL] = av_clip(val, min[comp], max[comp]);
lut->var_values[VAR_NEGVAL] =
av_clip(min[comp] + max[comp] - lut->var_values[VAR_VAL],
min[comp], max[comp]);
res = av_expr_eval(lut->comp_expr[comp], lut->var_values, lut);
if (isnan(res)) {
av_log(ctx, AV_LOG_ERROR,
"Error when evaluating the expression '%s' for the value %d for the component #%d.\n",
lut->comp_expr_str[comp], val, comp);
return AVERROR(EINVAL);
}
lut->lut[comp][val] = av_clip((int)res, min[comp], max[comp]);
av_log(ctx, AV_LOG_DEBUG, "val[%d][%d] = %d\n", comp, val, lut->lut[comp][val]);
}
}
return 0;
}
| 0
|
399,073
|
WORD_LIST *
expand_words_shellexp (list)
WORD_LIST *list;
{
return (expand_word_list_internal (list, WEXP_SHELLEXP));
}
| 0
|
132,545
|
static unsigned long mem_cgroup_recursive_stat(struct mem_cgroup *memcg,
enum mem_cgroup_stat_index idx)
{
struct mem_cgroup *iter;
long val = 0;
/* Per-cpu values can be negative, use a signed accumulator */
for_each_mem_cgroup_tree(iter, memcg)
val += mem_cgroup_read_stat(iter, idx);
if (val < 0) /* race ? */
val = 0;
return val;
}
| 0
|
134,132
|
static int __net_init ping_v4_proc_init_net(struct net *net)
{
return ping_proc_register(net, &ping_v4_seq_afinfo);
}
| 0
|
285,048
|
static int __mem_cgroup_try_charge(struct mm_struct *mm,
gfp_t gfp_mask,
unsigned int nr_pages,
struct mem_cgroup **ptr,
bool oom)
{
unsigned int batch = max(CHARGE_BATCH, nr_pages);
int nr_oom_retries = MEM_CGROUP_RECLAIM_RETRIES;
struct mem_cgroup *memcg = NULL;
int ret;
/*
 * Unlike the global VM's OOM-kill, we're not in a system-level
 * memory shortage here. So, allow a dying process to go ahead, in
 * addition to MEMDIE processes.
*/
if (unlikely(test_thread_flag(TIF_MEMDIE)
|| fatal_signal_pending(current)))
goto bypass;
/*
* We always charge the cgroup the mm_struct belongs to.
* The mm_struct's mem_cgroup changes on task migration if the
* thread group leader migrates. It's possible that mm is not
* set, if so charge the init_mm (happens for pagecache usage).
*/
if (!*ptr && !mm)
*ptr = root_mem_cgroup;
again:
if (*ptr) { /* css should be a valid one */
memcg = *ptr;
VM_BUG_ON(css_is_removed(&memcg->css));
if (mem_cgroup_is_root(memcg))
goto done;
if (nr_pages == 1 && consume_stock(memcg))
goto done;
css_get(&memcg->css);
} else {
struct task_struct *p;
rcu_read_lock();
p = rcu_dereference(mm->owner);
/*
* Because we don't have task_lock(), "p" can exit.
 * In that case, "memcg" can point to root or p can be NULL with
 * race with swapoff. Then, we have a small risk of mis-accounting.
 * But such kind of mis-accounting by race always happens because
 * we don't have cgroup_mutex(). It's overkill and we allow that
 * small race, here.
 * (*) swapoff et al. will charge against mm-struct not against
 * task-struct. So, mm->owner can be NULL.
*/
memcg = mem_cgroup_from_task(p);
if (!memcg)
memcg = root_mem_cgroup;
if (mem_cgroup_is_root(memcg)) {
rcu_read_unlock();
goto done;
}
if (nr_pages == 1 && consume_stock(memcg)) {
/*
 * It seems dangerous to access memcg without css_get().
 * But considering how consume_stock works, it's not
 * necessary. If consume_stock succeeds, some charges
* from this memcg are cached on this cpu. So, we
* don't need to call css_get()/css_tryget() before
* calling consume_stock().
*/
rcu_read_unlock();
goto done;
}
/* after here, we may be blocked. we need to get refcnt */
if (!css_tryget(&memcg->css)) {
rcu_read_unlock();
goto again;
}
rcu_read_unlock();
}
do {
bool oom_check;
/* If killed, bypass charge */
if (fatal_signal_pending(current)) {
css_put(&memcg->css);
goto bypass;
}
oom_check = false;
if (oom && !nr_oom_retries) {
oom_check = true;
nr_oom_retries = MEM_CGROUP_RECLAIM_RETRIES;
}
ret = mem_cgroup_do_charge(memcg, gfp_mask, batch, oom_check);
switch (ret) {
case CHARGE_OK:
break;
case CHARGE_RETRY: /* not in OOM situation but retry */
batch = nr_pages;
css_put(&memcg->css);
memcg = NULL;
goto again;
case CHARGE_WOULDBLOCK: /* !__GFP_WAIT */
css_put(&memcg->css);
goto nomem;
case CHARGE_NOMEM: /* OOM routine works */
if (!oom) {
css_put(&memcg->css);
goto nomem;
}
/* If oom, we never return -ENOMEM */
nr_oom_retries--;
break;
case CHARGE_OOM_DIE: /* Killed by OOM Killer */
css_put(&memcg->css);
goto bypass;
}
} while (ret != CHARGE_OK);
if (batch > nr_pages)
refill_stock(memcg, batch - nr_pages);
css_put(&memcg->css);
done:
*ptr = memcg;
return 0;
nomem:
*ptr = NULL;
return -ENOMEM;
bypass:
*ptr = root_mem_cgroup;
return -EINTR;
}
| 0
|
204,719
|
void RenderFrameImpl::showContextMenu(const blink::WebContextMenuData& data) {
ContextMenuParams params = ContextMenuParamsBuilder::Build(data);
params.source_type = GetRenderWidget()->context_menu_source_type();
GetRenderWidget()->OnShowHostContextMenu(&params);
if (GetRenderWidget()->has_host_context_menu_location()) {
params.x = GetRenderWidget()->host_context_menu_location().x();
params.y = GetRenderWidget()->host_context_menu_location().y();
}
if (params.src_url.spec().size() > GetMaxURLChars())
params.src_url = GURL();
context_menu_node_ = data.node;
#if defined(OS_ANDROID)
gfx::Rect start_rect;
gfx::Rect end_rect;
GetRenderWidget()->GetSelectionBounds(&start_rect, &end_rect);
params.selection_start = gfx::Point(start_rect.x(), start_rect.bottom());
params.selection_end = gfx::Point(end_rect.right(), end_rect.bottom());
#endif
Send(new FrameHostMsg_ContextMenu(routing_id_, params));
}
| 0
|
274,561
|
const wchar_t *LibRaw_bigfile_datastream::wfname()
{
return wfilename.size()>0?wfilename.c_str():NULL;
}
| 0
|
494,831
|
v8::Local<v8::Promise> ExecuteJavaScriptInIsolatedWorld(
gin::Arguments* gin_args,
int world_id,
const std::vector<gin_helper::Dictionary>& scripts) {
gin_helper::Arguments* args = static_cast<gin_helper::Arguments*>(gin_args);
v8::Isolate* isolate = args->isolate();
gin_helper::Promise<v8::Local<v8::Value>> promise(isolate);
v8::Local<v8::Promise> handle = promise.GetHandle();
content::RenderFrame* render_frame;
std::string error_msg;
if (!MaybeGetRenderFrame(&error_msg, "executeJavaScriptInIsolatedWorld",
&render_frame)) {
promise.RejectWithErrorMessage(error_msg);
return handle;
}
bool has_user_gesture = false;
args->GetNext(&has_user_gesture);
blink::WebLocalFrame::ScriptExecutionType scriptExecutionType =
blink::WebLocalFrame::kSynchronous;
args->GetNext(&scriptExecutionType);
ScriptExecutionCallback::CompletionCallback completion_callback;
args->GetNext(&completion_callback);
std::vector<blink::WebScriptSource> sources;
sources.reserve(scripts.size());
for (const auto& script : scripts) {
std::u16string code;
std::u16string url;
int start_line = 1;
script.Get("url", &url);
script.Get("startLine", &start_line);
if (!script.Get("code", &code)) {
const char error_message[] = "Invalid 'code'";
if (!completion_callback.is_null()) {
std::move(completion_callback)
.Run(v8::Undefined(isolate),
v8::Exception::Error(
v8::String::NewFromUtf8(isolate, error_message)
.ToLocalChecked()));
}
promise.RejectWithErrorMessage(error_message);
return handle;
}
sources.emplace_back(blink::WebString::FromUTF16(code),
blink::WebURL(GURL(url)), start_line);
}
render_frame->GetWebFrame()->RequestExecuteScript(
world_id, base::make_span(sources), has_user_gesture,
scriptExecutionType,
new ScriptExecutionCallback(std::move(promise),
std::move(completion_callback)),
blink::BackForwardCacheAware::kPossiblyDisallow);
return handle;
}
| 0
|
255,779
|
pipe_iov_copy_to_user(struct iovec *iov, const void *from, unsigned long len,
int atomic)
{
unsigned long copy;
while (len > 0) {
while (!iov->iov_len)
iov++;
copy = min_t(unsigned long, len, iov->iov_len);
if (atomic) {
if (__copy_to_user_inatomic(iov->iov_base, from, copy))
return -EFAULT;
} else {
if (copy_to_user(iov->iov_base, from, copy))
return -EFAULT;
}
from += copy;
len -= copy;
iov->iov_base += copy;
iov->iov_len -= copy;
}
return 0;
}
| 1
|
154,390
|
void CLASS sinar_4shot_load_raw()
{
ushort *pixel;
unsigned shot, row, col, r, c;
if (raw_image)
{
shot = LIM(shot_select, 1, 4) - 1;
fseek(ifp, data_offset + shot * 4, SEEK_SET);
fseek(ifp, get4(), SEEK_SET);
unpacked_load_raw();
return;
}
pixel = (ushort *)calloc(raw_width, sizeof *pixel);
merror(pixel, "sinar_4shot_load_raw()");
#ifdef LIBRAW_LIBRARY_BUILD
try
{
#endif
for (shot = 0; shot < 4; shot++)
{
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
fseek(ifp, data_offset + shot * 4, SEEK_SET);
fseek(ifp, get4(), SEEK_SET);
for (row = 0; row < raw_height; row++)
{
read_shorts(pixel, raw_width);
if ((r = row - top_margin - (shot >> 1 & 1)) >= height)
continue;
for (col = 0; col < raw_width; col++)
{
if ((c = col - left_margin - (shot & 1)) >= width)
continue;
image[r * width + c][(row & 1) * 3 ^ (~col & 1)] = pixel[col];
}
}
}
#ifdef LIBRAW_LIBRARY_BUILD
}
catch (...)
{
free(pixel);
throw;
}
#endif
free(pixel);
mix_green = 1;
}
| 0
|
329,204
|
static uint32_t nvic_readl(NVICState *s, uint32_t offset, MemTxAttrs attrs)
{
ARMCPU *cpu = s->cpu;
uint32_t val;
switch (offset) {
case 4: /* Interrupt Control Type. */
return ((s->num_irq - NVIC_FIRST_IRQ) / 32) - 1;
case 0x380 ... 0x3bf: /* NVIC_ITNS<n> */
{
int startvec = 32 * (offset - 0x380) + NVIC_FIRST_IRQ;
int i;
if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
goto bad_offset;
}
if (!attrs.secure) {
return 0;
}
val = 0;
for (i = 0; i < 32 && startvec + i < s->num_irq; i++) {
if (s->itns[startvec + i]) {
val |= (1 << i);
}
}
return val;
}
case 0xd00: /* CPUID Base. */
return cpu->midr;
case 0xd04: /* Interrupt Control State (ICSR) */
/* VECTACTIVE */
val = cpu->env.v7m.exception;
/* VECTPENDING */
val |= (s->vectpending & 0xff) << 12;
/* ISRPENDING - set if any external IRQ is pending */
if (nvic_isrpending(s)) {
val |= (1 << 22);
}
/* RETTOBASE - set if only one handler is active */
if (nvic_rettobase(s)) {
val |= (1 << 11);
}
if (attrs.secure) {
/* PENDSTSET */
if (s->sec_vectors[ARMV7M_EXCP_SYSTICK].pending) {
val |= (1 << 26);
}
/* PENDSVSET */
if (s->sec_vectors[ARMV7M_EXCP_PENDSV].pending) {
val |= (1 << 28);
}
} else {
/* PENDSTSET */
if (s->vectors[ARMV7M_EXCP_SYSTICK].pending) {
val |= (1 << 26);
}
/* PENDSVSET */
if (s->vectors[ARMV7M_EXCP_PENDSV].pending) {
val |= (1 << 28);
}
}
/* NMIPENDSET */
if ((cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK) &&
s->vectors[ARMV7M_EXCP_NMI].pending) {
val |= (1 << 31);
}
/* ISRPREEMPT: RES0 when halting debug not implemented */
/* STTNS: RES0 for the Main Extension */
return val;
case 0xd08: /* Vector Table Offset. */
return cpu->env.v7m.vecbase[attrs.secure];
case 0xd0c: /* Application Interrupt/Reset Control (AIRCR) */
val = 0xfa050000 | (s->prigroup[attrs.secure] << 8);
if (attrs.secure) {
/* s->aircr stores PRIS, BFHFNMINS, SYSRESETREQS */
val |= cpu->env.v7m.aircr;
} else {
if (arm_feature(&cpu->env, ARM_FEATURE_V8)) {
/* BFHFNMINS is R/O from NS; other bits are RAZ/WI. If
* security isn't supported then BFHFNMINS is RAO (and
* the bit in env.v7m.aircr is always set).
*/
val |= cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK;
}
}
return val;
case 0xd10: /* System Control. */
/* TODO: Implement SLEEPONEXIT. */
return 0;
case 0xd14: /* Configuration Control. */
/* The BFHFNMIGN bit is the only non-banked bit; we
* keep it in the non-secure copy of the register.
*/
val = cpu->env.v7m.ccr[attrs.secure];
val |= cpu->env.v7m.ccr[M_REG_NS] & R_V7M_CCR_BFHFNMIGN_MASK;
return val;
case 0xd24: /* System Handler Control and State (SHCSR) */
val = 0;
if (attrs.secure) {
if (s->sec_vectors[ARMV7M_EXCP_MEM].active) {
val |= (1 << 0);
}
if (s->sec_vectors[ARMV7M_EXCP_HARD].active) {
val |= (1 << 2);
}
if (s->sec_vectors[ARMV7M_EXCP_USAGE].active) {
val |= (1 << 3);
}
if (s->sec_vectors[ARMV7M_EXCP_SVC].active) {
val |= (1 << 7);
}
if (s->sec_vectors[ARMV7M_EXCP_PENDSV].active) {
val |= (1 << 10);
}
if (s->sec_vectors[ARMV7M_EXCP_SYSTICK].active) {
val |= (1 << 11);
}
if (s->sec_vectors[ARMV7M_EXCP_USAGE].pending) {
val |= (1 << 12);
}
if (s->sec_vectors[ARMV7M_EXCP_MEM].pending) {
val |= (1 << 13);
}
if (s->sec_vectors[ARMV7M_EXCP_SVC].pending) {
val |= (1 << 15);
}
if (s->sec_vectors[ARMV7M_EXCP_MEM].enabled) {
val |= (1 << 16);
}
if (s->sec_vectors[ARMV7M_EXCP_USAGE].enabled) {
val |= (1 << 18);
}
if (s->sec_vectors[ARMV7M_EXCP_HARD].pending) {
val |= (1 << 21);
}
/* SecureFault is not banked but is always RAZ/WI to NS */
if (s->vectors[ARMV7M_EXCP_SECURE].active) {
val |= (1 << 4);
}
if (s->vectors[ARMV7M_EXCP_SECURE].enabled) {
val |= (1 << 19);
}
if (s->vectors[ARMV7M_EXCP_SECURE].pending) {
val |= (1 << 20);
}
} else {
if (s->vectors[ARMV7M_EXCP_MEM].active) {
val |= (1 << 0);
}
if (arm_feature(&cpu->env, ARM_FEATURE_V8)) {
/* HARDFAULTACT, HARDFAULTPENDED not present in v7M */
if (s->vectors[ARMV7M_EXCP_HARD].active) {
val |= (1 << 2);
}
if (s->vectors[ARMV7M_EXCP_HARD].pending) {
val |= (1 << 21);
}
}
if (s->vectors[ARMV7M_EXCP_USAGE].active) {
val |= (1 << 3);
}
if (s->vectors[ARMV7M_EXCP_SVC].active) {
val |= (1 << 7);
}
if (s->vectors[ARMV7M_EXCP_PENDSV].active) {
val |= (1 << 10);
}
if (s->vectors[ARMV7M_EXCP_SYSTICK].active) {
val |= (1 << 11);
}
if (s->vectors[ARMV7M_EXCP_USAGE].pending) {
val |= (1 << 12);
}
if (s->vectors[ARMV7M_EXCP_MEM].pending) {
val |= (1 << 13);
}
if (s->vectors[ARMV7M_EXCP_SVC].pending) {
val |= (1 << 15);
}
if (s->vectors[ARMV7M_EXCP_MEM].enabled) {
val |= (1 << 16);
}
if (s->vectors[ARMV7M_EXCP_USAGE].enabled) {
val |= (1 << 18);
}
}
if (attrs.secure || (cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK)) {
if (s->vectors[ARMV7M_EXCP_BUS].active) {
val |= (1 << 1);
}
if (s->vectors[ARMV7M_EXCP_BUS].pending) {
val |= (1 << 14);
}
if (s->vectors[ARMV7M_EXCP_BUS].enabled) {
val |= (1 << 17);
}
if (arm_feature(&cpu->env, ARM_FEATURE_V8) &&
s->vectors[ARMV7M_EXCP_NMI].active) {
/* NMIACT is not present in v7M */
val |= (1 << 5);
}
}
/* TODO: this is RAZ/WI from NS if DEMCR.SDME is set */
if (s->vectors[ARMV7M_EXCP_DEBUG].active) {
val |= (1 << 8);
}
return val;
case 0xd28: /* Configurable Fault Status. */
/* The BFSR bits [15:8] are shared between security states
* and we store them in the NS copy
*/
val = cpu->env.v7m.cfsr[attrs.secure];
val |= cpu->env.v7m.cfsr[M_REG_NS] & R_V7M_CFSR_BFSR_MASK;
return val;
case 0xd2c: /* Hard Fault Status. */
return cpu->env.v7m.hfsr;
case 0xd30: /* Debug Fault Status. */
return cpu->env.v7m.dfsr;
case 0xd34: /* MMFAR MemManage Fault Address */
return cpu->env.v7m.mmfar[attrs.secure];
case 0xd38: /* Bus Fault Address. */
return cpu->env.v7m.bfar;
case 0xd3c: /* Aux Fault Status. */
/* TODO: Implement fault status registers. */
qemu_log_mask(LOG_UNIMP,
"Aux Fault status registers unimplemented\n");
return 0;
case 0xd40: /* PFR0. */
return 0x00000030;
case 0xd44: /* PFR1. */
return 0x00000200;
case 0xd48: /* DFR0. */
return 0x00100000;
case 0xd4c: /* AFR0. */
return 0x00000000;
case 0xd50: /* MMFR0. */
return 0x00000030;
case 0xd54: /* MMFR1. */
return 0x00000000;
case 0xd58: /* MMFR2. */
return 0x00000000;
case 0xd5c: /* MMFR3. */
return 0x00000000;
case 0xd60: /* ISAR0. */
return 0x01141110;
case 0xd64: /* ISAR1. */
return 0x02111000;
case 0xd68: /* ISAR2. */
return 0x21112231;
case 0xd6c: /* ISAR3. */
return 0x01111110;
case 0xd70: /* ISAR4. */
return 0x01310102;
/* TODO: Implement debug registers. */
case 0xd90: /* MPU_TYPE */
/* Unified MPU; if the MPU is not present this value is zero */
return cpu->pmsav7_dregion << 8;
case 0xd94: /* MPU_CTRL */
return cpu->env.v7m.mpu_ctrl[attrs.secure];
case 0xd98: /* MPU_RNR */
return cpu->env.pmsav7.rnr[attrs.secure];
case 0xd9c: /* MPU_RBAR */
case 0xda4: /* MPU_RBAR_A1 */
case 0xdac: /* MPU_RBAR_A2 */
case 0xdb4: /* MPU_RBAR_A3 */
{
int region = cpu->env.pmsav7.rnr[attrs.secure];
if (arm_feature(&cpu->env, ARM_FEATURE_V8)) {
/* PMSAv8M handling of the aliases is different from v7M:
* aliases A1, A2, A3 override the low two bits of the region
* number in MPU_RNR, and there is no 'region' field in the
* RBAR register.
*/
int aliasno = (offset - 0xd9c) / 8; /* 0..3 */
if (aliasno) {
region = deposit32(region, 0, 2, aliasno);
}
if (region >= cpu->pmsav7_dregion) {
return 0;
}
return cpu->env.pmsav8.rbar[attrs.secure][region];
}
if (region >= cpu->pmsav7_dregion) {
return 0;
}
return (cpu->env.pmsav7.drbar[region] & 0x1f) | (region & 0xf);
}
case 0xda0: /* MPU_RASR (v7M), MPU_RLAR (v8M) */
case 0xda8: /* MPU_RASR_A1 (v7M), MPU_RLAR_A1 (v8M) */
case 0xdb0: /* MPU_RASR_A2 (v7M), MPU_RLAR_A2 (v8M) */
case 0xdb8: /* MPU_RASR_A3 (v7M), MPU_RLAR_A3 (v8M) */
{
int region = cpu->env.pmsav7.rnr[attrs.secure];
if (arm_feature(&cpu->env, ARM_FEATURE_V8)) {
/* PMSAv8M handling of the aliases is different from v7M:
* aliases A1, A2, A3 override the low two bits of the region
* number in MPU_RNR.
*/
int aliasno = (offset - 0xda0) / 8; /* 0..3 */
if (aliasno) {
region = deposit32(region, 0, 2, aliasno);
}
if (region >= cpu->pmsav7_dregion) {
return 0;
}
return cpu->env.pmsav8.rlar[attrs.secure][region];
}
if (region >= cpu->pmsav7_dregion) {
return 0;
}
return ((cpu->env.pmsav7.dracr[region] & 0xffff) << 16) |
(cpu->env.pmsav7.drsr[region] & 0xffff);
}
case 0xdc0: /* MPU_MAIR0 */
if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
goto bad_offset;
}
return cpu->env.pmsav8.mair0[attrs.secure];
case 0xdc4: /* MPU_MAIR1 */
if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
goto bad_offset;
}
return cpu->env.pmsav8.mair1[attrs.secure];
case 0xdd0: /* SAU_CTRL */
if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
goto bad_offset;
}
if (!attrs.secure) {
return 0;
}
return cpu->env.sau.ctrl;
case 0xdd4: /* SAU_TYPE */
if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
goto bad_offset;
}
if (!attrs.secure) {
return 0;
}
return cpu->sau_sregion;
case 0xdd8: /* SAU_RNR */
if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
goto bad_offset;
}
if (!attrs.secure) {
return 0;
}
return cpu->env.sau.rnr;
case 0xddc: /* SAU_RBAR */
{
int region = cpu->env.sau.rnr;
if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
goto bad_offset;
}
if (!attrs.secure) {
return 0;
}
if (region >= cpu->sau_sregion) {
return 0;
}
return cpu->env.sau.rbar[region];
}
case 0xde0: /* SAU_RLAR */
{
int region = cpu->env.sau.rnr;
if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
goto bad_offset;
}
if (!attrs.secure) {
return 0;
}
if (region >= cpu->sau_sregion) {
return 0;
}
return cpu->env.sau.rlar[region];
}
case 0xde4: /* SFSR */
if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
goto bad_offset;
}
if (!attrs.secure) {
return 0;
}
return cpu->env.v7m.sfsr;
case 0xde8: /* SFAR */
if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
goto bad_offset;
}
if (!attrs.secure) {
return 0;
}
return cpu->env.v7m.sfar;
default:
bad_offset:
qemu_log_mask(LOG_GUEST_ERROR, "NVIC: Bad read offset 0x%x\n", offset);
return 0;
}
}
| 1
|
62,038
|
static BOOL autodetect_recv_bandwidth_measure_results(rdpRdp* rdp, wStream* s,
AUTODETECT_RSP_PDU* autodetectRspPdu)
{
BOOL success = TRUE;
if (autodetectRspPdu->headerLength != 0x0E)
return FALSE;
WLog_VRB(AUTODETECT_TAG, "received Bandwidth Measure Results PDU");
if (Stream_GetRemainingLength(s) < 8)
return -1;
Stream_Read_UINT32(s, rdp->autodetect->bandwidthMeasureTimeDelta); /* timeDelta (4 bytes) */
Stream_Read_UINT32(s, rdp->autodetect->bandwidthMeasureByteCount); /* byteCount (4 bytes) */
if (rdp->autodetect->bandwidthMeasureTimeDelta > 0)
rdp->autodetect->netCharBandwidth = rdp->autodetect->bandwidthMeasureByteCount * 8 /
rdp->autodetect->bandwidthMeasureTimeDelta;
else
rdp->autodetect->netCharBandwidth = 0;
IFCALLRET(rdp->autodetect->BandwidthMeasureResults, success, rdp->context,
autodetectRspPdu->sequenceNumber);
return success;
}
| 0
|
53,776
|
pj_status_t pj_ssl_sock_ossl_test_send_buf(pj_pool_t *pool)
{
enum { MAX_CHUNK_NUM = 20 };
unsigned chunk_size, chunk_cnt, i;
write_data_t *wdata[MAX_CHUNK_NUM] = {0};
pj_time_val now;
pj_ssl_sock_t *ssock = NULL;
pj_ssl_sock_param param;
pj_status_t status;
pj_gettimeofday(&now);
pj_srand((unsigned)now.sec);
pj_ssl_sock_param_default(&param);
status = pj_ssl_sock_create(pool, &param, &ssock);
if (status != PJ_SUCCESS) {
return status;
}
if (ssock->send_buf.max_len == 0) {
ssock->send_buf.buf = (char*)
pj_pool_alloc(ssock->pool,
ssock->param.send_buffer_size);
ssock->send_buf.max_len = ssock->param.send_buffer_size;
ssock->send_buf.start = ssock->send_buf.buf;
ssock->send_buf.len = 0;
}
chunk_size = ssock->param.send_buffer_size / MAX_CHUNK_NUM / 2;
chunk_cnt = 0;
for (i = 0; i < MAX_CHUNK_NUM; i++) {
wdata[i] = alloc_send_data(ssock, pj_rand() % chunk_size + 321);
if (wdata[i])
chunk_cnt++;
else
break;
}
while (chunk_cnt) {
i = pj_rand() % MAX_CHUNK_NUM;
if (wdata[i]) {
free_send_data(ssock, wdata[i]);
wdata[i] = NULL;
chunk_cnt--;
}
}
if (ssock->send_buf.len != 0)
status = PJ_EBUG;
pj_ssl_sock_close(ssock);
return status;
}
| 0
|
291,884
|
CJSON_PUBLIC(const char *) cJSON_GetErrorPtr(void)
{
return (const char*) (global_error.json + global_error.position);
}
| 0
|
33,583
|
xml2_error_hdr(void *arg, const char *msg, xmlParserSeverities severity,
xmlTextReaderLocatorPtr locator)
{
struct archive_read *a;
(void)locator; /* UNUSED */
a = (struct archive_read *)arg;
switch (severity) {
case XML_PARSER_SEVERITY_VALIDITY_WARNING:
case XML_PARSER_SEVERITY_WARNING:
archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC,
"XML Parsing error: %s", msg);
break;
case XML_PARSER_SEVERITY_VALIDITY_ERROR:
case XML_PARSER_SEVERITY_ERROR:
archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC,
"XML Parsing error: %s", msg);
break;
}
}
| 0
|
286,167
|
int Reverb_getParameter(ReverbContext *pContext,
void *pParam,
uint32_t *pValueSize,
void *pValue){
int status = 0;
int32_t *pParamTemp = (int32_t *)pParam;
int32_t param = *pParamTemp++;
char *name;
t_reverb_settings *pProperties;
if (pContext->preset) {
if (param != REVERB_PARAM_PRESET || *pValueSize < sizeof(uint16_t)) {
return -EINVAL;
}
*(uint16_t *)pValue = pContext->nextPreset;
ALOGV("get REVERB_PARAM_PRESET, preset %d", pContext->nextPreset);
return 0;
}
switch (param){
case REVERB_PARAM_ROOM_LEVEL:
if (*pValueSize != sizeof(int16_t)){
ALOGV("\tLVM_ERROR : Reverb_getParameter() invalid pValueSize1 %d", *pValueSize);
return -EINVAL;
}
*pValueSize = sizeof(int16_t);
break;
case REVERB_PARAM_ROOM_HF_LEVEL:
if (*pValueSize != sizeof(int16_t)){
ALOGV("\tLVM_ERROR : Reverb_getParameter() invalid pValueSize12 %d", *pValueSize);
return -EINVAL;
}
*pValueSize = sizeof(int16_t);
break;
case REVERB_PARAM_DECAY_TIME:
if (*pValueSize != sizeof(uint32_t)){
ALOGV("\tLVM_ERROR : Reverb_getParameter() invalid pValueSize3 %d", *pValueSize);
return -EINVAL;
}
*pValueSize = sizeof(uint32_t);
break;
case REVERB_PARAM_DECAY_HF_RATIO:
if (*pValueSize != sizeof(int16_t)){
ALOGV("\tLVM_ERROR : Reverb_getParameter() invalid pValueSize4 %d", *pValueSize);
return -EINVAL;
}
*pValueSize = sizeof(int16_t);
break;
case REVERB_PARAM_REFLECTIONS_LEVEL:
if (*pValueSize != sizeof(int16_t)){
ALOGV("\tLVM_ERROR : Reverb_getParameter() invalid pValueSize5 %d", *pValueSize);
return -EINVAL;
}
*pValueSize = sizeof(int16_t);
break;
case REVERB_PARAM_REFLECTIONS_DELAY:
if (*pValueSize != sizeof(uint32_t)){
ALOGV("\tLVM_ERROR : Reverb_getParameter() invalid pValueSize6 %d", *pValueSize);
return -EINVAL;
}
*pValueSize = sizeof(uint32_t);
break;
case REVERB_PARAM_REVERB_LEVEL:
if (*pValueSize != sizeof(int16_t)){
ALOGV("\tLVM_ERROR : Reverb_getParameter() invalid pValueSize7 %d", *pValueSize);
return -EINVAL;
}
*pValueSize = sizeof(int16_t);
break;
case REVERB_PARAM_REVERB_DELAY:
if (*pValueSize != sizeof(uint32_t)){
ALOGV("\tLVM_ERROR : Reverb_getParameter() invalid pValueSize8 %d", *pValueSize);
return -EINVAL;
}
*pValueSize = sizeof(uint32_t);
break;
case REVERB_PARAM_DIFFUSION:
if (*pValueSize != sizeof(int16_t)){
ALOGV("\tLVM_ERROR : Reverb_getParameter() invalid pValueSize9 %d", *pValueSize);
return -EINVAL;
}
*pValueSize = sizeof(int16_t);
break;
case REVERB_PARAM_DENSITY:
if (*pValueSize != sizeof(int16_t)){
ALOGV("\tLVM_ERROR : Reverb_getParameter() invalid pValueSize10 %d", *pValueSize);
return -EINVAL;
}
*pValueSize = sizeof(int16_t);
break;
case REVERB_PARAM_PROPERTIES:
if (*pValueSize != sizeof(t_reverb_settings)){
ALOGV("\tLVM_ERROR : Reverb_getParameter() invalid pValueSize11 %d", *pValueSize);
return -EINVAL;
}
*pValueSize = sizeof(t_reverb_settings);
break;
default:
ALOGV("\tLVM_ERROR : Reverb_getParameter() invalid param %d", param);
return -EINVAL;
}
pProperties = (t_reverb_settings *) pValue;
switch (param){
case REVERB_PARAM_PROPERTIES:
pProperties->roomLevel = ReverbGetRoomLevel(pContext);
pProperties->roomHFLevel = ReverbGetRoomHfLevel(pContext);
pProperties->decayTime = ReverbGetDecayTime(pContext);
pProperties->decayHFRatio = ReverbGetDecayHfRatio(pContext);
pProperties->reflectionsLevel = 0;
pProperties->reflectionsDelay = 0;
pProperties->reverbDelay = 0;
pProperties->reverbLevel = ReverbGetReverbLevel(pContext);
pProperties->diffusion = ReverbGetDiffusion(pContext);
pProperties->density = ReverbGetDensity(pContext);
ALOGV("\tReverb_getParameter() REVERB_PARAM_PROPERTIES Value is roomLevel %d",
pProperties->roomLevel);
ALOGV("\tReverb_getParameter() REVERB_PARAM_PROPERTIES Value is roomHFLevel %d",
pProperties->roomHFLevel);
ALOGV("\tReverb_getParameter() REVERB_PARAM_PROPERTIES Value is decayTime %d",
pProperties->decayTime);
ALOGV("\tReverb_getParameter() REVERB_PARAM_PROPERTIES Value is decayHFRatio %d",
pProperties->decayHFRatio);
ALOGV("\tReverb_getParameter() REVERB_PARAM_PROPERTIES Value is reflectionsLevel %d",
pProperties->reflectionsLevel);
ALOGV("\tReverb_getParameter() REVERB_PARAM_PROPERTIES Value is reflectionsDelay %d",
pProperties->reflectionsDelay);
ALOGV("\tReverb_getParameter() REVERB_PARAM_PROPERTIES Value is reverbDelay %d",
pProperties->reverbDelay);
ALOGV("\tReverb_getParameter() REVERB_PARAM_PROPERTIES Value is reverbLevel %d",
pProperties->reverbLevel);
ALOGV("\tReverb_getParameter() REVERB_PARAM_PROPERTIES Value is diffusion %d",
pProperties->diffusion);
ALOGV("\tReverb_getParameter() REVERB_PARAM_PROPERTIES Value is density %d",
pProperties->density);
break;
case REVERB_PARAM_ROOM_LEVEL:
*(int16_t *)pValue = ReverbGetRoomLevel(pContext);
break;
case REVERB_PARAM_ROOM_HF_LEVEL:
*(int16_t *)pValue = ReverbGetRoomHfLevel(pContext);
break;
case REVERB_PARAM_DECAY_TIME:
*(uint32_t *)pValue = ReverbGetDecayTime(pContext);
break;
case REVERB_PARAM_DECAY_HF_RATIO:
*(int16_t *)pValue = ReverbGetDecayHfRatio(pContext);
break;
case REVERB_PARAM_REVERB_LEVEL:
*(int16_t *)pValue = ReverbGetReverbLevel(pContext);
break;
case REVERB_PARAM_DIFFUSION:
*(int16_t *)pValue = ReverbGetDiffusion(pContext);
break;
case REVERB_PARAM_DENSITY:
*(uint16_t *)pValue = 0;
*(int16_t *)pValue = ReverbGetDensity(pContext);
break;
case REVERB_PARAM_REFLECTIONS_LEVEL:
*(uint16_t *)pValue = 0;
case REVERB_PARAM_REFLECTIONS_DELAY:
*(uint32_t *)pValue = 0;
case REVERB_PARAM_REVERB_DELAY:
*(uint32_t *)pValue = 0;
break;
default:
ALOGV("\tLVM_ERROR : Reverb_getParameter() invalid param %d", param);
status = -EINVAL;
break;
}
return status;
} /* end Reverb_getParameter */
| 0
|