| idx (int64) | func (string) | target (int64) |
|---|---|---|
237,420
|
void RenderProcessImpl::FreeTransportDIB(TransportDIB* dib) {
if (!dib)
return;
#if defined(OS_MACOSX)
IPC::Message* msg = new ViewHostMsg_FreeTransportDIB(dib->id());
main_thread()->Send(msg);
#endif
delete dib;
}
| 0
|
426,211
|
flatpak_run_get_minimal_env (gboolean devel)
{
GPtrArray *env_array;
static const char * const copy[] = {
"PWD",
"GDMSESSION",
"XDG_CURRENT_DESKTOP",
"XDG_SESSION_DESKTOP",
"DESKTOP_SESSION",
"EMAIL_ADDRESS",
"HOME",
"HOSTNAME",
"LOGNAME",
"REAL_NAME",
"TERM",
"USER",
"USERNAME",
};
static const char * const copy_nodevel[] = {
"LANG",
"LANGUAGE",
"LC_ALL",
"LC_ADDRESS",
"LC_COLLATE",
"LC_CTYPE",
"LC_IDENTIFICATION",
"LC_MEASUREMENT",
"LC_MESSAGES",
"LC_MONETARY",
"LC_NAME",
"LC_NUMERIC",
"LC_PAPER",
"LC_TELEPHONE",
"LC_TIME",
};
int i;
env_array = g_ptr_array_new_with_free_func (g_free);
for (i = 0; i < G_N_ELEMENTS (default_exports); i++)
g_ptr_array_add (env_array, g_strdup_printf ("%s=%s", default_exports[i].env, default_exports[i].val));
if (devel)
{
for (i = 0; i < G_N_ELEMENTS(devel_exports); i++)
g_ptr_array_add (env_array, g_strdup_printf ("%s=%s", devel_exports[i].env, devel_exports[i].val));
}
for (i = 0; i < G_N_ELEMENTS (copy); i++)
{
const char *current = g_getenv (copy[i]);
if (current)
g_ptr_array_add (env_array, g_strdup_printf ("%s=%s", copy[i], current));
}
if (!devel)
{
for (i = 0; i < G_N_ELEMENTS (copy_nodevel); i++)
{
const char *current = g_getenv (copy_nodevel[i]);
if (current)
g_ptr_array_add (env_array, g_strdup_printf ("%s=%s", copy_nodevel[i], current));
}
}
g_ptr_array_add (env_array, NULL);
return (char **) g_ptr_array_free (env_array, FALSE);
}
| 0
|
395,921
|
gnutls_mac_algorithm_t gnutls_mac_get(gnutls_session_t session)
{
record_parameters_st *record_params;
int ret;
ret =
_gnutls_epoch_get(session, EPOCH_READ_CURRENT, &record_params);
if (ret < 0)
return gnutls_assert_val(GNUTLS_MAC_NULL);
return record_params->mac->id;
}
| 0
|
466,327
|
TEST_F(ConnectionManagerUtilityTest, SkipMutateResponseHeadersReturnXRequestId) {
TestResponseHeaderMapImpl response_headers;
TestRequestHeaderMapImpl request_headers{{"x-request-id", "request-id"}};
EXPECT_CALL(*request_id_extension_,
setInResponse(testing::Ref(response_headers), testing::Ref(request_headers)))
.Times(0);
ConnectionManagerUtility::mutateResponseHeaders(response_headers, &request_headers, config_, "");
EXPECT_EQ("", response_headers.get_("x-request-id"));
}
| 0
|
98,463
|
void ConnectionManagerImpl::onEvent(Network::ConnectionEvent event) {
if (event == Network::ConnectionEvent::LocalClose) {
stats_.named_.downstream_cx_destroy_local_.inc();
}
if (event == Network::ConnectionEvent::RemoteClose ||
event == Network::ConnectionEvent::LocalClose) {
if (event == Network::ConnectionEvent::RemoteClose) {
remote_close_ = true;
stats_.named_.downstream_cx_destroy_remote_.inc();
}
absl::string_view details =
event == Network::ConnectionEvent::RemoteClose
? StreamInfo::ResponseCodeDetails::get().DownstreamRemoteDisconnect
: StreamInfo::ResponseCodeDetails::get().DownstreamLocalDisconnect;
// TODO(mattklein123): It is technically possible that something outside of the filter causes
// a local connection close, so we still guard against that here. A better solution would be to
// have some type of "pre-close" callback that we could hook for cleanup that would get called
// regardless of where local close is invoked from.
// NOTE: that this will cause doConnectionClose() to get called twice in the common local close
// cases, but the method protects against that.
// NOTE: In the case where a local close comes from outside the filter, this will cause any
// stream closures to increment remote close stats. We should do better here in the future,
// via the pre-close callback mentioned above.
doConnectionClose(absl::nullopt, absl::nullopt, details);
}
}
| 0
|
100,475
|
void gf_isom_sample_entry_predestroy(GF_SampleEntryBox *ptr)
{
}
| 0
|
158,224
|
void* formal_count_address() { return &thread_local_top_.formal_count_; }
| 0
|
105,652
|
int generic_update_time(struct inode *inode, struct timespec64 *time, int flags)
{
int iflags = I_DIRTY_TIME;
bool dirty = false;
if (flags & S_ATIME)
inode->i_atime = *time;
if (flags & S_VERSION)
dirty = inode_maybe_inc_iversion(inode, false);
if (flags & S_CTIME)
inode->i_ctime = *time;
if (flags & S_MTIME)
inode->i_mtime = *time;
if ((flags & (S_ATIME | S_CTIME | S_MTIME)) &&
!(inode->i_sb->s_flags & SB_LAZYTIME))
dirty = true;
if (dirty)
iflags |= I_DIRTY_SYNC;
__mark_inode_dirty(inode, iflags);
return 0;
}
| 0
|
128,813
|
set_cmdspos_cursor(void)
{
int i, m, c;
set_cmdspos();
if (KeyTyped)
{
m = Columns * Rows;
if (m < 0) // overflow, Columns or Rows at weird value
m = MAXCOL;
}
else
m = MAXCOL;
for (i = 0; i < ccline.cmdlen && i < ccline.cmdpos; ++i)
{
c = cmdline_charsize(i);
// Count ">" for double-wide multi-byte char that doesn't fit.
if (has_mbyte)
correct_cmdspos(i, c);
// If the cmdline doesn't fit, show cursor on last visible char.
// Don't move the cursor itself, so we can still append.
if ((ccline.cmdspos += c) >= m)
{
ccline.cmdspos -= c;
break;
}
if (has_mbyte)
i += (*mb_ptr2len)(ccline.cmdbuff + i) - 1;
}
}
| 0
|
292,304
|
static void create_src_dst(RAnalOp *op) {
op->src[0] = r_anal_value_new ();
op->src[1] = r_anal_value_new ();
op->src[2] = r_anal_value_new ();
op->dst = r_anal_value_new ();
}
| 0
|
330,361
|
static void win32_rearm_timer(struct qemu_alarm_timer *t)
{
struct qemu_alarm_win32 *data = t->priv;
uint64_t nearest_delta_us;
if (!active_timers[QEMU_TIMER_REALTIME] &&
!active_timers[QEMU_TIMER_VIRTUAL])
return;
nearest_delta_us = qemu_next_deadline_dyntick();
nearest_delta_us /= 1000;
timeKillEvent(data->timerId);
data->timerId = timeSetEvent(1,
data->period,
host_alarm_handler,
(DWORD)t,
TIME_ONESHOT | TIME_PERIODIC);
if (!data->timerId) {
fprintf(stderr, "Failed to re-arm win32 alarm timer %ld\n",
GetLastError());
timeEndPeriod(data->period);
exit(1);
}
}
| 0
|
294,990
|
static int query_raw_packet_qp_state(struct mlx5_ib_dev *dev,
struct mlx5_ib_qp *qp,
u8 *raw_packet_qp_state)
{
struct mlx5_ib_raw_packet_qp *raw_packet_qp = &qp->raw_packet_qp;
struct mlx5_ib_sq *sq = &raw_packet_qp->sq;
struct mlx5_ib_rq *rq = &raw_packet_qp->rq;
int err;
u8 sq_state = MLX5_SQ_STATE_NA;
u8 rq_state = MLX5_RQ_STATE_NA;
if (qp->sq.wqe_cnt) {
err = query_raw_packet_qp_sq_state(dev, sq, &sq_state);
if (err)
return err;
}
if (qp->rq.wqe_cnt) {
err = query_raw_packet_qp_rq_state(dev, rq, &rq_state);
if (err)
return err;
}
return sqrq_state_to_qp_state(sq_state, rq_state, qp,
raw_packet_qp_state);
}
| 0
|
447,418
|
char* RemoveCR(const char* txt)
{
static char Buffer[2048];
char* pt;
strncpy(Buffer, txt, 2047);
Buffer[2047] = 0;
for (pt = Buffer; *pt; pt++)
if (*pt == '\n' || *pt == '\r') *pt = ' ';
return Buffer;
}
| 0
|
145,860
|
handle_update_read(int handle, ssize_t bytes)
{
if (handle_is_ok(handle, HANDLE_FILE) && bytes > 0)
handles[handle].bytes_read += bytes;
}
| 0
|
59,978
|
bool f2fs_should_update_outplace(struct inode *inode, struct f2fs_io_info *fio)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
if (test_opt(sbi, LFS))
return true;
if (S_ISDIR(inode->i_mode))
return true;
if (IS_NOQUOTA(inode))
return true;
if (f2fs_is_atomic_file(inode))
return true;
if (fio) {
if (is_cold_data(fio->page))
return true;
if (IS_ATOMIC_WRITTEN_PAGE(fio->page))
return true;
if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED) &&
f2fs_is_checkpointed_data(sbi, fio->old_blkaddr)))
return true;
}
return false;
}
| 0
|
184,373
|
RendererSchedulerImpl::RendererSchedulerImpl(
std::unique_ptr<TaskQueueManager> task_queue_manager)
: helper_(std::move(task_queue_manager), this),
idle_helper_(
&helper_,
this,
"RendererSchedulerIdlePeriod",
base::TimeDelta(),
helper_.NewTaskQueue(MainThreadTaskQueue::QueueCreationParams(
MainThreadTaskQueue::QueueType::kIdle))),
idle_canceled_delayed_task_sweeper_(&helper_,
idle_helper_.IdleTaskRunner()),
render_widget_scheduler_signals_(this),
control_task_queue_(helper_.ControlMainThreadTaskQueue()),
compositor_task_queue_(
helper_.NewTaskQueue(MainThreadTaskQueue::QueueCreationParams(
MainThreadTaskQueue::QueueType::kCompositor)
.SetShouldMonitorQuiescence(true))),
input_task_queue_(
helper_.NewTaskQueue(MainThreadTaskQueue::QueueCreationParams(
MainThreadTaskQueue::QueueType::kInput)
.SetShouldMonitorQuiescence(true))),
compositor_task_queue_enabled_voter_(
compositor_task_queue_->CreateQueueEnabledVoter()),
input_task_queue_enabled_voter_(
input_task_queue_->CreateQueueEnabledVoter()),
delayed_update_policy_runner_(
base::Bind(&RendererSchedulerImpl::UpdatePolicy,
base::Unretained(this)),
helper_.ControlMainThreadTaskQueue()),
seqlock_queueing_time_estimator_(
QueueingTimeEstimator(this, kQueueingTimeWindowDuration, 20)),
main_thread_only_(this,
compositor_task_queue_,
helper_.GetClock(),
helper_.NowTicks()),
any_thread_(this),
policy_may_need_update_(&any_thread_lock_),
weak_factory_(this) {
task_queue_throttler_.reset(
new TaskQueueThrottler(this, &tracing_controller_));
update_policy_closure_ = base::Bind(&RendererSchedulerImpl::UpdatePolicy,
weak_factory_.GetWeakPtr());
end_renderer_hidden_idle_period_closure_.Reset(base::Bind(
&RendererSchedulerImpl::EndIdlePeriod, weak_factory_.GetWeakPtr()));
task_runners_.insert(
std::make_pair(helper_.DefaultMainThreadTaskQueue(), nullptr));
task_runners_.insert(
std::make_pair(compositor_task_queue_,
compositor_task_queue_->CreateQueueEnabledVoter()));
task_runners_.insert(std::make_pair(
input_task_queue_, input_task_queue_->CreateQueueEnabledVoter()));
default_timer_task_queue_ =
NewTimerTaskQueue(MainThreadTaskQueue::QueueType::kDefaultTimer);
v8_task_queue_ = NewTaskQueue(MainThreadTaskQueue::QueueCreationParams(
MainThreadTaskQueue::QueueType::kV8));
ipc_task_queue_ = NewTaskQueue(MainThreadTaskQueue::QueueCreationParams(
MainThreadTaskQueue::QueueType::kIPC));
TRACE_EVENT_OBJECT_CREATED_WITH_ID(
TRACE_DISABLED_BY_DEFAULT("renderer.scheduler"), "RendererScheduler",
this);
helper_.SetObserver(this);
if (base::ThreadTaskRunnerHandle::IsSet()) {
base::trace_event::TraceLog::GetInstance()->AddAsyncEnabledStateObserver(
weak_factory_.GetWeakPtr());
}
int32_t delay_for_background_tab_stopping_millis;
if (!base::StringToInt(
base::GetFieldTrialParamValue("BackgroundTabStopping",
"DelayForBackgroundTabStoppingMills"),
&delay_for_background_tab_stopping_millis)) {
delay_for_background_tab_stopping_millis =
kDelayForBackgroundTabStoppingMillis;
}
delay_for_background_tab_stopping_ = base::TimeDelta::FromMilliseconds(
delay_for_background_tab_stopping_millis);
internal::ProcessState::Get()->is_process_backgrounded =
main_thread_only().renderer_backgrounded;
}
| 0
|
375,648
|
reached_end_position(XLogRecPtr segendpos, uint32 timeline,
bool segment_finished)
{
if (!has_xlogendptr)
{
#ifndef WIN32
fd_set fds;
struct timeval tv;
int r;
/*
* Don't have the end pointer yet - check our pipe to see if it has
* been sent yet.
*/
FD_ZERO(&fds);
FD_SET(bgpipe[0], &fds);
MemSet(&tv, 0, sizeof(tv));
r = select(bgpipe[0] + 1, &fds, NULL, NULL, &tv);
if (r == 1)
{
char xlogend[64];
uint32 hi,
lo;
MemSet(xlogend, 0, sizeof(xlogend));
r = read(bgpipe[0], xlogend, sizeof(xlogend)-1);
if (r < 0)
{
fprintf(stderr, _("%s: could not read from ready pipe: %s\n"),
progname, strerror(errno));
exit(1);
}
if (sscanf(xlogend, "%X/%X", &hi, &lo) != 2)
{
fprintf(stderr,
_("%s: could not parse transaction log location \"%s\"\n"),
progname, xlogend);
exit(1);
}
xlogendptr = ((uint64) hi) << 32 | lo;
has_xlogendptr = 1;
/*
* Fall through to check if we've reached the point further
* already.
*/
}
else
{
/*
* No data received on the pipe means we don't know the end
* position yet - so just say it's not time to stop yet.
*/
return false;
}
#else
/*
* On win32, has_xlogendptr is set by the main thread, so if it's not
* set here, we just go back and wait until it shows up.
*/
return false;
#endif
}
/*
* At this point we have an end pointer, so compare it to the current
* position to figure out if it's time to stop.
*/
if (segendpos >= xlogendptr)
return true;
/*
* Have end pointer, but haven't reached it yet - so tell the caller to
* keep streaming.
*/
return false;
}
| 0
|
9,668
|
bool InputHandler::shouldRequestSpellCheckingOptionsForPoint(Platform::IntPoint& point, const Element* touchedElement, imf_sp_text_t& spellCheckingOptionRequest)
{
if (!isActiveTextEdit())
return false;
Element* currentFocusElement = m_currentFocusElement.get();
if (!currentFocusElement || !currentFocusElement->isElementNode())
return false;
while (!currentFocusElement->isRootEditableElement()) {
Element* parentElement = currentFocusElement->parentElement();
if (!parentElement)
break;
currentFocusElement = parentElement;
}
if (touchedElement != currentFocusElement)
return false;
LayoutPoint contentPos(m_webPage->mapFromViewportToContents(point));
contentPos = DOMSupport::convertPointToFrame(m_webPage->mainFrame(), m_webPage->focusedOrMainFrame(), roundedIntPoint(contentPos));
Document* document = currentFocusElement->document();
ASSERT(document);
RenderedDocumentMarker* marker = document->markers()->renderedMarkerContainingPoint(contentPos, DocumentMarker::Spelling);
if (!marker)
return false;
m_didSpellCheckWord = true;
spellCheckingOptionRequest.startTextPosition = marker->startOffset();
spellCheckingOptionRequest.endTextPosition = marker->endOffset();
m_spellCheckingOptionsRequest.startTextPosition = 0;
m_spellCheckingOptionsRequest.endTextPosition = 0;
SpellingLog(LogLevelInfo, "InputHandler::shouldRequestSpellCheckingOptionsForPoint Found spelling marker at point %d, %d\nMarker start %d end %d",
point.x(), point.y(), spellCheckingOptionRequest.startTextPosition, spellCheckingOptionRequest.endTextPosition);
return true;
}
| 1
|
56,939
|
TRIO_PUBLIC_STRING trio_long_double_t trio_to_long_double TRIO_ARGS2((source, endp),
TRIO_CONST char* source,
char** endp)
{
#if defined(USE_STRTOLD)
return strtold(source, endp);
#else
int isNegative = FALSE;
int isExponentNegative = FALSE;
trio_long_double_t integer = 0.0;
trio_long_double_t fraction = 0.0;
unsigned long exponent = 0;
trio_long_double_t base;
trio_long_double_t fracdiv = 1.0;
trio_long_double_t value = 0.0;
/* First try hex-floats */
if ((source[0] == '0') && ((source[1] == 'x') || (source[1] == 'X')))
{
base = 16.0;
source += 2;
while (isxdigit((int)*source))
{
integer *= base;
integer += (isdigit((int)*source) ? (*source - '0')
: 10 + (internal_to_upper((int)*source) - 'A'));
source++;
}
if (*source == '.')
{
source++;
while (isxdigit((int)*source))
{
fracdiv /= base;
fraction += fracdiv * (isdigit((int)*source)
? (*source - '0')
: 10 + (internal_to_upper((int)*source) - 'A'));
source++;
}
if ((*source == 'p') || (*source == 'P'))
{
source++;
if ((*source == '+') || (*source == '-'))
{
isExponentNegative = (*source == '-');
source++;
}
while (isdigit((int)*source))
{
exponent *= 10;
exponent += (*source - '0');
source++;
}
}
}
/* For later use with exponent */
base = 2.0;
}
else /* Then try normal decimal floats */
{
base = 10.0;
isNegative = (*source == '-');
/* Skip sign */
if ((*source == '+') || (*source == '-'))
source++;
/* Integer part */
while (isdigit((int)*source))
{
integer *= base;
integer += (*source - '0');
source++;
}
if (*source == '.')
{
source++; /* skip decimal point */
while (isdigit((int)*source))
{
fracdiv /= base;
fraction += (*source - '0') * fracdiv;
source++;
}
}
if ((*source == 'e') || (*source == 'E')
#if TRIO_MICROSOFT
|| (*source == 'd') || (*source == 'D')
#endif
)
{
source++; /* Skip exponential indicator */
isExponentNegative = (*source == '-');
if ((*source == '+') || (*source == '-'))
source++;
while (isdigit((int)*source))
{
exponent *= (int)base;
exponent += (*source - '0');
source++;
}
}
}
value = integer + fraction;
if (exponent != 0)
{
if (isExponentNegative)
value /= trio_powl(base, (trio_long_double_t)exponent);
else
value *= trio_powl(base, (trio_long_double_t)exponent);
}
if (isNegative)
value = -value;
if (endp)
*endp = (char*)source;
return value;
#endif
}
| 0
|
138,343
|
local void *malloc_track(struct mem_track_s *mem, size_t size)
{
void *ptr;
ptr = malloc(size);
if (ptr != NULL) {
size = MALLOC_SIZE(ptr);
mem_track_grab(mem);
mem->num++;
mem->size += size;
if (mem->size > mem->max)
mem->max = mem->size;
mem_track_drop(mem);
}
return ptr;
}
| 0
|
125,485
|
cdf_unpack_header(cdf_header_t *h, char *buf)
{
size_t i;
size_t len = 0;
CDF_UNPACK(h->h_magic);
CDF_UNPACKA(h->h_uuid);
CDF_UNPACK(h->h_revision);
CDF_UNPACK(h->h_version);
CDF_UNPACK(h->h_byte_order);
CDF_UNPACK(h->h_sec_size_p2);
CDF_UNPACK(h->h_short_sec_size_p2);
CDF_UNPACKA(h->h_unused0);
CDF_UNPACK(h->h_num_sectors_in_sat);
CDF_UNPACK(h->h_secid_first_directory);
CDF_UNPACKA(h->h_unused1);
CDF_UNPACK(h->h_min_size_standard_stream);
CDF_UNPACK(h->h_secid_first_sector_in_short_sat);
CDF_UNPACK(h->h_num_sectors_in_short_sat);
CDF_UNPACK(h->h_secid_first_sector_in_master_sat);
CDF_UNPACK(h->h_num_sectors_in_master_sat);
for (i = 0; i < __arraycount(h->h_master_sat); i++)
CDF_UNPACK(h->h_master_sat[i]);
}
| 0
|
318,020
|
error::Error GLES2DecoderImpl::HandleAsyncTexImage2DCHROMIUM(
uint32 immediate_data_size, const cmds::AsyncTexImage2DCHROMIUM& c) {
TRACE_EVENT0("gpu", "GLES2DecoderImpl::HandleAsyncTexImage2DCHROMIUM");
GLenum target = static_cast<GLenum>(c.target);
GLint level = static_cast<GLint>(c.level);
GLenum internal_format = static_cast<GLenum>(c.internalformat);
GLsizei width = static_cast<GLsizei>(c.width);
GLsizei height = static_cast<GLsizei>(c.height);
GLint border = static_cast<GLint>(c.border);
GLenum format = static_cast<GLenum>(c.format);
GLenum type = static_cast<GLenum>(c.type);
uint32 pixels_shm_id = static_cast<uint32>(c.pixels_shm_id);
uint32 pixels_shm_offset = static_cast<uint32>(c.pixels_shm_offset);
uint32 pixels_size;
uint32 async_upload_token = static_cast<uint32>(c.async_upload_token);
uint32 sync_data_shm_id = static_cast<uint32>(c.sync_data_shm_id);
uint32 sync_data_shm_offset = static_cast<uint32>(c.sync_data_shm_offset);
base::ScopedClosureRunner scoped_completion_callback;
if (async_upload_token) {
base::Closure completion_closure =
AsyncUploadTokenCompletionClosure(async_upload_token,
sync_data_shm_id,
sync_data_shm_offset);
if (completion_closure.is_null())
return error::kInvalidArguments;
scoped_completion_callback.Reset(completion_closure);
}
if (!GLES2Util::ComputeImageDataSizes(
width, height, format, type, state_.unpack_alignment, &pixels_size, NULL,
NULL)) {
return error::kOutOfBounds;
}
const void* pixels = NULL;
if (pixels_shm_id != 0 || pixels_shm_offset != 0) {
pixels = GetSharedMemoryAs<const void*>(
pixels_shm_id, pixels_shm_offset, pixels_size);
if (!pixels) {
return error::kOutOfBounds;
}
}
TextureManager::DoTextImage2DArguments args = {
target, level, internal_format, width, height, border, format, type,
pixels, pixels_size};
TextureRef* texture_ref;
if (!texture_manager()->ValidateTexImage2D(
&state_, "glAsyncTexImage2DCHROMIUM", args, &texture_ref)) {
return error::kNoError;
}
Texture* texture = texture_ref->texture();
if (!ValidateAsyncTransfer(
"glAsyncTexImage2DCHROMIUM", texture_ref, target, level, pixels))
return error::kNoError;
if (texture->IsDefined()) {
LOCAL_SET_GL_ERROR(
GL_INVALID_OPERATION,
"glAsyncTexImage2DCHROMIUM", "already defined");
return error::kNoError;
}
if (!EnsureGPUMemoryAvailable(pixels_size)) {
LOCAL_SET_GL_ERROR(
GL_OUT_OF_MEMORY, "glAsyncTexImage2DCHROMIUM", "out of memory");
return error::kNoError;
}
AsyncTexImage2DParams tex_params = {
target, level, static_cast<GLenum>(internal_format),
width, height, border, format, type};
AsyncMemoryParams mem_params(
GetSharedMemoryBuffer(c.pixels_shm_id), c.pixels_shm_offset, pixels_size);
AsyncPixelTransferDelegate* delegate =
async_pixel_transfer_manager_->CreatePixelTransferDelegate(texture_ref,
tex_params);
texture->SetImmutable(true);
delegate->AsyncTexImage2D(
tex_params,
mem_params,
base::Bind(&TextureManager::SetLevelInfoFromParams,
base::Unretained(texture_manager()),
base::Unretained(texture_ref),
tex_params));
return error::kNoError;
}
| 0
|
461,941
|
static int pin_ggtt_status_page(struct intel_engine_cs *engine,
struct i915_vma *vma)
{
unsigned int flags;
if (!HAS_LLC(engine->i915) && i915_ggtt_has_aperture(engine->gt->ggtt))
/*
* On g33, we cannot place HWS above 256MiB, so
* restrict its pinning to the low mappable arena.
* Though this restriction is not documented for
* gen4, gen5, or byt, they also behave similarly
* and hang if the HWS is placed at the top of the
* GTT. To generalise, it appears that all !llc
* platforms have issues with us placing the HWS
* above the mappable region (even though we never
* actually map it).
*/
flags = PIN_MAPPABLE;
else
flags = PIN_HIGH;
return i915_ggtt_pin(vma, NULL, 0, flags);
}
| 0
|
199,699
|
static int handle_mkdir(struct fuse* fuse, struct fuse_handler* handler,
const struct fuse_in_header* hdr, const struct fuse_mkdir_in* req, const char* name)
{
struct node* parent_node;
char parent_path[PATH_MAX];
char child_path[PATH_MAX];
const char* actual_name;
pthread_mutex_lock(&fuse->global->lock);
parent_node = lookup_node_and_path_by_id_locked(fuse, hdr->nodeid,
parent_path, sizeof(parent_path));
TRACE("[%d] MKDIR %s 0%o @ %"PRIx64" (%s)\n", handler->token,
name, req->mode, hdr->nodeid, parent_node ? parent_node->name : "?");
pthread_mutex_unlock(&fuse->global->lock);
if (!parent_node || !(actual_name = find_file_within(parent_path, name,
child_path, sizeof(child_path), 1))) {
return -ENOENT;
}
if (!check_caller_access_to_name(fuse, hdr, parent_node, name, W_OK)) {
return -EACCES;
}
__u32 mode = (req->mode & (~0777)) | 0775;
if (mkdir(child_path, mode) < 0) {
return -errno;
}
/* When creating /Android/data and /Android/obb, mark them as .nomedia */
if (parent_node->perm == PERM_ANDROID && !strcasecmp(name, "data")) {
char nomedia[PATH_MAX];
snprintf(nomedia, PATH_MAX, "%s/.nomedia", child_path);
if (touch(nomedia, 0664) != 0) {
ERROR("Failed to touch(%s): %s\n", nomedia, strerror(errno));
return -ENOENT;
}
}
if (parent_node->perm == PERM_ANDROID && !strcasecmp(name, "obb")) {
char nomedia[PATH_MAX];
snprintf(nomedia, PATH_MAX, "%s/.nomedia", fuse->global->obb_path);
if (touch(nomedia, 0664) != 0) {
ERROR("Failed to touch(%s): %s\n", nomedia, strerror(errno));
return -ENOENT;
}
}
return fuse_reply_entry(fuse, hdr->unique, parent_node, name, actual_name, child_path);
}
| 0
|
427,741
|
static void rtreeMatchArgFree(void *pArg){
int i;
RtreeMatchArg *p = (RtreeMatchArg*)pArg;
for(i=0; i<p->nParam; i++){
sqlite3_value_free(p->apSqlParam[i]);
}
sqlite3_free(p);
}
| 0
|
295,707
|
valid_filetype(char_u *val)
{
return valid_name(val, ".-_");
}
| 0
|
126,880
|
TEST_F(HttpConnectionManagerImplTest, IdleTimeoutNoCodec) {
// Not used in the test.
delete codec_;
idle_timeout_ = (std::chrono::milliseconds(10));
Event::MockTimer* idle_timer = setUpTimer();
EXPECT_CALL(*idle_timer, enableTimer(_, _));
setup(false, "");
EXPECT_CALL(filter_callbacks_.connection_, close(Network::ConnectionCloseType::FlushWrite));
EXPECT_CALL(*idle_timer, disableTimer());
idle_timer->invokeCallback();
EXPECT_EQ(1U, stats_.named_.downstream_cx_idle_timeout_.value());
}
| 0
|
117,216
|
static int _server_handle_m(libgdbr_t *g, int (*cmd_cb) (void*, const char*, char*, size_t), void *core_ptr) {
int ret;
ut64 addr;
int length;
char *buf1, *buf2, cmd[64];
int buf1_len, buf2_len;
if (send_ack (g) < 0) {
return -1;
}
g->data[g->data_len] = 0;
sscanf (g->data, "m%"PFMT64x",%d", &addr, &length);
if (g->data_len < 4 || !strchr (g->data, ',')) {
return send_msg (g, "E01");
}
buf1_len = length;
buf2_len = length * 2 + 1;
if (!(buf1 = malloc (buf1_len))) {
return -1;
}
if (!(buf2 = malloc (buf2_len))) {
free (buf1);
return -1;
}
memset (buf2, 0, buf2_len);
snprintf (cmd, sizeof (cmd) - 1, "m %"PFMT64x" %d", addr, length);
if ((buf1_len = cmd_cb (core_ptr, cmd, buf1, buf1_len)) < 0) {
free (buf1);
free (buf2);
send_msg (g, "E01");
return -1;
}
pack_hex (buf1, buf1_len, buf2);
ret = send_msg (g, buf2);
free (buf1);
free (buf2);
return ret;
}
| 0
|
362,708
|
xmlXPathPopExternal (xmlXPathParserContextPtr ctxt) {
xmlXPathObjectPtr obj;
void * ret;
if ((ctxt == NULL) || (ctxt->value == NULL)) {
xmlXPathSetError(ctxt, XPATH_INVALID_OPERAND);
return(NULL);
}
if (ctxt->value->type != XPATH_USERS) {
xmlXPathSetTypeError(ctxt);
return(NULL);
}
obj = valuePop(ctxt);
ret = obj->user;
obj->user = NULL;
xmlXPathReleaseObject(ctxt->context, obj);
return(ret);
}
| 0
|
44,947
|
int TTF_GlyphMetrics32(TTF_Font *font, Uint32 ch,
int *minx, int *maxx, int *miny, int *maxy, int *advance)
{
c_glyph *glyph;
TTF_CHECK_POINTER(font, -1);
if (Find_GlyphMetrics(font, ch, &glyph) < 0) {
return -1;
}
if (minx) {
*minx = glyph->sz_left;
}
if (maxx) {
*maxx = glyph->sz_left + glyph->sz_width;
*maxx += 2 * font->outline_val;
}
if (miny) {
*miny = glyph->sz_top - glyph->sz_rows;
}
if (maxy) {
*maxy = glyph->sz_top;
*maxy += 2 * font->outline_val;
}
if (advance) {
*advance = FT_CEIL(glyph->advance);
}
return 0;
}
| 0
|
10,173
|
bool WebRequestPermissions::HideRequest(
const extensions::InfoMap* extension_info_map,
const extensions::WebRequestInfo& request) {
if (request.is_web_view)
return false;
if (request.is_pac_request)
return true;
bool is_request_from_browser = request.render_process_id == -1;
bool is_request_from_webui_renderer = false;
if (!is_request_from_browser) {
if (request.is_web_view)
return false;
if (extension_info_map &&
extension_info_map->process_map().Contains(extensions::kWebStoreAppId,
request.render_process_id)) {
return true;
}
is_request_from_webui_renderer =
content::ChildProcessSecurityPolicy::GetInstance()->HasWebUIBindings(
request.render_process_id);
}
return IsSensitiveURL(request.url, is_request_from_browser ||
is_request_from_webui_renderer) ||
!HasWebRequestScheme(request.url);
}
| 1
|
174,944
|
bool Performance::HasObserverFor(
PerformanceEntry::EntryType filter_type) const {
return observer_filter_options_ & filter_type;
}
| 0
|
214,316
|
static struct vm_area_struct* hugetlb_vma(unsigned long addr, struct mm_walk *walk)
{
struct vm_area_struct *vma;
/* We don't need vma lookup at all. */
if (!walk->hugetlb_entry)
return NULL;
VM_BUG_ON(!rwsem_is_locked(&walk->mm->mmap_sem));
vma = find_vma(walk->mm, addr);
if (vma && vma->vm_start <= addr && is_vm_hugetlb_page(vma))
return vma;
return NULL;
}
| 0
|
268,458
|
int wc_ecc_get_curve_idx_from_name(const char* curveName)
{
int curve_idx;
word32 len;
if (curveName == NULL)
return BAD_FUNC_ARG;
len = (word32)XSTRLEN(curveName);
for (curve_idx = 0; ecc_sets[curve_idx].size != 0; curve_idx++) {
if (
#ifndef WOLFSSL_ECC_CURVE_STATIC
ecc_sets[curve_idx].name &&
#endif
XSTRNCASECMP(ecc_sets[curve_idx].name, curveName, len) == 0) {
break;
}
}
if (ecc_sets[curve_idx].size == 0) {
WOLFSSL_MSG("ecc_set curve name not found");
return ECC_CURVE_INVALID;
}
return curve_idx;
}
| 0
|
375,272
|
RenameRelationInternal(Oid myrelid, const char *newrelname, bool is_internal)
{
Relation targetrelation;
Relation relrelation; /* for RELATION relation */
HeapTuple reltup;
Form_pg_class relform;
Oid namespaceId;
/*
* Grab an exclusive lock on the target table, index, sequence or view,
* which we will NOT release until end of transaction.
*/
targetrelation = relation_open(myrelid, AccessExclusiveLock);
namespaceId = RelationGetNamespace(targetrelation);
/*
* Find relation's pg_class tuple, and make sure newrelname isn't in use.
*/
relrelation = heap_open(RelationRelationId, RowExclusiveLock);
reltup = SearchSysCacheCopy1(RELOID, ObjectIdGetDatum(myrelid));
if (!HeapTupleIsValid(reltup)) /* shouldn't happen */
elog(ERROR, "cache lookup failed for relation %u", myrelid);
relform = (Form_pg_class) GETSTRUCT(reltup);
if (get_relname_relid(newrelname, namespaceId) != InvalidOid)
ereport(ERROR,
(errcode(ERRCODE_DUPLICATE_TABLE),
errmsg("relation \"%s\" already exists",
newrelname)));
/*
* Update pg_class tuple with new relname. (Scribbling on reltup is OK
* because it's a copy...)
*/
namestrcpy(&(relform->relname), newrelname);
simple_heap_update(relrelation, &reltup->t_self, reltup);
/* keep the system catalog indexes current */
CatalogUpdateIndexes(relrelation, reltup);
InvokeObjectPostAlterHookArg(RelationRelationId, myrelid, 0,
InvalidOid, is_internal);
heap_freetuple(reltup);
heap_close(relrelation, RowExclusiveLock);
/*
* Also rename the associated type, if any.
*/
if (OidIsValid(targetrelation->rd_rel->reltype))
RenameTypeInternal(targetrelation->rd_rel->reltype,
newrelname, namespaceId);
/*
* Also rename the associated constraint, if any.
*/
if (targetrelation->rd_rel->relkind == RELKIND_INDEX)
{
Oid constraintId = get_index_constraint(myrelid);
if (OidIsValid(constraintId))
RenameConstraintById(constraintId, newrelname);
}
/*
* Close rel, but keep exclusive lock!
*/
relation_close(targetrelation, NoLock);
}
| 0
|
276,506
|
static void UnsignedShortAttributeAttributeGetter(const v8::FunctionCallbackInfo<v8::Value>& info) {
v8::Local<v8::Object> holder = info.Holder();
TestObject* impl = V8TestObject::ToImpl(holder);
V8SetReturnValueUnsigned(info, impl->unsignedShortAttribute());
}
| 0
|
415,891
|
extern int lxc_rmdir_onedev(const char *path, const char *exclude)
{
struct stat mystat;
bool onedev = true;
if (is_native_overlayfs(path))
onedev = false;
if (lstat(path, &mystat) < 0) {
if (errno == ENOENT)
return 0;
ERROR("Failed to stat %s", path);
return -1;
}
return _recursive_rmdir(path, mystat.st_dev, exclude, 0, onedev);
}
| 0
|
74,062
|
store_two(int c, char *s)
{
s[0] = (char)((c >> 8) & 255);
s[1] = (char)(c & 255);
}
| 0
|
484,398
|
std::unique_ptr<JBIG2Bitmap> JBIG2Stream::readGenericRefinementRegion(int w, int h, int templ, bool tpgrOn, JBIG2Bitmap *refBitmap, int refDX, int refDY, int *atx, int *aty)
{
bool ltp;
unsigned int ltpCX, cx, cx0, cx2, cx3, cx4, tpgrCX0, tpgrCX1, tpgrCX2;
JBIG2BitmapPtr cxPtr0 = { nullptr, 0, 0 };
JBIG2BitmapPtr cxPtr1 = { nullptr, 0, 0 };
JBIG2BitmapPtr cxPtr2 = { nullptr, 0, 0 };
JBIG2BitmapPtr cxPtr3 = { nullptr, 0, 0 };
JBIG2BitmapPtr cxPtr4 = { nullptr, 0, 0 };
JBIG2BitmapPtr cxPtr5 = { nullptr, 0, 0 };
JBIG2BitmapPtr cxPtr6 = { nullptr, 0, 0 };
JBIG2BitmapPtr tpgrCXPtr0 = { nullptr, 0, 0 };
JBIG2BitmapPtr tpgrCXPtr1 = { nullptr, 0, 0 };
JBIG2BitmapPtr tpgrCXPtr2 = { nullptr, 0, 0 };
int x, y, pix;
if (!refBitmap) {
return nullptr;
}
auto bitmap = std::make_unique<JBIG2Bitmap>(0, w, h);
if (!bitmap->isOk()) {
return nullptr;
}
bitmap->clearToZero();
// set up the typical row context
if (templ) {
ltpCX = 0x008;
} else {
ltpCX = 0x0010;
}
ltp = false;
for (y = 0; y < h; ++y) {
if (templ) {
// set up the context
bitmap->getPixelPtr(0, y - 1, &cxPtr0);
cx0 = bitmap->nextPixel(&cxPtr0);
bitmap->getPixelPtr(-1, y, &cxPtr1);
refBitmap->getPixelPtr(-refDX, y - 1 - refDY, &cxPtr2);
refBitmap->getPixelPtr(-1 - refDX, y - refDY, &cxPtr3);
cx3 = refBitmap->nextPixel(&cxPtr3);
cx3 = (cx3 << 1) | refBitmap->nextPixel(&cxPtr3);
refBitmap->getPixelPtr(-refDX, y + 1 - refDY, &cxPtr4);
cx4 = refBitmap->nextPixel(&cxPtr4);
// set up the typical prediction context
tpgrCX0 = tpgrCX1 = tpgrCX2 = 0; // make gcc happy
if (tpgrOn) {
refBitmap->getPixelPtr(-1 - refDX, y - 1 - refDY, &tpgrCXPtr0);
tpgrCX0 = refBitmap->nextPixel(&tpgrCXPtr0);
tpgrCX0 = (tpgrCX0 << 1) | refBitmap->nextPixel(&tpgrCXPtr0);
tpgrCX0 = (tpgrCX0 << 1) | refBitmap->nextPixel(&tpgrCXPtr0);
refBitmap->getPixelPtr(-1 - refDX, y - refDY, &tpgrCXPtr1);
tpgrCX1 = refBitmap->nextPixel(&tpgrCXPtr1);
tpgrCX1 = (tpgrCX1 << 1) | refBitmap->nextPixel(&tpgrCXPtr1);
tpgrCX1 = (tpgrCX1 << 1) | refBitmap->nextPixel(&tpgrCXPtr1);
refBitmap->getPixelPtr(-1 - refDX, y + 1 - refDY, &tpgrCXPtr2);
tpgrCX2 = refBitmap->nextPixel(&tpgrCXPtr2);
tpgrCX2 = (tpgrCX2 << 1) | refBitmap->nextPixel(&tpgrCXPtr2);
tpgrCX2 = (tpgrCX2 << 1) | refBitmap->nextPixel(&tpgrCXPtr2);
} else {
tpgrCXPtr0.p = tpgrCXPtr1.p = tpgrCXPtr2.p = nullptr; // make gcc happy
tpgrCXPtr0.shift = tpgrCXPtr1.shift = tpgrCXPtr2.shift = 0;
tpgrCXPtr0.x = tpgrCXPtr1.x = tpgrCXPtr2.x = 0;
}
for (x = 0; x < w; ++x) {
// update the context
cx0 = ((cx0 << 1) | bitmap->nextPixel(&cxPtr0)) & 7;
cx3 = ((cx3 << 1) | refBitmap->nextPixel(&cxPtr3)) & 7;
cx4 = ((cx4 << 1) | refBitmap->nextPixel(&cxPtr4)) & 3;
if (tpgrOn) {
// update the typical predictor context
tpgrCX0 = ((tpgrCX0 << 1) | refBitmap->nextPixel(&tpgrCXPtr0)) & 7;
tpgrCX1 = ((tpgrCX1 << 1) | refBitmap->nextPixel(&tpgrCXPtr1)) & 7;
tpgrCX2 = ((tpgrCX2 << 1) | refBitmap->nextPixel(&tpgrCXPtr2)) & 7;
// check for a "typical" pixel
if (arithDecoder->decodeBit(ltpCX, refinementRegionStats)) {
ltp = !ltp;
}
if (tpgrCX0 == 0 && tpgrCX1 == 0 && tpgrCX2 == 0) {
bitmap->clearPixel(x, y);
continue;
} else if (tpgrCX0 == 7 && tpgrCX1 == 7 && tpgrCX2 == 7) {
bitmap->setPixel(x, y);
continue;
}
}
// build the context
cx = (cx0 << 7) | (bitmap->nextPixel(&cxPtr1) << 6) | (refBitmap->nextPixel(&cxPtr2) << 5) | (cx3 << 2) | cx4;
// decode the pixel
if ((pix = arithDecoder->decodeBit(cx, refinementRegionStats))) {
bitmap->setPixel(x, y);
}
}
} else {
// set up the context
bitmap->getPixelPtr(0, y - 1, &cxPtr0);
cx0 = bitmap->nextPixel(&cxPtr0);
bitmap->getPixelPtr(-1, y, &cxPtr1);
refBitmap->getPixelPtr(-refDX, y - 1 - refDY, &cxPtr2);
cx2 = refBitmap->nextPixel(&cxPtr2);
refBitmap->getPixelPtr(-1 - refDX, y - refDY, &cxPtr3);
cx3 = refBitmap->nextPixel(&cxPtr3);
cx3 = (cx3 << 1) | refBitmap->nextPixel(&cxPtr3);
refBitmap->getPixelPtr(-1 - refDX, y + 1 - refDY, &cxPtr4);
cx4 = refBitmap->nextPixel(&cxPtr4);
cx4 = (cx4 << 1) | refBitmap->nextPixel(&cxPtr4);
bitmap->getPixelPtr(atx[0], y + aty[0], &cxPtr5);
refBitmap->getPixelPtr(atx[1] - refDX, y + aty[1] - refDY, &cxPtr6);
// set up the typical prediction context
tpgrCX0 = tpgrCX1 = tpgrCX2 = 0; // make gcc happy
if (tpgrOn) {
refBitmap->getPixelPtr(-1 - refDX, y - 1 - refDY, &tpgrCXPtr0);
tpgrCX0 = refBitmap->nextPixel(&tpgrCXPtr0);
tpgrCX0 = (tpgrCX0 << 1) | refBitmap->nextPixel(&tpgrCXPtr0);
tpgrCX0 = (tpgrCX0 << 1) | refBitmap->nextPixel(&tpgrCXPtr0);
refBitmap->getPixelPtr(-1 - refDX, y - refDY, &tpgrCXPtr1);
tpgrCX1 = refBitmap->nextPixel(&tpgrCXPtr1);
tpgrCX1 = (tpgrCX1 << 1) | refBitmap->nextPixel(&tpgrCXPtr1);
tpgrCX1 = (tpgrCX1 << 1) | refBitmap->nextPixel(&tpgrCXPtr1);
refBitmap->getPixelPtr(-1 - refDX, y + 1 - refDY, &tpgrCXPtr2);
tpgrCX2 = refBitmap->nextPixel(&tpgrCXPtr2);
tpgrCX2 = (tpgrCX2 << 1) | refBitmap->nextPixel(&tpgrCXPtr2);
tpgrCX2 = (tpgrCX2 << 1) | refBitmap->nextPixel(&tpgrCXPtr2);
} else {
tpgrCXPtr0.p = tpgrCXPtr1.p = tpgrCXPtr2.p = nullptr; // make gcc happy
tpgrCXPtr0.shift = tpgrCXPtr1.shift = tpgrCXPtr2.shift = 0;
tpgrCXPtr0.x = tpgrCXPtr1.x = tpgrCXPtr2.x = 0;
}
for (x = 0; x < w; ++x) {
// update the context
cx0 = ((cx0 << 1) | bitmap->nextPixel(&cxPtr0)) & 3;
cx2 = ((cx2 << 1) | refBitmap->nextPixel(&cxPtr2)) & 3;
cx3 = ((cx3 << 1) | refBitmap->nextPixel(&cxPtr3)) & 7;
cx4 = ((cx4 << 1) | refBitmap->nextPixel(&cxPtr4)) & 7;
if (tpgrOn) {
// update the typical predictor context
tpgrCX0 = ((tpgrCX0 << 1) | refBitmap->nextPixel(&tpgrCXPtr0)) & 7;
tpgrCX1 = ((tpgrCX1 << 1) | refBitmap->nextPixel(&tpgrCXPtr1)) & 7;
tpgrCX2 = ((tpgrCX2 << 1) | refBitmap->nextPixel(&tpgrCXPtr2)) & 7;
// check for a "typical" pixel
if (arithDecoder->decodeBit(ltpCX, refinementRegionStats)) {
ltp = !ltp;
}
if (tpgrCX0 == 0 && tpgrCX1 == 0 && tpgrCX2 == 0) {
bitmap->clearPixel(x, y);
continue;
} else if (tpgrCX0 == 7 && tpgrCX1 == 7 && tpgrCX2 == 7) {
bitmap->setPixel(x, y);
continue;
}
}
// build the context
cx = (cx0 << 11) | (bitmap->nextPixel(&cxPtr1) << 10) | (cx2 << 8) | (cx3 << 5) | (cx4 << 2) | (bitmap->nextPixel(&cxPtr5) << 1) | refBitmap->nextPixel(&cxPtr6);
// decode the pixel
if ((pix = arithDecoder->decodeBit(cx, refinementRegionStats))) {
bitmap->setPixel(x, y);
}
}
}
}
return bitmap;
}
| 0
|
240,153
|
void RenderBox::clearContainingBlockOverrideSize()
{
if (gOverrideContainingBlockLogicalWidthMap)
gOverrideContainingBlockLogicalWidthMap->remove(this);
clearOverrideContainingBlockContentLogicalHeight();
}
| 0
|
474,996
|
static enum netdev_lag_hash bond_lag_hash_type(struct bonding *bond,
enum netdev_lag_tx_type type)
{
if (type != NETDEV_LAG_TX_TYPE_HASH)
return NETDEV_LAG_HASH_NONE;
switch (bond->params.xmit_policy) {
case BOND_XMIT_POLICY_LAYER2:
return NETDEV_LAG_HASH_L2;
case BOND_XMIT_POLICY_LAYER34:
return NETDEV_LAG_HASH_L34;
case BOND_XMIT_POLICY_LAYER23:
return NETDEV_LAG_HASH_L23;
case BOND_XMIT_POLICY_ENCAP23:
return NETDEV_LAG_HASH_E23;
case BOND_XMIT_POLICY_ENCAP34:
return NETDEV_LAG_HASH_E34;
case BOND_XMIT_POLICY_VLAN_SRCMAC:
return NETDEV_LAG_HASH_VLAN_SRCMAC;
default:
return NETDEV_LAG_HASH_UNKNOWN;
}
}
| 0
|
216,521
|
PHP_FUNCTION(stream_get_meta_data)
{
zval *arg1;
php_stream *stream;
zval *newval;
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "r", &arg1) == FAILURE) {
return;
}
php_stream_from_zval(stream, &arg1);
array_init(return_value);
if (stream->wrapperdata) {
MAKE_STD_ZVAL(newval);
MAKE_COPY_ZVAL(&stream->wrapperdata, newval);
add_assoc_zval(return_value, "wrapper_data", newval);
}
if (stream->wrapper) {
add_assoc_string(return_value, "wrapper_type", (char *)stream->wrapper->wops->label, 1);
}
add_assoc_string(return_value, "stream_type", (char *)stream->ops->label, 1);
add_assoc_string(return_value, "mode", stream->mode, 1);
#if 0 /* TODO: needs updating for new filter API */
if (stream->filterhead) {
php_stream_filter *filter;
MAKE_STD_ZVAL(newval);
array_init(newval);
for (filter = stream->filterhead; filter != NULL; filter = filter->next) {
add_next_index_string(newval, (char *)filter->fops->label, 1);
}
add_assoc_zval(return_value, "filters", newval);
}
#endif
add_assoc_long(return_value, "unread_bytes", stream->writepos - stream->readpos);
add_assoc_bool(return_value, "seekable", (stream->ops->seek) && (stream->flags & PHP_STREAM_FLAG_NO_SEEK) == 0);
if (stream->orig_path) {
add_assoc_string(return_value, "uri", stream->orig_path, 1);
}
if (!php_stream_populate_meta_data(stream, return_value)) {
add_assoc_bool(return_value, "timed_out", 0);
add_assoc_bool(return_value, "blocked", 1);
add_assoc_bool(return_value, "eof", php_stream_eof(stream));
}
}
| 0
|
148,299
|
set_default_router_id(data_t *data, char *new_id)
{
if (!new_id || !new_id[0])
return;
data->router_id = MALLOC(strlen(new_id)+1);
strcpy(data->router_id, new_id);
}
| 0
|
161,040
|
Handle<GlobalObject> global_object() {
return Handle<GlobalObject>(context()->global_object());
}
| 0
|
508,261
|
void X509_STORE_CTX_set_error(X509_STORE_CTX *ctx, int err)
{
ctx->error=err;
}
| 0
|
118,801
|
static int wait_for_vfork_done(struct task_struct *child,
struct completion *vfork)
{
int killed;
freezer_do_not_count();
cgroup_enter_frozen();
killed = wait_for_completion_killable(vfork);
cgroup_leave_frozen(false);
freezer_count();
if (killed) {
task_lock(child);
child->vfork_done = NULL;
task_unlock(child);
}
put_task_struct(child);
return killed;
}
| 0
|
138,930
|
static int futex_unlock_pi(u32 __user *uaddr, unsigned int flags)
{
struct futex_hash_bucket *hb;
struct futex_q *this, *next;
union futex_key key = FUTEX_KEY_INIT;
u32 uval, vpid = task_pid_vnr(current);
int ret;
retry:
if (get_user(uval, uaddr))
return -EFAULT;
/*
* We release only a lock we actually own:
*/
if ((uval & FUTEX_TID_MASK) != vpid)
return -EPERM;
ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &key, VERIFY_WRITE);
if (unlikely(ret != 0))
goto out;
hb = hash_futex(&key);
spin_lock(&hb->lock);
/*
* To avoid races, try to do the TID -> 0 atomic transition
* again. If it succeeds then we can return without waking
* anyone else up:
*/
if (!(uval & FUTEX_OWNER_DIED) &&
cmpxchg_futex_value_locked(&uval, uaddr, vpid, 0))
goto pi_faulted;
/*
* Rare case: we managed to release the lock atomically,
* no need to wake anyone else up:
*/
if (unlikely(uval == vpid))
goto out_unlock;
/*
* Ok, other tasks may need to be woken up - check waiters
* and do the wakeup if necessary:
*/
plist_for_each_entry_safe(this, next, &hb->chain, list) {
if (!match_futex (&this->key, &key))
continue;
ret = wake_futex_pi(uaddr, uval, this);
/*
* The atomic access to the futex value
* generated a pagefault, so retry the
* user-access and the wakeup:
*/
if (ret == -EFAULT)
goto pi_faulted;
goto out_unlock;
}
/*
* No waiters - kernel unlocks the futex:
*/
if (!(uval & FUTEX_OWNER_DIED)) {
ret = unlock_futex_pi(uaddr, uval);
if (ret == -EFAULT)
goto pi_faulted;
}
out_unlock:
spin_unlock(&hb->lock);
put_futex_key(&key);
out:
return ret;
pi_faulted:
spin_unlock(&hb->lock);
put_futex_key(&key);
ret = fault_in_user_writeable(uaddr);
if (!ret)
goto retry;
return ret;
}
| 0
|
10,037
|
void OfflineAudioDestinationHandler::NotifyComplete() {
DCHECK(IsMainThread());
render_thread_.reset();
if (Context() && Context()->GetExecutionContext())
Context()->FireCompletionEvent();
}
| 1
|
459,253
|
gst_h264_create_sei_memory_internal (guint8 nal_prefix_size,
gboolean packetized, GArray * messages)
{
NalWriter nw;
gint i;
gboolean have_written_data = FALSE;
nal_writer_init (&nw, nal_prefix_size, packetized);
if (messages->len == 0)
goto error;
GST_DEBUG ("Create SEI nal from array, len: %d", messages->len);
/* nal header */
/* forbidden_zero_bit */
WRITE_UINT8 (&nw, 0, 1);
/* nal_ref_idc, zero for sei nalu */
WRITE_UINT8 (&nw, 0, 2);
/* nal_unit_type */
WRITE_UINT8 (&nw, GST_H264_NAL_SEI, 5);
for (i = 0; i < messages->len; i++) {
GstH264SEIMessage *msg = &g_array_index (messages, GstH264SEIMessage, i);
guint32 payload_size_data = 0;
guint32 payload_size_in_bits = 0;
guint32 payload_type_data = msg->payloadType;
gboolean need_align = FALSE;
switch (payload_type_data) {
case GST_H264_SEI_REGISTERED_USER_DATA:{
GstH264RegisteredUserData *rud = &msg->payload.registered_user_data;
/* itu_t_t35_country_code: 8 bits */
payload_size_data = 1;
if (rud->country_code == 0xff) {
/* itu_t_t35_country_code_extension_byte */
payload_size_data++;
}
payload_size_data += rud->size;
break;
}
case GST_H264_SEI_FRAME_PACKING:{
GstH264FramePacking *frame_packing = &msg->payload.frame_packing;
guint leading_zeros, rest;
/* frame_packing_arrangement_id: exp-golomb bits */
count_exp_golomb_bits (frame_packing->frame_packing_id,
&leading_zeros, &rest);
payload_size_in_bits = leading_zeros + rest;
/* frame_packing_arrangement_cancel_flag: 1 bit */
payload_size_in_bits++;
if (!frame_packing->frame_packing_cancel_flag) {
/* frame_packing_arrangement_type: 7 bits
* quincunx_sampling_flag: 1 bit
* content_interpretation_type: 6 bit
* spatial_flipping_flag: 1 bit
* frame0_flipped_flag: 1 bit
* field_views_flag: 1 bit
* current_frame_is_frame0_flag: 1 bit
* frame0_self_contained_flag: 1 bit
* frame1_self_contained_flag: 1 bit
*/
payload_size_in_bits += 20;
if (!frame_packing->quincunx_sampling_flag &&
frame_packing->frame_packing_type !=
GST_H264_FRAME_PACKING_TEMPORAL_INTERLEAVING) {
/* frame0_grid_position_x: 4bits
* frame0_grid_position_y: 4bits
* frame1_grid_position_x: 4bits
* frame1_grid_position_y: 4bits
*/
payload_size_in_bits += 16;
}
/* frame_packing_arrangement_reserved_byte: 8 bits */
payload_size_in_bits += 8;
/* frame_packing_arrangement_repetition_period: exp-golomb bits */
count_exp_golomb_bits (frame_packing->frame_packing_repetition_period,
&leading_zeros, &rest);
payload_size_in_bits += (leading_zeros + rest);
}
/* frame_packing_arrangement_extension_flag: 1 bit */
payload_size_in_bits++;
payload_size_data = payload_size_in_bits >> 3;
if ((payload_size_in_bits & 0x7) != 0) {
GST_INFO ("Bits for Frame Packing SEI is not byte aligned");
payload_size_data++;
need_align = TRUE;
}
break;
}
case GST_H264_SEI_MASTERING_DISPLAY_COLOUR_VOLUME:
/* x, y 16 bits per RGB channel
* x, y 16 bits white point
* max, min luminance 32 bits
*
* (2 * 2 * 3) + (2 * 2) + (4 * 2) = 24 bytes
*/
payload_size_data = 24;
break;
case GST_H264_SEI_CONTENT_LIGHT_LEVEL:
/* maxCLL and maxFALL per 16 bits
*
* 2 * 2 = 4 bytes
*/
payload_size_data = 4;
break;
case GST_H264_SEI_PIC_TIMING:{
GstH264PicTiming *tim = &msg->payload.pic_timing;
const guint8 num_clock_ts_table[9] = {
1, 1, 1, 2, 2, 3, 3, 2, 3
};
guint8 num_clock_num_ts;
guint i;
if (!tim->CpbDpbDelaysPresentFlag && !tim->pic_struct_present_flag) {
GST_WARNING
("Both CpbDpbDelaysPresentFlag and pic_struct_present_flag are zero");
break;
}
if (tim->CpbDpbDelaysPresentFlag) {
payload_size_in_bits = tim->cpb_removal_delay_length_minus1 + 1;
payload_size_in_bits += tim->dpb_output_delay_length_minus1 + 1;
}
if (tim->pic_struct_present_flag) {
/* pic_struct: 4bits */
payload_size_in_bits += 4;
num_clock_num_ts = num_clock_ts_table[tim->pic_struct];
for (i = 0; i < num_clock_num_ts; i++) {
/* clock_timestamp_flag: 1bit */
payload_size_in_bits++;
if (tim->clock_timestamp_flag[i]) {
GstH264ClockTimestamp *timestamp = &tim->clock_timestamp[i];
/* ct_type: 2bits
* nuit_field_based_flag: 1bit
* counting_type: 5bits
* full_timestamp_flag: 1bit
* discontinuity_flag: 1bit
* cnt_dropped_flag: 1bit
* n_frames: 8bits
*/
payload_size_in_bits += 19;
if (timestamp->full_timestamp_flag) {
/* seconds_value: 6bits
* minutes_value: 6bits
* hours_value: 5bits
*/
payload_size_in_bits += 17;
} else {
/* seconds_flag: 1bit */
payload_size_in_bits++;
if (timestamp->seconds_flag) {
/* seconds_value: 6bits
* minutes_flag: 1bit
*/
payload_size_in_bits += 7;
if (timestamp->minutes_flag) {
/* minutes_value: 6bits
* hours_flag: 1bits
*/
payload_size_in_bits += 7;
if (timestamp->hours_flag) {
/* hours_value: 5bits */
payload_size_in_bits += 5;
}
}
}
}
/* time_offset_length bits */
payload_size_in_bits += tim->time_offset_length;
}
}
}
payload_size_data = payload_size_in_bits >> 3;
if ((payload_size_in_bits & 0x7) != 0) {
GST_INFO ("Bits for Picture Timing SEI is not byte aligned");
payload_size_data++;
need_align = TRUE;
}
break;
}
default:
break;
}
if (payload_size_data == 0) {
GST_FIXME ("Unsupported SEI type %d", msg->payloadType);
continue;
}
/* write payload type bytes */
while (payload_type_data >= 0xff) {
WRITE_UINT8 (&nw, 0xff, 8);
payload_type_data -= -0xff;
}
WRITE_UINT8 (&nw, payload_type_data, 8);
/* write payload size bytes */
while (payload_size_data >= 0xff) {
WRITE_UINT8 (&nw, 0xff, 8);
payload_size_data -= -0xff;
}
WRITE_UINT8 (&nw, payload_size_data, 8);
switch (msg->payloadType) {
case GST_H264_SEI_REGISTERED_USER_DATA:
GST_DEBUG ("Writing \"Registered user data\"");
if (!gst_h264_write_sei_registered_user_data (&nw,
&msg->payload.registered_user_data)) {
GST_WARNING ("Failed to write \"Registered user data\"");
goto error;
}
have_written_data = TRUE;
break;
case GST_H264_SEI_FRAME_PACKING:
GST_DEBUG ("Writing \"Frame packing\"");
if (!gst_h264_write_sei_frame_packing (&nw,
&msg->payload.frame_packing)) {
GST_WARNING ("Failed to write \"Frame packing\"");
goto error;
}
have_written_data = TRUE;
break;
case GST_H264_SEI_MASTERING_DISPLAY_COLOUR_VOLUME:
GST_DEBUG ("Wrtiting \"Mastering display colour volume\"");
if (!gst_h264_write_sei_mastering_display_colour_volume (&nw,
&msg->payload.mastering_display_colour_volume)) {
GST_WARNING ("Failed to write \"Mastering display colour volume\"");
goto error;
}
have_written_data = TRUE;
break;
case GST_H264_SEI_CONTENT_LIGHT_LEVEL:
GST_DEBUG ("Writing \"Content light level\"");
if (!gst_h264_write_sei_content_light_level_info (&nw,
&msg->payload.content_light_level)) {
GST_WARNING ("Failed to write \"Content light level\"");
goto error;
}
have_written_data = TRUE;
break;
case GST_H264_SEI_PIC_TIMING:
GST_DEBUG ("Writing \"Picture timing\"");
if (!gst_h264_write_sei_pic_timing (&nw, &msg->payload.pic_timing)) {
GST_WARNING ("Failed to write \"Picture timing\"");
goto error;
}
have_written_data = TRUE;
break;
default:
break;
}
if (need_align && !nal_writer_do_rbsp_trailing_bits (&nw)) {
GST_WARNING ("Cannot insert traling bits");
goto error;
}
}
if (!have_written_data) {
GST_WARNING ("No written sei data");
goto error;
}
if (!nal_writer_do_rbsp_trailing_bits (&nw)) {
GST_WARNING ("Failed to insert rbsp trailing bits");
goto error;
}
return nal_writer_reset_and_get_memory (&nw);
error:
nal_writer_reset (&nw);
return NULL;
}
| 0
|
460,781
|
gboolean reds_config_get_agent_mouse(const RedsState *reds)
{
return reds->config->agent_mouse;
}
| 0
|
34,689
|
int splice_grow_spd(const struct pipe_inode_info *pipe, struct splice_pipe_desc *spd)
{
unsigned int buffers = ACCESS_ONCE(pipe->buffers);
spd->nr_pages_max = buffers;
if (buffers <= PIPE_DEF_BUFFERS)
return 0;
spd->pages = kmalloc(buffers * sizeof(struct page *), GFP_KERNEL);
spd->partial = kmalloc(buffers * sizeof(struct partial_page), GFP_KERNEL);
if (spd->pages && spd->partial)
return 0;
kfree(spd->pages);
kfree(spd->partial);
return -ENOMEM;
}
| 0
|
426,355
|
static MagickBooleanType TraceSVGImage(Image *image,ExceptionInfo *exception)
{
#if defined(MAGICKCORE_AUTOTRACE_DELEGATE)
{
at_bitmap_type
*trace;
at_fitting_opts_type
*fitting_options;
at_output_opts_type
*output_options;
at_splines_type
*splines;
ImageType
type;
register const PixelPacket
*p;
register ssize_t
i,
x;
size_t
number_planes;
ssize_t
y;
/*
Trace image and write as SVG.
*/
fitting_options=at_fitting_opts_new();
output_options=at_output_opts_new();
type=GetImageType(image,exception);
number_planes=3;
if ((type == BilevelType) || (type == GrayscaleType))
number_planes=1;
trace=at_bitmap_new(image->columns,image->rows,number_planes);
i=0;
for (y=0; y < (ssize_t) image->rows; y++)
{
p=GetVirtualPixels(image,0,y,image->columns,1,exception);
if (p == (const PixelPacket *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
trace->bitmap[i++]=GetPixelRed(p);
if (number_planes == 3)
{
trace->bitmap[i++]=GetPixelGreen(p);
trace->bitmap[i++]=GetPixelBlue(p);
}
p++;
}
}
splines=at_splines_new_full(trace,fitting_options,NULL,NULL,NULL,NULL,NULL,
NULL);
at_splines_write(at_output_get_handler_by_suffix((char *) "svg"),
GetBlobFileHandle(image),image->filename,output_options,splines,NULL,
NULL);
/*
Free resources.
*/
at_splines_free(splines);
at_bitmap_free(trace);
at_output_opts_free(output_options);
at_fitting_opts_free(fitting_options);
}
#else
{
char
*base64,
message[MaxTextExtent];
Image
*clone_image;
ImageInfo
*image_info;
register char
*p;
size_t
blob_length,
encode_length;
ssize_t
i;
unsigned char
*blob;
(void) WriteBlobString(image,
"<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"no\"?>\n");
(void) WriteBlobString(image,
"<!DOCTYPE svg PUBLIC \"-//W3C//DTD SVG 1.1//EN\"");
(void) WriteBlobString(image,
" \"http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd\">\n");
(void) FormatLocaleString(message,MaxTextExtent,
"<svg version=\"1.1\" id=\"Layer_1\" "
"xmlns=\"http://www.w3.org/2000/svg\" "
"xmlns:xlink=\"http://www.w3.org/1999/xlink\" x=\"0px\" y=\"0px\" "
"width=\"%.20gpx\" height=\"%.20gpx\" viewBox=\"0 0 %.20g %.20g\" "
"enable-background=\"new 0 0 %.20g %.20g\" xml:space=\"preserve\">",
(double) image->columns,(double) image->rows,
(double) image->columns,(double) image->rows,
(double) image->columns,(double) image->rows);
(void) WriteBlobString(image,message);
clone_image=CloneImage(image,0,0,MagickTrue,exception);
if (clone_image == (Image *) NULL)
return(MagickFalse);
image_info=AcquireImageInfo();
(void) CopyMagickString(image_info->magick,"PNG",MaxTextExtent);
blob_length=2048;
blob=(unsigned char *) ImageToBlob(image_info,clone_image,&blob_length,
exception);
clone_image=DestroyImage(clone_image);
image_info=DestroyImageInfo(image_info);
if (blob == (unsigned char *) NULL)
return(MagickFalse);
encode_length=0;
base64=Base64Encode(blob,blob_length,&encode_length);
blob=(unsigned char *) RelinquishMagickMemory(blob);
(void) FormatLocaleString(message,MaxTextExtent,
" <image id=\"image%.20g\" width=\"%.20g\" height=\"%.20g\" "
"x=\"%.20g\" y=\"%.20g\"\n href=\"data:image/png;base64,",
(double) image->scene,(double) image->columns,(double) image->rows,
(double) image->page.x,(double) image->page.y);
(void) WriteBlobString(image,message);
p=base64;
for (i=(ssize_t) encode_length; i > 0; i-=76)
{
(void) FormatLocaleString(message,MaxTextExtent,"%.76s",p);
(void) WriteBlobString(image,message);
p+=76;
if (i > 76)
(void) WriteBlobString(image,"\n");
}
base64=DestroyString(base64);
(void) WriteBlobString(image,"\" />\n");
(void) WriteBlobString(image,"</svg>\n");
}
#endif
(void) CloseBlob(image);
return(MagickTrue);
}
| 0
|
378,545
|
static int wsgi_find_path_info(const char *uri, const char *path_info)
{
int lu = strlen(uri);
int lp = strlen(path_info);
while (lu-- && lp-- && uri[lu] == path_info[lp]) {
if (path_info[lp] == '/') {
while (lu && uri[lu-1] == '/') lu--;
}
}
if (lu == -1) {
lu = 0;
}
while (uri[lu] != '\0' && uri[lu] != '/') {
lu++;
}
return lu;
}
| 0
|
387,173
|
static void vmxnet_tx_pkt_do_sw_csum(struct VmxnetTxPkt *pkt)
{
struct iovec *iov = &pkt->vec[VMXNET_TX_PKT_L2HDR_FRAG];
uint32_t csum_cntr;
uint16_t csum = 0;
/* num of iovec without vhdr */
uint32_t iov_len = pkt->payload_frags + VMXNET_TX_PKT_PL_START_FRAG - 1;
uint16_t csl;
struct ip_header *iphdr;
size_t csum_offset = pkt->virt_hdr.csum_start + pkt->virt_hdr.csum_offset;
/* Put zero to checksum field */
iov_from_buf(iov, iov_len, csum_offset, &csum, sizeof csum);
/* Calculate L4 TCP/UDP checksum */
csl = pkt->payload_len;
/* data checksum */
csum_cntr =
net_checksum_add_iov(iov, iov_len, pkt->virt_hdr.csum_start, csl);
/* add pseudo header to csum */
iphdr = pkt->vec[VMXNET_TX_PKT_L3HDR_FRAG].iov_base;
csum_cntr += eth_calc_pseudo_hdr_csum(iphdr, csl);
/* Put the checksum obtained into the packet */
csum = cpu_to_be16(net_checksum_finish(csum_cntr));
iov_from_buf(iov, iov_len, csum_offset, &csum, sizeof csum);
}
| 0
|
303,797
|
static int rtnl_phys_port_id_fill(struct sk_buff *skb, struct net_device *dev)
{
int err;
struct netdev_phys_item_id ppid;
err = dev_get_phys_port_id(dev, &ppid);
if (err) {
if (err == -EOPNOTSUPP)
return 0;
return err;
}
if (nla_put(skb, IFLA_PHYS_PORT_ID, ppid.id_len, ppid.id))
return -EMSGSIZE;
return 0;
}
| 0
|
58,082
|
#else
| 0
|
494,036
|
static void SerializeGltfBufferBin(Buffer &buffer, json &o,
std::vector<unsigned char> &binBuffer) {
SerializeNumberProperty("byteLength", buffer.data.size(), o);
binBuffer = buffer.data;
if (buffer.name.size()) SerializeStringProperty("name", buffer.name, o);
if (buffer.extras.Type() != NULL_TYPE) {
SerializeValue("extras", buffer.extras, o);
}
}
| 0
|
101,961
|
static ssize_t ipmi_interrupts_enabled_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct smi_info *smi_info = dev_get_drvdata(dev);
int enabled = smi_info->io.irq && !smi_info->interrupt_disabled;
return snprintf(buf, 10, "%d\n", enabled);
}
| 0
|
10,756
|
int phar_parse_zipfile(php_stream *fp, char *fname, int fname_len, char *alias, int alias_len, phar_archive_data** pphar, char **error TSRMLS_DC) /* {{{ */
{
phar_zip_dir_end locator;
char buf[sizeof(locator) + 65536];
long size;
php_uint16 i;
phar_archive_data *mydata = NULL;
phar_entry_info entry = {0};
char *p = buf, *ext, *actual_alias = NULL;
char *metadata = NULL;
size = php_stream_tell(fp);
if (size > sizeof(locator) + 65536) {
/* seek to max comment length + end of central directory record */
size = sizeof(locator) + 65536;
if (FAILURE == php_stream_seek(fp, -size, SEEK_END)) {
php_stream_close(fp);
if (error) {
spprintf(error, 4096, "phar error: unable to search for end of central directory in zip-based phar \"%s\"", fname);
}
return FAILURE;
}
} else {
php_stream_seek(fp, 0, SEEK_SET);
}
if (!php_stream_read(fp, buf, size)) {
php_stream_close(fp);
if (error) {
spprintf(error, 4096, "phar error: unable to read in data to search for end of central directory in zip-based phar \"%s\"", fname);
}
return FAILURE;
}
while ((p=(char *) memchr(p + 1, 'P', (size_t) (size - (p + 1 - buf)))) != NULL) {
if (!memcmp(p + 1, "K\5\6", 3)) {
memcpy((void *)&locator, (void *) p, sizeof(locator));
if (PHAR_GET_16(locator.centraldisk) != 0 || PHAR_GET_16(locator.disknumber) != 0) {
/* split archives not handled */
php_stream_close(fp);
if (error) {
spprintf(error, 4096, "phar error: split archives spanning multiple zips cannot be processed in zip-based phar \"%s\"", fname);
}
return FAILURE;
}
if (PHAR_GET_16(locator.counthere) != PHAR_GET_16(locator.count)) {
if (error) {
spprintf(error, 4096, "phar error: corrupt zip archive, conflicting file count in end of central directory record in zip-based phar \"%s\"", fname);
}
php_stream_close(fp);
return FAILURE;
}
mydata = pecalloc(1, sizeof(phar_archive_data), PHAR_G(persist));
mydata->is_persistent = PHAR_G(persist);
/* read in archive comment, if any */
if (PHAR_GET_16(locator.comment_len)) {
metadata = p + sizeof(locator);
if (PHAR_GET_16(locator.comment_len) != size - (metadata - buf)) {
if (error) {
spprintf(error, 4096, "phar error: corrupt zip archive, zip file comment truncated in zip-based phar \"%s\"", fname);
}
php_stream_close(fp);
pefree(mydata, mydata->is_persistent);
return FAILURE;
}
mydata->metadata_len = PHAR_GET_16(locator.comment_len);
if (phar_parse_metadata(&metadata, &mydata->metadata, PHAR_GET_16(locator.comment_len) TSRMLS_CC) == FAILURE) {
mydata->metadata_len = 0;
/* if not valid serialized data, it is a regular string */
if (entry.is_persistent) {
ALLOC_PERMANENT_ZVAL(mydata->metadata);
} else {
ALLOC_ZVAL(mydata->metadata);
}
INIT_ZVAL(*mydata->metadata);
metadata = pestrndup(metadata, PHAR_GET_16(locator.comment_len), mydata->is_persistent);
ZVAL_STRINGL(mydata->metadata, metadata, PHAR_GET_16(locator.comment_len), 0);
}
} else {
mydata->metadata = NULL;
}
goto foundit;
}
}
php_stream_close(fp);
if (error) {
spprintf(error, 4096, "phar error: end of central directory not found in zip-based phar \"%s\"", fname);
}
return FAILURE;
foundit:
mydata->fname = pestrndup(fname, fname_len, mydata->is_persistent);
#ifdef PHP_WIN32
phar_unixify_path_separators(mydata->fname, fname_len);
#endif
mydata->is_zip = 1;
mydata->fname_len = fname_len;
ext = strrchr(mydata->fname, '/');
if (ext) {
mydata->ext = memchr(ext, '.', (mydata->fname + fname_len) - ext);
if (mydata->ext == ext) {
mydata->ext = memchr(ext + 1, '.', (mydata->fname + fname_len) - ext - 1);
}
if (mydata->ext) {
mydata->ext_len = (mydata->fname + fname_len) - mydata->ext;
}
}
/* clean up on big-endian systems */
/* seek to central directory */
php_stream_seek(fp, PHAR_GET_32(locator.cdir_offset), SEEK_SET);
/* read in central directory */
zend_hash_init(&mydata->manifest, PHAR_GET_16(locator.count),
zend_get_hash_value, destroy_phar_manifest_entry, (zend_bool)mydata->is_persistent);
zend_hash_init(&mydata->mounted_dirs, 5,
zend_get_hash_value, NULL, (zend_bool)mydata->is_persistent);
zend_hash_init(&mydata->virtual_dirs, PHAR_GET_16(locator.count) * 2,
zend_get_hash_value, NULL, (zend_bool)mydata->is_persistent);
entry.phar = mydata;
entry.is_zip = 1;
entry.fp_type = PHAR_FP;
entry.is_persistent = mydata->is_persistent;
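/* error-exit helpers: tear down the half-built archive data, close the stream, report the error and return FAILURE */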
#define PHAR_ZIP_FAIL_FREE(errmsg, save) \
zend_hash_destroy(&mydata->manifest); \
mydata->manifest.arBuckets = 0; \
zend_hash_destroy(&mydata->mounted_dirs); \
mydata->mounted_dirs.arBuckets = 0; \
zend_hash_destroy(&mydata->virtual_dirs); \
mydata->virtual_dirs.arBuckets = 0; \
php_stream_close(fp); \
if (mydata->metadata) { \
zval_dtor(mydata->metadata); \
} \
if (mydata->signature) { \
efree(mydata->signature); \
} \
if (error) { \
spprintf(error, 4096, "phar error: %s in zip-based phar \"%s\"", errmsg, mydata->fname); \
} \
pefree(mydata->fname, mydata->is_persistent); \
if (mydata->alias) { \
pefree(mydata->alias, mydata->is_persistent); \
} \
pefree(mydata, mydata->is_persistent); \
efree(save); \
return FAILURE;
#define PHAR_ZIP_FAIL(errmsg) \
zend_hash_destroy(&mydata->manifest); \
mydata->manifest.arBuckets = 0; \
zend_hash_destroy(&mydata->mounted_dirs); \
mydata->mounted_dirs.arBuckets = 0; \
zend_hash_destroy(&mydata->virtual_dirs); \
mydata->virtual_dirs.arBuckets = 0; \
php_stream_close(fp); \
if (mydata->metadata) { \
zval_dtor(mydata->metadata); \
} \
if (mydata->signature) { \
efree(mydata->signature); \
} \
if (error) { \
spprintf(error, 4096, "phar error: %s in zip-based phar \"%s\"", errmsg, mydata->fname); \
} \
pefree(mydata->fname, mydata->is_persistent); \
if (mydata->alias) { \
pefree(mydata->alias, mydata->is_persistent); \
} \
pefree(mydata, mydata->is_persistent); \
return FAILURE;
/* add each central directory item to the manifest */
for (i = 0; i < PHAR_GET_16(locator.count); ++i) {
phar_zip_central_dir_file zipentry;
off_t beforeus = php_stream_tell(fp);
if (sizeof(zipentry) != php_stream_read(fp, (char *) &zipentry, sizeof(zipentry))) {
PHAR_ZIP_FAIL("unable to read central directory entry, truncated");
}
/* clean up for big-endian systems */
if (memcmp("PK\1\2", zipentry.signature, 4)) {
/* corrupted entry */
PHAR_ZIP_FAIL("corrupted central directory entry, no magic signature");
}
if (entry.is_persistent) {
entry.manifest_pos = i;
}
entry.compressed_filesize = PHAR_GET_32(zipentry.compsize);
entry.uncompressed_filesize = PHAR_GET_32(zipentry.uncompsize);
entry.crc32 = PHAR_GET_32(zipentry.crc32);
/* do not PHAR_GET_16 either on the next line */
entry.timestamp = phar_zip_d2u_time(zipentry.timestamp, zipentry.datestamp);
entry.flags = PHAR_ENT_PERM_DEF_FILE;
entry.header_offset = PHAR_GET_32(zipentry.offset);
entry.offset = entry.offset_abs = PHAR_GET_32(zipentry.offset) + sizeof(phar_zip_file_header) + PHAR_GET_16(zipentry.filename_len) +
PHAR_GET_16(zipentry.extra_len);
if (PHAR_GET_16(zipentry.flags) & PHAR_ZIP_FLAG_ENCRYPTED) {
PHAR_ZIP_FAIL("Cannot process encrypted zip files");
}
if (!PHAR_GET_16(zipentry.filename_len)) {
PHAR_ZIP_FAIL("Cannot process zips created from stdin (zero-length filename)");
}
entry.filename_len = PHAR_GET_16(zipentry.filename_len);
entry.filename = (char *) pemalloc(entry.filename_len + 1, entry.is_persistent);
if (entry.filename_len != php_stream_read(fp, entry.filename, entry.filename_len)) {
pefree(entry.filename, entry.is_persistent);
PHAR_ZIP_FAIL("unable to read in filename from central directory, truncated");
}
entry.filename[entry.filename_len] = '\0';
if (entry.filename[entry.filename_len - 1] == '/') {
entry.is_dir = 1;
if(entry.filename_len > 1) {
entry.filename_len--;
}
entry.flags |= PHAR_ENT_PERM_DEF_DIR;
} else {
entry.is_dir = 0;
}
if (entry.filename_len == sizeof(".phar/signature.bin")-1 && !strncmp(entry.filename, ".phar/signature.bin", sizeof(".phar/signature.bin")-1)) {
size_t read;
php_stream *sigfile;
off_t now;
char *sig;
now = php_stream_tell(fp);
pefree(entry.filename, entry.is_persistent);
sigfile = php_stream_fopen_tmpfile();
if (!sigfile) {
PHAR_ZIP_FAIL("couldn't open temporary file");
}
php_stream_seek(fp, 0, SEEK_SET);
/* copy file contents + local headers and zip comment, if any, to be hashed for signature */
phar_stream_copy_to_stream(fp, sigfile, entry.header_offset, NULL);
/* seek to central directory */
php_stream_seek(fp, PHAR_GET_32(locator.cdir_offset), SEEK_SET);
/* copy central directory header */
phar_stream_copy_to_stream(fp, sigfile, beforeus - PHAR_GET_32(locator.cdir_offset), NULL);
if (metadata) {
php_stream_write(sigfile, metadata, PHAR_GET_16(locator.comment_len));
}
php_stream_seek(fp, sizeof(phar_zip_file_header) + entry.header_offset + entry.filename_len + PHAR_GET_16(zipentry.extra_len), SEEK_SET);
sig = (char *) emalloc(entry.uncompressed_filesize);
read = php_stream_read(fp, sig, entry.uncompressed_filesize);
if (read != entry.uncompressed_filesize) {
php_stream_close(sigfile);
efree(sig);
PHAR_ZIP_FAIL("signature cannot be read");
}
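/* the signature entry stores the signature flags in its first 4 bytes; the signature data itself begins at offset 8 */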
mydata->sig_flags = PHAR_GET_32(sig);
if (FAILURE == phar_verify_signature(sigfile, php_stream_tell(sigfile), mydata->sig_flags, sig + 8, entry.uncompressed_filesize - 8, fname, &mydata->signature, &mydata->sig_len, error TSRMLS_CC)) {
efree(sig);
if (error) {
char *save;
php_stream_close(sigfile);
spprintf(&save, 4096, "signature cannot be verified: %s", *error);
efree(*error);
PHAR_ZIP_FAIL_FREE(save, save);
} else {
php_stream_close(sigfile);
PHAR_ZIP_FAIL("signature cannot be verified");
}
}
php_stream_close(sigfile);
efree(sig);
/* signature checked out, let's ensure this is the last file in the phar */
if (i != PHAR_GET_16(locator.count) - 1) {
PHAR_ZIP_FAIL("entries exist after signature, invalid phar");
}
continue;
}
phar_add_virtual_dirs(mydata, entry.filename, entry.filename_len TSRMLS_CC);
if (PHAR_GET_16(zipentry.extra_len)) {
off_t loc = php_stream_tell(fp);
if (FAILURE == phar_zip_process_extra(fp, &entry, PHAR_GET_16(zipentry.extra_len) TSRMLS_CC)) {
pefree(entry.filename, entry.is_persistent);
PHAR_ZIP_FAIL("Unable to process extra field header for file in central directory");
}
php_stream_seek(fp, loc + PHAR_GET_16(zipentry.extra_len), SEEK_SET);
}
switch (PHAR_GET_16(zipentry.compressed)) {
case PHAR_ZIP_COMP_NONE :
/* compression flag already set */
break;
case PHAR_ZIP_COMP_DEFLATE :
entry.flags |= PHAR_ENT_COMPRESSED_GZ;
if (!PHAR_G(has_zlib)) {
pefree(entry.filename, entry.is_persistent);
PHAR_ZIP_FAIL("zlib extension is required");
}
break;
case PHAR_ZIP_COMP_BZIP2 :
entry.flags |= PHAR_ENT_COMPRESSED_BZ2;
if (!PHAR_G(has_bz2)) {
pefree(entry.filename, entry.is_persistent);
PHAR_ZIP_FAIL("bzip2 extension is required");
}
break;
case 1 :
pefree(entry.filename, entry.is_persistent);
PHAR_ZIP_FAIL("unsupported compression method (Shrunk) used in this zip");
case 2 :
case 3 :
case 4 :
case 5 :
pefree(entry.filename, entry.is_persistent);
PHAR_ZIP_FAIL("unsupported compression method (Reduce) used in this zip");
case 6 :
pefree(entry.filename, entry.is_persistent);
PHAR_ZIP_FAIL("unsupported compression method (Implode) used in this zip");
case 7 :
pefree(entry.filename, entry.is_persistent);
PHAR_ZIP_FAIL("unsupported compression method (Tokenize) used in this zip");
case 9 :
pefree(entry.filename, entry.is_persistent);
PHAR_ZIP_FAIL("unsupported compression method (Deflate64) used in this zip");
case 10 :
pefree(entry.filename, entry.is_persistent);
PHAR_ZIP_FAIL("unsupported compression method (PKWare Implode/old IBM TERSE) used in this zip");
case 14 :
pefree(entry.filename, entry.is_persistent);
PHAR_ZIP_FAIL("unsupported compression method (LZMA) used in this zip");
case 18 :
pefree(entry.filename, entry.is_persistent);
PHAR_ZIP_FAIL("unsupported compression method (IBM TERSE) used in this zip");
case 19 :
pefree(entry.filename, entry.is_persistent);
PHAR_ZIP_FAIL("unsupported compression method (IBM LZ77) used in this zip");
case 97 :
pefree(entry.filename, entry.is_persistent);
PHAR_ZIP_FAIL("unsupported compression method (WavPack) used in this zip");
case 98 :
pefree(entry.filename, entry.is_persistent);
PHAR_ZIP_FAIL("unsupported compression method (PPMd) used in this zip");
default :
pefree(entry.filename, entry.is_persistent);
PHAR_ZIP_FAIL("unsupported compression method (unknown) used in this zip");
}
/* get file metadata */
if (PHAR_GET_16(zipentry.comment_len)) {
if (PHAR_GET_16(zipentry.comment_len) != php_stream_read(fp, buf, PHAR_GET_16(zipentry.comment_len))) {
pefree(entry.filename, entry.is_persistent);
PHAR_ZIP_FAIL("unable to read in file comment, truncated");
}
p = buf;
entry.metadata_len = PHAR_GET_16(zipentry.comment_len);
if (phar_parse_metadata(&p, &(entry.metadata), PHAR_GET_16(zipentry.comment_len) TSRMLS_CC) == FAILURE) {
entry.metadata_len = 0;
/* if not valid serialized data, it is a regular string */
if (entry.is_persistent) {
ALLOC_PERMANENT_ZVAL(entry.metadata);
} else {
ALLOC_ZVAL(entry.metadata);
}
INIT_ZVAL(*entry.metadata);
ZVAL_STRINGL(entry.metadata, pestrndup(buf, PHAR_GET_16(zipentry.comment_len), entry.is_persistent), PHAR_GET_16(zipentry.comment_len), 0);
}
} else {
entry.metadata = NULL;
}
if (!actual_alias && entry.filename_len == sizeof(".phar/alias.txt")-1 && !strncmp(entry.filename, ".phar/alias.txt", sizeof(".phar/alias.txt")-1)) {
php_stream_filter *filter;
off_t saveloc;
/* verify local file header */
phar_zip_file_header local;
/* archive alias found */
saveloc = php_stream_tell(fp);
php_stream_seek(fp, PHAR_GET_32(zipentry.offset), SEEK_SET);
if (sizeof(local) != php_stream_read(fp, (char *) &local, sizeof(local))) {
pefree(entry.filename, entry.is_persistent);
PHAR_ZIP_FAIL("phar error: internal corruption of zip-based phar (cannot read local file header for alias)");
}
/* verify local header */
if (entry.filename_len != PHAR_GET_16(local.filename_len) || entry.crc32 != PHAR_GET_32(local.crc32) || entry.uncompressed_filesize != PHAR_GET_32(local.uncompsize) || entry.compressed_filesize != PHAR_GET_32(local.compsize)) {
pefree(entry.filename, entry.is_persistent);
PHAR_ZIP_FAIL("phar error: internal corruption of zip-based phar (local header of alias does not match central directory)");
}
/* construct actual offset to file start - local extra_len can be different from central extra_len */
entry.offset = entry.offset_abs =
sizeof(local) + entry.header_offset + PHAR_GET_16(local.filename_len) + PHAR_GET_16(local.extra_len);
php_stream_seek(fp, entry.offset, SEEK_SET);
/* these next lines should be for php < 5.2.6 after 5.3 filters are fixed */
fp->writepos = 0;
fp->readpos = 0;
php_stream_seek(fp, entry.offset, SEEK_SET);
fp->writepos = 0;
fp->readpos = 0;
/* the above lines should be for php < 5.2.6 after 5.3 filters are fixed */
mydata->alias_len = entry.uncompressed_filesize;
if (entry.flags & PHAR_ENT_COMPRESSED_GZ) {
filter = php_stream_filter_create("zlib.inflate", NULL, php_stream_is_persistent(fp) TSRMLS_CC);
if (!filter) {
pefree(entry.filename, entry.is_persistent);
PHAR_ZIP_FAIL("unable to decompress alias, zlib filter creation failed");
}
php_stream_filter_append(&fp->readfilters, filter);
if (!(entry.uncompressed_filesize = php_stream_copy_to_mem(fp, &actual_alias, entry.uncompressed_filesize, 0)) || !actual_alias) {
pefree(entry.filename, entry.is_persistent);
PHAR_ZIP_FAIL("unable to read in alias, truncated");
}
php_stream_filter_flush(filter, 1);
php_stream_filter_remove(filter, 1 TSRMLS_CC);
} else if (entry.flags & PHAR_ENT_COMPRESSED_BZ2) {
filter = php_stream_filter_create("bzip2.decompress", NULL, php_stream_is_persistent(fp) TSRMLS_CC);
if (!filter) {
pefree(entry.filename, entry.is_persistent);
PHAR_ZIP_FAIL("unable to read in alias, bzip2 filter creation failed");
}
php_stream_filter_append(&fp->readfilters, filter);
if (!(entry.uncompressed_filesize = php_stream_copy_to_mem(fp, &actual_alias, entry.uncompressed_filesize, 0)) || !actual_alias) {
pefree(entry.filename, entry.is_persistent);
PHAR_ZIP_FAIL("unable to read in alias, truncated");
}
php_stream_filter_flush(filter, 1);
php_stream_filter_remove(filter, 1 TSRMLS_CC);
} else {
if (!(entry.uncompressed_filesize = php_stream_copy_to_mem(fp, &actual_alias, entry.uncompressed_filesize, 0)) || !actual_alias) {
pefree(entry.filename, entry.is_persistent);
PHAR_ZIP_FAIL("unable to read in alias, truncated");
}
}
/* return to central directory parsing */
php_stream_seek(fp, saveloc, SEEK_SET);
}
phar_set_inode(&entry TSRMLS_CC);
zend_hash_add(&mydata->manifest, entry.filename, entry.filename_len, (void *)&entry,sizeof(phar_entry_info), NULL);
}
mydata->fp = fp;
if (zend_hash_exists(&(mydata->manifest), ".phar/stub.php", sizeof(".phar/stub.php")-1)) {
mydata->is_data = 0;
} else {
mydata->is_data = 1;
}
zend_hash_add(&(PHAR_GLOBALS->phar_fname_map), mydata->fname, fname_len, (void*)&mydata, sizeof(phar_archive_data*), NULL);
if (actual_alias) {
phar_archive_data **fd_ptr;
if (!phar_validate_alias(actual_alias, mydata->alias_len)) {
if (error) {
spprintf(error, 4096, "phar error: invalid alias \"%s\" in zip-based phar \"%s\"", actual_alias, fname);
}
efree(actual_alias);
zend_hash_del(&(PHAR_GLOBALS->phar_fname_map), mydata->fname, fname_len);
return FAILURE;
}
mydata->is_temporary_alias = 0;
if (SUCCESS == zend_hash_find(&(PHAR_GLOBALS->phar_alias_map), actual_alias, mydata->alias_len, (void **)&fd_ptr)) {
if (SUCCESS != phar_free_alias(*fd_ptr, actual_alias, mydata->alias_len TSRMLS_CC)) {
if (error) {
spprintf(error, 4096, "phar error: Unable to add zip-based phar \"%s\" with implicit alias, alias is already in use", fname);
}
efree(actual_alias);
zend_hash_del(&(PHAR_GLOBALS->phar_fname_map), mydata->fname, fname_len);
return FAILURE;
}
}
mydata->alias = entry.is_persistent ? pestrndup(actual_alias, mydata->alias_len, 1) : actual_alias;
if (entry.is_persistent) {
efree(actual_alias);
}
zend_hash_add(&(PHAR_GLOBALS->phar_alias_map), actual_alias, mydata->alias_len, (void*)&mydata, sizeof(phar_archive_data*), NULL);
} else {
phar_archive_data **fd_ptr;
if (alias_len) {
if (SUCCESS == zend_hash_find(&(PHAR_GLOBALS->phar_alias_map), alias, alias_len, (void **)&fd_ptr)) {
if (SUCCESS != phar_free_alias(*fd_ptr, alias, alias_len TSRMLS_CC)) {
if (error) {
spprintf(error, 4096, "phar error: Unable to add zip-based phar \"%s\" with explicit alias, alias is already in use", fname);
}
zend_hash_del(&(PHAR_GLOBALS->phar_fname_map), mydata->fname, fname_len);
return FAILURE;
}
}
zend_hash_add(&(PHAR_GLOBALS->phar_alias_map), actual_alias, mydata->alias_len, (void*)&mydata, sizeof(phar_archive_data*), NULL);
mydata->alias = pestrndup(alias, alias_len, mydata->is_persistent);
mydata->alias_len = alias_len;
} else {
mydata->alias = pestrndup(mydata->fname, fname_len, mydata->is_persistent);
mydata->alias_len = fname_len;
}
mydata->is_temporary_alias = 1;
}
if (pphar) {
*pphar = mydata;
}
return SUCCESS;
}
/* }}} */
| 1
|
187,321
|
void PictureLayer::ClearClient() {
client_ = nullptr;
UpdateDrawsContent(HasDrawableContent());
}
| 0
|
164,682
|
bool PictureLayer::SupportsLCDText() const {
return true;
}
| 0
|
519,375
|
void set_stored_in_db_flag(bool stored)
{
stored_in_db= stored;
}
| 0
|
322,834
|
static inline void RENAME(BEToUV)(uint8_t *dstU, uint8_t *dstV, const uint8_t *src1, const uint8_t *src2, long width, uint32_t *unused)
{
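/* downsample big-endian 16-bit chroma to 8 bits by keeping the first (most significant) byte of each sample: dstU[i] = src1[2*i], dstV[i] = src2[2*i] */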
#if COMPILE_TEMPLATE_MMX
__asm__ volatile(
"movq "MANGLE(bm01010101)", %%mm4 \n\t"
"mov %0, %%"REG_a" \n\t"
"1: \n\t"
"movq (%1, %%"REG_a",2), %%mm0 \n\t"
"movq 8(%1, %%"REG_a",2), %%mm1 \n\t"
"movq (%2, %%"REG_a",2), %%mm2 \n\t"
"movq 8(%2, %%"REG_a",2), %%mm3 \n\t"
"pand %%mm4, %%mm0 \n\t"
"pand %%mm4, %%mm1 \n\t"
"pand %%mm4, %%mm2 \n\t"
"pand %%mm4, %%mm3 \n\t"
"packuswb %%mm1, %%mm0 \n\t"
"packuswb %%mm3, %%mm2 \n\t"
"movq %%mm0, (%3, %%"REG_a") \n\t"
"movq %%mm2, (%4, %%"REG_a") \n\t"
"add $8, %%"REG_a" \n\t"
" js 1b \n\t"
: : "g" ((x86_reg)-width), "r" (src1+width*2), "r" (src2+width*2), "r" (dstU+width), "r" (dstV+width)
: "%"REG_a
);
#else
int i;
for (i=0; i<width; i++) {
dstU[i]= src1[2*i];
dstV[i]= src2[2*i];
}
#endif
}
| 0
|
460,821
|
static void reds_late_initialization(RedsState *reds)
{
// do only once
if (reds->late_initialization_done) {
return;
}
// create stream channels for streaming devices
for (auto dev: reds->char_devices) {
auto stream_dev = dynamic_cast<StreamDevice*>(dev.get());
if (stream_dev) {
stream_dev->create_channel();
}
}
reds->late_initialization_done = true;
}
| 0
|
367
|
int ff_h264_decode_mb_cabac ( H264Context * h ) {
int mb_xy ;
int mb_type , partition_count , cbp = 0 ;
int dct8x8_allowed = h -> pps . transform_8x8_mode ;
int decode_chroma = h -> sps . chroma_format_idc == 1 || h -> sps . chroma_format_idc == 2 ;
const int pixel_shift = h -> pixel_shift ;
mb_xy = h -> mb_xy = h -> mb_x + h -> mb_y * h -> mb_stride ;
tprintf ( h -> avctx , "pic:%d mb:%d/%d\n" , h -> frame_num , h -> mb_x , h -> mb_y ) ;
if ( h -> slice_type_nos != AV_PICTURE_TYPE_I ) {
int skip ;
if ( FRAME_MBAFF && ( h -> mb_y & 1 ) == 1 && h -> prev_mb_skipped ) skip = h -> next_mb_skipped ;
else skip = decode_cabac_mb_skip ( h , h -> mb_x , h -> mb_y ) ;
if ( skip ) {
if ( FRAME_MBAFF && ( h -> mb_y & 1 ) == 0 ) {
h -> cur_pic . f . mb_type [ mb_xy ] = MB_TYPE_SKIP ;
h -> next_mb_skipped = decode_cabac_mb_skip ( h , h -> mb_x , h -> mb_y + 1 ) ;
if ( ! h -> next_mb_skipped ) h -> mb_mbaff = h -> mb_field_decoding_flag = decode_cabac_field_decoding_flag ( h ) ;
}
decode_mb_skip ( h ) ;
h -> cbp_table [ mb_xy ] = 0 ;
h -> chroma_pred_mode_table [ mb_xy ] = 0 ;
h -> last_qscale_diff = 0 ;
return 0 ;
}
}
if ( FRAME_MBAFF ) {
if ( ( h -> mb_y & 1 ) == 0 ) h -> mb_mbaff = h -> mb_field_decoding_flag = decode_cabac_field_decoding_flag ( h ) ;
}
h -> prev_mb_skipped = 0 ;
fill_decode_neighbors ( h , - ( MB_FIELD ) ) ;
if ( h -> slice_type_nos == AV_PICTURE_TYPE_B ) {
int ctx = 0 ;
assert ( h -> slice_type_nos == AV_PICTURE_TYPE_B ) ;
if ( ! IS_DIRECT ( h -> left_type [ LTOP ] - 1 ) ) ctx ++ ;
if ( ! IS_DIRECT ( h -> top_type - 1 ) ) ctx ++ ;
if ( ! get_cabac_noinline ( & h -> cabac , & h -> cabac_state [ 27 + ctx ] ) ) {
mb_type = 0 ;
}
else if ( ! get_cabac_noinline ( & h -> cabac , & h -> cabac_state [ 27 + 3 ] ) ) {
mb_type = 1 + get_cabac_noinline ( & h -> cabac , & h -> cabac_state [ 27 + 5 ] ) ;
}
else {
int bits ;
bits = get_cabac_noinline ( & h -> cabac , & h -> cabac_state [ 27 + 4 ] ) << 3 ;
bits += get_cabac_noinline ( & h -> cabac , & h -> cabac_state [ 27 + 5 ] ) << 2 ;
bits += get_cabac_noinline ( & h -> cabac , & h -> cabac_state [ 27 + 5 ] ) << 1 ;
bits += get_cabac_noinline ( & h -> cabac , & h -> cabac_state [ 27 + 5 ] ) ;
if ( bits < 8 ) {
mb_type = bits + 3 ;
}
else if ( bits == 13 ) {
mb_type = decode_cabac_intra_mb_type ( h , 32 , 0 ) ;
goto decode_intra_mb ;
}
else if ( bits == 14 ) {
mb_type = 11 ;
}
else if ( bits == 15 ) {
mb_type = 22 ;
}
else {
bits = ( bits << 1 ) + get_cabac_noinline ( & h -> cabac , & h -> cabac_state [ 27 + 5 ] ) ;
mb_type = bits - 4 ;
}
}
partition_count = b_mb_type_info [ mb_type ] . partition_count ;
mb_type = b_mb_type_info [ mb_type ] . type ;
}
else if ( h -> slice_type_nos == AV_PICTURE_TYPE_P ) {
if ( get_cabac_noinline ( & h -> cabac , & h -> cabac_state [ 14 ] ) == 0 ) {
if ( get_cabac_noinline ( & h -> cabac , & h -> cabac_state [ 15 ] ) == 0 ) {
mb_type = 3 * get_cabac_noinline ( & h -> cabac , & h -> cabac_state [ 16 ] ) ;
}
else {
mb_type = 2 - get_cabac_noinline ( & h -> cabac , & h -> cabac_state [ 17 ] ) ;
}
partition_count = p_mb_type_info [ mb_type ] . partition_count ;
mb_type = p_mb_type_info [ mb_type ] . type ;
}
else {
mb_type = decode_cabac_intra_mb_type ( h , 17 , 0 ) ;
goto decode_intra_mb ;
}
}
else {
mb_type = decode_cabac_intra_mb_type ( h , 3 , 1 ) ;
if ( h -> slice_type == AV_PICTURE_TYPE_SI && mb_type ) mb_type -- ;
assert ( h -> slice_type_nos == AV_PICTURE_TYPE_I ) ;
decode_intra_mb : partition_count = 0 ;
cbp = i_mb_type_info [ mb_type ] . cbp ;
h -> intra16x16_pred_mode = i_mb_type_info [ mb_type ] . pred_mode ;
mb_type = i_mb_type_info [ mb_type ] . type ;
}
if ( MB_FIELD ) mb_type |= MB_TYPE_INTERLACED ;
h -> slice_table [ mb_xy ] = h -> slice_num ;
if ( IS_INTRA_PCM ( mb_type ) ) {
const int mb_size = ff_h264_mb_sizes [ h -> sps . chroma_format_idc ] * h -> sps . bit_depth_luma >> 3 ;
const uint8_t * ptr ;
ptr = h -> cabac . bytestream ;
if ( h -> cabac . low & 0x1 ) ptr -- ;
if ( CABAC_BITS == 16 ) {
if ( h -> cabac . low & 0x1FF ) ptr -- ;
}
if ( ( int ) ( h -> cabac . bytestream_end - ptr ) < mb_size ) return - 1 ;
h -> intra_pcm_ptr = ptr ;
ptr += mb_size ;
ff_init_cabac_decoder ( & h -> cabac , ptr , h -> cabac . bytestream_end - ptr ) ;
h -> cbp_table [ mb_xy ] = 0xf7ef ;
h -> chroma_pred_mode_table [ mb_xy ] = 0 ;
h -> cur_pic . f . qscale_table [ mb_xy ] = 0 ;
memset ( h -> non_zero_count [ mb_xy ] , 16 , 48 ) ;
h -> cur_pic . f . mb_type [ mb_xy ] = mb_type ;
h -> last_qscale_diff = 0 ;
return 0 ;
}
fill_decode_caches ( h , mb_type ) ;
if ( IS_INTRA ( mb_type ) ) {
int i , pred_mode ;
if ( IS_INTRA4x4 ( mb_type ) ) {
if ( dct8x8_allowed && get_cabac_noinline ( & h -> cabac , & h -> cabac_state [ 399 + h -> neighbor_transform_size ] ) ) {
mb_type |= MB_TYPE_8x8DCT ;
for ( i = 0 ;
i < 16 ;
i += 4 ) {
int pred = pred_intra_mode ( h , i ) ;
int mode = decode_cabac_mb_intra4x4_pred_mode ( h , pred ) ;
fill_rectangle ( & h -> intra4x4_pred_mode_cache [ scan8 [ i ] ] , 2 , 2 , 8 , mode , 1 ) ;
}
}
else {
for ( i = 0 ;
i < 16 ;
i ++ ) {
int pred = pred_intra_mode ( h , i ) ;
h -> intra4x4_pred_mode_cache [ scan8 [ i ] ] = decode_cabac_mb_intra4x4_pred_mode ( h , pred ) ;
av_dlog ( h -> avctx , "i4x4 pred=%d mode=%d\n" , pred , h -> intra4x4_pred_mode_cache [ scan8 [ i ] ] ) ;
}
}
write_back_intra_pred_mode ( h ) ;
if ( ff_h264_check_intra4x4_pred_mode ( h ) < 0 ) return - 1 ;
}
else {
h -> intra16x16_pred_mode = ff_h264_check_intra_pred_mode ( h , h -> intra16x16_pred_mode , 0 ) ;
if ( h -> intra16x16_pred_mode < 0 ) return - 1 ;
}
if ( decode_chroma ) {
h -> chroma_pred_mode_table [ mb_xy ] = pred_mode = decode_cabac_mb_chroma_pre_mode ( h ) ;
pred_mode = ff_h264_check_intra_pred_mode ( h , pred_mode , 1 ) ;
if ( pred_mode < 0 ) return - 1 ;
h -> chroma_pred_mode = pred_mode ;
}
else {
h -> chroma_pred_mode = DC_128_PRED8x8 ;
}
}
else if ( partition_count == 4 ) {
int i , j , sub_partition_count [ 4 ] , list , ref [ 2 ] [ 4 ] ;
if ( h -> slice_type_nos == AV_PICTURE_TYPE_B ) {
for ( i = 0 ;
i < 4 ;
i ++ ) {
h -> sub_mb_type [ i ] = decode_cabac_b_mb_sub_type ( h ) ;
sub_partition_count [ i ] = b_sub_mb_type_info [ h -> sub_mb_type [ i ] ] . partition_count ;
h -> sub_mb_type [ i ] = b_sub_mb_type_info [ h -> sub_mb_type [ i ] ] . type ;
}
if ( IS_DIRECT ( h -> sub_mb_type [ 0 ] | h -> sub_mb_type [ 1 ] | h -> sub_mb_type [ 2 ] | h -> sub_mb_type [ 3 ] ) ) {
ff_h264_pred_direct_motion ( h , & mb_type ) ;
h -> ref_cache [ 0 ] [ scan8 [ 4 ] ] = h -> ref_cache [ 1 ] [ scan8 [ 4 ] ] = h -> ref_cache [ 0 ] [ scan8 [ 12 ] ] = h -> ref_cache [ 1 ] [ scan8 [ 12 ] ] = PART_NOT_AVAILABLE ;
for ( i = 0 ;
i < 4 ;
i ++ ) fill_rectangle ( & h -> direct_cache [ scan8 [ 4 * i ] ] , 2 , 2 , 8 , ( h -> sub_mb_type [ i ] >> 1 ) & 0xFF , 1 ) ;
}
}
else {
for ( i = 0 ;
i < 4 ;
i ++ ) {
h -> sub_mb_type [ i ] = decode_cabac_p_mb_sub_type ( h ) ;
sub_partition_count [ i ] = p_sub_mb_type_info [ h -> sub_mb_type [ i ] ] . partition_count ;
h -> sub_mb_type [ i ] = p_sub_mb_type_info [ h -> sub_mb_type [ i ] ] . type ;
}
}
for ( list = 0 ;
list < h -> list_count ;
list ++ ) {
for ( i = 0 ;
i < 4 ;
i ++ ) {
if ( IS_DIRECT ( h -> sub_mb_type [ i ] ) ) continue ;
if ( IS_DIR ( h -> sub_mb_type [ i ] , 0 , list ) ) {
int rc = h -> ref_count [ list ] << MB_MBAFF ;
if ( rc > 1 ) {
ref [ list ] [ i ] = decode_cabac_mb_ref ( h , list , 4 * i ) ;
if ( ref [ list ] [ i ] >= ( unsigned ) rc ) {
av_log ( h -> avctx , AV_LOG_ERROR , "Reference %d >= %d\n" , ref [ list ] [ i ] , rc ) ;
return - 1 ;
}
}
else ref [ list ] [ i ] = 0 ;
}
else {
ref [ list ] [ i ] = - 1 ;
}
h -> ref_cache [ list ] [ scan8 [ 4 * i ] + 1 ] = h -> ref_cache [ list ] [ scan8 [ 4 * i ] + 8 ] = h -> ref_cache [ list ] [ scan8 [ 4 * i ] + 9 ] = ref [ list ] [ i ] ;
}
}
if ( dct8x8_allowed ) dct8x8_allowed = get_dct8x8_allowed ( h ) ;
for ( list = 0 ;
list < h -> list_count ;
list ++ ) {
for ( i = 0 ;
i < 4 ;
i ++ ) {
h -> ref_cache [ list ] [ scan8 [ 4 * i ] ] = h -> ref_cache [ list ] [ scan8 [ 4 * i ] + 1 ] ;
if ( IS_DIRECT ( h -> sub_mb_type [ i ] ) ) {
fill_rectangle ( h -> mvd_cache [ list ] [ scan8 [ 4 * i ] ] , 2 , 2 , 8 , 0 , 2 ) ;
continue ;
}
if ( IS_DIR ( h -> sub_mb_type [ i ] , 0 , list ) && ! IS_DIRECT ( h -> sub_mb_type [ i ] ) ) {
const int sub_mb_type = h -> sub_mb_type [ i ] ;
const int block_width = ( sub_mb_type & ( MB_TYPE_16x16 | MB_TYPE_16x8 ) ) ? 2 : 1 ;
for ( j = 0 ;
j < sub_partition_count [ i ] ;
j ++ ) {
int mpx , mpy ;
int mx , my ;
const int index = 4 * i + block_width * j ;
int16_t ( * mv_cache ) [ 2 ] = & h -> mv_cache [ list ] [ scan8 [ index ] ] ;
uint8_t ( * mvd_cache ) [ 2 ] = & h -> mvd_cache [ list ] [ scan8 [ index ] ] ;
pred_motion ( h , index , block_width , list , h -> ref_cache [ list ] [ scan8 [ index ] ] , & mx , & my ) ;
DECODE_CABAC_MB_MVD ( h , list , index ) tprintf ( h -> avctx , "final mv:%d %d\n" , mx , my ) ;
if ( IS_SUB_8X8 ( sub_mb_type ) ) {
mv_cache [ 1 ] [ 0 ] = mv_cache [ 8 ] [ 0 ] = mv_cache [ 9 ] [ 0 ] = mx ;
mv_cache [ 1 ] [ 1 ] = mv_cache [ 8 ] [ 1 ] = mv_cache [ 9 ] [ 1 ] = my ;
mvd_cache [ 1 ] [ 0 ] = mvd_cache [ 8 ] [ 0 ] = mvd_cache [ 9 ] [ 0 ] = mpx ;
mvd_cache [ 1 ] [ 1 ] = mvd_cache [ 8 ] [ 1 ] = mvd_cache [ 9 ] [ 1 ] = mpy ;
}
else if ( IS_SUB_8X4 ( sub_mb_type ) ) {
mv_cache [ 1 ] [ 0 ] = mx ;
mv_cache [ 1 ] [ 1 ] = my ;
mvd_cache [ 1 ] [ 0 ] = mpx ;
mvd_cache [ 1 ] [ 1 ] = mpy ;
}
else if ( IS_SUB_4X8 ( sub_mb_type ) ) {
mv_cache [ 8 ] [ 0 ] = mx ;
mv_cache [ 8 ] [ 1 ] = my ;
mvd_cache [ 8 ] [ 0 ] = mpx ;
mvd_cache [ 8 ] [ 1 ] = mpy ;
}
mv_cache [ 0 ] [ 0 ] = mx ;
mv_cache [ 0 ] [ 1 ] = my ;
mvd_cache [ 0 ] [ 0 ] = mpx ;
mvd_cache [ 0 ] [ 1 ] = mpy ;
}
}
else {
fill_rectangle ( h -> mv_cache [ list ] [ scan8 [ 4 * i ] ] , 2 , 2 , 8 , 0 , 4 ) ;
fill_rectangle ( h -> mvd_cache [ list ] [ scan8 [ 4 * i ] ] , 2 , 2 , 8 , 0 , 2 ) ;
}
}
}
}
else if ( IS_DIRECT ( mb_type ) ) {
ff_h264_pred_direct_motion ( h , & mb_type ) ;
fill_rectangle ( h -> mvd_cache [ 0 ] [ scan8 [ 0 ] ] , 4 , 4 , 8 , 0 , 2 ) ;
fill_rectangle ( h -> mvd_cache [ 1 ] [ scan8 [ 0 ] ] , 4 , 4 , 8 , 0 , 2 ) ;
dct8x8_allowed &= h -> sps . direct_8x8_inference_flag ;
}
else {
int list , i ;
if ( IS_16X16 ( mb_type ) ) {
for ( list = 0 ;
list < h -> list_count ;
list ++ ) {
if ( IS_DIR ( mb_type , 0 , list ) ) {
int ref , rc = h -> ref_count [ list ] << MB_MBAFF ;
if ( rc > 1 ) {
ref = decode_cabac_mb_ref ( h , list , 0 ) ;
if ( ref >= ( unsigned ) rc ) {
av_log ( h -> avctx , AV_LOG_ERROR , "Reference %d >= %d\n" , ref , rc ) ;
return - 1 ;
}
}
else ref = 0 ;
fill_rectangle ( & h -> ref_cache [ list ] [ scan8 [ 0 ] ] , 4 , 4 , 8 , ref , 1 ) ;
}
}
for ( list = 0 ;
list < h -> list_count ;
list ++ ) {
if ( IS_DIR ( mb_type , 0 , list ) ) {
int mx , my , mpx , mpy ;
pred_motion ( h , 0 , 4 , list , h -> ref_cache [ list ] [ scan8 [ 0 ] ] , & mx , & my ) ;
DECODE_CABAC_MB_MVD ( h , list , 0 ) tprintf ( h -> avctx , "final mv:%d %d\n" , mx , my ) ;
fill_rectangle ( h -> mvd_cache [ list ] [ scan8 [ 0 ] ] , 4 , 4 , 8 , pack8to16 ( mpx , mpy ) , 2 ) ;
fill_rectangle ( h -> mv_cache [ list ] [ scan8 [ 0 ] ] , 4 , 4 , 8 , pack16to32 ( mx , my ) , 4 ) ;
}
}
}
else if ( IS_16X8 ( mb_type ) ) {
for ( list = 0 ;
list < h -> list_count ;
list ++ ) {
for ( i = 0 ;
i < 2 ;
i ++ ) {
if ( IS_DIR ( mb_type , i , list ) ) {
int ref , rc = h -> ref_count [ list ] << MB_MBAFF ;
if ( rc > 1 ) {
ref = decode_cabac_mb_ref ( h , list , 8 * i ) ;
if ( ref >= ( unsigned ) rc ) {
av_log ( h -> avctx , AV_LOG_ERROR , "Reference %d >= %d\n" , ref , rc ) ;
return - 1 ;
}
}
else ref = 0 ;
fill_rectangle ( & h -> ref_cache [ list ] [ scan8 [ 0 ] + 16 * i ] , 4 , 2 , 8 , ref , 1 ) ;
}
else fill_rectangle ( & h -> ref_cache [ list ] [ scan8 [ 0 ] + 16 * i ] , 4 , 2 , 8 , ( LIST_NOT_USED & 0xFF ) , 1 ) ;
}
}
for ( list = 0 ;
list < h -> list_count ;
list ++ ) {
for ( i = 0 ;
i < 2 ;
i ++ ) {
if ( IS_DIR ( mb_type , i , list ) ) {
int mx , my , mpx , mpy ;
pred_16x8_motion ( h , 8 * i , list , h -> ref_cache [ list ] [ scan8 [ 0 ] + 16 * i ] , & mx , & my ) ;
DECODE_CABAC_MB_MVD ( h , list , 8 * i ) tprintf ( h -> avctx , "final mv:%d %d\n" , mx , my ) ;
fill_rectangle ( h -> mvd_cache [ list ] [ scan8 [ 0 ] + 16 * i ] , 4 , 2 , 8 , pack8to16 ( mpx , mpy ) , 2 ) ;
fill_rectangle ( h -> mv_cache [ list ] [ scan8 [ 0 ] + 16 * i ] , 4 , 2 , 8 , pack16to32 ( mx , my ) , 4 ) ;
}
else {
fill_rectangle ( h -> mvd_cache [ list ] [ scan8 [ 0 ] + 16 * i ] , 4 , 2 , 8 , 0 , 2 ) ;
fill_rectangle ( h -> mv_cache [ list ] [ scan8 [ 0 ] + 16 * i ] , 4 , 2 , 8 , 0 , 4 ) ;
}
}
}
}
else {
assert ( IS_8X16 ( mb_type ) ) ;
for ( list = 0 ;
list < h -> list_count ;
list ++ ) {
for ( i = 0 ;
i < 2 ;
i ++ ) {
if ( IS_DIR ( mb_type , i , list ) ) {
int ref , rc = h -> ref_count [ list ] << MB_MBAFF ;
if ( rc > 1 ) {
ref = decode_cabac_mb_ref ( h , list , 4 * i ) ;
if ( ref >= ( unsigned ) rc ) {
av_log ( h -> avctx , AV_LOG_ERROR , "Reference %d >= %d\n" , ref , rc ) ;
return - 1 ;
}
}
else ref = 0 ;
fill_rectangle ( & h -> ref_cache [ list ] [ scan8 [ 0 ] + 2 * i ] , 2 , 4 , 8 , ref , 1 ) ;
}
else fill_rectangle ( & h -> ref_cache [ list ] [ scan8 [ 0 ] + 2 * i ] , 2 , 4 , 8 , ( LIST_NOT_USED & 0xFF ) , 1 ) ;
}
}
for ( list = 0 ;
list < h -> list_count ;
list ++ ) {
for ( i = 0 ;
i < 2 ;
i ++ ) {
if ( IS_DIR ( mb_type , i , list ) ) {
int mx , my , mpx , mpy ;
pred_8x16_motion ( h , i * 4 , list , h -> ref_cache [ list ] [ scan8 [ 0 ] + 2 * i ] , & mx , & my ) ;
DECODE_CABAC_MB_MVD ( h , list , 4 * i ) tprintf ( h -> avctx , "final mv:%d %d\n" , mx , my ) ;
fill_rectangle ( h -> mvd_cache [ list ] [ scan8 [ 0 ] + 2 * i ] , 2 , 4 , 8 , pack8to16 ( mpx , mpy ) , 2 ) ;
fill_rectangle ( h -> mv_cache [ list ] [ scan8 [ 0 ] + 2 * i ] , 2 , 4 , 8 , pack16to32 ( mx , my ) , 4 ) ;
}
else {
fill_rectangle ( h -> mvd_cache [ list ] [ scan8 [ 0 ] + 2 * i ] , 2 , 4 , 8 , 0 , 2 ) ;
fill_rectangle ( h -> mv_cache [ list ] [ scan8 [ 0 ] + 2 * i ] , 2 , 4 , 8 , 0 , 4 ) ;
}
}
}
}
}
if ( IS_INTER ( mb_type ) ) {
h -> chroma_pred_mode_table [ mb_xy ] = 0 ;
write_back_motion ( h , mb_type ) ;
}
if ( ! IS_INTRA16x16 ( mb_type ) ) {
cbp = decode_cabac_mb_cbp_luma ( h ) ;
if ( decode_chroma ) cbp |= decode_cabac_mb_cbp_chroma ( h ) << 4 ;
}
h -> cbp_table [ mb_xy ] = h -> cbp = cbp ;
if ( dct8x8_allowed && ( cbp & 15 ) && ! IS_INTRA ( mb_type ) ) {
mb_type |= MB_TYPE_8x8DCT * get_cabac_noinline ( & h -> cabac , & h -> cabac_state [ 399 + h -> neighbor_transform_size ] ) ;
}
if ( CHROMA444 && IS_8x8DCT ( mb_type ) ) {
int i ;
uint8_t * nnz_cache = h -> non_zero_count_cache ;
for ( i = 0 ;
i < 2 ;
i ++ ) {
if ( h -> left_type [ LEFT ( i ) ] && ! IS_8x8DCT ( h -> left_type [ LEFT ( i ) ] ) ) {
nnz_cache [ 3 + 8 * 1 + 2 * 8 * i ] = nnz_cache [ 3 + 8 * 2 + 2 * 8 * i ] = nnz_cache [ 3 + 8 * 6 + 2 * 8 * i ] = nnz_cache [ 3 + 8 * 7 + 2 * 8 * i ] = nnz_cache [ 3 + 8 * 11 + 2 * 8 * i ] = nnz_cache [ 3 + 8 * 12 + 2 * 8 * i ] = IS_INTRA ( mb_type ) ? 64 : 0 ;
}
}
if ( h -> top_type && ! IS_8x8DCT ( h -> top_type ) ) {
uint32_t top_empty = CABAC && ! IS_INTRA ( mb_type ) ? 0 : 0x40404040 ;
AV_WN32A ( & nnz_cache [ 4 + 8 * 0 ] , top_empty ) ;
AV_WN32A ( & nnz_cache [ 4 + 8 * 5 ] , top_empty ) ;
AV_WN32A ( & nnz_cache [ 4 + 8 * 10 ] , top_empty ) ;
}
}
h -> cur_pic . f . mb_type [ mb_xy ] = mb_type ;
if ( cbp || IS_INTRA16x16 ( mb_type ) ) {
const uint8_t * scan , * scan8x8 ;
const uint32_t * qmul ;
if ( IS_INTERLACED ( mb_type ) ) {
scan8x8 = h -> qscale ? h -> field_scan8x8 : h -> field_scan8x8_q0 ;
scan = h -> qscale ? h -> field_scan : h -> field_scan_q0 ;
}
else {
scan8x8 = h -> qscale ? h -> zigzag_scan8x8 : h -> zigzag_scan8x8_q0 ;
scan = h -> qscale ? h -> zigzag_scan : h -> zigzag_scan_q0 ;
}
if ( get_cabac_noinline ( & h -> cabac , & h -> cabac_state [ 60 + ( h -> last_qscale_diff != 0 ) ] ) ) {
int val = 1 ;
int ctx = 2 ;
const int max_qp = 51 + 6 * ( h -> sps . bit_depth_luma - 8 ) ;
while ( get_cabac_noinline ( & h -> cabac , & h -> cabac_state [ 60 + ctx ] ) ) {
ctx = 3 ;
val ++ ;
if ( val > 2 * max_qp ) {
av_log ( h -> avctx , AV_LOG_ERROR , "cabac decode of qscale diff failed at %d %d\n" , h -> mb_x , h -> mb_y ) ;
return - 1 ;
}
}
if ( val & 0x01 ) val = ( val + 1 ) >> 1 ;
else val = - ( ( val + 1 ) >> 1 ) ;
h -> last_qscale_diff = val ;
h -> qscale += val ;
if ( ( ( unsigned ) h -> qscale ) > max_qp ) {
if ( h -> qscale < 0 ) h -> qscale += max_qp + 1 ;
else h -> qscale -= max_qp + 1 ;
}
h -> chroma_qp [ 0 ] = get_chroma_qp ( h , 0 , h -> qscale ) ;
h -> chroma_qp [ 1 ] = get_chroma_qp ( h , 1 , h -> qscale ) ;
}
else h -> last_qscale_diff = 0 ;
decode_cabac_luma_residual ( h , scan , scan8x8 , pixel_shift , mb_type , cbp , 0 ) ;
if ( CHROMA444 ) {
decode_cabac_luma_residual ( h , scan , scan8x8 , pixel_shift , mb_type , cbp , 1 ) ;
decode_cabac_luma_residual ( h , scan , scan8x8 , pixel_shift , mb_type , cbp , 2 ) ;
}
else if ( CHROMA422 ) {
if ( cbp & 0x30 ) {
int c ;
for ( c = 0 ;
c < 2 ;
c ++ ) decode_cabac_residual_dc_422 ( h , h -> mb + ( ( 256 + 16 * 16 * c ) << pixel_shift ) , 3 , CHROMA_DC_BLOCK_INDEX + c , chroma422_dc_scan , 8 ) ;
}
if ( cbp & 0x20 ) {
int c , i , i8x8 ;
for ( c = 0 ;
c < 2 ;
c ++ ) {
int16_t * mb = h -> mb + ( 16 * ( 16 + 16 * c ) << pixel_shift ) ;
qmul = h -> dequant4_coeff [ c + 1 + ( IS_INTRA ( mb_type ) ? 0 : 3 ) ] [ h -> chroma_qp [ c ] ] ;
for ( i8x8 = 0 ;
i8x8 < 2 ;
i8x8 ++ ) {
for ( i = 0 ;
i < 4 ;
i ++ ) {
const int index = 16 + 16 * c + 8 * i8x8 + i ;
decode_cabac_residual_nondc ( h , mb , 4 , index , scan + 1 , qmul , 15 ) ;
mb += 16 << pixel_shift ;
}
}
}
}
else {
fill_rectangle ( & h -> non_zero_count_cache [ scan8 [ 16 ] ] , 4 , 4 , 8 , 0 , 1 ) ;
fill_rectangle ( & h -> non_zero_count_cache [ scan8 [ 32 ] ] , 4 , 4 , 8 , 0 , 1 ) ;
}
}
else {
if ( cbp & 0x30 ) {
int c ;
for ( c = 0 ;
c < 2 ;
c ++ ) decode_cabac_residual_dc ( h , h -> mb + ( ( 256 + 16 * 16 * c ) << pixel_shift ) , 3 , CHROMA_DC_BLOCK_INDEX + c , chroma_dc_scan , 4 ) ;
}
if ( cbp & 0x20 ) {
int c , i ;
for ( c = 0 ;
c < 2 ;
c ++ ) {
qmul = h -> dequant4_coeff [ c + 1 + ( IS_INTRA ( mb_type ) ? 0 : 3 ) ] [ h -> chroma_qp [ c ] ] ;
for ( i = 0 ;
i < 4 ;
i ++ ) {
const int index = 16 + 16 * c + i ;
decode_cabac_residual_nondc ( h , h -> mb + ( 16 * index << pixel_shift ) , 4 , index , scan + 1 , qmul , 15 ) ;
}
}
}
else {
fill_rectangle ( & h -> non_zero_count_cache [ scan8 [ 16 ] ] , 4 , 4 , 8 , 0 , 1 ) ;
fill_rectangle ( & h -> non_zero_count_cache [ scan8 [ 32 ] ] , 4 , 4 , 8 , 0 , 1 ) ;
}
}
}
else {
fill_rectangle ( & h -> non_zero_count_cache [ scan8 [ 0 ] ] , 4 , 4 , 8 , 0 , 1 ) ;
fill_rectangle ( & h -> non_zero_count_cache [ scan8 [ 16 ] ] , 4 , 4 , 8 , 0 , 1 ) ;
fill_rectangle ( & h -> non_zero_count_cache [ scan8 [ 32 ] ] , 4 , 4 , 8 , 0 , 1 ) ;
h -> last_qscale_diff = 0 ;
}
h -> cur_pic . f . qscale_table [ mb_xy ] = h -> qscale ;
write_back_non_zero_count ( h ) ;
return 0 ;
}
| 1
|
22,205
|
static Selectivity prefix_selectivity(PlannerInfo *root, VariableStatData *vardata, Oid vartype, Oid opfamily, Const *prefixcon)
{
Selectivity prefixsel;
Oid cmpopr;
FmgrInfo opproc;
Const *greaterstrcon;
Selectivity eq_sel;
cmpopr = get_opfamily_member(opfamily, vartype, vartype, BTGreaterEqualStrategyNumber);
if (cmpopr == InvalidOid)
elog(ERROR, "no >= operator for opfamily %u", opfamily);
fmgr_info(get_opcode(cmpopr), &opproc);
prefixsel = ineq_histogram_selectivity(root, vardata, &opproc, true, prefixcon->constvalue, prefixcon->consttype);
if (prefixsel < 0.0) {
return DEFAULT_MATCH_SEL;
}
cmpopr = get_opfamily_member(opfamily, vartype, vartype, BTLessStrategyNumber);
if (cmpopr == InvalidOid)
elog(ERROR, "no < operator for opfamily %u", opfamily);
fmgr_info(get_opcode(cmpopr), &opproc);
greaterstrcon = make_greater_string(prefixcon, &opproc, DEFAULT_COLLATION_OID);
if (greaterstrcon) {
Selectivity topsel;
topsel = ineq_histogram_selectivity(root, vardata, &opproc, false, greaterstrcon->constvalue, greaterstrcon->consttype);
Assert(topsel >= 0.0);
prefixsel = topsel + prefixsel - 1.0;
}
cmpopr = get_opfamily_member(opfamily, vartype, vartype, BTEqualStrategyNumber);
if (cmpopr == InvalidOid)
elog(ERROR, "no = operator for opfamily %u", opfamily);
eq_sel = var_eq_const(vardata, cmpopr, prefixcon->constvalue, false, true);
prefixsel = Max(prefixsel, eq_sel);
return prefixsel;
}
| 0
|
145,479
|
BOOL update_recv_pointer(rdpUpdate* update, wStream* s)
{
BOOL rc = FALSE;
UINT16 messageType;
rdpContext* context = update->context;
rdpPointerUpdate* pointer = update->pointer;
if (Stream_GetRemainingLength(s) < 2 + 2)
return FALSE;
Stream_Read_UINT16(s, messageType); /* messageType (2 bytes) */
Stream_Seek_UINT16(s); /* pad2Octets (2 bytes) */
switch (messageType)
{
case PTR_MSG_TYPE_POSITION:
{
POINTER_POSITION_UPDATE* pointer_position = update_read_pointer_position(update, s);
if (pointer_position)
{
rc = IFCALLRESULT(FALSE, pointer->PointerPosition, context, pointer_position);
free_pointer_position_update(context, pointer_position);
}
}
break;
case PTR_MSG_TYPE_SYSTEM:
{
POINTER_SYSTEM_UPDATE* pointer_system = update_read_pointer_system(update, s);
if (pointer_system)
{
rc = IFCALLRESULT(FALSE, pointer->PointerSystem, context, pointer_system);
free_pointer_system_update(context, pointer_system);
}
}
break;
case PTR_MSG_TYPE_COLOR:
{
POINTER_COLOR_UPDATE* pointer_color = update_read_pointer_color(update, s, 24);
if (pointer_color)
{
rc = IFCALLRESULT(FALSE, pointer->PointerColor, context, pointer_color);
free_pointer_color_update(context, pointer_color);
}
}
break;
case PTR_MSG_TYPE_POINTER:
{
POINTER_NEW_UPDATE* pointer_new = update_read_pointer_new(update, s);
if (pointer_new)
{
rc = IFCALLRESULT(FALSE, pointer->PointerNew, context, pointer_new);
free_pointer_new_update(context, pointer_new);
}
}
break;
case PTR_MSG_TYPE_CACHED:
{
POINTER_CACHED_UPDATE* pointer_cached = update_read_pointer_cached(update, s);
if (pointer_cached)
{
rc = IFCALLRESULT(FALSE, pointer->PointerCached, context, pointer_cached);
free_pointer_cached_update(context, pointer_cached);
}
}
break;
default:
break;
}
return rc;
}
| 0
|
505,445
|
int ssl3_write_pending(SSL *s, int type, const unsigned char *buf,
unsigned int len)
{
int i;
/* XXXX */
if ((s->s3->wpend_tot > (int)len)
|| ((s->s3->wpend_buf != buf) &&
!(s->mode & SSL_MODE_ACCEPT_MOVING_WRITE_BUFFER))
|| (s->s3->wpend_type != type))
{
SSLerr(SSL_F_SSL3_WRITE_PENDING,SSL_R_BAD_WRITE_RETRY);
return(-1);
}
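/* keep retrying the buffered record until the BIO accepts all of it or reports an error */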
for (;;)
{
clear_sys_error();
if (s->wbio != NULL)
{
s->rwstate=SSL_WRITING;
i=BIO_write(s->wbio,
(char *)&(s->s3->wbuf.buf[s->s3->wbuf.offset]),
(unsigned int)s->s3->wbuf.left);
}
else
{
SSLerr(SSL_F_SSL3_WRITE_PENDING,SSL_R_BIO_NOT_SET);
i= -1;
}
if (i == s->s3->wbuf.left)
{
s->s3->wbuf.left=0;
s->rwstate=SSL_NOTHING;
return(s->s3->wpend_ret);
}
else if (i <= 0) {
if (s->version == DTLS1_VERSION ||
s->version == DTLS1_BAD_VER) {
/* For DTLS, just drop it. That's kind of the whole
point in using a datagram service */
s->s3->wbuf.left = 0;
}
return(i);
}
s->s3->wbuf.offset+=i;
s->s3->wbuf.left-=i;
}
}
| 0
|
129,479
|
TRIO_PRIVATE void TrioWriteString TRIO_ARGS5((self, string, flags, width, precision),
trio_class_t* self, TRIO_CONST char* string,
trio_flags_t flags, int width, int precision)
{
int length = 0;
int ch;
assert(VALID(self));
assert(VALID(self->OutStream));
if (string == NULL)
{
string = internalNullString;
length = sizeof(internalNullString) - 1;
#if TRIO_FEATURE_QUOTE
/* Disable quoting for the null pointer */
flags &= (~FLAGS_QUOTE);
#endif
width = 0;
}
else
{
if (precision <= 0)
{
length = trio_length(string);
}
else
{
length = trio_length_max(string, precision);
}
}
if ((NO_PRECISION != precision) && (precision < length))
{
length = precision;
}
width -= length;
#if TRIO_FEATURE_QUOTE
if (flags & FLAGS_QUOTE)
self->OutStream(self, CHAR_QUOTE);
#endif
if (!(flags & FLAGS_LEFTADJUST))
{
while (width-- > 0)
self->OutStream(self, CHAR_ADJUST);
}
while (length-- > 0)
{
/* The ctype parameters must be an unsigned char (or EOF) */
ch = (int)((unsigned char)(*string++));
TrioWriteStringCharacter(self, ch, flags);
}
if (flags & FLAGS_LEFTADJUST)
{
while (width-- > 0)
self->OutStream(self, CHAR_ADJUST);
}
#if TRIO_FEATURE_QUOTE
if (flags & FLAGS_QUOTE)
self->OutStream(self, CHAR_QUOTE);
#endif
}
| 0
|
211,971
|
void TabStripGtk::UpdateDropIndex(GdkDragContext* context, gint x, gint y) {
x = gtk_util::MirroredXCoordinate(tabstrip_.get(), x);
for (int i = GetMiniTabCount(); i < GetTabCount(); ++i) {
TabGtk* tab = GetTabAt(i);
gfx::Rect bounds = tab->GetNonMirroredBounds(tabstrip_.get());
const int tab_max_x = bounds.x() + bounds.width();
const int hot_width = bounds.width() / kTabEdgeRatioInverse;
if (x < tab_max_x) {
if (x < bounds.x() + hot_width)
SetDropIndex(i, true);
else if (x >= tab_max_x - hot_width)
SetDropIndex(i + 1, true);
else
SetDropIndex(i, false);
return;
}
}
SetDropIndex(GetTabCount(), true);
}
| 0
|
341,810
|
int ffurl_register_protocol(URLProtocol *protocol, int size)
{
URLProtocol **p;
if (size < sizeof(URLProtocol)) {
URLProtocol *temp = av_mallocz(sizeof(URLProtocol));
memcpy(temp, protocol, size);
protocol = temp;
}
p = &first_protocol;
while (*p != NULL)
p = &(*p)->next;
*p = protocol;
protocol->next = NULL;
return 0;
}
| 1
|
200,367
|
void RenderViewImpl::OnEnumerateDirectoryResponse(
int id,
const std::vector<base::FilePath>& paths) {
if (!enumeration_completions_[id])
return;
WebVector<WebString> ws_file_names(paths.size());
for (size_t i = 0; i < paths.size(); ++i)
ws_file_names[i] = webkit_base::FilePathToWebString(paths[i]);
enumeration_completions_[id]->didChooseFile(ws_file_names);
enumeration_completions_.erase(id);
}
| 0
|
352,892
|
CreateStatistics(CreateStatsStmt *stmt)
{
int16 attnums[STATS_MAX_DIMENSIONS];
int numcols = 0;
char *namestr;
NameData stxname;
Oid statoid;
Oid namespaceId;
Oid stxowner = GetUserId();
HeapTuple htup;
Datum values[Natts_pg_statistic_ext];
bool nulls[Natts_pg_statistic_ext];
int2vector *stxkeys;
Relation statrel;
Relation rel = NULL;
Oid relid;
ObjectAddress parentobject,
myself;
Datum types[2]; /* one for each possible type of statistic */
int ntypes;
ArrayType *stxkind;
bool build_ndistinct;
bool build_dependencies;
bool requested_type = false;
int i;
ListCell *cell;
Assert(IsA(stmt, CreateStatsStmt));
/*
* Examine the FROM clause. Currently, we only allow it to be a single
* simple table, but later we'll probably allow multiple tables and JOIN
* syntax. The grammar is already prepared for that, so we have to check
* here that what we got is what we can support.
*/
if (list_length(stmt->relations) != 1)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("only a single relation is allowed in CREATE STATISTICS")));
foreach(cell, stmt->relations)
{
Node *rln = (Node *) lfirst(cell);
if (!IsA(rln, RangeVar))
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("only a single relation is allowed in CREATE STATISTICS")));
/*
* CREATE STATISTICS will influence future execution plans but does
* not interfere with currently executing plans. So it should be
* enough to take only ShareUpdateExclusiveLock on relation,
* conflicting with ANALYZE and other DDL that sets statistical
* information, but not with normal queries.
*/
rel = relation_openrv((RangeVar *) rln, ShareUpdateExclusiveLock);
/* Restrict to allowed relation types */
if (rel->rd_rel->relkind != RELKIND_RELATION &&
rel->rd_rel->relkind != RELKIND_MATVIEW &&
rel->rd_rel->relkind != RELKIND_FOREIGN_TABLE &&
rel->rd_rel->relkind != RELKIND_PARTITIONED_TABLE)
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
errmsg("relation \"%s\" is not a table, foreign table, or materialized view",
RelationGetRelationName(rel))));
/* You must own the relation to create stats on it */
if (!pg_class_ownercheck(RelationGetRelid(rel), stxowner))
aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_CLASS,
RelationGetRelationName(rel));
/* Creating statistics on system catalogs is not allowed */
if (!allowSystemTableMods && IsSystemRelation(rel))
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
errmsg("permission denied: \"%s\" is a system catalog",
RelationGetRelationName(rel))));
}
Assert(rel);
relid = RelationGetRelid(rel);
/*
* If the node has a name, split it up and determine creation namespace.
* If not (a possibility not considered by the grammar, but one which can
* occur via the "CREATE TABLE ... (LIKE)" command), then we put the
* object in the same namespace as the relation, and cons up a name for it.
*/
if (stmt->defnames)
namespaceId = QualifiedNameGetCreationNamespace(stmt->defnames,
&namestr);
else
{
namespaceId = RelationGetNamespace(rel);
namestr = ChooseExtendedStatisticName(RelationGetRelationName(rel),
ChooseExtendedStatisticNameAddition(stmt->exprs),
"stat",
namespaceId);
}
namestrcpy(&stxname, namestr);
/*
* Deal with the possibility that the statistics object already exists.
*/
if (SearchSysCacheExists2(STATEXTNAMENSP,
CStringGetDatum(namestr),
ObjectIdGetDatum(namespaceId)))
{
if (stmt->if_not_exists)
{
ereport(NOTICE,
(errcode(ERRCODE_DUPLICATE_OBJECT),
errmsg("statistics object \"%s\" already exists, skipping",
namestr)));
relation_close(rel, NoLock);
return InvalidObjectAddress;
}
ereport(ERROR,
(errcode(ERRCODE_DUPLICATE_OBJECT),
errmsg("statistics object \"%s\" already exists", namestr)));
}
/*
* Currently, we only allow simple column references in the expression
* list. That will change someday, and again the grammar already supports
* it so we have to enforce restrictions here. For now, we can convert
* the expression list to a simple array of attnums. While at it, enforce
* some constraints.
*/
foreach(cell, stmt->exprs)
{
Node *expr = (Node *) lfirst(cell);
ColumnRef *cref;
char *attname;
HeapTuple atttuple;
Form_pg_attribute attForm;
TypeCacheEntry *type;
if (!IsA(expr, ColumnRef))
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("only simple column references are allowed in CREATE STATISTICS")));
cref = (ColumnRef *) expr;
if (list_length(cref->fields) != 1)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("only simple column references are allowed in CREATE STATISTICS")));
attname = strVal((Value *) linitial(cref->fields));
atttuple = SearchSysCacheAttName(relid, attname);
if (!HeapTupleIsValid(atttuple))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_COLUMN),
errmsg("column \"%s\" does not exist",
attname)));
attForm = (Form_pg_attribute) GETSTRUCT(atttuple);
/* Disallow use of system attributes in extended stats */
if (attForm->attnum <= 0)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("statistics creation on system columns is not supported")));
/* Disallow data types without a less-than operator */
type = lookup_type_cache(attForm->atttypid, TYPECACHE_LT_OPR);
if (type->lt_opr == InvalidOid)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("column \"%s\" cannot be used in statistics because its type %s has no default btree operator class",
attname, format_type_be(attForm->atttypid))));
/* Make sure no more than STATS_MAX_DIMENSIONS columns are used */
if (numcols >= STATS_MAX_DIMENSIONS)
ereport(ERROR,
(errcode(ERRCODE_TOO_MANY_COLUMNS),
errmsg("cannot have more than %d columns in statistics",
STATS_MAX_DIMENSIONS)));
attnums[numcols] = attForm->attnum;
numcols++;
ReleaseSysCache(atttuple);
}
/*
* Check that at least two columns were specified in the statement. The
* upper bound was already checked in the loop above.
*/
if (numcols < 2)
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
errmsg("extended statistics require at least 2 columns")));
/*
* Sort the attnums, which makes detecting duplicates somewhat easier, and
* it does not hurt (it does not affect the efficiency, unlike for
* indexes, for example).
*/
qsort(attnums, numcols, sizeof(int16), compare_int16);
/*
* Check for duplicates in the list of columns. The attnums are sorted so
* just check consecutive elements.
*/
for (i = 1; i < numcols; i++)
{
if (attnums[i] == attnums[i - 1])
ereport(ERROR,
(errcode(ERRCODE_DUPLICATE_COLUMN),
errmsg("duplicate column name in statistics definition")));
}
/* Form an int2vector representation of the sorted column list */
stxkeys = buildint2vector(attnums, numcols);
/*
* Parse the statistics kinds.
*/
build_ndistinct = false;
build_dependencies = false;
foreach(cell, stmt->stat_types)
{
char *type = strVal((Value *) lfirst(cell));
if (strcmp(type, "ndistinct") == 0)
{
build_ndistinct = true;
requested_type = true;
}
else if (strcmp(type, "dependencies") == 0)
{
build_dependencies = true;
requested_type = true;
}
else
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
errmsg("unrecognized statistics kind \"%s\"",
type)));
}
/* If no statistic type was specified, build them all. */
if (!requested_type)
{
build_ndistinct = true;
build_dependencies = true;
}
/* construct the char array of enabled statistic types */
ntypes = 0;
if (build_ndistinct)
types[ntypes++] = CharGetDatum(STATS_EXT_NDISTINCT);
if (build_dependencies)
types[ntypes++] = CharGetDatum(STATS_EXT_DEPENDENCIES);
Assert(ntypes > 0 && ntypes <= lengthof(types));
stxkind = construct_array(types, ntypes, CHAROID, 1, true, 'c');
/*
* Everything seems fine, so let's build the pg_statistic_ext tuple.
*/
memset(values, 0, sizeof(values));
memset(nulls, false, sizeof(nulls));
values[Anum_pg_statistic_ext_stxrelid - 1] = ObjectIdGetDatum(relid);
values[Anum_pg_statistic_ext_stxname - 1] = NameGetDatum(&stxname);
values[Anum_pg_statistic_ext_stxnamespace - 1] = ObjectIdGetDatum(namespaceId);
values[Anum_pg_statistic_ext_stxowner - 1] = ObjectIdGetDatum(stxowner);
values[Anum_pg_statistic_ext_stxkeys - 1] = PointerGetDatum(stxkeys);
values[Anum_pg_statistic_ext_stxkind - 1] = PointerGetDatum(stxkind);
/* no statistics built yet */
nulls[Anum_pg_statistic_ext_stxndistinct - 1] = true;
nulls[Anum_pg_statistic_ext_stxdependencies - 1] = true;
/* insert it into pg_statistic_ext */
statrel = heap_open(StatisticExtRelationId, RowExclusiveLock);
htup = heap_form_tuple(statrel->rd_att, values, nulls);
statoid = CatalogTupleInsert(statrel, htup);
heap_freetuple(htup);
relation_close(statrel, RowExclusiveLock);
/*
* Invalidate relcache so that others see the new statistics object.
*/
CacheInvalidateRelcache(rel);
relation_close(rel, NoLock);
/*
* Add an AUTO dependency on each column used in the stats, so that the
* stats object goes away if any or all of them get dropped.
*/
ObjectAddressSet(myself, StatisticExtRelationId, statoid);
for (i = 0; i < numcols; i++)
{
ObjectAddressSubSet(parentobject, RelationRelationId, relid, attnums[i]);
recordDependencyOn(&myself, &parentobject, DEPENDENCY_AUTO);
}
/*
* Also add dependencies on namespace and owner. These are required
* because the stats object might have a different namespace and/or owner
* than the underlying table(s).
*/
ObjectAddressSet(parentobject, NamespaceRelationId, namespaceId);
recordDependencyOn(&myself, &parentobject, DEPENDENCY_NORMAL);
recordDependencyOnOwner(StatisticExtRelationId, statoid, stxowner);
/*
* XXX probably there should be a recordDependencyOnCurrentExtension call
* here too, but we'd have to add support for ALTER EXTENSION ADD/DROP
* STATISTICS, which is more work than it seems worth.
*/
/* Return stats object's address */
return myself;
}
| 1
|
323,947
|
void arm_gen_test_cc(int cc, int label)
{
TCGv_i32 tmp;
int inv;
switch (cc) {
case 0: /* eq: Z */
tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ZF, 0, label);
break;
case 1: /* ne: !Z */
tcg_gen_brcondi_i32(TCG_COND_NE, cpu_ZF, 0, label);
break;
case 2: /* cs: C */
tcg_gen_brcondi_i32(TCG_COND_NE, cpu_CF, 0, label);
break;
case 3: /* cc: !C */
tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_CF, 0, label);
break;
case 4: /* mi: N */
tcg_gen_brcondi_i32(TCG_COND_LT, cpu_NF, 0, label);
break;
case 5: /* pl: !N */
tcg_gen_brcondi_i32(TCG_COND_GE, cpu_NF, 0, label);
break;
case 6: /* vs: V */
tcg_gen_brcondi_i32(TCG_COND_LT, cpu_VF, 0, label);
break;
case 7: /* vc: !V */
tcg_gen_brcondi_i32(TCG_COND_GE, cpu_VF, 0, label);
break;
case 8: /* hi: C && !Z */
inv = gen_new_label();
tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_CF, 0, inv);
tcg_gen_brcondi_i32(TCG_COND_NE, cpu_ZF, 0, label);
gen_set_label(inv);
break;
case 9: /* ls: !C || Z */
tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_CF, 0, label);
tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ZF, 0, label);
break;
case 10: /* ge: N == V -> N ^ V == 0 */
tmp = tcg_temp_new_i32();
tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
tcg_temp_free_i32(tmp);
break;
case 11: /* lt: N != V -> N ^ V != 0 */
tmp = tcg_temp_new_i32();
tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
tcg_temp_free_i32(tmp);
break;
case 12: /* gt: !Z && N == V */
inv = gen_new_label();
tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ZF, 0, inv);
tmp = tcg_temp_new_i32();
tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
tcg_temp_free_i32(tmp);
gen_set_label(inv);
break;
case 13: /* le: Z || N != V */
tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ZF, 0, label);
tmp = tcg_temp_new_i32();
tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
tcg_temp_free_i32(tmp);
break;
default:
fprintf(stderr, "Bad condition code 0x%x\n", cc);
abort();
}
}
| 0
|
115,835
|
_cimg_math_parser():
code(_code),p_code_end(0),p_break((CImg<ulongT>*)(cimg_ulong)-2),
imgin(CImg<T>::const_empty()),listin(CImgList<T>::const_empty()),
imgout(CImg<T>::empty()),listout(CImgList<T>::empty()),
img_stats(_img_stats),list_stats(_list_stats),list_median(_list_median),debug_indent(0),
result_dim(0),break_type(0),constcache_size(0),is_parallelizable(true),is_fill(false),need_input_copy(false),
rng(0),calling_function(0) {
mem.assign(1 + _cimg_mp_slot_c,1,1,1,0); // Allow to skip 'is_empty?' test in operator()()
result = mem._data;
| 0
|
77,201
|
static int fill_umem_pbl_tbl(struct ib_umem *umem, u64 *pbl_tbl_orig,
int page_shift)
{
u64 *pbl_tbl = pbl_tbl_orig;
u64 page_size = BIT_ULL(page_shift);
struct ib_block_iter biter;
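/* walk the umem scatterlist in page_size blocks and record each block's DMA address in the PBL */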
rdma_for_each_block(umem->sg_head.sgl, &biter, umem->nmap, page_size)
*pbl_tbl++ = rdma_block_iter_dma_address(&biter);
return pbl_tbl - pbl_tbl_orig;
}
| 0
|
121,316
|
void rose_start_idletimer(struct sock *sk)
{
struct rose_sock *rose = rose_sk(sk);
sk_stop_timer(sk, &rose->idletimer);
if (rose->idle > 0) {
rose->idletimer.function = rose_idletimer_expiry;
rose->idletimer.expires = jiffies + rose->idle;
sk_reset_timer(sk, &rose->idletimer, rose->idletimer.expires);
}
}
| 0
|
449,976
|
static int hdr_validate_tokens(struct crypt_device *cd, json_object *hdr_jobj)
{
json_object *jobj;
if (!json_object_object_get_ex(hdr_jobj, "tokens", &jobj)) {
log_dbg(cd, "Missing tokens section.");
return 1;
}
json_object_object_foreach(jobj, key, val) {
if (!numbered(cd, "Token", key))
return 1;
if (LUKS2_token_validate(cd, hdr_jobj, val, key))
return 1;
}
return 0;
}
| 0
|
52,670
|
irc_server_set_send_default_tags (const char *tags)
{
irc_server_send_default_tags = tags;
}
| 0
|
503,793
|
StreamInfoImpl(
TimeSource& time_source,
const Network::ConnectionInfoProviderSharedPtr& downstream_connection_info_provider,
FilterState::LifeSpan life_span = FilterState::LifeSpan::FilterChain)
: StreamInfoImpl(absl::nullopt, time_source, downstream_connection_info_provider,
std::make_shared<FilterStateImpl>(life_span)) {}
| 0
|
244,860
|
PrintWebViewHelper::~PrintWebViewHelper() {}
| 0
|
256,091
|
static int dnxhd_find_frame_end(DNXHDParserContext *dctx,
const uint8_t *buf, int buf_size)
{
ParseContext *pc = &dctx->pc;
uint64_t state = pc->state64;
int pic_found = pc->frame_start_found;
int i = 0;
int interlaced = dctx->interlaced;
int cur_field = dctx->cur_field;
if (!pic_found) {
for (i = 0; i < buf_size; i++) {
state = (state << 8) | buf[i];
if (ff_dnxhd_check_header_prefix(state & 0xffffffffff00LL) != 0) {
i++;
pic_found = 1;
interlaced = (state&2)>>1; /* byte following the 5-byte header prefix */
cur_field = state&1;
dctx->cur_byte = 0;
dctx->remaining = 0;
break;
}
}
}
if (pic_found && !dctx->remaining) {
if (!buf_size) /* EOF considered as end of frame */
return 0;
for (; i < buf_size; i++) {
dctx->cur_byte++;
state = (state << 8) | buf[i];
if (dctx->cur_byte == 24) {
dctx->h = (state >> 32) & 0xFFFF;
} else if (dctx->cur_byte == 26) {
dctx->w = (state >> 32) & 0xFFFF;
} else if (dctx->cur_byte == 42) {
int cid = (state >> 32) & 0xFFFFFFFF;
if (cid <= 0)
continue;
dctx->remaining = avpriv_dnxhd_get_frame_size(cid);
if (dctx->remaining <= 0) {
dctx->remaining = dnxhd_get_hr_frame_size(cid, dctx->w, dctx->h);
if (dctx->remaining <= 0)
return dctx->remaining;
}
if (buf_size - i >= dctx->remaining && (!dctx->interlaced || dctx->cur_field)) {
int remaining = dctx->remaining;
pc->frame_start_found = 0;
pc->state64 = -1;
dctx->interlaced = interlaced;
dctx->cur_field = 0;
dctx->cur_byte = 0;
dctx->remaining = 0;
return remaining;
} else {
dctx->remaining -= buf_size;
}
}
}
} else if (pic_found) {
if (dctx->remaining > buf_size) {
dctx->remaining -= buf_size;
} else {
int remaining = dctx->remaining;
pc->frame_start_found = 0;
pc->state64 = -1;
dctx->interlaced = interlaced;
dctx->cur_field = 0;
dctx->cur_byte = 0;
dctx->remaining = 0;
return remaining;
}
}
pc->frame_start_found = pic_found;
pc->state64 = state;
dctx->interlaced = interlaced;
dctx->cur_field = cur_field;
return END_NOT_FOUND;
}
| 1
|
244,900
|
bool InputDispatcher::InputState::isNeutral() const {
return mKeyMementos.isEmpty() && mMotionMementos.isEmpty();
}
| 0
|
185,036
|
int GetPageCountFromSettingsDictionary(const DictionaryValue& settings) {
int count = 0;
const ListValue* page_range_array;
if (settings.GetList(printing::kSettingPageRange, &page_range_array)) {
for (size_t index = 0; index < page_range_array->GetSize(); ++index) {
const DictionaryValue* dict;
if (!page_range_array->GetDictionary(index, &dict))
continue;
printing::PageRange range;
if (!dict->GetInteger(printing::kSettingPageRangeFrom, &range.from) ||
!dict->GetInteger(printing::kSettingPageRangeTo, &range.to)) {
continue;
}
count += (range.to - range.from) + 1;
}
}
return count;
}
| 0
|
453,337
|
static void account_event(struct perf_event *event)
{
bool inc = false;
if (event->parent)
return;
if (event->attach_state & PERF_ATTACH_TASK)
inc = true;
if (event->attr.mmap || event->attr.mmap_data)
atomic_inc(&nr_mmap_events);
if (event->attr.comm)
atomic_inc(&nr_comm_events);
if (event->attr.namespaces)
atomic_inc(&nr_namespaces_events);
if (event->attr.cgroup)
atomic_inc(&nr_cgroup_events);
if (event->attr.task)
atomic_inc(&nr_task_events);
if (event->attr.freq)
account_freq_event();
if (event->attr.context_switch) {
atomic_inc(&nr_switch_events);
inc = true;
}
if (has_branch_stack(event))
inc = true;
if (is_cgroup_event(event))
inc = true;
if (event->attr.ksymbol)
atomic_inc(&nr_ksymbol_events);
if (event->attr.bpf_event)
atomic_inc(&nr_bpf_events);
if (event->attr.text_poke)
atomic_inc(&nr_text_poke_events);
if (inc) {
/*
* We need the mutex here because static_branch_enable()
* must complete *before* the perf_sched_count increment
* becomes visible.
*/
if (atomic_inc_not_zero(&perf_sched_count))
goto enabled;
mutex_lock(&perf_sched_mutex);
if (!atomic_read(&perf_sched_count)) {
static_branch_enable(&perf_sched_events);
/*
* Guarantee that all CPUs observe they key change and
* call the perf scheduling hooks before proceeding to
* install events that need them.
*/
synchronize_rcu();
}
/*
* Now that we have waited for the sync_sched(), allow further
* increments to by-pass the mutex.
*/
atomic_inc(&perf_sched_count);
mutex_unlock(&perf_sched_mutex);
}
enabled:
account_event_cpu(event, event->cpu);
account_pmu_sb_event(event);
}
| 0
|
474,755
|
struct nci_conn_info *nci_get_conn_info_by_conn_id(struct nci_dev *ndev,
int conn_id)
{
struct nci_conn_info *conn_info;
list_for_each_entry(conn_info, &ndev->conn_info_list, list) {
if (conn_info->conn_id == conn_id)
return conn_info;
}
return NULL;
}
| 0
|
407,461
|
static void parse_chanmodes(IRC_SERVER_REC *server, const char *sptr)
{
mode_func_t *modefuncs[] = {
modes_type_a,
modes_type_b,
modes_type_c,
modes_type_d
};
char **item, **chanmodes;
int i;
chanmodes = g_strsplit(sptr, ",", 5); /* ignore extras */
for (i = 0, item = chanmodes; *item != NULL && i < 4; item++, i++) {
unsigned char *p = (unsigned char*) *item;
while (*p != '\0') {
server->modes[(int)*p].func = modefuncs[i];
p++;
}
}
g_strfreev(chanmodes);
}
| 0
|
230,192
|
LinearHistogram::LinearHistogram(const std::string& name,
Sample minimum,
Sample maximum,
const BucketRanges* ranges)
: Histogram(name, minimum, maximum, ranges) {
}
| 0
|
253,511
|
TestBrowserWindow::TestLocationBar::GetWindowOpenDisposition() const {
return WindowOpenDisposition::CURRENT_TAB;
}
| 0
|
427,643
|
static int rowidWrite(Rtree *pRtree, sqlite3_int64 iRowid, sqlite3_int64 iNode){
sqlite3_bind_int64(pRtree->pWriteRowid, 1, iRowid);
sqlite3_bind_int64(pRtree->pWriteRowid, 2, iNode);
sqlite3_step(pRtree->pWriteRowid);
return sqlite3_reset(pRtree->pWriteRowid);
}
| 0
|
74,147
|
static GFINLINE void av1dmx_update_cts(GF_AV1DmxCtx *ctx)
{
assert(ctx->cur_fps.num);
assert(ctx->cur_fps.den);
if (ctx->timescale) {
u64 inc = ctx->cur_fps.den;
inc *= ctx->timescale;
inc /= ctx->cur_fps.num;
ctx->cts += inc;
} else {
ctx->cts += ctx->cur_fps.den;
}
}
| 0
|
23,056
|
static int cond_continue(i_ctx_t *i_ctx_p) {
    os_ptr op = osp;
    es_ptr ep = esp;
    int code;
    check_type(*op, t_boolean);
    if (op->value.boolval) {
        array_get(imemory, ep, 1L, ep);
        esfile_check_cache();
        code = o_pop_estack;
    } else if (r_size(ep) > 2) {
        const ref_packed *elts = ep->value.packed;
        check_estack(2);
        r_dec_size(ep, 2);
        elts = packed_next(elts);
        elts = packed_next(elts);
        ep->value.packed = elts;
        array_get(imemory, ep, 0L, ep + 2);
        make_op_estack(ep + 1, cond_continue);
        esp = ep + 2;
        esfile_check_cache();
        code = o_push_estack;
    } else {
        esp = ep - 1;
        code = o_pop_estack;
    }
    pop(1);
    return code;
}
| 0
|
273,764
|
void Context::scriptLog(spdlog::level::level_enum level, absl::string_view message) {
switch (level) {
case spdlog::level::trace:
ENVOY_LOG(trace, "wasm log{}: {}", log_prefix(), message);
return;
case spdlog::level::debug:
ENVOY_LOG(debug, "wasm log{}: {}", log_prefix(), message);
return;
case spdlog::level::info:
ENVOY_LOG(info, "wasm log{}: {}", log_prefix(), message);
return;
case spdlog::level::warn:
ENVOY_LOG(warn, "wasm log{}: {}", log_prefix(), message);
return;
case spdlog::level::err:
ENVOY_LOG(error, "wasm log{}: {}", log_prefix(), message);
return;
case spdlog::level::critical:
ENVOY_LOG(critical, "wasm log{}: {}", log_prefix(), message);
return;
case spdlog::level::off:
NOT_IMPLEMENTED_GCOVR_EXCL_LINE;
}
}
| 0
|
233,064
|
SimpleGetHelperResult SimpleGetHelper(base::span<const MockRead> data_reads) {
MockWrite data_writes[] = {
MockWrite("GET / HTTP/1.1\r\n"
"Host: www.example.org\r\n"
"Connection: keep-alive\r\n\r\n"),
};
StaticSocketDataProvider reads(data_reads, data_writes);
StaticSocketDataProvider* data[] = {&reads};
SimpleGetHelperResult out = SimpleGetHelperForData(data);
EXPECT_EQ(CountWriteBytes(data_writes), out.total_sent_bytes);
return out;
}
| 0
|
398,978
|
static int selinux_task_setnice(struct task_struct *p, int nice)
{
return current_has_perm(p, PROCESS__SETSCHED);
}
| 0
|
318,755
|
static void v9fs_renameat(void *opaque)
{
ssize_t err = 0;
size_t offset = 7;
V9fsPDU *pdu = opaque;
V9fsState *s = pdu->s;
int32_t olddirfid, newdirfid;
V9fsString old_name, new_name;
v9fs_string_init(&old_name);
v9fs_string_init(&new_name);
err = pdu_unmarshal(pdu, offset, "dsds", &olddirfid,
&old_name, &newdirfid, &new_name);
    if (err < 0) {
        goto out_err;
    }
    if (name_is_illegal(old_name.data) || name_is_illegal(new_name.data)) {
        err = -ENOENT;
        goto out_err;
    }
    v9fs_path_write_lock(s);
    err = v9fs_complete_renameat(pdu, olddirfid,
                                 &old_name, newdirfid, &new_name);
    v9fs_path_unlock(s);
    if (!err) {
        err = offset;
    }
out_err:
    pdu_complete(pdu, err);
    v9fs_string_free(&old_name);
    v9fs_string_free(&new_name);
}
|
342,550
|
static void gen_window_check1(DisasContext *dc, unsigned r1)
{
if (dc->tb->flags & XTENSA_TBFLAG_EXCM) {
return;
}
if (option_enabled(dc, XTENSA_OPTION_WINDOWED_REGISTER) &&
r1 / 4 > dc->used_window) {
TCGv_i32 pc = tcg_const_i32(dc->pc);
TCGv_i32 w = tcg_const_i32(r1 / 4);
dc->used_window = r1 / 4;
gen_advance_ccount(dc);
gen_helper_window_check(cpu_env, pc, w);
tcg_temp_free(w);
tcg_temp_free(pc);
}
}
| 0
|
26,742
|
static int jbig2_decode_get_code(Jbig2MmrCtx *mmr, const mmr_table_node *table, int initial_bits) {
    uint32_t word = mmr->word;
    int table_ix = word >> (32 - initial_bits);
    int val = table[table_ix].val;
    int n_bits = table[table_ix].n_bits;
    if (n_bits > initial_bits) {
        int mask = (1 << (32 - initial_bits)) - 1;
        table_ix = val + ((word & mask) >> (32 - n_bits));
        val = table[table_ix].val;
        n_bits = initial_bits + table[table_ix].n_bits;
    }
    jbig2_decode_mmr_consume(mmr, n_bits);
    return val;
}
| 0
|
517,904
|
bool Item::check_type_can_return_time(const char *opname) const
{
const Type_handler *handler= type_handler();
if (handler->can_return_time())
return false;
my_error(ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION, MYF(0),
handler->name().ptr(), opname);
return true;
}
| 0
|
170,984
|
xmlParseCtxtExternalEntity(xmlParserCtxtPtr ctx, const xmlChar *URL,
const xmlChar *ID, xmlNodePtr *lst) {
xmlParserCtxtPtr ctxt;
xmlDocPtr newDoc;
xmlNodePtr newRoot;
xmlSAXHandlerPtr oldsax = NULL;
int ret = 0;
xmlChar start[4];
xmlCharEncoding enc;
if (ctx == NULL) return(-1);
if (((ctx->depth > 40) && ((ctx->options & XML_PARSE_HUGE) == 0)) ||
(ctx->depth > 1024)) {
return(XML_ERR_ENTITY_LOOP);
}
if (lst != NULL)
*lst = NULL;
if ((URL == NULL) && (ID == NULL))
return(-1);
if (ctx->myDoc == NULL) /* @@ relax but check for dereferences */
return(-1);
ctxt = xmlCreateEntityParserCtxtInternal(URL, ID, NULL, ctx);
if (ctxt == NULL) {
return(-1);
}
oldsax = ctxt->sax;
ctxt->sax = ctx->sax;
xmlDetectSAX2(ctxt);
newDoc = xmlNewDoc(BAD_CAST "1.0");
if (newDoc == NULL) {
xmlFreeParserCtxt(ctxt);
return(-1);
}
newDoc->properties = XML_DOC_INTERNAL;
if (ctx->myDoc->dict) {
newDoc->dict = ctx->myDoc->dict;
xmlDictReference(newDoc->dict);
}
if (ctx->myDoc != NULL) {
newDoc->intSubset = ctx->myDoc->intSubset;
newDoc->extSubset = ctx->myDoc->extSubset;
}
if (ctx->myDoc->URL != NULL) {
newDoc->URL = xmlStrdup(ctx->myDoc->URL);
}
newRoot = xmlNewDocNode(newDoc, NULL, BAD_CAST "pseudoroot", NULL);
if (newRoot == NULL) {
ctxt->sax = oldsax;
xmlFreeParserCtxt(ctxt);
newDoc->intSubset = NULL;
newDoc->extSubset = NULL;
xmlFreeDoc(newDoc);
return(-1);
}
xmlAddChild((xmlNodePtr) newDoc, newRoot);
nodePush(ctxt, newDoc->children);
if (ctx->myDoc == NULL) {
ctxt->myDoc = newDoc;
} else {
ctxt->myDoc = ctx->myDoc;
newDoc->children->doc = ctx->myDoc;
}
/*
* Get the 4 first bytes and decode the charset
* if enc != XML_CHAR_ENCODING_NONE
* plug some encoding conversion routines.
*/
GROW
if ((ctxt->input->end - ctxt->input->cur) >= 4) {
start[0] = RAW;
start[1] = NXT(1);
start[2] = NXT(2);
start[3] = NXT(3);
enc = xmlDetectCharEncoding(start, 4);
if (enc != XML_CHAR_ENCODING_NONE) {
xmlSwitchEncoding(ctxt, enc);
}
}
/*
* Parse a possible text declaration first
*/
if ((CMP5(CUR_PTR, '<', '?', 'x', 'm', 'l')) && (IS_BLANK_CH(NXT(5)))) {
xmlParseTextDecl(ctxt);
/*
* An XML-1.0 document can't reference an entity not XML-1.0
*/
if ((xmlStrEqual(ctx->version, BAD_CAST "1.0")) &&
(!xmlStrEqual(ctxt->input->version, BAD_CAST "1.0"))) {
xmlFatalErrMsg(ctxt, XML_ERR_VERSION_MISMATCH,
"Version mismatch between document and entity\n");
}
}
/*
* Doing validity checking on chunk doesn't make sense
*/
ctxt->instate = XML_PARSER_CONTENT;
ctxt->validate = ctx->validate;
ctxt->valid = ctx->valid;
ctxt->loadsubset = ctx->loadsubset;
ctxt->depth = ctx->depth + 1;
ctxt->replaceEntities = ctx->replaceEntities;
if (ctxt->validate) {
ctxt->vctxt.error = ctx->vctxt.error;
ctxt->vctxt.warning = ctx->vctxt.warning;
} else {
ctxt->vctxt.error = NULL;
ctxt->vctxt.warning = NULL;
}
ctxt->vctxt.nodeTab = NULL;
ctxt->vctxt.nodeNr = 0;
ctxt->vctxt.nodeMax = 0;
ctxt->vctxt.node = NULL;
if (ctxt->dict != NULL) xmlDictFree(ctxt->dict);
ctxt->dict = ctx->dict;
ctxt->str_xml = xmlDictLookup(ctxt->dict, BAD_CAST "xml", 3);
ctxt->str_xmlns = xmlDictLookup(ctxt->dict, BAD_CAST "xmlns", 5);
ctxt->str_xml_ns = xmlDictLookup(ctxt->dict, XML_XML_NAMESPACE, 36);
ctxt->dictNames = ctx->dictNames;
ctxt->attsDefault = ctx->attsDefault;
ctxt->attsSpecial = ctx->attsSpecial;
ctxt->linenumbers = ctx->linenumbers;
xmlParseContent(ctxt);
ctx->validate = ctxt->validate;
ctx->valid = ctxt->valid;
if ((RAW == '<') && (NXT(1) == '/')) {
xmlFatalErr(ctxt, XML_ERR_NOT_WELL_BALANCED, NULL);
} else if (RAW != 0) {
xmlFatalErr(ctxt, XML_ERR_EXTRA_CONTENT, NULL);
}
if (ctxt->node != newDoc->children) {
xmlFatalErr(ctxt, XML_ERR_NOT_WELL_BALANCED, NULL);
}
if (!ctxt->wellFormed) {
if (ctxt->errNo == 0)
ret = 1;
else
ret = ctxt->errNo;
} else {
if (lst != NULL) {
xmlNodePtr cur;
/*
* Return the newly created nodeset after unlinking it from
             * the pseudo parent.
*/
cur = newDoc->children->children;
*lst = cur;
while (cur != NULL) {
cur->parent = NULL;
cur = cur->next;
}
newDoc->children->children = NULL;
}
ret = 0;
}
ctxt->sax = oldsax;
ctxt->dict = NULL;
ctxt->attsDefault = NULL;
ctxt->attsSpecial = NULL;
xmlFreeParserCtxt(ctxt);
newDoc->intSubset = NULL;
newDoc->extSubset = NULL;
xmlFreeDoc(newDoc);
return(ret);
}
| 0