id (int32) | func (string) | target (bool) | project (string) | commit_id (string)
---|---|---|---|---|
499 | bool colo_supported(void)
{
return true;
}
| false | qemu | fd198f9002a9e1f070c82b04d3229c18d9a49471 |
500 | static uint64_t pxa2xx_i2s_read(void *opaque, hwaddr addr,
unsigned size)
{
PXA2xxI2SState *s = (PXA2xxI2SState *) opaque;
switch (addr) {
case SACR0:
return s->control[0];
case SACR1:
return s->control[1];
case SASR0:
return s->status;
case SAIMR:
return s->mask;
case SAICR:
return 0;
case SADIV:
return s->clk;
case SADR:
if (s->rx_len > 0) {
s->rx_len --;
pxa2xx_i2s_update(s);
return s->codec_in(s->opaque);
}
return 0;
default:
printf("%s: Bad register " REG_FMT "\n", __FUNCTION__, addr);
break;
}
return 0;
}
| false | qemu | a89f364ae8740dfc31b321eed9ee454e996dc3c1 |
502 | char *desc_get_buf(DescInfo *info, bool read_only)
{
PCIDevice *dev = PCI_DEVICE(info->ring->r);
size_t size = read_only ? le16_to_cpu(info->desc.tlv_size) :
le16_to_cpu(info->desc.buf_size);
if (size > info->buf_size) {
info->buf = g_realloc(info->buf, size);
info->buf_size = size;
}
if (!info->buf) {
return NULL;
}
if (pci_dma_read(dev, le64_to_cpu(info->desc.buf_addr), info->buf, size)) {
return NULL;
}
return info->buf;
}
| false | qemu | 4cee3cf35c05c863f5acf87af915298c752eefd9 |
503 | static void copy_context_after_encode(MpegEncContext *d, MpegEncContext *s, int type){
int i;
memcpy(d->mv, s->mv, 2*4*2*sizeof(int));
memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
/* mpeg1 */
d->mb_incr= s->mb_incr;
for(i=0; i<3; i++)
d->last_dc[i]= s->last_dc[i];
/* statistics */
d->mv_bits= s->mv_bits;
d->i_tex_bits= s->i_tex_bits;
d->p_tex_bits= s->p_tex_bits;
d->i_count= s->i_count;
d->p_count= s->p_count;
d->skip_count= s->skip_count;
d->misc_bits= s->misc_bits;
d->mb_intra= s->mb_intra;
d->mb_skiped= s->mb_skiped;
d->mv_type= s->mv_type;
d->mv_dir= s->mv_dir;
d->pb= s->pb;
d->block= s->block;
for(i=0; i<6; i++)
d->block_last_index[i]= s->block_last_index[i];
}
| false | FFmpeg | 7f2fe444a39bca733d390b6608801c5f002bfd31 |
504 | static inline int gen_intermediate_code_internal (CPUState *env,
TranslationBlock *tb,
int search_pc)
{
DisasContext ctx, *ctxp = &ctx;
opc_handler_t **table, *handler;
target_ulong pc_start;
uint16_t *gen_opc_end;
int j, lj = -1;
pc_start = tb->pc;
gen_opc_ptr = gen_opc_buf;
gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
gen_opparam_ptr = gen_opparam_buf;
nb_gen_labels = 0;
ctx.nip = pc_start;
ctx.tb = tb;
ctx.exception = EXCP_NONE;
ctx.spr_cb = env->spr_cb;
#if defined(CONFIG_USER_ONLY)
ctx.mem_idx = msr_le;
#if defined(TARGET_PPC64)
ctx.mem_idx |= msr_sf << 1;
#endif
#else
ctx.supervisor = 1 - msr_pr;
ctx.mem_idx = ((1 - msr_pr) << 1) | msr_le;
#if defined(TARGET_PPC64)
ctx.mem_idx |= msr_sf << 2;
#endif
#endif
#if defined(TARGET_PPC64)
ctx.sf_mode = msr_sf;
#endif
ctx.fpu_enabled = msr_fp;
#if defined(TARGET_PPCEMB)
ctx.spe_enabled = msr_spe;
#endif
ctx.singlestep_enabled = env->singlestep_enabled;
#if defined (DO_SINGLE_STEP) && 0
/* Single step trace mode */
msr_se = 1;
#endif
/* Set env in case of segfault during code fetch */
while (ctx.exception == EXCP_NONE && gen_opc_ptr < gen_opc_end) {
if (unlikely(env->nb_breakpoints > 0)) {
for (j = 0; j < env->nb_breakpoints; j++) {
if (env->breakpoints[j] == ctx.nip) {
gen_update_nip(&ctx, ctx.nip);
gen_op_debug();
break;
}
}
}
if (unlikely(search_pc)) {
j = gen_opc_ptr - gen_opc_buf;
if (lj < j) {
lj++;
while (lj < j)
gen_opc_instr_start[lj++] = 0;
gen_opc_pc[lj] = ctx.nip;
gen_opc_instr_start[lj] = 1;
}
}
#if defined PPC_DEBUG_DISAS
if (loglevel & CPU_LOG_TB_IN_ASM) {
fprintf(logfile, "----------------\n");
fprintf(logfile, "nip=" ADDRX " super=%d ir=%d\n",
ctx.nip, 1 - msr_pr, msr_ir);
}
#endif
ctx.opcode = ldl_code(ctx.nip);
if (msr_le) {
ctx.opcode = ((ctx.opcode & 0xFF000000) >> 24) |
((ctx.opcode & 0x00FF0000) >> 8) |
((ctx.opcode & 0x0000FF00) << 8) |
((ctx.opcode & 0x000000FF) << 24);
}
#if defined PPC_DEBUG_DISAS
if (loglevel & CPU_LOG_TB_IN_ASM) {
fprintf(logfile, "translate opcode %08x (%02x %02x %02x) (%s)\n",
ctx.opcode, opc1(ctx.opcode), opc2(ctx.opcode),
opc3(ctx.opcode), msr_le ? "little" : "big");
}
#endif
ctx.nip += 4;
table = env->opcodes;
handler = table[opc1(ctx.opcode)];
if (is_indirect_opcode(handler)) {
table = ind_table(handler);
handler = table[opc2(ctx.opcode)];
if (is_indirect_opcode(handler)) {
table = ind_table(handler);
handler = table[opc3(ctx.opcode)];
}
}
/* Is opcode *REALLY* valid ? */
if (unlikely(handler->handler == &gen_invalid)) {
if (loglevel != 0) {
fprintf(logfile, "invalid/unsupported opcode: "
"%02x - %02x - %02x (%08x) 0x" ADDRX " %d\n",
opc1(ctx.opcode), opc2(ctx.opcode),
opc3(ctx.opcode), ctx.opcode, ctx.nip - 4, msr_ir);
} else {
printf("invalid/unsupported opcode: "
"%02x - %02x - %02x (%08x) 0x" ADDRX " %d\n",
opc1(ctx.opcode), opc2(ctx.opcode),
opc3(ctx.opcode), ctx.opcode, ctx.nip - 4, msr_ir);
}
} else {
if (unlikely((ctx.opcode & handler->inval) != 0)) {
if (loglevel != 0) {
fprintf(logfile, "invalid bits: %08x for opcode: "
"%02x -%02x - %02x (%08x) 0x" ADDRX "\n",
ctx.opcode & handler->inval, opc1(ctx.opcode),
opc2(ctx.opcode), opc3(ctx.opcode),
ctx.opcode, ctx.nip - 4);
} else {
printf("invalid bits: %08x for opcode: "
"%02x -%02x - %02x (%08x) 0x" ADDRX "\n",
ctx.opcode & handler->inval, opc1(ctx.opcode),
opc2(ctx.opcode), opc3(ctx.opcode),
ctx.opcode, ctx.nip - 4);
}
RET_INVAL(ctxp);
break;
}
}
(*(handler->handler))(&ctx);
#if defined(DO_PPC_STATISTICS)
handler->count++;
#endif
/* Check trace mode exceptions */
#if 0 // XXX: buggy on embedded PowerPC
if (unlikely((msr_be && ctx.exception == EXCP_BRANCH) ||
/* Check in single step trace mode
* we need to stop except if:
* - rfi, trap or syscall
* - first instruction of an exception handler
*/
(msr_se && (ctx.nip < 0x100 ||
ctx.nip > 0xF00 ||
(ctx.nip & 0xFC) != 0x04) &&
ctx.exception != EXCP_SYSCALL &&
ctx.exception != EXCP_SYSCALL_USER &&
ctx.exception != EXCP_TRAP))) {
RET_EXCP(ctxp, EXCP_TRACE, 0);
}
#endif
/* if we reach a page boundary or are single stepping, stop
* generation
*/
if (unlikely(((ctx.nip & (TARGET_PAGE_SIZE - 1)) == 0) ||
(env->singlestep_enabled))) {
break;
}
#if defined (DO_SINGLE_STEP)
break;
#endif
}
if (ctx.exception == EXCP_NONE) {
gen_goto_tb(&ctx, 0, ctx.nip);
} else if (ctx.exception != EXCP_BRANCH) {
gen_op_reset_T0();
/* Generate the return instruction */
gen_op_exit_tb();
}
*gen_opc_ptr = INDEX_op_end;
if (unlikely(search_pc)) {
j = gen_opc_ptr - gen_opc_buf;
lj++;
while (lj <= j)
gen_opc_instr_start[lj++] = 0;
} else {
tb->size = ctx.nip - pc_start;
}
#if defined(DEBUG_DISAS)
if (loglevel & CPU_LOG_TB_CPU) {
fprintf(logfile, "---------------- excp: %04x\n", ctx.exception);
cpu_dump_state(env, logfile, fprintf, 0);
}
if (loglevel & CPU_LOG_TB_IN_ASM) {
int flags;
flags = env->bfd_mach;
flags |= msr_le << 16;
fprintf(logfile, "IN: %s\n", lookup_symbol(pc_start));
target_disas(logfile, pc_start, ctx.nip - pc_start, flags);
fprintf(logfile, "\n");
}
if (loglevel & CPU_LOG_TB_OP) {
fprintf(logfile, "OP:\n");
dump_ops(gen_opc_buf, gen_opparam_buf);
fprintf(logfile, "\n");
}
#endif
return 0;
}
| false | qemu | e1833e1f96456fd8fc17463246fe0b2050e68efb |
505 | void *address_space_map(AddressSpace *as,
target_phys_addr_t addr,
target_phys_addr_t *plen,
bool is_write)
{
AddressSpaceDispatch *d = as->dispatch;
target_phys_addr_t len = *plen;
target_phys_addr_t todo = 0;
int l;
target_phys_addr_t page;
MemoryRegionSection *section;
ram_addr_t raddr = RAM_ADDR_MAX;
ram_addr_t rlen;
void *ret;
while (len > 0) {
page = addr & TARGET_PAGE_MASK;
l = (page + TARGET_PAGE_SIZE) - addr;
if (l > len)
l = len;
section = phys_page_find(d, page >> TARGET_PAGE_BITS);
if (!(memory_region_is_ram(section->mr) && !section->readonly)) {
if (todo || bounce.buffer) {
break;
}
bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
bounce.addr = addr;
bounce.len = l;
if (!is_write) {
address_space_read(as, addr, bounce.buffer, l);
}
*plen = l;
return bounce.buffer;
}
if (!todo) {
raddr = memory_region_get_ram_addr(section->mr)
+ memory_region_section_addr(section, addr);
}
len -= l;
addr += l;
todo += l;
}
rlen = todo;
ret = qemu_ram_ptr_length(raddr, &rlen);
*plen = rlen;
return ret;
}
| false | qemu | a8170e5e97ad17ca169c64ba87ae2f53850dab4c |
506 | GenericList *visit_next_list(Visitor *v, GenericList **list, size_t size)
{
assert(list && size >= sizeof(GenericList));
return v->next_list(v, list, size);
}
| false | qemu | d9f62dde1303286b24ac8ce88be27e2b9b9c5f46 |
507 | static void *vring_map(MemoryRegion **mr, hwaddr phys, hwaddr len,
bool is_write)
{
MemoryRegionSection section = memory_region_find(get_system_memory(), phys, len);
if (!section.mr || int128_get64(section.size) < len) {
goto out;
}
if (is_write && section.readonly) {
goto out;
}
if (!memory_region_is_ram(section.mr)) {
goto out;
}
/* Ignore regions with dirty logging, we cannot mark them dirty */
if (memory_region_is_logging(section.mr)) {
goto out;
}
*mr = section.mr;
return memory_region_get_ram_ptr(section.mr) + section.offset_within_region;
out:
memory_region_unref(section.mr);
*mr = NULL;
return NULL;
}
| false | qemu | 2d1a35bef0ed96b3f23535e459c552414ccdbafd |
508 | uint32_t ldl_be_phys(target_phys_addr_t addr)
{
return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
}
| false | qemu | a8170e5e97ad17ca169c64ba87ae2f53850dab4c |
509 | stream_push(StreamSlave *sink, uint8_t *buf, size_t len, uint32_t *app)
{
StreamSlaveClass *k = STREAM_SLAVE_GET_CLASS(sink);
return k->push(sink, buf, len, app);
}
| false | qemu | 42bb9c9178ae7ac4c439172b1ae99cc29188a5c6 |
511 | static int raw_write(BlockDriverState *bs, int64_t sector_num,
const uint8_t *buf, int nb_sectors)
{
return bdrv_write(bs->file, sector_num, buf, nb_sectors);
}
| false | qemu | d8b7e0adf562277180f96ecbd7f1777a384a0308 |
513 | static void tftp_handle_rrq(Slirp *slirp, struct tftp_t *tp, int pktlen)
{
struct tftp_session *spt;
int s, k;
size_t prefix_len;
char *req_fname;
/* check if a session already exists and if so terminate it */
s = tftp_session_find(slirp, tp);
if (s >= 0) {
tftp_session_terminate(&slirp->tftp_sessions[s]);
}
s = tftp_session_allocate(slirp, tp);
if (s < 0) {
return;
}
spt = &slirp->tftp_sessions[s];
/* unspecified prefix means service disabled */
if (!slirp->tftp_prefix) {
tftp_send_error(spt, 2, "Access violation", tp);
return;
}
/* skip header fields */
k = 0;
pktlen -= ((uint8_t *)&tp->x.tp_buf[0] - (uint8_t *)tp);
/* prepend tftp_prefix */
prefix_len = strlen(slirp->tftp_prefix);
spt->filename = qemu_malloc(prefix_len + TFTP_FILENAME_MAX + 2);
memcpy(spt->filename, slirp->tftp_prefix, prefix_len);
spt->filename[prefix_len] = '/';
/* get name */
req_fname = spt->filename + prefix_len + 1;
while (1) {
if (k >= TFTP_FILENAME_MAX || k >= pktlen) {
tftp_send_error(spt, 2, "Access violation", tp);
return;
}
req_fname[k] = (char)tp->x.tp_buf[k];
if (req_fname[k++] == '\0') {
break;
}
}
/* check mode */
if ((pktlen - k) < 6) {
tftp_send_error(spt, 2, "Access violation", tp);
return;
}
if (memcmp(&tp->x.tp_buf[k], "octet\0", 6) != 0) {
tftp_send_error(spt, 4, "Unsupported transfer mode", tp);
return;
}
k += 6; /* skipping octet */
/* do sanity checks on the filename */
if (!strncmp(req_fname, "../", 3) ||
req_fname[strlen(req_fname) - 1] == '/' ||
strstr(req_fname, "/../")) {
tftp_send_error(spt, 2, "Access violation", tp);
return;
}
/* check if the file exists */
if (tftp_read_data(spt, 0, NULL, 0) < 0) {
tftp_send_error(spt, 1, "File not found", tp);
return;
}
if (tp->x.tp_buf[pktlen - 1] != 0) {
tftp_send_error(spt, 2, "Access violation", tp);
return;
}
while (k < pktlen) {
const char *key, *value;
key = (const char *)&tp->x.tp_buf[k];
k += strlen(key) + 1;
if (k >= pktlen) {
tftp_send_error(spt, 2, "Access violation", tp);
return;
}
value = (const char *)&tp->x.tp_buf[k];
k += strlen(value) + 1;
if (strcmp(key, "tsize") == 0) {
int tsize = atoi(value);
struct stat stat_p;
if (tsize == 0) {
if (stat(spt->filename, &stat_p) == 0)
tsize = stat_p.st_size;
else {
tftp_send_error(spt, 1, "File not found", tp);
return;
}
}
tftp_send_oack(spt, "tsize", tsize, tp);
return;
}
}
tftp_send_data(spt, 1, tp);
}
| false | qemu | facf1a60f29853590073f321e3cba491a5ee097a |
515 | static MegasasCmd *megasas_next_frame(MegasasState *s,
target_phys_addr_t frame)
{
MegasasCmd *cmd = NULL;
int num = 0, index;
cmd = megasas_lookup_frame(s, frame);
if (cmd) {
trace_megasas_qf_found(cmd->index, cmd->pa);
return cmd;
}
index = s->reply_queue_head;
num = 0;
while (num < s->fw_cmds) {
if (!s->frames[index].pa) {
cmd = &s->frames[index];
break;
}
index = megasas_next_index(s, index, s->fw_cmds);
num++;
}
if (!cmd) {
trace_megasas_qf_failed(frame);
}
trace_megasas_qf_new(index, cmd);
return cmd;
}
| false | qemu | a8170e5e97ad17ca169c64ba87ae2f53850dab4c |
516 | static int hnm_decode_frame(AVCodecContext *avctx, void *data,
int *got_frame, AVPacket *avpkt)
{
AVFrame *frame = data;
Hnm4VideoContext *hnm = avctx->priv_data;
int ret;
uint16_t chunk_id;
if (avpkt->size < 8) {
if ((ret = ff_get_buffer(avctx, frame, 0)) < 0)
return ret;
chunk_id = AV_RL16(avpkt->data + 4);
if (chunk_id == HNM4_CHUNK_ID_PL) {
hnm_update_palette(avctx, avpkt->data, avpkt->size);
frame->palette_has_changed = 1;
} else if (chunk_id == HNM4_CHUNK_ID_IZ) {
unpack_intraframe(avctx, avpkt->data + 12, avpkt->size - 12);
memcpy(hnm->previous, hnm->current, hnm->width * hnm->height);
if (hnm->version == 0x4a)
memcpy(hnm->processed, hnm->current, hnm->width * hnm->height);
else
postprocess_current_frame(avctx);
copy_processed_frame(avctx, frame);
frame->pict_type = AV_PICTURE_TYPE_I;
frame->key_frame = 1;
memcpy(frame->data[1], hnm->palette, 256 * 4);
*got_frame = 1;
} else if (chunk_id == HNM4_CHUNK_ID_IU) {
if (hnm->version == 0x4a) {
decode_interframe_v4a(avctx, avpkt->data + 8, avpkt->size - 8);
memcpy(hnm->processed, hnm->current, hnm->width * hnm->height);
} else {
decode_interframe_v4(avctx, avpkt->data + 8, avpkt->size - 8);
postprocess_current_frame(avctx);
copy_processed_frame(avctx, frame);
frame->pict_type = AV_PICTURE_TYPE_P;
frame->key_frame = 0;
memcpy(frame->data[1], hnm->palette, 256 * 4);
*got_frame = 1;
hnm_flip_buffers(hnm);
} else {
av_log(avctx, AV_LOG_ERROR, "invalid chunk id: %d\n", chunk_id);
return avpkt->size;
| true | FFmpeg | 0398b7cbd39abb049775d558ccc4ccf6dc01e92c |
517 | static void test_nesting(void)
{
Coroutine *root;
NestData nd = {
.n_enter = 0,
.n_return = 0,
.max = 128,
};
root = qemu_coroutine_create(nest);
qemu_coroutine_enter(root, &nd);
/* Must enter and return from max nesting level */
g_assert_cmpint(nd.n_enter, ==, nd.max);
g_assert_cmpint(nd.n_return, ==, nd.max);
}
| true | qemu | 0b8b8753e4d94901627b3e86431230f2319215c4 |
518 | static int fdctrl_connect_drives(FDCtrl *fdctrl)
{
unsigned int i;
FDrive *drive;
for (i = 0; i < MAX_FD; i++) {
drive = &fdctrl->drives[i];
drive->fdctrl = fdctrl;
if (drive->bs) {
if (bdrv_get_on_error(drive->bs, 0) != BLOCK_ERR_STOP_ENOSPC) {
error_report("fdc doesn't support drive option werror");
return -1;
}
if (bdrv_get_on_error(drive->bs, 1) != BLOCK_ERR_REPORT) {
error_report("fdc doesn't support drive option rerror");
return -1;
}
}
fd_init(drive);
fd_revalidate(drive);
if (drive->bs) {
bdrv_set_dev_ops(drive->bs, &fdctrl_block_ops, drive);
}
}
return 0;
}
| true | qemu | cfb08fbafcd946341bdf14103293887763802697 |
519 | static int ac3_sync(uint64_t state, AACAC3ParseContext *hdr_info,
int *need_next_header, int *new_frame_start)
{
int err;
union {
uint64_t u64;
uint8_t u8[8];
} tmp = { av_be2ne64(state) };
AC3HeaderInfo hdr, *phdr = &hdr;
GetBitContext gbc;
init_get_bits(&gbc, tmp.u8+8-AC3_HEADER_SIZE, 54);
err = avpriv_ac3_parse_header2(&gbc, &phdr);
if(err < 0)
return 0;
hdr_info->sample_rate = hdr.sample_rate;
hdr_info->bit_rate = hdr.bit_rate;
hdr_info->channels = hdr.channels;
hdr_info->channel_layout = hdr.channel_layout;
hdr_info->samples = hdr.num_blocks * 256;
hdr_info->service_type = hdr.bitstream_mode;
if (hdr.bitstream_mode == 0x7 && hdr.channels > 1)
hdr_info->service_type = AV_AUDIO_SERVICE_TYPE_KARAOKE;
if(hdr.bitstream_id>10)
hdr_info->codec_id = AV_CODEC_ID_EAC3;
else if (hdr_info->codec_id == AV_CODEC_ID_NONE)
hdr_info->codec_id = AV_CODEC_ID_AC3;
*need_next_header = (hdr.frame_type != EAC3_FRAME_TYPE_AC3_CONVERT);
*new_frame_start = (hdr.frame_type != EAC3_FRAME_TYPE_DEPENDENT);
return hdr.frame_size;
}
| true | FFmpeg | fccd85b9f30525f88692f53134eba41f1f2d90db |
520 | static bool use_goto_tb(DisasContext *ctx, target_ulong dest)
{
/* Suppress goto_tb in the case of single-stepping and IO. */
if ((ctx->base.tb->cflags & CF_LAST_IO) || ctx->base.singlestep_enabled) {
return false;
}
return true;
}
| true | qemu | c5a49c63fa26e8825ad101dfe86339ae4c216539 |
522 | QEMUFile *qemu_bufopen(const char *mode, QEMUSizedBuffer *input)
{
QEMUBuffer *s;
if (mode == NULL || (mode[0] != 'r' && mode[0] != 'w') ||
mode[1] != '\0') {
error_report("qemu_bufopen: Argument validity check failed");
return NULL;
}
s = g_malloc0(sizeof(QEMUBuffer));
if (mode[0] == 'r') {
s->qsb = input;
}
if (s->qsb == NULL) {
s->qsb = qsb_create(NULL, 0);
}
if (!s->qsb) {
g_free(s);
error_report("qemu_bufopen: qsb_create failed");
return NULL;
}
if (mode[0] == 'r') {
s->file = qemu_fopen_ops(s, &buf_read_ops);
} else {
s->file = qemu_fopen_ops(s, &buf_write_ops);
}
return s->file;
}
| true | qemu | 60fe637bf0e4d7989e21e50f52526444765c63b4 |
523 | static void mips_cpu_class_init(ObjectClass *c, void *data)
{
MIPSCPUClass *mcc = MIPS_CPU_CLASS(c);
CPUClass *cc = CPU_CLASS(c);
DeviceClass *dc = DEVICE_CLASS(c);
mcc->parent_realize = dc->realize;
dc->realize = mips_cpu_realizefn;
mcc->parent_reset = cc->reset;
cc->reset = mips_cpu_reset;
cc->has_work = mips_cpu_has_work;
cc->do_interrupt = mips_cpu_do_interrupt;
cc->cpu_exec_interrupt = mips_cpu_exec_interrupt;
cc->dump_state = mips_cpu_dump_state;
cc->set_pc = mips_cpu_set_pc;
cc->synchronize_from_tb = mips_cpu_synchronize_from_tb;
cc->gdb_read_register = mips_cpu_gdb_read_register;
cc->gdb_write_register = mips_cpu_gdb_write_register;
#ifdef CONFIG_USER_ONLY
cc->handle_mmu_fault = mips_cpu_handle_mmu_fault;
#else
cc->do_unassigned_access = mips_cpu_unassigned_access;
cc->do_unaligned_access = mips_cpu_do_unaligned_access;
cc->get_phys_page_debug = mips_cpu_get_phys_page_debug;
cc->vmsd = &vmstate_mips_cpu;
#endif
cc->disas_set_info = mips_cpu_disas_set_info;
cc->gdb_num_core_regs = 73;
cc->gdb_stop_before_watchpoint = true;
/*
* Reason: mips_cpu_initfn() calls cpu_exec_init(), which saves
* the object in cpus -> dangling pointer after final
* object_unref().
*/
dc->cannot_destroy_with_object_finalize_yet = true;
}
| true | qemu | ce5b1bbf624b977a55ff7f85bb3871682d03baff |
524 | static void check_pointer_type_change(Notifier *notifier, void *data)
{
VncState *vs = container_of(notifier, VncState, mouse_mode_notifier);
int absolute = qemu_input_is_absolute();
if (vnc_has_feature(vs, VNC_FEATURE_POINTER_TYPE_CHANGE) && vs->absolute != absolute) {
vnc_lock_output(vs);
vnc_write_u8(vs, VNC_MSG_SERVER_FRAMEBUFFER_UPDATE);
vnc_write_u8(vs, 0);
vnc_write_u16(vs, 1);
vnc_framebuffer_update(vs, absolute, 0,
surface_width(vs->vd->ds),
surface_height(vs->vd->ds),
VNC_ENCODING_POINTER_TYPE_CHANGE);
vnc_unlock_output(vs);
vnc_flush(vs);
}
vs->absolute = absolute;
}
| true | qemu | bea60dd7679364493a0d7f5b54316c767cf894ef |
525 | static MMSSCPacketType get_tcp_server_response(MMSContext *mms)
{
int read_result;
MMSSCPacketType packet_type= -1;
for(;;) {
if((read_result= url_read_complete(mms->mms_hd, mms->in_buffer, 8))==8) {
// handle command packet.
if(AV_RL32(mms->in_buffer + 4)==0xb00bface) {
mms->incoming_flags= mms->in_buffer[3];
read_result= url_read_complete(mms->mms_hd, mms->in_buffer+8, 4);
if(read_result == 4) {
int length_remaining= AV_RL32(mms->in_buffer+8) + 4;
int hr;
dprintf(NULL, "Length remaining is %d\n", length_remaining);
// read the rest of the packet.
if (length_remaining < 0
|| length_remaining > sizeof(mms->in_buffer) - 12) {
dprintf(NULL, "Incoming message len %d exceeds buffer len %d\n",
length_remaining, sizeof(mms->in_buffer) - 12);
read_result = url_read_complete(mms->mms_hd, mms->in_buffer + 12,
length_remaining) ;
if (read_result == length_remaining) {
packet_type= AV_RL16(mms->in_buffer+36);
} else {
dprintf(NULL, "read for packet type failed%d!\n", read_result);
} else {
dprintf(NULL, "read for length remaining failed%d!\n", read_result);
} else {
int length_remaining;
int packet_id_type;
int tmp;
assert(mms->remaining_in_len==0);
// note we cache the first 8 bytes,
// then fill up the buffer with the others
tmp = AV_RL16(mms->in_buffer + 6);
length_remaining = (tmp - 8) & 0xffff;
mms->incoming_packet_seq = AV_RL32(mms->in_buffer);
packet_id_type = mms->in_buffer[4];
mms->incoming_flags = mms->in_buffer[5];
if (length_remaining < 0
|| length_remaining > sizeof(mms->in_buffer) - 8) {
dprintf(NULL, "Incoming data len %d exceeds buffer len %d\n",
length_remaining, sizeof(mms->in_buffer));
mms->remaining_in_len = length_remaining;
mms->read_in_ptr = mms->in_buffer;
read_result= url_read_complete(mms->mms_hd, mms->in_buffer, length_remaining);
if(read_result != length_remaining) {
dprintf(NULL, "read_bytes result: %d asking for %d\n",
read_result, length_remaining);
} else {
// if we successfully read everything.
if(packet_id_type == mms->header_packet_id) {
packet_type = SC_PKT_ASF_HEADER;
// Store the asf header
if(!mms->header_parsed) {
void *p = av_realloc(mms->asf_header,
mms->asf_header_size
+ mms->remaining_in_len);
if (!p) {
av_freep(&mms->asf_header);
return AVERROR(ENOMEM);
mms->asf_header = p;
memcpy(mms->asf_header + mms->asf_header_size,
mms->read_in_ptr,
mms->remaining_in_len);
mms->asf_header_size += mms->remaining_in_len;
} else if(packet_id_type == mms->packet_id) {
packet_type = SC_PKT_ASF_MEDIA;
} else {
dprintf(NULL, "packet id type %d is old.", packet_id_type);
continue;
// preprocess some packet type
if(packet_type == SC_PKT_KEEPALIVE) {
send_keepalive_packet(mms);
continue;
} else if(packet_type == SC_PKT_STREAM_CHANGING) {
handle_packet_stream_changing_type(mms);
} else if(packet_type == SC_PKT_ASF_MEDIA) {
pad_media_packet(mms);
return packet_type;
} else {
if(read_result<0) {
dprintf(NULL, "Read error (or cancelled) returned %d!\n", read_result);
packet_type = SC_PKT_CANCEL;
} else {
dprintf(NULL, "Read result of zero?!\n");
packet_type = SC_PKT_NO_DATA;
return packet_type;
| true | FFmpeg | 9078eba062f5e0463d4029aa42b7b0026f42bed3 |
527 | get_sigframe(struct emulated_sigaction *ka, CPUX86State *env, size_t frame_size)
{
unsigned long esp;
/* Default to using normal stack */
esp = env->regs[R_ESP];
#if 0
/* This is the X/Open sanctioned signal stack switching. */
if (ka->sa.sa_flags & SA_ONSTACK) {
if (sas_ss_flags(esp) == 0)
esp = current->sas_ss_sp + current->sas_ss_size;
}
/* This is the legacy signal stack switching. */
else if ((regs->xss & 0xffff) != __USER_DS &&
!(ka->sa.sa_flags & SA_RESTORER) &&
ka->sa.sa_restorer) {
esp = (unsigned long) ka->sa.sa_restorer;
}
#endif
return (void *)((esp - frame_size) & -8ul);
}
| true | qemu | a52c757c9f98311c3ba22744d609caa767b899e1 |
528 | static uint32_t add_weights(uint32_t w1, uint32_t w2)
{
uint32_t max = (w1 & 0xFF) > (w2 & 0xFF) ? (w1 & 0xFF) : (w2 & 0xFF);
return ((w1 & 0xFFFFFF00) + (w2 & 0xFFFFFF00)) | (1 + max);
}
| true | FFmpeg | f92f4935acd7d974adfd1deebdf1bb06cbe107ca |
529 | static void decode_rlc_opc(CPUTriCoreState *env, DisasContext *ctx,
uint32_t op1)
{
int32_t const16;
int r1, r2;
const16 = MASK_OP_RLC_CONST16_SEXT(ctx->opcode);
r1 = MASK_OP_RLC_S1(ctx->opcode);
r2 = MASK_OP_RLC_D(ctx->opcode);
switch (op1) {
case OPC1_32_RLC_ADDI:
gen_addi_d(cpu_gpr_d[r2], cpu_gpr_d[r1], const16);
break;
case OPC1_32_RLC_ADDIH:
gen_addi_d(cpu_gpr_d[r2], cpu_gpr_d[r1], const16 << 16);
break;
case OPC1_32_RLC_ADDIH_A:
tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r1], const16 << 16);
break;
case OPC1_32_RLC_MFCR:
const16 = MASK_OP_RLC_CONST16(ctx->opcode);
gen_mfcr(env, cpu_gpr_d[r2], const16);
break;
case OPC1_32_RLC_MOV:
tcg_gen_movi_tl(cpu_gpr_d[r2], const16);
break;
case OPC1_32_RLC_MOV_64:
if (tricore_feature(env, TRICORE_FEATURE_16)) {
if ((r2 & 0x1) != 0) {
/* TODO: raise OPD trap */
}
tcg_gen_movi_tl(cpu_gpr_d[r2], const16);
tcg_gen_movi_tl(cpu_gpr_d[r2+1], const16 >> 15);
} else {
/* TODO: raise illegal opcode trap */
}
break;
case OPC1_32_RLC_MOV_U:
const16 = MASK_OP_RLC_CONST16(ctx->opcode);
tcg_gen_movi_tl(cpu_gpr_d[r2], const16);
break;
case OPC1_32_RLC_MOV_H:
tcg_gen_movi_tl(cpu_gpr_d[r2], const16 << 16);
break;
case OPC1_32_RLC_MOVH_A:
tcg_gen_movi_tl(cpu_gpr_a[r2], const16 << 16);
break;
case OPC1_32_RLC_MTCR:
const16 = MASK_OP_RLC_CONST16(ctx->opcode);
gen_mtcr(env, ctx, cpu_gpr_d[r1], const16);
break;
}
}
| true | qemu | f678f671ba654d4610f0e43d175c8c1b2fad10df |
530 | static bool ga_open_pidfile(const char *pidfile)
{
int pidfd;
char pidstr[32];
pidfd = open(pidfile, O_CREAT|O_WRONLY, S_IRUSR|S_IWUSR);
if (pidfd == -1 || lockf(pidfd, F_TLOCK, 0)) {
g_critical("Cannot lock pid file, %s", strerror(errno));
if (pidfd != -1) {
close(pidfd);
}
return false;
}
if (ftruncate(pidfd, 0) || lseek(pidfd, 0, SEEK_SET)) {
g_critical("Failed to truncate pid file");
goto fail;
}
snprintf(pidstr, sizeof(pidstr), "%d\n", getpid());
if (write(pidfd, pidstr, strlen(pidstr)) != strlen(pidstr)) {
g_critical("Failed to write pid file");
goto fail;
}
return true;
fail:
unlink(pidfile);
return false;
}
| true | qemu | 6ffacc5d3ddf2e3227aae2a8cc5c15627265f727 |
531 | static void video_image_display(VideoState *is)
{
Frame *vp;
Frame *sp = NULL;
SDL_Rect rect;
vp = frame_queue_peek_last(&is->pictq);
if (vp->bmp) {
if (is->subtitle_st) {
if (frame_queue_nb_remaining(&is->subpq) > 0) {
sp = frame_queue_peek(&is->subpq);
if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000)) {
if (!sp->uploaded) {
uint8_t *pixels;
int pitch;
int i;
if (!sp->width || !sp->height) {
sp->width = vp->width;
sp->height = vp->height;
}
if (realloc_texture(&is->sub_texture, SDL_PIXELFORMAT_ARGB8888, sp->width, sp->height, SDL_BLENDMODE_BLEND, 1) < 0)
return;
for (i = 0; i < sp->sub.num_rects; i++) {
AVSubtitleRect *sub_rect = sp->sub.rects[i];
sub_rect->x = av_clip(sub_rect->x, 0, sp->width );
sub_rect->y = av_clip(sub_rect->y, 0, sp->height);
sub_rect->w = av_clip(sub_rect->w, 0, sp->width - sub_rect->x);
sub_rect->h = av_clip(sub_rect->h, 0, sp->height - sub_rect->y);
is->sub_convert_ctx = sws_getCachedContext(is->sub_convert_ctx,
sub_rect->w, sub_rect->h, AV_PIX_FMT_PAL8,
sub_rect->w, sub_rect->h, AV_PIX_FMT_BGRA,
0, NULL, NULL, NULL);
if (!is->sub_convert_ctx) {
av_log(NULL, AV_LOG_FATAL, "Cannot initialize the conversion context\n");
return;
}
if (!SDL_LockTexture(is->sub_texture, (SDL_Rect *)sub_rect, (void **)&pixels, &pitch)) {
sws_scale(is->sub_convert_ctx, (const uint8_t * const *)sub_rect->data, sub_rect->linesize,
0, sub_rect->h, &pixels, &pitch);
SDL_UnlockTexture(is->sub_texture);
}
}
sp->uploaded = 1;
}
} else
sp = NULL;
}
}
calculate_display_rect(&rect, is->xleft, is->ytop, is->width, is->height, vp->width, vp->height, vp->sar);
if (!vp->uploaded) {
if (upload_texture(vp->bmp, vp->frame, &is->img_convert_ctx) < 0)
return;
vp->uploaded = 1;
vp->flip_v = vp->frame->linesize[0] < 0;
}
SDL_RenderCopyEx(renderer, vp->bmp, NULL, &rect, 0, NULL, vp->flip_v ? SDL_FLIP_VERTICAL : 0);
if (sp) {
#if USE_ONEPASS_SUBTITLE_RENDER
SDL_RenderCopy(renderer, is->sub_texture, NULL, &rect);
#else
int i;
double xratio = (double)rect.w / (double)sp->width;
double yratio = (double)rect.h / (double)sp->height;
for (i = 0; i < sp->sub.num_rects; i++) {
SDL_Rect *sub_rect = (SDL_Rect*)sp->sub.rects[i];
SDL_Rect target = {.x = rect.x + sub_rect->x * xratio,
.y = rect.y + sub_rect->y * yratio,
.w = sub_rect->w * xratio,
.h = sub_rect->h * yratio};
SDL_RenderCopy(renderer, is->sub_texture, sub_rect, &target);
}
#endif
}
}
}
| true | FFmpeg | 1f3910262e1b9091f597ebbb710b478d40319986 |
532 | static int rtmp_server_handshake(URLContext *s, RTMPContext *rt)
{
uint8_t buffer[RTMP_HANDSHAKE_PACKET_SIZE];
uint32_t hs_epoch;
uint32_t hs_my_epoch;
uint8_t hs_c1[RTMP_HANDSHAKE_PACKET_SIZE];
uint8_t hs_s1[RTMP_HANDSHAKE_PACKET_SIZE];
uint32_t zeroes;
uint32_t temp = 0;
int randomidx = 0;
int inoutsize = 0;
int ret;
inoutsize = ffurl_read_complete(rt->stream, buffer, 1); // Receive C0
if (inoutsize <= 0) {
av_log(s, AV_LOG_ERROR, "Unable to read handshake\n");
return AVERROR(EIO);
}
// Check Version
if (buffer[0] != 3) {
av_log(s, AV_LOG_ERROR, "RTMP protocol version mismatch\n");
return AVERROR(EIO);
}
if (ffurl_write(rt->stream, buffer, 1) <= 0) { // Send S0
av_log(s, AV_LOG_ERROR,
"Unable to write answer - RTMP S0\n");
return AVERROR(EIO);
}
/* Receive C1 */
ret = rtmp_receive_hs_packet(rt, &hs_epoch, &zeroes, hs_c1,
RTMP_HANDSHAKE_PACKET_SIZE);
if (ret) {
av_log(s, AV_LOG_ERROR, "RTMP Handshake C1 Error\n");
return ret;
}
if (zeroes)
av_log(s, AV_LOG_WARNING, "Erroneous C1 Message zero != 0\n");
/* Send S1 */
/* By now same epoch will be sent */
hs_my_epoch = hs_epoch;
/* Generate random */
for (randomidx = 0; randomidx < (RTMP_HANDSHAKE_PACKET_SIZE);
randomidx += 4)
AV_WB32(hs_s1 + 8 + randomidx, av_get_random_seed());
ret = rtmp_send_hs_packet(rt, hs_my_epoch, 0, hs_s1,
RTMP_HANDSHAKE_PACKET_SIZE);
if (ret) {
av_log(s, AV_LOG_ERROR, "RTMP Handshake S1 Error\n");
return ret;
}
/* Send S2 */
ret = rtmp_send_hs_packet(rt, hs_epoch, 0, hs_c1,
RTMP_HANDSHAKE_PACKET_SIZE);
if (ret) {
av_log(s, AV_LOG_ERROR, "RTMP Handshake S2 Error\n");
return ret;
}
/* Receive C2 */
ret = rtmp_receive_hs_packet(rt, &temp, &zeroes, buffer,
RTMP_HANDSHAKE_PACKET_SIZE);
if (ret) {
av_log(s, AV_LOG_ERROR, "RTMP Handshake C2 Error\n");
return ret;
}
if (temp != hs_my_epoch)
av_log(s, AV_LOG_WARNING,
"Erroneous C2 Message epoch does not match up with C1 epoch\n");
if (memcmp(buffer + 8, hs_s1 + 8,
RTMP_HANDSHAKE_PACKET_SIZE - 8))
av_log(s, AV_LOG_WARNING,
"Erroneous C2 Message random does not match up\n");
return 0;
}
| false | FFmpeg | 5a75924dfd432c0ada79a9f489889dc92d53b481 |
533 | int hw_device_setup_for_encode(OutputStream *ost)
{
enum AVHWDeviceType type;
HWDevice *dev;
type = hw_device_match_type_in_name(ost->enc->name);
if (type != AV_HWDEVICE_TYPE_NONE) {
dev = hw_device_get_by_type(type);
if (!dev) {
av_log(ost->enc_ctx, AV_LOG_WARNING, "No device available "
"for encoder (device type %s for codec %s).\n",
av_hwdevice_get_type_name(type), ost->enc->name);
return 0;
}
ost->enc_ctx->hw_device_ctx = av_buffer_ref(dev->device_ref);
if (!ost->enc_ctx->hw_device_ctx)
return AVERROR(ENOMEM);
return 0;
} else {
// No device required.
return 0;
}
}
| false | FFmpeg | b0cd14fb1dab4b044f7fe6b53ac635409849de77 |
535 | e1000e_cleanup_msix(E1000EState *s)
{
if (msix_enabled(PCI_DEVICE(s))) {
e1000e_unuse_msix_vectors(s, E1000E_MSIX_VEC_NUM);
msix_uninit(PCI_DEVICE(s), &s->msix, &s->msix);
}
}
| true | qemu | 7ec7ae4b973d1471f6f39fc2b6481f69c2b39593 |
537 | ASSStyle *ff_ass_style_get(ASSSplitContext *ctx, const char *style)
{
ASS *ass = &ctx->ass;
int i;
if (!style || !*style)
style = "Default";
for (i=0; i<ass->styles_count; i++)
if (!strcmp(ass->styles[i].name, style))
return ass->styles + i;
return NULL;
}
| true | FFmpeg | 158f0545d81b2aca1c936490f80d13988616910e |
538 | static void vnc_dpy_copy(DisplayChangeListener *dcl,
int src_x, int src_y,
int dst_x, int dst_y, int w, int h)
{
VncDisplay *vd = container_of(dcl, VncDisplay, dcl);
VncState *vs, *vn;
uint8_t *src_row;
uint8_t *dst_row;
int i, x, y, pitch, inc, w_lim, s;
int cmp_bytes;
vnc_refresh_server_surface(vd);
QTAILQ_FOREACH_SAFE(vs, &vd->clients, next, vn) {
if (vnc_has_feature(vs, VNC_FEATURE_COPYRECT)) {
vs->force_update = 1;
vnc_update_client(vs, 1, true);
/* vs might be free()ed here */
/* do bitblit op on the local surface too */
pitch = vnc_server_fb_stride(vd);
src_row = vnc_server_fb_ptr(vd, src_x, src_y);
dst_row = vnc_server_fb_ptr(vd, dst_x, dst_y);
y = dst_y;
inc = 1;
if (dst_y > src_y) {
/* copy backwards */
src_row += pitch * (h-1);
dst_row += pitch * (h-1);
pitch = -pitch;
y = dst_y + h - 1;
inc = -1;
w_lim = w - (VNC_DIRTY_PIXELS_PER_BIT - (dst_x % VNC_DIRTY_PIXELS_PER_BIT));
if (w_lim < 0) {
w_lim = w;
} else {
w_lim = w - (w_lim % VNC_DIRTY_PIXELS_PER_BIT);
for (i = 0; i < h; i++) {
for (x = 0; x <= w_lim;
x += s, src_row += cmp_bytes, dst_row += cmp_bytes) {
if (x == w_lim) {
if ((s = w - w_lim) == 0)
break;
} else if (!x) {
s = (VNC_DIRTY_PIXELS_PER_BIT -
(dst_x % VNC_DIRTY_PIXELS_PER_BIT));
s = MIN(s, w_lim);
} else {
s = VNC_DIRTY_PIXELS_PER_BIT;
cmp_bytes = s * VNC_SERVER_FB_BYTES;
if (memcmp(src_row, dst_row, cmp_bytes) == 0)
continue;
memmove(dst_row, src_row, cmp_bytes);
QTAILQ_FOREACH(vs, &vd->clients, next) {
if (!vnc_has_feature(vs, VNC_FEATURE_COPYRECT)) {
set_bit(((x + dst_x) / VNC_DIRTY_PIXELS_PER_BIT),
vs->dirty[y]);
src_row += pitch - w * VNC_SERVER_FB_BYTES;
dst_row += pitch - w * VNC_SERVER_FB_BYTES;
y += inc;
QTAILQ_FOREACH(vs, &vd->clients, next) {
if (vnc_has_feature(vs, VNC_FEATURE_COPYRECT)) {
vnc_copy(vs, src_x, src_y, dst_x, dst_y, w, h);
| true | qemu | 7fe4a41c262e2529dc79f77f6fe63c5309fa2fd9 |
540 | static inline void RENAME(yuv2yuvX)(SwsContext *c, int16_t *lumFilter, int16_t **lumSrc, int lumFilterSize,
int16_t *chrFilter, int16_t **chrSrc, int chrFilterSize,
uint8_t *dest, uint8_t *uDest, uint8_t *vDest, long dstW, long chrDstW)
{
#ifdef HAVE_MMX
if(c->flags & SWS_ACCURATE_RND){
if(uDest){
YSCALEYUV2YV12X_ACCURATE( 0, CHR_MMX_FILTER_OFFSET, uDest, chrDstW)
YSCALEYUV2YV12X_ACCURATE(4096, CHR_MMX_FILTER_OFFSET, vDest, chrDstW)
}
YSCALEYUV2YV12X_ACCURATE(0, LUM_MMX_FILTER_OFFSET, dest, dstW)
}else{
if(uDest){
YSCALEYUV2YV12X( 0, CHR_MMX_FILTER_OFFSET, uDest, chrDstW)
YSCALEYUV2YV12X(4096, CHR_MMX_FILTER_OFFSET, vDest, chrDstW)
}
YSCALEYUV2YV12X(0, LUM_MMX_FILTER_OFFSET, dest, dstW)
}
#else
#ifdef HAVE_ALTIVEC
yuv2yuvX_altivec_real(lumFilter, lumSrc, lumFilterSize,
chrFilter, chrSrc, chrFilterSize,
dest, uDest, vDest, dstW, chrDstW);
#else //HAVE_ALTIVEC
yuv2yuvXinC(lumFilter, lumSrc, lumFilterSize,
chrFilter, chrSrc, chrFilterSize,
dest, uDest, vDest, dstW, chrDstW);
#endif //!HAVE_ALTIVEC
#endif
}
| true | FFmpeg | 2da0d70d5eebe42f9fcd27ee554419ebe2a5da06 |
541 | static int vorbis_residue_decode(vorbis_context *vc, vorbis_residue *vr, uint_fast8_t ch, uint_fast8_t *do_not_decode, float *vec, uint_fast16_t vlen) {
GetBitContext *gb=&vc->gb;
uint_fast8_t c_p_c=vc->codebooks[vr->classbook].dimensions;
uint_fast16_t n_to_read=vr->end-vr->begin;
uint_fast16_t ptns_to_read=n_to_read/vr->partition_size;
uint_fast8_t classifs[ptns_to_read*vc->audio_channels];
uint_fast8_t pass;
uint_fast8_t ch_used;
uint_fast8_t i,j,l;
uint_fast16_t k;
if (vr->type==2) {
for(j=1;j<ch;++j) {
do_not_decode[0]&=do_not_decode[j]; // FIXME - clobbering input
}
if (do_not_decode[0]) return 0;
ch_used=1;
} else {
ch_used=ch;
}
AV_DEBUG(" residue type 0/1/2 decode begin, ch: %d cpc %d \n", ch, c_p_c);
for(pass=0;pass<=vr->maxpass;++pass) { // FIXME OPTIMIZE?
uint_fast16_t voffset;
uint_fast16_t partition_count;
uint_fast16_t j_times_ptns_to_read;
voffset=vr->begin;
for(partition_count=0;partition_count<ptns_to_read;) { // SPEC error
if (!pass) {
uint_fast32_t inverse_class = ff_inverse[vr->classifications];
for(j_times_ptns_to_read=0, j=0;j<ch_used;++j) {
if (!do_not_decode[j]) {
uint_fast32_t temp=get_vlc2(gb, vc->codebooks[vr->classbook].vlc.table,
vc->codebooks[vr->classbook].nb_bits, 3);
AV_DEBUG("Classword: %d \n", temp);
assert(vr->classifications > 1 && temp<=65536); //needed for inverse[]
for(i=0;i<c_p_c;++i) {
uint_fast32_t temp2;
temp2=(((uint_fast64_t)temp) * inverse_class)>>32;
if (partition_count+c_p_c-1-i < ptns_to_read) {
classifs[j_times_ptns_to_read+partition_count+c_p_c-1-i]=temp-temp2*vr->classifications;
}
temp=temp2;
}
}
j_times_ptns_to_read+=ptns_to_read;
}
}
for(i=0;(i<c_p_c) && (partition_count<ptns_to_read);++i) {
for(j_times_ptns_to_read=0, j=0;j<ch_used;++j) {
uint_fast16_t voffs;
if (!do_not_decode[j]) {
uint_fast8_t vqclass=classifs[j_times_ptns_to_read+partition_count];
int_fast16_t vqbook=vr->books[vqclass][pass];
if (vqbook>=0) {
uint_fast16_t coffs;
unsigned dim= vc->codebooks[vqbook].dimensions; // not uint_fast8_t: 64bit is slower here on amd64
uint_fast16_t step= dim==1 ? vr->partition_size
: FASTDIV(vr->partition_size, dim);
vorbis_codebook codebook= vc->codebooks[vqbook];
if (vr->type==0) {
voffs=voffset+j*vlen;
for(k=0;k<step;++k) {
coffs=get_vlc2(gb, codebook.vlc.table, codebook.nb_bits, 3) * dim;
for(l=0;l<dim;++l) {
vec[voffs+k+l*step]+=codebook.codevectors[coffs+l]; // FPMATH
}
}
}
else if (vr->type==1) {
voffs=voffset+j*vlen;
for(k=0;k<step;++k) {
coffs=get_vlc2(gb, codebook.vlc.table, codebook.nb_bits, 3) * dim;
for(l=0;l<dim;++l, ++voffs) {
vec[voffs]+=codebook.codevectors[coffs+l]; // FPMATH
AV_DEBUG(" pass %d offs: %d curr: %f change: %f cv offs.: %d \n", pass, voffs, vec[voffs], codebook.codevectors[coffs+l], coffs);
}
}
}
else if (vr->type==2 && ch==2 && (voffset&1)==0 && (dim&1)==0) { // most frequent case optimized
voffs=voffset>>1;
if(dim==2) {
for(k=0;k<step;++k) {
coffs=get_vlc2(gb, codebook.vlc.table, codebook.nb_bits, 3) * 2;
vec[voffs+k ]+=codebook.codevectors[coffs ]; // FPMATH
vec[voffs+k+vlen]+=codebook.codevectors[coffs+1]; // FPMATH
}
} else
for(k=0;k<step;++k) {
coffs=get_vlc2(gb, codebook.vlc.table, codebook.nb_bits, 3) * dim;
for(l=0;l<dim;l+=2, voffs++) {
vec[voffs ]+=codebook.codevectors[coffs+l ]; // FPMATH
vec[voffs+vlen]+=codebook.codevectors[coffs+l+1]; // FPMATH
AV_DEBUG(" pass %d offs: %d curr: %f change: %f cv offs.: %d+%d \n", pass, voffset/ch+(voffs%ch)*vlen, vec[voffset/ch+(voffs%ch)*vlen], codebook.codevectors[coffs+l], coffs, l);
}
}
}
else if (vr->type==2) {
voffs=voffset;
for(k=0;k<step;++k) {
coffs=get_vlc2(gb, codebook.vlc.table, codebook.nb_bits, 3) * dim;
for(l=0;l<dim;++l, ++voffs) {
vec[voffs/ch+(voffs%ch)*vlen]+=codebook.codevectors[coffs+l]; // FPMATH FIXME use if and counter instead of / and %
AV_DEBUG(" pass %d offs: %d curr: %f change: %f cv offs.: %d+%d \n", pass, voffset/ch+(voffs%ch)*vlen, vec[voffset/ch+(voffs%ch)*vlen], codebook.codevectors[coffs+l], coffs, l);
}
}
} else {
av_log(vc->avccontext, AV_LOG_ERROR, " Invalid residue type while residue decode?! \n");
return 1;
}
}
}
j_times_ptns_to_read+=ptns_to_read;
}
++partition_count;
voffset+=vr->partition_size;
}
}
}
return 0;
}
| true | FFmpeg | 975741e79cedc6033e5b02319792534a3a42c4ae |
542 | static CharDriverState *qmp_chardev_open_parallel(ChardevHostdev *parallel,
Error **errp)
{
#ifdef HAVE_CHARDEV_PARPORT
int fd;
fd = qmp_chardev_open_file_source(parallel->device, O_RDWR, errp);
if (error_is_set(errp)) {
return NULL;
}
return qemu_chr_open_pp_fd(fd);
#else
error_setg(errp, "character device backend type 'parallel' not supported");
return NULL;
#endif
}
| true | qemu | 5f758366c0710d23e43f4d0f83816b98616a13d0 |
543 | void cpu_exit(CPUArchState *env)
{
CPUState *cpu = ENV_GET_CPU(env);
cpu->exit_request = 1;
cpu_unlink_tb(cpu);
}
| true | qemu | 378df4b23753a11be650af7664ca76bc75cb9f01 |
544 | static int hdcd_envelope(int32_t *samples, int count, int stride, int gain, int target_gain, int extend)
{
int i;
int32_t *samples_end = samples + stride * count;
if (extend) {
for (i = 0; i < count; i++) {
int32_t sample = samples[i * stride];
int32_t asample = abs(sample) - 0x5981;
if (asample >= 0)
sample = sample >= 0 ? peaktab[asample] : -peaktab[asample];
else
sample <<= 15;
samples[i * stride] = sample;
}
} else {
for (i = 0; i < count; i++)
samples[i * stride] <<= 15;
}
if (gain <= target_gain) {
int len = FFMIN(count, target_gain - gain);
/* attenuate slowly */
for (i = 0; i < len; i++) {
++gain;
APPLY_GAIN(*samples, gain);
samples += stride;
}
count -= len;
} else {
int len = FFMIN(count, (gain - target_gain) >> 3);
/* amplify quickly */
for (i = 0; i < len; i++) {
gain -= 8;
APPLY_GAIN(*samples, gain);
samples += stride;
}
if (gain - 8 < target_gain)
gain = target_gain;
count -= len;
}
/* hold a steady level */
if (gain == 0) {
if (count > 0)
samples += count * stride;
} else {
while (--count >= 0) {
APPLY_GAIN(*samples, gain);
samples += stride;
}
}
av_assert0(samples == samples_end);
return gain;
}
| false | FFmpeg | 0e0f8859ba0af33e1145a4c4022e964011e2d75b |
545 | static QObject *parse_array(JSONParserContext *ctxt, va_list *ap)
{
QList *list = NULL;
QObject *token, *peek;
token = parser_context_pop_token(ctxt);
assert(token && token_get_type(token) == JSON_LSQUARE);
list = qlist_new();
peek = parser_context_peek_token(ctxt);
if (peek == NULL) {
parse_error(ctxt, NULL, "premature EOI");
goto out;
}
if (token_get_type(peek) != JSON_RSQUARE) {
QObject *obj;
obj = parse_value(ctxt, ap);
if (obj == NULL) {
parse_error(ctxt, token, "expecting value");
goto out;
}
qlist_append_obj(list, obj);
token = parser_context_pop_token(ctxt);
if (token == NULL) {
parse_error(ctxt, NULL, "premature EOI");
goto out;
}
while (token_get_type(token) != JSON_RSQUARE) {
if (token_get_type(token) != JSON_COMMA) {
parse_error(ctxt, token, "expected separator in list");
goto out;
}
obj = parse_value(ctxt, ap);
if (obj == NULL) {
parse_error(ctxt, token, "expecting value");
goto out;
}
qlist_append_obj(list, obj);
token = parser_context_pop_token(ctxt);
if (token == NULL) {
parse_error(ctxt, NULL, "premature EOI");
goto out;
}
}
} else {
(void)parser_context_pop_token(ctxt);
}
return QOBJECT(list);
out:
QDECREF(list);
return NULL;
}
| false | qemu | 9bada8971173345ceb37ed1a47b00a01a4dd48cf |
546 | static void raw_aio_unplug(BlockDriverState *bs)
{
#ifdef CONFIG_LINUX_AIO
BDRVRawState *s = bs->opaque;
if (s->use_aio) {
laio_io_unplug(bs, s->aio_ctx, true);
}
#endif
}
| false | qemu | 6b98bd649520d07df4d1b7a0a54ac73bf178519c |
548 | void hmp_info_tpm(Monitor *mon, const QDict *qdict)
{
TPMInfoList *info_list, *info;
Error *err = NULL;
unsigned int c = 0;
TPMPassthroughOptions *tpo;
info_list = qmp_query_tpm(&err);
if (err) {
monitor_printf(mon, "TPM device not supported\n");
error_free(err);
return;
}
if (info_list) {
monitor_printf(mon, "TPM device:\n");
}
for (info = info_list; info; info = info->next) {
TPMInfo *ti = info->value;
monitor_printf(mon, " tpm%d: model=%s\n",
c, TpmModel_lookup[ti->model]);
monitor_printf(mon, " \\ %s: type=%s",
ti->id, TpmTypeOptionsKind_lookup[ti->options->kind]);
switch (ti->options->kind) {
case TPM_TYPE_OPTIONS_KIND_PASSTHROUGH:
tpo = ti->options->passthrough;
monitor_printf(mon, "%s%s%s%s",
tpo->has_path ? ",path=" : "",
tpo->has_path ? tpo->path : "",
tpo->has_cancel_path ? ",cancel-path=" : "",
tpo->has_cancel_path ? tpo->cancel_path : "");
break;
case TPM_TYPE_OPTIONS_KIND_MAX:
break;
}
monitor_printf(mon, "\n");
c++;
}
qapi_free_TPMInfoList(info_list);
}
| false | qemu | ce21131a0b9e556bb73bf65eacdc07ccb21f78a9 |
549 | static void tap_set_sndbuf(TAPState *s, int sndbuf, Monitor *mon)
{
#ifdef TUNSETSNDBUF
if (ioctl(s->fd, TUNSETSNDBUF, &sndbuf) == -1) {
config_error(mon, "TUNSETSNDBUF ioctl failed: %s\n",
strerror(errno));
}
#else
config_error(mon, "No '-net tap,sndbuf=<nbytes>' support available\n");
#endif
}
| false | qemu | fc5b81d1f6df7342f0963120b2cf3e919d6fc08a |
550 | ssize_t pcnet_receive(VLANClientState *nc, const uint8_t *buf, size_t size_)
{
PCNetState *s = DO_UPCAST(NICState, nc, nc)->opaque;
int is_padr = 0, is_bcast = 0, is_ladr = 0;
uint8_t buf1[60];
int remaining;
int crc_err = 0;
int size = size_;
if (CSR_DRX(s) || CSR_STOP(s) || CSR_SPND(s) || !size)
return -1;
#ifdef PCNET_DEBUG
printf("pcnet_receive size=%d\n", size);
#endif
/* if too small buffer, then expand it */
if (size < MIN_BUF_SIZE) {
memcpy(buf1, buf, size);
memset(buf1 + size, 0, MIN_BUF_SIZE - size);
buf = buf1;
size = MIN_BUF_SIZE;
}
if (CSR_PROM(s)
|| (is_padr=padr_match(s, buf, size))
|| (is_bcast=padr_bcast(s, buf, size))
|| (is_ladr=ladr_match(s, buf, size))) {
pcnet_rdte_poll(s);
if (!(CSR_CRST(s) & 0x8000) && s->rdra) {
struct pcnet_RMD rmd;
int rcvrc = CSR_RCVRC(s)-1,i;
target_phys_addr_t nrda;
for (i = CSR_RCVRL(s)-1; i > 0; i--, rcvrc--) {
if (rcvrc <= 1)
rcvrc = CSR_RCVRL(s);
nrda = s->rdra +
(CSR_RCVRL(s) - rcvrc) *
(BCR_SWSTYLE(s) ? 16 : 8 );
RMDLOAD(&rmd, nrda);
if (GET_FIELD(rmd.status, RMDS, OWN)) {
#ifdef PCNET_DEBUG_RMD
printf("pcnet - scan buffer: RCVRC=%d PREV_RCVRC=%d\n",
rcvrc, CSR_RCVRC(s));
#endif
CSR_RCVRC(s) = rcvrc;
pcnet_rdte_poll(s);
break;
}
}
}
if (!(CSR_CRST(s) & 0x8000)) {
#ifdef PCNET_DEBUG_RMD
printf("pcnet - no buffer: RCVRC=%d\n", CSR_RCVRC(s));
#endif
s->csr[0] |= 0x1000; /* Set MISS flag */
CSR_MISSC(s)++;
} else {
uint8_t *src = s->buffer;
target_phys_addr_t crda = CSR_CRDA(s);
struct pcnet_RMD rmd;
int pktcount = 0;
if (!s->looptest) {
memcpy(src, buf, size);
/* no need to compute the CRC */
src[size] = 0;
src[size + 1] = 0;
src[size + 2] = 0;
src[size + 3] = 0;
size += 4;
} else if (s->looptest == PCNET_LOOPTEST_CRC ||
!CSR_DXMTFCS(s) || size < MIN_BUF_SIZE+4) {
uint32_t fcs = ~0;
uint8_t *p = src;
while (p != &src[size])
CRC(fcs, *p++);
*(uint32_t *)p = htonl(fcs);
size += 4;
} else {
uint32_t fcs = ~0;
uint8_t *p = src;
while (p != &src[size-4])
CRC(fcs, *p++);
crc_err = (*(uint32_t *)p != htonl(fcs));
}
#ifdef PCNET_DEBUG_MATCH
PRINT_PKTHDR(buf);
#endif
RMDLOAD(&rmd, PHYSADDR(s,crda));
/*if (!CSR_LAPPEN(s))*/
SET_FIELD(&rmd.status, RMDS, STP, 1);
#define PCNET_RECV_STORE() do { \
int count = MIN(4096 - GET_FIELD(rmd.buf_length, RMDL, BCNT),remaining); \
target_phys_addr_t rbadr = PHYSADDR(s, rmd.rbadr); \
s->phys_mem_write(s->dma_opaque, rbadr, src, count, CSR_BSWP(s)); \
src += count; remaining -= count; \
SET_FIELD(&rmd.status, RMDS, OWN, 0); \
RMDSTORE(&rmd, PHYSADDR(s,crda)); \
pktcount++; \
} while (0)
remaining = size;
PCNET_RECV_STORE();
if ((remaining > 0) && CSR_NRDA(s)) {
target_phys_addr_t nrda = CSR_NRDA(s);
#ifdef PCNET_DEBUG_RMD
PRINT_RMD(&rmd);
#endif
RMDLOAD(&rmd, PHYSADDR(s,nrda));
if (GET_FIELD(rmd.status, RMDS, OWN)) {
crda = nrda;
PCNET_RECV_STORE();
#ifdef PCNET_DEBUG_RMD
PRINT_RMD(&rmd);
#endif
if ((remaining > 0) && (nrda=CSR_NNRD(s))) {
RMDLOAD(&rmd, PHYSADDR(s,nrda));
if (GET_FIELD(rmd.status, RMDS, OWN)) {
crda = nrda;
PCNET_RECV_STORE();
}
}
}
}
#undef PCNET_RECV_STORE
RMDLOAD(&rmd, PHYSADDR(s,crda));
if (remaining == 0) {
SET_FIELD(&rmd.msg_length, RMDM, MCNT, size);
SET_FIELD(&rmd.status, RMDS, ENP, 1);
SET_FIELD(&rmd.status, RMDS, PAM, !CSR_PROM(s) && is_padr);
SET_FIELD(&rmd.status, RMDS, LFAM, !CSR_PROM(s) && is_ladr);
SET_FIELD(&rmd.status, RMDS, BAM, !CSR_PROM(s) && is_bcast);
if (crc_err) {
SET_FIELD(&rmd.status, RMDS, CRC, 1);
SET_FIELD(&rmd.status, RMDS, ERR, 1);
}
} else {
SET_FIELD(&rmd.status, RMDS, OFLO, 1);
SET_FIELD(&rmd.status, RMDS, BUFF, 1);
SET_FIELD(&rmd.status, RMDS, ERR, 1);
}
RMDSTORE(&rmd, PHYSADDR(s,crda));
s->csr[0] |= 0x0400;
#ifdef PCNET_DEBUG
printf("RCVRC=%d CRDA=0x%08x BLKS=%d\n",
CSR_RCVRC(s), PHYSADDR(s,CSR_CRDA(s)), pktcount);
#endif
#ifdef PCNET_DEBUG_RMD
PRINT_RMD(&rmd);
#endif
while (pktcount--) {
if (CSR_RCVRC(s) <= 1)
CSR_RCVRC(s) = CSR_RCVRL(s);
else
CSR_RCVRC(s)--;
}
pcnet_rdte_poll(s);
}
}
pcnet_poll(s);
pcnet_update_irq(s);
return size_;
}
| false | qemu | c1ded3dc9f2d6caeb62eb3005510837a62b795d2 |
551 | static void gen_stda_asi(DisasContext *dc, TCGv hi, TCGv addr,
int insn, int rd)
{
TCGv_i32 r_asi, r_size;
TCGv lo = gen_load_gpr(dc, rd + 1);
TCGv_i64 t64 = tcg_temp_new_i64();
tcg_gen_concat_tl_i64(t64, lo, hi);
r_asi = gen_get_asi(dc, insn);
r_size = tcg_const_i32(8);
gen_helper_st_asi(cpu_env, addr, t64, r_asi, r_size);
tcg_temp_free_i32(r_size);
tcg_temp_free_i32(r_asi);
tcg_temp_free_i64(t64);
}
| false | qemu | 7ec1e5ea4bd0700fa48da86bffa2fcc6146c410a |
552 | void s390_pci_iommu_enable(S390PCIBusDevice *pbdev)
{
memory_region_init_iommu(&pbdev->iommu_mr, OBJECT(&pbdev->mr),
&s390_iommu_ops, "iommu-s390", pbdev->pal + 1);
memory_region_add_subregion(&pbdev->mr, 0, &pbdev->iommu_mr);
pbdev->iommu_enabled = true;
}
| false | qemu | 67d5cd9722b230027d3d4267ae6069c5d8a65463 |
553 | void vnc_disconnect_finish(VncState *vs)
{
int i;
vnc_jobs_join(vs); /* Wait encoding jobs */
vnc_lock_output(vs);
vnc_qmp_event(vs, QAPI_EVENT_VNC_DISCONNECTED);
buffer_free(&vs->input);
buffer_free(&vs->output);
#ifdef CONFIG_VNC_WS
buffer_free(&vs->ws_input);
buffer_free(&vs->ws_output);
#endif /* CONFIG_VNC_WS */
qapi_free_VncClientInfo(vs->info);
vnc_zlib_clear(vs);
vnc_tight_clear(vs);
vnc_zrle_clear(vs);
#ifdef CONFIG_VNC_TLS
vnc_tls_client_cleanup(vs);
#endif /* CONFIG_VNC_TLS */
#ifdef CONFIG_VNC_SASL
vnc_sasl_client_cleanup(vs);
#endif /* CONFIG_VNC_SASL */
audio_del(vs);
vnc_release_modifiers(vs);
if (vs->initialized) {
QTAILQ_REMOVE(&vs->vd->clients, vs, next);
qemu_remove_mouse_mode_change_notifier(&vs->mouse_mode_notifier);
}
if (vs->vd->lock_key_sync)
qemu_remove_led_event_handler(vs->led);
vnc_unlock_output(vs);
qemu_mutex_destroy(&vs->output_mutex);
if (vs->bh != NULL) {
qemu_bh_delete(vs->bh);
}
buffer_free(&vs->jobs_buffer);
for (i = 0; i < VNC_STAT_ROWS; ++i) {
g_free(vs->lossy_rect[i]);
}
g_free(vs->lossy_rect);
g_free(vs);
}
| false | qemu | 8e9b0d24fb986d4241ae3b77752eca5dab4cb486 |
554 | void msix_write_config(PCIDevice *dev, uint32_t addr,
uint32_t val, int len)
{
unsigned enable_pos = dev->msix_cap + MSIX_CONTROL_OFFSET;
if (addr + len <= enable_pos || addr > enable_pos)
return;
if (msix_enabled(dev))
qemu_set_irq(dev->irq[0], 0);
}
| false | qemu | 5b5cb08683b6715a2aca5314168e68ff0665912b |
556 | type_init(pflash_cfi02_register_types)
pflash_t *pflash_cfi02_register(hwaddr base,
DeviceState *qdev, const char *name,
hwaddr size,
BlockDriverState *bs, uint32_t sector_len,
int nb_blocs, int nb_mappings, int width,
uint16_t id0, uint16_t id1,
uint16_t id2, uint16_t id3,
uint16_t unlock_addr0, uint16_t unlock_addr1,
int be)
{
DeviceState *dev = qdev_create(NULL, TYPE_CFI_PFLASH02);
if (bs && qdev_prop_set_drive(dev, "drive", bs)) {
abort();
}
qdev_prop_set_uint32(dev, "num-blocks", nb_blocs);
qdev_prop_set_uint32(dev, "sector-length", sector_len);
qdev_prop_set_uint8(dev, "width", width);
qdev_prop_set_uint8(dev, "mappings", nb_mappings);
qdev_prop_set_uint8(dev, "big-endian", !!be);
qdev_prop_set_uint16(dev, "id0", id0);
qdev_prop_set_uint16(dev, "id1", id1);
qdev_prop_set_uint16(dev, "id2", id2);
qdev_prop_set_uint16(dev, "id3", id3);
qdev_prop_set_uint16(dev, "unlock-addr0", unlock_addr0);
qdev_prop_set_uint16(dev, "unlock-addr1", unlock_addr1);
qdev_prop_set_string(dev, "name", name);
qdev_init_nofail(dev);
sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, base);
return CFI_PFLASH02(dev);
}
| false | qemu | 4be746345f13e99e468c60acbd3a355e8183e3ce |
557 | static void akita_init(int ram_size, int vga_ram_size, int boot_device,
DisplayState *ds, const char **fd_filename, int snapshot,
const char *kernel_filename, const char *kernel_cmdline,
const char *initrd_filename, const char *cpu_model)
{
spitz_common_init(ram_size, vga_ram_size, ds, kernel_filename,
kernel_cmdline, initrd_filename, akita, 0x2e8);
}
| false | qemu | 4207117c93357347500235952ce7891688089cb1 |
558 | static void tcg_out_qemu_ld(TCGContext *s, TCGReg data, TCGReg addr,
TCGMemOpIdx oi, bool is_64)
{
TCGMemOp memop = get_memop(oi);
#ifdef CONFIG_SOFTMMU
unsigned memi = get_mmuidx(oi);
TCGReg addrz, param;
tcg_insn_unit *func;
tcg_insn_unit *label_ptr;
addrz = tcg_out_tlb_load(s, addr, memi, memop & MO_SIZE,
offsetof(CPUTLBEntry, addr_read));
/* The fast path is exactly one insn. Thus we can perform the
entire TLB Hit in the (annulled) delay slot of the branch
over the TLB Miss case. */
/* beq,a,pt %[xi]cc, label0 */
label_ptr = s->code_ptr;
tcg_out_bpcc0(s, COND_E, BPCC_A | BPCC_PT
| (TARGET_LONG_BITS == 64 ? BPCC_XCC : BPCC_ICC), 0);
/* delay slot */
tcg_out_ldst_rr(s, data, addrz, TCG_REG_O1,
qemu_ld_opc[memop & (MO_BSWAP | MO_SSIZE)]);
/* TLB Miss. */
param = TCG_REG_O1;
if (!SPARC64 && TARGET_LONG_BITS == 64) {
/* Skip the high-part; we'll perform the extract in the trampoline. */
param++;
}
tcg_out_mov(s, TCG_TYPE_REG, param++, addr);
/* We use the helpers to extend SB and SW data, leaving the case
of SL needing explicit extending below. */
if ((memop & MO_SSIZE) == MO_SL) {
func = qemu_ld_trampoline[memop & (MO_BSWAP | MO_SIZE)];
} else {
func = qemu_ld_trampoline[memop & (MO_BSWAP | MO_SSIZE)];
}
assert(func != NULL);
tcg_out_call_nodelay(s, func);
/* delay slot */
tcg_out_movi(s, TCG_TYPE_I32, param, oi);
/* Recall that all of the helpers return 64-bit results.
Which complicates things for sparcv8plus. */
if (SPARC64) {
/* We let the helper sign-extend SB and SW, but leave SL for here. */
if (is_64 && (memop & MO_SSIZE) == MO_SL) {
tcg_out_arithi(s, data, TCG_REG_O0, 0, SHIFT_SRA);
} else {
tcg_out_mov(s, TCG_TYPE_REG, data, TCG_REG_O0);
}
} else {
if ((memop & MO_SIZE) == MO_64) {
tcg_out_arithi(s, TCG_REG_O0, TCG_REG_O0, 32, SHIFT_SLLX);
tcg_out_arithi(s, TCG_REG_O1, TCG_REG_O1, 0, SHIFT_SRL);
tcg_out_arith(s, data, TCG_REG_O0, TCG_REG_O1, ARITH_OR);
} else if (is_64) {
/* Re-extend from 32-bit rather than reassembling when we
know the high register must be an extension. */
tcg_out_arithi(s, data, TCG_REG_O1, 0,
memop & MO_SIGN ? SHIFT_SRA : SHIFT_SRL);
} else {
tcg_out_mov(s, TCG_TYPE_I32, data, TCG_REG_O1);
}
}
*label_ptr |= INSN_OFF19(tcg_ptr_byte_diff(s->code_ptr, label_ptr));
#else
if (SPARC64 && TARGET_LONG_BITS == 32) {
tcg_out_arithi(s, TCG_REG_T1, addr, 0, SHIFT_SRL);
addr = TCG_REG_T1;
}
tcg_out_ldst_rr(s, data, addr,
(guest_base ? TCG_GUEST_BASE_REG : TCG_REG_G0),
qemu_ld_opc[memop & (MO_BSWAP | MO_SSIZE)]);
#endif /* CONFIG_SOFTMMU */
}
| false | qemu | eabb7b91b36b202b4dac2df2d59d698e3aff197a |
559 | petalogix_ml605_init(QEMUMachineInitArgs *args)
{
ram_addr_t ram_size = args->ram_size;
const char *cpu_model = args->cpu_model;
MemoryRegion *address_space_mem = get_system_memory();
DeviceState *dev, *dma, *eth0;
Object *peer;
MicroBlazeCPU *cpu;
SysBusDevice *busdev;
CPUMBState *env;
DriveInfo *dinfo;
int i;
hwaddr ddr_base = MEMORY_BASEADDR;
MemoryRegion *phys_lmb_bram = g_new(MemoryRegion, 1);
MemoryRegion *phys_ram = g_new(MemoryRegion, 1);
qemu_irq irq[32], *cpu_irq;
/* init CPUs */
if (cpu_model == NULL) {
cpu_model = "microblaze";
}
cpu = cpu_mb_init(cpu_model);
env = &cpu->env;
/* Attach emulated BRAM through the LMB. */
memory_region_init_ram(phys_lmb_bram, "petalogix_ml605.lmb_bram",
LMB_BRAM_SIZE);
vmstate_register_ram_global(phys_lmb_bram);
memory_region_add_subregion(address_space_mem, 0x00000000, phys_lmb_bram);
memory_region_init_ram(phys_ram, "petalogix_ml605.ram", ram_size);
vmstate_register_ram_global(phys_ram);
memory_region_add_subregion(address_space_mem, ddr_base, phys_ram);
dinfo = drive_get(IF_PFLASH, 0, 0);
/* 5th parameter 2 means bank-width
* 10th parameter 0 means little-endian */
pflash_cfi01_register(FLASH_BASEADDR,
NULL, "petalogix_ml605.flash", FLASH_SIZE,
dinfo ? dinfo->bdrv : NULL, (64 * 1024),
FLASH_SIZE >> 16,
2, 0x89, 0x18, 0x0000, 0x0, 0);
cpu_irq = microblaze_pic_init_cpu(env);
dev = xilinx_intc_create(INTC_BASEADDR, cpu_irq[0], 4);
for (i = 0; i < 32; i++) {
irq[i] = qdev_get_gpio_in(dev, i);
}
serial_mm_init(address_space_mem, UART16550_BASEADDR + 0x1000, 2,
irq[5], 115200, serial_hds[0], DEVICE_LITTLE_ENDIAN);
/* 2 timers at irq 2 @ 100 Mhz. */
xilinx_timer_create(TIMER_BASEADDR, irq[2], 0, 100 * 1000000);
/* axi ethernet and dma initialization. */
qemu_check_nic_model(&nd_table[0], "xlnx.axi-ethernet");
eth0 = qdev_create(NULL, "xlnx.axi-ethernet");
dma = qdev_create(NULL, "xlnx.axi-dma");
/* FIXME: attach to the sysbus instead */
object_property_add_child(qdev_get_machine(), "xilinx-eth", OBJECT(eth0),
NULL);
object_property_add_child(qdev_get_machine(), "xilinx-dma", OBJECT(dma),
NULL);
peer = object_property_get_link(OBJECT(dma),
"axistream-connected-target", NULL);
xilinx_axiethernet_init(eth0, &nd_table[0], STREAM_SLAVE(peer),
0x82780000, irq[3], 0x1000, 0x1000);
peer = object_property_get_link(OBJECT(eth0),
"axistream-connected-target", NULL);
xilinx_axidma_init(dma, STREAM_SLAVE(peer), 0x84600000, irq[1], irq[0],
100 * 1000000);
{
SSIBus *spi;
dev = qdev_create(NULL, "xlnx.xps-spi");
qdev_prop_set_uint8(dev, "num-ss-bits", NUM_SPI_FLASHES);
qdev_init_nofail(dev);
busdev = SYS_BUS_DEVICE(dev);
sysbus_mmio_map(busdev, 0, 0x40a00000);
sysbus_connect_irq(busdev, 0, irq[4]);
spi = (SSIBus *)qdev_get_child_bus(dev, "spi");
for (i = 0; i < NUM_SPI_FLASHES; i++) {
qemu_irq cs_line;
dev = ssi_create_slave(spi, "n25q128");
cs_line = qdev_get_gpio_in(dev, 0);
sysbus_connect_irq(busdev, i+1, cs_line);
}
}
microblaze_load_kernel(cpu, ddr_base, ram_size, BINARY_DEVICE_TREE_FILE,
machine_cpu_reset);
}
| false | qemu | 42bb9c9178ae7ac4c439172b1ae99cc29188a5c6 |
560 | void virtio_queue_set_num(VirtIODevice *vdev, int n, int num)
{
if (num <= VIRTQUEUE_MAX_SIZE) {
vdev->vq[n].vring.num = num;
virtqueue_init(&vdev->vq[n]);
}
}
| false | qemu | f6049f4483d61fa911a0693c2c48ce8308451d33 |
561 | static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
{
CPUState *cs = CPU(dev);
ARMCPU *cpu = ARM_CPU(dev);
ARMCPUClass *acc = ARM_CPU_GET_CLASS(dev);
CPUARMState *env = &cpu->env;
int pagebits;
Error *local_err = NULL;
cpu_exec_realizefn(cs, &local_err);
if (local_err != NULL) {
error_propagate(errp, local_err);
return;
}
/* Some features automatically imply others: */
if (arm_feature(env, ARM_FEATURE_V8)) {
set_feature(env, ARM_FEATURE_V7);
set_feature(env, ARM_FEATURE_ARM_DIV);
set_feature(env, ARM_FEATURE_LPAE);
}
if (arm_feature(env, ARM_FEATURE_V7)) {
set_feature(env, ARM_FEATURE_VAPA);
set_feature(env, ARM_FEATURE_THUMB2);
set_feature(env, ARM_FEATURE_MPIDR);
if (!arm_feature(env, ARM_FEATURE_M)) {
set_feature(env, ARM_FEATURE_V6K);
} else {
set_feature(env, ARM_FEATURE_V6);
}
/* Always define VBAR for V7 CPUs even if it doesn't exist in
* non-EL3 configs. This is needed by some legacy boards.
*/
set_feature(env, ARM_FEATURE_VBAR);
}
if (arm_feature(env, ARM_FEATURE_V6K)) {
set_feature(env, ARM_FEATURE_V6);
set_feature(env, ARM_FEATURE_MVFR);
}
if (arm_feature(env, ARM_FEATURE_V6)) {
set_feature(env, ARM_FEATURE_V5);
if (!arm_feature(env, ARM_FEATURE_M)) {
set_feature(env, ARM_FEATURE_AUXCR);
}
}
if (arm_feature(env, ARM_FEATURE_V5)) {
set_feature(env, ARM_FEATURE_V4T);
}
if (arm_feature(env, ARM_FEATURE_M)) {
set_feature(env, ARM_FEATURE_THUMB_DIV);
}
if (arm_feature(env, ARM_FEATURE_ARM_DIV)) {
set_feature(env, ARM_FEATURE_THUMB_DIV);
}
if (arm_feature(env, ARM_FEATURE_VFP4)) {
set_feature(env, ARM_FEATURE_VFP3);
set_feature(env, ARM_FEATURE_VFP_FP16);
}
if (arm_feature(env, ARM_FEATURE_VFP3)) {
set_feature(env, ARM_FEATURE_VFP);
}
if (arm_feature(env, ARM_FEATURE_LPAE)) {
set_feature(env, ARM_FEATURE_V7MP);
set_feature(env, ARM_FEATURE_PXN);
}
if (arm_feature(env, ARM_FEATURE_CBAR_RO)) {
set_feature(env, ARM_FEATURE_CBAR);
}
if (arm_feature(env, ARM_FEATURE_THUMB2) &&
!arm_feature(env, ARM_FEATURE_M)) {
set_feature(env, ARM_FEATURE_THUMB_DSP);
}
if (arm_feature(env, ARM_FEATURE_V7) &&
!arm_feature(env, ARM_FEATURE_M) &&
!arm_feature(env, ARM_FEATURE_MPU)) {
/* v7VMSA drops support for the old ARMv5 tiny pages, so we
* can use 4K pages.
*/
pagebits = 12;
} else {
/* For CPUs which might have tiny 1K pages, or which have an
* MPU and might have small region sizes, stick with 1K pages.
*/
pagebits = 10;
}
if (!set_preferred_target_page_bits(pagebits)) {
/* This can only ever happen for hotplugging a CPU, or if
* the board code incorrectly creates a CPU which it has
* promised via minimum_page_size that it will not.
*/
error_setg(errp, "This CPU requires a smaller page size than the "
"system is using");
return;
}
/* This cpu-id-to-MPIDR affinity is used only for TCG; KVM will override it.
* We don't support setting cluster ID ([16..23]) (known as Aff2
* in later ARM ARM versions), or any of the higher affinity level fields,
* so these bits always RAZ.
*/
if (cpu->mp_affinity == ARM64_AFFINITY_INVALID) {
uint32_t Aff1 = cs->cpu_index / ARM_DEFAULT_CPUS_PER_CLUSTER;
uint32_t Aff0 = cs->cpu_index % ARM_DEFAULT_CPUS_PER_CLUSTER;
cpu->mp_affinity = (Aff1 << ARM_AFF1_SHIFT) | Aff0;
}
if (cpu->reset_hivecs) {
cpu->reset_sctlr |= (1 << 13);
}
if (cpu->cfgend) {
if (arm_feature(&cpu->env, ARM_FEATURE_V7)) {
cpu->reset_sctlr |= SCTLR_EE;
} else {
cpu->reset_sctlr |= SCTLR_B;
}
}
if (!cpu->has_el3) {
/* If the has_el3 CPU property is disabled then we need to disable the
* feature.
*/
unset_feature(env, ARM_FEATURE_EL3);
/* Disable the security extension feature bits in the processor feature
* registers as well. These are id_pfr1[7:4] and id_aa64pfr0[15:12].
*/
cpu->id_pfr1 &= ~0xf0;
cpu->id_aa64pfr0 &= ~0xf000;
}
if (!cpu->has_el2) {
unset_feature(env, ARM_FEATURE_EL2);
}
if (!cpu->has_pmu || !kvm_enabled()) {
cpu->has_pmu = false;
unset_feature(env, ARM_FEATURE_PMU);
}
if (!arm_feature(env, ARM_FEATURE_EL2)) {
/* Disable the hypervisor feature bits in the processor feature
* registers if we don't have EL2. These are id_pfr1[15:12] and
* id_aa64pfr0_el1[11:8].
*/
cpu->id_aa64pfr0 &= ~0xf00;
cpu->id_pfr1 &= ~0xf000;
}
if (!cpu->has_mpu) {
unset_feature(env, ARM_FEATURE_MPU);
}
if (arm_feature(env, ARM_FEATURE_MPU) &&
arm_feature(env, ARM_FEATURE_V7)) {
uint32_t nr = cpu->pmsav7_dregion;
if (nr > 0xff) {
error_setg(errp, "PMSAv7 MPU #regions invalid %" PRIu32, nr);
return;
}
if (nr) {
env->pmsav7.drbar = g_new0(uint32_t, nr);
env->pmsav7.drsr = g_new0(uint32_t, nr);
env->pmsav7.dracr = g_new0(uint32_t, nr);
}
}
if (arm_feature(env, ARM_FEATURE_EL3)) {
set_feature(env, ARM_FEATURE_VBAR);
}
register_cp_regs_for_features(cpu);
arm_cpu_register_gdb_regs_for_features(cpu);
init_cpreg_list(cpu);
#ifndef CONFIG_USER_ONLY
if (cpu->has_el3) {
cs->num_ases = 2;
} else {
cs->num_ases = 1;
}
if (cpu->has_el3) {
AddressSpace *as;
if (!cpu->secure_memory) {
cpu->secure_memory = cs->memory;
}
as = address_space_init_shareable(cpu->secure_memory,
"cpu-secure-memory");
cpu_address_space_init(cs, as, ARMASIdx_S);
}
cpu_address_space_init(cs,
address_space_init_shareable(cs->memory,
"cpu-memory"),
ARMASIdx_NS);
#endif
qemu_init_vcpu(cs);
cpu_reset(cs);
acc->parent_realize(dev, errp);
}
| false | qemu | d6f02ce3b8a43ddd8f83553fe754a34b26fb273f |
562 | static bool nbd_process_legacy_socket_options(QDict *output_options,
QemuOpts *legacy_opts,
Error **errp)
{
const char *path = qemu_opt_get(legacy_opts, "path");
const char *host = qemu_opt_get(legacy_opts, "host");
const char *port = qemu_opt_get(legacy_opts, "port");
const QDictEntry *e;
if (!path && !host && !port) {
return true;
}
for (e = qdict_first(output_options); e; e = qdict_next(output_options, e))
{
if (strstart(e->key, "server.", NULL)) {
error_setg(errp, "Cannot use 'server' and path/host/port at the "
"same time");
return false;
}
}
if (path && host) {
error_setg(errp, "path and host may not be used at the same time");
return false;
} else if (path) {
if (port) {
error_setg(errp, "port may not be used without host");
return false;
}
qdict_put(output_options, "server.type", qstring_from_str("unix"));
qdict_put(output_options, "server.data.path", qstring_from_str(path));
} else if (host) {
qdict_put(output_options, "server.type", qstring_from_str("inet"));
qdict_put(output_options, "server.data.host", qstring_from_str(host));
qdict_put(output_options, "server.data.port",
qstring_from_str(port ?: stringify(NBD_DEFAULT_PORT)));
}
return true;
}
| false | qemu | 9445673ea67c272616b9f718396e267caa6446b7 |
563 | static void cirrus_linear_bitblt_write(void *opaque,
target_phys_addr_t addr,
uint64_t val,
unsigned size)
{
CirrusVGAState *s = opaque;
if (s->cirrus_srcptr != s->cirrus_srcptr_end) {
/* bitblt */
*s->cirrus_srcptr++ = (uint8_t) val;
if (s->cirrus_srcptr >= s->cirrus_srcptr_end) {
cirrus_bitblt_cputovideo_next(s);
}
}
}
| false | qemu | a8170e5e97ad17ca169c64ba87ae2f53850dab4c |
564 | static int default_monitor_get_fd(Monitor *mon, const char *name, Error **errp)
{
error_setg(errp, "only QEMU supports file descriptor passing");
return -1;
}
| false | qemu | 1f001dc7bc9e435bf231a5b0edcad1c7c2bd6214 |
565 | uint64_t helper_st_virt_to_phys (uint64_t virtaddr)
{
uint64_t tlb_addr, physaddr;
int index, mmu_idx;
void *retaddr;
mmu_idx = cpu_mmu_index(env);
index = (virtaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
redo:
tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
if ((virtaddr & TARGET_PAGE_MASK) ==
(tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
physaddr = virtaddr + env->tlb_table[mmu_idx][index].addend;
} else {
/* the page is not in the TLB : fill it */
retaddr = GETPC();
tlb_fill(virtaddr, 1, mmu_idx, retaddr);
goto redo;
}
return physaddr;
}
| false | qemu | 2374e73edafff0586cbfb67c333c5a7588f81fd5 |
567 | static int decode_recovery_point(H264Context *h)
{
h->sei_recovery_frame_cnt = get_ue_golomb(&h->gb);
/* 1b exact_match_flag,
* 1b broken_link_flag,
* 2b changing_slice_group_idc */
skip_bits(&h->gb, 4);
if (h->avctx->debug & FF_DEBUG_PICT_INFO)
av_log(h->avctx, AV_LOG_DEBUG, "sei_recovery_frame_cnt: %d\n", h->sei_recovery_frame_cnt);
h->has_recovery_point = 1;
return 0;
}
| false | FFmpeg | c51c08e0e70c186971385bdbb225f69edd4e3375 |
568 | static int mkv_write_header(AVFormatContext *s)
{
MatroskaMuxContext *mkv = s->priv_data;
AVIOContext *pb = s->pb;
ebml_master ebml_header;
AVDictionaryEntry *tag;
int ret, i, version = 2;
int64_t creation_time;
if (!strcmp(s->oformat->name, "webm"))
mkv->mode = MODE_WEBM;
else
mkv->mode = MODE_MATROSKAv2;
if (mkv->mode != MODE_WEBM ||
av_dict_get(s->metadata, "stereo_mode", NULL, 0) ||
av_dict_get(s->metadata, "alpha_mode", NULL, 0))
version = 4;
for (i = 0; i < s->nb_streams; i++) {
if (s->streams[i]->codecpar->codec_id == AV_CODEC_ID_ATRAC3 ||
s->streams[i]->codecpar->codec_id == AV_CODEC_ID_COOK ||
s->streams[i]->codecpar->codec_id == AV_CODEC_ID_RA_288 ||
s->streams[i]->codecpar->codec_id == AV_CODEC_ID_SIPR ||
s->streams[i]->codecpar->codec_id == AV_CODEC_ID_RV10 ||
s->streams[i]->codecpar->codec_id == AV_CODEC_ID_RV20) {
av_log(s, AV_LOG_ERROR,
"The Matroska muxer does not yet support muxing %s\n",
avcodec_get_name(s->streams[i]->codecpar->codec_id));
return AVERROR_PATCHWELCOME;
}
if (s->streams[i]->codecpar->codec_id == AV_CODEC_ID_OPUS ||
av_dict_get(s->streams[i]->metadata, "stereo_mode", NULL, 0) ||
av_dict_get(s->streams[i]->metadata, "alpha_mode", NULL, 0))
version = 4;
}
mkv->tracks = av_mallocz_array(s->nb_streams, sizeof(*mkv->tracks));
if (!mkv->tracks) {
ret = AVERROR(ENOMEM);
goto fail;
}
ebml_header = start_ebml_master(pb, EBML_ID_HEADER, 0);
put_ebml_uint (pb, EBML_ID_EBMLVERSION , 1);
put_ebml_uint (pb, EBML_ID_EBMLREADVERSION , 1);
put_ebml_uint (pb, EBML_ID_EBMLMAXIDLENGTH , 4);
put_ebml_uint (pb, EBML_ID_EBMLMAXSIZELENGTH , 8);
put_ebml_string (pb, EBML_ID_DOCTYPE , s->oformat->name);
put_ebml_uint (pb, EBML_ID_DOCTYPEVERSION , version);
put_ebml_uint (pb, EBML_ID_DOCTYPEREADVERSION , 2);
end_ebml_master(pb, ebml_header);
mkv->segment = start_ebml_master(pb, MATROSKA_ID_SEGMENT, 0);
mkv->segment_offset = avio_tell(pb);
// we write 2 seek heads - one at the end of the file to point to each
// cluster, and one at the beginning to point to all other level one
// elements (including the seek head at the end of the file), which
// isn't more than 10 elements if we only write one of each other
// currently defined level 1 element
mkv->main_seekhead = mkv_start_seekhead(pb, mkv->segment_offset, 10);
if (!mkv->main_seekhead) {
ret = AVERROR(ENOMEM);
goto fail;
}
ret = mkv_add_seekhead_entry(mkv->main_seekhead, MATROSKA_ID_INFO, avio_tell(pb));
if (ret < 0) goto fail;
ret = start_ebml_master_crc32(pb, &mkv->info_bc, mkv, &mkv->info, MATROSKA_ID_INFO, 0);
if (ret < 0)
return ret;
pb = mkv->info_bc;
put_ebml_uint(pb, MATROSKA_ID_TIMECODESCALE, 1000000);
if ((tag = av_dict_get(s->metadata, "title", NULL, 0)))
put_ebml_string(pb, MATROSKA_ID_TITLE, tag->value);
if (!(s->flags & AVFMT_FLAG_BITEXACT)) {
put_ebml_string(pb, MATROSKA_ID_MUXINGAPP, LIBAVFORMAT_IDENT);
if ((tag = av_dict_get(s->metadata, "encoding_tool", NULL, 0)))
put_ebml_string(pb, MATROSKA_ID_WRITINGAPP, tag->value);
else
put_ebml_string(pb, MATROSKA_ID_WRITINGAPP, LIBAVFORMAT_IDENT);
if (mkv->mode != MODE_WEBM) {
uint32_t segment_uid[4];
AVLFG lfg;
av_lfg_init(&lfg, av_get_random_seed());
for (i = 0; i < 4; i++)
segment_uid[i] = av_lfg_get(&lfg);
put_ebml_binary(pb, MATROSKA_ID_SEGMENTUID, segment_uid, 16);
}
} else {
const char *ident = "Lavf";
put_ebml_string(pb, MATROSKA_ID_MUXINGAPP , ident);
put_ebml_string(pb, MATROSKA_ID_WRITINGAPP, ident);
}
if (ff_parse_creation_time_metadata(s, &creation_time, 0) > 0) {
// Adjust time so it's relative to 2001-01-01 and convert to nanoseconds.
int64_t date_utc = (creation_time - 978307200000000LL) * 1000;
uint8_t date_utc_buf[8];
AV_WB64(date_utc_buf, date_utc);
put_ebml_binary(pb, MATROSKA_ID_DATEUTC, date_utc_buf, 8);
}
// reserve space for the duration
mkv->duration = 0;
mkv->duration_offset = avio_tell(pb);
if (!mkv->is_live) {
int64_t metadata_duration = get_metadata_duration(s);
if (s->duration > 0) {
int64_t scaledDuration = av_rescale(s->duration, 1000, AV_TIME_BASE);
put_ebml_float(pb, MATROSKA_ID_DURATION, scaledDuration);
av_log(s, AV_LOG_DEBUG, "Write early duration from recording time = %" PRIu64 "\n", scaledDuration);
} else if (metadata_duration > 0) {
int64_t scaledDuration = av_rescale(metadata_duration, 1000, AV_TIME_BASE);
put_ebml_float(pb, MATROSKA_ID_DURATION, scaledDuration);
av_log(s, AV_LOG_DEBUG, "Write early duration from metadata = %" PRIu64 "\n", scaledDuration);
} else {
put_ebml_void(pb, 11); // assumes double-precision float to be written
}
}
if (s->pb->seekable && !mkv->is_live)
put_ebml_void(s->pb, avio_tell(pb));
else
end_ebml_master_crc32(s->pb, &mkv->info_bc, mkv, mkv->info);
pb = s->pb;
// initialize stream_duration fields
mkv->stream_durations = av_mallocz(s->nb_streams * sizeof(int64_t));
mkv->stream_duration_offsets = av_mallocz(s->nb_streams * sizeof(int64_t));
ret = mkv_write_tracks(s);
if (ret < 0)
goto fail;
for (i = 0; i < s->nb_chapters; i++)
mkv->chapter_id_offset = FFMAX(mkv->chapter_id_offset, 1LL - s->chapters[i]->id);
if (mkv->mode != MODE_WEBM) {
ret = mkv_write_chapters(s);
if (ret < 0)
goto fail;
ret = mkv_write_attachments(s);
if (ret < 0)
goto fail;
ret = mkv_write_tags(s);
if (ret < 0)
goto fail;
}
if (!s->pb->seekable && !mkv->is_live)
mkv_write_seekhead(pb, mkv);
mkv->cues = mkv_start_cues(mkv->segment_offset);
if (!mkv->cues) {
ret = AVERROR(ENOMEM);
goto fail;
}
if (pb->seekable && mkv->reserve_cues_space) {
mkv->cues_pos = avio_tell(pb);
put_ebml_void(pb, mkv->reserve_cues_space);
}
av_init_packet(&mkv->cur_audio_pkt);
mkv->cur_audio_pkt.size = 0;
mkv->cluster_pos = -1;
avio_flush(pb);
// start a new cluster every 5 MB or 5 sec, or 32k / 1 sec for streaming or
// after 4k and on a keyframe
if (pb->seekable) {
if (mkv->cluster_time_limit < 0)
mkv->cluster_time_limit = 5000;
if (mkv->cluster_size_limit < 0)
mkv->cluster_size_limit = 5 * 1024 * 1024;
} else {
if (mkv->cluster_time_limit < 0)
mkv->cluster_time_limit = 1000;
if (mkv->cluster_size_limit < 0)
mkv->cluster_size_limit = 32 * 1024;
}
return 0;
fail:
mkv_free(mkv);
return ret;
}
| true | FFmpeg | 20e8be0c20c7b51964fa4d317073bd36b983eb55 |
570 | static int cpu_gdb_write_register(CPUState *env, uint8_t *mem_buf, int n)
{
uint32_t tmp;
if (n < CPU_NB_REGS) {
env->regs[gpr_map[n]] = ldtul_p(mem_buf);
return sizeof(target_ulong);
} else if (n >= IDX_FP_REGS && n < IDX_FP_REGS + 8) {
#ifdef USE_X86LDOUBLE
/* FIXME: byteswap float values - after fixing fpregs layout. */
memcpy(&env->fpregs[n - IDX_FP_REGS], mem_buf, 10);
#endif
return 10;
} else if (n >= IDX_XMM_REGS && n < IDX_XMM_REGS + CPU_NB_REGS) {
n -= IDX_XMM_REGS;
env->xmm_regs[n].XMM_Q(0) = ldq_p(mem_buf);
env->xmm_regs[n].XMM_Q(1) = ldq_p(mem_buf + 8);
return 16;
} else {
switch (n) {
case IDX_IP_REG:
env->eip = ldtul_p(mem_buf);
return sizeof(target_ulong);
case IDX_FLAGS_REG:
env->eflags = ldl_p(mem_buf);
return 4;
#if defined(CONFIG_USER_ONLY)
#define LOAD_SEG(index, sreg)\
tmp = ldl_p(mem_buf);\
if (tmp != env->segs[sreg].selector)\
cpu_x86_load_seg(env, sreg, tmp);\
return 4
#else
/* FIXME: Honor segment registers. Needs to avoid raising an exception
when the selector is invalid. */
#define LOAD_SEG(index, sreg) return 4
#endif
case IDX_SEG_REGS: LOAD_SEG(10, R_CS);
case IDX_SEG_REGS + 1: LOAD_SEG(11, R_SS);
case IDX_SEG_REGS + 2: LOAD_SEG(12, R_DS);
case IDX_SEG_REGS + 3: LOAD_SEG(13, R_ES);
case IDX_SEG_REGS + 4: LOAD_SEG(14, R_FS);
case IDX_SEG_REGS + 5: LOAD_SEG(15, R_GS);
case IDX_FP_REGS + 8:
env->fpuc = ldl_p(mem_buf);
return 4;
case IDX_FP_REGS + 9:
tmp = ldl_p(mem_buf);
env->fpstt = (tmp >> 11) & 7;
env->fpus = tmp & ~0x3800;
return 4;
case IDX_FP_REGS + 10: /* ftag */ return 4;
case IDX_FP_REGS + 11: /* fiseg */ return 4;
case IDX_FP_REGS + 12: /* fioff */ return 4;
case IDX_FP_REGS + 13: /* foseg */ return 4;
case IDX_FP_REGS + 14: /* fooff */ return 4;
case IDX_FP_REGS + 15: /* fop */ return 4;
case IDX_MXCSR_REG:
env->mxcsr = ldl_p(mem_buf);
return 4;
}
}
/* Unrecognised register. */
return 0;
}
| true | qemu | 84273177f25886b3476138470280890001debcbc |
571 | static void setup_rt_frame(int sig, struct target_sigaction *ka,
target_siginfo_t *info,
target_sigset_t *set, CPUS390XState *env)
{
int i;
rt_sigframe *frame;
abi_ulong frame_addr;
frame_addr = get_sigframe(ka, env, sizeof *frame);
qemu_log("%s: frame_addr 0x%llx\n", __FUNCTION__,
(unsigned long long)frame_addr);
if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
goto give_sigsegv;
}
qemu_log("%s: 1\n", __FUNCTION__);
copy_siginfo_to_user(&frame->info, info);
/* Create the ucontext. */
__put_user(0, &frame->uc.tuc_flags);
__put_user((abi_ulong)0, (abi_ulong *)&frame->uc.tuc_link);
__put_user(target_sigaltstack_used.ss_sp, &frame->uc.tuc_stack.ss_sp);
__put_user(sas_ss_flags(get_sp_from_cpustate(env)),
&frame->uc.tuc_stack.ss_flags);
__put_user(target_sigaltstack_used.ss_size, &frame->uc.tuc_stack.ss_size);
save_sigregs(env, &frame->uc.tuc_mcontext);
for (i = 0; i < TARGET_NSIG_WORDS; i++) {
__put_user((abi_ulong)set->sig[i],
(abi_ulong *)&frame->uc.tuc_sigmask.sig[i]);
}
/* Set up to return from userspace. If provided, use a stub
already in userspace. */
if (ka->sa_flags & TARGET_SA_RESTORER) {
env->regs[14] = (unsigned long) ka->sa_restorer | PSW_ADDR_AMODE;
} else {
env->regs[14] = (unsigned long) frame->retcode | PSW_ADDR_AMODE;
if (__put_user(S390_SYSCALL_OPCODE | TARGET_NR_rt_sigreturn,
(uint16_t *)(frame->retcode))) {
goto give_sigsegv;
}
}
/* Set up backchain. */
if (__put_user(env->regs[15], (abi_ulong *) frame)) {
goto give_sigsegv;
}
/* Set up registers for signal handler */
env->regs[15] = frame_addr;
env->psw.addr = (target_ulong) ka->_sa_handler | PSW_ADDR_AMODE;
env->regs[2] = sig; //map_signal(sig);
env->regs[3] = frame_addr + offsetof(typeof(*frame), info);
env->regs[4] = frame_addr + offsetof(typeof(*frame), uc);
return;
give_sigsegv:
qemu_log("%s: give_sigsegv\n", __FUNCTION__);
unlock_user_struct(frame, frame_addr, 1);
force_sig(TARGET_SIGSEGV);
}
| true | qemu | 0188fadb7fe460d8c4c743372b1f7b25773e183e |
575 | static int dxv_decompress_dxt5(AVCodecContext *avctx)
{
DXVContext *ctx = avctx->priv_data;
GetByteContext *gbc = &ctx->gbc;
uint32_t value, op;
int idx, prev, state = 0;
int pos = 4;
int run = 0;
int probe, check;
/* Copy the first four elements */
AV_WL32(ctx->tex_data + 0, bytestream2_get_le32(gbc));
AV_WL32(ctx->tex_data + 4, bytestream2_get_le32(gbc));
AV_WL32(ctx->tex_data + 8, bytestream2_get_le32(gbc));
AV_WL32(ctx->tex_data + 12, bytestream2_get_le32(gbc));
/* Process input until the whole texture has been filled */
while (pos + 2 <= ctx->tex_size / 4) {
if (run) {
run--;
prev = AV_RL32(ctx->tex_data + 4 * (pos - 4));
AV_WL32(ctx->tex_data + 4 * pos, prev);
pos++;
prev = AV_RL32(ctx->tex_data + 4 * (pos - 4));
AV_WL32(ctx->tex_data + 4 * pos, prev);
pos++;
} else {
if (state == 0) {
value = bytestream2_get_le32(gbc);
state = 16;
}
op = value & 0x3;
value >>= 2;
state--;
switch (op) {
case 0:
/* Long copy */
check = bytestream2_get_byte(gbc) + 1;
if (check == 256) {
do {
probe = bytestream2_get_le16(gbc);
check += probe;
} while (probe == 0xFFFF);
}
while (check && pos + 4 <= ctx->tex_size / 4) {
prev = AV_RL32(ctx->tex_data + 4 * (pos - 4));
AV_WL32(ctx->tex_data + 4 * pos, prev);
pos++;
prev = AV_RL32(ctx->tex_data + 4 * (pos - 4));
AV_WL32(ctx->tex_data + 4 * pos, prev);
pos++;
prev = AV_RL32(ctx->tex_data + 4 * (pos - 4));
AV_WL32(ctx->tex_data + 4 * pos, prev);
pos++;
prev = AV_RL32(ctx->tex_data + 4 * (pos - 4));
AV_WL32(ctx->tex_data + 4 * pos, prev);
pos++;
check--;
}
/* Restart (or exit) the loop */
continue;
break;
case 1:
/* Load new run value */
run = bytestream2_get_byte(gbc);
if (run == 255) {
do {
probe = bytestream2_get_le16(gbc);
run += probe;
} while (probe == 0xFFFF);
}
/* Copy two dwords from previous data */
prev = AV_RL32(ctx->tex_data + 4 * (pos - 4));
AV_WL32(ctx->tex_data + 4 * pos, prev);
pos++;
prev = AV_RL32(ctx->tex_data + 4 * (pos - 4));
AV_WL32(ctx->tex_data + 4 * pos, prev);
pos++;
break;
case 2:
/* Copy two dwords from a previous index */
idx = 8 + bytestream2_get_le16(gbc);
                if (idx > pos || (unsigned int)(pos - idx) + 2 > ctx->tex_size / 4)
                    return AVERROR_INVALIDDATA;
prev = AV_RL32(ctx->tex_data + 4 * (pos - idx));
AV_WL32(ctx->tex_data + 4 * pos, prev);
pos++;
prev = AV_RL32(ctx->tex_data + 4 * (pos - idx));
AV_WL32(ctx->tex_data + 4 * pos, prev);
pos++;
break;
case 3:
/* Copy two dwords from input */
prev = bytestream2_get_le32(gbc);
AV_WL32(ctx->tex_data + 4 * pos, prev);
pos++;
prev = bytestream2_get_le32(gbc);
AV_WL32(ctx->tex_data + 4 * pos, prev);
pos++;
break;
}
}
CHECKPOINT(4);
        if (pos + 2 > ctx->tex_size / 4)
            return AVERROR_INVALIDDATA;
/* Copy two elements from a previous offset or from the input buffer */
if (op) {
            if (idx > pos || (unsigned int)(pos - idx) + 2 > ctx->tex_size / 4)
                return AVERROR_INVALIDDATA;
prev = AV_RL32(ctx->tex_data + 4 * (pos - idx));
AV_WL32(ctx->tex_data + 4 * pos, prev);
pos++;
prev = AV_RL32(ctx->tex_data + 4 * (pos - idx));
AV_WL32(ctx->tex_data + 4 * pos, prev);
pos++;
} else {
CHECKPOINT(4);
            if (op && (idx > pos || (unsigned int)(pos - idx) + 2 > ctx->tex_size / 4))
                return AVERROR_INVALIDDATA;
if (op)
prev = AV_RL32(ctx->tex_data + 4 * (pos - idx));
else
prev = bytestream2_get_le32(gbc);
AV_WL32(ctx->tex_data + 4 * pos, prev);
pos++;
CHECKPOINT(4);
if (op)
prev = AV_RL32(ctx->tex_data + 4 * (pos - idx));
else
prev = bytestream2_get_le32(gbc);
AV_WL32(ctx->tex_data + 4 * pos, prev);
pos++;
}
}
return 0;
}
 | true | FFmpeg | d662143f064636f11d92083cd9aa4f907cf97d59
576 | static void gd_update_geometry_hints(VirtualConsole *vc)
{
GtkDisplayState *s = vc->s;
GdkWindowHints mask = 0;
GdkGeometry geo = {};
GtkWidget *geo_widget = NULL;
GtkWindow *geo_window;
if (vc->type == GD_VC_GFX) {
if (!vc->gfx.ds) {
return;
}
if (s->free_scale) {
geo.min_width = surface_width(vc->gfx.ds) * VC_SCALE_MIN;
geo.min_height = surface_height(vc->gfx.ds) * VC_SCALE_MIN;
mask |= GDK_HINT_MIN_SIZE;
} else {
geo.min_width = surface_width(vc->gfx.ds) * vc->gfx.scale_x;
geo.min_height = surface_height(vc->gfx.ds) * vc->gfx.scale_y;
mask |= GDK_HINT_MIN_SIZE;
}
geo_widget = vc->gfx.drawing_area;
gtk_widget_set_size_request(geo_widget, geo.min_width, geo.min_height);
#if defined(CONFIG_VTE)
} else if (vc->type == GD_VC_VTE) {
VteTerminal *term = VTE_TERMINAL(vc->vte.terminal);
GtkBorder *ib;
geo.width_inc = vte_terminal_get_char_width(term);
geo.height_inc = vte_terminal_get_char_height(term);
mask |= GDK_HINT_RESIZE_INC;
geo.base_width = geo.width_inc;
geo.base_height = geo.height_inc;
mask |= GDK_HINT_BASE_SIZE;
geo.min_width = geo.width_inc * VC_TERM_X_MIN;
geo.min_height = geo.height_inc * VC_TERM_Y_MIN;
mask |= GDK_HINT_MIN_SIZE;
gtk_widget_style_get(vc->vte.terminal, "inner-border", &ib, NULL);
geo.base_width += ib->left + ib->right;
geo.base_height += ib->top + ib->bottom;
geo.min_width += ib->left + ib->right;
geo.min_height += ib->top + ib->bottom;
geo_widget = vc->vte.terminal;
#endif
}
geo_window = GTK_WINDOW(vc->window ? vc->window : s->window);
gtk_window_set_geometry_hints(geo_window, geo_widget, &geo, mask);
}
| true | qemu | 4fd811a6bd0b8f24f4761fc281454494c336d310 |
577 | static uint32_t cc_calc_abs_64(int64_t dst)
{
if ((uint64_t)dst == 0x8000000000000000ULL) {
return 3;
} else if (dst) {
return 1;
} else {
return 0;
}
}
| true | qemu | 2aaa1940684a3bf2b381fd2a8ff26c287a05109d |
579 | static void virtio_serial_device_unrealize(DeviceState *dev, Error **errp)
{
VirtIODevice *vdev = VIRTIO_DEVICE(dev);
VirtIOSerial *vser = VIRTIO_SERIAL(dev);
QLIST_REMOVE(vser, next);
g_free(vser->ivqs);
g_free(vser->ovqs);
g_free(vser->ports_map);
if (vser->post_load) {
g_free(vser->post_load->connected);
timer_del(vser->post_load->timer);
timer_free(vser->post_load->timer);
g_free(vser->post_load);
}
virtio_cleanup(vdev);
}
 | true | qemu | f811f97040a48358b456b46ecbc9167f0131034f
580 | static int flashsv2_prime(FlashSVContext *s, uint8_t *src, int size)
{
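    /* Prime the shared zlib inflate state: decompress the priming data, re-wrap it
     * in a level-0 deflate stream, then run it back through the reset inflater so
     * its dictionary matches the reference block. */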
z_stream zs;
int zret; // Zlib return code
zs.zalloc = NULL;
zs.zfree = NULL;
zs.opaque = NULL;
s->zstream.next_in = src;
s->zstream.avail_in = size;
s->zstream.next_out = s->tmpblock;
s->zstream.avail_out = s->block_size * 3;
inflate(&s->zstream, Z_SYNC_FLUSH);
if (deflateInit(&zs, 0) != Z_OK)
return -1;
zs.next_in = s->tmpblock;
zs.avail_in = s->block_size * 3 - s->zstream.avail_out;
zs.next_out = s->deflate_block;
zs.avail_out = s->deflate_block_size;
deflate(&zs, Z_SYNC_FLUSH);
deflateEnd(&zs);
if ((zret = inflateReset(&s->zstream)) != Z_OK) {
av_log(s->avctx, AV_LOG_ERROR, "Inflate reset error: %d\n", zret);
return AVERROR_UNKNOWN;
}
s->zstream.next_in = s->deflate_block;
s->zstream.avail_in = s->deflate_block_size - zs.avail_out;
s->zstream.next_out = s->tmpblock;
s->zstream.avail_out = s->block_size * 3;
inflate(&s->zstream, Z_SYNC_FLUSH);
return 0;
}
 | true | FFmpeg | b6671787db5b5d53e065f88e52a35d94cb50504c
581 | static void sbr_hf_assemble(int Y1[38][64][2],
const int X_high[64][40][2],
SpectralBandReplication *sbr, SBRData *ch_data,
const int e_a[2])
{
int e, i, j, m;
const int h_SL = 4 * !sbr->bs_smoothing_mode;
const int kx = sbr->kx[1];
const int m_max = sbr->m[1];
static const SoftFloat h_smooth[5] = {
{ 715827883, -1 },
{ 647472402, -1 },
{ 937030863, -2 },
{ 989249804, -3 },
{ 546843842, -4 },
};
SoftFloat (*g_temp)[48] = ch_data->g_temp, (*q_temp)[48] = ch_data->q_temp;
int indexnoise = ch_data->f_indexnoise;
int indexsine = ch_data->f_indexsine;
if (sbr->reset) {
for (i = 0; i < h_SL; i++) {
memcpy(g_temp[i + 2*ch_data->t_env[0]], sbr->gain[0], m_max * sizeof(sbr->gain[0][0]));
memcpy(q_temp[i + 2*ch_data->t_env[0]], sbr->q_m[0], m_max * sizeof(sbr->q_m[0][0]));
}
} else if (h_SL) {
for (i = 0; i < 4; i++) {
memcpy(g_temp[i + 2 * ch_data->t_env[0]],
g_temp[i + 2 * ch_data->t_env_num_env_old],
sizeof(g_temp[0]));
memcpy(q_temp[i + 2 * ch_data->t_env[0]],
q_temp[i + 2 * ch_data->t_env_num_env_old],
sizeof(q_temp[0]));
}
}
for (e = 0; e < ch_data->bs_num_env; e++) {
for (i = 2 * ch_data->t_env[e]; i < 2 * ch_data->t_env[e + 1]; i++) {
memcpy(g_temp[h_SL + i], sbr->gain[e], m_max * sizeof(sbr->gain[0][0]));
memcpy(q_temp[h_SL + i], sbr->q_m[e], m_max * sizeof(sbr->q_m[0][0]));
}
}
for (e = 0; e < ch_data->bs_num_env; e++) {
for (i = 2 * ch_data->t_env[e]; i < 2 * ch_data->t_env[e + 1]; i++) {
SoftFloat g_filt_tab[48];
SoftFloat q_filt_tab[48];
SoftFloat *g_filt, *q_filt;
if (h_SL && e != e_a[0] && e != e_a[1]) {
g_filt = g_filt_tab;
q_filt = q_filt_tab;
for (m = 0; m < m_max; m++) {
const int idx1 = i + h_SL;
g_filt[m].mant = g_filt[m].exp = 0;
q_filt[m].mant = q_filt[m].exp = 0;
for (j = 0; j <= h_SL; j++) {
g_filt[m] = av_add_sf(g_filt[m],
av_mul_sf(g_temp[idx1 - j][m],
h_smooth[j]));
q_filt[m] = av_add_sf(q_filt[m],
av_mul_sf(q_temp[idx1 - j][m],
h_smooth[j]));
}
}
} else {
g_filt = g_temp[i + h_SL];
q_filt = q_temp[i];
}
sbr->dsp.hf_g_filt(Y1[i] + kx, X_high + kx, g_filt, m_max,
i + ENVELOPE_ADJUSTMENT_OFFSET);
if (e != e_a[0] && e != e_a[1]) {
sbr->dsp.hf_apply_noise[indexsine](Y1[i] + kx, sbr->s_m[e],
q_filt, indexnoise,
kx, m_max);
} else {
int idx = indexsine&1;
int A = (1-((indexsine+(kx & 1))&2));
int B = (A^(-idx)) + idx;
int *out = &Y1[i][kx][idx];
int shift, round;
SoftFloat *in = sbr->s_m[e];
for (m = 0; m+1 < m_max; m+=2) {
shift = 22 - in[m ].exp;
if (shift < 32) {
round = 1 << (shift-1);
out[2*m ] += (in[m ].mant * A + round) >> shift;
}
shift = 22 - in[m+1].exp;
if (shift < 32) {
round = 1 << (shift-1);
out[2*m+2] += (in[m+1].mant * B + round) >> shift;
}
}
if(m_max&1)
{
shift = 22 - in[m ].exp;
if (shift < 32) {
round = 1 << (shift-1);
out[2*m ] += (in[m ].mant * A + round) >> shift;
}
}
}
indexnoise = (indexnoise + m_max) & 0x1ff;
indexsine = (indexsine + 1) & 3;
}
}
ch_data->f_indexnoise = indexnoise;
ch_data->f_indexsine = indexsine;
}
| true | FFmpeg | d1992448d37f7cfa2acda5cc729dc0ff1b019390 |
582 | static int vid_probe(AVProbeData *p)
{
// little endian VID tag, file starts with "VID\0"
if (p->buf_size < 4 || AV_RL32(p->buf) != MKTAG('V', 'I', 'D', 0))
return 0;
return AVPROBE_SCORE_MAX;
}
| false | FFmpeg | 87e8788680e16c51f6048af26f3f7830c35207a5 |
584 | static inline void gen_intermediate_code_internal(X86CPU *cpu,
TranslationBlock *tb,
bool search_pc)
{
CPUState *cs = CPU(cpu);
CPUX86State *env = &cpu->env;
DisasContext dc1, *dc = &dc1;
target_ulong pc_ptr;
uint16_t *gen_opc_end;
CPUBreakpoint *bp;
int j, lj;
uint64_t flags;
target_ulong pc_start;
target_ulong cs_base;
int num_insns;
int max_insns;
/* generate intermediate code */
pc_start = tb->pc;
cs_base = tb->cs_base;
flags = tb->flags;
dc->pe = (flags >> HF_PE_SHIFT) & 1;
dc->code32 = (flags >> HF_CS32_SHIFT) & 1;
dc->ss32 = (flags >> HF_SS32_SHIFT) & 1;
dc->addseg = (flags >> HF_ADDSEG_SHIFT) & 1;
dc->f_st = 0;
dc->vm86 = (flags >> VM_SHIFT) & 1;
dc->cpl = (flags >> HF_CPL_SHIFT) & 3;
dc->iopl = (flags >> IOPL_SHIFT) & 3;
dc->tf = (flags >> TF_SHIFT) & 1;
dc->singlestep_enabled = cs->singlestep_enabled;
dc->cc_op = CC_OP_DYNAMIC;
dc->cc_op_dirty = false;
dc->cs_base = cs_base;
dc->tb = tb;
dc->popl_esp_hack = 0;
/* select memory access functions */
dc->mem_index = 0;
if (flags & HF_SOFTMMU_MASK) {
dc->mem_index = cpu_mmu_index(env);
}
dc->cpuid_features = env->features[FEAT_1_EDX];
dc->cpuid_ext_features = env->features[FEAT_1_ECX];
dc->cpuid_ext2_features = env->features[FEAT_8000_0001_EDX];
dc->cpuid_ext3_features = env->features[FEAT_8000_0001_ECX];
dc->cpuid_7_0_ebx_features = env->features[FEAT_7_0_EBX];
#ifdef TARGET_X86_64
dc->lma = (flags >> HF_LMA_SHIFT) & 1;
dc->code64 = (flags >> HF_CS64_SHIFT) & 1;
#endif
dc->flags = flags;
dc->jmp_opt = !(dc->tf || cs->singlestep_enabled ||
(flags & HF_INHIBIT_IRQ_MASK)
#ifndef CONFIG_SOFTMMU
|| (flags & HF_SOFTMMU_MASK)
#endif
);
#if 0
/* check addseg logic */
if (!dc->addseg && (dc->vm86 || !dc->pe || !dc->code32))
printf("ERROR addseg\n");
#endif
cpu_T[0] = tcg_temp_new();
cpu_T[1] = tcg_temp_new();
cpu_A0 = tcg_temp_new();
cpu_tmp0 = tcg_temp_new();
cpu_tmp1_i64 = tcg_temp_new_i64();
cpu_tmp2_i32 = tcg_temp_new_i32();
cpu_tmp3_i32 = tcg_temp_new_i32();
cpu_tmp4 = tcg_temp_new();
cpu_ptr0 = tcg_temp_new_ptr();
cpu_ptr1 = tcg_temp_new_ptr();
cpu_cc_srcT = tcg_temp_local_new();
gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;
dc->is_jmp = DISAS_NEXT;
pc_ptr = pc_start;
lj = -1;
num_insns = 0;
max_insns = tb->cflags & CF_COUNT_MASK;
if (max_insns == 0)
max_insns = CF_COUNT_MASK;
gen_tb_start();
for(;;) {
if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
if (bp->pc == pc_ptr &&
!((bp->flags & BP_CPU) && (tb->flags & HF_RF_MASK))) {
gen_debug(dc, pc_ptr - dc->cs_base);
break;
}
}
}
if (search_pc) {
j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
if (lj < j) {
lj++;
while (lj < j)
tcg_ctx.gen_opc_instr_start[lj++] = 0;
}
tcg_ctx.gen_opc_pc[lj] = pc_ptr;
gen_opc_cc_op[lj] = dc->cc_op;
tcg_ctx.gen_opc_instr_start[lj] = 1;
tcg_ctx.gen_opc_icount[lj] = num_insns;
}
if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
gen_io_start();
pc_ptr = disas_insn(env, dc, pc_ptr);
num_insns++;
/* stop translation if indicated */
if (dc->is_jmp)
break;
/* if single step mode, we generate only one instruction and
generate an exception */
        /* if irqs were inhibited with HF_INHIBIT_IRQ_MASK, we clear
           the flag and abort the translation to give the irqs a
           chance to happen */
if (dc->tf || dc->singlestep_enabled ||
(flags & HF_INHIBIT_IRQ_MASK)) {
gen_jmp_im(pc_ptr - dc->cs_base);
gen_eob(dc);
break;
}
/* if too long translation, stop generation too */
if (tcg_ctx.gen_opc_ptr >= gen_opc_end ||
(pc_ptr - pc_start) >= (TARGET_PAGE_SIZE - 32) ||
num_insns >= max_insns) {
gen_jmp_im(pc_ptr - dc->cs_base);
gen_eob(dc);
break;
}
if (singlestep) {
gen_jmp_im(pc_ptr - dc->cs_base);
gen_eob(dc);
break;
}
}
if (tb->cflags & CF_LAST_IO)
gen_io_end();
gen_tb_end(tb, num_insns);
*tcg_ctx.gen_opc_ptr = INDEX_op_end;
/* we don't forget to fill the last values */
if (search_pc) {
j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
lj++;
while (lj <= j)
tcg_ctx.gen_opc_instr_start[lj++] = 0;
}
#ifdef DEBUG_DISAS
if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
int disas_flags;
qemu_log("----------------\n");
qemu_log("IN: %s\n", lookup_symbol(pc_start));
#ifdef TARGET_X86_64
if (dc->code64)
disas_flags = 2;
else
#endif
disas_flags = !dc->code32;
log_target_disas(env, pc_start, pc_ptr - pc_start, disas_flags);
qemu_log("\n");
}
#endif
if (!search_pc) {
tb->size = pc_ptr - pc_start;
tb->icount = num_insns;
}
}
| true | qemu | e64e353590c2584b41cd1db925f67042a05f4250 |
586 | static always_inline void gen_op_subfo (void)
{
gen_op_move_T2_T0();
gen_op_subf();
gen_op_check_subfo();
}
| true | qemu | c3e10c7b4377c1cbc0a4fbc12312c2cf41c0cda7 |
587 | static void fd_accept_incoming_migration(void *opaque)
{
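    /* The migration fd is readable: stop watching it and hand the stream to the
     * incoming-migration handler. */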
QEMUFile *f = opaque;
qemu_set_fd_handler2(qemu_get_fd(f), NULL, NULL, NULL, NULL);
process_incoming_migration(f);
}
| true | qemu | 60fe637bf0e4d7989e21e50f52526444765c63b4 |
588 | hwaddr ppc_hash64_get_phys_page_debug(PowerPCCPU *cpu, target_ulong addr)
{
CPUPPCState *env = &cpu->env;
ppc_slb_t *slb;
hwaddr pte_offset;
ppc_hash_pte64_t pte;
unsigned apshift;
if (msr_dr == 0) {
/* In real mode the top 4 effective address bits are ignored */
return addr & 0x0FFFFFFFFFFFFFFFULL;
}
slb = slb_lookup(cpu, addr);
if (!slb) {
return -1;
}
pte_offset = ppc_hash64_htab_lookup(cpu, slb, addr, &pte);
if (pte_offset == -1) {
return -1;
}
apshift = hpte_page_shift(slb->sps, pte.pte0, pte.pte1);
if (!apshift) {
return -1;
}
return deposit64(pte.pte1 & HPTE64_R_RPN, 0, apshift, addr)
& TARGET_PAGE_MASK;
}
| true | qemu | 949868633f0454715af1781c0f377413b6ab000e |
589 | static int adx_read_header(AVFormatContext *s)
{
ADXDemuxerContext *c = s->priv_data;
AVCodecParameters *par;
AVStream *st = avformat_new_stream(s, NULL);
if (!st)
return AVERROR(ENOMEM);
par = s->streams[0]->codecpar;
if (avio_rb16(s->pb) != 0x8000)
return AVERROR_INVALIDDATA;
c->header_size = avio_rb16(s->pb) + 4;
avio_seek(s->pb, -4, SEEK_CUR);
if (ff_get_extradata(s, par, s->pb, c->header_size) < 0)
return AVERROR(ENOMEM);
if (par->extradata_size < 12) {
av_log(s, AV_LOG_ERROR, "Invalid extradata size.\n");
return AVERROR_INVALIDDATA;
}
par->channels = AV_RB8 (par->extradata + 7);
par->sample_rate = AV_RB32(par->extradata + 8);
if (par->channels <= 0) {
av_log(s, AV_LOG_ERROR, "invalid number of channels %d\n", par->channels);
return AVERROR_INVALIDDATA;
}
if (par->sample_rate <= 0) {
av_log(s, AV_LOG_ERROR, "Invalid sample rate %d\n", par->sample_rate);
return AVERROR_INVALIDDATA;
}
par->codec_type = AVMEDIA_TYPE_AUDIO;
par->codec_id = s->iformat->raw_codec_id;
par->bit_rate = par->sample_rate * par->channels * BLOCK_SIZE * 8LL / BLOCK_SAMPLES;
avpriv_set_pts_info(st, 64, BLOCK_SAMPLES, par->sample_rate);
return 0;
}
| true | FFmpeg | ad5807f8aa883bee5431186dc1f24c5435d722d3 |
590 | static void virgl_cmd_get_capset(VirtIOGPU *g,
struct virtio_gpu_ctrl_command *cmd)
{
struct virtio_gpu_get_capset gc;
struct virtio_gpu_resp_capset *resp;
uint32_t max_ver, max_size;
VIRTIO_GPU_FILL_CMD(gc);
virgl_renderer_get_cap_set(gc.capset_id, &max_ver,
&max_size);
if (!max_size) {
cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
return;
}
resp = g_malloc(sizeof(*resp) + max_size);
resp->hdr.type = VIRTIO_GPU_RESP_OK_CAPSET;
virgl_renderer_fill_caps(gc.capset_id,
gc.capset_version,
(void *)resp->capset_data);
virtio_gpu_ctrl_response(g, cmd, &resp->hdr, sizeof(*resp) + max_size);
g_free(resp);
}
| true | qemu | 85d9d044471f93c48c5c396f7e217b4ef12f69f8 |
591 | static int qcow2_open(BlockDriverState *bs, QDict *options, int flags)
{
BDRVQcowState *s = bs->opaque;
int len, i, ret = 0;
QCowHeader header;
QemuOpts *opts;
Error *local_err = NULL;
uint64_t ext_end;
uint64_t l1_vm_state_index;
ret = bdrv_pread(bs->file, 0, &header, sizeof(header));
if (ret < 0) {
be32_to_cpus(&header.magic);
be32_to_cpus(&header.version);
be64_to_cpus(&header.backing_file_offset);
be32_to_cpus(&header.backing_file_size);
be64_to_cpus(&header.size);
be32_to_cpus(&header.cluster_bits);
be32_to_cpus(&header.crypt_method);
be64_to_cpus(&header.l1_table_offset);
be32_to_cpus(&header.l1_size);
be64_to_cpus(&header.refcount_table_offset);
be32_to_cpus(&header.refcount_table_clusters);
be64_to_cpus(&header.snapshots_offset);
be32_to_cpus(&header.nb_snapshots);
if (header.magic != QCOW_MAGIC) {
ret = -EMEDIUMTYPE;
if (header.version < 2 || header.version > 3) {
report_unsupported(bs, "QCOW version %d", header.version);
ret = -ENOTSUP;
s->qcow_version = header.version;
/* Initialise version 3 header fields */
if (header.version == 2) {
header.incompatible_features = 0;
header.compatible_features = 0;
header.autoclear_features = 0;
header.refcount_order = 4;
header.header_length = 72;
} else {
be64_to_cpus(&header.incompatible_features);
be64_to_cpus(&header.compatible_features);
be64_to_cpus(&header.autoclear_features);
be32_to_cpus(&header.refcount_order);
be32_to_cpus(&header.header_length);
if (header.header_length > sizeof(header)) {
s->unknown_header_fields_size = header.header_length - sizeof(header);
s->unknown_header_fields = g_malloc(s->unknown_header_fields_size);
ret = bdrv_pread(bs->file, sizeof(header), s->unknown_header_fields,
s->unknown_header_fields_size);
if (ret < 0) {
if (header.backing_file_offset) {
ext_end = header.backing_file_offset;
} else {
ext_end = 1 << header.cluster_bits;
/* Handle feature bits */
s->incompatible_features = header.incompatible_features;
s->compatible_features = header.compatible_features;
s->autoclear_features = header.autoclear_features;
if (s->incompatible_features & ~QCOW2_INCOMPAT_MASK) {
void *feature_table = NULL;
qcow2_read_extensions(bs, header.header_length, ext_end,
&feature_table);
report_unsupported_feature(bs, feature_table,
s->incompatible_features &
~QCOW2_INCOMPAT_MASK);
ret = -ENOTSUP;
/* Check support for various header values */
if (header.refcount_order != 4) {
report_unsupported(bs, "%d bit reference counts",
1 << header.refcount_order);
ret = -ENOTSUP;
if (header.cluster_bits < MIN_CLUSTER_BITS ||
header.cluster_bits > MAX_CLUSTER_BITS) {
ret = -EINVAL;
if (header.crypt_method > QCOW_CRYPT_AES) {
ret = -EINVAL;
s->crypt_method_header = header.crypt_method;
if (s->crypt_method_header) {
bs->encrypted = 1;
s->cluster_bits = header.cluster_bits;
s->cluster_size = 1 << s->cluster_bits;
s->cluster_sectors = 1 << (s->cluster_bits - 9);
s->l2_bits = s->cluster_bits - 3; /* L2 is always one cluster */
s->l2_size = 1 << s->l2_bits;
bs->total_sectors = header.size / 512;
s->csize_shift = (62 - (s->cluster_bits - 8));
s->csize_mask = (1 << (s->cluster_bits - 8)) - 1;
s->cluster_offset_mask = (1LL << s->csize_shift) - 1;
s->refcount_table_offset = header.refcount_table_offset;
s->refcount_table_size =
header.refcount_table_clusters << (s->cluster_bits - 3);
s->snapshots_offset = header.snapshots_offset;
s->nb_snapshots = header.nb_snapshots;
/* read the level 1 table */
s->l1_size = header.l1_size;
l1_vm_state_index = size_to_l1(s, header.size);
if (l1_vm_state_index > INT_MAX) {
ret = -EFBIG;
s->l1_vm_state_index = l1_vm_state_index;
/* the L1 table must contain at least enough entries to put
header.size bytes */
if (s->l1_size < s->l1_vm_state_index) {
ret = -EINVAL;
s->l1_table_offset = header.l1_table_offset;
if (s->l1_size > 0) {
s->l1_table = g_malloc0(
align_offset(s->l1_size * sizeof(uint64_t), 512));
ret = bdrv_pread(bs->file, s->l1_table_offset, s->l1_table,
s->l1_size * sizeof(uint64_t));
if (ret < 0) {
for(i = 0;i < s->l1_size; i++) {
be64_to_cpus(&s->l1_table[i]);
/* alloc L2 table/refcount block cache */
s->l2_table_cache = qcow2_cache_create(bs, L2_CACHE_SIZE);
s->refcount_block_cache = qcow2_cache_create(bs, REFCOUNT_CACHE_SIZE);
s->cluster_cache = g_malloc(s->cluster_size);
/* one more sector for decompressed data alignment */
s->cluster_data = qemu_blockalign(bs, QCOW_MAX_CRYPT_CLUSTERS * s->cluster_size
+ 512);
s->cluster_cache_offset = -1;
s->flags = flags;
ret = qcow2_refcount_init(bs);
if (ret != 0) {
QLIST_INIT(&s->cluster_allocs);
QTAILQ_INIT(&s->discards);
/* read qcow2 extensions */
if (qcow2_read_extensions(bs, header.header_length, ext_end, NULL)) {
ret = -EINVAL;
/* read the backing file name */
if (header.backing_file_offset != 0) {
len = header.backing_file_size;
if (len > 1023) {
len = 1023;
ret = bdrv_pread(bs->file, header.backing_file_offset,
bs->backing_file, len);
if (ret < 0) {
bs->backing_file[len] = '\0';
ret = qcow2_read_snapshots(bs);
if (ret < 0) {
/* Clear unknown autoclear feature bits */
if (!bs->read_only && s->autoclear_features != 0) {
s->autoclear_features = 0;
ret = qcow2_update_header(bs);
if (ret < 0) {
/* Initialise locks */
qemu_co_mutex_init(&s->lock);
/* Repair image if dirty */
if (!(flags & BDRV_O_CHECK) && !bs->read_only &&
(s->incompatible_features & QCOW2_INCOMPAT_DIRTY)) {
BdrvCheckResult result = {0};
ret = qcow2_check(bs, &result, BDRV_FIX_ERRORS);
if (ret < 0) {
/* Enable lazy_refcounts according to image and command line options */
opts = qemu_opts_create_nofail(&qcow2_runtime_opts);
qemu_opts_absorb_qdict(opts, options, &local_err);
if (error_is_set(&local_err)) {
qerror_report_err(local_err);
error_free(local_err);
ret = -EINVAL;
s->use_lazy_refcounts = qemu_opt_get_bool(opts, QCOW2_OPT_LAZY_REFCOUNTS,
(s->compatible_features & QCOW2_COMPAT_LAZY_REFCOUNTS));
s->discard_passthrough[QCOW2_DISCARD_NEVER] = false;
s->discard_passthrough[QCOW2_DISCARD_ALWAYS] = true;
s->discard_passthrough[QCOW2_DISCARD_REQUEST] =
qemu_opt_get_bool(opts, QCOW2_OPT_DISCARD_REQUEST,
flags & BDRV_O_UNMAP);
s->discard_passthrough[QCOW2_DISCARD_SNAPSHOT] =
qemu_opt_get_bool(opts, QCOW2_OPT_DISCARD_SNAPSHOT, true);
s->discard_passthrough[QCOW2_DISCARD_OTHER] =
qemu_opt_get_bool(opts, QCOW2_OPT_DISCARD_OTHER, false);
qemu_opts_del(opts);
if (s->use_lazy_refcounts && s->qcow_version < 3) {
qerror_report(ERROR_CLASS_GENERIC_ERROR, "Lazy refcounts require "
"a qcow2 image with at least qemu 1.1 compatibility level");
ret = -EINVAL;
#ifdef DEBUG_ALLOC
{
BdrvCheckResult result = {0};
qcow2_check_refcounts(bs, &result, 0);
#endif
return ret;
fail:
g_free(s->unknown_header_fields);
cleanup_unknown_header_ext(bs);
qcow2_free_snapshots(bs);
qcow2_refcount_close(bs);
g_free(s->l1_table);
if (s->l2_table_cache) {
qcow2_cache_destroy(bs, s->l2_table_cache);
g_free(s->cluster_cache);
qemu_vfree(s->cluster_data);
    return ret;
}
 | true | qemu | 69c98726537627e708abb8fcb33e3a2b10e40bf1
592 | static void decode_subband(DiracContext *s, GetBitContext *gb, int quant,
int slice_x, int slice_y, int bits_end,
SubBand *b1, SubBand *b2)
{
int left = b1->width * slice_x / s->num_x;
int right = b1->width *(slice_x+1) / s->num_x;
int top = b1->height * slice_y / s->num_y;
int bottom = b1->height *(slice_y+1) / s->num_y;
int qfactor = qscale_tab[quant & 0x7f];
int qoffset = qoffset_intra_tab[quant & 0x7f] + 2;
uint8_t *buf1 = b1->ibuf + top * b1->stride;
uint8_t *buf2 = b2 ? b2->ibuf + top * b2->stride: NULL;
int x, y;
/* we have to constantly check for overread since the spec explicitly
requires this, with the meaning that all remaining coeffs are set to 0 */
if (get_bits_count(gb) >= bits_end)
return;
if (s->pshift) {
for (y = top; y < bottom; y++) {
for (x = left; x < right; x++) {
PARSE_VALUES(int32_t, x, gb, bits_end, buf1, buf2);
}
buf1 += b1->stride;
if (buf2)
buf2 += b2->stride;
}
}
else {
for (y = top; y < bottom; y++) {
for (x = left; x < right; x++) {
PARSE_VALUES(int16_t, x, gb, bits_end, buf1, buf2);
}
buf1 += b1->stride;
if (buf2)
buf2 += b2->stride;
}
}
}
| true | FFmpeg | 5fbd97fc756a827f62f556c66272f851cc3c7f90 |
593 | int spapr_h_cas_compose_response(sPAPRMachineState *spapr,
target_ulong addr, target_ulong size,
bool cpu_update,
sPAPROptionVector *ov5_updates)
{
void *fdt, *fdt_skel;
sPAPRDeviceTreeUpdateHeader hdr = { .version_id = 1 };
size -= sizeof(hdr);
    /* Create skeleton */
fdt_skel = g_malloc0(size);
_FDT((fdt_create(fdt_skel, size)));
_FDT((fdt_begin_node(fdt_skel, "")));
_FDT((fdt_end_node(fdt_skel)));
_FDT((fdt_finish(fdt_skel)));
fdt = g_malloc0(size);
_FDT((fdt_open_into(fdt_skel, fdt, size)));
g_free(fdt_skel);
/* Fixup cpu nodes */
if (cpu_update) {
_FDT((spapr_fixup_cpu_dt(fdt, spapr)));
}
if (spapr_dt_cas_updates(spapr, fdt, ov5_updates)) {
return -1;
}
/* Pack resulting tree */
_FDT((fdt_pack(fdt)));
if (fdt_totalsize(fdt) + sizeof(hdr) > size) {
trace_spapr_cas_failed(size);
return -1;
}
cpu_physical_memory_write(addr, &hdr, sizeof(hdr));
cpu_physical_memory_write(addr + sizeof(hdr), fdt, fdt_totalsize(fdt));
trace_spapr_cas_continue(fdt_totalsize(fdt) + sizeof(hdr));
g_free(fdt);
return 0;
}
| false | qemu | 5b120785e70a9a48b43e3f1f156a10a015334a28 |
594 | void kvmppc_hash64_write_pte(CPUPPCState *env, target_ulong pte_index,
target_ulong pte0, target_ulong pte1)
{
int htab_fd;
struct kvm_get_htab_fd ghf;
struct kvm_get_htab_buf hpte_buf;
ghf.flags = 0;
ghf.start_index = 0; /* Ignored */
htab_fd = kvm_vm_ioctl(kvm_state, KVM_PPC_GET_HTAB_FD, &ghf);
if (htab_fd < 0) {
goto error_out;
}
hpte_buf.header.n_valid = 1;
hpte_buf.header.n_invalid = 0;
hpte_buf.header.index = pte_index;
hpte_buf.hpte[0] = pte0;
hpte_buf.hpte[1] = pte1;
/*
* Write the hpte entry.
* CAUTION: write() has the warn_unused_result attribute. Hence we
* need to check the return value, even though we do nothing.
*/
if (write(htab_fd, &hpte_buf, sizeof(hpte_buf)) < 0) {
goto out_close;
}
out_close:
close(htab_fd);
return;
error_out:
return;
}
| false | qemu | 1ad9f0a464fe78d30ee60b3629f7a825cf2fab13 |
595 | int tcp_start_outgoing_migration(MigrationState *s, const char *host_port,
Error **errp)
{
s->get_error = socket_errno;
s->write = socket_write;
s->close = tcp_close;
s->fd = inet_connect(host_port, false, errp);
if (!error_is_set(errp)) {
migrate_fd_connect(s);
} else if (error_is_type(*errp, QERR_SOCKET_CONNECT_IN_PROGRESS)) {
DPRINTF("connect in progress\n");
qemu_set_fd_handler2(s->fd, NULL, NULL, tcp_wait_for_connect, s);
} else if (error_is_type(*errp, QERR_SOCKET_CREATE_FAILED)) {
DPRINTF("connect failed\n");
return -1;
} else if (error_is_type(*errp, QERR_SOCKET_CONNECT_FAILED)) {
DPRINTF("connect failed\n");
migrate_fd_error(s);
return -1;
} else {
DPRINTF("unknown error\n");
return -1;
}
return 0;
}
| false | qemu | 02a08fef079469c005d48fe2d181f0e0eb5752ae |
596 | static void virtio_9p_device_realize(DeviceState *dev, Error **errp)
{
VirtIODevice *vdev = VIRTIO_DEVICE(dev);
V9fsVirtioState *v = VIRTIO_9P(dev);
V9fsState *s = &v->state;
if (v9fs_device_realize_common(s, errp)) {
goto out;
}
v->config_size = sizeof(struct virtio_9p_config) + strlen(s->fsconf.tag);
virtio_init(vdev, "virtio-9p", VIRTIO_ID_9P, v->config_size);
v->vq = virtio_add_queue(vdev, MAX_REQ, handle_9p_output);
v9fs_register_transport(s, &virtio_9p_transport);
out:
return;
}
| false | qemu | bd3be4dbbf0491d6db8bf326706747b4629ace4b |
597 | static void trigger_access_exception(CPUS390XState *env, uint32_t type,
uint32_t ilen, uint64_t tec)
{
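    /* With KVM the access exception is injected by the kernel; under TCG we store
     * the translation-exception code in the lowcore and raise a program interrupt
     * ourselves. */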
S390CPU *cpu = s390_env_get_cpu(env);
if (kvm_enabled()) {
kvm_s390_access_exception(cpu, type, tec);
} else {
CPUState *cs = CPU(cpu);
stq_phys(cs->as, env->psa + offsetof(LowCore, trans_exc_code), tec);
trigger_pgm_exception(env, type, ilen);
}
}
| false | qemu | 820613b1c1c76cb77a15313eb333a710972614ec |
598 | static void spapr_memory_unplug_request(HotplugHandler *hotplug_dev,
DeviceState *dev, Error **errp)
{
sPAPRMachineState *spapr = SPAPR_MACHINE(hotplug_dev);
Error *local_err = NULL;
PCDIMMDevice *dimm = PC_DIMM(dev);
PCDIMMDeviceClass *ddc = PC_DIMM_GET_CLASS(dimm);
MemoryRegion *mr = ddc->get_memory_region(dimm);
uint64_t size = memory_region_size(mr);
uint32_t nr_lmbs = size / SPAPR_MEMORY_BLOCK_SIZE;
uint64_t addr_start, addr;
int i;
sPAPRDRConnector *drc;
sPAPRDRConnectorClass *drck;
sPAPRDIMMState *ds;
addr_start = object_property_get_int(OBJECT(dimm), PC_DIMM_ADDR_PROP,
&local_err);
if (local_err) {
goto out;
}
ds = g_malloc0(sizeof(sPAPRDIMMState));
ds->nr_lmbs = nr_lmbs;
ds->dimm = dimm;
spapr_pending_dimm_unplugs_add(spapr, ds);
addr = addr_start;
for (i = 0; i < nr_lmbs; i++) {
drc = spapr_dr_connector_by_id(SPAPR_DR_CONNECTOR_TYPE_LMB,
addr / SPAPR_MEMORY_BLOCK_SIZE);
g_assert(drc);
drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc);
drck->detach(drc, dev, spapr_lmb_release, NULL, errp);
addr += SPAPR_MEMORY_BLOCK_SIZE;
}
drc = spapr_dr_connector_by_id(SPAPR_DR_CONNECTOR_TYPE_LMB,
addr_start / SPAPR_MEMORY_BLOCK_SIZE);
drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc);
spapr_hotplug_req_remove_by_count_indexed(SPAPR_DR_CONNECTOR_TYPE_LMB,
nr_lmbs,
drck->get_index(drc));
out:
error_propagate(errp, local_err);
}
| false | qemu | 318347234d7069b62d38391dd27e269a3107d668 |
600 | static void l2x0_priv_write(void *opaque, target_phys_addr_t offset,
uint64_t value, unsigned size)
{
l2x0_state *s = (l2x0_state *)opaque;
offset &= 0xfff;
if (offset >= 0x730 && offset < 0x800) {
/* ignore */
return;
}
switch (offset) {
case 0x100:
s->ctrl = value & 1;
break;
case 0x104:
s->aux_ctrl = value;
break;
case 0x108:
s->tag_ctrl = value;
break;
case 0x10C:
s->data_ctrl = value;
break;
case 0xC00:
s->filter_start = value;
break;
case 0xC04:
s->filter_end = value;
break;
case 0xF40:
return;
case 0xF60:
return;
case 0xF80:
return;
default:
fprintf(stderr, "l2x0_priv_write: Bad offset %x\n", (int)offset);
break;
}
}
| false | qemu | a8170e5e97ad17ca169c64ba87ae2f53850dab4c |
601 | static void json_emit_element(QJSON *json, const char *name)
{
/* Check whether we need to print a , before an element */
if (json->omit_comma) {
json->omit_comma = false;
} else {
qstring_append(json->str, ", ");
}
if (name) {
qstring_append(json->str, "\"");
qstring_append(json->str, name);
qstring_append(json->str, "\" : ");
}
}
| false | qemu | 17b74b98676aee5bc470b173b1e528d2fce2cf18 |
602 | static bool vfio_pci_host_match(PCIHostDeviceAddress *host1,
PCIHostDeviceAddress *host2)
{
return (host1->domain == host2->domain && host1->bus == host2->bus &&
host1->slot == host2->slot && host1->function == host2->function);
}
| false | qemu | 7df9381b7aa56c897e344f3bfe43bf5848bbd3e0 |
603 | int ff_ivi_dec_huff_desc(GetBitContext *gb, int desc_coded, int which_tab,
IVIHuffTab *huff_tab, AVCodecContext *avctx)
{
int i, result;
IVIHuffDesc new_huff;
if (!desc_coded) {
/* select default table */
huff_tab->tab = (which_tab) ? &ff_ivi_blk_vlc_tabs[7]
: &ff_ivi_mb_vlc_tabs [7];
} else {
huff_tab->tab_sel = get_bits(gb, 3);
if (huff_tab->tab_sel == 7) {
/* custom huffman table (explicitly encoded) */
new_huff.num_rows = get_bits(gb, 4);
for (i = 0; i < new_huff.num_rows; i++)
new_huff.xbits[i] = get_bits(gb, 4);
/* Have we got the same custom table? Rebuild if not. */
if (ff_ivi_huff_desc_cmp(&new_huff, &huff_tab->cust_desc)) {
ff_ivi_huff_desc_copy(&huff_tab->cust_desc, &new_huff);
if (huff_tab->cust_tab.table)
ff_free_vlc(&huff_tab->cust_tab);
result = ff_ivi_create_huff_from_desc(&huff_tab->cust_desc,
&huff_tab->cust_tab, 0);
if (result) {
av_log(avctx, AV_LOG_ERROR,
"Error while initializing custom vlc table!\n");
return -1;
}
}
huff_tab->tab = &huff_tab->cust_tab;
} else {
/* select one of predefined tables */
huff_tab->tab = (which_tab) ? &ff_ivi_blk_vlc_tabs[huff_tab->tab_sel]
: &ff_ivi_mb_vlc_tabs [huff_tab->tab_sel];
}
}
return 0;
}
| false | FFmpeg | fe7a37c36febd71576cbefc385d995a8d6e444e7 |
604 | int check_params(char *buf, int buf_size,
const char * const *params, const char *str)
{
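    /* Walk the comma-separated "name=value" string and fail if any option name is
     * not listed in params[]. */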
const char *p;
int i;
p = str;
while (*p != '\0') {
p = get_opt_name(buf, buf_size, p, '=');
if (*p != '=') {
return -1;
}
p++;
for (i = 0; params[i] != NULL; i++) {
if (!strcmp(params[i], buf)) {
break;
}
}
if (params[i] == NULL) {
return -1;
}
p = get_opt_value(NULL, 0, p);
if (*p != ',') {
break;
}
p++;
}
return 0;
}
| false | qemu | a86b35f992f107323e432c0a96107e11e1b699ad |
605 | static void memory_region_update_container_subregions(MemoryRegion *subregion)
{
hwaddr offset = subregion->addr;
MemoryRegion *mr = subregion->container;
MemoryRegion *other;
memory_region_transaction_begin();
memory_region_ref(subregion);
QTAILQ_FOREACH(other, &mr->subregions, subregions_link) {
if (subregion->may_overlap || other->may_overlap) {
continue;
}
if (int128_ge(int128_make64(offset),
int128_add(int128_make64(other->addr), other->size))
|| int128_le(int128_add(int128_make64(offset), subregion->size),
int128_make64(other->addr))) {
continue;
}
#if 0
printf("warning: subregion collision %llx/%llx (%s) "
"vs %llx/%llx (%s)\n",
(unsigned long long)offset,
(unsigned long long)int128_get64(subregion->size),
subregion->name,
(unsigned long long)other->addr,
(unsigned long long)int128_get64(other->size),
other->name);
#endif
}
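    /* Keep the subregion list sorted by descending priority. */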
QTAILQ_FOREACH(other, &mr->subregions, subregions_link) {
if (subregion->priority >= other->priority) {
QTAILQ_INSERT_BEFORE(other, subregion, subregions_link);
goto done;
}
}
QTAILQ_INSERT_TAIL(&mr->subregions, subregion, subregions_link);
done:
memory_region_update_pending |= mr->enabled && subregion->enabled;
memory_region_transaction_commit();
}
| false | qemu | b61359781958759317ee6fd1a45b59be0b7dbbe1 |
606 | static int64_t coroutine_fn bdrv_qed_co_get_block_status(BlockDriverState *bs,
int64_t sector_num,
int nb_sectors, int *pnum)
{
BDRVQEDState *s = bs->opaque;
size_t len = (size_t)nb_sectors * BDRV_SECTOR_SIZE;
QEDIsAllocatedCB cb = {
.bs = bs,
.pos = (uint64_t)sector_num * BDRV_SECTOR_SIZE,
.status = BDRV_BLOCK_OFFSET_MASK,
.pnum = pnum,
};
QEDRequest request = { .l2_table = NULL };
qed_find_cluster(s, &request, cb.pos, len, qed_is_allocated_cb, &cb);
/* Now sleep if the callback wasn't invoked immediately */
while (cb.status == BDRV_BLOCK_OFFSET_MASK) {
cb.co = qemu_coroutine_self();
qemu_coroutine_yield();
}
qed_unref_l2_cache_entry(request.l2_table);
return cb.status;
}
| false | qemu | 67a0fd2a9bca204d2b39f910a97c7137636a0715 |
607 | static sPAPRPHBState *find_phb(sPAPREnvironment *spapr, uint64_t buid)
{
sPAPRPHBState *sphb;
QLIST_FOREACH(sphb, &spapr->phbs, list) {
if (sphb->buid != buid) {
continue;
}
return sphb;
}
return NULL;
}
| false | qemu | 46c5874e9cd752ed8ded31af03472edd8fc3efc1 |
608 | static void ahci_mem_write(void *opaque, target_phys_addr_t addr,
uint64_t val, unsigned size)
{
AHCIState *s = opaque;
/* Only aligned reads are allowed on AHCI */
if (addr & 3) {
fprintf(stderr, "ahci: Mis-aligned write to addr 0x"
TARGET_FMT_plx "\n", addr);
return;
}
if (addr < AHCI_GENERIC_HOST_CONTROL_REGS_MAX_ADDR) {
DPRINTF(-1, "(addr 0x%08X), val 0x%08"PRIX64"\n", (unsigned) addr, val);
switch (addr) {
case HOST_CAP: /* R/WO, RO */
/* FIXME handle R/WO */
break;
case HOST_CTL: /* R/W */
if (val & HOST_CTL_RESET) {
DPRINTF(-1, "HBA Reset\n");
ahci_reset(s);
} else {
s->control_regs.ghc = (val & 0x3) | HOST_CTL_AHCI_EN;
ahci_check_irq(s);
}
break;
case HOST_IRQ_STAT: /* R/WC, RO */
s->control_regs.irqstatus &= ~val;
ahci_check_irq(s);
break;
case HOST_PORTS_IMPL: /* R/WO, RO */
/* FIXME handle R/WO */
break;
case HOST_VERSION: /* RO */
/* FIXME report write? */
break;
default:
DPRINTF(-1, "write to unknown register 0x%x\n", (unsigned)addr);
}
} else if ((addr >= AHCI_PORT_REGS_START_ADDR) &&
(addr < (AHCI_PORT_REGS_START_ADDR +
(s->ports * AHCI_PORT_ADDR_OFFSET_LEN)))) {
ahci_port_write(s, (addr - AHCI_PORT_REGS_START_ADDR) >> 7,
addr & AHCI_PORT_ADDR_OFFSET_MASK, val);
}
}
| false | qemu | a8170e5e97ad17ca169c64ba87ae2f53850dab4c |
609 | static void jpeg_term_destination(j_compress_ptr cinfo)
{
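    /* libjpeg destination-manager callback: record how many bytes were actually
     * written into the output buffer. */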
VncState *vs = cinfo->client_data;
Buffer *buffer = &vs->tight_jpeg;
buffer->offset = buffer->capacity - cinfo->dest->free_in_buffer;
}
| false | qemu | 245f7b51c0ea04fb2224b1127430a096c91aee70 |
610 | static void qdict_destroy_obj(QObject *obj)
{
int i;
QDict *qdict;
assert(obj != NULL);
qdict = qobject_to_qdict(obj);
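    /* Free every entry in each hash bucket, then the dict itself. */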
for (i = 0; i < QDICT_BUCKET_MAX; i++) {
QDictEntry *entry = QLIST_FIRST(&qdict->table[i]);
while (entry) {
QDictEntry *tmp = QLIST_NEXT(entry, next);
QLIST_REMOVE(entry, next);
qentry_destroy(entry);
entry = tmp;
}
}
g_free(qdict);
}
| false | qemu | 55e1819c509b3d9c10a54678b9c585bbda13889e |
611 | static int pte64_check (mmu_ctx_t *ctx,
target_ulong pte0, target_ulong pte1, int h, int rw)
{
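    /* 64-bit PTE check: forwards to the common helper with the 64-bit flag set. */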
return _pte_check(ctx, 1, pte0, pte1, h, rw);
}
| false | qemu | b227a8e9aa5f27d29f77ba90d5eb9d0662a1175e |
612 | static GenericList *qobject_input_next_list(Visitor *v, GenericList *tail,
size_t size)
{
QObjectInputVisitor *qiv = to_qiv(v);
StackObject *so = QSLIST_FIRST(&qiv->stack);
if (!so->entry) {
return NULL;
}
tail->next = g_malloc0(size);
return tail->next;
}
| false | qemu | a4a1c70dc759e5b81627e96564f344ab43ea86eb |
613 | static int coroutine_fn v9fs_do_readdir_with_stat(V9fsPDU *pdu,
V9fsFidState *fidp,
uint32_t max_count)
{
V9fsPath path;
V9fsStat v9stat;
int len, err = 0;
int32_t count = 0;
struct stat stbuf;
off_t saved_dir_pos;
struct dirent *dent;
/* save the directory position */
saved_dir_pos = v9fs_co_telldir(pdu, fidp);
if (saved_dir_pos < 0) {
return saved_dir_pos;
}
while (1) {
v9fs_path_init(&path);
v9fs_readdir_lock(&fidp->fs.dir);
err = v9fs_co_readdir(pdu, fidp, &dent);
if (err || !dent) {
break;
}
err = v9fs_co_name_to_path(pdu, &fidp->path, dent->d_name, &path);
if (err < 0) {
break;
}
err = v9fs_co_lstat(pdu, &path, &stbuf);
if (err < 0) {
break;
}
err = stat_to_v9stat(pdu, &path, dent->d_name, &stbuf, &v9stat);
if (err < 0) {
break;
}
/* 11 = 7 + 4 (7 = start offset, 4 = space for storing count) */
len = pdu_marshal(pdu, 11 + count, "S", &v9stat);
v9fs_readdir_unlock(&fidp->fs.dir);
if ((len != (v9stat.size + 2)) || ((count + len) > max_count)) {
/* Ran out of buffer. Set dir back to old position and return */
v9fs_co_seekdir(pdu, fidp, saved_dir_pos);
v9fs_stat_free(&v9stat);
v9fs_path_free(&path);
return count;
}
count += len;
v9fs_stat_free(&v9stat);
v9fs_path_free(&path);
saved_dir_pos = dent->d_off;
}
v9fs_readdir_unlock(&fidp->fs.dir);
v9fs_path_free(&path);
if (err < 0) {
return err;
}
return count;
}
| false | qemu | 772a73692ecb52bace0cff6f95df62f59b8cabe0 |
614 | static int decode_block_refinement(MJpegDecodeContext *s, DCTELEM *block, uint8_t *last_nnz,
int ac_index, int16_t *quant_matrix,
int ss, int se, int Al, int *EOBRUN)
{
int code, i=ss, j, sign, val, run;
int last = FFMIN(se, *last_nnz);
OPEN_READER(re, &s->gb);
if(*EOBRUN)
(*EOBRUN)--;
else {
for(;;i++) {
UPDATE_CACHE(re, &s->gb);
GET_VLC(code, re, &s->gb, s->vlcs[1][ac_index].table, 9, 2)
            /* Progressive JPEG uses AC coeffs from zero and this decoder sets offset 16 by default */
code -= 16;
if(code & 0xF) {
run = ((unsigned) code) >> 4;
UPDATE_CACHE(re, &s->gb);
val = SHOW_UBITS(re, &s->gb, 1);
LAST_SKIP_BITS(re, &s->gb, 1);
ZERO_RUN;
j = s->scantable.permutated[i];
val--;
block[j] = ((quant_matrix[j]^val)-val) << Al;
if(i == se) {
if(i > *last_nnz)
*last_nnz = i;
CLOSE_READER(re, &s->gb)
return 0;
}
}else{
run = ((unsigned) code) >> 4;
if(run == 0xF){
ZERO_RUN;
}else{
val = run;
run = (1 << run);
if(val) {
UPDATE_CACHE(re, &s->gb);
run += SHOW_UBITS(re, &s->gb, val);
LAST_SKIP_BITS(re, &s->gb, val);
}
*EOBRUN = run - 1;
break;
}
}
}
if(i > *last_nnz)
*last_nnz = i;
}
for(;i<=last;i++) {
j = s->scantable.permutated[i];
if(block[j])
REFINE_BIT(j)
}
CLOSE_READER(re, &s->gb);
return 0;
}
| false | FFmpeg | 5675a11f9277b5c7b1c9ad45da893e9ef9a42f03 |
615 | static void destroy_buffers(SANMVideoContext *ctx)
{
av_freep(&ctx->frm0);
av_freep(&ctx->frm1);
av_freep(&ctx->frm2);
av_freep(&ctx->stored_frame);
av_freep(&ctx->rle_buf);
ctx->frm0_size =
ctx->frm1_size =
ctx->frm2_size = 0;
} | true | FFmpeg | 39bbdebb1ed8eb9c9b0cd6db85afde6ba89d86e4 |
616 | void cpu_state_reset(CPUMIPSState *env)
{
MIPSCPU *cpu = mips_env_get_cpu(env);
CPUState *cs = CPU(cpu);
/* Reset registers to their default values */
env->CP0_PRid = env->cpu_model->CP0_PRid;
env->CP0_Config0 = env->cpu_model->CP0_Config0;
#ifdef TARGET_WORDS_BIGENDIAN
env->CP0_Config0 |= (1 << CP0C0_BE);
#endif
env->CP0_Config1 = env->cpu_model->CP0_Config1;
env->CP0_Config2 = env->cpu_model->CP0_Config2;
env->CP0_Config3 = env->cpu_model->CP0_Config3;
env->CP0_Config4 = env->cpu_model->CP0_Config4;
env->CP0_Config4_rw_bitmask = env->cpu_model->CP0_Config4_rw_bitmask;
env->CP0_Config5 = env->cpu_model->CP0_Config5;
env->CP0_Config5_rw_bitmask = env->cpu_model->CP0_Config5_rw_bitmask;
env->CP0_Config6 = env->cpu_model->CP0_Config6;
env->CP0_Config7 = env->cpu_model->CP0_Config7;
env->CP0_LLAddr_rw_bitmask = env->cpu_model->CP0_LLAddr_rw_bitmask
<< env->cpu_model->CP0_LLAddr_shift;
env->CP0_LLAddr_shift = env->cpu_model->CP0_LLAddr_shift;
env->SYNCI_Step = env->cpu_model->SYNCI_Step;
env->CCRes = env->cpu_model->CCRes;
env->CP0_Status_rw_bitmask = env->cpu_model->CP0_Status_rw_bitmask;
env->CP0_TCStatus_rw_bitmask = env->cpu_model->CP0_TCStatus_rw_bitmask;
env->CP0_SRSCtl = env->cpu_model->CP0_SRSCtl;
env->current_tc = 0;
env->SEGBITS = env->cpu_model->SEGBITS;
env->SEGMask = (target_ulong)((1ULL << env->cpu_model->SEGBITS) - 1);
#if defined(TARGET_MIPS64)
if (env->cpu_model->insn_flags & ISA_MIPS3) {
env->SEGMask |= 3ULL << 62;
}
#endif
env->PABITS = env->cpu_model->PABITS;
env->CP0_SRSConf0_rw_bitmask = env->cpu_model->CP0_SRSConf0_rw_bitmask;
env->CP0_SRSConf0 = env->cpu_model->CP0_SRSConf0;
env->CP0_SRSConf1_rw_bitmask = env->cpu_model->CP0_SRSConf1_rw_bitmask;
env->CP0_SRSConf1 = env->cpu_model->CP0_SRSConf1;
env->CP0_SRSConf2_rw_bitmask = env->cpu_model->CP0_SRSConf2_rw_bitmask;
env->CP0_SRSConf2 = env->cpu_model->CP0_SRSConf2;
env->CP0_SRSConf3_rw_bitmask = env->cpu_model->CP0_SRSConf3_rw_bitmask;
env->CP0_SRSConf3 = env->cpu_model->CP0_SRSConf3;
env->CP0_SRSConf4_rw_bitmask = env->cpu_model->CP0_SRSConf4_rw_bitmask;
env->CP0_SRSConf4 = env->cpu_model->CP0_SRSConf4;
env->CP0_PageGrain_rw_bitmask = env->cpu_model->CP0_PageGrain_rw_bitmask;
env->CP0_PageGrain = env->cpu_model->CP0_PageGrain;
env->CP0_EBaseWG_rw_bitmask = env->cpu_model->CP0_EBaseWG_rw_bitmask;
env->active_fpu.fcr0 = env->cpu_model->CP1_fcr0;
env->active_fpu.fcr31_rw_bitmask = env->cpu_model->CP1_fcr31_rw_bitmask;
env->active_fpu.fcr31 = env->cpu_model->CP1_fcr31;
env->msair = env->cpu_model->MSAIR;
env->insn_flags = env->cpu_model->insn_flags;
#if defined(CONFIG_USER_ONLY)
env->CP0_Status = (MIPS_HFLAG_UM << CP0St_KSU);
# ifdef TARGET_MIPS64
/* Enable 64-bit register mode. */
env->CP0_Status |= (1 << CP0St_PX);
# endif
# ifdef TARGET_ABI_MIPSN64
/* Enable 64-bit address mode. */
env->CP0_Status |= (1 << CP0St_UX);
# endif
/* Enable access to the CPUNum, SYNCI_Step, CC, and CCRes RDHWR
hardware registers. */
env->CP0_HWREna |= 0x0000000F;
if (env->CP0_Config1 & (1 << CP0C1_FP)) {
env->CP0_Status |= (1 << CP0St_CU1);
}
if (env->CP0_Config3 & (1 << CP0C3_DSPP)) {
env->CP0_Status |= (1 << CP0St_MX);
}
# if defined(TARGET_MIPS64)
/* For MIPS64, init FR bit to 1 if FPU unit is there and bit is writable. */
if ((env->CP0_Config1 & (1 << CP0C1_FP)) &&
(env->CP0_Status_rw_bitmask & (1 << CP0St_FR))) {
env->CP0_Status |= (1 << CP0St_FR);
}
# endif
#else
if (env->hflags & MIPS_HFLAG_BMASK) {
/* If the exception was raised from a delay slot,
come back to the jump. */
env->CP0_ErrorEPC = (env->active_tc.PC
- (env->hflags & MIPS_HFLAG_B16 ? 2 : 4));
} else {
env->CP0_ErrorEPC = env->active_tc.PC;
}
env->active_tc.PC = env->exception_base;
env->CP0_Random = env->tlb->nb_tlb - 1;
env->tlb->tlb_in_use = env->tlb->nb_tlb;
env->CP0_Wired = 0;
env->CP0_GlobalNumber = (cs->cpu_index & 0xFF) << CP0GN_VPId;
env->CP0_EBase = (cs->cpu_index & 0x3FF);
if (kvm_enabled()) {
env->CP0_EBase |= 0x40000000;
} else {
env->CP0_EBase |= (int32_t)0x80000000;
}
if (env->CP0_Config3 & (1 << CP0C3_CMGCR)) {
env->CP0_CMGCRBase = 0x1fbf8000 >> 4;
}
env->CP0_EntryHi_ASID_mask = (env->CP0_Config4 & (1 << CP0C4_AE)) ?
0x3ff : 0xff;
env->CP0_Status = (1 << CP0St_BEV) | (1 << CP0St_ERL);
/* vectored interrupts not implemented, timer on int 7,
no performance counters. */
env->CP0_IntCtl = 0xe0000000;
{
int i;
for (i = 0; i < 7; i++) {
env->CP0_WatchLo[i] = 0;
env->CP0_WatchHi[i] = 0x80000000;
}
env->CP0_WatchLo[7] = 0;
env->CP0_WatchHi[7] = 0;
}
/* Count register increments in debug mode, EJTAG version 1 */
env->CP0_Debug = (1 << CP0DB_CNT) | (0x1 << CP0DB_VER);
cpu_mips_store_count(env, 1);
if (env->CP0_Config3 & (1 << CP0C3_MT)) {
int i;
/* Only TC0 on VPE 0 starts as active. */
for (i = 0; i < ARRAY_SIZE(env->tcs); i++) {
env->tcs[i].CP0_TCBind = cs->cpu_index << CP0TCBd_CurVPE;
env->tcs[i].CP0_TCHalt = 1;
}
env->active_tc.CP0_TCHalt = 1;
cs->halted = 1;
if (cs->cpu_index == 0) {
/* VPE0 starts up enabled. */
env->mvp->CP0_MVPControl |= (1 << CP0MVPCo_EVP);
env->CP0_VPEConf0 |= (1 << CP0VPEC0_MVP) | (1 << CP0VPEC0_VPA);
/* TC0 starts up unhalted. */
cs->halted = 0;
env->active_tc.CP0_TCHalt = 0;
env->tcs[0].CP0_TCHalt = 0;
/* With thread 0 active. */
env->active_tc.CP0_TCStatus = (1 << CP0TCSt_A);
env->tcs[0].CP0_TCStatus = (1 << CP0TCSt_A);
}
}
#endif
if ((env->insn_flags & ISA_MIPS32R6) &&
(env->active_fpu.fcr0 & (1 << FCR0_F64))) {
/* Status.FR = 0 mode in 64-bit FPU not allowed in R6 */
env->CP0_Status |= (1 << CP0St_FR);
}
/* MSA */
if (env->CP0_Config3 & (1 << CP0C3_MSAP)) {
msa_reset(env);
}
compute_hflags(env);
restore_fp_status(env);
restore_pamask(env);
cs->exception_index = EXCP_NONE;
if (semihosting_get_argc()) {
/* UHI interface can be used to obtain argc and argv */
env->active_tc.gpr[4] = -1;
}
} | true | qemu | cec56a733dd2c3fa81dbedbecf03922258747f7d |
617 | static void spapr_vio_quiesce_one(VIOsPAPRDevice *dev)
{
dev->flags &= ~VIO_PAPR_FLAG_DMA_BYPASS;
if (dev->rtce_table) {
size_t size = (dev->rtce_window_size >> SPAPR_VIO_TCE_PAGE_SHIFT)
* sizeof(VIOsPAPR_RTCE);
memset(dev->rtce_table, 0, size);
}
dev->crq.qladdr = 0;
dev->crq.qsize = 0;
dev->crq.qnext = 0;
}
| true | qemu | ad0ebb91cd8b5fdc4a583b03645677771f420a46 |
618 | int av_image_alloc(uint8_t *pointers[4], int linesizes[4],
int w, int h, enum AVPixelFormat pix_fmt, int align)
{
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(pix_fmt);
int i, ret;
uint8_t *buf;
if (!desc)
return AVERROR(EINVAL);
if ((ret = av_image_check_size(w, h, 0, NULL)) < 0)
return ret;
if ((ret = av_image_fill_linesizes(linesizes, pix_fmt, align>7 ? FFALIGN(w, 8) : w)) < 0)
return ret;
for (i = 0; i < 4; i++)
linesizes[i] = FFALIGN(linesizes[i], align);
if ((ret = av_image_fill_pointers(pointers, pix_fmt, h, NULL, linesizes)) < 0)
return ret;
buf = av_malloc(ret + align);
if (!buf)
return AVERROR(ENOMEM);
if ((ret = av_image_fill_pointers(pointers, pix_fmt, h, buf, linesizes)) < 0) {
av_free(buf);
        return ret;
    }
    if (desc->flags & AV_PIX_FMT_FLAG_PAL || desc->flags & AV_PIX_FMT_FLAG_PSEUDOPAL)
        avpriv_set_systematic_pal2((uint32_t*)pointers[1], pix_fmt);
    return ret;
} | true | FFmpeg | 51f64552853e16d72644308db53abee870aecfb9