problem stringlengths 26 131k | labels class label 2 classes |
|---|---|
Rest api - update single field of resource : <p>Lets say I have rest endpoint for my Driver resource.
I have PUT method like this</p>
<pre><code>myapi/drivers/{id}
{body of put method}
</code></pre>
<p>I need to add functionality which will allow to 'enable' and 'disable' driver</p>
<p>Is it good idea to create new endpoint for that like this?</p>
<pre><code>PUT myapi/drivers/{id}/enable/false
</code></pre>
<p>or is it better to use the existing endpoint? One problem with using the existing endpoint is that the driver has lots of fields (almost 30), and sending all those fields just to update the 'enabled'/'disabled' flag seems like overkill.</p>
<p>What do you think?</p>
| 0debug |
Mysql with ajax : <p>Here is my code that I'm using.</p>
<p><strong>ajax</strong></p>
<pre><code>$.ajax({
type: "GET",
url: "process.php",
dataType: "html",
success: function(data){
$(".content").html(data);
}
});
$("#submit").click(function(){
$.ajax({
type: "POST",
url: "post.php",
dataType: "html",
success: function(data1){
$(".content").html(data1);
}
});
});
</code></pre>
<p><strong>process.php</strong></p>
<pre><code><?php
$db = mysqli_connect("localhost","root","","webcap");
$query = mysqli_query($db,"SELECT * FROM test");
while($row = mysqli_fetch_assoc($query)){
echo $row['num'];
}
</code></pre>
<p><strong>post.php</strong></p>
<pre><code><?php
$db = mysqli_connect("localhost","root","","webcap");
$num_query = mysqli_query($db,"SELECT * FROM test");
$num_row = mysqli_fetch_assoc($num_query);
$number = $num_row['num'];
$new_number = $number + 1;
$query2 = mysqli_query($db,"UPDATE test SET num='$new_number'");
echo $new_number;
</code></pre>
<p>The idea is that when I click the button it adds one to my database and I can see the change without refreshing, but others don't.</p>
<p>I want that everyone in my website see it in real time without refreshing</p>
| 0debug |
/*
 * Decode one Ut Video frame from avpkt into the ThreadFrame wrapped in
 * *data.
 *
 * Bitstream layout handled here: for each plane, a 256-byte Huffman
 * table followed by c->slices little-endian 32-bit cumulative slice
 * offsets and then the slice payload; after all planes,
 * c->frame_info_size bytes of frame-information flags.
 *
 * Returns buf_size (whole packet consumed) on success, or a negative
 * AVERROR code on failure.
 */
static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
AVPacket *avpkt)
{
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
UtvideoContext *c = avctx->priv_data;
int i, j;
/* Start pointers for up to 4 planes plus one end sentinel. */
const uint8_t *plane_start[5];
int plane_size, max_slice_size = 0, slice_start, slice_end, slice_size;
int ret;
GetByteContext gb;
ThreadFrame frame = { .f = data };
/* NOTE(review): the output buffer is acquired before any of the packet
 * is validated; every early error return below abandons the frame. */
if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
return ret;
bytestream2_init(&gb, buf, buf_size);
/* First pass: record each plane's start and size-check its header
 * (256-byte Huffman table + one le32 cumulative offset per slice). */
for (i = 0; i < c->planes; i++) {
plane_start[i] = gb.buffer;
if (bytestream2_get_bytes_left(&gb) < 256 + 4 * c->slices) {
av_log(avctx, AV_LOG_ERROR, "Insufficient data for a plane\n");
return AVERROR_INVALIDDATA;
}
/* Skip the per-plane Huffman table; decode_plane re-reads it. */
bytestream2_skipu(&gb, 256);
slice_start = 0;
slice_end = 0;
for (j = 0; j < c->slices; j++) {
/* Offsets are cumulative. A raw value >= 2^31 wraps negative in
 * the signed int and is rejected by the slice_end < 0 test. */
slice_end = bytestream2_get_le32u(&gb);
slice_size = slice_end - slice_start;
if (slice_end < 0 || slice_size < 0 ||
bytestream2_get_bytes_left(&gb) < slice_end) {
av_log(avctx, AV_LOG_ERROR, "Incorrect slice size\n");
return AVERROR_INVALIDDATA;
}
slice_start = slice_end;
max_slice_size = FFMAX(max_slice_size, slice_size);
}
/* The last cumulative offset is the plane's total payload size. */
plane_size = slice_end;
bytestream2_skipu(&gb, plane_size);
}
/* End sentinel. NOTE(review): plane_start has 5 slots, so this write
 * assumes c->planes <= 4 -- confirm the init code caps it. */
plane_start[c->planes] = gb.buffer;
if (bytestream2_get_bytes_left(&gb) < c->frame_info_size) {
av_log(avctx, AV_LOG_ERROR, "Not enough data for frame information\n");
return AVERROR_INVALIDDATA;
}
c->frame_info = bytestream2_get_le32u(&gb);
av_log(avctx, AV_LOG_DEBUG, "frame information flags %"PRIX32"\n",
c->frame_info);
/* Bits 8-9 of the flags select the prediction mode. */
c->frame_pred = (c->frame_info >> 8) & 3;
if (c->frame_pred == PRED_GRADIENT) {
avpriv_request_sample(avctx, "Frame with gradient prediction");
return AVERROR_PATCHWELCOME;
}
/* Scratch buffer sized for the largest slice of any plane.
 * NOTE(review): max_slice_size + AV_INPUT_BUFFER_PADDING_SIZE could
 * overflow int for packets near INT_MAX bytes -- confirm upstream
 * packet-size limits make that unreachable. */
av_fast_malloc(&c->slice_bits, &c->slice_bits_size,
max_slice_size + AV_INPUT_BUFFER_PADDING_SIZE);
if (!c->slice_bits) {
av_log(avctx, AV_LOG_ERROR, "Cannot allocate temporary buffer\n");
return AVERROR(ENOMEM);
}
/* Second pass: entropy-decode each plane, then undo prediction. */
switch (c->avctx->pix_fmt) {
case AV_PIX_FMT_RGB24:
case AV_PIX_FMT_RGBA:
/* Interleaved RGB(A): all planes share data[0] with stride c->planes. */
for (i = 0; i < c->planes; i++) {
ret = decode_plane(c, i, frame.f->data[0] + ff_ut_rgb_order[i],
c->planes, frame.f->linesize[0], avctx->width,
avctx->height, plane_start[i],
c->frame_pred == PRED_LEFT);
if (ret)
return ret;
/* Median prediction is undone in a separate pass. */
if (c->frame_pred == PRED_MEDIAN) {
if (!c->interlaced) {
restore_median(frame.f->data[0] + ff_ut_rgb_order[i],
c->planes, frame.f->linesize[0], avctx->width,
avctx->height, c->slices, 0);
} else {
restore_median_il(frame.f->data[0] + ff_ut_rgb_order[i],
c->planes, frame.f->linesize[0],
avctx->width, avctx->height, c->slices,
0);
}
}
}
/* Undo the channel reordering applied by the encoder. */
restore_rgb_planes(frame.f->data[0], c->planes, frame.f->linesize[0],
avctx->width, avctx->height);
break;
case AV_PIX_FMT_YUV420P:
/* 4:2:0: chroma planes (i > 0) are halved in both dimensions. */
for (i = 0; i < 3; i++) {
ret = decode_plane(c, i, frame.f->data[i], 1, frame.f->linesize[i],
avctx->width >> !!i, avctx->height >> !!i,
plane_start[i], c->frame_pred == PRED_LEFT);
if (ret)
return ret;
if (c->frame_pred == PRED_MEDIAN) {
if (!c->interlaced) {
restore_median(frame.f->data[i], 1, frame.f->linesize[i],
avctx->width >> !!i, avctx->height >> !!i,
c->slices, !i);
} else {
restore_median_il(frame.f->data[i], 1, frame.f->linesize[i],
avctx->width >> !!i,
avctx->height >> !!i,
c->slices, !i);
}
}
}
break;
case AV_PIX_FMT_YUV422P:
/* 4:2:2: chroma planes are halved horizontally only. */
for (i = 0; i < 3; i++) {
ret = decode_plane(c, i, frame.f->data[i], 1, frame.f->linesize[i],
avctx->width >> !!i, avctx->height,
plane_start[i], c->frame_pred == PRED_LEFT);
if (ret)
return ret;
if (c->frame_pred == PRED_MEDIAN) {
if (!c->interlaced) {
restore_median(frame.f->data[i], 1, frame.f->linesize[i],
avctx->width >> !!i, avctx->height,
c->slices, 0);
} else {
restore_median_il(frame.f->data[i], 1, frame.f->linesize[i],
avctx->width >> !!i, avctx->height,
c->slices, 0);
}
}
}
break;
}
/* Every Ut Video frame is a self-contained intra frame. */
frame.f->key_frame = 1;
frame.f->pict_type = AV_PICTURE_TYPE_I;
frame.f->interlaced_frame = !!c->interlaced;
*got_frame = 1;
/* The whole packet was consumed. */
return buf_size;
}
| 1threat |
/*
 * Seed a SchroParseUnitContext so subsequent parsing walks the supplied
 * buffer. No data is copied; the context merely borrows the pointer, so
 * the buffer must stay valid for the lifetime of the parse.
 */
static void parse_context_init(SchroParseUnitContext *parse_ctx,
                               const uint8_t *buf, int buf_size)
{
    parse_ctx->buf_size = buf_size;
    parse_ctx->buf      = buf;
}
| 1threat |
/*
 * Compare the ICMP portion of a primary and a secondary packet.
 *
 * The payload after the IP header (ip_hl words of 4 bytes, plus the
 * Ethernet header) is compared via colo_packet_compare_common.
 *
 * Returns 0 when the packets match, -1 on a miscompare (additionally
 * hexdumping both packets when miscompare tracing is enabled).
 */
static int colo_packet_compare_icmp(Packet *spkt, Packet *ppkt)
{
    int network_header_length = ppkt->ip->ip_hl * 4;

    trace_colo_compare_main("compare icmp");

    /* Fast path: payloads agree, nothing to report. */
    if (!colo_packet_compare_common(ppkt, spkt,
                                    network_header_length + ETH_HLEN)) {
        return 0;
    }

    /* Miscompare: record both sizes, and dump the raw packets if the
     * miscompare trace event is active. */
    trace_colo_compare_icmp_miscompare("primary pkt size",
                                       ppkt->size);
    trace_colo_compare_icmp_miscompare("Secondary pkt size",
                                       spkt->size);
    if (trace_event_get_state(TRACE_COLO_COMPARE_MISCOMPARE)) {
        qemu_hexdump((char *)ppkt->data, stderr, "colo-compare pri pkt",
                     ppkt->size);
        qemu_hexdump((char *)spkt->data, stderr, "colo-compare sec pkt",
                     spkt->size);
    }
    return -1;
}
| 1threat |
multiple command in postStart hook of a container : <p>in a kubernetes Deployment yaml file is there a simple way to run multiple commands in the postStart hook of a container?</p>
<p>I'm trying to do something like this:</p>
<pre><code>lifecycle:
postStart:
exec:
command: ["/bin/cp", "/webapps/myapp.war", "/apps/"]
command: ["/bin/mkdir", "-p", "/conf/myapp"]
command: ["touch", "/conf/myapp/ready.txt"]
</code></pre>
<p>But it doesn't work.
(looks like only the last command is executed)</p>
<p>I know I could embed a script in the container image and simply call it there... But I would like to be able to customize those commands in the yaml file without touching the container image.</p>
<p>thanks</p>
| 0debug |
PYTHON: Find duplicate elements in the first index of each item and removing items based on the second index : <p>I have a 2D array storing a name and score for different people. The program should sort through the array, and if there are multiple scores per person, it should remove them all except the highest one.</p>
<pre><code>scores = [["Alexander", 7], ["Lucy", 4], ["Kieran", 5], ["Alexander", 4]]
</code></pre>
<p>Should remove ["Alexander", 4] as there are two items in the array with name "Alexander" and 4 is the lowest score out of them both.</p>
| 0debug |
How do I extract a type from an array in typescript? : <p>Is there a way to declare a type in typescript that 'extracts' the inner type of an array?</p>
<p>Example: </p>
<p>Let's say I already have something like this in my codebase:</p>
<pre class="lang-typescript prettyprint-override"><code>export interface Cache {
events: Event[],
users: User[]
}
type CacheType = Event[] | User[];
//or maybe:
// type TypeOfProperty = T[keyof T];
// type CacheType = TypeOfProperty<Cache>;
</code></pre>
<p>What I want is something which would be equivalent to this: </p>
<pre class="lang-typescript prettyprint-override"><code>type InnerCacheType = Event | User;
</code></pre>
<p>But without manually retyping it every time I add something to <code>Cache</code> or <code>CacheType</code></p>
<p>Is this possible in Typescript?</p>
| 0debug |
/*
 * SPE efdctuf helper: convert the double-precision operand (raw bits in
 * val) to an unsigned 32-bit fractional result by scaling it by 2^32
 * and converting to uint32.
 *
 * Returns 0 for NaN input; otherwise the converted value, with
 * rounding/saturation behavior supplied by env->vec_status.
 */
uint32_t helper_efdctuf (uint64_t val)
{
CPU_DoubleU u;
float64 tmp;
/* Reinterpret the raw 64-bit pattern as a double via the union. */
u.ll = val;
/* NaN inputs yield 0 rather than flowing through the conversion. */
if (unlikely(float64_is_nan(u.d)))
return 0;
/* Scale by 2^32 so the fractional part maps onto the uint32 range. */
tmp = uint64_to_float64(1ULL << 32, &env->vec_status);
u.d = float64_mul(u.d, tmp, &env->vec_status);
/* NOTE(review): out-of-range and negative inputs rely on softfloat's
 * float64_to_uint32 saturation semantics -- confirm against the SPE
 * specification. */
return float64_to_uint32(u.d, &env->vec_status);
}
| 1threat |
Explanation of different conda channels : <p>What are the major conda channels, and what are their focuses? I can't seem to find any documentation on what major channels are available and when to choose one over the other. What is the relationship to the "default" channel? How does one decide what order to put them in? In general, I use</p>
<ul>
<li><code>anaconda</code></li>
<li><code>conda_forge</code></li>
<li><code>r</code></li>
<li><code>bioconda</code></li>
<li><code>defaults</code></li>
</ul>
<p>But I've been running into some problems with my environment breaking. </p>
| 0debug |
Is there any plugin or way to upload file to server using flutter web? : <p>I want to upload image to the server from flutter web application. Is there any better way of doing that.</p>
<p>I've already tried with couple of plugins.
image-picker, file-picker
But none of them are supported for flutter web.</p>
| 0debug |
What classes in java are overriding equals() and hashCode() methods? : <p>I know that String class has it own implementation for equals method?</p>
<p>Does it have its own implementation for hashCode() method as well?</p>
<p>Does Integer and other wrapper classes in java have their own implementation for hashCode and equals()?</p>
| 0debug |
launchFragmentInContainer unable to resolve Activity in Android : <p>While writing a simple test which uses <code>launchFragmentInContainer</code>, I get the following error message:</p>
<pre><code>java.lang.RuntimeException: Unable to resolve activity for: Intent { act=android.intent.action.MAIN cat=[android.intent.category.LAUNCHER] cmp=com.myapp.appname.debug/androidx.fragment.app.testing.FragmentScenario$EmptyFragmentActivity (has extras) }
</code></pre>
<p>The basic test class is:</p>
<pre><code>class OneFragmentTest {
@Test
fun testOneFragmentState_checkTitleText() {
val args = Bundle().apply {
putString("dummyKey", "dummyValue")
}
launchFragmentInContainer<OneFragment>(args)
onView(withId(R.id.tv_title)).check(matches(withText("title here")))
}
}
</code></pre>
<p>I have tried to update <code>AndroidManifest.xml</code> with the following:</p>
<pre class="lang-xml prettyprint-override"><code><instrumentation
android:name="android.test.InstrumentationTestRunner"
android:targetPackage="com.myapp.appname" />
</code></pre>
<p>but it seems that the tag <code>instrumentation</code> is valid but the values are written in red, so I assume something is wrong with the <code>targetPackage</code> and <code>name</code>.</p>
<p>How can I get rid of this error and run a simple test on OneFragment using <code>launchFragmentInContainer</code>? </p>
| 0debug |
/*
 * Open a VMDK image whose header points at a plain-text descriptor.
 *
 * Reads up to 2048 bytes of descriptor text at desc_offset, validates
 * the createType field, parses the extent list, and opens the parent
 * link if present.
 *
 * Returns 0 on success or a negative errno value on failure.
 */
static int vmdk_open_desc_file(BlockDriverState *bs, int flags,
int64_t desc_offset)
{
int ret;
/* NOTE(review): fixed 2048-byte buffer -- descriptors longer than this
 * are silently truncated; confirm that is acceptable here. */
char buf[2048];
char ct[128];
BDRVVmdkState *s = bs->opaque;
ret = bdrv_pread(bs->file, desc_offset, buf, sizeof(buf));
if (ret < 0) {
return ret;
}
/* Force NUL termination so the string parsers below cannot overrun.
 * NOTE(review): a short read (0 <= ret < 2048) would leave bytes
 * between ret and 2047 uninitialized -- verify bdrv_pread's contract. */
buf[2047] = '\0';
if (vmdk_parse_description(buf, "createType", ct, sizeof(ct))) {
return -EINVAL;
}
/* Only flat / split-sparse / split-flat descriptor types are handled. */
if (strcmp(ct, "monolithicFlat") &&
strcmp(ct, "twoGbMaxExtentSparse") &&
strcmp(ct, "twoGbMaxExtentFlat")) {
fprintf(stderr,
"VMDK: Not supported image type \"%s\""".\n", ct);
return -ENOTSUP;
}
s->desc_offset = 0;
ret = vmdk_parse_extents(buf, bs, bs->file->filename);
if (ret) {
return ret;
}
/* NOTE(review): s->extents is released only on this failure path, not
 * on the vmdk_parse_extents error above -- confirm that function
 * cleans up after itself on failure. */
if (vmdk_parent_open(bs)) {
g_free(s->extents);
return -EINVAL;
}
s->parent_cid = vmdk_read_cid(bs, 1);
return 0;
}
| 1threat |
TimescaleDB: Is it possible to call 'create_hypertable' from Python? : <p>I want to create TimescaleDB tables in Postgres on the fly as I'm dealing with data sources that change (financial feeds, so could be 100, could be 1000) over time and I want one table per data source.</p>
<p>I can create the tables no problem from Python, but when I call <code>SELECT create_hypertable(test_table1, time)</code> it throws an error. The same query seems to work fine when executed from pSQL of course, so it looks like the timescale API isn't available via psycopg2 perhaps?</p>
<h2>Environment:</h2>
<ul>
<li>Python 3.6.4</li>
<li>psycopg2-binary-2.7.4 (installed with the flag: --no-binary :all:)</li>
<li>Postgres: 10.3</li>
<li>Timescaledb: 0.8.0-2</li>
<li>MacOS: 10.13.3</li>
</ul>
<h2>Test Code:</h2>
<pre><code>db.query("CREATE TABLE test_table1 (time TIMESTAMP NOT NULL, name CHAR(100) NOT NULL")
db.query("SELECT create_hypertable('test_table1', 'time')")
</code></pre>
<h2>Error:</h2>
<blockquote>
<p>2018-03-05 11:45:36,901 [MainThread ] [ERROR] function create_hypertable(unknown, unknown) does not exist<br/>
LINE 1: SELECT create_hypertable('temp_table1', 'time')<br/>
. . . . . . . . . . . . . . ^<br/>
HINT: No function matches the given name and argument types. You might need to add explicit type casts.</p>
</blockquote>
<p>Does anyone know if there currently anyway to making this work? Have I missed something simple? Or is there another service that can replace timescale functionality that supports being dynamically created?</p>
| 0debug |
Arrange inputs into equal length : I have multiple rows with inputs
<input type="text" style="width: 50px;" >
<br/>
<input type="text" style="width: 25px;" >
<input type="text" style="width: 25px;" >
Unfortunately the `25+25 != 50` because there is a space between, which varies by each browser.
[![enter image description here][1]][1]
Question: How to style the inputs so that `2x25` has equal length with `1x50`
[1]: https://i.stack.imgur.com/fhP6o.png | 0debug |
How can I wrap children elements inside a div with pure javascript? : <p>For instance, I have the following code</p>
<pre><code><div class="parent>
<p> Item 1 </p>
<p> Item 1 </p>
<p> Item 1 </p>
</div>
</code></pre>
<p>I want turn the code above into the following code with pure javascript</p>
<pre><code><div class="parent>
<div class="parent-child>
<p> Item 1 </p>
<p> Item 1 </p>
<p> Item 1 </p>
</div>
</div>
</code></pre>
| 0debug |
static int transcode_init(void)
{
int ret = 0, i, j, k;
AVFormatContext *oc;
OutputStream *ost;
InputStream *ist;
char error[1024] = {0};
int want_sdp = 1;
for (i = 0; i < nb_filtergraphs; i++) {
FilterGraph *fg = filtergraphs[i];
for (j = 0; j < fg->nb_outputs; j++) {
OutputFilter *ofilter = fg->outputs[j];
if (!ofilter->ost || ofilter->ost->source_index >= 0)
continue;
if (fg->nb_inputs != 1)
continue;
for (k = nb_input_streams-1; k >= 0 ; k--)
if (fg->inputs[0]->ist == input_streams[k])
break;
ofilter->ost->source_index = k;
}
}
for (i = 0; i < nb_input_files; i++) {
InputFile *ifile = input_files[i];
if (ifile->rate_emu)
for (j = 0; j < ifile->nb_streams; j++)
input_streams[j + ifile->ist_index]->start = av_gettime_relative();
}
for (i = 0; i < nb_output_files; i++) {
oc = output_files[i]->ctx;
if (!oc->nb_streams && !(oc->oformat->flags & AVFMT_NOSTREAMS)) {
av_dump_format(oc, i, oc->filename, 1);
av_log(NULL, AV_LOG_ERROR, "Output file #%d does not contain any stream\n", i);
return AVERROR(EINVAL);
}
}
for (i = 0; i < nb_filtergraphs; i++)
if ((ret = avfilter_graph_config(filtergraphs[i]->graph, NULL)) < 0)
return ret;
for (i = 0; i < nb_output_streams; i++) {
AVCodecContext *enc_ctx;
AVCodecContext *dec_ctx = NULL;
ost = output_streams[i];
oc = output_files[ost->file_index]->ctx;
ist = get_input_stream(ost);
if (ost->attachment_filename)
continue;
enc_ctx = ost->stream_copy ? ost->st->codec : ost->enc_ctx;
if (ist) {
dec_ctx = ist->dec_ctx;
ost->st->disposition = ist->st->disposition;
enc_ctx->bits_per_raw_sample = dec_ctx->bits_per_raw_sample;
enc_ctx->chroma_sample_location = dec_ctx->chroma_sample_location;
} else {
for (j=0; j<oc->nb_streams; j++) {
AVStream *st = oc->streams[j];
if (st != ost->st && st->codec->codec_type == enc_ctx->codec_type)
break;
}
if (j == oc->nb_streams)
if (enc_ctx->codec_type == AVMEDIA_TYPE_AUDIO || enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
ost->st->disposition = AV_DISPOSITION_DEFAULT;
}
if (ost->stream_copy) {
AVRational sar;
uint64_t extra_size;
av_assert0(ist && !ost->filter);
extra_size = (uint64_t)dec_ctx->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE;
if (extra_size > INT_MAX) {
return AVERROR(EINVAL);
}
enc_ctx->codec_id = dec_ctx->codec_id;
enc_ctx->codec_type = dec_ctx->codec_type;
if (!enc_ctx->codec_tag) {
unsigned int codec_tag;
if (!oc->oformat->codec_tag ||
av_codec_get_id (oc->oformat->codec_tag, dec_ctx->codec_tag) == enc_ctx->codec_id ||
!av_codec_get_tag2(oc->oformat->codec_tag, dec_ctx->codec_id, &codec_tag))
enc_ctx->codec_tag = dec_ctx->codec_tag;
}
enc_ctx->bit_rate = dec_ctx->bit_rate;
enc_ctx->rc_max_rate = dec_ctx->rc_max_rate;
enc_ctx->rc_buffer_size = dec_ctx->rc_buffer_size;
enc_ctx->field_order = dec_ctx->field_order;
if (dec_ctx->extradata_size) {
enc_ctx->extradata = av_mallocz(extra_size);
if (!enc_ctx->extradata) {
return AVERROR(ENOMEM);
}
memcpy(enc_ctx->extradata, dec_ctx->extradata, dec_ctx->extradata_size);
}
enc_ctx->extradata_size= dec_ctx->extradata_size;
enc_ctx->bits_per_coded_sample = dec_ctx->bits_per_coded_sample;
enc_ctx->time_base = ist->st->time_base;
if(!strcmp(oc->oformat->name, "avi")) {
if ( copy_tb<0 && av_q2d(ist->st->r_frame_rate) >= av_q2d(ist->st->avg_frame_rate)
&& 0.5/av_q2d(ist->st->r_frame_rate) > av_q2d(ist->st->time_base)
&& 0.5/av_q2d(ist->st->r_frame_rate) > av_q2d(dec_ctx->time_base)
&& av_q2d(ist->st->time_base) < 1.0/500 && av_q2d(dec_ctx->time_base) < 1.0/500
|| copy_tb==2){
enc_ctx->time_base.num = ist->st->r_frame_rate.den;
enc_ctx->time_base.den = 2*ist->st->r_frame_rate.num;
enc_ctx->ticks_per_frame = 2;
} else if ( copy_tb<0 && av_q2d(dec_ctx->time_base)*dec_ctx->ticks_per_frame > 2*av_q2d(ist->st->time_base)
&& av_q2d(ist->st->time_base) < 1.0/500
|| copy_tb==0){
enc_ctx->time_base = dec_ctx->time_base;
enc_ctx->time_base.num *= dec_ctx->ticks_per_frame;
enc_ctx->time_base.den *= 2;
enc_ctx->ticks_per_frame = 2;
}
} else if(!(oc->oformat->flags & AVFMT_VARIABLE_FPS)
&& strcmp(oc->oformat->name, "mov") && strcmp(oc->oformat->name, "mp4") && strcmp(oc->oformat->name, "3gp")
&& strcmp(oc->oformat->name, "3g2") && strcmp(oc->oformat->name, "psp") && strcmp(oc->oformat->name, "ipod")
&& strcmp(oc->oformat->name, "f4v")
) {
if( copy_tb<0 && dec_ctx->time_base.den
&& av_q2d(dec_ctx->time_base)*dec_ctx->ticks_per_frame > av_q2d(ist->st->time_base)
&& av_q2d(ist->st->time_base) < 1.0/500
|| copy_tb==0){
enc_ctx->time_base = dec_ctx->time_base;
enc_ctx->time_base.num *= dec_ctx->ticks_per_frame;
}
}
if ( enc_ctx->codec_tag == AV_RL32("tmcd")
&& dec_ctx->time_base.num < dec_ctx->time_base.den
&& dec_ctx->time_base.num > 0
&& 121LL*dec_ctx->time_base.num > dec_ctx->time_base.den) {
enc_ctx->time_base = dec_ctx->time_base;
}
if (ist && !ost->frame_rate.num)
ost->frame_rate = ist->framerate;
if(ost->frame_rate.num)
enc_ctx->time_base = av_inv_q(ost->frame_rate);
av_reduce(&enc_ctx->time_base.num, &enc_ctx->time_base.den,
enc_ctx->time_base.num, enc_ctx->time_base.den, INT_MAX);
if (ist->st->nb_side_data) {
ost->st->side_data = av_realloc_array(NULL, ist->st->nb_side_data,
sizeof(*ist->st->side_data));
if (!ost->st->side_data)
return AVERROR(ENOMEM);
for (j = 0; j < ist->st->nb_side_data; j++) {
const AVPacketSideData *sd_src = &ist->st->side_data[j];
AVPacketSideData *sd_dst = &ost->st->side_data[j];
sd_dst->data = av_malloc(sd_src->size);
if (!sd_dst->data)
return AVERROR(ENOMEM);
memcpy(sd_dst->data, sd_src->data, sd_src->size);
sd_dst->size = sd_src->size;
sd_dst->type = sd_src->type;
ost->st->nb_side_data++;
}
}
ost->parser = av_parser_init(enc_ctx->codec_id);
switch (enc_ctx->codec_type) {
case AVMEDIA_TYPE_AUDIO:
if (audio_volume != 256) {
av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
exit_program(1);
}
enc_ctx->channel_layout = dec_ctx->channel_layout;
enc_ctx->sample_rate = dec_ctx->sample_rate;
enc_ctx->channels = dec_ctx->channels;
enc_ctx->frame_size = dec_ctx->frame_size;
enc_ctx->audio_service_type = dec_ctx->audio_service_type;
enc_ctx->block_align = dec_ctx->block_align;
enc_ctx->initial_padding = dec_ctx->delay;
#if FF_API_AUDIOENC_DELAY
enc_ctx->delay = dec_ctx->delay;
#endif
if((enc_ctx->block_align == 1 || enc_ctx->block_align == 1152 || enc_ctx->block_align == 576) && enc_ctx->codec_id == AV_CODEC_ID_MP3)
enc_ctx->block_align= 0;
if(enc_ctx->codec_id == AV_CODEC_ID_AC3)
enc_ctx->block_align= 0;
break;
case AVMEDIA_TYPE_VIDEO:
enc_ctx->pix_fmt = dec_ctx->pix_fmt;
enc_ctx->width = dec_ctx->width;
enc_ctx->height = dec_ctx->height;
enc_ctx->has_b_frames = dec_ctx->has_b_frames;
if (ost->frame_aspect_ratio.num) {
sar =
av_mul_q(ost->frame_aspect_ratio,
(AVRational){ enc_ctx->height, enc_ctx->width });
av_log(NULL, AV_LOG_WARNING, "Overriding aspect ratio "
"with stream copy may produce invalid files\n");
}
else if (ist->st->sample_aspect_ratio.num)
sar = ist->st->sample_aspect_ratio;
else
sar = dec_ctx->sample_aspect_ratio;
ost->st->sample_aspect_ratio = enc_ctx->sample_aspect_ratio = sar;
ost->st->avg_frame_rate = ist->st->avg_frame_rate;
ost->st->r_frame_rate = ist->st->r_frame_rate;
break;
case AVMEDIA_TYPE_SUBTITLE:
enc_ctx->width = dec_ctx->width;
enc_ctx->height = dec_ctx->height;
break;
case AVMEDIA_TYPE_DATA:
case AVMEDIA_TYPE_ATTACHMENT:
break;
default:
abort();
}
} else {
if (!ost->enc)
ost->enc = avcodec_find_encoder(enc_ctx->codec_id);
if (!ost->enc) {
snprintf(error, sizeof(error), "Encoder (codec %s) not found for output stream #%d:%d",
avcodec_get_name(ost->st->codec->codec_id), ost->file_index, ost->index);
ret = AVERROR(EINVAL);
goto dump_format;
}
if (ist)
ist->decoding_needed |= DECODING_FOR_OST;
ost->encoding_needed = 1;
set_encoder_id(output_files[ost->file_index], ost);
if (!ost->filter &&
(enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
enc_ctx->codec_type == AVMEDIA_TYPE_AUDIO)) {
FilterGraph *fg;
fg = init_simple_filtergraph(ist, ost);
if (configure_filtergraph(fg)) {
av_log(NULL, AV_LOG_FATAL, "Error opening filters!\n");
exit_program(1);
}
}
if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
if (ost->filter && !ost->frame_rate.num)
ost->frame_rate = av_buffersink_get_frame_rate(ost->filter->filter);
if (ist && !ost->frame_rate.num)
ost->frame_rate = ist->framerate;
if (ist && !ost->frame_rate.num)
ost->frame_rate = ist->st->r_frame_rate;
if (ist && !ost->frame_rate.num) {
ost->frame_rate = (AVRational){25, 1};
av_log(NULL, AV_LOG_WARNING,
"No information "
"about the input framerate is available. Falling "
"back to a default value of 25fps for output stream #%d:%d. Use the -r option "
"if you want a different framerate.\n",
ost->file_index, ost->index);
}
if (ost->enc && ost->enc->supported_framerates && !ost->force_fps) {
int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
ost->frame_rate = ost->enc->supported_framerates[idx];
}
if (enc_ctx->codec_id == AV_CODEC_ID_MPEG4) {
av_reduce(&ost->frame_rate.num, &ost->frame_rate.den,
ost->frame_rate.num, ost->frame_rate.den, 65535);
}
}
switch (enc_ctx->codec_type) {
case AVMEDIA_TYPE_AUDIO:
enc_ctx->sample_fmt = ost->filter->filter->inputs[0]->format;
enc_ctx->sample_rate = ost->filter->filter->inputs[0]->sample_rate;
enc_ctx->channel_layout = ost->filter->filter->inputs[0]->channel_layout;
enc_ctx->channels = avfilter_link_get_channels(ost->filter->filter->inputs[0]);
enc_ctx->time_base = (AVRational){ 1, enc_ctx->sample_rate };
break;
case AVMEDIA_TYPE_VIDEO:
enc_ctx->time_base = av_inv_q(ost->frame_rate);
if (ost->filter && !(enc_ctx->time_base.num && enc_ctx->time_base.den))
enc_ctx->time_base = ost->filter->filter->inputs[0]->time_base;
if ( av_q2d(enc_ctx->time_base) < 0.001 && video_sync_method != VSYNC_PASSTHROUGH
&& (video_sync_method == VSYNC_CFR || video_sync_method == VSYNC_VSCFR || (video_sync_method == VSYNC_AUTO && !(oc->oformat->flags & AVFMT_VARIABLE_FPS)))){
av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
"Please consider specifying a lower framerate, a different muxer or -vsync 2\n");
}
for (j = 0; j < ost->forced_kf_count; j++)
ost->forced_kf_pts[j] = av_rescale_q(ost->forced_kf_pts[j],
AV_TIME_BASE_Q,
enc_ctx->time_base);
enc_ctx->width = ost->filter->filter->inputs[0]->w;
enc_ctx->height = ost->filter->filter->inputs[0]->h;
enc_ctx->sample_aspect_ratio = ost->st->sample_aspect_ratio =
ost->frame_aspect_ratio.num ?
av_mul_q(ost->frame_aspect_ratio, (AVRational){ enc_ctx->height, enc_ctx->width }) :
ost->filter->filter->inputs[0]->sample_aspect_ratio;
if (!strncmp(ost->enc->name, "libx264", 7) &&
enc_ctx->pix_fmt == AV_PIX_FMT_NONE &&
ost->filter->filter->inputs[0]->format != AV_PIX_FMT_YUV420P)
av_log(NULL, AV_LOG_WARNING,
"No pixel format specified, %s for H.264 encoding chosen.\n"
"Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
av_get_pix_fmt_name(ost->filter->filter->inputs[0]->format));
if (!strncmp(ost->enc->name, "mpeg2video", 10) &&
enc_ctx->pix_fmt == AV_PIX_FMT_NONE &&
ost->filter->filter->inputs[0]->format != AV_PIX_FMT_YUV420P)
av_log(NULL, AV_LOG_WARNING,
"No pixel format specified, %s for MPEG-2 encoding chosen.\n"
"Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
av_get_pix_fmt_name(ost->filter->filter->inputs[0]->format));
enc_ctx->pix_fmt = ost->filter->filter->inputs[0]->format;
ost->st->avg_frame_rate = ost->frame_rate;
if (!dec_ctx ||
enc_ctx->width != dec_ctx->width ||
enc_ctx->height != dec_ctx->height ||
enc_ctx->pix_fmt != dec_ctx->pix_fmt) {
enc_ctx->bits_per_raw_sample = frame_bits_per_raw_sample;
}
if (ost->forced_keyframes) {
if (!strncmp(ost->forced_keyframes, "expr:", 5)) {
ret = av_expr_parse(&ost->forced_keyframes_pexpr, ost->forced_keyframes+5,
forced_keyframes_const_names, NULL, NULL, NULL, NULL, 0, NULL);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR,
"Invalid force_key_frames expression '%s'\n", ost->forced_keyframes+5);
return ret;
}
ost->forced_keyframes_expr_const_values[FKF_N] = 0;
ost->forced_keyframes_expr_const_values[FKF_N_FORCED] = 0;
ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] = NAN;
ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] = NAN;
} else {
parse_forced_key_frames(ost->forced_keyframes, ost, ost->enc_ctx);
}
}
break;
case AVMEDIA_TYPE_SUBTITLE:
enc_ctx->time_base = (AVRational){1, 1000};
if (!enc_ctx->width) {
enc_ctx->width = input_streams[ost->source_index]->st->codec->width;
enc_ctx->height = input_streams[ost->source_index]->st->codec->height;
}
break;
case AVMEDIA_TYPE_DATA:
break;
default:
abort();
break;
}
if (enc_ctx->flags & (CODEC_FLAG_PASS1 | CODEC_FLAG_PASS2)) {
char logfilename[1024];
FILE *f;
snprintf(logfilename, sizeof(logfilename), "%s-%d.log",
ost->logfile_prefix ? ost->logfile_prefix :
DEFAULT_PASS_LOGFILENAME_PREFIX,
i);
if (!strcmp(ost->enc->name, "libx264")) {
av_dict_set(&ost->encoder_opts, "stats", logfilename, AV_DICT_DONT_OVERWRITE);
} else {
if (enc_ctx->flags & CODEC_FLAG_PASS2) {
char *logbuffer;
size_t logbuffer_size;
if (cmdutils_read_file(logfilename, &logbuffer, &logbuffer_size) < 0) {
av_log(NULL, AV_LOG_FATAL, "Error reading log file '%s' for pass-2 encoding\n",
logfilename);
exit_program(1);
}
enc_ctx->stats_in = logbuffer;
}
if (enc_ctx->flags & CODEC_FLAG_PASS1) {
f = av_fopen_utf8(logfilename, "wb");
if (!f) {
av_log(NULL, AV_LOG_FATAL, "Cannot write log file '%s' for pass-1 encoding: %s\n",
logfilename, strerror(errno));
exit_program(1);
}
ost->logfile = f;
}
}
}
}
if (ost->disposition) {
static const AVOption opts[] = {
{ "disposition" , NULL, 0, AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT64_MIN, INT64_MAX, .unit = "flags" },
{ "default" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEFAULT }, .unit = "flags" },
{ "dub" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DUB }, .unit = "flags" },
{ "original" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ORIGINAL }, .unit = "flags" },
{ "comment" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_COMMENT }, .unit = "flags" },
{ "lyrics" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_LYRICS }, .unit = "flags" },
{ "karaoke" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_KARAOKE }, .unit = "flags" },
{ "forced" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_FORCED }, .unit = "flags" },
{ "hearing_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_HEARING_IMPAIRED }, .unit = "flags" },
{ "visual_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_VISUAL_IMPAIRED }, .unit = "flags" },
{ "clean_effects" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CLEAN_EFFECTS }, .unit = "flags" },
{ "captions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CAPTIONS }, .unit = "flags" },
{ "descriptions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DESCRIPTIONS }, .unit = "flags" },
{ "metadata" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_METADATA }, .unit = "flags" },
{ NULL },
};
static const AVClass class = {
.class_name = "",
.item_name = av_default_item_name,
.option = opts,
.version = LIBAVUTIL_VERSION_INT,
};
const AVClass *pclass = &class;
ret = av_opt_eval_flags(&pclass, &opts[0], ost->disposition, &ost->st->disposition);
if (ret < 0)
goto dump_format;
}
}
for (i = 0; i < nb_output_streams; i++) {
ost = output_streams[i];
if (ost->encoding_needed) {
AVCodec *codec = ost->enc;
AVCodecContext *dec = NULL;
if ((ist = get_input_stream(ost)))
dec = ist->dec_ctx;
if (dec && dec->subtitle_header) {
ost->enc_ctx->subtitle_header = av_mallocz(dec->subtitle_header_size + 1);
if (!ost->enc_ctx->subtitle_header) {
ret = AVERROR(ENOMEM);
goto dump_format;
}
memcpy(ost->enc_ctx->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
ost->enc_ctx->subtitle_header_size = dec->subtitle_header_size;
}
if (!av_dict_get(ost->encoder_opts, "threads", NULL, 0))
av_dict_set(&ost->encoder_opts, "threads", "auto", 0);
av_dict_set(&ost->encoder_opts, "side_data_only_packets", "1", 0);
if ((ret = avcodec_open2(ost->enc_ctx, codec, &ost->encoder_opts)) < 0) {
if (ret == AVERROR_EXPERIMENTAL)
abort_codec_experimental(codec, 1);
snprintf(error, sizeof(error), "Error while opening encoder for output stream #%d:%d - maybe incorrect parameters such as bit_rate, rate, width or height",
ost->file_index, ost->index);
goto dump_format;
}
if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
!(ost->enc->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE))
av_buffersink_set_frame_size(ost->filter->filter,
ost->enc_ctx->frame_size);
assert_avoptions(ost->encoder_opts);
if (ost->enc_ctx->bit_rate && ost->enc_ctx->bit_rate < 1000)
av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
" It takes bits/s as argument, not kbits/s\n");
ret = avcodec_copy_context(ost->st->codec, ost->enc_ctx);
if (ret < 0) {
av_log(NULL, AV_LOG_FATAL,
"Error initializing the output stream codec context.\n");
exit_program(1);
}
ost->st->time_base = av_add_q(ost->enc_ctx->time_base, (AVRational){0, 1});
ost->st->codec->codec= ost->enc_ctx->codec;
} else {
ret = av_opt_set_dict(ost->enc_ctx, &ost->encoder_opts);
if (ret < 0) {
av_log(NULL, AV_LOG_FATAL,
"Error setting up codec context options.\n");
return ret;
}
ost->st->time_base = av_add_q(ost->st->codec->time_base, (AVRational){0, 1});
}
}
for (i = 0; i < nb_input_streams; i++)
if ((ret = init_input_stream(i, error, sizeof(error))) < 0) {
for (i = 0; i < nb_output_streams; i++) {
ost = output_streams[i];
avcodec_close(ost->enc_ctx);
}
goto dump_format;
}
for (i = 0; i < nb_input_files; i++) {
InputFile *ifile = input_files[i];
for (j = 0; j < ifile->ctx->nb_programs; j++) {
AVProgram *p = ifile->ctx->programs[j];
int discard = AVDISCARD_ALL;
for (k = 0; k < p->nb_stream_indexes; k++)
if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
discard = AVDISCARD_DEFAULT;
break;
}
p->discard = discard;
}
}
for (i = 0; i < nb_output_files; i++) {
oc = output_files[i]->ctx;
oc->interrupt_callback = int_cb;
if ((ret = avformat_write_header(oc, &output_files[i]->opts)) < 0) {
snprintf(error, sizeof(error),
"Could not write header for output file #%d "
"(incorrect codec parameters ?): %s",
i, av_err2str(ret));
ret = AVERROR(EINVAL);
goto dump_format;
}
if (strcmp(oc->oformat->name, "rtp")) {
want_sdp = 0;
}
}
dump_format:
for (i = 0; i < nb_output_files; i++) {
av_dump_format(output_files[i]->ctx, i, output_files[i]->ctx->filename, 1);
}
av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
for (i = 0; i < nb_input_streams; i++) {
ist = input_streams[i];
for (j = 0; j < ist->nb_filters; j++) {
if (ist->filters[j]->graph->graph_desc) {
av_log(NULL, AV_LOG_INFO, " Stream #%d:%d (%s) -> %s",
ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
ist->filters[j]->name);
if (nb_filtergraphs > 1)
av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
av_log(NULL, AV_LOG_INFO, "\n");
}
}
}
for (i = 0; i < nb_output_streams; i++) {
ost = output_streams[i];
if (ost->attachment_filename) {
av_log(NULL, AV_LOG_INFO, " File %s -> Stream #%d:%d\n",
ost->attachment_filename, ost->file_index, ost->index);
continue;
}
if (ost->filter && ost->filter->graph->graph_desc) {
av_log(NULL, AV_LOG_INFO, " %s", ost->filter->name);
if (nb_filtergraphs > 1)
av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);
av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
ost->index, ost->enc ? ost->enc->name : "?");
continue;
}
av_log(NULL, AV_LOG_INFO, " Stream #%d:%d -> #%d:%d",
input_streams[ost->source_index]->file_index,
input_streams[ost->source_index]->st->index,
ost->file_index,
ost->index);
if (ost->sync_ist != input_streams[ost->source_index])
av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
ost->sync_ist->file_index,
ost->sync_ist->st->index);
if (ost->stream_copy)
av_log(NULL, AV_LOG_INFO, " (copy)");
else {
const AVCodec *in_codec = input_streams[ost->source_index]->dec;
const AVCodec *out_codec = ost->enc;
const char *decoder_name = "?";
const char *in_codec_name = "?";
const char *encoder_name = "?";
const char *out_codec_name = "?";
const AVCodecDescriptor *desc;
if (in_codec) {
decoder_name = in_codec->name;
desc = avcodec_descriptor_get(in_codec->id);
if (desc)
in_codec_name = desc->name;
if (!strcmp(decoder_name, in_codec_name))
decoder_name = "native";
}
if (out_codec) {
encoder_name = out_codec->name;
desc = avcodec_descriptor_get(out_codec->id);
if (desc)
out_codec_name = desc->name;
if (!strcmp(encoder_name, out_codec_name))
encoder_name = "native";
}
av_log(NULL, AV_LOG_INFO, " (%s (%s) -> %s (%s))",
in_codec_name, decoder_name,
out_codec_name, encoder_name);
}
av_log(NULL, AV_LOG_INFO, "\n");
}
if (ret) {
av_log(NULL, AV_LOG_ERROR, "%s\n", error);
return ret;
}
if (sdp_filename || want_sdp) {
print_sdp();
}
transcode_init_done = 1;
return 0;
}
| 1threat |
I have activity in android project but when activity start i have node in firebase it should be online but its not working? : if i minimize my app and run that activity it works but when i come to that activity from another activity its not working
in my onCreate like below
auth = FirebaseAuth.getInstance();
currentUser = auth.getCurrentUser();
db = FirebaseDatabase.getInstance();
userRef = db.getReference().child("Users").child(auth.getCurrentUser().getUid());
if(currentUser == null) {
sendToStart();
} else {
userRef.child("online").setValue("true");
}
I have the same thing in my onStart method too but not working
can you please help me
| 0debug |
( laravel - Php ) i want a code that shows total users in database : <p>in user panel i want to show total registerd users in my site ,</p>
<p>Example :
Total Users On Our Network : 55
The 55 i want it to get how many users on our site from the database.</p>
| 0debug |
static int qemu_rbd_create(const char *filename, QEMUOptionParameter *options)
{
int64_t bytes = 0;
int64_t objsize;
int obj_order = 0;
char pool[RBD_MAX_POOL_NAME_SIZE];
char name[RBD_MAX_IMAGE_NAME_SIZE];
char snap_buf[RBD_MAX_SNAP_NAME_SIZE];
char conf[RBD_MAX_CONF_SIZE];
rados_t cluster;
rados_ioctx_t io_ctx;
int ret;
if (qemu_rbd_parsename(filename, pool, sizeof(pool),
snap_buf, sizeof(snap_buf),
name, sizeof(name),
conf, sizeof(conf)) < 0) {
return -EINVAL;
}
while (options && options->name) {
if (!strcmp(options->name, BLOCK_OPT_SIZE)) {
bytes = options->value.n;
} else if (!strcmp(options->name, BLOCK_OPT_CLUSTER_SIZE)) {
if (options->value.n) {
objsize = options->value.n;
if ((objsize - 1) & objsize) {
error_report("obj size needs to be power of 2");
return -EINVAL;
}
if (objsize < 4096) {
error_report("obj size too small");
return -EINVAL;
}
obj_order = ffs(objsize) - 1;
}
}
options++;
}
if (rados_create(&cluster, NULL) < 0) {
error_report("error initializing");
return -EIO;
}
if (strstr(conf, "conf=") == NULL) {
if (rados_conf_read_file(cluster, NULL) < 0) {
error_report("error reading config file");
rados_shutdown(cluster);
return -EIO;
}
}
if (conf[0] != '\0' &&
qemu_rbd_set_conf(cluster, conf) < 0) {
error_report("error setting config options");
rados_shutdown(cluster);
return -EIO;
}
if (rados_connect(cluster) < 0) {
error_report("error connecting");
rados_shutdown(cluster);
return -EIO;
}
if (rados_ioctx_create(cluster, pool, &io_ctx) < 0) {
error_report("error opening pool %s", pool);
rados_shutdown(cluster);
return -EIO;
}
ret = rbd_create(io_ctx, name, bytes, &obj_order);
rados_ioctx_destroy(io_ctx);
rados_shutdown(cluster);
return ret;
}
| 1threat |
static void sdhci_data_transfer(SDHCIState *s)
{
SDHCIClass *k = SDHCI_GET_CLASS(s);
if (s->trnmod & SDHC_TRNS_DMA) {
switch (SDHC_DMA_TYPE(s->hostctl)) {
case SDHC_CTRL_SDMA:
if ((s->trnmod & SDHC_TRNS_MULTI) &&
(!(s->trnmod & SDHC_TRNS_BLK_CNT_EN) || s->blkcnt == 0)) {
break;
}
if ((s->blkcnt == 1) || !(s->trnmod & SDHC_TRNS_MULTI)) {
k->do_sdma_single(s);
} else {
k->do_sdma_multi(s);
}
break;
case SDHC_CTRL_ADMA1_32:
if (!(s->capareg & SDHC_CAN_DO_ADMA1)) {
ERRPRINT("ADMA1 not supported\n");
break;
}
k->do_adma(s);
break;
case SDHC_CTRL_ADMA2_32:
if (!(s->capareg & SDHC_CAN_DO_ADMA2)) {
ERRPRINT("ADMA2 not supported\n");
break;
}
k->do_adma(s);
break;
case SDHC_CTRL_ADMA2_64:
if (!(s->capareg & SDHC_CAN_DO_ADMA2) ||
!(s->capareg & SDHC_64_BIT_BUS_SUPPORT)) {
ERRPRINT("64 bit ADMA not supported\n");
break;
}
k->do_adma(s);
break;
default:
ERRPRINT("Unsupported DMA type\n");
break;
}
} else {
if ((s->trnmod & SDHC_TRNS_READ) && sd_data_ready(s->card)) {
s->prnsts |= SDHC_DOING_READ | SDHC_DATA_INHIBIT |
SDHC_DAT_LINE_ACTIVE;
SDHCI_GET_CLASS(s)->read_block_from_card(s);
} else {
s->prnsts |= SDHC_DOING_WRITE | SDHC_DAT_LINE_ACTIVE |
SDHC_SPACE_AVAILABLE | SDHC_DATA_INHIBIT;
SDHCI_GET_CLASS(s)->write_block_to_card(s);
}
}
}
| 1threat |
static void new_video_stream(AVFormatContext *oc)
{
AVStream *st;
AVCodecContext *video_enc;
enum CodecID codec_id;
AVCodec *codec= NULL;
st = av_new_stream(oc, oc->nb_streams < nb_streamid_map ? streamid_map[oc->nb_streams] : 0);
if (!st) {
fprintf(stderr, "Could not alloc stream\n");
ffmpeg_exit(1);
}
output_codecs = grow_array(output_codecs, sizeof(*output_codecs), &nb_output_codecs, nb_output_codecs + 1);
if(!video_stream_copy){
if (video_codec_name) {
codec_id = find_codec_or_die(video_codec_name, AVMEDIA_TYPE_VIDEO, 1,
avcodec_opts[AVMEDIA_TYPE_VIDEO]->strict_std_compliance);
codec = avcodec_find_encoder_by_name(video_codec_name);
output_codecs[nb_output_codecs-1] = codec;
} else {
codec_id = av_guess_codec(oc->oformat, NULL, oc->filename, NULL, AVMEDIA_TYPE_VIDEO);
codec = avcodec_find_encoder(codec_id);
}
}
avcodec_get_context_defaults3(st->codec, codec);
bitstream_filters[nb_output_files] =
grow_array(bitstream_filters[nb_output_files],
sizeof(*bitstream_filters[nb_output_files]),
&nb_bitstream_filters[nb_output_files], oc->nb_streams);
bitstream_filters[nb_output_files][oc->nb_streams - 1]= video_bitstream_filters;
video_bitstream_filters= NULL;
avcodec_thread_init(st->codec, thread_count);
video_enc = st->codec;
if(video_codec_tag)
video_enc->codec_tag= video_codec_tag;
if( (video_global_header&1)
|| (video_global_header==0 && (oc->oformat->flags & AVFMT_GLOBALHEADER))){
video_enc->flags |= CODEC_FLAG_GLOBAL_HEADER;
avcodec_opts[AVMEDIA_TYPE_VIDEO]->flags|= CODEC_FLAG_GLOBAL_HEADER;
}
if(video_global_header&2){
video_enc->flags2 |= CODEC_FLAG2_LOCAL_HEADER;
avcodec_opts[AVMEDIA_TYPE_VIDEO]->flags2|= CODEC_FLAG2_LOCAL_HEADER;
}
if (video_stream_copy) {
st->stream_copy = 1;
video_enc->codec_type = AVMEDIA_TYPE_VIDEO;
video_enc->sample_aspect_ratio =
st->sample_aspect_ratio = av_d2q(frame_aspect_ratio*frame_height/frame_width, 255);
} else {
const char *p;
int i;
AVRational fps= frame_rate.num ? frame_rate : (AVRational){25,1};
video_enc->codec_id = codec_id;
set_context_opts(video_enc, avcodec_opts[AVMEDIA_TYPE_VIDEO], AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM, codec);
if (codec && codec->supported_framerates && !force_fps)
fps = codec->supported_framerates[av_find_nearest_q_idx(fps, codec->supported_framerates)];
video_enc->time_base.den = fps.num;
video_enc->time_base.num = fps.den;
video_enc->width = frame_width;
video_enc->height = frame_height;
video_enc->sample_aspect_ratio = av_d2q(frame_aspect_ratio*video_enc->height/video_enc->width, 255);
video_enc->pix_fmt = frame_pix_fmt;
st->sample_aspect_ratio = video_enc->sample_aspect_ratio;
choose_pixel_fmt(st, codec);
if (intra_only)
video_enc->gop_size = 0;
if (video_qscale || same_quality) {
video_enc->flags |= CODEC_FLAG_QSCALE;
video_enc->global_quality=
st->quality = FF_QP2LAMBDA * video_qscale;
}
if(intra_matrix)
video_enc->intra_matrix = intra_matrix;
if(inter_matrix)
video_enc->inter_matrix = inter_matrix;
p= video_rc_override_string;
for(i=0; p; i++){
int start, end, q;
int e=sscanf(p, "%d,%d,%d", &start, &end, &q);
if(e!=3){
fprintf(stderr, "error parsing rc_override\n");
ffmpeg_exit(1);
}
video_enc->rc_override=
av_realloc(video_enc->rc_override,
sizeof(RcOverride)*(i+1));
video_enc->rc_override[i].start_frame= start;
video_enc->rc_override[i].end_frame = end;
if(q>0){
video_enc->rc_override[i].qscale= q;
video_enc->rc_override[i].quality_factor= 1.0;
}
else{
video_enc->rc_override[i].qscale= 0;
video_enc->rc_override[i].quality_factor= -q/100.0;
}
p= strchr(p, '/');
if(p) p++;
}
video_enc->rc_override_count=i;
if (!video_enc->rc_initial_buffer_occupancy)
video_enc->rc_initial_buffer_occupancy = video_enc->rc_buffer_size*3/4;
video_enc->me_threshold= me_threshold;
video_enc->intra_dc_precision= intra_dc_precision - 8;
if (do_psnr)
video_enc->flags|= CODEC_FLAG_PSNR;
if (do_pass) {
if (do_pass == 1) {
video_enc->flags |= CODEC_FLAG_PASS1;
} else {
video_enc->flags |= CODEC_FLAG_PASS2;
}
}
}
if (video_language) {
av_metadata_set2(&st->metadata, "language", video_language, 0);
av_freep(&video_language);
}
video_disable = 0;
av_freep(&video_codec_name);
video_stream_copy = 0;
frame_pix_fmt = PIX_FMT_NONE;
}
| 1threat |
Windows Calculator returns wrong result : <p>if I enter 4*12+2*14 in the built-in Windows Calculator, the result is 700. Why is that?</p>
<p>Sorry if the question's already been asked, it's my first post.
Thank you!</p>
| 0debug |
Realm Swift Models separate or not? : <p>I'm new to the world of iOS and Swift and am working on a new app which I want to use Realm for persistence. I have Entities in my code already which my Services access and populate for an HTTP API endpoint. </p>
<p>Now I want to persist certain Entities and wanted advice as to whether I should create new Realm specific Models for each of my entities to read and write from Realm. Or should I convert all my existing plain Swift Entities to Realm Entities. At first this felt wrong as I would be passing Realm Entities al around my app instead of just in the persistence layer. </p>
<p>However, the alternative is that every time I read/write entities to Realm I need to convert them back and forth from Entities to Realm Entities. </p>
<p>Any advice on the best approach to this? </p>
<p>Thanks </p>
| 0debug |
e1000e_write_lgcy_rx_descr(E1000ECore *core, uint8_t *desc,
struct NetRxPkt *pkt,
const E1000E_RSSInfo *rss_info,
uint16_t length)
{
uint32_t status_flags, rss, mrq;
uint16_t ip_id;
struct e1000_rx_desc *d = (struct e1000_rx_desc *) desc;
memset(d, 0, sizeof(*d));
assert(!rss_info->enabled);
d->length = cpu_to_le16(length);
e1000e_build_rx_metadata(core, pkt, pkt != NULL,
rss_info,
&rss, &mrq,
&status_flags, &ip_id,
&d->special);
d->errors = (uint8_t) (le32_to_cpu(status_flags) >> 24);
d->status = (uint8_t) le32_to_cpu(status_flags);
}
| 1threat |
static inline void RENAME(rgb24ToUV)(uint8_t *dstU, uint8_t *dstV, const uint8_t *src1, const uint8_t *src2, long width, uint32_t *unused)
{
#if COMPILE_TEMPLATE_MMX
assert(src1==src2);
RENAME(bgr24ToUV_mmx)(dstU, dstV, src1, width, PIX_FMT_RGB24);
#else
int i;
assert(src1==src2);
for (i=0; i<width; i++) {
int r= src1[3*i + 0];
int g= src1[3*i + 1];
int b= src1[3*i + 2];
dstU[i]= (RU*r + GU*g + BU*b + (257<<(RGB2YUV_SHIFT-1)))>>RGB2YUV_SHIFT;
dstV[i]= (RV*r + GV*g + BV*b + (257<<(RGB2YUV_SHIFT-1)))>>RGB2YUV_SHIFT;
}
#endif
}
| 1threat |
i have writen api in php and again i am consuming my api in php,but delete method is not working : i have writen api in php and consuming that api in php to make crud operations,but delete method is not working.
}
else if($_SERVER['REQUEST_METHOD'] == 'DELETE'){
$del_id = $_GET["issuse_id"];
$url = 'http://192.168.0.82/ITS/Controller/issues'. $del_id;
$ch = curl_init($url);
curl_setopt($ch, CURLOPT_CUSTOMREQUEST, 'DELETE');
curl_setopt($ch, CURLOPT_RETURNTRANSFER, true);
$response_json = curl_exec($ch);
curl_close($ch);
$data=json_decode($response_json, true);
return $data; | 0debug |
destructuring assignment default value : <p>I am learning javascript and I got kind of stuck with ES6 syntax while trying to give a default value to a variable when destructuring.
Basically, I am trying to assign a variable giving the value of an object's property to it and if the value is false/null/undefined, I want it to be an empty object.
For example,</p>
<pre><code>let foo = {
prop1: 'hello!',
prop2: null
}
const prop1 = foo.prop1 || {}
const prop2 = foo.prop2 || {}
console.log(prop1) // hello!
console.log(prop2) // {}
</code></pre>
<p>👆This is what I want and 👇 is ES6 sugar syntax I thought as equivalent to above(it doesn't work tho..)</p>
<pre><code>let foo = {
prop1: 'hello!',
prop2: null
}
const { prop1 = {} } = foo
const { prop2 = {} } = foo
console.log(prop1) // hello!
console.log(prop2) // null
</code></pre>
<p>but somehow, sometimes it seems working in React, but the other times it doesn't.. is it compatibility problem? so confusing!</p>
| 0debug |
PrintLn why not? : Please help me. Ecilipse said that in the if clause is a mistake. I am not allowed to use Println there and
if (eingabe=z1)
is also a mistake (eingabe= z1) is red
`import java.util.*;
import java.util.Scanner;
public class Benedikt {
public static void main(String[] args)
{
Random zufall = new Random ();
int eingabe;
Scanner leser1= new Scanner (System.in);
int z1;
z1 = zufall.nextInt(6)+1;
System.out.println("I have got a number between 1 and 6 in my mind. %n");
System.out.println("Try to guess it!");
//Eingabe
eingabe=leser1.nextInt(eingabe);
if (eingabe=z1)
{System.out.println("You have found out the number! My number was %d",
z1);}
else if (eingabe>z1 )
{System.out.println("My number is smaller than %d", eingabe);}
else if (eingabe<z1)
{System.out.println("My number is smaller than %d", eingabe);}
}
} `
| 0debug |
Android XML FrameLayout and BottomNavigationView : <p>How can I make the FrameLayout end with the beginning of the BottomNavigationView? The content of the FrameLayout is being overlapped by the navigation view.</p>
<p>This is the xml:</p>
<pre><code><?xml version="1.0" encoding="utf-8"?>
<android.support.constraint.ConstraintLayout
xmlns:android="http://schemas.android.com/apk/res/android"
xmlns:app="http://schemas.android.com/apk/res-auto"
xmlns:tools="http://schemas.android.com/tools"
android:id="@+id/container"
android:layout_width="match_parent"
android:layout_height="match_parent" tools:context="...">
<FrameLayout
android:id="@+id/content"
android:layout_width="match_parent"
android:layout_height="match_parent">
</FrameLayout>
<android.support.design.widget.BottomNavigationView
android:id="@+id/navigation"
android:layout_width="0dp"
android:layout_height="wrap_content"
android:layout_marginEnd="0dp"
android:layout_marginStart="0dp"
android:background="?android:attr/windowBackground"
app:layout_constraintBottom_toBottomOf="parent"
app:layout_constraintLeft_toLeftOf="parent"
app:layout_constraintRight_toRightOf="parent"
app:menu="@menu/navigation" />
</android.support.constraint.ConstraintLayout>
</code></pre>
| 0debug |
Is floating point arithmetic stable? : <p>I know that floating point numbers have precision and the digits after the precision is not reliable.</p>
<p>But what if the equation used to calculate the number is the same? can I assume the outcome would be the same too?</p>
<p>for example we have two float numbers <code>x</code> and <code>y</code>. Can we assume the result <code>x/y</code> from machine 1 is exactly the same as the result from machine 2? I.E. <code>==</code> comparison would return true</p>
| 0debug |
NBDExport *nbd_export_new(BlockBackend *blk, off_t dev_offset, off_t size,
uint32_t nbdflags, void (*close)(NBDExport *),
Error **errp)
{
NBDExport *exp = g_malloc0(sizeof(NBDExport));
exp->refcount = 1;
QTAILQ_INIT(&exp->clients);
exp->blk = blk;
exp->dev_offset = dev_offset;
exp->nbdflags = nbdflags;
exp->size = size < 0 ? blk_getlength(blk) : size;
if (exp->size < 0) {
error_setg_errno(errp, -exp->size,
"Failed to determine the NBD export's length");
goto fail;
}
exp->size -= exp->size % BDRV_SECTOR_SIZE;
exp->close = close;
exp->ctx = blk_get_aio_context(blk);
blk_ref(blk);
blk_add_aio_context_notifier(blk, blk_aio_attached, blk_aio_detach, exp);
exp->eject_notifier.notify = nbd_eject_notifier;
blk_add_remove_bs_notifier(blk, &exp->eject_notifier);
aio_context_acquire(exp->ctx);
blk_invalidate_cache(blk, NULL);
aio_context_release(exp->ctx);
return exp;
fail:
g_free(exp);
return NULL;
}
| 1threat |
static int rtl8139_cplus_transmit_one(RTL8139State *s)
{
if (!rtl8139_transmitter_enabled(s))
{
DPRINTF("+++ C+ mode: transmitter disabled\n");
return 0;
}
if (!rtl8139_cp_transmitter_enabled(s))
{
DPRINTF("+++ C+ mode: C+ transmitter disabled\n");
return 0 ;
}
int descriptor = s->currCPlusTxDesc;
dma_addr_t cplus_tx_ring_desc = rtl8139_addr64(s->TxAddr[0], s->TxAddr[1]);
cplus_tx_ring_desc += 16 * descriptor;
DPRINTF("+++ C+ mode reading TX descriptor %d from host memory at "
"%08x %08x = 0x"DMA_ADDR_FMT"\n", descriptor, s->TxAddr[1],
s->TxAddr[0], cplus_tx_ring_desc);
uint32_t val, txdw0,txdw1,txbufLO,txbufHI;
pci_dma_read(&s->dev, cplus_tx_ring_desc, (uint8_t *)&val, 4);
txdw0 = le32_to_cpu(val);
pci_dma_read(&s->dev, cplus_tx_ring_desc+4, (uint8_t *)&val, 4);
txdw1 = le32_to_cpu(val);
pci_dma_read(&s->dev, cplus_tx_ring_desc+8, (uint8_t *)&val, 4);
txbufLO = le32_to_cpu(val);
pci_dma_read(&s->dev, cplus_tx_ring_desc+12, (uint8_t *)&val, 4);
txbufHI = le32_to_cpu(val);
DPRINTF("+++ C+ mode TX descriptor %d %08x %08x %08x %08x\n", descriptor,
txdw0, txdw1, txbufLO, txbufHI);
#define CP_TX_OWN (1<<31)
#define CP_TX_EOR (1<<30)
#define CP_TX_FS (1<<29)
#define CP_TX_LS (1<<28)
#define CP_TX_LGSEN (1<<27)
#define CP_TC_LGSEN_MSS_MASK ((1 << 12) - 1)
#define CP_TX_IPCS (1<<18)
#define CP_TX_UDPCS (1<<17)
#define CP_TX_TCPCS (1<<16)
#define CP_TX_BUFFER_SIZE (1<<16)
#define CP_TX_BUFFER_SIZE_MASK (CP_TX_BUFFER_SIZE - 1)
#define CP_TX_TAGC (1<<17)
#define CP_TX_VLAN_TAG_MASK ((1<<16) - 1)
#define CP_TX_STATUS_UNF (1<<25)
#define CP_TX_STATUS_TES (1<<23)
#define CP_TX_STATUS_OWC (1<<22)
#define CP_TX_STATUS_LNKF (1<<21)
#define CP_TX_STATUS_EXC (1<<20)
if (!(txdw0 & CP_TX_OWN))
{
DPRINTF("C+ Tx mode : descriptor %d is owned by host\n", descriptor);
return 0 ;
}
DPRINTF("+++ C+ Tx mode : transmitting from descriptor %d\n", descriptor);
if (txdw0 & CP_TX_FS)
{
DPRINTF("+++ C+ Tx mode : descriptor %d is first segment "
"descriptor\n", descriptor);
s->cplus_txbuffer_offset = 0;
}
int txsize = txdw0 & CP_TX_BUFFER_SIZE_MASK;
dma_addr_t tx_addr = rtl8139_addr64(txbufLO, txbufHI);
if (!s->cplus_txbuffer)
{
s->cplus_txbuffer_len = CP_TX_BUFFER_SIZE;
s->cplus_txbuffer = g_malloc(s->cplus_txbuffer_len);
s->cplus_txbuffer_offset = 0;
DPRINTF("+++ C+ mode transmission buffer allocated space %d\n",
s->cplus_txbuffer_len);
}
while (s->cplus_txbuffer && s->cplus_txbuffer_offset + txsize >= s->cplus_txbuffer_len)
{
s->cplus_txbuffer_len += CP_TX_BUFFER_SIZE;
s->cplus_txbuffer = g_realloc(s->cplus_txbuffer, s->cplus_txbuffer_len);
DPRINTF("+++ C+ mode transmission buffer space changed to %d\n",
s->cplus_txbuffer_len);
}
if (!s->cplus_txbuffer)
{
DPRINTF("+++ C+ mode transmiter failed to reallocate %d bytes\n",
s->cplus_txbuffer_len);
++s->tally_counters.TxERR;
++s->tally_counters.TxAbt;
return 0;
}
DPRINTF("+++ C+ mode transmit reading %d bytes from host memory at "
DMA_ADDR_FMT" to offset %d\n", txsize, tx_addr,
s->cplus_txbuffer_offset);
pci_dma_read(&s->dev, tx_addr,
s->cplus_txbuffer + s->cplus_txbuffer_offset, txsize);
s->cplus_txbuffer_offset += txsize;
if (txdw0 & CP_TX_EOR)
{
s->currCPlusTxDesc = 0;
}
else
{
++s->currCPlusTxDesc;
if (s->currCPlusTxDesc >= 64)
s->currCPlusTxDesc = 0;
}
txdw0 &= ~CP_RX_OWN;
txdw0 &= ~CP_TX_STATUS_UNF;
txdw0 &= ~CP_TX_STATUS_TES;
txdw0 &= ~CP_TX_STATUS_OWC;
txdw0 &= ~CP_TX_STATUS_LNKF;
txdw0 &= ~CP_TX_STATUS_EXC;
val = cpu_to_le32(txdw0);
pci_dma_write(&s->dev, cplus_tx_ring_desc, (uint8_t *)&val, 4);
if (txdw0 & CP_TX_LS)
{
uint8_t dot1q_buffer_space[VLAN_HLEN];
uint16_t *dot1q_buffer;
DPRINTF("+++ C+ Tx mode : descriptor %d is last segment descriptor\n",
descriptor);
uint8_t *saved_buffer = s->cplus_txbuffer;
int saved_size = s->cplus_txbuffer_offset;
int saved_buffer_len = s->cplus_txbuffer_len;
if (txdw1 & CP_TX_TAGC) {
DPRINTF("+++ C+ Tx mode : inserting vlan tag with ""tci: %u\n",
bswap16(txdw1 & CP_TX_VLAN_TAG_MASK));
dot1q_buffer = (uint16_t *) dot1q_buffer_space;
dot1q_buffer[0] = cpu_to_be16(ETH_P_8021Q);
dot1q_buffer[1] = cpu_to_le16(txdw1 & CP_TX_VLAN_TAG_MASK);
} else {
dot1q_buffer = NULL;
}
s->cplus_txbuffer = NULL;
s->cplus_txbuffer_offset = 0;
s->cplus_txbuffer_len = 0;
if (txdw0 & (CP_TX_IPCS | CP_TX_UDPCS | CP_TX_TCPCS | CP_TX_LGSEN))
{
DPRINTF("+++ C+ mode offloaded task checksum\n");
ip_header *ip = NULL;
int hlen = 0;
uint8_t ip_protocol = 0;
uint16_t ip_data_len = 0;
uint8_t *eth_payload_data = NULL;
size_t eth_payload_len = 0;
int proto = be16_to_cpu(*(uint16_t *)(saved_buffer + 12));
if (proto == ETH_P_IP)
{
DPRINTF("+++ C+ mode has IP packet\n");
eth_payload_data = saved_buffer + ETH_HLEN;
eth_payload_len = saved_size - ETH_HLEN;
ip = (ip_header*)eth_payload_data;
if (IP_HEADER_VERSION(ip) != IP_HEADER_VERSION_4) {
DPRINTF("+++ C+ mode packet has bad IP version %d "
"expected %d\n", IP_HEADER_VERSION(ip),
IP_HEADER_VERSION_4);
ip = NULL;
} else {
hlen = IP_HEADER_LENGTH(ip);
ip_protocol = ip->ip_p;
ip_data_len = be16_to_cpu(ip->ip_len) - hlen;
}
}
if (ip)
{
if (txdw0 & CP_TX_IPCS)
{
DPRINTF("+++ C+ mode need IP checksum\n");
if (hlen<sizeof(ip_header) || hlen>eth_payload_len) {
}
else
{
ip->ip_sum = 0;
ip->ip_sum = ip_checksum(ip, hlen);
DPRINTF("+++ C+ mode IP header len=%d checksum=%04x\n",
hlen, ip->ip_sum);
}
}
if ((txdw0 & CP_TX_LGSEN) && ip_protocol == IP_PROTO_TCP)
{
int large_send_mss = (txdw0 >> 16) & CP_TC_LGSEN_MSS_MASK;
DPRINTF("+++ C+ mode offloaded task TSO MTU=%d IP data %d "
"frame data %d specified MSS=%d\n", ETH_MTU,
ip_data_len, saved_size - ETH_HLEN, large_send_mss);
int tcp_send_offset = 0;
int send_count = 0;
uint8_t saved_ip_header[60];
memcpy(saved_ip_header, eth_payload_data, hlen);
uint8_t *data_to_checksum = eth_payload_data + hlen - 12;
tcp_header *p_tcp_hdr = (tcp_header*)(eth_payload_data + hlen);
int tcp_hlen = TCP_HEADER_DATA_OFFSET(p_tcp_hdr);
int tcp_data_len = ip_data_len - tcp_hlen;
int tcp_chunk_size = ETH_MTU - hlen - tcp_hlen;
DPRINTF("+++ C+ mode TSO IP data len %d TCP hlen %d TCP "
"data len %d TCP chunk size %d\n", ip_data_len,
tcp_hlen, tcp_data_len, tcp_chunk_size);
int is_last_frame = 0;
for (tcp_send_offset = 0; tcp_send_offset < tcp_data_len; tcp_send_offset += tcp_chunk_size)
{
uint16_t chunk_size = tcp_chunk_size;
if (tcp_send_offset + tcp_chunk_size >= tcp_data_len)
{
is_last_frame = 1;
chunk_size = tcp_data_len - tcp_send_offset;
}
DPRINTF("+++ C+ mode TSO TCP seqno %08x\n",
be32_to_cpu(p_tcp_hdr->th_seq));
memcpy(data_to_checksum, saved_ip_header + 12, 8);
DPRINTF("+++ C+ mode TSO calculating TCP checksum for "
"packet with %d bytes data\n", tcp_hlen +
chunk_size);
if (tcp_send_offset)
{
memcpy((uint8_t*)p_tcp_hdr + tcp_hlen, (uint8_t*)p_tcp_hdr + tcp_hlen + tcp_send_offset, chunk_size);
}
if (!is_last_frame)
{
TCP_HEADER_CLEAR_FLAGS(p_tcp_hdr, TCP_FLAG_PUSH|TCP_FLAG_FIN);
}
ip_pseudo_header *p_tcpip_hdr = (ip_pseudo_header *)data_to_checksum;
p_tcpip_hdr->zeros = 0;
p_tcpip_hdr->ip_proto = IP_PROTO_TCP;
p_tcpip_hdr->ip_payload = cpu_to_be16(tcp_hlen + chunk_size);
p_tcp_hdr->th_sum = 0;
int tcp_checksum = ip_checksum(data_to_checksum, tcp_hlen + chunk_size + 12);
DPRINTF("+++ C+ mode TSO TCP checksum %04x\n",
tcp_checksum);
p_tcp_hdr->th_sum = tcp_checksum;
memcpy(eth_payload_data, saved_ip_header, hlen);
ip->ip_len = cpu_to_be16(hlen + tcp_hlen + chunk_size);
ip->ip_id = cpu_to_be16(tcp_send_offset/tcp_chunk_size + be16_to_cpu(ip->ip_id));
ip->ip_sum = 0;
ip->ip_sum = ip_checksum(eth_payload_data, hlen);
DPRINTF("+++ C+ mode TSO IP header len=%d "
"checksum=%04x\n", hlen, ip->ip_sum);
int tso_send_size = ETH_HLEN + hlen + tcp_hlen + chunk_size;
DPRINTF("+++ C+ mode TSO transferring packet size "
"%d\n", tso_send_size);
rtl8139_transfer_frame(s, saved_buffer, tso_send_size,
0, (uint8_t *) dot1q_buffer);
p_tcp_hdr->th_seq = cpu_to_be32(chunk_size + be32_to_cpu(p_tcp_hdr->th_seq));
++send_count;
}
saved_size = 0;
}
else if (txdw0 & (CP_TX_TCPCS|CP_TX_UDPCS))
{
DPRINTF("+++ C+ mode need TCP or UDP checksum\n");
uint8_t saved_ip_header[60];
memcpy(saved_ip_header, eth_payload_data, hlen);
uint8_t *data_to_checksum = eth_payload_data + hlen - 12;
memcpy(data_to_checksum, saved_ip_header + 12, 8);
if ((txdw0 & CP_TX_TCPCS) && ip_protocol == IP_PROTO_TCP)
{
DPRINTF("+++ C+ mode calculating TCP checksum for "
"packet with %d bytes data\n", ip_data_len);
ip_pseudo_header *p_tcpip_hdr = (ip_pseudo_header *)data_to_checksum;
p_tcpip_hdr->zeros = 0;
p_tcpip_hdr->ip_proto = IP_PROTO_TCP;
p_tcpip_hdr->ip_payload = cpu_to_be16(ip_data_len);
tcp_header* p_tcp_hdr = (tcp_header *) (data_to_checksum+12);
p_tcp_hdr->th_sum = 0;
int tcp_checksum = ip_checksum(data_to_checksum, ip_data_len + 12);
DPRINTF("+++ C+ mode TCP checksum %04x\n",
tcp_checksum);
p_tcp_hdr->th_sum = tcp_checksum;
}
else if ((txdw0 & CP_TX_UDPCS) && ip_protocol == IP_PROTO_UDP)
{
DPRINTF("+++ C+ mode calculating UDP checksum for "
"packet with %d bytes data\n", ip_data_len);
ip_pseudo_header *p_udpip_hdr = (ip_pseudo_header *)data_to_checksum;
p_udpip_hdr->zeros = 0;
p_udpip_hdr->ip_proto = IP_PROTO_UDP;
p_udpip_hdr->ip_payload = cpu_to_be16(ip_data_len);
udp_header *p_udp_hdr = (udp_header *) (data_to_checksum+12);
p_udp_hdr->uh_sum = 0;
int udp_checksum = ip_checksum(data_to_checksum, ip_data_len + 12);
DPRINTF("+++ C+ mode UDP checksum %04x\n",
udp_checksum);
p_udp_hdr->uh_sum = udp_checksum;
}
memcpy(eth_payload_data, saved_ip_header, hlen);
}
}
}
++s->tally_counters.TxOk;
DPRINTF("+++ C+ mode transmitting %d bytes packet\n", saved_size);
rtl8139_transfer_frame(s, saved_buffer, saved_size, 1,
(uint8_t *) dot1q_buffer);
if (!s->cplus_txbuffer)
{
s->cplus_txbuffer = saved_buffer;
s->cplus_txbuffer_len = saved_buffer_len;
s->cplus_txbuffer_offset = 0;
}
else
{
g_free(saved_buffer);
}
}
else
{
DPRINTF("+++ C+ mode transmission continue to next descriptor\n");
}
return 1;
}
| 1threat |
Bundle install could not find compatible versions for gem "bundler" : <p>When I type bundle install I got this error. I tried to find solution but nothing solve my case. Please help me.</p>
<pre><code> Bundler could not find compatible versions for gem "bundler":
In Gemfile:
rails (= 4.1.8) was resolved to 4.1.8, which depends on
bundler (< 2.0, >= 1.3.0)
Current Bundler version:
bundler (2.0.1)
This Gemfile requires a different version of Bundler.
Perhaps you need to update Bundler by running `gem install bundler`?
Could not find gem 'bundler (< 2.0, >= 1.3.0)', which is required by gem 'rails
(= 4.1.8)', in any of the sources.
Bundler could not find compatible versions for gem "rails":
In Gemfile:
rails (= 4.1.8)
animate-rails was resolved to 1.0.10, which depends on
rails
</code></pre>
| 0debug |
how to split cvs file : "0.0.0.0,""0.255.255.255"",""ZZ"""
"1.0.0.0,""1.0.0.255"",""AU"""
"1.0.1.0,""1.0.3.255"",""CN"""
"1.0.4.0,""1.0.7.255"",""AU"""
"1.0.8.0,""1.0.15.255"",""CN"""
"1.0.16.0,""1.0.31.255"",""JP"""
"1.0.32.0,""1.0.63.255"",""CN"""
"1.0.64.0,""1.0.127.255"",""JP"""
"1.0.128.0,""1.0.255.255"",""TH"""
"1.1.0.0,""1.1.0.255"",""CN"""
"1.1.1.0,""1.1.1.255"",""AU"""
"1.1.2.0,""1.1.63.255"",""CN"""
"1.1.64.0,""1.1.127.255"",""JP"""
"1.1.128.0,""1.1.255.255"",""TH"""
How can split this cvs file.For example 0.0.0.0 0.255.255.255 ZZ for first row and how can add datagridview with 3columns
| 0debug |
How properly generate bootstrap grid via loop using Razor? : <p>I use ASP.NET MVC and bootstrap. I have many objects (>2) in collection and for each need a <code><div class="col-xs-6"></code> but with only 2 cols in a row. How to achive this using loop?
There is 1 way but I am looking for something better:</p>
<pre><code>@model List<Object>
@using (Html.BeginForm("ActionName", "ControllerName"))
{
<div class="row">
@for (int i = 0; i < Model.Count; i++)
{
if (i % 2 != 0) {
<div class="row">
<div class="col-xs-6">
@Html.TextBoxFor(o => o[i].Value)
</div>
</div>
} else {
<div class="col-xs-6">
@Html.TextBoxFor(o => o[i].Value)
</div>
}
}
</div>
}
</code></pre>
| 0debug |
int qemu_paio_write(struct qemu_paiocb *aiocb)
{
return qemu_paio_submit(aiocb, QEMU_PAIO_WRITE);
}
| 1threat |
rails-rspec error cannot load such file -- rspec/core/formatters/progress_formatter : <p>I've broken my rails-rspec. I switched to a different gemset to run a 3rd party test. When I returned to my 2.3.0(default) gemset, I had the following errors.</p>
<p>running rspec gets:</p>
<pre><code>/.rvm/gems/ruby-2.3.0/gems/activesupport-4.2.0/lib/active_support/dependencies.rb:274:in `require': cannot load such file -- rspec/core/formatters/progress_formatter (LoadError)
</code></pre>
<p>running rails generate rspec:install returns:</p>
<pre><code>Could not find generator 'rspec:install'. Maybe you meant 'devise:install' or ...
</code></pre>
<p>I have tried uninstalling and reinstalling, but errors persist.</p>
<p>Running rspec -v returns: </p>
<pre><code>- rspec-core 3.6.0
- rspec-expectations 3.6.0
- rspec-mocks 3.6.0
- rspec-rails 3.6.1
- rspec-support 3.6.0
</code></pre>
<p>It seems that ruby cannot find rspec-core. I have tried the workaround from <a href="https://stackoverflow.com/questions/45673087/cant-find-gem-rspec-core-0-a-gemgemnotfoundexception-when-running-on-j">this post</a> without success. Thank you in advance for any insight you might provide.</p>
<p>Running</p>
<blockquote>
<p>rails 4.2.0, ruby 2.3.0</p>
</blockquote>
| 0debug |
How to find index of a substring? : <p>Looking for Elixir equivalent of Ruby's:</p>
<pre><code>"john.snow@domain.com".index("@") # => 9
"john.snow@domain.com".index("domain") # => 10
</code></pre>
| 0debug |
Static fields in kotlin : <p>I'm new to Kotlin and trying to convert my android activities to Kotlin, but I've no Idea how to declare a field as static.
I want to convert the following code...</p>
<pre><code>public class MainActivity extends AppCompatActivity {
static String TAG = "MainActicity";
@Override
protected void onCreate(Bundle savedInstanceState) {
....
}
....
}
</code></pre>
<p>Can anybody just help?
Thanks in advance!</p>
| 0debug |
two dots as python argument for slicing : Say I have an array and I want a function to select some of its columns based on an argument **a** that is pre-defined :
extracted_columns = array[:,a].
If I have e.g. **a** = np.arange(10), I'll get the first ten columns,
What if I want to define a so that all the columns are selected without knowing the size of the array ?
I'd like to set a = : so that the function does
extracted_columns = array[:,:]
but it seems `:` can't be passed as an argument. I also tried a = None, but this gives me an array of dimensions 3 with the second dimension equal to 1.
Is there a nice way of doing it ?
Thanks,
| 0debug |
I am having trouble with passing a parameter : <p>I have stored the (num1) and (num2) vairables earlier and i am trying to display the results in a prompt box after but cant get it too work, i have only started learning javascript and help would be appreciated </p>
<pre><code> function calculateNums = (num1 + num2);
numResult(calculateNums);
alert("The sum of " +numResult);
</code></pre>
| 0debug |
/* Return the index of the CPU whose state is in the global
 * cpu_single_env.  Assumes cpu_single_env is non-NULL, i.e. that we
 * are called from vCPU execution context -- TODO confirm callers. */
static int get_current_cpu(void)
{
    return cpu_single_env->cpu_index;
}
| 1threat |
/*
 * Handle an ARM semihosting (Angel) call.
 *
 * The request number is taken from the guest's r0 and a pointer to the
 * argument block from r1; the value returned here becomes the guest's
 * r0.  When a debugger is attached (use_gdb_syscalls()), file and I/O
 * requests are forwarded to gdb's File-I/O protocol instead of being
 * performed directly on the host.
 */
uint32_t do_arm_semihosting(CPUState *env)
{
    target_ulong args;
    char * s;
    int nr;
    uint32_t ret;
    uint32_t len;
#ifdef CONFIG_USER_ONLY
    TaskState *ts = env->opaque;
#else
    CPUState *ts = env;
#endif
    nr = env->regs[0];
    args = env->regs[1];
    switch (nr) {
    case SYS_OPEN:
        if (!(s = lock_user_string(ARG(0))))
            return (uint32_t)-1;
        /* NOTE(review): the early returns below exit without calling
         * unlock_user(s, ...) -- verify this leak of the locked user
         * string is acceptable in all configurations. */
        if (ARG(1) >= 12)
            return (uint32_t)-1;
        /* ":tt" is the Angel name for the console device; mode < 4
         * means a read mode, so map it to stdin, otherwise stdout. */
        if (strcmp(s, ":tt") == 0) {
            if (ARG(1) < 4)
                return STDIN_FILENO;
            else
                return STDOUT_FILENO;
        }
        if (use_gdb_syscalls()) {
            gdb_do_syscall(arm_semi_cb, "open,%s,%x,1a4", ARG(0),
                           (int)ARG(2)+1, gdb_open_modeflags[ARG(1)]);
            return env->regs[0];
        } else {
            ret = set_swi_errno(ts, open(s, open_modeflags[ARG(1)], 0644));
        }
        unlock_user(s, ARG(0), 0);
        return ret;
    case SYS_CLOSE:
        if (use_gdb_syscalls()) {
            gdb_do_syscall(arm_semi_cb, "close,%x", ARG(0));
            return env->regs[0];
        } else {
            return set_swi_errno(ts, close(ARG(0)));
        }
    case SYS_WRITEC:
        /* Write a single character (read from guest memory) to stderr. */
        {
            char c;
            if (get_user_u8(c, args))
                return (uint32_t)-1;
            if (use_gdb_syscalls()) {
                gdb_do_syscall(arm_semi_cb, "write,2,%x,1", args);
                return env->regs[0];
            } else {
                return write(STDERR_FILENO, &c, 1);
            }
        }
    case SYS_WRITE0:
        /* Write a NUL-terminated guest string to stderr. */
        if (!(s = lock_user_string(args)))
            return (uint32_t)-1;
        len = strlen(s);
        if (use_gdb_syscalls()) {
            gdb_do_syscall(arm_semi_cb, "write,2,%x,%x\n", args, len);
            ret = env->regs[0];
        } else {
            ret = write(STDERR_FILENO, s, len);
        }
        unlock_user(s, args, 0);
        return ret;
    case SYS_WRITE:
        /* Per the semihosting ABI, returns the number of bytes NOT
         * written (0 on complete success). */
        len = ARG(2);
        if (use_gdb_syscalls()) {
            arm_semi_syscall_len = len;
            gdb_do_syscall(arm_semi_cb, "write,%x,%x,%x", ARG(0), ARG(1), len);
            return env->regs[0];
        } else {
            if (!(s = lock_user(VERIFY_READ, ARG(1), len, 1)))
                return (uint32_t)-1;
            ret = set_swi_errno(ts, write(ARG(0), s, len));
            unlock_user(s, ARG(1), 0);
            if (ret == (uint32_t)-1)
                return -1;
            return len - ret;
        }
    case SYS_READ:
        /* Mirror of SYS_WRITE: returns the number of bytes NOT read. */
        len = ARG(2);
        if (use_gdb_syscalls()) {
            arm_semi_syscall_len = len;
            gdb_do_syscall(arm_semi_cb, "read,%x,%x,%x", ARG(0), ARG(1), len);
            return env->regs[0];
        } else {
            if (!(s = lock_user(VERIFY_WRITE, ARG(1), len, 0)))
                return (uint32_t)-1;
            /* Retry the host read if interrupted by a signal. */
            do
                ret = set_swi_errno(ts, read(ARG(0), s, len));
            while (ret == -1 && errno == EINTR);
            unlock_user(s, ARG(1), len);
            if (ret == (uint32_t)-1)
                return -1;
            return len - ret;
        }
    case SYS_READC:
        /* Not implemented: always reports character 0. */
        return 0;
    case SYS_ISTTY:
        if (use_gdb_syscalls()) {
            gdb_do_syscall(arm_semi_cb, "isatty,%x", ARG(0));
            return env->regs[0];
        } else {
            return isatty(ARG(0));
        }
    case SYS_SEEK:
        if (use_gdb_syscalls()) {
            gdb_do_syscall(arm_semi_cb, "lseek,%x,%x,0", ARG(0), ARG(1));
            return env->regs[0];
        } else {
            ret = set_swi_errno(ts, lseek(ARG(0), ARG(1), SEEK_SET));
            if (ret == (uint32_t)-1)
                return -1;
            return 0;
        }
    case SYS_FLEN:
        /* File length via fstat; the gdb path stashes the stat buffer
         * just below the guest stack pointer. */
        if (use_gdb_syscalls()) {
            gdb_do_syscall(arm_semi_flen_cb, "fstat,%x,%x",
                           ARG(0), env->regs[13]-64);
            return env->regs[0];
        } else {
            struct stat buf;
            ret = set_swi_errno(ts, fstat(ARG(0), &buf));
            if (ret == (uint32_t)-1)
                return -1;
            return buf.st_size;
        }
    case SYS_TMPNAM:
        /* Not implemented. */
        return -1;
    case SYS_REMOVE:
        if (use_gdb_syscalls()) {
            gdb_do_syscall(arm_semi_cb, "unlink,%s", ARG(0), (int)ARG(1)+1);
            ret = env->regs[0];
        } else {
            if (!(s = lock_user_string(ARG(0))))
                return (uint32_t)-1;
            ret = set_swi_errno(ts, remove(s));
            unlock_user(s, ARG(0), 0);
        }
        return ret;
    case SYS_RENAME:
        if (use_gdb_syscalls()) {
            gdb_do_syscall(arm_semi_cb, "rename,%s,%s",
                           ARG(0), (int)ARG(1)+1, ARG(2), (int)ARG(3)+1);
            return env->regs[0];
        } else {
            char *s2;
            s = lock_user_string(ARG(0));
            s2 = lock_user_string(ARG(2));
            if (!s || !s2)
                ret = (uint32_t)-1;
            else
                ret = set_swi_errno(ts, rename(s, s2));
            if (s2)
                unlock_user(s2, ARG(2), 0);
            if (s)
                unlock_user(s, ARG(0), 0);
            return ret;
        }
    case SYS_CLOCK:
        /* Centiseconds of CPU time consumed. */
        return clock() / (CLOCKS_PER_SEC / 100);
    case SYS_TIME:
        return set_swi_errno(ts, time(NULL));
    case SYS_SYSTEM:
        if (use_gdb_syscalls()) {
            gdb_do_syscall(arm_semi_cb, "system,%s", ARG(0), (int)ARG(1)+1);
            return env->regs[0];
        } else {
            if (!(s = lock_user_string(ARG(0))))
                return (uint32_t)-1;
            ret = set_swi_errno(ts, system(s));
            unlock_user(s, ARG(0), 0);
            return ret;
        }
    case SYS_ERRNO:
#ifdef CONFIG_USER_ONLY
        return ts->swi_errno;
#else
        return syscall_err;
#endif
    case SYS_GET_CMDLINE:
        /* Copy the command line into the guest buffer, joining the
         * NUL-separated argv strings with spaces; ARG(1) is updated
         * with the resulting length. */
#ifdef CONFIG_USER_ONLY
        {
            char *arm_cmdline_buffer;
            const char *host_cmdline_buffer;
            unsigned int i;
            unsigned int arm_cmdline_len = ARG(1);
            unsigned int host_cmdline_len =
                ts->info->arg_end-ts->info->arg_start;
            if (!arm_cmdline_len || host_cmdline_len > arm_cmdline_len) {
                return -1;
            }
            if (!host_cmdline_len) {
                /* Empty command line: just write the terminator. */
                arm_cmdline_buffer = lock_user(VERIFY_WRITE, ARG(0), 1, 0);
                arm_cmdline_buffer[0] = 0;
                unlock_user(arm_cmdline_buffer, ARG(0), 1);
                SET_ARG(1, 0);
                return 0;
            }
            arm_cmdline_buffer =
                lock_user(VERIFY_WRITE, ARG(0), host_cmdline_len, 0);
            host_cmdline_buffer =
                lock_user(VERIFY_READ, ts->info->arg_start,
                          host_cmdline_len, 1);
            if (arm_cmdline_buffer && host_cmdline_buffer)
            {
                memcpy(arm_cmdline_buffer, host_cmdline_buffer,
                       host_cmdline_len);
                /* Replace the argument separators (NULs) with spaces,
                 * leaving the final terminator intact. */
                for (i = 0; i < host_cmdline_len-1; i++) {
                    if (arm_cmdline_buffer[i] == 0) {
                        arm_cmdline_buffer[i] = ' ';
                    }
                }
                SET_ARG(1, host_cmdline_len-1);
            }
            unlock_user(arm_cmdline_buffer, ARG(0), host_cmdline_len);
            unlock_user((void*)host_cmdline_buffer, ts->info->arg_start, 0);
            return (arm_cmdline_buffer && host_cmdline_buffer) ? 0 : -1;
        }
#else
        return -1;
#endif
    case SYS_HEAPINFO:
        /* Report heap base/limit and stack base to the guest C library
         * via a four-word block at ARG(0). */
        {
            uint32_t *ptr;
            uint32_t limit;
#ifdef CONFIG_USER_ONLY
            if (!ts->heap_limit) {
                long ret;
                /* Probe for a usable heap limit by shrinking the brk
                 * request until it succeeds. */
                ts->heap_base = do_brk(0);
                limit = ts->heap_base + ARM_ANGEL_HEAP_SIZE;
                for (;;) {
                    ret = do_brk(limit);
                    if (ret != -1)
                        break;
                    limit = (ts->heap_base >> 1) + (limit >> 1);
                }
                ts->heap_limit = limit;
            }
            if (!(ptr = lock_user(VERIFY_WRITE, ARG(0), 16, 0)))
                return (uint32_t)-1;
            ptr[0] = tswap32(ts->heap_base);
            ptr[1] = tswap32(ts->heap_limit);
            ptr[2] = tswap32(ts->stack_base);
            ptr[3] = tswap32(0);
            unlock_user(ptr, ARG(0), 16);
#else
            /* System emulation: carve the heap out of guest RAM. */
            limit = ram_size;
            if (!(ptr = lock_user(VERIFY_WRITE, ARG(0), 16, 0)))
                return (uint32_t)-1;
            ptr[0] = tswap32(limit / 2);
            ptr[1] = tswap32(limit);
            ptr[2] = tswap32(limit);
            ptr[3] = tswap32(0);
            unlock_user(ptr, ARG(0), 16);
#endif
            return 0;
        }
    case SYS_EXIT:
        gdb_exit(env, 0);
        exit(0);
    default:
        fprintf(stderr, "qemu: Unsupported SemiHosting SWI 0x%02x\n", nr);
        cpu_dump_state(env, stderr, fprintf, 0);
        abort();
    }
}
| 1threat |
/*
 * Decide whether the virtio disk should be treated as an ECKD DASD.
 *
 * A 4k block size is required in every case.  When the disk nature was
 * only guessed, the block size alone decides; otherwise the reported
 * geometry must additionally match the characteristic ECKD layout of
 * 15 heads and 12 sectors per track.
 */
bool virtio_disk_is_eckd(void)
{
    if (blk_cfg.blk_size != 4096) {
        return false;
    }
    if (guessed_disk_nature) {
        return true;
    }
    return (blk_cfg.geometry.heads == 15) &&
           (blk_cfg.geometry.sectors == 12);
}
| 1threat |
/*
 * (Re-)establish legacy INTx interrupt routing for an assigned PCI
 * device.
 *
 * Any currently active IRQ assignment (INTx, host-MSI-backed INTx, MSI
 * or MSI-X) is torn down first; then, unless the INTx route is
 * disabled, the host IRQ is assigned either as host-INTx or as
 * host-MSI depending on the device's capabilities and the
 * ASSIGNED_DEVICE_PREFER_MSI_MASK feature flag.
 *
 * Returns 0 on success or a negative errno value on failure.
 */
static int assign_intx(AssignedDevice *dev, Error **errp)
{
    AssignedIRQType new_type;
    PCIINTxRoute intx_route;
    bool intx_host_msi;
    int r;
    Error *local_err = NULL;
    /* Interrupt pin 0 means the device does not use an INTx line. */
    if (assigned_dev_pci_read_byte(&dev->dev, PCI_INTERRUPT_PIN) == 0) {
        pci_device_set_intx_routing_notifier(&dev->dev, NULL);
        return 0;
    }
    verify_irqchip_in_kernel(&local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return -ENOTSUP;
    }
    pci_device_set_intx_routing_notifier(&dev->dev,
                                         assigned_dev_update_irq_routing);
    intx_route = pci_device_route_intx_to_irq(&dev->dev, dev->intpin);
    assert(intx_route.mode != PCI_INTX_INVERTED);
    /* Nothing to do if the route did not actually change. */
    if (!pci_intx_route_changed(&dev->intx_route, &intx_route)) {
        return 0;
    }
    /* Deassign whatever IRQ type was previously active. */
    switch (dev->assigned_irq_type) {
    case ASSIGNED_IRQ_INTX_HOST_INTX:
    case ASSIGNED_IRQ_INTX_HOST_MSI:
        intx_host_msi = dev->assigned_irq_type == ASSIGNED_IRQ_INTX_HOST_MSI;
        r = kvm_device_intx_deassign(kvm_state, dev->dev_id, intx_host_msi);
        break;
    case ASSIGNED_IRQ_MSI:
        r = kvm_device_msi_deassign(kvm_state, dev->dev_id);
        break;
    case ASSIGNED_IRQ_MSIX:
        r = kvm_device_msix_deassign(kvm_state, dev->dev_id);
        break;
    default:
        r = 0;
        break;
    }
    if (r) {
        perror("assign_intx: deassignment of previous interrupt failed");
    }
    dev->assigned_irq_type = ASSIGNED_IRQ_NONE;
    if (intx_route.mode == PCI_INTX_DISABLED) {
        dev->intx_route = intx_route;
        return 0;
    }
retry:
    /* Prefer a host-MSI-backed assignment when the feature flag asks
     * for it and the device exposes the MSI capability. */
    if (dev->features & ASSIGNED_DEVICE_PREFER_MSI_MASK &&
        dev->cap.available & ASSIGNED_DEVICE_CAP_MSI) {
        intx_host_msi = true;
        new_type = ASSIGNED_IRQ_INTX_HOST_MSI;
    } else {
        intx_host_msi = false;
        new_type = ASSIGNED_IRQ_INTX_HOST_INTX;
    }
    r = kvm_device_intx_assign(kvm_state, dev->dev_id, intx_host_msi,
                               intx_route.irq);
    if (r < 0) {
        /* -EIO with MSI available: host-side INTx sharing unsupported,
         * so flip to the MSI path and retry once. */
        if (r == -EIO && !(dev->features & ASSIGNED_DEVICE_PREFER_MSI_MASK) &&
            dev->cap.available & ASSIGNED_DEVICE_CAP_MSI) {
            error_report("Host-side INTx sharing not supported, "
                         "using MSI instead");
            error_printf("Some devices do not work properly in this mode.\n");
            dev->features |= ASSIGNED_DEVICE_PREFER_MSI_MASK;
            goto retry;
        }
        error_setg_errno(errp, -r, "Failed to assign irq for \"%s\"",
                         dev->dev.qdev.id);
        error_append_hint(errp, "Perhaps you are assigning a device "
                          "that shares an IRQ with another device?\n");
        return r;
    }
    dev->intx_route = intx_route;
    dev->assigned_irq_type = new_type;
    return r;
}
| 1threat |
/*
 * Emit an s390 conditional branch comparing r1 against c2 (a register
 * or an immediate, selected by c2const) and jumping to TCG label
 * labelno when condition c holds.
 *
 * With the general-instruction-extension facility available, fused
 * compare-and-branch instructions are used when the operand form and
 * immediate range allow; otherwise a separate compare (tgen_cmp) is
 * followed by a plain branch on the resulting condition code.
 */
static void tgen_brcond(TCGContext *s, TCGType type, TCGCond c,
                        TCGReg r1, TCGArg c2, int c2const, int labelno)
{
    int cc;
    if (facilities & FACILITY_GEN_INST_EXT) {
        bool is_unsigned = is_unsigned_cond(c);
        bool in_range;
        S390Opcode opc;
        cc = tcg_cond_to_s390_cond[c];
        if (!c2const) {
            /* Register-register compare-and-branch. */
            opc = (type == TCG_TYPE_I32
                   ? (is_unsigned ? RIE_CLRJ : RIE_CRJ)
                   : (is_unsigned ? RIE_CLGRJ : RIE_CGRJ));
            tgen_compare_branch(s, opc, cc, r1, c2, labelno);
            return;
        }
        /* Immediate compare-and-branch encodes only an 8-bit
         * immediate (signed or unsigned as appropriate), so verify
         * the constant fits before using the fused form. */
        if (type == TCG_TYPE_I32) {
            if (is_unsigned) {
                opc = RIE_CLIJ;
                in_range = (uint32_t)c2 == (uint8_t)c2;
            } else {
                opc = RIE_CIJ;
                in_range = (int32_t)c2 == (int8_t)c2;
            }
        } else {
            if (is_unsigned) {
                opc = RIE_CLGIJ;
                in_range = (uint64_t)c2 == (uint8_t)c2;
            } else {
                opc = RIE_CGIJ;
                in_range = (int64_t)c2 == (int8_t)c2;
            }
        }
        if (in_range) {
            tgen_compare_imm_branch(s, opc, cc, r1, c2, labelno);
            return;
        }
    }
    /* Fallback: explicit compare, then branch on condition code. */
    cc = tgen_cmp(s, type, c, r1, c2, c2const);
    tgen_branch(s, cc, labelno);
}
| 1threat |
How to convert a list of dictionaries to a single dictionary : mylist = [{'a': 1, 'b': 2}, {'c': 3, 'd': 4}, {'e': 5, 'f': 6}]
i want it as
myList ={'a': 1, 'b': 2,'c': 3, 'd': 4,'e': 5, 'f': 6}
| 0debug |
Getting some part of String c# : <p>თანხის შეტანა ბარათი მარჯანიშვილის ფილიალი VISA CLASSIC GE****************0082 GEL
თანხის შეტანა შემნახველი მარჯანიშვილის ფილიალი ჩემი სეიფი GE****************0018 GEL@
თანხის შეტანა ბარათი ცენტრალური ფილიალი MC STANDARD GE****************0006 USD კურსი - 2.5@</p>
<p>this is my strings kinda similar but its different always . I would like to get string from English Letter . in 1st option from "V" in second option from "G" in third option from "M" . easily to say when he sees english letter i want to get all the string from that point. </p>
| 0debug |
how to creat tel input according to the snippet : <p><a href="https://i.stack.imgur.com/fkl2W.jpg" rel="nofollow noreferrer"><img src="https://i.stack.imgur.com/fkl2W.jpg" alt="enter image description here"></a></p>
<p>how can I create tel input ? just 2 input type tel ?
Please, help with css positioning too.</p>
| 0debug |
Multiple function tasks php : I have a question about a function using php.
Can i for example have a function with the same name do multiple things.
e.g
function name () {
echo "this";
}
function name () {
echo "that";
}
name();
so the same function doing multiple tasks.
| 0debug |
Trying to detect whether a variable/string is mixed case in Python : I'm trying to write a simple program that detects whether a string is upper case, lower case or mixed case.
I tried x.ismixed but it doesn't work
I have also tried x == mixed.case
This is the code:
x = input('Loud: ')
if x.isupper():
print("Quiet:", x.lower())
elif x.ismixed():
print (x.lower)
else:
print (x.lower)
The error code comes up with
built-in method lower of str object at 0xf70445e0
The output should be x.lower() but instead comes up with the code above.
Output/example: HEllO ThEre to hello there.
| 0debug |
Authorise Pinterest App : <p>I have added a collaborator for my Pinterest app, however, when accessing the colaberators account and clicking on the application there is no 'authorise' button or anything similar. The "You still need at least 1 collaborator to authorize your app before you can submit" warning still shows on the collaberators account.
Ant help would be appreciated, thanks!</p>
| 0debug |
/*
 * PowerPC fctid helper: convert the double-precision value in @arg
 * (raw 64-bit image) to a 64-bit signed integer using the current
 * rounding mode in env->fp_status.
 *
 * Signalling NaNs raise VXSNAN|VXCVI; quiet NaNs and infinities raise
 * VXCVI.  In both invalid cases the result comes from
 * fload_invalid_op_excp().
 */
uint64_t helper_fctid (uint64_t arg)
{
    CPU_DoubleU farg;
    farg.ll = arg;
    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
    } else if (unlikely(float64_is_nan(farg.d) || float64_is_infinity(farg.d))) {
        /* qNaN or infinity conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
    } else {
        farg.ll = float64_to_int64(farg.d, &env->fp_status);
    }
    return farg.ll;
}
| 1threat |
Plot solutions of bivariate polynomial equation : <p>How can I plot the solutions of this equation in R?</p>
<blockquote>
<p>(x²+y²-1)³=x²y³</p>
</blockquote>
| 0debug |
/*
 * Handle the ATA IDENTIFY DEVICE command.
 *
 * For hard-disk / CFATA drives with a backing store, the identify data
 * is generated into the IO buffer, a 512-byte PIO transfer is started,
 * the IRQ is raised, and false is returned.  For CD-ROM drives or a
 * missing backing store the command is aborted (after setting the
 * ATAPI signature for CDs) and true is returned.
 */
static bool cmd_identify(IDEState *s, uint8_t cmd)
{
    if (!s->bs || s->drive_kind == IDE_CD) {
        /* CD-ROMs answer IDENTIFY with the ATAPI signature. */
        if (s->drive_kind == IDE_CD) {
            ide_set_signature(s);
        }
        ide_abort_command(s);
        return true;
    }

    if (s->drive_kind == IDE_CFATA) {
        ide_cfata_identify(s);
    } else {
        ide_identify(s);
    }
    s->status = READY_STAT | SEEK_STAT;
    ide_transfer_start(s, s->io_buffer, 512, ide_transfer_stop);
    ide_set_irq(s->bus);
    return false;
}
| 1threat |
Find Minimum Possible difference between numbers with unknown digits : <p>I have some cases as follows.</p>
<ul>
<li>1? 2?</li>
<li>?2? ??3</li>
<li>? ?</li>
<li>?5 ?0</li>
</ul>
<p>Now what I am supposed to do is to find some values in place of question marks, that would give produce the minimum possible difference between the 2 numbers.</p>
<p>Answers Should be like</p>
<ul>
<li><p>19 20 </p></li>
<li><p>023 023</p></li>
<li><p>0 0</p></li>
<li><p>05 00</p></li>
</ul>
<p>Note : the number which will be produced after the minimum absolute difference between the 2 values must be smallest. As in, the last case could be 15 and 10 with the absolute difference to be 5 but it is invalid.</p>
<p>I tried some permutation combination ideas for replacing the question marks for both numbers individually and then find out the number but the length of the number could go up to 18 digits per number. Hence I believe it wouldn't be a good idea.</p>
<p>Then I tried to search for similar questions but that didn't help.
I still think that <code>regex</code> could be helpful to solve this question but am stuck with how to do it. </p>
<p>Any help is welcome!! Thanx!</p>
<p>The language shall be Php.. I am working with Php.</p>
| 0debug |
How to call python function from java code without use of jpython : I am trying to call my python code from java code , without use of jpython, as my code contains numpy, scipy and other module which is not available on jpython, more over i want to create a api of my python code for java. | 0debug |
How do I implement this for my android app : <p>Hi Guys I recently have seen an app that allows you to copy a url of and instagram post and share it to the app from instagram</p>
<p><a href="https://youtu.be/8lr8EgCvLTw?t=6" rel="nofollow noreferrer">you can see what I mean here:</a></p>
<p>How do i make it so my app can do this from a web browser, I want theuser to be able to copy the url and send it to my app.</p>
| 0debug |
/*
 * Convert a float32 to an unsigned 32-bit integer, rounding toward
 * zero.  The conversion goes through the signed 64-bit path; values
 * outside [0, 0xffffffff] saturate (negative -> 0, too large ->
 * 0xffffffff) and raise the invalid-operation flag.
 */
uint32 float32_to_uint32_round_to_zero( float32 a STATUS_PARAM )
{
    int64_t wide = float32_to_int64_round_to_zero(a STATUS_VAR);

    if (wide >= 0 && wide <= 0xffffffff) {
        return wide;
    }
    /* Out of range: saturate and flag the invalid conversion. */
    float_raise( float_flag_invalid STATUS_VAR);
    return (wide < 0) ? 0 : 0xffffffff;
}
| 1threat |
Using a Word2Vec model pre-trained on wikipedia : <p>I need to use gensim to get vector representations of words, and I figure the best thing to use would be a word2vec module that's pre-trained on the english wikipedia corpus. Does anyone know where to download it, how to install it, and how to use gensim to create the vectors?</p>
| 0debug |
/*
 * Put the controlling terminal into raw-ish mode (no echo, no canonical
 * line editing, 8-bit characters) and install termination signal
 * handlers.
 *
 * The previous termios state is saved in oldtty and restored by
 * term_exit(), registered via atexit().  In daemon mode the terminal
 * is left untouched; signal handlers are installed either way.
 */
static void term_init(void)
{
#if HAVE_TERMIOS_H
    if(!run_as_daemon){
        struct termios tty;
        tcgetattr (0, &tty);
        oldtty = tty;
        /* Restore the saved terminal state on normal process exit. */
        atexit(term_exit);
        tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP
                          |INLCR|IGNCR|ICRNL|IXON);
        tty.c_oflag |= OPOST;
        tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN);
        tty.c_cflag &= ~(CSIZE|PARENB);
        tty.c_cflag |= CS8;
        /* Block until at least one byte is available, no timeout. */
        tty.c_cc[VMIN] = 1;
        tty.c_cc[VTIME] = 0;
        tcsetattr (0, TCSANOW, &tty);
        signal(SIGQUIT, sigterm_handler);
    }
#endif
    /* NOTE(review): calling avformat_network_deinit() inside an *init*
     * routine looks out of place -- confirm this is intentional and
     * not meant for the exit/cleanup path. */
    avformat_network_deinit();
    signal(SIGINT , sigterm_handler);
    signal(SIGTERM, sigterm_handler);
#ifdef SIGXCPU
    signal(SIGXCPU, sigterm_handler);
#endif
}
| 1threat |
/*
 * Parse a PCI device/function address of the form "slot" or "slot.fn"
 * (both hexadecimal) and store the devfn encoding slot<<3 | fn into
 * the property pointed to by @prop.
 *
 * Returns 0 on success, -EINVAL on malformed input, trailing junk, or
 * out-of-range values (slot must fit in 5 bits, fn in 3 bits).
 */
static int parse_pci_devfn(DeviceState *dev, Property *prop, const char *str)
{
    uint32_t *ptr = qdev_get_prop_ptr(dev, prop);
    unsigned int slot, fn, n;

    if (sscanf(str, "%x.%x%n", &slot, &fn, &n) != 2) {
        /* No ".fn" part: accept a bare slot with function 0. */
        if (sscanf(str, "%x%n", &slot, &n) != 1) {
            return -EINVAL;
        }
        fn = 0;
    }
    /* Reject trailing characters and out-of-range slot/function. */
    if (str[n] != '\0' || slot > 31 || fn > 7) {
        return -EINVAL;
    }
    *ptr = (slot << 3) | fn;
    return 0;
}
| 1threat |
"App Rejected" banner stays up on Google Play Console despite updates/modifications submitted : <p>Upon learning that the v1 of my app was rejected for metadata issues, I had updated the metadata and also submitted a new version of the app via release management. This was completed by hitting the "Resubmit App" button.</p>
<p>However, after doing both, I still see the banner staying up on the top of the Play Console:</p>
<p><a href="https://i.stack.imgur.com/8Vdz2.png" rel="noreferrer"><img src="https://i.stack.imgur.com/8Vdz2.png" alt="enter image description here"></a></p>
<p>Additionally, towards the bottom, the "Resubmit App" button is greyed out with the "Processing Update" arrow frozen: </p>
<p><a href="https://i.stack.imgur.com/7IoNx.png" rel="noreferrer"><img src="https://i.stack.imgur.com/7IoNx.png" alt="enter image description here"></a></p>
<p>Have I missed a step, or is this the normal process with the Google Play Console?</p>
<p>Thanks!</p>
| 0debug |
Pandas pivot_table, sort values by columns : <p>I am a new user to Pandas and I love it!</p>
<p>I am trying to create a pivot table in Pandas. Once I have pivot table the way I want, I would like to rank the values by the columns.</p>
<p>I've attached an image from Excel as it is easier to see in tabular format what I am trying to achieve. <a href="https://i.stack.imgur.com/92iOQ.png" rel="noreferrer">Link to image</a></p>
<p>I've searched through stackoverflow but am having trouble finding an answer. I tried using .sort() but this doesn't work. Any help will be appreciated.</p>
<p>Thanks in advance</p>
| 0debug |
How can I cache external URLs using service worker? : <p>I've been playing with the Google Web Starter Kit (<a href="https://github.com/google/web-starter-kit" rel="noreferrer">https://github.com/google/web-starter-kit</a>) and have got a little progressive web app working but am stuck on one thing: caching static files from external CDNs. E.g. I'm using MDL icons from <a href="https://fonts.googleapis.com/icon?family=Material+Icons" rel="noreferrer">https://fonts.googleapis.com/icon?family=Material+Icons</a> I can't see a way to cache the request as the service worker only responds to URLs within my app domain.</p>
<p>Options I see:
1. Download the file and put it in a vendor folder. Advantages: easy to set up SW cache. Disadvantages: file won't stay up to date as new icons are added (though that won't really matter as my code will only use the icons available).</p>
<ol start="2">
<li><p>Use the NPM repo: <a href="https://www.npmjs.com/package/material-design-icons" rel="noreferrer">https://www.npmjs.com/package/material-design-icons</a> and use build step to copy CSS file from node_modules. Advantages: will allow auto-updating from NPM. Disadvantages: slightly more complex to set up.</p></li>
<li><p>Some fancy proxy method that would allow me to use the SW to cache an external URL. e.g. myapp.com/loadExternal?url=<a href="https://fonts.googleapis.com/icon?family=Material+Icons" rel="noreferrer">https://fonts.googleapis.com/icon?family=Material+Icons</a></p></li>
</ol>
<p>I'm leaning towards 2 right now but would be cool to know if 3 is possible.</p>
| 0debug |
Objective C close keyboard when return key is pressed : <p>I have a button that when pressed brings up a keyboard for a textfield:</p>
<pre><code>- (IBAction)textButtonPress:(id)sender {
[self.textField becomeFirstResponder];
}
</code></pre>
<p>The problem I have is that when I press the return button on the keyboard nothing happens. How can I make the keyboard automatically close when the return key is pressed?</p>
| 0debug |
How to call Linux commands through WSL in Windows command line? : <p>I would like to use <code>ls</code>, and many other Linux tools, from the command line on Windows 10. I know that there is GnuWin32 and other binary replacements, but none of those works as elegantly as the Windows Subsystem for Linux (WSL); including the update mechanisms.</p>
<p>With WSL, one can call <code>ls ...</code> from the command line via <code>bash -c "ls ..."</code>, which works great. Ideally, however, I would just type <code>ls</code> on the command line. I can create an <code>ls.bat</code> which basically does <code>@bash -c "ls %*"</code> - but I would have to do that for every single command I want to export. Is there a more direct way of exporting <code>ls</code> (and many others) to the command line?</p>
| 0debug |
Using DateTime.TryParseExact C# : <p>I dont know how TryParseExact method works in a sample date format:
This is the format: (<strong>Beginning: 2019.06.30. 14:56:43</strong>)
And how to tell to TryParseExact this format?</p>
| 0debug |
Showing a Due Date : I'M Using the Excel App on an android phone.
Trying to set up a budget sheet that shows my currently applicable due date.
The bill is due on the 1st of the month.
I would like it to update automatically. For example if it shows that today is Nov 2nd the due date should say Dec 1st.
I'm fairly new at excel... | 0debug |
/*
 * Kick an xHCI endpoint context: (re)start processing of transfer TRBs
 * queued on the endpoint's ring, or on the ring of stream @streamid
 * when the endpoint uses streams.
 *
 * Any pending retry transfer is re-submitted first; then TRB chains are
 * fetched from the ring and fired as control or data transfers until
 * the ring is empty, the endpoint halts, a transfer NAKs, or the
 * per-kick TRANSFER_LIMIT is reached.
 */
static void xhci_kick_epctx(XHCIEPContext *epctx, unsigned int streamid)
{
    XHCIState *xhci = epctx->xhci;
    XHCIStreamContext *stctx = NULL;
    XHCITransfer *xfer;
    XHCIRing *ring;
    USBEndpoint *ep = NULL;
    uint64_t mfindex;
    unsigned int count = 0;
    int length;
    int i;
    trace_usb_xhci_ep_kick(epctx->slotid, epctx->epid, streamid);
    /* Guard against re-entrant kicks of the same endpoint. */
    assert(!epctx->kick_active);
    /* Bail out if the slot's port has no attached device. */
    if (!xhci->slots[epctx->slotid - 1].uport ||
        !xhci->slots[epctx->slotid - 1].uport->dev ||
        !xhci->slots[epctx->slotid - 1].uport->dev->attached) {
        return;
    }
    /* First re-submit a transfer that previously NAKed or was
     * deferred to a later (micro)frame. */
    if (epctx->retry) {
        XHCITransfer *xfer = epctx->retry;
        trace_usb_xhci_xfer_retry(xfer);
        assert(xfer->running_retry);
        if (xfer->timed_xfer) {
            /* Wait for the scheduled microframe before kicking. */
            mfindex = xhci_mfindex_get(xhci);
            xhci_check_intr_iso_kick(xhci, xfer, epctx, mfindex);
            if (xfer->running_retry) {
                return;
            }
            xfer->timed_xfer = 0;
            xfer->running_retry = 1;
        }
        if (xfer->iso_xfer) {
            /* Isochronous transfers may not NAK. */
            if (xhci_setup_packet(xfer) < 0) {
                return;
            }
            usb_handle_packet(xfer->packet.ep->dev, &xfer->packet);
            assert(xfer->packet.status != USB_RET_NAK);
            xhci_try_complete_packet(xfer);
        } else {
            if (xhci_setup_packet(xfer) < 0) {
                return;
            }
            usb_handle_packet(xfer->packet.ep->dev, &xfer->packet);
            if (xfer->packet.status == USB_RET_NAK) {
                /* Still NAKing: keep it queued for the next kick. */
                return;
            }
            xhci_try_complete_packet(xfer);
        }
        assert(!xfer->running_retry);
        if (xfer->complete) {
            xhci_ep_free_xfer(epctx->retry);
        }
        epctx->retry = NULL;
    }
    if (epctx->state == EP_HALTED) {
        DPRINTF("xhci: ep halted, not running schedule\n");
        return;
    }
    /* Select the ring: either the requested stream's ring or the
     * endpoint's single transfer ring. */
    if (epctx->nr_pstreams) {
        uint32_t err;
        stctx = xhci_find_stream(epctx, streamid, &err);
        if (stctx == NULL) {
            return;
        }
        ring = &stctx->ring;
        xhci_set_ep_state(xhci, epctx, stctx, EP_RUNNING);
    } else {
        ring = &epctx->ring;
        streamid = 0;
        xhci_set_ep_state(xhci, epctx, NULL, EP_RUNNING);
    }
    assert(ring->dequeue != 0);
    epctx->kick_active++;
    /* Main dispatch loop: pull complete TRB chains off the ring and
     * fire them. */
    while (1) {
        length = xhci_ring_chain_length(xhci, ring);
        if (length <= 0) {
            break;
        }
        xfer = xhci_ep_alloc_xfer(epctx, length);
        if (xfer == NULL) {
            break;
        }
        for (i = 0; i < length; i++) {
            TRBType type;
            type = xhci_ring_fetch(xhci, ring, &xfer->trbs[i], NULL);
            assert(type);
        }
        xfer->streamid = streamid;
        /* Endpoint 1 is the default control endpoint. */
        if (epctx->epid == 1) {
            xhci_fire_ctl_transfer(xhci, xfer);
        } else {
            xhci_fire_transfer(xhci, xfer, epctx);
        }
        if (xfer->complete) {
            xhci_ep_free_xfer(xfer);
            xfer = NULL;
        }
        if (epctx->state == EP_HALTED) {
            break;
        }
        if (xfer != NULL && xfer->running_retry) {
            DPRINTF("xhci: xfer nacked, stopping schedule\n");
            epctx->retry = xfer;
            break;
        }
        /* Cap the number of transfers handled per kick. */
        if (count++ > TRANSFER_LIMIT) {
            trace_usb_xhci_enforced_limit("transfers");
            break;
        }
    }
    xhci_set_ep_state(xhci, epctx, stctx, epctx->state);
    epctx->kick_active--;
    /* Let the device flush anything it queued for this endpoint. */
    ep = xhci_epid_to_usbep(epctx);
    if (ep) {
        usb_device_flush_ep_queue(ep->dev, ep);
    }
}
| 1threat |
TypeScript and ReactDOM.render method doesn't accept component : <h2>TL;DR</h2>
<p>I'm using TypeScript and React. I've defined my <code>AppContainer.tsx</code> component, exported it as default. I'm consuming this in the file <code>app.ts</code> where <code>ReactDOM</code> lives to render it to the targetted dom element. But there I receive the following errors (see image). <strong>Read below for more information and links to GitHub repo.</strong></p>
<p><a href="https://i.stack.imgur.com/qPHxN.jpg" rel="noreferrer"><img src="https://i.stack.imgur.com/qPHxN.jpg" alt="enter image description here"></a></p>
<p><strong>Question:</strong> What am I doing, or interpreting, wrong? From all code examples I've seen this should work - but maybe (clearly) I'm missing something. <strong>Below is more info and links to the full GitHub repo</strong>.</p>
<hr>
<h2>Environment</h2>
<ul>
<li>react 15.4.2</li>
<li>react-dom 15.4.2</li>
<li>typings: <a href="https://github.com/aredfox/electron-starter/blob/master/typings.json" rel="noreferrer">https://github.com/aredfox/electron-starter/blob/master/typings.json</a></li>
<li>tsconfig: <a href="https://github.com/aredfox/electron-starter/blob/master/tsconfig.json" rel="noreferrer">https://github.com/aredfox/electron-starter/blob/master/tsconfig.json</a></li>
</ul>
<h2>Code</h2>
<h3>File '/components/AppContainer.tsx'</h3>
<pre><code>/// <reference path="../../../typings/index.d.ts" />
// Top level application component
/*------------------------------------------------------------------------------------*/
/** IMPORTS **/
import * as React from 'react';
import { Component } from 'react';
/*------------------------------------------------------------------------------------*/
/*///*/
/*------------------------------------------------------------------------------------*/
/** COMPONENT **/
export default class AppContainer extends React.Component<{}, {}> {
render() {
return ( <div /> );
}
}
/*------------------------------------------------------------------------------------*/
/*///*/
</code></pre>
<p><a href="https://github.com/aredfox/electron-starter/blob/master/src/views/components/AppContainer.tsx" rel="noreferrer">https://github.com/aredfox/electron-starter/blob/master/src/views/components/AppContainer.tsx</a></p>
<h3>File 'app.ts'</h3>
<pre><code>/// <reference path="../../typings/index.d.ts" />
/// <reference path="interfaces.d.ts" />
// Setting up react inside the host html
/*------------------------------------------------------------------------------------*/
/** IMPORTS **/
import * as React from 'react';
import * as ReactDOM from 'react-dom';
// Components
import AppContainer from './components/AppContainer';
/*------------------------------------------------------------------------------------*/
/*///*/
/*------------------------------------------------------------------------------------*/
/** RENDER TO DOM **/
ReactDOM.render(
<AppContainer/>,
document.getElementById('AppContainer')
);
/*------------------------------------------------------------------------------------*/
/*///*/
</code></pre>
<p><a href="https://github.com/aredfox/electron-starter/blob/master/src/views/app.ts" rel="noreferrer">https://github.com/aredfox/electron-starter/blob/master/src/views/app.ts</a></p>
<h2>Quick Links</h2>
<ul>
<li>Git repo: <a href="https://github.com/aredfox/electron-starter" rel="noreferrer">https://github.com/aredfox/electron-starter</a>
<ul>
<li><code>app.ts</code> file <a href="https://github.com/aredfox/electron-starter/blob/master/src/views/app.ts" rel="noreferrer">https://github.com/aredfox/electron-starter/blob/master/src/views/app.ts</a></li>
<li><code>AppContainer.tsx</code> file <a href="https://github.com/aredfox/electron-starter/blob/master/src/views/components/AppContainer.tsx" rel="noreferrer">https://github.com/aredfox/electron-starter/blob/master/src/views/components/AppContainer.tsx</a></li>
</ul></li>
</ul>
| 0debug |
Why am I getting an error when I use print(list(my_iter)) in the code below? : **[When I call print(list(my_iter)) before printing print(my_iter.__next__()), it throws an error.][1]**
[enter image description here][2]
but if I comment it. it running fine. why it is happening
[1]: https://i.stack.imgur.com/kCoVr.png
[2]: https://i.stack.imgur.com/9a0yQ.pngt | 0debug |
How to use Nix to setup a development environment? : <p>Let's say I need PostgreSQL 9.6.3 and Ruby 2.3.1 and various other tools. I can't find a tutorial that explains what I need to do.</p>
<p>From the Nix manual, I seem to need to write a <a href="https://nixos.org/nix/manual/#ch-simple-expression" rel="noreferrer">Nix expression</a> to install the needed dependencies, but I can't make the leap from:</p>
<pre><code>{ stdenv, fetchurl, perl }:
stdenv.mkDerivation {
name = "hello-2.1.1";
builder = ./builder.sh;
src = fetchurl {
url = ftp://ftp.nluug.nl/pub/gnu/hello/hello-2.1.1.tar.gz;
md5 = "70c9ccf9fac07f762c24f2df2290784d";
};
inherit perl;
}
</code></pre>
<p>to the expression that will install the proper PostgreSQL and Ruby versions. It is absolutely unclear to me where to even put the file that installs PostgreSQL and Ruby, or how to run a single file in a given directory.</p>
<p>Can someone provide pointers to such tutorials, or point me in the right direction?</p>
| 0debug |
/*
 * Decode one raw AAC frame from the bitstream reader @gb.
 *
 * Optionally parses an ADTS header, then iterates over the syntax
 * elements (SCE/CPE/CCE/LFE/DSE/PCE/FIL) until TYPE_END, converts the
 * decoded spectra to samples, and fills the output AVFrame.
 * *got_frame_ptr is set when audio samples were produced.  Returns 0
 * on success, a negative value on error (with any trial output
 * configuration rolled back).
 */
static int aac_decode_frame_int(AVCodecContext *avctx, void *data,
                                int *got_frame_ptr, GetBitContext *gb)
{
    AACContext *ac = avctx->priv_data;
    ChannelElement *che = NULL, *che_prev = NULL;
    enum RawDataBlockType elem_type, elem_type_prev = TYPE_END;
    int err, elem_id;
    int samples = 0, multiplier, audio_found = 0, pce_found = 0;
    /* 0xfff syncword: this frame carries an ADTS header. */
    if (show_bits(gb, 12) == 0xfff) {
        if (parse_adts_frame_header(ac, gb) < 0) {
            av_log(avctx, AV_LOG_ERROR, "Error decoding AAC frame header.\n");
            err = -1;
            goto fail;
        }
        if (ac->oc[1].m4ac.sampling_index > 12) {
            av_log(ac->avctx, AV_LOG_ERROR, "invalid sampling rate index %d\n", ac->oc[1].m4ac.sampling_index);
            err = -1;
            goto fail;
        }
    }
    ac->tags_mapped = 0;
    /* Walk the raw data blocks until the TYPE_END terminator. */
    while ((elem_type = get_bits(gb, 3)) != TYPE_END) {
        elem_id = get_bits(gb, 4);
        if (elem_type < TYPE_DSE) {
            /* Audio-carrying element: map it to a channel element. */
            if (!(che=get_che(ac, elem_type, elem_id))) {
                av_log(ac->avctx, AV_LOG_ERROR, "channel element %d.%d is not allocated\n",
                       elem_type, elem_id);
                err = -1;
                goto fail;
            }
            samples = 1024;
        }
        switch (elem_type) {
        case TYPE_SCE:
            err = decode_ics(ac, &che->ch[0], gb, 0, 0);
            audio_found = 1;
            break;
        case TYPE_CPE:
            err = decode_cpe(ac, gb, che);
            audio_found = 1;
            break;
        case TYPE_CCE:
            err = decode_cce(ac, gb, che);
            break;
        case TYPE_LFE:
            err = decode_ics(ac, &che->ch[0], gb, 0, 0);
            audio_found = 1;
            break;
        case TYPE_DSE:
            err = skip_data_stream_element(ac, gb);
            break;
        case TYPE_PCE: {
            /* Program config element: try the new channel layout and
             * only commit it if it parses and configures cleanly. */
            uint8_t layout_map[MAX_ELEM_ID*4][3];
            int tags;
            push_output_configuration(ac);
            tags = decode_pce(avctx, &ac->oc[1].m4ac, layout_map, gb);
            if (tags < 0) {
                err = tags;
                break;
            }
            if (pce_found) {
                av_log(avctx, AV_LOG_ERROR,
                       "Not evaluating a further program_config_element as this construct is dubious at best.\n");
                pop_output_configuration(ac);
            } else {
                err = output_configure(ac, layout_map, tags, 0, OC_TRIAL_PCE);
                if (!err)
                    ac->oc[1].m4ac.chan_config = 0;
                pce_found = 1;
            }
            break;
        }
        case TYPE_FIL:
            /* Fill element: id 15 means an extra length byte follows. */
            if (elem_id == 15)
                elem_id += get_bits(gb, 8) - 1;
            if (get_bits_left(gb) < 8 * elem_id) {
                av_log(avctx, AV_LOG_ERROR, overread_err);
                err = -1;
                goto fail;
            }
            while (elem_id > 0)
                elem_id -= decode_extension_payload(ac, gb, elem_id, che_prev, elem_type_prev);
            err = 0;
            break;
        default:
            err = -1;
            break;
        }
        che_prev = che;
        elem_type_prev = elem_type;
        if (err)
            goto fail;
        if (get_bits_left(gb) < 3) {
            av_log(avctx, AV_LOG_ERROR, overread_err);
            err = -1;
            goto fail;
        }
    }
    spectral_to_sample(ac);
    /* SBR doubles the output sample count when upsampling. */
    multiplier = (ac->oc[1].m4ac.sbr == 1) ? ac->oc[1].m4ac.ext_sample_rate > ac->oc[1].m4ac.sample_rate : 0;
    samples <<= multiplier;
    if (samples) {
        ac->frame.nb_samples = samples;
        if ((err = avctx->get_buffer(avctx, &ac->frame)) < 0) {
            av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
            err = -1;
            goto fail;
        }
        /* Interleave planar float output into the requested format. */
        if (avctx->sample_fmt == AV_SAMPLE_FMT_FLT)
            ac->fmt_conv.float_interleave((float *)ac->frame.data[0],
                                          (const float **)ac->output_data,
                                          samples, avctx->channels);
        else
            ac->fmt_conv.float_to_int16_interleave((int16_t *)ac->frame.data[0],
                                                   (const float **)ac->output_data,
                                                   samples, avctx->channels);
        *(AVFrame *)data = ac->frame;
    }
    *got_frame_ptr = !!samples;
    /* Lock the output configuration once audio was actually decoded. */
    if (ac->oc[1].status && audio_found) {
        avctx->sample_rate = ac->oc[1].m4ac.sample_rate << multiplier;
        avctx->frame_size = samples;
        ac->oc[1].status = OC_LOCKED;
    }
    return 0;
fail:
    pop_output_configuration(ac);
    return err;
}
| 1threat |
In Python, how do I produce a formatted string like this? :
I make this:
print('number id {0:1.3}.{0:1.3}.{0:1.3}-{0:1.3}'.format("12345678910"))
result : number id 123.123.123-123
How do I make it produce the correct output, like this:
number id 123.456.789-10
| 0debug |
static void spapr_machine_2_5_class_options(MachineClass *mc)
{
    sPAPRMachineClass *smc = SPAPR_MACHINE_CLASS(mc);

    /* This machine version is the default and is reachable through the
     * plain "pseries" alias. */
    mc->is_default = 1;
    mc->alias = "pseries";
    /* Turn on the dr_lmb class flag for this machine version
     * (presumably dynamic reconfiguration of logical memory blocks —
     * confirm against the sPAPR machine class definition). */
    smc->dr_lmb_enabled = true;
}
| 1threat |
/*
 * Free the per-plane state buffers of the FFV1 context.
 * av_freep() also NULLs each pointer, so repeated calls are safe.
 * Always returns 0.
 */
static int common_end(AVCodecContext *avctx){
    FFV1Context *s = avctx->priv_data;
    int i;

    /* Release each coded plane's state buffer. */
    for(i=0; i<s->plane_count; i++){
        PlaneContext *p= &s->plane[i];
        av_freep(&p->state);
    }

    return 0;
} | 1threat
/*
 * Extract chroma planes from two streams of 16-bit samples:
 * dstU[i] = src1[2*i] and dstV[i] = src2[2*i], i.e. the byte at the
 * lower address of each 16-bit pair is kept (for big-endian input, as
 * the name suggests, that is the most significant byte).
 */
static inline void RENAME(BEToUV)(uint8_t *dstU, uint8_t *dstV, const uint8_t *src1, const uint8_t *src2, int width, uint32_t *unused)
{
#if COMPILE_TEMPLATE_MMX
    /* MMX path: load 8 16-bit samples from each source per iteration,
     * mask one byte of every 16-bit lane with bm01010101 (constant
     * defined elsewhere; presumably 0x00FF repeated so the byte at the
     * lower memory address survives — matches the C fallback), then
     * pack the lanes down to bytes.  The index register counts from
     * -width up to 0 so it doubles as the loop-termination test. */
    __asm__ volatile(
        "movq "MANGLE(bm01010101)", %%mm4 \n\t"
        "mov %0, %%"REG_a" \n\t"
        "1: \n\t"
        "movq (%1, %%"REG_a",2), %%mm0 \n\t"
        "movq 8(%1, %%"REG_a",2), %%mm1 \n\t"
        "movq (%2, %%"REG_a",2), %%mm2 \n\t"
        "movq 8(%2, %%"REG_a",2), %%mm3 \n\t"
        "pand %%mm4, %%mm0 \n\t"
        "pand %%mm4, %%mm1 \n\t"
        "pand %%mm4, %%mm2 \n\t"
        "pand %%mm4, %%mm3 \n\t"
        "packuswb %%mm1, %%mm0 \n\t"
        "packuswb %%mm3, %%mm2 \n\t"
        "movq %%mm0, (%3, %%"REG_a") \n\t"
        "movq %%mm2, (%4, %%"REG_a") \n\t"
        "add $8, %%"REG_a" \n\t"
        " js 1b \n\t"
        : : "g" ((x86_reg)-width), "r" (src1+width*2), "r" (src2+width*2), "r" (dstU+width), "r" (dstV+width)
        : "%"REG_a
    );
#else
    int i;
    /* Portable path: take every other byte from each source. */
    for (i=0; i<width; i++) {
        dstU[i]= src1[2*i];
        dstV[i]= src2[2*i];
    }
#endif
}
| 1threat |
If someone knows your SHA-1 certificate fingerprint: : i'm trying to obtain SHA-1 certificate fingerprint with the command "keytool -list -v -keystore "%USERPROFILE%\.android\debug.keystore" -alias androiddebugkey -storepass android -keypass android"
and clicked in jdk folder and opened windows power shell but returned this error to me..."keytool : The term 'keytool' is not recognized as the name of a cmdlet, function, script file, or operable program. Check the
spelling of the name, or if a path was included, verify that the path is correct and try again.
At line:1 char:1
+ keytool -list -v -keystore "%USERPROFILE%\.android\debug.keystore" -a ...
+ ~~~~~~~
+ CategoryInfo : ObjectNotFound: (keytool:String) [], CommandNotFoundException
+ FullyQualifiedErrorId : CommandNotFoundException"
I think this error occur because of I didn't set path in System Environment Variables ... I gone to it but in variables path not exist to edit....
I use windows 10 ver.1709 build16299.371 | 0debug |
bool qemu_clock_use_for_deadline(QEMUClockType type)
{
    /* When instruction counting (icount) is active, the virtual clock
     * must not participate in deadline computation; every other clock
     * type always may. */
    if (use_icount && type == QEMU_CLOCK_VIRTUAL) {
        return false;
    }
    return true;
}
| 1threat |
static int ea_probe(AVProbeData *p)
{
    /* At least four bytes are needed to read the little-endian SCHl
     * signature that opens this container format. */
    if (p->buf_size < 4 || AV_RL32(&p->buf[0]) != SCHl_TAG)
        return 0;
    return AVPROBE_SCORE_MAX;
}
| 1threat |
static void balloon_page(void *addr, int deflate)
{
#if defined(__linux__)
    /* Skip the advice when KVM is active without a synchronized MMU;
     * NOTE(review): rationale inferred from the guard condition —
     * confirm against KVM documentation. */
    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        return;
    }
    /* Inflating the balloon releases the page (MADV_DONTNEED);
     * deflating hints that it will be used again (MADV_WILLNEED). */
    madvise(addr, TARGET_PAGE_SIZE,
            deflate ? MADV_WILLNEED : MADV_DONTNEED);
#endif
}
| 1threat |
/*
 * Send one NBD request (optionally followed by payload data taken from
 * qiov at the given offset) on the client socket, from coroutine
 * context.
 *
 * Sends are serialized through s->send_mutex.  While the lock is held
 * the fd handler is switched so a blocked write restarts this
 * coroutine.  Returns the result of nbd_send_request(), or -EIO if the
 * payload could not be written completely.
 */
static int nbd_co_send_request(NbdClientSession *s,
                               struct nbd_request *request,
                               QEMUIOVector *qiov, int offset)
{
    AioContext *aio_context;
    int rc, ret;

    /* Only one request may be on the wire at a time. */
    qemu_co_mutex_lock(&s->send_mutex);
    s->send_coroutine = qemu_coroutine_self();
    aio_context = bdrv_get_aio_context(s->bs);

    /* Install a write handler so a partial send can resume this
     * coroutine once the socket becomes writable again. */
    aio_set_fd_handler(aio_context, s->sock,
                       nbd_reply_ready, nbd_restart_write, s);
    if (qiov) {
        /* Cork TCP sockets so the request header and payload leave as
         * one segment; not applicable to UNIX domain sockets. */
        if (!s->is_unix) {
            socket_set_cork(s->sock, 1);
        }
        rc = nbd_send_request(s->sock, request);
        if (rc >= 0) {
            ret = qemu_co_sendv(s->sock, qiov->iov, qiov->niov,
                                offset, request->len);
            /* A short payload write is a hard I/O error. */
            if (ret != request->len) {
                rc = -EIO;
            }
        }
        if (!s->is_unix) {
            socket_set_cork(s->sock, 0);
        }
    } else {
        rc = nbd_send_request(s->sock, request);
    }

    /* Drop the write handler again; replies are still watched. */
    aio_set_fd_handler(aio_context, s->sock, nbd_reply_ready, NULL, s);
    s->send_coroutine = NULL;
    qemu_co_mutex_unlock(&s->send_mutex);
    return rc;
}
| 1threat |
Complex SQL query or queries : I looked at other examples, but I don't know enough about SQL to adapt it to my needs. I have a table that looks like this:
ID Month NAME COUNT First LAST TOTAL
1 JAN2013 fred 4
2 MAR2013 fred 5
3 APR2014 fred 1
4 JAN2013 Tom 6
5 MAR2014 Tom 1
6 APR2014 Tom 1
This could be in separate queries, but I need 'First' to equal the first month that a particular name is used, so every row with fred would have JAN2013 in the first field for example. I need the 'Last" column to equal the month of the last record of each name, and finally i need the 'total' column to be the sum of all the counts for each name, so in each row that had fred the total would be 10 in this sample data. This is over my head. Can one of you assist?
| 0debug |
Stuck in implementing a method for mapping elements to a range : Let there be an array `b = [1,3,2,6,1]` containing `N = 5` integer-valued elements with probability of occurrence of each unique integer as `0.4, 0.2, 0.2, 0.2` respectively. The array `b` can take any integers from the unique symbol set `0,1,2,3,4,5,6,7`. Let `n = 8` elements in the symbol set. In essence, the probability for the above data `b` is
`p= [ 0.4 0.2 0 0.2 0 0 0.2 0]`
An interval `[0,1]` is split into 8 regions. Let, the interval for the data `b` assumed to be known as
` Interval_b = [0, 0.4, 0.6, 0.8, 1];`
In general, for `n = 8` unique symbols, there are `n = 8` intervals such as `I_1, I_2, I_3, I_4, I_5, I_6, I_6,I_7,I_8` and each of these intervals is assigned a symbol such as `[ 1 2 3 4 5 6 7 8]`
Let, `x = 0.2848`. There is a mapping rule which maps `x` to the symbol depending on the interval in which `x` lies and we should obtain the same symbol elements as in `b`. The rule is
if x in I_1, assign symbol = 1 and y= x/p(1);
if x lies in I_2, assign symbol = 2 and y = (x-p(1))./p(2);
if x lies in I_3 , assign symbol = 3 y = (x-(p(1)+p(2)))./p(3);;
if x lies in I_4 , assign symbol = 4 y = (x-(p(1)+p(2)+p(3)))./p(4);;
if x lies in I_5 , assign symbol = 5 y = (x-(p(1)+p(2)+p(3)+p(4)))./p(5);
if x lies in I_6 , assign symbol = 6 y = (x-(p(1)+p(2)+p(3)+p(4)+p(5)))./p(6);
if x lies in I_7, assign symbol = 7 and y = (x-(p(1)+p(2)+p(3)+p(4)+p(5)+p(6)))./p(7)
where `y` is basically the next value of `x`. In this way, I will get an array of floating point numbers `x` and `symbols = b`.
I need to map the elements in `x` to these symbols using the intervals and obtain the value `y`. Theoretically, the value of `y` should be ` y = 0.2848 /0.4 = 0.7120` and the symbol = 1 since `x` is in `I_1` . Then, based on this `y` value obtained, I find the next symbol. In this way I should get `symbol = b = [1,3,2,6,1]`.
But, in my implementation I am getting infinity value for `y` and all incorrect results for symbols. I am unable to understand where I am going wrong. Please help.
clear all
N = 5;
b = [1,3,2,6,1];
[uniqueSym,~,idxUnq]=unique(b);
pp = hist(b , uniqueSym);
p = pp/sum(pp);
Interval = cumsum([0 p]);
p_1 = sum(b==1)/length(b);
p_2 = sum(b==2)/length(b);
p_3 = sum(b==3)/length(b);
p_4 = sum(b==4)/length(b);
p_5 = sum(b==5)/length(b);
p_6 = sum(b==6)/length(b);
p_7 = sum(b==7)/length(b);
p_8 = sum(b==8)/length(b);
p_arr = [p_1,p_2,p_3,p_4,p_5,p_6,p_7,p_8];
x(1) = 0.2848;
[~,symbols(1)] = ObtainSymbols(x(1),p_arr,Interval);
for k = 1:N
[y,sym] = ObtainSymbols(x(k),p_arr,Interval);
x(k+1) = y;
symbols(k+1) = sym;
end
function [y,sym] = ObtainSymbols(x,p,Interval)
if (double(x)>=Interval(1)) && (double(x)<Interval(2)) %interval I1
y= x/p(1);
sym = 1;
elseif (double(x)>=Interval(2)) && (double(x)<Interval(3)) %interval I2
y = (x-p(1))./p(2);
sym = 2;
elseif (double(x)>=Interval(3)) && (double(x)<Interval(4)) %interval I3
y = (x-(p(1)+p(2)))./p(3);
sym = 3;
elseif (double(x)>=Interval(4)) && (double(x)<Interval(5)) %interval I4
y = (x-(p(1)+p(2)+p(3)))./p(4);
sym = 4;
elseif (double(x)>=Interval(5)) && (double(x)<Interval(6)) %interval I5
y = (x-(p(1)+p(2)+p(3)+p(4)))./p(5);
sym = 5;
elseif (double(x)>=Interval(6)) && (double(x)<Interval(7)) %interval I6
y = (x-(p(1)+p(2)+p(3)+p(4)+p(5)))./p(6);
sym = 6;%interval I6
else y = (x-(p(1)+p(2)+p(3)+p(4)+p(5)+p(6)))./p(7);
symb = 7;
end
| 0debug |
/*
 * Instance init for the Xilinx AXI Ethernet device: create the QOM
 * links and child objects that wire up the AXI stream interfaces, the
 * interrupt line, and the 0x40000-byte MMIO register region.
 */
static void xilinx_enet_init(Object *obj)
{
    XilinxAXIEnet *s = XILINX_AXI_ENET(obj);
    SysBusDevice *sbd = SYS_BUS_DEVICE(obj);

    /* Link properties the board code sets to attach the TX-side data
     * and control stream sinks. */
    object_property_add_link(obj, "axistream-connected", TYPE_STREAM_SLAVE,
                             (Object **) &s->tx_data_dev, &error_abort);
    object_property_add_link(obj, "axistream-control-connected",
                             TYPE_STREAM_SLAVE,
                             (Object **) &s->tx_control_dev, &error_abort);

    /* Embedded child objects acting as the RX-side stream targets,
     * exposed under fixed child property names. */
    object_initialize(&s->rx_data_dev, sizeof(s->rx_data_dev),
                      TYPE_XILINX_AXI_ENET_DATA_STREAM);
    object_initialize(&s->rx_control_dev, sizeof(s->rx_control_dev),
                      TYPE_XILINX_AXI_ENET_CONTROL_STREAM);
    object_property_add_child(OBJECT(s), "axistream-connected-target",
                              (Object *)&s->rx_data_dev, &error_abort);
    object_property_add_child(OBJECT(s), "axistream-control-connected-target",
                              (Object *)&s->rx_control_dev, &error_abort);

    sysbus_init_irq(sbd, &s->irq);

    /* Register window ("enet") exposed via sysbus MMIO. */
    memory_region_init_io(&s->iomem, OBJECT(s), &enet_ops, s, "enet", 0x40000);
    sysbus_init_mmio(sbd, &s->iomem);
}
| 1threat |
def extract_nth_element(list1, n):
    # Collect the n-th element of every item in list1; each item must be
    # indexable (e.g. a list or tuple) with more than n entries.
    result = [x[n] for x in list1]
    return result | 0debug
iOS lock-screen notification with three buttons : Is it possible to show a notification with three actions — Yes, No, or Cancel — on an iPhone while the screen is locked?
How to get an array values in the dropdown in perl cgi html template : Please suggest how to get array values in the dropdown list using html template.
open (FL, "<file.txt");
file.txt values are
count1
count2
count3
count4
count5
my @TOTAL = <FL>;
foreach $count(@TOTAL)
{
$template->param( COUNT => [{name => $count}]); # here I am getting only one value (count1 only)
}
I am expecting the values like below , so the dropdown will list all the values.
$template->param(COUNT => [{name => $count1}, {name => $count2}, {name => $count3}, {name => $count4}]);
print $template->output, "\n"; | 0debug |
flexbox justify-self: flex-end not working? : <p>I have a layout where <em>sometimes</em> the 'left' item is missing. In such cases, I still want the 'right' item to be right-aligned.</p>
<p>I thought I could do this with <code>justify-self</code> but that doesn't appear to be the case.</p>
<p>Is there a flexbox property for right-aligning one's self?</p>
<p><div class="snippet" data-lang="js" data-hide="false" data-console="true" data-babel="false">
<div class="snippet-code">
<pre class="snippet-code-css lang-css prettyprint-override"><code>.row {
border: 1px solid black;
display: flex;
justify-content: space-between;
align-items: center;
}
.left,
.right {
display: inline-block;
background-color: yellow;
}
.right {
justify-self: flex-end;
}</code></pre>
<pre class="snippet-code-html lang-html prettyprint-override"><code><div class="row">
<!--<div class="left">left</div>-->
<div class="right">right</div>
</div></code></pre>
</div>
</div>
</p>
| 0debug |
Javafx, how to bound 2 functions of one button : Hellow guys, today i come across another problem.
Here is a part of my code. I've got there 2 functions which i want to be assigned to the one Button (Log In), how can i do that ?
@FXML
private void fireLogIn()
{
LogInButton.setOnKeyPressed(event -> {
if(event.getCode() == KeyCode.ENTER){
LogIn(event); // <--- there is an error of wrong type of data
}
});
}
@FXML
private void LogIn(ActionEvent event) throws IOException {
if(LoginField.getText().equals("MKARK")&&PasswdField.getText().equals("KACZOR1"))
{
Parent parent = FXMLLoader.load(getClass().getResource("/fxmlFiles/MainScreen.fxml"));
Scene MainScene = new Scene(parent);
Stage stage = (Stage) ((Node) event.getSource()).getScene().getWindow();
stage.setScene(MainScene);
stage.show();
}
else
{
IncorrectDataLink.setVisible(true);
IncorrectDataLink.setOnAction(e-> openWebpage(uri));
}
}
I want to provide both facilities, pressing the button by mouse or pressing an "ENTER"" botton whenever it's focused on it :(. | 0debug |
/*
 * Decode one VMD video frame from s->buf into s->frame.
 *
 * The 16-byte frame header carries the dirty rectangle (bytes 6..13,
 * inclusive coordinates) and a flags byte (byte 15).  It is followed
 * by an optional palette chunk and the pixel payload.  Pixels outside
 * the dirty rectangle are carried over from the previous frame.
 */
static void vmd_decode(VmdVideoContext *s)
{
    int i;
    unsigned int *palette32;
    unsigned char r, g, b;

    /* Payload starts right after the 16-byte header. */
    unsigned char *p = s->buf + 16;

    unsigned char *pb;
    unsigned char meth;
    unsigned char *dp;   /* write pointer into the current frame */
    unsigned char *pp;   /* read pointer into the previous frame */

    unsigned char len;
    int ofs;

    int frame_x, frame_y;
    int frame_width, frame_height;

    /* Dirty rectangle: left/top plus inclusive right/bottom. */
    frame_x = LE_16(&s->buf[6]);
    frame_y = LE_16(&s->buf[8]);
    frame_width = LE_16(&s->buf[10]) - frame_x + 1;
    frame_height = LE_16(&s->buf[12]) - frame_y + 1;

    /* If this frame updates only part of the image, start from a copy
     * of the previous frame. */
    if (frame_x || frame_y || (frame_width != s->avctx->width) ||
        (frame_height != s->avctx->height)) {
        memcpy(s->frame.data[0], s->prev_frame.data[0],
            s->avctx->height * s->frame.linesize[0]);
    }

    /* Flag 0x02: a 2-byte prefix followed by 256 RGB triplets of new
     * palette data (components scaled by 4; presumably stored at 6-bit
     * precision — confirm against the VMD format description). */
    if (s->buf[15] & 0x02) {
        p += 2;
        palette32 = (unsigned int *)s->palette;
        for (i = 0; i < PALETTE_COUNT; i++) {
            r = *p++ * 4;
            g = *p++ * 4;
            b = *p++ * 4;
            palette32[i] = (r << 16) | (g << 8) | (b);
        }
        s->size -= (256 * 3 + 2);
    }
    if (s->size >= 0) {
        /* First payload byte selects the coding method; bit 7 marks an
         * LZ-packed payload that must be expanded first. */
        pb = p;
        meth = *pb++;
        if (meth & 0x80) {
            lz_unpack(pb, s->unpack_buffer);
            meth &= 0x7F;
            pb = s->unpack_buffer;
        }

        /* Position both frame pointers at the dirty rectangle origin. */
        dp = &s->frame.data[0][frame_y * s->frame.linesize[0] + frame_x];
        pp = &s->prev_frame.data[0][frame_y * s->prev_frame.linesize[0] + frame_x];
        switch (meth) {
        case 1: /* runs of literal bytes (length byte with bit 7 set)
                 * interleaved with runs copied from the previous frame */
            for (i = 0; i < frame_height; i++) {
                ofs = 0;
                do {
                    len = *pb++;
                    if (len & 0x80) {
                        len = (len & 0x7F) + 1;
                        memcpy(&dp[ofs], pb, len);
                        pb += len;
                        ofs += len;
                    } else {
                        /* interframe run: copy from the previous frame */
                        memcpy(&dp[ofs], &pp[ofs], len + 1);
                        ofs += len + 1;
                    }
                } while (ofs < frame_width);
                if (ofs > frame_width) {
                    av_log(s->avctx, AV_LOG_ERROR, "VMD video: offset > width (%d > %d)\n",
                        ofs, frame_width);
                    break;
                }
                dp += s->frame.linesize[0];
                pp += s->prev_frame.linesize[0];
            }
            break;

        case 2: /* raw: one uncompressed row per scanline */
            for (i = 0; i < frame_height; i++) {
                memcpy(dp, pb, frame_width);
                pb += frame_width;
                dp += s->frame.linesize[0];
                pp += s->prev_frame.linesize[0];
            }
            break;

        case 3: /* like method 1, but a literal run whose first byte is
                 * 0xFF is additionally RLE-compressed */
            for (i = 0; i < frame_height; i++) {
                ofs = 0;
                do {
                    len = *pb++;
                    if (len & 0x80) {
                        len = (len & 0x7F) + 1;
                        if (*pb++ == 0xFF)
                            len = rle_unpack(pb, &dp[ofs], len);
                        else
                            memcpy(&dp[ofs], pb, len);
                        pb += len;
                        ofs += len;
                    } else {
                        /* interframe run: copy from the previous frame */
                        memcpy(&dp[ofs], &pp[ofs], len + 1);
                        ofs += len + 1;
                    }
                } while (ofs < frame_width);
                if (ofs > frame_width) {
                    av_log(s->avctx, AV_LOG_ERROR, "VMD video: offset > width (%d > %d)\n",
                        ofs, frame_width);
                }
                dp += s->frame.linesize[0];
                pp += s->prev_frame.linesize[0];
            }
            break;
        }
    }
}
| 1threat |
GitHub access token with read-only access to private repositories : <p>I have a project with a Node dependency on a private Git repository. I need to be able to run <code>npm install</code> without being prompted to enter a password or allow an SSH connection, so I'm using an access token that I created on GitHub in my package.json:</p>
<pre><code> "dependencies": {
"sass-theme": "git+https://[token]:x-oauth-basic@github.com/MyOrg/sass-theme.git#v1.0.2",
"node-sass": "^4.5.0"
}
</code></pre>
<p>This project is shared with dozens of other people, so obviously I don't want to keep my token in source control. I know I can create a read-only deployment key on GitHub, but I believe that would require other developers to import the SSH key to build the project locally.</p>
<p>Is it possible to create an access token that can be shared but that has read-only access to clone the repository?</p>
| 0debug |
/* Write handler for the PRID special register: intentionally empty, so
 * writes are silently ignored (PRID is presumably read-only — confirm
 * against the Xtensa ISA documentation). */
static void gen_wsr_prid(DisasContext *dc, uint32_t sr, TCGv_i32 v)
{
}
| 1threat |
Limit lines in a textarea? : <p>I've got a form that people can fill out and the results of the form are displayed on a page. The form is limited to 2000 characters, however there's nothing limiting how many line breaks there can be, so someone could just put one character per line and get 2000 line breaks, which clutters up the page pretty badly. I'm looking to limit the number of lines that they can type into the textarea so the limits are 2000 characters or 40 lines, whichever comes first.</p>
<p>I would also need this to be protective of people pasting large amounts of text into the form as well. I looked at a lot of other posts about this but none of the answers worked when I tried them. Thanks!</p>
| 0debug |
How can i get fixed order for all order by clause : DECLARE @Table TABLE (Col1 INT, col2 NVARCHAR(24))
INSERT INTO @Table
SELECT 1,'a' UNION ALL
SELECT 2,'c' UNION ALL
SELECT 2,'f' UNION ALL
SELECT 4,'a' UNION ALL
SELECT 10,'a'
SELECT *,ROW_NUMBER () OVER (ORDER BY Col1) AS [Order] FROM @Table ORDER BY col1
Like the title, can i get my [Order] column to have fixed result that not depend on my order by clause that will always return 1,2,3,4,5.... however i order? | 0debug |
How can I manipulate text in bash? : <p>How can I manipulate text in bash using either awk, grep, perl or sed? </p>
<p>Input:</p>
<pre><code>ted foo,bar,zoo
john ket,ben
</code></pre>
<p>Expected Output:</p>
<pre><code>foo,ted
bar,ted
zoo,ted
ket,john
ben,john
</code></pre>
| 0debug |
/bin/sh: apt-get: not found : <p>I am trying to change a dockerFile to work with aspell. I have a bash script that i want to wrap in a dock</p>
<pre><code>Step 4: Wrap the script in a Docker container.
The sample SDK we downloaded earlier contains an example of an action wrapped in a Docker container. In particular, the sample SDK includes a Dockerfile that builds the C program in client/example.c and installs the binary as /blackbox/client/action .
The key line in the sample Dockerfile is:
RUN cd /blackbox/client; gcc -o action example.c
Instead of compiling example.c and installing the binary as an action, we’ll change the Dockerfile to install aspell into the Linux environment, and then install our action.sh script as the executable action command.
To do so, we delete the RUN command above, and insert the following commands into the Dockerfile:
RUN apt-get install -y aspell
RUN rm -f /blackbox/client/action
ADD action.sh /blackbox/client/action
</code></pre>
<p>i am trying to do this on the dockerfile below</p>
<pre><code># Dockerfile for example whisk docker action
FROM openwhisk/dockerskeleton
ENV FLASK_PROXY_PORT 8080
### Add source file(s)
ADD example.c /action/example.c
RUN sudo apt-get install -y aspell
RUN rm -f /blackbox/client/action
ADD action.sh /blackbox/client/action
CMD ["/home/huseyin/bin", "-c", "cd actionProxy && python -u actionproxy.py"]
</code></pre>
<p>the tutorial is outdated so i can't succeed doing it. Can you help me?</p>
| 0debug |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.