problem
stringlengths 26
131k
| labels
class label 2
classes |
|---|---|
/* Open a host device node as a raw block device.
 *
 * On macOS, a "/dev/cdrom" filename is translated via IOKit to the BSD
 * path of the first ejectable CD media found, before the generic open
 * path runs.  On Linux, the resolved path is checked for "/dev/sg" to
 * flag SCSI-generic devices via bs->sg.
 *
 * Returns the result of raw_open_common() for the (possibly rewritten)
 * filename.
 */
static int hdev_open(BlockDriverState *bs, const char *filename, int flags)
{
    BDRVRawState *s = bs->opaque;
#if defined(__APPLE__) && defined(__MACH__)
    if (strstart(filename, "/dev/cdrom", NULL)) {
        kern_return_t kernResult;
        io_iterator_t mediaIterator;
        char bsdPath[ MAXPATHLEN ];
        int fd;
        /* NOTE(review): both kernResult values are ignored; this relies on
         * GetBSDPath leaving bsdPath empty on failure -- confirm. */
        kernResult = FindEjectableCDMedia( &mediaIterator );
        kernResult = GetBSDPath( mediaIterator, bsdPath, sizeof( bsdPath ) );
        if ( bsdPath[ 0 ] != '\0' ) {
            /* Try the whole-disc slice first ("...s0"); if it cannot be
             * opened, fall back to slice 1 by patching the last character.
             * NOTE(review): strcat assumes at least 2 spare bytes in
             * bsdPath beyond what GetBSDPath wrote -- confirm. */
            strcat(bsdPath,"s0");
            fd = qemu_open(bsdPath, O_RDONLY | O_BINARY | O_LARGEFILE);
            if (fd < 0) {
                bsdPath[strlen(bsdPath)-1] = '1';
            } else {
                qemu_close(fd);
            }
            /* filename now points at the on-stack buffer; it is only used
             * for the raw_open_common() call below, within this frame. */
            filename = bsdPath;
        }
        if ( mediaIterator )
            IOObjectRelease( mediaIterator );
    }
#endif
    s->type = FTYPE_FILE;
#if defined(__linux__)
    {
        /* Mark SCSI-generic devices so callers can issue SG_IO-style
         * requests instead of normal block I/O. */
        char resolved_path[ MAXPATHLEN ], *temp;
        temp = realpath(filename, resolved_path);
        if (temp && strstart(temp, "/dev/sg", NULL)) {
            bs->sg = 1;
        }
    }
#endif
    return raw_open_common(bs, filename, flags, 0);
}
| 1threat
|
PHP preg_replace: replace wildcard string with HTML, and what happens after the wildcard : Hey, I have a hard time understanding regex, but I think that is what suits my needs best. I currently have this line:
$str = preg_replace('/https:\/\/clips.twitch.tv\/(.*?)/', '<iframe src="https://clips.twitch.tv/embed?autoplay=false&clip=$1&tt_content=embed&tt_medium=clips_embed" width="640" height="360" frameborder="0" scrolling="no" allowfullscreen="true"></iframe>', $text);
What I want is to replace f.ex this link: https://clips.twitch.tv/GleamingHelpfulOxNotLikeThis
To be the HTML in the replace part, but the last part f.ex. "GleamingHelpfulOxNotLikeThis" ends up behind the iframe and not after "clip=" where I have $1 which I thought would work.
| 0debug
|
Python Argparse of Google Vision AI Product Search : I am trying to build a Google Vision AI product search system. I am using python.
I have uploaded a product set already.
However, when I would like to search the product set with python argparse using below python code, I got an error.
https://github.com/GoogleCloudPlatform/python-docs-samples/blob/master/vision/cloud-client/product_search/product_in_product_set_management.py
The case was when I typed :
python productsetmanagement.py --project_id="test-as-vision-api-v1" --location="asia-east1" list_product_sets
I could find my product_set : set01
However, when I typed :
python productsetmanagement.py --project_id="test-as-vision-api-v1" --location="asia-east1" --product_set_id="set01" get_product_set
I got an error: the following arguments are required: product_set_id
I have already typed the product set id, may I know why I still got the error ? Did I use argparse wrongly?
Many thanks.
| 0debug
|
/* Drop one reference from @job; free it when the count reaches zero.
 *
 * Tear-down order matters: the BDS back-pointer is cleared and the job is
 * detached from all of its BDSes and AioContext notifiers before the
 * BlockBackend is released, so no callback can observe a half-destroyed
 * job.
 */
static void block_job_unref(BlockJob *job)
{
    if (--job->refcnt == 0) {
        BlockDriverState *bs = blk_bs(job->blk);
        bs->job = NULL;                    /* unlink from the root BDS */
        block_job_remove_all_bdrv(job);    /* drop per-BDS attachments */
        blk_remove_aio_context_notifier(job->blk,
                                        block_job_attached_aio_context,
                                        block_job_detach_aio_context, job);
        blk_unref(job->blk);
        error_free(job->blocker);
        g_free(job->id);
        QLIST_REMOVE(job, job_list);
        g_free(job);
    }
}
| 1threat
|
Program type already present: android.support.v4.app.INotificationSideChannel$Stub$Proxy : <p>I know it could look like <a href="https://stackoverflow.com/questions/50289355/google-material-design-library-error-program-type-already-present-android-suppo">This Question</a> but I could not fix it with the solution proposed and I could not comment on it too.
The Error is :</p>
<pre><code>Program type already present:
android.support.v4.app.INotificationSideChannel$Stub$Proxy
Message{kind=ERROR, text=Program type already present:
android.support.v4.app.INotificationSideChannel$Stub$Proxy, sources=[Unknown
source file], tool name=Optional.of(D8)}
</code></pre>
<p>I'm trying to create an app using firebase there's is my gradle file</p>
<pre><code>apply plugin: 'com.android.application'
android {
compileSdkVersion 28
defaultConfig {
minSdkVersion 27
targetSdkVersion 28
versionCode 1
versionName "1.0"
testInstrumentationRunner "android.support.test.runner.AndroidJUnitRunner"
}
buildTypes {
release {
minifyEnabled false
proguardFiles getDefaultProguardFile('proguard-android.txt'), 'proguard-rules.pro'
}
}
aaptOptions {
noCompress "tflite"
}
}
dependencies {
implementation fileTree(include: ['*.jar'], dir: 'libs')
implementation 'androidx.appcompat:appcompat:1.0.0-rc01'
implementation 'androidx.constraintlayout:constraintlayout:1.1.2'
testImplementation 'junit:junit:4.12'
androidTestImplementation 'androidx.test:runner:1.1.0-alpha4'
androidTestImplementation 'androidx.test.espresso:espresso-core:3.1.0-alpha4'
implementation 'com.google.android.material:material:1.0.0-rc01'
implementation 'androidx.cardview:cardview:1.0.0-rc01'
// ML Kit dependencies
implementation 'com.google.firebase:firebase-core:16.0.1'
implementation 'com.google.firebase:firebase-ml-vision:17.0.0'
}
apply plugin: 'com.google.gms.google-services'
</code></pre>
<p>I pass over every file to be sure the import was good, I Also add </p>
<pre><code>android.useAndroidX = true
android.enableJetifier = false
</code></pre>
<p>There's my Project Gradle file :</p>
<pre><code>// Top-level build file where you can add configuration options common to all sub-projects/modules.
buildscript {
repositories {
google()
jcenter()
}
dependencies {
classpath 'com.android.tools.build:gradle:3.1.4'
classpath 'com.google.gms:google-services:4.1.0'
// NOTE: Do not place your application dependencies here; they belong
// in the individual module build.gradle files
}
}
allprojects {
repositories {
google()
jcenter()
}
}
task clean(type: Delete) {
delete rootProject.buildDir
}
</code></pre>
<p>I use Android Studio 3.1.4</p>
| 0debug
|
Slack - how to post a link to network folder? : <p>I'm using a webhook to post messages to Slack via PowerShell script and I'd like to include a link to a network folder. I was able to do it with</p>
<pre><code> <file://server/folder|files>
</code></pre>
<p>however when the generated 'files' link is clicked nothing happens. Is there a way to specify target so that a clicked link opens in a new window? If I copy the generated link and paste it into the browser, the index is rendered just fine and that would be sufficient for my purposes. Are there any alternative solutions?</p>
| 0debug
|
How To Split String Before After Comma? : <p>i have string like this:</p>
<pre><code><?php $string = "52.74837280745686,-51.61665272782557"; ?>
</code></pre>
<p>i want access to first string before comma and second string after comma like this:</p>
<pre><code><?php string1 = "52.74837280745686"; $string2 = "-51.61665272782557" ; ?>
</code></pre>
<p>thank you !</p>
| 0debug
|
How to get a particular JSON element of an array? : <p>There is an <code>array</code> of <code>JSON</code>s :</p>
<pre><code>var a = [ {id:1, latlong:[...]} , {id:2, latlong:[...]} , ... ];
</code></pre>
<p>How to get the JSON element which key <code>id</code> equals 2 for example ?</p>
| 0debug
|
How to QUERY perform operation and then UPDATE on entire table : <p>I have a large table with around 10 million rows. I need to take numbers from 2 columns perform some function and then save the result into a 3rd column.</p>
<p>Is there an efficient way of doing this? The only way I have been able to do this is to QUERY and save the result into a tuple. Then in a second for loop iterate through the tuple where the result and unique hash is stored and filter by hash and then update. </p>
<p>This is very very very slow though! Is there a better way to do this?</p>
| 0debug
|
Is it still relevant to use the Repository Pattern in a Laravel application? : <p>I'm setting up a new Laravel project, and want to reduce code in my controllers. Do I need to use the repository pattern?</p>
| 0debug
|
I get local ip address from request : <p>I am using Spring boot and when i try to get user ip from request like this :</p>
<pre><code>request.getRemoteAddr();
</code></pre>
<p>I get 127.0.0.1 for every user.</p>
| 0debug
|
/* Log a one-line human-readable description of stream @i of @ic, shown as
 * "Stream #index.i".  Codec details, optional stream id and language are
 * logged at INFO level; the reduced time base is logged at DEBUG level. */
static void dump_stream_format(AVFormatContext *ic, int i, int index, int is_output)
{
    char codec_info[256];
    AVStream *stream = ic->streams[i];
    int fmt_flags = is_output ? ic->oformat->flags : ic->iformat->flags;
    int tb_gcd = ff_gcd(stream->time_base.num, stream->time_base.den);

    avcodec_string(codec_info, sizeof(codec_info), stream->codec, is_output);

    av_log(NULL, AV_LOG_INFO, " Stream #%d.%d", index, i);
    if (fmt_flags & AVFMT_SHOW_IDS) {
        av_log(NULL, AV_LOG_INFO, "[0x%x]", stream->id);
    }
    if (strlen(stream->language) > 0) {
        av_log(NULL, AV_LOG_INFO, "(%s)", stream->language);
    }
    av_log(NULL, AV_LOG_DEBUG, ", %d/%d",
           stream->time_base.num / tb_gcd, stream->time_base.den / tb_gcd);
    av_log(NULL, AV_LOG_INFO, ": %s", codec_info);

    if (stream->codec->codec_type == CODEC_TYPE_VIDEO) {
        if (stream->r_frame_rate.den && stream->r_frame_rate.num) {
            av_log(NULL, AV_LOG_INFO, ", %5.2f fps(r)", av_q2d(stream->r_frame_rate));
        } else {
            av_log(NULL, AV_LOG_INFO, ", %5.2f fps(c)", 1 / av_q2d(stream->codec->time_base));
        }
    }
    av_log(NULL, AV_LOG_INFO, "\n");
}
| 1threat
|
Escaping regex in php : <p>I have regexes stored in a txt file. How do I escape them in PHP? preg_quote doesn't help if I use the output in an array, throws fatal error. (The following each are on new lines in the txt file)</p>
<pre><code>/[^a-z\/'"]eval\([^\)]+['"\s\);]+/i
/\$auth_pass\s*=.+;/i
/document\.write\((['"])<iframe .+<\/iframe>\1\);*/i
/preg_replace\s*\(.+[\/\#\|][i]*e[i]*['"].+\)/i
/<\?.+?exec\(.+?system\(.+?passthru\(.+fwrite\(.+/s
/RewriteRule [^ ]+ http\:\/\/(?!127\.).*/i
/<\?[\shp]*\@?error_reporting\(0\);.+?[a-z0-9\/\-\='"\.]{2000}.*?($|\?>)/i
/\<a [^\>]+\>\<span style="color\:\#F1EFE4;"\>(.+?)\<\/span\>\<\/a\>\<span style="color\:\#F1EFE4;"\>(.+?)\<\/span\>/i
/(<!\d)\$[\$\{]*[a-z\-\_0-9]+[\} \t]*(\[[^\]]+\][ \t]*)*\(.*?\)\;/i
/\#(\w+)\#.+?\#\/\1\#/is
/(\$[a-z_0-9]+[=\s\@]+)?create_function\([^,]+,[\s\$\.\[\]a-z_0-9]+[\s\)]+;*/i
/json2\.min\.js/i
/(RewriteCond \%\{HTTP_USER_AGENT\} .+\s+)+RewriteRule \^.*\$ http:\/\/(?!127\.).*/i
/<title>[^<]*hack[3e][rd]/i
</code></pre>
| 0debug
|
/* Close and tear down the decoder side of stream @stream_index of @is.
 *
 * Ordering is deliberate: the packet queue is aborted first so consumer
 * threads stop blocking, the thread is woken and joined, and only then is
 * the queue destroyed, the stream discarded and the codec closed.
 */
static void stream_component_close(VideoState *is, int stream_index)
{
    AVFormatContext *ic = is->ic;
    AVCodecContext *avctx;
    /* Ignore invalid stream indices. */
    if (stream_index < 0 || stream_index >= ic->nb_streams)
        return;
    avctx = ic->streams[stream_index]->codec;
    switch(avctx->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        packet_queue_abort(&is->audioq);
        SDL_CloseAudio();                 /* stops the SDL audio callback */
        packet_queue_end(&is->audioq);
        av_free_packet(&is->audio_pkt);
        if (is->reformat_ctx)
            av_audio_convert_free(is->reformat_ctx);
        is->reformat_ctx = NULL;
        if (is->rdft) {
            av_rdft_end(is->rdft);
            av_freep(&is->rdft_data);
        }
        break;
    case AVMEDIA_TYPE_VIDEO:
        packet_queue_abort(&is->videoq);
        /* Wake the video thread in case it waits on the picture queue,
         * then join it before destroying the packet queue. */
        SDL_LockMutex(is->pictq_mutex);
        SDL_CondSignal(is->pictq_cond);
        SDL_UnlockMutex(is->pictq_mutex);
        SDL_WaitThread(is->video_tid, NULL);
        packet_queue_end(&is->videoq);
        break;
    case AVMEDIA_TYPE_SUBTITLE:
        packet_queue_abort(&is->subtitleq);
        /* Wake the subtitle thread and flag the stream change so it can
         * exit cleanly before we join it. */
        SDL_LockMutex(is->subpq_mutex);
        is->subtitle_stream_changed = 1;
        SDL_CondSignal(is->subpq_cond);
        SDL_UnlockMutex(is->subpq_mutex);
        SDL_WaitThread(is->subtitle_tid, NULL);
        packet_queue_end(&is->subtitleq);
        break;
    default:
        break;
    }
    ic->streams[stream_index]->discard = AVDISCARD_ALL;
    avcodec_close(avctx);
    /* Clear the per-type bookkeeping in the player state. */
    switch(avctx->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        is->audio_st = NULL;
        is->audio_stream = -1;
        break;
    case AVMEDIA_TYPE_VIDEO:
        is->video_st = NULL;
        is->video_stream = -1;
        break;
    case AVMEDIA_TYPE_SUBTITLE:
        is->subtitle_st = NULL;
        is->subtitle_stream = -1;
        break;
    default:
        break;
    }
}
| 1threat
|
Handling unknown values for label encoding : <p>How can I handle unknown values for label encoding in sk-learn?
The label encoder will only blow up with an exception that new labels were detected.</p>
<p>What I want is the <strong>encoding of categorical variables</strong> via <strong>one-hot</strong>-encoder. However, sk-learn does not support strings for that. So I used a label encoder on each column. </p>
<p>My problem is that in my cross-validation step of the pipeline unknown labels show up.
The basic one-hot-encoder would have the option to ignore such cases.
An apriori <code>pandas.getDummies /cat.codes</code> is not sufficient as the pipeline should work with real-life, fresh incoming data which might contain unknown labels as well.</p>
<p>Would it be possible to use a <code>CountVectorizer</code> for this purpose?</p>
| 0debug
|
/* Decode one DV video segment (five compressed macroblocks) from
 * @buf_ptr1 into the current picture of @s.
 *
 * DV entropy coding lets a block's AC coefficients spill into unused
 * space of later blocks, so decoding requires three passes:
 *   pass 1: decode each block's own data; bits left over in a macroblock
 *           are collected in mb_bit_buffer;
 *   pass 2: hand a macroblock's leftover bits to its still-incomplete
 *           blocks; anything remaining goes to vs_bit_buffer;
 *   pass 3: distribute the remaining bits across the whole segment.
 * Finally the IDCT is applied and the blocks are written to the picture
 * planes at the macroblock positions read from @mb_pos_ptr.
 */
static inline void dv_decode_video_segment(DVVideoContext *s,
                                           const uint8_t *buf_ptr1,
                                           const uint16_t *mb_pos_ptr)
{
    int quant, dc, dct_mode, class1, j;
    int mb_index, mb_x, mb_y, v, last_index;
    int y_stride, i;
    DCTELEM *block, *block1;
    int c_offset;
    uint8_t *y_ptr;
    const uint8_t *buf_ptr;
    PutBitContext pb, vs_pb;
    GetBitContext gb;
    BlockInfo mb_data[5 * DV_MAX_BPM], *mb, *mb1;
    DECLARE_ALIGNED_16(DCTELEM, sblock[5*DV_MAX_BPM][64]);
    DECLARE_ALIGNED_8(uint8_t, mb_bit_buffer[80 + 4]);     /* +4 bytes of slack for the bit reader */
    DECLARE_ALIGNED_8(uint8_t, vs_bit_buffer[5 * 80 + 4]); /* likewise */
    const int log2_blocksize= 3-s->avctx->lowres;          /* 8x8 blocks, halved per lowres step */
    int is_field_mode[5];
    assert((((int)mb_bit_buffer)&7)==0);
    assert((((int)vs_bit_buffer)&7)==0);
    memset(sblock, 0, sizeof(sblock));
    /* pass 1: read DC and AC coefficients of each block */
    buf_ptr = buf_ptr1;
    block1 = &sblock[0][0];
    mb1 = mb_data;
    init_put_bits(&vs_pb, vs_bit_buffer, 5 * 80);
    for(mb_index = 0; mb_index < 5; mb_index++, mb1 += s->sys->bpm, block1 += s->sys->bpm * 64) {
        /* skip the 3-byte macroblock header and pick up the quantizer */
        quant = buf_ptr[3] & 0x0f;
        buf_ptr += 4;
        init_put_bits(&pb, mb_bit_buffer, 80);
        mb = mb1;
        block = block1;
        is_field_mode[mb_index] = 0;
        for(j = 0;j < s->sys->bpm; j++) {
            last_index = s->sys->block_sizes[j];
            init_get_bits(&gb, buf_ptr, last_index);
            /* fixed-size block header: DC, DCT mode flag, class */
            dc = get_sbits(&gb, 9);
            dct_mode = get_bits1(&gb);
            class1 = get_bits(&gb, 2);
            if (DV_PROFILE_IS_HD(s->sys)) {
                mb->idct_put = s->idct_put[0];
                mb->scan_table = s->dv_zigzag[0];
                /* NOTE(review): '&' combines a value shifted left by 1
                 * (low bit always 0) with a 0/1 boolean, so this index is
                 * always 0; '|' may have been intended -- confirm against
                 * the DV100 factor-table layout before changing. */
                mb->factor_table = s->dv100_idct_factor[((s->sys->height == 720)<<1)&(j < 4)][class1][quant];
                is_field_mode[mb_index] |= !j && dct_mode;
            } else {
                mb->idct_put = s->idct_put[dct_mode && log2_blocksize==3];
                mb->scan_table = s->dv_zigzag[dct_mode];
                mb->factor_table = s->dv_idct_factor[class1 == 3][dct_mode]
                    [quant + dv_quant_offset[class1]];
            }
            /* scale the DC and bias it by 1024 so coefficient values fit
             * the unsigned-output IDCT */
            dc = dc << 2;
            dc += 1024;
            block[0] = dc;
            buf_ptr += last_index >> 3;
            mb->pos = 0;
            mb->partial_bit_count = 0;
#ifdef VLC_DEBUG
            printf("MB block: %d, %d ", mb_index, j);
#endif
            dv_decode_ac(&gb, mb, block);
            /* block fully decoded: spill its remaining bits to the
             * macroblock-local buffer for pass 2 */
            if (mb->pos >= 64)
                bit_copy(&pb, &gb);
            block += 64;
            mb++;
        }
        /* pass 2: redistribute leftover bits inside the macroblock */
#ifdef VLC_DEBUG
        printf("***pass 2 size=%d MB#=%d\n", put_bits_count(&pb), mb_index);
#endif
        block = block1;
        mb = mb1;
        init_get_bits(&gb, mb_bit_buffer, put_bits_count(&pb));
        flush_put_bits(&pb);
        for(j = 0;j < s->sys->bpm; j++, block += 64, mb++) {
            if (mb->pos < 64 && get_bits_left(&gb) > 0) {
                dv_decode_ac(&gb, mb, block);
                /* still incomplete: the buffer ran dry, stop here */
                if (mb->pos < 64)
                    break;
            }
        }
        /* all blocks finished: remaining bits go to the segment level */
        if (j >= s->sys->bpm)
            bit_copy(&vs_pb, &gb);
    }
    /* pass 3: redistribute what is left over the whole segment */
#ifdef VLC_DEBUG
    printf("***pass 3 size=%d\n", put_bits_count(&vs_pb));
#endif
    block = &sblock[0][0];
    mb = mb_data;
    init_get_bits(&gb, vs_bit_buffer, put_bits_count(&vs_pb));
    flush_put_bits(&vs_pb);
    for(mb_index = 0; mb_index < 5; mb_index++) {
        for(j = 0;j < s->sys->bpm; j++) {
            if (mb->pos < 64) {
#ifdef VLC_DEBUG
                printf("start %d:%d\n", mb_index, j);
#endif
                dv_decode_ac(&gb, mb, block);
            }
            if (mb->pos >= 64 && mb->pos < 127)
                av_log(NULL, AV_LOG_ERROR, "AC EOB marker is absent pos=%d\n", mb->pos);
            block += 64;
            mb++;
        }
    }
    /* apply IDCT and place the decoded blocks into the picture */
    block = &sblock[0][0];
    mb = mb_data;
    for(mb_index = 0; mb_index < 5; mb_index++) {
        v = *mb_pos_ptr++;
        mb_x = v & 0xff;
        mb_y = v >> 8;
        /* 720-line material: remap rows depending on a header bit in
         * s->buf[1] (NOTE(review): presumably field/frame related --
         * confirm against the DV100 spec) */
        if (s->sys->height == 720 && !(s->buf[1]&0x0C)) {
            mb_y -= (mb_y>17)?18:-72;
        }
        /* luma: choose the stride used to step between block rows */
        if ((s->sys->pix_fmt == PIX_FMT_YUV420P) ||
            (s->sys->pix_fmt == PIX_FMT_YUV411P && mb_x >= (704 / 8)) ||
            (s->sys->height >= 720 && mb_y != 134)) {
            y_stride = (s->picture.linesize[0]<<((!is_field_mode[mb_index])*log2_blocksize)) - (2<<log2_blocksize);
        } else {
            y_stride = 0;
        }
        y_ptr = s->picture.data[0] + ((mb_y * s->picture.linesize[0] + mb_x)<<log2_blocksize);
        for(j = 0; j < 2; j++, y_ptr += y_stride) {
            for (i=0; i<2; i++, block += 64, mb++, y_ptr += (1<<log2_blocksize))
                /* 720-wide 4:2:2: second luma block of the pair is not
                 * written; rewind the pointer instead */
                if (s->sys->pix_fmt == PIX_FMT_YUV422P && s->sys->width == 720 && i)
                    y_ptr -= (1<<log2_blocksize);
                else
                    mb->idct_put(y_ptr, s->picture.linesize[0]<<is_field_mode[mb_index], block);
        }
        /* chroma: planes 1 and 2 share the same offset computation */
        c_offset = (((mb_y>>(s->sys->pix_fmt == PIX_FMT_YUV420P)) * s->picture.linesize[1] +
                     (mb_x>>((s->sys->pix_fmt == PIX_FMT_YUV411P)?2:1)))<<log2_blocksize);
        for(j=2; j; j--) {
            uint8_t *c_ptr = s->picture.data[j] + c_offset;
            if (s->sys->pix_fmt == PIX_FMT_YUV411P && mb_x >= (704 / 8)) {
                /* 4:1:1 right edge: the chroma block is written as two
                 * half-width halves stacked vertically */
                uint64_t aligned_pixels[64/8];
                uint8_t *pixels = (uint8_t*)aligned_pixels;
                uint8_t *c_ptr1, *ptr1;
                int x, y;
                mb->idct_put(pixels, 8, block);
                for(y = 0; y < (1<<log2_blocksize); y++, c_ptr += s->picture.linesize[j], pixels += 8) {
                    ptr1= pixels + (1<<(log2_blocksize-1));
                    c_ptr1 = c_ptr + (s->picture.linesize[j]<<log2_blocksize);
                    for(x=0; x < (1<<(log2_blocksize-1)); x++) {
                        c_ptr[x]= pixels[x];
                        c_ptr1[x]= ptr1[x];
                    }
                }
                block += 64; mb++;
            } else {
                y_stride = (mb_y == 134) ? (1<<log2_blocksize) :
                    s->picture.linesize[j]<<((!is_field_mode[mb_index])*log2_blocksize);
                for (i=0; i<(1<<(s->sys->bpm==8)); i++, block += 64, mb++, c_ptr += y_stride)
                    mb->idct_put(c_ptr, s->picture.linesize[j]<<is_field_mode[mb_index], block);
            }
        }
    }
}
| 1threat
|
Python list not appending : <p>I've been following a tutorial exactly but my list isn't appending--I get the error, "AttributeError: 'list' object attribute 'append' is read-only."</p>
<p>My code is:</p>
<pre><code>mylist = [1,2,3]
mylist.append = (4)
</code></pre>
<p>Thank you in advance.</p>
| 0debug
|
/* Emit TCG for an Alpha floating-point conditional move (FCMOVxx):
 * if cond(fr[ra]) then fr[rc] = fr[rb].
 *
 * @func is the helper evaluating the condition on fr[ra] (fr31 reads as
 * zero); when the helper's result is non-zero, fr[rb] (or zero for fr31)
 * is copied into fr[rc].  Writes to fr31 are architectural no-ops and are
 * discarded up front.
 *
 * Fixes vs. the previous revision:
 *  - the value moved on a true condition is fr[rb], not fr[ra]
 *    (FCMOV copies the *source* register rb into rc);
 *  - the first tcg_temp_new() result is no longer leaked by being
 *    unconditionally overwritten in both branches.
 */
static always_inline void gen_fcmov (void *func,
                                     int ra, int rb, int rc)
{
    int l1;
    TCGv tmp;

    if (unlikely(rc == 31))
        return;

    l1 = gen_new_label();
    if (ra != 31) {
        tmp = tcg_temp_new(TCG_TYPE_I64);
        tcg_gen_helper_1_1(func, tmp, cpu_fir[ra]);
    } else {
        tmp = tcg_const_i64(0);
        tcg_gen_helper_1_1(func, tmp, tmp);
    }
    /* Condition false: skip the move. */
    tcg_gen_brcondi_i64(TCG_COND_EQ, tmp, 0, l1);
    if (rb != 31)
        tcg_gen_mov_i64(cpu_fir[rc], cpu_fir[rb]);
    else
        tcg_gen_movi_i64(cpu_fir[rc], 0);
    gen_set_label(l1);
}
| 1threat
|
Why can't I use "return" in lambda function in python? : <p>This does not work:</p>
<pre><code>print((lambda : return None)())
</code></pre>
<p>But this does:</p>
<pre><code>print((lambda : None)())
</code></pre>
<p>Why?</p>
| 0debug
|
Q: Batch script for automatic file swapping : I have tool in java that can convert values in xml files into 3d models (using transformation matrix with some textures).
To run it I'm using very simple batch script :`java.exe -cp path/to/tool.jar;path/to/3dlib.jar;path/to/textures.jar -Dsomejavaoptions -Dappdir=directory tool.some.mainclass file.xml file.bin
PAUSE`
If I will place this .bat file in my xml folder it will do the job + eventually show in windows shell whats wrong (file not exist, wrong attributes, missing contents etc)
Typing filenames manually in the .bat file is fine for about 10 files, but I have about 10,000 to convert this way.
all .xml files are in xml folder and file names are numeric from 1 to 10000.
How to make this process more automatic? I tried to call file.bat with parameters from shell but I think I need some work with arguments and also script which will execute other .bat file with parameters every 10000 times.
| 0debug
|
/* MMIO write handler for the current CPU's private timer: dispatches the
 * access to the timer block owned by the CPU performing the write. */
static void arm_thistimer_write(void *opaque, target_phys_addr_t addr,
                                uint64_t value, unsigned size)
{
    arm_mptimer_state *state = opaque;
    int cpu = get_current_cpu(state);

    timerblock_write(&state->timerblock[cpu * 2], addr, value, size);
}
| 1threat
|
/* Copy an sz x sz block from the previous frame, displaced by the motion
 * vector (deltax, deltay), to position (x, y) in the current frame.
 * Out-of-bounds vectors and a missing reference frame are logged and the
 * copy is skipped. */
static inline void apply_motion_generic(RoqContext *ri, int x, int y, int deltax,
                                        int deltay, int sz)
{
    int plane;
    int src_x = x + deltax;
    int src_y = y + deltay;

    if (src_x < 0 || src_x > ri->width - sz ||
        src_y < 0 || src_y > ri->height - sz) {
        av_log(ri->avctx, AV_LOG_ERROR, "motion vector out of bounds: MV = (%d, %d), boundaries = (0, 0, %d, %d)\n",
               src_x, src_y, ri->width, ri->height);
        return;
    }

    if (ri->last_frame->data[0] == NULL) {
        av_log(ri->avctx, AV_LOG_ERROR, "Invalid decode type. Invalid header?\n");
        return;
    }

    for (plane = 0; plane < 3; plane++) {
        int dst_stride = ri->current_frame->linesize[plane];
        int src_stride = ri->last_frame ->linesize[plane];

        block_copy(ri->current_frame->data[plane] + y * dst_stride + x,
                   ri->last_frame->data[plane] + src_y * src_stride + src_x,
                   dst_stride, src_stride, sz);
    }
}
| 1threat
|
Oracle : Fetch data for 31 Dec and Today's date in PL/SQL : I'm new to PL/SQL and have a requirement.
I have a table into which data is populated on a Daily Basis with date.
Eg:
Col1 Col2 Date
1>>100>>>>10/10/2017
2>>100>>>>9/10/2017
3>>110>>>>8/10/2017
4>>120>>>>7/10/2017
5>>100>>>>6/10/2017
6>>100>>>>5/10/2017
7>>100>>>>31/12/2016
8>>100>>>>30/12/2016
9>>110>>>>29/12/2016
10>>120>>>>31/12/2015
11>>100>>>>30/12/2015
12>>100>>>>29/12/2015
13>>100>>>>31/12/2014
14>>100>>>>30/12/2014
15>>110>>>>29/12/2014
***My requirement is I need to select the data for 31st Dec of last 5 Years and also the data for Today.***
I tried using this query
> "select floor(months_between(date '2016-12-31', date '2017-10-10')
> /12) from dual; "
, but could'nt figure the final logic.
Please help me to create the query for the same.
Thanks in Advance. :)
| 0debug
|
how to fix this error in my angular 4 app? : [I used this code in my component `<button type="button" class="btn btn-primary "(click) ="toggle()" >Create</button>
`
and add this to app.component.html `<app-table *ngif="showFirst"></app-table>
<app-form *ngif="!showFirst"></app-form>`
][1]
[1]: https://i.stack.imgur.com/eR44y.png
| 0debug
|
/* Convert one row of @width packed BGR24 pixels (@src, 3 bytes/pixel in
 * B,G,R byte order) into 8-bit luma samples written to @dst.
 *
 * With COMPILE_TEMPLATE_MMX the work is delegated to the MMX variant;
 * otherwise scalar fixed-point Y = (RY*r + GY*g + BY*b + bias) >> SHIFT
 * is used.  The bias (33 << (RGB2YUV_SHIFT-1)) equals 16.5 in the fixed
 * point scale: the +16 luma offset plus 0.5 for rounding.
 * @unused is ignored (present for a uniform converter signature).
 */
static inline void RENAME(bgr24ToY)(uint8_t *dst, const uint8_t *src, long width, uint32_t *unused)
{
#if COMPILE_TEMPLATE_MMX
    RENAME(bgr24ToY_mmx)(dst, src, width, PIX_FMT_BGR24);
#else
    int i;
    for (i=0; i<width; i++) {
        int b= src[i*3+0];
        int g= src[i*3+1];
        int r= src[i*3+2];
        dst[i]= ((RY*r + GY*g + BY*b + (33<<(RGB2YUV_SHIFT-1)))>>RGB2YUV_SHIFT);
    }
#endif
}
| 1threat
|
If i click on Recyclerview then how to go for an new activity? : public void onClick(View v) {
Intent i = new Intent(context.getApplicationContext(),Watch.class);
startactivity(i);
}
});
| 0debug
|
How to get all values for a key from json using JAVA : I am trying to extra value for `avatar` from all different node. My json looks like this
{
"page": 1,
"per_page": 3,
"total": 12,
"avatar":"https://s3.amazonaws.com/uifaces/faces/twitter/olegpogodaev/128.jpg",
"total_pages": 4,
"data": [
{
"id": 1,
"first_name": "George",
"last_name": "Bluth",
"avatar": "https://s3.amazonaws.com/uifaces/faces/twitter/calebogden/128.jpg"
},
{
"id": 2,
"first_name": "Janet",
"last_name": "Weaver",
"avatar": "https://s3.amazonaws.com/uifaces/faces/twitter/josephstein/128.jpg"
},
{
"id": 3,
"first_name": "Emma",
"last_name": "Wong",
"avatar": "https://s3.amazonaws.com/uifaces/faces/twitter/olegpogodaev/128.jpg"
}
],
"user":{
"id": 3,
"first_name": "Emma",
"last_name": "Wong",
"avatar": "https://s3.amazonaws.com/uifaces/faces/twitter/olegpogodaev/128.jpg"
}
}
| 0debug
|
C# Switch Statement with multiple ifs to Calculate a range of numbers : <p>I am new to C# and am making a windows form in .net and am using the following switch statement where I am calculating the the BMI of candidates. After testing this program it will execute the first case, but then the following cases dont work. </p>
<p>Thank you for your help.
-Mike</p>
<p><em>Where SwitchFinal is the calculated BMI, and Age being a variable holding the user inputed age</em></p>
<pre><code> int switchvar = 1;
switch (switchvar)
{
case 1:
if (Age >= 17 && Age < 21 && SwitchFinal < 20)
{
MessageBox.Show("Candidate is Eligible!");
}
else
{
MessageBox.Show("Candidate is Not Eligible");
}
break;
case 2:
if (Age >= 21 && Age < 28 && SwitchFinal < 22)
{
MessageBox.Show("Candidate is Eligible!");
}
else
{
MessageBox.Show("Candidate is Not Eligible");
}
break;
case 3:
if (Age >= 28 && Age <40 && SwitchFinal < 24)
{
MessageBox.Show("Candidate is Eligible!");
}
else
{
MessageBox.Show("Candidate is Not Eligible");
}
break;
case 4:
if (Age >= 40 && SwitchFinal < 24)
{
MessageBox.Show("Candidate is Eligible!");
}
else
{
MessageBox.Show("Candidate is Not Eligible");
}
break;
</code></pre>
| 0debug
|
Django - How to filter by date with Django Rest Framework? : <p>I have some model with a timestamp field:</p>
<p>models.py</p>
<pre><code>class Event(models.Model):
event_type = models.CharField(
max_length=100,
choices=EVENT_TYPE_CHOICES,
verbose_name=_("Event Type")
)
event_model = models.CharField(
max_length=100,
choices=EVENT_MODEL_CHOICES,
verbose_name=_("Event Model")
)
timestamp = models.DateTimeField(auto_now=True, verbose_name=_("Timestamp"))
</code></pre>
<p>I'm then using Django-rest-framework to create an API endpoint for this class, with django-filter providing a filtering functionality as follows:</p>
<pre><code>from .models import Event
from .serializers import EventSerializer
from rest_framework import viewsets, filters
from rest_framework import renderers
from rest_framework_csv import renderers as csv_renderers
class EventsView(viewsets.ReadOnlyModelViewSet):
"""
A read only view that returns all audit events in JSON or CSV.
"""
queryset = Event.objects.all()
renderer_classes = (csv_renderers.CSVRenderer, renderers.JSONRenderer)
serializer_class = EventSerializer
filter_backends = (filters.DjangoFilterBackend,)
filter_fields = ('event_type', 'event_model', 'timestamp')
</code></pre>
<p>with the following settings:</p>
<pre><code>REST_FRAMEWORK = {
'DEFAULT_FILTER_BACKENDS': ('rest_framework.filters.DjangoFilterBackend',),
}
</code></pre>
<p>I'm able to filter by <code>event_type</code> and <code>event_model</code>, but am having trouble filtering by the timestamp field. Essentially, I want to make an API call that equates to the following:</p>
<pre><code>AuditEvent.objects.filter(timestamp__gte='2016-01-02 00:00+0000')
</code></pre>
<p>which I would expect I could do as follows:</p>
<pre><code>response = self.client.get("/api/v1/events/?timestamp=2016-01-02 00:00+0000", **{'HTTP_ACCEPT': 'application/json'})
</code></pre>
<p>though that is incorect. How do I make an API call that returns all objects with a timestamp greater than or equal to a certain value?</p>
| 0debug
|
How to set up Google Analytics through Google Tag Manager for Next-Js? : <p>formerly I was using react-ga npm module to insert google analytics in my next js app. and It was simply like this:</p>
<pre><code>import ReactGA from 'react-ga'
export const initGA = () => {
ReactGA.initialize('UA-*******-*', {
titleCase: false
})
}
export const logPageView = () => {
if (window.location.href.split('?')[1]) {
ReactGA.set({page: window.location.pathname + '?' + window.location.href.split('?')[1]})
ReactGA.pageview(window.location.pathname + '?' + window.location.href.split('?')[1])
} else {
ReactGA.set({page: window.location.pathname})
ReactGA.pageview(window.location.pathname)
}
}
</code></pre>
<p>and then I was calling logPageView function in my header(that was inserted to every page of my app) like this:</p>
<pre><code> componentDidMount () {
if (!window.GA_INITIALIZED) {
initGA()
window.GA_INITIALIZED = true
}
logPageView()
}
componentWillReceiveProps () {
if (!window.GA_INITIALIZED) {
initGA()
window.GA_INITIALIZED = true
}
logPageView()
}
</code></pre>
<p>now I want to use Google Tag Manager to handle Analytics page view . How could I do this?</p>
| 0debug
|
Best way to import Observable from rxjs : <p>In my angular 2 app I have a service that uses the <code>Observable</code> class from the <code>rxjs</code> library.</p>
<pre><code>import { Observable } from 'rxjs';
</code></pre>
<p>At the moment I am just using <code>Observable</code> so that I can use the <code>toPromise()</code> function.</p>
<p>I read in another StackOverflow question somewhere that importing in this way and also importing from <code>rxjs/Rx</code> will import a whole lot of unnecessary stuff from the <code>rxjs</code> library that will increase the page load times and/or the code base.</p>
<p>My question is, what is the best way to import <code>Observable</code> so I can use the <code>toPromise()</code> function without having to import everything else?</p>
| 0debug
|
/* Periodic (100 ms) rate-limit tick: on a pending error the buffered file
 * is closed; otherwise the timer is re-armed and -- unless output is
 * frozen -- the transfer counter is reset, buffered data is flushed and
 * the producer is notified that it may send more. */
static void buffered_rate_tick(void *opaque)
{
    QEMUFileBuffered *buffered = opaque;

    if (buffered->has_error) {
        buffered_close(buffered);
        return;
    }

    /* Re-arm before doing any work so the tick keeps firing. */
    qemu_mod_timer(buffered->timer, qemu_get_clock(rt_clock) + 100);

    if (buffered->freeze_output) {
        return;
    }

    buffered->bytes_xfer = 0;
    buffered_flush(buffered);
    buffered->put_ready(buffered->opaque);
}
| 1threat
|
How to bind dropdown list in MVC from sql database? : i don't know how to make view from this code to show data in dropdown list.?
public class Cities
{
public int cityid { get; set; }
public string Description { get; set; }
}
public ActionResult Dropdownlist()
{
List<Citites> cityname= new List<Citites>();
string constring = ConfigurationManager.ConnectionStrings["dbx"].ConnectionString;
SqlConnection con = new SqlConnection(constring);
SqlCommand cmd = new SqlCommand("select Cityid, Description FROM Cities", con);
con.Open();
SqlDataReader rdr = cmd.ExecuteReader();
while (rdr.Read())
{
Citites ci = new Citites();
ci.cityid = Convert.ToInt32(rdr["Cityid"]);
ci.Description = rdr[1].ToString();
cityname.Add(ci);
}
return View(cityname);
}
| 0debug
|
How do I write a null (no-op) contextmanager in Python? : <p>Sometimes I need a dummy context manager that does nothing. It can then be used as a stand-in for a more useful, but optional, context manager. For example:</p>
<pre><code>ctx_mgr = <meaningfulContextManager> if <condition> else <nullContextManager>
with ctx_mgr:
...
</code></pre>
<p>How do I define such a trivial, empty context manager? Does the Python library offer one off the shelf?</p>
<p>How about cases where we want the context to be used with an <code>as</code> clause?</p>
<pre><code>with ctx_mgr as resource:
<operations on resource>
</code></pre>
| 0debug
|
document.write('<script src="evil.js"></script>');
| 1threat
|
How to identify an optimal subsample from a data set with missing values in MATLAB : <p>I would like to identify the largest possible contiguous subsample of a large data set. My data set consists of roughly 15,000 financial time series of up to 360 periods in length. I have imported the data into MATLAB as a 360 by 15,000 numerical matrix.</p>
<p><a href="https://i.stack.imgur.com/qOnzk.png" rel="nofollow noreferrer"><img src="https://i.stack.imgur.com/qOnzk.png" alt="enter image description here"></a></p>
<p>This matrix contains a lot of NaNs due to some of the financial data not being available for the entire period. In the illustration, NaN entries are shown in dark blue, and non-NaN entries appear in light blue. It is these light blue non-NaN entries which I would like to ideally combine into an optimal subsample.</p>
<p>I would like to find the largest possible contiguous block of data that is contained in my matrix, while ensuring that my matrix contains a sufficient number of periods.</p>
<p>In a first step I would like to sort my matrix from left to right in descending order by the number of non-NaN entries in each column, that is, I would like to sort by the vector obtained by entering <code>sum(~isnan(data),1)</code>.</p>
<p>In a second step I would like to find the sub-array of my data matrix that is at least 72 entries along the first dimension and is otherwise as large as possible, measured by the total number of entries.</p>
<p>What is the best way to implement this?</p>
| 0debug
|
/* Handle a host SIGSEGV caused by a guest memory access (user-mode
 * emulation).
 *
 * Returns 1 if the fault was handled (self-modifying-code page was
 * unprotected, or the MMU fault path raised a guest exception -- the
 * latter does not return), 0 if the caller should treat it as a genuine
 * host fault.
 */
static inline int handle_cpu_signal(uintptr_t pc, unsigned long address,
                                    int is_write, sigset_t *old_set)
{
    CPUState *cpu;
    CPUClass *cc;
    int ret;
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* A write fault may be our own write-protection of pages that contain
     * translated code. */
    if (is_write && h2g_valid(address)) {
        switch (page_unprotect(h2g(address), pc)) {
        case 0:
            /* Not a TB-protection fault: fall through to the MMU path. */
            break;
        case 1:
            /* Page unprotected; retry the faulting access. */
            return 1;
        case 2:
            /* The currently executing TB was invalidated; leave via the
             * signal-safe exit path.  Does not return. */
            cpu_exit_tb_from_sighandler(current_cpu, old_set);
            g_assert_not_reached();
        default:
            g_assert_not_reached();
        }
    }
    /* Translate host address to guest address without bounds checking;
     * the MMU fault handler validates it. */
    address = h2g_nocheck(address);
    cpu = current_cpu;
    cc = CPU_GET_CLASS(cpu);
    g_assert(cc->handle_mmu_fault);
    ret = cc->handle_mmu_fault(cpu, address, is_write, MMU_USER_IDX);
    if (ret < 0) {
        /* Not a guest-visible fault: let the caller handle the signal. */
        return 0;
    }
    if (ret == 0) {
        /* Handled by the MMU layer; retry the access. */
        return 1;
    }
    /* Guest exception: rewind CPU state to the faulting instruction,
     * restore the signal mask and longjmp back into the CPU loop.
     * cpu_loop_exit() does not return. */
    cpu_restore_state(cpu, pc + GETPC_ADJ);
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit(cpu);
    /* Never reached. */
    return 1;
}
| 1threat
|
/*
 * Open a qcow2 image: read and validate the on-disk header, load the L1
 * table and refcount structures, process header extensions, the backing
 * file name and the snapshot table, then apply runtime options from
 * @options (lazy refcounts, discard passthrough, overlap checks).
 *
 * Returns 0 on success or a negative errno; on failure, partially
 * initialised state is torn down under the "fail" label and *errp is set.
 */
static int qcow2_open(BlockDriverState *bs, QDict *options, int flags,
                      Error **errp)
{
    BDRVQcowState *s = bs->opaque;
    int len, i, ret = 0;
    QCowHeader header;
    QemuOpts *opts;
    Error *local_err = NULL;
    uint64_t ext_end;
    uint64_t l1_vm_state_index;
    const char *opt_overlap_check;
    int overlap_check_template = 0;

    /* Read the header and byteswap the fields common to v2 and v3. */
    ret = bdrv_pread(bs->file, 0, &header, sizeof(header));
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Could not read qcow2 header");
        goto fail;
    }
    be32_to_cpus(&header.magic);
    be32_to_cpus(&header.version);
    be64_to_cpus(&header.backing_file_offset);
    be32_to_cpus(&header.backing_file_size);
    be64_to_cpus(&header.size);
    be32_to_cpus(&header.cluster_bits);
    be32_to_cpus(&header.crypt_method);
    be64_to_cpus(&header.l1_table_offset);
    be32_to_cpus(&header.l1_size);
    be64_to_cpus(&header.refcount_table_offset);
    be32_to_cpus(&header.refcount_table_clusters);
    be64_to_cpus(&header.snapshots_offset);
    be32_to_cpus(&header.nb_snapshots);

    if (header.magic != QCOW_MAGIC) {
        error_setg(errp, "Image is not in qcow2 format");
        ret = -EMEDIUMTYPE;
        goto fail;
    }
    if (header.version < 2 || header.version > 3) {
        report_unsupported(bs, errp, "QCOW version %d", header.version);
        ret = -ENOTSUP;
        goto fail;
    }
    s->qcow_version = header.version;

    /* v2 images have no v3-only fields: synthesize their fixed defaults.
     * Otherwise byteswap the v3 fields that were actually read. */
    if (header.version == 2) {
        header.incompatible_features = 0;
        header.compatible_features = 0;
        header.autoclear_features = 0;
        header.refcount_order = 4;
        header.header_length = 72;
    } else {
        be64_to_cpus(&header.incompatible_features);
        be64_to_cpus(&header.compatible_features);
        be64_to_cpus(&header.autoclear_features);
        be32_to_cpus(&header.refcount_order);
        be32_to_cpus(&header.header_length);
    }

    /* Preserve any header bytes we do not understand so they can be
     * written back unchanged on header updates. */
    if (header.header_length > sizeof(header)) {
        s->unknown_header_fields_size = header.header_length - sizeof(header);
        s->unknown_header_fields = g_malloc(s->unknown_header_fields_size);
        ret = bdrv_pread(bs->file, sizeof(header), s->unknown_header_fields,
                         s->unknown_header_fields_size);
        if (ret < 0) {
            error_setg_errno(errp, -ret, "Could not read unknown qcow2 header "
                             "fields");
            goto fail;
        }
    }

    /* Header extensions end where the backing file name starts (or at the
     * end of the first cluster when there is no backing file). */
    if (header.backing_file_offset) {
        ext_end = header.backing_file_offset;
    } else {
        ext_end = 1 << header.cluster_bits;
    }

    s->incompatible_features = header.incompatible_features;
    s->compatible_features = header.compatible_features;
    s->autoclear_features = header.autoclear_features;

    /* Unknown incompatible feature bits mean we must refuse to open; try
     * to name the offending bits via the feature-name table extension. */
    if (s->incompatible_features & ~QCOW2_INCOMPAT_MASK) {
        void *feature_table = NULL;
        qcow2_read_extensions(bs, header.header_length, ext_end,
                              &feature_table, NULL);
        report_unsupported_feature(bs, errp, feature_table,
                                   s->incompatible_features &
                                   ~QCOW2_INCOMPAT_MASK);
        ret = -ENOTSUP;
        goto fail;
    }

    /* An image marked corrupt may only be opened read-only or for check. */
    if (s->incompatible_features & QCOW2_INCOMPAT_CORRUPT) {
        if ((flags & BDRV_O_RDWR) && !(flags & BDRV_O_CHECK)) {
            error_setg(errp, "qcow2: Image is corrupt; cannot be opened "
                       "read/write");
            ret = -EACCES;
            goto fail;
        }
    }

    /* Only 16-bit (order 4) refcounts are supported by this code. */
    if (header.refcount_order != 4) {
        report_unsupported(bs, errp, "%d bit reference counts",
                           1 << header.refcount_order);
        ret = -ENOTSUP;
        goto fail;
    }
    s->refcount_order = header.refcount_order;

    if (header.cluster_bits < MIN_CLUSTER_BITS ||
        header.cluster_bits > MAX_CLUSTER_BITS) {
        error_setg(errp, "Unsupported cluster size: 2^%i", header.cluster_bits);
        ret = -EINVAL;
        goto fail;
    }
    if (header.crypt_method > QCOW_CRYPT_AES) {
        error_setg(errp, "Unsupported encryption method: %i",
                   header.crypt_method);
        ret = -EINVAL;
        goto fail;
    }
    s->crypt_method_header = header.crypt_method;
    if (s->crypt_method_header) {
        bs->encrypted = 1;
    }

    /* Derive all cluster/L2/compressed-cluster geometry from cluster_bits. */
    s->cluster_bits = header.cluster_bits;
    s->cluster_size = 1 << s->cluster_bits;
    s->cluster_sectors = 1 << (s->cluster_bits - 9);
    s->l2_bits = s->cluster_bits - 3;
    s->l2_size = 1 << s->l2_bits;
    bs->total_sectors = header.size / 512;
    s->csize_shift = (62 - (s->cluster_bits - 8));
    s->csize_mask = (1 << (s->cluster_bits - 8)) - 1;
    s->cluster_offset_mask = (1LL << s->csize_shift) - 1;
    s->refcount_table_offset = header.refcount_table_offset;
    s->refcount_table_size =
        header.refcount_table_clusters << (s->cluster_bits - 3);
    s->snapshots_offset = header.snapshots_offset;
    s->nb_snapshots = header.nb_snapshots;

    s->l1_size = header.l1_size;

    /* The guest-visible part of the L1 table must fit in an int and the
     * on-disk L1 must be at least large enough to cover the image size. */
    l1_vm_state_index = size_to_l1(s, header.size);
    if (l1_vm_state_index > INT_MAX) {
        error_setg(errp, "Image is too big");
        ret = -EFBIG;
        goto fail;
    }
    s->l1_vm_state_index = l1_vm_state_index;

    if (s->l1_size < s->l1_vm_state_index) {
        error_setg(errp, "L1 table is too small");
        ret = -EINVAL;
        goto fail;
    }
    s->l1_table_offset = header.l1_table_offset;
    /* Load and byteswap the L1 table. */
    if (s->l1_size > 0) {
        s->l1_table = g_malloc0(
            align_offset(s->l1_size * sizeof(uint64_t), 512));
        ret = bdrv_pread(bs->file, s->l1_table_offset, s->l1_table,
                         s->l1_size * sizeof(uint64_t));
        if (ret < 0) {
            error_setg_errno(errp, -ret, "Could not read L1 table");
            goto fail;
        }
        for(i = 0;i < s->l1_size; i++) {
            be64_to_cpus(&s->l1_table[i]);
        }
    }

    /* Metadata caches and scratch buffers for (de)compression/encryption. */
    s->l2_table_cache = qcow2_cache_create(bs, L2_CACHE_SIZE);
    s->refcount_block_cache = qcow2_cache_create(bs, REFCOUNT_CACHE_SIZE);
    s->cluster_cache = g_malloc(s->cluster_size);
    s->cluster_data = qemu_blockalign(bs, QCOW_MAX_CRYPT_CLUSTERS * s->cluster_size
                                      + 512);
    s->cluster_cache_offset = -1;
    s->flags = flags;

    ret = qcow2_refcount_init(bs);
    if (ret != 0) {
        error_setg_errno(errp, -ret, "Could not initialize refcount handling");
        goto fail;
    }

    QLIST_INIT(&s->cluster_allocs);
    QTAILQ_INIT(&s->discards);

    /* Second pass over the header extensions, now acting on them. */
    if (qcow2_read_extensions(bs, header.header_length, ext_end, NULL,
                              &local_err)) {
        error_propagate(errp, local_err);
        ret = -EINVAL;
        goto fail;
    }

    /* Read the backing file name, truncated to 1023 bytes + NUL. */
    if (header.backing_file_offset != 0) {
        len = header.backing_file_size;
        if (len > 1023) {
            len = 1023;
        }
        ret = bdrv_pread(bs->file, header.backing_file_offset,
                         bs->backing_file, len);
        if (ret < 0) {
            error_setg_errno(errp, -ret, "Could not read backing file name");
            goto fail;
        }
        bs->backing_file[len] = '\0';
    }

    ret = qcow2_read_snapshots(bs);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Could not read snapshots");
        goto fail;
    }

    /* Autoclear feature bits we do not know must be reset on write access. */
    if (!bs->read_only && s->autoclear_features != 0) {
        s->autoclear_features = 0;
        ret = qcow2_update_header(bs);
        if (ret < 0) {
            error_setg_errno(errp, -ret, "Could not update qcow2 header");
            goto fail;
        }
    }

    qemu_co_mutex_init(&s->lock);

    /* A dirty image (unclean shutdown with lazy refcounts) is repaired on
     * open, unless we are only running a check anyway. */
    if (!(flags & BDRV_O_CHECK) && !bs->read_only &&
        (s->incompatible_features & QCOW2_INCOMPAT_DIRTY)) {
        BdrvCheckResult result = {0};
        ret = qcow2_check(bs, &result, BDRV_FIX_ERRORS);
        if (ret < 0) {
            error_setg_errno(errp, -ret, "Could not repair dirty image");
            goto fail;
        }
    }

    /* Parse the driver-specific runtime options. */
    opts = qemu_opts_create(&qcow2_runtime_opts, NULL, 0, &error_abort);
    qemu_opts_absorb_qdict(opts, options, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        ret = -EINVAL;
        goto fail;
    }

    s->use_lazy_refcounts = qemu_opt_get_bool(opts, QCOW2_OPT_LAZY_REFCOUNTS,
        (s->compatible_features & QCOW2_COMPAT_LAZY_REFCOUNTS));

    /* Which discard reasons are passed through to the protocol layer. */
    s->discard_passthrough[QCOW2_DISCARD_NEVER] = false;
    s->discard_passthrough[QCOW2_DISCARD_ALWAYS] = true;
    s->discard_passthrough[QCOW2_DISCARD_REQUEST] =
        qemu_opt_get_bool(opts, QCOW2_OPT_DISCARD_REQUEST,
                          flags & BDRV_O_UNMAP);
    s->discard_passthrough[QCOW2_DISCARD_SNAPSHOT] =
        qemu_opt_get_bool(opts, QCOW2_OPT_DISCARD_SNAPSHOT, true);
    s->discard_passthrough[QCOW2_DISCARD_OTHER] =
        qemu_opt_get_bool(opts, QCOW2_OPT_DISCARD_OTHER, false);

    /* Map the overlap-check option name to a bitmask template... */
    opt_overlap_check = qemu_opt_get(opts, "overlap-check") ?: "cached";
    if (!strcmp(opt_overlap_check, "none")) {
        overlap_check_template = 0;
    } else if (!strcmp(opt_overlap_check, "constant")) {
        overlap_check_template = QCOW2_OL_CONSTANT;
    } else if (!strcmp(opt_overlap_check, "cached")) {
        overlap_check_template = QCOW2_OL_CACHED;
    } else if (!strcmp(opt_overlap_check, "all")) {
        overlap_check_template = QCOW2_OL_ALL;
    } else {
        error_setg(errp, "Unsupported value '%s' for qcow2 option "
                   "'overlap-check'. Allowed are either of the following: "
                   "none, constant, cached, all", opt_overlap_check);
        qemu_opts_del(opts);
        ret = -EINVAL;
        goto fail;
    }

    /* ...then let the per-bit boolean options override the template. */
    s->overlap_check = 0;
    for (i = 0; i < QCOW2_OL_MAX_BITNR; i++) {
        s->overlap_check |=
            qemu_opt_get_bool(opts, overlap_bool_option_names[i],
                              overlap_check_template & (1 << i)) << i;
    }

    qemu_opts_del(opts);

    /* Lazy refcounts are a v3 (compat >= 1.1) feature. */
    if (s->use_lazy_refcounts && s->qcow_version < 3) {
        error_setg(errp, "Lazy refcounts require a qcow2 image with at least "
                   "qemu 1.1 compatibility level");
        ret = -EINVAL;
        goto fail;
    }

#ifdef DEBUG_ALLOC
    {
        BdrvCheckResult result = {0};
        qcow2_check_refcounts(bs, &result, 0);
    }
#endif
    return ret;

fail:
    /* Tear down everything that may have been initialised above. */
    g_free(s->unknown_header_fields);
    cleanup_unknown_header_ext(bs);
    qcow2_free_snapshots(bs);
    qcow2_refcount_close(bs);
    g_free(s->l1_table);
    s->l1_table = NULL;
    if (s->l2_table_cache) {
        qcow2_cache_destroy(bs, s->l2_table_cache);
    }
    g_free(s->cluster_cache);
    qemu_vfree(s->cluster_data);
    return ret;
}
| 1threat
|
Locale.current reporting wrong language on device : <p>I'm trying to format currency values in an iOS app, and I'm using the current Locale settings on the device to use the appropriate currency formatting.</p>
<p>In the simulator, everything seems to run fine: when using <code>currencyFormatter.locale = Locale.current</code>, it takes the right locale settings and prints numbers with the right currency format.</p>
<p>On my iPhone however, which is configured in French with French regional settings, I would expect another format to be used (e.g.: <strong>1 234,56 €</strong>). But it does not work, and seems to use an English formatting style (e.g.: <strong>€1 234,56</strong>).</p>
<p>In fact, if I print the current Locale from my app on the device, it does not return <code>fr_FR</code> as I would expect:</p>
<pre><code>NSLog(Locale.current.identifier)
>>> en_FR
</code></pre>
<p>The region is good but the language is not, though iOS on that device is clearly in French.</p>
<p>Does anyone have any idea what is going on here?</p>
<p>Thanks!</p>
| 0debug
|
/*
 * Emit TCG ops for a conditional branch: compare register ra (or just its
 * low bit when 'mask' is set) against zero with 'cond'; on a taken branch
 * set cpu_pc to pc + (disp16 << 2), otherwise fall through to the next pc.
 * ra == 31 is treated as a constant zero source (NOTE(review): presumably
 * the architectural zero register — confirm against the target's ISA).
 */
static always_inline void gen_bcond (DisasContext *ctx,
                                     TCGCond cond,
                                     int ra, int32_t disp16, int mask)
{
    int l1, l2;

    l1 = gen_new_label();   /* branch-taken target */
    l2 = gen_new_label();   /* join point after setting cpu_pc */
    if (likely(ra != 31)) {
        if (mask) {
            /* Test only bit 0 of the register. */
            TCGv tmp = tcg_temp_new(TCG_TYPE_I64);
            tcg_gen_andi_i64(tmp, cpu_ir[ra], 1);
            tcg_gen_brcondi_i64(cond, tmp, 0, l1);
            tcg_temp_free(tmp);
        } else
            tcg_gen_brcondi_i64(cond, cpu_ir[ra], 0, l1);
    } else {
        /* ra == 31: compare a constant zero so 'cond' still decides. */
        TCGv tmp = tcg_const_i64(0);
        tcg_gen_brcondi_i64(cond, tmp, 0, l1);
        tcg_temp_free(tmp);
    }
    /* Not taken: continue at the current pc. */
    tcg_gen_movi_i64(cpu_pc, ctx->pc);
    tcg_gen_br(l2);
    gen_set_label(l1);
    /* Taken: displacement is in instruction words, hence << 2. */
    tcg_gen_movi_i64(cpu_pc, ctx->pc + (int64_t)(disp16 << 2));
    gen_set_label(l2);
}
| 1threat
|
How do I require the user to always enter his passphrase/use fingerprint when logging into my Android app? : <p>Given that my app contains very sensitive information, I want to require the user to enter his passphrase whenever he enters the app. </p>
<p>Is there a straightforward way to implement this?</p>
| 0debug
|
/*
 * SH4 translation loop: decode guest instructions starting at tb->pc into
 * TCG ops until the block ends (branch, page boundary, opcode buffer full,
 * singlestep, or max_insns reached). When search_pc is set, also fill the
 * gen_opc_* side tables that map TCG ops back to guest PCs.
 * (The return type of this function lies outside this excerpt.)
 */
gen_intermediate_code_internal(CPUState * env, TranslationBlock * tb,
                               int search_pc)
{
    DisasContext ctx;
    target_ulong pc_start;
    /* NOTE(review): 'static' on a per-translation pointer looks suspicious
     * for reentrancy — confirm this is intentional. */
    static uint16_t *gen_opc_end;
    CPUBreakpoint *bp;
    int i, ii;
    int num_insns;
    int max_insns;

    /* Seed the disassembly context from the TB and current CPU state. */
    pc_start = tb->pc;
    gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
    ctx.pc = pc_start;
    ctx.flags = (uint32_t)tb->flags;
    ctx.bstate = BS_NONE;
    ctx.sr = env->sr;
    ctx.fpscr = env->fpscr;
    ctx.memidx = (env->sr & SR_MD) ? 1 : 0;  /* privileged vs user memory index */
    ctx.delayed_pc = -1;
    ctx.tb = tb;
    ctx.singlestep_enabled = env->singlestep_enabled;
    ctx.features = env->features;
    ctx.has_movcal = (tb->flags & TB_FLAG_PENDING_MOVCA);

#ifdef DEBUG_DISAS
    qemu_log_mask(CPU_LOG_TB_CPU,
                  "------------------------------------------------\n");
    log_cpu_state_mask(CPU_LOG_TB_CPU, env, 0);
#endif

    ii = -1;
    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0)
        max_insns = CF_COUNT_MASK;
    gen_icount_start();
    while (ctx.bstate == BS_NONE && gen_opc_ptr < gen_opc_end) {
        /* Emit a debug exception when a breakpoint sits on this pc. */
        if (unlikely(!TAILQ_EMPTY(&env->breakpoints))) {
            TAILQ_FOREACH(bp, &env->breakpoints, entry) {
                if (ctx.pc == bp->pc) {
                    tcg_gen_movi_i32(cpu_pc, ctx.pc);
                    gen_helper_debug();
                    ctx.bstate = BS_EXCP;
                    break;
                }
            }
        }
        /* Record the op-index -> guest-pc mapping for restore-state. */
        if (search_pc) {
            i = gen_opc_ptr - gen_opc_buf;
            if (ii < i) {
                ii++;
                while (ii < i)
                    gen_opc_instr_start[ii++] = 0;
            }
            gen_opc_pc[ii] = ctx.pc;
            gen_opc_hflags[ii] = ctx.flags;
            gen_opc_instr_start[ii] = 1;
            gen_opc_icount[ii] = num_insns;
        }
        /* Bracket a trailing I/O instruction for icount accuracy. */
        if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
            gen_io_start();
#if 0
        fprintf(stderr, "Loading opcode at address 0x%08x\n", ctx.pc);
        fflush(stderr);
#endif
        /* SH instructions are 16 bits wide. */
        ctx.opcode = lduw_code(ctx.pc);
        decode_opc(&ctx);
        num_insns++;
        ctx.pc += 2;
        if ((ctx.pc & (TARGET_PAGE_SIZE - 1)) == 0)
            break;  /* never cross a page boundary inside one TB */
        if (env->singlestep_enabled)
            break;
        if (num_insns >= max_insns)
            break;
        if (singlestep)
            break;
    }
    if (tb->cflags & CF_LAST_IO)
        gen_io_end();
    if (env->singlestep_enabled) {
        tcg_gen_movi_i32(cpu_pc, ctx.pc);
        gen_helper_debug();
    } else {
        /* Close the block according to how decoding ended. */
        switch (ctx.bstate) {
        case BS_STOP:
        case BS_NONE:
            if (ctx.flags) {
                gen_store_flags(ctx.flags | DELAY_SLOT_CLEARME);
            }
            gen_goto_tb(&ctx, 0, ctx.pc);
            break;
        case BS_EXCP:
            tcg_gen_exit_tb(0);
            break;
        case BS_BRANCH:
        default:
            /* decode_opc already emitted the block exit. */
            break;
        }
    }

    gen_icount_end(tb, num_insns);
    *gen_opc_ptr = INDEX_op_end;
    if (search_pc) {
        /* Pad the instr-start table out to the last emitted op. */
        i = gen_opc_ptr - gen_opc_buf;
        ii++;
        while (ii <= i)
            gen_opc_instr_start[ii++] = 0;
    } else {
        tb->size = ctx.pc - pc_start;
        tb->icount = num_insns;
    }

#ifdef DEBUG_DISAS
#ifdef SH4_DEBUG_DISAS
    qemu_log_mask(CPU_LOG_TB_IN_ASM, "\n");
#endif
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("IN:\n");
        log_target_disas(pc_start, ctx.pc - pc_start, 0);
        qemu_log("\n");
    }
#endif
}
| 1threat
|
NSURL gives nil when Arabic words are passed through a URL : //Even when I percent-encode the string (e.g. with stringByAddingPercentEncoding), it returns something like %2a etc. and saves (??) in the SQL database instead of the Arabic words...
//code
NSString* source=[NSString stringWithFormat:@"http://192.168.227.1/student/Service1.svc/insertTasbeeh/سُبْحَانَ اللّهِ/1000"];
NSURL *url=[NSURL URLWithString:source ];
| 0debug
|
/*
 * Handle the client's VeNCrypt sub-auth selection. If it matches the
 * server's configured sub-auth, acknowledge with 1 and kick off the TLS
 * setup and VeNCrypt handshake; otherwise acknowledge with 0 and fail the
 * client. Always returns 0.
 */
static int protocol_client_vencrypt_auth(VncState *vs, uint8_t *data, size_t len)
{
    int auth = read_u32(data, 0);

    if (auth == vs->vd->subauth) {
        /* Accepted: tell the client, then bring up TLS. */
        VNC_DEBUG("Accepting auth %d, setting up TLS for handshake\n", auth);
        vnc_write_u8(vs, 1);
        vnc_flush(vs);

        if (vnc_tls_client_setup(vs, NEED_X509_AUTH(vs)) < 0) {
            VNC_DEBUG("Failed to setup TLS\n");
            return 0;
        }

        VNC_DEBUG("Start TLS VeNCrypt handshake process\n");
        if (vnc_start_vencrypt_handshake(vs) < 0) {
            VNC_DEBUG("Failed to start TLS handshake\n");
            return 0;
        }
    } else {
        /* Mismatch: tell the client, then drop it with an error. */
        VNC_DEBUG("Rejecting auth %d\n", auth);
        vnc_write_u8(vs, 0);
        vnc_flush(vs);
        vnc_client_error(vs);
    }

    return 0;
}
| 1threat
|
/*
 * Socket-readable callback for a VNC client: pull bytes off the connection
 * through whichever transport applies (SASL SSF, websocket, or plain),
 * then repeatedly dispatch complete protocol messages to the current
 * read_handler while enough input has accumulated.
 */
void vnc_client_read(void *opaque)
{
    VncState *vs = opaque;
    long ret;

#ifdef CONFIG_VNC_SASL
    /* SASL with an active security layer decodes through SASL first. */
    if (vs->sasl.conn && vs->sasl.runSSF)
        ret = vnc_client_read_sasl(vs);
    else
#endif
#ifdef CONFIG_VNC_WS
    /* Websocket framing: -1 means start disconnect, -2 is a protocol error. */
    if (vs->encode_ws) {
        ret = vnc_client_read_ws(vs);
        if (ret == -1) {
            vnc_disconnect_start(vs);
            return;
        } else if (ret == -2) {
            vnc_client_error(vs);
            return;
        }
    } else
#endif
    {
        ret = vnc_client_read_plain(vs);
    }
    /* No new data: finish a pending disconnect if the socket is gone. */
    if (!ret) {
        if (vs->csock == -1)
            vnc_disconnect_finish(vs);
        return;
    }

    /* Dispatch as many complete messages as the input buffer holds. The
     * handler returns 0 to consume 'len' bytes, or the byte count it needs
     * next (stored back into read_handler_expect). */
    while (vs->read_handler && vs->input.offset >= vs->read_handler_expect) {
        size_t len = vs->read_handler_expect;
        int ret;  /* shadows the outer 'ret' deliberately kept unchanged */

        ret = vs->read_handler(vs, vs->input.buffer, len);
        if (vs->csock == -1) {
            vnc_disconnect_finish(vs);
            return;
        }

        if (!ret) {
            buffer_advance(&vs->input, len);
        } else {
            vs->read_handler_expect = ret;
        }
    }
}
| 1threat
|
/*
 * Sector-based synchronous read/write wrapper: packs the caller's flat
 * buffer into a one-element QEMUIOVector and forwards to bdrv_prwv_co()
 * using byte offsets (sector_num shifted by BDRV_SECTOR_BITS).
 * NOTE(review): this excerpt appears truncated — the function's closing
 * brace is missing in this chunk.
 */
static int bdrv_rw_co(BlockDriverState *bs, int64_t sector_num, uint8_t *buf,
                      int nb_sectors, bool is_write, BdrvRequestFlags flags)
{
    QEMUIOVector qiov;
    struct iovec iov = {
        .iov_base = (void *)buf,
        .iov_len = nb_sectors * BDRV_SECTOR_SIZE,
    };

    qemu_iovec_init_external(&qiov, &iov, 1);
    return bdrv_prwv_co(bs, sector_num << BDRV_SECTOR_BITS,
                        &qiov, is_write, flags);
| 1threat
|
Understanding Arraylist is not Thread safe through a java example : I am trying to understand how Arraylist is not thread safe through a java program.Attached is my program.
import java.util.ArrayList;
import java.util.List;
public class class1
{
static List ar=new ArrayList(1);
public static void main(String[] args) throws InstantiationException,
IllegalAccessException, ClassNotFoundException, InterruptedException
{
Thread t1= new Thread()
{
public void run() {
while(true)
{
ar.add(new Object());
}
}
};
Thread t2=new Thread()
{
public void run()
{
while(true)
{
ar=new ArrayList(1);
ar.add(new Object());
ar.add(new Object());
}
}
};
t1.start();
Thread.sleep(100);
t2.start();
}
}
The error i got is:
Exception in thread "Thread-0" java.lang.ArrayIndexOutOfBoundsException: 2
at java.util.ArrayList.add(Unknown Source)
at class1$1.run(class1.java:22)
I understand that the exception is caused by a thread. However, I am not getting a broader picture of how it is actually functioning. Any help would be highly appreciated.
| 0debug
|
/*
 * Bitstream filter: convert H.264 from MP4-style length-prefixed NAL units
 * ("avcC") to the Annex B byte-stream format, inserting SPS/PPS from the
 * codec extradata before IDR slices that lack them in-band.
 *
 * On success *poutbuf/*poutbuf_size hold the converted packet (or alias
 * the input when no conversion is needed) and the return value is >= 0;
 * on failure a negative AVERROR is returned and the output is freed.
 */
static int h264_mp4toannexb_filter(AVBitStreamFilterContext *bsfc,
                                   AVCodecContext *avctx, const char *args,
                                   uint8_t **poutbuf, int *poutbuf_size,
                                   const uint8_t *buf, int buf_size,
                                   int keyframe)
{
    H264BSFContext *ctx = bsfc->priv_data;
    int i;
    uint8_t unit_type;
    int32_t nal_size;
    uint32_t cumul_size = 0;
    const uint8_t *buf_end = buf + buf_size;
    int ret = 0;

    /* No avcC extradata: the stream is presumably already Annex B, so
     * pass the packet through untouched. */
    if (!avctx->extradata || avctx->extradata_size < 6) {
        *poutbuf = (uint8_t *)buf;
        *poutbuf_size = buf_size;
        return 0;
    }

    /* One-time: convert the avcC extradata to Annex B SPS/PPS and learn
     * the NAL length-field size. */
    if (!ctx->extradata_parsed) {
        if (args && strstr(args, "private_spspps_buf"))
            ctx->private_spspps = 1;
        ret = h264_extradata_to_annexb(ctx, avctx, AV_INPUT_BUFFER_PADDING_SIZE);
        if (ret < 0)
            return ret;
        ctx->length_size = ret;
        ctx->new_idr = 1;
        ctx->idr_sps_seen = 0;
        ctx->idr_pps_seen = 0;
        ctx->extradata_parsed = 1;
    }

    *poutbuf_size = 0;
    *poutbuf = NULL;
    /* Walk the packet one length-prefixed NAL at a time. */
    do {
        ret= AVERROR(EINVAL);
        if (buf + ctx->length_size > buf_end)
            goto fail;

        /* Big-endian NAL size of length_size bytes. */
        for (nal_size = 0, i = 0; i<ctx->length_size; i++)
            nal_size = (nal_size << 8) | buf[i];

        buf += ctx->length_size;
        unit_type = *buf & 0x1f;

        /* NOTE(review): 'buf + nal_size' with attacker-controlled nal_size
         * is overflow-prone pointer arithmetic — verify bounds handling. */
        if (buf + nal_size > buf_end || nal_size < 0)
            goto fail;

        /* Track in-band SPS (7) / PPS (8) ahead of the next IDR. */
        if (unit_type == 7)
            ctx->idr_sps_seen = ctx->new_idr = 1;
        else if (unit_type == 8) {
            ctx->idr_pps_seen = ctx->new_idr = 1;
            /* PPS without a preceding SPS: prepend the SPS from extradata. */
            if (!ctx->idr_sps_seen) {
                if (ctx->sps_offset == -1)
                    av_log(avctx, AV_LOG_WARNING, "SPS not present in the stream, nor in AVCC, stream may be unreadable\n");
                else {
                    if ((ret = alloc_and_copy(poutbuf, poutbuf_size,
                                         ctx->spspps_buf + ctx->sps_offset,
                                         ctx->pps_offset != -1 ? ctx->pps_offset : ctx->spspps_size - ctx->sps_offset,
                                         buf, nal_size)) < 0)
                        goto fail;
                    ctx->idr_sps_seen = 1;
                    goto next_nal;
                }
            }
        }

        /* A non-IDR slice (first_mb_in_slice flag set) re-arms insertion
         * before the next IDR. */
        if (!ctx->new_idr && unit_type == 5 && (buf[1] & 0x80))
            ctx->new_idr = 1;

        /* IDR with neither SPS nor PPS seen: prepend full SPS+PPS. */
        if (ctx->new_idr && unit_type == 5 && !ctx->idr_sps_seen && !ctx->idr_pps_seen) {
            if ((ret=alloc_and_copy(poutbuf, poutbuf_size,
                               ctx->spspps_buf, ctx->spspps_size,
                               buf, nal_size)) < 0)
                goto fail;
            ctx->new_idr = 0;
        /* IDR with SPS seen but PPS missing: prepend just the PPS. */
        } else if (ctx->new_idr && unit_type == 5 && ctx->idr_sps_seen && !ctx->idr_pps_seen) {
            if (ctx->pps_offset == -1) {
                av_log(avctx, AV_LOG_WARNING, "PPS not present in the stream, nor in AVCC, stream may be unreadable\n");
                if ((ret = alloc_and_copy(poutbuf, poutbuf_size,
                                     NULL, 0, buf, nal_size)) < 0)
                    goto fail;
            } else if ((ret = alloc_and_copy(poutbuf, poutbuf_size,
                                        ctx->spspps_buf + ctx->pps_offset, ctx->spspps_size - ctx->pps_offset,
                                        buf, nal_size)) < 0)
                goto fail;
        } else {
            /* Ordinary NAL: copy with a start code, no parameter sets. */
            if ((ret=alloc_and_copy(poutbuf, poutbuf_size,
                               NULL, 0, buf, nal_size)) < 0)
                goto fail;
            /* A non-IDR slice resets the per-IDR SPS/PPS tracking. */
            if (!ctx->new_idr && unit_type == 1) {
                ctx->new_idr = 1;
                ctx->idr_sps_seen = 0;
                ctx->idr_pps_seen = 0;
            }
        }

next_nal:
        buf += nal_size;
        cumul_size += nal_size + ctx->length_size;
    } while (cumul_size < buf_size);

    return 1;

fail:
    av_freep(poutbuf);
    *poutbuf_size = 0;
    return ret;
}
| 1threat
|
Keeping track of changed properties in JPA : <p>Currently, I'm working on a Java EE project with some non-trivial requirements regarding persistence management. Changes to entities by users first need to be applied to some working copy before being validated, after which they are applied to the "live data". Any changes on that live data also need to have some record of them, to allow auditing.</p>
<p>The entities are managed via JPA, and Hibernate will be used as provider. That is a given, so we don't shy away from Hibernate-specific stuff. For the first requirement, two persistence units are used. One maps the entities to the "live data" tables, the other to the "working copy" tables. For the second requirement, we're going to use Hibernate Envers, a good fit for our use-case.</p>
<p>So far so good. Now, when users view the data on the (web-based) front-end, it would be very useful to be able to indicate which fields were changed in the working copy compared to the live data. A different colour would suffice. For this, we need some way of knowing which properties were altered. My question is, what would be a good way to go about this?</p>
<p>Using the JavaBeans API, a <code>PropertyChangeListener</code> could suffice to be notified of any changes in an entity of the working copy and keep a set of them. But the set would also need to be persisted, since the application could be restarted and changes can be long-lived before they're validated and applied to the live data. And applying the changes on the live data to obtain the working copy every time it is needed isn't feasible (hence the two persistence units).
We could also compare the working copy to the live data and find fields that are different. Some introspection and reflection code would suffice, but again that seems rather processing-intensive, not to mention the live data would need to be fetched.
Maybe I'm missing something simple, or someone know of a wonderful JPA/Hibernate feature I can use. Even if I can't avoid making (a) separate database table(s) for storing such information until it is applied to the live data, some best-practices or real-life experience with this scenario could be very useful.</p>
<p>I realize it's a semi-open question but surely other people must have encountered a requirement like this. Any good suggestion is appreciated, and any pointer to a ready-made solution would be a good candidate as accepted answer.</p>
| 0debug
|
PHP IF-Statement doesnt work like i want : <p>I recently started writing a website for learning html/css/js/php.</p>
<p>I designed the front-end of my site with bootstrap.
Now I am trying to validate some inputs with PHP.</p>
<p>I tried this:</p>
<pre><code> if ($durationHH <= 0 && $durationMM <= 0)
{
echo "DurationHH and DurationMM can not be both zero at the same time.";
echo "<br>";
echo "DurationHH and DurationMM can not be smaller than 0.";
}
elseif (empty($durationHH) || empty($durationMM))
{
echo "DurationHH and DurationMM can not be empty.";
echo "<br>";
}
else
{
echo $_POST["durationMM"];
echo ":";
echo $_POST["durationHH"];
}
</code></pre>
<p>I tested the validation by putting in some values for durationHH and durationMM.</p>
<p>Everything is working fine so far, except these two cases:</p>
<p>durationMM = 0 AND durationHH = any value</p>
<p>&&</p>
<p>durationHH= 0 AND durationMM = any value.</p>
<p>In these two cases i get the output: "DurationHH and DurationMM can not be empty."</p>
<p>How/why does this happen?</p>
| 0debug
|
/*
 * Link freshly allocated data clusters into the L2 table described by @m.
 * First performs copy-on-write of the partial head (n_start sectors) and
 * partial tail of the request, then rewrites the affected L2 entries with
 * COPIED offsets and finally releases any clusters the entries previously
 * pointed at.
 *
 * Returns 0 on success, a negative errno on I/O failure.
 */
static int alloc_cluster_link_l2(BlockDriverState *bs, uint64_t cluster_offset,
                                 QCowL2Meta *m)
{
    BDRVQcowState *s = bs->opaque;
    int i, j = 0, l2_index, ret;
    uint64_t *old_cluster, start_sect, l2_offset, *l2_table;

    if (m->nb_clusters == 0)
        return 0;

    /* Scratch list of the L2 entries we are about to overwrite. */
    old_cluster = qemu_malloc(m->nb_clusters * sizeof(uint64_t));

    /* COW the sectors before the request inside the first cluster. */
    start_sect = (m->offset & ~(s->cluster_size - 1)) >> 9;
    if (m->n_start) {
        ret = copy_sectors(bs, start_sect, cluster_offset, 0, m->n_start);
        if (ret < 0)
            goto err;
    }

    /* COW the tail of the last cluster when the request ends mid-cluster. */
    if (m->nb_available & (s->cluster_sectors - 1)) {
        uint64_t end = m->nb_available & ~(uint64_t)(s->cluster_sectors - 1);
        ret = copy_sectors(bs, start_sect + end, cluster_offset + (end << 9),
                           m->nb_available - end, s->cluster_sectors);
        if (ret < 0)
            goto err;
    }

    ret = -EIO;
    if (!get_cluster_table(bs, m->offset, &l2_table, &l2_offset, &l2_index))
        goto err;

    /* Point each L2 entry at its new cluster, remembering old entries. */
    for (i = 0; i < m->nb_clusters; i++) {
        if(l2_table[l2_index + i] != 0)
            old_cluster[j++] = l2_table[l2_index + i];

        l2_table[l2_index + i] = cpu_to_be64((cluster_offset +
                    (i << s->cluster_bits)) | QCOW_OFLAG_COPIED);
    }

    /* Flush the modified slice of the L2 table to disk. */
    if (bdrv_pwrite(s->hd, l2_offset + l2_index * sizeof(uint64_t),
                    l2_table + l2_index, m->nb_clusters * sizeof(uint64_t)) !=
        m->nb_clusters * sizeof(uint64_t))
        goto err;

    /* Only after the L2 write succeeds, drop the replaced clusters. */
    for (i = 0; i < j; i++)
        free_any_clusters(bs, old_cluster[i], 1);

    ret = 0;
err:
    qemu_free(old_cluster);
    return ret;
}
| 1threat
|
Login with different role : I am new to PHP. I cannot log in as either the user or the admin. How do I make it so that a user who logs in is redirected to index.php, while an admin who logs in is redirected to admin.php?
I did some research on YouTube but could not find anything helpful for what I need.
---html
<form action="login.php" method="post">
<input class="space" name="username" type="text"
placeholder="Username/E-
mail" required="required"></input>
<br />
<input class="space" name="password" type="password"
placeholder="Password" required="required"></input>
<br />
<input type="submit" class="log-btn" value="Login" />
<button type="button" class="log-btn" onclick="return
abc(1,'reg.html')">Register</button>
</form>
[this is the database table][1]
I had also included the admin username and password in the database so admin does not have to register
---php
<?php
include ("conn.php");
session_start();
$sql="SELECT * FROM user WHERE email = '".$_REQUEST['username']."' and
password = '".$_REQUEST['password']."' or username =
'".$_REQUEST['username']."' and password = '".$_REQUEST['password']."'
LIMIT 1";
$result=mysqli_query($con,$sql);
if(mysqli_num_rows($result) <= 0)
{
$cred = mysqli_fetch_row($result);
$_SESSION['user'] = $cred[1];
echo "<script>window.location.href='index.php';</script>";
}
else
echo "<script>window.location.href='index.php?msg=Invalid+Credential';
</script>";
if($row=mysqli_fetch_array($result))
{
$_SESSION['role']=$row['user_role'];
}
if($row['user_role']==="1"])
{
echo "<script>alert('Welcome back admin!');";
echo "window.location.href='admin.html';</script>";
}
?>
I expect that the user will be able to log in and be redirected to index.php, and that the admin will be able to log in as well but be redirected to admin.php. But what I am seeing is a white page and an error message referring to line 20. I know my if-else statement has some issue, but I am not sure how to fix it so that it works.
[1]: https://i.stack.imgur.com/owVvk.png
| 0debug
|
C#: Creating a boolean value for an if-else statement : bool InUse = true;
while (InUse)
Console.WriteLine("Welcome to the 2017 Wimbledon tournament! \n");
Console.WriteLine("Press 1 for a Default tournament");
Console.WriteLine("Press 2 for Women's single:");
Console.WriteLine("Press 3 for Men's single:");
Console.WriteLine("Press 4 for Women's double:");
Console.WriteLine("Press 5 for Men's double:");
Console.WriteLine("Press 6 for Mix double:");
Console.Write("Make a choice:");
int userValue = Convert.ToInt32(Console.ReadLine());
if (userValue == 1 || userValue == 2 || userValue == 3 || userValue == 4 || userValue == 5 || userValue == 6)
{
I can't seem to write a statement that returns a false and true value if anything is pressed correct or wrong.. How would you guys do this? I was going to use a return true; but i cant seem to implement this.. Any ideas?
| 0debug
|
Create new DB user for each new device : <p>I have an Android app where most information is stored in an external MySQL database. As I understand it, the same user can't connect with the same credentials at the same time. So I figured that if my app were used by several users at the same time, that would raise errors. For that reason I need some way to create a new DB user for each device that installs the app — how could I do that?</p>
| 0debug
|
/*
 * Probe whether a suspend mode is supported: in a detached (double-forked)
 * child, try the pm-utils helper @pmutils_bin with @pmutils_arg, falling
 * back to scanning LINUX_SYS_STATE_FILE for @sysfile_str. The probe's
 * verdict travels back to the parent as an exit status over a pipe.
 * Sets *err when the probe fails or the mode is unsupported.
 */
static void bios_supports_mode(const char *pmutils_bin, const char *pmutils_arg,
                               const char *sysfile_str, Error **err)
{
    pid_t pid;
    ssize_t ret;
    char *pmutils_path;
    int status, pipefds[2];

    /* Pipe carries the probe result from the intermediate child. */
    if (pipe(pipefds) < 0) {
        error_set(err, QERR_UNDEFINED_ERROR);
        return;
    }

    pmutils_path = g_find_program_in_path(pmutils_bin);

    pid = fork();
    if (!pid) {
        /* First child: detach from the parent's session and stdio. */
        struct sigaction act;
        memset(&act, 0, sizeof(act));
        act.sa_handler = SIG_DFL;
        sigaction(SIGCHLD, &act, NULL);
        setsid();
        close(pipefds[0]);
        reopen_fd_to_null(0);
        reopen_fd_to_null(1);
        reopen_fd_to_null(2);

        pid = fork();
        if (!pid) {
            /* Grandchild: run the actual probe. */
            int fd;
            char buf[32];

            /* Prefer the pm-utils helper; if execle() fails we fall through
             * to the sysfs probe below. */
            if (pmutils_path) {
                execle(pmutils_path, pmutils_bin, pmutils_arg, NULL, environ);
            }

            /* Fallback: look for the mode keyword in /sys/power/state. */
            if (!sysfile_str) {
                _exit(SUSPEND_NOT_SUPPORTED);
            }

            fd = open(LINUX_SYS_STATE_FILE, O_RDONLY);
            if (fd < 0) {
                _exit(SUSPEND_NOT_SUPPORTED);
            }

            ret = read(fd, buf, sizeof(buf)-1);
            if (ret <= 0) {
                _exit(SUSPEND_NOT_SUPPORTED);
            }
            buf[ret] = '\0';

            if (strstr(buf, sysfile_str)) {
                _exit(SUSPEND_SUPPORTED);
            }

            _exit(SUSPEND_NOT_SUPPORTED);
        }

        /* First child: collect the grandchild's status and forward it. */
        if (pid > 0) {
            wait(&status);
        } else {
            status = SUSPEND_NOT_SUPPORTED;
        }

        ret = write(pipefds[1], &status, sizeof(status));
        if (ret != sizeof(status)) {
            _exit(EXIT_FAILURE);
        }

        _exit(EXIT_SUCCESS);
    }

    /* Parent: read the forwarded wait() status from the pipe. */
    close(pipefds[1]);
    g_free(pmutils_path);

    if (pid < 0) {
        error_set(err, QERR_UNDEFINED_ERROR);
        goto out;
    }

    ret = read(pipefds[0], &status, sizeof(status));
    if (ret == sizeof(status) && WIFEXITED(status) &&
        WEXITSTATUS(status) == SUSPEND_SUPPORTED) {
        goto out;
    }

    error_set(err, QERR_UNSUPPORTED);

out:
    close(pipefds[0]);
}
| 1threat
|
/*
 * Legacy virtio-pci I/O-port write handler: dispatch a guest write at
 * offset @addr in the virtio config BAR to the matching device action
 * (feature negotiation, queue setup/notify, status changes, MSI-X vector
 * assignment).
 */
static void virtio_ioport_write(void *opaque, uint32_t addr, uint32_t val)
{
    VirtIOPCIProxy *proxy = opaque;
    VirtIODevice *vdev = proxy->vdev;
    target_phys_addr_t pa;

    switch (addr) {
    case VIRTIO_PCI_GUEST_FEATURES:
        /* Guest does not support the bit mapping: fall back to the
         * device's "bad features" set (or none). */
        if (val & (1 << VIRTIO_F_BAD_FEATURE)) {
            if (vdev->bad_features)
                val = proxy->host_features & vdev->bad_features(vdev);
            else
                val = 0;
        }
        if (vdev->set_features)
            vdev->set_features(vdev, val);
        vdev->guest_features = val;
        break;
    case VIRTIO_PCI_QUEUE_PFN:
        pa = (target_phys_addr_t)val << VIRTIO_PCI_QUEUE_ADDR_SHIFT;
        if (pa == 0) {
            /* Writing PFN 0 resets the whole device (legacy semantics). */
            virtio_pci_stop_ioeventfd(proxy);
            virtio_reset(proxy->vdev);
            msix_unuse_all_vectors(&proxy->pci_dev);
        }
        else
            virtio_queue_set_addr(vdev, vdev->queue_sel, pa);
        break;
    case VIRTIO_PCI_QUEUE_SEL:
        /* Select which virtqueue subsequent accesses refer to. */
        if (val < VIRTIO_PCI_QUEUE_MAX)
            vdev->queue_sel = val;
        break;
    case VIRTIO_PCI_QUEUE_NOTIFY:
        virtio_queue_notify(vdev, val);
        break;
    case VIRTIO_PCI_STATUS:
        /* Stop ioeventfd before DRIVER_OK is cleared, start it after it is
         * set; status 0 means full device reset. */
        if (!(val & VIRTIO_CONFIG_S_DRIVER_OK)) {
            virtio_pci_stop_ioeventfd(proxy);
        }

        virtio_set_status(vdev, val & 0xFF);

        if (val & VIRTIO_CONFIG_S_DRIVER_OK) {
            virtio_pci_start_ioeventfd(proxy);
        }

        if (vdev->status == 0) {
            virtio_reset(proxy->vdev);
            msix_unuse_all_vectors(&proxy->pci_dev);
        }

        /* Driver went ready without enabling PCI bus mastering: remember
         * the quirk so DMA still works for such (buggy) guests. */
        if ((val & VIRTIO_CONFIG_S_DRIVER_OK) &&
            !(proxy->pci_dev.config[PCI_COMMAND] & PCI_COMMAND_MASTER)) {
            proxy->flags |= VIRTIO_PCI_FLAG_BUS_MASTER_BUG;
        }
        break;
    case VIRTIO_MSI_CONFIG_VECTOR:
        /* Re-point the config-change interrupt at a new MSI-X vector. */
        msix_vector_unuse(&proxy->pci_dev, vdev->config_vector);
        if (msix_vector_use(&proxy->pci_dev, val) < 0)
            val = VIRTIO_NO_VECTOR;
        vdev->config_vector = val;
        break;
    case VIRTIO_MSI_QUEUE_VECTOR:
        /* Re-point the selected queue's interrupt at a new MSI-X vector. */
        msix_vector_unuse(&proxy->pci_dev,
                          virtio_queue_vector(vdev, vdev->queue_sel));
        if (msix_vector_use(&proxy->pci_dev, val) < 0)
            val = VIRTIO_NO_VECTOR;
        virtio_queue_set_vector(vdev, vdev->queue_sel, val);
        break;
    default:
        error_report("%s: unexpected address 0x%x value 0x%x",
                     __func__, addr, val);
        break;
    }
}
| 1threat
|
Can someone explain why we use random_state when we split the data into training and testing? : <p>I've just started building models in machine learning, and I was wondering why we have to create a random_state variable when we split the data.</p>
|
/*
 * Class-init hook: wire the SCLP event callbacks into the event class and
 * file the device under the "misc" category.
 */
static void cpu_class_init(ObjectClass *oc, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(oc);
    SCLPEventClass *event_class = SCLP_EVENT_CLASS(oc);

    set_bit(DEVICE_CATEGORY_MISC, dc->categories);

    event_class->get_send_mask = send_mask;
    event_class->get_receive_mask = receive_mask;
    event_class->read_event_data = read_event_data;
}
| 1threat
|
What is “object of type ‘closure’ is not subsettable” error in Shiny? : <p>I have a shiny app and when I run it I get an error saying that an <code>object of type ‘closure’ is not subsettable</code>. What is that and how can I fix it?</p>
<p><strong>Note</strong>: I wrote this question as this comes up a lot, and the possible dupes are either not <code>shiny</code> related or so specific that it is not obvious that the answers are broadly applicable.</p>
| 0debug
|
/*
 * Reconstruct one macroblock into the current picture: perform motion
 * compensation (inter MBs), add or put the IDCT of the residual blocks,
 * and maintain per-MB bookkeeping (qscale table, skip table, intra table).
 * block[] holds up to 12 blocks of 64 coefficients (4 luma + chroma;
 * 12 only when chroma is not horizontally subsampled — see the
 * !s->chroma_x_shift branches below).
 */
void MPV_decode_mb_internal(MpegEncContext *s, DCTELEM block[12][64],
                            int lowres_flag, int is_mpeg12)
{
    int mb_x, mb_y;
    const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
#if CONFIG_MPEG_XVMC_DECODER
    /* XvMC hardware acceleration reconstructs the MB itself. */
    if(s->avctx->xvmc_acceleration){
        ff_xvmc_decode_mb(s);
        return;
    }
#endif
    mb_x = s->mb_x;
    mb_y = s->mb_y;
    if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
        /* Debug aid: stash this MB's coefficients (un-permuted order). */
        int i,j;
        DCTELEM *dct = &s->current_picture.dct_coeff[mb_xy*64*6];
        for(i=0; i<6; i++)
            for(j=0; j<64; j++)
                *dct++ = block[i][s->dsp.idct_permutation[j]];
    }
    s->current_picture.qscale_table[mb_xy]= s->qscale;
    /* Update intra-prediction bookkeeping (H.263-style AC/DC prediction
     * uses mbintra_table; other codecs reset the DC predictors). */
    if (!s->mb_intra) {
        if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) {
            if(s->mbintra_table[mb_xy])
                ff_clean_intra_table_entries(s);
        } else {
            s->last_dc[0] =
            s->last_dc[1] =
            s->last_dc[2] = 128 << s->intra_dc_precision;
        }
    }
    else if (!is_mpeg12 && (s->h263_pred || s->h263_aic))
        s->mbintra_table[mb_xy]=1;
    /* Skip the actual pixel reconstruction when encoding with RD decision
     * off on I/B pictures and PSNR is not requested. */
    if ((s->flags&CODEC_FLAG_PSNR) || !(s->encoding && (s->intra_only || s->pict_type==FF_B_TYPE) && s->avctx->mb_decision != FF_MB_DECISION_RD)) {
        uint8_t *dest_y, *dest_cb, *dest_cr;
        int dct_linesize, dct_offset;
        op_pixels_func (*op_pix)[4];
        qpel_mc_func (*op_qpix)[16];
        const int linesize= s->current_picture.linesize[0];
        const int uvlinesize= s->current_picture.linesize[1];
        const int readable= s->pict_type != FF_B_TYPE || s->encoding || s->avctx->draw_horiz_band || lowres_flag;
        const int block_size= lowres_flag ? 8>>s->avctx->lowres : 8;
        if(!s->encoding){
            /* Skipped-MB accounting: a skipped MB may reuse reference
             * pixels unchanged if they are recent enough (age check). */
            uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];
            const int age= s->current_picture.age;
            assert(age);
            if (s->mb_skipped) {
                s->mb_skipped= 0;
                assert(s->pict_type!=FF_I_TYPE);
                (*mbskip_ptr) ++;
                if(*mbskip_ptr >99) *mbskip_ptr= 99;
                if (*mbskip_ptr >= age && s->current_picture.reference){
                    return;
                }
            } else if(!s->current_picture.reference){
                (*mbskip_ptr) ++;
                if(*mbskip_ptr >99) *mbskip_ptr= 99;
            } else{
                *mbskip_ptr = 0;
            }
        }
        /* Interlaced DCT doubles the line stride and offsets into the
         * opposite field; progressive offsets by one block row. */
        dct_linesize = linesize << s->interlaced_dct;
        dct_offset =(s->interlaced_dct)? linesize : linesize*block_size;
        if(readable){
            dest_y= s->dest[0];
            dest_cb= s->dest[1];
            dest_cr= s->dest[2];
        }else{
            /* Not directly readable: reconstruct into a scratchpad and
             * copy out at the end (see !readable block after skip_idct). */
            dest_y = s->b_scratchpad;
            dest_cb= s->b_scratchpad+16*linesize;
            dest_cr= s->b_scratchpad+32*linesize;
        }
        if (!s->mb_intra) {
            /* Inter MB: motion compensation, then add the residual. */
            if(!s->encoding){
                if(lowres_flag){
                    /* NOTE: this op_pix shadows the outer op_pix and uses
                     * the chroma MC table for low-resolution decoding. */
                    h264_chroma_mc_func *op_pix = s->dsp.put_h264_chroma_pixels_tab;
                    if (s->mv_dir & MV_DIR_FORWARD) {
                        MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.data, op_pix);
                        op_pix = s->dsp.avg_h264_chroma_pixels_tab;
                    }
                    if (s->mv_dir & MV_DIR_BACKWARD) {
                        MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.data, op_pix);
                    }
                }else{
                    op_qpix= s->me.qpel_put;
                    if ((!s->no_rounding) || s->pict_type==FF_B_TYPE){
                        op_pix = s->dsp.put_pixels_tab;
                    }else{
                        op_pix = s->dsp.put_no_rnd_pixels_tab;
                    }
                    if (s->mv_dir & MV_DIR_FORWARD) {
                        MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.data, op_pix, op_qpix);
                        /* Second (backward) pass averages into the result. */
                        op_pix = s->dsp.avg_pixels_tab;
                        op_qpix= s->me.qpel_avg;
                    }
                    if (s->mv_dir & MV_DIR_BACKWARD) {
                        MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.data, op_pix, op_qpix);
                    }
                }
            }
            if(s->hurry_up>1) goto skip_idct;
            if(s->avctx->skip_idct){
                if(  (s->avctx->skip_idct >= AVDISCARD_NONREF && s->pict_type == FF_B_TYPE)
                   ||(s->avctx->skip_idct >= AVDISCARD_NONKEY && s->pict_type != FF_I_TYPE)
                   || s->avctx->skip_idct >= AVDISCARD_ALL)
                    goto skip_idct;
            }
            /* Residual path 1: codecs needing dequant at this stage. */
            if(s->encoding || !(   s->h263_msmpeg4 || s->codec_id==CODEC_ID_MPEG1VIDEO || s->codec_id==CODEC_ID_MPEG2VIDEO
                                || (s->codec_id==CODEC_ID_MPEG4 && !s->mpeg_quant))){
                add_dequant_dct(s, block[0], 0, dest_y                          , dct_linesize, s->qscale);
                add_dequant_dct(s, block[1], 1, dest_y              + block_size, dct_linesize, s->qscale);
                add_dequant_dct(s, block[2], 2, dest_y + dct_offset             , dct_linesize, s->qscale);
                add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
                if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
                    if (s->chroma_y_shift){
                        add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
                        add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
                    }else{
                        dct_linesize >>= 1;
                        dct_offset >>=1;
                        add_dequant_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
                        add_dequant_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
                        add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
                        add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
                    }
                }
            /* Residual path 2: already-dequantized coefficients. */
            } else if(is_mpeg12 || (s->codec_id != CODEC_ID_WMV2)){
                add_dct(s, block[0], 0, dest_y                          , dct_linesize);
                add_dct(s, block[1], 1, dest_y              + block_size, dct_linesize);
                add_dct(s, block[2], 2, dest_y + dct_offset             , dct_linesize);
                add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);
                if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
                    if(s->chroma_y_shift){
                        add_dct(s, block[4], 4, dest_cb, uvlinesize);
                        add_dct(s, block[5], 5, dest_cr, uvlinesize);
                    }else{
                        dct_linesize = uvlinesize << s->interlaced_dct;
                        dct_offset =(s->interlaced_dct)? uvlinesize : uvlinesize*8;
                        add_dct(s, block[4], 4, dest_cb, dct_linesize);
                        add_dct(s, block[5], 5, dest_cr, dct_linesize);
                        add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
                        add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
                        if(!s->chroma_x_shift){
                            /* 4:4:4 — four extra chroma blocks per plane. */
                            add_dct(s, block[8], 8, dest_cb+8, dct_linesize);
                            add_dct(s, block[9], 9, dest_cr+8, dct_linesize);
                            add_dct(s, block[10], 10, dest_cb+8+dct_offset, dct_linesize);
                            add_dct(s, block[11], 11, dest_cr+8+dct_offset, dct_linesize);
                        }
                    }
                }
            }
            else if (CONFIG_WMV2) {
                ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
            }
        } else {
            /* Intra MB: write (put) the IDCT result directly. */
            if(s->encoding || !(s->codec_id==CODEC_ID_MPEG1VIDEO || s->codec_id==CODEC_ID_MPEG2VIDEO)){
                put_dct(s, block[0], 0, dest_y                          , dct_linesize, s->qscale);
                put_dct(s, block[1], 1, dest_y              + block_size, dct_linesize, s->qscale);
                put_dct(s, block[2], 2, dest_y + dct_offset             , dct_linesize, s->qscale);
                put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
                if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
                    if(s->chroma_y_shift){
                        put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
                        put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
                    }else{
                        dct_offset >>=1;
                        dct_linesize >>=1;
                        put_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
                        put_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
                        put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
                        put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
                    }
                }
            }else{
                /* MPEG-1/2 decode: coefficients are ready for plain idct_put. */
                s->dsp.idct_put(dest_y                          , dct_linesize, block[0]);
                s->dsp.idct_put(dest_y              + block_size, dct_linesize, block[1]);
                s->dsp.idct_put(dest_y + dct_offset             , dct_linesize, block[2]);
                s->dsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);
                if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
                    if(s->chroma_y_shift){
                        s->dsp.idct_put(dest_cb, uvlinesize, block[4]);
                        s->dsp.idct_put(dest_cr, uvlinesize, block[5]);
                    }else{
                        dct_linesize = uvlinesize << s->interlaced_dct;
                        dct_offset =(s->interlaced_dct)? uvlinesize : uvlinesize*8;
                        s->dsp.idct_put(dest_cb, dct_linesize, block[4]);
                        s->dsp.idct_put(dest_cr, dct_linesize, block[5]);
                        s->dsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
                        s->dsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
                        if(!s->chroma_x_shift){
                            /* 4:4:4 — four extra chroma blocks per plane. */
                            s->dsp.idct_put(dest_cb + 8, dct_linesize, block[8]);
                            s->dsp.idct_put(dest_cr + 8, dct_linesize, block[9]);
                            s->dsp.idct_put(dest_cb + 8 + dct_offset, dct_linesize, block[10]);
                            s->dsp.idct_put(dest_cr + 8 + dct_offset, dct_linesize, block[11]);
                        }
                    }
                }
            }
        }
skip_idct:
        /* If we reconstructed into the scratchpad, copy out now. */
        if(!readable){
            s->dsp.put_pixels_tab[0][0](s->dest[0], dest_y ,   linesize,16);
            s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift);
            s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift);
        }
    }
}
| 1threat
|
/*
 * Thin wrapper around get_band_numbits(): returns the bit count for
 * quantizing one band via the 'bits' metric only.
 * NOTE(review): the 'rtz' parameter is unused here; presumably the
 * signature must match a shared quantize-cost function-pointer type —
 * confirm at the call/table sites.
 */
static float quantize_band_cost_bits(struct AACEncContext *s, const float *in,
                                     const float *scaled, int size, int scale_idx,
                                     int cb, const float lambda, const float uplim,
                                     int *bits, int rtz)
{
    return get_band_numbits(s, NULL, in, scaled, size, scale_idx, cb, lambda, uplim, bits);
}
| 1threat
|
Efficient way to get the unique values from 2 or more columns in a Dataframe : <p>Given a matrix from an <code>SFrame</code>:</p>
<pre><code>>>> from sframe import SFrame
>>> sf =SFrame({'x':[1,1,2,5,7], 'y':[2,4,6,8,2], 'z':[2,5,8,6,2]})
>>> sf
Columns:
x int
y int
z int
Rows: 5
Data:
+---+---+---+
| x | y | z |
+---+---+---+
| 1 | 2 | 2 |
| 1 | 4 | 5 |
| 2 | 6 | 8 |
| 5 | 8 | 6 |
| 7 | 2 | 2 |
+---+---+---+
[5 rows x 3 columns]
</code></pre>
<p>I want to get the unique values for the <code>x</code> and <code>y</code> columns and I can do it as such:</p>
<pre><code>>>> sf['x'].unique().append(sf['y'].unique()).unique()
dtype: int
Rows: 7
[2, 8, 5, 4, 1, 7, 6]
</code></pre>
<p>This way I get the unique values of x and unique values of y then append them and get the unique values of the appended list.</p>
<p>I could also do it as such:</p>
<pre><code>>>> sf['x'].append(sf['y']).unique()
dtype: int
Rows: 7
[2, 8, 5, 4, 1, 7, 6]
</code></pre>
<p>But that way, if my x and y columns are huge with lots of duplicates, I would be appending it into a very huge container before getting the unique.</p>
<p><strong>Is there a more efficient way to get the unique values of a combined columns created from 2 or more columns in an SFrame?</strong></p>
<p><strong>What is the equivalence in pandas of the efficent way to get unique values from 2 or more columns in <code>pandas</code>?</strong></p>
| 0debug
|
C# - NullReferenceException when trying to give a value through constructor : <p>When I am trying to give value to a variable which is (should be) an object of another class DataContext through constructor it's always a null and I get a NullReferenceException. When I am trying to this by set accessor everything works fine.</p>
<pre><code>public class DataRepository
{
private DataContext data;
private AnyFiller currentFiller;
public AnyFiller CurrentFiller
{
get
{
return currentFiller;
}
set
{
this.currentFiller = value;
}
}
public DataContext Data { get; set; }
public DataRepository()
{
this.data = new DataContext();
}
public DataRepository(DataContext data)
{
this.data = data;
}
</code></pre>
<p>neither one of constructors works fine, but as I said before, it's working when I am doing something like this:</p>
<pre><code> DataContext cont = new DataContext();
DataRepository data = new DataRepository();
data.Data = cont;
</code></pre>
<p>Could anybody tell me what am I doing wrong?</p>
| 0debug
|
what is the difference between --force-rm and --rm when running docker build command : <p>When we build docker images using <code>docker build</code> command we have two options <code>--force-rm=true</code> and <code>--rm=true</code> to remove intermediate containers. what is the difference between these two options and in what scenarios should each be used.</p>
| 0debug
|
/*
 * Set the CODE_DIRTY_FLAG in the physical-RAM dirty bitmap for the page
 * containing ram_addr, i.e. stop write-protecting it for code tracking.
 * 'env' and 'vaddr' are unused here — presumably kept so the signature
 * matches related tlb helpers; confirm against callers.
 */
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr)
{
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
}
| 1threat
|
All of my .json files have problems loading reference/schema from schemastore.azurewebsites.net : <p>I'm working in VS Code and started getting this error yesterday. All of my json files have this error, not just a few. If I remember right there was an update to the program yesterday or the day before. Did the update break something or reset a setting that I forgot I had set?</p>
<p>When looking this up, people are talking about downloading the schema and using it locally, but I would prefer not to have to do that and would instead like to find out why this broke?</p>
<p>I am using a proxy, but as far as I know that hasn't changed. Here's the exact error I'm getting.</p>
<p>Problems loading reference '<a href="https://schemastore.azurewebsites.net/schemas/json/package.json" rel="noreferrer">https://schemastore.azurewebsites.net/schemas/json/package.json</a>': Unable to load schema from '<a href="https://schemastore.azurewebsites.net/schemas/json/package.json" rel="noreferrer">https://schemastore.azurewebsites.net/schemas/json/package.json</a>': Unable to connect to <a href="https://schemastore.azurewebsites.net/schemas/json/package.json" rel="noreferrer">https://schemastore.azurewebsites.net/schemas/json/package.json</a>. Error: connect ECONNREFUSED 168.62.224.13:443</p>
| 0debug
|
/*
 * Parse 'params' into 'opts' (first unnamed value is assigned to
 * 'firstname'), reporting any error and converting it to a -1 return.
 * Returns 0 on success, -1 on parse failure.
 */
int qemu_opts_do_parse(QemuOpts *opts, const char *params, const char *firstname)
{
    Error *local_err = NULL;

    opts_do_parse(opts, params, firstname, false, &local_err);
    if (local_err == NULL) {
        return 0;
    }

    qerror_report_err(local_err);
    error_free(local_err);
    return -1;
}
| 1threat
|
Swift : Why is it required for the key type in a Dictionary to be of type Hashable : <p>If I want to create a <code>Dictionary<Key:Value>()</code> it is required for the <code>Key</code> type object to the protocol <code>Hashable</code>. Why is that the case, how are Dictionaries implemented? </p>
<p>I mean I would understand that if the <code>Key</code> just needs to conform to the protocol of <code>Equatable</code> type, as the program will have to search for the related value, however, the extra <code>var hashValue: Int</code> that comes along with <code>Hashable</code> is a bit confusing</p>
| 0debug
|
WinAPI ReadFile returns corrupted data : <p>I'm writing a function in my Visual C++ project that reads contents of a file via WinAPI in 2000 byte increments and returns it as a std::string.</p>
<p>A problem occurs when the file is much larger than the buffer (for example 100 KB), I get garbage added at several locations in the file in the middle of valid data. This is a long <code>0xcccccccc...</code> sequence terminated by 3-4 other bytes, usually appearing in the middle of a word. The function doesn't fail otherwise and none of the valid data is missing.</p>
<p>I haven't checked the exact positions but it seems that this happens at buffer size increments (or a multiplier of buffer size increments). If I increase the size of the buffer to more than the size of the test files, the problem goes away. What causes this to happen? What am I doing wrong?</p>
<pre><code>std::string read_file(std::string filename) {
HANDLE hFile = CreateFile(filename.c_str(), GENERIC_READ, FILE_SHARE_READ, NULL, OPEN_EXISTING, NULL, NULL);
if (hFile == INVALID_HANDLE_VALUE)
{
std::string errortext("Error opening " + filename + ", bad handle value: " + to_string((int)hFile));
MessageBox(hwnd, errortext.c_str(), "Error", 0);
return "";
}
char buffer[2000] = "";
std::string entire_file = "";
DWORD dwBytesRead = 0;
while (ReadFile(hFile, buffer, sizeof(buffer), &dwBytesRead, NULL))
{
if (!dwBytesRead)
break;
entire_file += buffer;
}
CloseHandle(hFile);
return entire_file;
}
</code></pre>
| 0debug
|
/*
 * Render a 16-bit waveform trace for one plane of 'in' into 'out'.
 * Each source sample value (clamped to s->size - 1) selects a position
 * along the waveform axis; update16() brightens that target pixel.
 * 'column' selects vertical vs horizontal orientation, 'mirror' flips
 * the axis, 'offset' positions this component's strip in the output.
 */
static void lowpass16(WaveformContext *s, AVFrame *in, AVFrame *out,
                      int component, int intensity, int offset, int column)
{
    const int plane = s->desc->comp[component].plane;
    const int mirror = s->mirror;
    const int is_chroma = (component == 1 || component == 2);
    /* Chroma planes may be subsampled; shift coordinates accordingly. */
    const int shift_w = (is_chroma ? s->desc->log2_chroma_w : 0);
    const int shift_h = (is_chroma ? s->desc->log2_chroma_h : 0);
    /* linesize is in bytes; divide by 2 for uint16_t element strides. */
    const int src_linesize = in->linesize[plane] / 2;
    const int dst_linesize = out->linesize[plane] / 2;
    /* Negative stride walks upward when mirrored in column mode. */
    const int dst_signed_linesize = dst_linesize * (mirror == 1 ? -1 : 1);
    const int limit = s->size - 1;
    const int max = limit - intensity;
    const int src_h = FF_CEIL_RSHIFT(in->height, shift_h);
    const int src_w = FF_CEIL_RSHIFT(in->width, shift_w);
    const uint16_t *src_data = (const uint16_t *)in->data[plane];
    uint16_t *dst_data = (uint16_t *)out->data[plane] + (column ? (offset >> shift_h) * dst_linesize : offset >> shift_w);
    uint16_t * const dst_bottom_line = dst_data + dst_linesize * ((s->size >> shift_h) - 1);
    uint16_t * const dst_line = (mirror ? dst_bottom_line : dst_data);
    const uint16_t *p;
    int y;
    if (!column && mirror)
        dst_data += s->size >> shift_w;
    for (y = 0; y < src_h; y++) {
        const uint16_t *src_data_end = src_data + src_w;
        uint16_t *dst = dst_line;
        for (p = src_data; p < src_data_end; p++) {
            uint16_t *target;
            /* Clamp the sample so it indexes inside the trace area. */
            int v = FFMIN(*p, limit);
            if (column) {
                target = dst++ + dst_signed_linesize * (v >> shift_h);
            } else {
                if (mirror)
                    target = dst_data - (v >> shift_w) - 1;
                else
                    target = dst_data + (v >> shift_w);
            }
            update16(target, max, intensity, limit);
        }
        src_data += src_linesize;
        dst_data += dst_linesize;
    }
    /* Draw the min/max envelope over the finished trace. */
    envelope16(s, out, plane, plane);
}
| 1threat
|
Macros for excel to remove certain columns : I have a master sheet, but out of this sheet I only need certain columns to be displayed. Doing it manually is taking a long time and this worksheet is something I have to do once in a fortnight. Can anyone please suggest a macro formula. Thank you.
| 0debug
|
Advice for Math Proof Language : <p>Trying to figure out if I wanted to run a number 17 digits long through 50 million divisions how long would it take on a decent i7 PC and what would you recommend for language? Also I would like to scale it up over time so need a language that can be flexible when I get to say 30ish digit long numbers. For now basically I begin with a 17 digit long number and as I go I only care about the a smaller number after each calculation so it will get smaller quick. I am only doing division and subtraction and not keeping any remainders. Thoughts?</p>
| 0debug
|
/*
 * Combine input chunks into a complete frame for a parser.
 * 'next' is the offset of the next frame start within *buf, or
 * END_NOT_FOUND if the frame continues past this chunk (then the data is
 * buffered and -1 is returned). Negative 'next' means the frame boundary
 * lies inside previously-buffered data ("overread" bytes that must be
 * replayed on the next call). On completion, *buf/*buf_size are updated
 * to point at the assembled frame. Returns 0 on frame completion,
 * -1 while accumulating, AVERROR(ENOMEM) on allocation failure.
 */
int ff_combine_frame(ParseContext *pc, int next, const uint8_t **buf, int *buf_size)
{
    if(pc->overread){
        av_dlog(NULL, "overread %d, state:%X next:%d index:%d o_index:%d\n",
                pc->overread, pc->state, next, pc->index, pc->overread_index);
        av_dlog(NULL, "%X %X %X %X\n", (*buf)[0], (*buf)[1], (*buf)[2], (*buf)[3]);
    }
    /* Replay bytes consumed past the previous frame's end. */
    for(; pc->overread>0; pc->overread--){
        pc->buffer[pc->index++]= pc->buffer[pc->overread_index++];
    }
    /* An empty input chunk flushes: treat it as an immediate frame end. */
    if(!*buf_size && next == END_NOT_FOUND){
        next= 0;
    }
    pc->last_index= pc->index;
    /* Frame not complete yet: append this chunk to the buffer. */
    if(next == END_NOT_FOUND){
        void* new_buffer = av_fast_realloc(pc->buffer, &pc->buffer_size, (*buf_size) + pc->index + FF_INPUT_BUFFER_PADDING_SIZE);
        if(!new_buffer)
            return AVERROR(ENOMEM);
        pc->buffer = new_buffer;
        memcpy(&pc->buffer[pc->index], *buf, *buf_size);
        pc->index += *buf_size;
        return -1;
    }
    *buf_size=
    pc->overread_index= pc->index + next;
    /* If data was buffered, append the head of this chunk (plus padding)
     * and return the internal buffer as the frame. */
    if(pc->index){
        void* new_buffer = av_fast_realloc(pc->buffer, &pc->buffer_size, next + pc->index + FF_INPUT_BUFFER_PADDING_SIZE);
        if(!new_buffer)
            return AVERROR(ENOMEM);
        pc->buffer = new_buffer;
        memcpy(&pc->buffer[pc->index], *buf, next + FF_INPUT_BUFFER_PADDING_SIZE );
        pc->index = 0;
        *buf= pc->buffer;
    }
    /* Negative next: re-accumulate state for the overread bytes. */
    for(;next < 0; next++){
        pc->state = (pc->state<<8) | pc->buffer[pc->last_index + next];
        pc->state64 = (pc->state64<<8) | pc->buffer[pc->last_index + next];
        pc->overread++;
    }
    if(pc->overread){
        av_dlog(NULL, "overread %d, state:%X next:%d index:%d o_index:%d\n",
                pc->overread, pc->state, next, pc->index, pc->overread_index);
        av_dlog(NULL, "%X %X %X %X\n", (*buf)[0], (*buf)[1],(*buf)[2],(*buf)[3]);
    }
    return 0;
}
| 1threat
|
installing kafka on command line : > java.io.FileNotFoundException: .\config\server.properties (The system cannot fin
> d the path specified)
> at java.io.FileInputStream.open0(Native Method)
> at java.io.FileInputStream.open(Unknown Source)
> at java.io.FileInputStream.<init>(Unknown Source)
> at java.io.FileInputStream.<init>(Unknown Source)
> at org.apache.kafka.common.utils.Utils.loadProps(Utils.java:444)enter
> code here
> at kafka.Kafka$.getPropsFromArgs(Kafka.scala:41)
> at kafka.Kafka$.main(Kafka.scala:57)
> at kafka.Kafka.main(Kafka.scala)
| 0debug
|
Clickonce deployment equivalent for Dotnet core applications : <p>We have a dotnet core console application which we want to use ClickOnce deployment. Mainly every time the user runs the application, it should check for update and update itself. </p>
<p>This was available by default with ClickOnce deployment in dotnetframework applications. Dotnet core provides Framework Dependent Deployment & Self Contained Deployment. But in both the case, updates have to manually pushed.</p>
<p>Please suggest how we can achieve ClickOnce deployment for dotnet core applications and if there is any tool available in the market to achieve the same.</p>
| 0debug
|
Java int[] table print reverse numbers : <p>I have started coding Java and want to find a solution for this:</p>
<p>I have table for this:</p>
<pre><code>int nmb[ ] = {1,2,3,4,5,6,7,8,9,10};
</code></pre>
<p>And it should output the numbers in reverse, from 10 to 1</p>
<p>What i have tested so far haven't worked</p>
<pre><code>int nmb[ ] = {1,2,3,4,5,6,7,8,9,10};
for(int i=10; i >= 1; i--)
nmb[i]=i*2;
for(int i=10; i >= 1; i--)
System.out.println(nmb[i]);
</code></pre>
<p>Also the code should print out the 5th number in the string.</p>
<p>I will be really thankfully if you help me fix this.</p>
| 0debug
|
Can bash script be written inside a AWS Lambda function : <p>Can I write a bash script inside a Lambda function? I read in the aws docs that it can execute code written in Python, NodeJS and Java 8.</p>
<p>It is mentioned in some documents that it might be possible to use Bash but there is no concrete evidence supporting it or any example</p>
| 0debug
|
/*
 * Remove every breakpoint on 'env' whose flags intersect 'mask'.
 * No-op on targets without in-circuit-emulation breakpoint support.
 */
void cpu_breakpoint_remove_all(CPUState *env, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *current, *saved_next;

    /* _SAFE variant: removal unlinks 'current', so keep the successor. */
    TAILQ_FOREACH_SAFE(current, &env->breakpoints, entry, saved_next) {
        if (current->flags & mask) {
            cpu_breakpoint_remove_by_ref(env, current);
        }
    }
#endif
}
|
Confused by C String : <p>While learning C String, I had this code snippet:`</p>
<pre><code>char s[1];
strcpy(s, "hello");
puts(s);
printf("sizeof(s) = %ld\n", sizeof(s));//the result is 1
printf("strlen(s) = %ld\n", strlen(s));//the result is 5
printf("s[4] = %c\n", s[4]);//print 'o'
</code></pre>
<p>Why do this code snippet have this strange result? I mean I can legally assign the string of length 5 to a string declared with size 1.</p>
| 0debug
|
Code Iris plugin on Android Studio : <p>I am trying to make code iris plugin work on Android studio.</p>
<p>I press right click -> Create Code Iris graph and then I get a notification that my graph is ready. But I do not know when this graph is stored, what is the name of the file created and how to open it. Any ideas? </p>
| 0debug
|
Detect the most used color in image : <p>I am trying to detect lane lines in a video while I am driving. I reached the step where Canny Edge detection is applied on the region of interest. The region of interest contains only the white line, yellow line and the street (in gray color). Now, I need to mask any other color inside the region of interest because the gray color sometimes interrupt the other color in the edge detection. Then, when we apply the Hough Lines on the yellow and white lines, the detection is not accurate.</p>
<p>So, can how can I apply another mask on the image to leave the yellow and white parts alone?</p>
<p>I am using OpenCV for computer vision and image processing functions with Python.</p>
| 0debug
|
/*
 * Write the FFM container header: magic, packet size, stream count,
 * aggregate bit rate, then a fixed-layout per-stream codec parameter
 * record, padded out to a whole packet. The exact field order and byte
 * widths form the on-disk format and must match the FFM demuxer.
 * Returns 0 on success, -1 for an unsupported codec type.
 */
static int ffm_write_header(AVFormatContext *s)
{
    FFMContext *ffm = s->priv_data;
    AVStream *st;
    ByteIOContext *pb = s->pb;
    AVCodecContext *codec;
    int bit_rate, i;
    ffm->packet_size = FFM_PACKET_SIZE;
    /* Global header: magic "FFM1", packet size, a 64-bit field
     * (written here as packet_size — presumably a write-index slot;
     * confirm against the FFM demuxer), stream count. */
    put_le32(pb, MKTAG('F', 'F', 'M', '1'));
    put_be32(pb, ffm->packet_size);
    put_be64(pb, ffm->packet_size);
    put_be32(pb, s->nb_streams);
    /* Total bit rate is the sum over all streams. */
    bit_rate = 0;
    for(i=0;i<s->nb_streams;i++) {
        st = s->streams[i];
        bit_rate += st->codec->bit_rate;
    }
    put_be32(pb, bit_rate);
    /* Per-stream codec parameter records. */
    for(i=0;i<s->nb_streams;i++) {
        st = s->streams[i];
        /* Timestamps are carried in microseconds. */
        av_set_pts_info(st, 64, 1, 1000000);
        codec = st->codec;
        put_be32(pb, codec->codec_id);
        put_byte(pb, codec->codec_type);
        put_be32(pb, codec->bit_rate);
        put_be32(pb, st->quality);
        put_be32(pb, codec->flags);
        put_be32(pb, codec->flags2);
        put_be32(pb, codec->debug);
        switch(codec->codec_type) {
        case CODEC_TYPE_VIDEO:
            put_be32(pb, codec->time_base.num);
            put_be32(pb, codec->time_base.den);
            put_be16(pb, codec->width);
            put_be16(pb, codec->height);
            put_be16(pb, codec->gop_size);
            put_be32(pb, codec->pix_fmt);
            put_byte(pb, codec->qmin);
            put_byte(pb, codec->qmax);
            put_byte(pb, codec->max_qdiff);
            /* Float ratios stored as fixed-point (x10000) shorts. */
            put_be16(pb, (int) (codec->qcompress * 10000.0));
            put_be16(pb, (int) (codec->qblur * 10000.0));
            put_be32(pb, codec->bit_rate_tolerance);
            put_strz(pb, codec->rc_eq);
            put_be32(pb, codec->rc_max_rate);
            put_be32(pb, codec->rc_min_rate);
            put_be32(pb, codec->rc_buffer_size);
            /* Doubles serialized via their integer bit pattern. */
            put_be64(pb, av_dbl2int(codec->i_quant_factor));
            put_be64(pb, av_dbl2int(codec->b_quant_factor));
            put_be64(pb, av_dbl2int(codec->i_quant_offset));
            put_be64(pb, av_dbl2int(codec->b_quant_offset));
            put_be32(pb, codec->dct_algo);
            put_be32(pb, codec->strict_std_compliance);
            put_be32(pb, codec->max_b_frames);
            put_be32(pb, codec->luma_elim_threshold);
            put_be32(pb, codec->chroma_elim_threshold);
            put_be32(pb, codec->mpeg_quant);
            put_be32(pb, codec->intra_dc_precision);
            put_be32(pb, codec->me_method);
            put_be32(pb, codec->mb_decision);
            put_be32(pb, codec->nsse_weight);
            put_be32(pb, codec->frame_skip_cmp);
            put_be64(pb, av_dbl2int(codec->rc_buffer_aggressivity));
            put_be32(pb, codec->codec_tag);
            put_byte(pb, codec->thread_count);
            break;
        case CODEC_TYPE_AUDIO:
            put_be32(pb, codec->sample_rate);
            put_le16(pb, codec->channels);
            put_le16(pb, codec->frame_size);
            break;
        default:
            /* Unsupported codec type: abort header writing. */
            return -1;
        }
        /* Optional global extradata blob (length-prefixed). */
        if (codec->flags & CODEC_FLAG_GLOBAL_HEADER) {
            put_be32(pb, codec->extradata_size);
            put_buffer(pb, codec->extradata, codec->extradata_size);
        }
    }
    /* Zero-pad so the header occupies an integral number of packets. */
    while ((url_ftell(pb) % ffm->packet_size) != 0)
        put_byte(pb, 0);
    put_flush_packet(pb);
    /* Initialize the in-memory packet assembly state. */
    ffm->packet_ptr = ffm->packet;
    ffm->packet_end = ffm->packet + ffm->packet_size - FFM_HEADER_SIZE;
    assert(ffm->packet_end >= ffm->packet);
    ffm->frame_offset = 0;
    ffm->dts = 0;
    ffm->first_packet = 1;
    return 0;
}
| 1threat
|
/*
 * Common per-vCPU housekeeping after waiting for an I/O event:
 * acknowledge a pending stop request, drain queued work, and clear
 * the thread-kicked flag.
 */
static void qemu_wait_io_event_common(CPUState *cpu)
{
    bool stop_requested = cpu->stop;

    if (stop_requested) {
        cpu->stop = false;
        cpu->stopped = true;
        /* Wake anyone waiting in qemu_pause for this vCPU to stop. */
        qemu_cond_broadcast(&qemu_pause_cond);
    }

    process_queued_cpu_work(cpu);
    cpu->thread_kicked = false;
}
| 1threat
|
How to bundle lazy loaded components inside the production dist folder in angular-cli? : <p>I am using <code>angular-cli</code> for development and I have used the following commands and code to build my project.</p>
<p><code>npm install angular-cli</code> (angular-cli: 1.0.0-beta.10)</p>
<p><code>ng new my-app</code></p>
<p><code>ng g component lazy-me</code></p>
<p>Then added a file <code>app.router.ts</code> with the following script</p>
<pre><code>import { provideRouter, RouterConfig } from '@angular/router';
import { AppComponent } from './app.component';
// import { LazyMeComponent } from './+lazy-me/lazy-me.component';
const appRoutes : RouterConfig = [
{path: '', component: AppComponent},
// {path: 'lazyme', component: LazyMeComponent}
{path: 'lazyme', component: 'app/+lazy-me#LazyMeComponent'}
];
export const APP_ROUTER_PROVIDER = [
provideRouter(appRoutes)
];
</code></pre>
<p>And changed my main.ts as following</p>
<pre><code>import { bootstrap } from '@angular/platform-browser-dynamic';
import { enableProdMode,
SystemJsComponentResolver,
ComponentResolver } from '@angular/core';
import {RuntimeCompiler} from '@angular/compiler';
import { AppComponent, environment } from './app/';
import { APP_ROUTER_PROVIDER } from './app/app.router';
if (environment.production) {
enableProdMode();
}
bootstrap(AppComponent,[
APP_ROUTER_PROVIDER,
{
provide: ComponentResolver,
useFactory: (r) => new SystemJsComponentResolver(r),
deps: [RuntimeCompiler]
},
]);
</code></pre>
<p>And to do a production build I have used the following command
<code>ng build -prod</code></p>
<p>When I deploy my code to a webserver and navigate to <code>lazyme</code> path, I get 404 error for <code>app/lazy-me/lazy-me.component.js</code></p>
<p>The folder exists but <code>lazy-me.component.js</code> is missing as expected as everything gets bundled in <code>main.js</code> except .css and .html files.
However, I want <code>ng build -prod</code> to include <code>lazy-me.component.js</code> in <code>dist/app/lazy-me/</code>.</p>
<p>Is there any settings in <code>system-config.ts</code> or anywhere else where I can include lazy loaded components to be part of the <code>dist</code> folder when doing a <code>-prod</code> build?</p>
| 0debug
|
Super slow preflight OPTIONS in Chrome only : <p>I've been struggling recently with a super-weird problem only happening in Chrome: as my API (NodeJS) is on a different subdomain, I need to use CORS to reach it from my front-end (EmberJS).</p>
<p>It's working pretty well but I'm very frequently (95% of the time) having very very slow OPTIONS queries, delaying any API calls by about 3 seconds.</p>
<p><a href="https://i.stack.imgur.com/59gE1.png" rel="noreferrer"><img src="https://i.stack.imgur.com/59gE1.png" alt="2 requests, OPTIONS takes 3 seconds"></a></p>
<p>Most of this time is spent downloading an empty content:</p>
<p><a href="https://i.stack.imgur.com/y9S1G.png" rel="noreferrer"><img src="https://i.stack.imgur.com/y9S1G.png" alt="Downloading an empty content takes 3 seconds"></a></p>
<p>It gets even weirder when I'm trying this on another website we made using a similar architecture, experiencing the exact same problem.</p>
<p>A few other things I tried:</p>
<ul>
<li>I've been trying this with Firefox and Safari, and didn't get any delay.</li>
<li>I've been trying this locally or in production, experimenting the same delay.</li>
<li>I've been trying this with incognito mode (no extensions), and I have the exact same problem.</li>
</ul>
<p>We're using on the back-end NodeJS with the <a href="https://www.npmjs.com/package/cors" rel="noreferrer">CORS package</a>.</p>
<p>Now, I have no idea if the problem is on either Chrome 60, NodeJS, the CORS package or EmberJS + jQuery.</p>
<p>Anyone experienced this too?</p>
| 0debug
|
Python Code - Why is the class probability coming out as 0 when within the for loop it's being calculated correctly : Python code snippet
I have isolated the bug in
def classify_single_elem(self, X_elem):
Y_dict = {}
# print ('self.label' ,self.labels) ok
for label in self.labels:
class_probability = self.class_probabilities[label]
# print ('class probability' ,class_probability) okok
for ii in range(0,len(X_elem)):
relative_feature_values = self.nb_dict[label][ii]
if X_elem[ii] in relative_feature_values.keys():
class_probability *= relative_feature_values[X_elem[ii]] ok
else:
class_probability *= 0
when it gets to here the probability is set to zero which means the dict has zero values
Y_dict[label] = class_probability
return self.get_max_value_key(Y_dict)
| 0debug
|
/*
 * 9p "readlink" for the local backend: fetch the symlink target of
 * fs_path into buf (at most bufsz bytes). In the mapped security models
 * the link target is stored as the contents of a regular file, so it is
 * read(2); in passthrough/none it is a real symlink read via readlink(2).
 * Returns the number of bytes placed in buf, or -1 on error.
 * NOTE(review): like readlink(2), buf is not NUL-terminated here.
 */
static ssize_t local_readlink(FsContext *fs_ctx, V9fsPath *fs_path,
                              char *buf, size_t bufsz)
{
    ssize_t tsize = -1;
    char *buffer;
    char *path = fs_path->data;
    if ((fs_ctx->export_flags & V9FS_SM_MAPPED) ||
        (fs_ctx->export_flags & V9FS_SM_MAPPED_FILE)) {
        int fd;
        /* rpath() allocates the host path; free it after use. */
        buffer = rpath(fs_ctx, path);
        /* O_NOFOLLOW: the mapped "symlink" must be a regular file. */
        fd = open(buffer, O_RDONLY | O_NOFOLLOW);
        g_free(buffer);
        if (fd == -1) {
            return -1;
        }
        do {
            /* Retry reads interrupted by signals. */
            tsize = read(fd, (void *)buf, bufsz);
        } while (tsize == -1 && errno == EINTR);
        close(fd);
    } else if ((fs_ctx->export_flags & V9FS_SM_PASSTHROUGH) ||
               (fs_ctx->export_flags & V9FS_SM_NONE)) {
        buffer = rpath(fs_ctx, path);
        tsize = readlink(buffer, buf, bufsz);
        g_free(buffer);
    }
    return tsize;
}
| 1threat
|
/*
 * Format probe for Matroska/WebM-style files: verify the EBML header ID,
 * decode the EBML variable-length size of the header element, then scan
 * the header payload for one of the known DocType strings.
 * Returns AVPROBE_SCORE_MAX on a DocType match, half that for a valid
 * EBML header without a recognized DocType, 0 otherwise.
 */
static int matroska_probe(AVProbeData *p)
{
    uint64_t total = 0;
    int len_mask = 0x80, size = 1, n = 1, i;
    /* The file must start with the 4-byte EBML header element ID. */
    if (AV_RB32(p->buf) != EBML_ID_HEADER)
        return 0;
    /* Decode the element size: the position of the first set bit in the
     * leading byte gives the length (1..8 bytes) of the size field. */
    total = p->buf[4];
    while (size <= 8 && !(total & len_mask)) {
        size++;
        len_mask >>= 1;
    }
    if (size > 8)
        return 0;
    /* Strip the length-marker bit, then accumulate the remaining bytes. */
    total &= (len_mask - 1);
    while (n < size)
        total = (total << 8) | p->buf[4 + n++];
    /* Need the whole header element in the probe buffer to scan it. */
    if (p->buf_size < 4 + size + total)
        return 0;
    /* Look for a known DocType string anywhere in the header payload. */
    for (i = 0; i < FF_ARRAY_ELEMS(matroska_doctypes); i++) {
        int probelen = strlen(matroska_doctypes[i]);
        for (n = 4+size; n <= 4+size+total-probelen; n++)
            if (!memcmp(p->buf+n, matroska_doctypes[i], probelen))
                return AVPROBE_SCORE_MAX;
    }
    /* Valid EBML header, but unknown DocType: weaker confidence. */
    return AVPROBE_SCORE_MAX/2;
}
| 1threat
|
Object of abstract class type is not allowed : <p>I am trying to define an inheritance system where a base class, named Animal, has virtual functions that can inforce some functionalities to the derived classes. The derived classes are species of animals, ie. Cow, Dog, Cat.</p>
<p>Overview of classes of my interface:</p>
<pre><code>class Animal
{
public:
virtual void walk() = 0;
virtual void fly() = 0;
virtual void jump() = 0;
}
class Cow : public Animal
{
public:
void walk();
}
void Cow::walk()
{
//do something specific for cow
}
</code></pre>
<p>I would like to save ten different species dynamically to an array of size ten. This is what I have come up with:</p>
<pre><code>const int SIZE = 10;
Animal** myArray = new Animal*[SIZE];
myArray[0] = new Cow();
</code></pre>
<p>I get this error message: "Object of abstract class type "Cow" is not allowed". I believe this is because of the implementation of pure virtual functions in the base class. </p>
| 0debug
|
How to activate bluetooth fmx delphi :
How to activate bluetooth fmx delphi
How to bluetooth disable and enabled
| 0debug
|
We aren't able to process your payment using your PayPal account at this time : <p>I'm getting this error on a sandbox account:</p>
<blockquote>
<p>We aren't able to process your payment using your PayPal account at this time. Please go back to the merchant and try using a different payment method.</p>
</blockquote>
<p>My .Net app is successfully redirecting to PayPal, with the correct payment details. As soon as I log in with my sandbox account I get the above error. Is there a way to get to a log or anything that could help me source the issue? It was all working fine until this week, so I wonder has something changed in that time?</p>
<p>I have checked the accounts have a suitable balance. The payment is for €24 so it is not excessive. There are a few other posts regarding the issue but nothing with any suitable suggestions.</p>
<p><a href="https://i.stack.imgur.com/ucujB.jpg"><img src="https://i.stack.imgur.com/ucujB.jpg" alt="enter image description here"></a></p>
| 0debug
|
Getting noclassfoundexception : java.sql.SQLException in intellij idea for jdk 11 : <p>I am using latest version of Intellij Idea and set jdk as 11. When I execute my project it throws exception noclassfoundexception : java.sql.SQLException.</p>
<p>In java compiler settings in Intellij I have mentioned project bytecode version as 11. In project structure settings too, I have set the module jdk as 11. </p>
| 0debug
|
Beginner C++ Graphics (moving past the console)? : <p>I have been using console for a while now. I would like to move on past the words_on_the_screen.
I read about visual C++, I read about graphic engines .. I am confused on this topic .. I know not where to go next but I do want to learn graphic programming....</p>
| 0debug
|
How do I enclose multiple rails database queries into an atomic statement? : I'm manipulating multiple rails database queries which includes two different databases. I want them to enclose such that if one of the queries fails, all others must rollback. In short, I wanna convert them into an atomic statement.
a). LocationRole.create!(
role: new_params[:role],
resource_type: 'Hub',
resource_id: hub_data[:hub_id],
user_id: new_params[:user_id]
)
b). LocationRole.create!(
role: new_params[:role],
resource_type: 'Cluster',
resource_id: input[:cluster_id],
user_id: new_params[:user_id]
)
c). User.create(:email=>email,:password=>password,:user_type=>user_type)
| 0debug
|
static void vfio_rtl8168_window_quirk_write(void *opaque, hwaddr addr,
uint64_t data, unsigned size)
{
VFIOQuirk *quirk = opaque;
VFIOPCIDevice *vdev = quirk->vdev;
switch (addr) {
case 4:
if ((data & 0x7fff0000) == 0x10000) {
if (data & 0x10000000U &&
vdev->pdev.cap_present & QEMU_PCI_CAP_MSIX) {
trace_vfio_rtl8168_window_quirk_write_table(
memory_region_name(&quirk->mem),
vdev->vbasedev.name);
memory_region_dispatch_write(&vdev->pdev.msix_table_mmio,
(hwaddr)(quirk->data.address_match
& 0xfff),
data,
size,
MEMTXATTRS_UNSPECIFIED);
}
quirk->data.flags = 1;
quirk->data.address_match = data;
return;
}
quirk->data.flags = 0;
break;
case 0:
quirk->data.address_mask = data;
break;
}
trace_vfio_rtl8168_window_quirk_write_direct(
memory_region_name(&quirk->mem),
vdev->vbasedev.name);
vfio_region_write(&vdev->bars[quirk->data.bar].region,
addr + 0x70, data, size);
}
| 1threat
|
static void scsi_generic_realize(SCSIDevice *s, Error **errp)
{
int rc;
int sg_version;
struct sg_scsi_id scsiid;
if (!s->conf.bs) {
error_setg(errp, "drive property not set");
return;
}
if (bdrv_get_on_error(s->conf.bs, 0) != BLOCKDEV_ON_ERROR_ENOSPC) {
error_setg(errp, "Device doesn't support drive option werror");
return;
}
if (bdrv_get_on_error(s->conf.bs, 1) != BLOCKDEV_ON_ERROR_REPORT) {
error_setg(errp, "Device doesn't support drive option rerror");
return;
}
rc = bdrv_ioctl(s->conf.bs, SG_GET_VERSION_NUM, &sg_version);
if (rc < 0) {
error_setg(errp, "cannot get SG_IO version number: %s. "
"Is this a SCSI device?",
strerror(-rc));
return;
}
if (sg_version < 30000) {
error_setg(errp, "scsi generic interface too old");
return;
}
if (bdrv_ioctl(s->conf.bs, SG_GET_SCSI_ID, &scsiid)) {
error_setg(errp, "SG_GET_SCSI_ID ioctl failed");
return;
}
s->type = scsiid.scsi_type;
DPRINTF("device type %d\n", s->type);
switch (s->type) {
case TYPE_TAPE:
s->blocksize = get_stream_blocksize(s->conf.bs);
if (s->blocksize == -1) {
s->blocksize = 0;
}
break;
case TYPE_ROM:
case TYPE_WORM:
s->blocksize = 2048;
break;
default:
s->blocksize = 512;
break;
}
DPRINTF("block size %d\n", s->blocksize);
}
| 1threat
|
How to match vector of strings with dataframe column in r : <p>I have a vector of column like following</p>
<pre><code> a = "ASDRT" "GTYHE" "AQWER" QWERT"
</code></pre>
<p>And I have a dataframe like following</p>
<pre><code> ID. Amount
SDFGH. 45
ASDRT. 67
AQWER. 88
TYUIIO. 543
QWERT. 32
</code></pre>
<p>I want to match vector values with ID column of dataframe and if there is a match it will print 1 else 0</p>
<p>Desired output</p>
<pre><code> ID. Amount. Match
SDFGH. 45. 0
ASDRT. 67. 1
AQWER. 88. 1
TYUIIO. 543. 0
QWERT. 32. 1
</code></pre>
<p>How can I do it in R ?</p>
| 0debug
|
"We can not access the URL currently." : <p>I call google api when the return of "We can not access the URL currently." But the resources must exist and can be accessed.</p>
<p><a href="https://vision.googleapis.com/v1/images:annotate" rel="noreferrer">https://vision.googleapis.com/v1/images:annotate</a></p>
<p>request content:</p>
<pre><code>{
"requests": [
{
"image": {
"source": {
"imageUri": "http://yun.jybdfx.com/static/img/homebg.jpg"
}
},
"features": [
{
"type": "TEXT_DETECTION"
}
],
"imageContext": {
"languageHints": [
"zh"
]
}
}
]
}
</code></pre>
<p>response content:</p>
<pre><code>{
"responses": [
{
"error": {
"code": 4,
"message": "We can not access the URL currently. Please download the content and pass it in."
}
}
]
}
</code></pre>
| 0debug
|
static void test_identify(void)
{
AHCIQState *ahci;
ahci = ahci_boot_and_enable();
ahci_test_identify(ahci);
ahci_shutdown(ahci);
}
| 1threat
|
How can I condense this code in C# : <p>I want to know how to condense this working code. Its just for a simple game of go fish I made with a partner for our third day in C# class. This seems to be a ton of code for something Im sure can be condensed to a few lines. </p>
<p>C# code the part need help on</p>
<pre><code> int count1 = 0;
int count2 = 0;
int count3 = 0;
int count4 = 0;
int count5 = 0;
int count6 = 0;
int count7 = 0;
int count8 = 0;
int count9 = 0;
int count10 = 0;
int count11 = 0;
int count12 = 0;
int count13 = 0;
// System.Console.WriteLine("HI THERE LOOK AT ME " + player1.numOfCardsInHand());
for(int y = player1.numOfCardsInHand()-1; y >= 0 ; y--)
{
if(player1.getListObject()[y].val == 1)
{
count1++;
}
if(player1.getListObject()[y].val == 2)
{
count2++;
}
if(player1.getListObject()[y].val == 3)
{
count3++;
}
if(player1.getListObject()[y].val == 4)
{
count4++;
}
if(player1.getListObject()[y].val == 5)
{
count5++;
}
if(player1.getListObject()[y].val == 6)
{
count6++;
}
if(player1.getListObject()[y].val == 7)
{
count7++;
}
if(player1.getListObject()[y].val == 8)
{
count8++;
}
if(player1.getListObject()[y].val == 9)
{
count9++;
}
if(player1.getListObject()[y].val == 10)
{
count10++;
}
if(player1.getListObject()[y].val == 11)
{
count11++;
}
if(player1.getListObject()[y].val == 12)
{
count12++;
}
if(player1.getListObject()[y].val == 13)
{
count13++;
}
if(count1 == 4)
{
player1Points++;
player1.getListObject().RemoveAll(u => u.Equals("1"));
}
if(count2 == 4)
{
player1Points++;
player1.getListObject().RemoveAll(u => u.Equals("2"));
}
if(count3 == 4)
{
player1Points++;
player1.getListObject().RemoveAll(u => u.Equals("3"));
}
if(count4 == 4)
{
player1Points++;
player1.getListObject().RemoveAll(u => u.Equals("4"));
}
if(count5 == 4)
{
player1Points++;
player1.getListObject().RemoveAll(u => u.Equals("5"));
}
if(count6 == 4)
{
player1Points++;
player1.getListObject().RemoveAll(u => u.Equals("6"));
}
if(count7 == 4)
{
player1Points++;
player1.getListObject().RemoveAll(u => u.Equals("7"));
}
if(count8== 4)
{
player1Points++;
player1.getListObject().RemoveAll(u => u.Equals("8"));
}
if(count9 == 4)
{
player1Points++;
player1.getListObject().RemoveAll(u => u.Equals("9"));
}
if(count10 == 4)
{
player1Points++;
player1.getListObject().RemoveAll(u => u.Equals("10"));
}
if(count11 == 4)
{
player1Points++;
player1.getListObject().RemoveAll(u => u.Equals("11"));
}
if(count12 == 4)
{
player1Points++;
player1.getListObject().RemoveAll(u => u.Equals("12"));
}if(count13 == 4)
{
player1Points++;
player1.getListObject().RemoveAll(u => u.Equals("13"));
}
}
</code></pre>
| 0debug
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.