problem
stringlengths
26
131k
labels
class label
2 classes
/*
 * Open a tunnel through an HTTP proxy using the CONNECT method.
 * Connects to the proxy over TCP, issues "CONNECT host:port", parses the
 * proxy's response headers, and retries once with Proxy-Authorization if
 * the proxy answers 407 and an auth scheme was advertised.
 * Returns 0 on success (2xx/3xx response), a negative AVERROR otherwise.
 */
static int http_proxy_open(URLContext *h, const char *uri, int flags)
{
    HTTPContext *s = h->priv_data;
    char hostname[1024], hoststr[1024];
    char auth[1024], pathbuf[1024], *path;
    char line[1024], lower_url[100];
    int port, ret = 0;
    HTTPAuthType cur_auth_type;
    char *authstr;

    h->is_streamed = 1;

    /* Split the proxy URI into credentials, host and port; the "path"
     * component of a proxy URI is the target host:port to CONNECT to. */
    av_url_split(NULL, 0, auth, sizeof(auth), hostname, sizeof(hostname), &port,
                 pathbuf, sizeof(pathbuf), uri);
    ff_url_join(hoststr, sizeof(hoststr), NULL, NULL, hostname, port, NULL);
    path = pathbuf;
    if (*path == '/')
        path++;
    /* Build the lower-level tcp:// URL used for the raw proxy connection. */
    ff_url_join(lower_url, sizeof(lower_url), "tcp", NULL, hostname, port, NULL);
redo:
    ret = ffurl_open(&s->hd, lower_url, AVIO_FLAG_READ_WRITE,
                     &h->interrupt_callback, NULL);
    if (ret < 0)
        return ret;

    /* NULL on the first pass (no auth state yet); non-NULL on the 407 retry. */
    authstr = ff_http_auth_create_response(&s->proxy_auth_state, auth, path, "CONNECT");
    snprintf(s->buffer, sizeof(s->buffer),
             "CONNECT %s HTTP/1.1\r\n"
             "Host: %s\r\n"
             "Connection: close\r\n"
             "%s%s"
             "\r\n",
             path, hoststr,
             authstr ? "Proxy-" : "", authstr ? authstr : "");
    av_freep(&authstr);

    if ((ret = ffurl_write(s->hd, s->buffer, strlen(s->buffer))) < 0)
        goto fail;

    /* Reset the read buffer and header-parsing state before reading the
     * proxy's reply. */
    s->buf_ptr = s->buffer;
    s->buf_end = s->buffer;
    s->line_count = 0;
    s->filesize = -1;
    cur_auth_type = s->proxy_auth_state.auth_type;

    /* Read response headers line by line; process_line() returns 0 on the
     * blank line terminating the header block, <0 on error. */
    for (;;) {
        int new_loc;

        if (http_get_line(s, line, sizeof(line)) < 0) {
            ret = AVERROR(EIO);
            goto fail;
        }

        av_dlog(h, "header='%s'\n", line);

        ret = process_line(h, line, s->line_count, &new_loc);
        if (ret < 0)
            goto fail;
        if (ret == 0)
            break;
        s->line_count++;
    }

    /* 407 Proxy Authentication Required: retry exactly once, only if we had
     * not yet sent credentials and the proxy told us which scheme to use. */
    if (s->http_code == 407 && cur_auth_type == HTTP_AUTH_NONE &&
        s->proxy_auth_state.auth_type != HTTP_AUTH_NONE) {
        ffurl_close(s->hd);
        s->hd = NULL;
        goto redo;
    }

    if (s->http_code < 400)
        return 0;
    ret = AVERROR(EIO);

fail:
    http_proxy_close(h);
    return ret;
}
1threat
dynamically creating a connection string for a database created using C# : I have created a database programmatically. Is there a way to create its connection string dynamically using C#, so that after DB creation all data is stored in the new DB using C#?
0debug
In Android Studio I'm getting the below error while running : In Android Studio I'm getting this error while running the project... Error:Execution failed for task ':app:compileDebugJava'. > Cannot find System Java Compiler. Ensure that you have installed a JDK (not just a JRE) and configured your JAVA_HOME system variable to point to the according directory.
0debug
someone please help me solve this thank you : total=0 output=("enter next sales value") sales=input total_sales=total+sales i keep getting this error Traceback (most recent call last): File "python", line 4, in <module> TypeError: unsupported operand type(s) for +: 'int' and 'builtin_function_or_method'
0debug
dotnet is not recognized as the name of a cmdlet : <p>We have downloaded and run <code>DotNetCore.1.0.1-SDK.1.0.0.Preview2-003133-x64.exe</code>. After having closed and reopened our command prompt, running <code>dotnet</code> gives the following output. </p> <blockquote> <p>The term 'dotnet' is not recognized as the name of a cmdlet, function, script file, or operable program.</p> </blockquote> <p>We have tried the following: </p> <ol> <li>Uninstall all versions of Visual Studio. </li> <li>Uninstall all versions of .NET Core.</li> <li>Repair C++ Redistributable 2015 x86 &amp; x64</li> <li>Restart the computer. </li> <li>Then reinstall the .NET Core SDK. </li> </ol> <p>This is the contents of <code>C:/Program Files/dotnet</code></p> <pre><code>host fxr sdk 1.0.0-preview2-003133 shared Microsoft.NETCore.App swidtag Microsoft .NET Core 1.0.1 - SDK 1.0.0 Preview 2-003133 (x64).swidtag </code></pre> <p>Our PATH includes <code>C:\Program Files\dotnet\</code></p>
0debug
How to exclude Pods from Code Coverage in Xcode : <p>Is there a way to <strong>exclude</strong> Pods from Code Coverage?<br> I would like to see Code Coverage only for the code I've written.</p> <p>Not that it should matter, but I'm using Xcode 8.</p>
0debug
How to get an object from a list of objects in Terraform? : <p>I have the following list of objects variable:</p> <pre><code>variable "objects" { type = "list" description = "list of objects default = [ { id = "name1" attribute = "a" }, { id = "name2" attribute = "a,b" }, { id = "name3" attribute = "d" } ] } </code></pre> <p>How do I get element with id = "name2" ?</p>
0debug
/*
 * Store a new value into the SDR1 SPR (hashed page table base/size).
 * On 64-bit hash-MMU models the update is delegated to
 * ppc_hash64_set_sdr1(), which may fail; on 32-bit models the HTAB base
 * and mask are derived directly from the register's bit fields.
 * Must not be called while an external (KVM-managed) HTAB is in use.
 */
void ppc_store_sdr1(CPUPPCState *env, target_ulong value)
{
    qemu_log_mask(CPU_LOG_MMU, "%s: " TARGET_FMT_lx "\n", __func__, value);
    assert(!env->external_htab);
    env->spr[SPR_SDR1] = value;
#if defined(TARGET_PPC64)
    if (env->mmu_model & POWERPC_MMU_64) {
        PowerPCCPU *cpu = ppc_env_get_cpu(env);
        Error *local_err = NULL;

        ppc_hash64_set_sdr1(cpu, value, &local_err);
        if (local_err) {
            /* error_report_err() reports AND frees the Error; the original
             * additional error_free() call here was a double free. */
            error_report_err(local_err);
        }
    } else
#endif
    {
        /* 32-bit layout: HTABMASK selects the mask bits, HTABORG the
         * physical origin of the hashed page table. */
        env->htab_mask = ((value & SDR_32_HTABMASK) << 16) | 0xFFFF;
        env->htab_base = value & SDR_32_HTABORG;
    }
}
1threat
/*
 * QOM class initializer for the PIIX4 ACPI/PM PCI function: fills in the
 * PCI identity (Intel 82371AB function 3), the device-class callbacks,
 * migration state and properties.
 */
static void piix4_pm_class_init(ObjectClass *klass, void *data)
{
    PCIDeviceClass *pci = PCI_DEVICE_CLASS(klass);
    DeviceClass *devc = DEVICE_CLASS(klass);

    /* PCI-level identity and hooks. */
    pci->no_hotplug   = 1;
    pci->init         = piix4_pm_initfn;
    pci->config_write = pm_write_config;
    pci->vendor_id    = PCI_VENDOR_ID_INTEL;
    pci->device_id    = PCI_DEVICE_ID_INTEL_82371AB_3;
    pci->revision     = 0x03;
    pci->class_id     = PCI_CLASS_BRIDGE_OTHER;

    /* Generic device-class settings. */
    devc->desc    = "PM";
    devc->no_user = 1;           /* internal part of the chipset, not -device */
    devc->vmsd    = &vmstate_acpi;
    devc->props   = piix4_pm_properties;
}
1threat
Running async methods in parallel : <p>I've got an async method, <code>GetExpensiveThing()</code>, which performs some expensive I/O work. This is how I am using it:</p> <pre><code>// Serial execution public async Task&lt;List&lt;Thing&gt;&gt; GetThings() { var first = await GetExpensiveThing(); var second = await GetExpensiveThing(); return new List&lt;Thing&gt;() { first, second }; } </code></pre> <p>But since it's an expensive method, I want to execute these calls in in parallel. I would have thought moving the awaits would have solved this:</p> <pre><code>// Serial execution public async Task&lt;List&lt;Thing&gt;&gt; GetThings() { var first = GetExpensiveThing(); var second = GetExpensiveThing(); return new List&lt;Thing&gt;() { await first, await second }; } </code></pre> <p>That didn't work, so I wrapped them in some tasks and this works:</p> <pre><code>// Parallel execution public async Task&lt;List&lt;Thing&gt;&gt; GetThings() { var first = Task.Run(() =&gt; { return GetExpensiveThing(); }); var second = Task.Run(() =&gt; { return GetExpensiveThing(); }); return new List&lt;Thing&gt;() { first.Result, second.Result }; } </code></pre> <p>I even tried playing around with awaits and async in and around the tasks, but it got really confusing and I had no luck.</p> <p><strong>Is there a better to run async methods in parallel, or are tasks a good approach?</strong></p>
0debug
/*
 * Parse an MP4/MOV sample-size atom ('stsz', or the compact 'stz2' form)
 * for the most recently opened stream, filling sc->sample_count and,
 * when samples are variably sized, the sc->sample_sizes[] table.
 * Returns 0 on success or a negative AVERROR on malformed input / OOM.
 */
static int mov_read_stsz(MOVContext *c, AVIOContext *pb, MOVAtom atom)
{
    AVStream *st;
    MOVStreamContext *sc;
    unsigned int i, entries, sample_size, field_size, num_bytes;
    GetBitContext gb;
    unsigned char* buf;

    if (c->fc->nb_streams < 1)
        return 0;
    st = c->fc->streams[c->fc->nb_streams-1];
    sc = st->priv_data;

    avio_r8(pb);   /* version */
    avio_rb24(pb); /* flags */

    if (atom.type == MKTAG('s','t','s','z')) {
        /* 'stsz': a nonzero constant sample size means no per-sample table. */
        sample_size = avio_rb32(pb);
        if (!sc->sample_size)
            sc->sample_size = sample_size;
        field_size = 32;
    } else {
        /* 'stz2': per-sample fields of 4/8/16 bits, size read below. */
        sample_size = 0;
        avio_rb24(pb); /* reserved */
        field_size = avio_r8(pb);
    }
    entries = avio_rb32(pb);

    av_dlog(c->fc, "sample_size = %d sample_count = %d\n", sc->sample_size, entries);

    sc->sample_count = entries;
    if (sample_size)
        return 0;  /* constant size: nothing more to read */

    if (field_size != 4 && field_size != 8 && field_size != 16 && field_size != 32) {
        av_log(c->fc, AV_LOG_ERROR, "Invalid sample field size %d\n", field_size);
        return AVERROR_INVALIDDATA;
    }

    if (!entries)
        return 0;
    /* Guard both the table allocation and the bit-buffer size arithmetic
     * against unsigned overflow before allocating. */
    if (entries >= UINT_MAX / sizeof(int) || entries >= (UINT_MAX - 4) / field_size)
        return AVERROR_INVALIDDATA;
    sc->sample_sizes = av_malloc(entries * sizeof(int));
    if (!sc->sample_sizes)
        return AVERROR(ENOMEM);

    /* Round entries*field_size bits up to whole bytes (the +4 keeps parity
     * with the upstream rounding). */
    num_bytes = (entries*field_size+4)>>3;

    buf = av_malloc(num_bytes+FF_INPUT_BUFFER_PADDING_SIZE);
    if (!buf) {
        av_freep(&sc->sample_sizes);
        return AVERROR(ENOMEM);
    }

    if (avio_read(pb, buf, num_bytes) < num_bytes) {
        av_freep(&sc->sample_sizes);
        av_free(buf);
        return AVERROR_INVALIDDATA;
    }

    init_get_bits(&gb, buf, 8*num_bytes);

    /* Unpack each sample size and accumulate the stream's total data size. */
    for (i = 0; i < entries; i++) {
        sc->sample_sizes[i] = get_bits_long(&gb, field_size);
        sc->data_size += sc->sample_sizes[i];
    }

    av_free(buf);
    return 0;
}
1threat
/*
 * RTAS "event-scan" call: dequeue the next pending event log matching the
 * guest-supplied mask and copy it into the guest buffer at 'buf'
 * (capped to the smaller of the buffer length and the event length).
 * Reports RTAS_OUT_NO_ERRORS_FOUND when no event is queued.
 */
static void event_scan(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                       uint32_t token, uint32_t nargs,
                       target_ulong args,
                       uint32_t nret, target_ulong rets)
{
    uint32_t mask, buf, len, event_len;
    sPAPREventLogEntry *event;
    struct rtas_error_log *hdr;

    if (nargs != 4 || nret != 1) {
        rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
        return;
    }

    mask = rtas_ld(args, 0);
    buf = rtas_ld(args, 2);   /* guest physical address of the output buffer */
    len = rtas_ld(args, 3);   /* guest buffer length in bytes */

    event = rtas_event_log_dequeue(mask, false);
    if (!event) {
        goto out_no_events;
    }

    hdr = event->data;
    event_len = be32_to_cpu(hdr->extended_length) + sizeof(*hdr);

    /* Copy at most the actual event length into the guest buffer. */
    if (event_len < len) {
        len = event_len;
    }

    cpu_physical_memory_write(buf, event->data, len);
    rtas_st(rets, 0, RTAS_OUT_SUCCESS);

    /* The dequeued entry is consumed: free both payload and entry. */
    g_free(event->data);
    g_free(event);
    return;

out_no_events:
    rtas_st(rets, 0, RTAS_OUT_NO_ERRORS_FOUND);
}
1threat
disconnect() is deprecated: Please use the shouldEstablishDirectChannel property instead : <p>When looking at the disconnect() method and it's description it says the following in the docs</p> <blockquote> <p>Disconnect the current FIRMessaging data connection. This stops any attempts to connect to FIRMessaging. Calling this on an already disconnected client is a no-op.</p> </blockquote> <p>But looking at the shouldEstablishDirectChannel property</p> <blockquote> <p>When set to YES, Firebase Messaging will automatically establish a socket-based, direct channel to the FCM server. You only need to enable this if you are sending upstream messages or receiving non-APNS, data-only messages in foregrounded apps. Default is NO.</p> </blockquote> <p>It seems that they don't do exactly the same thing, but I might be wrong. Can anyone clarify this for me?</p>
0debug
How can I disable past dates in my datepicker? : <p>How can i disable my past date from the current date. I check already the other threads but its not working in my codes.</p> <pre><code>&lt;script&gt; $("#checkinDate").datetimepicker({ format: "yyyy-mm-dd", startView: "month", minView: "month", autoclose: true, todayBtn: true }); $("#checkoutDate").datetimepicker({ format: "yyyy-mm-dd", startView: "month", minView: "month", autoclose: true, todayBtn: true }); &lt;/script&gt; </code></pre>
0debug
/*
 * Build a BlockStats object for a block node: collect the node-level
 * statistics (optionally recursing into backing files), then, if a
 * BlockBackend is attached, record its device name and backend counters.
 * Caller owns the returned object.
 */
static BlockStats *bdrv_query_stats(BlockBackend *blk,
                                    const BlockDriverState *bs,
                                    bool query_backing)
{
    BlockStats *stats = bdrv_query_bds_stats(bs, query_backing);

    if (blk != NULL) {
        stats->has_device = true;
        stats->device = g_strdup(blk_name(blk));
        bdrv_query_blk_stats(stats->stats, blk);
    }

    return stats;
}
1threat
How to annotate a default value inside a android room entity? : <p>I couldn't find any information how to annotate a SQL - "DEFAULT" value while looking into the <a href="https://developer.android.com/reference/android/arch/persistence/room/ColumnInfo.html" rel="noreferrer">@ColumnInfo</a> docs for the new Android Persistence Library.</p> <p>Does Room even provide an annotation for default values?</p> <p>My current solution would be to manually create the corresponding Table ...</p> <pre><code>CREATE TABLE MyTable ( ... MyDefaultValuedCol TEXT DEFAULT 'Default Value', MyDefaultFlagCol INT DEFAULT 1 ) </code></pre> <p>... and put Room on top.</p> <pre><code>@Entity(tableName = "MyTable") class MyClass { ... public String MyDefaultValuedCol; public boolean MyDefaultFlagCol; } </code></pre>
0debug
Java XOR for an array : I'm trying to XOR an array of numbers all at once in Java. the array size is determined at the start of the program by the user and so has no limit. I thought of doing it this way but realised that it would not work. static void cpuTurn(int[] nimArray){ int[] val = new int[nimArray.length]; int holding = 0; for (int i = 0; i < nimArray.length; i++) { holding = holding^nimArray[i]; } } For example an array as such {4,5,6} should return 3 but as it stands, my code returns 7
0debug
why String is not convertible to datetime? : I am passing 2 dates to store procedure but conversion throws error 'String not convertible to datetime' txtFromDate.Text = DateTime.Now.ToString("dd/MM/yyyy"); txtToDate.Text = DateTime.Now.ToString("dd/MM/yyyy"); System.Data.DataTable dt = RejFiles.RejectedFiles(Convert.ToDateTime(txtFromDate.Text.Trim()), Convert.ToDateTime(txtToDate.Text.Trim()), user.OfficeID, user.Type_ID); SP: ALTER PROCEDURE [dbo].[usp_RejectedFiles] ( @FromDate SMALLDATETIME, @ToDate SMALLDATETIME, @OfficeID INT=0, @Type INT=0 ) and db stores the dates in particular table like this: 2014-03-01 00:00:00
0debug
/*
 * Board init for the Freecom MusicPal (Marvell 88W8618 SoC):
 * creates the ARM926 CPU, RAM/SRAM, interrupt controller, timers, UARTs,
 * optional CFI flash, Ethernet, LCD, audio and keyboard input, then boots
 * the kernel. Exits the process on fatal configuration errors.
 */
static void musicpal_init(ram_addr_t ram_size, int vga_ram_size,
                const char *boot_device, DisplayState *ds,
                const char *kernel_filename, const char *kernel_cmdline,
                const char *initrd_filename, const char *cpu_model)
{
    CPUState *env;
    qemu_irq *pic;
    int index;
    int iomemtype;
    unsigned long flash_size;

    if (!cpu_model)
        cpu_model = "arm926";  /* board default CPU */
    env = cpu_init(cpu_model);
    if (!env) {
        fprintf(stderr, "Unable to find CPU definition\n");
        exit(1);
    }
    pic = arm_pic_init_cpu(env);

    /* Main RAM at physical address 0, on-chip SRAM at its fixed base. */
    cpu_register_physical_memory(0, MP_RAM_DEFAULT_SIZE,
                                 qemu_ram_alloc(MP_RAM_DEFAULT_SIZE));
    sram_off = qemu_ram_alloc(MP_SRAM_SIZE);
    cpu_register_physical_memory(MP_SRAM_BASE, MP_SRAM_SIZE, sram_off);

    /* Catch-all MMIO window for miscellaneous SoC registers. */
    iomemtype = cpu_register_io_memory(0, musicpal_readfn,
                                       musicpal_writefn, env);
    cpu_register_physical_memory(0x80000000, 0x10000, iomemtype);

    /* Replace the bare CPU pic with the SoC interrupt controller's outputs. */
    pic = mv88w8618_pic_init(MP_PIC_BASE, pic[ARM_PIC_CPU_IRQ]);
    mv88w8618_pit_init(MP_PIT_BASE, pic, MP_TIMER1_IRQ);

    if (serial_hds[0])
        serial_mm_init(MP_UART1_BASE, 2, pic[MP_UART1_IRQ], 1825000,
                       serial_hds[0], 1);
    if (serial_hds[1])
        serial_mm_init(MP_UART2_BASE, 2, pic[MP_UART2_IRQ], 1825000,
                       serial_hds[1], 1);

    /* Optional parallel flash: only 8/16/32 MiB images are accepted. */
    index = drive_get_index(IF_PFLASH, 0, 0);
    if (index != -1) {
        flash_size = bdrv_getlength(drives_table[index].bdrv);
        if (flash_size != 8*1024*1024 && flash_size != 16*1024*1024 &&
            flash_size != 32*1024*1024) {
            fprintf(stderr, "Invalid flash image size\n");
            exit(1);
        }
        /* Map the flash so its end coincides with the top of the flash
         * aperture (base is the 32-bit wrap-around of -MP_FLASH_SIZE_MAX). */
        pflash_cfi02_register(0-MP_FLASH_SIZE_MAX,
                              qemu_ram_alloc(flash_size),
                              drives_table[index].bdrv, 0x10000,
                              (flash_size + 0xffff) >> 16,
                              MP_FLASH_SIZE_MAX / flash_size,
                              2, 0x00BF, 0x236D, 0x0000, 0x0000,
                              0x5555, 0x2AAA);
    }
    mv88w8618_flashcfg_init(MP_FLASHCFG_BASE);

    musicpal_lcd_init(ds, MP_LCD_BASE);

    qemu_add_kbd_event_handler(musicpal_key_event, pic[MP_GPIO_IRQ]);

    /* NOTE(review): this sleep(1) delays board init by a full second; no
     * visible justification here — confirm whether it is a leftover
     * debugging aid that should be removed. */
    sleep(1);
    mv88w8618_eth_init(&nd_table[0], MP_ETH_BASE, pic[MP_ETH_IRQ]);

    mixer_i2c = musicpal_audio_init(MP_AUDIO_BASE, pic[MP_AUDIO_IRQ]);

    /* Fill in the boot info and start the kernel. */
    musicpal_binfo.ram_size = MP_RAM_DEFAULT_SIZE;
    musicpal_binfo.kernel_filename = kernel_filename;
    musicpal_binfo.kernel_cmdline = kernel_cmdline;
    musicpal_binfo.initrd_filename = initrd_filename;
    arm_load_kernel(env, &musicpal_binfo);
}
1threat
android java.lang.NoClassDefFoundError: org.bouncycastle.crypto.engines.AESEngine api 16 : <p>I am using <code>com.nimbusds.jose.crypto</code> library in my android client for doing some jwt stuff. </p> <p>This is what i declare in my gradle file : </p> <pre><code> compile 'com.nimbusds:nimbus-jose-jwt:4.23' </code></pre> <p>Everything works fine on api >=19, but when I am running the code on api 16, I am getting this exception : </p> <p><code>java.lang.NoClassDefFoundError: org.bouncycastle.crypto.engines.AESEngine</code>. </p> <p>What's the issue here? Why is the class <code>AESENGINE</code> not available on api 16?</p>
0debug
/*
 * Read 'num' little-endian 32-bit words from guest physical memory starting
 * at 'addr' into 'buf', converting each word to host byte order.
 * Always returns 1 (the underlying copy reports no errors).
 */
static inline int get_dwords(uint32_t addr, uint32_t *buf, int num)
{
    int idx;

    for (idx = 0; idx < num; idx++) {
        cpu_physical_memory_rw(addr, (uint8_t *)buf, sizeof(*buf), 0);
        *buf = le32_to_cpu(*buf);  /* guest words are little-endian */
        buf++;
        addr += sizeof(*buf);
    }

    return 1;
}
1threat
Good name for this angular service : <p>at first I thought this is to stupid to ask, but then I said why not :-). I'm building a angular 4 app. I have a tranasction entity in database. The only purpose of this transasction is to group other orders. tranasction has 2 properties</p> <ol> <li>Id</li> <li>UserId - the user the created the transaction.</li> </ol> <p>At first only authenticate user could create a tranasction, so I've created a tranasctionCreator.create(userId) that returns the transasctionId. due to product changes, now a guest can create a transaction and only after login it will be assigned with him.</p> <p>So I'm wondering what should I do:</p> <ol> <li>leave tranasctionCreator service untouched , and add new service tranasctionUserAssigner.assign(tranasctionId, userId)</li> <li>Change transactionCreator name and group both functions. the problem with this is that I can't find a good name for this service</li> </ol>
0debug
Insert buttons on a picture on mouse hover and image darken : <p>I want the image to be darkened a little bit when the mouse is hovered on it (Overlay?). Then, the buttons like in the picture should appear when hover, then if mouse is hovered on one button, it should turn to red (just like picture), and image description text should appear under the buttons (as the picture), then if one button is clicked it should take us to a new URL (Say, if the button is clicked, it takes us to the www.google.com), can anyone provide any code? I have no idea how to do that.</p> <p><a href="https://i.stack.imgur.com/VE7dR.png" rel="nofollow noreferrer"><img src="https://i.stack.imgur.com/VE7dR.png" alt="enter image description here"></a></p>
0debug
Copy a string to clipboard from Mac OS command line : <p>is there a way to copy a string to clipboard from command line?</p> <p>To be more specific, I want to make a script which copies my email address to clipboard, so that when I need to insert it several times for logging in / register, I just run the script once and then CMD+V it whenever I need.</p> <p>I heard of <code>pbcopy</code>, but I think this is not my case. Any suggestion? Many thanks!</p>
0debug
How to build a sparkSession in Spark 2.0 using pyspark? : <p>I just got access to spark 2.0; I have been using spark 1.6.1 up until this point. Can someone please help me set up a sparkSession using pyspark (python)? I know that the scala examples available online are similar (<a href="https://databricks-prod-cloudfront.cloud.databricks.com/public/4027ec902e239c93eaaa8714f173bcfc/6122906529858466/431554386690884/4814681571895601/latest.html" rel="noreferrer">here</a>), but I was hoping for a direct walkthrough in python language. </p> <p>My specific case: I am loading in avro files from S3 in a zeppelin spark notebook. Then building df's and running various pyspark &amp; sql queries off of them. All of my old queries use sqlContext. I know this is poor practice, but I started my notebook with </p> <p><code>sqlContext = SparkSession.builder.enableHiveSupport().getOrCreate()</code>. </p> <p>I can read in the avros with </p> <p><code>mydata = sqlContext.read.format("com.databricks.spark.avro").load("s3:...</code> </p> <p>and build dataframes with no issues. But once I start querying the dataframes/temp tables, I keep getting the "java.lang.NullPointerException" error. I think that is indicative of a translational error (e.g. old queries worked in 1.6.1 but need to be tweaked for 2.0). The error occurs regardless of query type. So I am assuming </p> <p>1.) the sqlContext alias is a bad idea </p> <p>and </p> <p>2.) I need to properly set up a sparkSession. </p> <p>So if someone could show me how this is done, or perhaps explain the discrepancies they know of between the different versions of spark, I would greatly appreciate it. Please let me know if I need to elaborate on this question. I apologize if it is convoluted. </p>
0debug
Lldb : Setting conditional breakpoint with string equality as condition : <p>I would like to set a conditional breakpoint with lldb. This is usually done using <code>-c</code> option :</p> <pre><code>breakpoint set -f myFile.cpp -l 123 -c 'a==3' </code></pre> <p>However, in my case I want to test if a <code>std::string</code> object is equal to a certain string value but doing this</p> <pre><code>breakpoint set -f myFile.cpp -l 123 -c 'a=="hello"' </code></pre> <p>does not work… Lldb does not complain (while gdb would return an error) but it ignores the condition string upon reaching the breakpoint and breaks too early…</p> <p>This question is similar to <a href="https://stackoverflow.com/questions/4183871/how-do-i-set-a-conditional-breakpoint-in-gdb-when-char-x-points-to-a-string-wh">this one</a> but with lldb instead of gdb. The solution presented there</p> <pre><code>breakpoint set -f myFile.cpp -l 123 if strcmp(a, "hello")==0 </code></pre> <p>does not seem to be valid with lldb</p> <p>Lldb version used : 3.4</p>
0debug
/*
 * Run the DCA 32-band QMF synthesis filter bank for one channel:
 * converts 8 time slots of 32 subband samples into 256 interleaved PCM
 * samples written to samples_out, scaled by 'scale'.
 */
static void qmf_32_subbands(DCAContext * s, int chans,
                            float samples_in[32][8], float *samples_out,
                            float scale)
{
    const float *prCoeff;
    int i;
    int sb_act = s->subband_activity[chans];  /* number of active subbands */
    int subindex;

    /* Normalization factor folded into the output scale. */
    scale *= sqrt(1/8.0);

    /* Select FIR coefficient set per the stream's interpolator flag. */
    if (!s->multirate_inter)
        prCoeff = fir_32bands_nonperfect;
    else
        prCoeff = fir_32bands_perfect;

    /* Zero the inactive subbands once; they stay zero for all slots. */
    for (i = sb_act; i < 32; i++)
        s->raXin[i] = 0.0;

    for (subindex = 0; subindex < 8; subindex++) {
        /* Copy one time slot, flipping the sign bit of every 4th pair of
         * subbands by XORing bit 31 (((i-1)&2)<<30 selects those bands). */
        for (i = 0; i < sb_act; i++){
            uint32_t v = AV_RN32A(&samples_in[i][subindex]) ^ ((i-1)&2)<<30;
            AV_WN32A(&s->raXin[i], v);
        }

        s->synth.synth_filter_float(&s->imdct, s->subband_fir_hist[chans],
                                    &s->hist_index[chans],
                                    s->subband_fir_noidea[chans], prCoeff,
                                    samples_out, s->raXin, scale);
        samples_out+= 32;  /* 32 PCM samples produced per slot */
    }
}
1threat
/*
 * Read the quantized coefficient codewords for every active band of one
 * IMC channel from the bitstream into chctx->codewords[].
 * Bands with no coded bits, and skipped coefficients, get codeword 0.
 */
static void imc_get_coeffs(AVCodecContext *avctx,
                           IMCContext *q, IMCChannel *chctx)
{
    int i, j, cw_len, cw;

    for (i = 0; i < BANDS; i++) {
        /* Band carries no coded data at all. */
        if (!chctx->sumLenArr[i])
            continue;
        if (chctx->bandFlagsBuf[i] || chctx->bandWidthT[i]) {
            for (j = band_tab[i]; j < band_tab[i + 1]; j++) {
                cw_len = chctx->CWlengthT[j];
                cw = 0;

                /* NOTE(review): when fewer than cw_len bits remain this
                 * only warns and still calls get_bits() below — looks like
                 * a potential bitstream overread; confirm against the
                 * upstream fix before relying on this path. */
                if (get_bits_count(&q->gb) + cw_len > 512) {
                    av_log(avctx, AV_LOG_WARNING,
                           "Potential problem on band %i, coefficient %i"
                           ": cw_len=%i\n", i, j, cw_len);
                }

                if (cw_len && (!chctx->bandFlagsBuf[i] || !chctx->skipFlags[j]))
                    cw = get_bits(&q->gb, cw_len);

                chctx->codewords[j] = cw;
            }
        }
    }
}
1threat
/*
 * Realize a vfio-pci device: resolve the host sysfs device and its IOMMU
 * group, attach to the VFIO group/device, read and partially virtualize
 * the PCI config space (IDs, header type, BARs, ROM), set up MSI/MSI-X and
 * INTx routing, and register error/request notifiers.
 * Returns 0 on success or a negative errno-style code.
 */
static int vfio_initfn(PCIDevice *pdev)
{
    VFIOPCIDevice *vdev = DO_UPCAST(VFIOPCIDevice, pdev, pdev);
    VFIODevice *vbasedev_iter;
    VFIOGroup *group;
    char path[PATH_MAX], iommu_group_path[PATH_MAX], *group_name;
    ssize_t len;
    struct stat st;
    int groupid;
    int ret;

    /* Verify the host device exists in sysfs. */
    snprintf(path, sizeof(path),
             "/sys/bus/pci/devices/%04x:%02x:%02x.%01x/",
             vdev->host.domain, vdev->host.bus, vdev->host.slot,
             vdev->host.function);

    if (stat(path, &st) < 0) {
        error_report("vfio: error: no such host device: %s", path);
        return -errno;
    }

    vdev->vbasedev.ops = &vfio_pci_ops;

    vdev->vbasedev.type = VFIO_DEVICE_TYPE_PCI;
    vdev->vbasedev.name = g_strdup_printf("%04x:%02x:%02x.%01x",
                                          vdev->host.domain, vdev->host.bus,
                                          vdev->host.slot, vdev->host.function);

    /* Resolve the device's IOMMU group via the sysfs symlink. */
    strncat(path, "iommu_group", sizeof(path) - strlen(path) - 1);

    len = readlink(path, iommu_group_path, sizeof(path));
    if (len <= 0 || len >= sizeof(path)) {
        error_report("vfio: error no iommu_group for device");
        return len < 0 ? -errno : -ENAMETOOLONG;
    }

    iommu_group_path[len] = 0;  /* readlink does not NUL-terminate */
    group_name = basename(iommu_group_path);

    if (sscanf(group_name, "%d", &groupid) != 1) {
        error_report("vfio: error reading %s: %m", path);
        return -errno;
    }

    trace_vfio_initfn(vdev->vbasedev.name, groupid);

    group = vfio_get_group(groupid, pci_device_iommu_address_space(pdev));
    if (!group) {
        error_report("vfio: failed to get group %d", groupid);
        return -ENOENT;
    }

    snprintf(path, sizeof(path), "%04x:%02x:%02x.%01x",
            vdev->host.domain, vdev->host.bus, vdev->host.slot,
            vdev->host.function);

    /* Refuse to attach the same host device twice within the group. */
    QLIST_FOREACH(vbasedev_iter, &group->device_list, next) {
        if (strcmp(vbasedev_iter->name, vdev->vbasedev.name) == 0) {
            error_report("vfio: error: device %s is already attached", path);
            vfio_put_group(group);
            return -EBUSY;
        }
    }

    ret = vfio_get_device(group, path, &vdev->vbasedev);
    if (ret) {
        error_report("vfio: failed to get device %s", path);
        vfio_put_group(group);
        return ret;
    }

    /* NOTE(review): from here on, early returns do not call
     * vfio_put_group(); presumably cleanup happens via the device teardown
     * path — confirm against the matching exitfn. */
    ret = vfio_populate_device(vdev);
    if (ret) {
        return ret;
    }

    /* Snapshot the device's real config space. */
    ret = pread(vdev->vbasedev.fd, vdev->pdev.config,
                MIN(pci_config_size(&vdev->pdev), vdev->config_size),
                vdev->config_offset);
    if (ret < (int)MIN(pci_config_size(&vdev->pdev), vdev->config_size)) {
        ret = ret < 0 ? -errno : -EFAULT;
        error_report("vfio: Failed to read device config space");
        return ret;
    }

    /* Bitmap of config-space bits QEMU emulates instead of passing through;
     * the ROM BAR is always emulated. */
    vdev->emulated_config_bits = g_malloc0(vdev->config_size);

    memset(vdev->emulated_config_bits + PCI_ROM_ADDRESS, 0xff, 4);

    /* Optionally override vendor/device/subsystem IDs with user-provided
     * values, validating each fits in 16 bits. */
    if (vdev->vendor_id != PCI_ANY_ID) {
        if (vdev->vendor_id >= 0xffff) {
            error_report("vfio: Invalid PCI vendor ID provided");
            return -EINVAL;
        }
        vfio_add_emulated_word(vdev, PCI_VENDOR_ID, vdev->vendor_id, ~0);
        trace_vfio_pci_emulated_vendor_id(vdev->vbasedev.name, vdev->vendor_id);
    } else {
        vdev->vendor_id = pci_get_word(pdev->config + PCI_VENDOR_ID);
    }

    if (vdev->device_id != PCI_ANY_ID) {
        if (vdev->device_id > 0xffff) {
            error_report("vfio: Invalid PCI device ID provided");
            return -EINVAL;
        }
        vfio_add_emulated_word(vdev, PCI_DEVICE_ID, vdev->device_id, ~0);
        trace_vfio_pci_emulated_device_id(vdev->vbasedev.name, vdev->device_id);
    } else {
        vdev->device_id = pci_get_word(pdev->config + PCI_DEVICE_ID);
    }

    if (vdev->sub_vendor_id != PCI_ANY_ID) {
        if (vdev->sub_vendor_id > 0xffff) {
            error_report("vfio: Invalid PCI subsystem vendor ID provided");
            return -EINVAL;
        }
        vfio_add_emulated_word(vdev, PCI_SUBSYSTEM_VENDOR_ID,
                               vdev->sub_vendor_id, ~0);
        trace_vfio_pci_emulated_sub_vendor_id(vdev->vbasedev.name,
                                              vdev->sub_vendor_id);
    }

    if (vdev->sub_device_id != PCI_ANY_ID) {
        if (vdev->sub_device_id > 0xffff) {
            error_report("vfio: Invalid PCI subsystem device ID provided");
            return -EINVAL;
        }
        vfio_add_emulated_word(vdev, PCI_SUBSYSTEM_ID, vdev->sub_device_id, ~0);
        trace_vfio_pci_emulated_sub_device_id(vdev->vbasedev.name,
                                              vdev->sub_device_id);
    }

    /* QEMU, not the host device, decides whether this slot is
     * multi-function. */
    vdev->emulated_config_bits[PCI_HEADER_TYPE] =
                                              PCI_HEADER_TYPE_MULTI_FUNCTION;
    if (vdev->pdev.cap_present & QEMU_PCI_CAP_MULTIFUNCTION) {
        vdev->pdev.config[PCI_HEADER_TYPE] |= PCI_HEADER_TYPE_MULTI_FUNCTION;
    } else {
        vdev->pdev.config[PCI_HEADER_TYPE] &= ~PCI_HEADER_TYPE_MULTI_FUNCTION;
    }

    /* Clear host BAR and ROM addresses; the guest programs its own. */
    memset(&vdev->pdev.config[PCI_BASE_ADDRESS_0], 0, 24);
    memset(&vdev->pdev.config[PCI_ROM_ADDRESS], 0, 4);

    vfio_pci_size_rom(vdev);

    /* MSI-X must be located before BARs are mapped. */
    ret = vfio_msix_early_setup(vdev);
    if (ret) {
        return ret;
    }

    vfio_map_bars(vdev);

    ret = vfio_add_capabilities(vdev);
    if (ret) {
        goto out_teardown;
    }

    /* MSI/MSI-X capability bytes are fully emulated. */
    if (pdev->cap_present & QEMU_PCI_CAP_MSIX) {
        memset(vdev->emulated_config_bits + pdev->msix_cap, 0xff,
               MSIX_CAP_LENGTH);
    }

    if (pdev->cap_present & QEMU_PCI_CAP_MSI) {
        memset(vdev->emulated_config_bits + pdev->msi_cap, 0xff,
               vdev->msi_cap_size);
    }

    /* Enable legacy INTx only if the device reports an interrupt pin. */
    if (vfio_pci_read_config(&vdev->pdev, PCI_INTERRUPT_PIN, 1)) {
        vdev->intx.mmap_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL,
                                             vfio_intx_mmap_enable, vdev);
        pci_device_set_intx_routing_notifier(&vdev->pdev, vfio_intx_update);
        ret = vfio_intx_enable(vdev);
        if (ret) {
            goto out_teardown;
        }
    }

    vfio_register_err_notifier(vdev);
    vfio_register_req_notifier(vdev);
    vfio_setup_resetfn_quirk(vdev);

    return 0;

out_teardown:
    pci_device_set_intx_routing_notifier(&vdev->pdev, NULL);
    vfio_teardown_msi(vdev);
    vfio_unregister_bars(vdev);
    return ret;
}
1threat
NodeJS: How to fix different node module version? : <p>I'm trying to start a nodeJS application, but I do get the error</p> <pre><code>Error: The module '/Users/api/node_modules/bcrypt/lib/binding/bcrypt_lib.node' was compiled against a different Node.js version using NODE_MODULE_VERSION 46. This version of Node.js requires NODE_MODULE_VERSION 57. Please try re-compiling or re-installing the module (for instance, using `npm rebuild` or `npm install`). </code></pre> <p>I already run <code>npm install</code> and <code>npm rebuild</code>. But still the same error...</p>
0debug
/*
 * Decode one residual value for a WavPack channel using the adaptive
 * Rice-like entropy coder with three running medians.
 * Sets *last = 1 when the bitstream is exhausted. Returns the signed
 * (possibly bit-inverted for negatives) decoded value.
 */
static int wv_get_value(WavpackFrameContext *ctx, GetBitContext *gb,
                        int channel, int *last)
{
    int t, t2;
    int sign, base, add, ret;
    WvChannel *c = &ctx->ch[channel];

    *last = 0;

    /* Zero-run mode: when both channels' first medians collapse, runs of
     * zero samples are coded as an Elias-style run length. */
    if((ctx->ch[0].median[0] < 2U) && (ctx->ch[1].median[0] < 2U) &&
       !ctx->zero && !ctx->one){
        if(ctx->zeroes){
            ctx->zeroes--;
            if(ctx->zeroes){
                c->slow_level -= LEVEL_DECAY(c->slow_level);
                return 0;
            }
        }else{
            t = get_unary_0_33(gb);
            if(t >= 2)
                t = get_bits(gb, t - 1) | (1 << (t-1));
            ctx->zeroes = t;
            if(ctx->zeroes){
                /* Entering a zero run resets the adaptive medians. */
                memset(ctx->ch[0].median, 0, sizeof(ctx->ch[0].median));
                memset(ctx->ch[1].median, 0, sizeof(ctx->ch[1].median));
                c->slow_level -= LEVEL_DECAY(c->slow_level);
                return 0;
            }
        }
    }

    if(get_bits_count(gb) >= ctx->data_size){
        *last = 1;
        return 0;
    }

    /* Read the unary-coded "ones count" t, with carry state (zero/one)
     * threaded between consecutive samples. */
    if(ctx->zero){
        t = 0;
        ctx->zero = 0;
    }else{
        t = get_unary_0_33(gb);
        if(get_bits_count(gb) >= ctx->data_size){
            *last = 1;
            return 0;
        }
        if(t == 16) {
            /* Escape: extend the unary count with a second stage. */
            t2 = get_unary_0_33(gb);
            if(t2 < 2)
                t += t2;
            else
                t += get_bits(gb, t2 - 1) | (1 << (t2 - 1));
        }

        if(ctx->one){
            ctx->one = t&1;
            t = (t>>1) + 1;
        }else{
            ctx->one = t&1;
            t >>= 1;
        }
        ctx->zero = !ctx->one;
    }

    if(ctx->hybrid && !channel)
        update_error_limit(ctx);

    /* Map the count t to a (base, add) range via the three running medians,
     * adapting them up/down as prescribed by the WavPack format. */
    if(!t){
        base = 0;
        add = GET_MED(0) - 1;
        DEC_MED(0);
    }else if(t == 1){
        base = GET_MED(0);
        add = GET_MED(1) - 1;
        INC_MED(0);
        DEC_MED(1);
    }else if(t == 2){
        base = GET_MED(0) + GET_MED(1);
        add = GET_MED(2) - 1;
        INC_MED(0);
        INC_MED(1);
        DEC_MED(2);
    }else{
        base = GET_MED(0) + GET_MED(1) + GET_MED(2) * (t - 2);
        add = GET_MED(2) - 1;
        INC_MED(0);
        INC_MED(1);
        INC_MED(2);
    }

    if(!c->error_limit){
        ret = base + get_tail(gb, add);
    }else{
        /* Hybrid (lossy) mode: binary-search the range, stopping once the
         * remaining uncertainty is within the allowed error limit. */
        int mid = (base*2 + add + 1) >> 1;
        while(add > c->error_limit){
            if(get_bits1(gb)){
                add -= (mid - base);
                base = mid;
            }else
                add = mid - base - 1;
            mid = (base*2 + add + 1) >> 1;
        }
        ret = mid;
    }

    sign = get_bits1(gb);
    if(ctx->hybrid_bitrate)
        c->slow_level += wp_log2(ret) - LEVEL_DECAY(c->slow_level);

    /* Negative values are stored as the bitwise complement. */
    return sign ? ~ret : ret;
}
1threat
How to mock a custom hook inside of a React component you want to test? : <p>If you have a React component that calls a custom hook that fetches data, what is the best way to mock that internal custom hook result when testing the React component? I see 2 main approaches:</p> <p>1) Jest.mock the custom hook. This seems to be the most recommended approach, but it seems like it requires the test to have more knowledge of internal implementation details and what it might need to mock than what the props interface of the component might suggest (assuming use of prop-types or TypeScript)</p> <p>2) Use a dependency injection approach. Declare the hook as a prop, but default it to the real hook so you don't have to set it everywhere you render the component, but allow overriding with a mock for tests. Here is a contrived codesandbox example with a test that mocks a custom hook:</p> <p><a href="https://codesandbox.io/s/dependency-inject-custom-hook-for-testing-mjqlf?fontsize=14&amp;module=%2Fsrc%2FApp.js" rel="noreferrer">https://codesandbox.io/s/dependency-inject-custom-hook-for-testing-mjqlf?fontsize=14&amp;module=%2Fsrc%2FApp.js</a></p> <p>2 requires more typing, but seems easier to work with for testing. However, tests already have to have knowledge of internal implementation details of component to test any conditional logic for rendered output, so maybe that's not important and 1 is the best approach. Is 1 the way to go? What tradeoffs do you see? Am I missing another approach altogether?</p>
0debug
Language for Android Development : <p>Which language should I learn for Android Development? I know Java, and is it possible to write code using Eclipse Java SE, and Develop Android App?</p>
0debug
What is the correct implementation of Collections.max()? : So to my understanding the Collections.max method inplements a loop to determine the greatest value of an array. However, for some reason it does not return the greatest value for me, instead returning the first value, I've tried this with multiple data sets but I keep getting the same result. import java.util.*; public class ch10E4 { public static void main(String[] args) { Scanner input=new Scanner(System.in); int num; int inputNum; ArrayList<Integer> total= new ArrayList<Integer>(); ArrayList<Integer> set1= new ArrayList<Integer>(); ArrayList<Integer> set2= new ArrayList<Integer>(); ArrayList<Integer> set3= new ArrayList<Integer>(); ArrayList<Integer> set4= new ArrayList<Integer>(); ArrayList<Integer> set5= new ArrayList<Integer>(); ArrayList<Integer> set6= new ArrayList<Integer>(); ArrayList<Integer> set7= new ArrayList<Integer>(); ArrayList<Integer> set8= new ArrayList<Integer>(); ArrayList<Integer> set9= new ArrayList<Integer>(); System.out.println("Please enter the values of your dataset (between 1-50. 
Once you are done filling it in, please type any number greater than 51."); do { //sort the values here inputNum = input.nextInt(); if (6 > inputNum){ total.add(inputNum); set1.add(inputNum); } else if (11 > inputNum && inputNum > 5){ total.add(inputNum); set1.add(inputNum); } else if (11 > inputNum && inputNum > 5){ total.add(inputNum); set2.add(inputNum); } else if (16 > inputNum && inputNum > 10){ total.add(inputNum); set3.add(inputNum); } else if (21 > inputNum && inputNum > 15){ total.add(inputNum); set4.add(inputNum); } else if (26 > inputNum && inputNum > 20){ total.add(inputNum); set4.add(inputNum); } else if (31 > inputNum && inputNum > 25){ total.add(inputNum); set5.add(inputNum); } else if (36 > inputNum && inputNum > 30){ total.add(inputNum); set6.add(inputNum); } else if (41 > inputNum && inputNum > 35){ total.add(inputNum); set7.add(inputNum); } else if (46 > inputNum && inputNum > 40){ total.add(inputNum); set8.add(inputNum); } else if (51 > inputNum && inputNum > 45){ total.add(inputNum); set9.add(inputNum); } } while (51 > inputNum); //find average, maximum, range, median, histogram //average int sum = 0; for (double i : total) sum += i; System.out.println("sum total of dataset: "+ sum); double sizeTotal = total.size(); System.out.println("number of data points entered: " + sizeTotal); double average = sum / sizeTotal; System.out.println("The average value of your data is: " + average); //======= //maximum Collections.max(total); System.out.println("The highest value in the array is: " + total.get(0)); } }
0debug
static void avc_luma_midv_qrt_and_aver_dst_4w_msa(const uint8_t *src, int32_t src_stride, uint8_t *dst, int32_t dst_stride, int32_t height, uint8_t ver_offset) { int32_t loop_cnt; int32_t out0, out1; v16i8 src0, src1, src2, src3, src4; v16u8 dst0, dst1; v16i8 mask0, mask1, mask2; v8i16 hz_out0, hz_out1, hz_out2, hz_out3; v8i16 hz_out4, hz_out5, hz_out6; v8i16 res0, res1, res2, res3; v16u8 vec0, vec1; LD_SB3(&luma_mask_arr[48], 16, mask0, mask1, mask2); LD_SB5(src, src_stride, src0, src1, src2, src3, src4); src += (5 * src_stride); XORI_B5_128_SB(src0, src1, src2, src3, src4); hz_out0 = AVC_XOR_VSHF_B_AND_APPLY_6TAP_HORIZ_FILT_SH(src0, src1, mask0, mask1, mask2); hz_out2 = AVC_XOR_VSHF_B_AND_APPLY_6TAP_HORIZ_FILT_SH(src2, src3, mask0, mask1, mask2); PCKOD_D2_SH(hz_out0, hz_out0, hz_out2, hz_out2, hz_out1, hz_out3); hz_out4 = AVC_HORZ_FILTER_SH(src4, mask0, mask1, mask2); for (loop_cnt = (height >> 1); loop_cnt--;) { LD_SB2(src, src_stride, src0, src1); src += (2 * src_stride); XORI_B2_128_SB(src0, src1); LD_UB2(dst, dst_stride, dst0, dst1); hz_out5 = AVC_XOR_VSHF_B_AND_APPLY_6TAP_HORIZ_FILT_SH(src0, src1, mask0, mask1, mask2); hz_out6 = (v8i16) __msa_pckod_d((v2i64) hz_out5, (v2i64) hz_out5); res0 = AVC_CALC_DPADD_H_6PIX_2COEFF_R_SH(hz_out0, hz_out1, hz_out2, hz_out3, hz_out4, hz_out5); res2 = AVC_CALC_DPADD_H_6PIX_2COEFF_R_SH(hz_out1, hz_out2, hz_out3, hz_out4, hz_out5, hz_out6); if (ver_offset) { res1 = __msa_srari_h(hz_out3, 5); res3 = __msa_srari_h(hz_out4, 5); } else { res1 = __msa_srari_h(hz_out2, 5); res3 = __msa_srari_h(hz_out3, 5); } SAT_SH2_SH(res1, res3, 7); res0 = __msa_aver_s_h(res0, res1); res1 = __msa_aver_s_h(res2, res3); vec0 = PCKEV_XORI128_UB(res0, res0); vec1 = PCKEV_XORI128_UB(res1, res1); AVER_UB2_UB(vec0, dst0, vec1, dst1, dst0, dst1); out0 = __msa_copy_u_w((v4i32) dst0, 0); out1 = __msa_copy_u_w((v4i32) dst1, 0); SW(out0, dst); dst += dst_stride; SW(out1, dst); dst += dst_stride; hz_out0 = hz_out2; hz_out1 = hz_out3; hz_out2 = hz_out4; 
hz_out3 = hz_out5; hz_out4 = hz_out6; } }
1threat
Enable AWS S3 MFA delete with the console : <p>Is it possible to enable AWS S3 MFA delete with the console? How? </p> <p>I don't manage to find any way to do it, neither have found any answer googling.</p>
0debug
If two cells have information, then : I want to check two cells and if both are filled in I would like to have the term 'Switch' in my third cell. I found some information about doing this with blank celss (e.g. **http://stackoverflow.com/questions/27439738/if-1-or-2-cells-are-blank-then#_=_**). However, I would like to do the same with filled cells. I tried several things, only it did not work so far... Help is very appreciated.
0debug
Javascript Syntax Error { : <p>The if statement is broken and just returns "Syntax Error {" no matter how I format it. I still get a syntax error for something on it. PS. IM PROBABLY JUST AN IDIOT</p> <pre><code>var userChoice = prompt("Do you choose rock, paper or scissors"); var computerChoice = Math.random(); console.log(computerChoice); if (computerChoice &lt; .33){ computerchoice = "rock"; } else if (computerChoice &lt; .66){ computerChoice = "paper"; } else (computerChoice &lt; 1){ computerChoice = "scissors"; } </code></pre>
0debug
How to pop out ion-select using different button : <p>How do I pop out the ion-select using different button?</p> <pre><code>&lt;ion-select [(ngModel)]="choices" multiple="true"&gt; &lt;ion-option&gt;Appliances&lt;/ion-option&gt; &lt;ion-option&gt;Automobile&lt;/ion-option&gt; &lt;ion-option&gt;Cellphones&lt;/ion-option&gt; &lt;ion-option&gt;Clothing&lt;/ion-option&gt; &lt;ion-option&gt;Computers&lt;/ion-option&gt; &lt;ion-option&gt;Electronics&lt;/ion-option&gt; &lt;ion-option&gt;Toys&lt;/ion-option&gt; &lt;/ion-select&gt; </code></pre>
0debug
Winston not Logging to console in typescript : <p>I am confused by winston. I am using the following typescript code to log onto the console in my *.ts file:</p> <pre><code>import { Logger, LoggerInstance } from "winston"; const logger:LoggerInstance = new Logger(); logger.info('Now my debug messages are written to the console!'); </code></pre> <p>the console remains empty. There are no compile errors or other issues.</p> <p>At the same time the following works fine:</p> <pre><code>const wnstn = require("winston"); wnstn.info('Finally my messages are written to the console!'); </code></pre> <p>Does anyone have a clue why that is the case? Do I have to configure the Logger differently? How would I use the defaults I get from the second example?</p>
0debug
bool s390_cpu_exec_interrupt(CPUState *cs, int interrupt_request) { if (interrupt_request & CPU_INTERRUPT_HARD) { S390CPU *cpu = S390_CPU(cs); CPUS390XState *env = &cpu->env; if (env->ex_value) { return false; } if (env->psw.mask & PSW_MASK_EXT) { s390_cpu_do_interrupt(cs); return true; } } return false; }
1threat
How to emulate Android's showAsAction="ifRoom" in Flutter? : <p>In my Flutter app I have a screen that is a MaterialApp with a Scaffold widget as it's home.</p> <p>The appBar property of this Scaffold is an AppBar widget with the actions property filled with some actions and a popup menu to house the rest of the options.</p> <p>The thing is, as I understand a child of AppBar <code>actions</code> list can either be a generic widget (it will be added as an action) or an instance of <code>PopupMenuButton</code>, in which case it will add the platform specific icon that when triggered opens the AppBar popup menu.</p> <p>On native Android that's not how it works. I just need to inflate a menu filled with menu items and each item either can be forced to be an action, forced to NOT be an action or have the special value "ifRoom" that means "be an action if there is space, otherwise be an item inside de popup menu".</p> <p>Is there a way in Flutter to have this behavior without having to write a complex logic to populate the "actions" property of the AppBar?</p> <p>I've looked into both AppBar and PopupMenuButton documentations and so far nothing explains how to do such a thing. I could simulate the behavior but then I would have to actually write a routine to calculate the available space and build the "actions" list accordingly.</p> <p>Here's a typical Android menu that mixes actions and popup menu entries. 
Notice the "load_game" entry can be an action if there is room and will become a menu entry if not.</p> <pre class="lang-xml prettyprint-override"><code>&lt;?xml version="1.0" encoding="utf-8"?&gt; &lt;menu xmlns:android="http://schemas.android.com/apk/res/android"&gt; &lt;item android:id="@+id/new_game" android:icon="@drawable/ic_new_game" android:title="@string/new_game" android:showAsAction="always"/&gt; &lt;item android:id="@+id/load_game" android:icon="@drawable/ic_load_game" android:title="@string/load_game" android:showAsAction="ifRoom"/&gt; &lt;item android:id="@+id/help" android:icon="@drawable/ic_help" android:title="@string/help" android:showAsAction="never" /&gt; &lt;/menu&gt; </code></pre> <p>On the other hand in Flutter I have to decide ahead of time if the options will be an action or a menu entry.</p> <pre class="lang-dart prettyprint-override"><code>AppBar( title: Text("My Incredible Game"), primary: true, actions: &lt;Widget&gt;[ IconButton( icon: Icon(Icons.add), tooltip: "New Game", onPressed: null, ), IconButton( icon: Icon(Icons.cloud_upload), tooltip: "Load Game", onPressed: null, ), PopupMenuButton( itemBuilder: (BuildContext context) { return &lt;PopupMenuEntry&gt;[ PopupMenuItem( child: Text("Help"), ), ]; }, ) ], ) </code></pre> <p>What I hoped would work is that the AppBar actually had just a single "action" property instead of "actions". That property would be just a widget allowing me to have anything so if I wanted just a list of actions then a Row filled with IconButton's would suffice.</p> <p>Along with that each PopupMenuItem inside the PopupMenuButton would have a "showAsAction" property. If one or more PopupMenuItem inside the PopupMenuButton was checked to be an action or "ifRoom" and there is room, then the PopupMenuButton would expand horizontally and place these items as actions.</p>
0debug
static void init_proc_750 (CPUPPCState *env) { gen_spr_ne_601(env); gen_spr_7xx(env); spr_register(env, SPR_L2CR, "L2CR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, NULL, 0x00000000); gen_tbl(env); gen_spr_thrm(env); spr_register(env, SPR_HID0, "HID0", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); spr_register(env, SPR_HID1, "HID1", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); gen_low_BATs(env); init_excp_7x0(env); env->dcache_line_size = 32; env->icache_line_size = 32; ppc6xx_irq_init(env); }
1threat
void ff_llviddsp_init_x86(LLVidDSPContext *c) { int cpu_flags = av_get_cpu_flags(); #if HAVE_INLINE_ASM && HAVE_7REGS && ARCH_X86_32 if (cpu_flags & AV_CPU_FLAG_CMOV) c->add_median_pred = add_median_pred_cmov; #endif if (ARCH_X86_32 && EXTERNAL_MMX(cpu_flags)) { c->add_bytes = ff_add_bytes_mmx; } if (ARCH_X86_32 && EXTERNAL_MMXEXT(cpu_flags)) { if (!(cpu_flags & AV_CPU_FLAG_3DNOW)) c->add_median_pred = ff_add_median_pred_mmxext; } if (EXTERNAL_SSE2(cpu_flags)) { c->add_bytes = ff_add_bytes_sse2; c->add_median_pred = ff_add_median_pred_sse2; } if (EXTERNAL_SSSE3(cpu_flags)) { c->add_left_pred = ff_add_left_pred_ssse3; c->add_left_pred_int16 = ff_add_left_pred_int16_ssse3; c->add_gradient_pred = ff_add_gradient_pred_ssse3; } if (EXTERNAL_SSSE3_FAST(cpu_flags)) { c->add_left_pred = ff_add_left_pred_unaligned_ssse3; } if (EXTERNAL_SSE4(cpu_flags)) { c->add_left_pred_int16 = ff_add_left_pred_int16_sse4; } if (EXTERNAL_AVX2_FAST(cpu_flags)) { c->add_bytes = ff_add_bytes_avx2; c->add_left_pred = ff_add_left_pred_unaligned_avx2; c->add_gradient_pred = ff_add_gradient_pred_avx2; } }
1threat
static size_t cache_get_cache_pos(const PageCache *cache, uint64_t address) { size_t pos; g_assert(cache->max_num_items); pos = (address / cache->page_size) & (cache->max_num_items - 1); return pos; }
1threat
Java scanner usage with \R pattern (issue with buffer boundary) : <p><strong>Executive summary:</strong> Are there any caveats/known issues with <code>\R</code> (or other regex pattern) usage in Java's <code>Scanner</code> (especially regarding internal buffer's boundary conditions)?</p> <p><strong>Details:</strong> Since I wanted to do some multi-line pattern matching on potentially multi-platform input files, I used patterns with <code>\R</code>, which according to <code>Pattern</code> javadoc is:</p> <blockquote> <p>Any Unicode linebreak sequence, is equivalent to <code>\u000D\u000A|[\u000A\u000B\u000C\u000D\u0085\u2028\u2029]</code></p> </blockquote> <p>Anyhow, I noticed in one of my test files that the loop that's supposed to parse a block of a hex-dump was cut short. After some debugging, I noticed that the line that it was ending on was the end of Scanner's internal buffer.</p> <p>Here's a test program I wrote to simulate the situation:</p> <pre><code>public static void main(String[] args) throws IOException { testString(1); testString(1022); } private static void testString(int prefixLen) { String suffix = "b\r\nX"; String buffer = new String(new char[prefixLen]).replace("\0", "a") + suffix; Scanner scanner = new Scanner(buffer); String pattern = "b\\R"; System.out.printf("=================\nTest String (Len=%d): '%s'\n'%s' found with horizon=0 (w/o bound): %s\n", buffer.length(), convertLineEndings( buffer), pattern, convertLineEndings(scanner.findWithinHorizon(pattern, 0))); System.out.printf("'X' found with horizon=1: %b\n", scanner.findWithinHorizon("X", 1) != null); scanner.close(); } private static String convertLineEndings(String string) { return string.replaceAll("\\n", "\\\\n").replaceAll("\\r", "\\\\r"); } </code></pre> <p>... 
which produces this output (edited for formatting/brevity):</p> <pre class="lang-none prettyprint-override"><code>================= Test String (Len=5): 'ab\r\nX' 'b\R' found with horizon=0 (w/o bound): b\r\n 'X' found with horizon=1: true ================= Test String (Len=1026): 'a ... ab\r\nX' 'b\R' found with horizon=0 (w/o bound): b\r 'X' found with horizon=1: false </code></pre> <p>To me, this looks like a bug! I think the scanner should match that <code>suffix</code> with the patterns the same way independent of where they show up in the input text (as long as the <code>prefix</code> doesn't get involved with the patterns). (I have also found possibly relevant <strong>Open</strong> JDK Bugs <a href="https://bugs.openjdk.java.net/browse/JDK-8176407" rel="noreferrer">8176407</a>, and <a href="https://bugs.openjdk.java.net/browse/JDK-8072582" rel="noreferrer">8072582</a>, but this was with regular Oracle JDK 8u111).</p> <p>But I may have missed some recommendations regarding scanner or particular <code>\R</code> pattern usage (or that Open JDK, and Oracle have identical(??) implementations for relevant classes here?)... hence the question!</p>
0debug
How to view data, that are parsed from the MySQL database, in offline, using SQLite? : <p>I am developing my school's timetable app for android. I am getting timetable data from MySQL database. -> I have access to the timetable, only when I have an internet. But I want to make timetable accessible without the internet. I read that I need to save data from MySQL to the SQLite, and then use them. Can you, please, give me links to the samples how do to that?</p>
0debug
static inline int RENAME(yuv420_rgb24)(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY, int srcSliceH, uint8_t* dst[], int dstStride[]){ int y, h_size; if(c->srcFormat == PIX_FMT_YUV422P){ srcStride[1] *= 2; srcStride[2] *= 2; } h_size= (c->dstW+7)&~7; if(h_size*3 > FFABS(dstStride[0])) h_size-=8; __asm__ __volatile__ ("pxor %mm4, %mm4;" ); for (y= 0; y<srcSliceH; y++ ) { uint8_t *_image = dst[0] + (y+srcSliceY)*dstStride[0]; uint8_t *_py = src[0] + y*srcStride[0]; uint8_t *_pu = src[1] + (y>>1)*srcStride[1]; uint8_t *_pv = src[2] + (y>>1)*srcStride[2]; long index= -h_size/2; __asm__ __volatile__ ( "movd (%2, %0), %%mm0;" "movd (%3, %0), %%mm1;" "movq (%5, %0, 2), %%mm6;" "1: \n\t" YUV2RGB #ifdef HAVE_MMX2 "movq "MANGLE(M24A)", %%mm4 \n\t" "movq "MANGLE(M24C)", %%mm7 \n\t" "pshufw $0x50, %%mm0, %%mm5 \n\t" "pshufw $0x50, %%mm2, %%mm3 \n\t" "pshufw $0x00, %%mm1, %%mm6 \n\t" "pand %%mm4, %%mm5 \n\t" "pand %%mm4, %%mm3 \n\t" "pand %%mm7, %%mm6 \n\t" "psllq $8, %%mm3 \n\t" "por %%mm5, %%mm6 \n\t" "por %%mm3, %%mm6 \n\t" MOVNTQ" %%mm6, (%1) \n\t" "psrlq $8, %%mm2 \n\t" "pshufw $0xA5, %%mm0, %%mm5 \n\t" "pshufw $0x55, %%mm2, %%mm3 \n\t" "pshufw $0xA5, %%mm1, %%mm6 \n\t" "pand "MANGLE(M24B)", %%mm5 \n\t" "pand %%mm7, %%mm3 \n\t" "pand %%mm4, %%mm6 \n\t" "por %%mm5, %%mm3 \n\t" "por %%mm3, %%mm6 \n\t" MOVNTQ" %%mm6, 8(%1) \n\t" "pshufw $0xFF, %%mm0, %%mm5 \n\t" "pshufw $0xFA, %%mm2, %%mm3 \n\t" "pshufw $0xFA, %%mm1, %%mm6 \n\t" "movd 4 (%2, %0), %%mm0;" "pand %%mm7, %%mm5 \n\t" "pand %%mm4, %%mm3 \n\t" "pand "MANGLE(M24B)", %%mm6 \n\t" "movd 4 (%3, %0), %%mm1;" \ "por %%mm5, %%mm3 \n\t" "por %%mm3, %%mm6 \n\t" MOVNTQ" %%mm6, 16(%1) \n\t" "movq 8 (%5, %0, 2), %%mm6;" "pxor %%mm4, %%mm4 \n\t" #else "pxor %%mm4, %%mm4 \n\t" "movq %%mm0, %%mm5 \n\t" "movq %%mm1, %%mm6 \n\t" "punpcklbw %%mm2, %%mm0 \n\t" "punpcklbw %%mm4, %%mm1 \n\t" "punpckhbw %%mm2, %%mm5 \n\t" "punpckhbw %%mm4, %%mm6 \n\t" "movq %%mm0, %%mm7 \n\t" "movq %%mm5, %%mm3 \n\t" "punpcklwd %%mm1, 
%%mm7 \n\t" "punpckhwd %%mm1, %%mm0 \n\t" "punpcklwd %%mm6, %%mm5 \n\t" "punpckhwd %%mm6, %%mm3 \n\t" "movq %%mm7, %%mm2 \n\t" "movq %%mm0, %%mm6 \n\t" "movq %%mm5, %%mm1 \n\t" "movq %%mm3, %%mm4 \n\t" "psllq $40, %%mm7 \n\t" "psllq $40, %%mm0 \n\t" "psllq $40, %%mm5 \n\t" "psllq $40, %%mm3 \n\t" "punpckhdq %%mm2, %%mm7 \n\t" "punpckhdq %%mm6, %%mm0 \n\t" "punpckhdq %%mm1, %%mm5 \n\t" "punpckhdq %%mm4, %%mm3 \n\t" "psrlq $8, %%mm7 \n\t" "movq %%mm0, %%mm6 \n\t" "psllq $40, %%mm0 \n\t" "por %%mm0, %%mm7 \n\t" MOVNTQ" %%mm7, (%1) \n\t" "movd 4 (%2, %0), %%mm0;" "psrlq $24, %%mm6 \n\t" "movq %%mm5, %%mm1 \n\t" "psllq $24, %%mm5 \n\t" "por %%mm5, %%mm6 \n\t" MOVNTQ" %%mm6, 8(%1) \n\t" "movq 8 (%5, %0, 2), %%mm6;" "psrlq $40, %%mm1 \n\t" "psllq $8, %%mm3 \n\t" "por %%mm3, %%mm1 \n\t" MOVNTQ" %%mm1, 16(%1) \n\t" "movd 4 (%3, %0), %%mm1;" "pxor %%mm4, %%mm4 \n\t" #endif "add $24, %1 \n\t" "add $4, %0 \n\t" " js 1b \n\t" : "+r" (index), "+r" (_image) : "r" (_pu - index), "r" (_pv - index), "r"(&c->redDither), "r" (_py - 2*index) ); } __asm__ __volatile__ (EMMS); return srcSliceH; }
1threat
Wait page in search in symfony2? : <p>I'm doing a search engine and I want to make a waiting page while searching, as do the sites of the airlines while looking for availabilities? Any ideas I'm using Symfony2</p>
0debug
Dart 2: Difference between Future<void> and Future<Null> : <p>Having an asynchronous function that doesn't return a value, what's the ideal return type <code>Future&lt;Null&gt;</code> or <code>Future&lt;void&gt;</code>?, or more specifically, what's the difference in using either? Both are legal, and in both cases the return value of the function is a <code>Future</code> that resolves to <code>null</code>. The following code prints <code>null</code> two times:</p> <pre><code>import 'dart:async'; Future&lt;void&gt; someAsync() async {} Future&lt;Null&gt; otherAsync() async {} main() { someAsync().then((v) =&gt; print(v)); otherAsync().then((v) =&gt; print(v)); } </code></pre>
0debug
int ff_h264_context_init(H264Context *h) { ERContext *er = &h->er; int mb_array_size = h->mb_height * h->mb_stride; int y_size = (2 * h->mb_width + 1) * (2 * h->mb_height + 1); int c_size = h->mb_stride * (h->mb_height + 1); int yc_size = y_size + 2 * c_size; int x, y, i; FF_ALLOCZ_OR_GOTO(h->avctx, h->top_borders[0], h->mb_width * 16 * 3 * sizeof(uint8_t) * 2, fail) FF_ALLOCZ_OR_GOTO(h->avctx, h->top_borders[1], h->mb_width * 16 * 3 * sizeof(uint8_t) * 2, fail) h->ref_cache[0][scan8[5] + 1] = h->ref_cache[0][scan8[7] + 1] = h->ref_cache[0][scan8[13] + 1] = h->ref_cache[1][scan8[5] + 1] = h->ref_cache[1][scan8[7] + 1] = h->ref_cache[1][scan8[13] + 1] = PART_NOT_AVAILABLE; if (CONFIG_ERROR_RESILIENCE) { er->avctx = h->avctx; er->mecc = &h->mecc; er->decode_mb = h264_er_decode_mb; er->opaque = h; er->quarter_sample = 1; er->mb_num = h->mb_num; er->mb_width = h->mb_width; er->mb_height = h->mb_height; er->mb_stride = h->mb_stride; er->b8_stride = h->mb_width * 2 + 1; FF_ALLOCZ_OR_GOTO(h->avctx, er->mb_index2xy, (h->mb_num + 1) * sizeof(int), fail); for (y = 0; y < h->mb_height; y++) for (x = 0; x < h->mb_width; x++) er->mb_index2xy[x + y * h->mb_width] = x + y * h->mb_stride; er->mb_index2xy[h->mb_height * h->mb_width] = (h->mb_height - 1) * h->mb_stride + h->mb_width; FF_ALLOCZ_OR_GOTO(h->avctx, er->error_status_table, mb_array_size * sizeof(uint8_t), fail); FF_ALLOC_OR_GOTO(h->avctx, er->mbintra_table, mb_array_size, fail); memset(er->mbintra_table, 1, mb_array_size); FF_ALLOCZ_OR_GOTO(h->avctx, er->mbskip_table, mb_array_size + 2, fail); FF_ALLOC_OR_GOTO(h->avctx, er->er_temp_buffer, h->mb_height * h->mb_stride, fail); FF_ALLOCZ_OR_GOTO(h->avctx, h->dc_val_base, yc_size * sizeof(int16_t), fail); er->dc_val[0] = h->dc_val_base + h->mb_width * 2 + 2; er->dc_val[1] = h->dc_val_base + y_size + h->mb_stride + 1; er->dc_val[2] = er->dc_val[1] + c_size; for (i = 0; i < yc_size; i++) h->dc_val_base[i] = 1024; } return 0; fail: return AVERROR(ENOMEM); }
1threat
Updating the changes to an open file (Python) : Important Notes: **** **I know how to modify a file**. **** **By file, I mean notepad.** **** **When I say that I have a file open, I don't mean that the Python program has the file open with the `Open()` function. I mean that I, the user, have the file open on the screen to where I can see it.** **** **Please don't ask for actual code. I have not started creating anything. That would be pointless because everything relies on this concept that I am about to ask.** **** **If there is no solution, please don't downvote. That is a waste of both our rep points. Please just leave a comment, and I will gladly delete the question after a couple of days.** **** My problem is that when I make changes to a file using a Python program while I also have it open on my screen, I cannot see the changes without closing the file and opening again. I need a way for the program to update the file while I am looking at it. This is critical to my program. Thank you for any help!
0debug
static int decode_slice_header(H264Context *h){ MpegEncContext * const s = &h->s; int first_mb_in_slice, pps_id; int num_ref_idx_active_override_flag; static const uint8_t slice_type_map[5]= {P_TYPE, B_TYPE, I_TYPE, SP_TYPE, SI_TYPE}; int slice_type; int default_ref_list_done = 0; s->current_picture.reference= h->nal_ref_idc != 0; s->dropable= h->nal_ref_idc == 0; first_mb_in_slice= get_ue_golomb(&s->gb); slice_type= get_ue_golomb(&s->gb); if(slice_type > 9){ av_log(h->s.avctx, AV_LOG_ERROR, "slice type too large (%d) at %d %d\n", h->slice_type, s->mb_x, s->mb_y); return -1; } if(slice_type > 4){ slice_type -= 5; h->slice_type_fixed=1; }else h->slice_type_fixed=0; slice_type= slice_type_map[ slice_type ]; if (slice_type == I_TYPE || (h->slice_num != 0 && slice_type == h->slice_type) ) { default_ref_list_done = 1; } h->slice_type= slice_type; s->pict_type= h->slice_type; pps_id= get_ue_golomb(&s->gb); if(pps_id>255){ av_log(h->s.avctx, AV_LOG_ERROR, "pps_id out of range\n"); return -1; } h->pps= h->pps_buffer[pps_id]; if(h->pps.slice_group_count == 0){ av_log(h->s.avctx, AV_LOG_ERROR, "non existing PPS referenced\n"); return -1; } h->sps= h->sps_buffer[ h->pps.sps_id ]; if(h->sps.log2_max_frame_num == 0){ av_log(h->s.avctx, AV_LOG_ERROR, "non existing SPS referenced\n"); return -1; } if(h->dequant_coeff_pps != pps_id){ h->dequant_coeff_pps = pps_id; init_dequant_tables(h); } s->mb_width= h->sps.mb_width; s->mb_height= h->sps.mb_height * (2 - h->sps.frame_mbs_only_flag); h->b_stride= s->mb_width*4; h->b8_stride= s->mb_width*2; s->width = 16*s->mb_width - 2*(h->sps.crop_left + h->sps.crop_right ); if(h->sps.frame_mbs_only_flag) s->height= 16*s->mb_height - 2*(h->sps.crop_top + h->sps.crop_bottom); else s->height= 16*s->mb_height - 4*(h->sps.crop_top + h->sps.crop_bottom); if (s->context_initialized && ( s->width != s->avctx->width || s->height != s->avctx->height)) { free_tables(h); MPV_common_end(s); } if (!s->context_initialized) { if (MPV_common_init(s) < 0) return 
-1; if(s->dsp.h264_idct_add == ff_h264_idct_add_c){ memcpy(h->zigzag_scan, zigzag_scan, 16*sizeof(uint8_t)); memcpy(h-> field_scan, field_scan, 16*sizeof(uint8_t)); }else{ int i; for(i=0; i<16; i++){ #define T(x) (x>>2) | ((x<<2) & 0xF) h->zigzag_scan[i] = T(zigzag_scan[i]); h-> field_scan[i] = T( field_scan[i]); #undef T } } if(s->dsp.h264_idct8_add == ff_h264_idct8_add_c){ memcpy(h->zigzag_scan8x8, zigzag_scan8x8, 64*sizeof(uint8_t)); memcpy(h->zigzag_scan8x8_cavlc, zigzag_scan8x8_cavlc, 64*sizeof(uint8_t)); memcpy(h->field_scan8x8, field_scan8x8, 64*sizeof(uint8_t)); memcpy(h->field_scan8x8_cavlc, field_scan8x8_cavlc, 64*sizeof(uint8_t)); }else{ int i; for(i=0; i<64; i++){ #define T(x) (x>>3) | ((x&7)<<3) h->zigzag_scan8x8[i] = T(zigzag_scan8x8[i]); h->zigzag_scan8x8_cavlc[i] = T(zigzag_scan8x8_cavlc[i]); h->field_scan8x8[i] = T(field_scan8x8[i]); h->field_scan8x8_cavlc[i] = T(field_scan8x8_cavlc[i]); #undef T } } if(h->sps.transform_bypass){ h->zigzag_scan_q0 = zigzag_scan; h->zigzag_scan8x8_q0 = zigzag_scan8x8; h->zigzag_scan8x8_cavlc_q0 = zigzag_scan8x8_cavlc; h->field_scan_q0 = field_scan; h->field_scan8x8_q0 = field_scan8x8; h->field_scan8x8_cavlc_q0 = field_scan8x8_cavlc; }else{ h->zigzag_scan_q0 = h->zigzag_scan; h->zigzag_scan8x8_q0 = h->zigzag_scan8x8; h->zigzag_scan8x8_cavlc_q0 = h->zigzag_scan8x8_cavlc; h->field_scan_q0 = h->field_scan; h->field_scan8x8_q0 = h->field_scan8x8; h->field_scan8x8_cavlc_q0 = h->field_scan8x8_cavlc; } alloc_tables(h); s->avctx->width = s->width; s->avctx->height = s->height; s->avctx->sample_aspect_ratio= h->sps.sar; if(!s->avctx->sample_aspect_ratio.den) s->avctx->sample_aspect_ratio.den = 1; if(h->sps.timing_info_present_flag){ s->avctx->time_base= (AVRational){h->sps.num_units_in_tick * 2, h->sps.time_scale}; if(h->x264_build > 0 && h->x264_build < 44) s->avctx->time_base.den *= 2; av_reduce(&s->avctx->time_base.num, &s->avctx->time_base.den, s->avctx->time_base.num, s->avctx->time_base.den, 1<<30); } } if(h->slice_num 
== 0){ if(frame_start(h) < 0) return -1; } s->current_picture_ptr->frame_num= h->frame_num= get_bits(&s->gb, h->sps.log2_max_frame_num); h->mb_mbaff = 0; h->mb_aff_frame = 0; if(h->sps.frame_mbs_only_flag){ s->picture_structure= PICT_FRAME; }else{ if(get_bits1(&s->gb)) { s->picture_structure= PICT_TOP_FIELD + get_bits1(&s->gb); av_log(h->s.avctx, AV_LOG_ERROR, "PAFF interlacing is not implemented\n"); } else { s->picture_structure= PICT_FRAME; h->mb_aff_frame = h->sps.mb_aff; } } s->resync_mb_x = s->mb_x = first_mb_in_slice % s->mb_width; s->resync_mb_y = s->mb_y = (first_mb_in_slice / s->mb_width) << h->mb_aff_frame; if(s->mb_y >= s->mb_height){ return -1; } if(s->picture_structure==PICT_FRAME){ h->curr_pic_num= h->frame_num; h->max_pic_num= 1<< h->sps.log2_max_frame_num; }else{ h->curr_pic_num= 2*h->frame_num; h->max_pic_num= 1<<(h->sps.log2_max_frame_num + 1); } if(h->nal_unit_type == NAL_IDR_SLICE){ get_ue_golomb(&s->gb); } if(h->sps.poc_type==0){ h->poc_lsb= get_bits(&s->gb, h->sps.log2_max_poc_lsb); if(h->pps.pic_order_present==1 && s->picture_structure==PICT_FRAME){ h->delta_poc_bottom= get_se_golomb(&s->gb); } } if(h->sps.poc_type==1 && !h->sps.delta_pic_order_always_zero_flag){ h->delta_poc[0]= get_se_golomb(&s->gb); if(h->pps.pic_order_present==1 && s->picture_structure==PICT_FRAME) h->delta_poc[1]= get_se_golomb(&s->gb); } init_poc(h); if(h->pps.redundant_pic_cnt_present){ h->redundant_pic_count= get_ue_golomb(&s->gb); } h->ref_count[0]= h->pps.ref_count[0]; h->ref_count[1]= h->pps.ref_count[1]; if(h->slice_type == P_TYPE || h->slice_type == SP_TYPE || h->slice_type == B_TYPE){ if(h->slice_type == B_TYPE){ h->direct_spatial_mv_pred= get_bits1(&s->gb); if(h->sps.mb_aff && h->direct_spatial_mv_pred) av_log(h->s.avctx, AV_LOG_ERROR, "MBAFF + spatial direct mode is not implemented\n"); } num_ref_idx_active_override_flag= get_bits1(&s->gb); if(num_ref_idx_active_override_flag){ h->ref_count[0]= get_ue_golomb(&s->gb) + 1; if(h->slice_type==B_TYPE) 
h->ref_count[1]= get_ue_golomb(&s->gb) + 1; if(h->ref_count[0] > 32 || h->ref_count[1] > 32){ av_log(h->s.avctx, AV_LOG_ERROR, "reference overflow\n"); return -1; } } } if(!default_ref_list_done){ fill_default_ref_list(h); } if(decode_ref_pic_list_reordering(h) < 0) return -1; if( (h->pps.weighted_pred && (h->slice_type == P_TYPE || h->slice_type == SP_TYPE )) || (h->pps.weighted_bipred_idc==1 && h->slice_type==B_TYPE ) ) pred_weight_table(h); else if(h->pps.weighted_bipred_idc==2 && h->slice_type==B_TYPE) implicit_weight_table(h); else h->use_weight = 0; if(s->current_picture.reference) decode_ref_pic_marking(h); if(FRAME_MBAFF) fill_mbaff_ref_list(h); if( h->slice_type != I_TYPE && h->slice_type != SI_TYPE && h->pps.cabac ) h->cabac_init_idc = get_ue_golomb(&s->gb); h->last_qscale_diff = 0; s->qscale = h->pps.init_qp + get_se_golomb(&s->gb); if(s->qscale<0 || s->qscale>51){ av_log(s->avctx, AV_LOG_ERROR, "QP %d out of range\n", s->qscale); return -1; } h->chroma_qp = get_chroma_qp(h->pps.chroma_qp_index_offset, s->qscale); if(h->slice_type == SP_TYPE){ get_bits1(&s->gb); } if(h->slice_type==SP_TYPE || h->slice_type == SI_TYPE){ get_se_golomb(&s->gb); } h->deblocking_filter = 1; h->slice_alpha_c0_offset = 0; h->slice_beta_offset = 0; if( h->pps.deblocking_filter_parameters_present ) { h->deblocking_filter= get_ue_golomb(&s->gb); if(h->deblocking_filter < 2) h->deblocking_filter^= 1; if( h->deblocking_filter ) { h->slice_alpha_c0_offset = get_se_golomb(&s->gb) << 1; h->slice_beta_offset = get_se_golomb(&s->gb) << 1; } } if( s->avctx->skip_loop_filter >= AVDISCARD_ALL ||(s->avctx->skip_loop_filter >= AVDISCARD_NONKEY && h->slice_type != I_TYPE) ||(s->avctx->skip_loop_filter >= AVDISCARD_BIDIR && h->slice_type == B_TYPE) ||(s->avctx->skip_loop_filter >= AVDISCARD_NONREF && h->nal_ref_idc == 0)) h->deblocking_filter= 0; #if 0 if( h->pps.num_slice_groups > 1 && h->pps.mb_slice_group_map_type >= 3 && h->pps.mb_slice_group_map_type <= 5) slice_group_change_cycle= 
get_bits(&s->gb, ?); #endif h->slice_num++; h->emu_edge_width= (s->flags&CODEC_FLAG_EMU_EDGE) ? 0 : 16; h->emu_edge_height= FRAME_MBAFF ? 0 : h->emu_edge_width; if(s->avctx->debug&FF_DEBUG_PICT_INFO){ av_log(h->s.avctx, AV_LOG_DEBUG, "slice:%d %s mb:%d %c pps:%d frame:%d poc:%d/%d ref:%d/%d qp:%d loop:%d:%d:%d weight:%d%s\n", h->slice_num, (s->picture_structure==PICT_FRAME ? "F" : s->picture_structure==PICT_TOP_FIELD ? "T" : "B"), first_mb_in_slice, av_get_pict_type_char(h->slice_type), pps_id, h->frame_num, s->current_picture_ptr->field_poc[0], s->current_picture_ptr->field_poc[1], h->ref_count[0], h->ref_count[1], s->qscale, h->deblocking_filter, h->slice_alpha_c0_offset/2, h->slice_beta_offset/2, h->use_weight, h->use_weight==1 && h->use_weight_chroma ? "c" : "" ); } if((s->avctx->flags2 & CODEC_FLAG2_FAST) && !s->current_picture.reference){ s->me.qpel_put= s->dsp.put_2tap_qpel_pixels_tab; s->me.qpel_avg= s->dsp.avg_2tap_qpel_pixels_tab; }else{ s->me.qpel_put= s->dsp.put_h264_qpel_pixels_tab; s->me.qpel_avg= s->dsp.avg_h264_qpel_pixels_tab; } return 0; }
1threat
Set text-cursor position in a textarea : <p>I'm working on a BBCode editor and here is the code:</p> <pre><code>var txtarea = document.getElementById("editor_area"); function boldText() { var start = txtarea.selectionStart; var end = txtarea.selectionEnd; var sel = txtarea.value.substring(start, end); var finText = txtarea.value.substring(0, start) + '[b]' + sel + '[/b]' + txtarea.value.substring(end); txtarea.value = finText; txtarea.focus(); } </code></pre> <p>Everything is OK except one thing which is the position of the text-cursor. When I click on the boldText button, it sets the cursor position at the end of the Textarea!!</p> <p>Actually, I want to be able to set the cursor position at a certain index. I want something like this:</p> <pre><code>txtarea.setFocusAt(20); </code></pre>
0debug
static void replay_enable(const char *fname, int mode) { const char *fmode = NULL; assert(!replay_file); switch (mode) { case REPLAY_MODE_RECORD: fmode = "wb"; break; case REPLAY_MODE_PLAY: fmode = "rb"; break; default: fprintf(stderr, "Replay: internal error: invalid replay mode\n"); exit(1); } atexit(replay_finish); replay_mutex_init(); replay_file = fopen(fname, fmode); if (replay_file == NULL) { fprintf(stderr, "Replay: open %s: %s\n", fname, strerror(errno)); exit(1); } replay_filename = g_strdup(fname); replay_mode = mode; replay_data_kind = -1; replay_state.instructions_count = 0; replay_state.current_step = 0; if (replay_mode == REPLAY_MODE_RECORD) { fseek(replay_file, HEADER_SIZE, SEEK_SET); } else if (replay_mode == REPLAY_MODE_PLAY) { unsigned int version = replay_get_dword(); if (version != REPLAY_VERSION) { fprintf(stderr, "Replay: invalid input log file version\n"); exit(1); } fseek(replay_file, HEADER_SIZE, SEEK_SET); replay_fetch_data_kind(); } replay_init_events(); }
1threat
Name of construction : <pre><code>private final EventManager eventManager; private final DateManager dateManager; private final UserManager userManager; </code></pre> <p>What is the name of this construction? Is this object or something else? EventManager, DateManager, UserManager are the names of classes.</p>
0debug
i want to display All "name" values in array (underscore/js) : var data=[{ "name": "cA", "leaf": false, "largeIconId": null, "label": "cA", "hideAllSearchFilters": false, "guidePage": null, "expanded": false, "defaultSearchCategory": false, "childCategories": [{ "name": "cA-A", "leaf": false, "largeIconId": null, "label": "cA-A", "hideAllSearchFilters": false, "guidePage": null, "expanded": false, "defaultSearchCategory": false, "childCategories": [{ "name": "cA-A-A", "leaf": false, "largeIconId": null, "label": "cA-A-A", "hideAllSearchFilters": false, "guidePage": null, "expanded": false, "defaultSearchCategory": false, "childCategories": [{ "name": "cA-A-A-A", "leaf": false, "largeIconId": null, "label": "cA-A-A-A", "hideAllSearchFilters": false, "guidePage": null, "expanded": false, "defaultSearchCategory": false, "childCategories": [{ "name": "cA-A-A-A-A", "leaf": true, "largeIconId": null, "label": "cA-A-A-A-A", "hideAllSearchFilters": false, "guidePage": null, "expanded": false, "defaultSearchCategory": false, "childCategories": [] }] }] }] }, { "name": "cA-B", "leaf": true, "largeIconId": null, "label": "cA-B", "hideAllSearchFilters": false, "guidePage": null, "expanded": false, "defaultSearchCategory": false, "childCategories": [] }, { "name": "cA-C", "leaf": true, "largeIconId": null, "label": "cA-C", "hideAllSearchFilters": false, "guidePage": null, "expanded": false, "defaultSearchCategory": false, "childCategories": [] }] }, { "name": "A", "leaf": false, "largeIconId": null, "label": "A", "hideAllSearchFilters": false, "guidePage": null, "expanded": false, "defaultSearchCategory": false, "childCategories": [{ "name": "A-Level1", "leaf": false, "largeIconId": null, "label": "A-Level1", "hideAllSearchFilters": false, "guidePage": null, "expanded": false, "defaultSearchCategory": false, "childCategories": [{ "name": "A-Level2", "leaf": true, "largeIconId": null, "label": "A-Level2", "hideAllSearchFilters": false, "guidePage": null, "expanded": false, 
"defaultSearchCategory": false, "childCategories": [] }] }] }];
0debug
How to preload images in React.js? : <p>How to preload images in React.js? I have dropdown select component which works like menu , but i have to preload image icons for items,because sometimes they are not visible on first open.</p> <p>I have tried:</p> <p><a href="https://github.com/sambernard/react-preload" rel="noreferrer">https://github.com/sambernard/react-preload</a></p> <p><a href="https://github.com/wizardzloy/react-img-preload" rel="noreferrer">https://github.com/wizardzloy/react-img-preload</a></p> <p>First one has nice API easy to understand and use ,but is spaming console with warning that images were not loaded even when they were. Second one has strange API ,but I tried example and it did not preload anything.</p> <p>So I probably need to implement something at my own ,but do not know where to start. Or another possibility would be to loaded them with webpack.</p>
0debug
ionic2 remove blue line color input md : <p>How can I remove the default line below the text input md.</p> <p>I already tried all of these below, but the "default" styling still keeps this line.</p> <p><a href="https://i.stack.imgur.com/wqI3m.png" rel="noreferrer"><img src="https://i.stack.imgur.com/wqI3m.png" alt="enter image description here"></a></p> <pre><code>$text-input-md-highlight-color: "transparent"; $text-input-md-highlight-color-invalid : "transparent"; $text-input-md-highlight-color-valid : "transparent"; $text-input-md-background-color : "transparent"; $text-input-md-show-focus-highlight : "transparent"; $text-input-md-show-invalid-highlight: "transparent"; $text-input-md-show-valid-highlight : "transparent"; $text-input-md-show-success-highlight: false; $text-input-md-show-error-highlight: false; // Input highlight - normal $text-input-md-highlight-color: "transparent"; // Input highlight - valid $text-input-md-hightlight-color-valid: "transparent"; // Input highlight - invalid $text-input-md-hightlight-color-invalid: "transparent"; </code></pre>
0debug
static void spin_kick(void *data) { SpinKick *kick = data; CPUState *cpu = CPU(kick->cpu); CPUPPCState *env = &kick->cpu->env; SpinInfo *curspin = kick->spin; hwaddr map_size = 64 * 1024 * 1024; hwaddr map_start; cpu_synchronize_state(cpu); stl_p(&curspin->pir, env->spr[SPR_PIR]); env->nip = ldq_p(&curspin->addr) & (map_size - 1); env->gpr[3] = ldq_p(&curspin->r3); env->gpr[4] = 0; env->gpr[5] = 0; env->gpr[6] = 0; env->gpr[7] = map_size; env->gpr[8] = 0; env->gpr[9] = 0; map_start = ldq_p(&curspin->addr) & ~(map_size - 1); mmubooke_create_initial_mapping(env, 0, map_start, map_size); cpu->halted = 0; cpu->exception_index = -1; cpu->stopped = false; qemu_cpu_kick(cpu); }
1threat
static int mov_read_sidx(MOVContext *c, AVIOContext *pb, MOVAtom atom) { int64_t offset = avio_tell(pb) + atom.size, pts, timestamp; uint8_t version; unsigned i, j, track_id, item_count; AVStream *st = NULL; AVStream *ref_st = NULL; MOVStreamContext *sc, *ref_sc = NULL; AVRational timescale; version = avio_r8(pb); if (version > 1) { avpriv_request_sample(c->fc, "sidx version %u", version); return 0; } avio_rb24(pb); track_id = avio_rb32(pb); for (i = 0; i < c->fc->nb_streams; i++) { if (c->fc->streams[i]->id == track_id) { st = c->fc->streams[i]; break; } } if (!st) { av_log(c->fc, AV_LOG_WARNING, "could not find corresponding track id %d\n", track_id); return 0; } sc = st->priv_data; timescale = av_make_q(1, avio_rb32(pb)); if (timescale.den <= 0) { av_log(c->fc, AV_LOG_ERROR, "Invalid sidx timescale 1/%d\n", timescale.den); return AVERROR_INVALIDDATA; } if (version == 0) { pts = avio_rb32(pb); offset += avio_rb32(pb); } else { pts = avio_rb64(pb); offset += avio_rb64(pb); } avio_rb16(pb); item_count = avio_rb16(pb); for (i = 0; i < item_count; i++) { int index; MOVFragmentStreamInfo * frag_stream_info; uint32_t size = avio_rb32(pb); uint32_t duration = avio_rb32(pb); if (size & 0x80000000) { avpriv_request_sample(c->fc, "sidx reference_type 1"); return AVERROR_PATCHWELCOME; } avio_rb32(pb); timestamp = av_rescale_q(pts, st->time_base, timescale); index = update_frag_index(c, offset); frag_stream_info = get_frag_stream_info(&c->frag_index, index, track_id); if (frag_stream_info) frag_stream_info->sidx_pts = timestamp; offset += size; pts += duration; } st->duration = sc->track_end = pts; sc->has_sidx = 1; if (offset == avio_size(pb)) { for (i = 0; i < c->frag_index.nb_items; i++) { MOVFragmentIndexItem * item = &c->frag_index.item[i]; for (j = 0; ref_st == NULL && j < item->nb_stream_info; j++) { MOVFragmentStreamInfo * si; si = &item->stream_info[j]; if (si->sidx_pts != AV_NOPTS_VALUE) { ref_st = c->fc->streams[i]; ref_sc = ref_st->priv_data; break; } } } for (i 
= 0; i < c->fc->nb_streams; i++) { st = c->fc->streams[i]; sc = st->priv_data; if (!sc->has_sidx) { st->duration = sc->track_end = av_rescale(ref_st->duration, sc->time_scale, ref_sc->time_scale); } } c->frag_index.complete = 1; } return 0; }
1threat
FireStore create a document if not exist : <p>I want to update a doc like this:</p> <pre><code>db.collection('users').doc(user_id).update({foo:'bar'}) </code></pre> <p>However, if the doc user_id does not exists, the above code will throw an error. Hence, how to tell Firestore to create the student if not exists, in other word, behave like this:</p> <pre><code>db.collection('users').doc(user_id).set({foo:'bar'}) </code></pre>
0debug
Elastic search - search_after parameter : <p>I read this <a href="https://www.elastic.co/guide/en/elasticsearch/reference/5.0/search-request-search-after.html" rel="noreferrer">doc</a> to understand 'search_after' and have two question.</p> <ol> <li>I'm curious that where "tweet#654323" comes from. Is this one of document id or field data? </li> <li><p>When I added multiple parameter of search_after, Is that 'and' condition or 'or' condition?</p> <p>ex) "search_after": [1463538857, 5147821]</p></li> </ol>
0debug
Should package-lock.json also be published? : <p>npm 5 introduced <em>package-lock.json</em>, of which the documentation is <a href="https://docs.npmjs.com/files/package-lock.json" rel="noreferrer">here</a>.</p> <p>It states that the file is intended to be included with version control, so anyone cloning your package and installing it will have the same dependency versions. In other words, you should not add it to your <em>.gitignore</em> file.</p> <p>What it does not state is wether or not the file is intended to be included with a published package. This question could be rephrased as; <em>should package-lock.json be included in .npmignore?</em></p>
0debug
static int hds_write_header(AVFormatContext *s) { HDSContext *c = s->priv_data; int ret = 0, i; AVOutputFormat *oformat; mkdir(s->filename, 0777); oformat = av_guess_format("flv", NULL, NULL); if (!oformat) { ret = AVERROR_MUXER_NOT_FOUND; goto fail; } c->streams = av_mallocz(sizeof(*c->streams) * s->nb_streams); if (!c->streams) { ret = AVERROR(ENOMEM); goto fail; } for (i = 0; i < s->nb_streams; i++) { OutputStream *os = &c->streams[c->nb_streams]; AVFormatContext *ctx; AVStream *st = s->streams[i]; if (!st->codec->bit_rate) { av_log(s, AV_LOG_ERROR, "No bit rate set for stream %d\n", i); ret = AVERROR(EINVAL); goto fail; } if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) { if (os->has_video) { c->nb_streams++; os++; } os->has_video = 1; } else if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO) { if (os->has_audio) { c->nb_streams++; os++; } os->has_audio = 1; } else { av_log(s, AV_LOG_ERROR, "Unsupported stream type in stream %d\n", i); ret = AVERROR(EINVAL); goto fail; } os->bitrate += s->streams[i]->codec->bit_rate; if (!os->ctx) { os->first_stream = i; ctx = avformat_alloc_context(); if (!ctx) { ret = AVERROR(ENOMEM); goto fail; } os->ctx = ctx; ctx->oformat = oformat; ctx->interrupt_callback = s->interrupt_callback; ctx->pb = avio_alloc_context(os->iobuf, sizeof(os->iobuf), AVIO_FLAG_WRITE, os, NULL, hds_write, NULL); if (!ctx->pb) { ret = AVERROR(ENOMEM); goto fail; } } else { ctx = os->ctx; } s->streams[i]->id = c->nb_streams; if (!(st = avformat_new_stream(ctx, NULL))) { ret = AVERROR(ENOMEM); goto fail; } avcodec_copy_context(st->codec, s->streams[i]->codec); st->sample_aspect_ratio = s->streams[i]->sample_aspect_ratio; } if (c->streams[c->nb_streams].ctx) c->nb_streams++; for (i = 0; i < c->nb_streams; i++) { OutputStream *os = &c->streams[i]; int j; if ((ret = avformat_write_header(os->ctx, NULL)) < 0) { goto fail; } os->ctx_inited = 1; avio_flush(os->ctx->pb); for (j = 0; j < os->ctx->nb_streams; j++) s->streams[os->first_stream + j]->time_base = 
os->ctx->streams[j]->time_base; snprintf(os->temp_filename, sizeof(os->temp_filename), "%s/stream%d_temp", s->filename, i); ret = init_file(s, os, 0); if (ret < 0) goto fail; if (!os->has_video && c->min_frag_duration <= 0) { av_log(s, AV_LOG_WARNING, "No video stream in output stream %d and no min frag duration set\n", i); ret = AVERROR(EINVAL); } os->fragment_index = 1; write_abst(s, os, 0); } ret = write_manifest(s, 0); fail: if (ret) hds_free(s); return ret; }
1threat
decode_cabac_residual_internal(H264Context *h, int16_t *block, int cat, int n, const uint8_t *scantable, const uint32_t *qmul, int max_coeff, int is_dc, int chroma422) { static const int significant_coeff_flag_offset[2][14] = { { 105+0, 105+15, 105+29, 105+44, 105+47, 402, 484+0, 484+15, 484+29, 660, 528+0, 528+15, 528+29, 718 }, { 277+0, 277+15, 277+29, 277+44, 277+47, 436, 776+0, 776+15, 776+29, 675, 820+0, 820+15, 820+29, 733 } }; static const int last_coeff_flag_offset[2][14] = { { 166+0, 166+15, 166+29, 166+44, 166+47, 417, 572+0, 572+15, 572+29, 690, 616+0, 616+15, 616+29, 748 }, { 338+0, 338+15, 338+29, 338+44, 338+47, 451, 864+0, 864+15, 864+29, 699, 908+0, 908+15, 908+29, 757 } }; static const int coeff_abs_level_m1_offset[14] = { 227+0, 227+10, 227+20, 227+30, 227+39, 426, 952+0, 952+10, 952+20, 708, 982+0, 982+10, 982+20, 766 }; static const uint8_t significant_coeff_flag_offset_8x8[2][63] = { { 0, 1, 2, 3, 4, 5, 5, 4, 4, 3, 3, 4, 4, 4, 5, 5, 4, 4, 4, 4, 3, 3, 6, 7, 7, 7, 8, 9,10, 9, 8, 7, 7, 6,11,12,13,11, 6, 7, 8, 9,14,10, 9, 8, 6,11, 12,13,11, 6, 9,14,10, 9,11,12,13,11,14,10,12 }, { 0, 1, 1, 2, 2, 3, 3, 4, 5, 6, 7, 7, 7, 8, 4, 5, 6, 9,10,10, 8,11,12,11, 9, 9,10,10, 8,11,12,11, 9, 9,10,10, 8,11,12,11, 9, 9,10,10, 8,13,13, 9, 9,10,10, 8,13,13, 9, 9,10,10,14,14,14,14,14 } }; static const uint8_t sig_coeff_offset_dc[7] = { 0, 0, 1, 1, 2, 2, 2 }; static const uint8_t coeff_abs_level1_ctx[8] = { 1, 2, 3, 4, 0, 0, 0, 0 }; static const uint8_t coeff_abs_levelgt1_ctx[2][8] = { { 5, 5, 5, 5, 6, 7, 8, 9 }, { 5, 5, 5, 5, 6, 7, 8, 8 }, }; static const uint8_t coeff_abs_level_transition[2][8] = { { 1, 2, 3, 3, 4, 5, 6, 7 }, { 4, 4, 4, 4, 5, 6, 7, 7 } }; int index[64]; int av_unused last; int coeff_count = 0; int node_ctx = 0; uint8_t *significant_coeff_ctx_base; uint8_t *last_coeff_ctx_base; uint8_t *abs_level_m1_ctx_base; #if !ARCH_X86 #define CABAC_ON_STACK #endif #ifdef CABAC_ON_STACK #define CC &cc CABACContext cc; cc.range = h->cabac.range; cc.low = 
h->cabac.low; cc.bytestream= h->cabac.bytestream; #else #define CC &h->cabac #endif significant_coeff_ctx_base = h->cabac_state + significant_coeff_flag_offset[MB_FIELD][cat]; last_coeff_ctx_base = h->cabac_state + last_coeff_flag_offset[MB_FIELD][cat]; abs_level_m1_ctx_base = h->cabac_state + coeff_abs_level_m1_offset[cat]; if( !is_dc && max_coeff == 64 ) { #define DECODE_SIGNIFICANCE( coefs, sig_off, last_off ) \ for(last= 0; last < coefs; last++) { \ uint8_t *sig_ctx = significant_coeff_ctx_base + sig_off; \ if( get_cabac( CC, sig_ctx )) { \ uint8_t *last_ctx = last_coeff_ctx_base + last_off; \ index[coeff_count++] = last; \ if( get_cabac( CC, last_ctx ) ) { \ last= max_coeff; \ break; \ } \ } \ }\ if( last == max_coeff -1 ) {\ index[coeff_count++] = last;\ } const uint8_t *sig_off = significant_coeff_flag_offset_8x8[MB_FIELD]; #ifdef decode_significance coeff_count = decode_significance_8x8(CC, significant_coeff_ctx_base, index, last_coeff_ctx_base, sig_off); } else { if (is_dc && chroma422) { DECODE_SIGNIFICANCE(7, sig_coeff_offset_dc[last], sig_coeff_offset_dc[last]); } else { coeff_count = decode_significance(CC, max_coeff, significant_coeff_ctx_base, index, last_coeff_ctx_base-significant_coeff_ctx_base); } #else DECODE_SIGNIFICANCE( 63, sig_off[last], ff_h264_last_coeff_flag_offset_8x8[last] ); } else { if (is_dc && chroma422) { DECODE_SIGNIFICANCE(7, sig_coeff_offset_dc[last], sig_coeff_offset_dc[last]); } else { DECODE_SIGNIFICANCE(max_coeff - 1, last, last); } #endif } av_assert2(coeff_count > 0); if( is_dc ) { if( cat == 3 ) h->cbp_table[h->mb_xy] |= 0x40 << (n - CHROMA_DC_BLOCK_INDEX); else h->cbp_table[h->mb_xy] |= 0x100 << (n - LUMA_DC_BLOCK_INDEX); h->non_zero_count_cache[scan8[n]] = coeff_count; } else { if( max_coeff == 64 ) fill_rectangle(&h->non_zero_count_cache[scan8[n]], 2, 2, 8, coeff_count, 1); else { av_assert2( cat == 1 || cat == 2 || cat == 4 || cat == 7 || cat == 8 || cat == 11 || cat == 12 ); h->non_zero_count_cache[scan8[n]] = 
coeff_count; } } #define STORE_BLOCK(type) \ do { \ uint8_t *ctx = coeff_abs_level1_ctx[node_ctx] + abs_level_m1_ctx_base; \ \ int j= scantable[index[--coeff_count]]; \ \ if( get_cabac( CC, ctx ) == 0 ) { \ node_ctx = coeff_abs_level_transition[0][node_ctx]; \ if( is_dc ) { \ ((type*)block)[j] = get_cabac_bypass_sign( CC, -1); \ }else{ \ ((type*)block)[j] = (get_cabac_bypass_sign( CC, -qmul[j]) + 32) >> 6; \ } \ } else { \ int coeff_abs = 2; \ ctx = coeff_abs_levelgt1_ctx[is_dc && chroma422][node_ctx] + abs_level_m1_ctx_base; \ node_ctx = coeff_abs_level_transition[1][node_ctx]; \ \ while( coeff_abs < 15 && get_cabac( CC, ctx ) ) { \ coeff_abs++; \ } \ \ if( coeff_abs >= 15 ) { \ int j = 0; \ while( get_cabac_bypass( CC ) ) { \ j++; \ } \ \ coeff_abs=1; \ while( j-- ) { \ coeff_abs += coeff_abs + get_cabac_bypass( CC ); \ } \ coeff_abs+= 14; \ } \ \ if( is_dc ) { \ ((type*)block)[j] = get_cabac_bypass_sign( CC, -coeff_abs ); \ }else{ \ ((type*)block)[j] = ((int)(get_cabac_bypass_sign( CC, -coeff_abs ) * qmul[j] + 32)) >> 6; \ } \ } \ } while ( coeff_count ); if (h->pixel_shift) { STORE_BLOCK(int32_t) } else { STORE_BLOCK(int16_t) } #ifdef CABAC_ON_STACK h->cabac.range = cc.range ; h->cabac.low = cc.low ; h->cabac.bytestream= cc.bytestream; #endif }
1threat
def remove_tuples(test_list, K): res = [ele for ele in test_list if len(ele) != K] return (res)
0debug
int ff_mpeg4_decode_picture_header(MpegEncContext * s, GetBitContext *gb) { int startcode, v; align_get_bits(gb); if(s->avctx->codec_tag == ff_get_fourcc("WV1F") && show_bits(gb, 24) == 0x575630){ skip_bits(gb, 24); if(get_bits(gb, 8) == 0xF0) return decode_vop_header(s, gb); } startcode = 0xff; for(;;) { v = get_bits(gb, 8); startcode = ((startcode << 8) | v) & 0xffffffff; if(get_bits_count(gb) >= gb->size_in_bits){ if(gb->size_in_bits==8 && (s->divx_version || s->xvid_build)){ av_log(s->avctx, AV_LOG_ERROR, "frame skip %d\n", gb->size_in_bits); return FRAME_SKIPPED; }else return -1; } if((startcode&0xFFFFFF00) != 0x100) continue; if(s->avctx->debug&FF_DEBUG_STARTCODE){ av_log(s->avctx, AV_LOG_DEBUG, "startcode: %3X ", startcode); if (startcode<=0x11F) av_log(s->avctx, AV_LOG_DEBUG, "Video Object Start"); else if(startcode<=0x12F) av_log(s->avctx, AV_LOG_DEBUG, "Video Object Layer Start"); else if(startcode<=0x13F) av_log(s->avctx, AV_LOG_DEBUG, "Reserved"); else if(startcode<=0x15F) av_log(s->avctx, AV_LOG_DEBUG, "FGS bp start"); else if(startcode<=0x1AF) av_log(s->avctx, AV_LOG_DEBUG, "Reserved"); else if(startcode==0x1B0) av_log(s->avctx, AV_LOG_DEBUG, "Visual Object Seq Start"); else if(startcode==0x1B1) av_log(s->avctx, AV_LOG_DEBUG, "Visual Object Seq End"); else if(startcode==0x1B2) av_log(s->avctx, AV_LOG_DEBUG, "User Data"); else if(startcode==0x1B3) av_log(s->avctx, AV_LOG_DEBUG, "Group of VOP start"); else if(startcode==0x1B4) av_log(s->avctx, AV_LOG_DEBUG, "Video Session Error"); else if(startcode==0x1B5) av_log(s->avctx, AV_LOG_DEBUG, "Visual Object Start"); else if(startcode==0x1B6) av_log(s->avctx, AV_LOG_DEBUG, "Video Object Plane start"); else if(startcode==0x1B7) av_log(s->avctx, AV_LOG_DEBUG, "slice start"); else if(startcode==0x1B8) av_log(s->avctx, AV_LOG_DEBUG, "extension start"); else if(startcode==0x1B9) av_log(s->avctx, AV_LOG_DEBUG, "fgs start"); else if(startcode==0x1BA) av_log(s->avctx, AV_LOG_DEBUG, "FBA Object start"); else 
if(startcode==0x1BB) av_log(s->avctx, AV_LOG_DEBUG, "FBA Object Plane start"); else if(startcode==0x1BC) av_log(s->avctx, AV_LOG_DEBUG, "Mesh Object start"); else if(startcode==0x1BD) av_log(s->avctx, AV_LOG_DEBUG, "Mesh Object Plane start"); else if(startcode==0x1BE) av_log(s->avctx, AV_LOG_DEBUG, "Still Texture Object start"); else if(startcode==0x1BF) av_log(s->avctx, AV_LOG_DEBUG, "Texture Spatial Layer start"); else if(startcode==0x1C0) av_log(s->avctx, AV_LOG_DEBUG, "Texture SNR Layer start"); else if(startcode==0x1C1) av_log(s->avctx, AV_LOG_DEBUG, "Texture Tile start"); else if(startcode==0x1C2) av_log(s->avctx, AV_LOG_DEBUG, "Texture Shape Layer start"); else if(startcode==0x1C3) av_log(s->avctx, AV_LOG_DEBUG, "stuffing start"); else if(startcode<=0x1C5) av_log(s->avctx, AV_LOG_DEBUG, "reserved"); else if(startcode<=0x1FF) av_log(s->avctx, AV_LOG_DEBUG, "System start"); av_log(s->avctx, AV_LOG_DEBUG, " at %d\n", get_bits_count(gb)); } if(startcode >= 0x120 && startcode <= 0x12F){ if(decode_vol_header(s, gb) < 0) return -1; } else if(startcode == USER_DATA_STARTCODE){ decode_user_data(s, gb); } else if(startcode == GOP_STARTCODE){ mpeg4_decode_gop_header(s, gb); } else if(startcode == VOP_STARTCODE){ return decode_vop_header(s, gb); } align_get_bits(gb); startcode = 0xff; } }
1threat
How to sum each user totals per category : <p><a href="https://i.stack.imgur.com/rvPC0.png" rel="nofollow noreferrer">table preview</a></p> <p>I have the above table which I'd like to sum all users total per category. Totals for user 1, User 2, ..., user 20 to be shown at the end of the table. how can i achieve this in Joomla? </p>
0debug
Is there a way to speedup npm ci using cache? : <p>for now <code>npm ci</code> is the most common way to install node modules when using CI. But it is honestly really slow. Is there a way to speedup <code>npm ci</code> using cache or do not fully remove existing packages (whole node_modules folder)?</p>
0debug
static inline void decode_block_intra(MadContext * t, DCTELEM * block) { MpegEncContext *s = &t->s; int level, i, j, run; RLTable *rl = &ff_rl_mpeg1; const uint8_t *scantable = s->intra_scantable.permutated; int16_t *quant_matrix = s->intra_matrix; block[0] = (128 + get_sbits(&s->gb, 8)) * quant_matrix[0]; i = 0; { OPEN_READER(re, &s->gb); for (;;) { UPDATE_CACHE(re, &s->gb); GET_RL_VLC(level, run, re, &s->gb, rl->rl_vlc[0], TEX_VLC_BITS, 2, 0); if (level == 127) { break; } else if (level != 0) { i += run; j = scantable[i]; level = (level*quant_matrix[j]) >> 4; level = (level-1)|1; level = (level ^ SHOW_SBITS(re, &s->gb, 1)) - SHOW_SBITS(re, &s->gb, 1); LAST_SKIP_BITS(re, &s->gb, 1); } else { UPDATE_CACHE(re, &s->gb); level = SHOW_SBITS(re, &s->gb, 10); SKIP_BITS(re, &s->gb, 10); UPDATE_CACHE(re, &s->gb); run = SHOW_UBITS(re, &s->gb, 6)+1; LAST_SKIP_BITS(re, &s->gb, 6); i += run; j = scantable[i]; if (level < 0) { level = -level; level = (level*quant_matrix[j]) >> 4; level = (level-1)|1; level = -level; } else { level = (level*quant_matrix[j]) >> 4; level = (level-1)|1; } } if (i > 63) { av_log(s->avctx, AV_LOG_ERROR, "ac-tex damaged at %d %d\n", s->mb_x, s->mb_y); return; } block[j] = level; } CLOSE_READER(re, &s->gb); } }
1threat
Read string from index 1 in C : <p>I'm trying to use string by char array from index 1.</p> <pre><code>char a[100]; scanf("%s", a+1); </code></pre> <p>I thought it will work well, but it didn't work. What is wrong?</p> <p>How can I skip index 0 and read string from index 1?</p>
0debug
static int vorbis_residue_decode(vorbis_context *vc, vorbis_residue *vr, uint_fast8_t ch, uint_fast8_t *do_not_decode, float *vec, uint_fast16_t vlen) { GetBitContext *gb=&vc->gb; uint_fast8_t c_p_c=vc->codebooks[vr->classbook].dimensions; uint_fast16_t n_to_read=vr->end-vr->begin; uint_fast16_t ptns_to_read=n_to_read/vr->partition_size; uint_fast8_t classifs[ptns_to_read*vc->audio_channels]; uint_fast8_t pass; uint_fast8_t ch_used; uint_fast8_t i,j,l; uint_fast16_t k; if (vr->type==2) { for(j=1;j<ch;++j) { do_not_decode[0]&=do_not_decode[j]; } if (do_not_decode[0]) return 0; ch_used=1; } else { ch_used=ch; } AV_DEBUG(" residue type 0/1/2 decode begin, ch: %d cpc %d \n", ch, c_p_c); for(pass=0;pass<=vr->maxpass;++pass) { uint_fast16_t voffset; uint_fast16_t partition_count; uint_fast16_t j_times_ptns_to_read; voffset=vr->begin; for(partition_count=0;partition_count<ptns_to_read;) { if (!pass) { uint_fast32_t inverse_class = ff_inverse[vr->classifications]; for(j_times_ptns_to_read=0, j=0;j<ch_used;++j) { if (!do_not_decode[j]) { uint_fast32_t temp=get_vlc2(gb, vc->codebooks[vr->classbook].vlc.table, vc->codebooks[vr->classbook].nb_bits, 3); AV_DEBUG("Classword: %d \n", temp); assert(vr->classifications > 1 && temp<=65536); for(i=0;i<c_p_c;++i) { uint_fast32_t temp2; temp2=(((uint_fast64_t)temp) * inverse_class)>>32; if (partition_count+c_p_c-1-i < ptns_to_read) { classifs[j_times_ptns_to_read+partition_count+c_p_c-1-i]=temp-temp2*vr->classifications; } temp=temp2; } } j_times_ptns_to_read+=ptns_to_read; } } for(i=0;(i<c_p_c) && (partition_count<ptns_to_read);++i) { for(j_times_ptns_to_read=0, j=0;j<ch_used;++j) { uint_fast16_t voffs; if (!do_not_decode[j]) { uint_fast8_t vqclass=classifs[j_times_ptns_to_read+partition_count]; int_fast16_t vqbook=vr->books[vqclass][pass]; if (vqbook>=0) { uint_fast16_t coffs; unsigned dim= vc->codebooks[vqbook].dimensions; uint_fast16_t step= dim==1 ? 
vr->partition_size : FASTDIV(vr->partition_size, dim); vorbis_codebook codebook= vc->codebooks[vqbook]; if (vr->type==0) { voffs=voffset+j*vlen; for(k=0;k<step;++k) { coffs=get_vlc2(gb, codebook.vlc.table, codebook.nb_bits, 3) * dim; for(l=0;l<dim;++l) { vec[voffs+k+l*step]+=codebook.codevectors[coffs+l]; } } } else if (vr->type==1) { voffs=voffset+j*vlen; for(k=0;k<step;++k) { coffs=get_vlc2(gb, codebook.vlc.table, codebook.nb_bits, 3) * dim; for(l=0;l<dim;++l, ++voffs) { vec[voffs]+=codebook.codevectors[coffs+l]; AV_DEBUG(" pass %d offs: %d curr: %f change: %f cv offs.: %d \n", pass, voffs, vec[voffs], codebook.codevectors[coffs+l], coffs); } } } else if (vr->type==2 && ch==2 && (voffset&1)==0 && (dim&1)==0) { voffs=voffset>>1; if(dim==2) { for(k=0;k<step;++k) { coffs=get_vlc2(gb, codebook.vlc.table, codebook.nb_bits, 3) * 2; vec[voffs+k ]+=codebook.codevectors[coffs ]; vec[voffs+k+vlen]+=codebook.codevectors[coffs+1]; } } else for(k=0;k<step;++k) { coffs=get_vlc2(gb, codebook.vlc.table, codebook.nb_bits, 3) * dim; for(l=0;l<dim;l+=2, voffs++) { vec[voffs ]+=codebook.codevectors[coffs+l ]; vec[voffs+vlen]+=codebook.codevectors[coffs+l+1]; AV_DEBUG(" pass %d offs: %d curr: %f change: %f cv offs.: %d+%d \n", pass, voffset/ch+(voffs%ch)*vlen, vec[voffset/ch+(voffs%ch)*vlen], codebook.codevectors[coffs+l], coffs, l); } } } else if (vr->type==2) { voffs=voffset; for(k=0;k<step;++k) { coffs=get_vlc2(gb, codebook.vlc.table, codebook.nb_bits, 3) * dim; for(l=0;l<dim;++l, ++voffs) { vec[voffs/ch+(voffs%ch)*vlen]+=codebook.codevectors[coffs+l]; FIXME use if and counter instead of / and % AV_DEBUG(" pass %d offs: %d curr: %f change: %f cv offs.: %d+%d \n", pass, voffset/ch+(voffs%ch)*vlen, vec[voffset/ch+(voffs%ch)*vlen], codebook.codevectors[coffs+l], coffs, l); } } } else { av_log(vc->avccontext, AV_LOG_ERROR, " Invalid residue type while residue decode?! 
\n"); return 1; } } } j_times_ptns_to_read+=ptns_to_read; } ++partition_count; voffset+=vr->partition_size; } } } return 0; }
1threat
No toolchains found in the NDK toolchains folder for ABI with prefix: mips64el-linux-android : <p>Can anyone tell me why I am receiving this error? I have downloaded a series of projects from GitHub for a Udacity course. Since there are about 50 or 60 projects (Exercises and Solutions) in the repo, I presume it has to do with the fact that each is an individual project on its own. I do not have a problem usually when I fork a repo and clone it locally. </p> <p>I have read other posts about uninstalling and reinstalling ndk in the sdk. But I really want to know why this is happening and how I can fix without blindly uninstalling and re-installing. Any help is appreciated. Thanks.</p>
0debug
Segmentation fault in C and infinite loop : Consider the code below: #include<stdio.h> int main() { Printf("hello"); main(1,2); return 0; } On executing the code i am getting a rum time error ( Segmentation Fault) and a output : hello...infinite times I understand that a segmentation fault occurs when the program tries to access the area of memory that it is not allowed to access. Which line of my code is accessing memory it is not allowed to access and why? I am hoping it is `main(1,2)` And how come hello is printed infinite times
0debug
static inline void gen_op_fcmpes(int fccno, TCGv_i32 r_rs1, TCGv_i32 r_rs2) { switch (fccno) { case 0: gen_helper_fcmpes(cpu_env, r_rs1, r_rs2); break; case 1: gen_helper_fcmpes_fcc1(cpu_env, r_rs1, r_rs2); break; case 2: gen_helper_fcmpes_fcc2(cpu_env, r_rs1, r_rs2); break; case 3: gen_helper_fcmpes_fcc3(cpu_env, r_rs1, r_rs2); break; } }
1threat
Android java string division when has whitespaces (space) : <p>i have an simple question. It is possible to make some function, that will division my string to several other strings? It will works like that:</p> <blockquote> <p>if string has whitespaces count spaces, make as many string as whitespaces count and division it by spaces? Like: "some string 123" string1 = "some", string2 = "string", string3 = "123"</p> </blockquote> <p>Can someone helps me with it? Im a little bit new in this case :/</p>
0debug
jenkins authentication fails with gitlab : Before to say my problem, i installed my **gitlab**(omnibus settting) and gitlab is connected external **nginx** server. And than now i am setting jenkins. While i am making new jenkins item, when i select "Git" in "Source code management" i receive following message in the picture.[git selecting screen][1] So i checked "error.log" in my external nginx. And then i found following error code. **`[error] 20979#0: OCSP_basic_verify() failed (SSL: error:27069076:OCSP routines:OCSP_basic_verify:signer certificate not found) while requesting certificate status, responder: ocsp.int-x1.letsencrypt.org`** I also searched it on google and anywhere i know, but i still don't know how to solve this problem. thank you advance for your help. [1]: http://i.stack.imgur.com/JGU72.png
0debug
Change the dictionary on c# model : <p>I have a dictionary and I need to change the dictionary on c# model.How to do it?</p> <pre><code>var number = new Dictionary&lt;string, int&gt; { {"One", 1}, {"Two", 2}, {"Three", 3} }; </code></pre> <p>Model:</p> <pre><code>public class Number { public double One{ get; set; } public double Two { get; set; } public double Three { get; set; } } </code></pre>
0debug
static void monitor_puts(Monitor *mon, const char *str) { char c; for(;;) { assert(mon->outbuf_index < sizeof(mon->outbuf) - 1); c = *str++; if (c == '\0') break; if (c == '\n') mon->outbuf[mon->outbuf_index++] = '\r'; mon->outbuf[mon->outbuf_index++] = c; if (mon->outbuf_index >= (sizeof(mon->outbuf) - 1) || c == '\n') monitor_flush(mon); } }
1threat
/*
 * Set the @skip_dump flag on a memory region.
 *
 * This function only records the flag; the consumers of mr->skip_dump
 * (elsewhere in the memory subsystem) decide how a flagged region is
 * treated -- presumably it is excluded from guest memory dumps, but
 * that behaviour is not visible from this function alone.
 */
void memory_region_set_skip_dump(MemoryRegion *mr)
{
    mr->skip_dump = true;
}
1threat
Docker Python set utf-8 locale : <p>I am trying to run my python file that first reads a string in Chinese language and print it.</p> <p>This is my Dockerfile</p> <pre><code>FROM python:2.7-onbuild ENV LANG en_US.UTF-8 ADD . /code WORKDIR /code RUN pip install -r requirements.txt CMD ["python", "app.py"] </code></pre> <p>This is my python file:</p> <pre><code> # -*- coding: utf-8 -*- import jieba s = "我来到北京清华大学" s = s.decode('utf-8') print type(s), s </code></pre> <p>I then run :</p> <p><code>docker build -t python-example .</code></p> <p><code>docker run python-example</code></p> <p>Error i got: <code>UnicodeEncodeError: 'ascii' codec can't encode characters in position 0-8: ordinal not in range(128)</code></p> <p>When i run it locally, it works fine. </p>
0debug
How To Extract Data Inside This Single Quoted String in C? I Need This in 2 Hours Please : Simple question and lets be straightforward. I have this string. ('string1', 'string2', 'string3'); I want to extract data ONLY the string1, string2, string3 using C. Anyone have an idea? Have tried something like this `scanf("%s", &data1); printf("%s", data1); if(d=='`'){ scanf("%s", &sampah); printf("%s", sampah); if(d=='`'){ scanf("%s", &data2); printf("%s", data2); if(d=='`'){ scanf("%s", &sampah); printf("%s", sampah); if(d=='`'){ scanf("%s", &data3); printf("%s", data3); if(d=='`'){ scanf("%s", &sampah); printf("%s", sampah); if(d=='`'){ scanf("%s", &data4); printf("%s", data4); } } } } } }` yes its stupid but this is just because I don't know what to do. Please help :(
0debug
Configuring a second NIC in Azure : I have a VM in Azure with two NICs. Each NIC has its own subnet. Azure only lets the Primary NIC have a public IP address. It appears that unless you have a public IP address a NIC cannot access the Internet but I need both to have Internet access. Config details: The Primary NIC is on subnet 10.0.0.0/24 with Gateway 10.0.0.1. This subnet is for the Management Network. I need this to ssh into the machines for host administration purposes. The second NIC is on subnet 203.0.113.0/24 and requires a gateway with IP address 203.0.113.1 but I of course cannot configure that. This subnet is for the application and the application requires Internet assess. [![Network Layout][1]][1] I am not a networking expert so I struggling to work this one out. In fact looking at the diagram maybe the Primary NIC should be the Provider Network... ??? I *think* I need to create another VM and make it the Internet gateway - but maybe there is another way in Azure. Finally, I do not have a VPN set up into Azure. I am SSHing in via the public Internet - and I want to keep it that way. [1]: https://i.stack.imgur.com/977uF.png
0debug
Using an aggregate function with an arithmetic operation : [According to the picture, I have a table with two columns][1] I need to write a SQL query implementing the following formula, Z = accelx - AVG(accelx), evaluated for every row. [1]: https://i.stack.imgur.com/vjWAe.png
0debug
/*
 * Populate a DXVA_PictureParameters structure for hardware-accelerated
 * VC-1 decoding from the current FFmpeg decoder state.
 *
 * @avctx:  codec context (provides coded width/height)
 * @ctx:    DXVA hwaccel context (surface lookup, accelerator config,
 *          running report_id counter)
 * @v:      VC-1 decoder state (sequence/picture layer flags)
 * @pp:     output picture-parameters struct, fully overwritten here
 *
 * The b* fields are bit-packed exactly as the DXVA VC-1 specification
 * lays them out; the shift amounts below are the bit positions defined
 * by that spec, so the statement order and shift values must not be
 * rearranged.
 */
static void fill_picture_parameters(AVCodecContext *avctx,
                                    struct dxva_context *ctx, const VC1Context *v,
                                    DXVA_PictureParameters *pp)
{
    const MpegEncContext *s = &v->s;
    const Picture *current_picture = s->current_picture_ptr;

    /* Start from a fully zeroed struct so every unset field is 0. */
    memset(pp, 0, sizeof(*pp));

    /* Decoded and deblocked output both target the current surface. */
    pp->wDecodedPictureIndex    =
    pp->wDeblockedPictureIndex  = ff_dxva2_get_surface_index(ctx, &current_picture->f);

    /* Forward reference: last picture, unless this is an I/BI picture
     * (0xffff marks "no reference" for the accelerator). */
    if (s->pict_type != AV_PICTURE_TYPE_I && !v->bi_type)
        pp->wForwardRefPictureIndex = ff_dxva2_get_surface_index(ctx, &s->last_picture.f);
    else
        pp->wForwardRefPictureIndex = 0xffff;

    /* Backward reference: only real B pictures have one. */
    if (s->pict_type == AV_PICTURE_TYPE_B && !v->bi_type)
        pp->wBackwardRefPictureIndex = ff_dxva2_get_surface_index(ctx, &s->next_picture.f);
    else
        pp->wBackwardRefPictureIndex = 0xffff;

    /* Picture dimensions: advanced profile passes the coded pixel size,
     * simple/main profile passes the size in macroblocks.  NOTE(review):
     * despite the "InMB" field names, the advanced-profile branch uses
     * pixel dimensions -- confirm against the DXVA VC-1 spec before
     * changing. */
    if (v->profile == PROFILE_ADVANCED) {
        pp->wPicWidthInMBminus1  = avctx->width  - 1;
        pp->wPicHeightInMBminus1 = avctx->height - 1;
    } else {
        pp->wPicWidthInMBminus1  = s->mb_width  - 1;
        pp->wPicHeightInMBminus1 = s->mb_height - 1;
    }

    /* Fixed block geometry: 16x16 macroblocks of 8x8 blocks, 8 bpp. */
    pp->bMacroblockWidthMinus1  = 15;
    pp->bMacroblockHeightMinus1 = 15;
    pp->bBlockWidthMinus1       = 7;
    pp->bBlockHeightMinus1      = 7;
    pp->bBPPminus1              = 7;

    /* Field/frame structure bits: bit 0 = top field, bit 1 = bottom. */
    if (s->picture_structure & PICT_TOP_FIELD)
        pp->bPicStructure |= 0x01;
    if (s->picture_structure & PICT_BOTTOM_FIELD)
        pp->bPicStructure |= 0x02;

    pp->bSecondField            = v->interlace && v->fcm != ILACE_FIELD && !s->first_field;
    pp->bPicIntra               = s->pict_type == AV_PICTURE_TYPE_I || v->bi_type;
    pp->bPicBackwardPrediction  = s->pict_type == AV_PICTURE_TYPE_B && !v->bi_type;

    /* Packed accelerator-configuration / intensity-compensation bits. */
    pp->bBidirectionalAveragingMode = (1                                           << 7) |
                                      ((ctx->cfg->ConfigIntraResidUnsigned != 0)   << 6) |
                                      ((ctx->cfg->ConfigResidDiffAccelerator != 0) << 5) |
                                      ((v->lumscale != 32 || v->lumshift != 0)     << 4) |
                                      ((v->profile == PROFILE_ADVANCED)            << 3);
    pp->bMVprecisionAndChromaRelation = ((v->mv_mode == MV_PMODE_1MV_HPEL_BILIN) << 3) |
                                        (1                                       << 2) |
                                        (0                                       << 1) |
                                        (!s->quarter_sample                          );
    pp->bChromaFormat           = v->chromaformat;

    /* Per-frame status-report id, wrapped to stay within 16 bits and
     * never 0; split across the two "scan" bytes. */
    ctx->report_id++;
    if (ctx->report_id >= (1 << 16))
        ctx->report_id = 1;
    pp->bPicScanFixed           = ctx->report_id >> 8;
    pp->bPicScanMethod          = ctx->report_id & 0xff;
    pp->bPicReadbackRequests    = 0;

    pp->bRcontrol               = v->rnd;

    /* More packed sequence/entry-point layer flags (bit positions per
     * the DXVA VC-1 spec). */
    pp->bPicSpatialResid8       = (v->panscanflag  << 7) |
                                  (v->refdist_flag << 6) |
                                  (s->loop_filter  << 5) |
                                  (v->fastuvmc     << 4) |
                                  (v->extended_mv  << 3) |
                                  (v->dquant       << 1) |
                                  (v->vstransform      );
    pp->bPicOverflowBlocks      = (v->quantizer_mode << 6) |
                                  (v->multires      << 5) |
                                  (v->resync_marker << 4) |
                                  (v->rangered      << 3) |
                                  (s->max_b_frames      );
    pp->bPicExtrapolation       = (!v->interlace || v->fcm == PROGRESSIVE) ? 1 : 2;
    pp->bPicDeblocked           = ((!pp->bPicBackwardPrediction && v->overlap)        << 6) |
                                  ((v->profile != PROFILE_ADVANCED && v->rangeredfrm) << 5) |
                                  (s->loop_filter                                     << 1);
    pp->bPicDeblockConfined     = (v->postprocflag << 7) |
                                  (v->broadcast    << 6) |
                                  (v->interlace    << 5) |
                                  (v->tfcntrflag   << 4) |
                                  (v->finterpflag  << 3) |
                                  ((s->pict_type != AV_PICTURE_TYPE_B) << 2) |
                                  (v->psf          << 1) |
                                  (v->extended_dmv     );

    /* Mixed-MV (4MV) is only meaningful for non-intra pictures. */
    if (s->pict_type != AV_PICTURE_TYPE_I)
        pp->bPic4MVallowed      = v->mv_mode == MV_PMODE_MIXED_MV ||
                                  (v->mv_mode == MV_PMODE_INTENSITY_COMP &&
                                   v->mv_mode2 == MV_PMODE_MIXED_MV);

    /* Range-mapping parameters exist only in the advanced profile. */
    if (v->profile == PROFILE_ADVANCED)
        pp->bPicOBMC            = (v->range_mapy_flag  << 7) |
                                  (v->range_mapy       << 4) |
                                  (v->range_mapuv_flag << 3) |
                                  (v->range_mapuv          );

    pp->bPicBinPB               = 0;
    pp->bMV_RPS                 = 0;
    pp->bReservedBits           = 0;

    /* Intensity-compensation parameters: duplicated into both bytes for
     * field pictures.  NOTE(review): the field branch repeats lumscale /
     * lumshift in both halves rather than using per-field values --
     * verify this matches the intended per-field semantics. */
    if (s->picture_structure == PICT_FRAME) {
        pp->wBitstreamFcodes      = v->lumscale;
        pp->wBitstreamPCEelements = v->lumshift;
    } else {
        pp->wBitstreamFcodes      = (v->lumscale << 8) | v->lumscale;
        pp->wBitstreamPCEelements = (v->lumshift << 8) | v->lumshift;
    }

    pp->bBitstreamConcealmentNeed   = 0;
    pp->bBitstreamConcealmentMethod = 0;
}
1threat
/*
 * Monitor command handler: receive a file descriptor passed over the
 * monitor's character device (via SCM_RIGHTS) and register it under the
 * name given by the "fdname" argument.
 *
 * The received fd is dup()ed so the monitor owns an independent copy.
 * If an entry with the same name already exists, its old fd is closed
 * and replaced in place; otherwise a new entry is prepended to mon->fds.
 * Errors are reported to the monitor user via monitor_printf().
 */
static void do_getfd(Monitor *mon, const QDict *qdict)
{
    const char *fdname = qdict_get_str(qdict, "fdname");
    mon_fd_t *monfd;
    int fd;

    /* The fd must have arrived as ancillary data on the monitor socket. */
    fd = qemu_chr_get_msgfd(mon->chr);
    if (fd == -1) {
        monitor_printf(mon, "getfd: no file descriptor supplied via SCM_RIGHTS\n");
        return;
    }

    /* Names starting with a digit are rejected -- presumably to keep
     * them distinguishable from numeric fd values elsewhere; confirm
     * against the callers that look fds up by name. */
    if (qemu_isdigit(fdname[0])) {
        monitor_printf(mon, "getfd: monitor names may not begin with a number\n");
        return;
    }

    /* Take our own copy; the original msgfd belongs to the chardev layer. */
    fd = dup(fd);
    if (fd == -1) {
        monitor_printf(mon, "Failed to dup() file descriptor: %s\n",
                       strerror(errno));
        return;
    }

    /* Replace an existing entry of the same name: close the stale fd and
     * reuse the entry (the stored name already equals fdname). */
    LIST_FOREACH(monfd, &mon->fds, next) {
        if (strcmp(monfd->name, fdname) != 0) {
            continue;
        }

        close(monfd->fd);
        monfd->fd = fd;
        return;
    }

    /* No entry yet: allocate one and link it at the head of the list.
     * NOTE(review): qemu_mallocz/qemu_strdup results are not checked --
     * presumably these abort on OOM; verify. */
    monfd = qemu_mallocz(sizeof(mon_fd_t));
    monfd->name = qemu_strdup(fdname);
    monfd->fd = fd;

    LIST_INSERT_HEAD(&mon->fds, monfd, next);
}
1threat
Why SQLite to ImageView, BitmapFactory.decodeByteArray Returning Null Android Studio? : I'm developing a contacts app with a photo. I have Store and retrieve data from SQLite and it's working fine on my list view. The problem is when I click a list item from it's supposed to show an image view and few other details. but the image is not showing on the Activity [List View on Main Activity ][1] [In the empty space here should be an image of the contacts][2] [1]: https://i.stack.imgur.com/OaBX0.png [2]: https://i.stack.imgur.com/aw5gO.png UserDBHelper.java package com.example.arif.contacts; import android.content.ContentValues; import android.content.Context; import android.database.Cursor; import android.database.sqlite.SQLiteDatabase; import android.database.sqlite.SQLiteOpenHelper; import android.graphics.Bitmap; import android.graphics.BitmapFactory; import android.graphics.drawable.BitmapDrawable; import android.util.Log; public class UserDBHelper extends SQLiteOpenHelper { String TAG = "DEBUG"; private static final String DATABASE_NAME = "USERINFO.DB"; private static final String TABLE_NAME = "ContactTable"; private static final String TABLE_COL_NAME = "NAME"; private static final String TABLE_COL_MOB = "MOB"; private static final String TABLE_COL_EMAIL = "EMAIL"; private static final String TABLE_COL_IMG = "EMAIL"; private static final String TABLE_COL_ID = "ID"; private static final int DATABASE_VERSION = 1; private static final String CREATE_QUERY = "CREATE TABLE "+TABLE_NAME+"("+TABLE_COL_ID+" INTEGER PRIMARY KEY AUTOINCREMENT, "+TABLE_COL_NAME+" TEXT, "+TABLE_COL_MOB+" TEXT, "+TABLE_COL_EMAIL+" TEXT, "+TABLE_COL_IMG+" blob)" ; public UserDBHelper(Context context) { super(context, DATABASE_NAME, null, DATABASE_VERSION); Log.e("DataBase", "Database created / Opened"); } @Override public void onCreate(SQLiteDatabase db) { db.execSQL(CREATE_QUERY); Log.e("DataBase", "Table Created"); } @Override public void onUpgrade(SQLiteDatabase db, int i, int i1) { } public 
boolean AddInfo(String Name, String Mob , String Email, byte[] img ) { SQLiteDatabase db = this.getWritableDatabase(); ContentValues CV = new ContentValues(); CV.put("NAME", Name); CV.put("MOB", Mob); CV.put("EMAIL", Email ); CV.put("NewImage", img); long result = db.insert(TABLE_NAME, null,CV); Log.e("DataBase", "Add Info Bug"); if(result == -1) { return false; } else { return true; } } public Cursor getInformation() { Cursor data; SQLiteDatabase db = this.getWritableDatabase(); String query = "Select * from " + TABLE_NAME; data = db.rawQuery(query,null); return data; } public String fetch_Name(int i) { String Str = ""; SQLiteDatabase db = this.getReadableDatabase(); String query = "Select "+TABLE_COL_NAME+" FROM " + TABLE_NAME + " WHERE " +TABLE_COL_ID+" = "+i; Cursor data = db.rawQuery(query,null); if(data.moveToFirst()) { Str = data.getString(data.getColumnIndex(TABLE_COL_NAME+"")); } return Str; } public String fetch_MOB(int i) { String Str = ""; SQLiteDatabase db = this.getReadableDatabase(); String query = "Select "+TABLE_COL_MOB+" FROM " + TABLE_NAME + " WHERE " +TABLE_COL_ID+" = "+i; Cursor data = db.rawQuery(query,null); if(data.moveToFirst()) { Str = data.getString(data.getColumnIndex(TABLE_COL_MOB+"")); } return Str; } public String fetch_Email(int i) { String Str = ""; SQLiteDatabase db = this.getReadableDatabase(); String query = "Select "+TABLE_COL_EMAIL+" FROM " + TABLE_NAME + " WHERE " +TABLE_COL_ID+" = "+i; Cursor data = db.rawQuery(query,null); if(data.moveToFirst()) { Str = data.getString(data.getColumnIndex(TABLE_COL_EMAIL+"")); } return Str; } public Bitmap fetch_Img(int i) { /// THIS FUNCTION IS NOT WORKING byte[] ImgByte; SQLiteDatabase db = this.getWritableDatabase(); String query = "Select "+TABLE_COL_IMG+" FROM " + TABLE_NAME + " WHERE " +TABLE_COL_ID+" = "+i; Cursor data = db.rawQuery(query,null); if(data.moveToFirst()) { ImgByte = data.getBlob(data.getColumnIndex(TABLE_COL_IMG+"")); Bitmap bitMAP = BitmapFactory.decodeByteArray(ImgByte, 
0, ImgByte.length); // Here it's Always Returning Null if(bitMAP == null) { Log.e( TAG, "Bitmap is null :/ "); } return bitMAP; } return null; } } **DetailsViewActivity** package com.example.arif.contacts; import android.content.Intent; import android.database.sqlite.SQLiteDatabase; import android.graphics.Bitmap; import android.graphics.BitmapFactory; import android.support.v7.app.AppCompatActivity; import android.os.Bundle; import android.util.Log; import android.view.Menu; import android.view.MenuItem; import android.widget.ImageView; import android.widget.TextView; import android.widget.Toast; public class DetailsOfContacts extends AppCompatActivity { int Pos; TextView Name_View, Mob_View, Email_View; ImageView Image_View; UserDBHelper userDBHelper; @Override protected void onCreate(Bundle savedInstanceState) { super.onCreate(savedInstanceState); setContentView(R.layout.activity_details_of_contacts); //---- ---- Intend Bundle ---- ---- Intent intent = getIntent(); Bundle bundle = intent.getExtras(); Pos = -1; if(bundle != null) { Pos = bundle.getInt("Position"); } Toast.makeText(getApplicationContext(), "Position "+ Pos,Toast.LENGTH_SHORT).show(); // ---- ---- Find View ---- ---- Name_View = (TextView) findViewById(R.id.DetName); Mob_View = (TextView) findViewById(R.id.DetMob); Email_View = (TextView) findViewById(R.id.DetEmail); Image_View = (ImageView) findViewById(R.id.DetImg); /// --- --- DataBase ----- UserDBHelper userDBHelper = new UserDBHelper(getApplicationContext()); Name_View.setText(userDBHelper.fetch_Name(Pos+1)); // Working Fine Mob_View.setText(userDBHelper.fetch_MOB(Pos+1));// Working Fine Email_View.setText(userDBHelper.fetch_Email(Pos+1));// Working Fine Image_View.setImageBitmap(userDBHelper.fetch_Img(Pos+1)); // it's Not showing The image in activity } @Override public boolean onCreateOptionsMenu(Menu menu) { getMenuInflater().inflate(R.menu.edit_menu, menu); return super.onCreateOptionsMenu(menu); } @Override public boolean 
onOptionsItemSelected(MenuItem item) { int id ; id = item.getItemId(); if(id == R.id.EditButt) { Toast.makeText(getApplicationContext(),"Edit Button Clicked", Toast.LENGTH_SHORT).show(); Intent I = new Intent(DetailsOfContacts.this, EditActivity.class); startActivity(I); return true; } return super.onOptionsItemSelected(item); } }
0debug
/*
 * Marshal a 9p reply for @pdu into the guest-visible ("in") scatter/gather
 * buffers of its virtqueue element, starting at byte @offset, according to
 * the format string @fmt and the arguments in @ap.
 *
 * The virtqueue element is looked up by the PDU's index in the transport
 * state that embeds the generic V9fsState.  The literal '1' forwarded to
 * v9fs_iov_vmarshal is a flag whose meaning is defined by that helper
 * (not visible here -- presumably byte-order conversion; confirm at its
 * definition).  Returns the helper's result (bytes written or an error).
 */
ssize_t virtio_pdu_vmarshal(V9fsPDU *pdu, size_t offset,
                            const char *fmt, va_list ap)
{
    V9fsState *s = pdu->s;
    /* Recover the virtio transport state wrapping the generic 9p state. */
    V9fsVirtioState *v = container_of(s, V9fsVirtioState, state);
    VirtQueueElement *elem = &v->elems[pdu->idx];

    return v9fs_iov_vmarshal(elem->in_sg, elem->in_num, offset, 1, fmt, ap);
}
1threat
/*
 * Apply an AV_PKT_DATA_PARAM_CHANGE side-data entry carried by @avpkt to
 * the codec context: channel count, channel layout, sample rate and/or
 * picture dimensions, as selected by the leading little-endian flags word.
 *
 * Layout of the side data, in order: u32 flags, then for each set flag
 * its payload (u32 channels, u64 channel_layout, u32 sample_rate,
 * u32 width + u32 height).  Parsing stops silently on truncated data.
 *
 * NOTE(review): the two bare "if (size < 4) return;" checks run even when
 * the corresponding flag is not set, so a packet whose remaining payload
 * is legitimately shorter can bail out before a later flagged field
 * (e.g. DIMENSIONS-only data preceded by nothing) -- verify against the
 * side-data spec whether per-flag checks were intended.  Also note that
 * fields already applied (e.g. channels) are kept even when a later field
 * turns out to be truncated.
 */
static void apply_param_change(AVCodecContext *avctx, AVPacket *avpkt)
{
    int size = 0;
    const uint8_t *data;
    uint32_t flags;

    /* Only codecs that advertise in-band parameter changes. */
    if (!(avctx->codec->capabilities & CODEC_CAP_PARAM_CHANGE))
        return;

    data = av_packet_get_side_data(avpkt, AV_PKT_DATA_PARAM_CHANGE, &size);
    /* Need at least the 32-bit flags word. */
    if (!data || size < 4)
        return;

    flags = bytestream_get_le32(&data);
    size -= 4;
    if (size < 4) /* see NOTE above: unconditional check */
        return;

    if (flags & AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_COUNT) {
        avctx->channels = bytestream_get_le32(&data);
        size -= 4;
    }
    if (flags & AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_LAYOUT) {
        if (size < 8)
            return;
        avctx->channel_layout = bytestream_get_le64(&data);
        size -= 8;
    }
    if (size < 4) /* see NOTE above: unconditional check */
        return;
    if (flags & AV_SIDE_DATA_PARAM_CHANGE_SAMPLE_RATE) {
        avctx->sample_rate = bytestream_get_le32(&data);
        size -= 4;
    }
    if (flags & AV_SIDE_DATA_PARAM_CHANGE_DIMENSIONS) {
        if (size < 8)
            return;
        avctx->width  = bytestream_get_le32(&data);
        avctx->height = bytestream_get_le32(&data);
        /* Keep the context's derived dimension fields consistent. */
        avcodec_set_dimensions(avctx, avctx->width, avctx->height);
        size -= 8;
    }
}
1threat
Android Studio maven { url "https://jitpack.io" } can't download : <p>Can't use anymore <code>maven { url "https://jitpack.io" }</code>. I have following gradle:</p> <pre><code>apply plugin: 'com.android.application' android { compileSdkVersion 27 defaultConfig { applicationId "test.com.myapplication" minSdkVersion 19 targetSdkVersion 27 versionCode 1 versionName "1.0" } buildTypes { release { minifyEnabled false proguardFiles getDefaultProguardFile('proguard-android.txt'), 'proguard-rules.pro' } } } allprojects { repositories { maven { url "https://jitpack.io" } } } dependencies { implementation fileTree(dir: 'libs', include: ['*.jar']) implementation 'com.android.support:appcompat-v7:27.+' } </code></pre> <p>When try to build I get error:</p> <pre><code> org.gradle.internal.resource.transport.http.HttpRequestException: Could not GET 'https://jitpack.io/com/android/support/appcompat-v7/maven-metadata.xml'. at org.gradle.internal.resource.transport.http.HttpClientHelper.performRequest(HttpClientHelper.java:96) at org.gradle.internal.resource.transport.http.HttpClientHelper.performRawGet(HttpClientHelper.java:80) at org.gradle.internal.resource.transport.http.HttpClientHelper.performGet(HttpClientHelper.java:84) at org.gradle.internal.resource.transport.http.HttpResourceAccessor.openResource(HttpResourceAccessor.java:43) at org.gradle.internal.resource.transport.http.HttpResourceAccessor.openResource(HttpResourceAccessor.java:29) at org.gradle.internal.resource.transfer.DefaultExternalResourceConnector.openResource(DefaultExternalResourceConnector.java:56) at org.gradle.internal.resource.transfer.ProgressLoggingExternalResourceAccessor.openResource(ProgressLoggingExternalResourceAccessor.java:36) at org.gradle.internal.resource.transfer.AccessorBackedExternalResource.withContentIfPresent(AccessorBackedExternalResource.java:130) at 
org.gradle.internal.resource.BuildOperationFiringExternalResourceDecorator$11.call(BuildOperationFiringExternalResourceDecorator.java:237) at org.gradle.internal.resource.BuildOperationFiringExternalResourceDecorator$11.call(BuildOperationFiringExternalResourceDecorator.java:229) at org.gradle.internal.progress.DefaultBuildOperationExecutor$CallableBuildOperationWorker.execute(DefaultBuildOperationExecutor.java:350) at org.gradle.internal.progress.DefaultBuildOperationExecutor$CallableBuildOperationWorker.execute(DefaultBuildOperationExecutor.java:340) at org.gradle.internal.progress.DefaultBuildOperationExecutor.execute(DefaultBuildOperationExecutor.java:199) at org.gradle.internal.progress.DefaultBuildOperationExecutor.call(DefaultBuildOperationExecutor.java:120) at org.gradle.internal.resource.BuildOperationFiringExternalResourceDecorator.withContentIfPresent(BuildOperationFiringExternalResourceDecorator.java:229) at org.gradle.internal.resource.transfer.DefaultCacheAwareExternalResourceAccessor.copyToCache(DefaultCacheAwareExternalResourceAccessor.java:199) at org.gradle.internal.resource.transfer.DefaultCacheAwareExternalResourceAccessor.access$300(DefaultCacheAwareExternalResourceAccessor.java:55) at org.gradle.internal.resource.transfer.DefaultCacheAwareExternalResourceAccessor$1.create(DefaultCacheAwareExternalResourceAccessor.java:90) at org.gradle.internal.resource.transfer.DefaultCacheAwareExternalResourceAccessor$1.create(DefaultCacheAwareExternalResourceAccessor.java:82) at org.gradle.cache.internal.ProducerGuard$AdaptiveProducerGuard.guardByKey(ProducerGuard.java:97) at org.gradle.internal.resource.transfer.DefaultCacheAwareExternalResourceAccessor.getResource(DefaultCacheAwareExternalResourceAccessor.java:82) at org.gradle.api.internal.artifacts.repositories.resolver.MavenMetadataLoader.parseMavenMetadataInfo(MavenMetadataLoader.java:61) at org.gradle.api.internal.artifacts.repositories.resolver.MavenMetadataLoader.load(MavenMetadataLoader.java:51) 
at org.gradle.api.internal.artifacts.repositories.resolver.MavenVersionLister$1.visit(MavenVersionLister.java:51) at org.gradle.api.internal.artifacts.repositories.resolver.ChainedVersionLister$1.visit(ChainedVersionLister.java:47) at org.gradle.api.internal.artifacts.repositories.resolver.ExternalResourceResolver.listVersionsForAllPatterns(ExternalResourceResolver.java:184) at org.gradle.api.internal.artifacts.repositories.resolver.ExternalResourceResolver.doListModuleVersions(ExternalResourceResolver.java:173) at org.gradle.api.internal.artifacts.repositories.resolver.ExternalResourceResolver.access$100(ExternalResourceResolver.java:90) at org.gradle.api.internal.artifacts.repositories.resolver.ExternalResourceResolver$RemoteRepositoryAccess.listModuleVersions(ExternalResourceResolver.java:462) at org.gradle.api.internal.artifacts.ivyservice.ivyresolve.CachingModuleComponentRepository$ResolveAndCacheRepositoryAccess.listModuleVersions(CachingModuleComponentRepository.java:345) at org.gradle.api.internal.artifacts.ivyservice.ivyresolve.BaseModuleComponentRepositoryAccess.listModuleVersions(BaseModuleComponentRepositoryAccess.java:45) at org.gradle.api.internal.artifacts.ivyservice.ivyresolve.memcache.InMemoryCachedModuleComponentRepository$CachedAccess.listModuleVersions(InMemoryCachedModuleComponentRepository.java:87) at org.gradle.api.internal.artifacts.ivyservice.ivyresolve.ErrorHandlingModuleComponentRepository$ErrorHandlingModuleComponentRepositoryAccess.listModuleVersions(ErrorHandlingModuleComponentRepository.java:111) at org.gradle.api.internal.artifacts.ivyservice.ivyresolve.DynamicVersionResolver$VersionListResult.process(DynamicVersionResolver.java:428) at org.gradle.api.internal.artifacts.ivyservice.ivyresolve.DynamicVersionResolver$VersionListResult.resolve(DynamicVersionResolver.java:413) at org.gradle.api.internal.artifacts.ivyservice.ivyresolve.DynamicVersionResolver$RepositoryResolveState.resolve(DynamicVersionResolver.java:231) at 
org.gradle.api.internal.artifacts.ivyservice.ivyresolve.DynamicVersionResolver.findLatestModule(DynamicVersionResolver.java:140) at org.gradle.api.internal.artifacts.ivyservice.ivyresolve.DynamicVersionResolver.findLatestModule(DynamicVersionResolver.java:121) at org.gradle.api.internal.artifacts.ivyservice.ivyresolve.DynamicVersionResolver.resolve(DynamicVersionResolver.java:88) at org.gradle.api.internal.artifacts.ivyservice.ivyresolve.RepositoryChainDependencyToComponentIdResolver.resolve(RepositoryChainDependencyToComponentIdResolver.java:61) at org.gradle.api.internal.artifacts.ivyservice.resolveengine.ComponentResolversChain$DependencyToComponentIdResolverChain.resolve(ComponentResolversChain.java:149) at org.gradle.api.internal.artifacts.ivyservice.dependencysubstitution.DependencySubstitutionResolver.resolve(DependencySubstitutionResolver.java:46) at org.gradle.api.internal.artifacts.ivyservice.resolveengine.graph.builder.SelectorState.resolveModuleRevisionId(SelectorState.java:99) at org.gradle.api.internal.artifacts.ivyservice.resolveengine.graph.builder.EdgeState.resolveModuleRevisionId(EdgeState.java:91) at org.gradle.api.internal.artifacts.ivyservice.resolveengine.graph.builder.DependencyGraphBuilder.performSelectionSerially(DependencyGraphBuilder.java:261) at org.gradle.api.internal.artifacts.ivyservice.resolveengine.graph.builder.DependencyGraphBuilder.resolveEdges(DependencyGraphBuilder.java:226) at org.gradle.api.internal.artifacts.ivyservice.resolveengine.graph.builder.DependencyGraphBuilder.traverseGraph(DependencyGraphBuilder.java:143) at org.gradle.api.internal.artifacts.ivyservice.resolveengine.graph.builder.DependencyGraphBuilder.resolve(DependencyGraphBuilder.java:109) at org.gradle.api.internal.artifacts.ivyservice.resolveengine.DefaultArtifactDependencyResolver.resolve(DefaultArtifactDependencyResolver.java:90) at org.gradle.api.internal.artifacts.ivyservice.DefaultConfigurationResolver.resolveGraph(DefaultConfigurationResolver.java:146) 
at org.gradle.api.internal.artifacts.ivyservice.ShortCircuitEmptyConfigurationResolver.resolveGraph(ShortCircuitEmptyConfigurationResolver.java:73) at org.gradle.api.internal.artifacts.ivyservice.ErrorHandlingConfigurationResolver.resolveGraph(ErrorHandlingConfigurationResolver.java:66) at org.gradle.api.internal.artifacts.configurations.DefaultConfiguration$4.run(DefaultConfiguration.java:483) at org.gradle.internal.progress.DefaultBuildOperationExecutor$RunnableBuildOperationWorker.execute(DefaultBuildOperationExecutor.java:336) at org.gradle.internal.progress.DefaultBuildOperationExecutor$RunnableBuildOperationWorker.execute(DefaultBuildOperationExecutor.java:328) at org.gradle.internal.progress.DefaultBuildOperationExecutor.execute(DefaultBuildOperationExecutor.java:199) at org.gradle.internal.progress.DefaultBuildOperationExecutor.run(DefaultBuildOperationExecutor.java:110) at org.gradle.api.internal.artifacts.configurations.DefaultConfiguration.resolveGraphIfRequired(DefaultConfiguration.java:474) at org.gradle.api.internal.artifacts.configurations.DefaultConfiguration.resolveToStateOrLater(DefaultConfiguration.java:459) at org.gradle.api.internal.artifacts.configurations.DefaultConfiguration.access$1700(DefaultConfiguration.java:116) at org.gradle.api.internal.artifacts.configurations.DefaultConfiguration$ConfigurationFileCollection.getSelectedArtifacts(DefaultConfiguration.java:901) at org.gradle.api.internal.artifacts.configurations.DefaultConfiguration$ConfigurationFileCollection.getFiles(DefaultConfiguration.java:889) at org.gradle.api.internal.file.AbstractFileCollection.iterator(AbstractFileCollection.java:68) at org.gradle.api.internal.changedetection.state.AbstractFileCollectionSnapshotter$FileCollectionVisitorImpl.visitCollection(AbstractFileCollectionSnapshotter.java:70) at org.gradle.api.internal.file.AbstractFileCollection.visitRootElements(AbstractFileCollection.java:234) at 
org.gradle.api.internal.file.CompositeFileCollection.visitRootElements(CompositeFileCollection.java:185) at org.gradle.api.internal.changedetection.state.AbstractFileCollectionSnapshotter.snapshot(AbstractFileCollectionSnapshotter.java:53) at org.gradle.api.internal.changedetection.state.DefaultGenericFileCollectionSnapshotter.snapshot(DefaultGenericFileCollectionSnapshotter.java:38) at org.gradle.api.internal.changedetection.state.CacheBackedTaskHistoryRepository.snapshotTaskFiles(CacheBackedTaskHistoryRepository.java:331) at org.gradle.api.internal.changedetection.state.CacheBackedTaskHistoryRepository.createExecution(CacheBackedTaskHistoryRepository.java:154) at org.gradle.api.internal.changedetection.state.CacheBackedTaskHistoryRepository.access$100(CacheBackedTaskHistoryRepository.java:61) at org.gradle.api.internal.changedetection.state.CacheBackedTaskHistoryRepository$1.getCurrentExecution(CacheBackedTaskHistoryRepository.java:114) at org.gradle.api.internal.changedetection.changes.DefaultTaskArtifactStateRepository$TaskArtifactStateImpl.getStates(DefaultTaskArtifactStateRepository.java:201) at org.gradle.api.internal.changedetection.changes.DefaultTaskArtifactStateRepository$TaskArtifactStateImpl.isUpToDate(DefaultTaskArtifactStateRepository.java:86) at org.gradle.api.internal.tasks.execution.SkipUpToDateTaskExecuter.execute(SkipUpToDateTaskExecuter.java:53) at org.gradle.api.internal.tasks.execution.ResolveTaskOutputCachingStateExecuter.execute(ResolveTaskOutputCachingStateExecuter.java:54) at org.gradle.api.internal.tasks.execution.ValidatingTaskExecuter.execute(ValidatingTaskExecuter.java:60) at org.gradle.api.internal.tasks.execution.SkipEmptySourceFilesTaskExecuter.execute(SkipEmptySourceFilesTaskExecuter.java:97) at org.gradle.api.internal.tasks.execution.CleanupStaleOutputsExecuter.execute(CleanupStaleOutputsExecuter.java:87) at 
org.gradle.api.internal.tasks.execution.ResolveTaskArtifactStateTaskExecuter.execute(ResolveTaskArtifactStateTaskExecuter.java:52) at org.gradle.api.internal.tasks.execution.SkipTaskWithNoActionsExecuter.execute(SkipTaskWithNoActionsExecuter.java:52) at org.gradle.api.internal.tasks.execution.SkipOnlyIfTaskExecuter.execute(SkipOnlyIfTaskExecuter.java:54) at org.gradle.api.internal.tasks.execution.ExecuteAtMostOnceTaskExecuter.execute(ExecuteAtMostOnceTaskExecuter.java:43) at org.gradle.api.internal.tasks.execution.CatchExceptionTaskExecuter.execute(CatchExceptionTaskExecuter.java:34) at org.gradle.execution.taskgraph.DefaultTaskGraphExecuter$EventFiringTaskWorker$1.run(DefaultTaskGraphExecuter.java:248) at org.gradle.internal.progress.DefaultBuildOperationExecutor$RunnableBuildOperationWorker.execute(DefaultBuildOperationExecutor.java:336) at org.gradle.internal.progress.DefaultBuildOperationExecutor$RunnableBuildOperationWorker.execute(DefaultBuildOperationExecutor.java:328) at org.gradle.internal.progress.DefaultBuildOperationExecutor.execute(DefaultBuildOperationExecutor.java:199) at org.gradle.internal.progress.DefaultBuildOperationExecutor.run(DefaultBuildOperationExecutor.java:110) at org.gradle.execution.taskgraph.DefaultTaskGraphExecuter$EventFiringTaskWorker.execute(DefaultTaskGraphExecuter.java:241) at org.gradle.execution.taskgraph.DefaultTaskGraphExecuter$EventFiringTaskWorker.execute(DefaultTaskGraphExecuter.java:230) at org.gradle.execution.taskgraph.DefaultTaskPlanExecutor$TaskExecutorWorker.processTask(DefaultTaskPlanExecutor.java:123) at org.gradle.execution.taskgraph.DefaultTaskPlanExecutor$TaskExecutorWorker.access$200(DefaultTaskPlanExecutor.java:79) at org.gradle.execution.taskgraph.DefaultTaskPlanExecutor$TaskExecutorWorker$1.execute(DefaultTaskPlanExecutor.java:104) at org.gradle.execution.taskgraph.DefaultTaskPlanExecutor$TaskExecutorWorker$1.execute(DefaultTaskPlanExecutor.java:98) at 
org.gradle.execution.taskgraph.DefaultTaskExecutionPlan.execute(DefaultTaskExecutionPlan.java:626) at org.gradle.execution.taskgraph.DefaultTaskExecutionPlan.executeWithTask(DefaultTaskExecutionPlan.java:581) at org.gradle.execution.taskgraph.DefaultTaskPlanExecutor$TaskExecutorWorker.run(DefaultTaskPlanExecutor.java:98) at org.gradle.internal.concurrent.ExecutorPolicy$CatchAndRecordFailures.onExecute(ExecutorPolicy.java:63) at org.gradle.internal.concurrent.ManagedExecutorImpl$1.run(ManagedExecutorImpl.java:46) at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) at org.gradle.internal.concurrent.ThreadFactoryImpl$ManagedThreadRunnable.run(ThreadFactoryImpl.java:55) at java.lang.Thread.run(Thread.java:745) Caused by: org.apache.http.conn.ConnectTimeoutException: Connect to jitpack.io:443 [jitpack.io/104.24.23.62, jitpack.io/104.24.22.62] failed: Read timed out at org.apache.http.impl.conn.DefaultHttpClientConnectionOperator.connect(DefaultHttpClientConnectionOperator.java:143) at org.apache.http.impl.conn.PoolingHttpClientConnectionManager.connect(PoolingHttpClientConnectionManager.java:353) at org.apache.http.impl.execchain.MainClientExec.establishRoute(MainClientExec.java:380) at org.apache.http.impl.execchain.MainClientExec.execute(MainClientExec.java:236) at org.apache.http.impl.execchain.ProtocolExec.execute(ProtocolExec.java:184) at org.apache.http.impl.execchain.RetryExec.execute(RetryExec.java:88) at org.apache.http.impl.execchain.RedirectExec.execute(RedirectExec.java:110) at org.apache.http.impl.client.InternalHttpClient.doExecute(InternalHttpClient.java:184) at org.apache.http.impl.client.CloseableHttpClient.execute(CloseableHttpClient.java:82) at org.gradle.internal.resource.transport.http.HttpClientHelper.performHttpRequest(HttpClientHelper.java:148) at 
org.gradle.internal.resource.transport.http.HttpClientHelper.performHttpRequest(HttpClientHelper.java:126) at org.gradle.internal.resource.transport.http.HttpClientHelper.executeGetOrHead(HttpClientHelper.java:103) at org.gradle.internal.resource.transport.http.HttpClientHelper.performRequest(HttpClientHelper.java:94) ... 103 more Caused by: java.net.SocketTimeoutException: Read timed out at java.net.SocketInputStream.socketRead0(Native Method) at java.net.SocketInputStream.socketRead(SocketInputStream.java:116) at java.net.SocketInputStream.read(SocketInputStream.java:170) at java.net.SocketInputStream.read(SocketInputStream.java:141) at sun.security.ssl.InputRecord.readFully(InputRecord.java:465) at sun.security.ssl.InputRecord.read(InputRecord.java:503) at sun.security.ssl.SSLSocketImpl.readRecord(SSLSocketImpl.java:973) at sun.security.ssl.SSLSocketImpl.performInitialHandshake(SSLSocketImpl.java:1375) at sun.security.ssl.SSLSocketImpl.startHandshake(SSLSocketImpl.java:1403) at sun.security.ssl.SSLSocketImpl.startHandshake(SSLSocketImpl.java:1387) at org.apache.http.conn.ssl.SSLConnectionSocketFactory.createLayeredSocket(SSLConnectionSocketFactory.java:394) at org.apache.http.conn.ssl.SSLConnectionSocketFactory.connectSocket(SSLConnectionSocketFactory.java:353) at org.apache.http.impl.conn.DefaultHttpClientConnectionOperator.connect(DefaultHttpClientConnectionOperator.java:134) ... 115 more </code></pre> <p><a href="https://i.stack.imgur.com/pVkd0.jpg" rel="noreferrer"><img src="https://i.stack.imgur.com/pVkd0.jpg" alt="enter image description here"></a></p>
0debug
Plot with darker color for denser areas and transparent color for less dense areas : <p>How can I make a plot in R with ggplot2 that is darker where there are more points and more transparent where there are less points? I tried making a geom_hex plot with a gradient but it is ignoring alpha values.</p>
0debug
/**
 * Decode all macroblocks of a VC-1 P-frame (or P-field) slice.
 *
 * Selects the intra/inter AC coding sets from c_ac_table_index, then walks
 * every macroblock row in [start_mb_y, end_mb_y), dispatching per-MB decode
 * by frame-coding mode (fcm).  Per-row state (cbp/ttblk/is_intra/luma_mv)
 * is shifted down one stride so the next row can reference its top
 * neighbours.  The in-loop deblocking filter is applied one row behind the
 * decode position, with a final pass after the loop for the last row.
 *
 * @param v  VC-1 decoder context (wraps the shared MpegEncContext)
 */
static void vc1_decode_p_blocks(VC1Context *v)
{
    MpegEncContext *s = &v->s;
    int apply_loop_filter;

    /* Choose the intra AC coding set for this frame. */
    switch (v->c_ac_table_index) {
    case 0:
        v->codingset = (v->pqindex <= 8) ? CS_HIGH_RATE_INTRA : CS_LOW_MOT_INTRA;
        break;
    case 1:
        v->codingset = CS_HIGH_MOT_INTRA;
        break;
    case 2:
        v->codingset = CS_MID_RATE_INTRA;
        break;
    }

    /* Choose the inter AC coding set (same index selects both tables). */
    switch (v->c_ac_table_index) {
    case 0:
        v->codingset2 = (v->pqindex <= 8) ? CS_HIGH_RATE_INTER : CS_LOW_MOT_INTER;
        break;
    case 1:
        v->codingset2 = CS_HIGH_MOT_INTER;
        break;
    case 2:
        v->codingset2 = CS_MID_RATE_INTER;
        break;
    }

    /* In-loop filter is skipped entirely when the caller discards non-key
       filtering via skip_loop_filter. */
    apply_loop_filter = s->loop_filter && !(s->avctx->skip_loop_filter >= AVDISCARD_NONKEY);

    s->first_slice_line = 1;
    /* Clear two strides of CBP context: current row plus the neighbour row. */
    memset(v->cbp_base, 0, sizeof(v->cbp_base[0])*2*s->mb_stride);
    for (s->mb_y = s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
        s->mb_x = 0;
        ff_init_block_index(s);
        for (; s->mb_x < s->mb_width; s->mb_x++) {
            ff_update_block_index(s);

            /* Dispatch on frame-coding mode: interlaced field, interlaced
               frame, or progressive. */
            if (v->fcm == ILACE_FIELD)
                vc1_decode_p_mb_intfi(v);
            else if (v->fcm == ILACE_FRAME)
                vc1_decode_p_mb_intfr(v);
            else
                vc1_decode_p_mb(v);

            /* Deblock lags one row behind decode so bottom neighbours exist
               (progressive mode only here). */
            if (s->mb_y != s->start_mb_y && apply_loop_filter && v->fcm == PROGRESSIVE)
                vc1_apply_p_loop_filter(v);

            /* Bitstream overrun (or wrap to negative): mark the slice as
               errored and bail out of the whole function. */
            if (get_bits_count(&s->gb) > v->bits || get_bits_count(&s->gb) < 0) {
                ff_er_add_slice(s, 0, s->start_mb_y, s->mb_x, s->mb_y, ER_MB_ERROR);
                av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i at %ix%i\n", get_bits_count(&s->gb), v->bits, s->mb_x, s->mb_y);
                return;
            }
        }

        /* Shift this row's per-MB context down one stride so it becomes the
           "top neighbour" data for the next row. */
        memmove(v->cbp_base, v->cbp, sizeof(v->cbp_base[0]) * s->mb_stride);
        memmove(v->ttblk_base, v->ttblk, sizeof(v->ttblk_base[0]) * s->mb_stride);
        memmove(v->is_intra_base, v->is_intra, sizeof(v->is_intra_base[0]) * s->mb_stride);
        memmove(v->luma_mv_base, v->luma_mv, sizeof(v->luma_mv_base[0]) * s->mb_stride);

        /* Draw the previous (fully filtered) 16-pixel band. */
        if (s->mb_y != s->start_mb_y)
            ff_draw_horiz_band(s, (s->mb_y - 1) * 16, 16);
        s->first_slice_line = 0;
    }

    /* Final deblocking pass for the last decoded row (it had no row below
       it during the main loop). */
    if (apply_loop_filter) {
        s->mb_x = 0;
        ff_init_block_index(s);
        for (; s->mb_x < s->mb_width; s->mb_x++) {
            ff_update_block_index(s);
            vc1_apply_p_loop_filter(v);
        }
    }

    /* Emit the last band and mark the slice as successfully ended.
       field_mode doubles the row coordinates for field pictures. */
    if (s->end_mb_y >= s->start_mb_y)
        ff_draw_horiz_band(s, (s->end_mb_y - 1) * 16, 16);
    ff_er_add_slice(s, 0, s->start_mb_y << v->field_mode, s->mb_width - 1, (s->end_mb_y << v->field_mode) - 1, ER_MB_END);
}
1threat
/**
 * Dump guest memory to the monitor (the 'x'/'xp' style commands).
 *
 * @param mon         monitor to print to
 * @param count       number of items to dump
 * @param format      'x' hex, 'o' octal, 'u'/'d' unsigned/signed decimal,
 *                    'c' character, 'i' disassemble instructions
 * @param wsize       item width in bytes (1, 2, 4 or 8); forced to 1 for 'c'
 * @param addr        guest address to start from
 * @param is_physical non-zero: read physical memory, else virtual via the
 *                    current CPU
 */
static void memory_dump(Monitor *mon, int count, int format, int wsize, hwaddr addr, int is_physical)
{
    int l, line_size, i, max_digits, len;
    uint8_t buf[16];
    uint64_t v;

    if (format == 'i') {
        /* Disassembly mode: 'flags' encodes the target-specific operand
           size / machine for the disassembler. */
        int flags = 0;
#ifdef TARGET_I386
        CPUArchState *env = mon_get_cpu_env();
        if (wsize == 2) {
            /* 16-bit code explicitly requested */
            flags = 1;
        } else if (wsize == 4) {
            /* 32-bit code explicitly requested */
            flags = 0;
        } else {
            /* No explicit width: infer from the current CS descriptor. */
            flags = 0;
            if (env) {
#ifdef TARGET_X86_64
                /* Long mode with a 64-bit code segment -> 64-bit disasm. */
                if ((env->efer & MSR_EFER_LMA) && (env->segs[R_CS].flags & DESC_L_MASK))
                    flags = 2;
                else
#endif
                /* Non-"big" (D/B clear) code segment -> 16-bit disasm. */
                if (!(env->segs[R_CS].flags & DESC_B_MASK))
                    flags = 1;
            }
        }
#endif
#ifdef TARGET_PPC
        CPUArchState *env = mon_get_cpu_env();
        /* Pack endianness (high half) and BFD machine type (low half). */
        flags = msr_le << 16;
        flags |= env->bfd_mach;
#endif
        monitor_disas(mon, mon_get_cpu(), addr, count, is_physical, flags);
        return;
    }

    len = wsize * count;            /* total byte count to dump */
    if (wsize == 1)
        line_size = 8;              /* 8 bytes per output line */
    else
        line_size = 16;             /* 16 bytes per output line */

    /* Width of one printed item, chosen so the largest value of the given
       word size fits in the selected radix. */
    max_digits = 0;
    switch(format) {
    case 'o':
        max_digits = (wsize * 8 + 2) / 3;
        break;
    default:
    case 'x':
        max_digits = (wsize * 8) / 4;
        break;
    case 'u':
    case 'd':
        max_digits = (wsize * 8 * 10 + 32) / 33;
        break;
    case 'c':
        wsize = 1;                  /* characters are always single bytes */
        break;
    }

    while (len > 0) {
        /* Line prefix: the guest address in the appropriate format. */
        if (is_physical)
            monitor_printf(mon, TARGET_FMT_plx ":", addr);
        else
            monitor_printf(mon, TARGET_FMT_lx ":", (target_ulong)addr);
        l = len;
        if (l > line_size)
            l = line_size;
        /* Fetch one line's worth of bytes into buf. */
        if (is_physical) {
            cpu_physical_memory_read(addr, buf, l);
        } else {
            if (cpu_memory_rw_debug(mon_get_cpu(), addr, buf, l, 0) < 0) {
                monitor_printf(mon, " Cannot access memory\n");
                break;
            }
        }
        /* Decode and print each item in the line at the guest's byte order
           (ld*_p honour the target endianness). */
        i = 0;
        while (i < l) {
            switch(wsize) {
            default:
            case 1:
                v = ldub_p(buf + i);
                break;
            case 2:
                v = lduw_p(buf + i);
                break;
            case 4:
                v = (uint32_t)ldl_p(buf + i);
                break;
            case 8:
                v = ldq_p(buf + i);
                break;
            }
            monitor_printf(mon, " ");
            switch(format) {
            case 'o':
                monitor_printf(mon, "%#*" PRIo64, max_digits, v);
                break;
            case 'x':
                monitor_printf(mon, "0x%0*" PRIx64, max_digits, v);
                break;
            case 'u':
                monitor_printf(mon, "%*" PRIu64, max_digits, v);
                break;
            case 'd':
                monitor_printf(mon, "%*" PRId64, max_digits, v);
                break;
            case 'c':
                monitor_printc(mon, v);
                break;
            }
            i += wsize;
        }
        monitor_printf(mon, "\n");
        addr += l;
        len -= l;
    }
}
1threat
Basic loop in google sheets : I'm trying to use a macro I made in Excel in Google Sheets. This is my first time using this, and I have it kind of working, but it seems really slow compared to Excel. I'm not sure what's going on that makes it so slow. I have messed with the loop in a bunch of different ways, but it still seems slow; it should only be running about 20-50 times. The loop should be: when A is < B, then A = A+1. function loopscript() { var app = SpreadsheetApp; var activeSheet = app.getActiveSpreadsheet().getActiveSheet(); var num1 = activeSheet.getRange(3, 2).getValue(); var num2 = activeSheet.getRange(4, 10).getValue(); for(var i=1;num2 < num1;i++){ var num3 = activeSheet.getRange(6, 2).setValue(i); num2 = activeSheet.getRange(4, 10).getValue(); } }
0debug
/**
 * Parse the quantization parameters shared by the QCD/QCC marker segments.
 *
 * Reads the Sqcd/Sqcc style byte (guard bits + quantization style) and then
 * the per-band exponent/mantissa table into q->expn[] / q->mant[].
 *
 * Fixes vs. the previous version: the entry count derived from the marker
 * length was never bounded, so a crafted stream could write past the
 * expn[]/mant[] tables (the SI branch shows they hold 32 * 3 entries), and
 * the expounded branch only verified n input bytes while consuming 2 * n.
 *
 * @param s  decoder context (bitstream cursor in s->buf / s->buf_end)
 * @param n  marker segment length as read from the stream
 * @param q  quantization style to fill in
 * @return 0 on success, AVERROR(EINVAL) on truncated or malformed input
 */
static int get_qcx(J2kDecoderContext *s, int n, J2kQuantStyle *q)
{
    int i, x;

    if (s->buf_end - s->buf < 1)
        return AVERROR(EINVAL);

    x = bytestream_get_byte(&s->buf);   /* Sqcd/Sqcc style byte */
    q->nguardbits = x >> 5;
    q->quantsty = x & 0x1f;

    if (q->quantsty == J2K_QSTY_NONE) {
        /* No quantization: one exponent byte per sub-band. */
        n -= 3;
        /* Bound the count by the table size (32 * 3 entries, see the SI
           branch) and by the bytes actually remaining in the buffer. */
        if (n < 0 || n > 32 * 3 || s->buf_end - s->buf < n)
            return AVERROR(EINVAL);
        for (i = 0; i < n; i++)
            q->expn[i] = bytestream_get_byte(&s->buf) >> 3;
    } else if (q->quantsty == J2K_QSTY_SI) {
        /* Scalar implicit: one value, derived for all other sub-bands. */
        if (s->buf_end - s->buf < 2)
            return AVERROR(EINVAL);
        x = bytestream_get_be16(&s->buf);
        q->expn[0] = x >> 11;
        q->mant[0] = x & 0x7ff;
        for (i = 1; i < 32 * 3; i++) {
            /* Exponent decreases by one per resolution level, floored at 0. */
            int curexpn = FFMAX(0, q->expn[0] - (i - 1) / 3);
            q->expn[i] = curexpn;
            q->mant[i] = q->mant[0];
        }
    } else {
        /* Scalar expounded: a 16-bit exponent/mantissa pair per sub-band. */
        n = (n - 3) >> 1;
        /* Each entry consumes two bytes, so require 2 * n input bytes; also
           clamp n to the table size as above. */
        if (n < 0 || n > 32 * 3 || s->buf_end - s->buf < 2 * n)
            return AVERROR(EINVAL);
        for (i = 0; i < n; i++) {
            x = bytestream_get_be16(&s->buf);
            q->expn[i] = x >> 11;
            q->mant[i] = x & 0x7ff;
        }
    }
    return 0;
}
1threat
import re


def start_withp(words):
    """Return the pair of P-prefixed words from the first entry in *words*
    containing two P-words separated by a single non-word character, or
    None when no entry matches."""
    pattern = re.compile("(P\w+)\W(P\w+)")
    for entry in words:
        hit = pattern.match(entry)
        if hit is not None:
            return hit.groups()
    return None
0debug
/**
 * Seek in an OMA stream, refreshing the DES IV for encrypted files.
 *
 * Delegates the actual seek to ff_pcm_read_seek().  For encrypted content
 * the 8 bytes immediately before the new position are the cipher-block
 * chaining IV, so they are re-read here; on any failure the IV is wiped
 * rather than left stale.
 *
 * @return 0 on success, a negative AVERROR code otherwise
 */
static int oma_read_seek(struct AVFormatContext *s, int stream_index, int64_t timestamp, int flags)
{
    OMAContext *oc = s->priv_data;
    int err = ff_pcm_read_seek(s, stream_index, timestamp, flags);

    /* Plain streams need no IV maintenance; return the seek result as-is. */
    if (!oc->encrypted)
        return err;

    /* Seek failed, or we landed before the encrypted payload: the previous
       8 bytes are not a valid IV, so wipe it. */
    if (err || avio_tell(s->pb) < oc->content_start)
        goto wipe;

    /* Step back one cipher block and reload the IV from the stream. */
    if ((err = avio_seek(s->pb, -8, SEEK_CUR)) < 0)
        goto wipe;
    if ((err = avio_read(s->pb, oc->iv, 8)) < 8) {
        /* Short read without an explicit error: treat as end-of-file. */
        if (err >= 0)
            err = AVERROR_EOF;
        goto wipe;
    }
    return 0;

wipe:
    /* Never decrypt with a stale IV after a failed seek. */
    memset(oc->iv, 0, 8);
    return err;
}
Why does it have to cast to "struct in_addr" in this code? : TCPStream::TCPStream(int sd, struct sockaddr_in* address) : msd(sd) { enter code here char ip[50]; inet_ntop(PF_INET, (struct in_addr*)&(address->sin_addr.s_addr), ip, sizeof(ip)-1); m_peerIP = ip; m_peerPort = ntohs(address->sin_port); enter code here } Why does it have to cast to "struct in_addr" in this code? What does '50' mean in this code?
0debug
Printing list items within a specific format : <p>I have a list containing:</p> <pre><code>lst = [10,20,30,40] </code></pre> <p>and i want to print it in a form as such:</p> <pre><code>output: 10 --&gt; 20 --&gt; 30 --&gt; 40 </code></pre> <p>i tried writing:</p> <pre><code>print("output: " + "--&gt;".join(lst)) </code></pre> <p>but I'm getting an error saying str format required. Would appreciate some help on this.</p>
0debug