text
stringlengths
2
99k
meta
dict
# visual configuration for CoNLL-X Portuguese data # (http://ilk.uvt.nl/conll/free_data.html) [labels] # POS tags adj | adj adv | adv art | art conj-c | conj-c conj-s | conj-s ec | ec in | in n | n num | num pp | pp pron-det | pron-det pron-indp | pron-indp pron-pers | pron-pers prop | prop prp | prp punc | punc ROOT | ROOT v-fin | v-fin v-ger | v-ger v-inf | v-inf vp | vp v-pcp | v-pcp # dependencies ACC | ACC ACC_gt_-PASS ACC-PASS | ACC-PASS ADVL | ADVL ADVO | ADVO ADVS | ADVS A_lt_ | A< A_lt_PRED | A<PRED APP | APP AS_lt_ | AS< AUX | AUX AUX_lt_ | AUX< CJT | CJT CJT_amp_ADVL | CJT&ADVL CJT_amp_PRED | CJT&PRED CMD | CMD CO | CO COM | COM DAT | DAT EXC | EXC FOC | FOC _gt_A | >A _gt_N | >N _gt_P | >P _gt_S | >S H | H KOMP_lt_ | KOMP< MV | MV N_lt_ | N< N_lt_PRED | N<PRED NUM_lt_ | NUM< OC | OC P | P PASS | PASS PCJT | PCJT PIV | PIV P_lt_ | P< PMV | PMV PRD | PRD PRED | PRED PRT-AUX | PRT-AUX PRT-AUX_gt_ | PRT-AUX> PRT-AUX_lt_ | PRT-AUX< PUNC | PUNC QUE | QUE _question_ | ? SC | SC S_lt_ | S< STA | STA SUB | SUB SUBJ | SUBJ TOP | TOP UTT | UTT VOC | VOC VOK | VOK [drawing] SPAN_DEFAULT borderColor:darken ROOT bgColor:#e3e3e3 adj bgColor:#fffda8 adv bgColor:#fffda8 art bgColor:#ccadf6 conj-c bgColor:white conj-s bgColor:white ec bgColor:#ccadf6 in bgColor:#ffe8be n bgColor:#a4bced num bgColor:#ccdaf6 pp bgColor:#ffe8be pron-det bgColor:#ccadf6 pron-indp bgColor:#ccdaf6 pron-pers bgColor:#ccdaf6 prop bgColor:#ccdaf6 prp bgColor:#ccdaf6 punc bgColor:#e3e3e3 _question_ bgColor:#e3e3e3 v-fin bgColor:#adf6a2 v-ger bgColor:#adf6a2 v-inf bgColor:#adf6a2 vp bgColor:#adf6a2 v-pcp bgColor:#adf6a2
{ "pile_set_name": "Github" }
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Eee PC WMI hotkey driver
 *
 * Copyright(C) 2010 Intel Corporation.
 * Copyright(C) 2010-2011 Corentin Chary <corentin.chary@gmail.com>
 *
 * Portions based on wistron_btns.c:
 * Copyright (C) 2005 Miloslav Trmac <mitr@volny.cz>
 * Copyright (C) 2005 Bernhard Rosenkraenzer <bero@arklinux.org>
 * Copyright (C) 2005 Dmitry Torokhov <dtor@mail.ru>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/input.h>
#include <linux/input/sparse-keymap.h>
#include <linux/dmi.h>
#include <linux/fb.h>
#include <linux/acpi.h>

#include "asus-wmi.h"

#define EEEPC_WMI_FILE "eeepc-wmi"

MODULE_AUTHOR("Corentin Chary <corentin.chary@gmail.com>");
MODULE_DESCRIPTION("Eee PC WMI Hotkey Driver");
MODULE_LICENSE("GPL");

#define EEEPC_ACPI_HID "ASUS010" /* old _HID used in eeepc-laptop */

#define EEEPC_WMI_EVENT_GUID "ABBC0F72-8EA1-11D1-00A0-C90629100000"

MODULE_ALIAS("wmi:"EEEPC_WMI_EVENT_GUID);

static bool hotplug_wireless;

module_param(hotplug_wireless, bool, 0444);
MODULE_PARM_DESC(hotplug_wireless,
		 "Enable hotplug for wireless device. "
		 "If your laptop needs that, please report to "
		 "acpi4asus-user@lists.sourceforge.net.");

/* Values for T101MT "Home" key */
#define HOME_PRESS	0xe4
#define HOME_HOLD	0xea
#define HOME_RELEASE	0xe5

/* Sparse keymap translating WMI scancodes to input key codes. */
static const struct key_entry eeepc_wmi_keymap[] = {
	{ KE_KEY, ASUS_WMI_BRN_DOWN, { KEY_BRIGHTNESSDOWN } },
	{ KE_KEY, ASUS_WMI_BRN_UP, { KEY_BRIGHTNESSUP } },
	/* Sleep already handled via generic ACPI code */
	{ KE_KEY, 0x30, { KEY_VOLUMEUP } },
	{ KE_KEY, 0x31, { KEY_VOLUMEDOWN } },
	{ KE_KEY, 0x32, { KEY_MUTE } },
	{ KE_KEY, 0x5c, { KEY_F15 } }, /* Power Gear key */
	{ KE_KEY, 0x5d, { KEY_WLAN } },
	{ KE_KEY, 0x6b, { KEY_TOUCHPAD_TOGGLE } }, /* Toggle Touchpad */
	{ KE_KEY, 0x82, { KEY_CAMERA } },
	{ KE_KEY, 0x83, { KEY_CAMERA_ZOOMIN } },
	{ KE_KEY, 0x88, { KEY_WLAN } },
	{ KE_KEY, 0xbd, { KEY_CAMERA } },
	{ KE_KEY, 0xcc, { KEY_SWITCHVIDEOMODE } },
	{ KE_KEY, 0xe0, { KEY_PROG1 } }, /* Task Manager */
	{ KE_KEY, 0xe1, { KEY_F14 } }, /* Change Resolution */
	{ KE_KEY, HOME_PRESS, { KEY_CONFIG } }, /* Home/Express gate key */
	{ KE_KEY, 0xe8, { KEY_SCREENLOCK } },
	{ KE_KEY, 0xe9, { KEY_DISPLAYTOGGLE } },
	{ KE_KEY, 0xeb, { KEY_CAMERA_ZOOMOUT } },
	{ KE_KEY, 0xec, { KEY_CAMERA_UP } },
	{ KE_KEY, 0xed, { KEY_CAMERA_DOWN } },
	{ KE_KEY, 0xee, { KEY_CAMERA_LEFT } },
	{ KE_KEY, 0xef, { KEY_CAMERA_RIGHT } },
	{ KE_KEY, 0xf3, { KEY_MENU } },
	{ KE_KEY, 0xf5, { KEY_HOMEPAGE } },
	{ KE_KEY, 0xf6, { KEY_ESC } },
	{ KE_END, 0},
};

/* Per-model quirk tables selected via DMI matching below. */
static struct quirk_entry quirk_asus_unknown = {
};

static struct quirk_entry quirk_asus_1000h = {
	.hotplug_wireless = true,
};

static struct quirk_entry quirk_asus_et2012_type1 = {
	.store_backlight_power = true,
};

static struct quirk_entry quirk_asus_et2012_type3 = {
	.scalar_panel_brightness = true,
	.store_backlight_power = true,
};

static struct quirk_entry quirk_asus_x101ch = {
	/* We need this when ACPI function doesn't do this well */
	.wmi_backlight_power = true,
};

static struct quirk_entry *quirks;

/*
 * Refine the ET2012 quirk selection by scanning the DMI OEM strings:
 * an "AEMS..." string encodes the panel type at offset 18.
 * Note: %24c reads exactly 24 chars and does NOT NUL-terminate;
 * only oemstring[18] is inspected, so that is safe here.
 */
static void et2012_quirks(void)
{
	const struct dmi_device *dev = NULL;
	char oemstring[30];

	while ((dev = dmi_find_device(DMI_DEV_TYPE_OEM_STRING, NULL, dev))) {
		if (sscanf(dev->name, "AEMS%24c", oemstring) == 1) {
			if (oemstring[18] == '1')
				quirks = &quirk_asus_et2012_type1;
			else if (oemstring[18] == '3')
				quirks = &quirk_asus_et2012_type3;
			break;
		}
	}
}

/* DMI callback: install the matched quirk table; ET2012 needs extra probing. */
static int dmi_matched(const struct dmi_system_id *dmi)
{
	char *model;

	quirks = dmi->driver_data;

	model = (char *)dmi->matches[1].substr;
	if (unlikely(strncmp(model, "ET2012", 6) == 0))
		et2012_quirks();

	return 1;
}

static const struct dmi_system_id asus_quirks[] = {
	{
		.callback = dmi_matched,
		.ident = "ASUSTeK Computer INC. 1000H",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer INC."),
			DMI_MATCH(DMI_PRODUCT_NAME, "1000H"),
		},
		.driver_data = &quirk_asus_1000h,
	},
	{
		.callback = dmi_matched,
		.ident = "ASUSTeK Computer INC. ET2012E/I",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer INC."),
			DMI_MATCH(DMI_PRODUCT_NAME, "ET2012"),
		},
		.driver_data = &quirk_asus_unknown,
	},
	{
		.callback = dmi_matched,
		.ident = "ASUSTeK Computer INC. X101CH",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
			DMI_MATCH(DMI_PRODUCT_NAME, "X101CH"),
		},
		.driver_data = &quirk_asus_x101ch,
	},
	{
		.callback = dmi_matched,
		.ident = "ASUSTeK Computer INC. 1015CX",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
			DMI_MATCH(DMI_PRODUCT_NAME, "1015CX"),
		},
		.driver_data = &quirk_asus_x101ch,
	},
	{},
};

/*
 * Translate the T101MT "Home" key's press/hold/release scancodes into a
 * single KEY_CONFIG press/release pair, suppressing autorelease and the
 * intermediate "hold" event.
 */
static void eeepc_wmi_key_filter(struct asus_wmi_driver *asus_wmi, int *code,
				 unsigned int *value, bool *autorelease)
{
	switch (*code) {
	case HOME_PRESS:
		*value = 1;
		*autorelease = 0;
		break;
	case HOME_HOLD:
		*code = ASUS_WMI_KEY_IGNORE;
		break;
	case HOME_RELEASE:
		*code = HOME_PRESS;
		*value = 0;
		*autorelease = 0;
		break;
	}
}

/* Refuse to bind if the legacy ACPI ATKD device is present and active. */
static int eeepc_wmi_probe(struct platform_device *pdev)
{
	if (acpi_dev_found(EEEPC_ACPI_HID)) {
		pr_warn("Found legacy ATKD device (%s)\n", EEEPC_ACPI_HID);
		pr_warn("WMI device present, but legacy ATKD device is also "
			"present and enabled\n");
		pr_warn("You probably booted with acpi_osi=\"Linux\" or "
			"acpi_osi=\"!Windows 2009\"\n");
		pr_warn("Can't load eeepc-wmi, use default acpi_osi "
			"(preferred) or eeepc-laptop\n");
		return -EBUSY;
	}
	return 0;
}

/* Select quirks via DMI and publish them on the shared asus-wmi driver. */
static void eeepc_wmi_quirks(struct asus_wmi_driver *driver)
{
	quirks = &quirk_asus_unknown;
	quirks->hotplug_wireless = hotplug_wireless;

	dmi_check_system(asus_quirks);

	driver->quirks = quirks;
	driver->quirks->wapf = -1;
	driver->panel_power = FB_BLANK_UNBLANK;
}

static struct asus_wmi_driver asus_wmi_driver = {
	.name = EEEPC_WMI_FILE,
	.owner = THIS_MODULE,
	.event_guid = EEEPC_WMI_EVENT_GUID,
	.keymap = eeepc_wmi_keymap,
	.input_name = "Eee PC WMI hotkeys",
	.input_phys = EEEPC_WMI_FILE "/input0",
	.key_filter = eeepc_wmi_key_filter,
	.probe = eeepc_wmi_probe,
	.detect_quirks = eeepc_wmi_quirks,
};

static int __init eeepc_wmi_init(void)
{
	return asus_wmi_register_driver(&asus_wmi_driver);
}

static void __exit eeepc_wmi_exit(void)
{
	asus_wmi_unregister_driver(&asus_wmi_driver);
}

module_init(eeepc_wmi_init);
module_exit(eeepc_wmi_exit);
{ "pile_set_name": "Github" }
// Copyright 2017, OpenCensus Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package internal import ( "strings" "unicode" ) const labelKeySizeLimit = 100 // Sanitize returns a string that is trunacated to 100 characters if it's too // long, and replaces non-alphanumeric characters to underscores. func Sanitize(s string) string { if len(s) == 0 { return s } if len(s) > labelKeySizeLimit { s = s[:labelKeySizeLimit] } s = strings.Map(sanitizeRune, s) if unicode.IsDigit(rune(s[0])) { s = "key_" + s } if s[0] == '_' { s = "key" + s } return s } // converts anything that is not a letter or digit to an underscore func sanitizeRune(r rune) rune { if unicode.IsLetter(r) || unicode.IsDigit(r) { return r } // Everything else turns into an underscore return '_' }
{ "pile_set_name": "Github" }
// SPDX-License-Identifier: GPL-2.0
#include <linux/compiler.h>
#include <linux/rbtree.h>
#include <inttypes.h>
#include <string.h>
#include "map.h"
#include "symbol.h"
#include "util.h"
#include "tests.h"
#include "debug.h"
#include "machine.h"

/* Unmap a kallsyms map-relative address back to a memory address. */
#define UM(x) kallsyms_map->unmap_ip(kallsyms_map, (x))

/*
 * Cross-check the symbols and maps loaded from vmlinux against the ones
 * loaded from /proc/kallsyms; warn on mismatches, fail (-1) only when a
 * vmlinux symbol is missing from kallsyms entirely.
 */
int test__vmlinux_matches_kallsyms(struct test *test __maybe_unused, int subtest __maybe_unused)
{
	int err = -1;
	struct rb_node *nd;
	struct symbol *sym;
	struct map *kallsyms_map, *vmlinux_map, *map;
	struct machine kallsyms, vmlinux;
	struct maps *maps = machine__kernel_maps(&vmlinux);
	u64 mem_start, mem_end;
	bool header_printed;

	/*
	 * Step 1:
	 *
	 * Init the machines that will hold kernel, modules obtained from
	 * both vmlinux + .ko files and from /proc/kallsyms split by modules.
	 */
	machine__init(&kallsyms, "", HOST_KERNEL_ID);
	machine__init(&vmlinux, "", HOST_KERNEL_ID);

	/*
	 * Step 2:
	 *
	 * Create the kernel maps for kallsyms and the DSO where we will then
	 * load /proc/kallsyms. Also create the modules maps from /proc/modules
	 * and find the .ko files that match them in /lib/modules/`uname -r`/.
	 */
	if (machine__create_kernel_maps(&kallsyms) < 0) {
		pr_debug("machine__create_kernel_maps ");
		goto out;
	}

	/*
	 * Step 3:
	 *
	 * Load and split /proc/kallsyms into multiple maps, one per module.
	 * Do not use kcore, as this test was designed before kcore support
	 * and has parts that only make sense if using the non-kcore code.
	 * XXX: extend it to stress the kcorre code as well, hint: the list
	 * of modules extracted from /proc/kcore, in its current form, can't
	 * be compacted against the list of modules found in the "vmlinux"
	 * code and with the one got from /proc/modules from the "kallsyms" code.
	 */
	if (machine__load_kallsyms(&kallsyms, "/proc/kallsyms") <= 0) {
		pr_debug("dso__load_kallsyms ");
		goto out;
	}

	/*
	 * Step 4:
	 *
	 * kallsyms will be internally on demand sorted by name so that we can
	 * find the reference relocation * symbol, i.e. the symbol we will use
	 * to see if the running kernel was relocated by checking if it has the
	 * same value in the vmlinux file we load.
	 */
	kallsyms_map = machine__kernel_map(&kallsyms);

	/*
	 * Step 5:
	 *
	 * Now repeat step 2, this time for the vmlinux file we'll auto-locate.
	 */
	if (machine__create_kernel_maps(&vmlinux) < 0) {
		pr_debug("machine__create_kernel_maps ");
		goto out;
	}

	vmlinux_map = machine__kernel_map(&vmlinux);

	/*
	 * Step 6:
	 *
	 * Locate a vmlinux file in the vmlinux path that has a buildid that
	 * matches the one of the running kernel.
	 *
	 * While doing that look if we find the ref reloc symbol, if we find it
	 * we'll have its ref_reloc_symbol.unrelocated_addr and then
	 * maps__reloc_vmlinux will notice and set proper ->[un]map_ip routines
	 * to fixup the symbols.
	 */
	if (machine__load_vmlinux_path(&vmlinux) <= 0) {
		pr_debug("Couldn't find a vmlinux that matches the kernel running on this machine, skipping test\n");
		err = TEST_SKIP;
		goto out;
	}

	err = 0;
	/*
	 * Step 7:
	 *
	 * Now look at the symbols in the vmlinux DSO and check if we find all of them
	 * in the kallsyms dso. For the ones that are in both, check its names and
	 * end addresses too.
	 */
	map__for_each_symbol(vmlinux_map, sym, nd) {
		struct symbol *pair, *first_pair;

		sym = rb_entry(nd, struct symbol, rb_node);

		/* Skip zero-sized symbols. */
		if (sym->start == sym->end)
			continue;

		mem_start = vmlinux_map->unmap_ip(vmlinux_map, sym->start);
		mem_end = vmlinux_map->unmap_ip(vmlinux_map, sym->end);

		first_pair = machine__find_kernel_symbol(&kallsyms, mem_start, NULL);
		pair = first_pair;

		if (pair && UM(pair->start) == mem_start) {
next_pair:
			if (arch__compare_symbol_names(sym->name, pair->name) == 0) {
				/*
				 * kallsyms don't have the symbol end, so we
				 * set that by using the next symbol start - 1,
				 * in some cases we get this up to a page
				 * wrong, trace_kmalloc when I was developing
				 * this code was one such example, 2106 bytes
				 * off the real size. More than that and we
				 * _really_ have a problem.
				 */
				s64 skew = mem_end - UM(pair->end);
				if (llabs(skew) >= page_size)
					pr_debug("WARN: %#" PRIx64 ": diff end addr for %s v: %#" PRIx64 " k: %#" PRIx64 "\n",
						 mem_start, sym->name, mem_end,
						 UM(pair->end));

				/*
				 * Do not count this as a failure, because we
				 * could really find a case where it's not
				 * possible to get proper function end from
				 * kallsyms.
				 */
				continue;
			} else {
				pair = machine__find_kernel_symbol_by_name(&kallsyms, sym->name, NULL);
				if (pair) {
					if (UM(pair->start) == mem_start)
						goto next_pair;

					pr_debug("WARN: %#" PRIx64 ": diff name v: %s k: %s\n",
						 mem_start, sym->name, pair->name);
				} else {
					pr_debug("WARN: %#" PRIx64 ": diff name v: %s k: %s\n",
						 mem_start, sym->name, first_pair->name);
				}

				continue;
			}
		} else
			pr_debug("ERR : %#" PRIx64 ": %s not on kallsyms\n",
				 mem_start, sym->name);

		err = -1;
	}

	/* The remaining map comparisons are informational, verbose-only. */
	if (verbose <= 0)
		goto out;

	header_printed = false;

	for (map = maps__first(maps); map; map = map__next(map)) {
		struct map *
		/*
		 * If it is the kernel, kallsyms is always "[kernel.kallsyms]", while
		 * the kernel will have the path for the vmlinux file being used,
		 * so use the short name, less descriptive but the same ("[kernel]" in
		 * both cases.
		 */
		pair = map_groups__find_by_name(&kallsyms.kmaps,
						(map->dso->kernel ?
							map->dso->short_name :
							map->dso->name));
		if (pair) {
			/* Mark as seen so the kallsyms-only pass below skips it. */
			pair->priv = 1;
		} else {
			if (!header_printed) {
				pr_info("WARN: Maps only in vmlinux:\n");
				header_printed = true;
			}
			map__fprintf(map, stderr);
		}
	}

	header_printed = false;

	for (map = maps__first(maps); map; map = map__next(map)) {
		struct map *pair;

		mem_start = vmlinux_map->unmap_ip(vmlinux_map, map->start);
		mem_end = vmlinux_map->unmap_ip(vmlinux_map, map->end);

		pair = map_groups__find(&kallsyms.kmaps, mem_start);
		if (pair == NULL || pair->priv)
			continue;

		if (pair->start == mem_start) {
			if (!header_printed) {
				pr_info("WARN: Maps in vmlinux with a different name in kallsyms:\n");
				header_printed = true;
			}

			pr_info("WARN: %" PRIx64 "-%" PRIx64 " %" PRIx64 " %s in kallsyms as",
				map->start, map->end, map->pgoff, map->dso->name);
			if (mem_end != pair->end)
				pr_info(":\nWARN: *%" PRIx64 "-%" PRIx64 " %" PRIx64,
					pair->start, pair->end, pair->pgoff);
			pr_info(" %s\n", pair->dso->name);
			pair->priv = 1;
		}
	}

	header_printed = false;

	maps = machine__kernel_maps(&kallsyms);

	for (map = maps__first(maps); map; map = map__next(map)) {
		if (!map->priv) {
			if (!header_printed) {
				pr_info("WARN: Maps only in kallsyms:\n");
				header_printed = true;
			}
			map__fprintf(map, stderr);
		}
	}
out:
	machine__exit(&kallsyms);
	machine__exit(&vmlinux);
	return err;
}
{ "pile_set_name": "Github" }
//
// TOSMBConstants.h
// Copyright 2015-2017 Timothy Oliver
//
// This file is dual-licensed under both the MIT License, and the LGPL v2.1 License.
//
// -------------------------------------------------------------------------------
// This library is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation; either
// version 2.1 of the License, or (at your option) any later version.
//
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
// Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public
// License along with this library; if not, write to the Free Software
// Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA
// -------------------------------------------------------------------------------

#ifndef _TOSMBCLIENT_CONSTANTS_H
#define _TOSMBCLIENT_CONSTANTS_H

#import <Foundation/Foundation.h>

/* NSError domain used by all TOSMBSession errors. */
extern NSString * const TOSMBClientErrorDomain;

/** SMB Error Values */
typedef NS_ENUM(NSInteger, TOSMBSessionErrorCode) {
    TOSMBSessionErrorCodeUnknown = 0,                       /* Error code was not specified. */
    TOSMBSessionErrorNotOnWiFi = 1000,                      /* The device isn't presently connected to a local network. */
    TOSMBSessionErrorCodeUnableToResolveAddress = 1001,     /* Not enough connection information to resolve was supplied. */
    TOSMBSessionErrorCodeUnableToConnect = 1002,            /* The connection attempt failed. */
    TOSMBSessionErrorCodeAuthenticationFailed = 1003,       /* The username/password failed (And guest login is not available) */
    TOSMBSessionErrorCodeShareConnectionFailed = 1004,      /* Connection attempt to a share in the device failed. */
    TOSMBSessionErrorCodeFileNotFound = 1005,               /* Unable to locate the requested file. */
    TOSMBSessionErrorCodeDirectoryDownloaded = 1006,        /* A directory was attempted to be downloaded. */
    TOSMBSessionErrorCodeFileDownloadFailed = 1007,         /* The file could not be downloaded, possible network error. */
};

/** NetBIOS Service Device Types */
typedef NS_ENUM(NSInteger, TONetBIOSNameServiceType) {
    TONetBIOSNameServiceTypeWorkStation,
    TONetBIOSNameServiceTypeMessenger,
    TONetBIOSNameServiceTypeFileServer,
    TONetBIOSNameServiceTypeDomainMaster
};

/** SMB File Download Connection State */
typedef NS_ENUM(NSInteger, TOSMBSessionDownloadTaskState) {
    TOSMBSessionDownloadTaskStateReady,
    TOSMBSessionDownloadTaskStateRunning,
    TOSMBSessionDownloadTaskStateSuspended,
    TOSMBSessionDownloadTaskStateCancelled,
    TOSMBSessionDownloadTaskStateCompleted,
    TOSMBSessionDownloadTaskStateFailed
} __deprecated_enum_msg("Use TOSMBSessionTaskState values instead");

/** SMB Connection State */
typedef NS_ENUM(NSUInteger, TOSMBSessionTaskState) {
    TOSMBSessionTaskStateReady,
    TOSMBSessionTaskStateRunning,
    TOSMBSessionTaskStateSuspended,
    TOSMBSessionTaskStateCancelled,
    TOSMBSessionTaskStateCompleted,
    TOSMBSessionTaskStateFailed
};

/* Maps a raw NetBIOS device-type byte to the corresponding enum value. */
extern TONetBIOSNameServiceType TONetBIOSNameServiceTypeForCType(char type);
/* Inverse mapping back to the raw NetBIOS type byte.
 * NOTE(review): the parameter is declared `char` but presumably receives a
 * TONetBIOSNameServiceType value — confirm against the implementation. */
extern char TONetBIOSNameServiceCTypeForType(char type);
/* Human-readable description for an error code. */
extern NSString *localizedStringForErrorCode(TOSMBSessionErrorCode errorCode);
/* Builds an NSError in TOSMBClientErrorDomain for an error code. */
extern NSError *errorForErrorCode(TOSMBSessionErrorCode errorCode);

/* Fix: the include guard previously ended before the extern declarations
 * above, leaving them outside the guard; #endif now closes the whole file. */
#endif
{ "pile_set_name": "Github" }
// untested sections: 2 package matchers import ( "fmt" "github.com/onsi/gomega/format" ) type BeTrueMatcher struct { } func (matcher *BeTrueMatcher) Match(actual interface{}) (success bool, err error) { if !isBool(actual) { return false, fmt.Errorf("Expected a boolean. Got:\n%s", format.Object(actual, 1)) } return actual.(bool), nil } func (matcher *BeTrueMatcher) FailureMessage(actual interface{}) (message string) { return format.Message(actual, "to be true") } func (matcher *BeTrueMatcher) NegatedFailureMessage(actual interface{}) (message string) { return format.Message(actual, "not to be true") }
{ "pile_set_name": "Github" }
<?xml version="1.0" encoding="utf-8"?> <configuration> <runtime> <assemblyBinding xmlns="urn:schemas-microsoft-com:asm.v1"> <dependentAssembly> <assemblyIdentity name="Accord" publicKeyToken="fa1a88e29555ccf7" culture="neutral" /> <bindingRedirect oldVersion="0.0.0.0-3.7.2.0" newVersion="3.7.2.0" /> </dependentAssembly> <dependentAssembly> <assemblyIdentity name="Accord.MachineLearning" publicKeyToken="fa1a88e29555ccf7" culture="neutral" /> <bindingRedirect oldVersion="0.0.0.0-3.7.2.0" newVersion="3.7.2.0" /> </dependentAssembly> <dependentAssembly> <assemblyIdentity name="Accord.Math" publicKeyToken="fa1a88e29555ccf7" culture="neutral" /> <bindingRedirect oldVersion="0.0.0.0-3.7.2.0" newVersion="3.7.2.0" /> </dependentAssembly> <dependentAssembly> <assemblyIdentity name="Accord.Statistics" publicKeyToken="fa1a88e29555ccf7" culture="neutral" /> <bindingRedirect oldVersion="0.0.0.0-3.7.2.0" newVersion="3.7.2.0" /> </dependentAssembly> <dependentAssembly> <assemblyIdentity name="Accord.Math.Core" publicKeyToken="fa1a88e29555ccf7" culture="neutral" /> <bindingRedirect oldVersion="0.0.0.0-3.7.2.0" newVersion="3.7.2.0" /> </dependentAssembly> <dependentAssembly> <assemblyIdentity name="Accord.Imaging" publicKeyToken="fa1a88e29555ccf7" culture="neutral" /> <bindingRedirect oldVersion="0.0.0.0-3.5.1.0" newVersion="3.5.1.0" /> </dependentAssembly> <dependentAssembly> <assemblyIdentity name="Accord.Video" publicKeyToken="fa1a88e29555ccf7" culture="neutral" /> <bindingRedirect oldVersion="0.0.0.0-3.7.2.0" newVersion="3.7.2.0" /> </dependentAssembly> <dependentAssembly> <assemblyIdentity name="Accord.Vision" publicKeyToken="fa1a88e29555ccf7" culture="neutral" /> <bindingRedirect oldVersion="0.0.0.0-3.2.1.0" newVersion="3.2.1.0" /> </dependentAssembly> </assemblyBinding> </runtime> </configuration>
{ "pile_set_name": "Github" }
require('module-alias/register');

const {expect} = require('chai');

// Import utils
const helper = require('@utils/helpers');
const loginCommon = require('@commonTests/loginBO');

// Import pages
const dashboardPage = require('@pages/BO/dashboard');
const seoAndUrlsPage = require('@pages/BO/shopParameters/trafficAndSeo/seoAndUrls');
const addSeoAndUrlPage = require('@pages/BO/shopParameters/trafficAndSeo/seoAndUrls/add');

// Import data
const {orderReturn, pdfOrderReturn} = require('@data/demo/seoPages');
const SeoPageFaker = require('@data/faker/seoPage');

// Import test context
const testContext = require('@utils/testContext');

const baseContext = 'functional_BO_shopParameters_TrafficAndSeo_seoAndUrls_bulkDeleteSeoPages';

let browserContext;
let page;

// Two throwaway SEO pages whose titles share the 'ToDelete' prefix used to
// filter them back out for the bulk-delete step.
const seoPagesData = [
  new SeoPageFaker({page: orderReturn.page, title: 'ToDelete1'}),
  new SeoPageFaker({page: pdfOrderReturn.page, title: 'ToDelete2'}),
];

// Baseline row count, captured before creation and re-checked after deletion.
let numberOfSeoPages = 0;

// End-to-end scenario: create two SEO pages, bulk-delete them, and verify the
// grid returns to its original row count.
describe('Bulk delete seo pages', async () => {
  // before and after functions
  before(async function () {
    browserContext = await helper.createBrowserContext(this.browser);
    page = await helper.newTab(browserContext);
  });

  after(async () => {
    await helper.closeBrowserContext(browserContext);
  });

  it('should login in BO', async function () {
    await loginCommon.loginBO(this, page);
  });

  it('should go to \'Shop parameters > SEO and Urls\' page', async function () {
    await testContext.addContextItem(this, 'testIdentifier', 'goToSeoAndUrlsPage', baseContext);

    await dashboardPage.goToSubMenu(
      page,
      dashboardPage.shopParametersParentLink,
      dashboardPage.trafficAndSeoLink,
    );

    await seoAndUrlsPage.closeSfToolBar(page);

    const pageTitle = await seoAndUrlsPage.getPageTitle(page);
    await expect(pageTitle).to.contains(seoAndUrlsPage.pageTitle);
  });

  it('should reset all filters and get number of SEO pages in BO', async function () {
    await testContext.addContextItem(this, 'testIdentifier', 'resetFilterFirst', baseContext);

    numberOfSeoPages = await seoAndUrlsPage.resetAndGetNumberOfLines(page);
    await expect(numberOfSeoPages).to.be.above(0);
  });

  describe('Create 2 seo pages', async () => {
    seoPagesData.forEach((seoPageData, index) => {
      it('should go to new seo page page', async function () {
        await testContext.addContextItem(this, 'testIdentifier', `goToNewSeoPage${index + 1}`, baseContext);

        await seoAndUrlsPage.goToNewSeoUrlPage(page);

        const pageTitle = await addSeoAndUrlPage.getPageTitle(page);
        await expect(pageTitle).to.contains(addSeoAndUrlPage.pageTitle);
      });

      it('should create seo page', async function () {
        await testContext.addContextItem(this, 'testIdentifier', `createSeoPage${index + 1}`, baseContext);

        const result = await addSeoAndUrlPage.createEditSeoPage(page, seoPageData);
        await expect(result).to.equal(seoAndUrlsPage.successfulCreationMessage);

        // Each iteration adds one row on top of the baseline count.
        const numberOfSeoPagesAfterCreation = await seoAndUrlsPage.getNumberOfElementInGrid(page);
        await expect(numberOfSeoPagesAfterCreation).to.equal(numberOfSeoPages + 1 + index);
      });
    });
  });

  describe('Delete seo pages by bulk actions', async () => {
    it('should filter by seo page name', async function () {
      await testContext.addContextItem(this, 'testIdentifier', 'filterToDelete', baseContext);

      await seoAndUrlsPage.filterTable(page, 'title', 'toDelete');

      const textColumn = await seoAndUrlsPage.getTextColumnFromTable(page, 1, 'title');
      await expect(textColumn).to.contains('ToDelete');
    });

    it('should bulk delete seo page', async function () {
      await testContext.addContextItem(this, 'testIdentifier', 'bulkDeleteSeoPage', baseContext);

      // Delete seo page in first row
      const result = await seoAndUrlsPage.bulkDeleteSeoUrlPage(page);
      await expect(result).to.be.equal(seoAndUrlsPage.successfulMultiDeleteMessage);
    });

    it('should reset filter and check number of seo pages', async function () {
      await testContext.addContextItem(this, 'testIdentifier', 'resetAfterDelete', baseContext);

      const numberOfSeoPagesAfterCreation = await seoAndUrlsPage.resetAndGetNumberOfLines(page);
      await expect(numberOfSeoPagesAfterCreation).to.equal(numberOfSeoPages);
    });
  });
});
{ "pile_set_name": "Github" }
import React from 'react'; import {ApolloIcon} from '@apollo/space-kit/icons/ApolloIcon'; export default function Logo() { return ( <ApolloIcon style={{ display: 'block', width: 64, height: 'auto' }} /> ); }
{ "pile_set_name": "Github" }
.\" ************************************************************************** .\" * _ _ ____ _ .\" * Project ___| | | | _ \| | .\" * / __| | | | |_) | | .\" * | (__| |_| | _ <| |___ .\" * \___|\___/|_| \_\_____| .\" * .\" * Copyright (C) 1998 - 2015, Daniel Stenberg, <daniel@haxx.se>, et al. .\" * .\" * This software is licensed as described in the file COPYING, which .\" * you should have received as part of this distribution. The terms .\" * are also available at https://curl.haxx.se/docs/copyright.html. .\" * .\" * You may opt to use, copy, modify, merge, publish, distribute and/or sell .\" * copies of the Software, and permit persons to whom the Software is .\" * furnished to do so, under the terms of the COPYING file. .\" * .\" * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY .\" * KIND, either express or implied. .\" * .\" ************************************************************************** .\" .TH CURLINFO_CONTENT_TYPE 3 "February 03, 2016" "libcurl 7.54.0" "curl_easy_getinfo options" .SH NAME CURLINFO_CONTENT_TYPE \- get Content-Type .SH SYNOPSIS #include <curl/curl.h> CURLcode curl_easy_getinfo(CURL *handle, CURLINFO_CONTENT_TYPE, char **ct); .SH DESCRIPTION Pass a pointer to a char pointer to receive the content-type of the downloaded object. This is the value read from the Content-Type: field. If you get NULL, it means that the server didn't send a valid Content-Type header or that the protocol used doesn't support this. The \fBct\fP pointer will be NULL or pointing to private memory you MUST NOT free it - it gets freed when you call \fIcurl_easy_cleanup(3)\fP on the corresponding CURL handle. .SH PROTOCOLS HTTP(S) .SH EXAMPLE TODO .SH AVAILABILITY Added in 7.9.4 .SH RETURN VALUE Returns CURLE_OK if the option is supported, and CURLE_UNKNOWN_OPTION if not. .SH "SEE ALSO" .BR curl_easy_getinfo "(3), " curl_easy_setopt "(3), "
{ "pile_set_name": "Github" }
{ "IsLinux" : true, "UseLinuxVersion": false, "osVersion": "CentOS 7", "tagTemplates": [ "centos-#shorttag#" ], "SubRepository": "test-deps", "OptionalTests": [ "test-deps" ], "TestProperties": { "size": 575 } }
{ "pile_set_name": "Github" }
<template name="elements_vulnerabilities_txorigin"> <div class="row clear account-page-notification"> <div class="row clear"> <a href="{{#if owners}} {{#if (isWatchOnly _id)}} # {{else}} {{pathFor route='createAccount' query=upgradeParams}} {{/if}} {{else}} {{pathFor route='account' address=walletAddress}} {{/if}}"> {{#if owners}} {{i18n "wallet.app.warnings.txOriginVulnerabilityWallet"}} {{#unless (isWatchOnly _id)}} <br><strong>{{i18n "wallet.app.warnings.txOriginVulnerabilityCallToAction"}}</strong> {{/unless}} {{else}} {{i18n "wallet.app.warnings.txOriginVulnerabilityAccount"}} {{/if}} </a> </div> <a href="https://blog.ethereum.org/2016/06/24/security-alert-smart-contract-wallets-created-in-frontier-are-vulnerable-to-phishing-attacks/" class="learn-more-link" target="_blank">{{i18n "wallet.app.warnings.learnMore"}}</a> </div> </template>
{ "pile_set_name": "Github" }
<?php
/**
 * Zend_Sniffs_NamingConventions_ValidVariableNameSniff.
 *
 * PHP version 5
 *
 * @category  PHP
 * @package   PHP_CodeSniffer
 * @author    Greg Sherwood <gsherwood@squiz.net>
 * @author    Marc McIntyre <mmcintyre@squiz.net>
 * @copyright 2006-2014 Squiz Pty Ltd (ABN 77 084 670 600)
 * @license   https://github.com/squizlabs/PHP_CodeSniffer/blob/master/licence.txt BSD Licence
 * @link      http://pear.php.net/package/PHP_CodeSniffer
 */

if (class_exists('PHP_CodeSniffer_Standards_AbstractVariableSniff', true) === false) {
    throw new PHP_CodeSniffer_Exception('Class PHP_CodeSniffer_Standards_AbstractVariableSniff not found');
}

/**
 * Zend_Sniffs_NamingConventions_ValidVariableNameSniff.
 *
 * Checks the naming of variables and member variables.
 *
 * @category  PHP
 * @package   PHP_CodeSniffer
 * @author    Greg Sherwood <gsherwood@squiz.net>
 * @author    Marc McIntyre <mmcintyre@squiz.net>
 * @copyright 2006-2014 Squiz Pty Ltd (ABN 77 084 670 600)
 * @license   https://github.com/squizlabs/PHP_CodeSniffer/blob/master/licence.txt BSD Licence
 * @version   Release: @package_version@
 * @link      http://pear.php.net/package/PHP_CodeSniffer
 */
class Zend_Sniffs_NamingConventions_ValidVariableNameSniff extends PHP_CodeSniffer_Standards_AbstractVariableSniff
{

    /**
     * Tokens to ignore so that we can find a DOUBLE_COLON.
     *
     * NOTE(review): this property is not referenced anywhere in this class;
     * presumably leftover from a copied sniff — confirm before removing.
     *
     * @var array
     */
    private $_ignore = array(
                        T_WHITESPACE,
                        T_COMMENT,
                       );


    /**
     * Processes this test, when one of its tokens is encountered.
     *
     * Flags non-camelCaps variable names (ignoring PHP superglobals and, for
     * object/static member accesses, a single leading underscore) and warns
     * when a variable name contains digits.
     *
     * @param PHP_CodeSniffer_File $phpcsFile The file being scanned.
     * @param int                  $stackPtr  The position of the current token in the
     *                                        stack passed in $tokens.
     *
     * @return void
     */
    protected function processVariable(PHP_CodeSniffer_File $phpcsFile, $stackPtr)
    {
        $tokens  = $phpcsFile->getTokens();
        $varName = ltrim($tokens[$stackPtr]['content'], '$');

        $phpReservedVars = array(
                            '_SERVER',
                            '_GET',
                            '_POST',
                            '_REQUEST',
                            '_SESSION',
                            '_ENV',
                            '_COOKIE',
                            '_FILES',
                            'GLOBALS',
                            'http_response_header',
                            'HTTP_RAW_POST_DATA',
                            'php_errormsg',
                           );

        // If it's a php reserved var, then its ok.
        if (in_array($varName, $phpReservedVars) === true) {
            return;
        }

        $objOperator = $phpcsFile->findNext(array(T_WHITESPACE), ($stackPtr + 1), null, true);
        if ($tokens[$objOperator]['code'] === T_OBJECT_OPERATOR) {
            // Check to see if we are using a variable from an object.
            $var = $phpcsFile->findNext(array(T_WHITESPACE), ($objOperator + 1), null, true);
            if ($tokens[$var]['code'] === T_STRING) {
                // Either a var name or a function call, so check for bracket.
                $bracket = $phpcsFile->findNext(array(T_WHITESPACE), ($var + 1), null, true);

                if ($tokens[$bracket]['code'] !== T_OPEN_PARENTHESIS) {
                    $objVarName = $tokens[$var]['content'];

                    // There is no way for us to know if the var is public or private,
                    // so we have to ignore a leading underscore if there is one and just
                    // check the main part of the variable name.
                    $originalVarName = $objVarName;
                    if (substr($objVarName, 0, 1) === '_') {
                        $objVarName = substr($objVarName, 1);
                    }

                    if (PHP_CodeSniffer::isCamelCaps($objVarName, false, true, false) === false) {
                        $error = 'Variable "%s" is not in valid camel caps format';
                        $data  = array($originalVarName);
                        $phpcsFile->addError($error, $var, 'NotCamelCaps', $data);
                    } else if (preg_match('|\d|', $objVarName) === 1) {
                        $warning = 'Variable "%s" contains numbers but this is discouraged';
                        $data    = array($originalVarName);
                        $phpcsFile->addWarning($warning, $stackPtr, 'ContainsNumbers', $data);
                    }
                }//end if
            }//end if
        }//end if

        // There is no way for us to know if the var is public or private,
        // so we have to ignore a leading underscore if there is one and just
        // check the main part of the variable name.
        $originalVarName = $varName;
        if (substr($varName, 0, 1) === '_') {
            $objOperator = $phpcsFile->findPrevious(array(T_WHITESPACE), ($stackPtr - 1), null, true);
            if ($tokens[$objOperator]['code'] === T_DOUBLE_COLON) {
                // The variable lives within a class, and is referenced like
                // this: MyClass::$_variable, so we don't know its scope.
                $inClass = true;
            } else {
                $inClass = $phpcsFile->hasCondition($stackPtr, array(T_CLASS, T_INTERFACE, T_TRAIT));
            }

            if ($inClass === true) {
                $varName = substr($varName, 1);
            }
        }

        if (PHP_CodeSniffer::isCamelCaps($varName, false, true, false) === false) {
            $error = 'Variable "%s" is not in valid camel caps format';
            $data  = array($originalVarName);
            $phpcsFile->addError($error, $stackPtr, 'NotCamelCaps', $data);
        } else if (preg_match('|\d|', $varName) === 1) {
            $warning = 'Variable "%s" contains numbers but this is discouraged';
            $data    = array($originalVarName);
            $phpcsFile->addWarning($warning, $stackPtr, 'ContainsNumbers', $data);
        }

    }//end processVariable()


    /**
     * Processes class member variables.
     *
     * Enforces the underscore-prefix convention (none for public members, a
     * leading underscore for protected/private) and camelCaps naming.
     *
     * @param PHP_CodeSniffer_File $phpcsFile The file being scanned.
     * @param int                  $stackPtr  The position of the current token in the
     *                                        stack passed in $tokens.
     *
     * @return void
     */
    protected function processMemberVar(PHP_CodeSniffer_File $phpcsFile, $stackPtr)
    {
        $tokens      = $phpcsFile->getTokens();
        $varName     = ltrim($tokens[$stackPtr]['content'], '$');
        $memberProps = $phpcsFile->getMemberProperties($stackPtr);
        $public      = ($memberProps['scope'] === 'public');

        if ($public === true) {
            if (substr($varName, 0, 1) === '_') {
                $error = 'Public member variable "%s" must not contain a leading underscore';
                $data  = array($varName);
                $phpcsFile->addError($error, $stackPtr, 'PublicHasUnderscore', $data);
                return;
            }
        } else {
            if (substr($varName, 0, 1) !== '_') {
                $scope = ucfirst($memberProps['scope']);
                $error = '%s member variable "%s" must contain a leading underscore';
                $data  = array(
                          $scope,
                          $varName,
                         );
                $phpcsFile->addError($error, $stackPtr, 'PrivateNoUnderscore', $data);
                return;
            }
        }

        if (PHP_CodeSniffer::isCamelCaps($varName, false, $public, false) === false) {
            $error = 'Member variable "%s" is not in valid camel caps format';
            $data  = array($varName);
            $phpcsFile->addError($error, $stackPtr, 'MemberVarNotCamelCaps', $data);
        } else if (preg_match('|\d|', $varName) === 1) {
            $warning = 'Member variable "%s" contains numbers but this is discouraged';
            $data    = array($varName);
            $phpcsFile->addWarning($warning, $stackPtr, 'MemberVarContainsNumbers', $data);
        }

    }//end processMemberVar()


    /**
     * Processes the variable found within a double quoted string.
     *
     * Extracts every interpolated variable from the string token and applies
     * the same camelCaps / digit checks as processVariable().
     *
     * @param PHP_CodeSniffer_File $phpcsFile The file being scanned.
     * @param int                  $stackPtr  The position of the double quoted
     *                                        string.
     *
     * @return void
     */
    protected function processVariableInString(PHP_CodeSniffer_File $phpcsFile, $stackPtr)
    {
        $tokens = $phpcsFile->getTokens();

        $phpReservedVars = array(
                            '_SERVER',
                            '_GET',
                            '_POST',
                            '_REQUEST',
                            '_SESSION',
                            '_ENV',
                            '_COOKIE',
                            '_FILES',
                            'GLOBALS',
                            'http_response_header',
                            'HTTP_RAW_POST_DATA',
                            'php_errormsg',
                           );

        if (preg_match_all('|[^\\\]\$([a-zA-Z_\x7f-\xff][a-zA-Z0-9_\x7f-\xff]*)|', $tokens[$stackPtr]['content'], $matches) !== 0) {
            foreach ($matches[1] as $varName) {
                // If it's a php reserved var, then its ok.
                if (in_array($varName, $phpReservedVars) === true) {
                    continue;
                }

                if (PHP_CodeSniffer::isCamelCaps($varName, false, true, false) === false) {
                    $error = 'Variable "%s" is not in valid camel caps format';
                    $data  = array($varName);
                    $phpcsFile->addError($error, $stackPtr, 'StringVarNotCamelCaps', $data);
                } else if (preg_match('|\d|', $varName) === 1) {
                    $warning = 'Variable "%s" contains numbers but this is discouraged';
                    $data    = array($varName);
                    $phpcsFile->addWarning($warning, $stackPtr, 'StringVarContainsNumbers', $data);
                }
            }//end foreach
        }//end if

    }//end processVariableInString()


}//end class
{ "pile_set_name": "Github" }
#region License // Copyright (c) 2013, ClearCanvas Inc. // All rights reserved. // http://www.clearcanvas.ca // // This file is part of the ClearCanvas RIS/PACS open source project. // // The ClearCanvas RIS/PACS open source project is free software: you can // redistribute it and/or modify it under the terms of the GNU General Public // License as published by the Free Software Foundation, either version 3 of the // License, or (at your option) any later version. // // The ClearCanvas RIS/PACS open source project is distributed in the hope that it // will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General // Public License for more details. // // You should have received a copy of the GNU General Public License along with // the ClearCanvas RIS/PACS open source project. If not, see // <http://www.gnu.org/licenses/>. #endregion using System.Collections.Generic; using System.Linq; using System.Security.Permissions; using ClearCanvas.Common; using ClearCanvas.Common.Utilities; using ClearCanvas.Desktop.Tools; using System.Threading; using ClearCanvas.Ris.Application.Common; using ClearCanvas.Ris.Application.Common.RegistrationWorkflow; namespace ClearCanvas.Ris.Client.Workflow { [ExtensionPoint] public class RegistrationWorkflowFolderExtensionPoint : ExtensionPoint<IWorklistFolder> { } [ExtensionPoint] public class RegistrationWorkflowItemToolExtensionPoint : ExtensionPoint<ITool> { } [ExtensionPoint] public class RegistrationWorkflowFolderToolExtensionPoint : ExtensionPoint<ITool> { } [ExtensionOf(typeof(FolderSystemExtensionPoint))] [PrincipalPermission(SecurityAction.Demand, Role = ClearCanvas.Ris.Application.Common.AuthorityTokens.FolderSystems.Registration)] public class RegistrationWorkflowFolderSystem : RegistrationWorkflowFolderSystemBase<RegistrationWorkflowFolderExtensionPoint, RegistrationWorkflowFolderToolExtensionPoint, 
RegistrationWorkflowItemToolExtensionPoint> { public RegistrationWorkflowFolderSystem() : base(SR.TitleRegistrationFolderSystem) { } protected override string GetPreviewUrl(WorkflowFolder folder, ICollection<RegistrationWorklistItemSummary> items) { return WebResourcesSettings.Default.RegistrationFolderSystemUrl; } protected override PreviewOperationAuditData[] GetPreviewAuditData(WorkflowFolder folder, ICollection<RegistrationWorklistItemSummary> items) { return items.Select(item => new PreviewOperationAuditData("Registration", item)).ToArray(); } protected override SearchResultsFolder CreateSearchResultsFolder() { return new Folders.Registration.RegistrationSearchFolder(); } } }
{ "pile_set_name": "Github" }
// RUN: %clang_cc1 -fsyntax-only -verify -std=c++11 %s // expected-no-diagnostics struct foo { foo(); foo(int); }; int func(foo& f) { decltype(foo())(); f = (decltype(foo()))5; return decltype(3)(5); }
{ "pile_set_name": "Github" }
// director_cmds.h // sub commands for svc_director #define DRC_ACTIVE 0 // tells client that he's an spectator and will get director command #define DRC_STATUS 1 // send status infos about proxy #define DRC_CAMERA 2 // set the actual director camera position #define DRC_EVENT 3 // informs the dircetor about ann important game event #define DRC_FLAG_PRIO_MASK 0x0F // priorities between 0 and 15 (15 most important) #define DRC_FLAG_SIDE (1<<4) #define DRC_FLAG_DRAMATIC (1<<5) // commands of the director API function CallDirectorProc(...) #define DRCAPI_NOP 0 // no operation #define DRCAPI_ACTIVE 1 // de/acivates director mode in engine #define DRCAPI_STATUS 2 // request proxy information #define DRCAPI_SETCAM 3 // set camera n to given position and angle #define DRCAPI_GETCAM 4 // request camera n position and angle #define DRCAPI_DIRPLAY 5 // set director time and play with normal speed #define DRCAPI_DIRFREEZE 6 // freeze directo at this time #define DRCAPI_SETVIEWMODE 7 // overview or 4 cameras #define DRCAPI_SETOVERVIEWPARAMS 8 // sets parameter for overview mode #define DRCAPI_SETFOCUS 9 // set the camera which has the input focus #define DRCAPI_GETTARGETS 10 // queries engine for player list #define DRCAPI_SETVIEWPOINTS 11 // gives engine all waypoints
{ "pile_set_name": "Github" }
// // SDLSetAppIconSpec.m // SmartDeviceLink #import <Foundation/Foundation.h> #import <Quick/Quick.h> #import <Nimble/Nimble.h> #import "SDLSetAppIcon.h" #import "SDLRPCParameterNames.h" #import "SDLRPCFunctionNames.h" QuickSpecBegin(SDLSetAppIconSpec) describe(@"Getter/Setter Tests", ^ { it(@"Should set and get correctly", ^ { SDLSetAppIcon* testRequest = [[SDLSetAppIcon alloc] init]; testRequest.syncFileName = @"A/File/Name"; expect(testRequest.syncFileName).to(equal(@"A/File/Name")); }); it(@"Should get correctly when initialized", ^ { NSMutableDictionary<NSString *, id> *dict = [@{SDLRPCParameterNameRequest: @{SDLRPCParameterNameParameters: @{SDLRPCParameterNameSyncFileName:@"A/File/Name"}, SDLRPCParameterNameOperationName:SDLRPCFunctionNameSetAppIcon}} mutableCopy]; #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wdeprecated-declarations" SDLSetAppIcon* testRequest = [[SDLSetAppIcon alloc] initWithDictionary:dict]; #pragma clang diagnostic pop expect(testRequest.syncFileName).to(equal(@"A/File/Name")); }); it(@"Should return nil if not set", ^ { SDLSetAppIcon* testRequest = [[SDLSetAppIcon alloc] init]; expect(testRequest.syncFileName).to(beNil()); }); }); QuickSpecEnd
{ "pile_set_name": "Github" }
<?xml version="1.0" encoding="utf-8"?> <Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003"> <ItemDefinitionGroup Condition="'$(Configuration)' == 'Debug' AND '$(PlatformToolset)' == 'v141' AND '$(Platform)' == 'ARM'"> <Link> <AdditionalDependencies>$(MSBuildThisFileDirectory)lib\$(Platform)\v141\$(Configuration)\cpprest141d_uwp_2_9.lib;$(MSBuildThisFileDirectory)lib\$(Platform)\v141\$(Configuration)\Microsoft.Xbox.Services.141.UWP.Ship.Cpp.lib;%(AdditionalDependencies)</AdditionalDependencies> </Link> </ItemDefinitionGroup> <ItemDefinitionGroup Condition="'$(Configuration)' != 'Debug' AND '$(PlatformToolset)' == 'v141' AND '$(Platform)' == 'ARM'"> <Link> <AdditionalDependencies>$(MSBuildThisFileDirectory)lib\$(Platform)\v141\Release\cpprest141_uwp_2_9.lib;$(MSBuildThisFileDirectory)lib\$(Platform)\v141\Release\Microsoft.Xbox.Services.141.UWP.Ship.Cpp.lib;%(AdditionalDependencies)</AdditionalDependencies> </Link> </ItemDefinitionGroup> </Project>
{ "pile_set_name": "Github" }
<?xml version="1.0" encoding="UTF-8"?> <!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd"> <plist version="1.0"> <dict> <key>BuildVersion</key> <string>3</string> <key>CFBundleShortVersionString</key> <string>1.0</string> <key>CFBundleVersion</key> <string>1.0</string> <key>ProjectName</key> <string>${EXECUTABLE_NAME}</string> <key>SourceVersion</key> <string>590000</string> </dict> </plist>
{ "pile_set_name": "Github" }
<?xml version="1.0" encoding="ascii"?> <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "DTD/xhtml1-transitional.dtd"> <html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" lang="en"> <head> <title>epydoc.gui.GUILogger</title> <link rel="stylesheet" href="epydoc.css" type="text/css" /> <script type="text/javascript" src="epydoc.js"></script> </head> <body bgcolor="white" text="black" link="blue" vlink="#204080" alink="#204080"> <!-- ==================== NAVIGATION BAR ==================== --> <table class="navbar" border="0" width="100%" cellpadding="0" bgcolor="#a0c0ff" cellspacing="0"> <tr valign="middle"> <!-- Home link --> <th>&nbsp;&nbsp;&nbsp;<a href="epydoc-module.html">Home</a>&nbsp;&nbsp;&nbsp;</th> <!-- Tree link --> <th>&nbsp;&nbsp;&nbsp;<a href="module-tree.html">Trees</a>&nbsp;&nbsp;&nbsp;</th> <!-- Index link --> <th>&nbsp;&nbsp;&nbsp;<a href="identifier-index.html">Indices</a>&nbsp;&nbsp;&nbsp;</th> <!-- Help link --> <th>&nbsp;&nbsp;&nbsp;<a href="help.html">Help</a>&nbsp;&nbsp;&nbsp;</th> <!-- Project homepage --> <th class="navbar" align="right" width="100%"> <table border="0" cellpadding="0" cellspacing="0"> <tr><th class="navbar" align="center" ><a class="navbar" target="_top" href="http://epydoc.sourceforge.net">epydoc 3.0.1</a></th> </tr></table></th> </tr> </table> <table width="100%" cellpadding="0" cellspacing="0"> <tr valign="top"> <td width="100%"> <span class="breadcrumbs"> <a href="epydoc-module.html">Package&nbsp;epydoc</a> :: <a href="epydoc.gui-module.html">Module&nbsp;gui</a> :: Class&nbsp;GUILogger </span> </td> <td> <table cellpadding="0" cellspacing="0"> <!-- hide/show private --> <tr><td align="right"><span class="options">[<a href="javascript:void(0);" class="privatelink" onclick="toggle_private();">hide&nbsp;private</a>]</span></td></tr> <tr><td align="right"><span class="options" >[<a href="frames.html" target="_top">frames</a >]&nbsp;|&nbsp;<a href="epydoc.gui.GUILogger-class.html" 
target="_top">no&nbsp;frames</a>]</span></td></tr> </table> </td> </tr> </table> <!-- ==================== CLASS DESCRIPTION ==================== --> <h1 class="epydoc">Class GUILogger</h1><p class="nomargin-top"><span class="codelink"><a href="epydoc.gui-pysrc.html#GUILogger">source&nbsp;code</a></span></p> <center> <center> <map id="uml_class_diagram_for_epydoc_g" name="uml_class_diagram_for_epydoc_g"> <area shape="rect" href="epydoc.log.Logger&#45;class.html#close" title="Perform any tasks needed to close this logger." alt="" coords="92,36,159,55" /> <area shape="rect" href="epydoc.log.Logger&#45;class.html" title="An abstract base class that defines the interface for loggers, which are used by epydoc to report information back to the user." alt="" coords="80,6,171,62" /> <area shape="rect" href="epydoc.gui.GUILogger&#45;class.html#__init__" title="epydoc.gui.GUILogger.__init__" alt="" coords="18,111,234,130" /> <area shape="rect" href="epydoc.gui.GUILogger&#45;class.html#clear" title="epydoc.gui.GUILogger.clear" alt="" coords="18,130,234,148" /> <area shape="rect" href="epydoc.gui.GUILogger&#45;class.html#log" title="Display a message." alt="" coords="18,148,234,167" /> <area shape="rect" href="epydoc.gui.GUILogger&#45;class.html#start_block" title="Start a new message block." alt="" coords="18,167,234,186" /> <area shape="rect" href="epydoc.gui.GUILogger&#45;class.html#end_block" title="End a warning block." alt="" coords="18,186,234,204" /> <area shape="rect" href="epydoc.gui.GUILogger&#45;class.html#start_progress" title="Begin displaying progress for a new task." alt="" coords="18,204,234,223" /> <area shape="rect" href="epydoc.gui.GUILogger&#45;class.html#end_progress" title="Finish off the display of progress for the current task." alt="" coords="18,223,234,242" /> <area shape="rect" href="epydoc.gui.GUILogger&#45;class.html#progress" title="Update the progress display." 
alt="" coords="18,242,234,260" /> <area shape="rect" href="epydoc.gui.GUILogger&#45;class.html#read" title="epydoc.gui.GUILogger.read" alt="" coords="18,260,234,279" /> <area shape="rect" href="epydoc.gui.GUILogger&#45;class.html" title="epydoc.gui.GUILogger" alt="" coords="6,80,246,286" /> </map> <img src="uml_class_diagram_for_epydoc_g.gif" alt='' usemap="#uml_class_diagram_for_epydoc_g" ismap="ismap" class="graph-without-title" /> </center> </center> <hr /> <!-- ==================== INSTANCE METHODS ==================== --> <a name="section-InstanceMethods"></a> <table class="summary" border="1" cellpadding="3" cellspacing="0" width="100%" bgcolor="white"> <tr bgcolor="#70b0f0" class="table-header"> <td colspan="2" class="table-header"> <table border="0" cellpadding="0" cellspacing="0" width="100%"> <tr valign="top"> <td align="left"><span class="table-header">Instance Methods</span></td> <td align="right" valign="top" ><span class="options">[<a href="#section-InstanceMethods" class="privatelink" onclick="toggle_private();" >hide private</a>]</span></td> </tr> </table> </td> </tr> <tr> <td width="15%" align="right" valign="top" class="summary"> <span class="summary-type">&nbsp;</span> </td><td class="summary"> <table width="100%" cellpadding="0" cellspacing="0" border="0"> <tr> <td><span class="summary-sig"><a name="__init__"></a><span class="summary-sig-name">__init__</span>(<span class="summary-sig-arg">self</span>, <span class="summary-sig-arg">progress</span>, <span class="summary-sig-arg">cancel</span>)</span></td> <td align="right" valign="top"> <span class="codelink"><a href="epydoc.gui-pysrc.html#GUILogger.__init__">source&nbsp;code</a></span> </td> </tr> </table> </td> </tr> <tr> <td width="15%" align="right" valign="top" class="summary"> <span class="summary-type">&nbsp;</span> </td><td class="summary"> <table width="100%" cellpadding="0" cellspacing="0" border="0"> <tr> <td><span class="summary-sig"><a name="clear"></a><span 
class="summary-sig-name">clear</span>(<span class="summary-sig-arg">self</span>)</span></td> <td align="right" valign="top"> <span class="codelink"><a href="epydoc.gui-pysrc.html#GUILogger.clear">source&nbsp;code</a></span> </td> </tr> </table> </td> </tr> <tr> <td width="15%" align="right" valign="top" class="summary"> <span class="summary-type">&nbsp;</span> </td><td class="summary"> <table width="100%" cellpadding="0" cellspacing="0" border="0"> <tr> <td><span class="summary-sig"><a href="epydoc.gui.GUILogger-class.html#log" class="summary-sig-name">log</a>(<span class="summary-sig-arg">self</span>, <span class="summary-sig-arg">level</span>, <span class="summary-sig-arg">message</span>)</span><br /> Display a message.</td> <td align="right" valign="top"> <span class="codelink"><a href="epydoc.gui-pysrc.html#GUILogger.log">source&nbsp;code</a></span> </td> </tr> </table> </td> </tr> <tr> <td width="15%" align="right" valign="top" class="summary"> <span class="summary-type">&nbsp;</span> </td><td class="summary"> <table width="100%" cellpadding="0" cellspacing="0" border="0"> <tr> <td><span class="summary-sig"><a href="epydoc.gui.GUILogger-class.html#start_block" class="summary-sig-name">start_block</a>(<span class="summary-sig-arg">self</span>, <span class="summary-sig-arg">header</span>)</span><br /> Start a new message block.</td> <td align="right" valign="top"> <span class="codelink"><a href="epydoc.gui-pysrc.html#GUILogger.start_block">source&nbsp;code</a></span> </td> </tr> </table> </td> </tr> <tr> <td width="15%" align="right" valign="top" class="summary"> <span class="summary-type">&nbsp;</span> </td><td class="summary"> <table width="100%" cellpadding="0" cellspacing="0" border="0"> <tr> <td><span class="summary-sig"><a href="epydoc.gui.GUILogger-class.html#end_block" class="summary-sig-name">end_block</a>(<span class="summary-sig-arg">self</span>)</span><br /> End a warning block.</td> <td align="right" valign="top"> <span class="codelink"><a 
href="epydoc.gui-pysrc.html#GUILogger.end_block">source&nbsp;code</a></span> </td> </tr> </table> </td> </tr> <tr> <td width="15%" align="right" valign="top" class="summary"> <span class="summary-type">&nbsp;</span> </td><td class="summary"> <table width="100%" cellpadding="0" cellspacing="0" border="0"> <tr> <td><span class="summary-sig"><a href="epydoc.gui.GUILogger-class.html#start_progress" class="summary-sig-name">start_progress</a>(<span class="summary-sig-arg">self</span>, <span class="summary-sig-arg">header</span>=<span class="summary-sig-default">None</span>)</span><br /> Begin displaying progress for a new task.</td> <td align="right" valign="top"> <span class="codelink"><a href="epydoc.gui-pysrc.html#GUILogger.start_progress">source&nbsp;code</a></span> </td> </tr> </table> </td> </tr> <tr> <td width="15%" align="right" valign="top" class="summary"> <span class="summary-type">&nbsp;</span> </td><td class="summary"> <table width="100%" cellpadding="0" cellspacing="0" border="0"> <tr> <td><span class="summary-sig"><a href="epydoc.gui.GUILogger-class.html#end_progress" class="summary-sig-name">end_progress</a>(<span class="summary-sig-arg">self</span>)</span><br /> Finish off the display of progress for the current task.</td> <td align="right" valign="top"> <span class="codelink"><a href="epydoc.gui-pysrc.html#GUILogger.end_progress">source&nbsp;code</a></span> </td> </tr> </table> </td> </tr> <tr> <td width="15%" align="right" valign="top" class="summary"> <span class="summary-type">&nbsp;</span> </td><td class="summary"> <table width="100%" cellpadding="0" cellspacing="0" border="0"> <tr> <td><span class="summary-sig"><a href="epydoc.gui.GUILogger-class.html#progress" class="summary-sig-name">progress</a>(<span class="summary-sig-arg">self</span>, <span class="summary-sig-arg">percent</span>, <span class="summary-sig-arg">message</span>=<span class="summary-sig-default"><code class="variable-quote">'</code><code class="variable-string"></code><code 
class="variable-quote">'</code></span>)</span><br /> Update the progress display.</td> <td align="right" valign="top"> <span class="codelink"><a href="epydoc.gui-pysrc.html#GUILogger.progress">source&nbsp;code</a></span> </td> </tr> </table> </td> </tr> <tr> <td width="15%" align="right" valign="top" class="summary"> <span class="summary-type">&nbsp;</span> </td><td class="summary"> <table width="100%" cellpadding="0" cellspacing="0" border="0"> <tr> <td><span class="summary-sig"><a name="read"></a><span class="summary-sig-name">read</span>(<span class="summary-sig-arg">self</span>)</span></td> <td align="right" valign="top"> <span class="codelink"><a href="epydoc.gui-pysrc.html#GUILogger.read">source&nbsp;code</a></span> </td> </tr> </table> </td> </tr> <tr> <td colspan="2" class="summary"> <p class="indent-wrapped-lines"><b>Inherited from <code><a href="epydoc.log.Logger-class.html">log.Logger</a></code></b>: <code><a href="epydoc.log.Logger-class.html#close">close</a></code> </p> </td> </tr> </table> <!-- ==================== CLASS VARIABLES ==================== --> <a name="section-ClassVariables"></a> <table class="summary" border="1" cellpadding="3" cellspacing="0" width="100%" bgcolor="white"> <tr bgcolor="#70b0f0" class="table-header"> <td colspan="2" class="table-header"> <table border="0" cellpadding="0" cellspacing="0" width="100%"> <tr valign="top"> <td align="left"><span class="table-header">Class Variables</span></td> <td align="right" valign="top" ><span class="options">[<a href="#section-ClassVariables" class="privatelink" onclick="toggle_private();" >hide private</a>]</span></td> </tr> </table> </td> </tr> <tr class="private"> <td width="15%" align="right" valign="top" class="summary"> <span class="summary-type">&nbsp;</span> </td><td class="summary"> <a name="_STAGES"></a><span class="summary-name">_STAGES</span> = <code title="[40, 7, 1, 3, 1, 30, 1, 2, 100]"><code class="variable-group">[</code>40<code class="variable-op">, </code>7<code 
class="variable-op">, </code>1<code class="variable-op">, </code>3<code class="variable-op">, </code>1<code class="variable-op">, </code>30<code class="variable-op">, </code>1<code class="variable-op">, </code>2<code class="variable-op">, </code>100<code class="variable-group">]</code></code> </td> </tr> </table> <!-- ==================== METHOD DETAILS ==================== --> <a name="section-MethodDetails"></a> <table class="details" border="1" cellpadding="3" cellspacing="0" width="100%" bgcolor="white"> <tr bgcolor="#70b0f0" class="table-header"> <td colspan="2" class="table-header"> <table border="0" cellpadding="0" cellspacing="0" width="100%"> <tr valign="top"> <td align="left"><span class="table-header">Method Details</span></td> <td align="right" valign="top" ><span class="options">[<a href="#section-MethodDetails" class="privatelink" onclick="toggle_private();" >hide private</a>]</span></td> </tr> </table> </td> </tr> </table> <a name="log"></a> <div> <table class="details" border="1" cellpadding="3" cellspacing="0" width="100%" bgcolor="white"> <tr><td> <table width="100%" cellpadding="0" cellspacing="0" border="0"> <tr valign="top"><td> <h3 class="epydoc"><span class="sig"><span class="sig-name">log</span>(<span class="sig-arg">self</span>, <span class="sig-arg">level</span>, <span class="sig-arg">message</span>)</span> </h3> </td><td align="right" valign="top" ><span class="codelink"><a href="epydoc.gui-pysrc.html#GUILogger.log">source&nbsp;code</a></span>&nbsp; </td> </tr></table> <p>Display a message.</p> <dl class="fields"> <dt>Parameters:</dt> <dd><ul class="nomargin-top"> <li><strong class="pname"><code>message</code></strong> - The message string to display. 
<code>message</code> may contain newlines, but does not need to end in a newline.</li> <li><strong class="pname"><code>level</code></strong> - An integer value indicating the severity of the message.</li> </ul></dd> <dt>Overrides: <a href="epydoc.log.Logger-class.html#log">log.Logger.log</a> <dd><em class="note">(inherited documentation)</em></dd> </dt> </dl> </td></tr></table> </div> <a name="start_block"></a> <div> <table class="details" border="1" cellpadding="3" cellspacing="0" width="100%" bgcolor="white"> <tr><td> <table width="100%" cellpadding="0" cellspacing="0" border="0"> <tr valign="top"><td> <h3 class="epydoc"><span class="sig"><span class="sig-name">start_block</span>(<span class="sig-arg">self</span>, <span class="sig-arg">header</span>)</span> </h3> </td><td align="right" valign="top" ><span class="codelink"><a href="epydoc.gui-pysrc.html#GUILogger.start_block">source&nbsp;code</a></span>&nbsp; </td> </tr></table> <p>Start a new message block. Any calls to <a href="epydoc.log-module.html#info" class="link">info()</a>, <a href="epydoc.log-module.html#warning" class="link">warning()</a>, or <a href="epydoc.log-module.html#error" class="link">error()</a> that occur between a call to <code>start_block</code> and a corresponding call to <code>end_block</code> will be grouped together, and displayed with a common header. 
<code>start_block</code> can be called multiple times (to form nested blocks), but every call to <code>start_block</code> <i>must</i> be balanced by a call to <code>end_block</code>.</p> <dl class="fields"> <dt>Overrides: <a href="epydoc.log.Logger-class.html#start_block">log.Logger.start_block</a> <dd><em class="note">(inherited documentation)</em></dd> </dt> </dl> </td></tr></table> </div> <a name="end_block"></a> <div> <table class="details" border="1" cellpadding="3" cellspacing="0" width="100%" bgcolor="white"> <tr><td> <table width="100%" cellpadding="0" cellspacing="0" border="0"> <tr valign="top"><td> <h3 class="epydoc"><span class="sig"><span class="sig-name">end_block</span>(<span class="sig-arg">self</span>)</span> </h3> </td><td align="right" valign="top" ><span class="codelink"><a href="epydoc.gui-pysrc.html#GUILogger.end_block">source&nbsp;code</a></span>&nbsp; </td> </tr></table> <p>End a warning block. See <a href="epydoc.cli.ConsoleLogger-class.html#start_block" class="link">start_block</a> for details.</p> <dl class="fields"> <dt>Overrides: <a href="epydoc.log.Logger-class.html#end_block">log.Logger.end_block</a> <dd><em class="note">(inherited documentation)</em></dd> </dt> </dl> </td></tr></table> </div> <a name="start_progress"></a> <div> <table class="details" border="1" cellpadding="3" cellspacing="0" width="100%" bgcolor="white"> <tr><td> <table width="100%" cellpadding="0" cellspacing="0" border="0"> <tr valign="top"><td> <h3 class="epydoc"><span class="sig"><span class="sig-name">start_progress</span>(<span class="sig-arg">self</span>, <span class="sig-arg">header</span>=<span class="sig-default">None</span>)</span> </h3> </td><td align="right" valign="top" ><span class="codelink"><a href="epydoc.gui-pysrc.html#GUILogger.start_progress">source&nbsp;code</a></span>&nbsp; </td> </tr></table> <p>Begin displaying progress for a new task. <code>header</code> is a description of the task for which progress is being reported. 
Each call to <code>start_progress</code> must be followed by a call to <code>end_progress</code> (with no intervening calls to <code>start_progress</code>).</p> <dl class="fields"> <dt>Overrides: <a href="epydoc.log.Logger-class.html#start_progress">log.Logger.start_progress</a> <dd><em class="note">(inherited documentation)</em></dd> </dt> </dl> </td></tr></table> </div> <a name="end_progress"></a> <div> <table class="details" border="1" cellpadding="3" cellspacing="0" width="100%" bgcolor="white"> <tr><td> <table width="100%" cellpadding="0" cellspacing="0" border="0"> <tr valign="top"><td> <h3 class="epydoc"><span class="sig"><span class="sig-name">end_progress</span>(<span class="sig-arg">self</span>)</span> </h3> </td><td align="right" valign="top" ><span class="codelink"><a href="epydoc.gui-pysrc.html#GUILogger.end_progress">source&nbsp;code</a></span>&nbsp; </td> </tr></table> <p>Finish off the display of progress for the current task. See <a href="epydoc.cli.ConsoleLogger-class.html#start_progress" class="link">start_progress</a> for more information.</p> <dl class="fields"> <dt>Overrides: <a href="epydoc.log.Logger-class.html#end_progress">log.Logger.end_progress</a> <dd><em class="note">(inherited documentation)</em></dd> </dt> </dl> </td></tr></table> </div> <a name="progress"></a> <div> <table class="details" border="1" cellpadding="3" cellspacing="0" width="100%" bgcolor="white"> <tr><td> <table width="100%" cellpadding="0" cellspacing="0" border="0"> <tr valign="top"><td> <h3 class="epydoc"><span class="sig"><span class="sig-name">progress</span>(<span class="sig-arg">self</span>, <span class="sig-arg">percent</span>, <span class="sig-arg">message</span>=<span class="sig-default"><code class="variable-quote">'</code><code class="variable-string"></code><code class="variable-quote">'</code></span>)</span> </h3> </td><td align="right" valign="top" ><span class="codelink"><a 
href="epydoc.gui-pysrc.html#GUILogger.progress">source&nbsp;code</a></span>&nbsp; </td> </tr></table> <p>Update the progress display.</p> <dl class="fields"> <dt>Parameters:</dt> <dd><ul class="nomargin-top"> <li><strong class="pname"><code>percent</code></strong> - A float from 0.0 to 1.0, indicating how much progress has been made.</li> <li><strong class="pname"><code>message</code></strong> - A message indicating the most recent action that contributed towards that progress.</li> </ul></dd> <dt>Overrides: <a href="epydoc.log.Logger-class.html#progress">log.Logger.progress</a> <dd><em class="note">(inherited documentation)</em></dd> </dt> </dl> </td></tr></table> </div> <br /> <!-- ==================== NAVIGATION BAR ==================== --> <table class="navbar" border="0" width="100%" cellpadding="0" bgcolor="#a0c0ff" cellspacing="0"> <tr valign="middle"> <!-- Home link --> <th>&nbsp;&nbsp;&nbsp;<a href="epydoc-module.html">Home</a>&nbsp;&nbsp;&nbsp;</th> <!-- Tree link --> <th>&nbsp;&nbsp;&nbsp;<a href="module-tree.html">Trees</a>&nbsp;&nbsp;&nbsp;</th> <!-- Index link --> <th>&nbsp;&nbsp;&nbsp;<a href="identifier-index.html">Indices</a>&nbsp;&nbsp;&nbsp;</th> <!-- Help link --> <th>&nbsp;&nbsp;&nbsp;<a href="help.html">Help</a>&nbsp;&nbsp;&nbsp;</th> <!-- Project homepage --> <th class="navbar" align="right" width="100%"> <table border="0" cellpadding="0" cellspacing="0"> <tr><th class="navbar" align="center" ><a class="navbar" target="_top" href="http://epydoc.sourceforge.net">epydoc 3.0.1</a></th> </tr></table></th> </tr> </table> <table border="0" cellpadding="0" cellspacing="0" width="100%%"> <tr> <td align="left" class="footer"> <a href="epydoc-log.html">Generated by Epydoc 3.0.1 on Wed Jan 30 14:08:21 2008</a> </td> <td align="right" class="footer"> <a target="mainFrame" href="http://epydoc.sourceforge.net" >http://epydoc.sourceforge.net</a> </td> </tr> </table> <script type="text/javascript"> <!-- // Private objects are initially displayed (because if 
// javascript is turned off then we want them to be // visible); but by default, we want to hide them. So hide // them unless we have a cookie that says to show them. checkCookie(); // --> </script> </body> </html>
{ "pile_set_name": "Github" }
import org.junit.Test; import static org.junit.Assert.assertEquals; /** * Unit test for simple App. */ public class AppTestDummy { @Test public void testMain() throws Exception { assertEquals(App.main(), -1); } }
{ "pile_set_name": "Github" }
/** @file
  The Super I/O Protocol is installed by the Super I/O driver. The Super I/O
  driver is a UEFI driver model compliant driver. In the Start() routine of
  the Super I/O driver, a handle with an instance of EFI_SIO_PROTOCOL is
  created for each device within the Super I/O. The device within the Super
  I/O is powered up, enabled, and assigned with the default set of resources.
  In the Stop() routine of the Super I/O driver, the device is disabled and
  Super I/O protocol is uninstalled.

  Copyright (c) 2006 - 2010, Intel Corporation. All rights reserved.<BR>
  This program and the accompanying materials
  are licensed and made available under the terms and conditions of the BSD
  License which accompanies this distribution. The full text of the license
  may be found at http://opensource.org/licenses/bsd-license.php

  THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
  WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.

**/

#ifndef __EFI_SUPER_IO_PROTOCOL_H__
#define __EFI_SUPER_IO_PROTOCOL_H__

#include <IndustryStandard/Acpi.h>

#define EFI_SIO_PROTOCOL_GUID \
  { 0x215fdd18, 0xbd50, 0x4feb, { 0x89, 0xb, 0x58, 0xca, 0xb, 0x47, 0x39, 0xe9 } }

///
/// Pointer to either a Small or a Large ACPI resource descriptor header.
/// The header itself carries the Type/Length information that tells the
/// consumer which arm of the union is in effect, as defined by the ACPI
/// specification.
///
typedef union {
  ACPI_SMALL_RESOURCE_HEADER    *SmallHeader;
  ACPI_LARGE_RESOURCE_HEADER    *LargeHeader;
} ACPI_RESOURCE_HEADER_PTR;

///
/// One read-modify-write command for EFI_SIO_PROTOCOL.Modify():
/// NewValue = (OldValue & AndMask) | OrMask.
///
typedef struct {
  UINT8 Register; ///< Register number.
  UINT8 AndMask;  ///< Bitwise AND mask.
  UINT8 OrMask;   ///< Bitwise OR mask.
} EFI_SIO_REGISTER_MODIFY;

typedef struct _EFI_SIO_PROTOCOL EFI_SIO_PROTOCOL;

/**
  Provides a low level access to the registers for the Super I/O.

  @param[in]      This         Indicates a pointer to the calling context.
  @param[in]      Write        Specifies the type of the register operation.
                               If this parameter is TRUE, Value is interpreted
                               as an input parameter and the operation is a
                               register write. If this parameter is FALSE,
                               Value is interpreted as an output parameter and
                               the operation is a register read.
  @param[in]      ExitCfgMode  Exit Configuration Mode Indicator. If this
                               parameter is set to TRUE, the Super I/O driver
                               will turn off configuration mode of the Super
                               I/O prior to returning from this function. If
                               this parameter is set to FALSE, the Super I/O
                               driver will leave Super I/O in the
                               configuration mode. The Super I/O driver must
                               track the current state of the Super I/O and
                               enable the configuration mode of Super I/O if
                               necessary prior to register access.
  @param[in]      Register     Register number.
  @param[in, out] Value        If Write is TRUE, Value is a pointer to the
                               buffer containing the byte of data to be
                               written to the Super I/O register. If Write is
                               FALSE, Value is a pointer to the destination
                               buffer for the byte of data to be read from
                               the Super I/O register.

  @retval EFI_SUCCESS            The operation completed successfully.
  @retval EFI_INVALID_PARAMETER  The Value is NULL.
  @retval EFI_INVALID_PARAMETER  Invalid Register number.

**/
typedef
EFI_STATUS
(EFIAPI *EFI_SIO_REGISTER_ACCESS)(
  IN CONST EFI_SIO_PROTOCOL    *This,
  IN       BOOLEAN             Write,
  IN       BOOLEAN             ExitCfgMode,
  IN       UINT8               Register,
  IN OUT   UINT8               *Value
  );

/**
  Provides an interface to get a list of the current resources consumed by
  the device in the ACPI Resource Descriptor format.

  GetResources() returns a list of resources currently consumed by the
  device. The ResourceList is a pointer to the buffer containing resource
  descriptors for the device. The descriptors are in the format of Small or
  Large ACPI resource descriptor as defined by ACPI specification (2.0 &
  3.0). The buffer of resource descriptors is terminated with the 'End tag'
  resource descriptor.

  @param[in]  This          Indicates a pointer to the calling context.
  @param[out] ResourceList  A pointer to an ACPI resource descriptor list
                            that defines the current resources used by the
                            device. Type ACPI_RESOURCE_HEADER_PTR is defined
                            in the "Related Definitions" below.

  @retval EFI_SUCCESS            The operation completed successfully.
  @retval EFI_INVALID_PARAMETER  ResourceList is NULL.

**/
typedef
EFI_STATUS
(EFIAPI *EFI_SIO_GET_RESOURCES)(
  IN CONST EFI_SIO_PROTOCOL     *This,
  OUT ACPI_RESOURCE_HEADER_PTR  *ResourceList
  );

/**
  Sets the resources for the device.

  @param[in] This          Indicates a pointer to the calling context.
  @param[in] ResourceList  Pointer to the ACPI resource descriptor list.
                           Type ACPI_RESOURCE_HEADER_PTR is defined in the
                           "Related Definitions" section of
                           EFI_SIO_PROTOCOL.GetResources().

  @retval EFI_SUCCESS            The operation completed successfully.
  @retval EFI_INVALID_PARAMETER  ResourceList is invalid.
  @retval EFI_ACCESS_DENIED      Some of the resources in ResourceList are
                                 in use.

**/
typedef
EFI_STATUS
(EFIAPI *EFI_SIO_SET_RESOURCES)(
  IN CONST EFI_SIO_PROTOCOL    *This,
  IN ACPI_RESOURCE_HEADER_PTR  ResourceList
  );

/**
  Provides a collection of resource descriptor lists. Each resource
  descriptor list in the collection defines a combination of resources that
  can potentially be used by the device.

  @param[in]  This                Indicates a pointer to the calling context.
  @param[out] ResourceCollection  Collection of the resource descriptor
                                  lists.

  @retval EFI_SUCCESS            The operation completed successfully.
  @retval EFI_INVALID_PARAMETER  ResourceCollection is NULL.

**/
typedef
EFI_STATUS
(EFIAPI *EFI_SIO_POSSIBLE_RESOURCES)(
  IN CONST EFI_SIO_PROTOCOL     *This,
  OUT ACPI_RESOURCE_HEADER_PTR  *ResourceCollection
  );

/**
  Provides an interface for a table based programming of the Super I/O
  registers.

  The Modify() function provides an interface for table based programming of
  the Super I/O registers. This function can be used to perform programming
  of multiple Super I/O registers with a single function call. For each table
  entry, the Register is read, its content is bitwise ANDed with AndMask, and
  then ORed with OrMask before being written back to the Register. The Super
  I/O driver must track the current state of the Super I/O and enable the
  configuration mode of Super I/O if necessary prior to table processing.
  Once the table is processed, the Super I/O device has to be returned to the
  original state.

  @param[in] This              Indicates a pointer to the calling context.
  @param[in] Command           A pointer to an array of NumberOfCommands
                               EFI_SIO_REGISTER_MODIFY structures. Each
                               structure specifies a single Super I/O
                               register modify operation. Type
                               EFI_SIO_REGISTER_MODIFY is defined in the
                               "Related Definitions" below.
  @param[in] NumberOfCommands  Number of elements in the Command array.

  @retval EFI_SUCCESS            The operation completed successfully.
  @retval EFI_INVALID_PARAMETER  Command is NULL.

**/
typedef
EFI_STATUS
(EFIAPI *EFI_SIO_MODIFY)(
  IN CONST EFI_SIO_PROTOCOL         *This,
  IN CONST EFI_SIO_REGISTER_MODIFY  *Command,
  IN UINTN                          NumberOfCommands
  );

///
/// The EFI_SIO_PROTOCOL is installed on every device handle created by the
/// Super I/O driver for an individual logical device within the Super I/O.
///
struct _EFI_SIO_PROTOCOL {
  EFI_SIO_REGISTER_ACCESS     RegisterAccess;
  EFI_SIO_GET_RESOURCES       GetResources;
  EFI_SIO_SET_RESOURCES       SetResources;
  EFI_SIO_POSSIBLE_RESOURCES  PossibleResources;
  EFI_SIO_MODIFY              Modify;
};

extern EFI_GUID gEfiSioProtocolGuid;

#endif // __EFI_SUPER_IO_PROTOCOL_H__
{ "pile_set_name": "Github" }
ctlz __intrinsics__ UInt64 UInt64 UInt32 UInt32 UInt16 UInt16 UInt8 UInt8 59 4 27 4 11 4 3 4
{ "pile_set_name": "Github" }
// Copyright 2011 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // Package sql provides a generic interface around SQL (or SQL-like) // databases. // // The sql package must be used in conjunction with a database driver. // See http://golang.org/s/sqldrivers for a list of drivers. // // For more usage examples, see the wiki page at // http://golang.org/s/sqlwiki. package sql import ( "database/sql/driver" "errors" "fmt" "io" "runtime" "sort" "sync" ) var drivers = make(map[string]driver.Driver) // Register makes a database driver available by the provided name. // If Register is called twice with the same name or if driver is nil, // it panics. func Register(name string, driver driver.Driver) { if driver == nil { panic("sql: Register driver is nil") } if _, dup := drivers[name]; dup { panic("sql: Register called twice for driver " + name) } drivers[name] = driver } func unregisterAllDrivers() { // For tests. drivers = make(map[string]driver.Driver) } // Drivers returns a sorted list of the names of the registered drivers. func Drivers() []string { var list []string for name := range drivers { list = append(list, name) } sort.Strings(list) return list } // RawBytes is a byte slice that holds a reference to memory owned by // the database itself. After a Scan into a RawBytes, the slice is only // valid until the next call to Next, Scan, or Close. type RawBytes []byte // NullString represents a string that may be null. // NullString implements the Scanner interface so // it can be used as a scan destination: // // var s NullString // err := db.QueryRow("SELECT name FROM foo WHERE id=?", id).Scan(&s) // ... // if s.Valid { // // use s.String // } else { // // NULL value // } // type NullString struct { String string Valid bool // Valid is true if String is not NULL } // Scan implements the Scanner interface. 
func (ns *NullString) Scan(value interface{}) error { if value == nil { ns.String, ns.Valid = "", false return nil } ns.Valid = true return convertAssign(&ns.String, value) } // Value implements the driver Valuer interface. func (ns NullString) Value() (driver.Value, error) { if !ns.Valid { return nil, nil } return ns.String, nil } // NullInt64 represents an int64 that may be null. // NullInt64 implements the Scanner interface so // it can be used as a scan destination, similar to NullString. type NullInt64 struct { Int64 int64 Valid bool // Valid is true if Int64 is not NULL } // Scan implements the Scanner interface. func (n *NullInt64) Scan(value interface{}) error { if value == nil { n.Int64, n.Valid = 0, false return nil } n.Valid = true return convertAssign(&n.Int64, value) } // Value implements the driver Valuer interface. func (n NullInt64) Value() (driver.Value, error) { if !n.Valid { return nil, nil } return n.Int64, nil } // NullFloat64 represents a float64 that may be null. // NullFloat64 implements the Scanner interface so // it can be used as a scan destination, similar to NullString. type NullFloat64 struct { Float64 float64 Valid bool // Valid is true if Float64 is not NULL } // Scan implements the Scanner interface. func (n *NullFloat64) Scan(value interface{}) error { if value == nil { n.Float64, n.Valid = 0, false return nil } n.Valid = true return convertAssign(&n.Float64, value) } // Value implements the driver Valuer interface. func (n NullFloat64) Value() (driver.Value, error) { if !n.Valid { return nil, nil } return n.Float64, nil } // NullBool represents a bool that may be null. // NullBool implements the Scanner interface so // it can be used as a scan destination, similar to NullString. type NullBool struct { Bool bool Valid bool // Valid is true if Bool is not NULL } // Scan implements the Scanner interface. 
func (n *NullBool) Scan(value interface{}) error { if value == nil { n.Bool, n.Valid = false, false return nil } n.Valid = true return convertAssign(&n.Bool, value) } // Value implements the driver Valuer interface. func (n NullBool) Value() (driver.Value, error) { if !n.Valid { return nil, nil } return n.Bool, nil } // Scanner is an interface used by Scan. type Scanner interface { // Scan assigns a value from a database driver. // // The src value will be of one of the following restricted // set of types: // // int64 // float64 // bool // []byte // string // time.Time // nil - for NULL values // // An error should be returned if the value can not be stored // without loss of information. Scan(src interface{}) error } // ErrNoRows is returned by Scan when QueryRow doesn't return a // row. In such a case, QueryRow returns a placeholder *Row value that // defers this error until a Scan. var ErrNoRows = errors.New("sql: no rows in result set") // DB is a database handle representing a pool of zero or more // underlying connections. It's safe for concurrent use by multiple // goroutines. // // The sql package creates and frees connections automatically; it // also maintains a free pool of idle connections. If the database has // a concept of per-connection state, such state can only be reliably // observed within a transaction. Once DB.Begin is called, the // returned Tx is bound to a single connection. Once Commit or // Rollback is called on the transaction, that transaction's // connection is returned to DB's idle connection pool. The pool size // can be controlled with SetMaxIdleConns. 
type DB struct { driver driver.Driver dsn string mu sync.Mutex // protects following fields freeConn []*driverConn connRequests []chan connRequest numOpen int pendingOpens int // Used to signal the need for new connections // a goroutine running connectionOpener() reads on this chan and // maybeOpenNewConnections sends on the chan (one send per needed connection) // It is closed during db.Close(). The close tells the connectionOpener // goroutine to exit. openerCh chan struct{} closed bool dep map[finalCloser]depSet lastPut map[*driverConn]string // stacktrace of last conn's put; debug only maxIdle int // zero means defaultMaxIdleConns; negative means 0 maxOpen int // <= 0 means unlimited } // driverConn wraps a driver.Conn with a mutex, to // be held during all calls into the Conn. (including any calls onto // interfaces returned via that Conn, such as calls on Tx, Stmt, // Result, Rows) type driverConn struct { db *DB sync.Mutex // guards following ci driver.Conn closed bool finalClosed bool // ci.Close has been called openStmt map[driver.Stmt]bool // guarded by db.mu inUse bool onPut []func() // code (with db.mu held) run when conn is next returned dbmuClosed bool // same as closed, but guarded by db.mu, for connIfFree } func (dc *driverConn) releaseConn(err error) { dc.db.putConn(dc, err) } func (dc *driverConn) removeOpenStmt(si driver.Stmt) { dc.Lock() defer dc.Unlock() delete(dc.openStmt, si) } func (dc *driverConn) prepareLocked(query string) (driver.Stmt, error) { si, err := dc.ci.Prepare(query) if err == nil { // Track each driverConn's open statements, so we can close them // before closing the conn. // // TODO(bradfitz): let drivers opt out of caring about // stmt closes if the conn is about to close anyway? For now // do the safe thing, in case stmts need to be closed. 
// // TODO(bradfitz): after Go 1.2, closing driver.Stmts // should be moved to driverStmt, using unique // *driverStmts everywhere (including from // *Stmt.connStmt, instead of returning a // driver.Stmt), using driverStmt as a pointer // everywhere, and making it a finalCloser. if dc.openStmt == nil { dc.openStmt = make(map[driver.Stmt]bool) } dc.openStmt[si] = true } return si, err } // the dc.db's Mutex is held. func (dc *driverConn) closeDBLocked() func() error { dc.Lock() defer dc.Unlock() if dc.closed { return func() error { return errors.New("sql: duplicate driverConn close") } } dc.closed = true return dc.db.removeDepLocked(dc, dc) } func (dc *driverConn) Close() error { dc.Lock() if dc.closed { dc.Unlock() return errors.New("sql: duplicate driverConn close") } dc.closed = true dc.Unlock() // not defer; removeDep finalClose calls may need to lock // And now updates that require holding dc.mu.Lock. dc.db.mu.Lock() dc.dbmuClosed = true fn := dc.db.removeDepLocked(dc, dc) dc.db.mu.Unlock() return fn() } func (dc *driverConn) finalClose() error { dc.Lock() for si := range dc.openStmt { si.Close() } dc.openStmt = nil err := dc.ci.Close() dc.ci = nil dc.finalClosed = true dc.Unlock() dc.db.mu.Lock() dc.db.numOpen-- dc.db.maybeOpenNewConnections() dc.db.mu.Unlock() return err } // driverStmt associates a driver.Stmt with the // *driverConn from which it came, so the driverConn's lock can be // held during calls. type driverStmt struct { sync.Locker // the *driverConn si driver.Stmt } func (ds *driverStmt) Close() error { ds.Lock() defer ds.Unlock() return ds.si.Close() } // depSet is a finalCloser's outstanding dependencies type depSet map[interface{}]bool // set of true bools // The finalCloser interface is used by (*DB).addDep and related // dependency reference counting. type finalCloser interface { // finalClose is called when the reference count of an object // goes to zero. (*DB).mu is not held while calling it. 
finalClose() error } // addDep notes that x now depends on dep, and x's finalClose won't be // called until all of x's dependencies are removed with removeDep. func (db *DB) addDep(x finalCloser, dep interface{}) { //println(fmt.Sprintf("addDep(%T %p, %T %p)", x, x, dep, dep)) db.mu.Lock() defer db.mu.Unlock() db.addDepLocked(x, dep) } func (db *DB) addDepLocked(x finalCloser, dep interface{}) { if db.dep == nil { db.dep = make(map[finalCloser]depSet) } xdep := db.dep[x] if xdep == nil { xdep = make(depSet) db.dep[x] = xdep } xdep[dep] = true } // removeDep notes that x no longer depends on dep. // If x still has dependencies, nil is returned. // If x no longer has any dependencies, its finalClose method will be // called and its error value will be returned. func (db *DB) removeDep(x finalCloser, dep interface{}) error { db.mu.Lock() fn := db.removeDepLocked(x, dep) db.mu.Unlock() return fn() } func (db *DB) removeDepLocked(x finalCloser, dep interface{}) func() error { //println(fmt.Sprintf("removeDep(%T %p, %T %p)", x, x, dep, dep)) xdep, ok := db.dep[x] if !ok { panic(fmt.Sprintf("unpaired removeDep: no deps for %T", x)) } l0 := len(xdep) delete(xdep, dep) switch len(xdep) { case l0: // Nothing removed. Shouldn't happen. panic(fmt.Sprintf("unpaired removeDep: no %T dep on %T", dep, x)) case 0: // No more dependencies. delete(db.dep, x) return x.finalClose default: // Dependencies remain. return func() error { return nil } } } // This is the size of the connectionOpener request chan (dn.openerCh). // This value should be larger than the maximum typical value // used for db.maxOpen. If maxOpen is significantly larger than // connectionRequestQueueSize then it is possible for ALL calls into the *DB // to block until the connectionOpener can satisfy the backlog of requests. 
var connectionRequestQueueSize = 1000000 // Open opens a database specified by its database driver name and a // driver-specific data source name, usually consisting of at least a // database name and connection information. // // Most users will open a database via a driver-specific connection // helper function that returns a *DB. No database drivers are included // in the Go standard library. See http://golang.org/s/sqldrivers for // a list of third-party drivers. // // Open may just validate its arguments without creating a connection // to the database. To verify that the data source name is valid, call // Ping. // // The returned DB is safe for concurrent use by multiple goroutines // and maintains its own pool of idle connections. Thus, the Open // function should be called just once. It is rarely necessary to // close a DB. func Open(driverName, dataSourceName string) (*DB, error) { driveri, ok := drivers[driverName] if !ok { return nil, fmt.Errorf("sql: unknown driver %q (forgotten import?)", driverName) } db := &DB{ driver: driveri, dsn: dataSourceName, openerCh: make(chan struct{}, connectionRequestQueueSize), lastPut: make(map[*driverConn]string), } go db.connectionOpener() return db, nil } // Ping verifies a connection to the database is still alive, // establishing a connection if necessary. func (db *DB) Ping() error { // TODO(bradfitz): give drivers an optional hook to implement // this in a more efficient or more reliable way, if they // have one. dc, err := db.conn() if err != nil { return err } db.putConn(dc, nil) return nil } // Close closes the database, releasing any open resources. // // It is rare to Close a DB, as the DB handle is meant to be // long-lived and shared between many goroutines. 
func (db *DB) Close() error { db.mu.Lock() if db.closed { // Make DB.Close idempotent db.mu.Unlock() return nil } close(db.openerCh) var err error fns := make([]func() error, 0, len(db.freeConn)) for _, dc := range db.freeConn { fns = append(fns, dc.closeDBLocked()) } db.freeConn = nil db.closed = true for _, req := range db.connRequests { close(req) } db.mu.Unlock() for _, fn := range fns { err1 := fn() if err1 != nil { err = err1 } } return err } const defaultMaxIdleConns = 2 func (db *DB) maxIdleConnsLocked() int { n := db.maxIdle switch { case n == 0: // TODO(bradfitz): ask driver, if supported, for its default preference return defaultMaxIdleConns case n < 0: return 0 default: return n } } // SetMaxIdleConns sets the maximum number of connections in the idle // connection pool. // // If MaxOpenConns is greater than 0 but less than the new MaxIdleConns // then the new MaxIdleConns will be reduced to match the MaxOpenConns limit // // If n <= 0, no idle connections are retained. func (db *DB) SetMaxIdleConns(n int) { db.mu.Lock() if n > 0 { db.maxIdle = n } else { // No idle connections. db.maxIdle = -1 } // Make sure maxIdle doesn't exceed maxOpen if db.maxOpen > 0 && db.maxIdleConnsLocked() > db.maxOpen { db.maxIdle = db.maxOpen } var closing []*driverConn idleCount := len(db.freeConn) maxIdle := db.maxIdleConnsLocked() if idleCount > maxIdle { closing = db.freeConn[maxIdle:] db.freeConn = db.freeConn[:maxIdle] } db.mu.Unlock() for _, c := range closing { c.Close() } } // SetMaxOpenConns sets the maximum number of open connections to the database. // // If MaxIdleConns is greater than 0 and the new MaxOpenConns is less than // MaxIdleConns, then MaxIdleConns will be reduced to match the new // MaxOpenConns limit // // If n <= 0, then there is no limit on the number of open connections. // The default is 0 (unlimited). 
func (db *DB) SetMaxOpenConns(n int) { db.mu.Lock() db.maxOpen = n if n < 0 { db.maxOpen = 0 } syncMaxIdle := db.maxOpen > 0 && db.maxIdleConnsLocked() > db.maxOpen db.mu.Unlock() if syncMaxIdle { db.SetMaxIdleConns(n) } } // Assumes db.mu is locked. // If there are connRequests and the connection limit hasn't been reached, // then tell the connectionOpener to open new connections. func (db *DB) maybeOpenNewConnections() { numRequests := len(db.connRequests) - db.pendingOpens if db.maxOpen > 0 { numCanOpen := db.maxOpen - (db.numOpen + db.pendingOpens) if numRequests > numCanOpen { numRequests = numCanOpen } } for numRequests > 0 { db.pendingOpens++ numRequests-- db.openerCh <- struct{}{} } } // Runs in a separate goroutine, opens new connections when requested. func (db *DB) connectionOpener() { for range db.openerCh { db.openNewConnection() } } // Open one new connection func (db *DB) openNewConnection() { ci, err := db.driver.Open(db.dsn) db.mu.Lock() defer db.mu.Unlock() if db.closed { if err == nil { ci.Close() } return } db.pendingOpens-- if err != nil { db.putConnDBLocked(nil, err) return } dc := &driverConn{ db: db, ci: ci, } if db.putConnDBLocked(dc, err) { db.addDepLocked(dc, dc) db.numOpen++ } else { ci.Close() } } // connRequest represents one request for a new connection // When there are no idle connections available, DB.conn will create // a new connRequest and put it on the db.connRequests list. type connRequest struct { conn *driverConn err error } var errDBClosed = errors.New("sql: database is closed") // conn returns a newly-opened or cached *driverConn func (db *DB) conn() (*driverConn, error) { db.mu.Lock() if db.closed { db.mu.Unlock() return nil, errDBClosed } // If db.maxOpen > 0 and the number of open connections is over the limit // and there are no free connection, make a request and wait. if db.maxOpen > 0 && db.numOpen >= db.maxOpen && len(db.freeConn) == 0 { // Make the connRequest channel. 
It's buffered so that the // connectionOpener doesn't block while waiting for the req to be read. req := make(chan connRequest, 1) db.connRequests = append(db.connRequests, req) db.maybeOpenNewConnections() db.mu.Unlock() ret := <-req return ret.conn, ret.err } if c := len(db.freeConn); c > 0 { conn := db.freeConn[0] copy(db.freeConn, db.freeConn[1:]) db.freeConn = db.freeConn[:c-1] conn.inUse = true db.mu.Unlock() return conn, nil } db.numOpen++ // optimistically db.mu.Unlock() ci, err := db.driver.Open(db.dsn) if err != nil { db.mu.Lock() db.numOpen-- // correct for earlier optimism db.mu.Unlock() return nil, err } db.mu.Lock() dc := &driverConn{ db: db, ci: ci, } db.addDepLocked(dc, dc) dc.inUse = true db.mu.Unlock() return dc, nil } var ( errConnClosed = errors.New("database/sql: internal sentinel error: conn is closed") errConnBusy = errors.New("database/sql: internal sentinel error: conn is busy") ) // connIfFree returns (wanted, nil) if wanted is still a valid conn and // isn't in use. // // The error is errConnClosed if the connection if the requested connection // is invalid because it's been closed. // // The error is errConnBusy if the connection is in use. func (db *DB) connIfFree(wanted *driverConn) (*driverConn, error) { db.mu.Lock() defer db.mu.Unlock() if wanted.dbmuClosed { return nil, errConnClosed } if wanted.inUse { return nil, errConnBusy } idx := -1 for ii, v := range db.freeConn { if v == wanted { idx = ii break } } if idx >= 0 { db.freeConn = append(db.freeConn[:idx], db.freeConn[idx+1:]...) wanted.inUse = true return wanted, nil } // TODO(bradfitz): shouldn't get here. After Go 1.1, change this to: // panic("connIfFree call requested a non-closed, non-busy, non-free conn") // Which passes all the tests, but I'm too paranoid to include this // late in Go 1.1. // Instead, treat it like a busy connection: return nil, errConnBusy } // putConnHook is a hook for testing. 
var putConnHook func(*DB, *driverConn) // noteUnusedDriverStatement notes that si is no longer used and should // be closed whenever possible (when c is next not in use), unless c is // already closed. func (db *DB) noteUnusedDriverStatement(c *driverConn, si driver.Stmt) { db.mu.Lock() defer db.mu.Unlock() if c.inUse { c.onPut = append(c.onPut, func() { si.Close() }) } else { c.Lock() defer c.Unlock() if !c.finalClosed { si.Close() } } } // debugGetPut determines whether getConn & putConn calls' stack traces // are returned for more verbose crashes. const debugGetPut = false // putConn adds a connection to the db's free pool. // err is optionally the last error that occurred on this connection. func (db *DB) putConn(dc *driverConn, err error) { db.mu.Lock() if !dc.inUse { if debugGetPut { fmt.Printf("putConn(%v) DUPLICATE was: %s\n\nPREVIOUS was: %s", dc, stack(), db.lastPut[dc]) } panic("sql: connection returned that was never out") } if debugGetPut { db.lastPut[dc] = stack() } dc.inUse = false for _, fn := range dc.onPut { fn() } dc.onPut = nil if err == driver.ErrBadConn { // Don't reuse bad connections. // Since the conn is considered bad and is being discarded, treat it // as closed. Don't decrement the open count here, finalClose will // take care of that. db.maybeOpenNewConnections() db.mu.Unlock() dc.Close() return } if putConnHook != nil { putConnHook(db, dc) } added := db.putConnDBLocked(dc, nil) db.mu.Unlock() if !added { dc.Close() } } // Satisfy a connRequest or put the driverConn in the idle pool and return true // or return false. // putConnDBLocked will satisfy a connRequest if there is one, or it will // return the *driverConn to the freeConn list if err == nil and the idle // connection limit will not be exceeded. // If err != nil, the value of dc is ignored. // If err == nil, then dc must not equal nil. // If a connRequest was fulfilled or the *driverConn was placed in the // freeConn list, then true is returned, otherwise false is returned. 
func (db *DB) putConnDBLocked(dc *driverConn, err error) bool { if c := len(db.connRequests); c > 0 { req := db.connRequests[0] // This copy is O(n) but in practice faster than a linked list. // TODO: consider compacting it down less often and // moving the base instead? copy(db.connRequests, db.connRequests[1:]) db.connRequests = db.connRequests[:c-1] if err == nil { dc.inUse = true } req <- connRequest{ conn: dc, err: err, } return true } else if err == nil && !db.closed && db.maxIdleConnsLocked() > len(db.freeConn) { db.freeConn = append(db.freeConn, dc) return true } return false } // maxBadConnRetries is the number of maximum retries if the driver returns // driver.ErrBadConn to signal a broken connection. const maxBadConnRetries = 10 // Prepare creates a prepared statement for later queries or executions. // Multiple queries or executions may be run concurrently from the // returned statement. func (db *DB) Prepare(query string) (*Stmt, error) { var stmt *Stmt var err error for i := 0; i < maxBadConnRetries; i++ { stmt, err = db.prepare(query) if err != driver.ErrBadConn { break } } return stmt, err } func (db *DB) prepare(query string) (*Stmt, error) { // TODO: check if db.driver supports an optional // driver.Preparer interface and call that instead, if so, // otherwise we make a prepared statement that's bound // to a connection, and to execute this prepared statement // we either need to use this connection (if it's free), else // get a new connection + re-prepare + execute on that one. dc, err := db.conn() if err != nil { return nil, err } dc.Lock() si, err := dc.prepareLocked(query) dc.Unlock() if err != nil { db.putConn(dc, err) return nil, err } stmt := &Stmt{ db: db, query: query, css: []connStmt{{dc, si}}, } db.addDep(stmt, stmt) db.putConn(dc, nil) return stmt, nil } // Exec executes a query without returning any rows. // The args are for any placeholder parameters in the query. 
func (db *DB) Exec(query string, args ...interface{}) (Result, error) { var res Result var err error for i := 0; i < maxBadConnRetries; i++ { res, err = db.exec(query, args) if err != driver.ErrBadConn { break } } return res, err } func (db *DB) exec(query string, args []interface{}) (res Result, err error) { dc, err := db.conn() if err != nil { return nil, err } defer func() { db.putConn(dc, err) }() if execer, ok := dc.ci.(driver.Execer); ok { dargs, err := driverArgs(nil, args) if err != nil { return nil, err } dc.Lock() resi, err := execer.Exec(query, dargs) dc.Unlock() if err != driver.ErrSkip { if err != nil { return nil, err } return driverResult{dc, resi}, nil } } dc.Lock() si, err := dc.ci.Prepare(query) dc.Unlock() if err != nil { return nil, err } defer withLock(dc, func() { si.Close() }) return resultFromStatement(driverStmt{dc, si}, args...) } // Query executes a query that returns rows, typically a SELECT. // The args are for any placeholder parameters in the query. func (db *DB) Query(query string, args ...interface{}) (*Rows, error) { var rows *Rows var err error for i := 0; i < maxBadConnRetries; i++ { rows, err = db.query(query, args) if err != driver.ErrBadConn { break } } return rows, err } func (db *DB) query(query string, args []interface{}) (*Rows, error) { ci, err := db.conn() if err != nil { return nil, err } return db.queryConn(ci, ci.releaseConn, query, args) } // queryConn executes a query on the given connection. // The connection gets released by the releaseConn function. func (db *DB) queryConn(dc *driverConn, releaseConn func(error), query string, args []interface{}) (*Rows, error) { if queryer, ok := dc.ci.(driver.Queryer); ok { dargs, err := driverArgs(nil, args) if err != nil { releaseConn(err) return nil, err } dc.Lock() rowsi, err := queryer.Query(query, dargs) dc.Unlock() if err != driver.ErrSkip { if err != nil { releaseConn(err) return nil, err } // Note: ownership of dc passes to the *Rows, to be freed // with releaseConn. 
rows := &Rows{ dc: dc, releaseConn: releaseConn, rowsi: rowsi, } return rows, nil } } dc.Lock() si, err := dc.ci.Prepare(query) dc.Unlock() if err != nil { releaseConn(err) return nil, err } ds := driverStmt{dc, si} rowsi, err := rowsiFromStatement(ds, args...) if err != nil { dc.Lock() si.Close() dc.Unlock() releaseConn(err) return nil, err } // Note: ownership of ci passes to the *Rows, to be freed // with releaseConn. rows := &Rows{ dc: dc, releaseConn: releaseConn, rowsi: rowsi, closeStmt: si, } return rows, nil } // QueryRow executes a query that is expected to return at most one row. // QueryRow always return a non-nil value. Errors are deferred until // Row's Scan method is called. func (db *DB) QueryRow(query string, args ...interface{}) *Row { rows, err := db.Query(query, args...) return &Row{rows: rows, err: err} } // Begin starts a transaction. The isolation level is dependent on // the driver. func (db *DB) Begin() (*Tx, error) { var tx *Tx var err error for i := 0; i < maxBadConnRetries; i++ { tx, err = db.begin() if err != driver.ErrBadConn { break } } return tx, err } func (db *DB) begin() (tx *Tx, err error) { dc, err := db.conn() if err != nil { return nil, err } dc.Lock() txi, err := dc.ci.Begin() dc.Unlock() if err != nil { db.putConn(dc, err) return nil, err } return &Tx{ db: db, dc: dc, txi: txi, }, nil } // Driver returns the database's underlying driver. func (db *DB) Driver() driver.Driver { return db.driver } // Tx is an in-progress database transaction. // // A transaction must end with a call to Commit or Rollback. // // After a call to Commit or Rollback, all operations on the // transaction fail with ErrTxDone. type Tx struct { db *DB // dc is owned exclusively until Commit or Rollback, at which point // it's returned with putConn. dc *driverConn txi driver.Tx // done transitions from false to true exactly once, on Commit // or Rollback. once done, all operations fail with // ErrTxDone. 
done bool // All Stmts prepared for this transaction. These will be closed after the // transaction has been committed or rolled back. stmts struct { sync.Mutex v []*Stmt } } var ErrTxDone = errors.New("sql: Transaction has already been committed or rolled back") func (tx *Tx) close() { if tx.done { panic("double close") // internal error } tx.done = true tx.db.putConn(tx.dc, nil) tx.dc = nil tx.txi = nil } func (tx *Tx) grabConn() (*driverConn, error) { if tx.done { return nil, ErrTxDone } return tx.dc, nil } // Closes all Stmts prepared for this transaction. func (tx *Tx) closePrepared() { tx.stmts.Lock() for _, stmt := range tx.stmts.v { stmt.Close() } tx.stmts.Unlock() } // Commit commits the transaction. func (tx *Tx) Commit() error { if tx.done { return ErrTxDone } defer tx.close() tx.dc.Lock() err := tx.txi.Commit() tx.dc.Unlock() if err != driver.ErrBadConn { tx.closePrepared() } return err } // Rollback aborts the transaction. func (tx *Tx) Rollback() error { if tx.done { return ErrTxDone } defer tx.close() tx.dc.Lock() err := tx.txi.Rollback() tx.dc.Unlock() if err != driver.ErrBadConn { tx.closePrepared() } return err } // Prepare creates a prepared statement for use within a transaction. // // The returned statement operates within the transaction and can no longer // be used once the transaction has been committed or rolled back. // // To use an existing prepared statement on this transaction, see Tx.Stmt. func (tx *Tx) Prepare(query string) (*Stmt, error) { // TODO(bradfitz): We could be more efficient here and either // provide a method to take an existing Stmt (created on // perhaps a different Conn), and re-create it on this Conn if // necessary. Or, better: keep a map in DB of query string to // Stmts, and have Stmt.Execute do the right thing and // re-prepare if the Conn in use doesn't have that prepared // statement. 
But we'll want to avoid caching the statement // in the case where we only call conn.Prepare implicitly // (such as in db.Exec or tx.Exec), but the caller package // can't be holding a reference to the returned statement. // Perhaps just looking at the reference count (by noting // Stmt.Close) would be enough. We might also want a finalizer // on Stmt to drop the reference count. dc, err := tx.grabConn() if err != nil { return nil, err } dc.Lock() si, err := dc.ci.Prepare(query) dc.Unlock() if err != nil { return nil, err } stmt := &Stmt{ db: tx.db, tx: tx, txsi: &driverStmt{ Locker: dc, si: si, }, query: query, } tx.stmts.Lock() tx.stmts.v = append(tx.stmts.v, stmt) tx.stmts.Unlock() return stmt, nil } // Stmt returns a transaction-specific prepared statement from // an existing statement. // // Example: // updateMoney, err := db.Prepare("UPDATE balance SET money=money+? WHERE id=?") // ... // tx, err := db.Begin() // ... // res, err := tx.Stmt(updateMoney).Exec(123.45, 98293203) func (tx *Tx) Stmt(stmt *Stmt) *Stmt { // TODO(bradfitz): optimize this. Currently this re-prepares // each time. This is fine for now to illustrate the API but // we should really cache already-prepared statements // per-Conn. See also the big comment in Tx.Prepare. if tx.db != stmt.db { return &Stmt{stickyErr: errors.New("sql: Tx.Stmt: statement from different database used")} } dc, err := tx.grabConn() if err != nil { return &Stmt{stickyErr: err} } dc.Lock() si, err := dc.ci.Prepare(stmt.query) dc.Unlock() txs := &Stmt{ db: tx.db, tx: tx, txsi: &driverStmt{ Locker: dc, si: si, }, query: stmt.query, stickyErr: err, } tx.stmts.Lock() tx.stmts.v = append(tx.stmts.v, txs) tx.stmts.Unlock() return txs } // Exec executes a query that doesn't return rows. // For example: an INSERT and UPDATE. 
func (tx *Tx) Exec(query string, args ...interface{}) (Result, error) { dc, err := tx.grabConn() if err != nil { return nil, err } if execer, ok := dc.ci.(driver.Execer); ok { dargs, err := driverArgs(nil, args) if err != nil { return nil, err } dc.Lock() resi, err := execer.Exec(query, dargs) dc.Unlock() if err == nil { return driverResult{dc, resi}, nil } if err != driver.ErrSkip { return nil, err } } dc.Lock() si, err := dc.ci.Prepare(query) dc.Unlock() if err != nil { return nil, err } defer withLock(dc, func() { si.Close() }) return resultFromStatement(driverStmt{dc, si}, args...) } // Query executes a query that returns rows, typically a SELECT. func (tx *Tx) Query(query string, args ...interface{}) (*Rows, error) { dc, err := tx.grabConn() if err != nil { return nil, err } releaseConn := func(error) {} return tx.db.queryConn(dc, releaseConn, query, args) } // QueryRow executes a query that is expected to return at most one row. // QueryRow always return a non-nil value. Errors are deferred until // Row's Scan method is called. func (tx *Tx) QueryRow(query string, args ...interface{}) *Row { rows, err := tx.Query(query, args...) return &Row{rows: rows, err: err} } // connStmt is a prepared statement on a particular connection. type connStmt struct { dc *driverConn si driver.Stmt } // Stmt is a prepared statement. Stmt is safe for concurrent use by multiple goroutines. type Stmt struct { // Immutable: db *DB // where we came from query string // that created the Stmt stickyErr error // if non-nil, this error is returned for all operations closemu sync.RWMutex // held exclusively during close, for read otherwise. // If in a transaction, else both nil: tx *Tx txsi *driverStmt mu sync.Mutex // protects the rest of the fields closed bool // css is a list of underlying driver statement interfaces // that are valid on particular connections. This is only // used if tx == nil and one is found that has idle // connections. If tx != nil, txsi is always used. 
css []connStmt } // Exec executes a prepared statement with the given arguments and // returns a Result summarizing the effect of the statement. func (s *Stmt) Exec(args ...interface{}) (Result, error) { s.closemu.RLock() defer s.closemu.RUnlock() var res Result for i := 0; i < maxBadConnRetries; i++ { dc, releaseConn, si, err := s.connStmt() if err != nil { if err == driver.ErrBadConn { continue } return nil, err } res, err = resultFromStatement(driverStmt{dc, si}, args...) releaseConn(err) if err != driver.ErrBadConn { return res, err } } return nil, driver.ErrBadConn } func resultFromStatement(ds driverStmt, args ...interface{}) (Result, error) { ds.Lock() want := ds.si.NumInput() ds.Unlock() // -1 means the driver doesn't know how to count the number of // placeholders, so we won't sanity check input here and instead let the // driver deal with errors. if want != -1 && len(args) != want { return nil, fmt.Errorf("sql: expected %d arguments, got %d", want, len(args)) } dargs, err := driverArgs(&ds, args) if err != nil { return nil, err } ds.Lock() resi, err := ds.si.Exec(dargs) ds.Unlock() if err != nil { return nil, err } return driverResult{ds.Locker, resi}, nil } // connStmt returns a free driver connection on which to execute the // statement, a function to call to release the connection, and a // statement bound to that connection. func (s *Stmt) connStmt() (ci *driverConn, releaseConn func(error), si driver.Stmt, err error) { if err = s.stickyErr; err != nil { return } s.mu.Lock() if s.closed { s.mu.Unlock() err = errors.New("sql: statement is closed") return } // In a transaction, we always use the connection that the // transaction was created on. if s.tx != nil { s.mu.Unlock() ci, err = s.tx.grabConn() // blocks, waiting for the connection. 
if err != nil { return } releaseConn = func(error) {} return ci, releaseConn, s.txsi.si, nil } for i := 0; i < len(s.css); i++ { v := s.css[i] _, err := s.db.connIfFree(v.dc) if err == nil { s.mu.Unlock() return v.dc, v.dc.releaseConn, v.si, nil } if err == errConnClosed { // Lazily remove dead conn from our freelist. s.css[i] = s.css[len(s.css)-1] s.css = s.css[:len(s.css)-1] i-- } } s.mu.Unlock() // If all connections are busy, either wait for one to become available (if // we've already hit the maximum number of open connections) or create a // new one. // // TODO(bradfitz): or always wait for one? make configurable later? dc, err := s.db.conn() if err != nil { return nil, nil, nil, err } // Do another pass over the list to see whether this statement has // already been prepared on the connection assigned to us. s.mu.Lock() for _, v := range s.css { if v.dc == dc { s.mu.Unlock() return dc, dc.releaseConn, v.si, nil } } s.mu.Unlock() // No luck; we need to prepare the statement on this connection dc.Lock() si, err = dc.prepareLocked(s.query) dc.Unlock() if err != nil { s.db.putConn(dc, err) return nil, nil, nil, err } s.mu.Lock() cs := connStmt{dc, si} s.css = append(s.css, cs) s.mu.Unlock() return dc, dc.releaseConn, si, nil } // Query executes a prepared query statement with the given arguments // and returns the query results as a *Rows. func (s *Stmt) Query(args ...interface{}) (*Rows, error) { s.closemu.RLock() defer s.closemu.RUnlock() var rowsi driver.Rows for i := 0; i < maxBadConnRetries; i++ { dc, releaseConn, si, err := s.connStmt() if err != nil { if err == driver.ErrBadConn { continue } return nil, err } rowsi, err = rowsiFromStatement(driverStmt{dc, si}, args...) if err == nil { // Note: ownership of ci passes to the *Rows, to be freed // with releaseConn. 
rows := &Rows{ dc: dc, rowsi: rowsi, // releaseConn set below } s.db.addDep(s, rows) rows.releaseConn = func(err error) { releaseConn(err) s.db.removeDep(s, rows) } return rows, nil } releaseConn(err) if err != driver.ErrBadConn { return nil, err } } return nil, driver.ErrBadConn } func rowsiFromStatement(ds driverStmt, args ...interface{}) (driver.Rows, error) { ds.Lock() want := ds.si.NumInput() ds.Unlock() // -1 means the driver doesn't know how to count the number of // placeholders, so we won't sanity check input here and instead let the // driver deal with errors. if want != -1 && len(args) != want { return nil, fmt.Errorf("sql: statement expects %d inputs; got %d", want, len(args)) } dargs, err := driverArgs(&ds, args) if err != nil { return nil, err } ds.Lock() rowsi, err := ds.si.Query(dargs) ds.Unlock() if err != nil { return nil, err } return rowsi, nil } // QueryRow executes a prepared query statement with the given arguments. // If an error occurs during the execution of the statement, that error will // be returned by a call to Scan on the returned *Row, which is always non-nil. // If the query selects no rows, the *Row's Scan will return ErrNoRows. // Otherwise, the *Row's Scan scans the first selected row and discards // the rest. // // Example usage: // // var name string // err := nameByUseridStmt.QueryRow(id).Scan(&name) func (s *Stmt) QueryRow(args ...interface{}) *Row { rows, err := s.Query(args...) if err != nil { return &Row{err: err} } return &Row{rows: rows} } // Close closes the statement. 
func (s *Stmt) Close() error { s.closemu.Lock() defer s.closemu.Unlock() if s.stickyErr != nil { return s.stickyErr } s.mu.Lock() if s.closed { s.mu.Unlock() return nil } s.closed = true if s.tx != nil { s.txsi.Close() s.mu.Unlock() return nil } s.mu.Unlock() return s.db.removeDep(s, s) } func (s *Stmt) finalClose() error { s.mu.Lock() defer s.mu.Unlock() if s.css != nil { for _, v := range s.css { s.db.noteUnusedDriverStatement(v.dc, v.si) v.dc.removeOpenStmt(v.si) } s.css = nil } return nil } // Rows is the result of a query. Its cursor starts before the first row // of the result set. Use Next to advance through the rows: // // rows, err := db.Query("SELECT ...") // ... // defer rows.Close() // for rows.Next() { // var id int // var name string // err = rows.Scan(&id, &name) // ... // } // err = rows.Err() // get any error encountered during iteration // ... type Rows struct { dc *driverConn // owned; must call releaseConn when closed to release releaseConn func(error) rowsi driver.Rows closed bool lastcols []driver.Value lasterr error // non-nil only if closed is true closeStmt driver.Stmt // if non-nil, statement to Close on close } // Next prepares the next result row for reading with the Scan method. It // returns true on success, or false if there is no next result row or an error // happened while preparing it. Err should be consulted to distinguish between // the two cases. // // Every call to Scan, even the first one, must be preceded by a call to Next. func (rs *Rows) Next() bool { if rs.closed { return false } if rs.lastcols == nil { rs.lastcols = make([]driver.Value, len(rs.rowsi.Columns())) } rs.lasterr = rs.rowsi.Next(rs.lastcols) if rs.lasterr != nil { rs.Close() return false } return true } // Err returns the error, if any, that was encountered during iteration. // Err may be called after an explicit or implicit Close. func (rs *Rows) Err() error { if rs.lasterr == io.EOF { return nil } return rs.lasterr } // Columns returns the column names. 
// Columns returns an error if the rows are closed, or if the rows // are from QueryRow and there was a deferred error. func (rs *Rows) Columns() ([]string, error) { if rs.closed { return nil, errors.New("sql: Rows are closed") } if rs.rowsi == nil { return nil, errors.New("sql: no Rows available") } return rs.rowsi.Columns(), nil } // Scan copies the columns in the current row into the values pointed // at by dest. // // If an argument has type *[]byte, Scan saves in that argument a copy // of the corresponding data. The copy is owned by the caller and can // be modified and held indefinitely. The copy can be avoided by using // an argument of type *RawBytes instead; see the documentation for // RawBytes for restrictions on its use. // // If an argument has type *interface{}, Scan copies the value // provided by the underlying driver without conversion. If the value // is of type []byte, a copy is made and the caller owns the result. func (rs *Rows) Scan(dest ...interface{}) error { if rs.closed { return errors.New("sql: Rows are closed") } if rs.lastcols == nil { return errors.New("sql: Scan called without calling Next") } if len(dest) != len(rs.lastcols) { return fmt.Errorf("sql: expected %d destination arguments in Scan, not %d", len(rs.lastcols), len(dest)) } for i, sv := range rs.lastcols { err := convertAssign(dest[i], sv) if err != nil { return fmt.Errorf("sql: Scan error on column index %d: %v", i, err) } } return nil } var rowsCloseHook func(*Rows, *error) // Close closes the Rows, preventing further enumeration. If Next returns // false, the Rows are closed automatically and it will suffice to check the // result of Err. Close is idempotent and does not affect the result of Err. 
func (rs *Rows) Close() error {
	if rs.closed {
		return nil
	}
	rs.closed = true
	// Close the driver cursor first; its error is what we ultimately return
	// (after the test hook has had a chance to observe/override it).
	err := rs.rowsi.Close()
	if fn := rowsCloseHook; fn != nil {
		fn(rs, &err)
	}
	// If these Rows own an implicitly-created statement, close it too.
	if rs.closeStmt != nil {
		rs.closeStmt.Close()
	}
	// Hand the connection back; rs.dc must not be used after this point.
	rs.releaseConn(err)
	return err
}

// Row is the result of calling QueryRow to select a single row.
type Row struct {
	// One of these two will be non-nil:
	err  error // deferred error for easy chaining
	rows *Rows
}

// Scan copies the columns from the matched row into the values
// pointed at by dest. If more than one row matches the query,
// Scan uses the first row and discards the rest. If no row matches
// the query, Scan returns ErrNoRows.
func (r *Row) Scan(dest ...interface{}) error {
	if r.err != nil {
		return r.err
	}

	// TODO(bradfitz): for now we need to defensively clone all
	// []byte that the driver returned (not permitting
	// *RawBytes in Rows.Scan), since we're about to close
	// the Rows in our defer, when we return from this function.
	// the contract with the driver.Next(...) interface is that it
	// can return slices into read-only temporary memory that's
	// only valid until the next Scan/Close. But the TODO is that
	// for a lot of drivers, this copy will be unnecessary. We
	// should provide an optional interface for drivers to
	// implement to say, "don't worry, the []bytes that I return
	// from Next will not be modified again." (for instance, if
	// they were obtained from the network anyway) But for now we
	// don't care.
	defer r.rows.Close()
	// A *RawBytes destination would alias driver-owned memory that dies
	// when the deferred Close above runs, so reject it up front.
	for _, dp := range dest {
		if _, ok := dp.(*RawBytes); ok {
			return errors.New("sql: RawBytes isn't allowed on Row.Scan")
		}
	}

	if !r.rows.Next() {
		// Distinguish an iteration error from a genuinely empty result.
		if err := r.rows.Err(); err != nil {
			return err
		}
		return ErrNoRows
	}
	err := r.rows.Scan(dest...)
	if err != nil {
		return err
	}
	// Make sure the query can be processed to completion with no errors.
	if err := r.rows.Close(); err != nil {
		return err
	}

	return nil
}

// A Result summarizes an executed SQL command.
type Result interface {
	// LastInsertId returns the integer generated by the database
	// in response to a command. Typically this will be from an
	// "auto increment" column when inserting a new row. Not all
	// databases support this feature, and the syntax of such
	// statements varies.
	LastInsertId() (int64, error)

	// RowsAffected returns the number of rows affected by an
	// update, insert, or delete. Not every database or database
	// driver may support this.
	RowsAffected() (int64, error)
}

// driverResult adapts a driver.Result to the public Result interface.
// All calls into the driver are serialized through the owning
// connection's lock (the embedded sync.Locker is the *driverConn).
type driverResult struct {
	sync.Locker // the *driverConn
	resi        driver.Result
}

// LastInsertId forwards to the driver while holding the connection lock,
// since drivers are not required to be safe for concurrent use.
func (dr driverResult) LastInsertId() (int64, error) {
	dr.Lock()
	defer dr.Unlock()
	return dr.resi.LastInsertId()
}

// RowsAffected forwards to the driver while holding the connection lock.
func (dr driverResult) RowsAffected() (int64, error) {
	dr.Lock()
	defer dr.Unlock()
	return dr.resi.RowsAffected()
}

// stack returns the current goroutine's stack trace; debugging helper only.
func stack() string {
	var buf [2 << 10]byte
	return string(buf[:runtime.Stack(buf[:], false)])
}

// withLock runs while holding lk.
func withLock(lk sync.Locker, fn func()) {
	lk.Lock()
	fn()
	lk.Unlock()
}
{ "pile_set_name": "Github" }
{ "id": "white-wet-suit", "name": "White Wet Suit", "category": "Wetsuits", "games": { "nl": { "orderable": false, "sources": [ "Island Gift Shop (Club Tortimer)" ], "buyPrices": [ { "currency": "bells", "value": 40 } ] } } }
{ "pile_set_name": "Github" }
--TEST--
Testing $argc and $argv handling (cli)
--SKIPIF--
<?php if(php_sapi_name()!='cli') echo 'skip'; ?>
--INI--
register_argc_argv=1
variables_order=GPS
--ARGS--
ab cd ef 123 test
--FILE--
<?php
// With register_argc_argv=1 and "S" present in variables_order, the CLI
// SAPI publishes the argument count and vector through $_SERVER.
$argc = $_SERVER['argc'];
$argv = $_SERVER['argv'];

// $argv[0] is the script path; print the remaining arguments zero-indexed
// so the output matches the --EXPECT-- section below.
for ($i=1; $i<$argc; $i++) {
	echo ($i-1).": ".$argv[$i]."\n";
}
?>
--EXPECT--
0: ab
1: cd
2: ef
3: 123
4: test
{ "pile_set_name": "Github" }
import { Timeline } from './Timeline';
import { TimelineChunks } from './TimelineChunks';
import { Vector3 } from 'three';

// Timeline segment that linearly translates vertices between two points.
const TranslationSegment = {
  // Builds the GLSL for this segment: a from/to vec3 constant pair
  // (formatted to 2 decimal places by TimelineChunks.vec3) and an
  // applyTransform function that adds mix(from, to, progress) to the
  // vertex. NOTE(review): the `progress` variable and the render-check
  // guard are emitted by the TimelineChunks helpers — see those chunk
  // generators for the exact GLSL they produce.
  compiler: function(segment) {
    return `
    ${TimelineChunks.delayDuration(segment)}
    ${TimelineChunks.vec3(`cTranslateFrom${segment.key}`, segment.transition.from, 2)}
    ${TimelineChunks.vec3(`cTranslateTo${segment.key}`, segment.transition.to, 2)}
    
    void applyTransform${segment.key}(float time, inout vec3 v) {
    ${TimelineChunks.renderCheck(segment)}
    ${TimelineChunks.progress(segment)}
    
      v += mix(cTranslateFrom${segment.key}, cTranslateTo${segment.key}, progress);
    }
    `;
  },
  // Fallback starting value used when a segment omits an explicit `from`.
  defaultFrom: new Vector3(0, 0, 0)
};

// Register under the 'translate' key so timelines can reference it by name.
Timeline.register('translate', TranslationSegment);

export { TranslationSegment };
{ "pile_set_name": "Github" }
/* t_spki.c */ /* * Written by Dr Stephen N Henson (steve@openssl.org) for the OpenSSL project * 1999. */ /* ==================================================================== * Copyright (c) 1999 The OpenSSL Project. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * 3. All advertising materials mentioning features or use of this * software must display the following acknowledgment: * "This product includes software developed by the OpenSSL Project * for use in the OpenSSL Toolkit. (http://www.OpenSSL.org/)" * * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to * endorse or promote products derived from this software without * prior written permission. For written permission, please contact * licensing@OpenSSL.org. * * 5. Products derived from this software may not be called "OpenSSL" * nor may "OpenSSL" appear in their names without prior written * permission of the OpenSSL Project. * * 6. Redistributions of any form whatsoever must retain the following * acknowledgment: * "This product includes software developed by the OpenSSL Project * for use in the OpenSSL Toolkit (http://www.OpenSSL.org/)" * * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE OpenSSL PROJECT OR
 * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 * ====================================================================
 *
 * This product includes cryptographic software written by Eric Young
 * (eay@cryptsoft.com). This product includes software written by Tim
 * Hudson (tjh@cryptsoft.com).
 *
 */

#include <stdio.h>
#include "cryptlib.h"
#include <openssl/x509.h>
#include <openssl/asn1.h>
#ifndef OPENSSL_NO_RSA
# include <openssl/rsa.h>
#endif
#ifndef OPENSSL_NO_DSA
# include <openssl/dsa.h>
#endif
#include <openssl/bn.h>

/*
 * Print out an SPKI (Netscape signed-public-key-and-challenge) in a
 * human-readable form to the given BIO. Always returns 1.
 */
int NETSCAPE_SPKI_print(BIO *out, NETSCAPE_SPKI *spki)
{
    EVP_PKEY *pkey;
    ASN1_IA5STRING *chal;
    int i, n;
    char *s;
    BIO_printf(out, "Netscape SPKI:\n");
    /* Public-key algorithm: OID -> NID -> long name, or UNKNOWN. */
    i = OBJ_obj2nid(spki->spkac->pubkey->algor->algorithm);
    BIO_printf(out, "  Public Key Algorithm: %s\n",
               (i == NID_undef) ? "UNKNOWN" : OBJ_nid2ln(i));
    pkey = X509_PUBKEY_get(spki->spkac->pubkey);
    if (!pkey)
        BIO_printf(out, "  Unable to load public key\n");
    else {
        /* X509_PUBKEY_get bumps the refcount; release our reference. */
        EVP_PKEY_print_public(out, pkey, 4, NULL);
        EVP_PKEY_free(pkey);
    }
    /* Optional challenge string embedded in the SPKAC. */
    chal = spki->spkac->challenge;
    if (chal->length)
        BIO_printf(out, "  Challenge String: %s\n", chal->data);
    i = OBJ_obj2nid(spki->sig_algor->algorithm);
    BIO_printf(out, "  Signature Algorithm: %s",
               (i == NID_undef) ? "UNKNOWN" : OBJ_nid2ln(i));

    /* Dump the signature as colon-separated hex, 18 bytes per line. */
    n = spki->signature->length;
    s = (char *)spki->signature->data;
    for (i = 0; i < n; i++) {
        if ((i % 18) == 0)
            BIO_write(out, "\n      ", 7); /* newline + 6-space indent */
        BIO_printf(out, "%02x%s", (unsigned char)s[i],
                   ((i + 1) == n) ? "" : ":");
    }
    BIO_write(out, "\n", 1);
    return 1;
}
{ "pile_set_name": "Github" }
All suggestions or complaints should be sent to xaizek@posteo.net. Current author is xaizek <xaizek@posteo.net> (since 2011). Original author is Ken Steen <ksteen@users.sourceforge.net> (2001 - 2011). The pauseme script (removed in 0.6.3) was from the tkdesk program. Some of the code is from emelfm by Michael Clark. The screen-open-region-with-program and screen-ren-program-in-region were written by Miciah Dashiel Butler Masters. The file sorting function is from the git program by Tudor Hulubei and Andrei Pitis. A patch for filetypes detection was written by Stefan Walter. FreeBSD and NetBSD ports by Stefan Walter. Debian package by Edelhard Becker. Terry Brown wrote part of the keybinding code. Gentoo package by Peter Johanson. Ralf Arens added the configurable color code and rewrote the bookmarks code. Dmitry Suzdalev fixed the :!<Enter> bug. MacOSX Fink package by Damien Ferrand Karsten Schoelzel - submitted a patch to fix the handling of files with a % in the name. Ali Gholami Rudi - submitted patch fixing buffer overflow in colorschemes. Ondrej Martinak - Patch to fix handling of directories with spaces. Patch for file sizes larger than 4 Gb and to correct typos. pld-linux group patch for malloc in colorschemes Woody Kiang - command completion and wide character support in :commands. Minozake - start vifm --help fix Dan Price - wrote the human friendly file size and the ascending and descending sort. from fork of wsdookadr (Petrea Stefan Corneliu): - %b - unmount all FUSE mounts on exit - functional keys in user mappings - idea of temporary blocking inotify events from very active files seatest is written by Keith Nicholas Patches for Vim plugin and handling of errors from external applications without temporary files by Colin Cartade. Patch that fixes using of invalid (or at least strange) item names in the stat structure by Kaspars Bankovskis. Chris Yuen provided a patch to fix compilation on Mac OS X. 
Chris Skalenda wrote a patch to allow setting the trash directory location. Miles (pkordy) provided a patch to fix opening of tagged (selected) symbolic links to a directory. Gentoo ebuild by Oleg Gordienko (gordio). Seth VanHeulen (a.k.a. svanheulen) added integration with tmux. MadMaverick9 provided patches for various fixes. lyuts fixed trash emptying in csh-like shells. Patches for Vim plugin by Jonathan Da Silva. More human-friendly (xterm-like) names for 256 colors by Michael Vetter (a.k.a. jubalh). Some build system fixes/updates/best practices by Hendrik Jaeger (a.k.a. henk). Shell completions and patches for Vim plugin, vifm-screen-split and documentation by filterfalse. Christian Fillion (a.k.a. cfillion) fixed crash on navigation to end of line in command-line mode. %FOREGROUND FUSE mount flag by Johannes (a.k.a. johannesmeng). Brian Callahan (a.k.a. ibara) fixed build on OpenBSD. Cosmin Popescu (a.k.a. cosminadrianpopescu) added "*" to 'slowfs', match numbers to search messages, :shell!, extension sorting fix, fileext sorting key, dynamic view column alignment. Michel Normand fixed compilation for PowerPC where PAGE_SIZE isn't defined. Dennis Hamester fixed interaction with emacsclient due to reopening terminal as /dev/tty. Svyatoslav Mishyn (a.k.a. juef) aligned options in conflict resolution dialog. oo- added shortcuts to dialogs. Dmitry Frank (a.k.a. dimonomid) provided multiple patches. John Shea (a.k.a. coachshea) provided fix for the plugin related to neovim. Daniel Mueller fixed CWD of the process not matching current view after vifm picked up change in file system and on startup. Ma_Sys.ma implemented 'syncregs' option which defines group of instances that share registers. Konst Mayer (a.k.a. cdlscpmv) dropped leading space from value of 'timefmt'. rbong authored a number of changes that extend functionality of the Vim plugin originally as a separate plugin for neovim, later merged into the main one. zsugabubus provided multiple patches. 
Von Welch submitted vifm-media-osx script. Richard Benson provided icon for the project. Roman Plášil (a.k.a. Quiark) authored script for converting Vim's color scheme into a Vifm's one. Bas Bossink (a.k.a. basbossink) fixed issues with non-tiny 'shellcmdflag' on Windows. Stephen Horst (a.k.a. sjhorst) fixed adding files with spaces in their names as e-mail attachments in the plugin. Hans Bieshaar added HardLink highlight group. Alborz Jafari added rate and progress bar to progress dialog.
{ "pile_set_name": "Github" }
/*
    This file is part of TON Blockchain Library.

    TON Blockchain Library is free software: you can redistribute it and/or modify
    it under the terms of the GNU Lesser General Public License as published by
    the Free Software Foundation, either version 2 of the License, or
    (at your option) any later version.

    TON Blockchain Library is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU Lesser General Public License for more details.

    You should have received a copy of the GNU Lesser General Public License
    along with TON Blockchain Library.  If not, see <http://www.gnu.org/licenses/>.

    Copyright 2017-2020 Telegram Systems LLP
*/
#include "td/utils/port/PollFlags.h"

namespace td {

// Atomically merges `flags` into the pending set (to_write_), which other
// threads stage updates into. Returns true iff at least one of the given
// flags was not already pending.
bool PollFlagsSet::write_flags(PollFlags flags) {
  if (flags.empty()) {
    return false;
  }
  auto old_flags = to_write_.fetch_or(flags.raw(), std::memory_order_relaxed);
  return (flags.raw() & ~old_flags) != 0;
}

// Merges `flags` directly into the local set, bypassing the atomic staging
// area. Returns true iff the local set changed.
bool PollFlagsSet::write_flags_local(PollFlags flags) {
  return flags_.add_flags(flags);
}

// Drains the pending set into the local set. Once Close is observed, the
// Write flag is dropped (there is no point signalling writability on a
// closed descriptor). Returns true iff the local set changed.
// NOTE(review): flags_ is modified from a const method — presumably it is
// declared mutable in the header; confirm there.
bool PollFlagsSet::flush() const {
  if (to_write_.load(std::memory_order_relaxed) == 0) {
    return false;
  }
  auto to_write = to_write_.exchange(0, std::memory_order_relaxed);
  auto old_flags = flags_;
  flags_.add_flags(PollFlags::from_raw(to_write));
  if (flags_.can_close()) {
    flags_.remove_flags(PollFlags::Write());
  }
  return flags_ != old_flags;
}

// Returns the local flags after folding in any pending updates.
PollFlags PollFlagsSet::read_flags() const {
  flush();
  return flags_;
}

// Returns the local flags without touching the atomic staging area.
PollFlags PollFlagsSet::read_flags_local() const {
  return flags_;
}

// Removes the given flags from the local set only.
void PollFlagsSet::clear_flags(PollFlags flags) {
  flags_.remove_flags(flags);
}

// Resets both the pending and local sets to empty.
void PollFlagsSet::clear() {
  to_write_ = 0;
  flags_ = {};
}

// Debug formatting, e.g. "[RW]": R = readable, W = writable, C = closed,
// E = pending error.
StringBuilder &operator<<(StringBuilder &sb, PollFlags flags) {
  sb << '[';
  if (flags.can_read()) {
    sb << 'R';
  }
  if (flags.can_write()) {
    sb << 'W';
  }
  if (flags.can_close()) {
    sb << 'C';
  }
  if (flags.has_pending_error()) {
    sb << 'E';
  }
  return sb << ']';
}

}  // namespace td
{ "pile_set_name": "Github" }
/* amdgpu_drm.h -- Public header for the amdgpu driver -*- linux-c -*- * * Copyright 2000 Precision Insight, Inc., Cedar Park, Texas. * Copyright 2000 VA Linux Systems, Inc., Fremont, California. * Copyright 2002 Tungsten Graphics, Inc., Cedar Park, Texas. * Copyright 2014 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: * Kevin E. 
Martin <martin@valinux.com> * Gareth Hughes <gareth@valinux.com> * Keith Whitwell <keith@tungstengraphics.com> */ #ifndef __AMDGPU_DRM_H__ #define __AMDGPU_DRM_H__ #include "drm.h" #if defined(__cplusplus) extern "C" { #endif #define DRM_AMDGPU_GEM_CREATE 0x00 #define DRM_AMDGPU_GEM_MMAP 0x01 #define DRM_AMDGPU_CTX 0x02 #define DRM_AMDGPU_BO_LIST 0x03 #define DRM_AMDGPU_CS 0x04 #define DRM_AMDGPU_INFO 0x05 #define DRM_AMDGPU_GEM_METADATA 0x06 #define DRM_AMDGPU_GEM_WAIT_IDLE 0x07 #define DRM_AMDGPU_GEM_VA 0x08 #define DRM_AMDGPU_WAIT_CS 0x09 #define DRM_AMDGPU_GEM_OP 0x10 #define DRM_AMDGPU_GEM_USERPTR 0x11 #define DRM_AMDGPU_WAIT_FENCES 0x12 #define DRM_AMDGPU_VM 0x13 #define DRM_AMDGPU_FENCE_TO_HANDLE 0x14 #define DRM_AMDGPU_SCHED 0x15 #define DRM_IOCTL_AMDGPU_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_CREATE, union drm_amdgpu_gem_create) #define DRM_IOCTL_AMDGPU_GEM_MMAP DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_MMAP, union drm_amdgpu_gem_mmap) #define DRM_IOCTL_AMDGPU_CTX DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_CTX, union drm_amdgpu_ctx) #define DRM_IOCTL_AMDGPU_BO_LIST DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_BO_LIST, union drm_amdgpu_bo_list) #define DRM_IOCTL_AMDGPU_CS DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_CS, union drm_amdgpu_cs) #define DRM_IOCTL_AMDGPU_INFO DRM_IOW(DRM_COMMAND_BASE + DRM_AMDGPU_INFO, struct drm_amdgpu_info) #define DRM_IOCTL_AMDGPU_GEM_METADATA DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_METADATA, struct drm_amdgpu_gem_metadata) #define DRM_IOCTL_AMDGPU_GEM_WAIT_IDLE DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_WAIT_IDLE, union drm_amdgpu_gem_wait_idle) #define DRM_IOCTL_AMDGPU_GEM_VA DRM_IOW(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_VA, struct drm_amdgpu_gem_va) #define DRM_IOCTL_AMDGPU_WAIT_CS DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_WAIT_CS, union drm_amdgpu_wait_cs) #define DRM_IOCTL_AMDGPU_GEM_OP DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_OP, struct drm_amdgpu_gem_op) #define DRM_IOCTL_AMDGPU_GEM_USERPTR DRM_IOWR(DRM_COMMAND_BASE + 
DRM_AMDGPU_GEM_USERPTR, struct drm_amdgpu_gem_userptr)
#define DRM_IOCTL_AMDGPU_WAIT_FENCES	DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_WAIT_FENCES, union drm_amdgpu_wait_fences)
#define DRM_IOCTL_AMDGPU_VM		DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_VM, union drm_amdgpu_vm)
#define DRM_IOCTL_AMDGPU_FENCE_TO_HANDLE DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_FENCE_TO_HANDLE, union drm_amdgpu_fence_to_handle)
#define DRM_IOCTL_AMDGPU_SCHED		DRM_IOW(DRM_COMMAND_BASE + DRM_AMDGPU_SCHED, union drm_amdgpu_sched)

/*
 * Memory domains a buffer object (BO) can be placed in.
 * Used in drm_amdgpu_gem_create_in::domains.
 */
#define AMDGPU_GEM_DOMAIN_CPU		0x1
#define AMDGPU_GEM_DOMAIN_GTT		0x2
#define AMDGPU_GEM_DOMAIN_VRAM		0x4
#define AMDGPU_GEM_DOMAIN_GDS		0x8
#define AMDGPU_GEM_DOMAIN_GWS		0x10
#define AMDGPU_GEM_DOMAIN_OA		0x20

/* Flag that CPU access will be required for the case of VRAM domain */
#define AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED	(1 << 0)
/* Flag that CPU access will not work, this VRAM domain is invisible */
#define AMDGPU_GEM_CREATE_NO_CPU_ACCESS		(1 << 1)
/* Flag that USWC attributes should be used for GTT */
#define AMDGPU_GEM_CREATE_CPU_GTT_USWC		(1 << 2)
/* Flag that the memory should be in VRAM and cleared */
#define AMDGPU_GEM_CREATE_VRAM_CLEARED		(1 << 3)
/* Flag that create shadow bo(GTT) while allocating vram bo */
#define AMDGPU_GEM_CREATE_SHADOW		(1 << 4)
/* Flag that allocating the BO should use linear VRAM */
#define AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS	(1 << 5)
/* Flag that BO is always valid in this VM */
#define AMDGPU_GEM_CREATE_VM_ALWAYS_VALID	(1 << 6)
/* Flag that BO sharing will be explicitly synchronized */
#define AMDGPU_GEM_CREATE_EXPLICIT_SYNC		(1 << 7)

/* Input half of the GEM_CREATE ioctl argument: describes the allocation. */
struct drm_amdgpu_gem_create_in  {
	/** the requested memory size */
	__u64 bo_size;
	/** physical start_addr alignment in bytes for some HW requirements */
	__u64 alignment;
	/** the requested memory domains */
	__u64 domains;
	/** allocation flags */
	__u64 domain_flags;
};

struct drm_amdgpu_gem_create_out {
	/** returned GEM object handle */
	__u32 handle;
	__u32 _pad;
};

/* GEM_CREATE is DRM_IOWR: the kernel reads `in` and writes `out`. */
union drm_amdgpu_gem_create {
	struct drm_amdgpu_gem_create_in		in;
	struct drm_amdgpu_gem_create_out	out;
};

/** Opcode to create new residency list.  */
#define AMDGPU_BO_LIST_OP_CREATE	0
/** Opcode to destroy previously created residency list */
#define AMDGPU_BO_LIST_OP_DESTROY	1
/** Opcode to update resource information in the list */
#define AMDGPU_BO_LIST_OP_UPDATE	2

struct drm_amdgpu_bo_list_in {
	/** Type of operation (AMDGPU_BO_LIST_OP_*) */
	__u32 operation;
	/** Handle of list or 0 if we want to create one */
	__u32 list_handle;
	/** Number of BOs in list */
	__u32 bo_number;
	/** Size of each element describing BO */
	__u32 bo_info_size;
	/** Pointer to array describing BOs */
	__u64 bo_info_ptr;
};

struct drm_amdgpu_bo_list_entry {
	/** Handle of BO */
	__u32 bo_handle;
	/** New (if specified) BO priority to be used during migration */
	__u32 bo_priority;
};

struct drm_amdgpu_bo_list_out {
	/** Handle of resource list */
	__u32 list_handle;
	__u32 _pad;
};

union drm_amdgpu_bo_list {
	struct drm_amdgpu_bo_list_in in;
	struct drm_amdgpu_bo_list_out out;
};

/* context related */
#define AMDGPU_CTX_OP_ALLOC_CTX	1
#define AMDGPU_CTX_OP_FREE_CTX	2
#define AMDGPU_CTX_OP_QUERY_STATE	3

/* GPU reset status */
#define AMDGPU_CTX_NO_RESET		0
/* this context caused it */
#define AMDGPU_CTX_GUILTY_RESET	1
/* some other context caused it */
#define AMDGPU_CTX_INNOCENT_RESET	2
/* unknown cause */
#define AMDGPU_CTX_UNKNOWN_RESET	3

/* Context priority level */
#define AMDGPU_CTX_PRIORITY_UNSET       -2048
#define AMDGPU_CTX_PRIORITY_VERY_LOW    -1023
#define AMDGPU_CTX_PRIORITY_LOW         -512
#define AMDGPU_CTX_PRIORITY_NORMAL      0
/* Selecting a priority above NORMAL requires CAP_SYS_NICE or DRM_MASTER */
#define AMDGPU_CTX_PRIORITY_HIGH        512
#define AMDGPU_CTX_PRIORITY_VERY_HIGH   1023

struct drm_amdgpu_ctx_in {
	/** AMDGPU_CTX_OP_* */
	__u32	op;
	/** For future use, no flags defined so far */
	__u32	flags;
	__u32	ctx_id;
	__s32	priority;
};

union drm_amdgpu_ctx_out {
	struct {
		__u32	ctx_id;
		__u32	_pad;
	} alloc;

	struct {
		/** For future use, no flags defined so far */
		__u64	flags;
		/** Number of resets caused by this context so far. */
		__u32	hangs;
		/** Reset status since the last call of the ioctl. */
		__u32	reset_status;
	} state;
};

union drm_amdgpu_ctx {
	struct drm_amdgpu_ctx_in in;
	union drm_amdgpu_ctx_out out;
};

/* vm ioctl */
#define AMDGPU_VM_OP_RESERVE_VMID	1
#define AMDGPU_VM_OP_UNRESERVE_VMID	2

struct drm_amdgpu_vm_in {
	/** AMDGPU_VM_OP_* */
	__u32	op;
	__u32	flags;
};

struct drm_amdgpu_vm_out {
	/** For future use, no flags defined so far */
	__u64	flags;
};

union drm_amdgpu_vm {
	struct drm_amdgpu_vm_in in;
	struct drm_amdgpu_vm_out out;
};

/* sched ioctl */
#define AMDGPU_SCHED_OP_PROCESS_PRIORITY_OVERRIDE	1

struct drm_amdgpu_sched_in {
	/* AMDGPU_SCHED_OP_* */
	__u32	op;
	__u32	fd;
	__s32	priority;
	__u32   flags;
};

union drm_amdgpu_sched {
	struct drm_amdgpu_sched_in in;
};

/*
 * This is not a reliable API and you should expect it to fail for any
 * number of reasons and have fallback paths that do not use userptr to
 * perform any operation.
 */
#define AMDGPU_GEM_USERPTR_READONLY	(1 << 0)
#define AMDGPU_GEM_USERPTR_ANONONLY	(1 << 1)
#define AMDGPU_GEM_USERPTR_VALIDATE	(1 << 2)
#define AMDGPU_GEM_USERPTR_REGISTER	(1 << 3)

struct drm_amdgpu_gem_userptr {
	__u64		addr;
	__u64		size;
	/* AMDGPU_GEM_USERPTR_* */
	__u32		flags;
	/* Resulting GEM handle */
	__u32		handle;
};

/* SI-CI-VI: */
/* same meaning as the GB_TILE_MODE and GL_MACRO_TILE_MODE fields */
#define AMDGPU_TILING_ARRAY_MODE_SHIFT			0
#define AMDGPU_TILING_ARRAY_MODE_MASK			0xf
#define AMDGPU_TILING_PIPE_CONFIG_SHIFT			4
#define AMDGPU_TILING_PIPE_CONFIG_MASK			0x1f
#define AMDGPU_TILING_TILE_SPLIT_SHIFT			9
#define AMDGPU_TILING_TILE_SPLIT_MASK			0x7
#define AMDGPU_TILING_MICRO_TILE_MODE_SHIFT		12
#define AMDGPU_TILING_MICRO_TILE_MODE_MASK		0x7
#define AMDGPU_TILING_BANK_WIDTH_SHIFT			15
#define AMDGPU_TILING_BANK_WIDTH_MASK			0x3
#define AMDGPU_TILING_BANK_HEIGHT_SHIFT			17
#define AMDGPU_TILING_BANK_HEIGHT_MASK			0x3
#define AMDGPU_TILING_MACRO_TILE_ASPECT_SHIFT		19
#define AMDGPU_TILING_MACRO_TILE_ASPECT_MASK		0x3
#define AMDGPU_TILING_NUM_BANKS_SHIFT			21
#define AMDGPU_TILING_NUM_BANKS_MASK			0x3

/* GFX9 and later: */
#define AMDGPU_TILING_SWIZZLE_MODE_SHIFT		0
#define AMDGPU_TILING_SWIZZLE_MODE_MASK			0x1f

/* Set/Get helpers for tiling flags. */
#define AMDGPU_TILING_SET(field, value) \
	(((__u64)(value) & AMDGPU_TILING_##field##_MASK) << AMDGPU_TILING_##field##_SHIFT)
#define AMDGPU_TILING_GET(value, field) \
	(((__u64)(value) >> AMDGPU_TILING_##field##_SHIFT) & AMDGPU_TILING_##field##_MASK)

#define AMDGPU_GEM_METADATA_OP_SET_METADATA	1
#define AMDGPU_GEM_METADATA_OP_GET_METADATA	2

/** The same structure is shared for input/output */
struct drm_amdgpu_gem_metadata {
	/** GEM Object handle */
	__u32	handle;
	/** Do we want get or set metadata (AMDGPU_GEM_METADATA_OP_*) */
	__u32	op;
	struct {
		/** For future use, no flags defined so far */
		__u64	flags;
		/** family specific tiling info */
		__u64	tiling_info;
		__u32	data_size_bytes;
		__u32	data[64];
	} data;
};

struct drm_amdgpu_gem_mmap_in {
	/** the GEM object handle */
	__u32 handle;
	__u32 _pad;
};

struct drm_amdgpu_gem_mmap_out {
	/** mmap offset from the vma offset manager */
	__u64 addr_ptr;
};

union drm_amdgpu_gem_mmap {
	struct drm_amdgpu_gem_mmap_in   in;
	struct drm_amdgpu_gem_mmap_out out;
};

struct drm_amdgpu_gem_wait_idle_in {
	/** GEM object handle */
	__u32 handle;
	/** For future use, no flags defined so far */
	__u32 flags;
	/** Absolute timeout to wait */
	__u64 timeout;
};

struct drm_amdgpu_gem_wait_idle_out {
	/** BO status: 0 - BO is idle, 1 - BO is busy */
	__u32 status;
	/** Returned current memory domain */
	__u32 domain;
};

union drm_amdgpu_gem_wait_idle {
	struct drm_amdgpu_gem_wait_idle_in  in;
	struct drm_amdgpu_gem_wait_idle_out out;
};

struct drm_amdgpu_wait_cs_in {
	/* Command submission handle
	 * handle equals 0 means none to wait for
	 * handle equals ~0ull means wait for the latest sequence number
	 */
	__u64 handle;
	/** Absolute timeout to wait */
	__u64 timeout;
	__u32 ip_type;
	__u32 ip_instance;
	__u32 ring;
	__u32 ctx_id;
};

struct drm_amdgpu_wait_cs_out {
	/** CS status: 0 - CS completed, 1 - CS still busy */
	__u64 status;
};

union drm_amdgpu_wait_cs {
	struct drm_amdgpu_wait_cs_in in;
	struct drm_amdgpu_wait_cs_out out;
};

struct drm_amdgpu_fence {
	__u32 ctx_id;
	__u32 ip_type;
	__u32 ip_instance;
	__u32 ring;
	__u64 seq_no;
};

struct drm_amdgpu_wait_fences_in {
	/** This points to uint64_t * which points to fences */
	__u64 fences;
	__u32 fence_count;
	__u32 wait_all;
	__u64 timeout_ns;
};

struct drm_amdgpu_wait_fences_out {
	__u32 status;
	__u32 first_signaled;
};

union drm_amdgpu_wait_fences {
	struct drm_amdgpu_wait_fences_in in;
	struct drm_amdgpu_wait_fences_out out;
};

#define AMDGPU_GEM_OP_GET_GEM_CREATE_INFO	0
#define AMDGPU_GEM_OP_SET_PLACEMENT		1

/* Sets or returns a value associated with a buffer. */
struct drm_amdgpu_gem_op {
	/** GEM object handle */
	__u32	handle;
	/** AMDGPU_GEM_OP_* */
	__u32	op;
	/** Input or return value */
	__u64	value;
};

#define AMDGPU_VA_OP_MAP			1
#define AMDGPU_VA_OP_UNMAP			2
#define AMDGPU_VA_OP_CLEAR			3
#define AMDGPU_VA_OP_REPLACE			4

/* Delay the page table update till the next CS */
#define AMDGPU_VM_DELAY_UPDATE		(1 << 0)

/* Mapping flags */
/* readable mapping */
#define AMDGPU_VM_PAGE_READABLE		(1 << 1)
/* writable mapping */
#define AMDGPU_VM_PAGE_WRITEABLE	(1 << 2)
/* executable mapping, new for VI */
#define AMDGPU_VM_PAGE_EXECUTABLE	(1 << 3)
/* partially resident texture */
#define AMDGPU_VM_PAGE_PRT		(1 << 4)
/* MTYPE flags use bit 5 to 8 */
#define AMDGPU_VM_MTYPE_MASK		(0xf << 5)
/* Default MTYPE. Pre-AI must use this. Recommended for newer ASICs. */
#define AMDGPU_VM_MTYPE_DEFAULT		(0 << 5)
/* Use NC MTYPE instead of default MTYPE */
#define AMDGPU_VM_MTYPE_NC		(1 << 5)
/* Use WC MTYPE instead of default MTYPE */
#define AMDGPU_VM_MTYPE_WC		(2 << 5)
/* Use CC MTYPE instead of default MTYPE */
#define AMDGPU_VM_MTYPE_CC		(3 << 5)
/* Use UC MTYPE instead of default MTYPE */
#define AMDGPU_VM_MTYPE_UC		(4 << 5)

struct drm_amdgpu_gem_va {
	/** GEM object handle */
	__u32 handle;
	__u32 _pad;
	/** AMDGPU_VA_OP_* */
	__u32 operation;
	/** AMDGPU_VM_PAGE_* */
	__u32 flags;
	/** va address to assign. Must be correctly aligned. */
	__u64 va_address;
	/** Specify offset inside of BO to assign. Must be correctly aligned. */
	__u64 offset_in_bo;
	/** Specify mapping size. Must be correctly aligned. */
	__u64 map_size;
};

/* Hardware IP blocks a command submission can target. */
#define AMDGPU_HW_IP_GFX          0
#define AMDGPU_HW_IP_COMPUTE      1
#define AMDGPU_HW_IP_DMA          2
#define AMDGPU_HW_IP_UVD          3
#define AMDGPU_HW_IP_VCE          4
#define AMDGPU_HW_IP_UVD_ENC      5
#define AMDGPU_HW_IP_VCN_DEC      6
#define AMDGPU_HW_IP_VCN_ENC      7
#define AMDGPU_HW_IP_NUM          8

#define AMDGPU_HW_IP_INSTANCE_MAX_COUNT 1

#define AMDGPU_CHUNK_ID_IB		0x01
#define AMDGPU_CHUNK_ID_FENCE		0x02
#define AMDGPU_CHUNK_ID_DEPENDENCIES	0x03
#define AMDGPU_CHUNK_ID_SYNCOBJ_IN      0x04
#define AMDGPU_CHUNK_ID_SYNCOBJ_OUT     0x05

struct drm_amdgpu_cs_chunk {
	/** AMDGPU_CHUNK_ID_* */
	__u32		chunk_id;
	/* NOTE(review): presumably the chunk payload length in dwords (name:
	 * length_dw) - confirm against the CS ioctl implementation. */
	__u32		length_dw;
	__u64		chunk_data;
};

struct drm_amdgpu_cs_in {
	/** Rendering context id */
	__u32		ctx_id;
	/** Handle of resource list associated with CS */
	__u32		bo_list_handle;
	__u32		num_chunks;
	__u32		_pad;
	/** this points to __u64 * which point to cs chunks */
	__u64		chunks;
};

struct drm_amdgpu_cs_out {
	__u64 handle;
};

union drm_amdgpu_cs {
	struct drm_amdgpu_cs_in in;
	struct drm_amdgpu_cs_out out;
};

/* Specify flags to be used for IB */

/* This IB should be submitted to CE */
#define AMDGPU_IB_FLAG_CE	(1<<0)

/* Preamble flag, which means the IB could be dropped if no context switch */
#define AMDGPU_IB_FLAG_PREAMBLE (1<<1)

/* Preempt flag, IB should set Pre_enb bit if PREEMPT flag detected */
#define AMDGPU_IB_FLAG_PREEMPT (1<<2)

struct drm_amdgpu_cs_chunk_ib {
	__u32 _pad;
	/** AMDGPU_IB_FLAG_* */
	__u32 flags;
	/** Virtual address to begin IB execution */
	__u64 va_start;
	/** Size of submission */
	__u32 ib_bytes;
	/** HW IP to submit to */
	__u32 ip_type;
	/** HW IP index of the same type to submit to */
	__u32 ip_instance;
	/** Ring index to submit to */
	__u32 ring;
};

struct drm_amdgpu_cs_chunk_dep {
	__u32 ip_type;
	__u32 ip_instance;
	__u32 ring;
	__u32 ctx_id;
	__u64 handle;
};

struct drm_amdgpu_cs_chunk_fence {
	__u32 handle;
	__u32 offset;
};

struct drm_amdgpu_cs_chunk_sem {
	__u32 handle;
};

#define AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ	0
#define AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ_FD	1
#define AMDGPU_FENCE_TO_HANDLE_GET_SYNC_FILE_FD	2

union drm_amdgpu_fence_to_handle {
	struct {
		struct drm_amdgpu_fence fence;
		/** AMDGPU_FENCE_TO_HANDLE_GET_* */
		__u32 what;
		__u32 pad;
	} in;
	struct {
		__u32 handle;
	} out;
};

struct drm_amdgpu_cs_chunk_data {
	union {
		struct drm_amdgpu_cs_chunk_ib		ib_data;
		struct drm_amdgpu_cs_chunk_fence	fence_data;
	};
};

/**
 * Query h/w info: Flag that this is integrated (a.k.a. fusion) GPU
 *
 */
#define AMDGPU_IDS_FLAGS_FUSION         0x1
#define AMDGPU_IDS_FLAGS_PREEMPTION     0x2

/* indicate if acceleration can be working */
#define AMDGPU_INFO_ACCEL_WORKING		0x00
/* get the crtc_id from the mode object id? */
#define AMDGPU_INFO_CRTC_FROM_ID		0x01
/* query hw IP info */
#define AMDGPU_INFO_HW_IP_INFO			0x02
/* query hw IP instance count for the specified type */
#define AMDGPU_INFO_HW_IP_COUNT			0x03
/* timestamp for GL_ARB_timer_query */
#define AMDGPU_INFO_TIMESTAMP			0x05
/* Query the firmware version */
#define AMDGPU_INFO_FW_VERSION			0x0e
	/* Subquery id: Query VCE firmware version */
	#define AMDGPU_INFO_FW_VCE		0x1
	/* Subquery id: Query UVD firmware version */
	#define AMDGPU_INFO_FW_UVD		0x2
	/* Subquery id: Query GMC firmware version */
	#define AMDGPU_INFO_FW_GMC		0x03
	/* Subquery id: Query GFX ME firmware version */
	#define AMDGPU_INFO_FW_GFX_ME		0x04
	/* Subquery id: Query GFX PFP firmware version */
	#define AMDGPU_INFO_FW_GFX_PFP		0x05
	/* Subquery id: Query GFX CE firmware version */
	#define AMDGPU_INFO_FW_GFX_CE		0x06
	/* Subquery id: Query GFX RLC firmware version */
	#define AMDGPU_INFO_FW_GFX_RLC		0x07
	/* Subquery id: Query GFX MEC firmware version */
	#define AMDGPU_INFO_FW_GFX_MEC		0x08
	/* Subquery id: Query SMC firmware version */
	#define AMDGPU_INFO_FW_SMC		0x0a
	/* Subquery id: Query SDMA firmware version */
	#define AMDGPU_INFO_FW_SDMA		0x0b
	/* Subquery id: Query PSP SOS firmware version */
	#define AMDGPU_INFO_FW_SOS		0x0c
	/* Subquery id: Query PSP ASD firmware version */
	#define AMDGPU_INFO_FW_ASD		0x0d
/* number of bytes moved for TTM migration */
#define AMDGPU_INFO_NUM_BYTES_MOVED		0x0f
/* the used VRAM size */
#define AMDGPU_INFO_VRAM_USAGE			0x10
/* the used GTT size */
#define AMDGPU_INFO_GTT_USAGE			0x11
/* Information about GDS, etc. resource configuration */
#define AMDGPU_INFO_GDS_CONFIG			0x13
/* Query information about VRAM and GTT domains */
#define AMDGPU_INFO_VRAM_GTT			0x14
/* Query information about register in MMR address space*/
#define AMDGPU_INFO_READ_MMR_REG		0x15
/* Query information about device: rev id, family, etc. */
#define AMDGPU_INFO_DEV_INFO			0x16
/* visible vram usage */
#define AMDGPU_INFO_VIS_VRAM_USAGE		0x17
/* number of TTM buffer evictions */
#define AMDGPU_INFO_NUM_EVICTIONS		0x18
/* Query memory about VRAM and GTT domains */
#define AMDGPU_INFO_MEMORY			0x19
/* Query vce clock table */
#define AMDGPU_INFO_VCE_CLOCK_TABLE		0x1A
/* Query vbios related information */
#define AMDGPU_INFO_VBIOS			0x1B
	/* Subquery id: Query vbios size */
	#define AMDGPU_INFO_VBIOS_SIZE		0x1
	/* Subquery id: Query vbios image */
	#define AMDGPU_INFO_VBIOS_IMAGE		0x2
/* Query UVD handles */
#define AMDGPU_INFO_NUM_HANDLES			0x1C
/* Query sensor related information */
#define AMDGPU_INFO_SENSOR			0x1D
	/* Subquery id: Query GPU shader clock */
	#define AMDGPU_INFO_SENSOR_GFX_SCLK		0x1
	/* Subquery id: Query GPU memory clock */
	#define AMDGPU_INFO_SENSOR_GFX_MCLK		0x2
	/* Subquery id: Query GPU temperature */
	#define AMDGPU_INFO_SENSOR_GPU_TEMP		0x3
	/* Subquery id: Query GPU load */
	#define AMDGPU_INFO_SENSOR_GPU_LOAD		0x4
	/* Subquery id: Query average GPU power */
	#define AMDGPU_INFO_SENSOR_GPU_AVG_POWER	0x5
	/* Subquery id: Query northbridge voltage */
	#define AMDGPU_INFO_SENSOR_VDDNB		0x6
	/* Subquery id: Query graphics voltage */
	#define AMDGPU_INFO_SENSOR_VDDGFX		0x7
/* Number of VRAM page faults on CPU access. */
#define AMDGPU_INFO_NUM_VRAM_CPU_PAGE_FAULTS	0x1E
#define AMDGPU_INFO_VRAM_LOST_COUNTER		0x1F

#define AMDGPU_INFO_MMR_SE_INDEX_SHIFT	0
#define AMDGPU_INFO_MMR_SE_INDEX_MASK	0xff
#define AMDGPU_INFO_MMR_SH_INDEX_SHIFT	8
#define AMDGPU_INFO_MMR_SH_INDEX_MASK	0xff

struct drm_amdgpu_query_fw {
	/** AMDGPU_INFO_FW_* */
	__u32 fw_type;
	/**
	 * Index of the IP if there are more IPs of
	 * the same type.
	 */
	__u32 ip_instance;
	/**
	 * Index of the engine. Whether this is used depends
	 * on the firmware type. (e.g. MEC, SDMA)
	 */
	__u32 index;
	__u32 _pad;
};

/* Input structure for the INFO ioctl */
struct drm_amdgpu_info {
	/* Where the return value will be stored */
	__u64 return_pointer;
	/* The size of the return value. Just like "size" in "snprintf",
	 * it limits how many bytes the kernel can write. */
	__u32 return_size;
	/* The query request id (AMDGPU_INFO_*). */
	__u32 query;

	union {
		struct {
			__u32 id;
			__u32 _pad;
		} mode_crtc;

		struct {
			/** AMDGPU_HW_IP_* */
			__u32 type;
			/**
			 * Index of the IP if there are more IPs of the same
			 * type. Ignored by AMDGPU_INFO_HW_IP_COUNT.
			 */
			__u32 ip_instance;
		} query_hw_ip;

		struct {
			__u32 dword_offset;
			/** number of registers to read */
			__u32 count;
			__u32 instance;
			/** For future use, no flags defined so far */
			__u32 flags;
		} read_mmr_reg;

		struct drm_amdgpu_query_fw query_fw;

		struct {
			__u32 type;
			__u32 offset;
		} vbios_info;

		struct {
			__u32 type;
		} sensor_info;
	};
};

struct drm_amdgpu_info_gds {
	/** GDS GFX partition size */
	__u32 gds_gfx_partition_size;
	/** GDS compute partition size */
	__u32 compute_partition_size;
	/** total GDS memory size */
	__u32 gds_total_size;
	/** GWS size per GFX partition */
	__u32 gws_per_gfx_partition;
	/** GWS size per compute partition */
	__u32 gws_per_compute_partition;
	/** OA size per GFX partition */
	__u32 oa_per_gfx_partition;
	/** OA size per compute partition */
	__u32 oa_per_compute_partition;
	__u32 _pad;
};

struct drm_amdgpu_info_vram_gtt {
	__u64 vram_size;
	__u64 vram_cpu_accessible_size;
	__u64 gtt_size;
};

struct drm_amdgpu_heap_info {
	/** max. physical memory */
	__u64 total_heap_size;

	/** Theoretical max. available memory in the given heap */
	__u64 usable_heap_size;

	/**
	 * Number of bytes allocated in the heap. This includes all processes
	 * and private allocations in the kernel. It changes when new buffers
	 * are allocated, freed, and moved. It cannot be larger than
	 * heap_size.
	 */
	__u64 heap_usage;

	/**
	 * Theoretical possible max. size of buffer which
	 * could be allocated in the given heap
	 */
	__u64 max_allocation;
};

struct drm_amdgpu_memory_info {
	struct drm_amdgpu_heap_info vram;
	struct drm_amdgpu_heap_info cpu_accessible_vram;
	struct drm_amdgpu_heap_info gtt;
};

struct drm_amdgpu_info_firmware {
	__u32 ver;
	__u32 feature;
};

#define AMDGPU_VRAM_TYPE_UNKNOWN 0
#define AMDGPU_VRAM_TYPE_GDDR1 1
#define AMDGPU_VRAM_TYPE_DDR2  2
#define AMDGPU_VRAM_TYPE_GDDR3 3
#define AMDGPU_VRAM_TYPE_GDDR4 4
#define AMDGPU_VRAM_TYPE_GDDR5 5
#define AMDGPU_VRAM_TYPE_HBM   6
#define AMDGPU_VRAM_TYPE_DDR3  7

struct drm_amdgpu_info_device {
	/** PCI Device ID */
	__u32 device_id;
	/** Internal chip revision: A0, A1, etc.) */
	__u32 chip_rev;
	__u32 external_rev;
	/** Revision id in PCI Config space */
	__u32 pci_rev;
	__u32 family;
	__u32 num_shader_engines;
	__u32 num_shader_arrays_per_engine;
	/* in KHz */
	__u32 gpu_counter_freq;
	__u64 max_engine_clock;
	__u64 max_memory_clock;
	/* cu information */
	__u32 cu_active_number;
	/* NOTE: cu_ao_mask is INVALID, DON'T use it */
	__u32 cu_ao_mask;
	__u32 cu_bitmap[4][4];
	/** Render backend pipe mask. One render backend is CB+DB. */
	__u32 enabled_rb_pipes_mask;
	__u32 num_rb_pipes;
	__u32 num_hw_gfx_contexts;
	__u32 _pad;
	__u64 ids_flags;
	/** Starting virtual address for UMDs. */
	__u64 virtual_address_offset;
	/** The maximum virtual address */
	__u64 virtual_address_max;
	/** Required alignment of virtual addresses. */
	__u32 virtual_address_alignment;
	/** Page table entry - fragment size */
	__u32 pte_fragment_size;
	__u32 gart_page_size;
	/** constant engine ram size */
	__u32 ce_ram_size;
	/** video memory type info */
	__u32 vram_type;
	/** video memory bit width */
	__u32 vram_bit_width;
	/* vce harvesting instance */
	__u32 vce_harvest_config;
	/* gfx double offchip LDS buffers */
	__u32 gc_double_offchip_lds_buf;
	/* NGG Primitive Buffer */
	__u64 prim_buf_gpu_addr;
	/* NGG Position Buffer */
	__u64 pos_buf_gpu_addr;
	/* NGG Control Sideband */
	__u64 cntl_sb_buf_gpu_addr;
	/* NGG Parameter Cache */
	__u64 param_buf_gpu_addr;
	__u32 prim_buf_size;
	__u32 pos_buf_size;
	__u32 cntl_sb_buf_size;
	__u32 param_buf_size;
	/* wavefront size */
	__u32 wave_front_size;
	/* shader visible vgprs */
	__u32 num_shader_visible_vgprs;
	/* CU per shader array */
	__u32 num_cu_per_sh;
	/* number of tcc blocks */
	__u32 num_tcc_blocks;
	/* gs vgt table depth */
	__u32 gs_vgt_table_depth;
	/* gs primitive buffer depth */
	__u32 gs_prim_buffer_depth;
	/* max gs wavefront per vgt */
	__u32 max_gs_waves_per_vgt;
	__u32 _pad1;
	/* always on cu bitmap */
	__u32 cu_ao_bitmap[4][4];
};

struct drm_amdgpu_info_hw_ip {
	/** Version of h/w IP */
	__u32  hw_ip_version_major;
	__u32  hw_ip_version_minor;
	/** Capabilities */
	__u64  capabilities_flags;
	/** command buffer address start alignment*/
	__u32  ib_start_alignment;
	/** command buffer size alignment*/
	__u32  ib_size_alignment;
	/** Bitmask of available rings. Bit 0 means ring 0, etc. */
	__u32  available_rings;
	__u32  _pad;
};

struct drm_amdgpu_info_num_handles {
	/** Max handles as supported by firmware for UVD */
	__u32  uvd_max_handles;
	/** Handles currently in use for UVD */
	__u32  uvd_used_handles;
};

#define AMDGPU_VCE_CLOCK_TABLE_ENTRIES		6

struct drm_amdgpu_info_vce_clock_table_entry {
	/** System clock */
	__u32 sclk;
	/** Memory clock */
	__u32 mclk;
	/** VCE clock */
	__u32 eclk;
	__u32 pad;
};

struct drm_amdgpu_info_vce_clock_table {
	struct drm_amdgpu_info_vce_clock_table_entry entries[AMDGPU_VCE_CLOCK_TABLE_ENTRIES];
	__u32 num_valid_entries;
	__u32 pad;
};

/*
 * Supported GPU families
 */
#define AMDGPU_FAMILY_UNKNOWN			0
#define AMDGPU_FAMILY_SI			110 /* Hainan, Oland, Verde, Pitcairn, Tahiti */
#define AMDGPU_FAMILY_CI			120 /* Bonaire, Hawaii */
#define AMDGPU_FAMILY_KV			125 /* Kaveri, Kabini, Mullins */
#define AMDGPU_FAMILY_VI			130 /* Iceland, Tonga */
#define AMDGPU_FAMILY_CZ			135 /* Carrizo, Stoney */
#define AMDGPU_FAMILY_AI			141 /* Vega10 */
#define AMDGPU_FAMILY_RV			142 /* Raven */

#if defined(__cplusplus)
}
#endif

#endif
{ "pile_set_name": "Github" }
This guide is aimed at contributors wishing to understand the internals of the code in order to change/evolve the component.

**Note:** this guide refers to **version 5**, which is currently in alpha, and will be updated as we progress

## Introduction

This component actually consists of 2 subcomponent UI widgets: one for the date and one for the time selection process. The developers can configure which of those are needed and also the granularity at which the component will allow users to select a date/time. Developers also choose the format in which the selected date/time will be displayed in the input field.

The component depends on the `jQuery` and `moment.js` libraries.
* `input` - Holds the DOM input element this instance is attached to
Called upon construction of each datetimepicker instance * `detachDatePickerElementEvents()` - Detaches listeners from the DOM element the component is attached to. Called on `destroy()` * `attachDatePickerWidgetEvents()` - Attaches listeners on the components widget. Called on `show()` * `detachDatePickerWidgetEvents()` - Detaches listeners on the components widget. Called on `hide()` #### Model related * `setValue(targetMoment)` - Sets the model value of the component takes a moment object. An `error` event will be emmited if the `targetMoment` does not pass the configured validations. Otherwise the `date` variable will be set and the relevant events will be fired. * `isValid(targetMoment, granularity)` - returns `true` if the `targetMoment` moment object is valid according to the components set validation rules (`min/maxDates`, `disabled/enabledDates` and `daysOfWeekDisabled`). You may pass a second variable to check only up the the specific granularity `year, month, day, hour, minute, second` #### Utilities * `indexGivenDates (givenDatesArray)` - Function that takes the array from `enabledDates()` and `disabledDates()` public functions and stores them as object keys to enable quick lookup * `isInEnableDates(date)` - Checks whether if the given moment object exists in the `options.enabledDates` object * `isInDisableDates(date)` - Checks whether if the given moment object exists in the `options.disabledDates` array * `dataToOptions()` - Parses `data-date-*` options set on the input dom element the component is attached to and returns an object with them * `isInFixed()` - Checks if the dom element or its parents has a fixed position css rule. * `parseInputDate(date)` - Parses a date parameter with moment using the component's `options.format` and `options.useStrict`. It returns a `moment` object or false if `parsedMoment#isValid()` returns `false`. Use this to parse date inputs from outside the component (public API calls). * `init()` - Initializes the component. 
Called when the component instance is created
{ "pile_set_name": "Github" }
{ "parent": "create:block/water_wheel" }
{ "pile_set_name": "Github" }
// cgo -godefs -- -Wall -Werror -static -I/tmp/include linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. // +build mips64le,linux package unix const ( SizeofPtr = 0x8 SizeofShort = 0x2 SizeofInt = 0x4 SizeofLong = 0x8 SizeofLongLong = 0x8 PathMax = 0x1000 ) type ( _C_short int16 _C_int int32 _C_long int64 _C_long_long int64 ) type Timespec struct { Sec int64 Nsec int64 } type Timeval struct { Sec int64 Usec int64 } type Timex struct { Modes uint32 Offset int64 Freq int64 Maxerror int64 Esterror int64 Status int32 Constant int64 Precision int64 Tolerance int64 Time Timeval Tick int64 Ppsfreq int64 Jitter int64 Shift int32 Stabil int64 Jitcnt int64 Calcnt int64 Errcnt int64 Stbcnt int64 Tai int32 _ [44]byte } type Time_t int64 type Tms struct { Utime int64 Stime int64 Cutime int64 Cstime int64 } type Utimbuf struct { Actime int64 Modtime int64 } type Rusage struct { Utime Timeval Stime Timeval Maxrss int64 Ixrss int64 Idrss int64 Isrss int64 Minflt int64 Majflt int64 Nswap int64 Inblock int64 Oublock int64 Msgsnd int64 Msgrcv int64 Nsignals int64 Nvcsw int64 Nivcsw int64 } type Rlimit struct { Cur uint64 Max uint64 } type _Gid_t uint32 type Stat_t struct { Dev uint32 Pad1 [3]uint32 Ino uint64 Mode uint32 Nlink uint32 Uid uint32 Gid uint32 Rdev uint32 Pad2 [3]uint32 Size int64 Atim Timespec Mtim Timespec Ctim Timespec Blksize uint32 Pad4 uint32 Blocks int64 } type StatxTimestamp struct { Sec int64 Nsec uint32 _ int32 } type Statx_t struct { Mask uint32 Blksize uint32 Attributes uint64 Nlink uint32 Uid uint32 Gid uint32 Mode uint16 _ [1]uint16 Ino uint64 Size uint64 Blocks uint64 Attributes_mask uint64 Atime StatxTimestamp Btime StatxTimestamp Ctime StatxTimestamp Mtime StatxTimestamp Rdev_major uint32 Rdev_minor uint32 Dev_major uint32 Dev_minor uint32 _ [14]uint64 } type Dirent struct { Ino uint64 Off int64 Reclen uint16 Type uint8 Name [256]int8 _ [5]byte } type Fsid struct { Val [2]int32 } type Flock_t struct { 
Type int16 Whence int16 Start int64 Len int64 Pid int32 _ [4]byte } type FscryptPolicy struct { Version uint8 Contents_encryption_mode uint8 Filenames_encryption_mode uint8 Flags uint8 Master_key_descriptor [8]uint8 } type FscryptKey struct { Mode uint32 Raw [64]uint8 Size uint32 } type KeyctlDHParams struct { Private int32 Prime int32 Base int32 } const ( FADV_NORMAL = 0x0 FADV_RANDOM = 0x1 FADV_SEQUENTIAL = 0x2 FADV_WILLNEED = 0x3 FADV_DONTNEED = 0x4 FADV_NOREUSE = 0x5 ) type RawSockaddrInet4 struct { Family uint16 Port uint16 Addr [4]byte /* in_addr */ Zero [8]uint8 } type RawSockaddrInet6 struct { Family uint16 Port uint16 Flowinfo uint32 Addr [16]byte /* in6_addr */ Scope_id uint32 } type RawSockaddrUnix struct { Family uint16 Path [108]int8 } type RawSockaddrLinklayer struct { Family uint16 Protocol uint16 Ifindex int32 Hatype uint16 Pkttype uint8 Halen uint8 Addr [8]uint8 } type RawSockaddrNetlink struct { Family uint16 Pad uint16 Pid uint32 Groups uint32 } type RawSockaddrHCI struct { Family uint16 Dev uint16 Channel uint16 } type RawSockaddrL2 struct { Family uint16 Psm uint16 Bdaddr [6]uint8 Cid uint16 Bdaddr_type uint8 _ [1]byte } type RawSockaddrRFCOMM struct { Family uint16 Bdaddr [6]uint8 Channel uint8 _ [1]byte } type RawSockaddrCAN struct { Family uint16 Ifindex int32 Addr [8]byte } type RawSockaddrALG struct { Family uint16 Type [14]uint8 Feat uint32 Mask uint32 Name [64]uint8 } type RawSockaddrVM struct { Family uint16 Reserved1 uint16 Port uint32 Cid uint32 Zero [4]uint8 } type RawSockaddrXDP struct { Family uint16 Flags uint16 Ifindex uint32 Queue_id uint32 Shared_umem_fd uint32 } type RawSockaddrPPPoX [0x1e]byte type RawSockaddr struct { Family uint16 Data [14]int8 } type RawSockaddrAny struct { Addr RawSockaddr Pad [96]int8 } type _Socklen uint32 type Linger struct { Onoff int32 Linger int32 } type Iovec struct { Base *byte Len uint64 } type IPMreq struct { Multiaddr [4]byte /* in_addr */ Interface [4]byte /* in_addr */ } type IPMreqn struct { 
Multiaddr [4]byte /* in_addr */ Address [4]byte /* in_addr */ Ifindex int32 } type IPv6Mreq struct { Multiaddr [16]byte /* in6_addr */ Interface uint32 } type PacketMreq struct { Ifindex int32 Type uint16 Alen uint16 Address [8]uint8 } type Msghdr struct { Name *byte Namelen uint32 Iov *Iovec Iovlen uint64 Control *byte Controllen uint64 Flags int32 _ [4]byte } type Cmsghdr struct { Len uint64 Level int32 Type int32 } type Inet4Pktinfo struct { Ifindex int32 Spec_dst [4]byte /* in_addr */ Addr [4]byte /* in_addr */ } type Inet6Pktinfo struct { Addr [16]byte /* in6_addr */ Ifindex uint32 } type IPv6MTUInfo struct { Addr RawSockaddrInet6 Mtu uint32 } type ICMPv6Filter struct { Data [8]uint32 } type Ucred struct { Pid int32 Uid uint32 Gid uint32 } type TCPInfo struct { State uint8 Ca_state uint8 Retransmits uint8 Probes uint8 Backoff uint8 Options uint8 Rto uint32 Ato uint32 Snd_mss uint32 Rcv_mss uint32 Unacked uint32 Sacked uint32 Lost uint32 Retrans uint32 Fackets uint32 Last_data_sent uint32 Last_ack_sent uint32 Last_data_recv uint32 Last_ack_recv uint32 Pmtu uint32 Rcv_ssthresh uint32 Rtt uint32 Rttvar uint32 Snd_ssthresh uint32 Snd_cwnd uint32 Advmss uint32 Reordering uint32 Rcv_rtt uint32 Rcv_space uint32 Total_retrans uint32 } type CanFilter struct { Id uint32 Mask uint32 } const ( SizeofSockaddrInet4 = 0x10 SizeofSockaddrInet6 = 0x1c SizeofSockaddrAny = 0x70 SizeofSockaddrUnix = 0x6e SizeofSockaddrLinklayer = 0x14 SizeofSockaddrNetlink = 0xc SizeofSockaddrHCI = 0x6 SizeofSockaddrL2 = 0xe SizeofSockaddrRFCOMM = 0xa SizeofSockaddrCAN = 0x10 SizeofSockaddrALG = 0x58 SizeofSockaddrVM = 0x10 SizeofSockaddrXDP = 0x10 SizeofSockaddrPPPoX = 0x1e SizeofLinger = 0x8 SizeofIovec = 0x10 SizeofIPMreq = 0x8 SizeofIPMreqn = 0xc SizeofIPv6Mreq = 0x14 SizeofPacketMreq = 0x10 SizeofMsghdr = 0x38 SizeofCmsghdr = 0x10 SizeofInet4Pktinfo = 0xc SizeofInet6Pktinfo = 0x14 SizeofIPv6MTUInfo = 0x20 SizeofICMPv6Filter = 0x20 SizeofUcred = 0xc SizeofTCPInfo = 0x68 SizeofCanFilter = 0x8 
) const ( NDA_UNSPEC = 0x0 NDA_DST = 0x1 NDA_LLADDR = 0x2 NDA_CACHEINFO = 0x3 NDA_PROBES = 0x4 NDA_VLAN = 0x5 NDA_PORT = 0x6 NDA_VNI = 0x7 NDA_IFINDEX = 0x8 NDA_MASTER = 0x9 NDA_LINK_NETNSID = 0xa NDA_SRC_VNI = 0xb NTF_USE = 0x1 NTF_SELF = 0x2 NTF_MASTER = 0x4 NTF_PROXY = 0x8 NTF_EXT_LEARNED = 0x10 NTF_OFFLOADED = 0x20 NTF_ROUTER = 0x80 NUD_INCOMPLETE = 0x1 NUD_REACHABLE = 0x2 NUD_STALE = 0x4 NUD_DELAY = 0x8 NUD_PROBE = 0x10 NUD_FAILED = 0x20 NUD_NOARP = 0x40 NUD_PERMANENT = 0x80 NUD_NONE = 0x0 IFA_UNSPEC = 0x0 IFA_ADDRESS = 0x1 IFA_LOCAL = 0x2 IFA_LABEL = 0x3 IFA_BROADCAST = 0x4 IFA_ANYCAST = 0x5 IFA_CACHEINFO = 0x6 IFA_MULTICAST = 0x7 IFA_FLAGS = 0x8 IFA_RT_PRIORITY = 0x9 IFA_TARGET_NETNSID = 0xa IFLA_UNSPEC = 0x0 IFLA_ADDRESS = 0x1 IFLA_BROADCAST = 0x2 IFLA_IFNAME = 0x3 IFLA_MTU = 0x4 IFLA_LINK = 0x5 IFLA_QDISC = 0x6 IFLA_STATS = 0x7 IFLA_COST = 0x8 IFLA_PRIORITY = 0x9 IFLA_MASTER = 0xa IFLA_WIRELESS = 0xb IFLA_PROTINFO = 0xc IFLA_TXQLEN = 0xd IFLA_MAP = 0xe IFLA_WEIGHT = 0xf IFLA_OPERSTATE = 0x10 IFLA_LINKMODE = 0x11 IFLA_LINKINFO = 0x12 IFLA_NET_NS_PID = 0x13 IFLA_IFALIAS = 0x14 IFLA_NUM_VF = 0x15 IFLA_VFINFO_LIST = 0x16 IFLA_STATS64 = 0x17 IFLA_VF_PORTS = 0x18 IFLA_PORT_SELF = 0x19 IFLA_AF_SPEC = 0x1a IFLA_GROUP = 0x1b IFLA_NET_NS_FD = 0x1c IFLA_EXT_MASK = 0x1d IFLA_PROMISCUITY = 0x1e IFLA_NUM_TX_QUEUES = 0x1f IFLA_NUM_RX_QUEUES = 0x20 IFLA_CARRIER = 0x21 IFLA_PHYS_PORT_ID = 0x22 IFLA_CARRIER_CHANGES = 0x23 IFLA_PHYS_SWITCH_ID = 0x24 IFLA_LINK_NETNSID = 0x25 IFLA_PHYS_PORT_NAME = 0x26 IFLA_PROTO_DOWN = 0x27 IFLA_GSO_MAX_SEGS = 0x28 IFLA_GSO_MAX_SIZE = 0x29 IFLA_PAD = 0x2a IFLA_XDP = 0x2b IFLA_EVENT = 0x2c IFLA_NEW_NETNSID = 0x2d IFLA_IF_NETNSID = 0x2e IFLA_TARGET_NETNSID = 0x2e IFLA_CARRIER_UP_COUNT = 0x2f IFLA_CARRIER_DOWN_COUNT = 0x30 IFLA_NEW_IFINDEX = 0x31 IFLA_MIN_MTU = 0x32 IFLA_MAX_MTU = 0x33 IFLA_MAX = 0x33 IFLA_INFO_KIND = 0x1 IFLA_INFO_DATA = 0x2 IFLA_INFO_XSTATS = 0x3 IFLA_INFO_SLAVE_KIND = 0x4 IFLA_INFO_SLAVE_DATA = 0x5 RT_SCOPE_UNIVERSE = 0x0 
RT_SCOPE_SITE = 0xc8 RT_SCOPE_LINK = 0xfd RT_SCOPE_HOST = 0xfe RT_SCOPE_NOWHERE = 0xff RT_TABLE_UNSPEC = 0x0 RT_TABLE_COMPAT = 0xfc RT_TABLE_DEFAULT = 0xfd RT_TABLE_MAIN = 0xfe RT_TABLE_LOCAL = 0xff RT_TABLE_MAX = 0xffffffff RTA_UNSPEC = 0x0 RTA_DST = 0x1 RTA_SRC = 0x2 RTA_IIF = 0x3 RTA_OIF = 0x4 RTA_GATEWAY = 0x5 RTA_PRIORITY = 0x6 RTA_PREFSRC = 0x7 RTA_METRICS = 0x8 RTA_MULTIPATH = 0x9 RTA_FLOW = 0xb RTA_CACHEINFO = 0xc RTA_TABLE = 0xf RTA_MARK = 0x10 RTA_MFC_STATS = 0x11 RTA_VIA = 0x12 RTA_NEWDST = 0x13 RTA_PREF = 0x14 RTA_ENCAP_TYPE = 0x15 RTA_ENCAP = 0x16 RTA_EXPIRES = 0x17 RTA_PAD = 0x18 RTA_UID = 0x19 RTA_TTL_PROPAGATE = 0x1a RTA_IP_PROTO = 0x1b RTA_SPORT = 0x1c RTA_DPORT = 0x1d RTN_UNSPEC = 0x0 RTN_UNICAST = 0x1 RTN_LOCAL = 0x2 RTN_BROADCAST = 0x3 RTN_ANYCAST = 0x4 RTN_MULTICAST = 0x5 RTN_BLACKHOLE = 0x6 RTN_UNREACHABLE = 0x7 RTN_PROHIBIT = 0x8 RTN_THROW = 0x9 RTN_NAT = 0xa RTN_XRESOLVE = 0xb RTNLGRP_NONE = 0x0 RTNLGRP_LINK = 0x1 RTNLGRP_NOTIFY = 0x2 RTNLGRP_NEIGH = 0x3 RTNLGRP_TC = 0x4 RTNLGRP_IPV4_IFADDR = 0x5 RTNLGRP_IPV4_MROUTE = 0x6 RTNLGRP_IPV4_ROUTE = 0x7 RTNLGRP_IPV4_RULE = 0x8 RTNLGRP_IPV6_IFADDR = 0x9 RTNLGRP_IPV6_MROUTE = 0xa RTNLGRP_IPV6_ROUTE = 0xb RTNLGRP_IPV6_IFINFO = 0xc RTNLGRP_IPV6_PREFIX = 0x12 RTNLGRP_IPV6_RULE = 0x13 RTNLGRP_ND_USEROPT = 0x14 SizeofNlMsghdr = 0x10 SizeofNlMsgerr = 0x14 SizeofRtGenmsg = 0x1 SizeofNlAttr = 0x4 SizeofRtAttr = 0x4 SizeofIfInfomsg = 0x10 SizeofIfAddrmsg = 0x8 SizeofRtMsg = 0xc SizeofRtNexthop = 0x8 SizeofNdUseroptmsg = 0x10 SizeofNdMsg = 0xc ) type NlMsghdr struct { Len uint32 Type uint16 Flags uint16 Seq uint32 Pid uint32 } type NlMsgerr struct { Error int32 Msg NlMsghdr } type RtGenmsg struct { Family uint8 } type NlAttr struct { Len uint16 Type uint16 } type RtAttr struct { Len uint16 Type uint16 } type IfInfomsg struct { Family uint8 _ uint8 Type uint16 Index int32 Flags uint32 Change uint32 } type IfAddrmsg struct { Family uint8 Prefixlen uint8 Flags uint8 Scope uint8 Index uint32 } type RtMsg struct { 
Family uint8 Dst_len uint8 Src_len uint8 Tos uint8 Table uint8 Protocol uint8 Scope uint8 Type uint8 Flags uint32 } type RtNexthop struct { Len uint16 Flags uint8 Hops uint8 Ifindex int32 } type NdUseroptmsg struct { Family uint8 Pad1 uint8 Opts_len uint16 Ifindex int32 Icmp_type uint8 Icmp_code uint8 Pad2 uint16 Pad3 uint32 } type NdMsg struct { Family uint8 Pad1 uint8 Pad2 uint16 Ifindex int32 State uint16 Flags uint8 Type uint8 } const ( SizeofSockFilter = 0x8 SizeofSockFprog = 0x10 ) type SockFilter struct { Code uint16 Jt uint8 Jf uint8 K uint32 } type SockFprog struct { Len uint16 Filter *SockFilter } type InotifyEvent struct { Wd int32 Mask uint32 Cookie uint32 Len uint32 } const SizeofInotifyEvent = 0x10 type PtraceRegs struct { Regs [32]uint64 Lo uint64 Hi uint64 Epc uint64 Badvaddr uint64 Status uint64 Cause uint64 } type FdSet struct { Bits [16]int64 } type Sysinfo_t struct { Uptime int64 Loads [3]uint64 Totalram uint64 Freeram uint64 Sharedram uint64 Bufferram uint64 Totalswap uint64 Freeswap uint64 Procs uint16 Pad uint16 Totalhigh uint64 Freehigh uint64 Unit uint32 _ [0]int8 _ [4]byte } type Utsname struct { Sysname [65]byte Nodename [65]byte Release [65]byte Version [65]byte Machine [65]byte Domainname [65]byte } type Ustat_t struct { Tfree int32 Tinode uint64 Fname [6]int8 Fpack [6]int8 _ [4]byte } type EpollEvent struct { Events uint32 Fd int32 Pad int32 } const ( AT_EMPTY_PATH = 0x1000 AT_FDCWD = -0x64 AT_NO_AUTOMOUNT = 0x800 AT_REMOVEDIR = 0x200 AT_STATX_SYNC_AS_STAT = 0x0 AT_STATX_FORCE_SYNC = 0x2000 AT_STATX_DONT_SYNC = 0x4000 AT_SYMLINK_FOLLOW = 0x400 AT_SYMLINK_NOFOLLOW = 0x100 AT_EACCESS = 0x200 ) type PollFd struct { Fd int32 Events int16 Revents int16 } const ( POLLIN = 0x1 POLLPRI = 0x2 POLLOUT = 0x4 POLLRDHUP = 0x2000 POLLERR = 0x8 POLLHUP = 0x10 POLLNVAL = 0x20 ) type Sigset_t struct { Val [16]uint64 } const _C__NSIG = 0x80 type SignalfdSiginfo struct { Signo uint32 Errno int32 Code int32 Pid uint32 Uid uint32 Fd int32 Tid uint32 Band 
uint32 Overrun uint32 Trapno uint32 Status int32 Int int32 Ptr uint64 Utime uint64 Stime uint64 Addr uint64 Addr_lsb uint16 _ uint16 Syscall int32 Call_addr uint64 Arch uint32 _ [28]uint8 } const PERF_IOC_FLAG_GROUP = 0x1 type Termios struct { Iflag uint32 Oflag uint32 Cflag uint32 Lflag uint32 Line uint8 Cc [23]uint8 Ispeed uint32 Ospeed uint32 } type Winsize struct { Row uint16 Col uint16 Xpixel uint16 Ypixel uint16 } type Taskstats struct { Version uint16 Ac_exitcode uint32 Ac_flag uint8 Ac_nice uint8 Cpu_count uint64 Cpu_delay_total uint64 Blkio_count uint64 Blkio_delay_total uint64 Swapin_count uint64 Swapin_delay_total uint64 Cpu_run_real_total uint64 Cpu_run_virtual_total uint64 Ac_comm [32]int8 Ac_sched uint8 Ac_pad [3]uint8 _ [4]byte Ac_uid uint32 Ac_gid uint32 Ac_pid uint32 Ac_ppid uint32 Ac_btime uint32 Ac_etime uint64 Ac_utime uint64 Ac_stime uint64 Ac_minflt uint64 Ac_majflt uint64 Coremem uint64 Virtmem uint64 Hiwater_rss uint64 Hiwater_vm uint64 Read_char uint64 Write_char uint64 Read_syscalls uint64 Write_syscalls uint64 Read_bytes uint64 Write_bytes uint64 Cancelled_write_bytes uint64 Nvcsw uint64 Nivcsw uint64 Ac_utimescaled uint64 Ac_stimescaled uint64 Cpu_scaled_run_real_total uint64 Freepages_count uint64 Freepages_delay_total uint64 Thrashing_count uint64 Thrashing_delay_total uint64 } const ( TASKSTATS_CMD_UNSPEC = 0x0 TASKSTATS_CMD_GET = 0x1 TASKSTATS_CMD_NEW = 0x2 TASKSTATS_TYPE_UNSPEC = 0x0 TASKSTATS_TYPE_PID = 0x1 TASKSTATS_TYPE_TGID = 0x2 TASKSTATS_TYPE_STATS = 0x3 TASKSTATS_TYPE_AGGR_PID = 0x4 TASKSTATS_TYPE_AGGR_TGID = 0x5 TASKSTATS_TYPE_NULL = 0x6 TASKSTATS_CMD_ATTR_UNSPEC = 0x0 TASKSTATS_CMD_ATTR_PID = 0x1 TASKSTATS_CMD_ATTR_TGID = 0x2 TASKSTATS_CMD_ATTR_REGISTER_CPUMASK = 0x3 TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK = 0x4 ) type CGroupStats struct { Sleeping uint64 Running uint64 Stopped uint64 Uninterruptible uint64 Io_wait uint64 } const ( CGROUPSTATS_CMD_UNSPEC = 0x3 CGROUPSTATS_CMD_GET = 0x4 CGROUPSTATS_CMD_NEW = 0x5 
CGROUPSTATS_TYPE_UNSPEC = 0x0 CGROUPSTATS_TYPE_CGROUP_STATS = 0x1 CGROUPSTATS_CMD_ATTR_UNSPEC = 0x0 CGROUPSTATS_CMD_ATTR_FD = 0x1 ) type Genlmsghdr struct { Cmd uint8 Version uint8 Reserved uint16 } const ( CTRL_CMD_UNSPEC = 0x0 CTRL_CMD_NEWFAMILY = 0x1 CTRL_CMD_DELFAMILY = 0x2 CTRL_CMD_GETFAMILY = 0x3 CTRL_CMD_NEWOPS = 0x4 CTRL_CMD_DELOPS = 0x5 CTRL_CMD_GETOPS = 0x6 CTRL_CMD_NEWMCAST_GRP = 0x7 CTRL_CMD_DELMCAST_GRP = 0x8 CTRL_CMD_GETMCAST_GRP = 0x9 CTRL_ATTR_UNSPEC = 0x0 CTRL_ATTR_FAMILY_ID = 0x1 CTRL_ATTR_FAMILY_NAME = 0x2 CTRL_ATTR_VERSION = 0x3 CTRL_ATTR_HDRSIZE = 0x4 CTRL_ATTR_MAXATTR = 0x5 CTRL_ATTR_OPS = 0x6 CTRL_ATTR_MCAST_GROUPS = 0x7 CTRL_ATTR_OP_UNSPEC = 0x0 CTRL_ATTR_OP_ID = 0x1 CTRL_ATTR_OP_FLAGS = 0x2 CTRL_ATTR_MCAST_GRP_UNSPEC = 0x0 CTRL_ATTR_MCAST_GRP_NAME = 0x1 CTRL_ATTR_MCAST_GRP_ID = 0x2 ) type cpuMask uint64 const ( _CPU_SETSIZE = 0x400 _NCPUBITS = 0x40 ) const ( BDADDR_BREDR = 0x0 BDADDR_LE_PUBLIC = 0x1 BDADDR_LE_RANDOM = 0x2 ) type PerfEventAttr struct { Type uint32 Size uint32 Config uint64 Sample uint64 Sample_type uint64 Read_format uint64 Bits uint64 Wakeup uint32 Bp_type uint32 Ext1 uint64 Ext2 uint64 Branch_sample_type uint64 Sample_regs_user uint64 Sample_stack_user uint32 Clockid int32 Sample_regs_intr uint64 Aux_watermark uint32 Sample_max_stack uint16 _ uint16 } type PerfEventMmapPage struct { Version uint32 Compat_version uint32 Lock uint32 Index uint32 Offset int64 Time_enabled uint64 Time_running uint64 Capabilities uint64 Pmc_width uint16 Time_shift uint16 Time_mult uint32 Time_offset uint64 Time_zero uint64 Size uint32 _ [948]uint8 Data_head uint64 Data_tail uint64 Data_offset uint64 Data_size uint64 Aux_head uint64 Aux_tail uint64 Aux_offset uint64 Aux_size uint64 } const ( PerfBitDisabled uint64 = CBitFieldMaskBit0 PerfBitInherit = CBitFieldMaskBit1 PerfBitPinned = CBitFieldMaskBit2 PerfBitExclusive = CBitFieldMaskBit3 PerfBitExcludeUser = CBitFieldMaskBit4 PerfBitExcludeKernel = CBitFieldMaskBit5 PerfBitExcludeHv = 
CBitFieldMaskBit6 PerfBitExcludeIdle = CBitFieldMaskBit7 PerfBitMmap = CBitFieldMaskBit8 PerfBitComm = CBitFieldMaskBit9 PerfBitFreq = CBitFieldMaskBit10 PerfBitInheritStat = CBitFieldMaskBit11 PerfBitEnableOnExec = CBitFieldMaskBit12 PerfBitTask = CBitFieldMaskBit13 PerfBitWatermark = CBitFieldMaskBit14 PerfBitPreciseIPBit1 = CBitFieldMaskBit15 PerfBitPreciseIPBit2 = CBitFieldMaskBit16 PerfBitMmapData = CBitFieldMaskBit17 PerfBitSampleIDAll = CBitFieldMaskBit18 PerfBitExcludeHost = CBitFieldMaskBit19 PerfBitExcludeGuest = CBitFieldMaskBit20 PerfBitExcludeCallchainKernel = CBitFieldMaskBit21 PerfBitExcludeCallchainUser = CBitFieldMaskBit22 PerfBitMmap2 = CBitFieldMaskBit23 PerfBitCommExec = CBitFieldMaskBit24 PerfBitUseClockID = CBitFieldMaskBit25 PerfBitContextSwitch = CBitFieldMaskBit26 ) const ( PERF_TYPE_HARDWARE = 0x0 PERF_TYPE_SOFTWARE = 0x1 PERF_TYPE_TRACEPOINT = 0x2 PERF_TYPE_HW_CACHE = 0x3 PERF_TYPE_RAW = 0x4 PERF_TYPE_BREAKPOINT = 0x5 PERF_COUNT_HW_CPU_CYCLES = 0x0 PERF_COUNT_HW_INSTRUCTIONS = 0x1 PERF_COUNT_HW_CACHE_REFERENCES = 0x2 PERF_COUNT_HW_CACHE_MISSES = 0x3 PERF_COUNT_HW_BRANCH_INSTRUCTIONS = 0x4 PERF_COUNT_HW_BRANCH_MISSES = 0x5 PERF_COUNT_HW_BUS_CYCLES = 0x6 PERF_COUNT_HW_STALLED_CYCLES_FRONTEND = 0x7 PERF_COUNT_HW_STALLED_CYCLES_BACKEND = 0x8 PERF_COUNT_HW_REF_CPU_CYCLES = 0x9 PERF_COUNT_HW_CACHE_L1D = 0x0 PERF_COUNT_HW_CACHE_L1I = 0x1 PERF_COUNT_HW_CACHE_LL = 0x2 PERF_COUNT_HW_CACHE_DTLB = 0x3 PERF_COUNT_HW_CACHE_ITLB = 0x4 PERF_COUNT_HW_CACHE_BPU = 0x5 PERF_COUNT_HW_CACHE_NODE = 0x6 PERF_COUNT_HW_CACHE_OP_READ = 0x0 PERF_COUNT_HW_CACHE_OP_WRITE = 0x1 PERF_COUNT_HW_CACHE_OP_PREFETCH = 0x2 PERF_COUNT_HW_CACHE_RESULT_ACCESS = 0x0 PERF_COUNT_HW_CACHE_RESULT_MISS = 0x1 PERF_COUNT_SW_CPU_CLOCK = 0x0 PERF_COUNT_SW_TASK_CLOCK = 0x1 PERF_COUNT_SW_PAGE_FAULTS = 0x2 PERF_COUNT_SW_CONTEXT_SWITCHES = 0x3 PERF_COUNT_SW_CPU_MIGRATIONS = 0x4 PERF_COUNT_SW_PAGE_FAULTS_MIN = 0x5 PERF_COUNT_SW_PAGE_FAULTS_MAJ = 0x6 PERF_COUNT_SW_ALIGNMENT_FAULTS = 0x7 
PERF_COUNT_SW_EMULATION_FAULTS = 0x8 PERF_COUNT_SW_DUMMY = 0x9 PERF_COUNT_SW_BPF_OUTPUT = 0xa PERF_SAMPLE_IP = 0x1 PERF_SAMPLE_TID = 0x2 PERF_SAMPLE_TIME = 0x4 PERF_SAMPLE_ADDR = 0x8 PERF_SAMPLE_READ = 0x10 PERF_SAMPLE_CALLCHAIN = 0x20 PERF_SAMPLE_ID = 0x40 PERF_SAMPLE_CPU = 0x80 PERF_SAMPLE_PERIOD = 0x100 PERF_SAMPLE_STREAM_ID = 0x200 PERF_SAMPLE_RAW = 0x400 PERF_SAMPLE_BRANCH_STACK = 0x800 PERF_SAMPLE_BRANCH_USER = 0x1 PERF_SAMPLE_BRANCH_KERNEL = 0x2 PERF_SAMPLE_BRANCH_HV = 0x4 PERF_SAMPLE_BRANCH_ANY = 0x8 PERF_SAMPLE_BRANCH_ANY_CALL = 0x10 PERF_SAMPLE_BRANCH_ANY_RETURN = 0x20 PERF_SAMPLE_BRANCH_IND_CALL = 0x40 PERF_SAMPLE_BRANCH_ABORT_TX = 0x80 PERF_SAMPLE_BRANCH_IN_TX = 0x100 PERF_SAMPLE_BRANCH_NO_TX = 0x200 PERF_SAMPLE_BRANCH_COND = 0x400 PERF_SAMPLE_BRANCH_CALL_STACK = 0x800 PERF_SAMPLE_BRANCH_IND_JUMP = 0x1000 PERF_SAMPLE_BRANCH_CALL = 0x2000 PERF_SAMPLE_BRANCH_NO_FLAGS = 0x4000 PERF_SAMPLE_BRANCH_NO_CYCLES = 0x8000 PERF_SAMPLE_BRANCH_TYPE_SAVE = 0x10000 PERF_FORMAT_TOTAL_TIME_ENABLED = 0x1 PERF_FORMAT_TOTAL_TIME_RUNNING = 0x2 PERF_FORMAT_ID = 0x4 PERF_FORMAT_GROUP = 0x8 PERF_RECORD_MMAP = 0x1 PERF_RECORD_LOST = 0x2 PERF_RECORD_COMM = 0x3 PERF_RECORD_EXIT = 0x4 PERF_RECORD_THROTTLE = 0x5 PERF_RECORD_UNTHROTTLE = 0x6 PERF_RECORD_FORK = 0x7 PERF_RECORD_READ = 0x8 PERF_RECORD_SAMPLE = 0x9 PERF_RECORD_MMAP2 = 0xa PERF_RECORD_AUX = 0xb PERF_RECORD_ITRACE_START = 0xc PERF_RECORD_LOST_SAMPLES = 0xd PERF_RECORD_SWITCH = 0xe PERF_RECORD_SWITCH_CPU_WIDE = 0xf PERF_RECORD_NAMESPACES = 0x10 PERF_CONTEXT_HV = -0x20 PERF_CONTEXT_KERNEL = -0x80 PERF_CONTEXT_USER = -0x200 PERF_CONTEXT_GUEST = -0x800 PERF_CONTEXT_GUEST_KERNEL = -0x880 PERF_CONTEXT_GUEST_USER = -0xa00 PERF_FLAG_FD_NO_GROUP = 0x1 PERF_FLAG_FD_OUTPUT = 0x2 PERF_FLAG_PID_CGROUP = 0x4 PERF_FLAG_FD_CLOEXEC = 0x8 ) const ( CBitFieldMaskBit0 = 0x1 CBitFieldMaskBit1 = 0x2 CBitFieldMaskBit2 = 0x4 CBitFieldMaskBit3 = 0x8 CBitFieldMaskBit4 = 0x10 CBitFieldMaskBit5 = 0x20 CBitFieldMaskBit6 = 0x40 CBitFieldMaskBit7 = 0x80 
CBitFieldMaskBit8 = 0x100 CBitFieldMaskBit9 = 0x200 CBitFieldMaskBit10 = 0x400 CBitFieldMaskBit11 = 0x800 CBitFieldMaskBit12 = 0x1000 CBitFieldMaskBit13 = 0x2000 CBitFieldMaskBit14 = 0x4000 CBitFieldMaskBit15 = 0x8000 CBitFieldMaskBit16 = 0x10000 CBitFieldMaskBit17 = 0x20000 CBitFieldMaskBit18 = 0x40000 CBitFieldMaskBit19 = 0x80000 CBitFieldMaskBit20 = 0x100000 CBitFieldMaskBit21 = 0x200000 CBitFieldMaskBit22 = 0x400000 CBitFieldMaskBit23 = 0x800000 CBitFieldMaskBit24 = 0x1000000 CBitFieldMaskBit25 = 0x2000000 CBitFieldMaskBit26 = 0x4000000 CBitFieldMaskBit27 = 0x8000000 CBitFieldMaskBit28 = 0x10000000 CBitFieldMaskBit29 = 0x20000000 CBitFieldMaskBit30 = 0x40000000 CBitFieldMaskBit31 = 0x80000000 CBitFieldMaskBit32 = 0x100000000 CBitFieldMaskBit33 = 0x200000000 CBitFieldMaskBit34 = 0x400000000 CBitFieldMaskBit35 = 0x800000000 CBitFieldMaskBit36 = 0x1000000000 CBitFieldMaskBit37 = 0x2000000000 CBitFieldMaskBit38 = 0x4000000000 CBitFieldMaskBit39 = 0x8000000000 CBitFieldMaskBit40 = 0x10000000000 CBitFieldMaskBit41 = 0x20000000000 CBitFieldMaskBit42 = 0x40000000000 CBitFieldMaskBit43 = 0x80000000000 CBitFieldMaskBit44 = 0x100000000000 CBitFieldMaskBit45 = 0x200000000000 CBitFieldMaskBit46 = 0x400000000000 CBitFieldMaskBit47 = 0x800000000000 CBitFieldMaskBit48 = 0x1000000000000 CBitFieldMaskBit49 = 0x2000000000000 CBitFieldMaskBit50 = 0x4000000000000 CBitFieldMaskBit51 = 0x8000000000000 CBitFieldMaskBit52 = 0x10000000000000 CBitFieldMaskBit53 = 0x20000000000000 CBitFieldMaskBit54 = 0x40000000000000 CBitFieldMaskBit55 = 0x80000000000000 CBitFieldMaskBit56 = 0x100000000000000 CBitFieldMaskBit57 = 0x200000000000000 CBitFieldMaskBit58 = 0x400000000000000 CBitFieldMaskBit59 = 0x800000000000000 CBitFieldMaskBit60 = 0x1000000000000000 CBitFieldMaskBit61 = 0x2000000000000000 CBitFieldMaskBit62 = 0x4000000000000000 CBitFieldMaskBit63 = 0x8000000000000000 ) type SockaddrStorage struct { Family uint16 _ [118]int8 _ uint64 } type TCPMD5Sig struct { Addr SockaddrStorage Flags uint8 
Prefixlen uint8 Keylen uint16 _ uint32 Key [80]uint8 } type HDDriveCmdHdr struct { Command uint8 Number uint8 Feature uint8 Count uint8 } type HDGeometry struct { Heads uint8 Sectors uint8 Cylinders uint16 Start uint64 } type HDDriveID struct { Config uint16 Cyls uint16 Reserved2 uint16 Heads uint16 Track_bytes uint16 Sector_bytes uint16 Sectors uint16 Vendor0 uint16 Vendor1 uint16 Vendor2 uint16 Serial_no [20]uint8 Buf_type uint16 Buf_size uint16 Ecc_bytes uint16 Fw_rev [8]uint8 Model [40]uint8 Max_multsect uint8 Vendor3 uint8 Dword_io uint16 Vendor4 uint8 Capability uint8 Reserved50 uint16 Vendor5 uint8 TPIO uint8 Vendor6 uint8 TDMA uint8 Field_valid uint16 Cur_cyls uint16 Cur_heads uint16 Cur_sectors uint16 Cur_capacity0 uint16 Cur_capacity1 uint16 Multsect uint8 Multsect_valid uint8 Lba_capacity uint32 Dma_1word uint16 Dma_mword uint16 Eide_pio_modes uint16 Eide_dma_min uint16 Eide_dma_time uint16 Eide_pio uint16 Eide_pio_iordy uint16 Words69_70 [2]uint16 Words71_74 [4]uint16 Queue_depth uint16 Words76_79 [4]uint16 Major_rev_num uint16 Minor_rev_num uint16 Command_set_1 uint16 Command_set_2 uint16 Cfsse uint16 Cfs_enable_1 uint16 Cfs_enable_2 uint16 Csf_default uint16 Dma_ultra uint16 Trseuc uint16 TrsEuc uint16 CurAPMvalues uint16 Mprc uint16 Hw_config uint16 Acoustic uint16 Msrqs uint16 Sxfert uint16 Sal uint16 Spg uint32 Lba_capacity_2 uint64 Words104_125 [22]uint16 Last_lun uint16 Word127 uint16 Dlf uint16 Csfo uint16 Words130_155 [26]uint16 Word156 uint16 Words157_159 [3]uint16 Cfa_power uint16 Words161_175 [15]uint16 Words176_205 [30]uint16 Words206_254 [49]uint16 Integrity_word uint16 } type Statfs_t struct { Type int64 Bsize int64 Frsize int64 Blocks uint64 Bfree uint64 Files uint64 Ffree uint64 Bavail uint64 Fsid Fsid Namelen int64 Flags int64 Spare [5]int64 } const ( ST_MANDLOCK = 0x40 ST_NOATIME = 0x400 ST_NODEV = 0x4 ST_NODIRATIME = 0x800 ST_NOEXEC = 0x8 ST_NOSUID = 0x2 ST_RDONLY = 0x1 ST_RELATIME = 0x1000 ST_SYNCHRONOUS = 0x10 ) type TpacketHdr 
struct { Status uint64 Len uint32 Snaplen uint32 Mac uint16 Net uint16 Sec uint32 Usec uint32 _ [4]byte } type Tpacket2Hdr struct { Status uint32 Len uint32 Snaplen uint32 Mac uint16 Net uint16 Sec uint32 Nsec uint32 Vlan_tci uint16 Vlan_tpid uint16 _ [4]uint8 } type Tpacket3Hdr struct { Next_offset uint32 Sec uint32 Nsec uint32 Snaplen uint32 Len uint32 Status uint32 Mac uint16 Net uint16 Hv1 TpacketHdrVariant1 _ [8]uint8 } type TpacketHdrVariant1 struct { Rxhash uint32 Vlan_tci uint32 Vlan_tpid uint16 _ uint16 } type TpacketBlockDesc struct { Version uint32 To_priv uint32 Hdr [40]byte } type TpacketBDTS struct { Sec uint32 Usec uint32 } type TpacketHdrV1 struct { Block_status uint32 Num_pkts uint32 Offset_to_first_pkt uint32 Blk_len uint32 Seq_num uint64 Ts_first_pkt TpacketBDTS Ts_last_pkt TpacketBDTS } type TpacketReq struct { Block_size uint32 Block_nr uint32 Frame_size uint32 Frame_nr uint32 } type TpacketReq3 struct { Block_size uint32 Block_nr uint32 Frame_size uint32 Frame_nr uint32 Retire_blk_tov uint32 Sizeof_priv uint32 Feature_req_word uint32 } type TpacketStats struct { Packets uint32 Drops uint32 } type TpacketStatsV3 struct { Packets uint32 Drops uint32 Freeze_q_cnt uint32 } type TpacketAuxdata struct { Status uint32 Len uint32 Snaplen uint32 Mac uint16 Net uint16 Vlan_tci uint16 Vlan_tpid uint16 } const ( TPACKET_V1 = 0x0 TPACKET_V2 = 0x1 TPACKET_V3 = 0x2 ) const ( SizeofTpacketHdr = 0x20 SizeofTpacket2Hdr = 0x20 SizeofTpacket3Hdr = 0x30 SizeofTpacketStats = 0x8 SizeofTpacketStatsV3 = 0xc ) const ( NF_INET_PRE_ROUTING = 0x0 NF_INET_LOCAL_IN = 0x1 NF_INET_FORWARD = 0x2 NF_INET_LOCAL_OUT = 0x3 NF_INET_POST_ROUTING = 0x4 NF_INET_NUMHOOKS = 0x5 ) const ( NF_NETDEV_INGRESS = 0x0 NF_NETDEV_NUMHOOKS = 0x1 ) const ( NFPROTO_UNSPEC = 0x0 NFPROTO_INET = 0x1 NFPROTO_IPV4 = 0x2 NFPROTO_ARP = 0x3 NFPROTO_NETDEV = 0x5 NFPROTO_BRIDGE = 0x7 NFPROTO_IPV6 = 0xa NFPROTO_DECNET = 0xc NFPROTO_NUMPROTO = 0xd ) type Nfgenmsg struct { Nfgen_family uint8 Version uint8 
Res_id uint16 } const ( NFNL_BATCH_UNSPEC = 0x0 NFNL_BATCH_GENID = 0x1 ) const ( NFT_REG_VERDICT = 0x0 NFT_REG_1 = 0x1 NFT_REG_2 = 0x2 NFT_REG_3 = 0x3 NFT_REG_4 = 0x4 NFT_REG32_00 = 0x8 NFT_REG32_01 = 0x9 NFT_REG32_02 = 0xa NFT_REG32_03 = 0xb NFT_REG32_04 = 0xc NFT_REG32_05 = 0xd NFT_REG32_06 = 0xe NFT_REG32_07 = 0xf NFT_REG32_08 = 0x10 NFT_REG32_09 = 0x11 NFT_REG32_10 = 0x12 NFT_REG32_11 = 0x13 NFT_REG32_12 = 0x14 NFT_REG32_13 = 0x15 NFT_REG32_14 = 0x16 NFT_REG32_15 = 0x17 NFT_CONTINUE = -0x1 NFT_BREAK = -0x2 NFT_JUMP = -0x3 NFT_GOTO = -0x4 NFT_RETURN = -0x5 NFT_MSG_NEWTABLE = 0x0 NFT_MSG_GETTABLE = 0x1 NFT_MSG_DELTABLE = 0x2 NFT_MSG_NEWCHAIN = 0x3 NFT_MSG_GETCHAIN = 0x4 NFT_MSG_DELCHAIN = 0x5 NFT_MSG_NEWRULE = 0x6 NFT_MSG_GETRULE = 0x7 NFT_MSG_DELRULE = 0x8 NFT_MSG_NEWSET = 0x9 NFT_MSG_GETSET = 0xa NFT_MSG_DELSET = 0xb NFT_MSG_NEWSETELEM = 0xc NFT_MSG_GETSETELEM = 0xd NFT_MSG_DELSETELEM = 0xe NFT_MSG_NEWGEN = 0xf NFT_MSG_GETGEN = 0x10 NFT_MSG_TRACE = 0x11 NFT_MSG_NEWOBJ = 0x12 NFT_MSG_GETOBJ = 0x13 NFT_MSG_DELOBJ = 0x14 NFT_MSG_GETOBJ_RESET = 0x15 NFT_MSG_MAX = 0x19 NFTA_LIST_UNPEC = 0x0 NFTA_LIST_ELEM = 0x1 NFTA_HOOK_UNSPEC = 0x0 NFTA_HOOK_HOOKNUM = 0x1 NFTA_HOOK_PRIORITY = 0x2 NFTA_HOOK_DEV = 0x3 NFT_TABLE_F_DORMANT = 0x1 NFTA_TABLE_UNSPEC = 0x0 NFTA_TABLE_NAME = 0x1 NFTA_TABLE_FLAGS = 0x2 NFTA_TABLE_USE = 0x3 NFTA_CHAIN_UNSPEC = 0x0 NFTA_CHAIN_TABLE = 0x1 NFTA_CHAIN_HANDLE = 0x2 NFTA_CHAIN_NAME = 0x3 NFTA_CHAIN_HOOK = 0x4 NFTA_CHAIN_POLICY = 0x5 NFTA_CHAIN_USE = 0x6 NFTA_CHAIN_TYPE = 0x7 NFTA_CHAIN_COUNTERS = 0x8 NFTA_CHAIN_PAD = 0x9 NFTA_RULE_UNSPEC = 0x0 NFTA_RULE_TABLE = 0x1 NFTA_RULE_CHAIN = 0x2 NFTA_RULE_HANDLE = 0x3 NFTA_RULE_EXPRESSIONS = 0x4 NFTA_RULE_COMPAT = 0x5 NFTA_RULE_POSITION = 0x6 NFTA_RULE_USERDATA = 0x7 NFTA_RULE_PAD = 0x8 NFTA_RULE_ID = 0x9 NFT_RULE_COMPAT_F_INV = 0x2 NFT_RULE_COMPAT_F_MASK = 0x2 NFTA_RULE_COMPAT_UNSPEC = 0x0 NFTA_RULE_COMPAT_PROTO = 0x1 NFTA_RULE_COMPAT_FLAGS = 0x2 NFT_SET_ANONYMOUS = 0x1 NFT_SET_CONSTANT = 0x2 
NFT_SET_INTERVAL = 0x4 NFT_SET_MAP = 0x8 NFT_SET_TIMEOUT = 0x10 NFT_SET_EVAL = 0x20 NFT_SET_OBJECT = 0x40 NFT_SET_POL_PERFORMANCE = 0x0 NFT_SET_POL_MEMORY = 0x1 NFTA_SET_DESC_UNSPEC = 0x0 NFTA_SET_DESC_SIZE = 0x1 NFTA_SET_UNSPEC = 0x0 NFTA_SET_TABLE = 0x1 NFTA_SET_NAME = 0x2 NFTA_SET_FLAGS = 0x3 NFTA_SET_KEY_TYPE = 0x4 NFTA_SET_KEY_LEN = 0x5 NFTA_SET_DATA_TYPE = 0x6 NFTA_SET_DATA_LEN = 0x7 NFTA_SET_POLICY = 0x8 NFTA_SET_DESC = 0x9 NFTA_SET_ID = 0xa NFTA_SET_TIMEOUT = 0xb NFTA_SET_GC_INTERVAL = 0xc NFTA_SET_USERDATA = 0xd NFTA_SET_PAD = 0xe NFTA_SET_OBJ_TYPE = 0xf NFT_SET_ELEM_INTERVAL_END = 0x1 NFTA_SET_ELEM_UNSPEC = 0x0 NFTA_SET_ELEM_KEY = 0x1 NFTA_SET_ELEM_DATA = 0x2 NFTA_SET_ELEM_FLAGS = 0x3 NFTA_SET_ELEM_TIMEOUT = 0x4 NFTA_SET_ELEM_EXPIRATION = 0x5 NFTA_SET_ELEM_USERDATA = 0x6 NFTA_SET_ELEM_EXPR = 0x7 NFTA_SET_ELEM_PAD = 0x8 NFTA_SET_ELEM_OBJREF = 0x9 NFTA_SET_ELEM_LIST_UNSPEC = 0x0 NFTA_SET_ELEM_LIST_TABLE = 0x1 NFTA_SET_ELEM_LIST_SET = 0x2 NFTA_SET_ELEM_LIST_ELEMENTS = 0x3 NFTA_SET_ELEM_LIST_SET_ID = 0x4 NFT_DATA_VALUE = 0x0 NFT_DATA_VERDICT = 0xffffff00 NFTA_DATA_UNSPEC = 0x0 NFTA_DATA_VALUE = 0x1 NFTA_DATA_VERDICT = 0x2 NFTA_VERDICT_UNSPEC = 0x0 NFTA_VERDICT_CODE = 0x1 NFTA_VERDICT_CHAIN = 0x2 NFTA_EXPR_UNSPEC = 0x0 NFTA_EXPR_NAME = 0x1 NFTA_EXPR_DATA = 0x2 NFTA_IMMEDIATE_UNSPEC = 0x0 NFTA_IMMEDIATE_DREG = 0x1 NFTA_IMMEDIATE_DATA = 0x2 NFTA_BITWISE_UNSPEC = 0x0 NFTA_BITWISE_SREG = 0x1 NFTA_BITWISE_DREG = 0x2 NFTA_BITWISE_LEN = 0x3 NFTA_BITWISE_MASK = 0x4 NFTA_BITWISE_XOR = 0x5 NFT_BYTEORDER_NTOH = 0x0 NFT_BYTEORDER_HTON = 0x1 NFTA_BYTEORDER_UNSPEC = 0x0 NFTA_BYTEORDER_SREG = 0x1 NFTA_BYTEORDER_DREG = 0x2 NFTA_BYTEORDER_OP = 0x3 NFTA_BYTEORDER_LEN = 0x4 NFTA_BYTEORDER_SIZE = 0x5 NFT_CMP_EQ = 0x0 NFT_CMP_NEQ = 0x1 NFT_CMP_LT = 0x2 NFT_CMP_LTE = 0x3 NFT_CMP_GT = 0x4 NFT_CMP_GTE = 0x5 NFTA_CMP_UNSPEC = 0x0 NFTA_CMP_SREG = 0x1 NFTA_CMP_OP = 0x2 NFTA_CMP_DATA = 0x3 NFT_RANGE_EQ = 0x0 NFT_RANGE_NEQ = 0x1 NFTA_RANGE_UNSPEC = 0x0 NFTA_RANGE_SREG = 0x1 NFTA_RANGE_OP 
= 0x2 NFTA_RANGE_FROM_DATA = 0x3 NFTA_RANGE_TO_DATA = 0x4 NFT_LOOKUP_F_INV = 0x1 NFTA_LOOKUP_UNSPEC = 0x0 NFTA_LOOKUP_SET = 0x1 NFTA_LOOKUP_SREG = 0x2 NFTA_LOOKUP_DREG = 0x3 NFTA_LOOKUP_SET_ID = 0x4 NFTA_LOOKUP_FLAGS = 0x5 NFT_DYNSET_OP_ADD = 0x0 NFT_DYNSET_OP_UPDATE = 0x1 NFT_DYNSET_F_INV = 0x1 NFTA_DYNSET_UNSPEC = 0x0 NFTA_DYNSET_SET_NAME = 0x1 NFTA_DYNSET_SET_ID = 0x2 NFTA_DYNSET_OP = 0x3 NFTA_DYNSET_SREG_KEY = 0x4 NFTA_DYNSET_SREG_DATA = 0x5 NFTA_DYNSET_TIMEOUT = 0x6 NFTA_DYNSET_EXPR = 0x7 NFTA_DYNSET_PAD = 0x8 NFTA_DYNSET_FLAGS = 0x9 NFT_PAYLOAD_LL_HEADER = 0x0 NFT_PAYLOAD_NETWORK_HEADER = 0x1 NFT_PAYLOAD_TRANSPORT_HEADER = 0x2 NFT_PAYLOAD_CSUM_NONE = 0x0 NFT_PAYLOAD_CSUM_INET = 0x1 NFT_PAYLOAD_L4CSUM_PSEUDOHDR = 0x1 NFTA_PAYLOAD_UNSPEC = 0x0 NFTA_PAYLOAD_DREG = 0x1 NFTA_PAYLOAD_BASE = 0x2 NFTA_PAYLOAD_OFFSET = 0x3 NFTA_PAYLOAD_LEN = 0x4 NFTA_PAYLOAD_SREG = 0x5 NFTA_PAYLOAD_CSUM_TYPE = 0x6 NFTA_PAYLOAD_CSUM_OFFSET = 0x7 NFTA_PAYLOAD_CSUM_FLAGS = 0x8 NFT_EXTHDR_F_PRESENT = 0x1 NFT_EXTHDR_OP_IPV6 = 0x0 NFT_EXTHDR_OP_TCPOPT = 0x1 NFTA_EXTHDR_UNSPEC = 0x0 NFTA_EXTHDR_DREG = 0x1 NFTA_EXTHDR_TYPE = 0x2 NFTA_EXTHDR_OFFSET = 0x3 NFTA_EXTHDR_LEN = 0x4 NFTA_EXTHDR_FLAGS = 0x5 NFTA_EXTHDR_OP = 0x6 NFTA_EXTHDR_SREG = 0x7 NFT_META_LEN = 0x0 NFT_META_PROTOCOL = 0x1 NFT_META_PRIORITY = 0x2 NFT_META_MARK = 0x3 NFT_META_IIF = 0x4 NFT_META_OIF = 0x5 NFT_META_IIFNAME = 0x6 NFT_META_OIFNAME = 0x7 NFT_META_IIFTYPE = 0x8 NFT_META_OIFTYPE = 0x9 NFT_META_SKUID = 0xa NFT_META_SKGID = 0xb NFT_META_NFTRACE = 0xc NFT_META_RTCLASSID = 0xd NFT_META_SECMARK = 0xe NFT_META_NFPROTO = 0xf NFT_META_L4PROTO = 0x10 NFT_META_BRI_IIFNAME = 0x11 NFT_META_BRI_OIFNAME = 0x12 NFT_META_PKTTYPE = 0x13 NFT_META_CPU = 0x14 NFT_META_IIFGROUP = 0x15 NFT_META_OIFGROUP = 0x16 NFT_META_CGROUP = 0x17 NFT_META_PRANDOM = 0x18 NFT_RT_CLASSID = 0x0 NFT_RT_NEXTHOP4 = 0x1 NFT_RT_NEXTHOP6 = 0x2 NFT_RT_TCPMSS = 0x3 NFT_HASH_JENKINS = 0x0 NFT_HASH_SYM = 0x1 NFTA_HASH_UNSPEC = 0x0 NFTA_HASH_SREG = 0x1 NFTA_HASH_DREG = 0x2 
NFTA_HASH_LEN = 0x3 NFTA_HASH_MODULUS = 0x4 NFTA_HASH_SEED = 0x5 NFTA_HASH_OFFSET = 0x6 NFTA_HASH_TYPE = 0x7 NFTA_META_UNSPEC = 0x0 NFTA_META_DREG = 0x1 NFTA_META_KEY = 0x2 NFTA_META_SREG = 0x3 NFTA_RT_UNSPEC = 0x0 NFTA_RT_DREG = 0x1 NFTA_RT_KEY = 0x2 NFT_CT_STATE = 0x0 NFT_CT_DIRECTION = 0x1 NFT_CT_STATUS = 0x2 NFT_CT_MARK = 0x3 NFT_CT_SECMARK = 0x4 NFT_CT_EXPIRATION = 0x5 NFT_CT_HELPER = 0x6 NFT_CT_L3PROTOCOL = 0x7 NFT_CT_SRC = 0x8 NFT_CT_DST = 0x9 NFT_CT_PROTOCOL = 0xa NFT_CT_PROTO_SRC = 0xb NFT_CT_PROTO_DST = 0xc NFT_CT_LABELS = 0xd NFT_CT_PKTS = 0xe NFT_CT_BYTES = 0xf NFT_CT_AVGPKT = 0x10 NFT_CT_ZONE = 0x11 NFT_CT_EVENTMASK = 0x12 NFTA_CT_UNSPEC = 0x0 NFTA_CT_DREG = 0x1 NFTA_CT_KEY = 0x2 NFTA_CT_DIRECTION = 0x3 NFTA_CT_SREG = 0x4 NFT_LIMIT_PKTS = 0x0 NFT_LIMIT_PKT_BYTES = 0x1 NFT_LIMIT_F_INV = 0x1 NFTA_LIMIT_UNSPEC = 0x0 NFTA_LIMIT_RATE = 0x1 NFTA_LIMIT_UNIT = 0x2 NFTA_LIMIT_BURST = 0x3 NFTA_LIMIT_TYPE = 0x4 NFTA_LIMIT_FLAGS = 0x5 NFTA_LIMIT_PAD = 0x6 NFTA_COUNTER_UNSPEC = 0x0 NFTA_COUNTER_BYTES = 0x1 NFTA_COUNTER_PACKETS = 0x2 NFTA_COUNTER_PAD = 0x3 NFTA_LOG_UNSPEC = 0x0 NFTA_LOG_GROUP = 0x1 NFTA_LOG_PREFIX = 0x2 NFTA_LOG_SNAPLEN = 0x3 NFTA_LOG_QTHRESHOLD = 0x4 NFTA_LOG_LEVEL = 0x5 NFTA_LOG_FLAGS = 0x6 NFTA_QUEUE_UNSPEC = 0x0 NFTA_QUEUE_NUM = 0x1 NFTA_QUEUE_TOTAL = 0x2 NFTA_QUEUE_FLAGS = 0x3 NFTA_QUEUE_SREG_QNUM = 0x4 NFT_QUOTA_F_INV = 0x1 NFT_QUOTA_F_DEPLETED = 0x2 NFTA_QUOTA_UNSPEC = 0x0 NFTA_QUOTA_BYTES = 0x1 NFTA_QUOTA_FLAGS = 0x2 NFTA_QUOTA_PAD = 0x3 NFTA_QUOTA_CONSUMED = 0x4 NFT_REJECT_ICMP_UNREACH = 0x0 NFT_REJECT_TCP_RST = 0x1 NFT_REJECT_ICMPX_UNREACH = 0x2 NFT_REJECT_ICMPX_NO_ROUTE = 0x0 NFT_REJECT_ICMPX_PORT_UNREACH = 0x1 NFT_REJECT_ICMPX_HOST_UNREACH = 0x2 NFT_REJECT_ICMPX_ADMIN_PROHIBITED = 0x3 NFTA_REJECT_UNSPEC = 0x0 NFTA_REJECT_TYPE = 0x1 NFTA_REJECT_ICMP_CODE = 0x2 NFT_NAT_SNAT = 0x0 NFT_NAT_DNAT = 0x1 NFTA_NAT_UNSPEC = 0x0 NFTA_NAT_TYPE = 0x1 NFTA_NAT_FAMILY = 0x2 NFTA_NAT_REG_ADDR_MIN = 0x3 NFTA_NAT_REG_ADDR_MAX = 0x4 NFTA_NAT_REG_PROTO_MIN 
= 0x5 NFTA_NAT_REG_PROTO_MAX = 0x6 NFTA_NAT_FLAGS = 0x7 NFTA_MASQ_UNSPEC = 0x0 NFTA_MASQ_FLAGS = 0x1 NFTA_MASQ_REG_PROTO_MIN = 0x2 NFTA_MASQ_REG_PROTO_MAX = 0x3 NFTA_REDIR_UNSPEC = 0x0 NFTA_REDIR_REG_PROTO_MIN = 0x1 NFTA_REDIR_REG_PROTO_MAX = 0x2 NFTA_REDIR_FLAGS = 0x3 NFTA_DUP_UNSPEC = 0x0 NFTA_DUP_SREG_ADDR = 0x1 NFTA_DUP_SREG_DEV = 0x2 NFTA_FWD_UNSPEC = 0x0 NFTA_FWD_SREG_DEV = 0x1 NFTA_OBJREF_UNSPEC = 0x0 NFTA_OBJREF_IMM_TYPE = 0x1 NFTA_OBJREF_IMM_NAME = 0x2 NFTA_OBJREF_SET_SREG = 0x3 NFTA_OBJREF_SET_NAME = 0x4 NFTA_OBJREF_SET_ID = 0x5 NFTA_GEN_UNSPEC = 0x0 NFTA_GEN_ID = 0x1 NFTA_GEN_PROC_PID = 0x2 NFTA_GEN_PROC_NAME = 0x3 NFTA_FIB_UNSPEC = 0x0 NFTA_FIB_DREG = 0x1 NFTA_FIB_RESULT = 0x2 NFTA_FIB_FLAGS = 0x3 NFT_FIB_RESULT_UNSPEC = 0x0 NFT_FIB_RESULT_OIF = 0x1 NFT_FIB_RESULT_OIFNAME = 0x2 NFT_FIB_RESULT_ADDRTYPE = 0x3 NFTA_FIB_F_SADDR = 0x1 NFTA_FIB_F_DADDR = 0x2 NFTA_FIB_F_MARK = 0x4 NFTA_FIB_F_IIF = 0x8 NFTA_FIB_F_OIF = 0x10 NFTA_FIB_F_PRESENT = 0x20 NFTA_CT_HELPER_UNSPEC = 0x0 NFTA_CT_HELPER_NAME = 0x1 NFTA_CT_HELPER_L3PROTO = 0x2 NFTA_CT_HELPER_L4PROTO = 0x3 NFTA_OBJ_UNSPEC = 0x0 NFTA_OBJ_TABLE = 0x1 NFTA_OBJ_NAME = 0x2 NFTA_OBJ_TYPE = 0x3 NFTA_OBJ_DATA = 0x4 NFTA_OBJ_USE = 0x5 NFTA_TRACE_UNSPEC = 0x0 NFTA_TRACE_TABLE = 0x1 NFTA_TRACE_CHAIN = 0x2 NFTA_TRACE_RULE_HANDLE = 0x3 NFTA_TRACE_TYPE = 0x4 NFTA_TRACE_VERDICT = 0x5 NFTA_TRACE_ID = 0x6 NFTA_TRACE_LL_HEADER = 0x7 NFTA_TRACE_NETWORK_HEADER = 0x8 NFTA_TRACE_TRANSPORT_HEADER = 0x9 NFTA_TRACE_IIF = 0xa NFTA_TRACE_IIFTYPE = 0xb NFTA_TRACE_OIF = 0xc NFTA_TRACE_OIFTYPE = 0xd NFTA_TRACE_MARK = 0xe NFTA_TRACE_NFPROTO = 0xf NFTA_TRACE_POLICY = 0x10 NFTA_TRACE_PAD = 0x11 NFT_TRACETYPE_UNSPEC = 0x0 NFT_TRACETYPE_POLICY = 0x1 NFT_TRACETYPE_RETURN = 0x2 NFT_TRACETYPE_RULE = 0x3 NFTA_NG_UNSPEC = 0x0 NFTA_NG_DREG = 0x1 NFTA_NG_MODULUS = 0x2 NFTA_NG_TYPE = 0x3 NFTA_NG_OFFSET = 0x4 NFT_NG_INCREMENTAL = 0x0 NFT_NG_RANDOM = 0x1 ) type RTCTime struct { Sec int32 Min int32 Hour int32 Mday int32 Mon int32 Year int32 Wday int32 
Yday int32 Isdst int32 } type RTCWkAlrm struct { Enabled uint8 Pending uint8 Time RTCTime } type RTCPLLInfo struct { Ctrl int32 Value int32 Max int32 Min int32 Posmult int32 Negmult int32 Clock int64 } type BlkpgIoctlArg struct { Op int32 Flags int32 Datalen int32 Data *byte } type BlkpgPartition struct { Start int64 Length int64 Pno int32 Devname [64]uint8 Volname [64]uint8 _ [4]byte } const ( BLKPG = 0x20001269 BLKPG_ADD_PARTITION = 0x1 BLKPG_DEL_PARTITION = 0x2 BLKPG_RESIZE_PARTITION = 0x3 ) const ( NETNSA_NONE = 0x0 NETNSA_NSID = 0x1 NETNSA_PID = 0x2 NETNSA_FD = 0x3 ) type XDPRingOffset struct { Producer uint64 Consumer uint64 Desc uint64 } type XDPMmapOffsets struct { Rx XDPRingOffset Tx XDPRingOffset Fr XDPRingOffset Cr XDPRingOffset } type XDPUmemReg struct { Addr uint64 Len uint64 Size uint32 Headroom uint32 } type XDPStatistics struct { Rx_dropped uint64 Rx_invalid_descs uint64 Tx_invalid_descs uint64 } type XDPDesc struct { Addr uint64 Len uint32 Options uint32 } const ( NCSI_CMD_UNSPEC = 0x0 NCSI_CMD_PKG_INFO = 0x1 NCSI_CMD_SET_INTERFACE = 0x2 NCSI_CMD_CLEAR_INTERFACE = 0x3 NCSI_ATTR_UNSPEC = 0x0 NCSI_ATTR_IFINDEX = 0x1 NCSI_ATTR_PACKAGE_LIST = 0x2 NCSI_ATTR_PACKAGE_ID = 0x3 NCSI_ATTR_CHANNEL_ID = 0x4 NCSI_PKG_ATTR_UNSPEC = 0x0 NCSI_PKG_ATTR = 0x1 NCSI_PKG_ATTR_ID = 0x2 NCSI_PKG_ATTR_FORCED = 0x3 NCSI_PKG_ATTR_CHANNEL_LIST = 0x4 NCSI_CHANNEL_ATTR_UNSPEC = 0x0 NCSI_CHANNEL_ATTR = 0x1 NCSI_CHANNEL_ATTR_ID = 0x2 NCSI_CHANNEL_ATTR_VERSION_MAJOR = 0x3 NCSI_CHANNEL_ATTR_VERSION_MINOR = 0x4 NCSI_CHANNEL_ATTR_VERSION_STR = 0x5 NCSI_CHANNEL_ATTR_LINK_STATE = 0x6 NCSI_CHANNEL_ATTR_ACTIVE = 0x7 NCSI_CHANNEL_ATTR_FORCED = 0x8 NCSI_CHANNEL_ATTR_VLAN_LIST = 0x9 NCSI_CHANNEL_ATTR_VLAN_ID = 0xa ) type ScmTimestamping struct { Ts [3]Timespec } const ( SOF_TIMESTAMPING_TX_HARDWARE = 0x1 SOF_TIMESTAMPING_TX_SOFTWARE = 0x2 SOF_TIMESTAMPING_RX_HARDWARE = 0x4 SOF_TIMESTAMPING_RX_SOFTWARE = 0x8 SOF_TIMESTAMPING_SOFTWARE = 0x10 SOF_TIMESTAMPING_SYS_HARDWARE = 0x20 
SOF_TIMESTAMPING_RAW_HARDWARE = 0x40 SOF_TIMESTAMPING_OPT_ID = 0x80 SOF_TIMESTAMPING_TX_SCHED = 0x100 SOF_TIMESTAMPING_TX_ACK = 0x200 SOF_TIMESTAMPING_OPT_CMSG = 0x400 SOF_TIMESTAMPING_OPT_TSONLY = 0x800 SOF_TIMESTAMPING_OPT_STATS = 0x1000 SOF_TIMESTAMPING_OPT_PKTINFO = 0x2000 SOF_TIMESTAMPING_OPT_TX_SWHW = 0x4000 SOF_TIMESTAMPING_LAST = 0x4000 SOF_TIMESTAMPING_MASK = 0x7fff SCM_TSTAMP_SND = 0x0 SCM_TSTAMP_SCHED = 0x1 SCM_TSTAMP_ACK = 0x2 ) type SockExtendedErr struct { Errno uint32 Origin uint8 Type uint8 Code uint8 Pad uint8 Info uint32 Data uint32 } type FanotifyEventMetadata struct { Event_len uint32 Vers uint8 Reserved uint8 Metadata_len uint16 Mask uint64 Fd int32 Pid int32 } type FanotifyResponse struct { Fd int32 Response uint32 } const ( CRYPTO_MSG_BASE = 0x10 CRYPTO_MSG_NEWALG = 0x10 CRYPTO_MSG_DELALG = 0x11 CRYPTO_MSG_UPDATEALG = 0x12 CRYPTO_MSG_GETALG = 0x13 CRYPTO_MSG_DELRNG = 0x14 CRYPTO_MSG_GETSTAT = 0x15 ) const ( CRYPTOCFGA_UNSPEC = 0x0 CRYPTOCFGA_PRIORITY_VAL = 0x1 CRYPTOCFGA_REPORT_LARVAL = 0x2 CRYPTOCFGA_REPORT_HASH = 0x3 CRYPTOCFGA_REPORT_BLKCIPHER = 0x4 CRYPTOCFGA_REPORT_AEAD = 0x5 CRYPTOCFGA_REPORT_COMPRESS = 0x6 CRYPTOCFGA_REPORT_RNG = 0x7 CRYPTOCFGA_REPORT_CIPHER = 0x8 CRYPTOCFGA_REPORT_AKCIPHER = 0x9 CRYPTOCFGA_REPORT_KPP = 0xa CRYPTOCFGA_REPORT_ACOMP = 0xb CRYPTOCFGA_STAT_LARVAL = 0xc CRYPTOCFGA_STAT_HASH = 0xd CRYPTOCFGA_STAT_BLKCIPHER = 0xe CRYPTOCFGA_STAT_AEAD = 0xf CRYPTOCFGA_STAT_COMPRESS = 0x10 CRYPTOCFGA_STAT_RNG = 0x11 CRYPTOCFGA_STAT_CIPHER = 0x12 CRYPTOCFGA_STAT_AKCIPHER = 0x13 CRYPTOCFGA_STAT_KPP = 0x14 CRYPTOCFGA_STAT_ACOMP = 0x15 ) type CryptoUserAlg struct { Name [64]int8 Driver_name [64]int8 Module_name [64]int8 Type uint32 Mask uint32 Refcnt uint32 Flags uint32 } type CryptoStatAEAD struct { Type [64]int8 Encrypt_cnt uint64 Encrypt_tlen uint64 Decrypt_cnt uint64 Decrypt_tlen uint64 Err_cnt uint64 } type CryptoStatAKCipher struct { Type [64]int8 Encrypt_cnt uint64 Encrypt_tlen uint64 Decrypt_cnt uint64 Decrypt_tlen uint64 
Verify_cnt uint64 Sign_cnt uint64 Err_cnt uint64 } type CryptoStatCipher struct { Type [64]int8 Encrypt_cnt uint64 Encrypt_tlen uint64 Decrypt_cnt uint64 Decrypt_tlen uint64 Err_cnt uint64 } type CryptoStatCompress struct { Type [64]int8 Compress_cnt uint64 Compress_tlen uint64 Decompress_cnt uint64 Decompress_tlen uint64 Err_cnt uint64 } type CryptoStatHash struct { Type [64]int8 Hash_cnt uint64 Hash_tlen uint64 Err_cnt uint64 } type CryptoStatKPP struct { Type [64]int8 Setsecret_cnt uint64 Generate_public_key_cnt uint64 Compute_shared_secret_cnt uint64 Err_cnt uint64 } type CryptoStatRNG struct { Type [64]int8 Generate_cnt uint64 Generate_tlen uint64 Seed_cnt uint64 Err_cnt uint64 } type CryptoStatLarval struct { Type [64]int8 } type CryptoReportLarval struct { Type [64]int8 } type CryptoReportHash struct { Type [64]int8 Blocksize uint32 Digestsize uint32 } type CryptoReportCipher struct { Type [64]int8 Blocksize uint32 Min_keysize uint32 Max_keysize uint32 } type CryptoReportBlkCipher struct { Type [64]int8 Geniv [64]int8 Blocksize uint32 Min_keysize uint32 Max_keysize uint32 Ivsize uint32 } type CryptoReportAEAD struct { Type [64]int8 Geniv [64]int8 Blocksize uint32 Maxauthsize uint32 Ivsize uint32 } type CryptoReportComp struct { Type [64]int8 } type CryptoReportRNG struct { Type [64]int8 Seedsize uint32 } type CryptoReportAKCipher struct { Type [64]int8 } type CryptoReportKPP struct { Type [64]int8 } type CryptoReportAcomp struct { Type [64]int8 } const ( BPF_REG_0 = 0x0 BPF_REG_1 = 0x1 BPF_REG_2 = 0x2 BPF_REG_3 = 0x3 BPF_REG_4 = 0x4 BPF_REG_5 = 0x5 BPF_REG_6 = 0x6 BPF_REG_7 = 0x7 BPF_REG_8 = 0x8 BPF_REG_9 = 0x9 BPF_REG_10 = 0xa BPF_MAP_CREATE = 0x0 BPF_MAP_LOOKUP_ELEM = 0x1 BPF_MAP_UPDATE_ELEM = 0x2 BPF_MAP_DELETE_ELEM = 0x3 BPF_MAP_GET_NEXT_KEY = 0x4 BPF_PROG_LOAD = 0x5 BPF_OBJ_PIN = 0x6 BPF_OBJ_GET = 0x7 BPF_PROG_ATTACH = 0x8 BPF_PROG_DETACH = 0x9 BPF_PROG_TEST_RUN = 0xa BPF_PROG_GET_NEXT_ID = 0xb BPF_MAP_GET_NEXT_ID = 0xc BPF_PROG_GET_FD_BY_ID = 0xd 
BPF_MAP_GET_FD_BY_ID = 0xe BPF_OBJ_GET_INFO_BY_FD = 0xf BPF_PROG_QUERY = 0x10 BPF_RAW_TRACEPOINT_OPEN = 0x11 BPF_BTF_LOAD = 0x12 BPF_BTF_GET_FD_BY_ID = 0x13 BPF_TASK_FD_QUERY = 0x14 BPF_MAP_LOOKUP_AND_DELETE_ELEM = 0x15 BPF_MAP_TYPE_UNSPEC = 0x0 BPF_MAP_TYPE_HASH = 0x1 BPF_MAP_TYPE_ARRAY = 0x2 BPF_MAP_TYPE_PROG_ARRAY = 0x3 BPF_MAP_TYPE_PERF_EVENT_ARRAY = 0x4 BPF_MAP_TYPE_PERCPU_HASH = 0x5 BPF_MAP_TYPE_PERCPU_ARRAY = 0x6 BPF_MAP_TYPE_STACK_TRACE = 0x7 BPF_MAP_TYPE_CGROUP_ARRAY = 0x8 BPF_MAP_TYPE_LRU_HASH = 0x9 BPF_MAP_TYPE_LRU_PERCPU_HASH = 0xa BPF_MAP_TYPE_LPM_TRIE = 0xb BPF_MAP_TYPE_ARRAY_OF_MAPS = 0xc BPF_MAP_TYPE_HASH_OF_MAPS = 0xd BPF_MAP_TYPE_DEVMAP = 0xe BPF_MAP_TYPE_SOCKMAP = 0xf BPF_MAP_TYPE_CPUMAP = 0x10 BPF_MAP_TYPE_XSKMAP = 0x11 BPF_MAP_TYPE_SOCKHASH = 0x12 BPF_MAP_TYPE_CGROUP_STORAGE = 0x13 BPF_MAP_TYPE_REUSEPORT_SOCKARRAY = 0x14 BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE = 0x15 BPF_MAP_TYPE_QUEUE = 0x16 BPF_MAP_TYPE_STACK = 0x17 BPF_PROG_TYPE_UNSPEC = 0x0 BPF_PROG_TYPE_SOCKET_FILTER = 0x1 BPF_PROG_TYPE_KPROBE = 0x2 BPF_PROG_TYPE_SCHED_CLS = 0x3 BPF_PROG_TYPE_SCHED_ACT = 0x4 BPF_PROG_TYPE_TRACEPOINT = 0x5 BPF_PROG_TYPE_XDP = 0x6 BPF_PROG_TYPE_PERF_EVENT = 0x7 BPF_PROG_TYPE_CGROUP_SKB = 0x8 BPF_PROG_TYPE_CGROUP_SOCK = 0x9 BPF_PROG_TYPE_LWT_IN = 0xa BPF_PROG_TYPE_LWT_OUT = 0xb BPF_PROG_TYPE_LWT_XMIT = 0xc BPF_PROG_TYPE_SOCK_OPS = 0xd BPF_PROG_TYPE_SK_SKB = 0xe BPF_PROG_TYPE_CGROUP_DEVICE = 0xf BPF_PROG_TYPE_SK_MSG = 0x10 BPF_PROG_TYPE_RAW_TRACEPOINT = 0x11 BPF_PROG_TYPE_CGROUP_SOCK_ADDR = 0x12 BPF_PROG_TYPE_LWT_SEG6LOCAL = 0x13 BPF_PROG_TYPE_LIRC_MODE2 = 0x14 BPF_PROG_TYPE_SK_REUSEPORT = 0x15 BPF_PROG_TYPE_FLOW_DISSECTOR = 0x16 BPF_CGROUP_INET_INGRESS = 0x0 BPF_CGROUP_INET_EGRESS = 0x1 BPF_CGROUP_INET_SOCK_CREATE = 0x2 BPF_CGROUP_SOCK_OPS = 0x3 BPF_SK_SKB_STREAM_PARSER = 0x4 BPF_SK_SKB_STREAM_VERDICT = 0x5 BPF_CGROUP_DEVICE = 0x6 BPF_SK_MSG_VERDICT = 0x7 BPF_CGROUP_INET4_BIND = 0x8 BPF_CGROUP_INET6_BIND = 0x9 BPF_CGROUP_INET4_CONNECT = 0xa 
BPF_CGROUP_INET6_CONNECT = 0xb BPF_CGROUP_INET4_POST_BIND = 0xc BPF_CGROUP_INET6_POST_BIND = 0xd BPF_CGROUP_UDP4_SENDMSG = 0xe BPF_CGROUP_UDP6_SENDMSG = 0xf BPF_LIRC_MODE2 = 0x10 BPF_FLOW_DISSECTOR = 0x11 BPF_STACK_BUILD_ID_EMPTY = 0x0 BPF_STACK_BUILD_ID_VALID = 0x1 BPF_STACK_BUILD_ID_IP = 0x2 BPF_ADJ_ROOM_NET = 0x0 BPF_HDR_START_MAC = 0x0 BPF_HDR_START_NET = 0x1 BPF_LWT_ENCAP_SEG6 = 0x0 BPF_LWT_ENCAP_SEG6_INLINE = 0x1 BPF_OK = 0x0 BPF_DROP = 0x2 BPF_REDIRECT = 0x7 BPF_SOCK_OPS_VOID = 0x0 BPF_SOCK_OPS_TIMEOUT_INIT = 0x1 BPF_SOCK_OPS_RWND_INIT = 0x2 BPF_SOCK_OPS_TCP_CONNECT_CB = 0x3 BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB = 0x4 BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB = 0x5 BPF_SOCK_OPS_NEEDS_ECN = 0x6 BPF_SOCK_OPS_BASE_RTT = 0x7 BPF_SOCK_OPS_RTO_CB = 0x8 BPF_SOCK_OPS_RETRANS_CB = 0x9 BPF_SOCK_OPS_STATE_CB = 0xa BPF_SOCK_OPS_TCP_LISTEN_CB = 0xb BPF_TCP_ESTABLISHED = 0x1 BPF_TCP_SYN_SENT = 0x2 BPF_TCP_SYN_RECV = 0x3 BPF_TCP_FIN_WAIT1 = 0x4 BPF_TCP_FIN_WAIT2 = 0x5 BPF_TCP_TIME_WAIT = 0x6 BPF_TCP_CLOSE = 0x7 BPF_TCP_CLOSE_WAIT = 0x8 BPF_TCP_LAST_ACK = 0x9 BPF_TCP_LISTEN = 0xa BPF_TCP_CLOSING = 0xb BPF_TCP_NEW_SYN_RECV = 0xc BPF_TCP_MAX_STATES = 0xd BPF_FIB_LKUP_RET_SUCCESS = 0x0 BPF_FIB_LKUP_RET_BLACKHOLE = 0x1 BPF_FIB_LKUP_RET_UNREACHABLE = 0x2 BPF_FIB_LKUP_RET_PROHIBIT = 0x3 BPF_FIB_LKUP_RET_NOT_FWDED = 0x4 BPF_FIB_LKUP_RET_FWD_DISABLED = 0x5 BPF_FIB_LKUP_RET_UNSUPP_LWT = 0x6 BPF_FIB_LKUP_RET_NO_NEIGH = 0x7 BPF_FIB_LKUP_RET_FRAG_NEEDED = 0x8 BPF_FD_TYPE_RAW_TRACEPOINT = 0x0 BPF_FD_TYPE_TRACEPOINT = 0x1 BPF_FD_TYPE_KPROBE = 0x2 BPF_FD_TYPE_KRETPROBE = 0x3 BPF_FD_TYPE_UPROBE = 0x4 BPF_FD_TYPE_URETPROBE = 0x5 ) type CapUserHeader struct { Version uint32 Pid int32 } type CapUserData struct { Effective uint32 Permitted uint32 Inheritable uint32 } const ( LINUX_CAPABILITY_VERSION_1 = 0x19980330 LINUX_CAPABILITY_VERSION_2 = 0x20071026 LINUX_CAPABILITY_VERSION_3 = 0x20080522 ) const ( LO_FLAGS_READ_ONLY = 0x1 LO_FLAGS_AUTOCLEAR = 0x4 LO_FLAGS_PARTSCAN = 0x8 LO_FLAGS_DIRECT_IO = 0x10 ) 
// LoopInfo matches the in-memory layout of the legacy Linux loop_info
// structure used by the older loop-device status ioctls.
// NOTE(review): correspondence to <linux/loop.h> is inferred from the field
// names; confirm against the cgo definitions that generated this file.
type LoopInfo struct {
	Number           int32
	Device           uint32
	Inode            uint64
	Rdevice          uint32
	Offset           int32
	Encrypt_type     int32
	Encrypt_key_size int32
	Flags            int32
	Name             [64]int8
	Encrypt_key      [32]uint8
	Init             [2]uint64
	Reserved         [4]int8
	_                [4]byte // explicit trailing padding; keeps the struct size in sync with the kernel ABI
}

// LoopInfo64 matches the layout of the 64-bit Linux loop_info64 structure,
// the modern replacement for LoopInfo with wider device/offset fields.
// NOTE(review): same caveat as LoopInfo — layout comes from code generation;
// do not reorder or retype fields by hand.
type LoopInfo64 struct {
	Device           uint64
	Inode            uint64
	Rdevice          uint64
	Offset           uint64
	Sizelimit        uint64
	Number           uint32
	Encrypt_type     uint32
	Encrypt_key_size uint32
	Flags            uint32
	File_name        [64]uint8
	Crypt_name       [64]uint8
	Encrypt_key      [32]uint8
	Init             [2]uint64
}
{ "pile_set_name": "Github" }
# cmr-indexer-app

This is the indexer application for the CMR. It is responsible for indexing modified data into Elasticsearch.

### Index a concept

    curl -i -XPOST -H "Content-Type: application/json" http://localhost:3004 -d '{"concept-id": "C1234-PROV1", "revision-id": "1"}'

### Delete a concept

    curl -i -XDELETE -H "Content-Type: application/json" http://localhost:3004/C1234-PROV1/2

### Delete a provider

This will un-index all concepts within the given provider.

    curl -i -XDELETE http://localhost:3004/provider/PROV1?token=XXXX

## Administrative Tasks

These tasks require an admin user token with the INGEST_MANAGEMENT_ACL with read or update permission.

### Reset elastic and cache

*WARNING - this endpoint drops all data from the index.*

Every CMR application has a reset function to reset it back to its initial state. This will reset the indexes back to their initial state and also clear the cache.

    curl -i -XPOST http://localhost:3004/reset?token=XXXX

### Clear the cache

    curl -i -XPOST http://localhost:3004/caches/clear-cache?token=XXXX

### Querying caches

Endpoints are provided for querying the contents of the various caches used by the application. The following curl will return the list of caches:

    curl -i http://localhost:3004/caches

The following curl will return the keys for a specific cache:

    curl -i http://localhost:3004/caches/cache-name

This curl will return the value for a specific key in the named cache:

    curl -i http://localhost:3004/caches/cache-name/cache-key

### Check application health

This will report the current health of the application. It checks all resources and services used by the application and reports their health statuses in the response body in JSON format. For resources, the report includes an "ok?" status and a "problem" field if the resource is not OK. For services, the report includes an overall "ok?" status for the service and health reports for each of its dependencies.
It returns HTTP status code 200 when the application is healthy, which means all its interfacing resources and services are healthy; or HTTP status code 503 when one of the resources or services is not healthy. curl -i -XGET "http://localhost:3004/health" Example healthy response body: ``` { "elastic_search" : { "ok?" : true }, "echo" : { "ok?" : true }, "metadata-db" : { "ok?" : true, "dependencies" : { "oracle" : { "ok?" : true }, "echo" : { "ok?" : true } } }, "message-queue": { "ok?": true } } ``` Example un-healthy response body: ``` { "elastic_search" : { "ok?" : true }, "echo" : { "ok?" : true }, "metadata-db" : { "ok?" : false, "problem" : { "oracle" : { "ok?" : false, "problem" : "db-spec cmr.common.memory_db.connection.MemoryStore@aead584 is missing a required parameter" }, "echo" : { "ok?" : true } } }, "message-queue": { "ok?": true } } ``` ### Update the index set mappings By default, a comparison is run between the existing elasticsearch indexes and what is configured in index-set, and only apply the update when there is a difference between the two. User can override the default by passing in query parameter "force=true" and always update the elasticsearch indexes with the current configuration. 
curl -XPOST http://localhost:3004/update-indexes?token=XXXX ### Reindex collections in a provider curl -XPOST -H "Content-Type: application/json" http://localhost:3004/reindex-provider-collections?token=XXXX -d '["PROV1","PROV2"]' ### Reindex all tags curl -XPOST http://localhost:3004/reindex-tags?token=XXXX' ### Create index-set using json string ``` curl -i -H "Accept: application/json" -H "Content-type: application/json" -XPOST "http://localhost:3004/index-sets" -d "{\"index-set\":{\"name\":\"cmr-base-index-set\",\"create-reason\":\"include message about reasons for creating this index set\",\"granule\":{\"index-names\":[\"G2-PROV1\",\"G4-Prov3\",\"g5_prov5\"],\"mapping\":{\"granule\":{\"_all\":{\"enabled\":false},\"properties\":{\"collection-concept-id\":{\"store\":\"yes\",\"index_options\":\"docs\",\"norms\":\"false\",\"type\":\"string\",\"index\":\"not_analyzed\"},\"concept-id\":{\"store\":\"yes\",\"index_options\":\"docs\",\"norms\":\"false\",\"type\":\"string\",\"index\":\"not_analyzed\"}},\"dynamic\":\"strict\",\"_source\":{\"enabled\":false},\"_id\":{\"path\":\"concept-id\"}}},\"settings\":{\"index\":{\"number_of_replicas\":0,\"refresh_interval\":\"10s\",\"number_of_shards\":1}}},\"collection\":{\"index-names\":[\"C4-collections\",\"c6_Collections\"],\"mapping\":{\"collection\":{\"_all\":{\"enabled\":false},\"properties\":{\"entry-title\":{\"store\":\"yes\",\"index_options\":\"docs\",\"omit_norms\":\"true\",\"type\":\"string\",\"index\":\"not_analyzed\"},\"concept-id\":{\"store\":\"yes\",\"index_options\":\"docs\",\"omit_norms\":\"true\",\"type\":\"string\",\"index\":\"not_analyzed\"}},\"dynamic\":\"strict\",\"_source\":{\"enabled\":false},\"_id\":{\"path\":\"concept-id\"}}},\"settings\":{\"index\":{\"number_of_replicas\":0,\"refresh_interval\":\"20s\",\"number_of_shards\":1}}},\"id\":3}}" ``` ### Get index-set by id curl -XGET "http://localhost:3004/index-sets/3" ### Get all index-sets curl -XGET "http://localhost:3004/index-sets" ### Delete index-set by 
id

    curl -XDELETE "http://localhost:3004/index-sets/3"

### Mark a collection as rebalancing

There are multiple granule indexes for performance. Larger collections are split out into their own indexes. Smaller collections are grouped in a small_collections index. When calling the endpoint the `target` query parameter is required, and it has two valid values, `separate-index` and `small-collections`. In either case the collection is added to the list of collections being rebalanced. If `target=separate-index` a new granule index is created in addition to updating the index-set.

    curl -XPOST http://localhost:3004/index-sets/3/rebalancing-collections/C5-PROV1/start?target=separate-index

### Finalize a rebalancing collection

Finalizing a rebalancing collection removes the collection from the list of collections that are being rebalanced and updates the index-set appropriately based on what the target destination was set to on the call to start.

    curl -XPOST http://localhost:3004/index-sets/3/rebalancing-collections/C5-PROV1/finalize

### Update a rebalancing collection's status

Make changes to the collection's rebalancing status. This will update a mapping of collection id to rebalancing status in the index-set.

    curl -XPOST http://localhost:3004/index-sets/3/rebalancing-collections/C5-PROV1/update-status?status=COMPLETE

### Reset for dev purposes

    curl -i -H "Accept: application/json" -H "Content-type: application/json" -XPOST "http://localhost:3004/reset"

### See indices listing

    curl http://localhost:9210/index_sets/_aliases?pretty=1

### Ignore version conflict

By default, a version conflict returned from elasticsearch will be ignored. The user can override the default by passing in the query parameter "ignore_conflict=false" with the request.

### Message queues

The ingest application will publish messages for the indexer application to consume. The messages will be to index or delete concepts from elasticsearch. Messaging is handled using the message-queue-lib which uses RabbitMQ.
#### Message Queue Error Handling

##### Caught Error in the Indexer

If an error occurs in the indexer, either because Elasticsearch is unavailable or an unexpected error occurs during indexing, we will catch that error. The message will be placed on a Wait Queue as described in the message-queue-lib README. We will use an exponential backoff to retry after a set period of time. After the message has been successfully queued on the wait queue the indexer will acknowledge the message.

##### Uncaught Error in the Indexer

An uncaught error, such as the indexer dying or running out of memory, will be handled through non-acknowledgment of the message. RabbitMQ will consider the message as not having been processed and requeue it.

##### Alerts

The indexer has a background job that monitors the RabbitMQ message queue size and logs it. If the message queue size exceeds the configured size (CMR_INDEXER_WARN_QUEUE_SIZE) we will log extra information that splunk can detect. We will add a splunk alert to look for the log message indicating the queue size has exceeded the threshold and email CMR Operations.
## Sample outputs - Get all index-sets response ``` [{:id 3, :name "cmr-base-index-set", :concepts {:collection {:c6_Collections "3_c6_collections", :C4-collections "3_c4_collections"}, :granule {:g5_prov5 "3_g5_prov5", :G4-Prov3 "3_g4_prov3", :G2-PROV1 "3_g2_prov1"}}} {:id 55, :name "cmr-base-index-set", :concepts {:collection {:c6_Collections "55_c6_collections", :C4-collections "55_c4_collections"}, :granule {:g5_prov5 "55_g5_prov5", :G4-Prov3 "55_g4_prov3", :G2-PROV1 "55_g2_prov1"}}}] ``` - Get an index-set by id response ``` {:index-set {:concepts {:collection {:c6_Collections "3_c6_collections", :C4-collections "3_c4_collections"}, :granule {:g5_prov5 "3_g5_prov5", :G4-Prov3 "3_g4_prov3", :G2-PROV1 "3_g2_prov1"}}, :name "cmr-base-index-set", :create-reason "include message about reasons for creating this index set", :granule {:index-names ["G2-PROV1" "G4-Prov3" "g5_prov5"], :mapping {:granule {:_all {:enabled false}, :properties {:collection-concept-id {:store "yes", :index_options "docs", :norms false, :type "string", :index "not_analyzed"}, :concept-id {:store "yes", :index_options "docs", :norms false, :type "string", :index "not_analyzed"}}, :dynamic "strict", :_source {:enabled false}, :_id {:path "concept-id"}}}, :settings {:index {:number_of_replicas 0, :refresh_interval "10s", :number_of_shards 1}}}, :collection {:index-names ["C4-collections" "c6_Collections"], :mapping {:collection {:_all {:enabled false}, :properties {:entry-title {:store "yes", :index_options "docs", :norms false, :type "string", :index "not_analyzed"}, :concept-id {:store "yes", :index_options "docs", :norms false, :type "string", :index "not_analyzed"}}, :dynamic "strict", :_source {:enabled false}, :_id {:path "concept-id"}}}, :settings {:index {:number_of_replicas 0, :refresh_interval "20s", :number_of_shards 1}}}, :id 3}} ``` ## License Copyright © 2014-2015 NASA
{ "pile_set_name": "Github" }
<pluginMetaData path="dicomviewer"> <name>DICOM Viewer</name> <author>NextGen Healthcare</author> <pluginVersion>@mirthversion</pluginVersion> <mirthVersion>3.10.0</mirthVersion> <url>http://www.nextgen.com</url> <description>This plugin provides DICOM attachment viewing capability in message browser</description> <clientClasses> <string>com.mirth.connect.plugins.dicomviewer.DICOMViewer</string> </clientClasses> <library type="CLIENT" path="dicomviewer-client.jar" /> <library type="SHARED" path="lib/ij.jar" /> </pluginMetaData>
{ "pile_set_name": "Github" }
// Package deployconfig holds the REST storage strategies that prepare and
// validate DeploymentConfig objects on create and update.
package deployconfig

import (
	"reflect"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/util/validation/field"
	apirequest "k8s.io/apiserver/pkg/endpoints/request"
	"k8s.io/apiserver/pkg/registry/rest"
	"k8s.io/apiserver/pkg/storage/names"
	kapi "k8s.io/kubernetes/pkg/api"

	deployapi "github.com/openshift/origin/pkg/apps/apis/apps"
	deployapiv1 "github.com/openshift/origin/pkg/apps/apis/apps/v1"
	"github.com/openshift/origin/pkg/apps/apis/apps/validation"
)

// strategy implements behavior for DeploymentConfig objects
type strategy struct {
	runtime.ObjectTyper
	names.NameGenerator
}

// CommonStrategy is the default logic that applies when creating and updating DeploymentConfig objects.
var CommonStrategy = strategy{kapi.Scheme, names.SimpleNameGenerator}

// LegacyStrategy is the logic that applies when creating and updating DeploymentConfig objects in the legacy API.
// An example would be setting different defaults depending on API group
var LegacyStrategy = legacyStrategy{CommonStrategy}

// GroupStrategy is the logic that applies when creating and updating DeploymentConfig objects in the group API.
// An example would be setting different defaults depending on API group
var GroupStrategy = groupStrategy{CommonStrategy}

// NamespaceScoped is true for DeploymentConfig objects.
func (strategy) NamespaceScoped() bool {
	return true
}

// AllowCreateOnUpdate is false for DeploymentConfig objects.
func (strategy) AllowCreateOnUpdate() bool {
	return false
}

// AllowUnconditionalUpdate is false: callers must supply a resource version
// when updating a DeploymentConfig.
func (strategy) AllowUnconditionalUpdate() bool {
	return false
}

// Export strips creation-time-only fields by reusing PrepareForCreate; the
// exact flag is ignored.
func (s strategy) Export(ctx apirequest.Context, obj runtime.Object, exact bool) error {
	s.PrepareForCreate(ctx, obj)
	return nil
}

// PrepareForCreate clears fields that are not allowed to be set by end users on creation.
func (strategy) PrepareForCreate(ctx apirequest.Context, obj runtime.Object) {
	dc := obj.(*deployapi.DeploymentConfig)
	dc.Generation = 1
	// Status is fully controller-managed; wipe whatever the client sent.
	dc.Status = deployapi.DeploymentConfigStatus{}

	// LastTriggeredImage is maintained by the image-change controller, not the user.
	for i := range dc.Spec.Triggers {
		if params := dc.Spec.Triggers[i].ImageChangeParams; params != nil {
			params.LastTriggeredImage = ""
		}
	}
}

// PrepareForUpdate clears fields that are not allowed to be set by end users on update.
func (strategy) PrepareForUpdate(ctx apirequest.Context, obj, old runtime.Object) {
	newDc := obj.(*deployapi.DeploymentConfig)
	oldDc := old.(*deployapi.DeploymentConfig)

	newVersion := newDc.Status.LatestVersion
	oldVersion := oldDc.Status.LatestVersion

	// Persist status
	newDc.Status = oldDc.Status

	// oc deploy --latest from old clients
	// TODO: Remove once we drop support for older clients
	if newVersion == oldVersion+1 {
		newDc.Status.LatestVersion = newVersion
	}

	// TODO: Disallow lastTriggeredImage updates from this update path.

	// Any changes to the spec or labels, increment the generation number, any changes
	// to the status should reflect the generation number of the corresponding object
	// (should be handled by the controller).
	if !reflect.DeepEqual(oldDc.Spec, newDc.Spec) || newDc.Status.LatestVersion != oldDc.Status.LatestVersion {
		newDc.Generation = oldDc.Generation + 1
	}
}

// Canonicalize normalizes the object after validation.
func (strategy) Canonicalize(obj runtime.Object) {
}

// Validate validates a new policy.
func (strategy) Validate(ctx apirequest.Context, obj runtime.Object) field.ErrorList {
	return validation.ValidateDeploymentConfig(obj.(*deployapi.DeploymentConfig))
}

// ValidateUpdate is the default update validation for an end user.
func (strategy) ValidateUpdate(ctx apirequest.Context, obj, old runtime.Object) field.ErrorList {
	return validation.ValidateDeploymentConfigUpdate(obj.(*deployapi.DeploymentConfig), old.(*deployapi.DeploymentConfig))
}

// CheckGracefulDelete allows a deployment config to be gracefully deleted.
func (strategy) CheckGracefulDelete(obj runtime.Object, options *metav1.DeleteOptions) bool {
	return false
}

// legacyStrategy implements behavior for DeploymentConfig objects in the legacy API
type legacyStrategy struct {
	strategy
}

// PrepareForCreate delegates to the common strategy.
func (s legacyStrategy) PrepareForCreate(ctx apirequest.Context, obj runtime.Object) {
	s.strategy.PrepareForCreate(ctx, obj)
}

// DefaultGarbageCollectionPolicy for legacy DeploymentConfigs will orphan dependents.
func (s legacyStrategy) DefaultGarbageCollectionPolicy() rest.GarbageCollectionPolicy {
	return rest.OrphanDependents
}

// groupStrategy implements behavior for DeploymentConfig objects in the Group API
type groupStrategy struct {
	strategy
}

// PrepareForCreate delegates to the common strategy and sets defaults applicable only to Group API
func (s groupStrategy) PrepareForCreate(ctx apirequest.Context, obj runtime.Object) {
	s.strategy.PrepareForCreate(ctx, obj)

	dc := obj.(*deployapi.DeploymentConfig)
	deployapiv1.AppsV1DeploymentConfigLayeredDefaults(dc)
}

// statusStrategy implements behavior for DeploymentConfig status updates.
type statusStrategy struct {
	strategy
}

// StatusStrategy is the strategy used by the /status subresource.
var StatusStrategy = statusStrategy{CommonStrategy}

// PrepareForUpdate clears fields that are not allowed to be set by end users on update of status.
func (statusStrategy) PrepareForUpdate(ctx apirequest.Context, obj, old runtime.Object) {
	newDc := obj.(*deployapi.DeploymentConfig)
	oldDc := old.(*deployapi.DeploymentConfig)
	// Status updates may only touch status: spec and labels are reset to the
	// stored object's values.
	newDc.Spec = oldDc.Spec
	newDc.Labels = oldDc.Labels
}

// ValidateUpdate is the default update validation for an end user updating status.
func (statusStrategy) ValidateUpdate(ctx apirequest.Context, obj, old runtime.Object) field.ErrorList {
	return validation.ValidateDeploymentConfigStatusUpdate(obj.(*deployapi.DeploymentConfig), old.(*deployapi.DeploymentConfig))
}
{ "pile_set_name": "Github" }
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
</head>
<body>
<script>
// Callback invoked by fontlists.swf with the detected font list; the result
// is forwarded to the embedding page. Note: '*' targetOrigin means any
// parent may receive the message (font names only, no sensitive data).
Fonts = function(fonts){
  window.parent.postMessage(fonts,'*');
}
</script>
<object id="fontlist" type="application/x-shockwave-flash" width="1" height="1" data="fontlists.swf"></object>
</body>
</html>
{ "pile_set_name": "Github" }
/* BlueGriffon UI stylesheet: XBL bindings for custom widgets and styling for
   the segmented "multibutton" toolbar control. XUL is the default namespace;
   %ifdef blocks are resolved by Mozilla's build-time preprocessor. */
@namespace url("http://www.mozilla.org/keymaster/gatekeeper/there.is.only.xul");
@namespace html url("http://www.w3.org/1999/xhtml");

/* Attach XBL implementations to the custom elements. */
ecolorpicker {
  -moz-binding: url("chrome://bluegriffon/content/bindings/ecolorpicker.xml#ecolorpicker");
}

filepickerbutton {
  -moz-binding: url('chrome://bluegriffon/content/bindings/filepickerbutton.xml#filepickerbutton');
}

/* Highlight a pressed or checked segment. */
.multibutton[checked],
.multibutton:hover:active:not([disabled]) {
  color: #00abff;
}

.multibutton {
  margin: 4px 0px;
  background-color: var(--bg-dark-background);
  -moz-appearance: none;
  border: thin var(--bg-dark-border) solid;
  color: var(--bg-foreground-color);
  padding: 2px 6px;
  text-shadow: none;
}

/* Round only the outer corners of the segmented group; the :-moz-locale-dir
   variants mirror the rounding for right-to-left locales. */
.multibutton:-moz-locale-dir(ltr) {
  border-top-left-radius: 5px;
  border-bottom-left-radius: 5px;
}

.multibutton:-moz-locale-dir(rtl) {
  border-top-right-radius: 5px;
  border-bottom-right-radius: 5px;
}

/* Inner segments: square the shared edge and collapse the double border. */
.multibutton:-moz-locale-dir(ltr) + .multibutton {
  border-top-left-radius: 0px;
  border-bottom-left-radius: 0px;
  border-left: 0px;
}

.multibutton:-moz-locale-dir(rtl) + .multibutton {
  border-top-right-radius: 0px;
  border-bottom-right-radius: 0px;
  border-right: 0px;
}

.multibutton:last-child:-moz-locale-dir(ltr) {
  border-top-right-radius: 5px;
  border-bottom-right-radius: 5px;
  margin-right: 1em;
}

.multibutton:last-child:-moz-locale-dir(rtl) {
  border-top-left-radius: 5px;
  border-bottom-left-radius: 5px;
  margin-left: 1em;
}

.multibutton .toolbarbutton-icon[src] {
  margin-left: 1em;
  margin-right: 1em;
}

/* Icon-only buttons: hide the (empty) text element entirely. */
.multibutton:not([label]) .toolbarbutton-text {
  display: none;
}

medium {
  -moz-binding: url('chrome://bluegriffon/content/bindings/media.xml#medium');
}

length {
  -moz-binding: url('chrome://bluegriffon/content/bindings/media.xml#length');
}

/* Delete button uses a 3-state sprite (normal/hover/active) selected via
   -moz-image-region; it is revealed only while hovering its row. */
.medium-delete-button {
  visibility: hidden;
  list-style-image: url("chrome://global/skin/icons/close.png");
  -moz-image-region: rect(0, 16px, 16px, 0);
}

hbox:hover > .medium-delete-button {
  visibility: visible;
}

.medium-delete-button:hover {
  -moz-image-region: rect(0, 32px, 16px, 16px);
}

.medium-delete-button:hover:active {
  -moz-image-region: rect(0, 48px, 16px, 32px);
}

%ifdef XP_UNIX
/* On Unix, strip the native toolbarbutton theming. */
toolbarbutton, toolbarbutton:hover {
  -moz-appearance: none;
  border: 0px none ! important;
  background-image: none ! important;
  background-color: transparent ! important;
}
%endif
{ "pile_set_name": "Github" }
# Stub makefile: all build rules are inherited from the repository-wide
# Makefile.common two directories up.
TOP=../..

include $(TOP)/Makefile.common
{ "pile_set_name": "Github" }
/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef LATINIME_SCORING_H
#define LATINIME_SCORING_H

#include "defines.h"

namespace latinime {

class DicNode;
class DicTraverseSession;
class SuggestionResults;

// This class basically tweaks suggestions and distances apart from CompoundDistance
// Abstract interface: concrete scoring policies implement these hooks to rank
// and post-process suggestion candidates during dictionary traversal.
class Scoring {
 public:
    // Converts a compound distance into a final integer score for a candidate.
    // containedErrorTypes describes the error classes found on the path;
    // boostExactMatches/forceCommit adjust ranking behavior.
    virtual int calculateFinalScore(const float compoundDistance, const int inputSize,
            const ErrorTypeUtils::ErrorType containedErrorTypes, const bool forceCommit,
            const bool boostExactMatches, const bool hasProbabilityZero) const = 0;

    // Writes the most probable string for the session into outSuggestionResults.
    virtual void getMostProbableString(const DicTraverseSession *const traverseSession,
            const float weightOfLangModelVsSpatialModel,
            SuggestionResults *const outSuggestionResults) const = 0;

    // Returns an adjusted language-model-vs-spatial-model weight for the given
    // terminal nodes (semantics are implementation-defined).
    virtual float getAdjustedWeightOfLangModelVsSpatialModel(
            DicTraverseSession *const traverseSession, DicNode *const terminals,
            const int size) const = 0;

    // Extra distance cost applied to demote double-letter candidates.
    virtual float getDoubleLetterDemotionDistanceCost(
            const DicNode *const terminalDicNode) const = 0;

    // Whether a multi-word suggestion may be auto-corrected to when it is top-ranked.
    virtual bool autoCorrectsToMultiWordSuggestionIfTop() const = 0;

    // Whether the candidate equals what the user typed.
    virtual bool sameAsTyped(const DicTraverseSession *const traverseSession,
            const DicNode *const dicNode) const = 0;

 protected:
    Scoring() {}
    virtual ~Scoring() {}

 private:
    DISALLOW_COPY_AND_ASSIGN(Scoring);
};
} // namespace latinime
#endif // LATINIME_SCORING_H
{ "pile_set_name": "Github" }
/* * The MIT License * * Copyright (c) 2016 CloudBees, Inc., Nikolas Falco * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. 
* */ package com.cloudbees.jenkins.plugins.bitbucket.server.client.pullrequest; import com.cloudbees.jenkins.plugins.bitbucket.api.BitbucketBranch; import com.cloudbees.jenkins.plugins.bitbucket.api.BitbucketCommit; import com.cloudbees.jenkins.plugins.bitbucket.api.BitbucketPullRequestDestination; import com.cloudbees.jenkins.plugins.bitbucket.server.client.branch.BitbucketServerBranch; import com.cloudbees.jenkins.plugins.bitbucket.server.client.branch.BitbucketServerCommit; import com.cloudbees.jenkins.plugins.bitbucket.server.client.repository.BitbucketServerRepository; import com.fasterxml.jackson.annotation.JsonProperty; public class BitbucketServerPullRequestDestination implements BitbucketPullRequestDestination { @JsonProperty("displayId") private String branchName; @JsonProperty private String latestCommit; private BitbucketServerRepository repository; private BitbucketServerBranch branch; @JsonProperty private BitbucketServerCommit commit; @Override public BitbucketServerRepository getRepository() { return repository; } @Override public BitbucketBranch getBranch() { if (branch == null) { branch = new BitbucketServerBranch(branchName, latestCommit); } return branch; } @Override public BitbucketCommit getCommit() { if (branch != null && commit == null) { commit = new BitbucketServerCommit(branch.getMessage(), latestCommit, branch.getDateMillis(), branch.getAuthor()); } return commit; } public void setRepository(BitbucketServerRepository repository) { this.repository = repository; } }
{ "pile_set_name": "Github" }
; RUN: llc < %s -mtriple=i386-apple-darwin -disable-cgp-branch-opts | grep movw | not grep ", %e"

; Regression test (x86-32): 16-bit moves produced for the i16 shifts below must
; use 16-bit registers; the RUN line fails if any `movw` operand is a 32-bit
; `%e..` register. The IR was reduced from iODBC's SQLDriversW and uses the
; pre-3.7 typed-pointer getelementptr/load syntax.

	%struct.DBC_t = type { i32, i8*, i16, %struct.DBC_t*, i8*, i8*, i8*, i8*, i8*, %struct.DBC_t*, i32, i32, i32, i32, i8*, i8*, i8*, i8*, i8*, i32, i32, i32, i32, i32, i32, i32, i32, i16, i16, i32*, i8, i16, %struct.DRVOPT*, i16 }
	%struct.DRVOPT = type { i16, i32, i8, %struct.DRVOPT* }
	%struct.GENV_t = type { i32, i8*, i16, i8*, i8*, i32, i32, i32, i32, %struct.DBC_t*, i16 }
	%struct.pthread_mutex_t = type { i32, [40 x i8] }
@iodbcdm_global_lock = external global %struct.pthread_mutex_t		; <%struct.pthread_mutex_t*> [#uses=1]

define i16 @SQLDriversW(i8* %henv, i16 zeroext  %fDir, i32* %szDrvDesc, i16 signext  %cbDrvDescMax, i16* %pcbDrvDesc, i32* %szDrvAttr, i16 signext  %cbDrvAttrMax, i16* %pcbDrvAttr) nounwind  {
entry:
	%tmp12 = bitcast i8* %henv to %struct.GENV_t*		; <%struct.GENV_t*> [#uses=1]
	br i1 true, label %bb28, label %bb

bb:		; preds = %entry
	ret i16 0

bb28:		; preds = %entry
	br i1 false, label %bb37, label %done

bb37:		; preds = %bb28
	%tmp46 = getelementptr %struct.GENV_t* %tmp12, i32 0, i32 10		; <i16*> [#uses=1]
	store i16 0, i16* %tmp46, align 4
	br i1 false, label %bb74, label %bb92

bb74:		; preds = %bb37
	br label %bb92

bb92:		; preds = %bb74, %bb37
	; The two i16 shifts below are what the RUN line checks: they must be
	; lowered to 16-bit operations.
	%tmp95180 = shl i16 %cbDrvAttrMax, 2		; <i16> [#uses=1]
	%tmp100178 = shl i16 %cbDrvDescMax, 2		; <i16> [#uses=1]
	%tmp113 = tail call i16 @SQLDrivers_Internal( i8* %henv, i16 zeroext  %fDir, i8* null, i16 signext  %tmp100178, i16* %pcbDrvDesc, i8* null, i16 signext  %tmp95180, i16* %pcbDrvAttr, i8 zeroext  87 ) nounwind 		; <i16> [#uses=1]
	br i1 false, label %done, label %bb137

bb137:		; preds = %bb92
	ret i16 0

done:		; preds = %bb92, %bb28
	%retcode.0 = phi i16 [ -2, %bb28 ], [ %tmp113, %bb92 ]		; <i16> [#uses=2]
	br i1 false, label %bb167, label %bb150

bb150:		; preds = %done
	%tmp157158 = sext i16 %retcode.0 to i32		; <i32> [#uses=1]
	tail call void @trace_SQLDriversW( i32 1, i32 %tmp157158, i8* %henv, i16 zeroext  %fDir, i32* %szDrvDesc, i16 signext  %cbDrvDescMax, i16* %pcbDrvDesc, i32* %szDrvAttr, i16 signext  %cbDrvAttrMax, i16* %pcbDrvAttr ) nounwind
	ret i16 0

bb167:		; preds = %done
	%tmp168 = tail call i32 @pthread_mutex_unlock( %struct.pthread_mutex_t* @iodbcdm_global_lock ) nounwind 		; <i32> [#uses=0]
	ret i16 %retcode.0
}

declare i32 @pthread_mutex_unlock(%struct.pthread_mutex_t*)

declare i16 @SQLDrivers_Internal(i8*, i16 zeroext , i8*, i16 signext , i16*, i8*, i16 signext , i16*, i8 zeroext ) nounwind

declare void @trace_SQLDriversW(i32, i32, i8*, i16 zeroext , i32*, i16 signext , i16*, i32*, i16 signext , i16*)
{ "pile_set_name": "Github" }
/*

  KLayout Layout Viewer
  Copyright (C) 2006-2020 Matthias Koefferlein

  This program is free software; you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation; either version 2 of the License, or
  (at your option) any later version.

  This program is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program; if not, write to the Free Software
  Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA

*/

#ifndef HDR_tlLog
#define HDR_tlLog

#include "tlCommon.h"

#include "tlString.h"
#include "tlThreads.h"
#include "tlObjectCollection.h"

namespace tl
{

/**
 *  @brief Set the verbosity level
 *
 *  Predefined levels are:
 *    0: none
 *    10: basic
 *    11: basic timing
 *    20: detailed
 *    21: detailed timing
 *    30: verbose
 *    31: verbose timing
 *    100+: very verbose
 */
TL_PUBLIC void verbosity (int level);

/**
 *  @brief Get the verbosity level
 */
TL_PUBLIC int verbosity ();

/**
 *  @brief A "endl" tag class
 *
 *  This class is supposed to issue a end-of-line.
 */
struct TL_PUBLIC ChannelEndl
{
  //  just a "tag"
};

extern TL_PUBLIC ChannelEndl endl;

/**
 *  @brief A "noendl" tag class
 *
 *  This class is supposed to suppress the implicit end-of-line.
 */
struct TL_PUBLIC ChannelNoendl
{
  //  just a "tag"
};

extern TL_PUBLIC ChannelNoendl noendl;

class TL_PUBLIC ChannelProxy;

/**
 *  @brief A basic channel
 *
 *  Channels are supposed to be derived by subclasses providing
 *  a special implementation for the channels.
 */
class TL_PUBLIC Channel
  : public tl::Object
{
public:
  /**
   *  @brief Construct a channel
   */
  Channel ();

  /**
   *  @brief Destructor
   */
  virtual ~Channel ();

  /**
   *  @brief Output "something"
   *
   *  A proxy object to the original channel is returned that does
   *  locking of the channel and reference counting such that the
   *  channel is freed again once it is no longer used.
   */
  template <class T>
  ChannelProxy operator<< (const T &t);

  /**
   *  @brief Output a const char *
   *
   *  For the return object see the generic operator<<
   */
  ChannelProxy operator<< (const char *s);

  /**
   *  @brief A end-of-line output
   */
  ChannelProxy operator<< (ChannelEndl);

  /**
   *  @brief Suppress the implicit end of line at the end
   */
  ChannelProxy operator<< (ChannelNoendl);

protected:
  //  this is the interface implemented by the subclasses:
  //  puts() emits raw text, endl() emits a line break, begin()/end()
  //  bracket one logical output sequence (issued by the proxy lifetime).
  virtual void puts (const char *s) = 0;
  virtual void endl () = 0;
  virtual void end () = 0;
  virtual void begin () = 0;

  //  serializes concurrent output from multiple threads
  tl::Mutex m_lock;

private:
  friend class ChannelProxy;
  friend class LogTee;

  ChannelProxy issue_proxy ();
  void release_proxy ();

  void noendl ()
  {
    m_no_endl = true;
  }

  //  true if the implicit end-of-line at the end of a sequence is suppressed
  bool m_no_endl;
  //  true while an output sequence (begin .. end) is in progress
  bool m_active;
};

/**
 *  @brief A channel proxy
 *
 *  The proxy objects are used to control when the channel is to
 *  be released.
 */
class TL_PUBLIC ChannelProxy
{
public:
  /**
   *  @brief Construct a channel proxy to a channel
   */
  ChannelProxy (Channel *channel);

  /**
   *  @brief Destructor
   */
  ~ChannelProxy ();

  /**
   *  @brief Output "something"
   */
  template <class T>
  ChannelProxy &operator<< (const T &t)
  {
    mp_channel->puts (tl::to_string (t).c_str ());
    return *this;
  }

  /**
   *  @brief Output a const char *
   */
  ChannelProxy &operator<< (const char *s)
  {
    mp_channel->puts (s);
    return *this;
  }

  /**
   *  @brief A end-of-line output
   */
  ChannelProxy &operator<< (ChannelEndl)
  {
    mp_channel->endl ();
    return *this;
  }

  /**
   *  @brief Suppress the implicit end-of-line
   */
  ChannelProxy &operator<< (ChannelNoendl)
  {
    mp_channel->noendl ();
    return *this;
  }

private:
  Channel *mp_channel;

  //  copying only by the Channel class. This one knows what it does
  friend class Channel;

  ChannelProxy &operator= (const ChannelProxy &);
  ChannelProxy (const ChannelProxy &);
};

template <class T>
inline ChannelProxy
Channel::operator<< (const T &t)
{
  ChannelProxy p = issue_proxy ();
  puts (tl::to_string (t).c_str ());
  return p;
}

inline ChannelProxy
Channel::operator<< (const char *s)
{
  ChannelProxy p = issue_proxy ();
  puts (s);
  return p;
}

inline ChannelProxy
Channel::operator<< (ChannelEndl)
{
  ChannelProxy p = issue_proxy ();
  endl ();
  return p;
}

/**
 *  @brief A multi-cast log distribution object
 *
 *  Forwards every output operation to all registered channels; channels may
 *  be owned (deleted with the tee) or merely referenced.
 */
class TL_PUBLIC LogTee
  : public Channel
{
public:
  LogTee ();
  LogTee (Channel *first, bool owned);

  void add (Channel *other, bool owned);
  void prepend (Channel *other, bool owned);
  void clear ();

protected:
  virtual void puts (const char *s);
  virtual void endl ();
  virtual void end ();
  virtual void begin ();

private:
  tl::weak_collection<tl::Channel> m_channels;
  tl::shared_collection<tl::Channel> m_owned_channels;
};

/// The static instance of the log channel
/// The log channel is identical to the info channel but is silent depending on the verbosity and
/// the output mode. It should be used for general notifications like the beginning of a operation.
extern TL_PUBLIC LogTee log;

/// The static instance of the info channel
extern TL_PUBLIC LogTee info;

/// The static instance of the warning channel
extern TL_PUBLIC LogTee warn;

/// The static instance of the error channel
extern TL_PUBLIC LogTee error;

} // namespace tl

#endif
{ "pile_set_name": "Github" }
var cloneArrayBuffer = require('./_cloneArrayBuffer');

/**
 * Creates a clone of `typedArray`.
 *
 * A shallow clone shares the source's underlying `ArrayBuffer`; a deep clone
 * (`isDeep` truthy) copies the buffer first via `cloneArrayBuffer`. The view
 * is rebuilt with the same constructor, byte offset and length.
 *
 * @private
 * @param {Object} typedArray The typed array to clone.
 * @param {boolean} [isDeep] Specify a deep clone.
 * @returns {Object} Returns the cloned typed array.
 */
function cloneTypedArray(typedArray, isDeep) {
  var sourceBuffer = typedArray.buffer;
  if (isDeep) {
    sourceBuffer = cloneArrayBuffer(sourceBuffer);
  }
  var Ctor = typedArray.constructor;
  return new Ctor(sourceBuffer, typedArray.byteOffset, typedArray.length);
}

module.exports = cloneTypedArray;
{ "pile_set_name": "Github" }
#pragma once #include "DaeTestAutomationPluginAutomationTestFrameworkTestContext.h" #include <CoreMinimal.h> #include <Misc/AutomationTest.h> class ADaeTestSuiteActor; /** Waits for the current test suite to finish. */ DEFINE_LATENT_AUTOMATION_COMMAND_ONE_PARAMETER( FDaeTestAutomationPluginWaitForEndOfTestSuite, FDaeTestAutomationPluginAutomationTestFrameworkTestContext, Context);
{ "pile_set_name": "Github" }
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build s390x
// +build linux
// +build !gccgo

#include "textflag.h"

//
// System calls for s390x, Linux
//
// Just jump to package syscall's implementation for all these functions.
// The runtime may know about them.

TEXT ·Syscall(SB),NOSPLIT,$0-56
	BR	syscall·Syscall(SB)

TEXT ·Syscall6(SB),NOSPLIT,$0-80
	BR	syscall·Syscall6(SB)

// SyscallNoError: like Syscall but without returning errno; coordinates with
// the scheduler via entersyscall/exitsyscall.
TEXT ·SyscallNoError(SB),NOSPLIT,$0-48
	BL	runtime·entersyscall(SB)
	MOVD	a1+8(FP), R2
	MOVD	a2+16(FP), R3
	MOVD	a3+24(FP), R4
	MOVD	$0, R5		// unused arguments are zeroed
	MOVD	$0, R6
	MOVD	$0, R7
	MOVD	trap+0(FP), R1	// syscall entry
	SYSCALL
	MOVD	R2, r1+32(FP)
	MOVD	R3, r2+40(FP)
	BL	runtime·exitsyscall(SB)
	RET

TEXT ·RawSyscall(SB),NOSPLIT,$0-56
	BR	syscall·RawSyscall(SB)

TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
	BR	syscall·RawSyscall6(SB)

// RawSyscallNoError: as above but without scheduler notification (safe only
// for syscalls that cannot block).
TEXT ·RawSyscallNoError(SB),NOSPLIT,$0-48
	MOVD	a1+8(FP), R2
	MOVD	a2+16(FP), R3
	MOVD	a3+24(FP), R4
	MOVD	$0, R5		// unused arguments are zeroed
	MOVD	$0, R6
	MOVD	$0, R7
	MOVD	trap+0(FP), R1	// syscall entry
	SYSCALL
	MOVD	R2, r1+32(FP)
	MOVD	R3, r2+40(FP)
	RET
{ "pile_set_name": "Github" }
#! /usr/bin/env node

// CLI entry point: resolves the configuration for the app name given as the
// first argument and pretty-prints it as JSON.
var rc = require('./index')

// Fix: the second argument of JSON.stringify is the `replacer`; `false` is not
// a valid replacer and is ignored, so the documented no-replacer value `null`
// is passed instead (output is identical, intent is explicit).
console.log(JSON.stringify(rc(process.argv[2]), null, 2))
{ "pile_set_name": "Github" }
/*
Copyright (c) 2003-2018, CKSource - Frederico Knabben. All rights reserved.
For licensing, see LICENSE.md or https://ckeditor.com/legal/ckeditor-oss-license
*/

// Georgian ("ka") localization strings for the CKEditor "elementspath" plugin.
// The values are user-facing translations and must not be edited here;
// "%1" in eleTitle is substituted with the element name at runtime.
CKEDITOR.plugins.setLang( 'elementspath', 'ka', {
	eleLabel: 'ელემეტის გზა',
	eleTitle: '%1 ელემენტი'
} );
{ "pile_set_name": "Github" }
--- layout: documentation title: HS-WV100+ - ZWave --- {% include base.html %} # HS-WV100+ Z-Wave water valve controller This describes the Z-Wave device *HS-WV100+*, manufactured by *[HomeSeer Technologies](http://www.homeseer.com/)* with the thing type UID of ```homeseer_hswv100_00_000```. The device is in the category of *Valve*, defining Valves used to control water or gas. e.g. a flow stop valve.. ![HS-WV100+ product image](https://opensmarthouse.org/zwavedatabase/1271/image/) The HS-WV100+ supports routing. This allows the device to communicate using other routing enabled devices as intermediate routers. This device is also able to participate in the routing of data between other devices in the mesh network. ## Overview HS-WV100+ is a valve controller system that is designed to control the flow of water when Z-Wave commands are received. It may be used with compatible home automation systems (and sensors) to mitigate water damage when leaks are detected. It may also be used for a variety of other automated or remotely controlled water or irrigation needs.  ### Inclusion Information 1. Put your home automation controller into ‘inclusion’ mode. Consult your system’s manual for details. 2. Press the “program” button on the water valve controller unit. Wait for the process to finish. ### Exclusion Information 1. Put your home automation controller into ‘exclusion’ mode. Consult your system’s manual for details. 2. Press the “program” button on the water valve controller unit. Wait for the process to finish ### General Usage Information # OPERATION ## Remote Valve Operation (use this procedure for remote or automatic control of the water valve) HS-WV100+ valve controller behaves like a Z-Wave “binary” device, with “ON” and “OFF” values, when added to most home automation systems. * Open Valve - Issue “ON” command from Z-Wave controller or Z-Wave compatible home automation system. 
* Close Valve - Issue “OFF” command from Z-Wave controller or Z-Wave compatible home automation system. ## Local Valve Operation (use this procedure for convenient local control of the water valve) * Open Valve - Press and release the ’Open’ button on the water valve controller unit to open the water valve. * Close Valve - Press and release the ’Close’ button on the water valve controller unit to close the water valve. ## Local Manual Valve Operation (for use in the event of a power failure or water valve controller failure) * Open Valve - Pull the thumbwheel on the water valve housing up and rotate wheel counter-clockwise. * Close Valve - Pull the thumbwheel on the water valve housing up and rotate wheel clockwise. ## Channels The following table summarises the channels available for the HS-WV100+ -: | Channel Name | Channel ID | Channel Type | Category | Item Type | |--------------|------------|--------------|----------|-----------| | Switch | switch_binary | switch_binary | Switch | Switch | | Alarm (system) | alarm_system | alarm_system | | Switch | ### Switch Switch the power on and off. The ```switch_binary``` channel is of type ```switch_binary``` and supports the ```Switch``` item and is in the ```Switch``` category. ### Alarm (system) Indicates if a system alarm is triggered. The ```alarm_system``` channel is of type ```alarm_system``` and supports the ```Switch``` item. This is a read only channel so will only be updated following state changes from the device. The following state translation is provided for this channel to the ```Switch``` item type -: | Value | Label | |-------|-----------| | OFF | OK | | ON | Alarm | ## Device Configuration The device has no configuration parameters defined. ## Association Groups Association groups allow the device to send unsolicited reports to the controller, or other devices in the network. 
Using association groups can allow you to eliminate polling, providing instant feedback of a device state change without unnecessary network traffic.

The HS-WV100+ supports 1 association group.

### Group 1: Lifeline

The Lifeline association group reports device status to a hub and is not designed to control other devices directly. When using the Lifeline group with a hub, in most cases, only the lifeline group will need to be configured and normally the hub will perform this automatically during the device initialisation.

Association group 1 supports 4 nodes.

## Technical Information

### Endpoints

#### Endpoint 0

| Command Class | Comment |
|---------------|---------|
| COMMAND_CLASS_NO_OPERATION_V1| |
| COMMAND_CLASS_BASIC_V1| |
| COMMAND_CLASS_SWITCH_BINARY_V1| |
| COMMAND_CLASS_ASSOCIATION_GRP_INFO_V1| |
| COMMAND_CLASS_DEVICE_RESET_LOCALLY_V1| |
| COMMAND_CLASS_ZWAVEPLUS_INFO_V1| |
| COMMAND_CLASS_ALARM_V4| |
| COMMAND_CLASS_MANUFACTURER_SPECIFIC_V1| |
| COMMAND_CLASS_POWERLEVEL_V1| |
| COMMAND_CLASS_FIRMWARE_UPDATE_MD_V1| |
| COMMAND_CLASS_ASSOCIATION_V2| |
| COMMAND_CLASS_VERSION_V2| |

### Documentation Links

* [User Manual](https://opensmarthouse.org/zwavedatabase/1271/HS-WV100-Manual.pdf)

---

Did you spot an error in the above definition or want to improve the content? You can [contribute to the database here](https://opensmarthouse.org/zwavedatabase/1271).
{ "pile_set_name": "Github" }
// Package tokens provides create/get/validate/revoke operations for
// OpenStack Identity v3 tokens.
package tokens

import (
	"net/http"

	"github.com/rackspace/gophercloud"
)

// Scope allows a created token to be limited to a specific domain or project.
// Valid combinations (enforced by Create): ProjectID alone; ProjectName plus
// exactly one of DomainID/DomainName; DomainID alone. DomainName alone is
// rejected, as is an entirely empty Scope.
type Scope struct {
	ProjectID   string // ID of the project to scope the token to.
	ProjectName string // Name of the project; requires DomainID or DomainName to resolve it.
	DomainID    string // ID of the domain to scope to, or to resolve ProjectName within.
	DomainName  string // Name of the domain to scope to, or to resolve ProjectName within.
}

// subjectTokenHeaders builds the X-Subject-Token header used by the token
// inspection endpoints (Get/Validate/Revoke). It identifies the token being
// operated ON, as opposed to the token used to authenticate the request
// itself (which the ServiceClient supplies separately).
func subjectTokenHeaders(c *gophercloud.ServiceClient, subjectToken string) map[string]string {
	return map[string]string{
		"X-Subject-Token": subjectToken,
	}
}

// Create authenticates and either generates a new token, or changes the Scope
// of an existing token.
//
// Authentication uses either options.Password (with Username+Domain or
// UserID) or, when Password is empty, the client's existing TokenID. The
// optional scope argument restricts the resulting token to a project or
// domain; pass nil for an unscoped token. Incompatible or insufficient
// option combinations are reported as an error in the returned CreateResult
// without issuing any request.
func Create(c *gophercloud.ServiceClient, options gophercloud.AuthOptions, scope *Scope) CreateResult {
	// Anonymous request-body types mirroring the Identity v3
	// POST /auth/tokens JSON structure. Pointer fields plus "omitempty"
	// keep absent values out of the serialized payload.
	type domainReq struct {
		ID   *string `json:"id,omitempty"`
		Name *string `json:"name,omitempty"`
	}
	type projectReq struct {
		Domain *domainReq `json:"domain,omitempty"`
		Name   *string    `json:"name,omitempty"`
		ID     *string    `json:"id,omitempty"`
	}
	type userReq struct {
		ID       *string    `json:"id,omitempty"`
		Name     *string    `json:"name,omitempty"`
		Password string     `json:"password"`
		Domain   *domainReq `json:"domain,omitempty"`
	}
	type passwordReq struct {
		User userReq `json:"user"`
	}
	type tokenReq struct {
		ID string `json:"id"`
	}
	type identityReq struct {
		Methods  []string     `json:"methods"`
		Password *passwordReq `json:"password,omitempty"`
		Token    *tokenReq    `json:"token,omitempty"`
	}
	type scopeReq struct {
		Domain  *domainReq  `json:"domain,omitempty"`
		Project *projectReq `json:"project,omitempty"`
	}
	type authReq struct {
		Identity identityReq `json:"identity"`
		Scope    *scopeReq   `json:"scope,omitempty"`
	}
	type request struct {
		Auth authReq `json:"auth"`
	}

	// Populate the request structure based on the provided arguments. Create and return an error
	// if insufficient or incompatible information is present.
	var req request

	// Test first for unrecognized arguments: these AuthOptions fields belong
	// to the Identity v2 API and are rejected here.
	if options.APIKey != "" {
		return createErr(ErrAPIKeyProvided)
	}
	if options.TenantID != "" {
		return createErr(ErrTenantIDProvided)
	}
	if options.TenantName != "" {
		return createErr(ErrTenantNameProvided)
	}

	if options.Password == "" {
		if c.TokenID != "" {
			// Because we aren't using password authentication, it's an error to also provide
			// any of the user-based authentication parameters.
			if options.Username != "" {
				return createErr(ErrUsernameWithToken)
			}
			if options.UserID != "" {
				return createErr(ErrUserIDWithToken)
			}
			if options.DomainID != "" {
				return createErr(ErrDomainIDWithToken)
			}
			if options.DomainName != "" {
				return createErr(ErrDomainNameWithToken)
			}

			// Configure the request for Token authentication.
			req.Auth.Identity.Methods = []string{"token"}
			req.Auth.Identity.Token = &tokenReq{
				ID: c.TokenID,
			}
		} else {
			// If no password or token ID are available, authentication can't continue.
			return createErr(ErrMissingPassword)
		}
	} else {
		// Password authentication.
		req.Auth.Identity.Methods = []string{"password"}

		// At least one of Username and UserID must be specified.
		if options.Username == "" && options.UserID == "" {
			return createErr(ErrUsernameOrUserID)
		}

		if options.Username != "" {
			// If Username is provided, UserID may not be provided.
			if options.UserID != "" {
				return createErr(ErrUsernameOrUserID)
			}

			// Either DomainID or DomainName must also be specified, since a
			// username alone is not unique across domains.
			if options.DomainID == "" && options.DomainName == "" {
				return createErr(ErrDomainIDOrDomainName)
			}

			if options.DomainID != "" {
				if options.DomainName != "" {
					return createErr(ErrDomainIDOrDomainName)
				}

				// Configure the request for Username and Password authentication with a DomainID.
				req.Auth.Identity.Password = &passwordReq{
					User: userReq{
						Name:     &options.Username,
						Password: options.Password,
						Domain:   &domainReq{ID: &options.DomainID},
					},
				}
			}

			if options.DomainName != "" {
				// Configure the request for Username and Password authentication with a DomainName.
				req.Auth.Identity.Password = &passwordReq{
					User: userReq{
						Name:     &options.Username,
						Password: options.Password,
						Domain:   &domainReq{Name: &options.DomainName},
					},
				}
			}
		}

		if options.UserID != "" {
			// If UserID is specified, neither DomainID nor DomainName may be:
			// a UserID is already globally unique.
			if options.DomainID != "" {
				return createErr(ErrDomainIDWithUserID)
			}
			if options.DomainName != "" {
				return createErr(ErrDomainNameWithUserID)
			}

			// Configure the request for UserID and Password authentication.
			req.Auth.Identity.Password = &passwordReq{
				User: userReq{ID: &options.UserID, Password: options.Password},
			}
		}
	}

	// Add a "scope" element if a Scope has been provided.
	if scope != nil {
		if scope.ProjectName != "" {
			// ProjectName provided: either DomainID or DomainName must also be supplied.
			// ProjectID may not be supplied.
			if scope.DomainID == "" && scope.DomainName == "" {
				return createErr(ErrScopeDomainIDOrDomainName)
			}
			if scope.ProjectID != "" {
				return createErr(ErrScopeProjectIDOrProjectName)
			}

			if scope.DomainID != "" {
				// ProjectName + DomainID
				req.Auth.Scope = &scopeReq{
					Project: &projectReq{
						Name:   &scope.ProjectName,
						Domain: &domainReq{ID: &scope.DomainID},
					},
				}
			}

			if scope.DomainName != "" {
				// ProjectName + DomainName
				req.Auth.Scope = &scopeReq{
					Project: &projectReq{
						Name:   &scope.ProjectName,
						Domain: &domainReq{Name: &scope.DomainName},
					},
				}
			}
		} else if scope.ProjectID != "" {
			// ProjectID provided. ProjectName, DomainID, and DomainName may not be provided.
			if scope.DomainID != "" {
				return createErr(ErrScopeProjectIDAlone)
			}
			if scope.DomainName != "" {
				return createErr(ErrScopeProjectIDAlone)
			}

			// ProjectID
			req.Auth.Scope = &scopeReq{
				Project: &projectReq{ID: &scope.ProjectID},
			}
		} else if scope.DomainID != "" {
			// DomainID provided. ProjectID, ProjectName, and DomainName may not be provided.
			if scope.DomainName != "" {
				return createErr(ErrScopeDomainIDOrDomainName)
			}

			// DomainID
			req.Auth.Scope = &scopeReq{
				Domain: &domainReq{ID: &scope.DomainID},
			}
		} else if scope.DomainName != "" {
			// A domain name alone cannot be used as a scope target.
			return createErr(ErrScopeDomainName)
		} else {
			return createErr(ErrScopeEmpty)
		}
	}

	var result CreateResult
	var response *http.Response
	response, result.Err = c.Post(tokenURL(c), req, &result.Body, nil)
	if result.Err != nil {
		return result
	}
	// The new token ID is returned in the X-Subject-Token response header,
	// so expose the headers on the result.
	result.Header = response.Header
	return result
}

// Get validates and retrieves information about another token, identified by
// the token argument (sent as X-Subject-Token).
func Get(c *gophercloud.ServiceClient, token string) GetResult {
	var result GetResult
	var response *http.Response
	response, result.Err = c.Get(tokenURL(c), &result.Body, &gophercloud.RequestOpts{
		MoreHeaders: subjectTokenHeaders(c, token),
		OkCodes:     []int{200, 203},
	})
	if result.Err != nil {
		return result
	}
	result.Header = response.Header
	return result
}

// Validate determines if a specified token is valid or not.
// It issues a HEAD request: 204 means valid, 404 means invalid; any other
// status is surfaced as an error.
func Validate(c *gophercloud.ServiceClient, token string) (bool, error) {
	response, err := c.Request("HEAD", tokenURL(c), gophercloud.RequestOpts{
		MoreHeaders: subjectTokenHeaders(c, token),
		OkCodes:     []int{204, 404},
	})
	if err != nil {
		return false, err
	}
	return response.StatusCode == 204, nil
}

// Revoke immediately makes the specified token invalid.
func Revoke(c *gophercloud.ServiceClient, token string) RevokeResult {
	var res RevokeResult
	_, res.Err = c.Delete(tokenURL(c), &gophercloud.RequestOpts{
		MoreHeaders: subjectTokenHeaders(c, token),
	})
	return res
}
{ "pile_set_name": "Github" }
from .preresnet import * from .preresnet_dropout import * from .vgg import * from .vgg_dropout import * from .wide_resnet import * from .wide_resnet_dropout import * from .lenet5 import * from .tiramisu import *
{ "pile_set_name": "Github" }
//
// Generated by class-dump 3.5 (64 bit) (Debug version compiled Jun 6 2019 20:12:56).
//
// class-dump is Copyright (C) 1997-1998, 2000-2001, 2004-2015 by Steve Nygard.
//
#import <IDEFoundation/IDEBatchFindFileResult.h>

// Category extending IDEBatchFindFileResult with a UI-layer accessor.
// NOTE(review): this header was reverse-engineered by class-dump; the exact
// semantics of -icon are not visible here — presumably it returns the image
// object used to represent this file result in the IDE UI. Confirm against
// the implementation before relying on it.
@interface IDEBatchFindFileResult (UIAdditions)

// Icon representing this find result (return type erased to `id` by class-dump).
- (id)icon;

@end
{ "pile_set_name": "Github" }
version: 1 dn: m-oid=2.5.21.8,ou=attributeTypes,cn=system,ou=schema creatorsname: uid=admin,ou=system objectclass: metaAttributeType objectclass: metaTop objectclass: top m-equality: objectIdentifierFirstComponentMatch m-syntax: 1.3.6.1.4.1.1466.115.121.1.31 m-singlevalue: FALSE m-collective: FALSE m-nousermodification: FALSE m-usage: DIRECTORY_OPERATION m-oid: 2.5.21.8 m-name: matchingRuleUse m-description: RFC2252: matching rule uses m-obsolete: FALSE entryUUID: b7e60a16-c233-4448-99fe-cb1c8d79243b entryCSN: 20130919081859.237000Z#000000#000#000000 entryParentId: 00d07a58-ce34-4e15-b277-c443ee6d991b createTimestamp: 20130919081910.065Z
{ "pile_set_name": "Github" }
name: flutter_gradients description: A curated collection of awesome gradients made in Dart for Flutter. version: 1.0.0+2 homepage: https://github.com/JonathanMonga/flutter_gradients environment: sdk: ">=2.6.0 <3.0.0" dependencies: flutter: sdk: flutter vector_math: ^2.0.8 dev_dependencies: flutter_test: sdk: flutter
{ "pile_set_name": "Github" }
/*
 * SonarQube Java
 * Copyright (C) 2012-2020 SonarSource SA
 * mailto:info AT sonarsource DOT com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 3 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */
package org.sonar.java.checks;

import java.util.Optional;
import org.sonar.check.Rule;
import org.sonar.java.checks.methods.AbstractMethodDetection;
import org.sonar.java.model.ExpressionUtils;
import org.sonar.plugins.java.api.semantic.MethodMatchers;
import org.sonar.plugins.java.api.tree.ExpressionTree;
import org.sonar.plugins.java.api.tree.IdentifierTree;
import org.sonar.plugins.java.api.tree.LambdaExpressionTree;
import org.sonar.plugins.java.api.tree.MemberSelectExpressionTree;
import org.sonar.plugins.java.api.tree.MethodInvocationTree;
import org.sonar.plugins.java.api.tree.MethodReferenceTree;
import org.sonar.plugins.java.api.tree.Tree;

/**
 * Rule S4034: prefer {@code anyMatch()}/{@code allMatch()}/{@code noneMatch()} over
 * equivalent but more roundabout stream idioms such as
 * {@code !stream.anyMatch(...)}, {@code filter(...).findFirst().isPresent()} or
 * {@code map(f).anyMatch(Boolean::booleanValue)}.
 */
@Rule(key = "S4034")
public class PreferStreamAnyMatchCheck extends AbstractMethodDetection {
  private static final String[] STREAM_TYPES = {
    "java.util.stream.Stream",
    "java.util.stream.IntStream",
    "java.util.stream.LongStream",
    "java.util.stream.DoubleStream"
  };

  // Stream terminal/intermediate operations of interest.
  private static final MethodMatchers FIND_METHODS = MethodMatchers.create()
    .ofTypes(STREAM_TYPES).names("findFirst", "findAny").addWithoutParametersMatcher().build();
  private static final MethodMatchers MAP_METHODS = MethodMatchers.create()
    .ofTypes(STREAM_TYPES).names("map").addParametersMatcher("java.util.function.Function").build();
  private static final MethodMatchers FILTER_METHODS = MethodMatchers.create()
    .ofTypes(STREAM_TYPES).names("filter").withAnyParameters().build();
  private static final MethodMatchers BOOLEAN_VALUE = MethodMatchers.create()
    .ofTypes("java.lang.Boolean")
    .names("booleanValue")
    .addWithoutParametersMatcher()
    .build();

  @Override
  protected MethodMatchers getMethodInvocationMatchers() {
    // Entry points: Optional*.isPresent() and Stream*.anyMatch(Predicate).
    return MethodMatchers.or(
      MethodMatchers.create()
        .ofTypes("java.util.Optional", "java.util.OptionalInt", "java.util.OptionalLong",
          "java.util.OptionalDouble")
        .names("isPresent")
        .addWithoutParametersMatcher()
        .build(),
      MethodMatchers.create()
        .ofTypes(STREAM_TYPES)
        .names("anyMatch")
        .addParametersMatcher("java.util.function.Predicate")
        .build());
  }

  @Override
  protected void onMethodInvocationFound(MethodInvocationTree invocation) {
    switch (invocation.symbol().name()) {
      case "isPresent":
        checkIsPresentChain(invocation);
        break;
      case "anyMatch":
        checkAnyMatchUsage(invocation);
        break;
      default:
        // Unreachable: only the two names above are matched.
        break;
    }
  }

  /**
   * Reports negated {@code anyMatch()} calls (which should become {@code noneMatch()}
   * or {@code allMatch()}), and {@code map(f).anyMatch(Boolean::booleanValue)} chains
   * where the mapper could be the predicate directly.
   */
  private void checkAnyMatchUsage(MethodInvocationTree anyMatchInvocation) {
    ExpressionTree predicate = anyMatchInvocation.arguments().get(0);
    IdentifierTree anyMatchName = ExpressionUtils.methodName(anyMatchInvocation);

    if (anyMatchInvocation.parent().is(Tree.Kind.LOGICAL_COMPLEMENT)) {
      boolean predicateIsNegatedLambda = predicate.is(Tree.Kind.LAMBDA_EXPRESSION)
        && ((LambdaExpressionTree) predicate).body().is(Tree.Kind.LOGICAL_COMPLEMENT);
      // !anyMatch(x -> !p(x))  ->  allMatch(p);  !anyMatch(p)  ->  noneMatch(p)
      String message = predicateIsNegatedLambda
        ? "Replace this double negation with \"allMatch()\" and positive predicate."
        : "Replace this negation and \"anyMatch()\" with \"noneMatch()\".";
      context.reportIssue(this, anyMatchName, message);
    }

    if (predicate.is(Tree.Kind.METHOD_REFERENCE) && isBooleanValueReference((MethodReferenceTree) predicate)) {
      // map(f).anyMatch(Boolean::booleanValue)  ->  anyMatch(f)
      previousMITInChain(anyMatchInvocation)
        .filter(MAP_METHODS::matches)
        .ifPresent(mapInvocation -> context.reportIssue(this, anyMatchName,
          "Use mapper from \"map()\" directly as predicate in \"anyMatch()\"."));
    }
  }

  private static boolean isBooleanValueReference(MethodReferenceTree predicate) {
    return BOOLEAN_VALUE.matches(predicate.method().symbol());
  }

  /**
   * Reports {@code filter(p).findFirst().isPresent()} / {@code filter(p).findAny().isPresent()}
   * chains, which are equivalent to {@code anyMatch(p)}.
   */
  private void checkIsPresentChain(MethodInvocationTree isPresentInvocation) {
    Optional<MethodInvocationTree> find = previousMITInChain(isPresentInvocation)
      .filter(FIND_METHODS::matches);
    if (!find.isPresent()) {
      return;
    }
    MethodInvocationTree findInvocation = find.get();
    previousMITInChain(findInvocation)
      .filter(FILTER_METHODS::matches)
      .ifPresent(filterInvocation -> context.reportIssue(this,
        ExpressionUtils.methodName(filterInvocation),
        ExpressionUtils.methodName(isPresentInvocation),
        "Replace this \"filter()." + ExpressionUtils.methodName(findInvocation).name()
          + "().isPresent()\" chain with \"anyMatch()\"."));
  }

  /**
   * Returns the method invocation this call is chained onto, i.e. the {@code prev()}
   * in {@code prev().current()}, if any.
   */
  private static Optional<MethodInvocationTree> previousMITInChain(MethodInvocationTree invocation) {
    ExpressionTree callee = invocation.methodSelect();
    if (!callee.is(Tree.Kind.MEMBER_SELECT)) {
      return Optional.empty();
    }
    ExpressionTree receiver = ((MemberSelectExpressionTree) callee).expression();
    return receiver.is(Tree.Kind.METHOD_INVOCATION)
      ? Optional.of((MethodInvocationTree) receiver)
      : Optional.empty();
  }
}
{ "pile_set_name": "Github" }
/* * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd * Author: Chris Zhong <zyw@rock-chips.com> * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and * may be copied, distributed, and modified under those terms. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <drm/drmP.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_crtc_helper.h> #include <drm/drm_dp_helper.h> #include <drm/drm_edid.h> #include <drm/drm_of.h> #include <linux/clk.h> #include <linux/component.h> #include <linux/extcon.h> #include <linux/firmware.h> #include <linux/regmap.h> #include <linux/reset.h> #include <linux/mfd/syscon.h> #include <linux/phy/phy.h> #include <sound/hdmi-codec.h> #include "cdn-dp-core.h" #include "cdn-dp-reg.h" #include "rockchip_drm_vop.h" #define connector_to_dp(c) \ container_of(c, struct cdn_dp_device, connector) #define encoder_to_dp(c) \ container_of(c, struct cdn_dp_device, encoder) #define GRF_SOC_CON9 0x6224 #define DP_SEL_VOP_LIT BIT(12) #define GRF_SOC_CON26 0x6268 #define UPHY_SEL_BIT 3 #define UPHY_SEL_MASK BIT(19) #define DPTX_HPD_SEL (3 << 12) #define DPTX_HPD_DEL (2 << 12) #define DPTX_HPD_SEL_MASK (3 << 28) #define CDN_FW_TIMEOUT_MS (64 * 1000) #define CDN_DPCD_TIMEOUT_MS 5000 #define CDN_DP_FIRMWARE "rockchip/dptx.bin" struct cdn_dp_data { u8 max_phy; }; struct cdn_dp_data rk3399_cdn_dp = { .max_phy = 2, }; static const struct of_device_id cdn_dp_dt_ids[] = { { .compatible = "rockchip,rk3399-cdn-dp", .data = (void *)&rk3399_cdn_dp }, {} }; MODULE_DEVICE_TABLE(of, cdn_dp_dt_ids); static int cdn_dp_grf_write(struct cdn_dp_device *dp, unsigned int reg, unsigned int val) { int ret; ret = clk_prepare_enable(dp->grf_clk); if (ret) { DRM_DEV_ERROR(dp->dev, "Failed to 
prepare_enable grf clock\n"); return ret; } ret = regmap_write(dp->grf, reg, val); if (ret) { DRM_DEV_ERROR(dp->dev, "Could not write to GRF: %d\n", ret); return ret; } clk_disable_unprepare(dp->grf_clk); return 0; } static int cdn_dp_clk_enable(struct cdn_dp_device *dp) { int ret; unsigned long rate; ret = clk_prepare_enable(dp->pclk); if (ret < 0) { DRM_DEV_ERROR(dp->dev, "cannot enable dp pclk %d\n", ret); goto err_pclk; } ret = clk_prepare_enable(dp->core_clk); if (ret < 0) { DRM_DEV_ERROR(dp->dev, "cannot enable core_clk %d\n", ret); goto err_core_clk; } ret = pm_runtime_get_sync(dp->dev); if (ret < 0) { DRM_DEV_ERROR(dp->dev, "cannot get pm runtime %d\n", ret); goto err_pm_runtime_get; } reset_control_assert(dp->core_rst); reset_control_assert(dp->dptx_rst); reset_control_assert(dp->apb_rst); reset_control_deassert(dp->core_rst); reset_control_deassert(dp->dptx_rst); reset_control_deassert(dp->apb_rst); rate = clk_get_rate(dp->core_clk); if (!rate) { DRM_DEV_ERROR(dp->dev, "get clk rate failed\n"); ret = -EINVAL; goto err_set_rate; } cdn_dp_set_fw_clk(dp, rate); cdn_dp_clock_reset(dp); return 0; err_set_rate: pm_runtime_put(dp->dev); err_pm_runtime_get: clk_disable_unprepare(dp->core_clk); err_core_clk: clk_disable_unprepare(dp->pclk); err_pclk: return ret; } static void cdn_dp_clk_disable(struct cdn_dp_device *dp) { pm_runtime_put_sync(dp->dev); clk_disable_unprepare(dp->pclk); clk_disable_unprepare(dp->core_clk); } static int cdn_dp_get_port_lanes(struct cdn_dp_port *port) { struct extcon_dev *edev = port->extcon; union extcon_property_value property; int dptx; u8 lanes; dptx = extcon_get_state(edev, EXTCON_DISP_DP); if (dptx > 0) { extcon_get_property(edev, EXTCON_DISP_DP, EXTCON_PROP_USB_SS, &property); if (property.intval) lanes = 2; else lanes = 4; } else { lanes = 0; } return lanes; } static int cdn_dp_get_sink_count(struct cdn_dp_device *dp, u8 *sink_count) { int ret; u8 value; *sink_count = 0; ret = cdn_dp_dpcd_read(dp, DP_SINK_COUNT, &value, 1); if 
(ret) return ret; *sink_count = DP_GET_SINK_COUNT(value); return 0; } static struct cdn_dp_port *cdn_dp_connected_port(struct cdn_dp_device *dp) { struct cdn_dp_port *port; int i, lanes; for (i = 0; i < dp->ports; i++) { port = dp->port[i]; lanes = cdn_dp_get_port_lanes(port); if (lanes) return port; } return NULL; } static bool cdn_dp_check_sink_connection(struct cdn_dp_device *dp) { unsigned long timeout = jiffies + msecs_to_jiffies(CDN_DPCD_TIMEOUT_MS); struct cdn_dp_port *port; u8 sink_count = 0; if (dp->active_port < 0 || dp->active_port >= dp->ports) { DRM_DEV_ERROR(dp->dev, "active_port is wrong!\n"); return false; } port = dp->port[dp->active_port]; /* * Attempt to read sink count, retry in case the sink may not be ready. * * Sinks are *supposed* to come up within 1ms from an off state, but * some docks need more time to power up. */ while (time_before(jiffies, timeout)) { if (!extcon_get_state(port->extcon, EXTCON_DISP_DP)) return false; if (!cdn_dp_get_sink_count(dp, &sink_count)) return sink_count ? 
true : false; usleep_range(5000, 10000); } DRM_DEV_ERROR(dp->dev, "Get sink capability timed out\n"); return false; } static enum drm_connector_status cdn_dp_connector_detect(struct drm_connector *connector, bool force) { struct cdn_dp_device *dp = connector_to_dp(connector); enum drm_connector_status status = connector_status_disconnected; mutex_lock(&dp->lock); if (dp->connected) status = connector_status_connected; mutex_unlock(&dp->lock); return status; } static void cdn_dp_connector_destroy(struct drm_connector *connector) { drm_connector_unregister(connector); drm_connector_cleanup(connector); } static const struct drm_connector_funcs cdn_dp_atomic_connector_funcs = { .detect = cdn_dp_connector_detect, .destroy = cdn_dp_connector_destroy, .fill_modes = drm_helper_probe_single_connector_modes, .reset = drm_atomic_helper_connector_reset, .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, }; static int cdn_dp_connector_get_modes(struct drm_connector *connector) { struct cdn_dp_device *dp = connector_to_dp(connector); struct edid *edid; int ret = 0; mutex_lock(&dp->lock); edid = dp->edid; if (edid) { DRM_DEV_DEBUG_KMS(dp->dev, "got edid: width[%d] x height[%d]\n", edid->width_cm, edid->height_cm); dp->sink_has_audio = drm_detect_monitor_audio(edid); ret = drm_add_edid_modes(connector, edid); if (ret) { drm_mode_connector_update_edid_property(connector, edid); drm_edid_to_eld(connector, edid); } } mutex_unlock(&dp->lock); return ret; } static struct drm_encoder * cdn_dp_connector_best_encoder(struct drm_connector *connector) { struct cdn_dp_device *dp = connector_to_dp(connector); return &dp->encoder; } static int cdn_dp_connector_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { struct cdn_dp_device *dp = connector_to_dp(connector); struct drm_display_info *display_info = &dp->connector.display_info; u32 requested, actual, rate, sink_max, source_max = 
0; u8 lanes, bpc; /* If DP is disconnected, every mode is invalid */ if (!dp->connected) return MODE_BAD; switch (display_info->bpc) { case 10: bpc = 10; break; case 6: bpc = 6; break; default: bpc = 8; break; } requested = mode->clock * bpc * 3 / 1000; source_max = dp->lanes; sink_max = drm_dp_max_lane_count(dp->dpcd); lanes = min(source_max, sink_max); source_max = drm_dp_bw_code_to_link_rate(CDN_DP_MAX_LINK_RATE); sink_max = drm_dp_max_link_rate(dp->dpcd); rate = min(source_max, sink_max); actual = rate * lanes / 100; /* efficiency is about 0.8 */ actual = actual * 8 / 10; if (requested > actual) { DRM_DEV_DEBUG_KMS(dp->dev, "requested=%d, actual=%d, clock=%d\n", requested, actual, mode->clock); return MODE_CLOCK_HIGH; } return MODE_OK; } static struct drm_connector_helper_funcs cdn_dp_connector_helper_funcs = { .get_modes = cdn_dp_connector_get_modes, .best_encoder = cdn_dp_connector_best_encoder, .mode_valid = cdn_dp_connector_mode_valid, }; static int cdn_dp_firmware_init(struct cdn_dp_device *dp) { int ret; const u32 *iram_data, *dram_data; const struct firmware *fw = dp->fw; const struct cdn_firmware_header *hdr; hdr = (struct cdn_firmware_header *)fw->data; if (fw->size != le32_to_cpu(hdr->size_bytes)) { DRM_DEV_ERROR(dp->dev, "firmware is invalid\n"); return -EINVAL; } iram_data = (const u32 *)(fw->data + hdr->header_size); dram_data = (const u32 *)(fw->data + hdr->header_size + hdr->iram_size); ret = cdn_dp_load_firmware(dp, iram_data, hdr->iram_size, dram_data, hdr->dram_size); if (ret) return ret; ret = cdn_dp_set_firmware_active(dp, true); if (ret) { DRM_DEV_ERROR(dp->dev, "active ucpu failed: %d\n", ret); return ret; } return cdn_dp_event_config(dp); } static int cdn_dp_get_sink_capability(struct cdn_dp_device *dp) { int ret; if (!cdn_dp_check_sink_connection(dp)) return -ENODEV; ret = cdn_dp_dpcd_read(dp, DP_DPCD_REV, dp->dpcd, DP_RECEIVER_CAP_SIZE); if (ret) { DRM_DEV_ERROR(dp->dev, "Failed to get caps %d\n", ret); return ret; } kfree(dp->edid); 
dp->edid = drm_do_get_edid(&dp->connector, cdn_dp_get_edid_block, dp); return 0; } static int cdn_dp_enable_phy(struct cdn_dp_device *dp, struct cdn_dp_port *port) { union extcon_property_value property; int ret; ret = cdn_dp_grf_write(dp, GRF_SOC_CON26, (port->id << UPHY_SEL_BIT) | UPHY_SEL_MASK); if (ret) return ret; if (!port->phy_enabled) { ret = phy_power_on(port->phy); if (ret) { DRM_DEV_ERROR(dp->dev, "phy power on failed: %d\n", ret); goto err_phy; } port->phy_enabled = true; } ret = cdn_dp_grf_write(dp, GRF_SOC_CON26, DPTX_HPD_SEL_MASK | DPTX_HPD_SEL); if (ret) { DRM_DEV_ERROR(dp->dev, "Failed to write HPD_SEL %d\n", ret); goto err_power_on; } ret = cdn_dp_get_hpd_status(dp); if (ret <= 0) { if (!ret) DRM_DEV_ERROR(dp->dev, "hpd does not exist\n"); goto err_power_on; } ret = extcon_get_property(port->extcon, EXTCON_DISP_DP, EXTCON_PROP_USB_TYPEC_POLARITY, &property); if (ret) { DRM_DEV_ERROR(dp->dev, "get property failed\n"); goto err_power_on; } port->lanes = cdn_dp_get_port_lanes(port); ret = cdn_dp_set_host_cap(dp, port->lanes, property.intval); if (ret) { DRM_DEV_ERROR(dp->dev, "set host capabilities failed: %d\n", ret); goto err_power_on; } dp->active_port = port->id; return 0; err_power_on: if (phy_power_off(port->phy)) DRM_DEV_ERROR(dp->dev, "phy power off failed: %d", ret); else port->phy_enabled = false; err_phy: cdn_dp_grf_write(dp, GRF_SOC_CON26, DPTX_HPD_SEL_MASK | DPTX_HPD_DEL); return ret; } static int cdn_dp_disable_phy(struct cdn_dp_device *dp, struct cdn_dp_port *port) { int ret; if (port->phy_enabled) { ret = phy_power_off(port->phy); if (ret) { DRM_DEV_ERROR(dp->dev, "phy power off failed: %d", ret); return ret; } } port->phy_enabled = false; port->lanes = 0; dp->active_port = -1; return 0; } static int cdn_dp_disable(struct cdn_dp_device *dp) { int ret, i; if (!dp->active) return 0; for (i = 0; i < dp->ports; i++) cdn_dp_disable_phy(dp, dp->port[i]); ret = cdn_dp_grf_write(dp, GRF_SOC_CON26, DPTX_HPD_SEL_MASK | DPTX_HPD_DEL); if (ret) { 
DRM_DEV_ERROR(dp->dev, "Failed to clear hpd sel %d\n", ret); return ret; } cdn_dp_set_firmware_active(dp, false); cdn_dp_clk_disable(dp); dp->active = false; dp->link.rate = 0; dp->link.num_lanes = 0; if (!dp->connected) { kfree(dp->edid); dp->edid = NULL; } return 0; } static int cdn_dp_enable(struct cdn_dp_device *dp) { int ret, i, lanes; struct cdn_dp_port *port; port = cdn_dp_connected_port(dp); if (!port) { DRM_DEV_ERROR(dp->dev, "Can't enable without connection\n"); return -ENODEV; } if (dp->active) return 0; ret = cdn_dp_clk_enable(dp); if (ret) return ret; ret = cdn_dp_firmware_init(dp); if (ret) { DRM_DEV_ERROR(dp->dev, "firmware init failed: %d", ret); goto err_clk_disable; } /* only enable the port that connected with downstream device */ for (i = port->id; i < dp->ports; i++) { port = dp->port[i]; lanes = cdn_dp_get_port_lanes(port); if (lanes) { ret = cdn_dp_enable_phy(dp, port); if (ret) continue; ret = cdn_dp_get_sink_capability(dp); if (ret) { cdn_dp_disable_phy(dp, port); } else { dp->active = true; dp->lanes = port->lanes; return 0; } } } err_clk_disable: cdn_dp_clk_disable(dp); return ret; } static void cdn_dp_encoder_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, struct drm_display_mode *adjusted) { struct cdn_dp_device *dp = encoder_to_dp(encoder); struct drm_display_info *display_info = &dp->connector.display_info; struct video_info *video = &dp->video_info; switch (display_info->bpc) { case 10: video->color_depth = 10; break; case 6: video->color_depth = 6; break; default: video->color_depth = 8; break; } video->color_fmt = PXL_RGB; video->v_sync_polarity = !!(mode->flags & DRM_MODE_FLAG_NVSYNC); video->h_sync_polarity = !!(mode->flags & DRM_MODE_FLAG_NHSYNC); memcpy(&dp->mode, adjusted, sizeof(*mode)); } static bool cdn_dp_check_link_status(struct cdn_dp_device *dp) { u8 link_status[DP_LINK_STATUS_SIZE]; struct cdn_dp_port *port = cdn_dp_connected_port(dp); u8 sink_lanes = drm_dp_max_lane_count(dp->dpcd); if (!port || 
!dp->link.rate || !dp->link.num_lanes) return false; if (cdn_dp_dpcd_read(dp, DP_LANE0_1_STATUS, link_status, DP_LINK_STATUS_SIZE)) { DRM_ERROR("Failed to get link status\n"); return false; } /* if link training is requested we should perform it always */ return drm_dp_channel_eq_ok(link_status, min(port->lanes, sink_lanes)); } static void cdn_dp_encoder_enable(struct drm_encoder *encoder) { struct cdn_dp_device *dp = encoder_to_dp(encoder); int ret, val; ret = drm_of_encoder_active_endpoint_id(dp->dev->of_node, encoder); if (ret < 0) { DRM_DEV_ERROR(dp->dev, "Could not get vop id, %d", ret); return; } DRM_DEV_DEBUG_KMS(dp->dev, "vop %s output to cdn-dp\n", (ret) ? "LIT" : "BIG"); if (ret) val = DP_SEL_VOP_LIT | (DP_SEL_VOP_LIT << 16); else val = DP_SEL_VOP_LIT << 16; ret = cdn_dp_grf_write(dp, GRF_SOC_CON9, val); if (ret) return; mutex_lock(&dp->lock); ret = cdn_dp_enable(dp); if (ret) { DRM_DEV_ERROR(dp->dev, "Failed to enable encoder %d\n", ret); goto out; } if (!cdn_dp_check_link_status(dp)) { ret = cdn_dp_train_link(dp); if (ret) { DRM_DEV_ERROR(dp->dev, "Failed link train %d\n", ret); goto out; } } ret = cdn_dp_set_video_status(dp, CONTROL_VIDEO_IDLE); if (ret) { DRM_DEV_ERROR(dp->dev, "Failed to idle video %d\n", ret); goto out; } ret = cdn_dp_config_video(dp); if (ret) { DRM_DEV_ERROR(dp->dev, "Failed to config video %d\n", ret); goto out; } ret = cdn_dp_set_video_status(dp, CONTROL_VIDEO_VALID); if (ret) { DRM_DEV_ERROR(dp->dev, "Failed to valid video %d\n", ret); goto out; } out: mutex_unlock(&dp->lock); } static void cdn_dp_encoder_disable(struct drm_encoder *encoder) { struct cdn_dp_device *dp = encoder_to_dp(encoder); int ret; mutex_lock(&dp->lock); if (dp->active) { ret = cdn_dp_disable(dp); if (ret) { DRM_DEV_ERROR(dp->dev, "Failed to disable encoder %d\n", ret); } } mutex_unlock(&dp->lock); /* * In the following 2 cases, we need to run the event_work to re-enable * the DP: * 1. 
If there is not just one port device is connected, and remove one * device from a port, the DP will be disabled here, at this case, * run the event_work to re-open DP for the other port. * 2. If re-training or re-config failed, the DP will be disabled here. * run the event_work to re-connect it. */ if (!dp->connected && cdn_dp_connected_port(dp)) schedule_work(&dp->event_work); } static int cdn_dp_encoder_atomic_check(struct drm_encoder *encoder, struct drm_crtc_state *crtc_state, struct drm_connector_state *conn_state) { struct rockchip_crtc_state *s = to_rockchip_crtc_state(crtc_state); s->output_mode = ROCKCHIP_OUT_MODE_AAAA; s->output_type = DRM_MODE_CONNECTOR_DisplayPort; return 0; } static const struct drm_encoder_helper_funcs cdn_dp_encoder_helper_funcs = { .mode_set = cdn_dp_encoder_mode_set, .enable = cdn_dp_encoder_enable, .disable = cdn_dp_encoder_disable, .atomic_check = cdn_dp_encoder_atomic_check, }; static const struct drm_encoder_funcs cdn_dp_encoder_funcs = { .destroy = drm_encoder_cleanup, }; static int cdn_dp_parse_dt(struct cdn_dp_device *dp) { struct device *dev = dp->dev; struct device_node *np = dev->of_node; struct platform_device *pdev = to_platform_device(dev); struct resource *res; dp->grf = syscon_regmap_lookup_by_phandle(np, "rockchip,grf"); if (IS_ERR(dp->grf)) { DRM_DEV_ERROR(dev, "cdn-dp needs rockchip,grf property\n"); return PTR_ERR(dp->grf); } res = platform_get_resource(pdev, IORESOURCE_MEM, 0); dp->regs = devm_ioremap_resource(dev, res); if (IS_ERR(dp->regs)) { DRM_DEV_ERROR(dev, "ioremap reg failed\n"); return PTR_ERR(dp->regs); } dp->core_clk = devm_clk_get(dev, "core-clk"); if (IS_ERR(dp->core_clk)) { DRM_DEV_ERROR(dev, "cannot get core_clk_dp\n"); return PTR_ERR(dp->core_clk); } dp->pclk = devm_clk_get(dev, "pclk"); if (IS_ERR(dp->pclk)) { DRM_DEV_ERROR(dev, "cannot get pclk\n"); return PTR_ERR(dp->pclk); } dp->spdif_clk = devm_clk_get(dev, "spdif"); if (IS_ERR(dp->spdif_clk)) { DRM_DEV_ERROR(dev, "cannot get spdif_clk\n"); 
return PTR_ERR(dp->spdif_clk); } dp->grf_clk = devm_clk_get(dev, "grf"); if (IS_ERR(dp->grf_clk)) { DRM_DEV_ERROR(dev, "cannot get grf clk\n"); return PTR_ERR(dp->grf_clk); } dp->spdif_rst = devm_reset_control_get(dev, "spdif"); if (IS_ERR(dp->spdif_rst)) { DRM_DEV_ERROR(dev, "no spdif reset control found\n"); return PTR_ERR(dp->spdif_rst); } dp->dptx_rst = devm_reset_control_get(dev, "dptx"); if (IS_ERR(dp->dptx_rst)) { DRM_DEV_ERROR(dev, "no uphy reset control found\n"); return PTR_ERR(dp->dptx_rst); } dp->core_rst = devm_reset_control_get(dev, "core"); if (IS_ERR(dp->core_rst)) { DRM_DEV_ERROR(dev, "no core reset control found\n"); return PTR_ERR(dp->core_rst); } dp->apb_rst = devm_reset_control_get(dev, "apb"); if (IS_ERR(dp->apb_rst)) { DRM_DEV_ERROR(dev, "no apb reset control found\n"); return PTR_ERR(dp->apb_rst); } return 0; } static int cdn_dp_audio_hw_params(struct device *dev, void *data, struct hdmi_codec_daifmt *daifmt, struct hdmi_codec_params *params) { struct cdn_dp_device *dp = dev_get_drvdata(dev); struct audio_info audio = { .sample_width = params->sample_width, .sample_rate = params->sample_rate, .channels = params->channels, }; int ret; mutex_lock(&dp->lock); if (!dp->active) { ret = -ENODEV; goto out; } switch (daifmt->fmt) { case HDMI_I2S: audio.format = AFMT_I2S; break; case HDMI_SPDIF: audio.format = AFMT_SPDIF; break; default: DRM_DEV_ERROR(dev, "Invalid format %d\n", daifmt->fmt); ret = -EINVAL; goto out; } ret = cdn_dp_audio_config(dp, &audio); if (!ret) dp->audio_info = audio; out: mutex_unlock(&dp->lock); return ret; } static void cdn_dp_audio_shutdown(struct device *dev, void *data) { struct cdn_dp_device *dp = dev_get_drvdata(dev); int ret; mutex_lock(&dp->lock); if (!dp->active) goto out; ret = cdn_dp_audio_stop(dp, &dp->audio_info); if (!ret) dp->audio_info.format = AFMT_UNUSED; out: mutex_unlock(&dp->lock); } static int cdn_dp_audio_digital_mute(struct device *dev, void *data, bool enable) { struct cdn_dp_device *dp = 
dev_get_drvdata(dev); int ret; mutex_lock(&dp->lock); if (!dp->active) { ret = -ENODEV; goto out; } ret = cdn_dp_audio_mute(dp, enable); out: mutex_unlock(&dp->lock); return ret; } static int cdn_dp_audio_get_eld(struct device *dev, void *data, u8 *buf, size_t len) { struct cdn_dp_device *dp = dev_get_drvdata(dev); memcpy(buf, dp->connector.eld, min(sizeof(dp->connector.eld), len)); return 0; } static const struct hdmi_codec_ops audio_codec_ops = { .hw_params = cdn_dp_audio_hw_params, .audio_shutdown = cdn_dp_audio_shutdown, .digital_mute = cdn_dp_audio_digital_mute, .get_eld = cdn_dp_audio_get_eld, }; static int cdn_dp_audio_codec_init(struct cdn_dp_device *dp, struct device *dev) { struct hdmi_codec_pdata codec_data = { .i2s = 1, .spdif = 1, .ops = &audio_codec_ops, .max_i2s_channels = 8, }; dp->audio_pdev = platform_device_register_data( dev, HDMI_CODEC_DRV_NAME, PLATFORM_DEVID_AUTO, &codec_data, sizeof(codec_data)); return PTR_ERR_OR_ZERO(dp->audio_pdev); } static int cdn_dp_request_firmware(struct cdn_dp_device *dp) { int ret; unsigned long timeout = jiffies + msecs_to_jiffies(CDN_FW_TIMEOUT_MS); unsigned long sleep = 1000; WARN_ON(!mutex_is_locked(&dp->lock)); if (dp->fw_loaded) return 0; /* Drop the lock before getting the firmware to avoid blocking boot */ mutex_unlock(&dp->lock); while (time_before(jiffies, timeout)) { ret = request_firmware(&dp->fw, CDN_DP_FIRMWARE, dp->dev); if (ret == -ENOENT) { msleep(sleep); sleep *= 2; continue; } else if (ret) { DRM_DEV_ERROR(dp->dev, "failed to request firmware: %d\n", ret); goto out; } dp->fw_loaded = true; ret = 0; goto out; } DRM_DEV_ERROR(dp->dev, "Timed out trying to load firmware\n"); ret = -ETIMEDOUT; out: mutex_lock(&dp->lock); return ret; } static void cdn_dp_pd_event_work(struct work_struct *work) { struct cdn_dp_device *dp = container_of(work, struct cdn_dp_device, event_work); struct drm_connector *connector = &dp->connector; enum drm_connector_status old_status; int ret; mutex_lock(&dp->lock); if 
(dp->suspended) goto out; ret = cdn_dp_request_firmware(dp); if (ret) goto out; dp->connected = true; /* Not connected, notify userspace to disable the block */ if (!cdn_dp_connected_port(dp)) { DRM_DEV_INFO(dp->dev, "Not connected. Disabling cdn\n"); dp->connected = false; /* Connected but not enabled, enable the block */ } else if (!dp->active) { DRM_DEV_INFO(dp->dev, "Connected, not enabled. Enabling cdn\n"); ret = cdn_dp_enable(dp); if (ret) { DRM_DEV_ERROR(dp->dev, "Enable dp failed %d\n", ret); dp->connected = false; } /* Enabled and connected to a dongle without a sink, notify userspace */ } else if (!cdn_dp_check_sink_connection(dp)) { DRM_DEV_INFO(dp->dev, "Connected without sink. Assert hpd\n"); dp->connected = false; /* Enabled and connected with a sink, re-train if requested */ } else if (!cdn_dp_check_link_status(dp)) { unsigned int rate = dp->link.rate; unsigned int lanes = dp->link.num_lanes; struct drm_display_mode *mode = &dp->mode; DRM_DEV_INFO(dp->dev, "Connected with sink. Re-train link\n"); ret = cdn_dp_train_link(dp); if (ret) { dp->connected = false; DRM_DEV_ERROR(dp->dev, "Train link failed %d\n", ret); goto out; } /* If training result is changed, update the video config */ if (mode->clock && (rate != dp->link.rate || lanes != dp->link.num_lanes)) { ret = cdn_dp_config_video(dp); if (ret) { dp->connected = false; DRM_DEV_ERROR(dp->dev, "Failed to config video %d\n", ret); } } } out: mutex_unlock(&dp->lock); old_status = connector->status; connector->status = connector->funcs->detect(connector, false); if (old_status != connector->status) drm_kms_helper_hotplug_event(dp->drm_dev); } static int cdn_dp_pd_event(struct notifier_block *nb, unsigned long event, void *priv) { struct cdn_dp_port *port = container_of(nb, struct cdn_dp_port, event_nb); struct cdn_dp_device *dp = port->dp; /* * It would be nice to be able to just do the work inline right here. 
* However, we need to make a bunch of calls that might sleep in order * to turn on the block/phy, so use a worker instead. */ schedule_work(&dp->event_work); return NOTIFY_DONE; } static int cdn_dp_bind(struct device *dev, struct device *master, void *data) { struct cdn_dp_device *dp = dev_get_drvdata(dev); struct drm_encoder *encoder; struct drm_connector *connector; struct cdn_dp_port *port; struct drm_device *drm_dev = data; int ret, i; ret = cdn_dp_parse_dt(dp); if (ret < 0) return ret; dp->drm_dev = drm_dev; dp->connected = false; dp->active = false; dp->active_port = -1; dp->fw_loaded = false; INIT_WORK(&dp->event_work, cdn_dp_pd_event_work); encoder = &dp->encoder; encoder->possible_crtcs = drm_of_find_possible_crtcs(drm_dev, dev->of_node); DRM_DEBUG_KMS("possible_crtcs = 0x%x\n", encoder->possible_crtcs); ret = drm_encoder_init(drm_dev, encoder, &cdn_dp_encoder_funcs, DRM_MODE_ENCODER_TMDS, NULL); if (ret) { DRM_ERROR("failed to initialize encoder with drm\n"); return ret; } drm_encoder_helper_add(encoder, &cdn_dp_encoder_helper_funcs); connector = &dp->connector; connector->polled = DRM_CONNECTOR_POLL_HPD; connector->dpms = DRM_MODE_DPMS_OFF; ret = drm_connector_init(drm_dev, connector, &cdn_dp_atomic_connector_funcs, DRM_MODE_CONNECTOR_DisplayPort); if (ret) { DRM_ERROR("failed to initialize connector with drm\n"); goto err_free_encoder; } drm_connector_helper_add(connector, &cdn_dp_connector_helper_funcs); ret = drm_mode_connector_attach_encoder(connector, encoder); if (ret) { DRM_ERROR("failed to attach connector and encoder\n"); goto err_free_connector; } for (i = 0; i < dp->ports; i++) { port = dp->port[i]; port->event_nb.notifier_call = cdn_dp_pd_event; ret = devm_extcon_register_notifier(dp->dev, port->extcon, EXTCON_DISP_DP, &port->event_nb); if (ret) { DRM_DEV_ERROR(dev, "register EXTCON_DISP_DP notifier err\n"); goto err_free_connector; } } pm_runtime_enable(dev); schedule_work(&dp->event_work); return 0; err_free_connector: 
drm_connector_cleanup(connector); err_free_encoder: drm_encoder_cleanup(encoder); return ret; } static void cdn_dp_unbind(struct device *dev, struct device *master, void *data) { struct cdn_dp_device *dp = dev_get_drvdata(dev); struct drm_encoder *encoder = &dp->encoder; struct drm_connector *connector = &dp->connector; cancel_work_sync(&dp->event_work); cdn_dp_encoder_disable(encoder); encoder->funcs->destroy(encoder); connector->funcs->destroy(connector); pm_runtime_disable(dev); if (dp->fw_loaded) release_firmware(dp->fw); kfree(dp->edid); dp->edid = NULL; } static const struct component_ops cdn_dp_component_ops = { .bind = cdn_dp_bind, .unbind = cdn_dp_unbind, }; int cdn_dp_suspend(struct device *dev) { struct cdn_dp_device *dp = dev_get_drvdata(dev); int ret = 0; mutex_lock(&dp->lock); if (dp->active) ret = cdn_dp_disable(dp); dp->suspended = true; mutex_unlock(&dp->lock); return ret; } int cdn_dp_resume(struct device *dev) { struct cdn_dp_device *dp = dev_get_drvdata(dev); mutex_lock(&dp->lock); dp->suspended = false; if (dp->fw_loaded) schedule_work(&dp->event_work); mutex_unlock(&dp->lock); return 0; } static int cdn_dp_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; const struct of_device_id *match; struct cdn_dp_data *dp_data; struct cdn_dp_port *port; struct cdn_dp_device *dp; struct extcon_dev *extcon; struct phy *phy; int i; dp = devm_kzalloc(dev, sizeof(*dp), GFP_KERNEL); if (!dp) return -ENOMEM; dp->dev = dev; match = of_match_node(cdn_dp_dt_ids, pdev->dev.of_node); dp_data = (struct cdn_dp_data *)match->data; for (i = 0; i < dp_data->max_phy; i++) { extcon = extcon_get_edev_by_phandle(dev, i); phy = devm_of_phy_get_by_index(dev, dev->of_node, i); if (PTR_ERR(extcon) == -EPROBE_DEFER || PTR_ERR(phy) == -EPROBE_DEFER) return -EPROBE_DEFER; if (IS_ERR(extcon) || IS_ERR(phy)) continue; port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL); if (!port) return -ENOMEM; port->extcon = extcon; port->phy = phy; port->dp = dp; port->id = 
i; dp->port[dp->ports++] = port; } if (!dp->ports) { DRM_DEV_ERROR(dev, "missing extcon or phy\n"); return -EINVAL; } mutex_init(&dp->lock); dev_set_drvdata(dev, dp); cdn_dp_audio_codec_init(dp, dev); return component_add(dev, &cdn_dp_component_ops); } static int cdn_dp_remove(struct platform_device *pdev) { struct cdn_dp_device *dp = platform_get_drvdata(pdev); platform_device_unregister(dp->audio_pdev); cdn_dp_suspend(dp->dev); component_del(&pdev->dev, &cdn_dp_component_ops); return 0; } static void cdn_dp_shutdown(struct platform_device *pdev) { struct cdn_dp_device *dp = platform_get_drvdata(pdev); cdn_dp_suspend(dp->dev); } static const struct dev_pm_ops cdn_dp_pm_ops = { SET_SYSTEM_SLEEP_PM_OPS(cdn_dp_suspend, cdn_dp_resume) }; struct platform_driver cdn_dp_driver = { .probe = cdn_dp_probe, .remove = cdn_dp_remove, .shutdown = cdn_dp_shutdown, .driver = { .name = "cdn-dp", .owner = THIS_MODULE, .of_match_table = of_match_ptr(cdn_dp_dt_ids), .pm = &cdn_dp_pm_ops, }, };
{ "pile_set_name": "Github" }
goog.module('gents.untyped'); var x; var n = 4; var b = true; var s = 'hello'; var foo = function(v) { return v; }; function bar(b) { return !b; } exports = {foo, bar};
{ "pile_set_name": "Github" }
package ml.wolfe.examples import cc.factorie.la.DenseTensor1 import ml.wolfe.SimpleIndex import ml.wolfe.nlp._ import ml.wolfe.term.TermImplicits._ import ml.wolfe.term._ import ml.wolfe.term.Argmaxer._ import ml.wolfe.term.LearningObjective._ /** * Created by luke on 29/05/15. */ object TaggingDemo extends App { val doc = TokenSplitter(SentenceSplitter( "John Denver is a Songwriter. Denver has produced many records")) val words = doc.tokens.map(_.word).distinct val tags = Seq("O", "B-LOC", "I-LOC", "B-PER", "I-PER") val maxLength = 15 implicit val Words = words.toDom withOOV "[OOV]" implicit val Tags = tags.toDom implicit val Y = Seqs(Tags, 0, maxLength) implicit val Weights = Vectors(dim = 1000) implicit val index = new SimpleIndex() implicit val maxProductParams = BPParameters(iterations = 2) val firstName = Set("John", "Jack") val lastName = Set("Denver") val location = Set("Denver", "Dallas") val punct = Set(",", ".", "?", ";") def lowercase(w: Words.Value) = w.head.isLower def model(w: Weights.Term)(x: Seq[Words.Value])(y: Y.Term) = { def matches = matchingPairs(x) sum(0 until x.length) { i => w dot feature('bias, y(i)) } + sum(0 until x.length) { i => w dot feature('word, y(i) -> Words.Const(x(i))) } + sum(0 until x.length) { i => w dot feature('firstName, I(Bools.Const(firstName(x(i)))), y(i)) } + sum(0 until x.length) { i => w dot feature('lastName, I(Bools.Const(lastName(x(i)))), y(i)) } + sum(0 until x.length) { i => w dot feature('location, I(Bools.Const(location(x(i)))), y(i)) } + sum(0 until x.length) { i => w dot feature('lowercase, I(Bools.Const(lowercase(x(i)))), y(i)) } + sum(0 until x.length) { i => w dot feature('punct, I(Bools.Const(punct(x(i)))), y(i)) } + sum(0 until x.length - 1) { i => w dot feature('pair, y(i) -> y(i + 1)) } + sum(0 until matches.length) { i => w dot feature('match, y(matches(i)._1) -> y(matches(i)._2))} } subjectTo (y.length === x.length) // val wStar = new DenseTensor1(Seq( // feature('location, 1.1, 
Tags.Const("B-LOC")), // feature('lastName, 1.0, Tags.Const("B-PER")), // feature('firstName, 3.0, Tags.Const("B-PER")), // feature('lowercase, 1.0, Tags.Const("O")), // feature('punct, 1.0, Tags.Const("O")), // feature('pair, 2.0, Tags.Const("B-PER") -> Tags.Const("I-PER")), // feature('bias, 1.0, Tags.Const("O")), // feature('match, 2.0, Tags.Const("B-PER") -> Tags.Const("B-PER")), // feature('match, 2.0, Tags.Const("B-PER") -> Tags.Const("I-PER")), // feature('match, 2.0, Tags.Const("I-PER") -> Tags.Const("I-PER")), // feature('match, 2.0, Tags.Const("I-PER") -> Tags.Const("B-PER")) // ).map(_.eval()).reduce(_+_)) val wStar = new DenseTensor1(Seq( feature('match, 1.1, Tags.Const("I-PER") -> Tags.Const("B-PER")), feature('location, 1.1, Tags.Const("B-LOC")), feature('lastName, 0.7, Tags.Const("B-PER")), feature('lastName, 0.3, Tags.Const("I-PER")), feature('firstName, 2, Tags.Const("B-PER")), feature('pair, 0.9, Tags.Const("B-PER") -> Tags.Const("I-PER")), feature('bias, 1, Tags.Const("O")) ).map(_.eval()).reduce(_+_)) def matchingPairs(x: Seq[Words.Value]) = for (j <- 0 until x.length; i <- 0 until j; if x(i) != "." && x(i) == x(j)) yield (i, j) def predict(x: Seq[Words.Value]) = argmax(Y) { model(Weights.Const(wStar))(x) } by maxProduct val xTest = doc.tokens.map(_.word) val yStar = predict(xTest).eval() //val yStar = IndexedSeq("B-PER", "O", "O", "O", "O", "O", "O", "O", "O", "O", "O") println(xTest) println(yStar) println(model(Weights.Const(wStar))(xTest)(Y.Const(yStar)).eval()) //println(best.eval()) }
{ "pile_set_name": "Github" }
package org.nd4j.linalg.api.blas.impl; import org.nd4j.linalg.api.blas.Level2; import org.nd4j.linalg.api.blas.params.SparseCOOGemvParameters; import org.nd4j.linalg.api.buffer.DataBuffer; import org.nd4j.linalg.api.complex.IComplexNDArray; import org.nd4j.linalg.api.complex.IComplexNumber; import org.nd4j.linalg.api.ndarray.INDArray; import org.nd4j.linalg.api.ops.executioner.DefaultOpExecutioner; import static org.nd4j.base.Preconditions.checkArgument; /** * @author Audrey Loeffel */ public abstract class SparseBaseLevel2 extends SparseBaseLevel implements Level2 { @Override public void gemv(char order, char transA, double alpha, INDArray A, INDArray X, double beta, INDArray Y) { checkArgument(A.isMatrix(), "A must be a matrix"); checkArgument(X.isVector(), "X must be a vector"); checkArgument(Y.isVector(), "Y must be a vector"); SparseCOOGemvParameters parameters = new SparseCOOGemvParameters(A, X, Y); switch (A.data().dataType()) { case DOUBLE: DefaultOpExecutioner.validateDataType(DataBuffer.Type.DOUBLE, parameters.getA(), parameters.getX(), parameters.getY()); dcoomv(parameters.getAOrdering(), parameters.getM(), parameters.getVal(), parameters.getRowInd(), parameters.getColInd(), parameters.getNnz(), parameters.getX(), parameters.getY()); break; case FLOAT: DefaultOpExecutioner.validateDataType(DataBuffer.Type.FLOAT, parameters.getA(), parameters.getX(), parameters.getY()); scoomv(parameters.getAOrdering(), parameters.getM(), parameters.getVal(), parameters.getRowInd(), parameters.getColInd(), parameters.getNnz(), parameters.getX(), parameters.getY()); break; default: throw new UnsupportedOperationException(); } } @Override public void gemv(char order, char transA, IComplexNumber alpha, IComplexNDArray A, IComplexNDArray X, IComplexNumber beta, IComplexNDArray Y) { throw new UnsupportedOperationException(); } @Override public void gbmv(char order, char TransA, int KL, int KU, double alpha, INDArray A, INDArray X, double beta, INDArray Y) { } @Override public 
void gbmv(char order, char TransA, int KL, int KU, IComplexNumber alpha, IComplexNDArray A, IComplexNDArray X, IComplexNumber beta, IComplexNDArray Y) { throw new UnsupportedOperationException(); } @Override public void ger(char order, double alpha, INDArray X, INDArray Y, INDArray A) { } @Override public void geru(char order, IComplexNumber alpha, IComplexNDArray X, IComplexNDArray Y, IComplexNDArray A) { } @Override public void hbmv(char order, char Uplo, IComplexNumber alpha, IComplexNDArray A, IComplexNDArray X, IComplexNumber beta, IComplexNDArray Y) { } @Override public void hemv(char order, char Uplo, IComplexNumber alpha, IComplexNDArray A, IComplexNDArray X, IComplexNumber beta, IComplexNDArray Y) { } @Override public void her2(char order, char Uplo, IComplexNumber alpha, IComplexNDArray X, IComplexNDArray Y, IComplexNDArray A) { } @Override public void hpmv(char order, char Uplo, int N, IComplexNumber alpha, IComplexNDArray Ap, IComplexNDArray X, IComplexNumber beta, IComplexNDArray Y) { } @Override public void hpr2(char order, char Uplo, IComplexNumber alpha, IComplexNDArray X, IComplexNDArray Y, IComplexNDArray Ap) { } @Override public void sbmv(char order, char Uplo, double alpha, INDArray A, INDArray X, double beta, INDArray Y) { } @Override public void spmv(char order, char Uplo, double alpha, INDArray Ap, INDArray X, double beta, INDArray Y) { } @Override public void spr(char order, char Uplo, double alpha, INDArray X, INDArray Ap) { } @Override public void spr2(char order, char Uplo, double alpha, INDArray X, INDArray Y, INDArray A) { } @Override public void symv(char order, char Uplo, double alpha, INDArray A, INDArray X, double beta, INDArray Y) { } @Override public void syr(char order, char Uplo, int N, double alpha, INDArray X, INDArray A) { } @Override public void syr2(char order, char Uplo, double alpha, INDArray X, INDArray Y, INDArray A) { } @Override public void tbmv(char order, char Uplo, char TransA, char Diag, INDArray A, INDArray X) { 
} @Override public void tbsv(char order, char Uplo, char TransA, char Diag, INDArray A, INDArray X) { } @Override public void tpmv(char order, char Uplo, char TransA, char Diag, INDArray Ap, INDArray X) { } @Override public void tpsv(char order, char Uplo, char TransA, char Diag, INDArray Ap, INDArray X) { } @Override public void trmv(char order, char Uplo, char TransA, char Diag, INDArray A, INDArray X) { } @Override public void trsv(char order, char Uplo, char TransA, char Diag, INDArray A, INDArray X) { } // ---- protected abstract void scoomv(char transA, int M, DataBuffer values, DataBuffer rowInd, DataBuffer colInd, int nnz, INDArray x, INDArray y); protected abstract void dcoomv(char transA, int M, DataBuffer values, DataBuffer rowInd, DataBuffer colInd, int nnz, INDArray x, INDArray y); }
{ "pile_set_name": "Github" }
/* * Copyright (c) 2013 Dave Collins <dave@davec.name> * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ /* Package spew implements a deep pretty printer for Go data structures to aid in debugging. A quick overview of the additional features spew provides over the built-in printing facilities for Go data types are as follows: * Pointers are dereferenced and followed * Circular data structures are detected and handled properly * Custom Stringer/error interfaces are optionally invoked, including on unexported types * Custom types which only implement the Stringer/error interfaces via a pointer receiver are optionally invoked when passing non-pointer variables * Byte arrays and slices are dumped like the hexdump -C command which includes offsets, byte values in hex, and ASCII output (only when using Dump style) There are two different approaches spew allows for dumping Go data structures: * Dump style which prints with newlines, customizable indentation, and additional debug information such as types and all pointer addresses used to indirect to the final value * A custom Formatter interface that integrates cleanly with the standard fmt package and replaces %v, %+v, %#v, and %#+v to provide inline printing similar to the default %v while providing the additional functionality outlined above and passing 
unsupported format verbs such as %x and %q along to fmt Quick Start This section demonstrates how to quickly get started with spew. See the sections below for further details on formatting and configuration options. To dump a variable with full newlines, indentation, type, and pointer information use Dump, Fdump, or Sdump: spew.Dump(myVar1, myVar2, ...) spew.Fdump(someWriter, myVar1, myVar2, ...) str := spew.Sdump(myVar1, myVar2, ...) Alternatively, if you would prefer to use format strings with a compacted inline printing style, use the convenience wrappers Printf, Fprintf, etc with %v (most compact), %+v (adds pointer addresses), %#v (adds types), or %#+v (adds types and pointer addresses): spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2) spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) spew.Fprintf(someWriter, "myVar1: %v -- myVar2: %+v", myVar1, myVar2) spew.Fprintf(someWriter, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) Configuration Options Configuration of spew is handled by fields in the ConfigState type. For convenience, all of the top-level functions use a global state available via the spew.Config global. It is also possible to create a ConfigState instance that provides methods equivalent to the top-level functions. This allows concurrent configuration options. See the ConfigState documentation for more details. The following configuration options are available: * Indent String to use for each indentation level for Dump functions. It is a single space by default. A popular alternative is "\t". * MaxDepth Maximum number of levels to descend into nested data structures. There is no limit by default. * DisableMethods Disables invocation of error and Stringer interface methods. Method invocation is enabled by default. * DisablePointerMethods Disables invocation of error and Stringer interface methods on types which only accept pointer receivers from non-pointer variables. Pointer method invocation is enabled by default. 
* ContinueOnMethod Enables recursion into types after invoking error and Stringer interface methods. Recursion after method invocation is disabled by default. * SortKeys Specifies map keys should be sorted before being printed. Use this to have a more deterministic, diffable output. Note that only native types (bool, int, uint, floats, uintptr and string) and types which implement error or Stringer interfaces are supported with other types sorted according to the reflect.Value.String() output which guarantees display stability. Natural map order is used by default. * SpewKeys Specifies that, as a last resort attempt, map keys should be spewed to strings and sorted by those strings. This is only considered if SortKeys is true. Dump Usage Simply call spew.Dump with a list of variables you want to dump: spew.Dump(myVar1, myVar2, ...) You may also call spew.Fdump if you would prefer to output to an arbitrary io.Writer. For example, to dump to standard error: spew.Fdump(os.Stderr, myVar1, myVar2, ...) A third option is to call spew.Sdump to get the formatted output as a string: str := spew.Sdump(myVar1, myVar2, ...) Sample Dump Output See the Dump example for details on the setup of the types and variables being shown here. (main.Foo) { unexportedField: (*main.Bar)(0xf84002e210)({ flag: (main.Flag) flagTwo, data: (uintptr) <nil> }), ExportedField: (map[interface {}]interface {}) (len=1) { (string) (len=3) "one": (bool) true } } Byte (and uint8) arrays and slices are displayed uniquely like the hexdump -C command as shown. ([]uint8) (len=32 cap=32) { 00000000 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20 |............... | 00000010 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30 |!"#$%&'()*+,-./0| 00000020 31 32 |12| } Custom Formatter Spew provides a custom formatter that implements the fmt.Formatter interface so that it integrates cleanly with standard fmt package printing functions. 
The formatter is useful for inline printing of smaller data types similar to the standard %v format specifier. The custom formatter only responds to the %v (most compact), %+v (adds pointer addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb combinations. Any other verbs such as %x and %q will be sent to the the standard fmt package for formatting. In addition, the custom formatter ignores the width and precision arguments (however they will still work on the format specifiers not handled by the custom formatter). Custom Formatter Usage The simplest way to make use of the spew custom formatter is to call one of the convenience functions such as spew.Printf, spew.Println, or spew.Printf. The functions have syntax you are most likely already familiar with: spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2) spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) spew.Println(myVar, myVar2) spew.Fprintf(os.Stderr, "myVar1: %v -- myVar2: %+v", myVar1, myVar2) spew.Fprintf(os.Stderr, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) See the Index for the full list convenience functions. Sample Formatter Output Double pointer to a uint8: %v: <**>5 %+v: <**>(0xf8400420d0->0xf8400420c8)5 %#v: (**uint8)5 %#+v: (**uint8)(0xf8400420d0->0xf8400420c8)5 Pointer to circular struct with a uint8 field and a pointer to itself: %v: <*>{1 <*><shown>} %+v: <*>(0xf84003e260){ui8:1 c:<*>(0xf84003e260)<shown>} %#v: (*main.circular){ui8:(uint8)1 c:(*main.circular)<shown>} %#+v: (*main.circular)(0xf84003e260){ui8:(uint8)1 c:(*main.circular)(0xf84003e260)<shown>} See the Printf example for details on the setup of variables being shown here. Errors Since it is possible for custom Stringer/error interfaces to panic, spew detects them and handles them internally by printing the panic information inline with the output. Since spew is intended to provide deep pretty printing capabilities on structures, it intentionally does not return any errors. */ package spew
{ "pile_set_name": "Github" }
/* * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 only, as * published by the Free Software Foundation. Oracle designates this * particular file as subject to the "Classpath" exception as provided * by Oracle in the LICENSE file that accompanied this code. * * This code is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * version 2 for more details (a copy is included in the LICENSE file that * accompanied this code). * * You should have received a copy of the GNU General Public License version * 2 along with this work; if not, write to the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. * * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA * or visit www.oracle.com if you need additional information or have any * questions. */ package java.util.function; /** * Represents an operation that accepts an object-valued and a * {@code double}-valued argument, and returns no result. This is the * {@code (reference, double)} specialization of {@link BiConsumer}. * Unlike most other functional interfaces, {@code ObjDoubleConsumer} is * expected to operate via side-effects. * * <p>This is a <a href="package-summary.html">functional interface</a> * whose functional method is {@link #accept(Object, double)}. * * @param <T> the type of the object argument to the operation * * @see BiConsumer * @since 1.8 */ @FunctionalInterface public interface ObjDoubleConsumer<T> { /** * Performs this operation on the given arguments. * * @param t the first input argument * @param value the second input argument */ void accept(T t, double value); }
{ "pile_set_name": "Github" }
// Copyright 2011 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package net import ( "os" "syscall" ) func setDefaultSockopts(s, family, sotype int, ipv6only bool) error { if family == syscall.AF_INET6 && sotype != syscall.SOCK_RAW { // Allow both IP versions even if the OS default // is otherwise. Note that some operating systems // never admit this option. syscall.SetsockoptInt(s, syscall.IPPROTO_IPV6, syscall.IPV6_V6ONLY, boolint(ipv6only)) } // Allow broadcast. return os.NewSyscallError("setsockopt", syscall.SetsockoptInt(s, syscall.SOL_SOCKET, syscall.SO_BROADCAST, 1)) } func setDefaultListenerSockopts(s int) error { // Allow reuse of recently-used addresses. return os.NewSyscallError("setsockopt", syscall.SetsockoptInt(s, syscall.SOL_SOCKET, syscall.SO_REUSEADDR, 1)) } func setDefaultMulticastSockopts(s int) error { // Allow multicast UDP and raw IP datagram sockets to listen // concurrently across multiple listeners. return os.NewSyscallError("setsockopt", syscall.SetsockoptInt(s, syscall.SOL_SOCKET, syscall.SO_REUSEADDR, 1)) }
{ "pile_set_name": "Github" }
// Create a queue, but don't obliterate an existing one! var analytics = analytics || []; (function () { // A list of all the methods we want to generate queueing stubs for. var methods = [ 'identify', 'track', 'trackLink', 'trackForm', 'trackClick', 'trackSubmit', 'page', 'pageview', 'ab', 'alias', 'ready', 'group' ]; // For each of our methods, generate a queueing method that pushes arrays of // arguments onto our `analytics` queue. The first element of the array // is always the name of the analytics.js method itself (eg. `track`), so that // we know where to replay them when analytics.js finally loads. var factory = function (method) { return function () { analytics.push([method].concat(Array.prototype.slice.call(arguments, 0))); }; }; for (var i = 0; i < methods.length; i++) { analytics[methods[i]] = factory(methods[i]); } }()); // Define a method that will asynchronously load analytics.js from our CDN. analytics.load = function(apiKey) { // Create an async script element for analytics.js based on your API key. var script = document.createElement('script'); script.type = 'text/javascript'; script.async = true; script.src = ('https:' === document.location.protocol ? 'https://' : 'http://') + 'd2dq2ahtl5zl1z.cloudfront.net/analytics.js/v1/' + apiKey + '/analytics.min.js'; // Find the first script element on the page and insert our script next to it. var firstScript = document.getElementsByTagName('script')[0]; firstScript.parentNode.insertBefore(script, firstScript); }; // Load analytics.js with your API key, which will automatically load all of the // analytics integrations you've turned on for your account. Boosh! analytics.load("<%= ENV['SEGMENT_IO_KEY'] %>");
{ "pile_set_name": "Github" }
.TH "NPM\-PACK" "1" "August 2015" "" "" .SH "NAME" \fBnpm-pack\fR \- Create a tarball from a package .SH SYNOPSIS .P .RS 2 .nf npm pack [<pkg> [<pkg> \.\.\.]] .fi .RE .SH DESCRIPTION .P For anything that's installable (that is, a package folder, tarball, tarball url, name@tag, name@version, or name), this command will fetch it to the cache, and then copy the tarball to the current working directory as \fB<name>\-<version>\.tgz\fP, and then write the filenames out to stdout\. .P If the same package is specified multiple times, then the file will be overwritten the second time\. .P If no arguments are supplied, then npm packs the current package folder\. .SH SEE ALSO .RS 0 .IP \(bu 2 npm help cache .IP \(bu 2 npm help publish .IP \(bu 2 npm help config .IP \(bu 2 npm help 7 config .IP \(bu 2 npm help 5 npmrc .RE
{ "pile_set_name": "Github" }
// WARNING: DO NOT EDIT THIS FILE. THIS FILE IS MANAGED BY SPRING ROO.
// You may push code into the target .java compilation unit if you wish to edit any member(s).

package nl.bzk.brp.model.data.kern;

import java.util.List;
import javax.persistence.EntityManager;
import javax.persistence.PersistenceContext;
import nl.bzk.brp.model.data.kern.Actie;
import org.springframework.transaction.annotation.Transactional;

/**
 * Spring Roo generated "active record" aspect for {@link Actie}.
 *
 * Uses AspectJ inter-type declarations (note the {@code Actie.} prefixes) to
 * weave an injected {@link EntityManager} plus static finders and transactional
 * CRUD helpers directly into the Actie entity class.
 */
privileged aspect Actie_Roo_Jpa_ActiveRecord {

    // Container-injected JPA entity manager, introduced as a transient field on Actie.
    @PersistenceContext
    transient EntityManager Actie.entityManager;

    /**
     * Obtains an EntityManager by instantiating a throwaway Actie and reading
     * its injected field; fails fast when aspect weaving/injection is not set up.
     */
    public static final EntityManager Actie.entityManager() {
        EntityManager em = new Actie().entityManager;
        if (em == null) throw new IllegalStateException("Entity manager has not been injected (is the Spring Aspects JAR configured as an AJC/AJDT aspects library?)");
        return em;
    }

    /** Counts all Actie rows via a JPQL aggregate query. */
    public static long Actie.countActies() {
        return entityManager().createQuery("SELECT COUNT(o) FROM Actie o", Long.class).getSingleResult();
    }

    /** Loads every Actie row. */
    public static List<Actie> Actie.findAllActies() {
        return entityManager().createQuery("SELECT o FROM Actie o", Actie.class).getResultList();
    }

    /** Finds a single Actie by primary key; returns null for a null id. */
    public static Actie Actie.findActie(Long id) {
        if (id == null) return null;
        return entityManager().find(Actie.class, id);
    }

    /** Loads a page of Actie rows for pagination (offset + page size). */
    public static List<Actie> Actie.findActieEntries(int firstResult, int maxResults) {
        return entityManager().createQuery("SELECT o FROM Actie o", Actie.class).setFirstResult(firstResult).setMaxResults(maxResults).getResultList();
    }

    /** Persists this instance, lazily resolving the entity manager first. */
    @Transactional
    public void Actie.persist() {
        if (this.entityManager == null) this.entityManager = entityManager();
        this.entityManager.persist(this);
    }

    /**
     * Removes this instance. A detached instance is re-fetched by id first,
     * because EntityManager.remove only accepts managed entities.
     */
    @Transactional
    public void Actie.remove() {
        if (this.entityManager == null) this.entityManager = entityManager();
        if (this.entityManager.contains(this)) {
            this.entityManager.remove(this);
        } else {
            Actie attached = Actie.findActie(this.id);
            this.entityManager.remove(attached);
        }
    }

    /** Flushes pending changes in the persistence context to the database. */
    @Transactional
    public void Actie.flush() {
        if (this.entityManager == null) this.entityManager = entityManager();
        this.entityManager.flush();
    }

    /** Clears the persistence context, detaching all managed entities. */
    @Transactional
    public void Actie.clear() {
        if (this.entityManager == null) this.entityManager = entityManager();
        this.entityManager.clear();
    }

    /** Merges this (possibly detached) instance and returns the managed copy. */
    @Transactional
    public Actie Actie.merge() {
        if (this.entityManager == null) this.entityManager = entityManager();
        Actie merged = this.entityManager.merge(this);
        this.entityManager.flush();
        return merged;
    }
}
{ "pile_set_name": "Github" }
/*
Copyright Charly Chevalier 2015
Copyright Joel Falcou 2015
Distributed under the Boost Software License, Version 1.0.
(See accompanying file LICENSE_1_0.txt or copy at
http://www.boost.org/LICENSE_1_0.txt)
*/

/* NOTE(review): these per-architecture headers are deliberately included
 * *before* this file's include guard — presumably each defines its own
 * MSGPACK_HW_SIMD_*_AVAILABLE macro behind its own guard, and this header
 * only aggregates the results. Confirm against the included headers. */
#include <msgpack/predef/hardware/simd/x86.h>
#include <msgpack/predef/hardware/simd/x86_amd.h>
#include <msgpack/predef/hardware/simd/arm.h>
#include <msgpack/predef/hardware/simd/ppc.h>

#ifndef MSGPACK_PREDEF_HARDWARE_SIMD_H
#define MSGPACK_PREDEF_HARDWARE_SIMD_H

#include <msgpack/predef/version_number.h>

/*`
 [section Using the `MSGPACK_HW_SIMD_*` predefs]
 [include ../doc/hardware_simd.qbk]
 [endsect]

 [/ --------------------------- ]

 [section `MSGPACK_HW_SIMD_*`]

 [heading `MSGPACK_HW_SIMD`]

 The SIMD extension detected for a specific architectures.
 Version number depends on the detected extension.

 [table
     [[__predef_symbol__] [__predef_version__]]

     [[`MSGPACK_HW_SIMD_X86_AVAILABLE`] [__predef_detection__]]
     [[`MSGPACK_HW_SIMD_X86_AMD_AVAILABLE`] [__predef_detection__]]
     [[`MSGPACK_HW_SIMD_ARM_AVAILABLE`] [__predef_detection__]]
     [[`MSGPACK_HW_SIMD_PPC_AVAILABLE`] [__predef_detection__]]
 ]

 [include ../include/msgpack/predef/hardware/simd/x86.h]
 [include ../include/msgpack/predef/hardware/simd/x86_amd.h]
 [include ../include/msgpack/predef/hardware/simd/arm.h]
 [include ../include/msgpack/predef/hardware/simd/ppc.h]

 [endsect]

 [/ --------------------------- ]

 [section `MSGPACK_HW_SIMD_X86_*_VERSION`]
 [include ../include/msgpack/predef/hardware/simd/x86/versions.h]
 [endsect]

 [section `MSGPACK_HW_SIMD_X86_AMD_*_VERSION`]
 [include ../include/msgpack/predef/hardware/simd/x86_amd/versions.h]
 [endsect]

 [section `MSGPACK_HW_SIMD_ARM_*_VERSION`]
 [include ../include/msgpack/predef/hardware/simd/arm/versions.h]
 [endsect]

 [section `MSGPACK_HW_SIMD_PPC_*_VERSION`]
 [include ../include/msgpack/predef/hardware/simd/ppc/versions.h]
 [endsect]
 */

// We check if SIMD extension of multiples architectures have been detected,
// if yes, then this is an error!
// (&& binds tighter than ||, so each pairwise conjunction below is grouped
// as intended even without extra parentheses.)
//
// NOTE: _X86_AMD implies _X86, so there is no need to check for it here!
//
#if defined(MSGPACK_HW_SIMD_ARM_AVAILABLE) && defined(MSGPACK_HW_SIMD_PPC_AVAILABLE) ||\
    defined(MSGPACK_HW_SIMD_ARM_AVAILABLE) && defined(MSGPACK_HW_SIMD_X86_AVAILABLE) ||\
    defined(MSGPACK_HW_SIMD_PPC_AVAILABLE) && defined(MSGPACK_HW_SIMD_X86_AVAILABLE)
#   error "Multiple SIMD architectures detected, this cannot happen!"
#endif

#if defined(MSGPACK_HW_SIMD_X86_AVAILABLE) && defined(MSGPACK_HW_SIMD_X86_AMD_AVAILABLE)
//  If both standard _X86 and _X86_AMD are available,
//  then take the biggest version of the two!
#   if MSGPACK_HW_SIMD_X86 >= MSGPACK_HW_SIMD_X86_AMD
#       define MSGPACK_HW_SIMD MSGPACK_HW_SIMD_X86
#   else
#       define MSGPACK_HW_SIMD MSGPACK_HW_SIMD_X86_AMD
#   endif
#endif

#if !defined(MSGPACK_HW_SIMD)
    // At this point, only one of these two is defined
#   if defined(MSGPACK_HW_SIMD_X86_AVAILABLE)
#       define MSGPACK_HW_SIMD MSGPACK_HW_SIMD_X86
#   endif
#   if defined(MSGPACK_HW_SIMD_X86_AMD_AVAILABLE)
#       define MSGPACK_HW_SIMD MSGPACK_HW_SIMD_X86_AMD
#   endif
#endif

// ARM and PPC are mutually exclusive with everything else (checked above),
// so a plain define is safe here.
#if defined(MSGPACK_HW_SIMD_ARM_AVAILABLE)
#   define MSGPACK_HW_SIMD MSGPACK_HW_SIMD_ARM
#endif

#if defined(MSGPACK_HW_SIMD_PPC_AVAILABLE)
#   define MSGPACK_HW_SIMD MSGPACK_HW_SIMD_PPC
#endif

// Final fallback: expose an _AVAILABLE flag, or a "not available" version number.
#if defined(MSGPACK_HW_SIMD)
#   define MSGPACK_HW_SIMD_AVAILABLE
#else
#   define MSGPACK_HW_SIMD MSGPACK_VERSION_NUMBER_NOT_AVAILABLE
#endif

#define MSGPACK_HW_SIMD_NAME "Hardware SIMD"

#endif

#include <msgpack/predef/detail/test.h>
MSGPACK_PREDEF_DECLARE_TEST(MSGPACK_HW_SIMD, MSGPACK_HW_SIMD_NAME)
{ "pile_set_name": "Github" }
/* Copyright (C) 2019, 2020 Monomax Software Pty Ltd
 *
 * This file is part of Dnote.
 *
 * Dnote is free software: you can redistribute it and/or modify
 * it under the terms of the GNU Affero General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * Dnote is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU Affero General Public License for more details.
 *
 * You should have received a copy of the GNU Affero General Public License
 * along with Dnote. If not, see <https://www.gnu.org/licenses/>.
 */

import {
  ADD,
  REFRESH,
  RECEIVE,
  START_FETCHING,
  RECEIVE_ERROR,
  RESET,
  REMOVE,
  NotesActionType,
  NotesState
} from './type';

// State of the notes list before anything has been fetched.
const initialState: NotesState = {
  data: [],
  total: 0,
  isFetching: false,
  isFetched: false,
  errorMessage: null
};

// Reducer for the notes list state.
export default function (
  state = initialState,
  action: NotesActionType
): NotesState {
  switch (action.type) {
    case START_FETCHING:
      // Mark a fetch as in flight and clear any previous error.
      return { ...state, isFetched: false, isFetching: true, errorMessage: '' };

    case RECEIVE: {
      // Replace the list with the fetched notes and record the total count.
      const { notes, total } = action.data;
      return {
        ...state,
        data: notes,
        total,
        isFetched: true,
        isFetching: false
      };
    }

    case RECEIVE_ERROR:
      // Store the error and reset the fetch flags.
      return {
        ...state,
        errorMessage: action.data.error,
        isFetching: false,
        isFetched: false
      };

    case RESET:
      return initialState;

    // ADD, REFRESH and REMOVE currently leave the reducer state unchanged.
    case ADD:
    case REFRESH:
    case REMOVE:
      return state;

    default:
      return state;
  }
}
{ "pile_set_name": "Github" }
<?php
/**
 * This autoloading setup is really more complicated than it needs to be for most
 * applications. The added complexity is simply to reduce the time it takes for
 * new developers to be productive with a fresh skeleton. It allows autoloading
 * to be correctly configured, regardless of the installation method and keeps
 * the use of composer completely optional. This setup should work fine for
 * most users, however, feel free to configure autoloading however you'd like.
 */

// Composer autoloading: if a Composer-generated autoloader exists (relative to
// the current working directory), include it and keep the returned loader so
// ZF2 can be registered with it below.
if (file_exists('vendor/autoload.php')) {
    $loader = include 'vendor/autoload.php';
}

// Support for ZF2_PATH environment variable or git submodule.
// Note the deliberate assignment-in-condition: $zf2Path ends up as either the
// env var, the submodule path, or false (in which case this block is skipped).
if ($zf2Path = getenv('ZF2_PATH') ?: (is_dir('vendor/ZF2/library') ? 'vendor/ZF2/library' : false)) {
    if (isset($loader)) {
        // Composer is present: register the Zend namespace with its loader.
        $loader->add('Zend', $zf2Path . '/Zend');
    } else {
        // No Composer: fall back to ZF2's own autoloader factory.
        include $zf2Path . '/Zend/Loader/AutoloaderFactory.php';
        Zend\Loader\AutoloaderFactory::factory(array(
            'Zend\Loader\StandardAutoloader' => array(
                'autoregister_zf' => true
            )
        ));
    }
}

// Final sanity check: by now some autoloader must be able to resolve ZF2
// classes, otherwise the application cannot start.
if (!class_exists('Zend\Loader\AutoloaderFactory')) {
    throw new RuntimeException('Unable to load ZF2. Run `php composer.phar install` or define a ZF2_PATH environment variable.');
}
{ "pile_set_name": "Github" }
import "../global/dialog.service";

/**
 * The contact list state.
 *
 * Declares a nested ui-view (viewport) that child states fill in, and
 * resolves the full contact list before the state is entered.
 */
export const contactsState = {
  parent: 'app', // declares that 'contacts' is a child of 'app'
  name: "contacts",
  url: "/contacts",
  data: { requiresAuth: true },
  deepStateRedirect: true,
  sticky: true,
  views: { contacts: 'contacts' },
  resolve: {
    // The resolved contact list is injected into the controller.
    contacts: ['Contacts', function (Contacts) {
      return Contacts.all();
    }]
  },
};

/**
 * Displays a single contact, looked up from the `contactId` URL parameter.
 */
export const viewContactState = {
  name: 'contacts.contact',
  url: '/:contactId',
  component: 'contactView',
  resolve: {
    // Resolve the contact for the contactId parameter; it is provided to the
    // contactView component's `contact` binding.
    contact: ['Contacts', '$transition$', function (Contacts, $transition$) {
      return Contacts.get($transition$.params().contactId);
    }]
  },
};

/**
 * Lets the user edit a contact (injected from the parent state's resolve).
 *
 * Uses relative view targeting to replace the grand-parent's unnamed ui-view
 * (normally filled by 'contacts.contact') with the edit-contact component.
 * Equivalent addresses: `$default@contacts` or `!$default.$default.$default`.
 */
export const editContactState = {
  name: 'contacts.contact.edit',
  url: '/edit',
  views: {
    '^.^.$default': {
      bindings: { pristineContact: "contact" },
      component: 'editContact'
    }
  }
};
{ "pile_set_name": "Github" }
{
  "dataset_reader": {
    "type": "squad",
    "token_indexers": {
      "tokens": {
        "type": "single_id",
        "lowercase_tokens": true
      },
      "token_characters": {
        "type": "characters",
        "character_tokenizer": {
          "byte_encoding": "utf-8"
        },
        "min_padding_length": 5
      }
    }
  },
  "train_data_path": "test_fixtures/rc/squad.json",
  "validation_data_path": "test_fixtures/rc/squad.json",
  "model": {
    "type": "bidaf",
    "text_field_embedder": {
      "token_embedders": {
        "tokens": {
          "type": "embedding",
          "embedding_dim": 2,
          "trainable": false
        },
        "token_characters": {
          "type": "character_encoding",
          "embedding": {
            "num_embeddings": 260,
            "embedding_dim": 8
          },
          "encoder": {
            "type": "cnn",
            "embedding_dim": 8,
            "num_filters": 8,
            "ngram_filter_sizes": [5]
          }
        }
      }
    },
    "num_highway_layers": 1,
    "phrase_layer": {
      "type": "lstm",
      "input_size": 10,
      "hidden_size": 10,
      "num_layers": 1
    },
    "matrix_attention": {
      "type": "linear",
      "combination": "x,y,x*y",
      "tensor_1_dim": 10,
      "tensor_2_dim": 10
    },
    "modeling_layer": {
      "type": "lstm",
      "input_size": 40,
      "hidden_size": 10,
      "num_layers": 1
    },
    "span_end_encoder": {
      "type": "lstm",
      "input_size": 70,
      "hidden_size": 10,
      "num_layers": 1
    }
  },
  "data_loader": {
    "batch_sampler": {
      "type": "bucket",
      "padding_noise": 0.0,
      "batch_size": 40
    }
  },
  "trainer": {
    "num_epochs": 1,
    "grad_norm": 10.0,
    "patience": 12,
    "cuda_device": -1,
    "optimizer": {
      "type": "adadelta",
      "lr": 0.5,
      "rho": 0.95
    }
  }
}
{ "pile_set_name": "Github" }
#!/usr/bin/env python
"""Test the copy of nose in this directory, by running that nose against
itself.

You can test nose using nose in other ways, but if you don't use this script,
you might have one installation of nose testing another installation, which is
not supported.
"""
# More detail:
# In the absence of some sort of deep renaming magic, nose can't reasonably
# test a different installation of itself, given the existence of the global
# module registry sys.modules .
# If installed system-wide with setuptools, setuptools (via the site-packages
# easy-install.pth) takes you at your word and ensures that the installed nose
# comes first on sys.path . So the only way to test a copy of nose other than
# the installed one is to install that version (e.g. by running python setup.py
# develop).
# This script provides a way of running nose on nose's own tests without
# installing the version to be tested, nor uninstalling the currently-installed
# version.
import glob
import os
import sys

if __name__ == "__main__":
    here = os.path.normpath(os.path.abspath(os.path.dirname(__file__)))
    search_dirs = [here]
    tests_dir = here

    if sys.version_info >= (3,):
        # Under Python 3.x the source must be 'built' first (2to3 etc.):
        # 'python3 setup.py build_tests' places everything under build/tests,
        # including nose itself, since some tests live inside the nose source.
        # The 'py3where' option in setup.cfg restricts test collection to
        # build/tests; here we just have to point sys.path at the built code.
        search_dirs = glob.glob(os.path.join(here, 'build', 'lib*'))
        tests_dir = os.path.join(here, 'build', 'tests')
        if not os.path.isdir(tests_dir):
            raise AssertionError(
                "Error: %s does not exist. Use the setup.py 'build_tests' command to create it."
                % (tests_dir,))

    # Activate the local nose distribution when setuptools is available.
    try:
        import pkg_resources
        env = pkg_resources.Environment(search_path=search_dirs)
        distributions = env["nose"]
        assert len(distributions) == 1, (
            "Incorrect usage of selftest.py; please see DEVELOPERS.txt")
        dist = distributions[0]
        dist.activate()
    except ImportError:
        pass

    # Always make sure our chosen test dir is first on the path.
    sys.path.insert(0, tests_dir)

    import nose
    nose.run_exit()
{ "pile_set_name": "Github" }
import React from "react"
import { Col } from "react-bootstrap"
import { Link } from "react-router"
import { formatLatencyBin } from "../../../util/Format"
import { is } from "immutable"

// Renders a source's 50th/95th/99th percentile latency bins as three
// equal-width columns.
export default class SourceLatencyStatsCol extends React.Component {

  // Only re-render when the immutable stats map has actually changed.
  shouldComponentUpdate(nextProps) {
    return !is(this.props.latencyPercentileBinStats, nextProps.latencyPercentileBinStats);
  }

  render() {
    const stats = this.props.latencyPercentileBinStats;
    const p50 = stats.get("50.0");
    const p95 = stats.get("95.0");
    const p99 = stats.get("99.0");

    return (
      <div>
        <Col lg={4}><p>{formatLatencyBin(p50)}</p></Col>
        <Col lg={4}><p>{formatLatencyBin(p95)}</p></Col>
        <Col lg={4}><p>{formatLatencyBin(p99)}</p></Col>
      </div>
    );
  }
}
{ "pile_set_name": "Github" }
// SWFrameButton.h
//
// Copyright (c) 2014 Sarun Wongpatcharapakorn
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.

#import <UIKit/UIKit.h>

/// UIButton subclass exposing corner-radius and border-width styling as
/// Interface-Builder-inspectable properties.
@interface SWFrameButton : UIButton

/// Corner radius applied to the button, editable from Interface Builder.
/// (UI_APPEARANCE_SELECTOR support is present but commented out — appearance
/// proxy styling is currently disabled.)
@property (nonatomic, assign) IBInspectable CGFloat cornerRadius; //UI_APPEARANCE_SELECTOR;

/// Border width applied to the button, editable from Interface Builder.
/// (UI_APPEARANCE_SELECTOR likewise commented out.)
@property (nonatomic, assign) IBInspectable CGFloat borderWidth; //UI_APPEARANCE_SELECTOR;

@end
{ "pile_set_name": "Github" }
// Type declarations for a Vue directive object: an `inserted` hook plus an
// `unbind` cleanup function (implementation lives elsewhere).
declare namespace _default {
    export const inserted: import('vue').DirectiveFunction;
    export { unbind };
}
export default _default;

// An element the directive is bound to; the directive apparently stores its
// IntersectionObserver on `_vtsIntersect` — confirm against the implementation.
export type IntersectEl = HTMLElement & {
    _vtsIntersect?: IntersectionObserver;
};
/**
 * Directive unbind hook: receives the element the directive was attached to.
 *
 * @typedef {HTMLElement & { _vtsIntersect?: IntersectionObserver}} IntersectEl
 *
 * @param {IntersectEl} el
 */
declare function unbind(el: IntersectEl): void;
{ "pile_set_name": "Github" }
#include "Eigen/LU"
{ "pile_set_name": "Github" }
SUBROUTINE CGEESX( JOBVS, SORT, SELECT, SENSE, N, A, LDA, SDIM, W, $ VS, LDVS, RCONDE, RCONDV, WORK, LWORK, RWORK, $ BWORK, INFO ) * * -- LAPACK driver routine (version 3.0) -- * Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., * Courant Institute, Argonne National Lab, and Rice University * June 30, 1999 * * .. Scalar Arguments .. CHARACTER JOBVS, SENSE, SORT INTEGER INFO, LDA, LDVS, LWORK, N, SDIM REAL RCONDE, RCONDV * .. * .. Array Arguments .. LOGICAL BWORK( * ) REAL RWORK( * ) COMPLEX A( LDA, * ), VS( LDVS, * ), W( * ), WORK( * ) * .. * .. Function Arguments .. LOGICAL SELECT EXTERNAL SELECT * .. * * Purpose * ======= * * CGEESX computes for an N-by-N complex nonsymmetric matrix A, the * eigenvalues, the Schur form T, and, optionally, the matrix of Schur * vectors Z. This gives the Schur factorization A = Z*T*(Z**H). * * Optionally, it also orders the eigenvalues on the diagonal of the * Schur form so that selected eigenvalues are at the top left; * computes a reciprocal condition number for the average of the * selected eigenvalues (RCONDE); and computes a reciprocal condition * number for the right invariant subspace corresponding to the * selected eigenvalues (RCONDV). The leading columns of Z form an * orthonormal basis for this invariant subspace. * * For further explanation of the reciprocal condition numbers RCONDE * and RCONDV, see Section 4.10 of the LAPACK Users' Guide (where * these quantities are called s and sep respectively). * * A complex matrix is in Schur form if it is upper triangular. * * Arguments * ========= * * JOBVS (input) CHARACTER*1 * = 'N': Schur vectors are not computed; * = 'V': Schur vectors are computed. * * SORT (input) CHARACTER*1 * Specifies whether or not to order the eigenvalues on the * diagonal of the Schur form. * = 'N': Eigenvalues are not ordered; * = 'S': Eigenvalues are ordered (see SELECT). 
* * SELECT (input) LOGICAL FUNCTION of one COMPLEX argument * SELECT must be declared EXTERNAL in the calling subroutine. * If SORT = 'S', SELECT is used to select eigenvalues to order * to the top left of the Schur form. * If SORT = 'N', SELECT is not referenced. * An eigenvalue W(j) is selected if SELECT(W(j)) is true. * * SENSE (input) CHARACTER*1 * Determines which reciprocal condition numbers are computed. * = 'N': None are computed; * = 'E': Computed for average of selected eigenvalues only; * = 'V': Computed for selected right invariant subspace only; * = 'B': Computed for both. * If SENSE = 'E', 'V' or 'B', SORT must equal 'S'. * * N (input) INTEGER * The order of the matrix A. N >= 0. * * A (input/output) COMPLEX array, dimension (LDA, N) * On entry, the N-by-N matrix A. * On exit, A is overwritten by its Schur form T. * * LDA (input) INTEGER * The leading dimension of the array A. LDA >= max(1,N). * * SDIM (output) INTEGER * If SORT = 'N', SDIM = 0. * If SORT = 'S', SDIM = number of eigenvalues for which * SELECT is true. * * W (output) COMPLEX array, dimension (N) * W contains the computed eigenvalues, in the same order * that they appear on the diagonal of the output Schur form T. * * VS (output) COMPLEX array, dimension (LDVS,N) * If JOBVS = 'V', VS contains the unitary matrix Z of Schur * vectors. * If JOBVS = 'N', VS is not referenced. * * LDVS (input) INTEGER * The leading dimension of the array VS. LDVS >= 1, and if * JOBVS = 'V', LDVS >= N. * * RCONDE (output) REAL * If SENSE = 'E' or 'B', RCONDE contains the reciprocal * condition number for the average of the selected eigenvalues. * Not referenced if SENSE = 'N' or 'V'. * * RCONDV (output) REAL * If SENSE = 'V' or 'B', RCONDV contains the reciprocal * condition number for the selected right invariant subspace. * Not referenced if SENSE = 'N' or 'E'. * * WORK (workspace/output) COMPLEX array, dimension (LWORK) * On exit, if INFO = 0, WORK(1) returns the optimal LWORK. 
* * LWORK (input) INTEGER * The dimension of the array WORK. LWORK >= max(1,2*N). * Also, if SENSE = 'E' or 'V' or 'B', LWORK >= 2*SDIM*(N-SDIM), * where SDIM is the number of selected eigenvalues computed by * this routine. Note that 2*SDIM*(N-SDIM) <= N*N/2. * For good performance, LWORK must generally be larger. * * RWORK (workspace) REAL array, dimension (N) * * BWORK (workspace) LOGICAL array, dimension (N) * Not referenced if SORT = 'N'. * * INFO (output) INTEGER * = 0: successful exit * < 0: if INFO = -i, the i-th argument had an illegal value. * > 0: if INFO = i, and i is * <= N: the QR algorithm failed to compute all the * eigenvalues; elements 1:ILO-1 and i+1:N of W * contain those eigenvalues which have converged; if * JOBVS = 'V', VS contains the transformation which * reduces A to its partially converged Schur form. * = N+1: the eigenvalues could not be reordered because some * eigenvalues were too close to separate (the problem * is very ill-conditioned); * = N+2: after reordering, roundoff changed values of some * complex eigenvalues so that leading eigenvalues in * the Schur form no longer satisfy SELECT=.TRUE. This * could also be caused by underflow due to scaling. * * ===================================================================== * * .. Parameters .. REAL ZERO, ONE PARAMETER ( ZERO = 0.0E0, ONE = 1.0E0 ) * .. * .. Local Scalars .. LOGICAL SCALEA, WANTSB, WANTSE, WANTSN, WANTST, $ WANTSV, WANTVS INTEGER HSWORK, I, IBAL, ICOND, IERR, IEVAL, IHI, ILO, $ ITAU, IWRK, K, MAXB, MAXWRK, MINWRK REAL ANRM, BIGNUM, CSCALE, EPS, SMLNUM * .. * .. Local Arrays .. REAL DUM( 1 ) * .. * .. External Subroutines .. EXTERNAL CCOPY, CGEBAK, CGEBAL, CGEHRD, CHSEQR, CLACPY, $ CLASCL, CTRSEN, CUNGHR, SLABAD, SLASCL, XERBLA * .. * .. External Functions .. LOGICAL LSAME INTEGER ILAENV REAL CLANGE, SLAMCH EXTERNAL LSAME, ILAENV, CLANGE, SLAMCH * .. * .. Intrinsic Functions .. INTRINSIC MAX, MIN, SQRT * .. * .. Executable Statements .. 
* * Test the input arguments * INFO = 0 WANTVS = LSAME( JOBVS, 'V' ) WANTST = LSAME( SORT, 'S' ) WANTSN = LSAME( SENSE, 'N' ) WANTSE = LSAME( SENSE, 'E' ) WANTSV = LSAME( SENSE, 'V' ) WANTSB = LSAME( SENSE, 'B' ) IF( ( .NOT.WANTVS ) .AND. ( .NOT.LSAME( JOBVS, 'N' ) ) ) THEN INFO = -1 ELSE IF( ( .NOT.WANTST ) .AND. ( .NOT.LSAME( SORT, 'N' ) ) ) THEN INFO = -2 ELSE IF( .NOT.( WANTSN .OR. WANTSE .OR. WANTSV .OR. WANTSB ) .OR. $ ( .NOT.WANTST .AND. .NOT.WANTSN ) ) THEN INFO = -4 ELSE IF( N.LT.0 ) THEN INFO = -5 ELSE IF( LDA.LT.MAX( 1, N ) ) THEN INFO = -7 ELSE IF( LDVS.LT.1 .OR. ( WANTVS .AND. LDVS.LT.N ) ) THEN INFO = -11 END IF * * Compute workspace * (Note: Comments in the code beginning "Workspace:" describe the * minimal amount of real workspace needed at that point in the * code, as well as the preferred amount for good performance. * CWorkspace refers to complex workspace, and RWorkspace to real * workspace. NB refers to the optimal block size for the * immediately following subroutine, as returned by ILAENV. * HSWORK refers to the workspace preferred by CHSEQR, as * calculated below. HSWORK is computed assuming ILO=1 and IHI=N, * the worst case. * If SENSE = 'E', 'V' or 'B', then the amount of workspace needed * depends on SDIM, which is computed by the routine CTRSEN later * in the code.) * MINWRK = 1 IF( INFO.EQ.0 .AND. 
( LWORK.GE.1 ) ) THEN MAXWRK = N + N*ILAENV( 1, 'CGEHRD', ' ', N, 1, N, 0 ) MINWRK = MAX( 1, 2*N ) IF( .NOT.WANTVS ) THEN MAXB = MAX( ILAENV( 8, 'CHSEQR', 'SN', N, 1, N, -1 ), 2 ) K = MIN( MAXB, N, MAX( 2, ILAENV( 4, 'CHSEQR', 'SN', N, 1, $ N, -1 ) ) ) HSWORK = MAX( K*( K+2 ), 2*N ) MAXWRK = MAX( MAXWRK, HSWORK, 1 ) ELSE MAXWRK = MAX( MAXWRK, N+( N-1 )* $ ILAENV( 1, 'CUNGHR', ' ', N, 1, N, -1 ) ) MAXB = MAX( ILAENV( 8, 'CHSEQR', 'SV', N, 1, N, -1 ), 2 ) K = MIN( MAXB, N, MAX( 2, ILAENV( 4, 'CHSEQR', 'SV', N, 1, $ N, -1 ) ) ) HSWORK = MAX( K*( K+2 ), 2*N ) MAXWRK = MAX( MAXWRK, HSWORK, 1 ) END IF WORK( 1 ) = MAXWRK END IF IF( LWORK.LT.MINWRK ) THEN INFO = -15 END IF IF( INFO.NE.0 ) THEN CALL XERBLA( 'CGEESX', -INFO ) RETURN END IF * * Quick return if possible * IF( N.EQ.0 ) THEN SDIM = 0 RETURN END IF * * Get machine constants * EPS = SLAMCH( 'P' ) SMLNUM = SLAMCH( 'S' ) BIGNUM = ONE / SMLNUM CALL SLABAD( SMLNUM, BIGNUM ) SMLNUM = SQRT( SMLNUM ) / EPS BIGNUM = ONE / SMLNUM * * Scale A if max element outside range [SMLNUM,BIGNUM] * ANRM = CLANGE( 'M', N, N, A, LDA, DUM ) SCALEA = .FALSE. IF( ANRM.GT.ZERO .AND. ANRM.LT.SMLNUM ) THEN SCALEA = .TRUE. CSCALE = SMLNUM ELSE IF( ANRM.GT.BIGNUM ) THEN SCALEA = .TRUE. 
CSCALE = BIGNUM END IF IF( SCALEA ) $ CALL CLASCL( 'G', 0, 0, ANRM, CSCALE, N, N, A, LDA, IERR ) * * * Permute the matrix to make it more nearly triangular * (CWorkspace: none) * (RWorkspace: need N) * IBAL = 1 CALL CGEBAL( 'P', N, A, LDA, ILO, IHI, RWORK( IBAL ), IERR ) * * Reduce to upper Hessenberg form * (CWorkspace: need 2*N, prefer N+N*NB) * (RWorkspace: none) * ITAU = 1 IWRK = N + ITAU CALL CGEHRD( N, ILO, IHI, A, LDA, WORK( ITAU ), WORK( IWRK ), $ LWORK-IWRK+1, IERR ) * IF( WANTVS ) THEN * * Copy Householder vectors to VS * CALL CLACPY( 'L', N, N, A, LDA, VS, LDVS ) * * Generate unitary matrix in VS * (CWorkspace: need 2*N-1, prefer N+(N-1)*NB) * (RWorkspace: none) * CALL CUNGHR( N, ILO, IHI, VS, LDVS, WORK( ITAU ), WORK( IWRK ), $ LWORK-IWRK+1, IERR ) END IF * SDIM = 0 * * Perform QR iteration, accumulating Schur vectors in VS if desired * (CWorkspace: need 1, prefer HSWORK (see comments) ) * (RWorkspace: none) * IWRK = ITAU CALL CHSEQR( 'S', JOBVS, N, ILO, IHI, A, LDA, W, VS, LDVS, $ WORK( IWRK ), LWORK-IWRK+1, IEVAL ) IF( IEVAL.GT.0 ) $ INFO = IEVAL * * Sort eigenvalues if desired * IF( WANTST .AND. 
INFO.EQ.0 ) THEN IF( SCALEA ) $ CALL CLASCL( 'G', 0, 0, CSCALE, ANRM, N, 1, W, N, IERR ) DO 10 I = 1, N BWORK( I ) = SELECT( W( I ) ) 10 CONTINUE * * Reorder eigenvalues, transform Schur vectors, and compute * reciprocal condition numbers * (CWorkspace: if SENSE is not 'N', need 2*SDIM*(N-SDIM) * otherwise, need none ) * (RWorkspace: none) * CALL CTRSEN( SENSE, JOBVS, BWORK, N, A, LDA, VS, LDVS, W, SDIM, $ RCONDE, RCONDV, WORK( IWRK ), LWORK-IWRK+1, $ ICOND ) IF( .NOT.WANTSN ) $ MAXWRK = MAX( MAXWRK, 2*SDIM*( N-SDIM ) ) IF( ICOND.EQ.-14 ) THEN * * Not enough complex workspace * INFO = -15 END IF END IF * IF( WANTVS ) THEN * * Undo balancing * (CWorkspace: none) * (RWorkspace: need N) * CALL CGEBAK( 'P', 'R', N, ILO, IHI, RWORK( IBAL ), N, VS, LDVS, $ IERR ) END IF * IF( SCALEA ) THEN * * Undo scaling for the Schur form of A * CALL CLASCL( 'U', 0, 0, CSCALE, ANRM, N, N, A, LDA, IERR ) CALL CCOPY( N, A, LDA+1, W, 1 ) IF( ( WANTSV .OR. WANTSB ) .AND. INFO.EQ.0 ) THEN DUM( 1 ) = RCONDV CALL SLASCL( 'G', 0, 0, CSCALE, ANRM, 1, 1, DUM, 1, IERR ) RCONDV = DUM( 1 ) END IF END IF * WORK( 1 ) = MAXWRK RETURN * * End of CGEESX * END
{ "pile_set_name": "Github" }
--- title: Web demo description: Image classification demo running as a Flask web server. category: example include_in_docs: true priority: 10 --- # Web Demo ## Requirements The demo server requires Python with some dependencies. To make sure you have the dependencies, please run `pip install -r examples/web_demo/requirements.txt`, and also make sure that you've compiled the Python Caffe interface and that it is on your `PYTHONPATH` (see [installation instructions](/installation.html)). Make sure that you have obtained the Reference CaffeNet Model and the ImageNet Auxiliary Data: ./scripts/download_model_binary.py models/bvlc_reference_caffenet ./data/ilsvrc12/get_ilsvrc_aux.sh NOTE: if you run into trouble, try re-downloading the auxiliary files. ## Run Running `python examples/web_demo/app.py` will bring up the demo server, accessible at `http://0.0.0.0:5000`. You can enable debug mode of the web server, or switch to a different port: % python examples/web_demo/app.py -h Usage: app.py [options] Options: -h, --help show this help message and exit -d, --debug enable debug mode -p PORT, --port=PORT which port to serve content on ## How are the "maximally accurate" results generated? In a nutshell: ImageNet predictions are made at the leaf nodes, but the organization of the project allows leaf nodes to be united via more general parent nodes, with 'entity' at the very top. To give "maximally accurate" results, we "back off" from maximally specific predictions to maintain a high accuracy. The `bet_file` that is loaded in the demo provides the graph structure and names of all relevant ImageNet nodes as well as measures of information gain between them. Please see the "Hedging your bets" paper from [CVPR 2012](http://www.image-net.org/projects/hedging/) for further information.
{ "pile_set_name": "Github" }
// Sanity check: the UriTemplate implementation must exist.
tests.add("Existence", function () {
    return Jsonary.UriTemplate != undefined;
});

// Default expansion percent-encodes reserved characters ('#' -> %23).
tests.add("Escaped", function () {
    var template = new Jsonary.UriTemplate("test{value}test");
    var result = template.fill(function (name) {
        return "a#b";
    });
    this.assert(result == "testa%23btest", "Template does not match");
    return true;
});

// The '+' operator leaves reserved characters unescaped.
tests.add("Unescaped: +", function () {
    var template = new Jsonary.UriTemplate("test{+key1}test{+key2}_");
    var values = {
        key1: "a#b",
        key2: "/"
    };
    var result = template.fill(function (name) {
        return values[name];
    });
    this.assert(result == "testa#btest/_", "Template does not match");
    return true;
});

// Shared variable set used by the table-driven tests below (matches the
// example variables from the URI Template spec, RFC 6570).
var values = {
    "var": "value",
    "hello": "Hello World!",
    "path": "/foo/bar",
    "empty": "",
    "x": "1024",
    "y": "768",
    "list": ["red", "green", "blue"],
    "keys": {"semi": ";", "dot": ".", "comma": ","}
};
var subFunction = function (name) {return values[name];};

// Map of template -> expected expansion.
var examples = {
    "{+var}": "value",
    "{+hello}": "Hello%20World!",
    "{+path}/here": "/foo/bar/here",
    "here?ref={+path}": "here?ref=/foo/bar",
    "X{#var}": "X#value",
    "X{#hello}": "X#Hello%20World!",
    "map?{x,y}": "map?1024,768",
    "{x,hello,y}": "1024,Hello%20World%21,768",
    "{#x,hello,y}": "#1024,Hello%20World!,768",
    "{+path,x}/here": "/foo/bar,1024/here",
    "X{.var}": "X.value",
    "X{.x,y}": "X.1024.768",
    "{/var}": "/value",
    "{/var,x}/here": "/value/1024/here",
    "{;x,y}": ";x=1024;y=768",
    "{;x,y,empty}": ";x=1024;y=768;empty",
    "{?x,y}": "?x=1024&y=768",
    "{?x,y,empty}": "?x=1024&y=768&empty=",
    "?fixed=yes{&x}": "?fixed=yes&x=1024",
    "{&x,y,empty}": "&x=1024&y=768&empty=",
    "{var:3}": "val",
    "{var:30}": "value",
    "{list}": "red,green,blue",
    "{list*}": "red,green,blue",
    "{keys}": "semi,%3B,dot,.,comma,%2C",
    "{keys*}": "semi=%3B,dot=.,comma=%2C",
    "{+path:6}/here": "/foo/b/here",
    "{+list}": "red,green,blue",
    "{+list*}": "red,green,blue",
    "{+keys}": "semi,;,dot,.,comma,,",
    "{+keys*}": "semi=;,dot=.,comma=,",
    "{#path:6}/here": "#/foo/b/here",
    "{#list}": "#red,green,blue",
    "{#list*}": "#red,green,blue",
    "{#keys}": "#semi,;,dot,.,comma,,",
    "{#keys*}": "#semi=;,dot=.,comma=,",
    "X{.var:3}": "X.val",
    "X{.list}": "X.red,green,blue",
    "X{.list*}": "X.red.green.blue",
    "X{.keys}": "X.semi,%3B,dot,.,comma,%2C",
    "X{.keys*}": "X.semi=%3B.dot=..comma=%2C",
    "{/var:1,var}": "/v/value",
    "{/list}": "/red,green,blue",
    "{/list*}": "/red/green/blue",
    "{/list*,path:4}": "/red/green/blue/%2Ffoo",
    "{/keys}": "/semi,%3B,dot,.,comma,%2C",
    "{/keys*}": "/semi=%3B/dot=./comma=%2C",
    "{;hello:5}": ";hello=Hello",
    "{;list}": ";list=red,green,blue",
    "{;list*}": ";list=red;list=green;list=blue",
    "{;keys}": ";keys=semi,%3B,dot,.,comma,%2C",
    "{;keys*}": ";semi=%3B;dot=.;comma=%2C",
    "{?var:3}": "?var=val",
    "{?list}": "?list=red,green,blue",
    "{?list*}": "?list=red&list=green&list=blue",
    "{?keys}": "?keys=semi,%3B,dot,.,comma,%2C",
    "{?keys*}": "?semi=%3B&dot=.&comma=%2C",
    "{&var:3}": "&var=val",
    "{&list}": "&list=red,green,blue",
    "{&list*}": "&list=red&list=green&list=blue",
    "{&keys}": "&keys=semi,%3B,dot,.,comma,%2C",
    "{&keys*}": "&semi=%3B&dot=.&comma=%2C"
};

// Register one test per example; the IIFE captures (sub, expected) so the
// closures don't all share the final loop values.
for (var sub in examples) {
    (function (sub, expected) {
        tests.add(sub, function () {
            var template = new Jsonary.UriTemplate(sub);
            var result = template.fill(subFunction);
            this.assert(result == expected, JSON.stringify(result) + " != " + JSON.stringify(expected));
            return true;
        });
    })(sub, examples[sub]);
}
{ "pile_set_name": "Github" }
using System;
using System.Collections.Generic;
using System.Diagnostics.Contracts;
using System.Linq;
using System.Text;
using System.Threading.Tasks;

using Sermo.Data.Contracts;
using Sermo.Infrastructure.Contracts;
using Sermo.UI.Contracts;

namespace Sermo.UI.Controllers
{
    /// <summary>
    /// Repository-backed implementation of the room view-model services: reads
    /// rooms/messages from the data repositories and maps the records to UI
    /// view models (and back, for writes) via <see cref="IViewModelMapper"/>.
    /// </summary>
    public class RepositoryRoomViewModelService : IRoomViewModelReader, IRoomViewModelWriter
    {
        /// <summary>
        /// Wires up the required repositories and mapper.
        /// Code Contracts reject null dependencies with ArgumentNullException.
        /// </summary>
        public RepositoryRoomViewModelService(IRoomRepository roomRepository, IMessageRepository messageRepository, IViewModelMapper mapper)
        {
            Contract.Requires<ArgumentNullException>(roomRepository != null);
            Contract.Requires<ArgumentNullException>(messageRepository != null);
            Contract.Requires<ArgumentNullException>(mapper != null);

            this.roomRepository = roomRepository;
            this.messageRepository = messageRepository;
            this.mapper = mapper;
        }

        /// <summary>
        /// Returns every room known to the repository, mapped to view models.
        /// The result is fully materialized (a List), not a lazy sequence.
        /// </summary>
        public IEnumerable<RoomViewModel> GetAllRooms()
        {
            var allRooms = new List<RoomViewModel>();
            var allRoomRecords = roomRepository.GetAllRooms();

            foreach(var roomRecord in allRoomRecords)
            {
                allRooms.Add(mapper.MapRoomRecordToRoomViewModel(roomRecord));
            }

            return allRooms;
        }

        /// <summary>
        /// Returns all messages belonging to the room with the given ID,
        /// mapped to view models.
        /// </summary>
        public IEnumerable<MessageViewModel> GetRoomMessages(int roomID)
        {
            var roomMessages = new List<MessageViewModel>();
            var roomMessageRecords = messageRepository.GetMessagesForRoomID(roomID);

            foreach(var messageRecord in roomMessageRecords)
            {
                roomMessages.Add(mapper.MapMessageRecordToMessageViewModel(messageRecord));
            }

            return roomMessages;
        }

        /// <summary>
        /// Creates a new room. Only the mapped record's Name is persisted;
        /// other view-model fields are ignored by the repository call.
        /// </summary>
        public void CreateRoom(RoomViewModel roomViewModel)
        {
            var roomRecord = mapper.MapRoomViewModelToRoomRecord(roomViewModel);
            roomRepository.CreateRoom(roomRecord.Name);
        }

        /// <summary>
        /// Adds a message to a room, persisting the mapped record's
        /// RoomID, AuthorName and Text.
        /// </summary>
        public void AddMessage(MessageViewModel messageViewModel)
        {
            var messageRecord = mapper.MapMessageViewModelToMessageRecord(messageViewModel);
            messageRepository.AddMessageToRoom(messageRecord.RoomID, messageRecord.AuthorName, messageRecord.Text);
        }

        // Injected dependencies (all validated non-null in the constructor).
        private readonly IRoomRepository roomRepository;
        private readonly IMessageRepository messageRepository;
        private readonly IViewModelMapper mapper;
    }
}
{ "pile_set_name": "Github" }
<!-- Manual test page for the Sereal JavaScript decoder: fetches a list of
     .srl fixture files over XHR and decodes each one, logging the result
     (or the error) to the browser console. -->
<!doctype html> <!-- fixed: "html5" is not a valid doctype name; the HTML5 doctype is just "html" -->
<html>

<head>
    <script src="../build/sereal.js"></script>
    <script>
        // Fixture documents to decode, relative to this page.
        var urls = [
            "./srl/test1.srl",
            "./srl/test4.srl",
            "./srl/test5.srl",
            "./srl/test21.srl",
            "./srl/test22.srl",
            "./srl/test23.srl",
            "./srl/test24.srl",
            "./srl/test27.srl",
            "./srl/test29.srl",
            "./srl/test2.srl",
            "./srl/test3.srl",
        ];

        // Decode every fixture in sequence; a failure on one file is logged
        // and does not stop the remaining files from being processed.
        async function main() {
            for (var url of urls) {
                try {
                    var res = await decode(url);
                    console.log(url, res);
                } catch (err) {
                    console.log(url, err);
                }
            }
        }

        // Fetch one .srl file and decode it, returning the document body.
        async function decode(url) {
            var bytes = await getBinary(url);
            var dec = new Sereal.Decoder();
            var doc = dec.decodeDocument(bytes); // DataReader or TypedArray
            var data = doc.body;
            return data;
        }

        // Promise wrapper around a binary XHR GET, resolving to a Uint8Array.
        // NOTE(review): xhr.onerror is not handled, so a network failure leaves
        // the promise pending forever — acceptable for a manual test page.
        function getBinary(url) {
            return new Promise((resolve, reject) => {
                var xhr = new XMLHttpRequest();
                xhr.open("GET", url, true);
                xhr.responseType = "arraybuffer";
                xhr.onload = e => {
                    var byteArray = xhr.response;
                    var arr = new Uint8Array(byteArray);
                    resolve(arr);
                };
                xhr.send();
            });
        }
    </script>
</head>

<body onload="main()">
    <p>Please run this page using a static web server (http), and open the console to see the results</p>
</body>

</html>
{ "pile_set_name": "Github" }
<?xml version="1.0"?> <ZopeData> <record id="1" aka="AAAAAAAAAAE="> <pickle> <global name="Category" module="erp5.portal_type"/> </pickle> <pickle> <dictionary> <item> <key> <string>categories</string> </key> <value> <tuple> <string>budget_section/section_inv/prev_sans_exc/021</string> </tuple> </value> </item> <item> <key> <string>id</string> </key> <value> <string>021</string> </value> </item> <item> <key> <string>int_index</string> </key> <value> <int>2</int> </value> </item> <item> <key> <string>portal_type</string> </key> <value> <string>Category</string> </value> </item> <item> <key> <string>short_title</string> </key> <value> <string>Virement de la section d’exploitation</string> </value> </item> <item> <key> <string>title</string> </key> <value> <string>021 - Virement de la section d’exploitation (recettes)</string> </value> </item> </dictionary> </pickle> </record> </ZopeData>
{ "pile_set_name": "Github" }
#version 450

// Fragment shader exercising nested struct access inside a std140 UBO:
// the output color is the sum of two vec4 members reached through two
// levels of struct nesting (UBO -> Bar -> Foo -> a).

struct Foo
{
    vec4 a;
};

struct Bar
{
    Foo foo;
    Foo foo2;
};

// std140 uniform block at binding 0 holding a single nested Bar.
layout(binding = 0, std140) uniform UBO
{
    Bar bar;
} _7;

layout(location = 0) out vec4 FragColor;

void main()
{
    FragColor = _7.bar.foo.a + _7.bar.foo2.a;
}
{ "pile_set_name": "Github" }
St Helena, Ascension and Tristan da Cunha is a British overseas territory. Contact the St Helena, Ascension and Tristan da Cunha Government to find out about getting married there, including what documents you’ll need. $A Governor’s Office St Helena The Castle Jamestown St Helena, Ascension and Tristan da Cunha $A $C [Governor’s Office St Helena - opening hours](https://www.gov.uk/world/organisations/governors-office-st-helena-island-south-atlantic-ocean/office/governors-office-st-helena-island-south-atlantic-ocean) $C ^You should [get legal advice](/government/collections/list-of-lawyers) before making any plans.^
{ "pile_set_name": "Github" }
あいうえお かきくけこ ほげら
{ "pile_set_name": "Github" }
using System;
using System.Collections.Generic;
using System.Configuration;
using System.Data;
using System.Linq;
using System.Threading.Tasks;
using System.Windows;

namespace CustomWatermarkTool
{
    /// <summary>
    /// Interaction logic for App.xaml
    /// </summary>
    // WPF application entry class; the partial counterpart is generated from
    // App.xaml by the XAML compiler. No custom startup logic is added here.
    public partial class App : Application
    {
    }
}
{ "pile_set_name": "Github" }
//
// MimeVisitorExamples.cs
//
// Author: Jeffrey Stedfast <jeff@xamarin.com>
//
// Copyright (c) 2013-2016 Xamarin Inc. (www.xamarin.com)
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
//

using System;
using System.IO;
using System.Linq;
using System.Text;
using System.Collections.Generic;

using MimeKit;

namespace MimeKit.Examples {
	/// <summary>
	/// Documentation snippets showing the two common ways to forward a message:
	/// as an attached message/rfc822 part, and inline with the original quoted.
	/// NOTE(review): the two Forward overloads have identical signatures because
	/// each #region is extracted separately into the docs; the file is not
	/// intended to compile as a single class — confirm before reusing as-is.
	/// </summary>
	public static class ForwardExamples
	{
		#region ForwardAttached
		/// <summary>
		/// Forwards <paramref name="original"/> by attaching it as a
		/// message/rfc822 part inside a multipart/mixed body.
		/// </summary>
		public static MimeMessage Forward (MimeMessage original, MailboxAddress from, IEnumerable<InternetAddress> to)
		{
			var message = new MimeMessage ();
			message.From.Add (from);
			message.To.AddRange (to);

			// set the forwarded subject (avoid stacking "FW:" prefixes)
			if (!original.Subject.StartsWith ("FW:", StringComparison.OrdinalIgnoreCase))
				message.Subject = "FW: " + original.Subject;
			else
				message.Subject = original.Subject;

			// create the main textual body of the message
			var text = new TextPart ("plain") { Text = "Here's the forwarded message:" };

			// create the message/rfc822 attachment for the original message
			var rfc822 = new MessagePart { Message = original };

			// create a multipart/mixed container for the text body and the forwarded message
			var multipart = new Multipart ("mixed");
			multipart.Add (text);
			multipart.Add (rfc822);

			// set the multipart as the body of the message
			message.Body = multipart;

			return message;
		}
		#endregion ForwardAttached

		#region ForwardInline
		/// <summary>
		/// Forwards <paramref name="original"/> inline, quoting its headers and
		/// text body below an "-----Original Message-----" separator.
		/// </summary>
		public static MimeMessage Forward (MimeMessage original, MailboxAddress from, IEnumerable<InternetAddress> to)
		{
			var message = new MimeMessage ();
			message.From.Add (from);
			message.To.AddRange (to);

			// set the forwarded subject (avoid stacking "FW:" prefixes)
			if (!original.Subject.StartsWith ("FW:", StringComparison.OrdinalIgnoreCase))
				message.Subject = "FW: " + original.Subject;
			else
				message.Subject = original.Subject;

			// quote the original message text
			using (var text = new StringWriter ()) {
				text.WriteLine ();
				text.WriteLine ("-----Original Message-----");
				// fixed: was `test.WriteLine (...)` — a typo referencing an
				// undefined identifier; the StringWriter is named `text`.
				text.WriteLine ("From: {0}", original.From);
				text.WriteLine ("Sent: {0}", DateUtils.FormatDate (original.Date));
				text.WriteLine ("To: {0}", original.To);
				text.WriteLine ("Subject: {0}", original.Subject);
				text.WriteLine ();

				text.Write (original.TextBody);

				message.Body = new TextPart ("plain") {
					Text = text.ToString ()
				};
			}

			return message;
		}
		#endregion ForwardInline
	}
}
{ "pile_set_name": "Github" }
fileFormatVersion: 2 guid: 06f20acc315372c45bb9fd7679ac82fc VisualEffectImporter: externalObjects: {} userData: assetBundleName: assetBundleVariant:
{ "pile_set_name": "Github" }
require(File.expand_path(File.dirname(__FILE__)+'/helpers_tests.rb'))
require 'ostruct'

# Tests for Statsample::GGobi XML export helpers: value serialization and
# categorical-variable definitions for the GGobi visualization tool.
class StatsampleGGobiTestCase < MiniTest::Unit::TestCase
  # Builds a small dataset with a scale, a labeled nominal, and an ordinal
  # vector; @v2 (nominal, with labels for "a" and "d") is kept for direct use.
  def setup
    v1=([10.2,20.3,10,20,30,40,30,20,30,40]*10).to_vector(:scale)
    @v2=(%w{a b c a a a b b c d}*10).to_vector(:nominal)
    @v2.labels={"a"=>"letter a","d"=>"letter d"}
    v3=([1,2,3,4,5,4,3,2,1,2]*10).to_vector(:ordinal)
    @ds={'v1'=>v1,'v2'=>@v2,'v3'=>v3}.to_dataset
  end
  # Values are space-joined, with nil replaced by the given missing marker.
  def test_values_definition
    a=[1.0,2,"a",nil]
    assert_equal("1.0 2 a NA", Statsample::GGobi.values_definition(a,"NA"))
  end
  # A nominal vector should produce a <categoricalvariable> definition with
  # 1-based level codes, using labels where defined, and should record the
  # value->code conversion map and the categorical flag on the carrier.
  def test_variable_definition
    carrier=OpenStruct.new
    carrier.categorials=[]
    carrier.conversions={}
    real_var_definition=Statsample::GGobi.variable_definition(carrier,@v2,'variable 2',"v2")
    expected=<<-EOS
<categoricalvariable name="variable 2" nickname="v2">
 <levels count="4">
 <level value="1">letter a</level>
 <level value="2">b</level>
 <level value="3">c</level>
 <level value="4">letter d</level></levels>
 </categoricalvariable>
EOS
    # Whitespace is normalized on both sides before comparison.
    assert_equal(expected.gsub(/\s/," "),real_var_definition.gsub(/\s/," "))
    assert_equal({'variable 2'=>{'a'=>1,'b'=>2,'c'=>3,'d'=>4}},carrier.conversions)
    assert_equal(['variable 2'],carrier.categorials)
  end
end
{ "pile_set_name": "Github" }
// ================================================================================
// + Base
// --------------------------------------------------------------------------------
//   - Colors & Interaction: Mix-Ins
// --------------------------------------------------------------------------------

// Draws a 1px "outline" around text or a box by stacking shadows in all eight
// directions (plus a blur pass); $SCOPED__TextOrBox is interpolated into the
// property name, so it must be "text" or "box".
@mixin Icon-OutlineColor($SCOPED__TextOrBox, $SCOPED__Icon-OutlineColor) {
	#{$SCOPED__TextOrBox}-shadow:
		-1px -1px 0px $SCOPED__Icon-OutlineColor,
		0px -1px 0px $SCOPED__Icon-OutlineColor,
		1px -1px 0px $SCOPED__Icon-OutlineColor,
		1px 0px 0px $SCOPED__Icon-OutlineColor,
		1px 1px 0px $SCOPED__Icon-OutlineColor,
		0px 1px 0px $SCOPED__Icon-OutlineColor,
		-1px 1px 0px $SCOPED__Icon-OutlineColor,
		-1px 0px 0px $SCOPED__Icon-OutlineColor,
		0px 0px 1px $SCOPED__Icon-OutlineColor;
}

// Applies the full icon color scheme: font glyph paint/outline (:before/:after),
// shape-based icon paint/outline, and the icon box's background/border.
// A `transparent` outline color skips the shadow-outline entirely.
@mixin Icon-Colouring($SCOPED__Icon-Font_PaintColor, $SCOPED__Icon-Font_OutlineColor, $SCOPED__Icon-Shape_PaintColor, $SCOPED__Icon-Shape_OutlineColor, $SCOPED__Icon-BackgroundColor, $SCOPED__Icon-BorderColor) {
	color: $SCOPED__Icon-BorderColor;
	border-color: $SCOPED__Icon-BorderColor;
	background-color: $SCOPED__Icon-BackgroundColor;
	&:before, &:after {
		color: $SCOPED__Icon-Font_PaintColor;
		@if $SCOPED__Icon-Font_OutlineColor != transparent { @include Icon-OutlineColor("text", $SCOPED__Icon-Font_OutlineColor); }
	}
	span.bibi-shape-spreads { span.bibi-shape-spread { span.bibi-shape-item {
		border-color: $SCOPED__Icon-Shape_OutlineColor;
		background-color: $SCOPED__Icon-Shape_PaintColor;
	} } }
	// The toggle-panel icon draws its glyph with a filled <span> instead of a font glyph.
	&.bibi-icon-toggle-panel {
		>span {
			background-color: $SCOPED__Icon-Font_PaintColor;
			@if $SCOPED__Icon-Font_OutlineColor != transparent { @include Icon-OutlineColor("box", $SCOPED__Icon-Font_OutlineColor); }
		}
	}
}

// --------------------------------------------------------------------------------
//   - Colors & Interaction: Default
// --------------------------------------------------------------------------------
// Base (non-hover, non-active) theming, themed per container: menu / slider / subpanel.

.bibi-icon {
	.bibi-button.disabled & { &:before, &:after { opacity: 0.33 !important; } }
	#bibi-menu & {
		@include Icon-Colouring(
			$Menu-Icon-Font_PaintColor, $Menu-Icon-Font_OutlineColor,
			$Menu-Icon-Shape_PaintColor, $Menu-Icon-Shape_OutlineColor,
			$Menu-Icon_BackgroundColor, $Menu-Icon_BorderColor
		);
		&, &:before, &:after, * { transition: $Menu-Icon_Transition; }
		&:before, &:after { transform: $Menu-Icon_Transform; }
	}
	#bibi-slider & {
		@include Icon-Colouring(
			$Slider-Icon-Font_PaintColor, $Slider-Icon-Font_OutlineColor,
			$Slider-Icon-Shape_PaintColor, $Slider-Icon-Shape_OutlineColor,
			$Slider-Icon_BackgroundColor, $Slider-Icon_BorderColor
		);
		&, &:before, &:after, * { transition: $Slider-Icon_Transition; }
		&:before, &:after { transform: $Slider-Icon_Transform; }
	}
	.bibi-subpanel & {
		@include Icon-Colouring(
			$Subpanel-Icon-Font_PaintColor, $Subpanel-Icon-Font_OutlineColor,
			$Subpanel-Icon-Shape_PaintColor, $Subpanel-Icon-Shape_OutlineColor,
			$Subpanel-Icon_BackgroundColor, $Subpanel-Icon_BorderColor
		);
		&, &:before, &:after, * { transition: $Subpanel-Icon_Transition; }
		&:before, &:after { transform: $Subpanel-Icon_Transform; }
	}
}

// --------------------------------------------------------------------------------
//   - Colors & Interaction: Default + Hover
// --------------------------------------------------------------------------------
// Hover is driven by the script-applied `.hover` class; the native :hover
// selector is intentionally left commented out.

.bibi-button.default { &.hover/*, &:hover*/ { .bibi-icon {
	#bibi-menu & {
		@include Icon-Colouring(
			$Menu-Icon-Font_PaintColor__Hover, $Menu-Icon-Font_OutlineColor__Hover,
			$Menu-Icon-Shape_PaintColor__Hover, $Menu-Icon-Shape_OutlineColor__Hover,
			$Menu-Icon_BackgroundColor__Hover, $Menu-Icon_BorderColor__Hover
		);
		&:before, &:after { transform: $Menu-Icon_Transform__Hover; }
	}
	#bibi-slider & {
		@include Icon-Colouring(
			$Slider-Icon-Font_PaintColor__Hover, $Slider-Icon-Font_OutlineColor__Hover,
			$Slider-Icon-Shape_PaintColor__Hover, $Slider-Icon-Shape_OutlineColor__Hover,
			$Slider-Icon_BackgroundColor__Hover, $Slider-Icon_BorderColor__Hover
		);
		&:before, &:after { transform: $Slider-Icon_Transform__Hover; }
	}
	.bibi-subpanel & {
		@include Icon-Colouring(
			$Subpanel-Icon-Font_PaintColor__Hover, $Subpanel-Icon-Font_OutlineColor__Hover,
			$Subpanel-Icon-Shape_PaintColor__Hover, $Subpanel-Icon-Shape_OutlineColor__Hover,
			$Subpanel-Icon_BackgroundColor__Hover, $Subpanel-Icon_BorderColor__Hover
		);
		&:before, &:after { transform: $Subpanel-Icon_Transform__Hover; }
	}
}}}

// --------------------------------------------------------------------------------
//   - Colors & Interaction: Active
// --------------------------------------------------------------------------------

.bibi-button.active { .bibi-icon {
	#bibi-menu & {
		@include Icon-Colouring(
			$Menu-Icon-Font_PaintColor__Active, $Menu-Icon-Font_OutlineColor__Active,
			$Menu-Icon-Shape_PaintColor__Active, $Menu-Icon-Shape_OutlineColor__Active,
			$Menu-Icon_BackgroundColor__Active, $Menu-Icon_BorderColor__Active
		);
		&:before, &:after { transform: $Menu-Icon_Transform__Active; }
	}
	#bibi-slider & {
		@include Icon-Colouring(
			$Slider-Icon-Font_PaintColor__Active, $Slider-Icon-Font_OutlineColor__Active,
			$Slider-Icon-Shape_PaintColor__Active, $Slider-Icon-Shape_OutlineColor__Active,
			$Slider-Icon_BackgroundColor__Active, $Slider-Icon_BorderColor__Active
		);
		&:before, &:after { transform: $Slider-Icon_Transform__Active; }
	}
	.bibi-subpanel & {
		@include Icon-Colouring(
			$Subpanel-Icon-Font_PaintColor__Active, $Subpanel-Icon-Font_OutlineColor__Active,
			$Subpanel-Icon-Shape_PaintColor__Active, $Subpanel-Icon-Shape_OutlineColor__Active,
			$Subpanel-Icon_BackgroundColor__Active, $Subpanel-Icon_BorderColor__Active
		);
		&:before, &:after { transform: $Subpanel-Icon_Transform__Active; }
	}
}}

// --------------------------------------------------------------------------------
//   - Colors & Interaction: Active + Hover
// --------------------------------------------------------------------------------
// Only normal and toggle buttons get the active+hover treatment.

.bibi-button-normal, .bibi-button-toggle { &.active { &.hover/*, &:hover*/ { .bibi-icon {
	#bibi-menu & {
		@include Icon-Colouring(
			$Menu-Icon-Font_PaintColor__Active-Hover, $Menu-Icon-Font_OutlineColor__Active-Hover,
			$Menu-Icon-Shape_PaintColor__Active-Hover, $Menu-Icon-Shape_OutlineColor__Active-Hover,
			$Menu-Icon_BackgroundColor__Active-Hover, $Menu-Icon_BorderColor__Active-Hover
		);
		&:before, &:after { transform: $Menu-Icon_Transform__Active-Hover; }
	}
	#bibi-slider & {
		@include Icon-Colouring(
			$Slider-Icon-Font_PaintColor__Active-Hover, $Slider-Icon-Font_OutlineColor__Active-Hover,
			$Slider-Icon-Shape_PaintColor__Active-Hover, $Slider-Icon-Shape_OutlineColor__Active-Hover,
			$Slider-Icon_BackgroundColor__Active-Hover, $Slider-Icon_BorderColor__Active-Hover
		);
		&:before, &:after { transform: $Slider-Icon_Transform__Active-Hover; }
	}
	.bibi-subpanel & {
		@include Icon-Colouring(
			$Subpanel-Icon-Font_PaintColor__Active-Hover, $Subpanel-Icon-Font_OutlineColor__Active-Hover,
			$Subpanel-Icon-Shape_PaintColor__Active-Hover, $Subpanel-Icon-Shape_OutlineColor__Active-Hover,
			$Subpanel-Icon_BackgroundColor__Active-Hover, $Subpanel-Icon_BorderColor__Active-Hover
		);
		&:before, &:after { transform: $Subpanel-Icon_Transform__Active-Hover; }
	}
}}}}

// --------------------------------------------------------------------------------
//   - Colors & Interaction: Disabled
// --------------------------------------------------------------------------------
// Disabled icons only dim the glyph; colors are left as the container default.

.bibi-button.disabled .bibi-icon {
	#bibi-menu & { &:before, &:after { opacity: $Menu-Icon_Opacity__Disabled !important; } }
	#bibi-slider & { &:before, &:after { opacity: $Slider-Icon_Opacity__Disabled !important; } }
	.bibi-subpanel & { &:before, &:after { opacity: $Subpanel-Icon_Opacity__Disabled !important; } }
}

// ================================================================================
// + General Icons
// --------------------------------------------------------------------------------
//   - General Icons' Common Style
// --------------------------------------------------------------------------------

// -- In Menu
.bibi-icon-config, .bibi-icon-change-fontsize, .bibi-icon-loupe, .bibi-icon-manage-bookmarks,
// -- In Slider
.bibi-icon-history,
// -- In Subpanel
.bibi-icon-full-breadth-layout, .bibi-icon-toggle-fullscreen, .bibi-icon-open-newwindow, .bibi-icon-fontsize, .bibi-icon-bookmark {
	@include GENERALICON__Common();
}

// --------------------------------------------------------------------------------
//   - General Icons in Menu
// --------------------------------------------------------------------------------

.bibi-icon-config, .bibi-icon-change-fontsize, .bibi-icon-loupe, .bibi-icon-manage-bookmarks { @include GENERALICON__CommonInMenu(); }
.bibi-icon-config { @include GENERALICON__Config(); }
.bibi-icon-change-fontsize { @include GENERALICON__ChangeFontSize(); }
.bibi-icon-loupe {
	@include GENERALICON__LoupeCommon();
	&-zoomin { @include GENERALICON__LoupeZoomIn(); }
	&-zoomout { @include GENERALICON__LoupeZoomOut(); }
	&-reset { @include GENERALICON__LoupeReset(); }
}
.bibi-icon-manage-bookmarks { @include GENERALICON__ManageBookmarks(); }

// --------------------------------------------------------------------------------
//   - General Icons in Slider
// --------------------------------------------------------------------------------
// NOTE(review): .bibi-icon-history appears twice on purpose-unknown grounds
// (common-in-slider mixin first, then the History glyph) — confirm this
// duplication is intentional rather than a copy/paste slip.

.bibi-icon-history { @include GENERALICON__CommonInSlider(); }
.bibi-icon-history { @include GENERALICON__History(); }

// --------------------------------------------------------------------------------
//   - General Icons in Subpanels
// --------------------------------------------------------------------------------

.bibi-icon-full-breadth-layout, .bibi-icon-toggle-fullscreen, .bibi-icon-open-newwindow, .bibi-icon-fontsize, .bibi-icon-bookmark { @include GENERALICON__CommonInSubpanels(); }
.bibi-icon-full-breadth-layout { @include GENERALICON__UseFullBreadth(); }
.bibi-icon-toggle-fullscreen { @include GENERALICON__ToggleFullscreen(); }
.bibi-icon-open-newwindow { @include GENERALICON__OpenNewWindow(); }
.bibi-icon-fontsize {
	@include GENERALICON__FontSizeCommon();
	&-exlarge { @include GENERALICON__FontSizeXL(); }
	&-large { @include GENERALICON__FontSizeL(); }
	&-medium { @include GENERALICON__FontSizeM(); }
	&-small { @include GENERALICON__FontSizeS(); }
	&-exsmall { @include GENERALICON__FontSizeXS(); }
}
.bibi-icon-bookmark { @include GENERALICON__BookmarkCommon(); }
.bibi-icon-add-a-bookmark { @include GENERALICON__AddABookmark(); }
.bibi-icon-a-bookmark { @include GENERALICON__ABookmark(); }

// ================================================================================
// + Special Icons
// --------------------------------------------------------------------------------
//   - "Toggle Panel" Icon in Menu
// --------------------------------------------------------------------------------

.bibi-icon-toggle-panel {
	@include SPECIALICON__TogglePanel();
	.bibi-button.active & { @include SPECIALICON__TogglePanel__Active(); }
}

// --------------------------------------------------------------------------------
//   - "View Xxxx" Icons in Subpanels
// --------------------------------------------------------------------------------

.bibi-icon-view {
	@include SPECIALICON__View_Common();
	&-paged { @include SPECIALICON__ViewPaged(); }
	&-horizontal { @include SPECIALICON__ViewHorizontal(); }
	&-vertical { @include SPECIALICON__ViewVertical(); }
}
{ "pile_set_name": "Github" }
c18fb1de6a156c5639d73524172ec8988baaba0d
{ "pile_set_name": "Github" }
/*
 * Copyright (C) 2019 Peng fei Pan <panpfpanpf@outlook.me>
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package me.panpf.sketch.optionsfilter;

import androidx.annotation.NonNull;

import me.panpf.sketch.request.DownloadOptions;
import me.panpf.sketch.request.LoadOptions;

/**
 * Low-quality Bitmap.Config options filter: forces the low-quality image flag
 * on load requests so decoded bitmaps use a cheaper configuration.
 * (Original comment: "低质量 Bitmap.Config".)
 */
public class LowQualityOptionsFilter implements OptionsFilter {

    /**
     * Enables the low-quality-image flag when the options are load options;
     * plain download options carry no decode settings and are left untouched.
     */
    @Override
    public void filter(@NonNull DownloadOptions options) {
        if (options instanceof LoadOptions) {
            ((LoadOptions) options).setLowQualityImage(true);
        }
    }
}
{ "pile_set_name": "Github" }
// Copyright 2018 Google LLC All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//    http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package partial

import (
	"github.com/google/go-containerregistry/pkg/v1/types"
)

// imageCore is the core set of properties without which we cannot build a v1.Image.
// Implementations supply only these two accessors; the rest of the v1.Image
// surface is derived by this package's partial-image helpers.
type imageCore interface {
	// RawConfigFile returns the serialized bytes of this image's config file.
	RawConfigFile() ([]byte, error)

	// MediaType of this image's manifest.
	MediaType() (types.MediaType, error)
}
{ "pile_set_name": "Github" }
config SELECT_MEMORY_MODEL def_bool y depends on ARCH_SELECT_MEMORY_MODEL choice prompt "Memory model" depends on SELECT_MEMORY_MODEL default DISCONTIGMEM_MANUAL if ARCH_DISCONTIGMEM_DEFAULT default SPARSEMEM_MANUAL if ARCH_SPARSEMEM_DEFAULT default FLATMEM_MANUAL config FLATMEM_MANUAL bool "Flat Memory" depends on !(ARCH_DISCONTIGMEM_ENABLE || ARCH_SPARSEMEM_ENABLE) || ARCH_FLATMEM_ENABLE help This option allows you to change some of the ways that Linux manages its memory internally. Most users will only have one option here: FLATMEM. This is normal and a correct option. Some users of more advanced features like NUMA and memory hotplug may have different options here. DISCONTIGMEM is a more mature, better tested system, but is incompatible with memory hotplug and may suffer decreased performance over SPARSEMEM. If unsure between "Sparse Memory" and "Discontiguous Memory", choose "Discontiguous Memory". If unsure, choose this option (Flat Memory) over any other. config DISCONTIGMEM_MANUAL bool "Discontiguous Memory" depends on ARCH_DISCONTIGMEM_ENABLE help This option provides enhanced support for discontiguous memory systems, over FLATMEM. These systems have holes in their physical address spaces, and this option provides more efficient handling of these holes. However, the vast majority of hardware has quite flat address spaces, and can have degraded performance from the extra overhead that this option imposes. Many NUMA configurations will have this as the only option. If unsure, choose "Flat Memory" over this option. config SPARSEMEM_MANUAL bool "Sparse Memory" depends on ARCH_SPARSEMEM_ENABLE help This will be the only option for some systems, including memory hotplug systems. This is normal. For many other systems, this will be an alternative to "Discontiguous Memory". This option provides some potential performance benefits, along with decreased code complexity, but it is newer, and more experimental. 
If unsure, choose "Discontiguous Memory" or "Flat Memory" over this option. endchoice config DISCONTIGMEM def_bool y depends on (!SELECT_MEMORY_MODEL && ARCH_DISCONTIGMEM_ENABLE) || DISCONTIGMEM_MANUAL config SPARSEMEM def_bool y depends on (!SELECT_MEMORY_MODEL && ARCH_SPARSEMEM_ENABLE) || SPARSEMEM_MANUAL config FLATMEM def_bool y depends on (!DISCONTIGMEM && !SPARSEMEM) || FLATMEM_MANUAL config FLAT_NODE_MEM_MAP def_bool y depends on !SPARSEMEM # # Both the NUMA code and DISCONTIGMEM use arrays of pg_data_t's # to represent different areas of memory. This variable allows # those dependencies to exist individually. # config NEED_MULTIPLE_NODES def_bool y depends on DISCONTIGMEM || NUMA config HAVE_MEMORY_PRESENT def_bool y depends on ARCH_HAVE_MEMORY_PRESENT || SPARSEMEM # # SPARSEMEM_EXTREME (which is the default) does some bootmem # allocations when memory_present() is called. If this cannot # be done on your architecture, select this option. However, # statically allocating the mem_section[] array can potentially # consume vast quantities of .bss, so be careful. # # This option will also potentially produce smaller runtime code # with gcc 3.4 and later. # config SPARSEMEM_STATIC bool # # Architecture platforms which require a two level mem_section in SPARSEMEM # must select this option. This is usually for architecture platforms with # an extremely sparse physical address space. # config SPARSEMEM_EXTREME def_bool y depends on SPARSEMEM && !SPARSEMEM_STATIC config SPARSEMEM_VMEMMAP_ENABLE bool config SPARSEMEM_ALLOC_MEM_MAP_TOGETHER def_bool y depends on SPARSEMEM && X86_64 config SPARSEMEM_VMEMMAP bool "Sparse Memory virtual memmap" depends on SPARSEMEM && SPARSEMEM_VMEMMAP_ENABLE default y help SPARSEMEM_VMEMMAP uses a virtually mapped memmap to optimise pfn_to_page and page_to_pfn operations. This is the most efficient option when sufficient kernel resources are available. 
config HAVE_MEMBLOCK bool config HAVE_MEMBLOCK_NODE_MAP bool config HAVE_MEMBLOCK_PHYS_MAP bool config HAVE_GENERIC_RCU_GUP bool config ARCH_DISCARD_MEMBLOCK bool config NO_BOOTMEM bool config MEMORY_ISOLATION bool config MOVABLE_NODE bool "Enable to assign a node which has only movable memory" depends on HAVE_MEMBLOCK depends on NO_BOOTMEM depends on X86_64 depends on NUMA default n help Allow a node to have only movable memory. Pages used by the kernel, such as direct mapping pages cannot be migrated. So the corresponding memory device cannot be hotplugged. This option allows the following two things: - When the system is booting, node full of hotpluggable memory can be arranged to have only movable memory so that the whole node can be hot-removed. (need movable_node boot option specified). - After the system is up, the option allows users to online all the memory of a node as movable memory so that the whole node can be hot-removed. Users who don't use the memory hotplug feature are fine with this option on since they don't specify movable_node boot option or they don't online memory as movable. Say Y here if you want to hotplug a whole node. Say N here if you want kernel to use memory on all nodes evenly. # # Only be set on architectures that have completely implemented memory hotplug # feature. If you are not sure, don't touch it. 
# config HAVE_BOOTMEM_INFO_NODE def_bool n # eventually, we can have this option just 'select SPARSEMEM' config MEMORY_HOTPLUG bool "Allow for memory hot-add" depends on SPARSEMEM || X86_64_ACPI_NUMA depends on ARCH_ENABLE_MEMORY_HOTPLUG depends on COMPILE_TEST || !KASAN config MEMORY_HOTPLUG_SPARSE def_bool y depends on SPARSEMEM && MEMORY_HOTPLUG config MEMORY_HOTPLUG_DEFAULT_ONLINE bool "Online the newly added memory blocks by default" default n depends on MEMORY_HOTPLUG help This option sets the default policy setting for memory hotplug onlining policy (/sys/devices/system/memory/auto_online_blocks) which determines what happens to newly added memory regions. Policy setting can always be changed at runtime. See Documentation/memory-hotplug.txt for more information. Say Y here if you want all hot-plugged memory blocks to appear in 'online' state by default. Say N here if you want the default policy to keep all hot-plugged memory blocks in 'offline' state. config MEMORY_HOTREMOVE bool "Allow for memory hot remove" select MEMORY_ISOLATION select HAVE_BOOTMEM_INFO_NODE if (X86_64 || PPC64) depends on MEMORY_HOTPLUG && ARCH_ENABLE_MEMORY_HOTREMOVE depends on MIGRATION # Heavily threaded applications may benefit from splitting the mm-wide # page_table_lock, so that faults on different parts of the user address # space can be handled with less contention: split it at this NR_CPUS. # Default to 4 for wider testing, though 8 might be more appropriate. # ARM's adjust_pte (unused if VIPT) depends on mm-wide page_table_lock. # PA-RISC 7xxx's spinlock_t would enlarge struct page from 32 to 44 bytes. # DEBUG_SPINLOCK and DEBUG_LOCK_ALLOC spinlock_t also enlarge struct page. 
# config SPLIT_PTLOCK_CPUS int default "999999" if !MMU default "999999" if ARM && !CPU_CACHE_VIPT default "999999" if PARISC && !PA20 default "4" config ARCH_ENABLE_SPLIT_PMD_PTLOCK bool # # support for memory balloon config MEMORY_BALLOON bool # # support for memory balloon compaction config BALLOON_COMPACTION bool "Allow for balloon memory compaction/migration" def_bool y depends on COMPACTION && MEMORY_BALLOON help Memory fragmentation introduced by ballooning might reduce significantly the number of 2MB contiguous memory blocks that can be used within a guest, thus imposing performance penalties associated with the reduced number of transparent huge pages that could be used by the guest workload. Allowing the compaction & migration for memory pages enlisted as being part of memory balloon devices avoids the scenario aforementioned and helps improving memory defragmentation. # # support for memory compaction config COMPACTION bool "Allow for memory compaction" def_bool y select MIGRATION depends on MMU help Compaction is the only memory management component to form high order (larger physically contiguous) memory blocks reliably. The page allocator relies on compaction heavily and the lack of the feature can lead to unexpected OOM killer invocations for high order memory requests. You shouldn't disable this option unless there really is a strong reason for it and then we would be really interested to hear about that at linux-mm@kvack.org. # # support for page migration # config MIGRATION bool "Page migration" def_bool y depends on (NUMA || ARCH_ENABLE_MEMORY_HOTREMOVE || COMPACTION || CMA) && MMU help Allows the migration of the physical location of pages of processes while the virtual addresses are not changed. This is useful in two situations. The first is on NUMA systems to put pages nearer to the processors accessing. The second is when allocating huge pages as migration can relocate pages to satisfy a huge page allocation instead of reclaiming. 
config ARCH_ENABLE_HUGEPAGE_MIGRATION bool config PHYS_ADDR_T_64BIT def_bool 64BIT || ARCH_PHYS_ADDR_T_64BIT config BOUNCE bool "Enable bounce buffers" default y depends on BLOCK && MMU && (ZONE_DMA || HIGHMEM) help Enable bounce buffers for devices that cannot access the full range of memory available to the CPU. Enabled by default when ZONE_DMA or HIGHMEM is selected, but you may say n to override this. # On the 'tile' arch, USB OHCI needs the bounce pool since tilegx will often # have more than 4GB of memory, but we don't currently use the IOTLB to present # a 32-bit address to OHCI. So we need to use a bounce pool instead. config NEED_BOUNCE_POOL bool default y if TILE && USB_OHCI_HCD config NR_QUICK int depends on QUICKLIST default "2" if AVR32 default "1" config VIRT_TO_BUS bool help An architecture should select this if it implements the deprecated interface virt_to_bus(). All new architectures should probably not select this. config MMU_NOTIFIER bool select SRCU config KSM bool "Enable KSM for page merging" depends on MMU help Enable Kernel Samepage Merging: KSM periodically scans those areas of an application's address space that an app has advised may be mergeable. When it finds pages of identical content, it replaces the many instances by a single page with that content, so saving memory until one or another app needs to modify the content. Recommended for use with KVM, or with other duplicative applications. See Documentation/vm/ksm.txt for more information: KSM is inactive until a program has madvised that an area is MADV_MERGEABLE, and root has set /sys/kernel/mm/ksm/run to 1 (if CONFIG_SYSFS is set). config DEFAULT_MMAP_MIN_ADDR int "Low address space to protect from user allocation" depends on MMU default 4096 help This is the portion of low virtual memory which should be protected from userspace allocation. Keeping a user from writing to low pages can help reduce the impact of kernel NULL pointer bugs. 
For most ia64, ppc64 and x86 users with lots of address space a value of 65536 is reasonable and should cause no problems. On arm and other archs it should not be higher than 32768. Programs which use vm86 functionality or have some need to map this low address space will need CAP_SYS_RAWIO or disable this protection by setting the value to 0. This value can be changed after boot using the /proc/sys/vm/mmap_min_addr tunable. config ARCH_SUPPORTS_MEMORY_FAILURE bool config MEMORY_FAILURE depends on MMU depends on ARCH_SUPPORTS_MEMORY_FAILURE bool "Enable recovery from hardware memory errors" select MEMORY_ISOLATION select RAS help Enables code to recover from some memory failures on systems with MCA recovery. This allows a system to continue running even when some of its memory has uncorrected errors. This requires special hardware support and typically ECC memory. config HWPOISON_INJECT tristate "HWPoison pages injector" depends on MEMORY_FAILURE && DEBUG_KERNEL && PROC_FS select PROC_PAGE_MONITOR config NOMMU_INITIAL_TRIM_EXCESS int "Turn on mmap() excess space trimming before booting" depends on !MMU default 1 help The NOMMU mmap() frequently needs to allocate large contiguous chunks of memory on which to store mappings, but it can only ask the system allocator for chunks in 2^N*PAGE_SIZE amounts - which is frequently more than it requires. To deal with this, mmap() is able to trim off the excess and return it to the allocator. If trimming is enabled, the excess is trimmed off and returned to the system allocator, which can cause extra fragmentation, particularly if there are a lot of transient processes. If trimming is disabled, the excess is kept, but not used, which for long-term mappings means that the space is wasted. Trimming can be dynamically controlled through a sysctl option (/proc/sys/vm/nr_trim_pages) which specifies the minimum number of excess pages there must be before trimming should occur, or zero if no trimming is to occur. 
This option specifies the initial value of this option. The default of 1 says that all excess pages should be trimmed. See Documentation/nommu-mmap.txt for more information. config TRANSPARENT_HUGEPAGE bool "Transparent Hugepage Support" depends on HAVE_ARCH_TRANSPARENT_HUGEPAGE select COMPACTION select RADIX_TREE_MULTIORDER help Transparent Hugepages allows the kernel to use huge pages and huge tlb transparently to the applications whenever possible. This feature can improve computing performance to certain applications by speeding up page faults during memory allocation, by reducing the number of tlb misses and by speeding up the pagetable walking. If memory constrained on embedded, you may want to say N. choice prompt "Transparent Hugepage Support sysfs defaults" depends on TRANSPARENT_HUGEPAGE default TRANSPARENT_HUGEPAGE_ALWAYS help Selects the sysfs defaults for Transparent Hugepage Support. config TRANSPARENT_HUGEPAGE_ALWAYS bool "always" help Enabling Transparent Hugepage always, can increase the memory footprint of applications without a guaranteed benefit but it will work automatically for all applications. config TRANSPARENT_HUGEPAGE_MADVISE bool "madvise" help Enabling Transparent Hugepage madvise, will only provide a performance improvement benefit to the applications using madvise(MADV_HUGEPAGE) but it won't risk to increase the memory footprint of applications without a guaranteed benefit. endchoice # # We don't deposit page tables on file THP mapping, # but Power makes use of them to address MMU quirk. 
# config TRANSPARENT_HUGE_PAGECACHE def_bool y depends on TRANSPARENT_HUGEPAGE && !PPC # # UP and nommu archs use km based percpu allocator # config NEED_PER_CPU_KM depends on !SMP bool default y config CLEANCACHE bool "Enable cleancache driver to cache clean pages if tmem is present" default n help Cleancache can be thought of as a page-granularity victim cache for clean pages that the kernel's pageframe replacement algorithm (PFRA) would like to keep around, but can't since there isn't enough memory. So when the PFRA "evicts" a page, it first attempts to use cleancache code to put the data contained in that page into "transcendent memory", memory that is not directly accessible or addressable by the kernel and is of unknown and possibly time-varying size. And when a cleancache-enabled filesystem wishes to access a page in a file on disk, it first checks cleancache to see if it already contains it; if it does, the page is copied into the kernel and a disk access is avoided. When a transcendent memory driver is available (such as zcache or Xen transcendent memory), a significant I/O reduction may be achieved. When none is available, all cleancache calls are reduced to a single pointer-compare-against-NULL resulting in a negligible performance hit. If unsure, say Y to enable cleancache config FRONTSWAP bool "Enable frontswap to cache swap pages if tmem is present" depends on SWAP default n help Frontswap is so named because it can be thought of as the opposite of a "backing" store for a swap device. The data is stored into "transcendent memory", memory that is not directly accessible or addressable by the kernel and is of unknown and possibly time-varying size. When space in transcendent memory is available, a significant swap I/O reduction may be achieved. When none is available, all frontswap calls are reduced to a single pointer- compare-against-NULL resulting in a negligible performance hit and swap data is stored as normal on the matching swap device. 
	  If unsure, say Y to enable frontswap.

config CMA
	bool "Contiguous Memory Allocator"
	depends on HAVE_MEMBLOCK && MMU
	select MIGRATION
	select MEMORY_ISOLATION
	help
	  This enables the Contiguous Memory Allocator which allows other
	  subsystems to allocate big physically-contiguous blocks of memory.
	  CMA reserves a region of memory and allows only movable pages to
	  be allocated from it. This way, the kernel can use the memory for
	  pagecache and when a subsystem requests for contiguous area, the
	  allocated pages are migrated away to serve the contiguous request.

	  If unsure, say "n".

config CMA_DEBUG
	bool "CMA debug messages (DEVELOPMENT)"
	depends on DEBUG_KERNEL && CMA
	help
	  Turns on debug messages in CMA.  This produces KERN_DEBUG
	  messages for every CMA call as well as various messages while
	  processing calls such as dma_alloc_from_contiguous().
	  This option does not affect warning and error messages.

config CMA_DEBUGFS
	bool "CMA debugfs interface"
	depends on CMA && DEBUG_FS
	help
	  Turns on the DebugFS interface for CMA.

config CMA_AREAS
	int "Maximum count of the CMA areas"
	depends on CMA
	default 7
	help
	  CMA allows to create CMA areas for particular purpose, mainly,
	  used as device private area. This parameter sets the maximum
	  number of CMA area in the system.

	  If unsure, leave the default value "7".

config MEM_SOFT_DIRTY
	bool "Track memory changes"
	depends on CHECKPOINT_RESTORE && HAVE_ARCH_SOFT_DIRTY && PROC_FS
	select PROC_PAGE_MONITOR
	help
	  This option enables memory changes tracking by introducing a
	  soft-dirty bit on pte-s. This bit is set when someone writes
	  into a page just as regular dirty bit, but unlike the latter
	  it can be cleared by hands.

	  See Documentation/vm/soft-dirty.txt for more details.

config ZSWAP
	bool "Compressed cache for swap pages (EXPERIMENTAL)"
	depends on FRONTSWAP && CRYPTO=y
	select CRYPTO_LZO
	select ZPOOL
	default n
	help
	  A lightweight compressed cache for swap pages.
	  It takes pages that are in the
	  process of being swapped out and attempts to compress them into a
	  dynamically allocated RAM-based memory pool.  This can result in a
	  significant I/O reduction on swap device and, in the case where
	  decompressing from RAM is faster than swap device reads, can also
	  improve workload performance.

	  This is marked experimental because it is a new feature (as of
	  v3.11) that interacts heavily with memory reclaim.  While these
	  interactions don't cause any known issues on simple memory setups,
	  they have not been fully explored on the large set of potential
	  configurations and workloads that exist.

config ZPOOL
	tristate "Common API for compressed memory storage"
	default n
	help
	  Compressed memory storage API.  This allows using either zbud or
	  zsmalloc.

config ZBUD
	tristate "Low (Up to 2x) density storage for compressed pages"
	default n
	help
	  A special purpose allocator for storing compressed pages.
	  It is designed to store up to two compressed pages per physical
	  page.  While this design limits storage density, it has simple and
	  deterministic reclaim properties that make it preferable to a higher
	  density approach when reclaim will be used.

config Z3FOLD
	tristate "Up to 3x density storage for compressed pages"
	depends on ZPOOL
	default n
	help
	  A special purpose allocator for storing compressed pages.
	  It is designed to store up to three compressed pages per physical
	  page. It is a ZBUD derivative so the simplicity and determinism are
	  still there.

config ZSMALLOC
	tristate "Memory allocator for compressed pages"
	depends on MMU
	default n
	help
	  zsmalloc is a slab-based memory allocator designed to store
	  compressed RAM pages.  zsmalloc uses virtual memory mapping
	  in order to reduce fragmentation.  However, this results in a
	  non-standard allocator interface where a handle, not a pointer, is
	  returned by an alloc().  This handle must be mapped in order to
	  access the allocated space.
config PGTABLE_MAPPING
	bool "Use page table mapping to access object in zsmalloc"
	depends on ZSMALLOC
	help
	  By default, zsmalloc uses a copy-based object mapping method to
	  access allocations that span two pages. However, if a particular
	  architecture (ex, ARM) performs VM mapping faster than copying,
	  then you should select this. This causes zsmalloc to use page table
	  mapping rather than copying for object mapping.

	  You can check speed with zsmalloc benchmark:
	  https://github.com/spartacus06/zsmapbench

config ZSMALLOC_STAT
	bool "Export zsmalloc statistics"
	depends on ZSMALLOC
	select DEBUG_FS
	help
	  This option enables code in the zsmalloc to collect various
	  statistics about what's happening in zsmalloc and exports that
	  information to userspace via debugfs.
	  If unsure, say N.

config GENERIC_EARLY_IOREMAP
	bool

config MAX_STACK_SIZE_MB
	int "Maximum user stack size for 32-bit processes (MB)"
	default 80
	range 8 256 if METAG
	range 8 2048
	depends on STACK_GROWSUP && (!64BIT || COMPAT)
	help
	  This is the maximum stack size in Megabytes in the VM layout of 32-bit
	  user processes when the stack grows upwards (currently only on parisc
	  and metag arch). The stack will be located at the highest memory
	  address minus the given value, unless the RLIMIT_STACK hard limit is
	  changed to a smaller value in which case that is used.

	  A sane initial value is 80 MB.

# For architectures that support deferred memory initialisation
config ARCH_SUPPORTS_DEFERRED_STRUCT_PAGE_INIT
	bool

config DEFERRED_STRUCT_PAGE_INIT
	bool "Defer initialisation of struct pages to kthreads"
	default n
	depends on ARCH_SUPPORTS_DEFERRED_STRUCT_PAGE_INIT
	depends on NO_BOOTMEM && MEMORY_HOTPLUG
	depends on !FLATMEM
	help
	  Ordinarily all struct pages are initialised during early boot in a
	  single thread. On very large machines this can take a considerable
	  amount of time.
If this option is set, large machines will bring up a subset of memmap at boot and then initialise the rest in parallel by starting one-off "pgdatinitX" kernel thread for each node X. This has a potential performance impact on processes running early in the lifetime of the system until these kthreads finish the initialisation. config IDLE_PAGE_TRACKING bool "Enable idle page tracking" depends on SYSFS && MMU select PAGE_EXTENSION if !64BIT help This feature allows to estimate the amount of user pages that have not been touched during a given period of time. This information can be useful to tune memory cgroup limits and/or for job placement within a compute cluster. See Documentation/vm/idle_page_tracking.txt for more details. config ZONE_DEVICE bool "Device memory (pmem, etc...) hotplug support" depends on MEMORY_HOTPLUG depends on MEMORY_HOTREMOVE depends on SPARSEMEM_VMEMMAP depends on X86_64 #arch_add_memory() comprehends device memory help Device memory hotplug support allows for establishing pmem, or other device driver discovered memory regions, in the memmap. This allows pfn_to_page() lookups of otherwise "device-physical" addresses which is needed for using a DAX mapping in an O_DIRECT operation, among other things. If FS_DAX is enabled, then say Y. config FRAME_VECTOR bool config ARCH_USES_HIGH_VMA_FLAGS bool config ARCH_HAS_PKEYS bool
{ "pile_set_name": "Github" }
# An extremely simple HTTP server import socket, sys, time # Server runs on all IP addresses by default HOST='' # 8080 can be used without root priviledges PORT=8080 BUFLEN=8192 # buffer size s = socket.socket(socket.AF_INET6, socket.SOCK_STREAM) try: print "Starting HTTP server on port ", PORT s.bind((HOST,PORT,0,0)) except socket.error : print "Cannot bind to port :",PORT sys.exit(-1) s.listen(10) # maximum 10 queued connections while True: # a real server would be multithreaded and would catch exceptions conn, addr = s.accept() print "Connection from ", addr data='' while not '\n' in data : # wait until first line has been received data = data+conn.recv(BUFLEN) if data.startswith('GET'): # GET request conn.send('HTTP/1.0 404 Not Found\r\n') # a real server should serve files else: # other type of HTTP request conn.send('HTTP/1.0 501 Not implemented\r\n') now = time.strftime("%a, %d %b %Y %H:%M:%S", time.localtime()) conn.send('Date: ' + now +'\r\n') conn.send('Server: Dummy-HTTP-Server\r\n') conn.send('\r\n') conn.shutdown(socket.SHUT_RDWR) conn.close()
{ "pile_set_name": "Github" }
<?php /*********************************************************************************** * X2Engine Open Source Edition is a customer relationship management program developed by * X2 Engine, Inc. Copyright (C) 2011-2019 X2 Engine Inc. * * This program is free software; you can redistribute it and/or modify it under * the terms of the GNU Affero General Public License version 3 as published by the * Free Software Foundation with the addition of the following permission added * to Section 15 as permitted in Section 7(a): FOR ANY PART OF THE COVERED WORK * IN WHICH THE COPYRIGHT IS OWNED BY X2ENGINE, X2ENGINE DISCLAIMS THE WARRANTY * OF NON INFRINGEMENT OF THIRD PARTY RIGHTS. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS * FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more * details. * * You should have received a copy of the GNU Affero General Public License along with * this program; if not, see http://www.gnu.org/licenses or write to the Free * Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301 USA. * * You can contact X2Engine, Inc. P.O. Box 610121, Redwood City, * California 94061, USA. or at email address contact@x2engine.com. * * The interactive user interfaces in modified source and object code versions * of this program must display Appropriate Legal Notices, as required under * Section 5 of the GNU Affero General Public License version 3. * * In accordance with Section 7(b) of the GNU Affero General Public License version 3, * these Appropriate Legal Notices must retain the display of the "Powered by * X2 Engine" logo. If the display of the logo is not reasonably feasible for * technical reasons, the Appropriate Legal Notices must display the words * "Powered by X2 Engine". 
**********************************************************************************/
?>
<!-- Hidden advanced-filter panel for the actions list; toggled elsewhere in the UI. -->
<div id="advanced-controls" class="form" style="display:none;">
<?php
// View fragment: renders a one-line "Show me ... assigned to ... that ... order by ..."
// filter form for actions.  All $complete/$assignedTo/$dateType/$dateRange/$start/
// $end/$orderType/$order variables are expected to be extracted into scope by the
// rendering controller; each dropdown falls back to a default when its variable is
// empty.  All visible labels go through Yii::t() for translation.
Yii::import('application.extensions.CJuiDateTimePicker.CJuiDateTimePicker');
echo CHtml::form();
// Completion-status, assignee, date-type and date-range selectors, joined into a
// single translated sentence.
echo Yii::t('actions',"Show me")." "
    .CHtml::dropDownList('complete',!empty($complete)?$complete:'No',array('No'=>Yii::t('actions','unfinished'),'Yes'=>Yii::t('actions','complete'),'all'=>Yii::t('actions','all')))
    ." ".Yii::t('actions',"{actions} assigned to", array('{actions}'=>Modules::displayName()))." "
    .CHtml::dropDownList('assignedTo',!empty($assignedTo)?$assignedTo:'me',array('me'=>Yii::t('actions','me'),'both'=>Yii::t('actions','me or anyone'),'all'=>Yii::t('actions','everyone')))
    ." ".Yii::t('actions',"that")." "
    .CHtml::dropDownList('dateType',!empty($dateType)?$dateType:'due',array('due'=>Yii::t('actions','are due'),'create'=>Yii::t('actions','were created')))
    ." "
    .CHtml::dropDownList('dateRange',!empty($dateRange)?$dateRange:'today',array(
        'today'=>Yii::t('actions','today'),
        'tomorrow'=>Yii::t('actions','tomorrow'),
        'week'=>Yii::t('actions','this week'),
        'month'=>Yii::t('actions','this month'),
        'all'=>Yii::t('actions','any time'),
        'range'=>Yii::t('actions','between these dates'),
    ));
// Explicit start/end date pickers; shown only when "between these dates" is the
// current range (the <script> block below also toggles this span on change).
echo "<span id='date-controls' style='".((!empty($dateRange) && $dateRange=='range')?"":"display:none")."'> (";
Yii::app()->controller->widget('CJuiDateTimePicker', array(
    'name' => 'start',
    'value'=>!empty($start)?$start:'',
    // 'title'=>Yii::t('actions','Start Date'),
    // 'model'=>$model, //Model object
    // 'attribute'=>$field->fieldName, //attribute name
    'mode' => 'date', //use "time","date" or "datetime" (default)
    'options' => array(
        'dateFormat' => Formatter::formatDatePicker(),
        'changeMonth' => true,
        'changeYear' => true,
    ), // jquery plugin options
    'htmlOptions' => array('id' => 'startDate', 'width' => 20),
    // Empty string selects the widget's built-in (English) localization.
    'language' => (Yii::app()->language == 'en') ?
        '' : Yii::app()->getLanguage(),
));
echo " and ";
// End-of-range date picker; mirrors the start picker above.
Yii::app()->controller->widget('CJuiDateTimePicker', array(
    'name' => 'end',
    // 'value'=>$startDate,
    'value'=>!empty($end)?$end:'',
    // 'title'=>Yii::t('actions','Start Date'),
    // 'model'=>$model, //Model object
    // 'attribute'=>$field->fieldName, //attribute name
    'mode' => 'date', //use "time","date" or "datetime" (default)
    'options' => array(
        'dateFormat' => Formatter::formatDatePicker(),
        'changeMonth' => true,
        'changeYear' => true,
    ), // jquery plugin options
    'htmlOptions' => array('id' => 'endDate', 'width' => 20),
    'language' => (Yii::app()->language == 'en') ?
        '' : Yii::app()->getLanguage(),
));
echo ") </span>";
// Sort direction and sort key selectors, then the submit button.
echo" ".Yii::t('actions',"and order them by")." "
    .CHtml::dropDownList('orderType',!empty($orderType)?$orderType:'desc',array('desc'=>Yii::t('actions','descending'),'asc'=>Yii::t('actions','ascending')))
    ." "
    .CHtml::dropDownList('order',!empty($order)?$order:'due',array('due'=>Yii::t('actions','due date'),'create'=>Yii::t('actions','create date'),'priority'=>Yii::t('actions','priority')));
echo " ".CHtml::submitButton(Yii::t('app','Go'),array('class'=>'x2-button','style'=>'padding: 1px 15px;display:inline;'));
echo CHtml::endForm();
?>
</div>
<script>
// Show the explicit date pickers only while "between these dates" is selected.
$('#dateRange').on('change',function(){
    if($('#dateRange').val()=='range'){
        $('#date-controls').fadeIn();
    }else{
        $('#date-controls').fadeOut();
    }
});
</script>
{ "pile_set_name": "Github" }
<?php
/*
 * Copyright 2014 Google Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not
 * use this file except in compliance with the License. You may obtain a copy of
 * the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations under
 * the License.
 */

/**
 * Auto-generated data model for the Display & Video 360 API
 * "CombinedAudience" resource: a plain property bag with one
 * setter/getter pair per API field.  Google_Model (the base class,
 * declared elsewhere in the client library) maps the public property
 * names onto the JSON wire format, so the property names must not be
 * changed.
 *
 * NOTE(review): field semantics below are inferred from the names only —
 * confirm against the DisplayVideo API reference.
 */
class Google_Service_DisplayVideo_CombinedAudience extends Google_Model
{
  // Numeric identifier of the combined audience — presumably output-only;
  // verify against the API docs.
  public $combinedAudienceId;
  // Human-readable display name of the audience.
  public $displayName;
  // Resource name (likely "combinedAudiences/{id}" style) — TODO confirm.
  public $name;

  /**
   * Sets the combined audience ID.
   * @param mixed $combinedAudienceId
   */
  public function setCombinedAudienceId($combinedAudienceId)
  {
    $this->combinedAudienceId = $combinedAudienceId;
  }
  /**
   * Returns the combined audience ID.
   * @return mixed
   */
  public function getCombinedAudienceId()
  {
    return $this->combinedAudienceId;
  }
  /**
   * Sets the display name.
   * @param mixed $displayName
   */
  public function setDisplayName($displayName)
  {
    $this->displayName = $displayName;
  }
  /**
   * Returns the display name.
   * @return mixed
   */
  public function getDisplayName()
  {
    return $this->displayName;
  }
  /**
   * Sets the resource name.
   * @param mixed $name
   */
  public function setName($name)
  {
    $this->name = $name;
  }
  /**
   * Returns the resource name.
   * @return mixed
   */
  public function getName()
  {
    return $this->name;
  }
}
{ "pile_set_name": "Github" }
cleo.search.network.typeahead.config.name=i019 cleo.search.network.typeahead.config.partition.start=90000000 cleo.search.network.typeahead.config.partition.count=5000000 cleo.search.network.typeahead.config.homeDir=network-typeahead/member/i019 cleo.search.network.typeahead.config.elementSerializer.class=cleo.search.TypeaheadElementSerializer cleo.search.network.typeahead.config.connectionFilter.class=cleo.search.connection.TransitivePartitionConnectionFilter cleo.search.network.typeahead.config.elementStoreDir=${cleo.search.network.typeahead.config.homeDir}/element-store cleo.search.network.typeahead.config.elementStoreIndexStart=${cleo.search.network.typeahead.config.partition.start} cleo.search.network.typeahead.config.elementStoreCapacity=${cleo.search.network.typeahead.config.partition.count} cleo.search.network.typeahead.config.elementStoreSegmentMB=32 cleo.search.network.typeahead.config.elementStoreCached=true cleo.search.network.typeahead.config.connectionsStoreDir=${cleo.search.network.typeahead.config.homeDir}/weighted-connections-store cleo.search.network.typeahead.config.connectionsStoreIndexStart=0 cleo.search.network.typeahead.config.connectionsStoreCapacity=150000000 cleo.search.network.typeahead.config.connectionsStoreSegmentMB=64 cleo.search.network.typeahead.config.filterPrefixLength=2
{ "pile_set_name": "Github" }
/* Localized versions of Info.plist keys */
{ "pile_set_name": "Github" }
/**************************************************************************** ** ** Copyright (C) 2009 Stephen Kelly <steveire@gmail.com> ** All rights reserved. ** Contact: Nokia Corporation (qt-info@nokia.com) ** ** This file is part of the test suite of the Qt Toolkit. ** ** This file is free software: you can redistribute it and/or modify ** it under the terms of the GNU General Public License as published by ** the Free Software Foundation, either version 3 of the License, or ** (at your option) any later version. ** ** $QT_BEGIN_LICENSE:LGPL$ ** GNU Lesser General Public License Usage ** This file may be used under the terms of the GNU Lesser General Public ** License version 2.1 as published by the Free Software Foundation and ** appearing in the file LICENSE.LGPL included in the packaging of this ** file. Please review the following information to ensure the GNU Lesser ** General Public License version 2.1 requirements will be met: ** http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html. ** ** In addition, as a special exception, Nokia gives you certain additional ** rights. These rights are described in the Nokia Qt LGPL Exception ** version 1.1, included in the file LGPL_EXCEPTION.txt in this package. ** ** GNU General Public License Usage ** Alternatively, this file may be used under the terms of the GNU General ** Public License version 3.0 as published by the Free Software Foundation ** and appearing in the file LICENSE.GPL included in the packaging of this ** file. Please review the following information to ensure the GNU General ** Public License version 3.0 requirements will be met: ** http://www.gnu.org/copyleft/gpl.html. ** ** Other Usage ** Alternatively, this file may be used in accordance with the terms and ** conditions contained in a signed written agreement between you and Nokia. 
**
**
**
**
**
** $QT_END_LICENSE$
**
****************************************************************************/

#ifndef DYNAMICTREEMODEL_H
#define DYNAMICTREEMODEL_H

#include <QtCore/QAbstractItemModel>
#include <QtCore/QHash>
#include <QtCore/QList>

/**
 * A tree model whose structure can be mutated through command objects
 * (insert/move/reset below).  Part of the Qt test suite (see the license
 * header) — presumably used to exercise model-change signal handling in
 * views and proxy models; confirm against the accompanying tests.
 *
 * Items are identified by qint64 ids; m_items maps id -> display string
 * and m_childItems maps a parent id to its child-id table.
 */
class DynamicTreeModel : public QAbstractItemModel
{
  Q_OBJECT

public:
  DynamicTreeModel( QObject *parent = 0 );

  // QAbstractItemModel interface (implemented in the .cpp).
  QModelIndex index( int row, int column, const QModelIndex &parent = QModelIndex() ) const;
  QModelIndex parent( const QModelIndex &index ) const;
  int rowCount( const QModelIndex &index = QModelIndex() ) const;
  int columnCount( const QModelIndex &index = QModelIndex() ) const;
  QVariant data( const QModelIndex &index, int role = Qt::DisplayRole ) const;

  // Removes all items from the model.
  void clear();

protected slots:

  /**
  Finds the parent id of the string with id @p searchId.

  Returns -1 if not found.
  */
  qint64 findParentId( qint64 searchId ) const;

private:
  QHash<qint64, QString> m_items;                  // item id -> display text
  QHash<qint64, QList<QList<qint64> > > m_childItems; // parent id -> rows of child ids

  qint64 nextId;                                   // next id handed out by newId()
  qint64 newId() { return nextId++; };             // allocates a fresh item id

  QModelIndex m_nextParentIndex;
  int m_nextRow;

  int m_depth;
  int maxDepth;                                    // note: no m_ prefix, unlike siblings

  // Command classes mutate the private tables directly.
  friend class ModelInsertCommand;
  friend class ModelMoveCommand;
  friend class ModelResetCommand;
  friend class ModelResetCommandFixed;
};

/**
 * Abstract base for commands that mutate a DynamicTreeModel.  A command
 * is configured with an ancestor-row path (setAncestorRowNumbers) that
 * identifies the parent index, plus a start/end row range, then executed
 * via doCommand().
 */
class ModelChangeCommand : public QObject
{
  Q_OBJECT
public:

  ModelChangeCommand( DynamicTreeModel *model, QObject *parent = 0 );

  virtual ~ModelChangeCommand() {}

  // Path of row numbers leading from the root to the target parent.
  void setAncestorRowNumbers( QList<int> rowNumbers ) { m_rowNumbers = rowNumbers; }

  // Resolves a row path to a QModelIndex in m_model.
  QModelIndex findIndex( QList<int> rows );

  void setStartRow( int row ) { m_startRow = row; }

  void setEndRow( int row ) { m_endRow = row; }

  void setNumCols( int cols ) { m_numCols = cols; }

  // Executes the mutation; implemented by each concrete command.
  virtual void doCommand() = 0;

protected:
  DynamicTreeModel *m_model;
  QList<int> m_rowNumbers;
  int m_numCols;
  int m_startRow;
  int m_endRow;

};

typedef QList<ModelChangeCommand *> ModelChangeCommandList;

/** Command that inserts rows m_startRow..m_endRow under the configured parent. */
class ModelInsertCommand : public ModelChangeCommand
{
  Q_OBJECT

public:

  ModelInsertCommand( DynamicTreeModel *model, QObject *parent = 0 );
  virtual ~ModelInsertCommand() {}

  virtual void doCommand();
};

/**
 * Command that moves rows from the source parent (ancestor path from the
 * base class) to a destination parent/row.  Subclasses override the
 * emitPreSignal/emitPostSignal hooks to vary which change signals are
 * emitted around the move.
 */
class ModelMoveCommand : public ModelChangeCommand
{
  Q_OBJECT
public:
  ModelMoveCommand( DynamicTreeModel *model, QObject *parent );

  virtual ~ModelMoveCommand() {}

  virtual bool emitPreSignal( const QModelIndex &srcParent, int srcStart, int srcEnd, const QModelIndex &destParent, int destRow );

  virtual void doCommand();

  virtual void emitPostSignal();

  void setDestAncestors( QList<int> rows ) { m_destRowNumbers = rows; }

  void setDestRow( int row ) { m_destRow = row; }

protected:
  QList<int> m_destRowNumbers;
  int m_destRow;
};

/**
  A command which does a move and emits a reset signal.
*/
class ModelResetCommand : public ModelMoveCommand
{
  Q_OBJECT
public:
  ModelResetCommand( DynamicTreeModel *model, QObject *parent = 0 );

  virtual ~ModelResetCommand();

  virtual bool emitPreSignal( const QModelIndex &srcParent, int srcStart, int srcEnd, const QModelIndex &destParent, int destRow );
  virtual void emitPostSignal();

};

/**
  A command which does a move and emits a beginResetModel and endResetModel signals.
*/
class ModelResetCommandFixed : public ModelMoveCommand
{
  Q_OBJECT
public:
  ModelResetCommandFixed( DynamicTreeModel *model, QObject *parent = 0 );

  virtual ~ModelResetCommandFixed();

  virtual bool emitPreSignal( const QModelIndex &srcParent, int srcStart, int srcEnd, const QModelIndex &destParent, int destRow );
  virtual void emitPostSignal();

};

#endif
{ "pile_set_name": "Github" }