| file_name (large_string, lengths 4–140) | prefix (large_string, lengths 0–39k) | suffix (large_string, lengths 0–36.1k) | middle (large_string, lengths 0–29.4k) | fim_type (large_string, 4 classes) |
|---|---|---|---|---|
service.rs | use std::io::Read;
use std::sync::{Arc, Mutex};
use std::collections::HashMap;
use futures::{self, Future, BoxFuture};
use curl::easy::{Easy, List};
use tokio_core::reactor::Handle;
use tokio_curl::{Session, PerformError};
use serde_json::{from_value, from_str, Value};
pub type Fut<T> = BoxFuture<T, PerformError>;... | {
handle: Handle,
marathon_url: String,
mesos_url: String,
max_mem_usage: f64,
max_cpu_usage: f64,
multiplier: f64,
max_instances: i64,
}
impl Service {
pub fn new(handle: Handle, marathon_url: String, mesos_url: String,
max_mem_usage: f64, max_cpu_usage: f64,
... | Service | identifier_name |
service.rs | use std::io::Read;
use std::sync::{Arc, Mutex};
use std::collections::HashMap;
use futures::{self, Future, BoxFuture};
use curl::easy::{Easy, List};
use tokio_core::reactor::Handle;
use tokio_curl::{Session, PerformError};
use serde_json::{from_value, from_str, Value};
pub type Fut<T> = BoxFuture<T, PerformError>;... |
("AUTOSCALE_MEM_PERCENT", v) => {
max_mem_usage = from_value(v.clone()).unwrap();
}
("AUTOSCALE_CPU_PERCENT", v) => {
max_cpu_usage = from_value(v.clone()).unwrap();
}
_ =... | {
max_instances = from_value(v.clone()).unwrap();
} | conditional_block |
audio_processing.py | import os
import tensorflow as tf
def get_label(file_path):
# each file's label is its directory's name
parts = tf.strings.split(file_path, os.path.sep)
return parts[-2]
def prepare_label_dataset(file_paths):
# create dataset by splitting input tensor to individual items
label_ds = tf.data.Data... | tf.range(-N, N + 1, 1, tf.float32), padded[t:t + 2 * N + 1], 1
) / denominator, (1, feat.shape[1])
)
], 0) # [t : t+2*N+1] == [(N+t)-N : (N+t)+N+1]
return delta_feat
def get_mfcc(
log_mel_spectrogram,
num_mel_bins_to_pick=12,
add_energy=False,
... | random_line_split | |
audio_processing.py | import os
import tensorflow as tf
def get_label(file_path):
# each file's label is its directory's name
parts = tf.strings.split(file_path, os.path.sep)
return parts[-2]
def prepare_label_dataset(file_paths):
# create dataset by splitting input tensor to individual items
label_ds = tf.data.Data... |
# https://www.tensorflow.org/api_docs/python/tf/signal/mfccs_from_log_mel_spectrograms#for_example
# Compute MFCCs from log mel spectrograms
# Take num_mel_bins_to_pick bins
mfcc = tf.signal.mfccs_from_log_mel_spectrograms(log_mel_spectrogram)[
..., :num_mel_bins_to_pick]
# add symmetric_z... | energy = tf.slice(
log_mel_spectrogram, [0, log_mel_spectrogram.shape[1] - 1],
[log_mel_spectrogram.shape[0], 1]
)
log_mel_spectrogram = tf.slice(
log_mel_spectrogram, [0, 0],
[log_mel_spectrogram.shape[0], log_mel_spectrogram.shape[1] - 1]
) | conditional_block |
audio_processing.py | import os
import tensorflow as tf
def | (file_path):
# each file's label is its directory's name
parts = tf.strings.split(file_path, os.path.sep)
return parts[-2]
def prepare_label_dataset(file_paths):
# create dataset by splitting input tensor to individual items
label_ds = tf.data.Dataset.from_tensor_slices(file_paths)
# extract ... | get_label | identifier_name |
audio_processing.py | import os
import tensorflow as tf
def get_label(file_path):
# each file's label is its directory's name
parts = tf.strings.split(file_path, os.path.sep)
return parts[-2]
def prepare_label_dataset(file_paths):
# create dataset by splitting input tensor to individual items
label_ds = tf.data.Data... |
def get_stft(waveform, frame_length=512, frame_step=256):
# apply short-time Fourier transform
# splits signal into frames and applies Fourier transform on those
# by default uses smallest power of 2 enclosing frame_length for fft size
# uses hann window, an alternative would be hamming window
# ... | label_ds = prepare_label_dataset(file_paths)
if len(label_list) > 0:
label_ds = label_ds.map(
lambda label: tf.argmax(label == label_list),
num_parallel_calls=tf.data.experimental.AUTOTUNE
)
return tf.data.Dataset.zip((dataset, label_ds)) | identifier_body |
dap_cortex-m7.py | efc_cmd_getd = 0x5a000000
efc_cmd_wp = 0x5a000001
efc_cmd_wpl = 0x5a000002
efc_cmd_ea = 0x5a000005
efc_cmd_epa = 0x5a000007
efc_cmd_slb = 0x5a000008
efc_cmd_clb = 0x5a000009
efc_cmd_glb = 0x5a00000A
efc_cmd_sgpb = 0x5a00000B
efc_cmd_cgpb = 0x5a00000C
efc_cmd_ggpb = 0x5a00000D
if "RH71" in device:
FLASH_ST... |
def is_target_running():#mplab
global g_is_running
dhcsr = dev.Read32(arm.DHCSR)
state = 0 == (dhcsr & 0x20000) # S_HALT
if state != g_is_running:
log.info("Debug: Changed running state to %s" % state)
g_is_running = state
return g_is_running
def end_debug_session():#mplab
de... | reset_and_halt() | identifier_body |
dap_cortex-m7.py | efc_cmd_getd = 0x5a000000
efc_cmd_wp = 0x5a000001
efc_cmd_wpl = 0x5a000002
efc_cmd_ea = 0x5a000005
efc_cmd_epa = 0x5a000007
efc_cmd_slb = 0x5a000008
efc_cmd_clb = 0x5a000009
efc_cmd_glb = 0x5a00000A
efc_cmd_sgpb = 0x5a00000B
efc_cmd_cgpb = 0x5a00000C
efc_cmd_ggpb = 0x5a00000D
if "RH71" in device:
FLASH_ST... | (type_of_mem, address, length, data):#mplab
log.info("Prog: Reading %d bytes from address 0x%0x of %s memory..." % (length, address, type_of_mem))
global need_reset_for_read_operations
if need_reset_for_read_operations:
reset_and_halt() # necessary for reading flash with specific projects, ref MPLA... | prog_read | identifier_name |
dap_cortex-m7.py | efc_cmd_getd = 0x5a000000
efc_cmd_wp = 0x5a000001
efc_cmd_wpl = 0x5a000002
efc_cmd_ea = 0x5a000005
efc_cmd_epa = 0x5a000007
efc_cmd_slb = 0x5a000008
efc_cmd_clb = 0x5a000009
efc_cmd_glb = 0x5a00000A
efc_cmd_sgpb = 0x5a00000B
efc_cmd_cgpb = 0x5a00000C
efc_cmd_ggpb = 0x5a00000D
if "RH71" in device:
FLASH_ST... |
global need_reset_for_read_operations
need_reset_for_read_operations = True if flash_strategy == 1 else False
def bitsInByte(byteValue):
for i in xrange(8):
yield (byteValue >> i) & 1
def log_efc_fsr_error(fsr):
err_string = ""
if fsr & 0x00080000: # FSR_MECCEMSB
err_string = "MEC... | was_running = True
halt_or_raise() | conditional_block |
dap_cortex-m7.py | efc_cmd_getd = 0x5a000000
efc_cmd_wp = 0x5a000001
efc_cmd_wpl = 0x5a000002
efc_cmd_ea = 0x5a000005
efc_cmd_epa = 0x5a000007
efc_cmd_slb = 0x5a000008
efc_cmd_clb = 0x5a000009
efc_cmd_glb = 0x5a00000A
efc_cmd_sgpb = 0x5a00000B
efc_cmd_cgpb = 0x5a00000C
efc_cmd_ggpb = 0x5a00000D
if "RH71" in device:
FLASH_ST... | dev.Disconnect() | random_line_split | |
ctx.rs | //! The ØMQ context type.
use crate::{auth::server::AuthServer, error::*};
use libzmq_sys as sys;
use sys::errno;
use lazy_static::lazy_static;
use serde::{Deserialize, Serialize};
use std::{
os::raw::{c_int, c_void},
ptr, str,
sync::Arc,
thread,
};
lazy_static! {
static ref GLOBAL_CONTEXT: Ctx ... | /// A value of `true` indicates that all new sockets are given a
/// linger timeout of zero.
///
pub fn no_linger(&self) -> bool {
!self.raw.as_ref().get_bool(RawCtxOption::Blocky)
}
/// When set to `true`, all new sockets are given a linger timeout
/// of zero.
///
/// # Defau... | self.raw.as_ref().get(RawCtxOption::SocketLimit)
}
| identifier_body |
ctx.rs | //! The ØMQ context type.
use crate::{auth::server::AuthServer, error::*};
use libzmq_sys as sys;
use sys::errno;
use lazy_static::lazy_static;
use serde::{Deserialize, Serialize};
use std::{
os::raw::{c_int, c_void},
ptr, str,
sync::Arc,
thread,
};
lazy_static! {
static ref GLOBAL_CONTEXT: Ctx ... |
Self { ctx }
}
}
/// A config for a [`Ctx`].
///
/// Usefull in configuration files.
///
/// [`Ctx`]: struct.Ctx.html
#[derive(Default, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub struct CtxConfig {
io_threads: Option<i32>,
max_msg_size: Option<i32>,
max_sockets: Option<i32>,
... |
panic!(msg_from_errno(unsafe { sys::zmq_errno() }));
}
| conditional_block |
ctx.rs | //! The ØMQ context type.
use crate::{auth::server::AuthServer, error::*};
use libzmq_sys as sys;
use sys::errno;
use lazy_static::lazy_static;
use serde::{Deserialize, Serialize};
use std::{
os::raw::{c_int, c_void},
ptr, str,
sync::Arc,
thread,
};
lazy_static! {
static ref GLOBAL_CONTEXT: Ctx ... | IOThreads,
MaxSockets,
MaxMsgSize,
SocketLimit,
IPV6,
Blocky,
}
impl From<RawCtxOption> for c_int {
fn from(r: RawCtxOption) -> c_int {
match r {
RawCtxOption::IOThreads => sys::ZMQ_IO_THREADS as c_int,
RawCtxOption::MaxSockets => sys::ZMQ_MAX_SOCKETS as c_in... | }
#[derive(Copy, Clone, Debug)]
enum RawCtxOption { | random_line_split |
ctx.rs | //! The ØMQ context type.
use crate::{auth::server::AuthServer, error::*};
use libzmq_sys as sys;
use sys::errno;
use lazy_static::lazy_static;
use serde::{Deserialize, Serialize};
use std::{
os::raw::{c_int, c_void},
ptr, str,
sync::Arc,
thread,
};
lazy_static! {
static ref GLOBAL_CONTEXT: Ctx ... | &mut self, value: i32) -> &mut Self {
self.inner.set_io_threads(Some(value));
self
}
/// See [`set_max_msg_size`].
///
/// [`set_max_msg_size`]: struct.Ctx.html#method.set_max_msg_size
pub fn max_msg_size(&mut self, value: i32) -> &mut Self {
self.inner.set_max_msg_size(Some... | o_threads( | identifier_name |
common_domain_analyser.py | '''
Verify the domain against the list of most popular domains from OpenDNS
(https://github.com/opendns/public-domain-lists). Let's see how useful
it is to prevent phishing domains.
'''
from enum import Enum
import re
import tldextract
import wordsegment
from nostril import nonsense
import idna
from confusable_homogly... |
return record
class DomainMatchingOption(Enum):
'''
Control how strict we want to do our matching.
'''
# For example applefake.it will match with apple.com case ['apple'] is
# a subset of ['apple', 'fake']
SUBSET_MATCH = 0
# Similar but use in instead of issubset so that the ord... | record['analysers'].append({
'analyser': type(self).__name__,
'output': results,
}) | conditional_block |
common_domain_analyser.py | '''
Verify the domain against the list of most popular domains from OpenDNS
(https://github.com/opendns/public-domain-lists). Let's see how useful
it is to prevent phishing domains.
'''
from enum import Enum
import re
import tldextract
import wordsegment
from nostril import nonsense
import idna
from confusable_homogly... | '''
Generate features to detect outliers in the stream. In our case, the outliers is
the 'suspicious' phishing domains.
'''
NOSTRIL_LENGTH_LIMIT = 6
# pylint: disable=invalid-name
def run(self, record):
'''
The list of features will be:
- The number of domain parts, for ... | identifier_body | |
common_domain_analyser.py | '''
Verify the domain against the list of most popular domains from OpenDNS
(https://github.com/opendns/public-domain-lists). Let's see how useful
it is to prevent phishing domains.
'''
from enum import Enum
import re
import tldextract
import wordsegment
from nostril import nonsense
import idna
from confusable_homogly... | (Enum):
'''
Control how strict we want to do our matching.
'''
# For example applefake.it will match with apple.com case ['apple'] is
# a subset of ['apple', 'fake']
SUBSET_MATCH = 0
# Similar but use in instead of issubset so that the order is preserved
ORDER_MATCH = 1
class DomainMa... | DomainMatchingOption | identifier_name |
common_domain_analyser.py | '''
Verify the domain against the list of most popular domains from OpenDNS
(https://github.com/opendns/public-domain-lists). Let's see how useful
it is to prevent phishing domains.
'''
from enum import Enum
import re
import tldextract
import wordsegment
from nostril import nonsense
import idna
from confusable_homogly... | if index == len(alt_characters):
yield current
else:
for alt_c in alt_characters[index]:
yield from self._generate_alternatives(alt_characters,
index + 1,
... | def _generate_alternatives(self, alt_characters, index=0, current=''):
'''
Generate all alternative ASCII names of a domain using the list of all
alternative characters.
''' | random_line_split |
server.go | // Copyright (c) 2014, Markover Inc.
// Use of this source code is governed by the MIT
// license that can be found in the LICENSE file.
// Source code and contact info at http://github.com/poptip/ftc
package ftc
import (
"encoding/json"
"expvar"
"fmt"
"io"
"net/http"
"strings"
"time"
"code.google.com/p/go.ne... | s.Handler != nil {
go s.Handler(c.pubConn)
}
}
// ServeHTTP implements the http.Handler interface for an FTC Server.
func (s *server) ServeHTTP(w http.ResponseWriter, r *http.Request) {
remoteAddr := r.Header.Get("X-Forwarded-For")
if len(remoteAddr) == 0 {
remoteAddr = r.RemoteAddr
}
glog.Infof("%s (%s) %s ... | glog.Errorf("could not encode open payload: %v", err)
return
}
if | conditional_block |
server.go | // Copyright (c) 2014, Markover Inc.
// Use of this source code is governed by the MIT
// license that can be found in the LICENSE file.
// Source code and contact info at http://github.com/poptip/ftc
package ftc
import (
"encoding/json"
"expvar"
"fmt"
"io"
"net/http"
"strings"
"time"
"code.google.com/p/go.ne... | handshakeData returns the JSON encoded data needed
// for the initial connection handshake.
func handshakeData(c *conn) ([]byte, error) {
return json.Marshal(map[string]interface{}{
"pingInterval": 25000,
"pingTimeout": 60000,
"upgrades": getValidUpgrades(),
"sid": c.id,
})
}
// serverError se... | emoteAddr := r.Header.Get("X-Forwarded-For")
if len(remoteAddr) == 0 {
remoteAddr = r.RemoteAddr
}
glog.Infof("%s (%s) %s %s %s", r.Proto, r.Header.Get("X-Forwarded-Proto"), r.Method, remoteAddr, r.URL)
transport := r.FormValue(paramTransport)
if strings.HasPrefix(r.URL.Path, s.basePath) && !validTransports[tra... | identifier_body |
server.go | // Copyright (c) 2014, Markover Inc.
// Use of this source code is governed by the MIT
// license that can be found in the LICENSE file.
// Source code and contact info at http://github.com/poptip/ftc
package ftc
import (
"encoding/json"
"expvar"
"fmt"
"io"
"net/http"
"strings"
"time"
"code.google.com/p/go.ne... | // options and handler. If nil options are passed, the defaults
// specified in the constants above are used instead.
func NewServer(o *Options, h Handler) *server {
opts := Options{}
if o != nil {
opts = *o
}
if len(opts.BasePath) == 0 {
opts.BasePath = defaultBasePath
}
if len(opts.CookieName) == 0 {
opts... | CookieName string
}
// NewServer allocates and returns a new server with the given | random_line_split |
server.go | // Copyright (c) 2014, Markover Inc.
// Use of this source code is governed by the MIT
// license that can be found in the LICENSE file.
// Source code and contact info at http://github.com/poptip/ftc
package ftc
import (
"encoding/json"
"expvar"
"fmt"
"io"
"net/http"
"strings"
"time"
"code.google.com/p/go.ne... | conn) ([]byte, error) {
return json.Marshal(map[string]interface{}{
"pingInterval": 25000,
"pingTimeout": 60000,
"upgrades": getValidUpgrades(),
"sid": c.id,
})
}
// serverError sends a JSON-encoded message to the given io.Writer
// with the given error code.
func serverError(w io.Writer, code ... | shakeData(c * | identifier_name |
symbolizer.go | // Copyright 2022-2023 The Parca Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to... | (key string, locations []*pb.Location) int {
locationsToSymbolize := 0
for _, loc := range locations {
if _, failedBefore := s.symbolizationFailed[key][loc.Address]; failedBefore {
continue
}
pcRange, found := s.pcRanges[key]
if !found {
locationsToSymbolize++
continue
}
if pcRange[0] <= loc.Addr... | countLocationsToSymbolize | identifier_name |
symbolizer.go | // Copyright 2022-2023 The Parca Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to... | return nil, fmt.Errorf("failed to create Symtab liner: %w", err)
}
return lnr, nil
default:
return nil, ErrLinerFailed
}
}
// pcToLines returns the line number of the given PC while keeping the track of symbolization attempts and failures.
func (s *Symbolizer) pcToLines(liner liner, key string, addr uint64... | random_line_split | |
symbolizer.go | // Copyright 2022-2023 The Parca Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to... |
if pcRange[0] <= loc.Address && loc.Address <= pcRange[1] {
locationsToSymbolize++
}
}
return locationsToSymbolize
}
// newLiner creates a new liner for the given mapping and object file path.
func (s *Symbolizer) newLiner(filepath string, f *elf.File, quality *debuginfopb.DebuginfoQuality) (liner, error) {
... | {
locationsToSymbolize++
continue
} | conditional_block |
symbolizer.go | // Copyright 2022-2023 The Parca Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to... |
func WithDemangleMode(mode string) Option {
return func(s *Symbolizer) {
s.demangler = demangle.NewDemangler(mode, false)
}
}
type Symbolizer struct {
logger log.Logger
// attempts counts the total number of symbolication attempts.
// It counts per batch.
attempts prometheus.Counter
// errors counts the tot... | {
return func(s *Symbolizer) {
s.attemptThreshold = t
}
} | identifier_body |
app.js | 'use struct';
/* global moment */
var timesched = angular
.module('timesched', ['ui.bootstrap', 'ui.sortable', 'ui.slider'])
.config(function($locationProvider) {
$locationProvider.html5Mode(true);
});
(function() {
var SELECTABLES = [];
var SELECTABLES_BY_NAME = {};
var SELECTABLES_BY_KEY = {};
f... | }
};
function TimeZoneState(m, zone) {
this.tz = m.tz();
this.urlKey = zone.k;
this.offset = 0;
this.timezoneShortName = zone.n;
this.timezoneName = zone.d;
this.update();
}
TimeZoneState.prototype.update = function(day, homeZone) {
var reftz = homeZone ? homeZone.tz : this.tz;... | random_line_split | |
app.js | 'use struct';
/* global moment */
var timesched = angular
.module('timesched', ['ui.bootstrap', 'ui.sortable', 'ui.slider'])
.config(function($locationProvider) {
$locationProvider.html5Mode(true);
});
(function() {
var SELECTABLES = [];
var SELECTABLES_BY_NAME = {};
var SELECTABLES_BY_KEY = {};
f... |
function zoneExists(input) {
return !!SELECTABLES_BY_NAME[normalizeZoneName(input)];
}
function lookupTimeZoneState(input) {
var zone = SELECTABLES_BY_NAME[normalizeZoneName(input)];
if (!zone) {
zone = SELECTABLES_BY_KEY[input];
if (!zone)
return null;
}
var m;
try ... | {
return zoneName.toLowerCase().replace(/^\s+|\s+$/g, '');
} | identifier_body |
app.js | 'use struct';
/* global moment */
var timesched = angular
.module('timesched', ['ui.bootstrap', 'ui.sortable', 'ui.slider'])
.config(function($locationProvider) {
$locationProvider.html5Mode(true);
});
(function() {
var SELECTABLES = [];
var SELECTABLES_BY_NAME = {};
var SELECTABLES_BY_KEY = {};
f... |
elm.typeahead('setQuery', newVal || '');
}, true);
scope.$on('$destroy', function() {
elm.typeahead('destroy');
});
}
};
});
})();
| {
localChange = false;
return;
} | conditional_block |
app.js | 'use struct';
/* global moment */
var timesched = angular
.module('timesched', ['ui.bootstrap', 'ui.sortable', 'ui.slider'])
.config(function($locationProvider) {
$locationProvider.html5Mode(true);
});
(function() {
var SELECTABLES = [];
var SELECTABLES_BY_NAME = {};
var SELECTABLES_BY_KEY = {};
f... | (zoneName) {
return zoneName.toLowerCase().replace(/^\s+|\s+$/g, '');
}
function zoneExists(input) {
return !!SELECTABLES_BY_NAME[normalizeZoneName(input)];
}
function lookupTimeZoneState(input) {
var zone = SELECTABLES_BY_NAME[normalizeZoneName(input)];
if (!zone) {
zone = SELECTABLES_B... | normalizeZoneName | identifier_name |
add_legacy_redirects.py | import fileinput
import re
import json
import os
from collections import defaultdict
from pathlib import Path
# This script is still very much a work in progress.
# It does a pretty good job matching "new" style urls using a combination of
# scraped Docs 1.0 site data, and the legacy_redirects_metadata.json file.
# "O... | headings.append(
normalize_title(heading_re.sub('', line))
)
mdx_file.close()
return headings
def normalize_title(title):
title = re.sub(r'^\d*\.?\d*\.?\d*\.?\d*\s', '', title.strip())
title = re.sub(r'[\u2000-\u206F\u2E00-\u2E7F\\\'\-!"#$%&()*+,./:;<=>?@[\]^`{|}~’]', '', title)
title = t... | heading_re = re.compile(r'^#+ ')
mdx_file = open(filepath)
for line in mdx_file:
if heading_re.match(line): | random_line_split |
add_legacy_redirects.py | import fileinput
import re
import json
import os
from collections import defaultdict
from pathlib import Path
# This script is still very much a work in progress.
# It does a pretty good job matching "new" style urls using a combination of
# scraped Docs 1.0 site data, and the legacy_redirects_metadata.json file.
# "O... | determine_root_mdx_file(docs_path, mdx_folder = None):
root_path = docs_path
if mdx_folder:
root_path += '/{}'.format(mdx_folder)
index_path = root_path + '/index.mdx'
if not os.path.exists(index_path):
return None
return index_path
def print_report(report_dict):
for key in report_dict.keys():
... | title = re.sub(r'^\d*\.?\d*\.?\d*\.?\d*\s', '', title.strip())
title = re.sub(r'[\u2000-\u206F\u2E00-\u2E7F\\\'\-!"#$%&()*+,./:;<=>?@[\]^`{|}~’]', '', title)
title = title.lower().replace(' ', '').replace('*', '').replace('_', '').replace("\\", '').replace('™','').replace('®','')
return title
def | identifier_body |
add_legacy_redirects.py | import fileinput
import re
import json
import os
from collections import defaultdict
from pathlib import Path
# This script is still very much a work in progress.
# It does a pretty good job matching "new" style urls using a combination of
# scraped Docs 1.0 site data, and the legacy_redirects_metadata.json file.
# "O... | (output):
written = 0
for filepath in Path('product_docs/docs').rglob('*.mdx'):
redirects = output[str(filepath)]
in_frontmatter = False
injected_redirects = False
in_existing_redirect_section = False
for line in fileinput.input(files=[filepath], inplace=1):
if not injected_redirects and ... | write_redirects_to_mdx_files | identifier_name |
add_legacy_redirects.py | import fileinput
import re
import json
import os
from collections import defaultdict
from pathlib import Path
# This script is still very much a work in progress.
# It does a pretty good job matching "new" style urls using a combination of
# scraped Docs 1.0 site data, and the legacy_redirects_metadata.json file.
# "O... | int("\n{0}================ Report ================{1}".format(ANSI_BLUE, ANSI_STOP))
print("\n{0}-- No Metadata Configured (Not Processed) --{1}".format(ANSI_YELLOW, ANSI_STOP))
print_report(no_metadata)
print("\n{0}-- Version Missing (Not Processed) --{1}".format(ANSI_YELLOW, ANSI_STOP))
print_report(version_missing)
... | ct_data = legacy_urls_by_product_version[product]
for version in product_data.keys():
product_version_data = product_data[version]
effective_version = version
if product in equivalent_versions and version in equivalent_versions.get(product):
effective_version = equivalent_versions.get(product).get(v... | conditional_block |
Pipeline_for_videos.py | # -*- coding: utf-8 -*-
"""
Created on Wed Nov 11 09:43:43 2020
@author: Admin
"""
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
import cv2
import pickle
from moviepy.editor import *
fin=[]
out = np.arange(0,250)/250
#print(out.shape)
out1= np.ones(100)
#print(... |
cap.release()
result1.release()
cv2.destroyAllWindows() | count+=1
ret, image = cap.read()
dist_pickle = pickle.load(open('./camera_cal/matrix.p','rb'))
dst = dist_pickle["dist"]
mtx = dist_pickle["mtx"]
if ret:
ksize = 3
img_undist = cv2.undistort(image,mtx,dst,None,mtx)
final_img = np.copy(img_undist)
... | conditional_block |
Pipeline_for_videos.py | # -*- coding: utf-8 -*-
"""
Created on Wed Nov 11 09:43:43 2020
@author: Admin
"""
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
import cv2
import pickle
from moviepy.editor import *
fin=[]
out = np.arange(0,250)/250
#print(out.shape)
out1= np.ones(100)
#print(... | #plt.show()
clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(20,20))
image_yuv[:,:,0] = clahe.apply(image_yuv[:,:,0])
img_output = cv2.cvtColor(image_yuv, cv2.COLOR_YUV2BGR)
return img_output
def yuv_select_lumin(image,thresh=(0,255)):
yuv_img = cv2.cvtColor(image,cv2.COLOR_BGR2YUV)
... | random_line_split | |
Pipeline_for_videos.py | # -*- coding: utf-8 -*-
"""
Created on Wed Nov 11 09:43:43 2020
@author: Admin
"""
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
import cv2
import pickle
from moviepy.editor import *
fin=[]
out = np.arange(0,250)/250
#print(out.shape)
out1= np.ones(100)
#print(... |
def dir_threshold(image, sobel_kernel=3, thresh=(0, np.pi/2)):
# Calculate gradient direction
# Apply threshold
gray_img = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)
sobelx = cv2.Sobel(gray_img,cv2.CV_64F,1,0,ksize=sobel_kernel)
sobely = cv2.Sobel(gray_img,cv2.CV_64F,0,1,ksize=sobel_kernel)
... | gray_img = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)
sobelx = cv2.Sobel(gray_img,cv2.CV_64F,1,0,ksize=sobel_kernel)
sobely = cv2.Sobel(gray_img,cv2.CV_64F,0,1,ksize=sobel_kernel)
mag_sobel = np.sqrt((sobelx)**2 + (sobely)**2)
absolute = np.absolute(mag_sobel)
scaled = np.uint8(255*absolute/np.max(... | identifier_body |
Pipeline_for_videos.py | # -*- coding: utf-8 -*-
"""
Created on Wed Nov 11 09:43:43 2020
@author: Admin
"""
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
import cv2
import pickle
from moviepy.editor import *
fin=[]
out = np.arange(0,250)/250
#print(out.shape)
out1= np.ones(100)
#print(... | (image, sobel_kernel=3, mag_thresh=(0, 255)):
# Calculate gradient magnitude
# Apply threshold
gray_img = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)
sobelx = cv2.Sobel(gray_img,cv2.CV_64F,1,0,ksize=sobel_kernel)
sobely = cv2.Sobel(gray_img,cv2.CV_64F,0,1,ksize=sobel_kernel)
mag_sobel = np.sqrt... | mag_thresh | identifier_name |
decomposition_utils.py | # Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
"""
A collection of utility functions for decomposition.
"""
import logging
import numpy
from . import einsum_parser
from . import formatters
from . import tensor_wrapper
from . import typemaps
from . import utils
from .... | Parse decomposition expression in string format, retaining ellipses if present.
"""
input_modes, *output_modes = subscripts.split("->")
if not output_modes:
raise ValueError("Output modes must be explicitly specified for decomposition")
if len(output_modes) > 1:
raise ValueError("sub... | """ | random_line_split |
decomposition_utils.py | # Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
"""
A collection of utility functions for decomposition.
"""
import logging
import numpy
from . import einsum_parser
from . import formatters
from . import tensor_wrapper
from . import typemaps
from . import utils
from .... | (handle, wrapped_operands, size_dict, inputs, outputs, mid_extent, method, device_id, stream_ctx, logger):
"""
Create empty tensor operands and corresponding tensor descriptors for a decomposition problem.
"""
# Create input tensor descriptors, output operands and output tensor descriptors
output_cl... | create_operands_and_descriptors | identifier_name |
decomposition_utils.py | # Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
"""
A collection of utility functions for decomposition.
"""
import logging
import numpy
from . import einsum_parser
from . import formatters
from . import tensor_wrapper
from . import typemaps
from . import utils
from .... |
def get_svd_info_dict(handle, svd_info):
"""
Parse the information in SVDInfo in a dictionary object.
"""
info = dict()
for key, attr in SVD_INFO_MAP.items():
info[key] = get_svd_config_info_scalar_attr(handle, 'info', svd_info, attr).item()
svd_algorithm = info['algorithm']
algo_s... | """
Given an SVDMethod object, set the corresponding attributes in the SVDConfig.
"""
svd_algorithm = None
for method_attr, attr in SVD_METHOD_CONFIG_MAP.items():
data = getattr(svd_method, method_attr)
if method_attr == 'partition':
data = PARTITION_MAP[data]
elif me... | identifier_body |
decomposition_utils.py | # Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
"""
A collection of utility functions for decomposition.
"""
import logging
import numpy
from . import einsum_parser
from . import formatters
from . import tensor_wrapper
from . import typemaps
from . import utils
from .... |
n = num_implicit_modes.pop()
ellipses_modes = tuple(range(label_end-n, label_end))
for i, _modes in enumerate(all_modes):
if Ellipsis not in _modes:
continue
s = _modes.index(Ellipsis)
all_modes[i] = _modes[:s] + ellipses_modes + ... | raise ValueError(f"Ellipsis for all operands must refer to equal number of modes, found {num_implicit_modes}") | conditional_block |
main.rs | #![feature(phase)]
#![feature(globs)]
#[phase(plugin)]
extern crate gfx_macros;
extern crate current;
extern crate shader_version;
extern crate vecmath;
extern crate event;
extern crate input;
extern crate cam;
extern crate gfx;
extern crate device;
extern crate sdl2;
extern crate sdl2_window;
extern crate time;
exte... | // find the textures used by this model from the list of materials
for mat in ai_scene.get_materials().iter() {
let texture_src = mat.get_texture(ai::material::TextureType::Diffuse,
0
);
ma... | // Create the buffer for the bone transformations. We fill this
// up each time we draw, so no need to do it here.
let u_bone_transformations: gfx::BufferHandle<Mat4> =
graphics.device.create_buffer(MAX_BONES, gfx::BufferUsage::Dynamic);
| random_line_split |
main.rs | #![feature(phase)]
#![feature(globs)]
#[phase(plugin)]
extern crate gfx_macros;
extern crate current;
extern crate shader_version;
extern crate vecmath;
extern crate event;
extern crate input;
extern crate cam;
extern crate gfx;
extern crate device;
extern crate sdl2;
extern crate sdl2_window;
extern crate time;
exte... | <'a> {
pub vertices: Vec<Vertex>,
pub indices: Vec<u32>,
pub batches: Vec<ModelComponent>,
pub scene: ai::Scene<'a>,
pub bone_map: RefCell<BoneMap>,
pub global_inverse: ai::Matrix4x4,
pub bone_transform_buffer: gfx::BufferHandle<Mat4>,
}
#[inline(always)]
fn lerp<S, T: Add<T,T> + Sub<T,T> +... | Model | identifier_name |
bot.js | const Discord = require('discord.js');
const client = new Discord.Client();
var prefix = "$";
client.on('ready', () => {
console.log(`Logged in as ${client.user.tag}!`);
client.user.setGame(`The Shadow for ever`,"http://twitch.tv/S-F")
console.log('')
console.log('')
console.log('╔[════════════════════════════... | nd('name', 'Muted');
if (!muteRole) return message.reply(" I Can’t Find 'Muted' Role ").catch(console.error).then(message => message.delete(4000))
if (message.mentions.users.size < 1) return message.reply(' Error : ``Mention a User``').catch(console.error).then(message => message.delete(4000))
if (!message.guild.... | ;
const ms = require("ms");
client.on("message", message => {
if(!message.channel.guild) return;
if (message... | conditional_block |
bot.js | const Discord = require('discord.js');
const client = new Discord.Client();
var prefix = "$";
client.on('ready', () => {
console.log(`Logged in as ${client.user.tag}!`);
client.user.setGame(`The Shadow for ever`,"http://twitch.tv/S-F")
console.log('')
console.log('')
console.log('╔[════════════════════════════... |
var guilds = {};
client.on('guildBanAdd', function(guild) {
const rebellog = client.channels.find("name", "log"),
Onumber = 10,
Otime = 10000
guild.fetchAuditLogs({
type: 22
}).then(audit => {
let banner = audit.entries.map(banner => banner.executor.id)
let bans = guilds[guild.i... | random_line_split | |
menus.component.ts | import { Component, OnInit } from '@angular/core';
import { CoreService } from 'src/app/services/core.service';
import { Platform, AlertController, PickerController } from '@ionic/angular';
import { SocialSharing } from '@ionic-native/social-sharing/ngx';
import { TranslateService } from '@ngx-translate/core';
import {... | {
if(this.speakingStart == false)
{
this.alertCtrl.create({
message:'<h1><ion-icon name="mic-outline" class="mix-size pulse-ring1" size="large"></ion-icon></h1> <p><h6>Kattints az OK gombra és beszélj.<h6></p>',
buttons:[
{
text:"OK",
handler:()=>{
this.speech.hasPermission(... | Permission()
| identifier_name |
menus.component.ts | import { Component, OnInit } from '@angular/core';
import { CoreService } from 'src/app/services/core.service';
import { Platform, AlertController, PickerController } from '@ionic/angular';
import { SocialSharing } from '@ionic-native/social-sharing/ngx';
import { TranslateService } from '@ngx-translate/core';
import {... | else{
this.speech.requestPermission().then(()=>{
this.openSpeech();
})
}
},(err)=>{
this.speech.requestPermission().then(()=>{
this.openSpeech();
})
})
}
},
... | this.openSpeech();
}
| conditional_block |
menus.component.ts | import { Component, OnInit } from '@angular/core';
import { CoreService } from 'src/app/services/core.service';
import { Platform, AlertController, PickerController } from '@ionic/angular';
import { SocialSharing } from '@ionic-native/social-sharing/ngx';
import { TranslateService } from '@ngx-translate/core';
import {... |
// reports() {
// this.router.navigateByUrl('../reports/')
// }
// *********************************************************
// Voice search - No City found
// *********************************************************
ifNoResFound(){
this.alertCtrl.create({
message:"<h6>Nincs találat.</h6>... | {
this.iab.create( url, '_system' );
} | identifier_body |
menus.component.ts | import { Component, OnInit } from '@angular/core';
import { CoreService } from 'src/app/services/core.service';
import { Platform, AlertController, PickerController } from '@ionic/angular';
import { SocialSharing } from '@ionic-native/social-sharing/ngx';
import { TranslateService } from '@ngx-translate/core';
import {... | });
}
// Update template
async updateTemplates() {
if (!Array.isArray(this.trans['templates']['options'])) return;
let buttons = [];
// when click templates
this.trans['templates']['options'].forEach(option => {
let button = {
text: option['text'],
cssClass: option['_val... | this.refresh();
}) ;
},(err)=>{
//alert("refresh call 3");
this.refresh(); | random_line_split |
template_model.py | from astromodels.functions.function import Function1D, FunctionMeta
from astromodels.utils.configuration import get_user_data_path
import collections
from astromodels.parameter import Parameter
import numpy as np
import pandas as pd
from pandas import HDFStore
import scipy.interpolate
import os
import re
import warnin... |
# This function will be substituted during construction by another version with
# all the parameters of this template
def evaluate(self, x, K, scale):
# This is overridden in the constructor
raise NotImplementedError("Should not get here!")
def _interpolate(self, energies, scale, p... | self.K.unit = y_unit
self.scale.unit = 1 / x_unit | identifier_body |
template_model.py | from astromodels.functions.function import Function1D, FunctionMeta
from astromodels.utils.configuration import get_user_data_path
import collections
from astromodels.parameter import Parameter
import numpy as np
import pandas as pd
from pandas import HDFStore
import scipy.interpolate
import os
import re
import warnin... | (self, method, name=None):
if name is None:
name = method.func_name
setattr(self.__class__, name, method)
class RectBivariateSplineWrapper(object):
"""
Wrapper around RectBivariateSpline, which supplies a __call__ method which accept the same
syntax as the other interpolation methods
... | add_method | identifier_name |
template_model.py | from astromodels.functions.function import Function1D, FunctionMeta
from astromodels.utils.configuration import get_user_data_path
import collections
from astromodels.parameter import Parameter
import numpy as np
import pandas as pd
from pandas import HDFStore
import scipy.interpolate
import os
import re
import warnin... |
else:
raise IOError("The file %s already exists! You cannot call two different "
"template models with the same name" % filename_sanitized)
# Open the HDF5 file and write objects
with HDFStore(filename_sanitized) as store:
# The... | try:
os.remove(filename_sanitized)
except:
raise IOError("The file %s already exists and cannot be removed (maybe you do not have "
"permissions to do so?). " % filename_sanitized) | conditional_block |
template_model.py | from astromodels.functions.function import Function1D, FunctionMeta
from astromodels.utils.configuration import get_user_data_path
import collections
from astromodels.parameter import Parameter
import numpy as np
import pandas as pd
from pandas import HDFStore
import scipy.interpolate
import os
import re
import warnin... | function_definition['description'] = description
function_definition['latex'] = 'n.a.'
# Now build the parameters according to the content of the parameter grid
parameters = collections.OrderedDict()
parameters['K'] = Parameter('K', 1.0)
parameters['scale'] = Paramete... | # Make the dictionary of parameters
function_definition = collections.OrderedDict()
| random_line_split |
index.js | /**
* Created by bjwsl-001 on 2016/11/9.
*/
var app=angular.module("NBA",['ng','ngRoute']);
//设置post请求的响应头部
app.run(function($http){
$http.defaults.headers.post={"Content-Type":"application/x-www-form-urlencoded"};
})
//设置根控制器
app.controller("rootCtrl",["$scope","$rootScope","$location","$routeParams","$http",funct... | "$rootScope","$routeParams","$http",function($scope,$rootScope,$routeParams,$http){
$rootScope.isPageShow=innerWidth>450?true:false;
$rootScope.searchMsg={};
$rootScope.searchMsg.pclass=$routeParams.id;
$rootScope.num=[];
$rootScope.proList=[];
$rootScope.loadMore(1,"data/5_showProductByPclass.php");
$sco... | }else{
alert("添加失败")
}
})
}
}else{
//TODO 弹出提示框,提醒用户登录
alert("您还未登录,请登录后在使用此功能")
}
}
}]);
app.controller("mallLotteryCtrl",["$scope",function($scope){
}]);
app.controller("mallSearchCtrl",["$scope","$rootScope","$routeParams","$http",function($scope,$rootSc... | conditional_block |
index.js | /**
* Created by bjwsl-001 on 2016/11/9.
*/
var app=angular.module("NBA",['ng','ngRoute']);
//设置post请求的响应头部
app.run(function($http){
$http.defaults.headers.post={"Content-Type":"application/x-www-form-urlencoded"};
})
//设置根控制器
app.controller("rootCtrl",["$scope","$rootScope","$location","$routeParams","$http",funct... | identifier_body | ||
index.js | /**
* Created by bjwsl-001 on 2016/11/9.
*/
var app=angular.module("NBA",['ng','ngRoute']);
//设置post请求的响应头部
app.run(function($http){
$http.defaults.headers.post={"Content-Type":"application/x-www-form-urlencoded"};
})
//设置根控制器
app.controller("rootCtrl",["$scope","$rootScope","$location","$routeParams","$http",funct... | identifier_name | ||
index.js | /**
* Created by bjwsl-001 on 2016/11/9.
*/
var app=angular.module("NBA",['ng','ngRoute']);
//设置post请求的响应头部
app.run(function($http){
$http.defaults.headers.post={"Content-Type":"application/x-www-form-urlencoded"};
})
//设置根控制器
app.controller("rootCtrl",["$scope","$rootScope","$location","$routeParams","$http",funct... | //转换日期格式
$scope.changeTime=function(date){
var year=date.getFullYear();
var mouth=date.getMonth();
mouth=mouth<10?("0"+mouth):mouth;
var day=date.getDate();
day=day<10?("0"+day):day;
var hour=date.getHours();
hour=hour<10?("0"+hour):hour;
var minues=date.getMinute... | $scope.orderList[i].orderTime=$scope.changeTime(date);
var status=$scope.orderList[i].status;
$scope.orderList[i].status=$scope.judgeStatus(status);
}
}) | random_line_split |
checkData.py | #!/usr/bin/python
# -*- coding:utf-8 -*-
import s3PyCmd
import hashlib
import random
import logging
import re
import time
import os
import threading
logFile = 'log/checkData.log'
if not os.path.exists('log'): os.mkdir('log')
if os.path.exists(logFile) and os.path.getsize(logFile) > 104857600: os.remove(logFile)
loggi... | tMarkerFromXML(xmlBody, markerStr):
marker = re.findall('<' + markerStr + '>(.+?)</' + markerStr + '>', xmlBody)
if marker and marker[0]:
logging.info('get marker in response %s' %marker[0])
return marker[0]
else:
logging.info('get no marker in response')
return No... | e.findall('<Key>(.+?)</Key>', xmlBody)
versions = re.findall('<VersionId>(.+?)</VersionId>', xmlBody)
for i in range(len(versions)):
if versions[i] == 'null': versions[i]=None
if len(versions)>0 and len(versions) != len(keys):
logging.error('response error, versions != keys %s' %xmlBody)
... | identifier_body |
checkData.py | #!/usr/bin/python
# -*- coding:utf-8 -*-
import s3PyCmd
import hashlib
import random
import logging
import re
import time
import os
import threading
logFile = 'log/checkData.log'
if not os.path.exists('log'): os.mkdir('log')
if os.path.exists(logFile) and os.path.getsize(logFile) > 104857600: os.remove(logFile)
loggi... | , markerStr):
marker = re.findall('<' + markerStr + '>(.+?)</' + markerStr + '>', xmlBody)
if marker and marker[0]:
logging.info('get marker in response %s' %marker[0])
return marker[0]
else:
logging.info('get no marker in response')
return None
... | rFromXML(xmlBody | identifier_name |
checkData.py | #!/usr/bin/python
# -*- coding:utf-8 -*-
import s3PyCmd
import hashlib
import random
import logging
import re
import time
import os
import threading
logFile = 'log/checkData.log'
if not os.path.exists('log'): os.mkdir('log')
if os.path.exists(logFile) and os.path.getsize(logFile) > 104857600: os.remove(logFile)
loggi... | if not len(versions): versions = [None for i in range(len(keys))]
return zip(keys,versions)
def getMarkerFromXML(xmlBody, markerStr):
marker = re.findall('<' + markerStr + '>(.+?)</' + markerStr + '>', xmlBody)
if marker and marker[0]:
logging.info('get marker in response %s' %marker[0]... | if len(versions)>0 and len(versions) != len(keys):
logging.error('response error, versions != keys %s' %xmlBody)
return [] | random_line_split |
checkData.py | #!/usr/bin/python
# -*- coding:utf-8 -*-
import s3PyCmd
import hashlib
import random
import logging
import re
import time
import os
import threading
logFile = 'log/checkData.log'
if not os.path.exists('log'): os.mkdir('log')
if os.path.exists(logFile) and os.path.getsize(logFile) > 104857600: os.remove(logFile)
loggi... | #记日志、重定向(<400:debug; >=400,<500: warn; >=500:error)
if httpResponse.status < 400:
logging.debug('Request:[%s], conn: [%s->%s], URL:[%s], waitResponseTime:[%.3f], responseStatus:[%s], %r, %r' \
%(s3Request.requestType, localAddr, peerAddr,s3Request.url, waitResponseTime, sta... | son
| conditional_block |
create.go | package statusresource
import (
"context"
"fmt"
"reflect"
"time"
providerv1alpha1 "github.com/giantswarm/apiextensions/v6/pkg/apis/provider/v1alpha1"
"github.com/giantswarm/backoff"
"github.com/giantswarm/errors/tenant"
"github.com/giantswarm/k8sclient/v7/pkg/k8sclient"
"github.com/giantswarm/microerror"
"g... |
func (r *Resource) computeCreateEventPatches(ctx context.Context, obj interface{}) ([]Patch, error) {
clusterStatus, err := r.clusterStatusFunc(obj)
if err != nil {
return nil, microerror.Mask(err)
}
currentVersion := clusterStatus.LatestVersion()
desiredVersion, err := r.versionBundleVersionFunc(obj)
if err... | {
r.logger.LogCtx(ctx, "level", "debug", "message", "patching CR status")
// We process the status updates within its own backoff here to gurantee its
// execution independent of any eventual retries via the retry resource. It
// might happen that the reconciled object is not the latest version so any
// patch wo... | identifier_body |
create.go | package statusresource
import (
"context"
"fmt"
"reflect"
"time"
providerv1alpha1 "github.com/giantswarm/apiextensions/v6/pkg/apis/provider/v1alpha1"
"github.com/giantswarm/backoff"
"github.com/giantswarm/errors/tenant"
"github.com/giantswarm/k8sclient/v7/pkg/k8sclient"
"github.com/giantswarm/microerror"
"g... | else if err != nil {
return microerror.Mask(err)
}
if len(patches) > 0 {
err := r.applyPatches(ctx, newAccessor, patches)
if err != nil {
return microerror.Mask(err)
}
modified = true
}
return nil
}
b := r.backOffFactory()
n := func(err error, d time.Duration) {
r.logge... | {
r.logger.LogCtx(ctx, "level", "debug", "message", "tenant cluster is not available")
r.logger.LogCtx(ctx, "level", "debug", "message", "canceling resource")
return nil
} | conditional_block |
create.go | package statusresource
import (
"context"
"fmt"
"reflect"
"time"
providerv1alpha1 "github.com/giantswarm/apiextensions/v6/pkg/apis/provider/v1alpha1"
"github.com/giantswarm/backoff"
"github.com/giantswarm/errors/tenant"
"github.com/giantswarm/k8sclient/v7/pkg/k8sclient"
"github.com/giantswarm/microerror"
"g... | }
// Update the node status based on what the tenant cluster API tells us.
//
// TODO this is a workaround until we can read the node status information
// from the NodeConfig CR status. This is not possible right now because the
// NodeConfig CRs are still used for draining by older tenant clusters.
{
var k8... | random_line_split | |
create.go | package statusresource
import (
"context"
"fmt"
"reflect"
"time"
providerv1alpha1 "github.com/giantswarm/apiextensions/v6/pkg/apis/provider/v1alpha1"
"github.com/giantswarm/backoff"
"github.com/giantswarm/errors/tenant"
"github.com/giantswarm/k8sclient/v7/pkg/k8sclient"
"github.com/giantswarm/microerror"
"g... | (ctx context.Context, obj interface{}) error {
r.logger.LogCtx(ctx, "level", "debug", "message", "patching CR status")
// We process the status updates within its own backoff here to gurantee its
// execution independent of any eventual retries via the retry resource. It
// might happen that the reconciled object ... | EnsureCreated | identifier_name |
kmeans_to_classifier_main.py | from data_treatment import load_data_yf,data_clean,seperate_label,data_seperate,load_data_new,data_transform_new,plot_eda,data_clean,feature_extend
from models import rf_mdoel,gbdt_mdoel,xgb_model,cat_boost_model,lgb_model,get_stacking
from sklearn.metrics import make_scorer
from sklearn.ensemble import RandomForestCl... |
for i in range(5):
tpo = TPOTClassifier(generations=10, verbosity=2, population_size=150,
scoring='f1', n_jobs=-1, config_dict=tpot_config,
mutation_rate=0.8, crossover_rate=0.2)
x_train_x = np.array(x_train[x_train["chunk_label"] == i].dr... | random_line_split | |
kmeans_to_classifier_main.py | from data_treatment import load_data_yf,data_clean,seperate_label,data_seperate,load_data_new,data_transform_new,plot_eda,data_clean,feature_extend
from models import rf_mdoel,gbdt_mdoel,xgb_model,cat_boost_model,lgb_model,get_stacking
from sklearn.metrics import make_scorer
from sklearn.ensemble import RandomForestCl... | =0.8, crossover_rate=0.2)
x_train_x = np.array(x_train[x_train["chunk_label"] == i].drop(["chunk_label", labels],
axis=1))
x_test_x = np.array(x_test[x_test["chunk_label"] == i].drop(["chunk_label", labels],
... | conditional_block | |
units.py | # units.py
# Unit classes/functions for hammer_vlsi.
#
# See LICENSE for licence details.
from abc import abstractmethod
import sys
try:
from abc import ABC # pylint: disable=ungrouped-imports
except ImportError:
if sys.version_info.major == 3 and sys.version_info.minor < 4:
# Python compatibility:... |
return self.value < other.value
def __le__(self: _TT, other: _TT) -> bool:
"""
Check if self is less than or equal to other.
The types must match.
"""
if type(self) != type(other):
raise TypeError("Types do not match")
return self.value <= other.... | raise TypeError("Types do not match") | conditional_block |
units.py | # units.py
# Unit classes/functions for hammer_vlsi.
#
# See LICENSE for licence details.
from abc import abstractmethod
import sys
try:
from abc import ABC # pylint: disable=ungrouped-imports
except ImportError:
if sys.version_info.major == 3 and sys.version_info.minor < 4:
# Python compatibility:... | (self: _TT, other: float) -> _TT:
"""
Multiply self by a float or an integer.
"""
return type(self)(str(self.value * other),"")
class TimeValue(ValueWithUnit):
"""Time value - e.g. "4 ns".
Parses time values from strings.
"""
@property
def default_prefix(self) -> s... | __mul__ | identifier_name |
units.py | # units.py
# Unit classes/functions for hammer_vlsi.
#
# See LICENSE for licence details.
from abc import abstractmethod
import sys
try:
from abc import ABC # pylint: disable=ungrouped-imports
except ImportError:
if sys.version_info.major == 3 and sys.version_info.minor < 4:
# Python compatibility:... | 'z': 1e-21, # zepto
'a': 1e-18, # atto
'f': 1e-15, # femto
'p': 1e-12, # pico
'n': 1e-9, # nano
'u': 1e-6, # micro
'm': 1e-3, # milli
'c': 1e-2, # centi
'd': 1e-1, # deci
'': 1, # <no prefix>
'k': 1e3, # kilo
'... | _prefix_table = {
'y': 1e-24, # yocto | random_line_split |
units.py | # units.py
# Unit classes/functions for hammer_vlsi.
#
# See LICENSE for licence details.
from abc import abstractmethod
import sys
try:
from abc import ABC # pylint: disable=ungrouped-imports
except ImportError:
if sys.version_info.major == 3 and sys.version_info.minor < 4:
# Python compatibility:... |
@property
@abstractmethod
def unit_type(self) -> str:
"""Get the base unit type for values. (e.g. for "s", this would be "time")
Meant to be overridden by subclasses."""
@property
@abstractmethod
def default_prefix(self) -> str:
"""Get the default prefix for values.
... | """Get the base unit for values (e.g. "s", "m", "V", etc).
Meant to be overridden by subclasses.""" | identifier_body |
attribute_context.pb.go | // Code generated by protoc-gen-go. DO NOT EDIT.
// source: envoy/service/auth/v2/attribute_context.proto
package envoy_service_auth_v2
import (
fmt "fmt"
core "github.com/cilium/proxy/go/envoy/api/v2/core"
proto "github.com/golang/protobuf/proto"
timestamp "github.com/golang/protobuf/ptypes/timestamp"
math "mat... | type AttributeContext_Peer struct {
// The address of the peer, this is typically the IP address.
// It can also be UDS path, or others.
Address *core.Address `protobuf:"bytes,1,opt,name=address,proto3" json:"address,omitempty"`
// The canonical service name of the peer.
// It should be set to :ref:`the HTTP x-env... | random_line_split | |
attribute_context.pb.go | // Code generated by protoc-gen-go. DO NOT EDIT.
// source: envoy/service/auth/v2/attribute_context.proto
package envoy_service_auth_v2
import (
fmt "fmt"
core "github.com/cilium/proxy/go/envoy/api/v2/core"
proto "github.com/golang/protobuf/proto"
timestamp "github.com/golang/protobuf/ptypes/timestamp"
math "mat... |
return ""
}
func (m *AttributeContext_HttpRequest) GetFragment() string {
if m != nil {
return m.Fragment
}
return ""
}
func (m *AttributeContext_HttpRequest) GetSize() int64 {
if m != nil {
return m.Size
}
return 0
}
func (m *AttributeContext_HttpRequest) GetProtocol() string {
if m != nil {
return m... | {
return m.Query
} | conditional_block |
attribute_context.pb.go | // Code generated by protoc-gen-go. DO NOT EDIT.
// source: envoy/service/auth/v2/attribute_context.proto
package envoy_service_auth_v2
import (
fmt "fmt"
core "github.com/cilium/proxy/go/envoy/api/v2/core"
proto "github.com/golang/protobuf/proto"
timestamp "github.com/golang/protobuf/ptypes/timestamp"
math "mat... | () *core.Metadata {
if m != nil {
return m.MetadataContext
}
return nil
}
// This message defines attributes for a node that handles a network request.
// The node can be either a service or an application that sends, forwards,
// or receives the request. Service peers should fill in the `service`,
// `principal`... | GetMetadataContext | identifier_name |
attribute_context.pb.go | // Code generated by protoc-gen-go. DO NOT EDIT.
// source: envoy/service/auth/v2/attribute_context.proto
package envoy_service_auth_v2
import (
fmt "fmt"
core "github.com/cilium/proxy/go/envoy/api/v2/core"
proto "github.com/golang/protobuf/proto"
timestamp "github.com/golang/protobuf/ptypes/timestamp"
math "mat... |
func (m *AttributeContext_HttpRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_AttributeContext_HttpRequest.Merge(m, src)
}
func (m *AttributeContext_HttpRequest) XXX_Size() int {
return xxx_messageInfo_AttributeContext_HttpRequest.Size(m)
}
func (m *AttributeContext_HttpRequest) XXX_DiscardUnknown() {
xxx_m... | {
return xxx_messageInfo_AttributeContext_HttpRequest.Marshal(b, m, deterministic)
} | identifier_body |
1602.object_view.js | /*
Structure: objectView
*/
$.ov = {
views:[],
objectMemberValue: function(x){/* {{{ */
var val;
switch(typeof x){
case 'boolean':
val = '<input type="checkbox" '+(x?'checked ':'')+'/>';
break;
case 'number':
val = '<pre style="color:blue">'+x+'</pre>';
break;
case 'string... | var objectPlace = $('div#ov',this);
if(objectPlace.size()==0){
var initHtml = '<div id="ov" style="margin-bottom:10px;"></div>';
var t = '';
var c = '';
for(var i in cs){
var cc = $.ov.classes[cs[i]];
if(!cc)continue;
t+='<li><a href="#"><span>'+(cc.collectionTitle || i)+'</span></a></li>';
... | var cs = $.ov.classes[className].collections;
| random_line_split |
1602.object_view.js | /*
Structure: objectView
*/
$.ov = {
views:[],
objectMemberValue: function(x){/* {{{ */
var val;
switch(typeof x){
case 'boolean':
val = '<input type="checkbox" '+(x?'checked ':'')+'/>';
break;
case 'number':
val = '<pre style="color:blue">'+x+'</pre>';
break;
case 'string... | ',\''+mem.className+'\'':'')+')"';
var is_levelup = false;
if(typeof x == 'object')for(var xxx in x){is_levelup = true; break;}
var expanded = is_levelup && (cm_in_array(exp,newPathStr) || x.__ov_expanded || mem.defaultExpanded);
r+='<tr level="'+level+'" expanded="'+(expanded?1:0)+'" '+(mem.classNa... | asses[mem.className].collection.value.apply(x);
}
}
if(typeof mem == 'string'){
label = mem;
}else if(typeof mem == 'object' && mem.label){
label = mem.label;
}
var levelup = 'class="likealink" onclick="$(this.parentNode).objectView(\''+newPathStr+'\''+(mem.className? | conditional_block |
dq_ingestion.go | /*
* Copyright 2019-2020 VMware, Inc.
* All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law... |
func GetNewObj(objType string) (k8sobjects.MetaObject, error) {
switch objType {
case gslbutils.RouteType:
return k8sobjects.RouteMeta{}, nil
case gslbutils.IngressType:
return k8sobjects.IngressHostMeta{}, nil
case gslbutils.SvcType:
return k8sobjects.SvcMeta{}, nil
default:
return nil, errors.New("unre... | {
var prevChecksum, newChecksum uint32
obj := getObjFromStore(objType, cname, ns, objName, key, gslbutils.AcceptedStore)
if obj == nil {
// error message already logged in the above function
return
}
metaObj := obj.(k8sobjects.MetaObject)
if metaObj.GetHostname() == "" {
gslbutils.Errf("key: %s, msg: %s", ... | identifier_body |
dq_ingestion.go | /*
* Copyright 2019-2020 VMware, Inc.
* All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law... | }
switch objectOperation {
case gslbutils.ObjectAdd:
AddUpdateObjOperation(key, cname, ns, objType, objName, sharedQueue, false, SharedAviGSGraphLister())
case gslbutils.ObjectDelete:
deleteObjOperation(key, cname, ns, objType, objName, sharedQueue)
case gslbutils.ObjectUpdate:
AddUpdateObjOperation(key, cna... | gslbutils.Warnf("key: %s, msg: %s", key, "not an acceptable object, can't process")
return | random_line_split |
dq_ingestion.go | /*
* Copyright 2019-2020 VMware, Inc.
* All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law... |
switch objectOperation {
case gslbutils.ObjectAdd:
AddUpdateObjOperation(key, cname, ns, objType, objName, sharedQueue, false, SharedAviGSGraphLister())
case gslbutils.ObjectDelete:
deleteObjOperation(key, cname, ns, objType, objName, sharedQueue)
case gslbutils.ObjectUpdate:
AddUpdateObjOperation(key, cname... | {
gslbutils.Warnf("key: %s, msg: %s", key, "not an acceptable object, can't process")
return
} | conditional_block |
dq_ingestion.go | /*
* Copyright 2019-2020 VMware, Inc.
* All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law... | (objType, cname, ns, objName, key, storeType string) interface{} {
var store *gslbutils.ClusterStore
switch objType {
case gslbutils.RouteType:
if storeType == gslbutils.AcceptedStore {
store = gslbutils.GetAcceptedRouteStore()
} else {
store = gslbutils.GetRejectedRouteStore()
}
if store == nil {
/... | getObjFromStore | identifier_name |
app.py | # -*- coding: utf-8 -*-
import os
from datetime import datetime
import numpy as np
import shutil
from flask import Flask, render_template, redirect, url_for, request, session, send_from_directory, Response
from flask_uploads import UploadSet, configure_uploads, IMAGES, patch_request_class
from flask_wtf import FlaskFor... | folder_name = address + task.folder_name
filename = folder_name + "\\data.xlsx"
arr = []
ex = xlrd.open_workbook(filename).sheets()[0]
for i in range(ex.nrows):
col = ex.row_values(i)
for index, n in enumerate(col):
if isinstance(n, str):
col[index] = 0
... | random_line_split | |
app.py | # -*- coding: utf-8 -*-
import os
from datetime import datetime
import numpy as np
import shutil
from flask import Flask, render_template, redirect, url_for, request, session, send_from_directory, Response
from flask_uploads import UploadSet, configure_uploads, IMAGES, patch_request_class
from flask_wtf import FlaskFor... | thods=['GET', 'POST'])
def page_list():
user_id = request.headers.get('Authorization',None)
task = Task.query.filter_by(user_id=user_id, status=2).first()
folder_name = address + task.folder_name
if not os.path.exists(folder_name):
return jsonify(0)
files_list = os.listdir(folder_name)
... | 080, debug=True)
# app.run(debug=True)
@app.route('/page_list', me | conditional_block |
app.py | # -*- coding: utf-8 -*-
import os
from datetime import datetime
import numpy as np
import shutil
from flask import Flask, render_template, redirect, url_for, request, session, send_from_directory, Response
from flask_uploads import UploadSet, configure_uploads, IMAGES, patch_request_class
from flask_wtf import FlaskFor... | thods=['GET', 'POST'])
def get_excel(row, line):
user_id = request.headers.get('Authorization', None)
task = Task.query.filter_by(user_id=user_id, status=2).first()
folder_name = address + task.folder_name
row = int(row) - 1
line = int(line) - 1
x1 = xlrd.open_workbook(folder_name + '\\data.xlsx... | for cur_line_number, line in enumerate(open(the_file_path, 'rU')):
if cur_line_number == line_number-1:
return line
return ''
@app.route('/getValue/<row>/<line>', me | identifier_body |
app.py | # -*- coding: utf-8 -*-
import os
from datetime import datetime
import numpy as np
import shutil
from flask import Flask, render_template, redirect, url_for, request, session, send_from_directory, Response
from flask_uploads import UploadSet, configure_uploads, IMAGES, patch_request_class
from flask_wtf import FlaskFor... | , autoincrement=True)
username = db.Column(db.String(100), nullable=False)
password = db.Column(db.String(100), nullable=False)
role = db.Column(db.String(100), nullable=False)
def keys(self):
return ['id', 'username', 'password', 'role']
def __getitem__(self, item):
return getattr... | True | identifier_name |
mongodb-scraper.py | # coding=utf-8
import argparse
import logging
import logging.handlers
import json
import re
from colorlog import ColoredFormatter
from pymongo import MongoClient
from pymongo import errors as mongo_errors
import io
import os
import smtplib
from email.mime.text import MIMEText
class MongodbScraper:
def __init__(se... |
def _check_datafile(self):
size = 0
if os.path.exists('data/' + self.filename):
size = os.path.getsize('data/' + self.filename)
# Did the file grow too large?
if size > (20 * 1024 * 1024):
i = 0
while i < 100:
i += 1
... | try:
threshold = self.settings['email']['threshold']
except KeyError:
# No key set
return
# Result is not interesting enough
if count < threshold:
return
# Do I have all the required strings?
try:
email_from = self.set... | identifier_body |
mongodb-scraper.py | # coding=utf-8
import argparse
import logging
import logging.handlers
import json
import re
from colorlog import ColoredFormatter
from pymongo import MongoClient
from pymongo import errors as mongo_errors
import io
import os
import smtplib
from email.mime.text import MIMEText
class MongodbScraper:
def __init__(se... | (self):
for ip in self.ips:
# Do I have already processed this IP?
if ip in self.processed:
continue
self.logger.info("Connecting to " + ip)
try:
client = MongoClient(ip, connectTimeoutMS=5000)
dbs = client.databas... | scrape | identifier_name |
mongodb-scraper.py | # coding=utf-8
import argparse
import logging
import logging.handlers
import json
import re
from colorlog import ColoredFormatter
from pymongo import MongoClient
from pymongo import errors as mongo_errors
import io
import os
import smtplib
from email.mime.text import MIMEText
class MongodbScraper:
def __init__(se... |
# Load previous data
self._load_data()
# Let's parse some CLI options
parser = argparse.ArgumentParser()
parser.add_argument('-s', '--skip', help='Supply a comma separated string of IPs that should be skipped')
arguments = parser.parse_args()
if arguments.skip... | if not os.path.exists('data'):
os.makedirs('data') | random_line_split |
mongodb-scraper.py | # coding=utf-8
import argparse
import logging
import logging.handlers
import json
import re
from colorlog import ColoredFormatter
from pymongo import MongoClient
from pymongo import errors as mongo_errors
import io
import os
import smtplib
from email.mime.text import MIMEText
class MongodbScraper:
def __init__(se... |
if __name__ == '__main__':
scraper = MongodbScraper()
scraper.scrape()
| if ip in self.processed:
continue
self.logger.info("Connecting to " + ip)
try:
client = MongoClient(ip, connectTimeoutMS=5000)
dbs = client.database_names()
except (KeyboardInterrupt, SystemExit):
return
ex... | conditional_block |
lib.rs | use bitflags::bitflags;
use std::{
fmt,
fs::{File, OpenOptions},
io::{self, prelude::*, Result, SeekFrom},
iter,
mem::{self, MaybeUninit},
ops::{Deref, DerefMut},
os::unix::{
fs::OpenOptionsExt,
io::AsRawFd,
},
ptr, slice,
};
mod arch;
mod kernel;
macro_rules! trace... | /// breakpoint event, it returns an event handler that lets you
/// handle events yourself.
pub fn next_event(&mut self, flags: Flags) -> Result<EventHandler> {
trace!(flags, self.file.write(&flags.bits().to_ne_bytes())?);
Ok(EventHandler { inner: self })
}
/// Convert this tracer to... | random_line_split | |
lib.rs | use bitflags::bitflags;
use std::{
fmt,
fs::{File, OpenOptions},
io::{self, prelude::*, Result, SeekFrom},
iter,
mem::{self, MaybeUninit},
ops::{Deref, DerefMut},
os::unix::{
fs::OpenOptionsExt,
io::AsRawFd,
},
ptr, slice,
};
mod arch;
mod kernel;
macro_rules! trace... | {
pub file: File,
pub regs: Registers,
pub mem: Memory,
}
impl Tracer {
/// Attach to a tracer with the specified PID. This will stop it.
pub fn attach(pid: Pid) -> Result<Self> {
Ok(Self {
file: OpenOptions::new()
.read(true)
.write(true)
... | Tracer | identifier_name |
ipymel.py | """
pymel ipython configuration
Current Features
----------------
tab completion of depend nodes, dag nodes, and attributes
automatic import of pymel
Future Features
---------------
- tab completion of PyNode attributes
- color coding of tab complete options
- to differentiate between methods and attributes
... |
class DagTree(TreePager):
def getChildren(self, obj):
if self.options.shapes:
return obj.getChildren()
else:
return obj.getChildren(type='transform')
def getName(self, obj):
import pymel.core as pm
name = obj.nodeName()
if obj.isInstanced():
... | num = len(roots) - 1
tree = ''
for i, x in enumerate(roots):
for line in self.do_level(x, 0, [i == num]):
tree += line
return tree | identifier_body |
ipymel.py | """
pymel ipython configuration
Current Features
----------------
tab completion of depend nodes, dag nodes, and attributes
automatic import of pymel
Future Features
---------------
- tab completion of PyNode attributes
- color coding of tab complete options
- to differentiate between methods and attributes
... | sigint_plugin_loaded_callback_id = None
DAG_MAGIC_COMPLETER_RE = re.compile(r"(?P<preamble>%dag\s+)(?P<namematch>(?P<previous_parts>([a-zA-Z0-9:_]*\|)*)(?P<current_part>[a-zA-Z0-9:_]*))$")
DAG_COMPLETER_RE = re.compile(r"(?P<preamble>((.+(\s+|\())|(SCENE\.))[^\w|:._]*)(?P<namematch>(?P<previous_parts>([a-zA-Z0-9:_]*\|... | random_line_split | |
ipymel.py | """
pymel ipython configuration
Current Features
----------------
tab completion of depend nodes, dag nodes, and attributes
automatic import of pymel
Future Features
---------------
- tab completion of PyNode attributes
- color coding of tab complete options
- to differentiate between methods and attributes
... |
# if we have only one match, get the children as well
if len(matches) == 1 and not attr_match:
res = get_children(matches[0] + '|', dagOnly)
matches += res
if event.symbol != nametext:
# in some situations, the event.symbol will only have incomplete
# information - ie, if ... | raise TryNext | conditional_block |
ipymel.py | """
pymel ipython configuration
Current Features
----------------
tab completion of depend nodes, dag nodes, and attributes
automatic import of pymel
Future Features
---------------
- tab completion of PyNode attributes
- color coding of tab complete options
- to differentiate between methods and attributes
... | (*args):
# from the docs, as of 2015 the args are:
# ( [ pathToPlugin, pluginName ], clientData )
install_sigint_handler()
sigint_plugin_loaded_callback_id = None
DAG_MAGIC_COMPLETER_RE = re.compile(r"(?P<preamble>%dag\s+)(?P<namematch>(?P<previous_parts>([a-zA-Z0-9:_]*\|)*)(?P<current_part>[a-zA-Z0-9:_... | sigint_plugin_loaded_callback | identifier_name |
Server.py | import numpy as np
def mod_pert_random(low, likely, high, confidence=4, samples=30):
"""Produce random numbers according to the 'Modified PERT'
distribution.
:param low: The lowest value expected as possible.
:param likely: The 'most likely' value, statistically, the mode.
:param high: The highest... | >>> q2.groups[1].get_groupID() # Test whether vip would become the first
0
>>> g2=Group(20,2,False,2)
>>> q2.add_queue(g2)
>>> g3=Group(30,1,True,3)
>>> q2.add_queue(g3)
>>> q2.groups[0].get_groupID() # Test whether vip skip the queue properly
2
>>... | >>> q2.add_queue(g0)
>>> len(q2.groups) # Test whether group is correctly added
1
>>> g1=Group(14,1,True,1)
>>> q2.add_queue(g1) | random_line_split |
Server.py | import numpy as np
def mod_pert_random(low, likely, high, confidence=4, samples=30):
"""Produce random numbers according to the 'Modified PERT'
distribution.
:param low: The lowest value expected as possible.
:param likely: The 'most likely' value, statistically, the mode.
:param high: The highest... |
elif len(self.groups) <= 1:
self.groups.insert(0, group)
elif group.get_vip() is False:
self.groups.insert(0, group)
def del_queue(self): # delete last=delete first come group
"""
Pop the head (index = length of queue -1 ) of queue
:return: ... | self.groups.insert(1, group) | conditional_block |
Server.py | import numpy as np
def mod_pert_random(low, likely, high, confidence=4, samples=30):
"""Produce random numbers according to the 'Modified PERT'
distribution.
:param low: The lowest value expected as possible.
:param likely: The 'most likely' value, statistically, the mode.
:param high: The highest... |
def get_vip(self):
return self.vip
def get_time_request(self):
return self.timeRequest
def tablesSetting(number_tables_2, number_tables_4, number_tables_6):
"""
Initialize tables
:param number_tables_2: number of tables for groups with one or two customers. (6)
:param number... | """
Calculate the waiting time for the group
:param current_time: current time point
:return: waiting time for current group
>>> g0=Group(20,2,False,0)
>>> g0.wait_time(71)
51
"""
return current_time - self.timestamp | identifier_body |
Server.py | import numpy as np
def mod_pert_random(low, likely, high, confidence=4, samples=30):
"""Produce random numbers according to the 'Modified PERT'
distribution.
:param low: The lowest value expected as possible.
:param likely: The 'most likely' value, statistically, the mode.
:param high: The highest... | (self, group):
"""
Add the newly come group into queue properly
:param group: the group watiing for entering into the queue
>>> g0=Group(12,2,False,0)
>>> q2=Queue()
>>> q2.add_queue(g0)
>>> len(q2.groups) # Test whether group is correctly added
1
... | add_queue | identifier_name |