query (string, 9–9.05k chars) | document (string, 10–222k chars) | negatives (list of strings, 19–20 items) | metadata (dict) |
|---|---|---|---|
Just to be sure the constants returned by the generator are reasonable and that the correct number of them is produced. | def test_generate_constants( self ) :
print( "test_generate_constants" )
entropy_bits = \
0xd262fbc7cbc7e757d16234bd7e88f12cc5dfef7c2ee82c9a4e289113d83d8724
n_prngs = 19
for integer_width in [ 64, 128, 256 ] :
for n_prngs in [ 7, 19, 31 ] :
con... | [
"def gen_z_val():\n while True:\n num = random.random()\n f = 1 / (1 + num)\n if random.random() <= f:\n return num",
"def gen_bouncy():\n\tglobal num\n\twhile True:\n\t\tnum+=1\n\t\tif (not is_increasing_number(num) and not is_decreasing_number(num)):\n\t\t\tyield num",
"def ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Just what it says: do we produce a good random table? Real randomness is not the goal of these tests; that is Dieharder's job for the components. This just makes sure nothing obviously stupid is wrong. Dieharder is part of the final acceptance test; these are simple software checks. | def test_generate_random_table( self ) :
print( "\ntest_generate_random_table" )
self.test_name = 'test_generate_random_table'
self.setUp()
str_random_table = generate_random_table( self.the_rnt, 4096, 64 )
# that is strings, so need an integer array
the_program = '\nN... | [
"def test_fair():\n die = Die()\n \n # Set the number of rolls\n rolls = 1000000\n \n # Create a dictionary keep tally\n tally={}\n for i in range(1,7):\n tally[i] =0\n #Roll the dice 'rolls' times\n for i in range(0,rolls):\n tally[die.roll()]+=1\n \n # Assert ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
checks the lcg crypto function for randomness at different sizes. These are complex and exhaustive tests. | def test_lcg_crypto( self ) :
print( "\ntest_lcg_crypto" )
self.test_name = 'test_lcg_crypto'
self.setUp() # setup() after setting test_name
check_function( self, LcgCrypto, self.the_rnt ) | [
"def check_function( self, the_function, the_rnt ) :\n print( the_function )\n print( \"vec_size int_width statesize p_lvl difficulty \" +\n \"duplicates zeros all ff's elapsed time byterate\" )\n sys.stdout.flush()\n\n function_return = True\n n_samples = self.difficulty * 64 * 1024\n r... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
checks the prng crypto function for randomness at different sizes. These are complex and exhaustive tests. | def test_prng_crypto( self ) :
print( "\ntest_prng_crypto" )
self.test_name = 'test_prng_crypto'
self.setUp() # setup() after setting test_name
check_function( self, PrngCrypto, self.the_rnt ) | [
"def testRandomLarge():\n simulateRandom(\n maxCaps=8,\n maxSpecs=12,\n maxResources=100,\n runsPerConfig=100,\n numConfigs=100,\n verifyFunc=checkValid,\n seed=int(time.time())\n )",
"def check_function( self, the_function, the_rnt ) :\n print( the_fu... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Function to calculate the number of days between today and the same day n months ago. | def ndays(nmonth=3):
today0 = datetime.now()
year3, month3 = (today0.year, today0.month - nmonth) if today0.month - nmonth >= 1 \
else (today0.year - 1, today0.month - nmonth + 12)
date3 = datetime(year3, month3, today0.day)
ndays = (today0 - date3).days
return ndays | [
"def days(n):\n return timedelta(days=n)",
"def previous_days(n, before=None):\n before = before or pendulum.today()\n return (before - before.subtract(days=n)).range('days')",
"def diff_dates():\n return abs((target_date - todays_date).days)",
"def get_days_diff(input_date: tuple) -> int:\n re... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
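The month-rollback arithmetic in `ndays` is easiest to check against a fixed date; a minimal sketch (the helper name and the fixed-date parameter are illustrative, since `ndays` itself always anchors at `datetime.now()`):

```python
from datetime import datetime

def days_since_n_months_ago(today: datetime, nmonth: int = 3) -> int:
    # Same rollback logic as ndays(), parameterized on `today` for a deterministic check.
    if today.month - nmonth >= 1:
        year, month = today.year, today.month - nmonth
    else:
        year, month = today.year - 1, today.month - nmonth + 12
    return (today - datetime(year, month, today.day)).days

print(days_since_n_months_ago(datetime(2024, 3, 15)))  # 91 (2023-12-15 to 2024-03-15)
```

Note that `datetime(year3, month3, today0.day)` raises `ValueError` when the target month is shorter than the current day (e.g. going back from May 31 to February), which the original does not guard against.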
Gets a random entry from this text file | def getEntry(path):
    l = makeList(path)
    return random.choice(l) | [
"def random_word():\n file_name = \"wordbank.txt\"\n number_of_lines = file_len(file_name) - 1\n target_line = randint(0, number_of_lines)\n target_word = linecache.getline(file_name, target_line)\n return target_word.replace(\"\\n\", \"\")",
"def pick_random_word():\n # open the sowpods dictionary... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Draws the overlay as a box. | def _overlay_box(self, component, gc):
if self._screen_start and self._screen_end:
with gc:
gc.set_antialias(0)
gc.set_line_width(self.border_size)
gc.set_stroke_color(self.border_color_)
gc.clip_to_rect(component.x, component.y, compon... | [
"def cover_box(self, x, y, width, height):\n pg.draw.rect(self.screen, (255, 255, 255), (x, y, width, height))",
"def _draw_box(self, dc, box):\n CIRCLE_RAD = 1 if self.scale == 1.0 else 3\n dc.SetBrush(wx.TRANSPARENT_BRUSH)\n dc.SetPen(wx.Pen(box.color, box.line_width))\n \n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Given start and end points in screen space, returns corresponding low and high points in data space. | def _map_coordinate_box(self, start, end):
low = [0,0]
high = [0,0]
for axis_index, mapper in [(0, self.component.x_mapper), \
(1, self.component.y_mapper)]:
# Ignore missing axis mappers (ColorBar instances only have one).
if not mapper... | [
"def start_stop_indices(t_pts, plot_start, plot_stop):\n start_index = (np.fabs(t_pts-plot_start)).argmin() # index in t_pts array \n stop_index = (np.fabs(t_pts-plot_stop)).argmin() # index in t_pts array \n return start_index, stop_index",
"def get_bounds( reads, start_pos_index, end_pos_index ):\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Verify that the image 'quacks like a spatialimage'. | def is_spatial_image(image: Any) -> bool:
if not isinstance(image, xr.DataArray):
return False
if not set(image.dims).issubset(_supported_dims):
return False
for dim in _spatial_dims.intersection(image.dims):
if not image.coords[dim].dtype == np.float64:
return False
... | [
"def checkForGeom(dataset):\n \n spatial = False\n if \"Shape\" in [f.name for f in arcpy.ListFields(dataset) if f.required]:\n spatial = True\n return spatial",
"def verify_aperture_img_shape(self):\n assert self.tpf[1].header['TDIM5'] == '({},{})'.format(self.tpf[2].header['NAXIS1'], s... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Convert the array-like to a spatial image. | def to_spatial_image(
array_like: Any,
dims: Optional[Sequence[Union["t", "z", "y", "x", "c"]]] = None,
scale: Optional[Union[Mapping[Hashable, float]]] = None,
translation: Optional[Union[Mapping[Hashable, float]]] = None,
name: str = default_name,
axis_names: Optional[Union[Mapping[Hashable, s... | [
"def from_ndarray(data):\n assert isinstance(data, np.ndarray), \"Input should be a Numpy array\"\n assert len(data.shape) == 3, \"Input data should be of shape (h, w, nc)\"\n h, w, nc = data.shape\n # Create a synthetic gdal dataset\n driver = gdal.GetDriverByName('MEM')\n itemsize = data[0, 0, 0... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Given the activations, perform the specified dimensionality reduction. | def reduce_activations(acts: np.ndarray, reduction: str = 'NMF', dim: int = 6) -> np.ndarray:
reducer = ChannelReducer(dim, reduction)
if reduction == 'NMF':
# NMF requires activations to be positive
acts = get_positive_activations(acts)
return reducer._reducer.fit_transform(acts) | [
"def dimensionality_reduction(samples_data, config):\n\n drs = {\n 'pca': dr_pca,\n 'tsne': dr_tsne,\n 'rfc': dr_rfc,\n 'irfc': dr_irfc\n }\n\n uuids = samples_data.index[samples_data['selected'] == 1].tolist()\n x_train = samples_data.index[samples_data['train'] == 1].tolist... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
If any activations are negative, return a twice-as-long positive array instead, with the originally positive values in the first half and the originally negative values in the second half. Essentially, this contains all the information in the original array, but in the form of a positive array. e.g. [1, 2, 3] -> [0, 2, 3,... | def get_positive_activations(acts: np.ndarray) -> np.ndarray:
if (acts > 0).all():
return acts
else:
return np.concatenate([np.maximum(0, acts), np.maximum(-acts, 0)], axis=-1) | [
"def reordering(array):\n negative = []\n positive = []\n while array:\n x = array.pop()\n if x < 0:\n negative.append(x)\n else:\n positive.append(x)\n negative.reverse()\n positive.reverse()\n return negative + positive",
"def __neg__(self):\n ret_... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
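To make the doubling concrete, a small NumPy demo of the same concatenation (the input values are illustrative):

```python
import numpy as np

acts = np.array([1.0, -2.0, 3.0])
# First half keeps the positive parts, second half keeps the negated negative parts.
doubled = np.concatenate([np.maximum(0, acts), np.maximum(-acts, 0)], axis=-1)
print(doubled)  # [1. 0. 3. 0. 2. 0.]
```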
Resolve a Subject by Bank Identification Number (BIN). | def by_idin_bin(self, bin):
dao = self.session.query(BankIdentificationNumber)\
.filter(BankIdentificationNumber.bin == bin)\
.one()
return self.dto({
'type': 'idin:bin',
'gsid': dao.gsid.hex,
}) | [
"def _asn_lookup(asn:str):\n \n response = ''\n \n try: \n \n request = requests.get(f\" https://whois.arin.net/rest/asn/AS{asn}\", headers={\"Content-Type\": 'text/plain', 'Accept': 'text/plain'})\n \n #ARIN doesnt return response codes. If not found, return text/html, el... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Resolve a Subject by using the fingerprint of an X.509 certificate, issued by a trusted Certification Authority (CA). | def by_x509_fingerprint(self, fingerprint):
dao = self.session.query(CertificateFingerprint)\
.filter(CertificateFingerprint.fingerprint == fingerprint)\
.one()
return self.dto({
'type': 'x509.fingerprint',
'gsid': dao.gsid.hex,
}) | [
"def der_cert_to_subject_hash(der_bytes):\n iterator = ASN1Iterator(der_bytes)\n iterator.step_into() # enter certificate structure\n iterator.step_into() # enter TBSCertificate\n iterator.step_over() # over version\n iterator.step_over() # over serial\n iterator.step_over() # over signature algorithm\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This hook is used to add a warehouse to existing companies when the stock module is installed. | def create_missing_warehouse(self):
company_ids = self.env['res.company'].search([])
company_with_warehouse = self.env['stock.warehouse'].with_context(active_test=False).search([]).mapped('company_id')
company_without_warehouse = company_ids - company_with_warehouse
for company in compa... | [
"def add_to_warehouse(self, warehouse):\n if self.is_assigned():\n assignment = self.current_property()\n assignment.active = False\n assignment.save()\n wf = self.warehousefurniture_set.create(warehouse=warehouse)\n wf.save()\n return wf",
"def add_ite... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Load raw data (image/mask) and resample to fixed resolution. | def resample_raw_image(self, mask_fname, patient_folder, binary=True):
m_nii_fname = os.path.join(patient_folder, mask_fname)
new_res = (1.37, 1.37)
print('Resampling %s at resolution %s to file %s' % (m_nii_fname, str(new_res), new_res))
im_nii = nib.load(m_nii_fname)
im_data = ... | [
"def resample(path, upscale_factor=2):\n with rasterio.open(path) as dataset:\n\n # resample data to target shape\n data = dataset.read(out_shape=(dataset.count,\n int(dataset.height * upscale_factor),\n int(dataset.width *... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get the next free index in the given dictionary. | def _get_next_free_index(d: dict) -> int:
if _NEXT_FREE_INDEX_KEY not in d:
d[_NEXT_FREE_INDEX_KEY] = 1
next_index = int(d[_NEXT_FREE_INDEX_KEY])
d[_NEXT_FREE_INDEX_KEY] = next_index + 1
return next_index | [
"def next_free_date_index(bit_vector):\n return bit_vector.index(0)",
"def get_index(self, key):\r\n index = self.horner_hash(key)\r\n j = 0\r\n for i in range(0, self.table_size):\r\n j = (index + i ** 2) % self.table_size\r\n if self.hash_table[j] and self.hash_tabl... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
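A minimal usage sketch, assuming `_NEXT_FREE_INDEX_KEY` is a module-level sentinel string as the code implies (its real value is not shown in this row):

```python
_NEXT_FREE_INDEX_KEY = "_next_free_index"  # assumed sentinel; the real constant is not shown

d: dict = {}
print(_get_next_free_index(d))  # 1 (key initialized on first use)
print(_get_next_free_index(d))  # 2 (counter advances on every call)
```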
Get all meta directives available. | def get_meta_directives() -> Dict[str, MetaDirective]:
directives = {} # type: Dict[str, MetaDirective]
# Helper functions to implement each meta directive.
def append_action(config_dict: dict, key: str, value: Any) -> None:
if key not in config_dict:
config_dict[key] = []
if ... | [
"def get_directives(self):\n return self.directives",
"def directives(self):\n directive_sources = chain(hug.defaults.directives.items(), getattr(self, '_directives', {}).items())\n return {'hug_' + directive_name: directive for directive_name, directive in directive_sources}",
"def extract... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Helper function to replace the given setting with the replacement if it is equal to target_setting. | def replace_if_target_setting(setting: str) -> str:
return replacement_setting if setting == target_setting else setting | [
"def assume_working_value_setting(self, setting, value, type_, source=\"direct\",\n desired_value=None):\n setting.set_value(value, source)\n self.assertEqual(setting.value, desired_value if desired_value is not None else value)\n self.assert_(isinstance(sett... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Perform substitutions for the given value. If value is a string, perform substitutions in the string. If value is a list, then perform substitutions in every string in the list. | def perform_subst(value: Union[str, List[str]]) -> Union[str, List[str]]:
newval = "" # type: Union[str, List[str]]
if isinstance(value, list):
newval = list(map(lambda input_str: subst_str(input_str, lambda key: config_dict[key]), value))
else:
newv... | [
"def apply_replaces(value, replaces):\n if isinstance(value, list):\n return list(map(lambda v: apply_replaces(v, replaces), value))\n\n for pattern, replacement in replaces.items():\n value = re.sub(pattern, replacement, value)\n\n return value",
"def _substitute(valueish, replacements, ro... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
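`subst_str` is not shown in this row; a minimal sketch of the `${...}` substitution it implies, assuming the helper takes a string and a key-to-value callback:

```python
import re
from typing import Callable

def subst_str(input_str: str, replace: Callable[[str], str]) -> str:
    # Replace every ${key} occurrence with replace("key").
    return re.sub(r"\$\{([^}]+)\}", lambda m: replace(m.group(1)), input_str)

config_dict = {"tool.name": "yosys"}
print(subst_str("run ${tool.name} now", lambda key: config_dict[key]))  # run yosys now
```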
Copy the contents of the referenced key for use as this key's value. If the reference is a list, then apply the crossref for each element of the list. | def crossref_action(config_dict: dict, key: str, value: Any) -> None:
if isinstance(value, str):
config_dict[key] = config_dict[value]
elif isinstance(value, list):
def check_and_get(k: Any) -> Any:
return config_dict[crossref_check_and_cast(k)]
confi... | [
"def reference(self):\n for termkey, termval in six.iteritems(self.terms):\n termval.relations.update(\n (relkey, TermList(\n (self.terms.get(x) or Term(x, '', '')\n if not isinstance(x, Term) else x) for x in relval\n )) for relk... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Transclude the contents of the file pointed to by value. | def transclude_action(config_dict: dict, key: str, value: Any) -> None:
assert isinstance(value, str), "Path to file for transclusion must be a string"
with open(value, "r") as f:
file_contents = str(f.read())
config_dict[key] = file_contents | [
"def expand(self,\n context,\n outputFile,\n outputEncoding=None,\n interpreter=None):\n # This method must wrap outputFile if required by the encoding, and write out\n # any template pre-amble (DTD, Encoding, etc)\n self.expandInline(cont... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Turn the value of the key (JSON list) into a list. | def json2list_action(config_dict: dict, key: str, value: Any) -> None:
assert isinstance(value, str), "json2list requires a JSON string that is a list"
parsed = json.loads(value)
assert isinstance(parsed, list), "json2list requires a JSON string that is a list"
config_dict[key] = parsed | [
"def __GetKeyValueAsList(self, key):\n if not self.request.has_key(key):\n return None\n\n key_value = self.request[key]\n # Check that the value is actually a list\n if not isinstance(key_value, list):\n return None\n\n return key_value",
"def _list(key: str, vals: dict) -> list:\n re... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Prepend the local path of the config dict. | def prependlocal_action(config_dict: dict, key: str, value: Any) -> None:
if isinstance(value, list):
new_values = []
for v in value:
new_values.append(os.path.join(config_dict[_CONFIG_PATH_KEY], str(v)))
config_dict[key] = new_values
else:
... | [
"def prepend_path(path):\n\n environ_path = get_paths()\n if path not in environ_path:\n environ_path.insert(0, path)\n environ[\"PATH\"] = \":\".join(environ_path)",
"def _expandPathInConfig(path,config):\r\n config[path] = os.path.expandvars(config[path])",
"def add_to_default_path(self, ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Perform a deep substitution on the value provided. This will replace any variables that occur in strings of the form ${...} and will also do a special meta replacement on keys which end in _deepsubst_meta. | def deepsubst_action(config_dict: dict, key: str, value: Any) -> None:
def do_subst(oldval: Any) -> Any:
if isinstance(oldval, str):
# This is just regular subst
return subst_str(oldval, lambda key: config_dict[key])
if isinstance(oldval, list):
... | [
"def deepsubst_targets(key: str, value: Any) -> List[str]:\n if isinstance(value, str):\n # This is just regular subst\n return subst_targets(key, value)\n if isinstance(value, (dict, list)):\n # Recursively find all strings\n def find_strings(x: Union[List,... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Look for all substitution targets (${...}) in value and return a list of the targets found. | def deepsubst_targets(key: str, value: Any) -> List[str]:
if isinstance(value, str):
# This is just regular subst
return subst_targets(key, value)
if isinstance(value, (dict, list)):
# Recursively find all strings
def find_strings(x: Union[List, Dict]) -> ... | [
"def get_ninja_targets(path):\n output = subprocess.check_output([\n CMAKE_EXECUTABLE, '--build', path, '--target', 'help'], cwd=path)\n lines = output.decode().splitlines()\n suffix = ':'\n return [\n line.split(' ')[0][:-len(suffix)]\n for line in lines\n if len(line.split(... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Unpack the given config_dict, flattening key names recursively. | def unpack(config_dict: dict, prefix: str = "") -> dict:
# We don't want an extra "." in the beginning.
real_prefix = "" if prefix == "" else prefix + "."
output_dict = {}
for key, value in config_dict.items():
if isinstance(value, dict):
output_dict.update(unpack(value, real_prefix ... | [
"def unpack(config_dict, prefix=\"\"):\n # We don't want an extra \".\" in the beginning.\n real_prefix = \"\" if prefix == \"\" else prefix + \".\"\n output_dict = {}\n for key, value in config_dict.items():\n if isinstance(value, dict):\n output_dict.update(unpack(value, real_prefix ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
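For illustration, flattening a small nested dict with `unpack` (assuming the truncated recursion assigns non-dict values under the prefixed key, as the verbatim copy among the negatives suggests):

```python
nested = {"foo": {"bar": "yes", "baz": {"qux": 1}}, "top": True}
print(unpack(nested))
# {'foo.bar': 'yes', 'foo.baz.qux': 1, 'top': True}
```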
Expand the meta directives for the given config dict and return a new dictionary containing the updated settings with respect to the base config_dict. | def update_and_expand_meta(config_dict: dict, meta_dict: dict) -> dict:
assert isinstance(config_dict, dict)
assert isinstance(meta_dict, dict)
newdict = deepdict(config_dict)
# Find meta directives.
meta_dict = deepdict(meta_dict) # create a copy so we can remove items.
meta_dict_keys = list... | [
"def get_meta_directives() -> Dict[str, MetaDirective]:\n directives = {} # type: Dict[str, MetaDirective]\n\n # Helper functions to implement each meta directive.\n def append_action(config_dict: dict, key: str, value: Any) -> None:\n if key not in config_dict:\n config_dict[key] = []\n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Internal keys that shouldn't show up in any final config. | def internal_keys() -> Set[str]:
return {_CONFIG_PATH_KEY, _NEXT_FREE_INDEX_KEY} | [
"def valid_config_keys():\n click.echo(', '.join(get_class_properties(PipelineConfig)))",
"def _pullup_keys(self):\n for k in [\n \"definitions\",\n \"providers\",\n \"handlers\",\n \"remote_vars\",\n \"template_vars\",\n \"terraform_vars... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get the database (get_config) in JSON form as a string. | def get_database_json(self) -> str:
# The cls=HammerJSONEncoder enables writing Decimals
return json.dumps(self.get_config(), cls=HammerJSONEncoder, sort_keys=True, indent=4, separators=(',', ': ')) | [
"def get_db_info() -> dict:\n env = os.environ['FLASK_ENV']\n if env == Environments.PRODUCTION.value:\n return {\n 'dbname': os.environ['DBNAME'],\n 'user': os.environ['DBUSER'],\n 'host': os.environ['DBHOST'],\n 'password': os.environ['DBPASSWORD'],\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Retrieve a key, first trying the suffixed variant and falling back to the base key if the suffixed one is not found. | def get_setting_suffix(self, key: str, suffix: str, nullvalue: Any = None, check_type: bool = True) -> Any:
default = key
override = default + "_" + suffix
value = None
try:
value = self.get_config()[override]
except:
try:
value = self.get... | [
"def get_bucket_key(bucket_name, uri):\n pos = uri.find(bucket_name) + len(bucket_name) + 1\n return uri[pos:]",
"def lookup(self, subcmd_prefix):\n for subcmd_name in self.subcmds.keys():\n if subcmd_name.startswith(subcmd_prefix) \\\n and len(subcmd_prefix) >= self.subcmds[... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Checks a setting for correct typing. | def check_setting(self, key: str, cfg: Optional[dict] = None) -> bool:
# Ignore all builtins
if any(key in unpack(builtin) for builtin in self.builtins):
return True
if cfg is None:
cfg = self.get_config()
if key not in self.get_config_types():
self.l... | [
"def _type_check(self, key):\n if self._type == \"I\" and isinstance(key,str):\n raise TypeError(\"STDict keys is set as type int()\")\n\n elif self._type == \"S\" and isinstance(key,int):\n raise TypeError(\"STDict keys is set as type str()\")\n else:\n return"... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Update the core config with the given core config. | def update_core(self, core_config: List[dict], core_config_types: List[dict]) -> None:
self.core = core_config
self.update_defaults(core_config)
self.update_types(core_config_types, True)
self.__config_cache_dirty = True | [
"def reload_core_config(opp):\n opp.services.call(ha.DOMAIN, SERVICE_RELOAD_CORE_CONFIG)",
"def _update_central_core_config(\n self, enabled: bool, delete_backups: bool\n ) -> json_api.system_settings.SystemSettings:\n api_endpoint = ApiEndpoints.central_core.settings_update\n request_o... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Update the tools config with the given tools config. | def update_tools(self, tools_config: List[dict], tool_config_types: List[dict]) -> None:
self.tools = tools_config
self.update_defaults(tools_config)
self.update_types(tool_config_types, True)
self.__config_cache_dirty = True | [
"def update_tool_configs(self) -> None:\n tools = reduce(lambda a, b: a + b, list(self.tool_configs.values()))\n self.database.update_tools(tools)",
"def update_cache(self, tools: Dict[str, Union[ToolInfo, str]]):\r\n with self.db.transaction():\r\n self.db.insert_tool_info([tools.... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Update the technology config with the given technology config. | def update_technology(self, technology_config: List[dict], technology_config_types: List[dict]) -> None:
self.technology = technology_config
self.update_defaults(technology_config)
self.update_types(technology_config_types, True)
self.__config_cache_dirty = True | [
"def update(self, name, config, etag):\n response = self._session.put(\n path=self._session.urljoin(self.RESOURCE_PATH, name).format(base_api=self.base_api),\n headers={\n 'Accept': self._accept_header(),\n 'Content-Type': 'application/json',\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Update the environment config with the given environment config. | def update_environment(self, environment_config: List[dict]) -> None:
self.environment = environment_config
self.__config_cache_dirty = True | [
"def update_config():\n g.config = app.config",
"def update_config(self):\n for key_name, entry in self.config.config.items():\n self.update_config_entry(key_name, entry)",
"def update_environment(environment_id, file):\n _confirm_account()\n\n evolv_client = EvolvClient(EVOLV_CONFIG)... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Update the project config with the given project config. | def update_project(self, project_config: List[dict]) -> None:
self.project = project_config
self.__config_cache_dirty = True | [
"def _update_project_config(self, path):\n projects_path = list(set(CONF.get('main', 'projects_path', [])))\n projects_path = list(projects_path)\n projects_path.append(path)\n CONF.set('main', 'projects_path', projects_path)\n self.load_projects()\n self.update_status('')"... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Update the builtins config with the given builtins config. | def update_builtins(self, builtins_config: List[dict]) -> None:
self.builtins = builtins_config
self.__config_cache_dirty = True | [
"def load_builtins_and_core(cls, database: hammer_config.HammerDatabase) -> None:\n\n # Load in builtins.\n builtins_yml = resources.files(\"hammer.config\") / \"builtins.yml\"\n database.update_builtins([\n hammer_config.load_config_from_string(builtins_yml.read_text(), True),\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Update the default configs with the given config list. This dict gets updated with each additional defaults config file. | def update_defaults(self, default_configs: List[dict]) -> None:
for c in default_configs:
self.defaults = add_dicts(self.defaults, unpack(c)) | [
"def load_defaults(defaults_file: list = []):\n cfg = Config(\"configs/default.yaml\")\n # cfg = cfg.update_config(Config(\"configs/dataset.yaml\"))\n for file in defaults_file:\n print(file)\n cfg = deep_update(cfg, Config(file))\n \n cfg = Opts(cfg).parse_args()\n \n cfg = load_e... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Update the types config with the given types config. | def update_types(self, config_types: List[dict], check_type: bool = True) -> None:
loaded_cfg = combine_configs(config_types)
self.__config_types.update(loaded_cfg)
if check_type:
for k, v in loaded_cfg.items():
if not self.has_setting(k):
self.log... | [
"def update_technology(self, technology_config: List[dict], technology_config_types: List[dict]) -> None:\n self.technology = technology_config\n self.update_defaults(technology_config)\n self.update_types(technology_config_types, True)\n self.__config_cache_dirty = True",
"def update_... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Load config from a string by loading it and unpacking it. | def load_config_from_string(contents: str, is_yaml: bool, path: str = "unspecified") -> dict:
unpacked = unpack(load_yaml(contents) if is_yaml else json.loads(contents))
unpacked[_CONFIG_PATH_KEY] = path
return unpacked | [
"def init_from_string(config_str):\n from StringIO import StringIO\n mconf = ConfigParser()\n mconf.readfp(StringIO(config_str))\n init_from_config(mconf)",
"def loadFromString(self, cfg_string):\n import StringIO\n fp = StringIO.StringIO(cfg_string)\n self.readfp(fp)\n fp.... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Load config from a package's defaults. | def load_config_from_defaults(package: str, types: bool = False) -> Tuple[List[dict], List[dict]]:
package_path = importlib.resources.files(package)
json_file = package_path / "defaults.json"
json_types_file = package_path / "defaults_types.json"
yaml_file = package_path / "defaults.yml"
yaml_types_... | [
"def _load_defaults(self):\n module = self._do_import(self._defaults_module_path)\n self._defaults = {\n k: v for k, v in module.__dict__.items()\n if k.isupper() # ignore anything that doesn't look like a setting\n }",
"def load_defaults(defaults_file: list = []):\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Parses a configuration type. | def parse_setting_type(setting_type: str) -> ConfigType:
m_prim = re.search(PRIMARY_REGEX, setting_type)
m_sec = re.search(INNER_REGEX, setting_type)
if m_prim is None:
raise RuntimeError("Not a valid configuration type")
primary_type = m_prim.group(0)
if primary_type == "Optional":
... | [
"def parse_config(self) -> None:\n # ToDO add support for Lists, Dicts, Datetimes, and Bool\n for s_name, sect_info in self.raw_cfg.items():\n sect = getattr(self, s_name.lower())\n for a_name, attribute in sect_info.items():\n if type(getattr(sect, a_name.lower())... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
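`PRIMARY_REGEX` and `INNER_REGEX` are not shown in this row; a hedged sketch of what such a type parser could look like for strings like `"Optional[int]"` or `"list[str]"` (the regexes and the return shape are assumptions):

```python
import re

PRIMARY_REGEX = r"^\w+"      # assumed: leading type name, e.g. "Optional" or "list"
INNER_REGEX = r"\[(\w+)\]"   # assumed: bracketed inner type, e.g. "int" in "Optional[int]"

def split_setting_type(setting_type: str):
    m_prim = re.search(PRIMARY_REGEX, setting_type)
    m_sec = re.search(INNER_REGEX, setting_type)
    if m_prim is None:
        raise RuntimeError("Not a valid configuration type")
    return m_prim.group(0), (m_sec.group(1) if m_sec else None)

print(split_setting_type("Optional[int]"))  # ('Optional', 'int')
print(split_setting_type("str"))            # ('str', None)
```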
Chop a message into chunks of max 1024 bytes incl. TIMEOUT and endings ADD/AYE | def prepare_command(message: str, timeout='5'):
packets = []
bodysize = 1024 - len(f'TIMEOUT {timeout} ') - len(' ADD')
chunks = ceil(len(message) / bodysize)
for chunk in range(0, chunks - 1):
data = b'TIMEOUT ' + timeout.encode() + b' '
data += message[:bodysize].encode()
dat... | [
"def chunk_message(self, msg):\n prev = 0\n while prev < len(msg):\n next = min(prev + self.maxMsgSize, len(msg))\n yield msg[prev:next]\n prev = next",
"def test_multibyte_delim():\n\n delim = b'\\r\\n'\n for with_delim in (True, False):\n if with_delim... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
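The `bodysize` arithmetic reserves room for the `TIMEOUT <n> ` prefix and the ` ADD` suffix, so every packet stays within 1024 bytes. A self-contained sketch of the chunking (the original loop is truncated above; the ` AYE` terminator on the final packet is an assumption taken from the docstring):

```python
from math import ceil

def chunk_command(message: str, timeout: str = '5') -> list:
    bodysize = 1024 - len(f'TIMEOUT {timeout} ') - len(' ADD')
    chunks = ceil(len(message) / bodysize)
    packets = []
    for i in range(chunks):
        body = message[i * bodysize:(i + 1) * bodysize]
        ending = b' AYE' if i == chunks - 1 else b' ADD'  # assumed final terminator
        packets.append(b'TIMEOUT ' + timeout.encode() + b' ' + body.encode() + ending)
    return packets
```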
Wrapper for selenium.webdriver.get to handle WebDriverException when "Failed to decode response from marionette" | def get(self, url: str):
try:
super(Browser, self).get(url)
except (WebDriverException, NoSuchWindowException) as e:
if 'Message: Failed to decode response from marionette' in str(e) or \
'Message: Browsing context has been discarded' in str(e):
sel... | [
"def test_get_gets_fake_url_without_internet():\n try:\n icl_b._get(\"http://fakeurl\")\n except Exception as e:\n assert isinstance(e, icl_e.ItaCovidLibConnectionError)",
"def test_get_html_not_defined(self):\n url = \"http://thispagedoesnotexists.com\"\n response, rtime = get_h... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Add new custom types that can be interpolated by this object. This method expects a dict that maps types (the keys) to their custom wrapper classes (the values). The wrapper classes must be a descendant of the Esc class. | def add_types(self, new_types):
self.type_map.update(new_types) | [
"def register(cls):\n if not hasattr(cls, \"__fromjson__\") or not hasattr(cls, \"__tojson__\"):\n raise KeyError(\"register: registered types must have a __fromjson__ method\")\n k = clsKey(cls)\n if k in _types:\n raise Exception(\"tinyjson: mutliple attempts to register class %s\" % k)\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Commit a SQL query that changes the database. | def __commit_query(self, SQLquery):
try:
cursor = self.cnx.cursor()
#execute the SQL change
if self.debug == True:
print("Executing following SQL command : " + SQLquery + " on db : " + self.dbname)
cursor.execute(SQLquery)
#commit change in db
self.cnx.commit()
return 0
except:
... | [
"def _do_commit(self):\n self.backend.commit()",
"def _commit(self):\n pass",
"def execute_commit(self, sql, data):\n self.execute(sql, data)\n self.commit()",
"def commit(self):\n\t\tself.dbConnection.commit()",
"def commit(self, session):\n session.commit()",
"def comm... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
populate a table with the provided values | def populate_table(self, table, values):
        # to be completed according to sqlite3 requirements
if self.platform == STATIC.PLATFORM_WINDOWS:
for value in values:
#print (str(value.MONTH) + " " + value.FLOW + " " + value.CONTRIB + " ")
# rev ex rox
table[STATIC.equivData["Rev"]][STATIC.equivFlow[va... | [
"def fill_table(self, table: ttk.Treeview, data: dict, **kwds) -> None:\r\n assert len(data) > 0, 'wrong data passes to the table'\r\n tag = kwds.get('tag', 'default')\r\n for values in data:\r\n table.insert('', END, values=(values,), tags=tag)",
"def _fill_table(self, table, gen)... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Builds ParameterContainer object that holds ParameterNode objects with attribute name-value pairs and optional details. | def _build_parameters(
self,
parameter_container: ParameterContainer,
domain: Domain,
*,
variables: Optional[ParameterContainer] = None,
parameters: Optional[Dict[str, ParameterContainer]] = None,
):
validator: Validator = self.get_validator(
domai... | [
"def setup_Component_with_parameters():\n comp = setup_Component_all_keywords()\n\n comp._unfreeze()\n # Need to set up attribute parameters\n comp.new_par1 = 1.5\n comp.new_par2 = 3\n comp.new_par3 = None\n comp.this_par = \"test_val\"\n comp.that_par = \"\\\"txt_string\\\"\"\n # also ne... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Capture comment and redirect to movie page. | def post_movie_comment():
today = date.today()
comment_date = "%d %s %d" % (today.day, month_name[today.month],
today.year)
comment = Comment(comment_date, request.form["name"],
request.form["text"])
title_id = int(request.form["title_id"])
mov... | [
"def post_reply(assignment_name, file_name, comment_id):\n\t\t# grab user input from submitted form\n\t\tcomment_data = request.form['comment']\n\t\tcomment = Comment(file_name, comment_id, comment_data)\n\t\t# apply filter to comment\n\t\tcomment.apply_filter()\n\t\t# propogate changes to db\n\t\tdb.session.add(co... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Check whether the journal's check sequence is defined. | def _check_journal_seq(self, journal_id, context=None):
if not journal_id.check_sequence:
raise osv.except_osv(_('Warning'),_('Please add "Check Sequence" for journal %s')%(journal_id.name))
return True | [
"def _is_sequence(self, ddl):\n m_seqs = self._find_seq.search(ddl)\n return m_seqs is not None",
"def hasSequence(self):\n if os.path.exists(self.sequencePath):\n return True\n else:\n return False",
"def check_sequence(sequencer: dict) -> str:\n sequence = ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Change the wizard state to "reprint". | def reprint_new_next(self, cr, uid, ids, context=None):
this = self.browse(cr, uid, ids)[0]
self.write(cr, uid, ids, {'state': 'reprint'}, context=context)
return {
'type': 'ir.actions.act_window',
'res_model': 'account.check.print.wizard',
'view_mode': 'form... | [
"def on_wizard_finish(self, wizard):\r\n pass",
"def finish_printing():\n set_extruder_temp(0,0)\n set_extruder_temp(0,1)\n set_bed_temp()\n drop_bed(100)\n home_axis('x')\n disable_motors()",
"def ToggleProgressPrinting():\n SetProgressPrintingEnabled(not GetProgressPrintingIsEnable... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This method creates a new Check Payment or updates the Check No. | def check_payment(self, cr, uid, ids, context=None):
data = self.browse(cr, uid, ids, context=context)[0]
check_log_pool = self.pool.get('check.log')
sequence_pool = self.pool.get('ir.sequence')
move_pool = self.pool.get('account.move')
voucher_pool = self.pool.get('account.vouc... | [
"def stepCreateCheckPayment(self, sequence=None, sequence_list=None, **kwd):\n self.check_payment = self.check_payment_module.newContent(id = 'check_payment',\n portal_type = 'Check Payment',\n destination_payment_value = self.bank_account_1,\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This method checks some constraints before printing a check from a Journal Entry. 1. Move state must be posted. 2. Move Journal must allow check writing. 3. Cheque must be paid from a cash account. 4. Move Lines must have partner_id (Beneficiary). 5. Cheque must be paid to only one partner. | def check_move_data(self, cr, uid, ids, context=None):
move_line_pool = self.pool.get('account.move.line')
move = self.pool.get('account.move').browse(cr, uid, context.get('active_id',[]), context=context)
if move.state != 'posted':
raise osv.except_osv(_('Warning'), _('Payment i... | [
"def check_payment(self, cr, uid, ids, context=None):\n data = self.browse(cr, uid, ids, context=context)[0]\n check_log_pool = self.pool.get('check.log')\n sequence_pool = self.pool.get('ir.sequence')\n move_pool = self.pool.get('account.move') \n voucher_pool = self.pool.get('ac... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This method deletes a printed check. It deletes the chk_seq value in the payment and sets the check status in the check log to "delete". | def do_delete(self, cr, uid, ids, context=None):
data = self.browse(cr, uid, ids, context=context)[0]
voucher_pool = self.pool.get('account.voucher')
move_pool = self.pool.get('account.move')
move_line_pool = self.pool.get('account.move.line')
check_log_pool = self.pool.get('che... | [
"def delete_check(self, entity, check):\r\n uri = \"/%s/%s/checks/%s\" % (self.uri_base, utils.get_id(entity),\r\n utils.get_id(check))\r\n resp, resp_body = self.api.method_delete(uri)",
"def delete_check(self, entity, check):\r\n return self._entity_manager.delete_check(entit... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
rest_framework can't deal with ManyToMany relations that have a through table. In xos, most of the through tables we have use defaults or blank fields, so there's no reason why we shouldn't be able to save these objects. So, let's strip out these m2m relations, and deal with them ourselves. | def NEED_TO_UPDATE_save_object(self, obj, **kwargs):
obj._complex_m2m_data={};
if getattr(obj, '_m2m_data', None):
for relatedObject in obj._meta.get_all_related_many_to_many_objects():
if (relatedObject.field.rel.through._meta.auto_created):
# These are n... | [
"def save_m2m(self, bundle):\n\n\n for field_name, field_object in self.fields.items():\n\n if not getattr(field_object, 'is_m2m', False):\n continue\n\n if not field_object.attribute:\n continue\n\n if field_object.readonly:\n con... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Reads the table with the help of the data_manager module. Returns the title (str) of the item with the given id (str) or None in case of a non-existing id. | def get_title_by_id(id_):
sales_table = data_manager.get_table_from_file("sales/sales.csv")
title = get_title_by_id_from_table(sales_table, id_)
    return title | [
"def find_item_title(item_id):\n if item_id == 'UNKNOWN_ID':\n return None\n if item_id in ITEM_CACHE['titles']:\n return ITEM_CACHE['titles'][item_id]\n item_obj = ff_utils.get_metadata(item_id, ff_env='data', add_on='frame=object')\n title = item_obj.get('display_title')\n ITEM_CACHE[... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Reads the table with the help of the data_manager module. Returns the _id_ of the item that was sold most recently. | def get_item_id_sold_last():
# your code
sales_table = data_manager.get_table_from_file("sales/sales.csv")
item_id = get_item_id_sold_last_from_table(sales_table)
return item_id | [
"def get_item_id_sold_last():\n table = data_manager.get_table_from_file(sales_file)\n\n recently_sold = (0, 0)\n\n for line, games in enumerate(table):\n if len(games[3]) == 1:\n month = '0' + str(games[3])\n else:\n month = str(games[3])\n\n if len(games[4]) == ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Reads the table of sales with the help of the data_manager module. Returns the sum of the prices of the items in the item_ids. | def get_the_sum_of_prices(item_ids):
# your code
table = data_manager.get_table_from_file("sales/sales.csv")
return get_the_sum_of_prices_from_table(table, item_ids) | [
"def get_the_sum_of_prices_from_table(table, item_ids):\n\n # your code\n ID_INDEX = 0\n PRICE_INDEX = 2\n sum_of_prices = 0\n for row in table:\n for single_id in item_ids:\n if single_id == row[ID_INDEX]:\n sum_of_prices += int(row[PRICE_INDEX])\n return sum_of_p... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns the sum of the prices of the items in the item_ids. | def get_the_sum_of_prices_from_table(table, item_ids):
# your code
ID_INDEX = 0
PRICE_INDEX = 2
sum_of_prices = 0
for row in table:
for single_id in item_ids:
if single_id == row[ID_INDEX]:
sum_of_prices += int(row[PRICE_INDEX])
return sum_of_prices | [
"def get_the_sum_of_prices(item_ids):\n\n # your code\n table = data_manager.get_table_from_file(\"sales/sales.csv\")\n return get_the_sum_of_prices_from_table(table, item_ids)",
"def calc_total_price(items):\n total_price = 0\n for item in items:\n total_price += item.get('price') * item.ge... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
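A quick usage check of the table variant above (the row layout follows the constants in the code: id at index 0, price at index 2; the sample rows are made up):

```python
table = [
    ["kH34Ju#&", "Snowrunner", "1000", "4", "17", "2021"],
    ["j8iu@2lk", "Factorio", "1500", "8", "25", "2020"],
]
print(get_the_sum_of_prices_from_table(table, ["kH34Ju#&", "j8iu@2lk"]))  # 2500
```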
Reads the sales table with the help of the data_manager module. Returns the customer_id that belongs to the given sale_id or None if no such sale_id is in the table. | def get_customer_id_by_sale_id(sale_id):
table = data_manager.get_table_from_file("sales/sales.csv")
return get_customer_id_by_sale_id_from_table(table, sale_id) | [
"def get_sale_by_id(self, sale_id):\n self.prod_id = sale_id\n self.cursor.execute(\n \"Select * from sales where sales_id = %s\",\n (self.prod_id,)\n )\n sale = self.cursor.fetchone()\n return sale",
"def get_sale(sales_id):\n #use Sales instance to cal... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns a set of customer_ids that are present in the table. | def get_all_customer_ids_from_table(table):
all_id = set()
for row in table:
all_id.add(str(row[-1]))
ui.print_result(all_id, "All customers ID: ")
return all_id | [
"def get_all_customer_ids_from_table(table):\n customer_ids = set()\n for row in table:\n id_customer = str(row[0])\n customer_ids.add(id_customer)\n\n return customer_ids # sales_comtroller print the table of this set",
"def load_customer_ids(data_main):\n data_main = data_main.dropna(... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Reads the customer-sales association table with the help of the data_manager module. | def get_all_sales_ids_for_customer_ids():
# your code
sales_table = data_manager.get_table_from_file("sales/sales.csv")
return get_all_sales_ids_for_customer_ids_from_table(sales_table) | [
"def fetchAllCustomers():\n return CustomerDao().fetch_all_customers()",
"def get_associations(customer_id):\n customer = CustomerController.get(customer_id)\n\n if not customer:\n return jsonify(error='Customer not found'), 500\n\n associations = CustomerController.get_associations(custome... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Use ``git ls-remote`` to list branches and tags without cloning the repository. | def lsremote(self, include_tags=True, include_branches=True):
if not include_tags and not include_branches:
return [], []
extra_args = []
if include_tags:
extra_args.append("--tags")
if include_branches:
extra_args.append("--heads")
cmd = ["g... | [
"def ls_remote(remote, config=None, **kwargs):\n if config is None:\n config = StackedConfig.default()\n client, host_path = get_transport_and_path(remote, config=config, **kwargs)\n return client.get_refs(host_path)",
"def git_ls_remote_tags(url):\n return [os.path.basename(line.split(\"\\t\")... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
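The command construction is truncated above; a hedged standalone sketch using only standard `git ls-remote` flags (`--tags`, `--heads`) and a plain subprocess call:

```python
import subprocess

def lsremote_sketch(url: str, include_tags: bool = True, include_branches: bool = True):
    extra_args = []
    if include_tags:
        extra_args.append("--tags")
    if include_branches:
        extra_args.append("--heads")
    output = subprocess.check_output(["git", "ls-remote", *extra_args, url], text=True)
    # Each line is "<sha>\t<refname>", e.g. "abc123\trefs/heads/main".
    return [line.split("\t") for line in output.splitlines() if line]
```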
r""" Return an iterable of submodule paths in this repository. In order to get the submodules paths without initializing them, we parse the .gitmodules file. For this we make use of the ``git config getregexp`` command. Keys and values from the config can contain spaces. In order to parse the output unambiguously, we u... | def submodules(self) -> Iterable[str]:
exit_code, stdout, _ = self.run(
"git",
"config",
"--null",
"--file",
".gitmodules",
"--get-regexp",
# Get only the path key of each submodule.
r"^submodule\..*\.path$",
... | [
"def parse_gitmodule(path):\n rel_path_subm = []\n regex = r\"^path = \"\n with open(os.path.join(path, \".gitmodules\")) as f:\n for line in f:\n line = line.strip()\n match = re.search(regex, line)\n if match:\n rel_path_subm.append(re.sub(regex, '',... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Checkout all repository submodules. If submodules is empty, all submodules will be updated. | def checkout_submodules(self, submodules: list[str], recursive: bool):
self.run('git', 'submodule', 'sync')
cmd = [
'git',
'submodule',
'update',
'--init',
'--force',
]
if recursive:
cmd.append("--recursive")
... | [
"def git_update_all(root_path=os.path.expanduser('~')):\n\n start_time_sec = time.time()\n git_util.git_logger.info('git_update_all() : start')\n updater = GitRepositoryUpdater(root_path, 'config')\n updater.recursively_find_in()\n git_util.git_logger.info('git_update_all() : end')\n git_util.git_... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return whether the light supports brightness. | def supports_brightness(self):
return self.dimmer.initialized | [
"def supports_dimmer(self) -> bool:\n return bool(self.supported_features & SUPPORT_BRIGHTNESS)",
"def brightness(self) -> int:\n light_brightness = self._device.light_brightness * 16\n if light_brightness == 256:\n light_brightness = 255\n return int(light_brightness)",
"... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
check answer for errors. VALIDATION RULES: proper columns; contiguous intragroup order, starting from 1; all intragroup ordered groups must be fully specified; groups P and L must be at first and last(s) positions respectively; P group must have only 1 member, no intragroup order; L group can never have intragroup order; only ... | def errorCheckMaster( self, answer):
self.errorCheckSubmission( answer )
for colName in ["Grouping", "IntraGroupOrder", "GroupOrder"]:
assert colName in answer.columns, "We need a %s column in the master spreadsheet" % colName | [
"def _verify_groups_syntax(groups):\n num_errors = 0\n num_warnings = 0\n \n for key, value in groups.items():\n if \"instances\" in value: # Templates\n if type(value[\"instances\"]) != int:\n logging.error(\"Instances must be an Integer for group %s\", key)\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
check answer for errors VALIDATION RULES proper columns | def errorCheckSubmission( self, answer):
for colName in ["Code", "Convention", "GroupOrder"]:
assert colName in answer.columns, "We need a %s column in the master spreadsheet" % colName | [
"def _validate_inputs(self,col_list):\n if not set(col_list).difference(self.raw_data.columns):\n print 'Columns is ok,Begin to Run....'\n else:\n raise ValueError('''The columns not in data's columns ''')",
"def errorCheckMaster( self, answer):\n self.er... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
return a mark and a marked-up submission, the latter ready to write back to an Excel file. Algorithm: find groups and create groupStart array, groupOrder, groupMarking columns??? check intragroup order; check group order | def mark( self, submission):
""" did the student not submit anything with this name?"""
if submission is None or len(submission)==0:
submission = pd.DataFrame( columns = self.ma.columns)
#return (pd.DataFrame(), 0, pd.DataFrame())
submission = self.dataClean( submission... | [
"def markGroupOrder(self,submission):\n\n \"\"\" make sure that there exist groupOrders in the answer\"\"\"\n groupOrder = self.ma.ix[ notblank(self.ma.GroupOrder),(\"Grouping\",\"GroupOrder\")]\n if len( groupOrder ) == 0:\n return submission\n\n \"\"\" find out where these g... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
subCodes: all the student submission codes; search: the model answer group slice we are searching for in subCodes. Find the maximum-length set of codes in search which match somewhere in the student subCodes: try to find some group in subCodes, of length len(codes)..2; the most important thing is to match the longest length ... | def findSlice( self, subCodes, search):
searchSet=set(search)
for searchSliceLen in range(len(search), 0, -1):
# go through the student answer, from start to end
for startPos in range(0, len(subCodes) - searchSliceLen + 1 ):
# first, look for a contigious match
... | [
"def is_subsequence2(s, w, y, debug=True):\n n, m = len(s), len(w)\n i, j = 0, 0\n e = 0\n sufficient_len = lambda i, j: (n - i) - (m - j) >= 0\n while i < n and j < m and sufficient_len(i, j):\n\n if debug: print(\"j={}, w[j] = {}\".format(j, w[j]))\n\n # l is the list of index in s fo... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
for each code in submission, mark if it has the correct prefix. Assume all submissions are incorrect; mark those that are correct | def markPrefix(self,submission):
label='Prefix?'
submission = self.addColumn( submission, label )
submission.loc[:,label]="Not Correct"
if not 'Prefix' in submission.columns:
return submission
prefixes = submission.ix[:,("Code","Prefix")]
prefixes.columns = [ ... | [
"def markUnorderedGroups(self,submission):\n\n maGroups= self.ma[ eAnd( isblank(self.ma.IntraGroupOrder), notblank(self.ma.Grouping)) ].Grouping.unique()\n\n # P and L groups are taken care of by absoluteOrdering routine. Different marks too\n #maGroups = set(maGroups).difference( set(\"P\", ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
for each code in submission, mark if it has the correct convention | def markConvention(self,submission):
label='Convention?'
submission = self.addColumn( submission, label )
submission.loc[:,label]="Not Correct"
if not 'Convention' in submission.columns:
return submission
conventions = submission.ix[:,("Code","Convention")]
c... | [
"def markUnorderedGroups(self,submission):\n\n maGroups= self.ma[ eAnd( isblank(self.ma.IntraGroupOrder), notblank(self.ma.Grouping)) ].Grouping.unique()\n\n # P and L groups are taken care of by absoluteOrdering routine. Different marks too\n #maGroups = set(maGroups).difference( set(\"P\", ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
single interruption ok; any group larger than size 1 ok. Algorithm: for each unorderedGroup (i.e., no intragroup order), for each slice S of size length(uog): if uog − S = null set, we have a winner; mark group correct | def markUnorderedGroups(self,submission):
maGroups= self.ma[ eAnd( isblank(self.ma.IntraGroupOrder), notblank(self.ma.Grouping)) ].Grouping.unique()
# P and L groups are taken care of by absoluteOrdering routine. Different marks too
#maGroups = set(maGroups).difference( set("P", "L"))
... | [
"def final_penguins_num2(game, ice, my_arrival_turn=-1, groups=[]):\n if ice in game.get_my_icebergs():\n status = \"mine\"\n elif ice in game.get_neutral_icebergs():\n status = \"neutral\"\n else:\n status = \"enemy\"\n my_penguin_amount = ice.penguin_amount\n if status == \"ene... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
for each non-null GroupOrder, find out the group; for each group, make sure the subSlice for this group is consecutive | def markGroupOrder(self,submission):
""" make sure that there exist groupOrders in the answer"""
groupOrder = self.ma.ix[ notblank(self.ma.GroupOrder),("Grouping","GroupOrder")]
if len( groupOrder ) == 0:
return submission
""" find out where these groups live in the submiss... | [
"def testNGroupSplit(self):\n # Test 2 groups like HalfSplitter first\n hs = NGroupSplitter(2)\n hs_reversed = NGroupSplitter(2, reverse=True)\n\n for isreversed, splitter in enumerate((hs, hs_reversed)):\n splits = list(splitter(self.data))\n self.failUnless(len(sp... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Computes the number of cells that lie within a certain distance of a given query cell. Biologically, this is implemented by feedback signals (e.g. biomolecules that are secreted and later absorbed). | def number_cells_within_range(query_cell, cells, signaling_range):
if len(cells) == 0:
return 0
else:
query_cell_position = np.array([query_cell.position])
cell_positions = np.array([cell.position for cell in cells])
query_cell_distances = cdist(query_cell_position, cell_positio... | [
"def get_number_of_rooms_at_least_distance_away(self, distance: int) -> int:\n return len([_ for d, _ in self.graph.get_node_distances(self.origin) if d >= distance])",
"def compute_distance(ix: int, c: int) -> float:\n if c == cell.FREE:\n nearest_occupied: Optional[\n Tup... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
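The row is cut off right after the `cdist` call; under the stated semantics (count the cells whose distance to the query cell is at most `signaling_range`), a plausible completion looks like this (the function and variable names are illustrative):

```python
import numpy as np
from scipy.spatial.distance import cdist

def count_within_range(query_position, positions, signaling_range) -> int:
    # positions: (N, d) array of cell coordinates; query_position: length-d vector.
    if len(positions) == 0:
        return 0
    distances = cdist(np.atleast_2d(query_position), np.asarray(positions))[0]
    return int(np.sum(distances <= signaling_range))
```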
Visualize how the self-renewal probability of a given (cycling) cell depends upon the number of cycling and quiescent cells in its vicinity | def plot_self_renewal_probability():
def self_renewal_probability(x, y):
from read import read_into_dict
parameterValues = read_into_dict('parameterValues.in')
self_renewal_probability_max = parameterValues['self_renewal_probability_max']
return self_renewal_probability_max * W_posi... | [
"def PlotIdealCellGrowth():\n\n # Make vectors:\n def MakeVectors(cells_start, doubling_time):\n movie_time = 80\n cell_list = [cells_start]\n time_list = [0]\n\n while time_list[-1] < movie_time:\n cell_list.append(cell_list[-1] * 2)\n time_list.append(time_l... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Given a file in appropriate format, returns the triple (feature_vectors, patient_diagnoses, D) feature_vectors is a dictionary that maps integer patient identification numbers to D-vectors where D is the set of feature labels, and patient_diagnoses is a dictionary mapping patient identification numbers to {+1, -1}, where... | def read_training_data(fname, features=None):
file = open(fname)
params = ["radius", "texture", "perimeter","area","smoothness","compactness","concavity","concave points","symmetry","fractal dimension"];
stats = ["(mean)", "(stderr)", "(worst)"]
feature_labels = set([y+x for x in stats for y in params])... | [
"def read_training_data(fname, D=None):\n file = open(fname)\n params = [\"radius\", \"texture\", \"perimeter\",\"area\",\"smoothness\",\"compactness\",\"concavity\",\"concave points\",\"symmetry\",\"fractal dimension\"];\n stats = [\"(mean)\", \"(stderr)\", \"(worst)\"]\n feature_labels = set([y+x for ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
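read_training_data is truncated before its parsing loop. As a hedged illustration of the shape of the result, here is a sketch that parses WDBC-style rows (id, M/B diagnosis, then feature values) into the two dictionaries the docstring promises — using plain dicts instead of the D-vectors the original likely builds, and assuming the value order matches sorted feature labels:

```python
def parse_training_rows(lines, feature_labels):
    # Each line: patient_id,diagnosis(M/B),v1,v2,...
    feature_vectors, diagnoses = {}, {}
    labels = sorted(feature_labels)
    for line in lines:
        fields = line.strip().split(",")
        pid = int(fields[0])
        diagnoses[pid] = +1 if fields[1] == "M" else -1   # malignant -> +1
        feature_vectors[pid] = dict(zip(labels, map(float, fields[2:])))
    return feature_vectors, diagnoses, set(labels)

fv, dg, D = parse_training_rows(["8510426,B,13.54,14.36"],
                                {"radius(mean)", "texture(mean)"})
print(dg[8510426], fv[8510426])  # -1 {'radius(mean)': 13.54, 'texture(mean)': 14.36}
```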
Adds an entry to serializer_format_dict, mapping the given format (key) to the given creator object (value). | def register_format(self, format, creator):
self.serializer_format_dict[format] = creator | [
"def add_format(self, key, value):\n # type: (str, str) -> None\n self.format_fields[key] = value",
"def register_format(self, serializer):\n self._serializers[serializer.format] = serializer",
"def add_to_format(existing_format, dict_of_properties, workbook):\n new_dict = {}\n for ke... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Retrieves the creator object to instantiate, based on the given serializer format string. | def get_serializer(self, format):
creator = self.serializer_format_dict.get(format.upper())
if not creator:
raise ValueError(format)
return creator() | [
"def get_serializer(self, format):\n serializer = self._serializers.get(format)\n if not serializer:\n raise ValueError(format)\n return serializer()",
"def make_primitive_serializer(format):\n structure = struct.Struct(format)\n def loads(b):\n return structure.unpack... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
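Taken together, register_format and get_serializer form a small factory registry. One detail worth noting: get_serializer upper-cases the lookup key, so formats must be registered under upper-case names. A minimal usage sketch (SerializerFactory and JsonSerializer are illustrative names, not from the original source):

```python
import json

class SerializerFactory:
    def __init__(self):
        self.serializer_format_dict = {}

    def register_format(self, format, creator):
        self.serializer_format_dict[format] = creator

    def get_serializer(self, format):
        creator = self.serializer_format_dict.get(format.upper())
        if not creator:
            raise ValueError(format)
        return creator()   # return a fresh instance, not the class

class JsonSerializer:
    def serialize(self, obj):
        return json.dumps(obj)

factory = SerializerFactory()
factory.register_format("JSON", JsonSerializer)   # key stored upper-case
print(factory.get_serializer("json").serialize({"a": 1}))  # {"a": 1}
```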
Write list of times to output file 'image_upload_times.csv' | def WriteUploadTimes(self, ui_times):
with open('image_upload_times.csv', 'w', newline='') as file1:
ui_writer = csv.writer(file1)
ui_writer.writerow(['Camera Upload Times'])
for i in ui_times:
print(i)
ui_writer.writerow(i)
... | [
"def WriteVideoQueryTimes(self, vq_times):\n with open('image_upload_times.csv', 'w', newline='') as file1:\n ui_writer = csv.writer(file1)\n ui_writer.writerow(['Camera Upload Times'])\n for i in vq_times:\n ui_writer.writerow(i)\n \n fil... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Write list of times to output file 'video_query_times.csv' | def WriteVideoQueryTimes(self, vq_times):
        with open('video_query_times.csv', 'w', newline='') as file1:
            vq_writer = csv.writer(file1)
            vq_writer.writerow(['Video Query Times'])
            for i in vq_times:
                vq_writer.writerow(i) | [
"def WriteUploadTimes(self, ui_times):\n with open('image_upload_times.csv', 'w', newline='') as file1:\n ui_writer = csv.writer(file1)\n ui_writer.writerow(['Camera Upload Times'])\n for i in ui_times:\n print(i)\n ui_writer.writerow(i)\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
request; modelname refers to the table model name; tableheaderdict refers to the table header names in dict format {}; filterquery refers to the query filter in dict format {}. Returns a paginated list of the table. | def model_table(request, modelname, tableheaderdict, filterquery = {}, paginations = 10):
modeltable = modelname.objects.filter(delete_field='no', **filterquery)
paginator = Paginator(modeltable, paginations, 1)
page = request.GET.get('page')
try:
list_table = paginator.page(page)
except PageNotAnInteger:
# ... | [
"def model_query(model: db.Model) -> List[dict]:\n result = []\n fields = ['spin_mode', 'basis_set', 'method', 'method_family', 'program', 'version', 'solvation', 'solvent',\n 'embedding', 'periodic_boundaries', 'external_field', 'temperature', 'electronic_temperature']\n for field in fields:\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
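model_table is cut off inside its except clause; the conventional Django completion handles both PageNotAnInteger and EmptyPage. A sketch of that idiom (paginate is a hypothetical stand-in; the original also filters on delete_field='no'):

```python
from django.core.paginator import Paginator, PageNotAnInteger, EmptyPage

def paginate(queryset, page, per_page=10):
    paginator = Paginator(queryset, per_page)
    try:
        return paginator.page(page)
    except PageNotAnInteger:
        return paginator.page(1)                    # non-integer page -> first page
    except EmptyPage:
        return paginator.page(paginator.num_pages)  # out of range -> last page
```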
This function will run inference on the image at the given path | def run_inference(self, path):
self.vgg_model.eval()
if use_gpu:
self.vgg_model = self.vgg_model.cuda()
img = Image.open(path).convert('RGB').copy()
# img = img.resize((900, 1200))
img = np.asarray(img)
shape = img.shape
img = img[:, :, ::-1] # switch... | [
"def inference(path, model_inf):\n inference_dataset = ImageDetectionDataset()\n inference_dataset.load_inference_classes()\n class_names = inference_dataset.get_class_names()\n\n define_path(path, model_inf, class_names)",
"def run_on_path(model, image_path):\n\n with Image.open(image_path) as img... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
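run_inference is truncated just after the RGB-to-BGR flip. The remaining preprocessing typically transposes to channels-first and adds a batch dimension; a hedged sketch of that step (any mean subtraction or scaling the original applies is omitted here, and the helper name is invented):

```python
import numpy as np
from PIL import Image
import torch

def preprocess(path):
    # RGB -> BGR channel flip as in the snippet, then HWC -> CHW float tensor
    img = np.asarray(Image.open(path).convert("RGB"), dtype=np.float32)
    img = img[:, :, ::-1].copy()               # switch RGB to BGR
    img = img.transpose(2, 0, 1)               # channels first
    return torch.from_numpy(img).unsqueeze(0)  # add batch dimension
```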
Low Pass Filter With Gain | def lowPassFilterWithGain(inputSignal, freqCutoff, freqSampling, gain):
samplingPeriod = 1/freqSampling
filteredSignal = np.zeros_like(inputSignal)
alpha = (2 * np.pi * freqCutoff * samplingPeriod) / (2 * np.pi * freqCutoff * samplingPeriod + 1)
filteredSignal[0] = alpha * inputSignal[0] + ga... | [
"def gyroLowPassFilter( bandwidth=None ):\n if bandwidth and bandwidth in [0,1,2,3,4,5,6,7]:\n i2c.writeto_mem(0x68, 0x1A, pack('b',\n (i2c.readfrom_mem(0x68, 0x1A, 1)[0] & ~7 ) | bandwidth\n ))\n return i2c.readfrom_mem(0x68, 0x1A, 1)[0] & 7",
"def low_pass_filter(self, low_pass_filter... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
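lowPassFilterWithGain is cut off mid-recurrence. The alpha shown is the standard first-order IIR coefficient 2*pi*fc*dt / (2*pi*fc*dt + 1); one plausible reading of the gained recurrence, as a standalone sketch (where exactly the original applies the gain is not visible, so this is an assumption):

```python
import numpy as np

def low_pass_filter_with_gain(x, freq_cutoff, freq_sampling, gain):
    dt = 1.0 / freq_sampling
    alpha = (2 * np.pi * freq_cutoff * dt) / (2 * np.pi * freq_cutoff * dt + 1)
    y = np.zeros_like(x, dtype=float)
    y[0] = gain * alpha * x[0]
    for n in range(1, len(x)):
        # first-order IIR: new sample weighted by alpha, history by (1 - alpha)
        y[n] = gain * alpha * x[n] + (1 - alpha) * y[n - 1]
    return y
```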
Takes the tangential distance from the next point with the current angle, divides by dist so it has more effect when closer, and the difference in angle, to get the error. measured is of the form (x1, y1, theta); set_v is of the form (x2, y2). | def error_finder(measured, set_v):
x1, y1, theta1 = measured
x2, y2 = set_v
dist = ((x1 - x2) ** 2 + (y1 - y2) ** 2) ** (0.5)
x = (y2 - y1)
alpha = math.asin(x / dist)
beta = theta1 - alpha
tangent = math.sin(beta) * dist
dC = -5.0
# tC1 = -3.... | [
"def calculate_error(distance):\n\n u1 = pose2.x - pose1.x\n u2 = pose2.y - pose1.y\n v_error=np.sqrt((u1)**2+(u2)**2)-distance\n theta_goal = np.arctan2(u2,u1)\n u3 = theta_goal - pose1.theta\n theta_error = np.arctan2(np.sin(u3),np.cos(u3))\n return v_error,theta_error",
"def turning_point_... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
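error_finder computes the signed cross-track offset sin(beta) * dist, with beta the difference between the heading and the bearing to the target. Note the original's asin((y2 - y1) / dist) only equals the bearing when x2 >= x1; atan2 is the robust form, used in this hedged sketch (cross_track_error is a hypothetical name):

```python
import math

def cross_track_error(measured, set_v):
    x1, y1, theta1 = measured
    x2, y2 = set_v
    dist = math.hypot(x2 - x1, y2 - y1)
    alpha = math.atan2(y2 - y1, x2 - x1)   # bearing to target (robust vs. asin)
    beta = theta1 - alpha                  # heading error
    return math.sin(beta) * dist           # signed tangential offset

print(cross_track_error((0.0, 0.0, math.pi / 2), (1.0, 0.0)))  # 1.0
```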
Use getattr(obj, self._name) as the default getter if no getter is decorated or provided at init. | def _default_getter(self, obj):
try:
return getattr(obj, self._name)
except TypeError:
raise | [
"def __get__(self, obj, cls=None):\n return self._getter(obj)",
"def _lazyprop(self):\r\n if not hasattr(self, attr_name):\r\n setattr(self, attr_name, func(self))\r\n return getattr(self, attr_name)",
"def __getattr__(self, key):\n if key.startswith(\"_\"):\n r... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Use setattr(obj, self._name, value) as the default setter if no setter is decorated or provided at init. | def _default_setter(self, obj, value):
try:
setattr(obj, self._name, value)
except TypeError:
raise | [
"def _set_attr_or_key(obj, name, value, _isinstance=isinstance, _dict=dict, setter=setattr):\n if _isinstance(obj, _dict):\n obj[name] = value\n else:\n setter(obj, name, value)",
"def __setattr__(self, name, value):\n if name in self.INTERNAL_ATTRS:\n super(Settings, self)._... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Use delattr(obj, self._name) as the default deleter if no deleter is decorated or provided at init. | def _default_deleter(self, obj):
try:
delattr(obj, self._name)
except AttributeError:
pass
except TypeError:
raise | [
"def deleter(self, func):\n self.set_delattr_mode(DelAttr.Property, func)\n return func",
"def _linked_deleter(self, obj):\n self._hidden_deleter(obj)\n self._update_linked(obj)",
"def deleter(self, _deleter):\n self._most_recent_linker = self._linked_deleter\n if _dele... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
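The three _default_* methods above are the fallbacks of a property-like descriptor that stores its value in a backing attribute named self._name. To make the division of labor concrete, here is a minimal self-contained descriptor using the same defaults (ManagedAttribute is an illustrative class, not the original, and it omits the linker machinery entirely):

```python
class ManagedAttribute:
    """Minimal sketch of the descriptor the three defaults above plug into."""
    def __set_name__(self, owner, name):
        self._name = "_" + name            # backing slot, e.g. obj._x

    def __get__(self, obj, objtype=None):
        if obj is None:
            return self
        return getattr(obj, self._name)    # cf. _default_getter

    def __set__(self, obj, value):
        setattr(obj, self._name, value)    # cf. _default_setter

    def __delete__(self, obj):
        try:
            delattr(obj, self._name)       # cf. _default_deleter (access-safe)
        except AttributeError:
            pass                           # deleting a missing value is a no-op

class Point:
    x = ManagedAttribute()

p = Point(); p.x = 3; print(p.x); del p.x; del p.x  # prints 3; second del is safe
```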
Set keeper and _name / doc from init or decoration. | def keeper(self, _keeper):
self._most_recent_linker = self._linked_keeper
self._attribute_name_of_class_instance = _keeper if _keeper is None else name_to_snake_case(
_keeper.__qualname__.split(".")[-2]
)
if _keeper:
self._name = "_" + _keeper.__name__ if self._na... | [
"def __init__(self,name, parent=None, **meta):\n self.__innerset__(\n name=name,\n meta=meta,\n parent=parent,\n childs={},\n hooks=[],\n )",
"def __init__(self, root, **kw):\n self.root = root\n for k, v in kw.iteritems():\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Called before the getter if the getter is a linker. (Note: the default will delete dependents on every access.) | def _linked_getter(self, instance):
try:
self._hidden_getter(instance)
except AttributeError:
raise
else:
self._update_linked(instance) | [
"def go_for_prop_loading(self, go_for_prop_loading):\n\n\n self._go_for_prop_loading = go_for_prop_loading",
"def _after_import(self):\n return",
"def post_load(self):\n # get all model methods after loading\n self._set_model(self.model)",
"def __clear_dependencies__(self):\n try:\n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Set the setter if provided, else the default setter (with linked-deletion calls if no init linkers). | def setter(self, _setter):
self._most_recent_linker = self._linked_setter
if _setter is None:
self._setter = self._default_setter
if self._chain:
self._chain_setter = self._setter
self._setter = self.chain_setter
self.linker(self)
... | [
"def setter(self, setter):\n if setter is NoDefault:\n\n def paramSetter(p_self, value):\n self.assigned = SINCE_ANYTHING\n p_self.assigned = SINCE_ANYTHING\n setattr(p_self, self.fieldName, value)\n\n elif setter is None:\n\n def para... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Set the deleter if provided, else the access-safe default deleter (with linked-deletion calls if no init linkers). | def deleter(self, _deleter):
self._most_recent_linker = self._linked_deleter
if _deleter is None:
self._deleter = self._default_deleter
if self._chain:
self._chain_deleter = self._deleter
self._deleter = self.chain_deleter
self.linker(s... | [
"def _linked_deleter(self, obj):\n self._hidden_deleter(obj)\n self._update_linked(obj)",
"def _default_deleter(self, obj):\n try:\n delattr(obj, self._name)\n except AttributeError:\n pass\n except TypeError:\n raise",
"def deleter(self, func)... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Called before the deleter if the deleter is a linker. (True if no linkers at init and the default deleter.) | def _linked_deleter(self, obj):
self._hidden_deleter(obj)
self._update_linked(obj) | [
"def deleter(self, _deleter):\n self._most_recent_linker = self._linked_deleter\n if _deleter is None:\n self._deleter = self._default_deleter\n if self._chain:\n self._chain_deleter = self._deleter\n self._deleter = self.chain_deleter\n s... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Set explicit linkers at end of init and restore linker decorator. | def _set_explicit_linkers(self, linkers, old_linker):
if isinstance(linkers, str):
self._linker(linkers)
else:
for linker in linkers:
self._linker(linker)
self.linker = old_linker | [
"def _append_linker(self, linker):\n linkerPm = kml.Placemark(NS)\n linkerPm.geometry = linker.shape\n if not self.linkers_wkt.get(linkerPm.geometry.wkt):\n self.linkers_wkt[linkerPm.geometry.wkt] = True\n self.linkers.append(linkerPm)",
"def refiner_reset(self):\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Waits for a bare-metal node attribute to reach the given status. The client should have a show_node(node_uuid) method to get the node. | def wait_for_bm_node_status(client, node_id, attr, status):
_, node = client.show_node(node_id)
start = int(time.time())
while node[attr] != status:
time.sleep(client.build_interval)
_, node = client.show_node(node_id)
status_curr = node[attr]
if status_curr == status:
... | [
"def _multi_check_node(self, status, node_attrs):\r\n error = None\r\n try:\r\n response = self.connection.request(\r\n status['node_response']['selfLink']).object\r\n except GoogleBaseError:\r\n e = self._catch_error(ignore_errors=node_attrs['ignore_errors'... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
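wait_for_bm_node_status is truncated inside the polling loop; a Tempest-style helper of this shape usually re-checks the attribute and enforces a client timeout. A sketch under those assumptions (client.build_timeout and the built-in TimeoutError are assumptions; Tempest itself raises its own exception type):

```python
import time

def wait_for_attr(client, node_id, attr, status):
    _, node = client.show_node(node_id)
    start = int(time.time())
    while node[attr] != status:
        time.sleep(client.build_interval)          # poll at the client's interval
        _, node = client.show_node(node_id)
        if int(time.time()) - start >= client.build_timeout:
            raise TimeoutError(
                'Node %s failed to reach %s=%s within %ss (current: %s)'
                % (node_id, attr, status, client.build_timeout, node[attr]))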
exec_config must be of type ConfigNode because we depend on safe_get(self, key) to correctly handle optional exec provider config parameters. | def __init__(self, exec_config, cwd):
for key in ['command', 'apiVersion']:
if key not in exec_config:
raise ConfigException(
'exec: malformed request. missing key \'%s\'' % key)
self.api_version = exec_config['apiVersion']
self.args = [exec_config... | [
"def executor_config(self):\n return self._executor_config",
"def executor_config(self, executor_config):\n\n self._executor_config = executor_config",
"def config(self, param: str, /) -> Any:",
"def fetchConfigParam(self):\r\n pass",
"def _process_config_request(self, command_dict: dic... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This function finds the power ratio between the external vehicle and the MLG (main landing gear) engines. For this, the maximum allowable force on the nose landing gear strut is used. | def opt_force_ratio(F_nlg_allow, a_lst):
m_plane = 97400 # [kg] MRW
m_car = 22000 # [kg] Mass of external vehicle
m_tot = m_plane + m_car # [kg] Total mass
Roll_fric = 0.02 # [-] Rolling friction coefficient
F_tot = m_tot*max(a_lst) + Roll_fric*m_tot*9.81 #... | [
"def max_power_in_existing_storage_rule(_m, g):\r\n\r\n return self.data.existing_storage_units_dict[g]['REG_CAP']",
"def max_power_output_solar_rule(_m, g, t):\r\n\r\n # Existing solar generators\r\n if g in m.G_E_SOLAR:\r\n return m.p_total[g, t] <= m.Q_SOLAR[g, t... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
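The truncated F_tot line in opt_force_ratio is plain arithmetic: inertial force plus rolling resistance. A worked sketch with the constants from the snippet (total_tractive_force is a hypothetical name; the 1.5 m/s^2 peak acceleration is a made-up input):

```python
def total_tractive_force(a_max, m_plane=97400, m_car=22000, roll_fric=0.02, g=9.81):
    # F_tot = inertia + rolling resistance, as in the truncated line above
    m_tot = m_plane + m_car
    return m_tot * a_max + roll_fric * m_tot * g

print(round(total_tractive_force(1.5)))  # 202526 N at a 1.5 m/s^2 peak acceleration
```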
Function that checks that the static friction / tractive force limit is not exceeded for a given torque and static friction coefficient. If the torque is higher than the limit, the wheel will slip. | def stat_traction(torque, N, wheelrad, fric=1):
if torque > (fric*N)*wheelrad:
#print("Too much torque, will slip")
return False
return True | [
"def calc_blade_friction_force():\r\n # return c_a * d * w\r\n return 0",
"def constrain(w):\n VEL_LIMIT = 1000 # rad/s\n w = VEL_LIMIT if w >= VEL_LIMIT else w\n w = -VEL_LIMIT if w <= -VEL_LIMIT else w\n return w",
"def friction(self, magnitude) -> None:\n # Stops the ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
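stat_traction is complete as shown; the slip condition is simply torque <= mu * N * r. A usage sketch with made-up numbers (a 600 kN normal load on a 0.6 m radius wheel with mu = 0.8 tolerates up to 288 kNm):

```python
def stat_traction(torque, N, wheelrad, fric=1):
    return torque <= fric * N * wheelrad   # slips when T > mu * N * r

print(stat_traction(250e3, 600e3, 0.6, fric=0.8))  # True  (no slip)
print(stat_traction(300e3, 600e3, 0.6, fric=0.8))  # False (wheel slips)
```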