query (string · lengths 9–9.05k) | document (string · lengths 10–222k) | negatives (list · lengths 19–20) | metadata (dict) |
|---|---|---|---|
Converts OAuth2Response instance to fastapi Response instance | def _to_fastapi_response(oauth2_response: OAuth2Response):
response_content = (
oauth2_response.content._asdict()
if oauth2_response.content is not None
else {}
)
headers = dict(oauth2_response.headers)
status_code = oauth2_response.status_code
content = json.dumps(response_c... | [
"def _to_response(result: Union[Dict, Response]) -> Response:\n if isinstance(result, Response):\n return result\n\n logger.debug(\"Simple response detected, serializing return before constructing final response\")\n return Response(\n status_code=200,\n content... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
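The document cell above is truncated before the final return; a minimal self-contained sketch of the same conversion, assuming an object exposing `content` (a namedtuple or None), `headers`, and `status_code` as in the visible fragment:

```python
import json
from fastapi import Response

def to_fastapi_response(oauth2_response) -> Response:
    # Serialize the namedtuple-style content, falling back to an empty dict
    body = (
        oauth2_response.content._asdict()
        if oauth2_response.content is not None
        else {}
    )
    return Response(
        content=json.dumps(body),
        status_code=oauth2_response.status_code,
        headers=dict(oauth2_response.headers),
        media_type="application/json",
    )
```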
Third rule within ← fast if == [\seconds\minutes\second\minute] | def third_rule(requirement, sutime):
if sutime['TIMEX3']['@type'] == 'DURATION':
requirement = re.sub(r'(within )*(\d+(?:\.\d+)? )*(second)+s?', 'fast', requirement)
requirement = re.sub(r'(within )*(\d+(?:\.\d+)? )*(minute)+s?', 'fast', requirement)
return requirement | [
"def check_time(time):\n if time.minute == 0 or time.minute == 30:\n return True\n return False",
"def _is_exact_match(cron, ts):\r\n cron.get_prev()\r\n diff = timeutils.total_seconds(ts - cron.get_next(datetime.datetime))\r\n return abs(diff) < 60 # minute precision\r",
"def... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
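The `third_rule` document above is shown in full; the same substitutions can be exercised standalone (the `sutime` DURATION check is dropped here for brevity):

```python
import re

def third_rule(requirement: str) -> str:
    # Collapse "within N second(s)/minute(s)" phrases into the keyword "fast"
    requirement = re.sub(r'(within )*(\d+(?:\.\d+)? )*(second)+s?', 'fast', requirement)
    requirement = re.sub(r'(within )*(\d+(?:\.\d+)? )*(minute)+s?', 'fast', requirement)
    return requirement

print(third_rule("respond within 2 seconds"))  # -> "respond fast"
```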
Fifth rule 2.0 [8-9][0-9][\.?[0-9]?%?][IN | SET]time ← alltimes | def fifth_rule(requirement, sutime):
requirement = re.sub(r'[8-9][0-9][\.?[0-9]?% of the time?', # > 80% of the time
'alltimes', requirement)
requirement = re.sub(r'[8-9][0-9][\.?[0-9]?% up time?', # > 80% uptime
'uninterrupted uptime', requirement)
if sut... | [
"def greedy_claim_schedule(problem):",
"def test_general_subset_invalid_time():\n pass",
"def _get_times(self,):\n s = self._rm_multispace(self._rm_tags(self._get_times_div()))\n m = re.search(self.re_preptime, self._rm_multispace(s))\n if m:\n self.Preptime = m.group(0).strip... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Method used to find a prompt inside an output string. This method is used during the first communication with the device. First it finds the prompt, then calculates the different forms the prompt can take. This will be useful later on when finding the prompt in other output streams (read). | def find_prompt(self, text):
# Get last line of the data
prompt = text.split("\n")[-1]
# Remove possible \r in the data
# prompt = prompt.replace("\r", "")
prompt = text.split("\r")[-1]
# Display info message
log.info(f"find_prompt: prompt: '{prompt}'")
... | [
"def prompt(self):\r\n prompt_css = \"section.open-ended-child>div.prompt\"\r\n prompts = self.q(css=prompt_css).map(lambda el: el.get_attribute('innerHTML').strip()).results\r\n\r\n if len(prompts) == 0:\r\n self.warning(\"Could not find essay prompt on page.\")\r\n retur... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
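A standalone sketch of the last-line extraction this method describes, with the `\r` split applied to the already-extracted line (matching the fix above):

```python
def find_prompt(text: str) -> str:
    # The prompt is whatever follows the last newline; strip any stray "\r"
    prompt = text.split("\n")[-1]
    prompt = prompt.split("\r")[-1]
    return prompt

print(find_prompt("show version\r\nrouter1#"))  # -> "router1#"
```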
Method used to check if a prompt has one of the expected endings, then create a list with all possible prompts for the device | def get_possible_prompts(self, prompt):
# By default no prompts are returned
list_of_prompts = []
    # Get all the possible values of the endings of the prompt
list_of_possible_ending_prompts = self.list_of_possible_ending_prompts
# Temporary variable storing the prompt value
... | [
"def _check_prompt(self, data):\r\n return self._match(self.prompt, data)",
"def test__parse_prompts():\n prompt = OnboardingPrompt(name = 'ibuki')\n \n for input_value, expected_output in (\n ({}, None),\n ({'prompts': None}, None),\n ({'prompts': [prompt.to_data()]}, (prompt... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Method removing the command at the beginning of a string. After sending commands, an "echo" of the command sent is displayed in the output string. This method removes it. | def remove_command_in_output(self, text, cmd):
# Display info message
log.info(f"remove_command_in_output: cmd = '{cmd}'")
# Display info message
log.info(f"remove_command_in_output: cmd (hex) = '{cmd.encode().hex()}'")
# Remove the command from the beginning of the output
... | [
"def strip_command(self, command_string, output):\n output_list = output.split(command_string)\n return '\\n'.join(output_list)",
"def strip_command(self, command_string: str, output: str) -> str:\n output_list = output.split(command_string)\n return self.RESPONSE_RETURN.join(output_li... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
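The cell truncates before the removal itself; one common way to strip the echoed command, assuming the device echoes the command followed by a line break (hypothetical helper, not the original's exact logic):

```python
def remove_command_in_output(text: str, cmd: str) -> str:
    # Drop the echoed command and its trailing newline from the start of the output
    prefix = cmd + "\n"
    if text.startswith(prefix):
        return text[len(prefix):]
    return text

print(remove_command_in_output("show clock\n12:00:00 UTC", "show clock"))  # -> "12:00:00 UTC"
```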
Method removing the carriage return at the beginning of a string | def remove_starting_carriage_return_in_output(self, text):
# Display info message
log.info("remove_starting_carriage_return_in_output")
# Remove the carriage return at the beginning of the string
output = text.lstrip("\r\n\r")
# Display info message
log.info(f"remove_s... | [
"def __remove_break_line__(self, string):\n return string.rstrip()",
"def delete_first_line(string):\n lines = string.split('\\n')\n return '\\n'.join(lines[1:])",
"def strip_line_breaks(self, text):\n text = re.sub(r'(\\r*)', r'', text)\n text = re.sub(r'(\\n*)', r'', text)\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
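Note that `str.lstrip` treats its argument as a set of characters, so the `"\r\n\r"` argument above is equivalent to `"\r\n"`:

```python
text = "\r\n\routput line"
print(text.lstrip("\r\n"))  # -> "output line"; any mix of leading \r and \n is removed
```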
Async method disabling paging on a device Use the "cmd_disable_paging" attribute | async def disable_paging(self):
# Display info message
log.info("disable_paging")
# Send command to the device to disable paging
await self.send_command(self.cmd_disable_paging) | [
"def disable_paging(device_info, telnet_conn, read_delay=1):\n\n # Execute the command\n cmd = 'terminal length 0\\n'\n telnet_conn.write(cmd)\n oper_prompt = \"%r\" % device_info['oper_prompt']\n admin_prompt = device_info['admin_prompt']\n dummy, match, dummy = telnet_conn.expect([oper_prompt, a... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Async method used for connecting a device using SSH protocol | async def connectSSH(self):
# Display info message
log.info("connectSSH")
# Parameters of the connection
generator = asyncssh.connect(
self.ip,
username=self.username,
password=self.password,
known_hosts=None,
# encryption_alg... | [
"async def _async_connect(self) -> None: # pragma: no cover\n try:\n self.conn_coro = self.client.connected()\n aenter = type(self.conn_coro).__aenter__(self.conn_coro)\n self.stream = await aenter\n logger.info(f\"Agent {str(self.jid)} connected and authenticated... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Async method used for connecting a device using Telnet protocol | async def connectTelnet(self):
# Display info message
log.info("connectTelnet")
try:
# Prepare connection with Telnet
conn = asyncio.open_connection(self.ip, self.port)
except Exception as error:
# Preparation to the connection failed
... | [
"def connect(self):\n\n self.tello.connect()\n self.tello.wait_for_connection(60.0)",
"def connect_telnet(device_info):\n\n try:\n # Create an instance object of the 'Telnet' class\n telnet_client = telnetlib.Telnet()\n\n # uncomment following line to enable 'telnetlib' debug... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Async method used to send commands in config mode. The commands sent can be either a string or a list of strings. There are | async def send_config_setTelnet(self, cmds=None, timeout=None):
# Display info message
log.info("send_config_setTelnet")
# Default value of timeout variable
if timeout is None:
timeout = self.timeout
# Clear returned output
returned_output = ""
# C... | [
"async def do_config():\n\n\n async def cfg_add():\n try:\n if config[message.server.id]:\n await bot.send_message(c, 'The channel is already configured.')\n return\n except:\n config[message.server.id] = {\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Async method used to get the version of the software on the device | async def get_version(self):
# Display info message
log.info("get_version")
# By default empty string
version = ""
# Run get version on the device
output = await self.send_command(self.cmd_get_version)
# Seek "Version " and "," to get the version in the return... | [
"async def get_version(self):\n\n # Display info message\n log.info(\"get_version\")\n\n # By default empty string\n version = \"\"\n\n # Run get version on the device\n output = await self.send_command(self.cmd_get_version)\n\n # Seek \"Version: \" on each line of t... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Async method used to get the model of the device | async def get_model(self):
# Display info message
log.info("get_model")
# Get model
output = await self.send_command(self.cmd_get_model)
# Display info message
log.info(f"get_model: output: '{output}'")
# Remove the useless information in the returned string
... | [
"def _getHardware(self):\n msg = self.asyncRead()\n msg[\"hardware\"] = self.parent.hardware\n msg[\"current\"] = self.parent.trainOnHW\n self.send(msg)",
"def get_device_model(self, device_id):\n request = \"SELECT Model FROM Device WHERE Id = \\'%s\\'\"%(device_id)\n #p... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Async method used to get the configuration of the device | async def get_config(self, timeout=None):
# Display info message
log.info("get_config")
# Default value of timeout variable
if timeout is None:
timeout = self.timeout
# Get config
output = await self.send_command(self.cmd_get_config, timeout=timeout)
... | [
"def get_config(self):\n iq_cmd = self.Iq()\n iq_cmd['type'] = 'get'\n action_cmd = ET.Element('oa')\n action_cmd.attrib['xmlns'] = 'connect.logitech.com'\n action_cmd.attrib['mime'] = (\n 'vnd.logitech.harmony/vnd.logitech.harmony.engine?config')\n iq_cmd.set_pa... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Async method used to save the current configuration on the device | async def save_config(self):
# Display info message
log.info("save_config")
# Send command
output = await self.send_command(self.cmd_save_config)
# Return the commands of the configuration saving process
return output | [
"async def save(self):\n if self._config_path is None:\n return\n async with self._saving_config_lock:\n temp_config_path = f\"{self._config_path}.{time.time()}\"\n # First we write all the config to a temporary file\n async with aiofiles.open(temp_config_pa... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Run inference on whichever split the dataset is configured for; returns an abstract_model.Prediction | def inference(self, dataset, model_dir):
raise NotImplementedError | [
"def infer(\n context: mlrun.MLClientCtx,\n dataset: DatasetType,\n model_path: str,\n drop_columns: Union[str, List[str], int, List[int]] = None,\n label_columns: Union[str, List[str]] = None,\n log_result_set: bool = True,\n result_set_name: str = \"prediction\",\n batch_id: str = None,\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Receives the output from 'cat /proc/cpuinfo' and checks whether the firmware mode is OPAL or not | def check_fw_mode(self, cat_cpuinfo_out):
for line in cat_cpuinfo_out.splitlines():
if "firmware" in line:
if "OPAL" in line:
return True
else:
return False
return False | [
"def determineProcessorType(self): \n if commands.getstatusoutput('uname -p')[0] == 0: \n self.processor_type = commands.getoutput('uname -p')",
"def get_cpuinfo() -> str:\n\n # Read /proc/cpuinfo\n try:\n with open('/proc/cpuinfo', 'r') as f:\n return f.read()\n exc... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Receives the output from 'lsblk' and gets the block size for the specified device | def get_block_size(self, cmd_lsblk_out, dev):
for line in cmd_lsblk_out.splitlines():
if dev in line:
return line.split()[3]
return | [
"def dev_size(device):\n device_path = \"/sys/block/\"\n num_sectors = open(device_path + device + \"/size\").read().rstrip(\"\\n\")\n sector_size = (\n open(device_path + device + \"/queue/hw_sector_size\")\n .read()\n .rstrip(\"\\n\")\n )\n return int(num_sectors) * int(sector_... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
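The fourth whitespace-separated field of `lsblk`'s default output is the SIZE column, which is what `line.split()[3]` picks out; a quick check against canned output:

```python
lsblk_out = """NAME MAJ:MIN RM   SIZE RO TYPE MOUNTPOINT
sda    8:0   0  931.5G  0 disk
sda1   8:1   0    512M  0 part /boot"""

def get_block_size(cmd_lsblk_out: str, dev: str):
    for line in cmd_lsblk_out.splitlines():
        if dev in line:
            return line.split()[3]  # SIZE column
    return None

print(get_block_size(lsblk_out, "sda1"))  # -> "512M"
```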
Deploy DC Use Marvin to deploy DC | def deploy_dc(self):
print("==> Deploying Data Center")
# TODO: Replace Marvin
mrv = marvin.marvinInit.MarvinInit(self.marvin_config)
mrv.init()
dc = marvin.deployDataCenter.DeployDataCenters(mrv.getTestClient(), mrv.getParsedConfig())
dc.deploy() | [
"def run_deploy():\n\n subprocess.check_call(['nikola', 'deploy'])",
"def deploy():\n execute(sync)\n fabtools.supervisor.restart_process(env.app)",
"def deploy():\n cmd(\"google_appengine/appcfg.py update app\")",
"def run():\n\n parser = OptionParser()\n parser.add_option(\"-d\", \"--dir\"... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Deploy jacoco agent on management server | def configure_tomcat_to_load_jacoco_agent(self):
open("/tmp/jacoco.conf", "w").write('JAVA_OPTS="$JAVA_OPTS -javaagent:/tmp/jacoco-agent.jar=destfile=/tmp/jacoco-it.exec"\n')
zone = self.config['zones'][0]['name']
for host in self.config['mgtSvr']:
connection = {'hostname': host['mgt... | [
"def configure_agent_to_load_jacoco_agent(self):\n zones = parse('zones[*]').find(self.config)\n for zone in zones:\n hosts = parse('pods[*].clusters[*].hosts[*]').find(zone)\n for host in hosts:\n hostname = host.value['url'].split('/')[-1]\n connec... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Deploy jacoco agent on hypervisor | def configure_agent_to_load_jacoco_agent(self):
zones = parse('zones[*]').find(self.config)
for zone in zones:
hosts = parse('pods[*].clusters[*].hosts[*]').find(zone)
for host in hosts:
hostname = host.value['url'].split('/')[-1]
connection = {'ho... | [
"def configure_tomcat_to_load_jacoco_agent(self):\n open(\"/tmp/jacoco.conf\", \"w\").write('JAVA_OPTS=\"$JAVA_OPTS -javaagent:/tmp/jacoco-agent.jar=destfile=/tmp/jacoco-it.exec\"\\n')\n zone = self.config['zones'][0]['name']\n for host in self.config['mgtSvr']:\n connection = {'host... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Display a map of child GeoRecords, colored by this Indicator. If there are no children (we're at the Tract level), display siblings. | def __init__(self, indicator, georecord, domain, GEOS_point=None, place=None):
self.indicator = indicator
self.georecord = georecord
self.domain = domain
self.markers = []
if GeoLevel.objects.filter(parent=georecord.level).count() == 0:
# display siblings
mappe... | [
"def print_geo_children(self):\n msg = 'parent:%10s %2d geo: %10s %2d #children: %d:' % \\\n (self.pname, self.pindex, self.oname, self.oindex, len(self.list_of_children))\n for geo in self.list_of_children:\n msg += ' %s:%d' % (geo.oname, geo.oindex)\n logger.info(msg)"... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Tell if this function block can be merged with given block. | def isMergableWith(self, op):
if not is_glsl_block_function(op):
return False
if (self.getName() != op.getName()) or (self.getType() != op.getType()):
return False
return True | [
"def can_be_merged(self):\n return self._link is not None or self._include is not None",
"def IsByBlock(self) -> bool:",
"def _mergeCalibBlocks_isMergeable(object1: Any, object2: Any) -> bool:\n if isinstance(object1, list):\n if not isinstance(object2, list):\n return False\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Tell if given object is a GLSL function block. | def is_glsl_block_function(op):
return isinstance(op, GlslBlockFunction) | [
"def is_glsl_block_unary(op):\n return isinstance(op, GlslBlockUnary)",
"def is_free_function(py_object, full_name, index):\n if not tf_inspect.isfunction(py_object):\n return False\n\n parent_name = full_name.rsplit('.', 1)[0]\n if tf_inspect.isclass(index[parent_name]):\n return False\n\n return Tr... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Try to load a given matrix for the recommender; if the matrix is not present in the raw_data folder, fit the recommender and save the matrix | def fit_or_load(r, matrix='r-hat'):
matrix_type = 'test' if args.test else 'valid'
filename = 'raw_data/{}-{}-{}'.format(r.NAME, matrix, matrix_type)
if args.loadrhat:
try:
if matrix == 'r-hat':
try: r.load_r_hat(filename + '.npy')
except: r.load_r_hat(fil... | [
"def load_recommender(filepath):\n r = np.load(filepath)\n if isinstance(r,BaseRecommender):\n model = r\n else:\n model = np.loads(str(r['model']))\n if 'mat' in r.files:\n model.similarity_matrix = r['mat']\n elif 'row' in r.files:\n model.similarity_matr... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Exports the json to a topic | def exportJson(frameJson, topic):
Producer.push_json(topic, frameJson) # Push frame json to specified Kafka topic | [
"def export_topics(self):\n\n # format as a list (for json output), then sort descending by textIDCount\n topics = [{'name': topic['name'], 'count': topic['count'],\n 'verbatims': list(topic['verbatims']), 'textIDs': list(topic['textIDs']),\n 'textIDCount': topic['t... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Smooth the input with scipy ndimage utility | def scipy_smooth(img, sigma=5):
return ndimage.gaussian_filter(img, sigma=sigma) | [
"def _smooth(fwhm, img):\n return smooth_img(img, fwhm)",
"def _smooth_data_array(arr, affine, fwhm, copy=True):\n\n if arr.dtype.kind == 'i':\n if arr.dtype == np.int64:\n arr = arr.astype(np.float64)\n else:\n arr = arr.astype(np.float32)\n if copy:\n arr = ar... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
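`ndimage.gaussian_filter` is the standard SciPy call here; a tiny usage check:

```python
import numpy as np
from scipy import ndimage

img = np.zeros((64, 64))
img[32, 32] = 1.0                                 # a single bright pixel
smoothed = ndimage.gaussian_filter(img, sigma=5)  # spread into a Gaussian blob
print(smoothed.max() < 1.0)                       # True: energy is redistributed
```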
Build match and apply functions based on given re pattern, search text, and replacement. | def build_match_and_apply_functions(pattern, search, replace):
def matches_rule(word):
""" Check if word contains pattern.
"""
return re.search(pattern, word)
def apply_rule(word):
""" Replace text with replacement in word.
"""
return re.sub(search, replace, wor... | [
"def replacement(cls, search_pattern: str, replacement: str) -> PhonTransform:\n sub_func = lambda match: replacement\n return cls(search_pattern, sub_func)",
"def preprocess_regex_check_and_replace(pattern: str, replacement: str) -> Callable[[str], str]:\n\n def preprocess(orig: str) -> str:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Check if word contains pattern. | def matches_rule(word):
return re.search(pattern, word) | [
"def check_word(word, string):\n regexStr = re.search(r'(\\b%s\\b)' % word, string)\n if regexStr is not None:\n return True\n\n return False",
"def hasPattern(self, text, pattern):\n \n return self.getPattern(text, pattern) is not None",
"def _words_in_text(word, text):\n\n regexwo... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
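The two rows above form a classic closure-factory pattern, where each rule is a (pattern, search, replace) triple; a runnable sketch with a hypothetical English-plural rule:

```python
import re

def build_match_and_apply_functions(pattern, search, replace):
    def matches_rule(word):
        return re.search(pattern, word)

    def apply_rule(word):
        return re.sub(search, replace, word)

    return matches_rule, apply_rule

# hypothetical rule: consonant + "y" pluralizes to "ies"
matches, apply_ = build_match_and_apply_functions('[^aeiou]y$', 'y$', 'ies')
if matches('vacancy'):
    print(apply_('vacancy'))  # -> "vacancies"
```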
Process a list of folders from a user or collection. | def _process_folders(self, folders, parentId, parentType):
current_folders = {f['name']: f for f in
self.get('folder', {'parentType': parentType,
'parentId': parentId})}
# Add, update or noop listed folders
for folder in f... | [
"def create_folder_structure(_user):\n flds = _user.folders\n logging.info(f\"\\nYou have ** {str(len(_user.folders))} ** \"\n f\"folders in your ArcGIS Organization\\n\")\n # listing documents in the user root folder\n root_folder_items = _user.items()\n _n = 0\n logging.info(f\"T... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Entry point for ansible girder client module | def main():
    # Default spec for initializing and authenticating
argument_spec = {
# __init__
'host': dict(),
'port': dict(),
'apiRoot': dict(),
'apiUrl': dict(),
'scheme': dict(),
'dryrun': dict(),
'blacklist': dict(),
# authenticate
... | [
"def main():\n\n # Define the initial common playbook_kwargs\n playbook_kwargs = {\n \"id\": uuid4(),\n \"private_data_dir\": DEFAULT_RUNNER_DIR,\n \"artifact_dir\": DEFAULT_ARTIFACT_DIR\n }\n # If we are dealing with a module, pass in module_args\n if IS_MODULE:\n playboo... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Resize an image stored as an np.array | def _np_resize_image(image, size, dtype='int'):
if dtype == 'int':
_size = (size[1], size[0]) # (H,W) to (W,H)
return cv2.resize(image.astype('uint8'),
_size,
interpolation=cv2.INTER_LINEAR)
elif dtype == 'float':
... | [
"def imresize(im,sz):\n\n pil_im = Image.fromarray(im)\n return array(pil_im.resize(sz))",
"def resizeImage(image: numpy.uint8) -> numpy.uint8:\n if image.shape[0] > 512:\n width = int(numpy.around((image.shape[1]) / 2))\n height = int(numpy.around(image.shape[0] / 2))\n resizedImage... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Binarize probability map into mask. | def _np_get_mask(prob_map, prob_thresh=0.5):
mask = (prob_map > prob_thresh) * 255
return mask.astype(np.uint8) | [
"def bin_binarise(self):\n pass",
"def apply_mask(binary, mask_dict):\n result = \"\"\n for i, val in enumerate(binary):\n if mask_dict[i] in ('X', '1'):\n result += mask_dict[i]\n else:\n result += binary[i]\n return result",
"def remap_histogram_key(original... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
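Thresholding and scaling happen in one expression, as in `_np_get_mask` above:

```python
import numpy as np

prob_map = np.array([[0.2, 0.7], [0.9, 0.1]])
mask = ((prob_map > 0.5) * 255).astype(np.uint8)
print(mask)  # [[  0 255]
             #  [255   0]]
```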
Transform original points list into flipped points and concatenate these two lists. | def _points_transform(clicks_lists, image_width):
clicks_lists_flipped = []
for clicks_list in clicks_lists:
clicks_list_flipped = []
for click in clicks_list:
# Horizontal flip
_y = image_width - click.coords[1] - 1
_click = clicke... | [
"def mirror_points_point(points, mirror):\n return [mirror_point_point(point, mirror) for point in points]",
"def _reversePoints(points):\n # copy the points\n points = _copyPoints(points)\n # find the first on curve type and recycle\n # it for the last on curve type\n firstOnCurve = None\n f... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Preprocess the user clicks into a points array | def _preprocessing(self):
if self.resize:
self.click_list = self._remapping_coord(self.click_list,
self.input_size,
self.orig_size)
clickers = self._get_clickers(self.click_list)
c... | [
"def data_mouse():\n\timport matplotlib.pyplot as plt\n\tfig = plt.figure()\n\tax = fig.add_subplot(111, xlim=(-1,2), ylim=(-1,2))\n\tX = np.zeros( (0,2) )\n\tY = np.zeros( (0,) )\n\tcol = ['bs','gx','ro']\n\t\n\tdef on_click(event):\n\t\tX.resize( (X.shape[0]+1,X.shape[1]) )\n\t\tX[-1,:] = [event.xdata,event.yda... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Delete the empty related analytic account | def unlink(self):
analytic_accounts_to_delete = self.env['account.analytic.account']
for project in self:
if project.analytic_account_id and not project.analytic_account_id.line_ids:
analytic_accounts_to_delete |= project.analytic_account_id
result = super(Project, se... | [
"def delete_account():\n pass",
"def delete_account(details):\n details.delete_account()",
"def delete_account(self):\n Account.account_details.remove(self)",
"def delete_account(self, account):\n \n pass",
"def delete_account(AccountId=None):\n pass",
"def delete_account... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Given two particles, calculate the number of ticks before they collide for the first time. None if they never collide. | def calculate_collision(p1: Particle, p2: Particle):
# First find all the tick numbers, when one coordinate matches
solutions = []
solutions.append(
_collision_one_coord(p1.ax, p1.vx, p1.px, p2.ax, p2.vx, p2.px)
)
solutions.append(
_collision_one_coord(p1.ay, p1.vy, p1.py, p2.ay, p2.... | [
"def particleCollisionTime(self, first_element, second_element):\n\n # Quantities required in following formula\n r = self.measure.distance(first_element, second_element)\n r2 = np.dot(r, r)\n #v = relativeVelocity(first_element, second_element, self.vel)\n v = self.measure.relati... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return participant documents with encoded names. | def get_encoded_participants(case, error_handler):
log = parse_file.get_logger()
for participant in case['participants']:
for encoder_name, encoder in encodings.ENCODERS.items():
result = {'encoding': encoder_name,
'case': case['_id'],
}
... | [
"def mensajespersonaje(personaje):\n query = {\"character_name\": f\"{personaje}\"}\n frases = list(collection.find(query,{\"_id\": 0}))\n return frases",
"def extract_information():\r\n mydir = \"C:/Users/Harm/PycharmProjects/Course10/Data_integratie/\"\r\n file_list = glob.glob(mydir + \"/*.pdf\"... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Samples skills and users | def create_samples(self, skills_sample_fraction=1.0, users_sample_fraction=1.0):
# Sampling
self.sample_skills_to_be_covered(skills_sample_fraction)
self.sample_users(users_sample_fraction) | [
"def sample_skills_to_be_covered_controlled(self, num_sampled_skills=50, rare_sample_fraction=0.33,\n popular_sample_fraction=0.33, rare_threshold=0.33,\n popular_threshold=0.33, user_sample_fraction=1.0):\n print('In... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Samples a fraction of skills that need to be covered instead of all the skills based on the sampling scheme. | def sample_skills_to_be_covered(self, fraction=1.0):
self.skills_covered = np.zeros(self.num_skills)
if fraction < 1.0:
num_sampled_skills = int(fraction * self.num_skills)
sampled_skills = np.random.choice(self.num_skills, size=num_sampled_skills, replace=False)
for... | [
"def sample_skills_to_be_covered_controlled(self, num_sampled_skills=50, rare_sample_fraction=0.33,\n popular_sample_fraction=0.33, rare_threshold=0.33,\n popular_threshold=0.33, user_sample_fraction=1.0):\n print('In... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Categorizes skills of sampled users into three categories based on frequency histogram: 1. rare skills (e.g., bottom 33% frequencies), 2. common skills (the rest of the skills), 3. popular skills (e.g., top 33% frequencies) | def categorize_skills(self, df_sampled_users, rare_threshold=0.33, popular_threshold=0.33):
# Get frequency of each skills
skills_array = np.array(df_sampled_users['skills_array'].values)
freq = np.sum(skills_array, axis=0)
freq_skills_available = freq[freq > 0]
num_skills_availa... | [
"def sample_skills_to_be_covered_controlled(self, num_sampled_skills=50, rare_sample_fraction=0.33,\n popular_sample_fraction=0.33, rare_threshold=0.33,\n popular_threshold=0.33, user_sample_fraction=1.0):\n print('In... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Creates a sample of skills of size 'num_skills'. In this sample, 'rare_sample_fraction' of them are from rare skills category, 'popular_sample_fraction' of them are from popular skills category. | def sample_skills_to_be_covered_controlled(self, num_sampled_skills=50, rare_sample_fraction=0.33,
popular_sample_fraction=0.33, rare_threshold=0.33,
popular_threshold=0.33, user_sample_fraction=1.0):
print('In freelan... | [
"def create_samples(self, skills_sample_fraction=1.0, users_sample_fraction=1.0):\n # Sampling\n self.sample_skills_to_be_covered(skills_sample_fraction)\n self.sample_users(users_sample_fraction)",
"def sample_skills_to_be_covered(self, fraction=1.0):\n self.skills_covered = np.zeros(... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Assigns the ground set elements to partitions uniformly at random | def assign_ground_set_to_random_partitions(self, num_of_partitions, cardinality_constraint):
print('In freelancer random partition.')
self.partitions = defaultdict(dict,{i:{'users':set(), 'k':cardinality_constraint} for i in range(0,num_of_partitions)})
partition_ids = np.arange(start=0, stop=nu... | [
"def populate_random(self, prob=0.5):\n for row in range(self.height):\n for col in range(self.width):\n self.cells[row][col] = 1 if random.random() <= prob else 0",
"def random_cluster_assignments(self):\n for i in range(self.vectors_num):\n self.cluster_assignm... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Assigns the ground set elements to partitions based on their salary | def assign_ground_set_to_equi_salary_partitions(self, num_of_partitions, cardinality_constraint):
print('In freelancer salary partition.')
costs = set()
for user_id in self.E:
costs.add(self.cost_vector[user_id])
sorted_costs = sorted(list(costs))
# each cost is a par... | [
"def assign_ground_set_to_random_partitions(self, num_of_partitions, cardinality_constraint):\n print('In freelancer random partition.')\n self.partitions = defaultdict(dict,{i:{'users':set(), 'k':cardinality_constraint} for i in range(0,num_of_partitions)})\n partition_ids = np.arange(start=0,... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Reads n lines from f with an offset of offset lines. | def tail(f, n, offset=0):
avg_line_length = 74
to_read = n + offset
while 1:
try:
f.seek(-(avg_line_length * to_read), 2)
except IOError:
# woops. apparently file is smaller than what we want
# to step back, go to the beginning instead
f.seek(... | [
"def tail(f, n, offset=0):\r\n\tavg_line_length = 74\r\n\tto_read = n + offset\r\n\twhile 1:\r\n\t\ttry:\r\n\t\t\tf.seek(-(avg_line_length * to_read), 2)\r\n\t\texcept IOError:\r\n\t\t\t# woops. apparently file is smaller than what we want\r\n\t\t\t# to step back, go to the beginning instead\r\n\t\t\tf.seek(0)\r\n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
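Both the document and the first negative truncate the same well-known recipe; its usual ending (re-reading with a growing line-length estimate) looks like the sketch below. The tail end is reconstructed from the widely circulated version of this recipe, so treat it as an assumption:

```python
def tail(f, n, offset=0):
    """Read the last n lines of f, skipping `offset` lines from the end."""
    avg_line_length = 74
    to_read = n + offset
    while True:
        try:
            # 2 == os.SEEK_END: step back far enough to cover to_read average lines
            f.seek(-(avg_line_length * to_read), 2)
        except IOError:
            # File is smaller than the estimated window; read from the start
            f.seek(0)
        pos = f.tell()
        lines = f.read().splitlines()
        if len(lines) >= to_read or pos == 0:
            return lines[-to_read:offset and -offset or None]
        avg_line_length *= 1.3  # underestimated line length; widen and retry
```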
Returns the number of subnets, given the breakdown; or -1 if the breakdown doesn't work. | def calculate_subnets(total, breakdown):
sanity_percent = 0 # if this isn't 100% by the end, we got issues.
subnets = 0
for nodep, netp in breakdown:
sanity_percent += nodep
if (sanity_percent > 100):
return -1
subtotal = int(total * .01 * nodep)
groupby = int(254... | [
"def get_subnets_count(self, context, filters=None):\n\n subnets_count = self._count_resource('subnet', context, filters)\n return subnets_count['count']",
"def GetSubdivisionCount(self) -> int:\n ...",
"def GetSubnetIndex(ipv4_cidr_block: str) -> int:\n for ip_list in SUBNETS_EXTRA.values... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Method to pad PIL Image on all sides with constant `fill` value. | def pad(img, padding, fill=0, mode='constant'):
check_type(img)
if not isinstance(padding, (numbers.Number, tuple)):
raise TypeError('Got inappropriate padding arg')
if not isinstance(fill, (numbers.Number, str, tuple)):
raise TypeError('Got inappropriate fill arg')
if isinstance(paddi... | [
"def pad2d(img, width, fill=0):\r\n\r\n return np.lib.pad(img, (width, width), 'constant', constant_values=fill)",
"def pad(img, padding, fill=0, padding_mode='constant'):\n if not _is_numpy(img):\n raise TypeError('img should be Numpy Image. Got {}'.format(type(img)))\n\n if not isinstance(paddin... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Adjust brightness of an Image. | def adjust_brightness(img, brightness_factor):
check_type(img)
enhancer = ImageEnhance.Brightness(img)
img = enhancer.enhance(brightness_factor)
return img | [
"def adjust_brightness(img, brightness_factor):\n\n enhancer = ImageEnhance.Brightness(img)\n img = enhancer.enhance(brightness_factor)\n return img",
"def adjust_brightness(image, delta):\r\n return _clip(image + delta * 255)",
"def increase_brightness(image, value=18):\n hsv = cv2.cvtColor(imag... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Adjust contrast of an Image. | def adjust_contrast(img, contrast_factor):
check_type(img)
enhancer = ImageEnhance.Contrast(img)
img = enhancer.enhance(contrast_factor)
return img | [
"def adjust_contrast(img, contrast_factor):\n\n enhancer = ImageEnhance.Contrast(img)\n img = enhancer.enhance(contrast_factor)\n return img",
"def adjust_contrast(img, contrast_factor):\n _assert_image_tensor(img, 'chw')\n assert contrast_factor >= 0, \"contrast_factor should be non-negative.\"\n\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Adjust color saturation of an image. | def adjust_saturation(img, saturation_factor):
check_type(img)
enhancer = ImageEnhance.Color(img)
img = enhancer.enhance(saturation_factor)
return img | [
"def adjust_saturation(img, saturation_factor):\n\n enhancer = ImageEnhance.Color(img)\n img = enhancer.enhance(saturation_factor)\n return img",
"def adjust_saturation(img, saturation_factor):\n if not _is_numpy_image(img):\n raise TypeError('img should be PIL Image. Got {}'.format(type(img)))... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
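The three enhancement rows above all wrap Pillow's `ImageEnhance` classes with the same `enhance(factor)` call; a compact illustration (image contents are arbitrary):

```python
from PIL import Image, ImageEnhance

img = Image.new("RGB", (32, 32), (128, 64, 64))
brighter  = ImageEnhance.Brightness(img).enhance(1.5)  # factor > 1 brightens
grayer    = ImageEnhance.Color(img).enhance(0.0)       # 0 -> fully desaturated
contrasty = ImageEnhance.Contrast(img).enhance(2.0)    # factor > 1 adds contrast
```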
Adjust hue of an image. The image hue is adjusted by converting the image to HSV and cyclically shifting the intensities in the hue channel (H). The image is then converted back to the original image mode. `hue_factor` is the amount of shift in H channel and must be in the interval `[-0.5, 0.5]`. See `Hue`_ for more details... | def adjust_hue(img, hue_factor):
if not(-0.5 <= hue_factor <= 0.5):
        raise ValueError('hue_factor ({}) is not in [-0.5, 0.5].'.format(hue_factor))
check_type(img)
input_mode = img.mode
assert img.mode not in {'L', '1', 'I', 'F'}, \
"Input image mode should not be {'L', '1', 'I', 'F'}"
h... | [
"def adjust_hue(img, hue_factor):\n\n input_mode = img.mode\n if input_mode in {'L', '1', 'I', 'F'}:\n return img\n\n h, s, v = img.convert('HSV').split()\n\n np_h = np.array(h, dtype=np.uint8)\n # uint8 addition take cares of rotation across boundaries\n with np.errstate(over='ignore'):\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Display the attributes of the layer. | def display_layer_parameters(self):
pprint.pprint(vars(self))
return | [
"def displayAttributes(self):\n # print(\"BitBrick.py <- displayAttributes: inputs=\"+\"self:\"+str(self)+\",\")\n print(\"bitBrick.py <- displayAttributes: {}-{}, status:{}, commands:{}, outputs:{}\".\\\n format(self.fuName, self.name, self.status, self.commands, self.outputs))",
"def ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return the ideal quarter-wavelength thickness of the AR coating layer at a given optimization frequency. Arguments | def ideal_thickness(self, opt_freq=160e9):
return (1/np.sqrt(self.dielectric)*3e8/(4*opt_freq)) | [
"def Q(wavelength):\r\n wavelength = wavelength / 10**-9\r\n if wavelength > 300:\r\n Q = 1.22\r\n else: # power-law fit to values from Earth 2017 technology (see Figure)\r\n Q = 1 / ((5.21575598 * (wavelength / 1000)**1.20669934) / 1.22) + 0.22\r\n return Q",
"def wavelength_from_frequ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
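`ideal_thickness` is the textbook quarter-wave formula t = c / (4 · f · √ε_r); a quick numeric check under an assumed relative permittivity of 2.4:

```python
import numpy as np

def ideal_thickness(dielectric: float, opt_freq: float = 160e9) -> float:
    # Quarter of the wavelength inside the dielectric
    return (1 / np.sqrt(dielectric)) * 3e8 / (4 * opt_freq)

print(ideal_thickness(2.4))  # ~3.0e-4 m, i.e. roughly 0.30 mm
```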
Connect all the AR coating layer objects, ensuring that the source and terminator layers come first and last, respectively. | def _interconnect(self):
self.clear_structure()
self.structure.append(self.source)
for i in range(len(self.stack)):
self.structure.append(self.stack[i])
self.structure.append(self.terminator)
return | [
"def _create_connections(self):\n\n for l in self.layers:\n for sl in l.sublayers:\n # Check if it's not the last layer\n if (l.level + 1) < len(self.layers):\n for d_sl in self.layers[l.level + 1].sublayers:\n self.insert_con... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return a 2x2 array quickly. Arguments | def _make_2x2(self, A11, A12, A21, A22, dtype=float):
array = np.empty((2,2), dtype=dtype)
array[0,0] = A11
array[0,1] = A12
array[1,0] = A21
array[1,1] = A22
return array | [
"def data2x2ToTwo2x1(arr2x2):\n if arr2x2.size != 2*185*388:\n raise ValueError('Expected n-d array size=185*388*2, input size=%d' % arr2x2.size)\n\n if arr2x2.shape[-1] != 2:\n raise ValueError('Expected n-d array shape=(185,388,2), input shape=%s' % str(arr2x2.shape))\n\n arr2x2.shape = (18... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Organize the refractive indices of the layers in the simulation. Returns | def _sort_ns(self):
n = []
for layer in self.structure:
n.append(layer.get_index())
n = np.asarray(n)
return n | [
"def indices(self):",
"def LayerIndex(self) -> int:",
"def _update_indices(self):\n\n self.nonel_idcs = sorted(self._nonel_tab.keys())\n self.incl_idcs = sorted(self._incl_tab.keys())\n self.incl_diff_idcs = sorted(self._incl_diff_tab.keys())",
"def numbering_rafts(rafts_loc, rafts_radii,... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Handle the special case of unpolarized light by running the model for both 's' and 'p' polarizations and computing the mean of the two results. Arguments | def _unpolarized_simulation(self, frequency, theta_0=0):
s_data = self.simulate(frequency, 's', theta_0)
p_data = self.simulate(frequency, 'p', theta_0)
T = (s_data + p_data)/2
return T | [
"def test_mean(self):\r\n TOL = 1E-13\r\n \r\n V = [5.007,4.994,5.005,4.990,4.999]\r\n mu_V = math.fsum(V)/len(V)\r\n sd = type_a.standard_uncertainty(V)\r\n\r\n u = 0.01\r\n x = [ ureal(v_i,u) for v_i in V ]\r\n\r\n mean = type_a.mean(x)\r\n\r\n self.a... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Create a layer from the set of preprogrammed materials and add it to the AR coating stack Arguments | def add_layer(self, material, thickness=5.0, units='mil', type='layer', \
stack_position=-1):
type = type.lower()
if type == 'layer':
layer = Layer()
layer.name = material.lower()
layer.thickness = thickness
layer.units = units
... | [
"def add_custom_layer(self, material, thickness, units, dielectric, loss_tangent, stack_position=-1):\n layer = Layer()\n layer.units = units\n layer.thickness = thickness\n layer.dielectric = dielectric\n layer.losstangent = loss_tangent\n if (stack_position == -1):\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Add a layer with custom properties to the AR stack. Arguments | def add_custom_layer(self, material, thickness, units, dielectric, loss_tangent, stack_position=-1):
layer = Layer()
layer.units = units
layer.thickness = thickness
layer.dielectric = dielectric
layer.losstangent = loss_tangent
if (stack_position == -1):
self.... | [
"def AddLayer(self, layer):\n pass",
"def add(self, layer):\n self._top = layer(self._top)\n layer_name_ = layer.__class__.__name__\n layer_params_ = layer.params\n self._info.append((layer_name_, layer_params_))",
"def __call__(cls, *args, **kwargs):\n layer = super(LayerAspect, cls).__... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Display all the simulation parameters in one place. | def display_sim_parameters(self):
pprint.pprint(vars(self))
return | [
"def display_parameters(self):\n ips = GAConfig[\"initial_population_size\"]\n ps = GAConfig[\"population_size\"]\n nomp = GAConfig[\"num_mating_pairs\"]\n mf = GAConfig[\"base_mutation_factor\"]\n ne = GAConfig[\"num_evolutions\"]\n noc = GAConfig[\"num_categories\"]\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Remove all elements from the current AR ``structure``. | def clear_structure(self):
self.structure = []
return | [
"def remove(self, structure):\n\n if not isinstance(structure, AtomicStructure):\n raise TypeError(\"{} is not an atomic structure\".format(structure))\n for atom in structure._atoms:\n self.remove_atom(atom)",
"def remove_all(self):\r\n\t\twhile len(self.components_array) > 0:... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Remove the specified layer from the AR coating stack. Arguments | def remove_layer(self, layer_pos):
self.stack.pop(layer_pos)
return | [
"def deleteLayer(self, layer='0'):\n \n pass",
"def removelayer(self, layername):\n if self.layers.has_key(layername):\n self.layers.pop(layername)",
"def remove_layer(self, layer=None):\n\t\tif layer is not None:\n\t\t\ttry:\n\t\t\t\tself.sublayers.remove(layer)\n\t\t\texcept ValueE... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Take the attributes of the ``Builder()`` object and execute the simulation at each frequency in ``Builder().freq_sweep``. Save the output to a columnized, tabseparated text file. Returns | def run_sim(self):
t0 = time.time()
print('Beginning AR coating simulation')
self._d_converter()
self._interconnect()
f_list = []
t_list = []
r_list = []
for f in self.freq_sweep:
results = self.sim_single_freq(f)
f_list.append(f)
... | [
"def generateTestBench(self):\n for it in range(1, len(self.delay)):\n self.delay[it] += self.delay[it-1]\n binPacket = []\n # Get the binary version of the packets to be written in the test bench\n for packet in self.packets:\n binPacket.append(packetBinary(packet)... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
List the materials with known properties. The listed material names are keys in the materials properties dictionary. | def show_materials(self):
print('\nThe materials with known dielectric properties are:\n')
pprint.pprint(mats.Electrical.props)
# pprint.pprint(mats.Electrical.DIELECTRIC)
print('\nThe materials with known loss tangents are:\n')
pprint.pprint(mats.Electrical.props)
# ppri... | [
"def get_materials_properties(dbpath): #<un-named>nook\n odb = openOdb(path=dbpath)\n data = []\n for _name,_mat in odb.materials.items():\n _elastic_mod = _mat.elastic.table[0][0]\n _poisson = _mat.elastic.table[0][1]\n if hasattr(_mat,\"plastic\"):\n _plastic = _mat.plasti... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns the root 'src' absolute path of this Chromium Git checkout. | def get_chromium_src_path() -> pathlib.Path:
_CHROMIUM_SRC_ROOT = pathlib.Path(__file__).parents[3].resolve(strict=True)
if _CHROMIUM_SRC_ROOT.name != 'src':
raise AssertionError(
f'_CHROMIUM_SRC_ROOT "{_CHROMIUM_SRC_ROOT}" should end in "src".')
try:
_assert_git_repository(_CHR... | [
"def source_root_dir():\n return os.path.abspath(os.path.dirname(__file__))",
"def menpowidgets_src_dir_path():\n # to avoid cluttering the menpowidgets.base namespace\n from pathlib import Path\n import os.path\n\n return Path(os.path.abspath(__file__)).parent",
"def getGitPath() -> osp:\n cu... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Gets the datetime of the commit at HEAD for a Git repository in UTC. The datetime returned contains timezone information (in timezone.utc) so that it can be easily be formatted or converted (e.g., to local time) based on the caller's needs. | def get_head_commit_datetime(
git_repo: Optional[Union[str, pathlib.Path]] = None) -> dt.datetime:
if not git_repo:
git_repo = get_chromium_src_path()
if not isinstance(git_repo, pathlib.Path):
git_repo = pathlib.Path(git_repo)
_assert_git_repository(git_repo)
timestamp = subp... | [
"def last_commit_date():\n return subprocess.check_output(['git', 'log', '-1', '--pretty=%ad',\n '--date=format:%d %b %H:%M', 'py/calendon']).decode().strip()",
"def __last_commit_date(self):\n return utils.run('git', ['log', '--all', '-1', '--format=%cI'],\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
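The cell truncates before the subprocess call; one plausible completion of what the docstring describes, using `git log -1 --format=%ct` (the committer date as a Unix timestamp). The exact flags the original uses are an assumption:

```python
import datetime as dt
import subprocess

def head_commit_datetime(repo: str) -> dt.datetime:
    # %ct prints the committer date of HEAD as a Unix timestamp
    ts = subprocess.check_output(
        ["git", "-C", repo, "log", "-1", "--format=%ct"], text=True
    ).strip()
    return dt.datetime.fromtimestamp(int(ts), tz=dt.timezone.utc)
```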
Takes a list or array of postcodes and returns it as a cleaned numpy array | def clean_postcodes(postcodes):
postcode_df = pd.DataFrame({'Postcode':postcodes})
postcode_df['Postcode'] = postcode_df['Postcode'].str.upper()
# If length is not 7 get rid of spaces. This fixes e.g. "SW19 2AZ" -> "SW192AZ"
postcode_df['Postcode'] = postcode_df['Postcode'].where(
postcode_df['... | [
"def preprocess(self, data: List[Dict[str, list]]) -> np.array:\n if self.encoding_type == 'normal':\n data = np.array([x['normal_vector'] for x in data])\n else:\n data = np.array([x['onehot_vector'] for x in data])\n return data",
"def decode(lst, typecode ):\n a = ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get an array of WGS84 (latitude, longitude) pairs from a list of postcodes. | def get_lat_long(self, postcodes):
# Fix evil postcodes
postcodes = clean_postcodes(postcodes)
postcode_df = self.postcode_df
postcode_df = postcode_df.fillna('np.nan')
postcode_df = postcode_df.set_index('Postcode')
index_data = postcode_df.loc[postcodes]
lat = ... | [
"def get_coordinates(postcode):\n\n # Remove the space between postcode.\n postcode = postcode.replace(\" \", \"\")\n base_url = \"http://api.postcodes.io/postcodes/{0}\"\n get_url = base_url.format(postcode)\n website_text_result = requests.get(get_url).text\n website_result = json.loads(website_... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get an array of flood risk probabilities from arrays of eastings and northings. Flood risk data is extracted from the Tool flood risk file. Locations not in a risk band circle return `Zero`; otherwise the name of the highest band the location sits in is returned. | def get_easting_northing_flood_probability(self, easting, northing):
# Read in risk files as pandas dataframe
risks = self.risk_df
prob_bands = np.full(np.size(easting), "Zero", dtype='<U8')
# For each point we get:
for point, point_east in enumerate(easting):
point_... | [
"def get_sorted_flood_probability(self, postcodes):\n # Fix evil postcodes\n postcodes = clean_postcodes(postcodes)\n\n # Get latitude and longitude\n output = self.get_lat_long(postcodes) # Returns latitude,longitude pairs in an array\n lat_long = pd.DataFrame(\n {'Po... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get an array of flood risk probabilities from a sequence of postcodes. Probability is ordered High>Medium>Low>Very low>Zero. Flood risk data is extracted from the `Tool` flood risk file. | def get_sorted_flood_probability(self, postcodes):
# Fix evil postcodes
postcodes = clean_postcodes(postcodes)
# Get latitude and longitude
output = self.get_lat_long(postcodes) # Returns latitude,longitude pairs in an array
lat_long = pd.DataFrame(
{'Postcode':post... | [
"def get_sorted_annual_flood_risk(self, postcodes):\n\n # Fix evil postcodes\n postcodes = clean_postcodes(postcodes)\n\n # Get lat, long of postcodes\n arr = self.get_lat_long(postcodes)\n lat = arr[:, 0] # Latitude\n lng = arr[:, 1] # Longitude\n\n # Convert lat, l... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get an array of estimated cost of a flood event from a sequence of postcodes. | def get_flood_cost(self, postcodes):
# Fix evil postcodes
postcodes = clean_postcodes(postcodes)
values_df = self.values_df[['Postcode', 'Total Value']]
values_df = values_df.loc[values_df.Postcode.isin(postcodes)]
values_df = values_df.set_index('Postcode').reindex(postcodes)
... | [
"def _construct_post_ent_cost(self):\n obs_count = T.cast(self.Xd.shape[0], 'floatX')\n ent_cost = -T.sum(cat_prior_ent(self.Yp)) / obs_count\n return ent_cost",
"def get_cost_function(prices):\n return np.array([prices[scrap] for scrap in RecipeOptimizer.scraps])",
"def get_cost(sel... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get an array of estimated annual flood risk in pounds sterling per year of a flood event from a sequence of postcodes and flood probabilities. | def get_annual_flood_risk(self, postcodes, probability_bands):
#get cost_value
cost_value = self.get_flood_cost(postcodes)
#create Dataframe for replacing corresonding value
risk_df = pd.DataFrame({'Probability Band': probability_bands})
total_df = risk_df.replace(
{... | [
"def get_sorted_annual_flood_risk(self, postcodes):\n\n # Fix evil postcodes\n postcodes = clean_postcodes(postcodes)\n\n # Get lat, long of postcodes\n arr = self.get_lat_long(postcodes)\n lat = arr[:, 0] # Latitude\n lng = arr[:, 1] # Longitude\n\n # Convert lat, l... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get a sorted pandas DataFrame of flood risks. | def get_sorted_annual_flood_risk(self, postcodes):
# Fix evil postcodes
postcodes = clean_postcodes(postcodes)
# Get lat, long of postcodes
arr = self.get_lat_long(postcodes)
lat = arr[:, 0] # Latitude
lng = arr[:, 1] # Longitude
# Convert lat, long -> easting,... | [
"def sort_gefs_frame(frame):\n if frame is None:\n return frame\n else:\n return pd.DataFrame(np.sort(frame), index=frame.index)",
"def sort_neighbors_by_site_index_i(neighbor_count_df: pd.DataFrame) -> pd.DataFrame:\n return neighbor_count_df.sort_values(by=[\"i\", \"distance_bin\", \"j\"]... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Insert a new value after a key. | def insert_after(self, key, value):
self._insert_after(self.head, key, value) | [
"def _insert_after(cls, node, key, value):\n # End of list base case\n if node is None:\n return\n\n # Base case for key found\n if node.value == key:\n node.next_ = Node(value, node.next_)\n return\n\n # Recursive case\n cls._insert_after(n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Insert a new value after a key recursively. | def _insert_after(cls, node, key, value):
# End of list base case
if node is None:
return
# Base case for key found
if node.value == key:
node.next_ = Node(value, node.next_)
return
# Recursive case
cls._insert_after(node.next_, key, ... | [
"def insert_after(self, key, value):\n self._insert_after(self.head, key, value)",
"def insert_recurse(self, key, data=None):\r\n current = self.root\r\n self.insert_recurse_aux(key, data, current)",
"def insert_next(self, key: Any, node: MappingTree):\n if node is None:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
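For context, a minimal self-contained list on which the recursive `_insert_after` above operates; the `Node` class is implied by the snippets but not shown, so its shape here is an assumption:

```python
class Node:
    def __init__(self, value, next_=None):
        self.value = value
        self.next_ = next_

class LinkedList:
    def __init__(self, head=None):
        self.head = head

    @classmethod
    def _insert_after(cls, node, key, value):
        if node is None:                      # end of list: key not found
            return
        if node.value == key:                 # splice the new node in
            node.next_ = Node(value, node.next_)
            return
        cls._insert_after(node.next_, key, value)

    def insert_after(self, key, value):
        self._insert_after(self.head, key, value)

lst = LinkedList(Node(1, Node(2)))
lst.insert_after(1, 99)                       # list becomes 1 -> 99 -> 2
```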
Delete node with the value. | def delete(self, value):
# Iterating to node that has value
node = self.head
last_node = None
while node is not None and node.value != value:
last_node = node
node = node.next_
# Check if the node has been found
if node is None:
return... | [
"def delete_node(self, node):",
"def delete_node(tx, node_value, node_type):\n cql = \"MATCH(n:\" + node_type + \"{name:$node_value}) DETACH DELETE(n);\"\n try:\n tx.run(cql, node_value=node_value)\n except Exception as e:\n print(str(e))",
"def delete_node(name: str, ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Try importing PySpark or display a warning message in Streamlit | def try_import_pyspark_in_streamlit():
try:
import pyspark
from pyspark.sql import SparkSession
except:
print("You need Pyspark installed to run NLU. Run <pip install pyspark==3.0.2>")
try:
import streamlit as st
st.error(
"You need Pyspar... | [
"def _visualization_validation_warning():\n if constants.ENV_VARIABLES.LIVY_VERSION_ENV_VAR in os.environ:\n _log(\"Visualizations are not supported in Livy Sessions. \"\n \"Use %%local and %matplotlib inline to access the \"\n \"python kernel from PySpark noteboo... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Authenticate environment for JSL licensed models. Installs NLP Healthcare if not detected in the environment. Either provide the path to the spark_nlp_for_healthcare.json file as the first param or manually enter them: SPARK_NLP_LICENSE_OR_JSON_PATH, AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY, JSL_SECRET. Set gpu=true if you want to enable G... | def auth(SPARK_NLP_LICENSE_OR_JSON_PATH='/content/spark_nlp_for_healthcare.json', AWS_ACCESS_KEY_ID='',
AWS_SECRET_ACCESS_KEY='', JSL_SECRET='', gpu=False):
if os.path.exists(SPARK_NLP_LICENSE_OR_JSON_PATH):
with open(SPARK_NLP_LICENSE_OR_JSON_PATH) as json_file:
j = json.load(json_file... | [
"def _get_or_create_sparksession(model_path=None):\n from johnsnowlabs import nlp\n\n from mlflow.utils._spark_utils import _get_active_spark_session\n\n _validate_env_vars()\n\n spark = _get_active_spark_session()\n if spark is None:\n spark_conf = {}\n spark_conf[\"spark.python.worker... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Wrap function with ST cache method if streamlit is importable | def wrap_with_st_cache_if_avaiable(f):
try:
import streamlit as st
logger.info("Using streamlit cache for load")
return st.cache(f, allow_output_mutation=True, show_spinner=False)
except:
logger.exception("Could not import streamlit and apply caching")
print("You need str... | [
"def test_functools_wraps(self):\n\n import streamlit as st\n\n @st.cache\n def f():\n return True\n\n self.assertEqual(True, hasattr(f, \"__wrapped__\"))",
"def cacheable(importer,exporter,extension='.cache'):\n def cachedecorator(function):\n def wrapper(*args,ca... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Normalize a dict of chunks. | def normalize_chunks(
chunks: Mapping[str, Union[int, Tuple[int, ...]]],
dim_sizes: Mapping[str, int],
) -> Dict[str, int]:
if not chunks.keys() <= dim_sizes.keys():
raise ValueError(
'all dimensions used in chunks must also have an indicated size: '
f'chunks={chunks} vs dim_sizes={dim_siz... | [
"def Normalize(d):\n for k1, d2 in d.iteritems():\n total = sum(d2.itervalues())\n for k2, v2 in d2.iteritems():\n d2[k2] = v2 / total",
"def normalize_all_data_in_dict(data: Data_dict_type, normalizers: Tuple[object, ...]) -> Data_dict_type:\n for key, item in data.items():\n values, sample... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Make a rechunking plan. | def rechunking_plan(
dim_sizes: Mapping[str, int],
source_chunks: Mapping[str, int],
target_chunks: Mapping[str, int],
itemsize: int,
max_mem: int,
) -> List[Dict[str, int]]:
plan_shapes = algorithm.rechunking_plan(
shape=tuple(dim_sizes.values()),
source_chunks=tuple(source_chunks[dim... | [
"def create_plan(self, batch_size):\n self.plan = cufft.cufftPlanMany(\n self.ndim, self.n.ctypes.data,\n self.inembed.ctypes.data, 1, self.idist,\n self.onembed.ctypes.data, 1, self.odist,\n self.ffttype, batch_size)\n self.planned = True",
"def plan(self... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Round down a chunk key to offsets corresponding to new chunks. | def _round_chunk_key(
chunk_key: core.ChunkKey,
target_chunks: Mapping[str, int],
) -> core.ChunkKey:
new_offsets = {}
for dim, offset in chunk_key.items():
chunk_size = target_chunks.get(dim)
if chunk_size is None:
new_offsets[dim] = offset
elif chunk_size == -1:
new_offsets[dim] = ... | [
"def update_chunk(self):\n for key, value in self.piece_coordinates.items():\n # Why is the key a numpy.int type ???\n self.chunk[value] = key",
"def _transform_indices(self, key):\n ndims = self.ndims\n if all(not (isinstance(el, slice) or callable(el)) for el in key):\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
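The truncated branch of `_round_chunk_key` presumably floors each offset to a multiple of the target chunk size (the `None` and `-1` cases shown above leave the offset alone or map it to the dimension start). The core arithmetic, assuming that reading:

```python
def round_offset(offset: int, chunk_size: int) -> int:
    """Floor an offset to the start of its containing target chunk."""
    return chunk_size * (offset // chunk_size)

# with target chunks of 10 along a dimension, offsets 0..9 map to 0, 10..19 to 10:
assert round_offset(17, 10) == 10
assert round_offset(20, 10) == 20
```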
Combine chunks into a single (ChunkKey, Dataset) pair. | def consolidate_chunks(
inputs: Iterable[Tuple[core.ChunkKey, xarray.Dataset]],
combine_kwargs: Optional[Mapping[str, Any]] = None,
) -> Tuple[core.ChunkKey, xarray.Dataset]:
inputs = list(inputs)
keys = [key for key, _ in inputs]
if len(set(keys)) < len(keys):
raise ValueError(f'chunk keys are not un... | [
"def _add_s3_dataset_chunks(self, transaction, s3_dataset_id, band, output):\n for key_map in output['key_maps']:\n micro_shape = [chunk_dim.stop - chunk_dim.start for chunk_dim in key_map['chunk']]\n # Convert index_min and index_max to scalars\n index_min = list(map(np.assc... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
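Behind `consolidate_chunks`, adjacent per-chunk datasets are merged back into one dataset, with `combine_kwargs` presumably forwarded to the xarray combine call. A toy illustration of the merge step using `xarray.combine_by_coords`:

```python
import numpy as np
import xarray

# two adjacent chunks along 'x', recombined into a single dataset
a = xarray.Dataset({"v": ("x", np.arange(5))}, coords={"x": np.arange(5)})
b = xarray.Dataset({"v": ("x", np.arange(5, 8))}, coords={"x": np.arange(5, 8)})
combined = xarray.combine_by_coords([a, b])
assert combined.sizes["x"] == 8
```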
Split a single (ChunkKey, xarray.Dataset) pair into many chunks. | def split_chunks(
key: core.ChunkKey,
dataset: xarray.Dataset,
target_chunks: Mapping[str, int],
) -> Iterator[Tuple[core.ChunkKey, xarray.Dataset]]:
# This function splits consolidated arrays into blocks of new sizes, e.g.,
# ⌈x_00 x_01 ...⌉ ⌈⌈x_00⌉ ⌈x_01⌉ ...⌉
# X = |x_10 x_11 ...| = ||x_1... | [
"def _chunking(\n ds: Union[xr.Dataset, xr.DataArray],\n dim: str = \"time\",\n number_chunks: Union[bool, int] = False,\n chunk_length: Union[bool, int] = False,\n ) -> Union[xr.Dataset, xr.DataArray]:\n if number_chunks and not chunk_length:\n chunk_length = np.flo... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
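The block-matrix comment in `split_chunks` boils down to repeated slicing along each dimension. A one-dimensional toy version of the split (the offset bookkeeping for the new ChunkKeys is omitted):

```python
import numpy as np
import xarray

ds = xarray.Dataset({"v": ("x", np.arange(10))})
target = 4  # new chunk size along 'x'
pieces = [ds.isel(x=slice(start, start + target))
          for start in range(0, ds.sizes["x"], target)]
assert [p.sizes["x"] for p in pieces] == [4, 4, 2]
```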
Rechunk in-memory pairs of (ChunkKey, xarray.Dataset). | def in_memory_rechunk(
inputs: List[Tuple[core.ChunkKey, xarray.Dataset]],
target_chunks: Mapping[str, int],
) -> Iterator[Tuple[core.ChunkKey, xarray.Dataset]]:
key, dataset = consolidate_chunks(inputs)
yield from split_chunks(key, dataset, target_chunks) | [
"def split_chunks(\n key: core.ChunkKey,\n dataset: xarray.Dataset,\n target_chunks: Mapping[str, int],\n) -> Iterator[Tuple[core.ChunkKey, xarray.Dataset]]:\n # This function splits consolidated arrays into blocks of new sizes, e.g.,\n # ⌈x_00 x_01 ...⌉ ⌈⌈x_00⌉ ⌈x_01⌉ ...⌉\n # X = |x_10 x_11 ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
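`in_memory_rechunk` is just the composition of the two functions above. A self-contained toy analogue over 1-D NumPy arrays keyed by start offset (a stand-in for `core.ChunkKey`), assuming the inputs cover one contiguous region:

```python
from typing import Iterator, List, Tuple
import numpy as np

def toy_rechunk(inputs: List[Tuple[int, np.ndarray]],
                target: int) -> Iterator[Tuple[int, np.ndarray]]:
    inputs = sorted(inputs)                        # consolidate: order by offset...
    start = inputs[0][0]
    data = np.concatenate([a for _, a in inputs])  # ...and merge into one array
    for i in range(0, len(data), target):          # split at the new chunk size
        yield start + i, data[i:i + target]

chunks = [(0, np.arange(3)), (3, np.arange(3, 8))]
assert [(k, len(a)) for k, a in toy_rechunk(chunks, 4)] == [(0, 4), (4, 4)]
```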
Tells info about the current time | def time(self):
time = datetime.datetime.now().strftime("%I:%M:%S")
self.speak("the current time is")
self.speak(time) | [
"def now():\n print(strftime(\"%Y-%m-%d %H:%M:%S\", gmtime()))",
"def _get_current_time() -> str:\n return datetime.now().strftime(\"%FT%H:%M:%S\")",
"def set_time(self):\n now = datetime.now()\n self.current_date = now.strftime(\"%Y%m%d\")\n self.current_time = now.strftime(\"%H%M\")... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Recursively update a dict. Sub-dicts won't be overwritten but updated as well. | def deepupdate(original, update):
    for key, value in original.items():
        if key not in update:
update[key] = value
elif isinstance(value, dict):
deepupdate(value, update[key])
return update | [
"def recursive_update(to_update, update):\n if update:\n for key, value in update.items():\n if isinstance(value, dict):\n value = recursive_update(to_update.get(key, {}), value)\n to_update[key] = value\n return to_update",
"def recursive_update(\n base_di... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
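Worth noting: this helper mutates and returns `update`, filling it with values from `original`, so values already present in `update` win. A usage check:

```python
original = {"a": 1, "b": {"x": 1, "y": 2}}
update = {"b": {"y": 99}, "c": 3}
merged = deepupdate(original, update)
# keys from `original` fill the gaps; existing values in `update` are kept
assert merged == {"a": 1, "b": {"x": 1, "y": 99}, "c": 3}
assert merged is update  # merged in place
```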
Sorts a list of elements using the bubble sort method. | def ord_burbujeo(lista):
    # final position of the segment to process
    n = len(lista) - 1
    # while there are at least 2 elements left to sort
    while n > 0:
        # bubble the list up to position n
burbujear(lista, 0, n)
print("DEBUG: ", lista)
n = n-1 | [
"def ord_burbujeo(lista):\r\n n = len(lista) - 1 \r\n \r\n while n > 0:\r\n # Llama a la función recursiva y le pasa la lista y una longitud. \r\n lista = b_recur(lista, n)\r\n # Resta un valor a la longitud, para que cada llamado a la recursiva \r\n # compare los valor... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
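The `burbujear` helper is not shown in the record; presumably it performs one adjacent-swap pass over `lista[0..n]`. A self-contained sketch of the whole algorithm under that assumption:

```python
def bubble_pass(items, start, end):
    """One bubbling pass: swap adjacent out-of-order pairs up to `end`."""
    for i in range(start, end):
        if items[i] > items[i + 1]:
            items[i], items[i + 1] = items[i + 1], items[i]

def bubble_sort(items):
    for n in range(len(items) - 1, 0, -1):  # shrink the unsorted segment
        bubble_pass(items, 0, n)
    return items

assert bubble_sort([5, 1, 4, 2, 8]) == [1, 2, 4, 5, 8]
```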
Converts a dataframe containing SHAP values in one-hot-encoded (OHE) format back to the original genomic positions | def ohe_inverse(df_shap_values):
# Auxiliary list to recreate original shap_values dataframe
list_shap_original = []
    # Regular expression to pick attribute names.
    # Since in our case attribute names are the genomic positions (i.e. an integer number), we use the regex below
import re
pattern ... | [
"def conversion_function(self):\n conversion = gpd.GeoDataFrame(gpd.GeoSeries(self)) # convert the geoseries into a geodataframe\n # rename the geometry column from '0' to 'geometry'\n conversion = conversion.rename(columns={0: 'geometry'}).set_geometry('geometry')\n return conversion",
"def df_with_... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
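The regex in `ohe_inverse` is truncated above. The underlying idea, assuming one-hot column names that start with the genomic position (e.g. "123_A"), is to group columns by position and sum their SHAP contributions; a toy pandas illustration:

```python
import pandas as pd

df_shap = pd.DataFrame([[0.1, 0.2, -0.3]], columns=["123_A", "123_C", "456_G"])
positions = [c.split("_")[0] for c in df_shap.columns]  # recover the position part
collapsed = df_shap.T.groupby(positions).sum().T        # sum SHAP values per position
assert list(collapsed.columns) == ["123", "456"]
assert abs(collapsed.loc[0, "123"] - 0.3) < 1e-9
```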
creates a new shape with hyp's attributes | def draw(hyp):
print 'g.createShape(',hyp.getAttList(),')'
print type(hyp.getAttList())
g.createShape(hyp.getAttList()) | [
"def draw(hyp):\r\n print 'g.createShape(',hyp.getAttList(),')'\r\n print type(hyp.getAttList())\r\n g.createShape(hyp.getAttList())",
"def _create_main_shape(self):\n\n a, b = gc( self.size/2,\n self._ZERO_DEGREES - self.angle,\n self._180_DEGREES + self.angle)\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
hides the existing shape associated with id | def hide(id):
if type(id) is int: # shapeID
g.hide(g.database[id])
else: # id refers to hypothetical shape
shapeID=pickShape(local_vars[id])
g.hide(g.database[shapeID]) | [
"def hide(id):\r\n if type(id) is int: # shapeID\r\n g.hide(g.database[id])\r\n else: # id refers to hypothetical shape\r\n shapeID=pickShape(local_vars[id])\r\n g.hide(g.database[shapeID])",
"def hideLayer(self, id):\n\n #log.debug('hideLayer: hiding layer %s' % str(id))\n\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
fills unspecified attributes of var with attributes of most recently mentioned shape that matches attributes in var | def one2(var):
varAttList = local_vars[var]
options = g.database.findMatches(local_vars[var])
shapeAttList = g.database[g.referenceOrder.pickMostRecent(options)].getAttList()
local_vars[var] = g.AttributeList(shapeAttList.items()+ varAttList.items()) | [
"def one2(var):\r\n varAttList = local_vars[var]\r\n options = g.database.findMatches(local_vars[var])\r\n shapeAttList = g.database[g.referenceOrder.pickMostRecent(options)].getAttList()\r\n local_vars[var] = g.AttributeList(shapeAttList.items()+ varAttList.items())",
"def copy_attributes(var1, var2)... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
applies a predicate to object represented by id | def applyPredicate(id,cmd):
if type(id) is int: # shapeID
attList = g.database[id].getAttList()
g.updateAttList(attList, cmd)
g.updateShape(id,attList)
elif type(id) is HypotheticalShape:
attList = id.getAttList()
try:
shapeID=pickShape(attList)
... | [
"def applyPredicate(id,cmd):\r\n\r\n if type(id) is int: # shapeID\r\n attList = g.database[id].getAttList()\r\n g.updateAttList(attList, cmd)\r\n g.updateShape(id,attList)\r\n\r\n elif type(id) is HypotheticalShape:\r\n attList = id.getAttList()\r\n try:\r\n shap... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Compute gravity gradient torques. | def _compute_gravity_torque(self):
pass | [
"def _compute_gravity_torque(self, curr_date):\n if self._to_add[0]:\n # return gravity gradient torque in satellite frame\n body2inertial = self.earth.getBodyFrame().getTransformTo(self.in_frame, curr_date)\n body2sat = self.inertial2Sat.applyTo(body2inertial.getRotation())\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
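The method body is an empty stub in this record; a full Orekit-based version appears in the negative. The textbook quantity it would evaluate is T = 3μ/r³ · (r̂ × I r̂), with r̂ the satellite position unit vector expressed in the body frame:

```python
import numpy as np

MU_EARTH = 3.986004418e14  # Earth's gravitational parameter, m^3/s^2

def gravity_gradient_torque(r_body: np.ndarray, inertia: np.ndarray) -> np.ndarray:
    """T = 3*mu/r^3 * (r_hat x (I @ r_hat)); r_body in meters, body frame."""
    r = np.linalg.norm(r_body)
    r_hat = r_body / r
    return 3.0 * MU_EARTH / r**3 * np.cross(r_hat, inertia @ r_hat)
```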
Property holding magnetic torque vector. | def mTorque(self):
pass | [
"def _compute_gravity_torque(self):\n pass",
"def setMotorTorque(self, torque):\r\n if torque < 0.0:\r\n torque = 0.0\r\n elif torque > 1.0:\r\n torque = 1.0\r\n torque *= self.maxTorque\r\n if self.reverse:\r\n torque *= -1\r\n dTorque = ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Initializes the dipole model. This method uses the simplified dipole model implemented in DipoleModel.py, which needs to initialize the induced magnetic density in the hysteresis rods. It also adds the hysteresis rods and bar magnets specified in the settings file to the satellite using the DipoleModel class. | def _initialize_dipole_model(self, model):
for key, hyst in model['Hysteresis'].items():
direction = np.array([float(x) for x in hyst['dir'].split(" ")])
self.dipoleM.addHysteresis(direction, hyst['vol'], hyst['Hc'], hyst['Bs'], hyst['Br'])
# initialize values for Hysteresis (ne... | [
"def initialize(self, grid=None, input_file=None, intensity=None, stormduration=None):\n self.grid = grid \n if self.grid==None:\n self.grid = create_and_initialize_grid(input_file) ##<- this is the same input file used for parameters. \n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
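For reference, a settings fragment in the shape the parsing loop above expects; the numeric values are made-up placeholders, and only the structure mirrors the record:

```python
import numpy as np

model = {
    "Hysteresis": {
        "rod1": {"dir": "1 0 0", "vol": 2.5e-8, "Hc": 12.0, "Bs": 0.73, "Br": 0.35},
    }
}
for name, hyst in model["Hysteresis"].items():
    direction = np.array([float(x) for x in hyst["dir"].split(" ")])
    # each rod contributes along `direction` with the given volume and B-H loop parameters
    assert direction.shape == (3,)
```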