| query (string, lengths 9-9.05k) | document (string, lengths 10-222k) | negatives (list, 19-20 items) | metadata (dict) |
|---|---|---|---|
Displays two images side by side | def display_side_by_side_imgs(left_img, right_img, left_title='Left Image', right_title='Right Image', figsize=(16, 8)):
# Convert the images to numpy arrays if they aren't already
if isinstance(left_img, torch.Tensor):
left_img = convert_tensor_to_numpy_img(left_img)
if isinstance(right_img, torch... | [
"def imageSideBySide(leftImg, leftTitle, rightImg, rightTitle, figsize=(20, 10), leftCmap=None, rightCmap=None):\n fig, axes = plt.subplots(ncols=2, figsize=figsize)\n if leftCmap == None:\n axes[0].imshow(leftImg)\n else:\n axes[0].imshow(leftImg, cmap=leftCmap)\n axes[0].set_title(leftTi... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Loads the content and style images and resizes them if appropriate | def load_content_and_style_images(content_dir, style_dir, max_allowable_size=400, resize_shape=None):
content_img = Image.open(content_dir).convert('RGB')
style_img = Image.open(style_dir).convert('RGB')
if max(content_img.size) > max_allowable_size:
size = max_allowable_size
else:
siz... | [
"def prepare_images():\n w.DISTRIBUTION = w.CONFIG.get('SIMULATION', 'DISTRIBUTION')\n rabbit_path = getcwd() + w.CONFIG.get('RABBIT', 'RELATIVE_PATH')\n wolf_path = getcwd() + w.CONFIG.get('WOLF', 'RELATIVE_PATH')\n grass_path = getcwd() + w.CONFIG.get('GRASS', 'RELATIVE_PATH')\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Check if item is a container (list, tuple, dict, set). | def is_container(item):
if isinstance(item, str):
return False
elif hasattr(item, "__iter__"):
return True
return False | [
"def is_container(x):\n return isinstance(x, Iterable) and not isinstance(x, str)",
"def is_container(value: object) -> TypeGuard[AnyContainer]:\n if isinstance(value, Container):\n return True\n if hasattr(value, \"__pt_container__\"):\n return is_container(cast(\"MagicContainer\", value).... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
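The `is_container` row above shows a looser match than the query suggests: the query names list, tuple, dict, and set, while the positive document treats any iterable except `str` as a container. A minimal standalone sketch of that documented behavior (assuming nothing beyond the fragment):

```python
def is_container(item):
    # Strings are iterable but deliberately do not count as containers.
    if isinstance(item, str):
        return False
    # Anything else exposing __iter__ (list, tuple, dict, set, generators...) does.
    return hasattr(item, "__iter__")

assert is_container([1, 2]) and is_container({"a": 1}) and is_container((3,))
assert not is_container("text")
assert is_container(x for x in range(3))  # generators pass too, beyond the query's list
```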
Copies all "filesets" found within the nested value (e.g. dict, list,...) into the destination directory. If no nested filesets are found then the original value is returned. Note that multiple nested filesets (e.g. a list) will to have unique names names (i.e. not differentiated by parent directories) otherwise there ... | def copy_nested_files(
value: ty.Any,
dest_dir: os.PathLike,
supported_modes: FileSet.CopyMode = FileSet.CopyMode.any,
**kwargs,
) -> ty.Any:
from ..utils.typing import TypeParser # noqa
cache: ty.Dict[FileSet, FileSet] = {}
def copy_fileset(fileset: FileSet):
try:
ret... | [
"def process_tree(self, src, dst):\n srcset_fmt = self.kw['image_srcset_format']\n srcset_sizes_all = self.kw['image_srcset_sizes']\n base_len = len(src.split(os.sep))\n for root, dirs, files in os.walk(src, followlinks=True):\n root_parts = root.split(os.sep)\n dst... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Update all templates that are present in the input spec. Should be run when all inputs used in the templates are already set. | def template_update(inputs, output_dir, state_ind=None, map_copyfiles=None):
inputs_dict_st = attr.asdict(inputs, recurse=False)
if map_copyfiles is not None:
inputs_dict_st.update(map_copyfiles)
if state_ind is not None:
for k, v in state_ind.items():
k = k.split(".")[1]
... | [
"def template_update(inputs, output_dir, map_copyfiles=None):\n dict_ = attr.asdict(inputs)\n if map_copyfiles is not None:\n dict_.update(map_copyfiles)\n\n from .specs import attr_fields\n\n fields_templ = [\n fld for fld in attr_fields(inputs) if fld.metadata.get(\"output_file_template\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Update a single template from the input_spec or output_spec based on the value from inputs_dict (checking the types of the fields that have "output_file_template") | def template_update_single(
field, inputs, inputs_dict_st=None, output_dir=None, spec_type="input"
):
# if input_dict_st with state specific value is not available,
# the dictionary will be created from inputs object
from ..utils.typing import TypeParser # noqa
from pydra.engine.specs import LazyFi... | [
"def template_update(inputs, output_dir, map_copyfiles=None):\n dict_ = attr.asdict(inputs)\n if map_copyfiles is not None:\n dict_.update(map_copyfiles)\n\n from .specs import attr_fields\n\n fields_templ = [\n fld for fld in attr_fields(inputs) if fld.metadata.get(\"output_file_template\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Formatting the field template based on the values from inputs. Taking into account that the field with a template can be a MultiOutputFile and the field values needed in the template can be a list, returning a list of formatted templates in that case. Allowing for multiple input values used in the template as long as t... | def _template_formatting(field, inputs, inputs_dict_st):
from .specs import MultiInputObj, MultiOutputFile
# if a template is a function it has to be run first with the inputs as the only arg
template = field.metadata["output_file_template"]
if callable(template):
template = template(inputs)
... | [
"def _template_formatting(field, inputs):\n from .specs import MultiOutputFile\n\n template = field.metadata[\"output_file_template\"]\n # as default, we assume that keep_extension is True\n keep_extension = field.metadata.get(\"keep_extension\", True)\n\n inp_fields = re.findall(\"{\\w+}\", template... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Formatting a single template for a single element (if a list). Taking into account that a file used in the template (file_template) and the template itself could have file extensions (assuming that if the template has an extension, the field value extension is removed; if the field has an extension and no template extension, then i... | def _element_formatting(template, values_template_dict, file_template, keep_extension):
if file_template:
fld_name_file, fld_value_file = file_template
# splitting the filename for name and extension,
# the final value used for formatting depends on the template and keep_extension flag
... | [
"def _template_formatting(field, inputs):\n from .specs import MultiOutputFile\n\n template = field.metadata[\"output_file_template\"]\n # as default, we assume that keep_extension is True\n keep_extension = field.metadata.get(\"keep_extension\", True)\n\n inp_fields = re.findall(\"{\\w+}\", template... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Check whether a file path is on a CIFS filesystem mounted in a POSIX host. POSIX hosts are assumed to have the ``mount`` command. On Windows, Docker mounts host directories into containers through CIFS shares, which has support for Minshall+French symlinks, or text files that the CIFS driver exposes to the OS as symlin... | def on_cifs(cls, path: os.PathLike) -> bool:
return cls.get_mount(path)[1] == "cifs" | [
"def ismount(path):\r\n unc, rest = splitunc(path)\r\n if unc:\r\n return rest in (\"\", \"/\", \"\\\\\")\r\n p = splitdrive(path)[1]\r\n return len(p) == 1 and p[0] in '/\\\\'",
"def ismount(path):\n try:\n s1 = os.lstat(path)\n except (OSError, ValueError):\n # It doesn't ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Parse the output of ``mount`` to produce (path, fs_type) pairs. Separated from _generate_cifs_table to enable testing logic with real outputs | def parse_mount_table(
cls, exit_code: int, output: str
) -> ty.List[ty.Tuple[str, str]]:
# Not POSIX
if exit_code != 0:
return []
# Linux mount example: sysfs on /sys type sysfs (rw,nosuid,nodev,noexec)
# <PATH>^^^^ ^^^^^<FSTYPE>
... | [
"def _parse_mount_table(exit_code, output):\n # Not POSIX\n if exit_code != 0:\n return []\n\n # Linux mount example: sysfs on /sys type sysfs (rw,nosuid,nodev,noexec)\n # <PATH>^^^^ ^^^^^<FSTYPE>\n # OSX mount example: /dev/disk2 on / (hfs, local, journaled)\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
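The `parse_mount_table` row above turns `mount` output into `(path, fs_type)` pairs, covering both the Linux format (`sysfs on /sys type sysfs (rw,...)`) and the macOS format (`/dev/disk2 on / (hfs, local, journaled)`). A self-contained sketch of that parsing; the regex is an assumption reconstructed from the two example lines quoted in the code comments:

```python
import re
import typing as ty

# Linux:  sysfs on /sys type sysfs (rw,nosuid,nodev,noexec)
# macOS:  /dev/disk2 on / (hfs, local, journaled)
MOUNT_RE = re.compile(r" on (?P<path>/\S*) (?:type (?P<linux>\S+)|\((?P<osx>[^,)]+)[,)])")

def parse_mount_output(output: str) -> ty.List[ty.Tuple[str, str]]:
    table = []
    for line in output.splitlines():
        match = MOUNT_RE.search(line)
        if match:
            table.append((match["path"], match["linux"] or match["osx"]))
    return table

print(parse_mount_output(
    "sysfs on /sys type sysfs (rw,nosuid,nodev,noexec)\n"
    "/dev/disk2 on / (hfs, local, journaled)"
))  # [('/sys', 'sysfs'), ('/', 'hfs')]
```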
Patch the mount table with new values. Used in test routines | def patch_table(cls, mount_table: ty.List[ty.Tuple[str, str]]):
orig_table = cls._mount_table
cls._mount_table = list(mount_table)
try:
yield
finally:
cls._mount_table = orig_table | [
"def test_update_generic_table_data(self):\n pass",
"def sync_crafting_table(self):\n\n for i, slot in self.crafting_table.iteritems():\n self.crafting[i[0] * self.crafting_stride + i[1]] = slot",
"def test_patch_hyperflex_cluster(self):\n pass",
"def test_patch_record(self):\n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Takes OPTAA data streamed to shore from the Cabled Array benthic platforms and cleans up the data set to make it more user-friendly. Primary task is renaming parameters and dropping some that are of limited use. Additionally, recalculate the intermediate products (e.g. absorption and attenuation) and add them to the dat... | def optaa_benthic(ds, cal_file):
# check to see if there is more than one deployment in the data set
if len(np.unique(ds['deployment'].values)) > 1:
raise ValueError('More than one deployment in the data set. Please structure processing request to process '
'one deployment at a... | [
"def optaa_profiler(ds, cal_file):\n # check to see if there is more than one deployment in the data set\n if len(np.unique(ds['deployment'].values)) > 1:\n raise ValueError('More than one deployment in the data set. Please structure processing request to process '\n 'one deplo... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Takes OPTAA data recorded by the Cabled Shallow Profiler system and cleans up the data set to make it more user-friendly. Primary task is renaming parameters and dropping some that are of limited use. Additionally, recalculate the intermediate products (e.g. absorption and attenuation) and add them to the data set. Fina... | def optaa_profiler(ds, cal_file):
# check to see if there is more than one deployment in the data set
if len(np.unique(ds['deployment'].values)) > 1:
raise ValueError('More than one deployment in the data set. Please structure processing request to process '
'one deployment at ... | [
"def optaa_benthic(ds, cal_file):\n # check to see if there is more than one deployment in the data set\n if len(np.unique(ds['deployment'].values)) > 1:\n raise ValueError('More than one deployment in the data set. Please structure processing request to process '\n 'one deploy... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Create a new wavelength named name, belonging to XCrystal object crystal, with wavelength and optionally f_pr, f_prpr assigned. | def __init__(
self, name, crystal, wavelength, f_pr=0.0, f_prpr=0.0, dmin=0.0, dmax=0.0
):
# set up this object
self._name = name
self._crystal = crystal
self._wavelength = wavelength
self._f_pr = f_pr
self._f_prpr = f_prpr
self._resolution_high = dm... | [
"def PM_setWavelength(self,channel,wavelength):\n if channel not in ApexAP1000.PM_CHANNELS:\n raise ValueError('Unknow channel during power measurement')\n sentStr = self.headStr('PM')+'SETWAVELENGTH[%d] %g'%(channel,wavelength)\n return self.write(sentStr)",
"def setWavelength(sel... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Add a sweep to this wavelength. | def add_sweep(
self,
name,
sample,
directory=None,
image=None,
beam=None,
reversephi=False,
distance=None,
gain=0.0,
dmin=0.0,
dmax=0.0,
polarization=0.0,
frames_to_process=None,
user_lattice=None,
us... | [
"def config_sweep(self):\n freq = [100, 400, 900, 1400, 1900, 2200, 2600, 2800, 3200, 3500]\n self.sweep = SpectrumSweep(self.transmit, self.receive,\n frequencies=freq, visualise=False)",
"def add_sweep(self, environment_name, agent_name, agent_params, sweep_dict):... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Remove a sweep object from this wavelength. | def remove_sweep(self, sweep):
try:
self._sweeps.remove(sweep)
except ValueError:
pass | [
"def remove_sweep(self, s):\n\n for wave in self._wavelengths.keys():\n self._wavelengths[wave].remove_sweep(s)",
"def remove_spectrum(self, spectrum):\n\n del self.spectrum_dict[spectrum.name]",
"def remove(self, obj):\n self.bin_list.remove(obj)\n self.sum -= obj",
"de... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test the base Stats class. | def test_base_stats():
# type is required
config = { 'file_name' : 'dummy_file' }
with np.testing.assert_raises(ValueError):
stats = piff.Stats.process(config)
# ... for all stats in list.
config = [ { 'type': 'TwoDHist', 'file_name': 'f1' },
{ 'type': 'Whisker', 'file_name':... | [
"def test_get_stats(self):\n pass",
"def test_stats_class_initialisation(self):\n self.assertIsInstance(self.stats,cardutils.Stats)",
"def test_stats_class_init_empty(self):\n self.assertIsInstance(self.stats, cardutils.Stats)",
"def test_stats(self):\n return self._test(\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test HSMCatalog with fourth_order=True | def test_fourth_order():
if __name__ == '__main__':
logger = piff.config.setup_logger(verbose=2)
else:
logger = piff.config.setup_logger(log_file='output/test_hsmcatalog.log')
image_file = os.path.join('output','test_stats_image.fits')
cat_file = os.path.join('output','test_stats_cat.fi... | [
"def test_shelfmark_sort(self, document, multifragment, empty_solr):\n doc2 = Document.objects.create()\n TextBlock.objects.create(document=doc2, fragment=multifragment)\n # create a third document with shelfmark that should come after\n # one of ours only when natural sorting is enabled... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test that extra property_cols get output correctly in the hsm output file. | def test_property_cols():
image_file = 'input/D00572501_z_c01_r3624p01_immasked.fits.fz'
cat_file = 'input/D00572501_z_c01_r5473p01_piff.fits'
psf_file = os.path.join('output','test_property_cols.piff')
hsm_file = os.path.join('output','test_property_cols_hsm.fits')
nstars = 25
scale = 0.26
... | [
"def test_prints_columns(mock_reader_select_all, dump_command, capsys):\n dump_command.command(Namespace(filename=\"foo\", columns=True, varno=False, verbose=False))\n lines = capsys.readouterr().out.splitlines()\n assert len(lines) == 7\n assert lines[3].startswith(\"lat@hdr\")\n assert lines[4].sta... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Clean real-time ACE data using the status flag. Note: supports 'clean' and 'dirty'. Replaces all fill values with NaN. 'Clean' keeps only a status flag of zero (nominal data); 'dirty' keeps status flags < 9 (accepts bad data records, removes no-data records). | def clean(self):
# Perform the standard ACE cleaning
max_status = mm_ace.clean(self)
# Replace bad values with NaN and remove times with no valid data
self.data = self.data[self.data['status'] <= max_status]
return | [
"def clean(self):\n # Perform the standard ACE cleaning\n max_status = mm_ace.clean(self)\n\n # Replace bad values with NaN and remove times with no valid data\n ecols = ['eflux_38-53', 'eflux_175-315']\n\n # Evaluate the electron flux data\n self[self.data['status_e'] > max_status, ecols] = np.na... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets the last_modified_by of this JsonJdbcIngestionProperties. | def last_modified_by(self, last_modified_by):
self._last_modified_by = last_modified_by | [
"def last_edited_by(self, last_edited_by):\n\n self._last_edited_by = last_edited_by",
"def last_modified_user(self, last_modified_user):\n\n self._last_modified_user = last_modified_user",
"def last_modified_by(self):\n if \"lastModifiedBy\" in self._prop_dict:\n if isinstance(s... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets the last_modified_on of this JsonJdbcIngestionProperties. | def last_modified_on(self, last_modified_on):
self._last_modified_on = last_modified_on | [
"def setLastModified(when):",
"def last_modified(self, last_modified):\n self._last_modified = last_modified",
"def last_modified(self, last_modified):\n\n self._last_modified = last_modified",
"def set_last_modification_date(metadata: types.Metadata) -> None:\n metadata['meta_modify_date'] =... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets the system of this JsonJdbcIngestionProperties. | def system(self, system):
self._system = system | [
"def sys_config(self, sys_config):\n\n self._sys_config = sys_config",
"def jdbc_properties(self, jdbc_properties):\n\n self._jdbc_properties = jdbc_properties",
"def set_system_name(self, system_name):\n\n\t\tif system_name is not None and not isinstance(system_name, str):\n\t\t\traise SDKExcepti... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets the jdbc_properties of this JsonJdbcIngestionProperties. | def jdbc_properties(self, jdbc_properties):
self._jdbc_properties = jdbc_properties | [
"def jdbc_driver(self, jdbc_driver):\n\n self._jdbc_driver = jdbc_driver",
"def set_properties(self, props: Dict) -> 'KafkaSourceBuilder':\n gateway = get_gateway()\n j_properties = gateway.jvm.java.util.Properties()\n for key, value in props.items():\n j_properties.setPrope... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets the jdbc_driver of this JsonJdbcIngestionProperties. | def jdbc_driver(self, jdbc_driver):
self._jdbc_driver = jdbc_driver | [
"def jdbc_properties(self, jdbc_properties):\n\n self._jdbc_properties = jdbc_properties",
"def set_driver(self, driver):\n self.driver = driver",
"def featurestore_jdbc_connector_connections(self, featurestore_jdbc_connector_connections):\n\n self._featurestore_jdbc_connector_connections =... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets the cron_expression of this JsonJdbcIngestionProperties. | def cron_expression(self, cron_expression):
self._cron_expression = cron_expression | [
"def cron_expression(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"cron_expression\")",
"def cron_time_zone(self, cron_time_zone):\n\n self._cron_time_zone = cron_time_zone",
"def cron(self):\n return",
"def cron_schedule(self) -> Optional[pulumi.Input[str]]:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets the cron_time_zone of this JsonJdbcIngestionProperties. | def cron_time_zone(self, cron_time_zone):
self._cron_time_zone = cron_time_zone | [
"def cron_expression(self, cron_expression):\n\n self._cron_expression = cron_expression",
"def setTimezone(self, timezone):\n\n self.timezone = timezone",
"def time_zone(self, time_zone):\n self._time_zone = time_zone",
"def time_zone(self, time_zone):\n\n self._time_zone = time_z... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets the execute_profiling of this JsonJdbcIngestionProperties. | def execute_profiling(self, execute_profiling):
self._execute_profiling = execute_profiling | [
"def profile_progression(self, profile_progression):\n\n self._profile_progression = profile_progression",
"def jdbc_properties(self, jdbc_properties):\n\n self._jdbc_properties = jdbc_properties",
"def setProfileJobs(self,profile=False):\n self.__profileJobs = profile",
"def set_profiling_en... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets the extract_data_sample of this JsonJdbcIngestionProperties. | def extract_data_sample(self, extract_data_sample):
self._extract_data_sample = extract_data_sample | [
"def is_sample_data_extracted(self, is_sample_data_extracted):\n self._is_sample_data_extracted = is_sample_data_extracted",
"def sample_url(self, sample_url: str):\n\n self._sample_url = sample_url",
"def sample(self, sample):\n self._sample = sample",
"def is_sample_data_extracted(self)... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets the detect_advanced_data_types of this JsonJdbcIngestionProperties. | def detect_advanced_data_types(self, detect_advanced_data_types):
self._detect_advanced_data_types = detect_advanced_data_types | [
"def supported_data_types(cls):",
"def set_dtype(self, dtype):\n _d = dtype.lower()\n if \"phot\" in _d:\n self.dtype = \"photon\"\n elif \"ener\" in _d:\n self.dtype = \"energy\"\n else:\n raise ValueError('Unknown detector type {0}'.format(dtype))",
... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets the tables_to_skip of this JsonJdbcIngestionProperties. | def tables_to_skip(self, tables_to_skip):
self._tables_to_skip = tables_to_skip | [
"def tables(self, tables):\n\n self._tables = tables",
"def source_tables(self, source_tables):\n self._source_tables = source_tables",
"def set_tables(self, tables):\n self.navigation_table = tables[self.navigation_table_class._meta.name]\n self.content_table = tables[self.content_t... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Decode the response headers and create appropriate metrics based on the header values. The response_headers are passed in as a list of tuples. [(HEADER_NAME0, HEADER_VALUE0), (HEADER_NAME1, HEADER_VALUE1)] | def process_response_headers(self, response_headers):
settings = self.settings
if not settings:
return
if not settings.cross_application_tracer.enabled:
return
appdata = None
try:
for k, v in response_headers:
if k.upper() ==... | [
"def process_response(self, metric_response):\n result = {}\n for meter_name, spectator_metric in metric_response.items():\n self.process_metric(meter_name, spectator_metric, result)\n return result",
"def metrics(self) -> global___Response.Metrics:",
"def __get_metrics(responses: Responses) -> Di... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
It takes in a website link as input and returns all the class names used on the website. | def extract_all_tags(final_link, driver):
#driver = webdriver.Chrome(executable_path="ChromeDriver/chromedriver.exe")
driver.get(str(final_link))
classes = []
tags = ['div', 'td', 'li', 'a']
for tag in tags:
a = driver.find_elements_by_tag_name(str(tag))
b = len(a)
for i in ... | [
"def get_classes(html):\n # elements = html.find_all(\"span\", \"code\")\n # titles = html.find_all(\"span\", \"title\")\n # classes = []\n # for i in range(len(elements)):\n # item = elements[i]\n # tit = titles[i]\n # classes += [(item.text.replace('\\xa0', ' '), tit.text.replace(... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
1. It takes all the useful links from the home website as input. 2. Enforces web crawling and extracts all useful links found. 3. Extracts the useful data from all the useful links extracted. | def deep_link_scraping(final_links, driver):
import re
second_links = []
for website2 in final_links:
links2 = extract_all_links(website2, driver)
final_links1 = find_usefull_links(links2, classmodel, class_count_vect)
final_links2 = list(set(final_links1) - set(final_links))
... | [
"def link_scraping(final_links, driver):\n\n for final_link in final_links:\n tags = extract_all_tags(final_link, driver)\n if len(tags) != 0:\n final_tags = find_usefull_tags(tags, tagmodel, tag_count_vect)\n if len(final_tags) != 0:\n print('Extracting(classna... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
It takes useful links as input and calls the function scrape_data or scrape_data_tag depending on whether useful tags are received. | def link_scraping(final_links, driver):
for final_link in final_links:
tags = extract_all_tags(final_link, driver)
if len(tags) != 0:
final_tags = find_usefull_tags(tags, tagmodel, tag_count_vect)
if len(final_tags) != 0:
print('Extracting(classname): ', fina... | [
"def deep_link_scraping(final_links, driver):\n\n import re\n second_links = [] \n for website2 in final_links:\n links2 = extract_all_links(website2, driver)\n final_links1 = find_usefull_links(links2, classmodel, class_count_vect)\n final_links2 = list(set(final_links1) - set(final_l... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
initializes an empty graph (no nodes, no edges) and an empty list that stores graph copies | def __init__(self):
self._graph = DirectedGraph()
self._graph_copies = [] | [
"def empty_graph():\n from graph import Graph\n return Graph()",
"def __init__(self):\n self.graph = {}\n self.edges = 0\n self.vertices = 0",
"def __init__(self, graph):\n self.graph = graph\n self.vertices = []\n self.edges = []\n self.path_length = 0",
... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
checks if there is an edge between two vertices read from keyboard. | def check_edges(self):
start = int(input('Enter start vertex: '))
end = int(input('Enter end vertex: '))
if self._graph.is_edge_between(start, end):
print('There is an edge from ' + str(start) + ' to ' + str(end))
else:
print('There is NO edge from ' + str(start) ... | [
"def has_edge(self, v0, v1) -> bool:\n for i in self.ed:\n if v0 in i[0] and i[0].nbr(v0) == v1:\n return True\n return False",
"def isEdge(self,x,y):\n\t\treturn y in self._dictOut[x]",
"def IsEdge(self, p_int, p_int_1):\n ...",
"def has_edge(self, v1, v2):\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
prints the degree of a vertex read from keyboard | def print_degree(self):
vertex = int(input('enter vertex: '))
in_degree = self._graph.get_in_degree(vertex)
out_degree = self._graph.get_out_degree(vertex)
print('The in degree of ' + str(vertex) + ' is ' + str(in_degree))
print('The out degree of ' + str(vertex) + ' is ' + str(o... | [
"def degree(self, v) -> {int}:\n assert self.has_vertex(v), \\\n f'{v} is not a valid vertex'\n return len(self.graph_dict[v])",
"def degree(self, v):\n self._validateVertex(v)\n return self._adj[v].size()",
"def degree(self, vertex):\n deg = 0\n for v in self.graph:... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
iterates all outbound edges of a vertex read from keyboard, prints them and their cost. | def iterate_outbound_edges(self):
vertex = int(input('enter vertex: '))
try:
vertices = self._graph.get_outbound_edges(vertex)
except ValueError as ve:
print(ve)
return
print('Outbound edges from ' + str(vertex) + ':')
for v in vertices:
... | [
"def iterate_inbound_edges(self):\n vertex = int(input('enter vertex: '))\n try:\n vertices = self._graph.get_inbound_edges(vertex)\n except ValueError as ve:\n print(ve)\n return\n print('Inbound edges from ' + str(vertex) + ':')\n for v in vertic... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
iterates all inbound edges of a vertex read from keyboard, prints them and their cost. | def iterate_inbound_edges(self):
vertex = int(input('enter vertex: '))
try:
vertices = self._graph.get_inbound_edges(vertex)
except ValueError as ve:
print(ve)
return
print('Inbound edges from ' + str(vertex) + ':')
for v in vertices:
... | [
"def iterate_outbound_edges(self):\n vertex = int(input('enter vertex: '))\n try:\n vertices = self._graph.get_outbound_edges(vertex)\n except ValueError as ve:\n print(ve)\n return\n print('Outbound edges from ' + str(vertex) + ':')\n for v in ver... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
prints the cost of an edge between two vertices read from keyboard, if the edge exists. If the edge does not exist, or the vertices are not valid, raises ValueError. | def print_cost(self):
start = int(input('start vertex: '))
end = int(input('end vertex: '))
cost = self._graph.get_cost(start, end)
print('the cost of edge is ' + str(cost)) | [
"def edge_cost():\n return 1",
"def check_edges(self):\n start = int(input('Enter start vertex: '))\n end = int(input('Enter end vertex: '))\n if self._graph.is_edge_between(start, end):\n print('There is an edge from ' + str(start) + ' to ' + str(end))\n else:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
restores the state of the graph from a previously made copy, if there is at least one copy to restore from. | def restore_graph_ui(self):
if len(self._graph_copies) == 0:
print('No copies to restore!')
return
# the last made copy is restored in graph
self._graph = self._graph_copies.pop(-1) | [
"def _restoreGraph(self):\n\n # self.tempG = self.g.copy()\n\n if nx.is_directed(self.g):\n self.tempG = nx.DiGraph(self.g)\n else:\n self.tempG = nx.Graph(self.g)\n self.deletedEdges = []\n self.deletedNodes = []",
"def restore(self):\n\n self.brain... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
reads a graph from a file whose name is given from keyboard. | def read_graph_ui(self):
filename = input('enter filename: ')
try:
self._graph = read_graph(filename)
except FileNotFoundError:
print('invalid filename! ') | [
"def read_graph(filename):\n with open(filename) as f:\n g = eval(f.read())\n return g",
"def read_graph_file(file_name):\n def _line_to_edge(index, line):\n \"\"\" Parse each line \"\"\"\n items = line.upper().strip().split(',')\n if len(items) != 3: \n raise Excep... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Generate positions (x, y coordinates) for each spike on the probe. This function assumes that the spikes were generated with the kilosort algorithm so the base_folder holds all the necessary .npy arrays. In order for this function to find which channels are the most relevant in each spike it looks into the spike's assi... | def generate_probe_positions_of_spikes(base_folder, binary_data_filename, number_of_channels_in_binary_file,
used_spikes_indices=None, position_mult=2.25, threshold=0.1):
# Load the required data from the kilosort folder
channel_map = np.load(os.path.join(base_folder, 'cha... | [
"def generate_probe_positions_of_templates(base_folder, threshold=0.1, new_templates_array=None):\n # Load the required data from the kilosort folder\n channel_positions = np.load(os.path.join(base_folder, 'channel_positions.npy'))\n if new_templates_array is None:\n try:\n templates = np... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Generate positions (x, y coordinates) for each template found by kilosort on the probe or passed to it by the new_templates_array. This function assumes that the base_folder holds all the necessary .npy arrays. If no new_templates_array is passed it will look for the templates.npy file (created by kilosort) which is th... | def generate_probe_positions_of_templates(base_folder, threshold=0.1, new_templates_array=None):
# Load the required data from the kilosort folder
channel_positions = np.load(os.path.join(base_folder, 'channel_positions.npy'))
if new_templates_array is None:
try:
templates = np.load(os.p... | [
"def generate_probe_positions_of_spikes(base_folder, binary_data_filename, number_of_channels_in_binary_file,\n used_spikes_indices=None, position_mult=2.25, threshold=0.1):\n # Load the required data from the kilosort folder\n channel_map = np.load(os.path.join(base_fold... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Plot the spike positions as a scatter plot on a probe marked with brain regions | def view_spike_positions(spike_positions, brain_regions, probe_dimensions, labels_offset=80, font_size=20):
fig = plt.figure()
ax = fig.add_axes([0.08, 0.05, 0.9, 0.9])
ax.scatter(spike_positions[:, 0], spike_positions[:, 1], s=5)
ax.set_xlim(0, probe_dimensions[0])
ax.set_ylim(0, probe_dimensions[... | [
"def plot_variant_positions(strains):\n if strains.lower() == 'all':\n strains = None\n strains = get_required_strains(strains)\n gd_data = []\n with database.make_connection() as connection:\n for strain in strains:\n hits = r.table(TABLE).filter(lambda row: row['StrainID'].mat... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns the Collatz function value of value. | def collatz(value):
assert value >= 1
if value % 2 == 0:
return value/2
else:
return 3 * value + 1 | [
"def z(self) -> float:\n return self.A[3] if self.scalar_vector else self.A[2]",
"def zvalue(value, sigma, mu):\n return (value - mu) / sigma",
"def get_z(self):\n return self.coords[2]",
"def value(self, par):\n return _fiasco_numpy.ScalarFunction_value(self, par)",
"def z(self, val... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns the Collatz length associated with val, building the collatz_len_dict as it goes. | def collatz_length(val):
assert val >= 1
# Seed the dictionary with collatz_length(1) = 1.
if val == 1:
collatz_len_dict[1] = 1
return collatz_len_dict[1]
# Return the collatz length if it exists in the dictionary.
if val in collatz_len_dict:
return collat... | [
"def main():\r\n \r\n TOP_VAL = 1000000\r\n for i in xrange(1, TOP_VAL):\r\n _ = collatz_length(i) # Seed each collatz length.\r\n \r\n # Find the key with largest value in the collatz length dictionary.\r\n value, collatz_len = max(collatz_len_dict.iteritems(),\r\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Finds the number with the longest Collatz sequence under 1 million. | def main():
TOP_VAL = 1000000
for i in xrange(1, TOP_VAL):
_ = collatz_length(i) # Seed each collatz length.
# Find the key with largest value in the collatz length dictionary.
value, collatz_len = max(collatz_len_dict.iteritems(),
key=lambda x:x[1... | [
"def find_longest(x):\n max_len = -1\n result = -1\n for i in xrange(1, x):\n length = len(collatz(i))\n if length > max_len:\n max_len = length\n result = i\n return result",
"def longest_collatz_seq(upto):\n collatz_dict, longestseq = {}, []\n\n for i in ran... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
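The three Collatz rows above (step function, memoized sequence length, driver) are Python 2 code (`xrange`, `iteritems`). A Python 3 sketch of the same memoization idea, reconstructed from the visible fragments; only `collatz_len_dict` is named in the original, the rest is an assumption:

```python
collatz_len_dict = {1: 1}  # seed: the sequence starting at 1 has length 1

def collatz(value):
    # One Collatz step: halve even values, 3n + 1 for odd ones.
    return value // 2 if value % 2 == 0 else 3 * value + 1

def collatz_length(val):
    # Recurse until a cached value is reached, filling the cache on the way back.
    if val not in collatz_len_dict:
        collatz_len_dict[val] = 1 + collatz_length(collatz(val))
    return collatz_len_dict[val]

# Number below one million with the longest Collatz sequence.
print(max(range(1, 1_000_000), key=collatz_length))  # 837799
```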
convert a matplotlib colormap into a PIL palette | def cmap_to_pil_palette(cmap):
# return (255.*np.array(
# map(lambda x: cmap(x)[0:3], np.linspace(0., 1.,256)))
# .ravel()).astype('int')
return (255. * np.array(
[cmap(x)[:3] for x in np.linspace(0,1,256)]).ravel().astype('int')) | [
"def tif_cmap(c):\n a = plt.get_cmap(c)(np.arange(256))\n return np.swapaxes(255 * a, 0, 1)[0:3, :].astype('u1')",
"def view_colormap(cmap):\r\n cmap = plt.cm.get_cmap(cmap)\r\n colors = cmap(np.arange(cmap.N))\r\n \r\n cmap = grayscale_cmap(cmap)\r\n grayscale = cmap(np.arange(cmap.N... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
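The palette row above flattens 256 RGB samples of a matplotlib colormap into the 768-integer sequence that `PIL.Image.putpalette` expects (calling `putpalette` on an 8-bit grayscale image converts it to palette mode). A usage sketch; the gradient image is a made-up example:

```python
import numpy as np
from matplotlib import cm
from PIL import Image

def cmap_to_pil_palette(cmap):
    # Sample 256 colors, keep RGB (drop alpha), scale to 0-255, and flatten.
    return (255. * np.array(
        [cmap(x)[:3] for x in np.linspace(0., 1., 256)])).ravel().astype('int')

gradient = np.tile(np.arange(256, dtype=np.uint8), (64, 1))   # 64x256 ramp
img = Image.fromarray(gradient)                               # 8-bit grayscale ('L')
img.putpalette(list(cmap_to_pil_palette(cm.viridis)))         # attaches palette -> 'P'
img.save('viridis_ramp.png')
```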
Save a png image of a colorbar. One can use this code directly, or use it as an example to modify. | def save_colorbar(img=None, vmin=None, vmax=None, cmap="jet",
filename=None, title="Colorbar", lab=""):
fig = plt.figure(figsize=(1.0, 4.0), facecolor=None, frameon=False)
ax = fig.add_axes([0.0, 0.05, 0.2, 0.9])
if vmin is None: vmin = np.min(img)
if vmax is None: vmax = np.max(img)
... | [
"def add_colorbar(\n map_path: Path = typer.Option(...),\n colorbar_path: Path = typer.Option(...),\n label: str = typer.Option(...),\n output_path: Path = typer.Option(...),\n):\n img = plt.imread(map_path)\n w, h = 3779 / 1000, 3749 / 1000\n fig, ax = plt.subplots(dpi=100)\n assert isinsta... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test all successful scenarios for TOTP validation. | def test_successful_verification(self):
for i in (-2, -1, 0, 1, 2):
description = "TOTP not verified for `i={0}`".format(i)
calculated = self.algorithm.calculate(self.device.secret, drift=i)
confirmed = self.relate.verify(calculated, save=False)
self.assertTrue(... | [
"def test_valid_otp(self, client, valid_otp_data):\n resp = client.post(self.url, json=valid_otp_data)\n assert resp.status_code == 200\n assert resp.json()[\"status\"] == \"OK\"",
"def test_unsuccessful_verification(self):\n for i in (-4, -3, 3, 4):\n description = \"TOTP v... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test all unsuccessful scenarios for TOTP validation. | def test_unsuccessful_verification(self):
for i in (-4, -3, 3, 4):
description = "TOTP verified for `i={0}`".format(i)
calculated = self.algorithm.calculate(self.device.secret, drift=i)
confirmed = self.relate.verify(calculated, save=False)
self.assertFalse(confi... | [
"def test_invalid_otp(self, client, valid_otp_data):\n\n valid_otp_data[\"code\"] += \"1\"\n\n resp = client.post(self.url, json=valid_otp_data)\n assert resp.status_code == 401\n assert resp.json()[\"code\"] == \"invalid_otp\"",
"def testBadOTP(self):\r\n otp._ClearUserHistory()\r\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
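The two TOTP rows above exercise a drift window: codes calculated at steps -2..+2 must verify, codes at -4..-3 and +3..+4 must not. A standard-library sketch of such a windowed check (HOTP/TOTP per RFC 4226/6238; the secret and the `verify` wrapper are made up, and the tested drift values mirror the rows):

```python
import hashlib
import hmac
import struct
import time

def hotp(secret: bytes, counter: int, digits: int = 6) -> str:
    # RFC 4226 dynamic truncation over HMAC-SHA1.
    mac = hmac.new(secret, struct.pack(">Q", counter), hashlib.sha1).digest()
    offset = mac[-1] & 0x0F
    code = (struct.unpack(">I", mac[offset:offset + 4])[0] & 0x7FFFFFFF) % 10**digits
    return str(code).zfill(digits)

def totp(secret: bytes, at: float, drift: int = 0, step: int = 30) -> str:
    return hotp(secret, int(at) // step + drift)

def verify(secret: bytes, code: str, at: float, window: int = 2) -> bool:
    # Accept codes within +/-window time steps of `at`.
    return any(totp(secret, at, drift=i) == code for i in range(-window, window + 1))

secret, now = b"made-up-shared-secret", time.time()
assert all(verify(secret, totp(secret, now, drift=i), now) for i in (-2, -1, 0, 1, 2))
assert not any(verify(secret, totp(secret, now, drift=i), now) for i in (-4, -3, 3, 4))
```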
changes hue of image | def change_hue(image, delta):
imHueChange = tf.image.adjust_hue(image, delta=delta, name=None)
return imHueChange | [
"def adjust_hue(img, hue_factor):\n\n input_mode = img.mode\n if input_mode in {'L', '1', 'I', 'F'}:\n return img\n\n h, s, v = img.convert('HSV').split()\n\n np_h = np.array(h, dtype=np.uint8)\n # uint8 addition take cares of rotation across boundaries\n with np.errstate(over='ignore'):\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Removes all Tkinter widgets from a master window [master], skipping widgets of type [ignore] | def removeWidgets(master, ignore=None):
for w in master.winfo_children():
if w.winfo_class() != ignore:
w.destroy() | [
"def nukeWidgets(self):\r\n i = 0;\r\n while i < 10:\r\n try:\r\n item = self.grid.itemAtPosition(2, i)\r\n if item != None:\r\n widget = item.widget()\r\n self.grid.removeWidget(widget)\r\n widget.delete... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Create a new player and write data to the filename. | def create_player (self, username = None):
# Get unique username if needed
if (username == None):
username = "default_username" + str (time.time ())
self.username = username
r = requests.post (self.url_endpoint, data = {"new_player": self.username})
if (r.status_code ... | [
"def create_player(self,player_name, attr = None, team_file = None):\n player_first, player_last = player_name.split(\" \")\n player_file = player_name.replace(\" \", \"\") + '.json'\n if(os.path.exists(self.player_path + player_file)):\n return(False)\n else:\n wit... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Create a new game, return the game_name for others to join. | def create_new_game (self, game_name = None, ai_game = False):
if (game_name == None):
game_name = "default_game_name" + str (time.time ())
self.cur_game_name = game_name
data =\
{
"new_game": True,
"player_secret": self.secret,
"game_name"... | [
"def new_game():\n #name,players = None,None\n create_game_form = NewGameForm()\n if create_game_form.validate_on_submit():\n #split the string of player names\n #TODO: error handling if this is not formatted correctly\n players = create_game_form.players.data.split(',')\n #Inst... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Join the game given by game_name. | def join_game (self, game_name):
r = requests.post (self.url_endpoint,
data = {"join_game": True, "player_secret": self.secret, "game_name": game_name})
if (r.status_code != 201):
print (f"ERROR: Failed to join game <{game_name}>:\n", r.text)
return r
join_da... | [
"def joinGame(self):\n\t\tjoinRequest = {\"Request\": Request.Join, \"ID\":self.ID, \"age\":self.age}\n\t\tself.sendRequest(joinRequest)",
"def join_game(game_id, name):\n name = name or generate_player_name()\n game_data = load_state(game_id)\n if not game_data:\n return None\n if game_data['a... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Play a random number in the current game | def play_random_number (self):
if (self.cur_game_secret == ""):
print ("ERROR: No current game, join a game.")
return 1
play_value = self.hand.pop (random.randint (0, len (self.hand) - 1))
r = requests.post (self.url_endpoint, data = {"play_game": True, "game_name": self... | [
"def playRandom(self):\n index = random.randint(0, len(self.notes)-1)\n note = list(self.notes.values())[index]\n note.play()",
"def play_once(human_plays_first):\n # This is all dummy scaffolding code right at the moment\n import random\n rng =random.Random()\n # Pick a random re... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Create the file if it does not exist. The file name or directory after the main_work_directory is needed. | def createFile(file):
file_ = os.path.join(os.getcwd(),file)
if not(os.path.isfile(file_)):
with open(file_,"a") as f:
pass  # opening in append mode creates the file; the with-block closes it | [
"def _create_file_if_needed(self):\n if not os.path.exists(self._file.filename()):\n old_umask = os.umask(0o177)\n try:\n open(self._file.filename(), 'a+b').close()\n finally:\n os.umask(old_umask)",
"def create_file(file: str):\n if not exi... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This function removes duplicates from the CSV file that stores all links. | def unqiueList(csvfile):
links_array = genfromtxt(getFulldirAddress(csvfile), delimiter='\n', dtype="unicode")
links_array_2 = np.unique(links_array)
np.savetxt(getFulldirAddress(csvfile), links_array_2, fmt='%s') | [
"def reduce_data(old_file, new_file):\n links_list = list()\n\n with open(old_file, \"r\") as file:\n for line in file:\n link = line.replace('\\n', '')\n links_list.append(link)\n\n result_list = list(set(links_list)) # eliminate duplicate links\n\n with open(new_file, \"w... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Ingests temperature and precipitation values for nClimDiv datasets. Uses a matching soil constants file from the open-source indices_python GitHub repository. | def _ingest_netcdf(output_netcdf, # pragma: no cover
release_date,
temp_var_name,
precip_var_name,
awc_var_name):
try:
# parse the soil constant (available water capacity)
soil_url = 'https://raw.githubuse... | [
"def load_data(files,varname,extra,res,minval=-100,maxval=600):\n \n hucfilename=extra[4]\n geosubset=extra[2]\n geo_matchfile=extra[5]\n files.sort()\n # if we don't have a geomatchfile then use nearest neighbor resampling\n usenn=(geo_matchfile==None)\n usenn=True # force nearest neighbor ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
get valid directions by action | def _get_valid_directions(self, turn, action):
if not isinstance(action, Action):
raise TypeError("Action must be Action class")
if action.x < 0 or action.x > 7 or action.y < 0 or action.y > 7:
raise Exception("You must set disk in board.")
if self.board[action.x][actio... | [
"def valid_actions(self) -> List[str]:\n return list(self.action_map().keys())",
"def dominated_actions(self, tol=None, method=None):\n out = []\n for action in range(self.num_actions):\n if self.is_dominated(action, tol=tol, method=method):\n out.append(action)\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
register function as agent actor | def agent_actor(self, func):
if len(self.agents) == 2:
raise Exception("You cannot register 3 or more agents.")
self.agents.append(func)
return func | [
"def _register_agent(self) -> None:\n strategy = cast(Strategy, self.context.strategy)\n description = strategy.get_location_description()\n self._register(description, \"registering agent on SOEF.\")",
"def register(self):\n self.logger.info(\"Registering agent %s\", \"/registry/\" + ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
register function as renderer | def renderer(self, func):
self.renderers.append(func)
return func | [
"def _make_renderer(self):\n return _make_renderer()",
"def dspyRender(self):\n pass",
"def __call__(self):\n return self.render()",
"def _render_callback(self, _sim, _viewer):\n pass",
"def renderer(self, ctx, name):\n\t\tif name in self.service.nevowRenderers:\n\t\t\treturn sel... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Changes the state of a cell to 'dead' (deletes it from _state) | def die(self, cell: Position):
self._next_state.remove(cell) | [
"def set_dead(self):\r\n \r\n # dead to prevent movement and off the map to free up\r\n # cell for movement of other animal until permanently removed\r\n self.alive = False\r\n self.cell = None",
"def kill(cell): \n\t# define what happens to a cell's state when it dies\n\tcell.g... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Checks if a given cell is alive or not. | def is_alive(self, cell: Position) -> bool:
return cell in self._state | [
"def check_cell(self, coord: tuple[int, int]) -> bool:\n aliveNeighbors = self.count_alive_neighbors(coord)\n if self.get_cell(coord):\n if aliveNeighbors in self.ruleSetValSurvive:\n return True\n else:\n if aliveNeighbors in self.ruleSetValBirth:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns coordinates of all neighbours of a given cell. | def get_neighbours(self, cell: Position) -> Iterable[Position]:
x, y = cell
return [
(x - 1, y - 1), (x, y - 1), (x + 1, y - 1),
(x - 1, y), (x + 1, y),
(x - 1, y + 1), (x, y + 1), (x + 1, y + 1),
] | [
"def get_neighbours(self, cell: Cell) -> Cells:\n row, col = cell\n # Рассчитываем начальные и конечные ячейки\n st_row = row - 1 + int(row == 0)\n st_col = col - 1 + int(col == 0)\n en_row = (row + 1) - int(row == self.cell_height - 1)\n en_col = (col + 1) - int(col == sel... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Version 2. Counts the number of alive cells around a given cell. | def get_neighbours_count(self, cell: Position) -> int:
possible_neighbours = self.get_neighbours(cell)
return sum(self.is_alive(n) for n in possible_neighbours) | [
"def count_alive(self) -> int:\n alive = 0\n for cell in self.grid:\n if cell:\n alive += 1\n return alive",
"def count_alive_cells(self, x, y):\n\n # indices of surrounding cells.\n ul = max(y - 1, 0) # upper left\n ur = min(y + 2, self.f_shape... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
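The four Game-of-Life rows above (`die`, `is_alive`, `get_neighbours`, `get_neighbours_count`) all assume a sparse set of live cell positions. A compact sketch tying them together; the class wrapper and constructor are assumptions, since the fragments only show individual methods:

```python
from typing import Iterable, Set, Tuple

Position = Tuple[int, int]

class SparseLife:
    def __init__(self, alive: Iterable[Position]):
        self._state: Set[Position] = set(alive)

    def is_alive(self, cell: Position) -> bool:
        return cell in self._state

    def get_neighbours(self, cell: Position) -> Iterable[Position]:
        x, y = cell
        return [(x + dx, y + dy)
                for dx in (-1, 0, 1) for dy in (-1, 0, 1)
                if (dx, dy) != (0, 0)]

    def get_neighbours_count(self, cell: Position) -> int:
        # Count live neighbours by membership tests against the sparse set.
        return sum(self.is_alive(n) for n in self.get_neighbours(cell))

world = SparseLife([(0, 0), (0, 1), (1, 0)])
assert world.get_neighbours_count((1, 1)) == 3
```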
Log the test method name at the information level | def logTestName(self):
logging.info('%s', self.id()) | [
"def get_test_method_name(self):\n for element in inspect.stack():\n if element[3].find('test_') == 0:\n return str(element[3])\n\n return \"test_method_name_not_found\"",
"def test_log_info(self):\n self.assertEqual(None, self.logger.info(\"test log info\",\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Check the target. Create the directory if it does not exist. Remove the file if it exists. | def check_output(self):
directory, file = split(self.target)
if not exists(directory):
mkdir(directory)
if exists(self.target):
unlink(self.target) | [
"def _create_target_dir(self):\n try:\n if self.source_dir != self.target_dir:\n self.source_dir.parent.joinpath(\n self.target_dir.joinpath()).mkdir()\n return True\n except FileExistsError:\n return True # File already exists\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Call 'lame' to convert the source into target. | def convert(self):
#lame --mp3input --silent -h -b BITRATE SOURCE TARGET
self.success = False
command = ['lame', '-h', '--silent']
command.append('-b ' + str(self.bitrate))
command.append(self.source)
command.append(self.target)
msg('command', command)
err... | [
"def do_luafmt(args):\n return process_game_files(args.filename, luafmt,\n overwrite=args.overwrite, args=args)",
"def slew_to_target(self):\n raise NotImplementedError",
"def main(args):\n print('loading {}'.format(args.stem_path))\n y, fs = librosa.load(args.stem_p... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Print the conversion command. | def print_command(self):
self.success = False
command = ['lame', '-h', '--silent']
command.append('-b ' + str(self.bitrate))
command.append(self.source)
command.append(self.target)
print(' '.join(command)) | [
"def cmd_print(command: str):\n print('#', command)",
"def printcommand(self, cmdobj, showcommand=True):\n\n sexported = 'Yes' if cmdobj.exported.lower(\n ) == 'new' else cmdobj.exported\n\n sformatted = ' Name: {}\\n'.format(cmdobj.name) + \\\n ' Comment: {}\\n'.for... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Create a list of tasks to convert each file in 'from_dir' to a file in 'to_dir'. The leaf directory name is appended to 'to_dir' to create the target file name. | def create_tasks(from_dir, to_dir):
try:
tasks = []
source_files = list_directory(from_dir)
dir_name = get_last_dir(from_dir)
print('dir_name', dir_name)
for file in source_files:
source = join(from_dir, file)
target = join(to_dir, dir_name, file)
... | [
"def _sync_directories(from_directory, to_directory):\n if not os.path.exists(to_directory):\n os.mkdir(to_directory)\n for root, dirs, files in os.walk(from_directory):\n to_root = root.replace(from_directory, to_directory)\n for directory in dirs:\n to... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get the name of the last directory in the path. | def get_last_dir(path):
head, tail = split(path)
    while not tail and head not in ('', '/'):  # avoid looping forever on '' or '/'
head, tail = split(head)
return tail | [
"def get_last_dir(self, path):\r\n\t\t\r\n\t\tstrPath = str(path)\r\n\r\n\t\tif not os.path.isdir(strPath):\r\n\t\t\tself.sys_error_message(\"The folder %s is not exist.\" % strPath)\r\n\t\t\treturn None\r\n\t\t\r\n\t\treturn os.path.basename(os.path.normpath(path))",
"def get_last_path_node(path):\n __, tail ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Print the argument list, if messages are enabled. | def msg(*args):
if messages_on:
print(*args) | [
"def print_args(self):\n print(self.args)",
"def _p(self, *args, level=2, **kwargs):\n if self._verbosity >= level:\n print(*args, **kwargs)",
"def nonTestModePrint(*args):\n if not testMode:\n print(args)",
"def display_checking_message():\n print(\"Valid command-line argu... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Verify that the token is mapped to a leave request. | def _check_leave_request(self, cr, uid, request, token, context=None):
holidays_obj = request.registry['hr.holidays']
holidays_ids = holidays_obj.search(cr, uid, [
('token', '=', token)
])
if len(holidays_ids) == 0:
return request.website.render(
... | [
"def leave_request_decline(self, token, **kwargs):\n cr, uid, context = self._get_cr_uid_context()\n res = self._check_leave_request(\n cr, uid, request, token, context=context\n )\n if isinstance(res, http.Response):\n return res\n if res:\n res.s... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Accept the leave request | def leave_request_accept(self, token, **kwargs):
cr, uid, context = self._get_cr_uid_context()
res = self._check_leave_request(
cr, uid, request, token, context=context
)
if isinstance(res, http.Response):
return res
if res:
res.signal_workflow... | [
"def leave_request_decline(self, token, **kwargs):\n cr, uid, context = self._get_cr_uid_context()\n res = self._check_leave_request(\n cr, uid, request, token, context=context\n )\n if isinstance(res, http.Response):\n return res\n if res:\n res.s... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Refuse the leave request | def leave_request_decline(self, token, **kwargs):
cr, uid, context = self._get_cr_uid_context()
res = self._check_leave_request(
cr, uid, request, token, context=context
)
if isinstance(res, http.Response):
return res
if res:
res.signal_workflo... | [
"def leave(self):\n pass",
"def leaving(self):\n pass",
"def on_leave(self):\n\n self.check_connection.cancel()",
"async def leave(self, ctx): # ?leave - makes him go away temporarily\n await ctx.channel.send(\"I'll be off then :(\")\n global closed\n closed = True",
"d... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Thinning image using morphological operations | def thinning_morph(image, kernel):
thining_image = np.zeros_like(image)
img = image.copy()
while 1:
erosion = cv.erode(img, kernel, iterations = 1)
dilatate = cv.dilate(erosion, kernel, iterations = 1)
subs_img = np.subtract(img, dilatate)
cv.bitwise_or(thining_image, subs_... | [
"def thin_image(pix):\n\n # Skeletonize removes layers of the foreground, leaving only a skeleton \n thinned_pix = skeletonize(pix)\n\n return thinned_pix",
"def normalise(image):",
"def skeletonize_image(bw_image, prunings=0, display=False):\n\n # generate the skeleton through thinning using the st... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
return the document viewer | def viewer(self):
return self.parent | [
"def getViewer(self, v_id=-1) -> Viewer:\n viewer_ctrl = self.getViewerController(v_id)\n if viewer_ctrl is None:\n return None\n return viewer_ctrl.view",
"def getSceneviewer(self):\n return self._sceneviewer",
"def __newDocumentView(self):\n aw = self.activeWindow... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
On total lines changed | def updateTotalLines(self):
self.viewer().TotalLinesChanged.emit( self.editor().lines() ) | [
"def _dig_line_count_changed(self, text):\n self._setup_table_digital()",
"def _changedlines(self, changedlines):\n self.changedlines = changedlines\n self.before = self.context\n self.context = []",
"def update_lines(self, lines):\n self._sum_len = [0]\n fo... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Emit signal from parent to update the position of the cursor | def onCursorPositionChanged (self , ln, col):
self.viewer().CursorPositionChanged.emit( ln, col ) | [
"def _(event):\n system_line.cursor_left()",
"def move_cursor (self, view, blockno, line, oldpos, delta):\n\t\tpass",
"def setCursor(self, pos=(0, 0)):\n x, y = pos\n self.cursor = (x, y)",
"def position_changed(self, position):\n pass",
"def cursorPositionChanged (self, ln, col)... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Activate or deactivate the line numbering | def setLinesNumbering (self, visible):
if visible:
self.srcEditor.setMarginLineNumbers(1, visible)
self.srcEditor.onLinesChanged()
else:
self.srcEditor.setMarginLineNumbers(1, visible)
self.srcEditor.setMarginWidth(1, 0) | [
"def ToggleLineNumbers(self, switch=None):\n if (switch is None and \\\n not self.GetMarginWidth(NUM_MARGIN)) or switch:\n self.EnableLineNumbers(True)\n else:\n self.EnableLineNumbers(False)",
"def setLinesNumbering (self, visible):\n if visible:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Activate or deactivate indentation guides visibility | def setIndentationGuidesVisible (self, visible):
        self.srcEditor.setIndentationGuidesVisible(visible)
"def toggleIndentGuidesVisibility(self, checked):\n self.indentationGuidesVisible = checked\n for tabId in xrange( self.tab.count() ):\n doc = self.tab.widget(tabId)\n # bypass the welcome page\n if isinstance(doc, WelcomePage):\n continue\n #... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Called when the focus changes between editors. Emits the "FocusChanged" signal | def focusChanged (self):
weditor = QApplication.focusWidget()
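        # only handle the event when the focused widget is a PyEditor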
if isinstance(weditor, PyEditor):
if weditor.editorId == self.TEST_DATA_EDITOR:
self.viewer().findWidget.setEditor( editor = self.srcEditor)
self.viewer().FocusChanged.emit(self) | [
"def focusChanged (self):\n weditor = QApplication.focusWidget()\n if isinstance(weditor, PyEditor):\n if weditor.editorId == self.TEST_DEF_EDITOR:\n self.viewer().findWidget.setEditor( editor = self.srcEditor)\n if weditor.editorId == self.TEST_EXEC_EDITOR:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Set the default cursor position | def setDefaultCursorPosition(self):
self.srcEditor.setFocus()
self.srcEditor.setCursorPosition(0,0) | [
"def setCursor(self, pos=(0, 0)):\n x, y = pos\n self.cursor = (x, y)",
"def set_cursor_at(self, position: int):\n self._cursor = position",
"def _set_cursor_pos(self, position: int) -> None:\n self._cursor_pos = position\n self._selection_end_pos = position\n self._force_b... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return how many hours, rounded to 2 decimals, Python 2 has left on Planet Earth (calculated from start_date) | def py2_earth_hours_left(start_date=BITE_CREATED_DT):
td = (PY2_DEATH_DT - start_date)
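    # total hours = full days times 24 plus the leftover seconds converted to hours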
    return round(td.days * 24 + td.seconds / 3600, 2)
"def py2_earth_hours_left(start_date=BITE_CREATED_DT):\n td = PY2_DEATH_DT - start_date\n return round(td.days * 24 + td.seconds / 60 / 60, 2)",
"def py2_earth_hours_left(start_date=BITE_CREATED_DT):\r\n duration = PY2_DEATH_DT - start_date\r\n return round(duration.total_seconds() / 3600, 2)",
"def... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Loads a random image from a random folder in our test path | def getRandomImage(path):
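    # keep only the subdirectories of the test path (one folder per class)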
folders = list(filter(lambda x: os.path.isdir(os.path.join(path, x)), os.listdir(path)))
random_directory = np.random.randint(0,len(folders))
path_class = folders[random_directory]
print("Class - " + five_celeb_dict_n[str(path_class)])
file_path = path + path_class
file... | [
"def random_test_image():\r\n c = np.random.choice(cat_df['category'])\r\n root = testdir + c + '/'\r\n img_path = root + np.random.choice(os.listdir(root))\r\n return img_path",
"def getRandomImage(path, img_width, img_height):\n folders = list(filter(lambda x: os.path.isdir(os.path.join(path, x))... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Given a Spotify track id, returns the audio features from the API | def get_song_features(tid):
# dictionary of features to return
spotify_track_data = SpotifyData[tid]
features = {}
features['name'] = spotify_track_data.name
features['artists'] = spotify_track_data.artists
features['popularity'] = spotify_track_data.popularity
features['album'] = spotify_... | [
"def audio_features():\n\n token = credentials.get_access_token()\n spotify = Spotify(auth=token)\n track_id = request.args.get(\n 'track_id', default='06w9JimcZu16KyO3WXR459', type=str\n )\n results = spotify.audio_features(track_id)\n return json.dumps(results)... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Given a list of seed track ids and candidate_tids, returns a dataframe summarizing the differences between each candidate track's features and the seed features. | def compute_df_features(seed_tids, candidate_tids, relevences):
seed_features = compute_seedset_features(seed_tids)
    # drop candidate songs that are missing feature values
candidate_df = get_features_dataframe(candidate_tids)
candidate_df['relevence'] = relevences
candidate_df.dropna(axis=0)
df = {}
df['re... | [
"def audio_feature_collector(track_id_lst):\n audio_features = []\n batchsize = 100\n\n # Iterate over 100 song batches (due to API limit per request)\n for i in tqdm(range(0,len(track_id_lst),batchsize)):\n batch = track_id_lst[i:i+batchsize]\n # Collect features for 100 tracks\n feature_results = sp.... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Given an input playlist and factors computed from stage 1, returns a df for stage 2 | def compute_df(playlist, song_factors, playlist_factors=None, method='ensemble'):
playlist = playlist.str.replace('spotify:track:', '')
playlist_set = set(playlist)
seed_ids = []
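    # sample until two distinct seed tracks that exist in the factor index are found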
while len(seed_ids) < 2:
rand = list(playlist.sample(n=1))[0]
if rand in tid_to_idx and rand not in seed_ids:... | [
"def create_df_playlist(api_results,sp = None, append_audio = True):\r\n df = create_df_saved_songs(api_results[\"tracks\"])\r\n if append_audio == True:\r\n assert sp != None, \"sp needs to be specified for appending audio features\"\r\n df = append_audio_features(df,sp)\r\n return df",
"d... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Updates all FixedAssets data (in FA_DATA directory) for user-defined year and frequency | def update_all_fa(year, frequency):
failed_dict = {}
mb_remaining = 100
requests_remaining = 100
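    # ask the BEA API which FixedAssets table names are available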
fa_table_ids = pybea.get_parameter_values(UserID, 'FixedAssets', ParameterName='TableName', ResultFormat='JSON')
tablenames = fa_table_ids['TableName'].values
for x in tablenames:
temp = py... | [
"def fixed_assets(self, table_name: str, year: List[str]) -> Dict:\n\n if year != 'ALL':\n year = ','.join(year)\n\n # Define the parameters.\n params = {\n 'userid': self.api_key,\n 'method': 'GetData',\n 'datasetname': 'FixedAssets',\n 'y... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Calculates the dependency matrix, i.e. the absolute value of the Jacobian of the encoder at the given latents | def calc_dependency_matrix(encoder: nn.Module, latents: torch.Tensor) -> torch.Tensor:
# calculate the jacobian
jacob = jacobian(encoder.forward, latents)
# take the absolute value
return jacob.abs() | [
"def derivative_matrix(self):\n from sage.matrix.constructor import matrix\n [a,b,c,d,e,f] = self.coefficients()\n return matrix([[ 2*a , b , c ],\n [ b , 2*d , e ],\n [ c , e , 2*f ]])",
"def _build_dmatrix(self, formula, num_col):\r\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Parameters: function: the button's callback function; logic_engine: the logic engine object; graphic_engine: the graphics engine object; graphical_x: graphical x coordinate of the button in [px]; graphical_y: graphical y coordinate of the button in [px]; width: width of the button image in [px]; height: height of the button image in [px] | def __init__(self, function, logic_engine, graphic_engine, game, graphical_x: int, graphical_y: int,
                 width: int, height: int):
        # Logic
        self.function = function
        # Physics
        self.graphical_height: int = height  # Graphical height of the button in [px]
self.graphical_widt... | [
"def __init__(self, gfx, x1, y1, x2, y2,\n title=\"Graph\", xaxis=\"X\", yaxis=\"Y\",\n styleFlags=DEFAULT_STYLE,\n axisPen=Gfx.BLACK_PEN, labelPen=Gfx.BLACK_PEN,\n titlePen=Gfx.BLACK_PEN, captionPen=Gfx.BLACK_PEN,\n backgroundPen=Gfx.... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Non-nested view of the unfaceted_array_of_objects field | def non_nested_array_of_objects(self, unfaceted_array_of_objects):
return unfaceted_array_of_objects | [
"def all_field_objects(self):\n pass",
"def format_array_fields(self):\n for field in self.array_fields:\n for value, selector in deep_get(self.json, field):\n if value is not None and not isinstance(value, list):\n deep_set(self.json, selector, [value])"... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Load image from file and perform preprocessing. Args | def _load_preprocess_image(self, image_file):
image_raw = tf.io.read_file(image_file)
image = self._preprocess_image(image_raw)
return image | [
"def _load_preprocess_image(self, image_file):\n\n image_raw = tf.io.read_file(image_file)\n\n image = self._preprocess_image(image_raw)\n\n return image",
"def load_and_preprocess_image(path):\n image = tf.io.read_file(path)\n return preprocess_image(image)",
"def load_and_preprocess... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Generate the training/holdout folds from a set of splits. Args | def _get_folds(self, splits):
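        # one independent copy of the full list of splits per fold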
train = [splits.copy() for i in range(self.n_splits)]
holdout = [train[i].pop(i) for i in range(self.n_splits)]
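        # flatten each fold's remaining splits into a single training list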
train_flat = [list(chain(*row)) for row in train]
return list(zip(train_flat, holdout)) | [
"def buildSplits(self, args):\n trainData = []\n testData = []\n splits = []\n trainDir = args[0]\n if len(args) == 1:\n print '[INFO]\\tPerforming %d-fold cross-validation on data set:\\t%s' % (self.numFolds, trainDir)\n\n posTrainFileNames = os.listdir('%s/pos/' % trainDir)\n negTrai... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Display sorted results for each metric. Args | def display_results(self, metrics):
for k, v in self.cv_results_.items():
# sorted_results = sort_results(v)
print(f'Results for {k} metric:')
print()
print(v.sort_values(by=['Metric mean'], ascending=False))
print() | [
"def sort_results(self):\n pass",
"def sort_by_metric(self): \n return self._sort_by_metric",
"def print_results(_results):\n order = [\"Top\", \"Second\", \"Third\"]\n\n for index, ele in enumerate(_results):\n print(\n f\"{order[index]} match: \\\"{... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Builds a dense network. Args | def _build_network(self,
input_dim,
dense_layers,
nodes_per_layer=None,
hidden_act='relu',
output_act='sigmoid',
dropout_layers=None):
if nodes_per_layer is None:
... | [
"def build_dense_network(data, hidden_layers, **kwargs):\n # Input layer\n with tf.variable_scope(\"layer_1\"): \n weights = tf.get_variable(\"weights\", shape = [input_shape[-1] + 1\n , hidden_layers[0]], initializer = tf.variance_scaling_initializer())\n\n output = tf.nn.leaky_relu(... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |