Columns:
  query      string  (lengths 9 to 9.05k)
  document   string  (lengths 10 to 222k)
  negatives  list    (lengths 19 to 20)
  metadata   dict
Load dipole(s) from .txt file and plot spectrograms
def loadDisplayData(self): fname = QFileDialog.getOpenFileName(self, 'Open .txt file', 'data') fname = os.path.abspath(fname[0]) if not os.path.isfile(fname): return self.m.index = 0 file_data = np.loadtxt(fname, dtype=float) if file_data.shape[1] > 2: ...
[ "def from_file(file,):\n\n raise NotImplementedError(\n \"Loading Spectrograms from images is not implemented yet\"\n )", "def plot_spectrogram(self):\n f, t, sxx = self.compute_spectrogram()\n plt.pcolormesh(t, f, sxx)\n plt.ylabel('Frequency [Hz]')\n plt.xlab...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Clear data from file and revert to SimData
def clearDataFile(self): self.specs = [] self.lextfiles = [] self.m.index = 0 self.loadSimData(self.params['sim_prefix'], self.params['f_max_spec'])
[ "def clear(self):\n self.file.seek(0)\n self.file.truncate()\n self.file.close()\n self.open()", "def reset(self):\n self.data = {}\n self.updates = {}\n self._convert_raw(self.raw_filepath)", "def reset(self):\n\t\tf = open(self.file_path, \"w+\")\n\t\tf.close()...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get some testing calibration data
def get_cal_data() -> CalibrationData: return CalibrationData( sensor="testsens", serial=20, static_gain=5, frequency=[1, 2, 3, 4, 5], magnitude=[10, 11, 12, 13, 14], phase=[0.1, 0.2, 0.3, 0.4, 0.5], )
[ "def getTestingData(self):", "def _load_calibration_data():\n\tpickle_dict = pickle.load(open('./calibration.p', 'rb'))\n\tmtx = pickle_dict['mtx']\n\tdist = pickle_dict['dist']\n\treturn mtx, dist", "def getTrainingData(self):", "def getTestData():\n\ttestFileList = listdir('../../datasets/testDigits')\n\tn ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Mock the read_bytes used by pydantic
def mock_read_bytes(*args): return get_cal_data().json().encode()
[ "def test_convert_file_type(self):\n\n data = \"\"\"id,name,surname\n1,Adam,Kowalski\n2,Seth,McFarlane\"\"\"\n\n with patch('builtins.open', mock_open(read_data=data)):\n result = zadanie.convert_file('foo')\n\n self.assertIs(type(result), io.StringIO)", "def test_getbytes(self):\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return mock file object
def mock_open(*args, **kwargs): return MockFileObject()
[ "def mock_opened_file():\n with patch('builtins.open', mock_open()) as mock_file:\n yield mock_file", "def test_convert_file_type(self):\n\n data = \"\"\"id,name,surname\n1,Adam,Kowalski\n2,Seth,McFarlane\"\"\"\n\n with patch('builtins.open', mock_open(read_data=data)):\n result...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Canonicalize the query, replacing strings with a special placeholder
def canonicalize_query(query): str_count = 0 str_map = dict() matches = QUOTED_STRING_RE.findall(query) # de-duplicate cur_replaced_strs = set() for match in matches: # If one or more groups are present in the pattern, # it returns a list of groups quote = match[0] ...
[ "def prepare_solr_query_string(value):\r\n from haystack.query import SearchQuerySet\r\n value = clean_tags(value)\r\n value = clean_printf_vars(value)\r\n value = clean_especial_chars(value)\r\n value = clean_extra_spaces(value)\r\n for word in SearchQuerySet().query.backend.RESERVED_WORDS:\r\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Logging and I/O setup for the current processes.
def setup_process(**config): def ensure_directory(fname): """Make sure the directory containing the given name exists.""" dirname = os.path.dirname(fname) if dirname and not os.path.exists(dirname): os.makedirs(dirname) if 'task' not in config: config['task'] = 'ca...
[ "def __enter__(self):\n self.process = Process(target=do_logging, \n args=(self.dut,self.result_id))\n self.process.start()\n print 'CONSOLE: launched console logging system for', self.dut", "def startup_processes(self):\n self.load_config()\n self....
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Extend to add the private token, if any, to the headers.
def _headers(self) -> dict[str, str]: headers = super()._headers() if private_token := self._parameter("private_token"): headers["Private-Token"] = str(private_token) return headers
[ "def get_headers(self) -> Dict[str, str]:\n return {\"Authorization\": f\"Bearer {self.token}\"}", "def make_auth_header(self):\n headers = {\n \"Authorization\": f\"Bearer {self._get_auth_token()}\"\n }\n return headers", "def _add_custom_headers(self, dct):\r\n pa...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the next (pagination) links from the responses.
async def _next_urls(self, responses: SourceResponses) -> list[URL]: return [URL(next_url) for response in responses if (next_url := response.links.get("next", {}).get("url"))]
[ "def _next_url(self, response):\n return response.links.get(\"page-next\", {}).get(\"url\", None)", "def _scrape_next_results_page_link(self, response):\n next_pages = response.xpath('//*[@id=\"pagnNextLink\"]/@href |'\n '//ul[contains(@class, \"a-pagination\")]'\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return a GitLab API url for a project, if present in the parameters.
async def _gitlab_api_url(self, api: str) -> URL: url = await super()._api_url() project = self._parameter("project", quote=True) api_url = f"{url}/api/v4/projects/{project}" + (f"/{api}" if api else "") sep = "&" if "?" in api_url else "?" api_url += f"{sep}per_page=100" ...
[ "def gitlab_api_url(api_url_template, repository, url_params={}, query_params={}):\n is_enterprise, fqdn = get_api_fqdn(repository)\n base_path = \"/api/v4\" if is_enterprise else \"\"\n # project ID can be specified as an encoded namespaced path:\n # https://docs.gitlab.com/ee/api/README.html#namespace...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the next (pagination) links from the responses as long as we're within lookback days.
async def _next_urls(self, responses: SourceResponses) -> list[URL]: # Note: the GitLab documentation (https://docs.gitlab.com/ee/api/jobs.html#list-project-jobs) says: # "Jobs are sorted in descending order of their IDs." The API has no query parameters to sort jobs by date # created or by date...
[ "async def _next_urls(self, responses: SourceResponses) -> list[URL]:\n return [URL(next_url) for response in responses if (next_url := response.links.get(\"next\", {}).get(\"url\"))]", "def _next_url(self, response):\n return response.links.get(\"page-next\", {}).get(\"url\", None)", "def _scrape...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the newer of the two jobs.
def newer(job1: Job, job2: Job) -> Job: return job1 if job1["created_at"] > job2["created_at"] else job2
[ "def latest_job(self):\n return self._latest_job", "def last_run_state_for_jobs(jobs):\n return [(chronos_job, chronos_tools.get_status_last_run(chronos_job)[-1]) for chronos_job in jobs]", "def result_compare(self, result1, result2):\n return cmp(result1.time, result2.time)", "def newer(name...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the build date of the job.
def _build_date(job: Job) -> date: return parse_datetime(job.get("finished_at") or job["created_at"]).date()
[ "def conky_build_date(self):\n self.writeCommand('conky_build_date')\n return self", "def get_date(job):\n return datetime.datetime.strptime(job.creation_date(),\n '%Y-%m-%dT%H:%M:%S.%fZ')", "def last_build_date(self) -> Optional[datetime.datetime]:\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Execute an action: either buy, sell, or hold
def execute(self, action, spread, stock1_price, stock2_price, penalty): action = Actions(action) if action == Actions.BUY: self.spread_when_bought = spread if self.status == Status.INVESTED_IN_SPREAD: first = False if(penalty != 1): ...
[ "def buy(self):\n buy_request = str(input(\"\\nWhat do you want to buy? \"\n \"1 - espresso, \"\n \"2 - latte, \"\n \"3 - cappuccino:, \"\n \"back - to main menu:\\n\"))\n\n if b...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculates maximum amount of stock that can be bought with current cash balance. Returns the new cash and stock balance values.
def buy(self, stock_price): available_cash_to_spend = self.balance - self.transaction_fee max_stocks_to_buy = available_cash_to_spend // stock_price new_cash_balance = self.balance - \ (max_stocks_to_buy * stock_price) - \ self.transaction_fee return (new_cash...
[ "def __set_max_cash_available(self) -> None:\r\n self._max_cash = sum(a * b for a, b in self._cash_dict.items())", "def get_max_profit2(stock_prices):\n\n #make sure we have at least two prices to compare\n assert len(stock_prices) > 1, \"too few prices!\"\n\n #keep running track of the cheapest p...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculates cash balance that is returned when a stock is sold. Returns the new cash and stock balance values.
def sell(self, stock_price, stock_balance): stock_value = stock_balance * stock_price new_cash_balance = self.balance + \ stock_value - \ self.transaction_fee return (new_cash_balance, 0)
[ "def buy(self, stock_price):\n\n available_cash_to_spend = self.balance - self.transaction_fee\n\n max_stocks_to_buy = available_cash_to_spend // stock_price\n\n new_cash_balance = self.balance - \\\n (max_stocks_to_buy * stock_price) - \\\n self.transaction_fee\n\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Counts the number of occurrences of each value in an integer array `arr`. Works like `tf.math.bincount`, but provides an `axis` kwarg that specifies dimensions to reduce over. With `~axis = [i for i in range(arr.ndim) if i not in axis]`, this function returns a `Tensor` of shape `[K] + arr.shape[~axis]`. If `minlength`...
def count_integers(arr, weights=None, minlength=None, maxlength=None, axis=None, dtype=tf.int32, name=None): with tf.name_scope(name or 'count_integers'): if axis is None: return tf.math.bincoun...
[ "def bincount(arr: ragged_tensor.RaggedTensor,\n weights=None,\n minlength=None,\n maxlength=None,\n dtype=dtypes.int32,\n name=None,\n axis=None,\n binary_output=False):\n name = \"bincount\" if name is None else name\n with ops....
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get values of y at the indices implied by interp_type.
def _get_indices(interp_type): if interp_type == 'lower': indices = tf.math.floor((d - 1) * frac_at_q_or_below) elif interp_type == 'higher': indices = tf.math.ceil((d - 1) * frac_at_q_or_below) elif interp_type == 'nearest': indices = tf.round((d - 1) * frac_at_q_or_below) ...
[ "def interpolation(cls, time, y, time_interp):\r\n f = interp.interp1d(time, y, fill_value=\"extrapolate\")\r\n y_interp = f(time_interp)\r\n return y_interp", "def parametric_interpolation(x ,y, t, type = 'linear'):\n fx_t = interp1d(t,x, fill_value='extrapolate')\n fy_t = interp1d(t,y...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Compute quantiles of `x` along `axis`. The quantiles of a distribution are cut points dividing the range into intervals with equal probabilities. Given a vector `x` of samples, this function estimates the cut points by returning `num_quantiles + 1` cut points, `(c0, ..., cn)`, such that, roughly speaking, equal number ...
def quantiles(x, num_quantiles, axis=None, interpolation=None, keepdims=False, validate_args=False, name=None): with tf.name_scope(name or 'quantiles'): x = tf.convert_to_tensor(x, name='x') return percentile( x, ...
[ "def percentile_per_dim(x, q):\n out = np.zeros(x.shape[1])\n for i in range(len(out)):\n out[i] = np.percentile(x[:,i], q)\n return out", "def get_quantile_values(x, num_quantiles):\n ### helper function ###\n def f(interval, bins):\n \"\"\"\n helper function for get_quantile_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get static number of dimensions and assert that some expectations are met. This function returns the number of dimensions 'ndims' of x, as a Python int. The optional expect arguments are used to check the ndims of x, but this is only done if the static ndims of x is not None.
def _get_static_ndims(x, expect_static=False, expect_ndims=None, expect_ndims_no_more_than=None, expect_ndims_at_least=None): ndims = tensorshape_util.rank(x.shape) if ndims is None: if expect_static: raise ValueError...
[ "def _dimensions(ds):\n ds = dshape(ds)\n if isdimension(ds[0]):\n return 1 + _dimensions(ds.subarray(1))\n if isinstance(ds[0], Record):\n return 1 + max(map(_dimensions, ds[0].types))\n if len(ds) == 1 and isunit(ds[0]):\n return 0\n raise NotImplementedError('Can not compute d...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get static ndims if possible. Fall back on `tf.rank(x)`.
def _get_best_effort_ndims(x, expect_ndims=None, expect_ndims_at_least=None, expect_ndims_no_more_than=None): ndims_static = _get_static_ndims( x, expect_ndims=expect_ndims, expect_ndims_at_least=expect_ndims_at_least, ...
[ "def static_or_dynamic_dim_size(tensor, i):\n static_shape = tensor.shape\n dyn_shape = tf.shape(tensor)\n return (static_shape[i].value if hasattr(static_shape[i], 'value')\n else static_shape[i]) or dyn_shape[i]", "def rank(inp):\n return len(np_tf_get_shape(inp))", "def ResolveBatchDim(shape: ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Insert the dims in `axis` back as singletons after being removed.
def _insert_back_keepdims(x, axis): for i in sorted(axis): x = tf.expand_dims(x, axis=i) return x
[ "def popinsert_axes(arr, axis0, axis1, lib=tf):\n r = rank(arr)\n assert(-r <= axis0 <= r - 1), \"rank oob\"\n assert(-r <= axis1 <= r - 1), \"rank oob\"\n return lib.transpose(arr, get_popinsert_permutation(r, axis0, axis1))", "def squeeze(a, axis=None):\n return cpp.squeeze(a, axis)", "def squi...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Convert possibly negatively indexed axis to nonnegative list of ints.
def _make_static_axis_non_negative_list(axis, ndims): axis = ps.non_negative_axis(axis, ndims) axis_const = tf.get_static_value(axis) if axis_const is None: raise ValueError( 'Expected argument `axis` to be statically available. ' 'Found: {}.'.format(axis)) # Make at least 1-D. axis = ax...
[ "def tensorshape_to_intlist(tensorshape):\n\treturn list(map(lambda j: 1 if j is None else int(j), tensorshape))", "def convert_neg_indices(indices, ndim):\n def canonicalizer(ax):\n return ax + ndim if ax < 0 else ax\n indices = tuple([canonicalizer(axis) for axis in indices])\n return indices", ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Move dims corresponding to `axis` in `x` to the end, then flatten.
def _move_dims_to_flat_end(x, axis, x_ndims, right_end=True): if not axis: return x # Suppose x.shape = [a, b, c, d] # Suppose axis = [1, 3] # other_dims = [0, 2] in example above. other_dims = sorted(set(range(x_ndims)).difference(axis)) # x_permed.shape = [a, c, b, d] perm = other_dims + list(axi...
[ "def _insert_back_keepdims(x, axis):\n for i in sorted(axis):\n x = tf.expand_dims(x, axis=i)\n return x", "def unflatten(space: Space, x: np.ndarray) -> np.ndarray:\n raise NotImplementedError(f\"Unknown space: `{space}`\")", "def flatten(t, dim=-1):\n shape = list(t.shape)\n shape[dim - 1] *= sh...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generate safe random filename based on UUID4.
def generate_safe_random_filename(extension="txt"): name = uuid.uuid4() filename = base64.urlsafe_b64encode(name.bytes).decode("utf-8").rstrip("=\n") return "{filename}.{extension}".format(filename=filename, extension=extension)
[ "def create_file_name():\n # This generates a name that is between 3 to 63 chars long\n return str(uuid.uuid4())", "def get_uuid_filename(filename):\n ext = filename.split('.')[-1]\n return \"{}.{}\".format(uuid.uuid4().hex, ext)", "def generate_unique_filename(original_filename):\n # keep file e...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Determines whether a number is the square of another number.
def isSquare(n): if (n > 0): if (math.sqrt(n) - int(math.sqrt(n))): return False return True return False
[ "def is_square(n):\n if n < 0:\n return False\n sqrt = n ** (1 / 2)\n number_dec = str(sqrt-int(sqrt))[1:]\n if len(number_dec) > 2:\n return False\n else:\n return True", "def is_twice_square(n):\n return int((n // 2) ** 0.5) ** 2 * 2 == n", "def isSquareNum(cls, x):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Determines whether a number is a power of five.
def isSquareFive(n): if(n>0): while n%5==0: n/=5 if n==1: return True elif n<5: return False if (n % 5): return False return False
[ "def is_twice_square(n):\n return int((n // 2) ** 0.5) ** 2 * 2 == n", "def is_it_5(some_number) -> bool:\n well_is_it = some_number == 5\n return well_is_it", "def is_square(n):\n if n < 0:\n return False\n sqrt = n ** (1 / 2)\n number_dec = str(sqrt-int(sqrt))[1:]\n if len(number_d...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Call TD endpoint /providers/physical/sync to sync devices in topology discovery
def sync_physical_devices(task): devices = task["inputData"]["devices"] labels = task["inputData"]["labels"] sync_all_installed_devices = int(task["inputData"]["sync_all_installed_devices"]) data = {} # If there is MOCK_UNICONFIG_URL_BASE in composefile use mock uniconfig to get installed devices ...
[ "async def sync_devices(self):\n msg = self.construct_message('{\"cmdId\":' + str(Command.GET_ALL_EQUIPMENT_STATUS.value) + ',\"device_status\":\"\"}')\n logging.info(\"sync devices\")\n await self.send_data(msg)", "async def sync_device_status(self, devices=None):\n device_status = \"...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return a deep copy of this Stack as a new object
def copy(self): copystack = Stack(self.maxlen, self.name) copystack.populate(copy.deepcopy(self.data)) return copystack
[ "def copy(self) -> 'VariableStack':\n new_stack = type(self)()\n new_stack.value = copy.copy(self.value)\n new_stack.empty_pops = self.empty_pops\n new_stack.max_size = self.max_size\n return new_stack", "def copy(self):\n return Struct(self.__dict__.copy())", "def clon...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Pop the last `count` items off the Stack. The default of 1 returns the most recently popped item.
def pop(self, count=1): for c in range(count): if len(self) == 0: raise StackError("Cannot pop from empty Stack") lastitem = self.data[-1] del self.data[-1] return lastitem
[ "def pop_n(self, n):\n if n:\n ret = self.stack[-n:]\n self.stack[-n:] = []\n return ret\n else:\n return []", "def stack_pop(self, num_items=1, type_hint=None):\n return self._stack.pop(num_items, type_hint)", "def last(count):", "def pop() -> ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Push as many items as possible from `itemlist` onto the Stack. If `destructive` == True, the current data will be overwritten; items that do not fit are discarded.
def populate(self, itemlist, destructive=False): if isinstance(itemlist, range): itemlist = list(itemlist) if isinstance(itemlist, list): if destructive: itemlist = itemlist[:self.maxlen] self.data = itemlist else: if self.maxlen: itemlist = itemlist[:self.maxlen - len(self)] self.data...
[ "def push(self,item):\n if len(self) >= ArrayStack.DEFAULT_CAPACITY:\n temp = Arrays(ArrayStack.DEFAULT_CAPACITY * 2)\n for i in range(len(self)):\n temp[i] = self._items[i]\n self._items = temp\n self._items[len(self)] =item\n self._size += 1", ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get geocode results from FCC API. Note that in the case of multiple FCC geocode results, this function returns details of the FIRST result.
def get_fcc_results(latitude,longitude,showall=False): # Set up your Geocoding url geocode_url = "http://data.fcc.gov/api/block/find?format=json&latitude={}&longitude={}".format(latitude,longitude) # Ping FCC for the results: results = requests.get(geocode_url) # Results will be in JSON format - co...
[ "def get_geo_location(address, max_result):\n if Geocoder.isPresent():\n print(\"GeoCoder is present...\")\n geo = Geocoder(PythonActivity.mActivity, Locale.getDefault())\n print(\"Looked up addresses\")\n java_list = geo.getFromLocationName(address, max_result)\n if java_list:...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Compile this ``Regex`` as a Python regular expression.
def compile(self): return re.compile(self.pattern, self.flags)
[ "def get_compiled(self, name: str) -> re.compile:\n rx = re.compile(self.regexp)\n if self.flag_multiline:\n rx.flags ^= re.MULTILINE\n if self.flag_dotall:\n rx.flags ^= re.DOTALL\n return rx", "def compile_regex(regex):\n return re.compile(regex, re.U)", "d...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the MD5 checksum of a local file.
def generate_md5_checksum(local_path): with open(local_path, 'rb') as local_file: data = local_file.read() return hashlib.md5(data).hexdigest()
[ "def checksum_md5 (filename) :\n fname = filename\n block_size = 0x10000\n fd = open(fname, \"rb\")\n try:\n block = [ fd.read(block_size) ]\n while len(block[-1]) > 0 :\n block.append ( fd.read(block_size) )\n contents = block\n zero = hashlib.md5()\n i = ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Checks if a local file is present and downloads it from the specified path otherwise. If checksum_reference is specified, the file's md5 checksum is compared against the expected value.
def download_file(local_path, link, checksum_reference=None): if not os.path.exists(local_path): print('Downloading from %s, this may take a while...' % link) wget.download(link, local_path) print() if checksum_reference is not None: checksum = generate_md5_checksum(local_path) ...
[ "def mirror_file(\n self,\n remote_path: str,\n remote_stat: str,\n local_root: str,\n force_download: bool = False,\n integrity_check: bool = True,\n ):\n local_path = self.remote_path_to_local_path(remote_path, local_root)\n if force_download or not self....
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Push many values all at once in sequential order.
def push_many(self, *args): for i in args: self.push(i)
[ "def test_push_many(self):\n stk = Stack()\n items = list(range(100))\n for i in items:\n stk.push(i)\n self.assertEqual(stk.show(), list(reversed(items)))", "def batch_insert_push(self, batch_data):\n data = self.data\n for key, value in batch_data.items():\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sample indices based on proportions.
def _sample_proportional(self) -> List[int]: indices = [] p_total = self.sum_tree.sum(0, len(self) - 1) segment = p_total / self.batch_size for i in range(self.batch_size): a = segment * i b = segment * (i + 1) upperbound = random.uniform(a, b...
[ "def _sample_proportional(self):\n indices = []\n p_total = self.sum_tree.sum(0, len(self)-1)\n\n segment = p_total / self.batch_size\n\n for i in range(self.batch_size):\n a = segment * i\n b = segment * (i+1)\n upperbound = random.uniform(a, b)\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given a state (or batch of states), calculate the Q-values. Basically run your network on these states. Return Q-values for the state(s)
def calc_q_values(self, state): return self._sess.run(self._q_pred_0, feed_dict={self._state_placeholder:state});
[ "def computeQValueFromValues(self, state, action):\n \"*** YOUR CODE HERE ***\"\n\n #Just to note:\n #Q(s,a) = \\sum_{s'} T(s,a,s')[R(s,a,s') + \\gammaV(s')]\n\n # T is list of (nextState, probability)\n T = self.mdp.getTransitionStatesAndProbs(state, action)\n gamma = self...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculate the mean and standard deviation of all features and return them.
def calc_mean_std(self): # get ob_next sets from memory memory_len = len(self._memory) all_obs_next = [] col_len = len(self._memory[memory_len - 1].obs_nex) for i in range(memory_len): all_obs_next.append(self._memory[i].obs_nex) # cacualte ave...
[ "def compute_mean_std(self, verbose=False):\n sum_intensities = 0.0\n numel = 0\n\n with mt_datasets.DatasetManager(self,\n override_transform=mt_transforms.ToTensor()) as dset:\n pbar = tqdm(dset, desc=\"Mean calculation\", disable=not verbose)\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Converts an H3 64-bit integer index to a hexadecimal string.
def h3_to_string(x): return _cy.int2hex(x)
[ "def get_base_64_index_string(self):\n packed_indices = ''\n for tile_index in self.tile_indices:\n packed_indices += struct.pack(\"<L\", tile_index)\n\n return packed_indices.encode('base64')", "def form_hex(dense_hash):\n return ''.join([format(number, '02x') for number in den...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the average area of an H3 hexagon for the given resolution. This average excludes pentagons. Returns float
def hex_area(resolution, unit='km^2'): # todo: `mean_hex_area` in 4.0 return _cy.mean_hex_area(resolution, unit)
[ "def get_area(shape: gpd.geodataframe.GeoDataFrame) -> float:\n return round(sum(shape.area) / 10 ** 6, 4)", "def pixel_area(self) -> float:\n return round((self.side_length ** 2) / 1_000_000, 1)", "def calc_area_element(img):#not precise unless a txt file with actual counts is loaded\n return int(...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the average hexagon edge length for the given resolution. This average excludes pentagons. Returns float
def edge_length(resolution, unit='km'): # todo: `mean_edge_length` in 4.0 return _cy.mean_edge_length(resolution, unit)
[ "def pixel_area(self) -> float:\n return round((self.side_length ** 2) / 1_000_000, 1)", "def hex_area(resolution, unit='km^2'):\n # todo: `mean_hex_area` in 4.0\n return _cy.mean_hex_area(resolution, unit)", "def get_average_resolution(self):\n value = 0.0 * self.info.instrument.get...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Validates an H3 cell (hexagon or pentagon). Returns bool
def h3_is_valid(h): try: h = _in_scalar(h) return _cy.is_cell(h) except (ValueError, TypeError): return False
[ "def check_tile_validity(tile_coords):\n if len(tile_coords) != 2:\n return False;\n\n row = tile_coords[1]\n col = tile_coords[0]\n\n if row != '1' and row != '2' and row != '3':\n return False;\n elif col != 'A' and col != 'B' and col != 'C':\n return False;\n\n return True;...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Validates an H3 unidirectional edge. Returns bool
def h3_unidirectional_edge_is_valid(edge): try: e = _in_scalar(edge) return _cy.is_edge(e) except (ValueError, TypeError): return False
[ "def has_edge(self, u, v):", "def has_edge(self, id1: int, id2: int) -> bool:\r\n return not self.get_node(id1).get_hashOut().get(id2) is None", "def check_edge(self):\n\t\tscreen_rect = self.screen.get_rect()\n\t\tif self.rect.right <= screen_rect.right:\n\t\t\treturn True", "def check_edge(self):\n\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the resolution of an H3 cell.
def h3_get_resolution(h): # todo: could also work for edges return _cy.resolution(_in_scalar(h))
[ "def _get_width(self) -> \"double\" :\n return _core.OrientedBoundingBox3D__get_width(self)", "def get_width(self):\n dividechars = 1\n table_size = self.hits.get_width() + self.columns[1][0] + self.columns[2][0] + dividechars * 3\n return table_size", "def GetObjectDimension(self) -...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Compute the H3 distance between two cells. The H3 distance is defined as the length of the shortest path between the cells in the graph formed by connecting adjacent cells.
def h3_distance(h1, h2): h1 = _in_scalar(h1) h2 = _in_scalar(h2) d = _cy.distance(h1, h2) return d
[ "def get_distance(a, b):\n return math.sqrt((a.x - b.x) ** 2 + (a.y - b.y) ** 2)# + (a.z - b.z) ** 2)", "def _h3_cmp_dcostheta_ ( h1 ,\n h2 ,\n density = False ) :\n assert isinstance ( h1 , ROOT.TH3 ) and 3 == h1.dim () , \\\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return unordered set of cells with H3 distance `<= k` from `h`. That is, the "filled-in" disk.
def k_ring(h, k=1): mv = _cy.disk(_in_scalar(h), k) return _out_unordered(mv)
[ "def compute_h_set(self, hmin, hmax, dh):\n\t\treturn np.arange(hmin, hmax+1e-6, dh)", "def hex_range_distances(h, K):\n h = _in_scalar(h)\n\n out = [\n _out_unordered(_cy.ring(h, k))\n for k in range(K + 1)\n ]\n\n return out", "def get_chebyshev_indices(dim, x...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return unordered set of cells with H3 distance `== k` from `h`. That is, the "hollow" ring.
def hex_ring(h, k=1): mv = _cy.ring(_in_scalar(h), k) return _out_unordered(mv)
[ "def k_ring(h, k=1):\n mv = _cy.disk(_in_scalar(h), k)\n\n return _out_unordered(mv)", "def hex_range_distances(h, K):\n h = _in_scalar(h)\n\n out = [\n _out_unordered(_cy.ring(h, k))\n for k in range(K + 1)\n ]\n\n return out", "def get_chebyshev_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Ordered list of the "hollow" rings around `h`, up to and including distance `K`.
def hex_range_distances(h, K): h = _in_scalar(h) out = [ _out_unordered(_cy.ring(h, k)) for k in range(K + 1) ] return out
[ "def hex_ring(h, k=1):\n mv = _cy.ring(_in_scalar(h), k)\n\n return _out_unordered(mv)", "def k_ring(h, k=1):\n mv = _cy.disk(_in_scalar(h), k)\n\n return _out_unordered(mv)", "def horn(n):\n if n == 0:\n yield 'o', ()\n else:\n for k in range(0, n):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Compact a collection of H3 cells by combining smaller cells into larger cells, if all child cells are present.
def compact(hexes): # todo: does compact work on mixed-resolution collections? hu = _in_collection(hexes) hc = _cy.compact(hu) return _out_unordered(hc)
[ "def _remove_empty_cells(self, contents, cells_col_row_list):\n contents_copy = contents.copy()\n for idx, content in reversed(list(enumerate(contents))):\n if not content:\n del contents_copy[idx]\n del cells_col_row_list[idx]\n return contents_copy, ce...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Reverse the `compact` operation. Return a collection of H3 cells, all of resolution `res`.
def uncompact(hexes, res): hc = _in_collection(hexes) hu = _cy.uncompact(hc, res) return _out_unordered(hu)
[ "def compact(hexes):\n # todo: does compact work on mixed-resolution collections?\n hu = _in_collection(hexes)\n hc = _cy.compact(hu)\n\n return _out_unordered(hc)", "def compact(self):\n # each list corresponds to a component of a coordinate set so the first time None is not fo...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get GeoJSON-like MultiPolygon describing the outline of the area covered by a set of H3 cells.
def h3_set_to_multi_polygon(hexes, geo_json=False): # todo: this function output does not match with `polyfill`. # This function returns a list of polygons, while `polyfill` returns # a GeoJSON-like dictionary object. hexes = _in_collection(hexes) return _cy.h3_set_to_multi_polyg...
[ "def MultiPolygon(coordinates, *rest):\n return {\n 'type': 'MultiPolygon',\n 'coordinates': Feature._makeGeometry(coordinates, 4, rest)\n }", "def geojson_highlight(bounds, world_bounds):\n return { 'type' : 'FeatureCollection',\n 'features' : [\n {\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Identify if an H3 cell is a pentagon.
def h3_is_pentagon(h): return _cy.is_pentagon(_in_scalar(h))
[ "def isPentagonal(number):\n\n if number == 1:\n return True\n else:\n n = ((24 * number + 1) ** .5 + 1)\n if n % 6 == 0:\n return True\n else:\n return False", "def is_lattice_hex(self, drawing_params):\n lattice_type_str = self.get_lattice_type_str(...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the base cell number (`0` to `121`) of the given cell. The base cell number and the H3Index are two different representations
def h3_get_base_cell(h): return _cy.get_base_cell(_in_scalar(h))
[ "def cell_mapping(self,cell) :\n\n j = np.floor(cell/self.param.n_x)\n i = cell - j*self.param.n_x\n\n return i,j", "def cellID ( self ) :\n return self._cellID", "def cellId(self, i):\n return self._handle['id/cells'][i]", "def get_cell(self, cell_num) -> CGP_cell:\r\n return s...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create an H3 Index denoting a unidirectional edge. The edge is constructed from neighboring cells `origin` and `destination`.
def get_h3_unidirectional_edge(origin, destination): o = _in_scalar(origin) d = _in_scalar(destination) e = _cy.edge(o, d) e = _out_scalar(e) return e
[ "def get_h3_unidirectional_edges_from_hexagon(origin):\n mv = _cy.edges_from_cell(_in_scalar(origin))\n\n return _out_unordered(mv)", "def get_destination_h3_index_from_unidirectional_edge(e):\n e = _in_scalar(e)\n d = _cy.edge_destination(e)\n d = _out_scalar(d)\n\n retu...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Origin cell from an H3 directed edge.
def get_origin_h3_index_from_unidirectional_edge(e): e = _in_scalar(e) o = _cy.edge_origin(e) o = _out_scalar(o) return o
[ "def get_h3_unidirectional_edge(origin, destination):\n o = _in_scalar(origin)\n d = _in_scalar(destination)\n e = _cy.edge(o, d)\n e = _out_scalar(e)\n\n return e", "def get_destination_h3_index_from_unidirectional_edge(e):\n e = _in_scalar(e)\n d = _cy.edge_desti...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Destination cell from an H3 directed edge.
def get_destination_h3_index_from_unidirectional_edge(e): e = _in_scalar(e) d = _cy.edge_destination(e) d = _out_scalar(d) return d
[ "def get_h3_unidirectional_edge(origin, destination):\n o = _in_scalar(origin)\n d = _in_scalar(destination)\n e = _cy.edge(o, d)\n e = _out_scalar(e)\n\n return e", "def get_edge(self, destination):\r\n for edge in self.edges:\r\n if edge.destination =...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return all directed edges starting from `origin` cell.
def get_h3_unidirectional_edges_from_hexagon(origin): mv = _cy.edges_from_cell(_in_scalar(origin)) return _out_unordered(mv)
[ "def GenAdjacentPoints(origin):\n for i in [1, 0, -1]:\n for j in [-1, 0, 1]:\n if i == 0 and j == 0:\n continue\n yield Point(origin.x + j, origin.y + i)", "def edges_directed(self):\n return self.__generate_edges_directed()", "def get_edges(self):\n # N...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Determine if cell has orientation "Class II" or "Class III". The orientation of pentagons/hexagons on the icosahedron can be one
def h3_is_res_class_III(h): return _cy.is_res_class_iii(_in_scalar(h))
[ "def is_portrait(self):\n pj = self.kna[0] - self.krb[0] # panjang\n lb = self.kna[1] - self.krb[1] # lebar\n if pj < lb:\n return True\n else:\n return False", "def orientation(self):\r\n tag=self.readinfo('Image Orientation Patient')\r\n \r\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return all pentagons at a given resolution.
def get_pentagon_indexes(resolution): mv = _cy.get_pentagon_indexes(resolution) return _out_unordered(mv)
[ "def get_pokemon_locations(self):\n return generate_pokemons(self.grid_size, self.num_pokemon)", "def create_pentagon_numbers(limit):\n pentagons = {0}\n increment = 1\n value = 0\n while True:\n value += increment\n increment += 3\n if value > limit:\n break\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return icosahedron faces intersecting a given H3 cell. There are twenty possible faces, ranging from 0 to 19.
def h3_get_faces(h): h = _in_scalar(h) faces = _cy.get_faces(h) return faces
[ "def icosahedron(self, upward=False):\n phi = (1 + 5**0.5) / 2\n radius = (phi**2 + 1)**0.5\n vertices = [0, 1, phi, 0, -1, phi, 0, 1, -phi, 0, -1, -phi, phi, 0, 1,\n phi, 0, -1, -phi, 0, 1, -phi, 0, -1, 1, phi, 0, -1, phi, 0,\n 1, -phi, 0, -1, -phi, 0]\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Files to be attached
def set_attachments(self,files): self.__attacments = files
[ "def send_email(sender, to, cc, subject, body, body_format, file_path, file_list):\n\n msg = MIMEMultipart()\n msg['From'] = sender\n msg['To'] = to\n msg['Cc'] = cc\n msg['Subject'] = subject\n text = body\n\n part1 = MIMEText(text, body_format)\n msg.attach(part1)\n\n ## ATTACHMENT PART...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Internal method that sends mail with the current configuration parameters (smtp, notification and attachments)
def __send_mail(self,send_from, send_to, send_cc, send_bcc, subject, message, message_type): # Message data msg = None if self.__attacments != None: # --- Message with attachments --- msg = MIMEMultipart() # sender and recipients msg['...
[ "def send_mail(config, frm_addr, to_addr, Subject, text, headers = None, files=[], html_body=False, **kw):\n #In headers we send type of data, cc, Subjects\n if headers is None: headers = {}\n \n #with Default settings it works without using SSL and without login.\n server = config.get('server')\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Initialize the Drop Target, passing in the Object Reference to indicate what should receive the dropped text
def __init__(self, obj): # Initialize the wx.TextDropTarget Object wx.TextDropTarget.__init__(self) # Store the Object Reference for dropped text self.obj = obj
[ "def __init__(self, obj):\n\n wx.DropTarget.__init__(self)\n \n self._obj = obj\n \n self.data = ArticlesIDsDropData()\n self.SetDataObject(self.data)", "def __init__(self):\n \n wx.CustomDataObject.__init__(self, ArticlesIDsDropData.NAME)", "def dropEvent...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the current load average as a value between 0.0 (representing the min_load_average value) and 1.0 (representing the max_load_average value). These default to 0.0 and 1.0 respectively.
def value(self): load_average_range = self.max_load_average - self.min_load_average return (self.load_average - self.min_load_average) / load_average_range
[ "def getLoadAverage(self):\n result = S_OK()\n comm = '/bin/cat /proc/loadavg'\n loadAvgDict = shellCall(5,comm)\n if loadAvgDict['OK']:\n la = float(string.split(loadAvgDict['Value'][1])[0])\n result['Value'] = la\n else:\n result = S_ERROR('Could not obtain load average')\n self...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Instantiate an actor of type 'actor_type'. Parameters are passed in 'args', 'name' is an optional parameter in 'args', specifying a human readable name. Returns actor id on success and raises an exception if anything goes wrong. Optionally applies a serialized state to the actor, the supplied args are ignored and args ...
def new(self, actor_type, args, state=None, prev_connections=None, connection_list=None, callback=None, signature=None, app_id=None, master_nodes=[]): _log.debug("class: %s args: %s state: %s, signature: %s" % (actor_type, args, state, signature)) callback = CalvinCB(self._after_new, prev_co...
[ "def _new(self, actor_type, args, state=None, signature=None, app_id=None, master_nodes=[], callback=None):\n _log.analyze(self.node.id, \"+\", {'actor_type': actor_type, 'state': state})\n\n return self.factory.create_actor(actor_type=actor_type, state=state, args=args, signature=signature,\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Instantiate an actor of type 'actor_type'. Parameters are passed in 'args', 'name' is an optional parameter in 'args', specifying a human readable name. Returns actor id on success and raises an exception if anything goes wrong.
def _new(self, actor_type, args, state=None, signature=None, app_id=None, master_nodes=[], callback=None): _log.analyze(self.node.id, "+", {'actor_type': actor_type, 'state': state}) return self.factory.create_actor(actor_type=actor_type, state=state, args=args, signature=signature, ...
[ "def actor(self, logdata=None):\n if not logdata:\n name = self.UNDEFINED\n else:\n match = re.match(self.actor_pattern, logdata)\n if not match:\n raise InvalidDataError(logdata, 'invalid actor or target', self.actor_pattern)\n name = match.g...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Migrate an actor actor_id to peer node node_id
def migrate(self, actor_id, node_id, callback=None): _log.info("Migrate actor {} to node {}".format(actor_id, node_id)) if actor_id not in self.actors: _log.warning("Trying to migrate non-local actor {}, aborting".format(actor_id)) # Can only migrate actors from our node ...
[ "def match_relationship_peer_id(self, peer_id, match):\n pass", "def actor_to_id(actor_name):\n return namesdb[actor_name]", "def _migrate_disconnected(self, actor, actor_type, ports, node_id, status, callback=None, **state):\n if status:\n state = actor.state()\n self.del...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Actor disconnected, continue migration
def _migrate_disconnected(self, actor, actor_type, ports, node_id, status, callback=None, **state): if status: state = actor.state() self.delete_actor(actor.id) if actor.app_id: self.node.storage.delete_replica_node(actor.app_id, self.node.id, actor.name, cb=N...
[ "def disconnect_from_db(self):\n raise NotImplementedError", "def attemptMigration(self):\n def _cb(ign):\n self.deleteFromStore()\n\n def _eb(f):\n log.failure(\n 'Error during migration of {objectId!r} from {source!r} to {destination!r}',\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Replicate an actor actor_id to peer node node_id
def replicate(self, actor_id, node_id, callback=None): if actor_id not in self.actors: _log.warning("Failed to replicate {} to {}. Can only replicate actors from our node.".format( actor_id, node_id)) # Can only replicate actors from our node if callback: ...
[ "def add_actor(new_actor):\n session.add(new_actor)\n session.commit()", "def add_actor(self, new_name, new_age, new_price):\n session.add(Actor(name=new_name, age=new_age, price=new_price))\n session.commit()", "def _update_actor(self, actor, **kwargs):\n return actor", "def migrate(self, ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Program to read in the Yashiro CME catalog
def get_yashiro_catalog(data_path=os.path.join(os.path.dirname(os.path.realpath(__file__)), "data")): #try to read from file this program creates -- delete if you want to redownload the data try: cmes=pd.read_csv("cmes.txt", sep=" ", index_col=0, parse_dates=["date"]) except: #try download...
[ "def readCatalogue(self):\n\t\twith open(config['Fixed']['CatalogFile'], mode = 'r') as cat_file:\n\t\t\tcatalog = json.load(cat_file)\n\n\t\treturn catalog", "def read_Behroozi_catalog():\n\n #get filename \n filename = sys.argv[1]\n\n #make sure this is a .list file\n if 'list' not in filename.split('.'):\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the plan_id of this Manifest.
def plan_id(self) -> str: return self._plan_id
[ "def plan_key(self):\n return self.__plan_key", "def service_plan_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"service_plan_id\")", "def service_plan_id(self):\n return self.nuvo_mgmt.get_serviceplan_id()", "def GetId(apig,usageplan_name: str):\n\t\t\t\tusageplan_list = AWS...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the plan_id of this Manifest.
def plan_id(self, plan_id: str): if plan_id is None: raise ValueError("Invalid value for `plan_id`, must not be `None`") # noqa: E501 self._plan_id = plan_id
[ "def scheduled_plan(self, scheduled_plan):\n \n self._scheduled_plan = scheduled_plan", "def setPlannerId(self, planner_id):\n self.planner_id = str(planner_id)", "def get_plan(self, plan_id):\n\t\treturn Plan(plan_id)", "def plan(self, value):\r\n from .plan import Plan\r\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the service_id of this Manifest.
def service_id(self, service_id: str): if service_id is None: raise ValueError("Invalid value for `service_id`, must not be `None`") # noqa: E501 self._service_id = service_id
[ "def set_as_broadcast_service(service_id):\n data = validate(request.get_json(), service_broadcast_settings_schema)\n service = dao_fetch_service_by_id(service_id)\n\n set_broadcast_service_type(\n service,\n service_mode=data[\"service_mode\"],\n broadcast_channel=data[\"broadcast_cha...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the manifest_type of this Manifest.
def manifest_type(self) -> str: return self._manifest_type
[ "def atom_type(self):\n return self._atom_type", "def manifest_type(self, manifest_type: str):\n if manifest_type is None:\n raise ValueError(\"Invalid value for `manifest_type`, must not be `None`\") # noqa: E501\n\n self._manifest_type = manifest_type", "def ResolveManifestTyp...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the manifest_type of this Manifest.
def manifest_type(self, manifest_type: str): if manifest_type is None: raise ValueError("Invalid value for `manifest_type`, must not be `None`") # noqa: E501 self._manifest_type = manifest_type
[ "def manifest_type(self) -> str:\n return self._manifest_type", "def docker_image_manifest_media_type(self, docker_image_manifest_media_type):\n\n self._docker_image_manifest_media_type = docker_image_manifest_media_type", "def set_type(self, entry_type):\n if entry_type not in self._types:...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the manifest_content of this Manifest.
def manifest_content(self) -> str: return self._manifest_content
[ "def get_manifest(self):\n logger.debug(\"Getting manifest {}\".format(self))\n text = self.get_text(self.get_manifest_key())\n return json.loads(text)", "def get_manifest(self) -> SdkManifest:\n return self._read_json(os.path.join('meta', 'manifest.json'))", "def get_manifest(self) ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the manifest_content of this Manifest.
def manifest_content(self, manifest_content: str): if manifest_content is None: raise ValueError("Invalid value for `manifest_content`, must not be `None`") # noqa: E501 self._manifest_content = manifest_content
[ "def manifest(self, value):\n\n self._manifest.set(value)", "def setContent(self, data):\n self._content = data", "def set_content(self, content):\n if hasattr(content, 'read'):\n self.content = content\n else:\n self.content = StringIO.StringIO(content)", "de...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the endpoints of this Manifest.
def endpoints(self) -> object: return self._endpoints
[ "def get_endpoints(self):\n\n return self._get_component_metadata()['endpoints']", "def get_mediapackage_endpoints(self):\n return self.live_info.get(\"mediapackage\").get(\"endpoints\")", "def get_endpoints(configuration):\n pass", "def endpoints(self) -> pulumi.Input[Sequence[pulumi.Inp...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the endpoints of this Manifest.
def endpoints(self, endpoints: object): self._endpoints = endpoints
[ "def _attach_endpoints(self):\n\n for name, endpoint in inspect.getmembers(self):\n if (inspect.isclass(endpoint) and\n issubclass(endpoint, self._Endpoint) and\n endpoint is not self._Endpoint):\n endpoint_instance = endpoint(self.requester)\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a payment card request for a user.
def get_user_payment_card_request(self, user_token): return { "user_token": user_token, "account_number": "4112344112344113", "cvv_number": "123", "exp_date": "0323", "zip": "94612" }
[ "def get_credit_card(self):\n card = CreditCard(**self.cleaned_data)\n if self.gateway is not None:\n self.gateway.validate_card(card)\n return card", "def test_payment_card_create_user(self):\n\n user = self.client.users.create({})\n\n payment_card_request = FundingS...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a payment card address request for a user.
def get_user_card_holder_address_request(self, user_token): return { "user_token": user_token, "first_name": "Marqeta", "last_name": "QE", "address_1": "180 Grand Ave.", "city": "Oakland", "state": "CA", "zip": "94612", ...
[ "def get_address(self, address: str) -> Address:", "def address(self, **kwargs):\n valid_address = None\n\n address_dict = OrderedDict()\n address_dict[\"Address1\"] = kwargs.get(\"address1\", \"\")\n address_dict[\"Address2\"] = kwargs.get(\"address2\", \"\")\n address_dict[\"C...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a unique program name.
def get_program_name(self): return "qe_program_" + str(int(time.time() % 1000000000))
[ "def GetProgramName(self):\n\n return common.program_name", "def program_name():\n return os.path.basename(sys.argv[0])", "def generate_default_name():\n return \"{}{}\".format(os.getpid(), str(time.time()).replace(\".\", \"\"))", "def name():\n app_name = current_app.name.split(\".\")[0]\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Verifies a GPA return matches the expected values.
def verify_gpa_return(self, response, verify): # Verify the expected attributes are defined expected_attributes = [ 'token', 'amount', 'created_time', 'last_modified_time', 'transaction_token', 'state', 'response', ...
[ "def verify_ret(ret, expected_ret):\n assert ret == expected_ret, (\n \"Function should return: \"\n + ret_vals_dictionary[expected_ret]\n + \".\\nInstead returned: \"\n + ret_vals_dictionary[ret]\n )", "def test_garages_checking_fail(self):\r\n expected_res = 'Sorry, you ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Finds a GPA unload for an order funded by a user payment card.
def test_gpa_orders_unloads_find_payment_card_user(self): user = self.client.users.create({}) card_request = self.get_user_payment_card_request(user.token) payment_card = self.client.funding_sources.payment_card.create( card_request) address_request = self.get_user_card_ho...
[ "def test_gpa_orders_unloads_find_does_not_exist(self):\n\n with self.assertRaises(MarqetaError):\n self.client.gpa_orders.unloads.find('Not an unload token')", "def handle_unsolicited_payment(self, message: Message):\n amount_paid_mob = message.payment.amount_mob\n self.logger.war...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tries to find an unload that doesn't exist.
def test_gpa_orders_unloads_find_does_not_exist(self): with self.assertRaises(MarqetaError): self.client.gpa_orders.unloads.find('Not an unload token')
[ "def unload(overlay): \r\n with open(SLOTS_FILE, 'rb') as f:\r\n slots = f.readlines()\r\n for slot in slots:\r\n if overlay in slot:\r\n load('-%i' % int(slot.split(':')[0]))\r\n return True\r\n return False", "def test_script_not_found(self):\n stderr = io.StringIO(execute_pwb(['pywiki...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Performs a multiple sequence alignment of two or more sequences. By default, the protein sequences are aligned using PROBCONS. This is probably the most accurate alignment program. However, it is slow and consumes large amounts of memory if you are aligning a very large number of sequences (typically if you are alignin...
def Align(headers_seqs, progpath, musclegapopen=None): if not (isinstance(headers_seqs, list) and len(headers_seqs) >= 2): raise ValueError, 'header_seqs does not specify a list with at least two entries.' if not os.path.isdir(progpath): raise ValueError, "Cannot find directory %s." % progpath ...
[ "def align(self, sequences):\n seqs = [copy.deepcopy(s) for s in sequences]\n c = seqs[0]\n aligned = [c]\n klass = c.__class__\n with tqdm(total=len(seqs)-1) as pbar:\n for s in seqs[1:]:\n score, traceback = c.global_align_multiple_solutions(s, self.sm,...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Reads sequences from a FASTA file. 'fastafile' should specify the name of a FASTA file. This function reads all sequences from the FASTA file. It returns the list 'headers_seqs', composed of seq_record objects.
def ReadFASTA(fastafile): seqs =[] header = None for seq_record in SeqIO.parse(fastafile, "fasta"): seq_record.seq.alphabet=IUPAC.unambiguous_dna seqs.append(seq_record) return seqs
[ "def read_sequences_from_fasta_file(filename):\n sequences = []\n current_record_lines = None\n for line in open(filename):\n if line.startswith(\">\"):\n if current_record_lines is not None:\n sequences.append(sequence_from_fasta_lines(current_record_lines))\n c...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create all indexes for the RelationshipSets in a list of Parsers.
def create_index(graph, parser_list): for parser in parser_list: for relationshipset in parser.container.relationshipsets: relationshipset.create_index(graph) for nodeset in parser.container.nodesets: nodeset.create_index(graph)
[ "def create_index():\n for indexer in _get_registered():\n _create_index(indexer)", "def setupIndexes( self, reindex=None, REQUEST=None ):\n reindexed = []\n\n # Setup new indexes\n for item in self.enumerateIndexes():\n index, typ = item[0:2]\n extra = len(ite...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create the NodeSets for a list of parsers
def create_nodesets(graph, parser_list): for parser in parser_list: log.info("Create nodes for parser {}".format(parser.__class__.__name__)) for nodeset in parser.container.nodesets: nodeset.merge(graph)
[ "def createNodes(self):\n\t\tfor sw in setting.switches:\n\t\t\tself.SwitchList.append(self.addSwitch(sw))\n\n\t\tfor host in setting.hosts:\n\t\t\tself.HostList.append(self.addHost(host))", "def populate_registries(self):\n if len(self.parsers) == 0:\n raise ValueError('GraphBuilder object need...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create the RelationshipSets for a list of parsers
def create_relationshipsets(graph, parser_list): for parser in parser_list: log.info("Create relationships for parser {}".format(parser.__class__.__name__)) for relset in parser.container.relationshipsets: relset.merge(graph)
[ "def populate_registries(self):\n if len(self.parsers) == 0:\n raise ValueError('GraphBuilder object needs at least one parser in self.parsers to run self.populate_registries.')\n \n for parser in self.parsers:\n parser.resgister_nodes_and_edges(self.node_dict,self.edge_di...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Upload files to an existing Snapshot
def upload(self, local_path, overwrite=True): return self.snapshots.upload(snapshot=self, local_path=local_path, overwrite=overwrite)
[ "def file_upload(self, path, old_sha):\n raise NotImplementedError('Method file_upload not implemented in root(Git*Connect) class')", "def upload(self, fileobj, tileset_name):\n url = self.stage(fileobj)\n return self.create(url, tileset_name)", "def upload_file(self,file_from,_file_to):\r\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Checks whether the cached function and the original one return the same result
def test_cached_func_returns_the_same_as_original(): def foo(a, b): return a + b arguments = 10, 5 cached_foo = cache(foo) assert foo(*arguments) == cached_foo(*arguments)
[ "def test_cached_func_called_only_once_on_the_same_data():\n mock = Mock()\n mock.return_value = 15\n arguments = 10, 5\n cached_mock = cache(mock)\n _ = cached_mock(*arguments)\n _ = cached_mock(*arguments)\n mock.assert_called_once()", "def _cached_call(self, args, kwargs, shelving=False):\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tests that the original function is called only once, while the cached function may be called several times with the same args, so we can be sure the previous result is really cached
def test_cached_func_called_only_once_on_the_same_data(): mock = Mock() mock.return_value = 15 arguments = 10, 5 cached_mock = cache(mock) _ = cached_mock(*arguments) _ = cached_mock(*arguments) mock.assert_called_once()
[ "def test_cached_func_returns_the_same_as_original():\n\n def foo(a, b):\n return a + b\n\n arguments = 10, 5\n cached_foo = cache(foo)\n\n assert foo(*arguments) == cached_foo(*arguments)", "def memoized(*args, **kwargs):\n\n arguments = args + tuple((a, b) for a, b in kwargs.items())\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set the default value of a parameter automatically. This is often useful for changing hidden parameters on the fly.
def set_default(self, param, defval): if param not in self._rpars: raise ClineError( 'set_default: parameter = "' + param + '" has not been registered.' ) if self._rpars[param]['g_or_l'] == Cline.GLOBAL: self._gpars[param] = defval ...
[ "def set_defaults( ):\n __param=__default", "def set_default(parameter, default, num_models=1):\n if len(parameter) == 0:\n for i in range(0, num_models):\n parameter.append(default)\n return parameter", "def default_(self, default_):\n\n self._default_ = default_", "def set_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the current default value of a parameter called 'param'
def get_default(self, param): if param not in self._rpars: raise ClineError( 'get_default: parameter = "' + param + '" has not been registered.' ) if self._rpars[param]['g_or_l'] == Cline.GLOBAL: defval = self._gpars[param] els...
[ "def get_param(self, key: str, default=None):\n if key not in self._parameters:\n self._parameters[key] = default\n if default:\n self._update_run()\n return default\n return self._parameters[key]", "def get_optional_param(param_name: str, default: str...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the value of a parameter, either from the command arguments, or by retrieving default values or by prompting the user as required. This is the main function of Cline. The value obtained is used to update the defaults which, if 'nodefs' has not been defined, are written to disk at the end of the command.
def get_value(self, param, prompt, defval, minval=None, maxval=None, lvals=None, fixlen=True, multipleof=None, ignore=None): if param not in self._rpars: raise ClineError( 'parameter = "{:s}" has not been registered.'.format( param.upper()) ...
[ "def getarg(self, parname):\n # list of strings that should parse to boolean true\n # we need to handle booleans separately, because bool(\"False\")\n # evaluates to True\n booltrue = ['yes','true','1','t']\n\n parname = parname.lower() # so we don't have to worry about case\n\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Trains the approximate network in a 10-fold cross-validation manner
def train_approximate_network(): model_dict = {} # all the different models model_dict['UNet'] = UNet model_dict['UNetLite'] = UNetLite model_dict['UNetWide40'] = UNetWide40 model_dict['UNetWide48'] = UNetWide48 model_dict['UNetDS64'] = UNetDS64 ...
[ "def train(self):\n TM = TrainingMode()\n\n \"\"\"\n Training Arguments\n \"\"\"\n train_args = {'use_global_valid': False,\n 'use_custom_obj': False,\n 'show_importance': False,\n 'save_final_pred': True,\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Trains the refinement network in a 10-fold cross-validation manner
def train_refinement_network(): model_dict = {} # all the different models model_dict['UNet'] = UNet model_dict['UNetLite'] = UNetLite model_dict['UNetWide40'] = UNetWide40 model_dict['UNetWide48'] = UNetWide48 model_dict['UNetDS64'] = UNetDS64 ...
[ "def train_approximate_network():\n \n model_dict = {} # all the different models\n model_dict['UNet'] = UNet\n model_dict['UNetLite'] = UNetLite\n model_dict['UNetWide40'] = UNetWide40\n model_dict['UNetWide48'] = UNetWide48\n model_dict['UNetDS64'] ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Make up a text that shows the list and info of available parsers.
def make_parsers_txt() -> str: sep = os.linesep indent = ' ' parser_types = ', '.join(list_parser_types()) file_ext_vs_parsers = sep.join( f'{indent}{x}: ' + ', '.join(p.cid() for p in ps) for x, ps in api.list_by_extension() ) return sep.join( [ 'Supported...
[ "def parseManagerText(self, text):\n\n # Regular expressions for scanning the file\n find_active = re.compile(r\"^\\s*?(\\w+)\\.py\", re.MULTILINE)\n find_inactive = re.compile(r\"^\\s*?#\\s*(\\w+)\\.py\", re.MULTILINE)\n find_manager = re.compile(r\"^\\s*plugin_manager\\.py\", re.MULTIL...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }