Columns:
query: string (length 9 to 9.05k)
document: string (length 10 to 222k)
negatives: list (length 19 to 20)
metadata: dict
Removes the 'tmp' directory if there is one under the given `builddpath`.
def clean_tmp(builddpath: str): tmpdpath = os.path.join(builddpath, "tmp") if os.path.isdir(tmpdpath): shutil.rmtree(tmpdpath)
[ "def remove_testdir(tmpdir):\n yield\n if tmpdir.check():\n tmpdir.remove()", "def delete_tmp_dir(self,name):\r\n\t\tnorm_name = os.path.normpath(name)\r\n\t\trel_path = os.path.relpath(norm_name, os.path.abspath(VDOM_CONFIG[\"TEMP-DIRECTORY\"]))\r\n\t\tif rel_path.find('/')>=0 or rel_path.find('\\\\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Copy all the Artie Libraries into the given folder.
def copy_artie_libs(dest): libpath = os.path.join(repo_root(), "libraries") libs = [os.path.join(libpath, d) for d in os.listdir(libpath) if os.path.isdir(os.path.join(libpath, d)) and d != "base-image"] for lib in libs: destpath = os.path.join(dest, os.path.basename(lib)) if not os.path.exi...
[ "def copy_to_a3(self):\n\t\tprint_blue(\"Copying addon to Arma 3 folder.\")\n\n\t\treg = winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE)\n\t\ttry:\n\t\t\tk = winreg.OpenKey(reg, r\"SOFTWARE\\Wow6432Node\\Bohemia Interactive\\Arma 3\")\n\t\t\ta3_path = winreg.EnumValue(k, 1)[1]\n\t\t\twinreg.CloseKey(k)\n\t\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the default build location.
def default_build_location(): return os.path.join(repo_root(), "build-artifacts")
[ "def getDefaultOutputPath(self):\n return self.session.request('bootcdbuilder/defaults')", "def _output_directory_default(self):\n return os.getcwd()", "def _get_default_path(self):\n #return os.path.join(cfg.DATA_DIR, 'SNUBH_BUS')\n return cfg.DATA_DIR", "def get_llvm_build_dir():...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the default test results location.
def default_test_results_location(): return os.path.join(repo_root(), "test-results")
[ "def get_results_path(self):\n\n return constants[\"RESULTS_BASE_PATH\"] / self.get_module_path()", "def _get_default_path(self):\n return os.path.join(cfg.DATA_DIR, 'ftdata')", "def get_analysis_results_path(self):\n if self.config:\n try:\n return self.config.get...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get a random string suitable for a temporary directory.
def get_random_dirname() -> str: return "tempdir-" + "".join(random.choices(string.ascii_letters, k=8))
[ "def get_random_sting():\n return \"random string\"", "def randstring():\n return binascii.b2a_hex(os.urandom(15)).upper()", "def tempname(length, lowercase=False):\n\n chars = string.ascii_lowercase + string.digits\n if not lowercase:\n chars += string.ascii_uppercase\n random_part = ''.j...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the file names (without .py) of all the task modules for dynamic import.
def get_task_modules(): task_folder = os.path.join(repo_root(), "artietool", "tasks") return [os.path.splitext(fname)[0] for fname in os.listdir(task_folder) if os.path.splitext(fname)[-1] == ".py"]
[ "def getTaskModules(callback = None):\n\tpl = getPluginLoader()\n\treturn pl.getModules(\"Task\", callback = callback, moduleType = \"Task module\")", "def get_import_paths() -> List[str]:\n return _redun_import_paths", "def _obtain_imports(self):\n imports = ''\n for model in self.models:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Finds and retrieves the Task from the tasks list based on its name. Returns None if it can't be found.
def find_task_from_name(name: str, tasks): for t in tasks: if t.name == name: return t return None
[ "def getTaskByID(self,taskID : str):\n if self.taskLock.acquire():\n try:\n for item in self.tasks:\n if item.ID == taskID:\n return item\n return None\n finally:\n self.taskLock.release()", "def ge...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return a location we can use for scratch stuff.
def get_scratch_location(): scratch_location = os.path.join(repo_root(), "tmp") if not os.path.isdir(scratch_location): os.makedirs(scratch_location, exist_ok=True) # Hopefully nip any race conditions in the bud return scratch_location
[ "def Location(self) -> str:", "def full_bed_location():\n return \"tests/test_data/full_bed.bed\"", "def getFrom(self) -> ghidra.program.util.ProgramLocation:\n ...", "def printable_location(self):\n return '\"{0}\" ({1})'.format(\n concise_path(self.base_dir), self.pyver)", "def...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the git tag of the Artie repo.
def git_tag() -> str: p = subprocess.run("git log --format='%h' -n 1".split(' '), capture_output=True) p.check_returncode() return p.stdout.decode('utf-8').strip().strip("'")
[ "def git_tag():\n return execute(\"git tag\", capture=True).split('\\n')", "def get_latest_tag() -> str:\n return exec_cmd(\"git describe --tags --abbrev=0\").strip()", "def last_git_tag(cwd: str) -> str:\n res = subproc.run(\n \"git describe --abbrev=0\".split(),\n capture_stdout=True,\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the absolute path of the root of the Artie repository.
def repo_root() -> str: thisdir = os.path.dirname(os.path.abspath(__file__)) root = os.path.join(thisdir, "..") if not os.path.isdir(root): raise FileNotFoundError("The Artie directory seems to have been altered in a way that I can't understand.") return os.path.abspath(root)
[ "def _get_arc_root():\n return os.path.abspath(\n os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', '..'))", "def full_repository(self):\n base = self.base_repository\n if base:\n if not base.endswith('/'):\n base += '/'\n return urlparse.urlj...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a list of TODO information based on the given files.
def parse_TODOs(self, repo_PATH): # https://gist.github.com/nickpascucci/1267938 # TODO : parser part 2 comments = [] todo_info = None print ('repo path == ', repo_PATH) for (dirpath, dirnames, filenames) in os.walk(repo_PATH, topdown=True, onerror=None, followlinks=False): if not 'QuEST' in repo_PATH: ...
[ "def listToDo():\n with open(\"todo.txt\") as toDoFile:\n tasks = toDoFile.readlines()\n if len(tasks) > 0:\n for index, task in enumerate(tasks, 1):\n print(\"[{0}] {1}\".format(\n len(tasks) - index + 1, task.strip(\"\\n\")))\n else:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the summation of the integers from low to high.
def summation2(low, high): total = 0 for number in range(low, high + 1): total += number return total
[ "def get_sum(a, b):\n return sum(range(min(a, b), max(a, b) + 1))", "def rangeSumBST(self, root: Optional[TreeNode], low: int, high: int) -> int:\n ans = 0\n t = [root]\n while t:\n node = t.pop()\n if node:\n if low <= node.val <= high:\n ans += node.val\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given a graph, return True if the target node is in the graph, otherwise return False, using depth-first search.
def node_in_graph(graph, target_node, start=None, visited=set()): if start == target_node: return True stack = list(start) while stack: cur = stack.pop() visited.add(cur) for child in graph[cur]: if child == target_node: return True if...
[ "def if_conn(graph):\n\n nodes = graph.nodes()\n first_node = nodes[0]\n last_node = nodes[-1]\n return nx.has_path(graph, first_node, last_node)", "def depth_first_search(graph, start, v):\n\n visited = set()\n stack = [start]\n\n while stack:\n curr = stack.pop()\n if curr == ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Ensure UserMessage rows with historical flag are also considered for read receipts.
def test_historical_usermessages_read_flag_not_considered(self) -> None: hamlet = self.example_user("hamlet") cordelia = self.example_user("cordelia") stream_name = "test stream" self.subscribe(cordelia, stream_name) message_id = self.send_stream_message(cordelia, stream_name, ...
[ "def mark_as_unread(self):\n if self.read_at:\n self.read_at = None\n return self.save(query=True)", "def get_unread_messages(self, user):\n return Message.objects.filter(recipient=user, read_at=None)", "def mark_as_read(self, message):\n\n if message.read_at is None:\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Find the most popular goods, then set popular good identifiers equal to one and all other good identifiers equal to zero.
def process_orders(self): for person_orders in self.orders.values(): non_zero_count = np.count_nonzero(person_orders) if non_zero_count < self.num_popular_ids: non_zero_ind = person_orders.argsort()[::-1][:non_zero_count] sub_index = [] f...
[ "def update_counts(self, great = 0, good = 0, bad = 0, miss = 0):\n \n self.great = great\n self.good = good\n self.bad = bad\n self.miss = miss\n self.perfect = self.total_count() - (miss + bad + good + great)", "def frequent_itemset(transactions, minsup):\n pass", "def good_count(self, go...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
fetches a price ticker, a statistical calculation with the information calculated over the past 24 hours for a specific market
async def fetch_ticker(self, symbol: str, params={}): await self.load_markets() market = self.market(symbol) request = { 'market': market['id'], } response = await self.publicGetTicker24h(self.extend(request, params)) # # { # "marke...
[ "def run(self):\n\n self.sleep_if_market_not_available()\n\n LOG_INSTANCE.info(f\"Retrieving {self.ticker} price\")\n self.reset_cache()\n\n # curls and save intraday data\n intraday_price_so_far = self.retrieve_start_price()\n self.cache_intraday_ticker_data(intraday_price...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
fetch the trading fees for multiple markets
async def fetch_trading_fees(self, params={}): await self.load_markets() response = await self.privateGetAccount(params) # # { # "fees": { # "taker": "0.0025", # "maker": "0.0015", # "volume": "10000.00" # ...
[ "async def fetch_trading_fees(self, params={}):\n await self.load_markets()\n response = await self.privateGetFees(params)\n #\n # {\n # \"maker_fee_rate\": \"0.0050\",\n # \"taker_fee_rate\": \"0.0050\",\n # \"usd_volume\": \"43806.92\"\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
fetch the deposit address for a currency associated with self account
async def fetch_deposit_address(self, code: str, params={}): await self.load_markets() currency = self.currency(code) request = { 'symbol': currency['id'], } response = await self.privateGetDeposit(self.extend(request, params)) # # { # ...
[ "async def get_deposit_address(self, **params):\r\n return await self.client_helper(\"get_deposit_address\", **params)", "def get_deposit_address(self, coin):\r\n url = self.url_base + \"id=\" + self.user_id + '&deposit=' + str(coin)\r\n\r\n if self.debug == 1:\r\n print url\r\n\r\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Compute the multiplicative modular inverse of `a` mod n. That is, solve the linear congruence a x \equiv 1 (mod n). On success, a solution tuple (base, mod), which represents a solution `base + mod k` with k in Z, is returned. If no solution is found (that is, gcd(a, n) != 1), None is returned.
def modinv(a: int, n: int) -> Optional[Tuple[int, int]]: return solve_lincongr(a, 1, n, simplify=True)
[ "def modular_linear_equation_solver(a, b, n):\n d, x_prime, _ = recur_extended_euclid(a, n)\n if not b % d:\n x = x_prime*(b//d) % n\n return True, [(x + i*(n//d)) % n for i in range(d)]\n return False, []", "def modinv(a, N):\n x, y, d = extended_Euclid(a, N)\n if d == 1:\n re...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Indicates whether or not the achieved goal successfully achieved the desired goal.
def _is_success(self, achieved_goal, desired_goal): # TODO: may need to tune parameters return np.logical_and( goal_distance(achieved_goal[..., :2], desired_goal[..., :2]) < 5e-3 * self.SCALING, np.abs(achieved_goal[..., -1] - desired_goal[..., -1]) < 4e-3 * self.SCALING ...
[ "def success(self) -> bool:\n return self.status == \"completed\" and self.code == 0", "def goal_test(self, current):\n\n if current.state == self.goal_state:\n return True\n else:\n return False", "def is_goal(self):\r\n return np.array_equal(PuzzleState.SOLVED...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Samples a new goal and returns it.
def _sample_goal(self) -> np.ndarray: goal = np.array(get_link_pose(self.obj_ids['fixed'][1], self._pegs[0])[0]) return goal.copy()
[ "def sample_goal(self):\n #TODO: We don't need this\n raise NotImplementedError", "def sample_goal_params(self):\n pass", "def _sample_achieved_goal(self, episode_transitions, transition_idx):\r\n if self.goal_selection_strategy == \"future\":\r\n # Sample a goal that was ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Using the CUDA version (the NVCC version) and the target architectures, compute the nvcc architecture flags.
def cuda_select_nvcc_arch_flags(cuda_version, cuda_arch_list="Auto", detected=""): cuda_known_gpu_architectures = ["Fermi", "Kepler", "Maxwell"] cuda_common_gpu_architectures = ["3.0", "3.5", "5.0"] cuda_limit_gpu_architecture = None cuda_all_gpu_architectures = ["3.0", "3.2", "3.5", "5.0"] if cuda_ver_c...
[ "def _nvcc_gencode_options(cuda_version: int) -> List[str]:\n\n if sys.argv == ['setup.py', 'develop']:\n return []\n\n envcfg = os.getenv('CUPY_NVCC_GENERATE_CODE', None)\n if envcfg is not None and envcfg != 'current':\n return ['--generate-code={}'.format(arch)\n for arch in...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
r""" Given a file handle to write in to (which should act like a Python `file` object), write out the landmark data. No value is returned. Writes out the LJSON format which is a verbose format that closely resembles the landmark group format. It describes semantic labels and connectivity between labels. The first axis ...
def LJSONExporter(landmark_group, file_handle, **kwargs): lg_json = landmark_group.tojson() # Add version string lg_json['version'] = 2 # Convert nan values to None so that json correctly maps them to 'null' points = lg_json['landmarks']['points'] # Flatten list try: ndim = len(poin...
[ "def ljson_exporter(lmk_points, filepath, **kwargs):\n\n lmk_points[np.isnan(lmk_points)] = None\n\n lmk_points = [list(_tmp) for _tmp in lmk_points]\n\n ljson = {\n 'version': 2,\n 'labels': [],\n 'landmarks': {\n 'points': lmk_points\n }\n }\n\n with open(file...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
r""" Given a file handle to write in to (which should act like a Python `file` object), write out the landmark data. No value is returned. Writes out the PTS format which is a very simple format that does not contain any semantic labels. We assume that the PTS format has been created using Matlab and so use 1based inde...
def PTSExporter(landmark_group, file_handle, **kwargs): pts = landmark_group.lms.points # Swap the x and y axis and add 1 to undo our processing # We are assuming (as on import) that the landmark file was created using # Matlab which is 1 based pts = pts[:, [1, 0]] + 1 header = 'version: 1\nn_p...
[ "def pts_exporter(pts, file_handle, **kwargs):\n # Swap the x and y axis and add 1 to undo our processing\n # We are assuming (as on import) that the landmark file was created using\n # Matlab which is 1 based\n\n if len(pts.shape) == 2:\n pts = pts[:, [1, 0]] + 1\n else:\n pts = pts[:,...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Overriding this to avoid having orderOfMagnitude reset elsewhere
def _set_orderOfMagnitude(self, range): self.orderOfMagnitude = self._order_of_mag
[ "def mag_phase(self):\n\n self.magnitudes = []\n self.phases = []\n for system in self.systems:\n m, p = self.mag_phase_system(system)\n self.magnitudes.append(m)\n self.phases.append(p)", "def _getMagnitudes(self):\r\n assert self.isTwoComponents()\r\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
returns the most specialized API, with the required flavour (IVICOM or IVIC), or None if the flavour is not supported
def get_session(self, flavour = None): specialized_apis_com = [] general_apis_com = [] specialized_apis_c = [] general_apis_c = [] for papi_name, papi in self.com_apis.items(): if papi_name in SPECIALIZED_APIS: specialized_apis_com.append(papi...
[ "def determine_os_api(some_function):\r\n @functools.wraps(some_function)\r\n def provide_os_determination_and_call(*args, **kwargs):\r\n # expand request values\r\n if 'params' in request.values:\r\n pairs = [s.split('=', 1) for s in request.values['params'].split('&')]\r\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Physically queries the instrument model at the given address
def get_model_name(address): from visa import VisaIOError import visa model = "no device" try: instr = visa.Instrument(str(address)) timeout = instr.timeout except VisaIOError: print("instrument at address " + str(address) + " didn't reply in " ...
[ "def get(address_type, address):", "def get_address(self, address: str) -> Address:", "def test_get_xrp__ripple_address_details(self):\n pass", "def test_list_xrp__ripple_transactions_by_address(self):\n pass", "def impedance(address, name):\n explore = explorepy.explore.Explore()\n expl...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
use this decorator to register the wrapper class for an instrument type
def register_wrapper(flavour, instrument_type): def func(cls): WRAPPERS[flavour][instrument_type] = cls return cls return func
[ "def register_wrapper(cls):\n for wrapped in cls._WRAPPED:\n if wrapped in cls._WRAPPERS:\n LOGGER.warn('{} is already registered to {}.'.format(wrapped, cls._WRAPPERS[wrapped]))\n\n if LANTZ_BUILDING_DOCS:\n cls._WRAPPERS[wrapped] = type(wrapped.__name__ + 'Wrapped',\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
True if C{tr} is a 4x4 homogeneous transform.
def ishomog(tr): return tr.shape == (4,4)
[ "def is_tt_matrix(self):\n return len(self.get_raw_shape()) == 2", "def check_conv2d_transpose(extract):\n if not ethosn_available():\n return False\n\n return _ethosn.conv2d_transpose(extract)", "def is_identity(self):\n return self.m == Matrix4x4()", "def is_four_bit_m...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
True if C{tr} is an l-vector (a vector of length l).
def isvec(v, l=3): return v.shape == (l,1) or v.shape == (1,l) or v.shape == (l,)
[ "def isVectorType(self):\n if re.match(r'^(ivec\\d|vec\\d)$', self.__type):\n return True\n return False", "def __is_ltr(cer_inss):\n is_ltr = False\n for cer_ins in cer_inss.get_all():\n if len(cer_ins.features) > 0:\n is_ltr = True\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Number of columns in a matrix.
def numcols(m): return m.shape[1];
[ "def numCols(self) -> int:\n return self._java_matrix_wrapper.call(\"numCols\")", "def board_n_columns(board: Board) -> int:\n return len(board[0])", "def numColBlocks(self) -> int:\n return self._java_matrix_wrapper.call(\"numColBlocks\")", "def size(self, matrix):\r\n return matrix.s...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Convert a 1-dimensional argument that is either a list, array, or matrix to an array.
def arg2array(arg): if isinstance(arg, (matrix, ndarray)): s = arg.shape; if len(s) == 1: return array(arg); if min(s) == 1: return array(arg).flatten(); elif isinstance(arg, list): return array(arg); elif isinstance(arg, (int, float, float32, float6...
[ "def T_arr1d(*args):\n return _seb.T_arr1d(*args)", "def to_array(arr):\n return arr if isinstance(arr, np.ndarray) else np.array(arr)", "def listtoarray(self,x):\n dim=len(x)\n matrice=zeros((1,dim))\n for i in range(dim):\n matrice[0][i]=x[i]\n return matrice", "de...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Common error handler. Display the error string, execute a traceback, then raise an exception to return to the interactive prompt.
def error(s): print('Robotics toolbox error:', s) #traceback.print_exc(); raise ValueError
[ "def error(s):\n print s\n exit(1)", "def displayError(err):\n print(\"\\nError: %s.\" % err)\n displayUsage()", "def error (\n\n text,\n fatal = False\n ) :\n\n text = str( text )\n\n directory, name = os.path.split( sys.executable )\n\n identifier, extension = os.path.splitext( n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Removes a proxy from the internal store.
def remove_proxy(self, id, proxy): if id not in self._stores: return self._stores[id].difference_update({proxy, })
[ "def _delete_proxy(self, proxy):\n print \"except, remove proxy: \", proxy \n new_set = set(self.proxy_list)\n new_set.remove(proxy)\n self.proxy_list = list(new_set)", "def delete(self, proxy):\n self.db.delete(proxy)", "def remove(self, proxy):\n self.SMProperty.Remo...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Updates the store with the given proxies. This clears the store of preexisting proxies and adds the new ones.
def update_store(self, id, proxies): if id not in self._stores: return store = self._stores[id] with self._lock: store.clear() if proxies: store.update(proxies)
[ "def set_proxies(self, proxies):\n if proxies:\n protocols = [\"http\", \"https\", \"ftp\", \"socks\"]\n for protocol in protocols:\n entry_id = protocol + \"_proxy_entry\"\n entry_widget = self.ui.get_object(entry_id)\n port_id = protocol + ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
reads the image from path and returns a PIL image object
def read_image(self, path: str) -> Image: raise NotImplementedError
[ "def __open_image(path) -> PILImage:\n try:\n return pilImage.open(path)\n except IOError as ex:\n LOGGER.critical('Failed to open image file at %s: %s' % (path, str(ex)))\n raise", "def load_img(path):\n if pil_image is None:\n raise ImportError('Could not...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
return JSON of all albums
def get(self): return getAllAlbums()
[ "def albums():\n albums = app.config[\"albums\"]\n # TODO complete (return albums.get_albums() in JSON format)\n return json.dumps(albums.get_albums())", "def vk_get_album_list(request):\n if not request.user.is_superuser:\n return redirect('%s?next=%s' % (reverse('dc_parse:admin_auth'), reques...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
return JSON of album with album_id
def get(self, album_id): return jsonify(getAlbumData(album_id))
[ "def get_album(self, album_id):\n track = []\n img = None\n\n for i in self.__albums:\n for t_id, info in self.__tracks.items():\n if i[\"id\"] == t_id and t_id == album_id:\n img = i[\"img\"]\n\n for a, b in info.items():\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Samples individuals from each replicant population, and stores the genotypes of that sample in the database.
def sampleIndividuals(pop, param): (ssize, mutation, popsize, sim_id, num_loci) = param popID = pop.dvars().rep gen = pop.dvars().gen sample = drawRandomSample(pop, sizes=ssize) samplelist = [] for idx in range(ssize): genotype_list = list(sample.individual(idx).genotype()) indi...
[ "def mutate_population(self):\n for invidual in self.population:\n if(np.random.uniform(0, 1) < self.mutation_chance):\n invidual.mutate_random_gene(self.mutation_scale)\n self.rate_population()", "def generate_random_population(self):\n for i in range(POPULATION_SIZE)...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
getstudic loads the student.json file into a dictionary
def getstudic(): global studentdicsingleton if studentdicsingleton == None: try: studentdicsingleton = json.load(open("student.json","r")) except Exception as e: # TODO gestion des exceptions dans plExecutor studentdicsingleton = dict() # retourne un dico vide return studentdicsingleton
[ "def readstu(self) -> None:\n path :str = os.path.join(self.directory_path,\"students.txt\")\n for cwid, name, major in file_reader(path, 3, sep='\\t',header=True): \n b: Student = Student(cwid,name,major)\n self.studict[cwid]=b", "def load_json(filename):\n with open(filen...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Initializes data and establishes connection with server, where the payload's properties are registered, updated and tracked.
def __init__(self): self.files_handler = None self.client = BaseClient() # connects to server and starts session self.client.connect(SERVER_IP, PORT) self.session = self.client.get_session() # sends payload's properties self.session.send_text(JSON_TEXT) # ...
[ "def _initialize_observing(self):\n self.build_client_snapshot()\n self.build_shared_snapshot()\n self.load_local_dir_state()\n self.create_observer()\n self.observer.start()\n self.sync_with_server()", "def __init__(self, store):\n self.data = dict()\n self...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Notifies the server when a new file was encrypted
def notify_file_encryption(self, _file): self.session.send_text('{{"action": {}, "file":"{}"}}'.format(FILE_ENCRYPTED, _file))
[ "def handler(event, context):\n print('encrypting file')\n\n # Get file key from the event\n s3_key = event['Records'][0]['s3']['object']['key']\n\n # get string contents of file\n file_string = aws.get_file_as_string(s3_key)\n\n # encrypt\n encrypted_string = encrypter.encrypt(file_string, 123...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Unpack a bytearray from chunks of n bytes
def unpack(chunks, size): bts = bytearray() for c in chunks: b = [] for _ in range(size): b.append(c & 0xff) c = c >> 8 # bts.extend(filter(lambda a: a != 0, reversed(b))) bts.extend(reversed(b)) return bts
[ "def read_bytes(iterable, n=0):\n\n iterator = iter(iterable)\n value = bytearray()\n \n for i in range(n):\n \n nextByte = next(iterator)\n \n if isinstance(nextByte, int):\n value.append(nextByte)\n elif isinstance(nextByte, bytes):\n value += n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given a column definition, return the bigquery SchemaField object
def build_schema_field(column): mode = column.get('mode', 'NULLABLE') if column['type'] != "RECORD": return bigquery.schema.SchemaField(column['name'], column['type'], mode) fields = set([build_schema_field(field) for field in column['fields']]) return bigquery.schema.SchemaField(column['name'],...
[ "def column_to_bq_schema(self) -> SchemaField:\n kwargs = {}\n if len(self.fields) > 0:\n fields = [field.column_to_bq_schema() for field in self.fields]\n kwargs = {\"fields\": fields}\n\n return SchemaField(self.name, self.dtype, self.mode, **kwargs)", "def get_column_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given a document and a set of column definitions, return the dictionary representing the row to upload to bigquery that contains only the fields matching the column definitions
def get_row(document, columns): row_to_upload = {} for column in columns: try: mongo_field = column.get('mongo_field') if not mongo_field: mongo_field = column.get('name') if column['type'] != "RECORD": if mongo_field == '_id': ...
[ "def columns(self, column_list: list[str], documents: list[Document], ):\n filtered_list = []\n for document in documents:\n filtered_dict = {k: v for k, v in document.items() if k in column_list}\n filtered_list.append(Document(filtered_dict, document.doc_id))\n return fi...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given the set of rows to upload, upload them to the specified bigquery table
def upload_rows(bigquery_client, bigquery_table, rows_to_upload, collection_to_watch, operation_type): for row in rows_to_upload: row['time_archived'] = time.time() row['operationType'] = operation_type errors = bigquery_client.create_rows(bigquery_table, rows_to_upload, skip_invalid_rows=True) ...
[ "def upload_bq(bq_project, bq_dataset, table_name,gsc_schemas,bq_tmp_file,cl,bq_dataset_location,bq_check,bq_alert_empty,\n bq_alert_callback,script_file):\n\n\n # create the configuration for an upload job\n final_table_name = u\"%s.%s.%s\" % (bq_project, bq_dataset, table_name)\n jc = bigquer...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given a Mongo collection, determine the schema that allows the most data to be uploaded to BigQuery. Returns the schema; always sets _id as the primary key.
def construct_schema(collection): columns_dict = {} columns = [] for row in collection.find(): for field in row.keys(): field_type = get_type(field, row[field]) if field not in columns_dict.keys(): columns_dict[field] = field_type else: ...
[ "def get_schema_for_doc(doc_id, path_only=False):\n (coll_name, _) = doc_id.split(\"/\")\n return get_schema(\"collection\", coll_name, path_only)", "def __init__(self, name, data=None, schema=None, **kwargs):\n self._name = name\n self._kwargs = kwargs\n conn = self._get_connection()\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
For a record definition, remove all the invalid fields, otherwise return itself
def remove_invalid_fields(field): if field.get('type', 'INVALID') == 'RECORD': field['fields'] = [remove_invalid_fields(subfield) for subfield in field['fields'] if subfield.get('type', 'INVALID') != 'INVALID'] field['fields'] = [subfield for subfield in field['fields'] if subfield['type'] != 'RECOR...
[ "def clean_rec (self, r):\n\t\t# new rec to store info in & assoc errors\n\t\tnew_rec = {}\n\t\terrors = []\n\n\t\tself.pre_process (r, new_rec)\n\n\t\tfor cr in self.rules:\n\t\t\t# get values from the source fields\n\t\t\tvals = cr.get_src_vals_from_rec (r)\n\n\t\t\t# abort all vals are in cleaner or rule nulls\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given a mongo field name, make one bigquery is happy with
def get_bq_name(mongo_field): return ''.join([ch for ch in mongo_field if ch.isalnum() or ch == '_'])
[ "def get_bigquery_sanitized_field_name(field_name):\n # type: (str) -> str\n assert field_name # field_name must not be empty by this stage.\n if not re.match('[a-zA-Z]', field_name[0]):\n field_name = _FALLBACK_FIELD_NAME_PREFIX + field_name\n return re.sub('[^a-zA-Z0-9_]', '_', field_name)", "def _get_f...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
return a single die roll of an n_faces die.
def roll_die(n_faces: int = 6): return int(random.random() * n_faces) + 1
[ "def roll_dice(number_of_faces:int, repetitions:int):\r\n pass", "def roll_dice():\r\n die1 = random.randrange(1, 7)\r\n die2 = random.randrange(1, 7)\r\n return (die1, die2) # pack die face values into a tuple\r", "def roll_die(num_sides):\r\n result = random.randrange(0, num_sides) + 1...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given sequences to count and samples to try, return probabilities of simulated outcomes.
def calculate_probabilities( sequences_of_note: Tuple[List[int]], n_samples: int ): seq_len = len(sequences_of_note[0]) if not all([ len(seq) == seq_len for seq in sequences_of_note ]): raise RuntimeError( 'sequences_of_note must all be the same length' ) def...
[ "def test_probability_by_state_sequence(self):\n observations = [0,1,1]\n probabilities = Algs.analysis_of_state_sequences(self.model3, observations)\n total_probability = sum(prob for sequence, prob in probabilities)\n self.assertAlmostEquals(total_probability,\n Algs.probabi...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Looks at a status, compares it to a cache of known urls and text-only Tweets. Returns either the status, or None if the status has been seen before.
def consider_status(status, cache, cache_length=604800, expand_fn=None): logger = logging.getLogger("twitterdedupe.consider_status") if expand_fn is None: expand_fn = lengthen_url if len(status.entities['urls']) == 0: # Hey there's only text here key = str(hash("%s.%s" % (status.user...
[ "def fetch(account):\n \n import twitterapi\n import rfc822\n import datetime\n items_existing = 0\n items_created = 0\n twitterapi = twitterapi.Api()\n \n # get the latest tweet we already have\n if TwitterStatus.objects.count() > 0:\n latest_id = TwitterStatus.objects.latest()...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tests the load ratings function
def testloadratings(ratingstablename, filepath, openconnection, rowsininpfile): MyAssignment.loadratings(ratingstablename, filepath, openconnection) # Test 1: Count the number of rows inserted with openconnection.cursor() as cur: cur.execute('SELECT COUNT(*) from {0}'.format(RATINGS_TABLE)) ...
[ "def test_get_ratings(self):\n self.base_method()\n response = self.client.get(self.url, format='json')\n assert response.status_code == 200", "def test_import(self):\n path = os.path.dirname('')\n vote_history = os.path.abspath(\n 'import_ratings/tests/test_d...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tests the round robin partitioning for Completeness, Disjointness and Reconstruction
def testroundrobinpartition(ratingstablename, numberofpartitions, openconnection, robinpartitiontableprefix, partitionstartindex): try: MyAssignment.roundrobinpartition(ratingstablename, numberofpartitions, openconnection) except Exception: # ignore any exceptions rai...
[ "def test_move_partition_rg_imbalanced(self):\n assert not self.move_partition_valid(0, 1, 3)", "def test_random_partition(self):\n stirling = spn.utils.Stirling()\n for num_subsets in range(1, len(TestPartition.test_set) + 1):\n # Run test for various num_subsets\n with...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tests the roundrobin insert function by checking whether the tuple is inserted in the expected table you provide
def testroundrobininsert(ratingstablename, userid, itemid, rating, openconnection, expectedtablename): try: MyAssignment.roundrobininsert(ratingstablename, userid, itemid, rating, openconnection) except Exception: # ignore any exceptions raised by function pass if not testrangerobini...
[ "def testrangeinsert(ratingstablename, userid, itemid, rating, openconnection, expectedtablename):\n try:\n MyAssignment.rangeinsert(ratingstablename, userid, itemid, rating, openconnection)\n except Exception:\n # ignore any exceptions raised by function\n pass\n if not testrangerobin...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tests the range insert function by checking whether the tuple is inserted in the expected table you provide
def testrangeinsert(ratingstablename, userid, itemid, rating, openconnection, expectedtablename): try: MyAssignment.rangeinsert(ratingstablename, userid, itemid, rating, openconnection) except Exception: # ignore any exceptions raised by function pass if not testrangerobininsert(expe...
[ "def test_insert_number_range(self):\n value = 122\n temperature = temperature_record.TempTracer()\n result = temperature.insert(value)\n self.assertFalse(result)", "def test_insert_rows(employees):\n df = insrow(employees, pos=1, values=['Paula', 23, 35])\n assert list(df.index)...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This function is supposed to merge all strings in the list and return the merged string
def mergeList(l): res = "" for x in l: res += x return res
[ "def concat_list(str_list):\r\n new_string = '' #this empty string will fill up with\r\n # strings\r\n for component in str_list:\r\n new_string += component + ' '#a new component is added to\r\n #the string every ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Initializes starting values for the initial state distribution pi and the state transition matrix A. A and pi are initialized with random starting values which satisfy the summation and non-negativity constraints.
def initialize(n_states, x): seed = 5340 np.random.seed(seed) pi = np.random.random(n_states) A = np.random.random([n_states, n_states]) # We use softmax to satisify the summation constraints. Since the random # values are small and similar in magnitude, the resulting values are close # to...
[ "def _initialize_state_vector(self):\n np.random.seed(self.seed)\n self.initial_state = [0.0] * self.num_state_variables", "def rand_init_state(self):\n state = np.random.random((self.lattice, self.lattice))\n state[state >= 0.5] = 1\n state[state < 0.5] = -1\n return sta...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Fit HMM parameters to observed data using the Baum-Welch algorithm
def fit_hmm(x_list, n_states): # We randomly initialize pi and A, and use k-means to initialize phi # Please do NOT change the initialization function since that will affect # grading pi, A, phi = initialize(n_states, x_list) """ YOUR CODE HERE Populate the values of pi, A, phi with the corre...
[ "def fit_predict(self,dataSet):\n\t\tnp.random.seed()\n\t\tn = dataSet.shape[0] # total number of data\n\n\t\tHMM_data = pp.scale(dataSet[[\"Var\",\"Gradient\"]])\n\n\t\tif self.nc == 3:\n\t\t\t# initialize transition matrix\n\t\t\ttransmat = np.zeros((3, 3))\n\t\t\ttransmat[0, 1] = 3.0/n\n\t\t\ttransmat[0, 0] = 1....
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the value inside a nested structure of data located at a period-delimited path. When traversing a list, as long as that list contains objects of type dict, items in that list will have their "name" and "type" values tested against the current key in the path.
def dict_get_path(data, path, default=None): keys = path.split(".") for k in keys: if type(data) == list: found = False for item in data: name = item.get("name", item.get("type")) if name == k: found = True ...
[ "def get_value_by_path(data, path):\n\n if not isinstance(data, dict) or path == '':\n return None\n\n value_keys = path.split('.')\n result = data\n\n for key in value_keys:\n if key in result.keys():\n result = result[key]\n else:\n result = None\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Measure CPU consumption of the firefox-bin process
def measure_cpu(url): print (url) os.system('echo %s >> ./cpu-usage' % url) cpu_list = [] # Loop for i in range(90): cpu = os.popen("top -n 1 | grep /usr/lib/chromium/chrome | head -1 | awk '{print $8;}'").read().strip() time.sleep(1) cpu_list.append(cpu) os.system('echo ...
[ "def monitor_cpu():\n return psutil.cpu_percent(interval=0.4)", "def monitor_cpu(self) -> None:\n last_write = 0\n _ = psutil.cpu_percent()\n _ = self.process.cpu_percent()\n system_usage = list()\n process_usage = list()\n\n process_cpu_times = self.process.cpu_times(...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Recover original data for each point of the sequence.
def recover_original_data(idx_seq, dataset): recover = dataset[idx_seq] return recover
[ "def reset(self):\r\n self.array=self.original\r\n self.original=list(self.original)\r\n return self.array", "def test_prep_recover_data(self):\n u = np.random.rand(12, 17, 73, 144)\n up, uinfo = prep_data(u, 'tzyx')\n ur = recover_data(up, uinfo)\n err = error(u, ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
van der Pol equation
def vanDerPol(): pars = {'eps': 1.0, 'a': 0.5} dsargs = { 'name': 'vanDerPol', 'pars': pars, 'varspecs': { 'x': '(y - (x * x * x / 3 - x)) / eps', 'y': 'a - x', }, 'ics': { 'x': pars['a'], 'y': pars['a'] - pow(pars['a'], 3)...
[ "def _lorentz(x,p,w):\n return 1./(1.+((p-x)/(w/2.))**2)", "def vinvpol(x, t, p):\n\t\n u, v, w, q = x\n mu1, a1, b1, c1, e1, mu2, a2, b2, c2, e2 = p\n\n #The velocity function v = d(u,v,w,q)/dt:\n vel = [2*mu1*u + 2*a1*u**2 + 2*b1*u*v + c1*w,\n \t 2*mu2*v + 2*a2*u*v + 2*b2*v**2 + c2*w,\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function that resets the saved users file
def reset_user_saved_file(): with open("./assets/vegan_cosmetics_saved.txt", "w") as file: file.write("")
[ "def reset(self):\n\t\tf = open(self.file_path, \"w+\")\n\t\tf.close()", "def reset(self):\n if self.path.exists():\n warn_msg =(f\"WARNING:\\n \"\n f\"This will overwrite file at {self.path}...\\n \"\n f\"Type 'DELETE' to continute.\")\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function that resets the users favorite list
def reset_user_fav_list(): user_fav_list = [] return user_fav_list
[ "def rm_favourite():\n\n user_id = request.args['user_id']\n photo_id = request.args['photo_id']\n\n remove_favourite(user_id, photo_id)\n\n flash(\"Picture was deleted from your favourites!\")\n return redirect(url_for(\"favourites\"))", "def cleanTurnOffAllFavoriteChannels(self):\n self.rc...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function takes in user input on whether or not they want to order anything from our store; if they do, it brings in the search products function and allows them to order. If they don't, the program will quit. Also covers the edge case: if they enter the wrong input, it will ask them the question again.
def user_input(user_fav_list = []): order_now = input(dedent( ''' Would you like to see anything from our store catalog (y/n) Or would you like to quit (q)? ''')) if order_now == 'y': search_product(user_fav_list) elif order_now == 'n': grab_saved_product() elif order_now == 'q': print("*" ...
[ "def search_product(user_fav_list=[]):\n print(dedent(\n '''\n These are the categories and individual products available:\n\n Eye Vegan Products: mascara, eye shadow, liner\n Lip Vegan Products: lip products, liner, pencil\n Face Vegan Products: cream, moisturizer, bronzer, foundation, blush, prime...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Ask the user what they would like to order, then plug the input into the find_search_product helper function, which searches the products in the database based on regex.
def search_product(user_fav_list=[]): print(dedent( ''' These are the categories and individual products available: Eye Vegan Products: mascara, eye shadow, liner Lip Vegan Products: lip products, liner, pencil Face Vegan Products: cream, moisturizer, bronzer, foundation, blush, primer Nail V...
[ "def test_search_by_product_name(self):\n # Test data\n product_title = 'Final Fantasy XV - Xbox One'\n product_price = '$19.99'\n product_rating = '4.6'\n\n self.search_controller.open_search_page()\n self.search_controller.dismiss_subscribe_modal()\n self.search_co...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The function has a dictionary of regex terms; it iterates through a list of data and, if the regex matches the search, appends the matching items to user_fav_list.
def find_search_product(search_word, user_fav_list): regex_dict = {'mascara':'\w*.ascara\w*', 'foundation': '\w*.oundation\w*', 'eye shadow': '\w*.hadow\w*', 'lip products': '\w*.ip\w*', 'bronzer': '\w*.onzer\w*', 'liner': '\w*[Ll]iner\w*', 'pencil' : '\w*.encil', 'blush' : '\w*.lush', 'cream' : '\w*.ream\w*', 'mo...
[ "def search_product(user_fav_list=[]):\n print(dedent(\n '''\n These are the categories and individual products available:\n\n Eye Vegan Products: mascara, eye shadow, liner\n Lip Vegan Products: lip products, liner, pencil\n Face Vegan Products: cream, moisturizer, bronzer, foundation, blush, prime...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the amount of shared memory in bytes consumed in a single stage of a kernel.
def calculate_smem_usage_per_stage(td: TileDescription, operation_kind: cutlass.OperationKind) -> int: m, n, k = td.threadblock_shape if operation_kind == cutlass.OperationKind.Gemm: stage_barrier_bytes = 32 return ( (DataTypeSize[td.math_instruction.element_a] * m * k // 8) ...
[ "def shared_memory_size(data_buffers=None):\n\n shared_size = 0\n\n if data_buffers is None:\n data_buffers = inject.get_injectable(\"data_buffers\", {})\n\n for k, data_buffer in data_buffers.items():\n if isinstance(data_buffer, str) and data_buffer.startswith(\"sh.Dataset:\"):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Checks whether a device with `cc` supports the number of stages within `tile_description`, both based on raw limits on the number of stages and based on shared memory capacity
def valid_stage_count( cc: int, td: TileDescription, element_C: cutlass.DataType = None, element_D: cutlass.DataType = None) -> tuple: if cc == 90: if (td.stages is None or td.stages == 0): # Stage count of None or 0 for SM90 indicates that the CollectiveBuilder automatically ...
[ "def has_available_build_slots(date_check, production_unit):\n capacity = Capacity.objects.get_restore_or_create(day=date_check, production_unit=production_unit, defaults={'capacity': 0}).capacity\n order_count = Order.objects.filter(\n build__build_date=date_check,\n build__build_order__product...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Checks whether a device with `cc` supports a thread block cluster of shape `cluster_shape`.
def valid_cluster_shape(cc: int, cluster_shape: list) -> tuple: if cc < 90: if cluster_shape != [1, 1, 1]: return (False, f"Cluster shape for pre-SM90 architectures must be [1, 1, 1]. Received cluster shape of " f"{cluster_shape} for SM{cc}.") els...
[ "def _has_clusters(self):\n return self.cluster_column in self.data.df.columns", "async def do_check_clusters(self, clusters):\n raise NotImplementedError", "def is_clustering_valid(clustering_model: Type[Clustering]) -> bool:\n n_labels = len(set(clustering_model.model.labels_)) # type: ignor...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Checks that the kernel and epilogue schedules passed in are a valid combination for a device of compute capability ``cc``.
def valid_schedule( cc: int, kernel_schedule: cutlass.KernelScheduleType, epilogue_schedule: cutlass.EpilogueScheduleType, tile_scheduler: cutlass.TileSchedulerType) -> tuple: kernel_auto = (kernel_schedule == cutlass.KernelScheduleType.ScheduleAuto) epilogue_auto = (epilogue_schedule == cutlass...
[ "def is_valid(schedule: dict[str, tuple[str, str, tuple]]) -> bool:\n # Gives all the values of the dictionary\n sc_sections = [schedule[key] for key in schedule]\n return all([not sections_conflict(x, y) for x in sc_sections for y in sc_sections if x is not y])", "def _assert_ecg_input(ecg_processor: \"...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns `alignment_provided` if it is set, otherwise `default_alignment` and checks that `alignment_provided` does not exceed `default_alignment`.
def alignment_or_default(alignment_provided: int, default_alignment: int) -> int: if alignment_provided is not None: if alignment_provided > default_alignment: raise Exception(f"Alignment {alignment_provided} exceeds the maximum supported of {default_alignment}.") return alignment_provid...
[ "def update_alignment(alignment_provided:int, default_alignment: int) -> int:\n if alignment_provided is not None:\n if alignment_provided > default_alignment:\n if alignment_provided % default_alignment == 0:\n return default_alignment\n raise Exception(f\"Alignment {...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns `alignment_provided` if it is set, otherwise `default_alignment` and checks that `alignment_provided` does not exceed `default_alignment`.
def update_alignment(alignment_provided:int, default_alignment: int) -> int: if alignment_provided is not None: if alignment_provided > default_alignment: if alignment_provided % default_alignment == 0: return default_alignment raise Exception(f"Alignment {alignment_p...
[ "def alignment_or_default(alignment_provided: int, default_alignment: int) -> int:\n if alignment_provided is not None:\n if alignment_provided > default_alignment:\n raise Exception(f\"Alignment {alignment_provided} exceeds the maximum supported of {default_alignment}.\")\n return align...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Kafka producer that receives avro schema in record header.
def producer_header(sdc_builder, topic, cluster, confluent): builder = sdc_builder.get_pipeline_builder() builder.add_error_stage('Discard') dev_raw_data_source = builder.add_stage('Dev Raw Data Source') dev_raw_data_source.set_attributes(data_format='JSON', raw_d...
[ "def producer_inline(sdc_builder, topic, cluster, confluent):\n builder = sdc_builder.get_pipeline_builder()\n builder.add_error_stage('Discard')\n\n dev_raw_data_source = builder.add_stage('Dev Raw Data Source')\n dev_raw_data_source.set_attributes(data_format='JSON',\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Kafka producer that receives the avro schema in the pipeline configuration.
def producer_inline(sdc_builder, topic, cluster, confluent): builder = sdc_builder.get_pipeline_builder() builder.add_error_stage('Discard') dev_raw_data_source = builder.add_stage('Dev Raw Data Source') dev_raw_data_source.set_attributes(data_format='JSON', raw_d...
[ "def producer_header(sdc_builder, topic, cluster, confluent):\n builder = sdc_builder.get_pipeline_builder()\n builder.add_error_stage('Discard')\n\n dev_raw_data_source = builder.add_stage('Dev Raw Data Source')\n dev_raw_data_source.set_attributes(data_format='JSON',\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Kafka producer that receives the avro schema from the schema registry (must exist before the pipeline run).
def producer_registry(sdc_builder, topic, cluster, confluent): builder = sdc_builder.get_pipeline_builder() builder.add_error_stage('Discard') dev_raw_data_source = builder.add_stage('Dev Raw Data Source') dev_raw_data_source.set_attributes(data_format='JSON', raw...
[ "def save_avro_schema_stream(df: DataFrame, epochid: int, schema_path=None):\n save_avro_schema(df, schema_path)", "def producer_inline(sdc_builder, topic, cluster, confluent):\n builder = sdc_builder.get_pipeline_builder()\n builder.add_error_stage('Discard')\n\n dev_raw_data_source = builder.add_sta...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns an image that has been flipped through the vertical axis _Author_ = Jackie So (101153622) >>>image = load_image(choose_file()) >>>flip_vertical(image)
def flip_vertical(image: Image) -> Image: flipped_image = copy(image) width = get_width(image) for y in range(get_height(flipped_image)): for x in range(get_width(flipped_image)): new_color = get_color(image, width - y - 1, y) set_color(flipped_image, width - y- 1, y...
[ "def vertical_flip_image(image: Image) -> Image:\n return Image(image.size, vertical_flip_pixels(image.pixels))", "def vertical_flip(self):\n\n im = self._image.as_numpy_array()\n self._image.im_representation = np.flipud(im)", "def verticalFlip(self):\r\n self.ser.write('A')\r\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This function will spawn a thread and run the given function using the args, kwargs and return the given default value if the timeout_duration is exceeded.
def timeout(func, args=(), kwargs={}, timeout_duration=10, default=None, log=None): class InterruptableThread(threading.Thread): def __init__(self): threading.Thread.__init__(self) self.result = default def run(self): self.result = func(*args, **kwar...
[ "def _timeout(func, args=(), kwargs={}, timeout_duration=10, default=None): \r\n import threading\r\n class InterruptableThread(threading.Thread):\r\n def __init__(self):\r\n threading.Thread.__init__(self)\r\n self.result = default\r\n\r\n def run(s...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the nearest perfect square that is less than or equal to num
def nearest_square(num): root=0 while (root+1)**2<=num: root+=1 return root**2
[ "def nearest_square(number):\n\tif math.sqrt(number)%1 == 0:\n\t\treturn number\n\telse:\n\t\tflag = True\n\t\tlower_number = number -1\n\t\twhile flag:\n\t\t\tif math.sqrt(lower_number)%1 == 0:\n\t\t\t\tflag = False\n\t\t\telse: lower_number -= 1\n\n\t\treturn lower_number", "def min_square(n):\n return int(n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get ParallelCluster bucket name.
def get_bucket_name(account_id, region): return "-".join( [ "parallelcluster", S3Bucket.generate_s3_bucket_hash_suffix(account_id, region), PCLUSTER_S3_BUCKET_VERSION, "do", "not", "delete", ]...
[ "def get_bucket_name(bucket):\n return _objstore_backend.get_bucket_name(bucket)", "def cluster_name(self):\n return self.base_config.cluster_name if hasattr(self.base_config, \"cluster_name\") else None", "def s3_bucket_name():\n if is_local_env():\n return LOCAL_BUCKET_NAME\n\n # ge...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generate 16 characters hash suffix for ParallelCluster s3 bucket.
def generate_s3_bucket_hash_suffix(account_id, region): return hashlib.sha256((account_id + region).encode()).hexdigest()[0:16]
[ "def get_bucket_name(account_id, region):\n return \"-\".join(\n [\n \"parallelcluster\",\n S3Bucket.generate_s3_bucket_hash_suffix(account_id, region),\n PCLUSTER_S3_BUCKET_VERSION,\n \"do\",\n \"not\",\n \"...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a new S3 bucket.
def create_bucket(self): AWSApi.instance().s3.create_bucket(bucket_name=self.name, region=self.region)
[ "def create_bucket(name):\r\n s3.create_bucket(Bucket=name)", "def create_bucket():\n\n s3 = session.resource('s3')\n\n try:\n s3.create_bucket(Bucket=f\"lambda-source-{os.environ['AWS_ACCOUNT']}\", ACL='private')\n print('Created S3 bucket!')\n\n except Exception as e:\n print(f\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Configure s3 bucket to satisfy pcluster setting.
def configure_s3_bucket(self): AWSApi.instance().s3.put_bucket_versioning(bucket_name=self.name, configuration={"Status": "Enabled"}) AWSApi.instance().s3.put_bucket_encryption( bucket_name=self.name, configuration={"Rules": [{"ApplyServerSideEncryptionByDefault": {"SSEAlgorithm"...
[ "def _set_s3(self):\n logger.info(\"Setting up s3 ...\")\n\n cluster_name_id = AXClusterId().get_cluster_name_id()\n\n self._bucket_name = AXClusterDataPath(cluster_name_id).bucket()\n self._bucket = Cloud().get_bucket(self._bucket_name)\n artifact_prefix = AXClusterDataPath(clust...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get object key of an artifact.
def get_object_key(self, object_type: S3FileType, object_name): return "/".join([self.artifact_directory, object_type.value, object_name])
[ "def key(obj):\n try:\n return obj.key()\n except AttributeError:\n return obj", "def get_key(bucket, obj):\n\n key = bucket.get_key(obj)\n if not key or not key.exists():\n msg = _(\"Could not find key %(obj)s in bucket %(bucket)s\") % locals()\n logger.error(msg)\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Clean up the S3 bucket artifact directory.
def delete_s3_artifacts(self): LOGGER.debug( "Cleaning up S3 resources bucket_name=%s, service_name=%s, remove_artifact=%s", self.name, self._service_name, self._cleanup_on_deletion, ) if self.artifact_directory and self._cleanup_on_deletion: ...
[ "def s3cleanup(request):\n s3interface = S3Interface()\n\n deleted = s3interface.delete_all_images()\n print('Deleted %d object(s) from S3 bucket \"%s\" using prefix \"%s\"' % (\n len(deleted), s3interface.bucket_name, s3interface.prefix))", "def _delete_file_from_s3(self, artifact_id, bucket, key...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Upload the bootstrapped file to mark that the bucket has been configured successfully.
def upload_bootstrapped_file(self): AWSApi.instance().s3.put_object( bucket_name=self.name, body="bucket is configured successfully.", key="/".join([self._root_directory, self._bootstrapped_file_name]), )
[ "def object_upload():\n # SELECT BUCKET\n if not (bucket := select_bucket('Which bucket would you like to upload the file to: ')):\n input('Invalid bucket. Press enter to go back to the main menu.')\n return\n\n # SELECT FILE\n my_file = Path(input('What is the full path to the file you wi...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check whether the bucket has been configured successfully by looking for the bootstrapped file.
def check_bucket_is_bootstrapped(self): AWSApi.instance().s3.head_object( bucket_name=self.name, object_name="/".join([self._root_directory, self._bootstrapped_file_name]) )
[ "def check_bundle():\n s3 = boto3.client('s3', region_name=os.environ['TF_VAR_aws_region'])\n\n try:\n s3.get_object(\n Bucket=os.environ['TF_VAR_elastic_beanstalk_s3_bucket'],\n Key=os.environ['TF_VAR_elastic_beanstalk_s3_key']\n )\n except botocore.exceptions.ClientErr...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Upload config file to S3 bucket.
def upload_config(self, config, config_name, format=S3FileFormat.YAML): return self.upload_file(file_type=S3FileType.CONFIGS, content=config, file_name=config_name, format=format)
[ "def upload_to_s3(file_name, bucket, object_name):\n print(file_name, bucket, object_name)\n s3_client = boto3.client('s3')\n response = s3_client.upload_file(file_name, bucket, object_name)\n return response", "def uploadFileToS3(self, filename):\n destDir = '' # Root folder of the S3 bucket\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Upload CloudFormation template to S3 bucket.
def upload_cfn_template(self, template_body, template_name, format=S3FileFormat.YAML): return self.upload_file( file_type=S3FileType.TEMPLATES, content=template_body, file_name=template_name, format=format )
[ "def object_upload():\n # SELECT BUCKET\n if not (bucket := select_bucket('Which bucket would you like to upload the file to: ')):\n input('Invalid bucket. Press enter to go back to the main menu.')\n return\n\n # SELECT FILE\n my_file = Path(input('What is the full path to the file you wi...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Upload custom resources to S3 bucket.
def upload_resources(self, resource_dir, custom_artifacts_name): for res in os.listdir(resource_dir): path = os.path.join(resource_dir, res) if os.path.isdir(path): AWSApi.instance().s3.upload_fileobj( file_obj=zip_dir(os.path.join(resource_dir, res)),...
[ "def upload_to_s3(file_name, bucket, object_name):\n print(file_name, bucket, object_name)\n s3_client = boto3.client('s3')\n response = s3_client.upload_file(file_name, bucket, object_name)\n return response", "def upload(src, dest_bucket, dest_object):\n # TODO\n pass", "def object_upload():\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get an S3 presigned URL for the config file.
def get_config_presigned_url(self, config_name: str, version_id=None): return AWSApi.instance().s3.create_presigned_url( self.name, self.get_object_key(S3FileType.CONFIGS, config_name), version_id )
[ "def get_config_s3_url(self, config_name):\n return self._get_file_s3_url(file_name=config_name, file_type=S3FileType.CONFIGS)", "def _get_s3_presigned_url(input_json):\n url = input_json['url']\n return url['scheme']+'://'+url['host']+url['path']+'?'+url['query']", "def get_presigned_get_u...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the config file's S3 URL path in the S3 bucket.
def get_config_s3_url(self, config_name): return self._get_file_s3_url(file_name=config_name, file_type=S3FileType.CONFIGS)
[ "def _get_s3_url(self):\n return 'https://' + CLUSTER_VIP + ':' + COHESITY_S3_PORT", "def get_s3_object_url(bucket_name, file_name,):\n\n session = boto3.session.Session()\n current_region = session.region_name\n url = \"https://{}.s3.{}.amazonaws.com/{}\".format(str(bucket_name),\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get cfn template from S3 bucket.
def get_cfn_template(self, template_name, version_id=None, format=S3FileFormat.YAML): return self._get_file( file_type=S3FileType.TEMPLATES, file_name=template_name, version_id=version_id, format=format )
[ "def get_cfn_template_url(self, template_name):\n return self._get_file_url(file_type=S3FileType.TEMPLATES, file_name=template_name)", "def get_cloudformation_template(cfn_client, stack_name):\n\n response = cfn_client.get_template(StackName=stack_name)\n return response[\"TemplateBody\"]", "def s3...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get cfn template http url from S3 bucket.
def get_cfn_template_url(self, template_name): return self._get_file_url(file_type=S3FileType.TEMPLATES, file_name=template_name)
[ "def get_s3_object_url(bucket_name, file_name,):\n\n session = boto3.session.Session()\n current_region = session.region_name\n url = \"https://{}.s3.{}.amazonaws.com/{}\".format(str(bucket_name),\n current_region, str(file_name))\n return url", "def...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Parse s3 url to get bucket name and object name.
def parse_bucket_url(url): match = re.match(r"s3://(.*?)/(.*)", url) if match: bucket_name = match.group(1) object_key = match.group(2) object_name = object_key.split("/")[-1] else: raise Exception("Invalid S3 url: {0}".format(url)) return {"bucket_name": bucket_name, "o...
[ "def parse_s3_url(s3url):\n parsed_url = urlparse(s3url)\n \n if not parsed_url.netloc:\n raise AirflowException('Please provide a bucket_name instead of \"{s3url}\"'.format(s3url=s3url))\n\n bucket_name = parsed_url.netloc\n key = parsed_url.path.strip('/')\n\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return content formatted by the given S3 file format. If the format is not in the S3FileFormat enum, return the content without any formatting.
def format_content(content, s3_file_format: S3FileFormat): if s3_file_format == S3FileFormat.YAML: return yaml.dump(content) elif s3_file_format == S3FileFormat.JSON: return json.dumps(content) elif s3_file_format == S3FileFormat.MINIFIED_JSON: return json.dumps(content, separators=(...
[ "def getS3FileContent(Bucket=None, Key=None, Size=None, Date=None):\n try:\n DATA = str() # return variable\n FLAG = None # return variable\n BUCKET = Bucket\n KEY = Key\n SIZE = int(Size)\n LLIMIT = int(1024*1024*1024*12) # x\n ULIMIT = int(1024*1024*10...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Simple t' bin loop of the 'addComaValueForZeroMode(...)' method
def addComaValueForZeroMode(self, val, unitsOf = 'smallestComaValue'): for tb in self.bins: tb.addComaValueForZeroMode(val, unitsOf = unitsOf)
[ "def setZeroModeParameters(self, zmp):\n\t\tif not len(zmp) == len(self.bins):\n\t\t\traise IndexError(\"Mismatch in number of t' bins\")\n\t\tfor i,pp in enumerate(zmp):\n\t\t\tself.bins[i].setZeroModeParameters(pp)", "def zero(self):\n for i in range(len(self.b)):\n self.b[i] = 0", "def bina...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return NDF as list of (NDF, 2nZero, 2nFunc, nPar) (Simple t' bin loop)
def getNDF(self): retVal = [] for tb in self.bins: retVal.append(tb.getNDF()) return retVal
[ "def get_freqs(Fs, n):\n\n return np.linspace(0, Fs / 2, int(n / 2 + 1))", "def _get_nc2nps_fields_tupple(fieldList, date, metDataTopdir):\n ret_list = []\n for fld in fieldList:\n #mf = get_met_field(fieldName, metDataTopdir)\n # TODO : units and description are not in the metfield, they a...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Removes ALL correlations from the covariance matrix (Simple t' bin loop)
def removeAllCorrelations(self): for tb in self.bins: tb.removeAllCorrelations()
[ "def clear_over_correlated_columns(self):\n removed_cols = []\n corr_list = []\n col_list = list(combinations(self.cont_cols,2))#Gets all combinations of all continuous columns in group sizes of two\n for col1,col2 in col_list:\n print(f\"OVER CORR TEST FOR {col1} {col2}\")\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }