query
stringlengths
9
9.05k
document
stringlengths
10
222k
negatives
listlengths
19
20
metadata
dict
execute the remove_all_blocks command
def _do_remove_all_blocks(self, args): bus_type = args[1] slave_id = int(args[2]) if bus_type == 'rtu': slave = self.server._servers[0].get_slave(slave_id) elif bus_type == 'tcp': slave = self.server._servers[1].get_slave(slave_id) slave.remove_all_...
[ "def clean_blocks(blocks):\n \n for block in blocks:\n block.empty()", "def deleteblocks():\n for root, directories, files in os.walk('./index_blocks'):\n for f in files:\n os.unlink(os.path.join(root, f))\n for directory in directories:\n shutil.rmtree(os.path....
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
install a function as a hook
def _do_install_hook(self, args): hook_name = args[1] fct_name = args[2] hooks.install_hook(hook_name, self._hooks_fct[fct_name])
[ "def _install(cls):\n if not cls._hook:\n cls._hook = cls()\n cls._insert_hook()", "def register_hook(self, hook):\n self.hook = hook", "def install_hook():\n _hooks.append(uncaught_hook)\n sys.excepthook = hook", "def _execute_pre_hook(self, context, func_name, *args...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
uninstall a function as a hook. If no function is given, uninstall all functions
def _do_uninstall_hook(self, args): hook_name = args[1] try: hooks.uninstall_hook(hook_name) except KeyError as exception: LOGGER.error(str(exception))
[ "def _uninstall(cls):\n if cls._hook:\n sys.meta_path.remove(cls._hook)\n cls._hook = None", "def uninstall_hook(self):\n\n if self.is_hooked is None:\n return\n self.user32.UnhookWindowsHookEx(self.is_hooked)\n self.is_hooked = None", "def uninstall_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
change the verbosity of the server
def _do_set_verbose(self, args): verbose = int(args[1]) self.server.set_verbose(verbose) return "%d" % verbose
[ "def verbosity(v):\n assert v in [0,1,2] # debug, warn, info\n GLOBAL['VERBOSITY'] = v", "def setVerbosity(self, level):", "def set_verbosity(level):\n global verbosity\n verbosity = level", "def setVerbosity(self, *args):\n return _yarp.Port_setVerbosity(self, *args)", "def set_verbos...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the current version or exit the process.
def version_or_exit(path): with cd(path): versioning_file = join(os.curdir, 'versioning.py') try: get_version = run_command(versioning_file) if get_version.returncode: abort(colors.red('versioning.py') + ' returned an error.') else: ...
[ "def current_version(self):\n path = self.bin_path()\n try:\n p = subprocess.Popen(\n [self.bin_path(), '--version'],\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n )\n version, err = p.communicate()\n # TO...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Roll back the tagging that was just done and inform the user. >>> rollback('not_a_tag')
def rollback(tag): done = run_command(['git', 'tag', '-d', tag]) if done.returncode: echo.bold(colors.red(str(done))) sys.exit(done.returncode) echo.cyan('Done:', done.stdout.strip())
[ "def rollback(self):\n self.__target.rollback()", "def rollback():\n\n rollback_release()", "def rollback(self, stage, enodes, exception):", "def rollback():\n get_session().rollback()", "def rollback(self):\n\t\tif not self.inTransac:\n\t\t\tsys.stdout.write(\"NO TRANSACTION\\n\")\n\n\t\telse:...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Do a release step, possibly rolling back the tagging. >>> do_release_step('true', 'rollback_tag')
def do_release_step(command, tag, no_rollback=None): echo.cyan('running:', command) published = run_command(command) if published.returncode: echo.bold(colors.red('Failed:')) echo.yellow(published.stderr) echo.white(published.stdout) if no_rollback: echo.cyan(no_r...
[ "def rollback(delete = 'delete'):\n\n print \"Rolling Back\"\n\n with cd(\"%s/releases/\" % env.path):\n folders = run(\"ls -A | tail -2\")\n folders = folders.split('\\r\\n')\n\n if folders.count < 2:\n print \"There is no available release to rollback to\"\n return...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
menu menu page logic. displaying all the products in our DB
def menu(request): cart = cartData(request) cart_items = cart['cart_items'] # order = cart['order'] # items = cart['items'] # Get all our object products = BobaProduct.objects.all() # Dictionary to hold our products context = {"products": products, "cart_items": cart_items} return re...
[ "def product_menu(request):\n\n brands = Brand.objects.all().order_by('brand')\n categories = Category.objects.all().order_by('name')\n\n context = {\n 'brands': brands,\n 'categories': categories\n\n }\n\n return context", "def __products_menu(self):\n log.debug(\"Displaying _...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
guestChat If the user is not authenticated, they will be redirected to this site where they can input a guest name and enter the chatbox
def guestChat(request): form = GuestChat() if request.method == "POST": form = GuestChat(request.POST) if form.is_valid(): guestName = form.cleaned_data.get('guest_name') return render(request, 'chat/room.html', {"guestName": guestName}) context = {"form"...
[ "def chat():\n name = session.get('name', '')\n room = session.get('room', '')\n if name == '' or room == '':\n return redirect(url_for('.index'))\n return render_template('chat.html', name=name, room=room)", "def chat(request):\n message = '{}: {}'.format(request.form['user'], request.form[...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Computes the Wasserstein distance of order 2 between two Gaussian distributions
def wass_gaussians(mu1, mu2, Sigma1, Sigma2): d = mu1.shape[0] if d == 1: w2 = (mu1 - mu2)**2 + (np.sqrt(Sigma1) - np.sqrt(Sigma2))**2 else: prodSigmas = Sigma2**(1/2)*Sigma1*Sigma2**(1/2) w2 = np.linalg.norm(mu1 - mu2)**2 + np.trace(Sigma1 + Sigma2 - 2*(prodSigmas)**(1/2)) retur...
[ "def wasserstein2_gaussian(\n m1: np.ndarray, C1: np.ndarray, m2: np.ndarray, C2: np.ndarray\n) -> float:\n result = np.sum((m1 - m2) ** 2)\n sqrt_C2 = np.ascontiguousarray(mat_sqrt(C2))\n prod_matrix = sqrt_C2 @ C1 @ sqrt_C2\n sqrt_prod_matrix = mat_sqrt(prod_matrix)\n correction_matrix = C1 + C2...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Computes the Hilbert distance of order p
def hilbert_distance(X, Y, p=2): # We consider N_X = N_Y xordered = X[HilbertCode_caller.hilbert_order_(X.T)] yordered = Y[HilbertCode_caller.hilbert_order_(Y.T)] hilbert_dist = (np.abs(xordered - yordered) ** p).sum() hilbert_dist /= X.shape[0] hilbert_dist = hilbert_dist ** (1/p) return hi...
[ "def kl_dirichlet(q, p):\n q = np.asarray(q)\n p = np.asarray(p)\n qsum = q.sum()\n psum = p.sum()\n return (gammaln(qsum) - gammaln(psum)\n - np.sum(gammaln(q) - gammaln(p))\n + np.einsum(\"i,i->\", (q - p), (digamma(q) - digamma(qsum))))", "def HammingDistance(p, q):\r\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Computes the swapping distance
def swap_distance(X, Y, n_sweeps=10000, tol=1e-8, p=2): # We consider N_X = N_Y if p == 2: M = ot.dist(X, Y) # Cost matrix o1 = HilbertCode_caller.hilbert_order_(X.T) o2 = HilbertCode_caller.hilbert_order_(Y.T) permutation = o2[np.argsort(o1)] total_cost = list(map(lambda k: M[k, permut...
[ "def _pairwise_dist(self,s1,s2):\n\n return 0.0", "def squarredDistance(A, B):\r\n return sum((B - A)**2)", "def rebalance(dest, src, dest_center, src_center, m, dimension):\n distances = []\n to_delete = []\n for i,e in enumerate(src):\n heapq.heappush(distances, (distance(dest_center...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Cleans passed string to either return a valid SVGRGBHEXnotation or an empty string.
def cleanup_passed_color_value(s): reo = re.compile('[0-9a-f]') cannotBeCleaned = '' if s[0] == '#' and len(s) in [4,7] and reo.match(s[1:]): return s if s in colorNamesAndCodes: col = colorNamesAndCodes[s] if reo.match(col[1:]): return col else: r...
[ "def normalize_hex_str(value: str) -> str:\n\n try:\n if value[0] == \"#\":\n return value.lower()\n\n if value[0:2] in [\"0x\", \"0X\"]:\n return \"#\" + value[2:].lower()\n\n if all(c in hexdigits for c in value):\n return \"#\" + value.lower()\n except ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Prints debugging information when the script encounters an illegal color.
def print_illegal_color_format_screen( enteredBGColor, enteredFGColor, convertedBGColor, convertedFGColor ): print "" print "Error: are the passed in colors valid?" print " - passed in background-color '" + enteredBGColor + "' was converted to '" + convertedBGColor + "'." print " - passed in foregroun...
[ "def print_failure(msg):\n print RED + msg + END_COLOR", "def is_invalid():\n print(colored('Invalid input\\n', 'red', attrs=['bold']))", "def debug(str_):\n return _color_level(str_, 'debug')", "def color_debug(self):\n return self.LEVEL_COLOR['DEBUG']", "def check_color_scoping(self):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
If a tfq_simulate op is asked to simulate states given circuits acting on different numbers of qubits, the op should return a tensor padded with zeros up to the size of the largest circuit. The padding should be physically correct, such that samples taken from the padded states still match samples taken from the origin...
def test_simulate_state_output_padding(self, all_n_qubits): circuit_batch = [] for n_qubits in all_n_qubits: qubits = cirq.GridQubit.rect(1, n_qubits) circuit_batch += util.random_circuit_resolver_batch(qubits, 1)[0] tfq_results = tfq_simulate_ops.tfq_simulate_state( ...
[ "def test_sampling_output_padding(self, all_n_qubits, n_samples):\n op = tfq_simulate_ops.tfq_simulate_samples\n circuits = []\n expected_outputs = []\n for n_qubits in all_n_qubits:\n this_expected_output = np.zeros((n_samples, max(all_n_qubits)))\n this_expected_o...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check that the sampling ops pad outputs correctly
def test_sampling_output_padding(self, all_n_qubits, n_samples): op = tfq_simulate_ops.tfq_simulate_samples circuits = [] expected_outputs = [] for n_qubits in all_n_qubits: this_expected_output = np.zeros((n_samples, max(all_n_qubits))) this_expected_output[:, ma...
[ "def _is_padding_necessary(self, signal: np.array) -> bool:\n if len(signal) < self.number_expected_samples:\n return True\n else:\n return False", "def test_pad():\n x = randtool(\"float\", -10, 10, [3, 2, 1, 2])\n pad = [1, 1, 2, 3]\n mode = \"constant\"\n value =...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns new subLightCurve, choosing ndays with maximum RMS variation
def best_sublc(self, ndays, npoints=600, chunksize=300, flat_order=3, **kwargs): x_full = self.x_full y_full = self.y_full N = len(x_full) cadence = np.median(x_full[1:] - x_full[:-1]) window = int(ndays / cadence) stepsize = window//50 i1 = 0...
[ "def light_curve_peak_match_subtract(light_curve_to_subtract_from_df, light_curve_to_subtract_with_df, estimated_time_of_peak,\n max_seconds_shift=1800,\n plot_path_filename=None, verbose=False, logger=None):\n\n # Prepare the logger for verbo...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Pick an agent at random, step it, bump counts.
def step(self): self.agents[random.randint(self.get_agent_count())].step() self.steps += 1 self.time += 1
[ "def random_test_run():\n env = Reacher()\n agent = Agent(10000, action_size=4, actor_count=20, state_size=33)\n state = env.reset(train_mode=False)\n for step_idx in range(100):\n # noinspection PyUnresolvedReferences\n act_random = np.clip(np.random.randn(20, 4), -1, 1)\n step_res...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return agent's spilling state.
def spilling(self): return self._spilling
[ "def getPacmanState( self ):\n return self.data.agentStates[0].copy()", "def get_agent_state(self):\n return self.world_state", "def gsteady(self, Ppump):\n return(self.steadystate(Ppump)[1])", "def lease_state(self) -> str:\n return pulumi.get(self, \"lease_state\")", "def get_a...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Agent has been selected and a grain of sand is added to it. If the cell exceeds it capacity it add's itself to the model's spill queue. The model handles distributing the spill to adjacent cells.
def step(self): self.grains += 1 if self.grains > self.spill_size: print('spill -> ', self.agent_id) self.model.spill(self)
[ "def spill(self, agent):\n self.spill_list.append(agent)", "def step(self):\n self.age += 1\n self.move_agent()\n self.sugar -= self.metabolism\n\n # Eat sugar\n available_sugar = self.get_sugar(self.pos).amount\n self.sugar += available_sugar\n# self.total_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Add agent to model's spill queue.
def spill(self, agent): self.spill_list.append(agent)
[ "def add_to_grid_queue(self, agent):\n self.pipes[agent.grid_queue].send(\"add\")\n self.pipes[agent.grid_queue].send(agent)", "def add_to_simulation(self,agent):\n self.agents[agent.name] = agent\n self.network.add_node(agent)\n \n #agent given a grid queue at initializa...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Process spill_list and advance the model one step.
def step(self): for c in self.spill_list: self._schedule.step()
[ "def step(self):\n\n self.grains += 1\n\n if self.grains > self.spill_size:\n print('spill -> ', self.agent_id)\n self.model.spill(self)", "def spill(self, agent):\n self.spill_list.append(agent)", "def spill(self):\n if self.accumulated_dfs:\n self._...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the cost of a particular sequence of actions. If those actions include an illegal move, return 999999.
def getCostOfActions(self, actions): if actions == None: return 999999 x, y = self.getStartState() cost = 0 for action in actions: # Check figure out the next state and see whether its' legal dx, dy = Actions.directionToVector(action) x, y = int(x + dx...
[ "def getCostOfActions(self, actions):\n if actions == None:\n return 999999\n x, y = self.getStartState()\n cost = 0\n for action in actions:\n # Check figure out the next state and see whether its' legal\n dx, dy = Actions.directionToVector(action)\n x, y = int(x + dx), int(y + dy)\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Your heuristic for the FoodSearchProblem goes here. This heuristic must be consistent to ensure correctness. First, try to come up with an admissible heuristic; almost all admissible heuristics will be consistent as well. If using A ever finds a solution that is worse uniform cost search finds, your heuristic is not co...
def foodHeuristic(state, problem): position, foodGrid = state "*** YOUR CODE HERE ***" """ Mi heurística consiste en hacer simplemente el máximo de las distancias reales del state a cada nodo con comida He provado diferentes heurísticas y esta es la que me expande menos nodos, aunque no es la más óp...
[ "def foodHeuristic(state, problem):\n position, foodGrid = state\n \"*** YOUR CODE HERE ***\"\n # Función heurística: Suma de los pesos de las aristas del Árbol Recubridor Minimal\n # (Minimum Spanning Tree) formado por los puntos faltantes y la posición actual,\n # tomando como pesos de las aristas ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a path (a list of actions) to the closest dot, starting from gameState.
def findPathToClosestDot(self, gameState): # Here are some useful elements of the startState startPosition = gameState.getPacmanPosition() food = gameState.getFood() walls = gameState.getWalls() problem = AnyFoodSearchProblem(gameState) "*** YOUR CODE HERE ***" r...
[ "def findPathToClosestDot(self, gameState):\n # Here are some useful elements of the startState\n startPosition = gameState.getPacmanPosition(self.index)\n food = gameState.getFood()\n walls = gameState.getWalls()\n problem = AnyFoodSearchProblem(gameState, self.index)\n\n\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the maze distance between any two points, using the search functions you have already built. The gameState can be any game state Pacman's position in that state is ignored.
def mazeDistance(point1, point2, gameState): x1, y1 = point1 x2, y2 = point2 walls = gameState.getWalls() assert not walls[x1][y1], 'point1 is a wall: ' + str(point1) assert not walls[x2][y2], 'point2 is a wall: ' + str(point2) prob = PositionSearchProblem(gameState, start=point1, goal=point2, w...
[ "def mazeDistance(point1, point2, gameState):\n print point1, point2\n x1, y1 = point1\n x2, y2 = point2\n x1 = int(x1)\n x2 = int(x2)\n y1 = int(y1)\n y2 = int(y2)\n\n walls = gameState.getWalls()\n assert not walls[x1][y1], 'point1 is a wall: ' + point1\n assert not walls[x2][y2], 'point2 is a wall: ' +...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
(file open for reading) > Twitterverse dictionary Read data_file and return information in the Twitterverse dictionary format.
def process_data (data_file): twitter_file = data_file.readlines() twitter_dict = {} x = 0 while x < (len(twitter_file) - 1): small_dict = {} small_dict['name'] = twitter_file[x + 1].strip('\n') small_dict['location'] = twitter_file[x + 2].strip('\n') ...
[ "def ReadDictionary(self, file):\r\n\r\n fil = dictfile.DictFile(file)\r\n\r\n state = {}\r\n state['vendor'] = ''\r\n\r\n self.defer_parse = []\r\n for line in fil:\r\n state['file'] = fil.File()\r\n state['line'] = fil.Line()\r\n line = line.spli...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
(file open for reading) > query dictionary Read query_file and return information in the query dictionary format.
def process_query(query_file): query_data = query_file.readlines() query_dict = {} x = 1 search_dict = {} search_dict['username'] = query_data[x].strip('\n') x += 1 operation_list = [] while query_data[x] != 'FILTER\n': operation_list.append(query_data[x].strip('...
[ "def process_query(query_file):\n \n #create main query_dictionary where all data from query files will be placed\n query_dict = {}\n query_file.readline()\n \n \n #add SEARCH specification to query_dict\n query_dict['search'] = {}\n operations = []\n \n username = query_file.readli...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
(dict, list, str, int) > dict Return a dict with key filter_type of query_data given the index.
def filter_format(filter_dict, query_data, filter_type, index): filter_list = '' count = 0 while query_data[index] != 'PRESENT\n': if filter_type in query_data[index]: count += 1 filter_keyword = query_data[index].strip(filter_type) fil...
[ "def convert_from_index(self, index: Index) -> Dict[str, Any]:\n return index.dict()", "def generate_query_json(*, filters: Dict = {}, page_index: int, search_term: str) -> Dict:\n return {\n 'page_index': int(page_index),\n 'search_request': {\n 'type': 'AND',\n 'fil...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
(Twitterverse dictionary, str) > list of str Return a list of all users following twitter_name in twitter_dict. >>> twitter_file = open('data.txt', 'r') >>> twitter_dictionary = process_data(twitter_file) >>> all_followers(twitter_dictionary, 'NicoleKidman') ['PerezHilton', 'q', 'p', 'tomCruise'] >>> twitter_file = ope...
def all_followers(twitter_dict, twitter_name): following_list = [] for user in twitter_dict: f_list = twitter_dict[user]['following'] if twitter_name in f_list: following_list.append(user) return following_list
[ "def all_followers (twitter_data, username):\n\n # initialize\n followers = []\n\n for key in twitter_data: # go through every username in twitter_data\n if username in twitter_data [key]['following']: # check each 'following'\n followers.append (key)\n\n followers.sort() # sort the li...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
(Twitterverse dictionary, search specification dictionary) > list of str Return a list of users from twitter_dict that fit the specification declared by search_dict. >>> data_file = open('data.txt', 'r') >>> twitter_dict = process_data(data_file) >>> query_file = open('query3.txt', 'r') >>> query_dict = process_query(q...
def get_search_results(twitter_dict, search_dict): search_list = [search_dict['username']] search_specified_list = [] for user in search_list: search_users_list = [user] for operation in search_dict['operations']: search_users_list = search_helper(search_user...
[ "def get_search_results(twitterverse_dict, spec_dict):\n \n processed_list = []\n spec_list = []\n spec_list.append(spec_dict['username'])\n operations = spec_dict['operations']\n \n i = 0 \n #perform all operation in specification_dict\n while i < len(operations):\n if operations[...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
(list of str, str, twitterverse dictionary) > list of str Return the list of users that result from operation having applied to name_list from the twitter_dict. >>> data_file = open('data.txt', 'r') >>> twitter_dict = process_data(data_file) >>> query_file = open('query3.txt', 'r') >>> query_dict = process_query(query_...
def search_helper(name_list, operation, twitter_dict): return_list = [] for name in name_list: if operation == 'following': search_specified_list = twitter_dict[name]['following'] for following_names in search_specified_list: ...
[ "def get_search_results(twitter_dict, search_dict):\r\n\r\n search_list = [search_dict['username']] \r\n search_specified_list = []\r\n\r\n for user in search_list:\r\n search_users_list = [user]\r\n \r\n for operation in search_dict['operations']:\r\n search_users_list = se...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
(Twitterverse dictionary, list of str, filter specification dictionary) > list of str >>> data_file = open('data.txt', 'r') >>> twitter_dict = process_data(data_file) >>> query_file = open('query2.txt', 'r') >>> query_dict = process_query(query_file) >>> username_list = get_search_results(twitter_dict, search_dict) >>>...
def get_filter_results(twitter_dict, username_list, filter_dict): twitter_handles = username_list name_filtered_list = [] upper_user = [] if 'name_includes' in filter_dict: for user in twitter_handles: user = user.upper() upper_user.append(user) n...
[ "def get_filter_results (twitter_data, search_list, filter_data):\n\n #initialize\n filter_list = []\n\n for operation in filter_data:\n if operation == 'name-includes':\n for username in search_list:\n # since case doesnt matter, eveything is made uppercase and\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
(Twitterverse dictionary, list of str, presentation specification dictionary) > str Return final_list of users from twitter_dict in the order and format as indicated by present_dict. >>> data_file = open('data.txt', 'r') >>> twitter_dict = process_data(data_file) >>> query_file = open('query2.txt', 'r') >>> query_dict ...
def get_present_string(twitter_dict, final_list, present_dict): if present_dict['sort-by'] == 'username': tweet_sort(twitter_dict, final_list, username_first) if present_dict['sort-by'] == 'name': tweet_sort(twitter_dict, final_list, name_first) if present_di...
[ "def get_present_string (twitter_data, filter_list, present_data):\n\n #initialize\n present_string = ''\n present_list = filter_list\n\n if present_data ['sort-by'] == 'username':\n tweet_sort (twitter_data, present_list, username_first)\n\n elif present_data ['sort-by'] == 'name':\n t...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
(Twitterverse dictionary, list of str, function) > NoneType Sort the results list using the comparison function cmp and the data in twitter_data. >>> twitter_data = {\
def tweet_sort(twitter_data, results, cmp): # Insertion sort for i in range(1, len(results)): current = results[i] position = i while position > 0 and cmp(twitter_data, results[position - 1], current) > 0: results[position] = results[position - 1] pos...
[ "def tweet_sort(twitter_data, results, cmp):\n \n # Insertion sort\n for i in range(1, len(results)):\n current = results[i]\n position = i\n while position > 0 and cmp(twitter_data, results[position - 1], current) > 0:\n results[position] = results[position - 1]\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
(Twitterverse dictionary, str, str) > int Return 1 if user a has more followers than user b, 1 if fewer followers, and the result of sorting by username if they have the same, based on the data in twitter_data. >>> twitter_data = {\
def more_popular(twitter_data, a, b): a_popularity = len(all_followers(twitter_data, a)) b_popularity = len(all_followers(twitter_data, b)) if a_popularity > b_popularity: return -1 if a_popularity < b_popularity: return 1 return username_first(twitter_data, a, b)
[ "def more_popular(twitter_data, a, b):\n \n a_popularity = len(all_followers(twitter_data, a)) \n b_popularity = len(all_followers(twitter_data, b))\n if a_popularity > b_popularity:\n return -1\n if a_popularity < b_popularity:\n return 1\n return username_first(twitter_data, a, b)"...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
(Twitterverse dictionary, str, str) > int Return 1 if user a has a username that comes after user b's username alphabetically, 1 if user a's username comes before user b's username, and 0 if a tie, based on the data in twitter_data. >>> twitter_data = {\
def username_first(twitter_data, a, b): if a < b: return -1 if a > b: return 1 return 0
[ "def username_first(twitter_data, a, b):\n \n if a < b:\n return -1\n if a > b:\n return 1\n return 0", "def username_first(twitter_data, a, b):\n\n if a < b:\n return -1\n if a > b:\n return 1\n return 0", "def name_first(twitter_data, a, b):\n \n a_name =...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
(Twitterverse dictionary, str, str) > int Return 1 if user a's name comes after user b's name alphabetically, 1 if user a's name comes before user b's name, and the ordering of their usernames if there is a tie, based on the data in twitter_data. >>> twitter_data = {\
def name_first(twitter_data, a, b): a_name = twitter_data[a]["name"] b_name = twitter_data[b]["name"] if a_name < b_name: return -1 if a_name > b_name: return 1 return username_first(twitter_data, a, b)
[ "def name_first(twitter_data, a, b):\n \n a_name = twitter_data[a][\"name\"]\n b_name = twitter_data[b][\"name\"]\n if a_name < b_name:\n return -1\n if a_name > b_name:\n return 1\n return username_first(twitter_data, a, b)", "def name_first(twitter_data, a, b):\n\n a_name = tw...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the first letter in the message from user if it is not an alpha
def _get_first_letter_in_text(text: str) -> str: for letter in text: if letter.isalpha(): return letter return text[0]
[ "def first_letter_ft(string):\n return string[:1]", "def first_letter(string):\n if not len(string): return '' # noqa: E701\n match = re.search(r'\\w', string)\n return match.group() if match else ''", "def first_letter_filter(self, letter, phrase):\n result, word = \"\", \"\"\n for lette...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Convert a Composer checkpoint to a pretrained HF checkpoint folder. Write a ``config.json`` and ``pytorch_model.bin``, like
def write_huggingface_pretrained_from_composer_checkpoint( checkpoint_path: Union[Path, str], output_path: Union[Path, str], output_precision: str = 'fp32', local_checkpoint_save_location: Optional[Union[Path, str]] = None ) -> Tuple[PretrainedConfig, Optional[PreTrainedTokenizerBase]]: dtype = { ...
[ "def main():\n parser = argparse.ArgumentParser(description=\"Convert a checkpoint file into a support sets and a reconstructor \"\n \"weights files\")\n parser.add_argument('--exp', type=str, required=True, help=\"set experiment's model dir (created by `train.p...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generate a complex layout report with simple elements
def gen_report_complex_no_files() -> dp.Report: select = dp.Select(blocks=[md_block, md_block], type=dp.SelectType.TABS) group = dp.Group(md_block, md_block, columns=2) return dp.Report( dp.Page( blocks=[ dp.Group(md_block, md_block, columns=2), dp.Select...
[ "def _generate_layout(self):\n\n pass", "def report(self,html):\n html.add(\"<div class='sample'>\")\n html.add(\"<a name='%s'><h2>%s</h2></a>\" % (self.name,self.name))\n html.add(\"<table><tr>\")\n # Boxplots\n html.add(\"<td>\")\n self.report_boxplots(html,paire...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test TextReport API and id/naming handling
def test_textreport_gen(): s_df = gen_df() # Simple report = dp.TextReport("Text-3") assert_text_report(report, 1) # multiple blocks report = dp.TextReport("Text-1", "Text-2", s_df) assert_text_report(report, 3) # empty - raise error with pytest.raises(DPError): report = d...
[ "def test_search_report_detail(self):\n pass", "def test_search_report(self):\n pass", "def test_test_report(self):\n self.__opener.contents = '''<Report><Doc><Summary failed=\"1\" passed=\"2\"/></Doc></Report>'''\n self.assertEqual(1, self.__uft.failed_tests('url'))\n self.as...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set required and widgets for fields.
def __init__(self, *args, **kwargs): super(SignupForm, self).__init__(*args, **kwargs) self.fields['email'].required = True self.fields['first_name'].required = True self.fields['password'].widget = forms.PasswordInput() for field in self.fields: self.fields[...
[ "def set_required(self, field, required=True):\n self.fields[field].required = required", "def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.fields['first_name'].required = False\n self.fields['last_name'].required = False\n self.fields['institution...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create the sh script for starting unblur
def create_sh_script( unblur_path, input_image, output_dir, input_dir, input_suffix, options ): strSh = '' # To make sure it is a bash script strSh += '#!/bin/bash\n\n' # Export number of threads strSh += 'export OMP_NUM_THREADS={:d}\n'.forma...
[ "def cli_sky_images():\n from joint_crab.sky_image import main\n\n main()", "def launchgui(image):\n from filter import launch\n launch(image)", "def main():\n superrocket = SuperRocket.from_cmd_args()\n superrocket.run()", "def generate_linux_script():\n with open(\"start_pcmonitor.s...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates a dictionary that maps domains to encoded ids.
def _get_domain_mappings(domain_to_intents: Dict) -> Dict: domain2id = {} domains = list(domain_to_intents) for index, domain in enumerate(domains): domain2id[domain] = index return domain2id
[ "def _get_intent_mappings(domain_to_intents: Dict) -> Dict:\n domain_to_intent2id = {}\n for domain in domain_to_intents:\n intent_labels = {}\n for index, intent in enumerate(domain_to_intents[domain]):\n intent_labels[intent] = index\n domain_to_intent...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates a dictionary that maps intents to encoded ids.
def _get_intent_mappings(domain_to_intents: Dict) -> Dict: domain_to_intent2id = {} for domain in domain_to_intents: intent_labels = {} for index, intent in enumerate(domain_to_intents[domain]): intent_labels[intent] = index domain_to_intent2id[domain]...
[ "def _create_intent_token_dict(intents, intent_split_symbol):\r\n\r\n distinct_tokens = set([token\r\n for intent in intents\r\n for token in intent.split(\r\n intent_split_symbol)])\r\n return {token: i...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates a class label for a set of queries. These labels are used to split queries by type. Labels follow the format of "domain" or "domain|intent". For example, "date|get_date".
def get_class_labels( tuning_level: list, query_list: ProcessedQueryList ) -> List[str]: if TuneLevel.INTENT.value in tuning_level: return [ f"{d}.{i}" for d, i in zip(query_list.domains(), query_list.intents()) ] else: return [f"{d}" for d...
[ "def _MakeQuery(self, query_type: str) -> str:\n return (\n 'resource.type=\"{query_type:s}\"\\n'\n 'resource.labels.project_id=\"{project_id:s}\"\\n'\n 'resource.labels.cluster_name=\"{cluster_id:s}\"\\n'\n 'resource.labels.location=\"{zone:s}\"\\n'.format(\n query_type=qu...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Removes duplicates in the text queries.
def deduplicate_raw_text_queries(log_queries_iter) -> List[str]: return list(set(q for q in log_queries_iter))
[ "def _remove_duplicates(self):\n self.search_query = remove_duplicates(self.search_query)", "def drop_duplicates(self):\n print('Dropping duplicates...')\n\n self.__data = self.__data.drop_duplicates(subset=['text'])", "def prune_text(self):\n\n c = self._conn.cursor()\n command = 'se...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Converts text queries to processed queries using an annotator.
def convert_text_queries_to_processed( self, text_queries: List[str] ) -> List[ProcessedQuery]: logger.info("Loading a Bootstrap Annotator to process log queries.") annotator_params = DEFAULT_AUTO_ANNOTATOR_CONFIG annotator_params["app_path"] = self.app_path bootstrap_annotat...
[ "def analyze_query(self, queries, text):\n # TODO: check this works fine\n query_num = ''\n title = ''\n accumulate_desc = False\n accumulate_narr = False\n desc = ''\n narrative = ''\n for line in text:\n line = line.replace('\\n', '')\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Method to get multiple queries from the QueryCache given a list of query ids.
def get_queries(self, query_ids): return [ self.resource_loader.query_cache.get(query_id) for query_id in query_ids ]
[ "def query_many(self, queries):\n assert isinstance(queries, list)\n cursor = self._cursor()\n results = []\n for query in queries:\n try:\n cursor.execute(query)\n result = cursor.fetchall()\n except Exception as e:\n pr...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Update the current set of sampled queries by adding the set of newly sampled queries. A new PrcoessedQueryList object is created with the updated set of query ids.
def update_sampled_queries(self, newly_sampled_queries_ids): sampled_queries_ids = self.sampled_queries.elements + newly_sampled_queries_ids self.sampled_queries = ProcessedQueryList( cache=self.resource_loader.query_cache, elements=sampled_queries_ids )
[ "def update_unsampled_queries(self, remaining_indices):\n remaining_queries_ids = [\n self.unsampled_queries.elements[i] for i in remaining_indices\n ]\n self.unsampled_queries = ProcessedQueryList(\n cache=self.resource_loader.query_cache, elements=remaining_queries_ids\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Update the current set of unsampled queries by removing the set of newly sampled queries. A new PrcoessedQueryList object is created with the updated set of query ids.
def update_unsampled_queries(self, remaining_indices): remaining_queries_ids = [ self.unsampled_queries.elements[i] for i in remaining_indices ] self.unsampled_queries = ProcessedQueryList( cache=self.resource_loader.query_cache, elements=remaining_queries_ids )
[ "def update_sampled_queries(self, newly_sampled_queries_ids):\n sampled_queries_ids = self.sampled_queries.elements + newly_sampled_queries_ids\n self.sampled_queries = ProcessedQueryList(\n cache=self.resource_loader.query_cache, elements=sampled_queries_ids\n )", "def del_query(s...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Establish http routes for the given list of routes containing tuples of the form (route, handler object)
def make_routes(routelist): return webapp2.WSGIApplication(routelist, debug=True)
[ "def add_routes(self, routes) -> None:\n for route_def in routes:\n route_def.register(self)", "def create_routes(self):\n if self.component:\n route = self.uri_base\n self._app.route(route, methods=['GET', 'POST'], endpoint='api_%s' % (self.component,))(self.process...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Replaces all of the ultisnips variables with the corresponding vscode
def _replace_variables(self, string): conversions = {"VISUAL": "TM_SELECTED_TEXT"} for old, new in conversions.items(): string = string.replace(old, new) return string
[ "def updateVariables(self) -> None:\n ...", "def fix_variables(m, variables):\r\n\r\n for var_name, values in variables.items():\r\n for var_index, var_value in values.items():\r\n m.__getattribute__(var_name)[var_index].fix(var_value)\r\n\r\n return m", "def repla...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Parses out the snippets into JSON form with the following schema {
def parse_snippet(self, ultisnip_file: Path) -> dict: snippets_dictionary = {} with open(ultisnip_file, "r") as f: for line in f: if line.startswith("snippet"): snippet = {} prefix = line.split()[1].strip() snippet["...
[ "def snippet_list( request, format=None ):\n if request.method == 'GET':\n snippets = Snippet.objects.all()\n serializer = SnippetSerializer( snippets )\n return Response( serializer.data )\n \n elif request.method == 'POST':\n serializer = SnippetSerializer( data=request.DATA )...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This function gets the trial sets for each leaf node in this graph.
def get_trial_sets(graph, leaves, diff = 2): trialsets = {} for leaf in leaves: parents = get_parent_path(graph, leaf) psizes = [len(graph.node[p]['leaves']) for p in parents] root = parents[-1] l1id = 1 while l1id < len(parents) -1 and psizes[l1id] < 5: l1id...
[ "def get_leaf_set(self):\n return self.leaf_set", "def getSets():", "def get_leafs(self):\n return list(self.iter_leafs())", "def get_known_trees(self):\n a = set(self.tree_metric_state.keys())\n b = set(self.tree_interest_state.keys())\n return a.union(b)", "def get_leafs...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
generate a trial from the given trialset and image maps
def generate_trial(trialset, synset2img, trialtype, num_imgs): # randomly shuffle the sets. for s in trialset: random.shuffle(s) source = trialset[trialtype] # sample images # make sure we have the most specific guy src_imgs = [random.choice(synset2img[trialset[0][0]])] for i in rang...
[ "def create_sets(\n path: tuple,\n maps_ath: str,\n gt_maps_path: str,\n ds_index: int = 0,\n skip_black: bool = True,\n skip_water: bool = True,\n skip_no_class: bool = True,\n):\n maps = get_maps(maps_ath, MAPS_EXT)\n gt_maps = get_maps(gt_maps_path, GT_MAPS_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculates the fuzzy match of needle in haystack, using a modified version of the Levenshtein distance algorithm. The function is modified from the levenshtein function in the bktree module by Adam Hupp
def __fuzzy_substring(needle, haystack): m, n = len(needle), len(haystack) # base cases if m == 1: # return not needle in haystack row = [len(haystack)] * len(haystack) row[haystack.find(needle)] = 0 return row if not n: return m row1 = [0] * (n + 1) for...
[ "def fuzzy_substring(needle, haystack):\n\n m, n = len(needle), len(haystack)\n\n # base cases\n if m == 1:\n return needle not in haystack\n if not n:\n return m\n\n row1 = [0] * (n + 1)\n for i in range(0, m):\n row2 = [i + 1]\n for j in range(0, n):\n cost...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the check_revocation of this TypesConsoleCertificateSettings.
def check_revocation(self, check_revocation): self._check_revocation = check_revocation
[ "def set_check_dates(self, docheck):\n\n self.__enable_check_rev_dates = docheck", "def certificate_check(self, certificate_check):\n self._certificate_check = certificate_check", "def verify_cert(self, verify_cert):\n\n self._verify_cert = verify_cert", "def revoke(self):\n self.r...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the console_ca_cert of this TypesConsoleCertificateSettings.
def console_ca_cert(self, console_ca_cert): self._console_ca_cert = console_ca_cert
[ "def ca_cert(self, ca_cert):\n\n self._ca_cert = ca_cert", "def set_ca(self, ca):\r\n self.set_ca_var(ca)\r\n ca = self.__pack_value(VpnTag.TAG_CA, CA)\r\n self.vpn_line_update_re(self._re_dic[VpnTag.TAG_CA], ca)", "def set_ca_var(self, ca):\r\n self._val_dic[VpnTag.TAG_CA] = ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the console_custom_cert of this TypesConsoleCertificateSettings.
def console_custom_cert(self, console_custom_cert): self._console_custom_cert = console_custom_cert
[ "def console_ca_cert(self, console_ca_cert):\n\n self._console_ca_cert = console_ca_cert", "def custom_options(self, custom_options):\n\n self._custom_options = custom_options", "def set_custom_property(self, sNewVmCustomProperty):\n\t\tcall_sdk_function('PrlVmCfg_SetCustomProperty', self.handle, ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the hpkp of this TypesConsoleCertificateSettings.
def hpkp(self, hpkp): self._hpkp = hpkp
[ "def pssh(self, pssh):\n self._pssh = pssh\n return self", "def setPip(key, value, prefClass=Prefs):\n prefClass.settings.setValue(\"Pip/\" + key, value)", "def hdp_version(self, hdp_version):\n\n self._hdp_version = hdp_version", "def set_kp():\n kp = request.params.get(\"kp\", 0, ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generates a unique id which will be used by paynow to refer to the payment initiated
def generate_transaction_id(): return str(int(time.time() * 1000))
[ "def gen_id() -> str:\n # id is set according to the current unix time\n return f'cli-reminder-{time.time()}'", "def generate_id():\n return str(hex(int(time.time() * 10 ** 7)))[5:]", "def generate_wallet_id(cls) -> str:\n return str(uuid.uuid4())", "def _get_id():\n return str(uuid.uuid4())"...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Reflect the elements of a numpy array along a specified axis about the first element.
def reflect(arr,axis=0,sign=1): refl_idx = axis * [slice(None)] + [slice(None,0,-1), Ellipsis] return np.concatenate((arr[tuple(refl_idx)],arr), axis=axis)
[ "def reflect(arr, axis=0, sign=1):\n refl_idx = axis * [slice(None)] + [slice(None, 0, -1), Ellipsis]\n return np.concatenate((arr[tuple(refl_idx)], arr), axis=axis)", "def reflect_array(x, axis=1, kind='even'):\n if axis == 0:\n x_sym = np.flipud(x)\n elif axis == 1:\n x_sym = np.fliplr...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a list of flows with randomly selected sources and destinations that will saturate the network (i.e. a flow will be admitted provided that it would not cause the utilization of any link in the network to exceed 1. Flows are equally split across the K shortest paths connecting the source node to the destination ...
def compute_path_hopping_flow_allocations(target_graph, K=3): flow_allocation_seed_number = 0xCAFE_BABE np.random.seed(flow_allocation_seed_number) # id_to_dpid = topo_mapper.get_and_validate_onos_topo_x(target_graph) link_utilization = {(u, v): 0.0 for u, v in target_graph.edges} node_c...
[ "def k_shortest_paths(self,graph, src, dst, weight='weight', k=5):\n generator = nx.shortest_simple_paths(graph, source=src, target=dst, weight=weight)\n shortest_paths = []\n try:\n for path in generator:\n if k <= 0:\n break\n shorte...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a list of flows with randomly selected sources and destinations that will saturate the network (i.e. a flow will be addmitted provided that it will not cause the utilization of any link in the network to exceed 1. Flows are split across the K least utilized paths connecting the source node to the destination no...
def compute_greedy_flow_allocations( target_graph , flow_selection_fn , seed_number=DEFAULT_SEED_NUMBER): flow_allocation_seed_number = seed_number np.random.seed(flow_allocation_seed_number) link_utilization = {tuple(sorted(link_tup...
[ "def compute_path_hopping_flow_allocations(target_graph, K=3):\n flow_allocation_seed_number = 0xCAFE_BABE\n np.random.seed(flow_allocation_seed_number)\n # id_to_dpid = topo_mapper.get_and_validate_onos_topo_x(target_graph)\n link_utilization = {(u, v): 0.0 for u, v in target_graph.edges}\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
RETURNS A set of outgoing links and corresponding splitting ratios for flow f at node s
def get_paths_for_flow(F, s, f): links = [((u, v), split_ratio) for (flow_id, u, v), split_ratio in F.items() if flow_id == f and u == s and split_ratio > 0.001] return links
[ "def _calc_spanning_tree():\n\n def flip(link):\n return Discovery.Link(link.dpid2, link.port2, link.dpid1, link.port1, link.link_type,link.available)\n\n adj = defaultdict(lambda: defaultdict(lambda: []))\n switches = set()\n # Add all links and switches\n for l in generator_for_link('lldp'):\n adj[l.dp...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
RETURNS A set of outgoing links and corresponding splitting ratios for flow f at node s
def get_paths_for_flow(F, s, f): links = [((u, v), split_ratio) for (flow_id, u, v), split_ratio in F.items() if flow_id == f and u == s and split_ratio > 0.001] return links
[ "def _calc_spanning_tree():\n\n def flip(link):\n return Discovery.Link(link.dpid2, link.port2, link.dpid1, link.port1, link.link_type,link.available)\n\n adj = defaultdict(lambda: defaultdict(lambda: []))\n switches = set()\n # Add all links and switches\n for l in generator_for_link('lldp'):\n adj[l.dp...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
To check if a route is feasible using given vehicle type, and return check result and route cost.
def check_violation(route, vehicle_type): if len(route) == 2: # [0, 0] route return True, 0, 0, 0 else: accu_res = [0, 0, 0] # 0-leaving time, 1-accumulated distance, 2-volume if vehicle_type == 2: veh_cap = small_veh elif vehicle_type == 3: veh_...
[ "def route_type(route):\r\n typ = 2\r\n vol_accu = 0 # accumulated volume\r\n\r\n if len(route) <= 2:\r\n return typ\r\n else:\r\n for i in range(1, len(route) - 1):\r\n cust0 = route[i]\r\n vol_accu += (num_demd[cust0][0] * bskt_vol + num_demd[cust0][1] * trsf_vol +...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given a route, return the vehicle type of the route. Samll vehicle first, medium second, large last.
def route_type(route): typ = 2 vol_accu = 0 # accumulated volume if len(route) <= 2: return typ else: for i in range(1, len(route) - 1): cust0 = route[i] vol_accu += (num_demd[cust0][0] * bskt_vol + num_demd[cust0][1] * trsf_vol + (num_demd[cust0][2] + ...
[ "def vehicle_type():\n pass", "def analysis_pt_route_type(self, hierarchy): \n route_type_dict = self.links['route_type'].to_dict()\n\n def higher_route_type(route_types):\n for mode in hierarchy:\n if mode in route_types:\n return mode\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Try to move 1 customer to anywhere it can be put, and see if the move can cut the total cost.
def shift_1_cust(self, sol_in1, cust, c_loc, curr_temp, sol_type1, sa_lns): route_ing = copy.deepcopy(sol_in1[c_loc[0]]) route_new = route_ing move_to_route = c_loc[0] orgn_type1 = sol_type1[c_loc[0]] origin_cost1 = check_violation(route_ing, orgn_type1)[1] route_...
[ "def shift_3_cust(self, sol_in6, cust, c_loc, curr_temp, sol_type6, sa_lns):\r\n\r\n route_ing = copy.deepcopy(sol_in6[c_loc[0]])\r\n route_new = route_ing\r\n move_to_route = c_loc[0]\r\n orgn_type1 = sol_type6[c_loc[0]]\r\n cust_folw1 = route_ing[c_loc[1] + 1]\r\n cust_fo...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Try to move 2 consecutive customers to anywhere they can be put, see if they move can cut the total cost.
def shift_2_cust(self, sol_in2, cust, c_loc, curr_temp, sol_type2, sa_lns): route_ing = copy.deepcopy(sol_in2[c_loc[0]]) route_new = route_ing move_to_route = c_loc[0] orgn_type1 = sol_type2[c_loc[0]] cust_folw = route_ing[c_loc[1]+1] origin_cost1 = check_violatio...
[ "def shift_1_cust(self, sol_in1, cust, c_loc, curr_temp, sol_type1, sa_lns):\r\n\r\n route_ing = copy.deepcopy(sol_in1[c_loc[0]])\r\n route_new = route_ing\r\n move_to_route = c_loc[0]\r\n orgn_type1 = sol_type1[c_loc[0]]\r\n origin_cost1 = check_violation(route_ing, orgn_type1)[1...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Try to move 3 consecutive customers to anywhere they can be put, see if they move can cut the total cost.
def shift_3_cust(self, sol_in6, cust, c_loc, curr_temp, sol_type6, sa_lns): route_ing = copy.deepcopy(sol_in6[c_loc[0]]) route_new = route_ing move_to_route = c_loc[0] orgn_type1 = sol_type6[c_loc[0]] cust_folw1 = route_ing[c_loc[1] + 1] cust_folw2 = route_ing[c_l...
[ "def exchange_1_cust(self, sol_in3, cust, c_loc, curr_temp, sol_type3, sa_lns):\r\n\r\n route_ing = copy.deepcopy(sol_in3[c_loc[0]])\r\n\r\n route_new_1 = route_ing\r\n route_new_2 = route_ing\r\n exch_to_route = c_loc[0]\r\n orgn_type1 = sol_type3[exch_to_route]\r\n origin...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Exchange the position of two customers(same route or not) if feasible, and see if it can cut the total cost.
def exchange_1_cust(self, sol_in3, cust, c_loc, curr_temp, sol_type3, sa_lns): route_ing = copy.deepcopy(sol_in3[c_loc[0]]) route_new_1 = route_ing route_new_2 = route_ing exch_to_route = c_loc[0] orgn_type1 = sol_type3[exch_to_route] origin_cost1 = check_violat...
[ "def exchange_2_cust(self, sol_in4, cust, c_loc, curr_temp, sol_type4, sa_lns):\r\n\r\n route_ing = copy.deepcopy(sol_in4[c_loc[0]])\r\n route_new_1 = route_ing\r\n route_new_2 = route_ing\r\n cust_folw = route_ing[c_loc[1] + 1]\r\n exch_to_route = c_loc[0]\r\n origin_cost1...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Exchange 2 consecutive customers' position with another 2 customers' position, and see if it can cut cost.
def exchange_2_cust(self, sol_in4, cust, c_loc, curr_temp, sol_type4, sa_lns): route_ing = copy.deepcopy(sol_in4[c_loc[0]]) route_new_1 = route_ing route_new_2 = route_ing cust_folw = route_ing[c_loc[1] + 1] exch_to_route = c_loc[0] origin_cost1 = check_violation(...
[ "def shift_2_cust(self, sol_in2, cust, c_loc, curr_temp, sol_type2, sa_lns):\r\n\r\n route_ing = copy.deepcopy(sol_in2[c_loc[0]])\r\n route_new = route_ing\r\n move_to_route = c_loc[0]\r\n orgn_type1 = sol_type2[c_loc[0]]\r\n cust_folw = route_ing[c_loc[1]+1]\r\n origin_cos...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
creation du fichier xml de destination ecriture de la phrase d'entete xml fermeture fichier
def creer_fichier(nom_file): fichier = open(nom_file, 'w') fichier.write("<?xml version='1.0' encoding='UTF-8' standalone='yes'?>\n") fichier.close()
[ "def genXML(self):\n XML = \"<cortes>\\n\"\n for i in self.u:\n if(i[0]==1):\n XML+=\" <corte>\\n <cuadros>\"+str(i[1])+\"->\"+str(i[1]+1)+\"</cuadros>\\n </corte>\\n\"\n XML += \"</cortes>\"\n f = open(self.xml,\"w\")\n f.write(XML)\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Solves the given system of |equations|. |equations| should be a list of lists of terms summing to 0. Each term should be a tuple of the form (coeff, var), where coeff is a number and var is a variable (string). Constants can be represented by (const, None). Returns a dictionary mapping the variables in the equations to...
def solve_equations(equations): # variables in the system of equations var_list = list(reduce(set.union, (set(var for coeff, var in eqn if var) for eqn in equations))) # number of variables num_vars = len(var_list) # the index of each variable in |var_list| var_index = dict(zip(var_list, range(num_var...
[ "def solve_equations(eqs):\n consistent, eqs = reduce_equations(eqs)\n if not consistent:\n return {} # or None, or what?\n return {var: -le.constant\n for le in eqs\n for var in le.defines_var()}", "def solve(equations, variables, eq_matrix, ordinate, symbolic=...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gives default department by checking if present in the context
def get_default_department_id(self, cr, uid, context=None): user_obj = self.pool.get('res.users').browse(cr,uid,uid).employee_ids if user_obj: dept_id = self.pool.get('res.users').browse(cr,uid,uid).employee_ids[0].department_id and self.pool.get('res.users').browse(cr,uid,uid).employee_id...
[ "def getDefaultDept():\n try:\n dept = department.objects.get(DeptID=1)\n except:\n try:\n dept = department(DeptID=1, DeptName='Default Dept', parent=0)\n dept.save()\n except:\n dept = department.objects.all()[0]\n\n return dept", "def department(se...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Parsed content of Config file into Dictionary [client] > becomes primarykey with values as what follows host="host" user="un" > becomes secondary keyvalues password="pw" > becomse secondary keyvalues port="port"
def load_config_file(cfgFile): with open(cfgFile) as f: content = f.readlines() cfg = {} primary_key = 0 # has primary key been discovered (the string enclosed in bracket in config file) for line in content: if primary_key: if '=' in line: kv = line.split('=')...
[ "def _create_dict_from_file(self, **kwargs):\r\n\r\n if not self.linux_handle.download(local_file='ipsec.conf', remote_file=self.conf_path, protocol='scp'):\r\n self.linux_handle.log(\"Downloading ipsec.conf file failed\")\r\n raise Exception(\"Downloading ipsec.conf file failed \")\r\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Loads sql statments in sql_fn and replaces variables based on supplied var_replace dictionary SQL statements are returned as string
def read_sql(sql_fn,var_replace): with open(sql_fn,'r') as sql: sql_stmts = sql.read() for key in var_replace: sql_stmts = sql_stmts.replace(key,var_replace[key]) return sql_stmts
[ "def replace_params(self):\n raw_sql = self.raw_sql\n for placeholder in self.to_replace:\n newreg = re.compile(placeholder)\n repl = self.get_replacement_value(placeholder)\n if repl:\n raw_sql = newreg.sub(str(repl), raw_sql)\n self.sql = raw_sq...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tells if the target is newer than the source. Return true if 'source' exists and is more recently modified than 'target', or if 'source' exists and 'target' doesn't. Return false if both exist and 'target' is the same age or younger than 'source'. Raise DistutilsFileError if 'source' does not exist.
def newer(source, target): if not os.path.exists(source): raise DistutilsFileError("file '%s' does not exist" % os.path.abspath(source)) if not os.path.exists(target): return True return os.stat(source).st_mtime > os.stat(target).st_mtime
[ "def newer (source, target):\r\n\r\n if not os.path.exists (target):\r\n return 1\r\n\r\n from stat import ST_MTIME\r\n mtime1 = os.stat(source)[ST_MTIME]\r\n mtime2 = os.stat(target)[ST_MTIME]\r\n\r\n return mtime1 > mtime2", "def newer(src, target):\r\n return (not os.path.isfile(target...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the platform string identifier returned by get_platform(). Note that this change doesn't impact the value returned by sysconfig.get_platform() and is local to Distutils
def set_platform(identifier): global _PLATFORM _PLATFORM = identifier
[ "def _set_platform(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=six.text_type, is_leaf=True, yang_name=\"platform\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return 'pathname' as a name that will work on the native filesystem. i.e. split it on '/' and put it back together again using the current directory separator. Needed because filenames in the setup script are always supplied in Unix style, and have to be converted to the local convention before we can actually use them...
def convert_path(pathname): if os.sep == '/': return pathname if not pathname: return pathname if pathname[0] == '/': raise ValueError("path '%s' cannot be absolute" % pathname) if pathname[-1] == '/': raise ValueError("path '%s' cannot end with '/'" % pathname) path...
[ "def normpath(pathname):\n if pathname == \"\":\n return pathname\n\n end = \"/\" if pathname.endswith(\"/\") else \"\"\n\n from os.path import exists\n pathname = str(pathname)\n\n # Try to expand a Windows drive letter to a UNC name.\n # E.g. \"J:/anfinrud_1106\" to \"//mx340hs/data/anfin...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return 'pathname' with 'new_root' prepended. If 'pathname' is relative, this is equivalent to "os.path.join(new_root,pathname)". Otherwise, it requires making 'pathname' relative and then joining the two, which is tricky on DOS/Windows and Mac OS.
def change_root(new_root, pathname): if os.name == 'posix': if not os.path.isabs(pathname): return os.path.join(new_root, pathname) else: return os.path.join(new_root, pathname[1:]) elif os.name == 'nt': (drive, path) = os.path.splitdrive(pathname) if pat...
[ "def change_root(new_root: str, pathname: str) -> str:\n if os.name == \"posix\":\n if not os.path.isabs(pathname):\n return os.path.join(new_root, pathname)\n else:\n return os.path.join(new_root, pathname[1:])\n\n elif os.name == \"nt\":\n (drive, path) = os.path.s...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Ensure that 'os.environ' has all the environment variables needed. We guarantee that users can use in config files, commandline options,
def check_environ(): global _environ_checked if _environ_checked: return if os.name == 'posix' and 'HOME' not in os.environ: import pwd os.environ['HOME'] = pwd.getpwuid(os.getuid())[5] if 'PLAT' not in os.environ: os.environ['PLAT'] = _sysconfig.get_platform() _en...
[ "def _verify_env() -> None:\n for var in ['dyn-password', 'log_dir']:\n if environ.get(var) is None:\n raise AttributeError(f'Environmental variable `{var}` not set')", "def verify_environment():\n reqs = ['NAME', 'RECIPIENT', 'SUBJECT', 'MESSAGE',\n 'MAILGUN_API_KEY...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Perform shell/Perlstyle variable substitution on 'string'. Every occurrence of '$' followed by a name is considered a variable, and variable is substituted by the value found in the 'local_vars' dictionary, or in 'os.environ' if it's not in 'local_vars'. 'os.environ' is first checked/augmented to guarantee that it cont...
def subst_vars(s, local_vars): check_environ() def _subst(match, local_vars=local_vars): var_name = match.group(1) if var_name in local_vars: return str(local_vars[var_name]) else: return os.environ[var_name] try: return re.sub(r'\$([a-zA-Z_][a-zA-Z_...
[ "def expand_vars(string, env_vars=None):\n if env_vars is None:\n env_vars = os.environ\n # create a replacement callback function that uses env_vars as it's first\n # argument, additional arguments will be added after it\n repl_callback = functools.partial(_var_repl, env_vars)\n return re.sub...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generate a useful error message from an EnvironmentError. This will generate an IOError or an OSError exception object. Handles Python 1.5.1 and 1.5.2 styles, and does what it can to deal with exception objects that don't have a filename (which happens when the error is due to a twofile operation, such as 'rename()' or...
def grok_environment_error(exc, prefix="error: "): # check for Python 1.5.2-style {IO,OS}Error exception objects if hasattr(exc, 'filename') and hasattr(exc, 'strerror'): if exc.filename: error = prefix + "%s: %s" % (exc.filename, exc.strerror) else: # two-argument functi...
[ "def _ecl_err_msg(self, e):\n errno, errmsg, errtask = self._ecl_exception_properties(e)\n if errno and errmsg and errtask:\n text = (f\"Error ({errno:d}): on line {self._ecl_get_lineno():d} \"\n f\"of '{self._name}' from '{errtask}':\\n\\t'{errmsg}'\")\n else:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Convert a string representation of truth to true (1) or false (0). True values are 'y', 'yes', 't', 'true', 'on', and '1'; false values are 'n', 'no', 'f', 'false', 'off', and '0'. Raises ValueError if 'val' is anything else.
def strtobool(val):
    """Convert a string representation of truth to 1 (true) or 0 (false).

    True values are 'y', 'yes', 't', 'true', 'on', and '1'; false values
    are 'n', 'no', 'f', 'false', 'off', and '0'.

    Raises:
        ValueError: if *val* is not one of the recognised truth strings.
    """
    lowered = val.lower()
    truthy = ('y', 'yes', 't', 'true', 'on', '1')
    falsy = ('n', 'no', 'f', 'false', 'off', '0')
    if lowered in truthy:
        return 1
    if lowered in falsy:
        return 0
    raise ValueError("invalid truth value %r" % (val,))
[ "def _str2bool(val):\n if isinstance(val, basestring):\n val = val.lower()\n if val in ('y', 'yes', 't', 'true', 'on', '1'):\n return 1\n elif val in ('n', 'no', 'f', 'false', 'off', '0'):\n return 0\n else:\n raise ValueError(\"invalid truth value %r\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Bytecompile a collection of Python source files to either .pyc or .pyo files in the same directory. 'py_files' is a list of files to compile; any files that don't end in
def byte_compile(py_files, optimize=0, force=0, prefix=None, base_dir=None, verbose=1, dry_run=0, direct=None): # nothing is done if sys.dont_write_bytecode is True if hasattr(sys, 'dont_write_bytecode') and sys.dont_write_bytecode: raise DistutilsByteCompileError('byte-compiling is di...
[ "def compile_bytecode():\n for dirpath, dirnames, filenames in os.walk('.'):\n paths = [os.path.join(dirpath, filename) for filename in filenames]\n # Must remove any .pyc files first in case they turn out to be present but readonly.\n # This seems to happen on some rare cases, we're not 100...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return a version of the string escaped for inclusion in an RFC 822 header, by ensuring there are eight spaces after each newline.
def rfc822_escape(header):
    """Return *header* escaped for inclusion in an RFC 822 header.

    Continuation lines in an RFC 822 header are marked by leading
    whitespace, so eight spaces are placed after every newline.
    """
    indent = '\n' + ' ' * 8
    return indent.join(header.split('\n'))
[ "def _spacestuff(self, line, force=False):\n if not line:\n return line\n # Although the RFC doesn't say so explicitly, in practice 'From' only\n # needs escaping when (1) not quoted and (2) actually encoded as\n # 'From' (so independent of the unicode sequence u'From').\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Finds the ld version. The version scheme differs under Mac OSX.
def _find_ld_version():
    """Return the version of ld, or None when it cannot be determined.

    Mac OS X formats the ld version string differently, so a dedicated
    regular expression (_MAC_OS_X_LD_VERSION) is used on that platform.
    """
    if sys.platform != 'darwin':
        return _find_exe_version('ld -v')
    return _find_exe_version('ld -v', _MAC_OS_X_LD_VERSION)
[ "def get_version():\n # this implementation avoids calling Foundation and will work on\n # non Apple OSes.\n vers = \"UNKNOWN\"\n build = \"\"\n # find the munkilib directory, and the version file\n munkilibdir = os.path.dirname(os.path.abspath(__file__))\n versionfile = os.path.join(munkilibdi...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Find the version of an executable by running `cmd` in the shell. `pattern` is a compiled regular expression. If not provided, defaults to _RE_VERSION. If the command is not found, or the output does not match the pattern, returns None.
def _find_exe_version(cmd, pattern=_RE_VERSION): from subprocess import Popen, PIPE executable = cmd.split()[0] if find_executable(executable) is None: return None pipe = Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE) try: stdout, stderr = pipe.stdout.read(), pipe.stderr.read() ...
[ "def _get_version(self, cmd_out: str):\n my_re = re.compile(self.config.vre)\n match = my_re.search(cmd_out)\n return match", "def _find_exe_version(cmd):\n executable = cmd.split()[0]\n if find_executable(executable) is None:\n return None\n out = Popen(cmd, shell=True, stdou...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a tuple providing the versions of gcc, ld and dllwrap For each command, if a command is not found, None is returned. Otherwise a string with the version is returned.
def get_compiler_versions():
    """Return a (gcc, ld, dllwrap) tuple of tool versions.

    Each entry is None when the corresponding command is not found;
    otherwise it is a string holding the reported version.
    """
    gcc_version = _find_exe_version('gcc -dumpversion')
    ld_version = _find_ld_version()
    dllwrap_version = _find_exe_version('dllwrap --version')
    return (gcc_version, ld_version, dllwrap_version)
[ "def get_versions():\n commands = ['gcc -dumpversion', 'ld -v', 'dllwrap --version']\n return tuple([_find_exe_version(cmd) for cmd in commands])", "def get_gcc_ver(exe=\"gcc\"):\n cmd = [exe, '-v']\n major = -1\n minor = -1\n patch = -1\n raw = sub.check_output(cmd, stderr=sub.STDOUT).decode...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return true if 'target' is outofdate with respect to any file listed in 'sources'. In other words, if 'target' exists and is newer than every file in 'sources', return false; otherwise return true. 'missing' controls what we do when a source file is missing; the default ("error") is to blow up with an OSError from insi...
def newer_group(sources, target, missing='error'): # If the target doesn't even exist, then it's definitely out-of-date. if not os.path.exists(target): return True # Otherwise we have to find out the hard way: if *any* source file # is more recent than 'target', then 'target' is out-of-date and...
[ "def any_changed(sources, target):\r\n\r\n if not os.path.exists(target):\r\n return True\r\n\r\n target_time = os.path.getmtime(target)\r\n return any(target_time < getmtime(source) for source in sources)", "def is_stale(target, source):\n if not os.path.exists(target):\n return True\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns True if path is a package (a directory with an __init__ file).
def _is_package(path):
    """Return True when *path* is a Python package directory.

    A package is a directory that contains an ``__init__.py`` file.
    """
    return (os.path.isdir(path)
            and os.path.isfile(os.path.join(path, '__init__.py')))
[ "def _is_package(path):\n return (\n os.path.isdir(path)\n and os.path.exists(os.path.join(path, '__init__.py'))\n )", "def is_package(path: str) -> bool:\n return os.path.isdir(path) and \"__init__.py\" in os.listdir(path)", "def is_module_or_package(path):\r\n is_module = osp.isfile(...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a dotted package name, given a subpath.
def _package_name(root_path, path):
    """Return the dotted package name of *path* relative to *root_path*.

    Raises:
        ValueError: when *path* does not live under *root_path*.
    """
    if not _under(path, root_path):
        raise ValueError('"%s" is not a subpath of "%s"' % (path, root_path))
    relative = path[len(root_path) + 1:]
    return relative.replace(os.sep, '.')
[ "def pkgname_from_path(path):\n pkg, ext = os.path.splitext(os.path.basename(_expand(path)))\n return pkg", "def package_to_path(package):\n return package.replace('.', '/')", "def package_to_path(package):\n return package.replace('.','/')", "def module_name_from_path(path: Path, root: Path) -> s...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return a list all Python packages found recursively within directories 'paths' 'paths' should be supplied as a sequence of "crossplatform" (i.e. URLstyle) path; it will be converted to the appropriate local path syntax. 'exclude' is a sequence of package names to exclude; '' can be used as a wildcard in the names, such...
def find_packages(paths=(os.curdir,), exclude=()): packages = [] discarded = [] def _discarded(path): for discard in discarded: if _under(path, discard): return True return False for path in paths: path = convert_path(path) for root, dirs, fi...
[ "def find_packages(where='.', exclude=()):\n out = []\n stack=[(convert_path(where), '')]\n while stack:\n where,prefix = stack.pop(0)\n for name in os.listdir(where):\n fn = os.path.join(where,name)\n looks_like_package = (\n '.' not in name\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Like os.path.splitext, but also takes off a .tar component into the extension.
def splitext(path):
    """Split *path* into (base, extension), folding '.tar' into the extension.

    Behaves like os.path.splitext except that a trailing '.tar' component
    becomes part of the extension, e.g. 'a.tar.gz' -> ('a', '.tar.gz').
    """
    base, ext = posixpath.splitext(path)
    if base.lower().endswith('.tar'):
        # move the '.tar' out of the base and prepend it to the extension
        base, ext = base[:-4], base[-4:] + ext
    return base, ext
[ "def splitext(path):\r\n base, ext = posixpath.splitext(path)\r\n if base.lower().endswith('.tar'):\r\n ext = base[-4:] + ext\r\n base = base[:-4]\r\n return base, ext", "def splitext(self, the_path):\n base, ext = posixpath.splitext(the_path)\n if base.lower().endswith(\".tar...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns true if all the paths have the same leading path name (i.e., everything is in one subdirectory in an archive)
def has_leading_dir(paths):
    """Return True when all *paths* share a single leading directory.

    In archive terms: everything would unpack into one top-level
    subdirectory.  An entry with no leading directory at all makes the
    result False.
    """
    leading = None
    for item in paths:
        prefix, _rest = split_leading_dir(item)
        if not prefix:
            return False
        if leading is None:
            leading = prefix
        elif prefix != leading:
            return False
    return True
[ "def has_leading_dir(paths):\r\n common_prefix = None\r\n for path in paths:\r\n prefix, rest = split_leading_dir(path)\r\n if not prefix:\r\n return False\r\n elif common_prefix is None:\r\n common_prefix = prefix\r\n elif prefix != common_prefix:\r\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Quote command-line arguments for DOS/Windows conventions. Just wraps every argument which contains blanks in double quotes, and returns a new argument list.
def _nt_quote_args(args): # XXX this doesn't seem very robust to me -- but if the Windows guys # say it'll work, I guess I'll have to accept it. (What if an arg # contains quotes? What other magic characters, other than spaces, # have to be escaped? Is there an escaping mechanism other than # quo...
[ "def quoteCommandlineArgs( commandline ):\n if platform.system() == 'Windows':\n return subprocess.list2cmdline( commandline )\n else:\n return \" \".join( quote(arg) for arg in commandline )", "def quote_arguments(args):\n if isinstance(args, str):\n args_list = [arg...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }