query: string (lengths 9 to 9.05k)
document: string (lengths 10 to 222k)
negatives: list (lengths 19 to 20)
metadata: dict
Non-specific for a tree. Used to write an SWC file from a morphology stored in this tree.
def write_SWC_tree_to_file(self,file_n) : writer = open(file_n,'w') nodes = self.get_nodes() nodes.sort() # 3 point soma representation (See NeuroMorpho.org FAQ) s1p = nodes[0].content["p3d"] s1_xyz = s1p.xyz s2p = nodes[1].content["p3d"] s2_xyz = s2p.xyz...
[ "def write_SWC_tree_to_file(self,file_n) :\n raise Exception(\"Not yet implemented\")\n writer = open(file_n,'w')\n nodes = self.get_nodes()\n nodes.sort()\n for node in nodes :\n p3d = node.get_content()['p3d'] # update 2013-03-08\n p3d_string = p3d.swc_str(...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Nonspecific for a "tree data structure" Read and load a morphology from an SWC file and parse it into an STree2 object. On the NeuroMorpho.org website, 5 types of somadescriptions are
def read_SWC_tree_from_file(self,file_n,types=range(1,10)) : # check soma-representation: 3-point soma or a non-standard representation soma_type = self._determine_soma_type(file_n) #print "STree2::read_SWC_tree_from_file found soma_type=%i" % soma_type file = open(file_n,'r') ...
[ "def read_socorro(filename):\n socorro_in = SocorroIn(open(filename).readlines())\n tags = socorro_in.get_variables()\n avec = [tags['scale'][i] * np.array(tags['avec'][i]) for i in range(3)]\n\n symbols = tags['atoms']['spfnames']\n numbers = []\n for s in symbols:\n numbers.append(symbol_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Costly method to determine the soma type used in the SWC file. This method searches the whole file for soma entries.
def _determine_soma_type(self,file_n): file = open(file_n,"r") somas = 0 for line in file: if not line.startswith('#') : split = line.split() index = int(split[0].rstrip()) s_type = int(split[1].rstrip()) if s_type == 1 ...
[ "def get_filetype(file):\n for line in read_file(file):\n if 'gamess' in line.lower():\n return 'gamess'\n if 'gaussian' in line.lower():\n return 'gauss'", "def guess_type(file_data,report=True,field=None):\n file_info = {'type': 'unknown', 'nrecs':0, 'fields': {} }\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Find adb, or download it if needed.
def _copy_adb_to(self, dest): logging.info("Retrieving platform-tools") tools = PlatformTools() tools.extract_adb(dest)
[ "def _get_adb_path(self):\r\n if self._is_on_64bit():\r\n architecture = \"x64\"\r\n else:\r\n architecture = \"x86\"\r\n adb_path = os.path.join(ADB_DIRECTORY, architecture, \"adb.exe\")\r\n\r\n assert os.path.isfile(adb_path), \"adb.exe not found!\"\r\n\r\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates the docker container, returning the sha of the container, or None in case of failure.
def create_container(self): identity = None print("Creating docker image: {}.. be patient this can take a while!".format(self.tag)) try: logging.info("build(path=%s, tag=%s, rm=True, decode=True)", self.dest, self.tag) api_client = self.get_api_client() result...
[ "def _create_container(container_name, image_name, output_folder):\n # Create the container\n docker_cmd = DOCKER_TOOL + \" create -i --rm --name \" + container_name + \" \"\n\n # Add output folder\n local_output_folder = os.path.join(os.getcwd(), output_folder)\n if not os.path.isdir(local_output_fo...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Launches the container with the given sha, publishing adb on port, and grpc on port + 1. Returns the container.
def launch(self, image_sha, port=5555): client = docker.from_env() try: container = client.containers.run( image=image_sha, privileged=True, publish_all_ports=True, detach=True, ports={"5555/tcp": port, "5556/tcp...
[ "def opencontainer(args, ctrl):\n if len(args.services) > 1:\n print('Cannot open more than 1 service in 1 call')\n return False\n name = args.services[0]\n serv = ctrl.services[name]\n try:\n if isinstance(ctrl.services[name]['open'], list):\n (\n ctrl.ser...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
A LaTeX representation method in Jupyter notebook.
def _repr_latex_(self): return self._latex
[ "def _reprlatex(self):\n pass # pragma: no cover", "def _repr_latex_(self):\n return f\"${self._reprlatex}$\"", "def _repr_latex_(self):\n return unit_format.Latex.to_string(self)", "def as_tex(self) -> str:\n out = sympy.latex(sympy.simplify(sympy.expand(self._f)))\n out =...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
LSTM with cached / preserved hidden state
def _attach_cached_lstm_nodes( input, hparams=None ): # LSTM with cached / preserved hidden state # https://r2rt.com/non-zero-initial-states-for-recurrent-neural-networks.html cell = tf.contrib.rnn.LSTMCell( num_units=NUM_SHARDS * hparams.word_embedding_size, num_proj=hpa...
[ "def LSTM(inputs, dim, seq_len, name):\n with tf.name_scope(name):\n cell = tf.contrib.rnn.LSTMCell(num_units=dim)\n hidden_states, cell_states = tf.nn.dynamic_rnn(cell, inputs=inputs,\n sequence_length=seq_len, dtype=tf.float32, scope=name)\n\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Search regex if the expected value is a string. Compare otherwise.
def _check_value(self, expected, actual): if (type(expected) is str and (type(actual) is str or type(actual) is unicode)): return re.search(expected, actual) is not None return expected == actual
[ "def __call__(self, value):\n if not (self.inverse_match is not bool(self.regex.search(value))):\n self.fail(detail=self.message)", "def test_match_type_function(value, expected_type, does_match):\n # then\n assert utils.match_type(value, expected_type) is does_match", "def matches_reg_e...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check that place (lat,long) is contained by place bounding_box
def _check_place(self, expected_place): place = self.json['place'] if place is None: raise TestFailedError('FIELD place: Expected: expected_place,' ' Actual: Not present') min_long, min_lat = place['bounding_box']['coordinates'][0][0] max_lon...
[ "def check_bounding_box_of_file(self, bound_min_lon, bound_min_lat,\n bound_max_lon, bound_max_lat):\n if (((float(bound_min_lon)) <= (self.min_lon)) &\n ((float(bound_min_lon)) <= (self.max_lon)) &\n ((float(bound_min_lat)) <= (self.min_lat)) &\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Verifies that each status has the required fields given by expected_status. Values match the regex/string.
def _check_statuses(self, expected_status): statuses = self.json['statuses'] if len(self.json['statuses']) == 0: raise TestFailedError( 'FIELD STATUSES: Expected: At least one status,' ' Actual: No status') for status in self.json['statuses']: ...
[ "def assert_unit_state(self, status: str='controllable'):\n for unit_type, _, idx in self.vars:\n if status not in self.net[unit_type]:\n print(f\"'{status}' of {unit_type}_{idx} not defined. Assumed to be True!\")\n else:\n assert bool(self.net[unit_type][...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Verifies search response json metadata.
def _check_metadata(self, expected_metadata): metadata = self.json['search_metadata'] for key in expected_metadata.keys(): expected = expected_metadata[key] actual = metadata[key] if metadata.has_key(key)\ else 'Not present' if not self._check_...
[ "def version_checking(self,meta):\n if meta[0] == self._valid_metadata:\n pass\n else:\n raise Exception('Incorrect Metadata format')", "def validate_metadata(self, document_metadata):\n\n metadata_file_path = document_metadata['location'] + '/metadata.json'\n met...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Detect when a crossing should be used (2 cases in total).
def is_crossing(self) -> bool: return self.num_river >= 3 or (self.num_coast == 1 and self.num_river == 2)
[ "def detect_cross(A,B,C,D):\n\n Ax, Ay = A[0], A[1]\n Bx, By = B[0], B[1]\n Cx, Cy = C[0], C[1]\n Dx, Dy = D[0], D[1]\n\n # We assume A,B,C,D are all different nodes.\n # i.e. have NOT been sent edges (A,B) and (A,C)\n # So we can check for coincidence.\n\n if Ax == Bx and Ay == By: print...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
River starts only have 1 river next to them, and never a coast.
def is_start(self) -> bool: return self.num_river == 1 and self.num_coast == 0
[ "def river(profile):\n wmg = generate_weak_margin_graph(profile)\n cw = profile.condorcet_winner()\n # Ranked Pairs is Condorcet consistent, so simply return the Condorcet winner if exists\n if cw is not None: \n winners = [cw]\n else:\n winners = list() \n margins = s...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Runs the ABI tests.
def RunAbiTest(config_name, parsed_args): if _DoBuild(config_name, parsed_args) != 0: return 1 symbols = _GetSymbols(config_name) # Checkout the other reference and do another build. logging.info('Checking out %s to compare against...', parsed_args.ref) if subprocess.call(['git', '-C', ROOT_DIR, 'checkou...
[ "def build_tests():\r\n run(dir(\"Macaroni\", \"Next\", \"Tests\"), \"cavatappi -d -i\")", "def test_arch():\n\n arch()\n arch(\"-f\")\n arch(\"--frontend\")\n arch(\"-b\")\n arch(\"--backend\")", "def run_tests():\n print blue(\"Running tests suites\")\n with fabtools.python.virtualenv(...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Visualize time chops of a given Timechop object using matplotlib
def visualize_chops(chopper, show_as_of_times=True, show_boundaries=True, save_target=None): chops = chopper.chop_time() chops.reverse() fig, ax = plt.subplots(nrows=len(chops), sharex=True, sharey=True, squeeze=False, figsize=FIG_SIZE) for idx, chop in enumerate(chops): train_as_of_times = c...
[ "def plot_time_step(self):\n timestep = self.timestep\n fig = plt.figure(1)\n ax = plt.subplot(1, 1, 1)\n ax.plot(zip(*timestep)[0], zip(*timestep)[1], '-b.')\n ax.set_xlabel('Simulation Hour')\n ax.set_ylabel('Average time step in hour (minutes)')\n plt.show()", "...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
OR operation on the data stack. Pops two numbers off the data stack, performs a bitwise OR, and pushes the result onto the data stack.
def or_func(): global dstack dstack[-2] = dstack[-1] | dstack[-2] popd() return
[ "def __or__(self, other: IntegerValue) -> IntegerValue:\n return _binop(ops.BitwiseOr, self, other)", "def bitwise_or_(self, e):\n return self.__lazy_operate(operator.or_, e)", "def opcode_set_register_bitwise_or(self, opcode: bytes) -> None:\n # Get the necessary information from the opcod...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Interrogate the OSM nominatim API for a city polygon. Nominatim may not always return city matches in the most intuitive order, so results need to be searched for a compatible polygon. The index of the polygon is required for proper use of osmnx.graph_from_place(). Some cities do not have a polygon at all, in which cas...
def find_osm_polygon(city): search_params = {'format': 'json', 'limit': 5, 'dedupe': 0, 'polygon_geojson': 1, 'q': city} url = 'https://nominatim.openstreetmap.org/search' response = requests.get(url, params=search_params) for index, match in enumerate(response.json()): #...
[ "def test_geocode_city(self):\n self._select_geocoder()\n resource = GeocoderResource()\n req = HttpRequest()\n req.method = 'GET'\n req.GET['q'] = \"Denver\"\n bundle = resource.build_bundle(request=req)\n results = resource.obj_get_list(bundle)\n self.assert...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Populate the cross streets for each node, and add unique ids to the ways
def get_connections(ways, nodes): node_info = {} for way in ways: # There are some collector roads and others that don't # have names. Skip these if way['properties']['name']: # While we are still merging segments with different names, # just use both roads. Thi...
[ "def add_roads(nodetree, grid_node, road_bl_objects):\n # take object names from SceneCity high-poly assets collection\n road_collector_node = nodetree.nodes.new(\"RoadPortionsCollectionNode\")\n road_collector_node.location = (600, -500)\n\n for i, (name, kind) in enumerate(road_bl_objects):\n #...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Reads in the osm_ways file, cleans up the features, and reprojects results into the EPSG:3857 projection. Additionally writes a key which shows the correspondence between highway type as a string and the resulting int feature.
def clean_ways(orig_file, DOC_FP): way_lines = fiona.open(orig_file) highway_keys = {} results = [] for way_line in way_lines: # All features need to be ints, so convert them here # Use speed limit if given in osm speed = way_line['properties']['maxspeed'] if speed: ...
[ "def osm_to_serial_pickles(filename):\n node_start = re.compile(r'\\s*<node.*?id=\"(\\d+)\".*?lat=\"(-?\\d+\\.?\\d*)\".*?lon=\"(-?\\d+\\.?\\d*)\".*?(/?)>')\n node_end = re.compile(r'\\s*</node>')\n way_start = re.compile(r'\\s*<way.*?id=\"(\\d+)\".*?>')\n way_node = re.compile(r'\\s*<nd ref=\"(\\d+)\".*...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given a list of ways, intersection nodes, and all nodes, write them out to a geojson file.
def write_geojson(way_results, node_results, outfp): feats = way_results for node in node_results: if not node['properties']['dead_end']: node['properties']['intersection'] = 1 if node['properties']['highway'] == 'traffic_signals': node['properties']['signal'] = 1 ...
[ "def kml_multiple_to_geojson(infile_path, outdir_path, geojson_properties={}):\n data = __read_file(infile_path)\n coord_dict = __get_all_coords(data)\n if not os.path.exists(outdir_path):\n os.makedirs(outdir_path) \n for section_id, coords in list(coord_dict.items...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Parse a csv file with anonymous student data and prepare a list of students for enrollment in edxadapt.
def get_students_for_enrollment(headers, **kwargs): path_to_file = kwargs['csvfile'] if not os.path.exists(path_to_file): print("File with path: {} does not exist, please try again".format(path_to_file)) sys.exit() enrolled_students = get_enrolled_students(headers, **kwargs) with open(pa...
[ "def load_students() -> List[List[str]]:\n path = os.path.join(BASE_PATH, 'hta/groups/students.csv')\n return line_read(path, delim=\",\")", "def readCSV(self, csvfile):\n\n students = []\n with open(csvfile, 'rb') as f :\n reader = csv.reader(f, delimiter=';')\n for row ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get students already enrolled in edxadapt.
def get_enrolled_students(headers, **kwargs): users = requests.get('https://{host}:{port}/api/v1/course/{course_id}/user'.format(**kwargs), headers=headers) if users: users = users.json() enrolled_users = {'started': set(), 'not_started': set()} enrolled_users['started'].update(set(users...
[ "def get_enrolled_students(self) -> List[str]:\n return self.enrolled_students", "def get_enrolled_students(course_id):\n objects = UserProfile.objects\n course_key = CourseKey.from_string(course_id)\n students = objects.filter(user__courseenrollment__course_id=course_key,\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the teams from a link to a user.
def get_teams(userlink): page = requests.get("https://ctftime.org%s" % userlink) tree = html.fromstring(page.content) teams = tree.xpath('/html/body/div[2]/table/tr/td/a/text()') teamlinks = tree.xpath('/html/body/div[2]/table/tr/td/a/@href') return dict(zip(map(unicode, teams), map(str, teamli...
[ "def team(request, league_url, team_url):\n\n # Check for valid league / team \n league_name = decode_url(league_url)\n league = get_object_or_404(League, name=league_name)\n\n team_name = decode_url(team_url)\n team = get_object_or_404(league.team_set, name=team_name)\n\n players = team.player_se...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the placement of a team at a certain CTF
def get_team_placement(teamlink, eventid): page = requests.get("https://ctftime.org%s" % teamlink) tree = html.fromstring(page.content) placement = tree.xpath( '//*/table/tr/td[3]/a[@href="/event/%s"]' '/../../td[@class="place"]/text()' % eventid) try: return int(...
[ "def get_position_team_name_home(self, surface):\n return 0, 0", "def get_initial_player_placement(self, team):\n min_x = 1\n max_x = self.BOX_WIDTH - 2\n if team == 1:\n min_y = self.BOX_HEIGHT - 3\n max_y = self.BOX_HEIGHT - 2\n elif team == 2:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Read the .last_check file.
def _read_last_check(file_path): _UpdateHandler._check_path(file_path=file_path) with io.open(file_path, mode="r") as in_file: first_line = in_file.readline() try: last_check = datetime.strptime(first_line, UTC_FMT) except ValueError: return None ...
[ "def check_last_updated():\n date_today = date.today()\n with (open(LAST_UPDATED, 'r')) as f:\n return f.readline() == str(date_today)", "def _read_last_commit_hash(self):\n last_commit_hash_file_name = self._last_build_hash_file_name()\n if _fortworth.exists(last_commit_hash_file_name)...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Write the .last_check file.
def _write_last_check(file_path, content): with io.open(file_path, mode="w", newline="\n") as out_file: out_file.write(str(content.strftime(UTC_FMT)))
[ "def write_counters_to_file(self):\n with open(os.path.join(self.cwd,'data/others/counters.txt'),'w') as outputfile:\n json.dump(CounterValues().last_counter,outputfile)\n return True \n return False", "def write_to_file(self):\n\n filename = self.entries[0].timestamp + ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
List the current and latest release info, and latest available files.
def list(update_info): if not os.path.exists(update_info.installed_path): installed_info = "- None!\n\n" else: installed = _UpdateHandler._read_json(file_path=update_info.installed_path) installed_info = _UpdateHandler._get_release_message(json_data=installed) ...
[ "def list_releases():\n print blue(\"Releases list\")\n cur = env.releases.current()\n print \"Current release: %s\" % cur\n for r in env.releases.list():\n mdata = {\n 'user': '<somebody>',\n 'host': '<somewhere>',\n 'environ': '<some_env>'\n }\n pa...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Find the download URL for the file in the GitHub API JSON response doc.
def _find_download_url(update_info, json_data, file_name): rel_name = json_data["tag_name"] files = json_data["assets"] if len(files) == 0: raise PyXFormError( "No files attached to release '{r}'.\n\n{h}" "".format(r=rel_name, h=update_info.manual_msg...
[ "def get_file_url(self):\n return self.raw['url']", "def download_url():", "def fetch_github_file_listing(endpoint):\n resp = requests.get(endpoint)\n resp.raise_for_status()\n return resp.json()", "def get_url(self):\n return self.file.url", "def get_download_url(self):\n\n re...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
For each bin file, get the zip file item file name and the output path. Ignore files that may appear in the __MACOSX info dir, and if two files have the same destination path and the same CRC then they're probably duplicate files so only one of them is copied out.
def _unzip_find_jobs(open_zip_file, bin_paths, out_path): zip_info = open_zip_file.infolist() zip_jobs = {} for zip_item in zip_info: if zip_item.filename.startswith("__MACOSX"): continue for file_target in bin_paths: if fnmatch.fnmatch(zip...
[ "def package_files( converted_folder, output_folder ):\r\n\r\n\tpackage_executable = os.path.join( WORKING_DIR, 'vpkg_wd.exe' )\r\n\tif not os.path.lexists( package_executable ):\r\n\t\twx.MessageBox( 'vpkg_wd.exe was missing from the converter director', style = wx.OK, caption = 'Volition FBX Converter' )\r\n\t\tr...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This method adds an ice cream flavor to the flavors list.
def add_flavor(self, flavor): self.flavors.append(flavor)
[ "def get_icecream_flavors(self):\n print(f\"These are the Ice-Cream Flavors Available:\\n {self.icecream_flavors}\")", "def testAddFlavor(self):\n # Login\n self.simulate_login()\n # Get response as a result of posting new flavor\n response = self.client.post(\"/flavor/add\", da...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This method removes a flavor from the list of ice cream flavors.
def remove_flavor(self, flavor): self.flavors.remove(flavor)
[ "def purge_flavors(self, class_filter, flavors):\n current_flavors = self.get_flavors(class_filter=class_filter)\n purged = False\n for flavor in current_flavors:\n if flavor.name not in flavors[class_filter]:\n self.debug_log('delete flavor {}'.format(flavor.name))\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This method displays all the flavors available at the ice cream stand.
def show_flavors(self): print("Available ice cream flavors are: ", end="") for flavor in self.flavors: if flavor == self.flavors[-1]: print(f"{flavor.title()}.", end="") else: print(f"{flavor.title()}, ", end="") print("")
[ "def display_flavors(self):\n\t\tprint(\"The following flavors are offered: \")\n\t\tfor flavor in self.flavors:\n\t\t\tprint(\"- \" + flavor.title())", "def show_flavors(self):\n print(f\"The following flavors are available {self.flavors}\")", "def get_icecream_flavors(self):\n print(f\"These are...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Split a stereo file into separate mono files.
def split_stereo(input_file, output_file_left, output_file_right): left_args = ['sox', '-D', input_file, output_file_left, 'remix', '1'] right_args = ['sox', '-D', input_file, output_file_right, 'remix', '2'] return sox(left_args) and sox(right_args)
[ "def splitmono(snd):\n left = audioop.tomono(snd, audio_params[1], 1, 0)\n right = audioop.tomono(snd, audio_params[1], 0, 1)\n\n return [left, right]", "def mono_only(wavfile):\n open_wav = wave.open(wavfile, 'r')\n wav_channels = open_wav.getnchannels()\n file_mono = open_wav\n if wav_chann...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a stereo audio file from the 2 mono audio files. Left goes to channel 1, right goes to channel 2.
def multimono_to_stereo(left_channel, right_channel, output_file): return sox(['sox', '-M', left_channel, right_channel, output_file])
[ "def stereo(snd):\n return audioop.tostereo(snd, audio_params[1], 0.5, 0.5)", "def splitmono(snd):\n left = audioop.tomono(snd, audio_params[1], 1, 0)\n right = audioop.tomono(snd, audio_params[1], 0, 1)\n\n return [left, right]", "def stereo_fm(x, fs=2.4e6, file_name='test.wav'):\n N1 = 10\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Naively mix (sum) a list of files into one audio file. Volume of each file is set by the value in weights.
def mix_weighted(file_list, weights, output_file): args = ["sox", "-m"] for fname, weight in zip(file_list, weights): args.append("-v") args.append(str(weight)) args.append(fname) args.append(output_file) return sox(args)
[ "def mergeAudio(self, audiolist, name):\n self.file = AudioSegment.empty()\n for audio in audiolist:\n self.file += AudioSegment.from_mp3(audio)\n self.file.export(name)", "def mix_wavs(input_fns, out_fn=\"temp/mix.wav\"):\n # Shoutout to https://stackoverflow.com/questions/4039...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This method generates smaller playlists from the input playlist
def generate_playlists(playlist_size, playlist_songs, user_id): # Create the smaller playlists from the given large playlist smaller_playlists = [ playlist_songs[x : x + playlist_size] for x in range(0, len(playlist_songs), playlist_size) ] for index, playlist in enumerate(smaller_playl...
[ "def _create_play_list(self, pid_item, ploc, glist):\n \n if pid_item.tag in [\"I00001\",]:\n webpage = os.path.normpath('../{}/index.html'.format(self.project))\n else:\n# webpage = os.path.normpath('../{}/{}.html'.format(self.project, pid_item.tag))\n webpage ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates a database and database user with the name passed in. Returns the database password.
def create_db(server, name): user = name[:14] password = lib.password_creator.create(10) print("creating database with the password {}".format(password)) wf, wf_id = webfaction.connect(server) res = wf.create_db_user(wf_id, user, password, "mysql") print(res) res = wf.create_db(wf_id, name[:...
[ "def create_db_and_user(self, name):\n if self.create_db(name):\n password = self.create_user(name, name)\n return password\n else:\n return False", "def create_user(dbUsername,\n dbUserHost='localhost',\n dbPassword=False,\n dbRootUs...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Applies a threshold operation and returns a list of contour bounding rectangles, each as a list [x, y, width, height]. img: single-channel 8-bit image. threshold: threshold value.
def get_rects(img, threshold): threshold_result = np.zeros(img.shape, np.uint8) cv2.threshold(img, threshold, 255, cv2.THRESH_BINARY, threshold_result) im2, contours, hierarchy = cv2.findContours(threshold_result, cv2.RETR_EXTERNAL, ...
[ "def get_Contours(self, img,t1, t2, opp, opp2):\n _, thresh1 = cv2.threshold(img, 122, 51, cv2.THRESH_BINARY)\n contours, hierarchy = cv2.findContours(thresh1, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\n big_cnt = []\n for cnt in contours:\n area = cv2.contourArea(cnt)\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Draws rectangles on an image. img: 3-channel BGR 8-bit image. rects: list of rectangles [x, y, width, height]. color: BGR color.
def draw_rects(img, rects, color=(255, 0, 0)): for rect in rects: cv2.rectangle(img, (rect[0], rect[1]), (rect[0] + rect[2], rect[1] + rect[3]), color, 3)
[ "def draw_rects(img, rects, color):\n for x1, y1, x2, y2 in rects:\n cv2.rectangle(img, (x1, y1), (x1 + x2, y1 + y2), color, 2)", "def drawRectangles(img, rects):\n for rect in rects:\n x1, y1, x2, y2 = makeTuple(rect)\n b = random.randint(0,255)\n g = random.randint(0,255)\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tests a logged-in investor with registered investments
def test_investidor_logado_com_investimentos(self): nizbel = User.objects.create_user('nizbel', 'nizbel@teste.com', 'nizbel') self.nizbel = nizbel.investidor # Cadastrar investimentos #CDB/RDB cdb_rdb_1 = CDB_RDB.objects.create(investidor=self.nizbel, nome='CDB teste 1'...
[ "def test_investidor_logado_com_investimentos_vencidos(self):\n vencido = User.objects.create_user('vencido', 'vencido@teste.com', 'vencido')\n self.vencido = vencido.investidor \n \n # Cadastrar investimentos\n # CRI/CRA\n cri_cra_1 = CRI_CRA.objects.create(nome='CRI teste...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tests a logged-in investor with matured investments
def test_investidor_logado_com_investimentos_vencidos(self): vencido = User.objects.create_user('vencido', 'vencido@teste.com', 'vencido') self.vencido = vencido.investidor # Cadastrar investimentos # CRI/CRA cri_cra_1 = CRI_CRA.objects.create(nome='CRI teste 1', codigo...
[ "def test_investidor_logado_com_investimentos(self):\n nizbel = User.objects.create_user('nizbel', 'nizbel@teste.com', 'nizbel')\n self.nizbel = nizbel.investidor \n \n # Cadastrar investimentos\n #CDB/RDB\n cdb_rdb_1 = CDB_RDB.objects.create(investidor=self.nizbel, nome='C...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
preprocess() is documented to raise PreprocessError but the default (circa py3.9) impl. is a noop, and the 'unix' impl. raises CompileError. Always a pleasure to find accurate documentation...
def _default_preprocess(self, *args, **kws): raise CompileError("preprocess() not implemented")
[ "def _msvc_preprocess(self, source, output_file=None, macros=None,\n include_dirs=None, extra_preargs=None, extra_postargs=None):\n # validate and normalize\n ignore, macros, include_dirs = self._fix_compile_args(None, macros, include_dirs)\n # translate macros/include_dirs into -D/-U/-...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Replacement for default (noop) distutils.msvccompiler.MSVCCompiler.preprocess()
def _msvc_preprocess(self, source, output_file=None, macros=None, include_dirs=None, extra_preargs=None, extra_postargs=None): # validate and normalize ignore, macros, include_dirs = self._fix_compile_args(None, macros, include_dirs) # translate macros/include_dirs into -D/-U/-I strings...
[ "def _default_preprocess(self, *args, **kws):\n raise CompileError(\"preprocess() not implemented\")", "def preprocess (self,\r\n source,\r\n output_file=None,\r\n macros=None,\r\n include_dirs=None,\r\n extra_prearg...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Print detail of the contents of a histogram bin.
def _printBin(bin_): print('Bin has %d items:' % len(bin_), file=sys.stderr) for i, hashInfo in enumerate(bin_, start=1): print(' Item %d:' % i, file=sys.stderr) for key, value in hashInfo.items(): # The 16 below is the length of the longest key (subjectTrigPoint). print...
[ "def printBins(self, minBin, maxBin):\n pass", "def visualize_histogram(histogram, annotation, fig_size=(20,10), fontsize=12):\n plt.rcParams.update({'font.size': fontsize})\n fig = plt.figure(figsize=fig_size)\n plt.bar(histogram[1][:-1], histogram[0], edgecolor=\"black\", align=\"edge\")\n pl...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Print debugging information to stderr showing the local variables from a call to normalizeBin and then raise an C{AssertionError}.
def debugPrint(locals, msg): print('normalizeBin error:', file=sys.stderr) print(' queryLen: %d' % queryLen, file=sys.stderr) skipVars = set(('debugPrint', 'bin_', 'allQueryFeatures', 'allQueryOffsets', 'allSubjectFeatures', 'allSubjectOffsets')) ...
[ "def test_includes_traceback_if_debug_logging_is_on(self):\n error(\"error message\", func=utils.abort, stdout=error)\n assert_contains(self.dummy_string, sys.stdout.getvalue())", "def unexpected_exception():\n global n_assertions\n n_assertions += 1\n traceback.print_exc( file = sys.stdout...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Run the ALAPSchedule pass on `dag`.
def run(self, dag, time_unit=None): # pylint: disable=arguments-differ if len(dag.qregs) != 1 or dag.qregs.get('q', None) is None: raise TranspilerError('ALAP schedule runs on physical circuits only') if not time_unit: time_unit = self.property_set['time_unit'] new_dag...
[ "def run(self, dag):\n if len(dag.qregs) != 1 or dag.qregs.get(\"q\", None) is None:\n raise TranspilerError(\"ASAP schedule runs on physical circuits only\")\n\n time_unit = self.property_set[\"time_unit\"]\n\n new_dag = DAGCircuit()\n for qreg in dag.qregs.values():\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generates a prior correlated with the original psi.
def generate_prior(psi, corr=0.8, min_sigma=0.1, max_sigma=5, steps=2000): sigma_all = np.linspace(min_sigma, max_sigma, steps) corr_all = np.zeros(len(sigma_all)) psi_logit = logit(psi, minval=0.0001) for i in range(len(sigma_all)): _prior_logit = psi_logit + np.random.normal(0, sigma...
[ "def prior(self) -> tfp.distributions.Distribution:\n pass", "def priorLikelihood(self, step):", "def initPriorHypo(self):\n self.priorHypo = np.ones(model.nhypo)/model.nhypo", "def prior(cube, ndim, nparams):\n\n cube[0] = pri.GeneralPrior(cube[0], 'U', 0, 1) \n c...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculates the GCD of X and Y using Extended Euclid's Algorithm.
def ext_gcd(x, y): if x < y: # print("Warning: the first number should be greater than the second number.") return ext_gcd(y, x) if y == 0: return (x, 1, 0) (d, a, b) = ext_gcd(y, x % y) return (d, b, a - b * (x // y))
[ "def gcd (x,y):\n while y:\n z=x\n x=y\n y=z%y\n # while x:\n # z=y\n # y=x\n # x=z%x\n # print(\"最大公约数:\",y)\n return x", "def xlgcd(a, b):\n\n d = euclid(a, b)\n step = 0\n \n if a < b:\n a,b = b,a\n else:\n pass\n u, v, x, y ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the multiplicative inverse of X mod Y.
def mul_inverse(x, y): ans = ext_gcd(y, x)[2] if ans >= 0: return ans return ans + y
[ "def modulo_multiplicative_inverse(A, M, X=1):\n # Find gcd using Extended Euclid's Algorithm\n gcd, x, y = extended_euclid_gcd(A, M, X)\n\n # In case x is negative, we handle it by adding extra M\n # Because we know that multiplicative inverse of A in range M lies\n # in the range [0, M-1]\n if x...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Write the STRING to a file with the given FILENAME
def write_file(filename, string): string = str(string) file = open(filename, 'w') file.write(string) file.close() print("File written successfully.")
[ "def write_string_to_file(filename, file_content):\n with FileIO(filename, mode=\"w\") as f:\n f.write(file_content)", "def write_to_file(data_string, file_path):\n os.makedirs(os.path.dirname(file_path), exist_ok=True)\n with open(file_path, 'w+') as f:\n f.write(data_string)", "def write_file...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Encrypt the STRING using the defined public keys, then write the encrypted string to a file.
def encrypt_to_file(string, filename): encrypted_string = encrypt(string) write_file(filename, encrypted_string) return encrypted_string
[ "def encrypt_file(filename, public_key):\n f = Fernet(public_key)\n with open(filename, \"rb\") as file:\n # read all file data\n file_data = file.read()\n # encrypt data\n encrypted_data = f.encrypt(file_data)\n # write the encrypted file\n with open(filename, \"wb\") as file:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
A helper function. Takes in a Python list of encrypted characters, decrypts the characters one by one, then recombines the characters into a string.
def decrypt_lst(char_list): result = [chr(pow(x, d, N)) for x in char_list] return "".join(result)
[ "def letters_to_swap(letter1, letter2, ciphertext):\n result = \"\"\n for c in ciphertext:\n if c == letter1:\n result = result + letter2\n elif c == letter2:\n result = result + letter1\n else:\n result = result + c\n return result", "def decrypt(int...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Read in a file that contains a string of encrypted characters, then decrypt it.
def decrypt_file(filename): file = open(filename, 'r') encrypted = file.read() file.close() return decrypt(encrypted)
[ "def dec():\n filename = click.prompt('Please enter the filename to decrypt', type=str)\n password = click.prompt('Please enter the password (key) to decrypt the file with: ', hide_input=True, type=str)\n # Open the encrypted file and decrypt the contents\n with open(filename, 'rb') as encrypted:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Dialogflow Messenger Suggestion Chips Builder. chips: list. Sample example of displaying suggestion chips: (["suggestion1", "suggestion2", "suggestion3"])
def suggestion_chips(self, chips:list): add_chip = [] for chip in chips: add_chip.append({ "text": chip, "image": { "src": { "rawUrl": "" } }, "...
[ "def __init__(self, chips=0):\n self.chips = chips\n self.cards = []\n self.splitCards = []\n self.aces = 0\n self.splitAces = 0", "def showChips(self):\n print(\"You currently have %d chips\" % self.chips)", "def generate_random_suggestions(self, num=1):\n\t\t\n\t\tif ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Dialogflow Messenger Simple Title Card Response Builder title=str, sub_title=str, raw_url=str, action_link=str. Sample example of display_text ex. simple_title_card("Title","Subtitle","
def simple_title_card(self, title, sub_title, raw_url, action_link): simple_title_cards = [] simple_title_cards.append({ "type": "info", "title": title, "subtitle": sub_title, "image": { "src": {...
[ "def informative_card(self, title, data: list):\r\n\r\n return {\r\n \"payload\":\r\n {\r\n \"richContent\": [\r\n [\r\n {\r\n \"type\": \"description\",\r\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Dialogflow Messenger Informative Card Response Builder title=str, data=list. Sample example of display_text ex. informative_card("Title",["Sample Text1","Sample Text2","Sample Text3"])
def informative_card(self, title, data: list): return { "payload": { "richContent": [ [ { "type": "description", "title": title, ...
[ "def build_response_card(title, subtitle, options):\r\n if options is not None:\r\n buttons = []\r\n for i in range(min(5, len(options))):\r\n buttons.append(options[i])\r\n\r\n return {\r\n 'contentType': 'application/vnd.amazonaws.card.generic',\r\n 'version': 1,\r\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Dialogflow Messenger Image Card Response Builder raw_url=str, accessibility_text=str. Sample example of display_text ex. image_card(" Text")
def image_response(self, raw_url, accessibility_text): return{ "payload": { "richContent": [ [ { "type": "image", ...
[ "def build_response_card(title, subtitle, options):\n buttons = None\n genericAttachments = []\n if options is not None:\n buttons = []\n genericAttachmentElement = {}\n cnt = 0\n for i in range(len(options)):\n buttons.append(options[i])\n cnt = cnt+1\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Dialogflow Messenger Small Button Response Builder text=str, link=str. Sample example of display_text ex. small_button("Sample Text","
def small_button(self, text, link): return{ "payload": { "richContent": [ [ { "type": "button", "icon": { ...
[ "def hudButton(string, labelFontSize=\"string\", allowOverlap=bool, buttonShape=\"string\", block=int, pressCommand=\"string\", visible=bool, blockAlignment=\"string\", section=int, releaseCommand=\"string\", label=\"string\", buttonWidth=int, padding=int, blockSize=\"string\"):\n pass", "def button_text(self,...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Dialogflow Messenger Card With Multiple Options Response Builder data=list of dict
def card_with_multiple_options(self, data:list): one_card_option = [] for i in range(len(data)): one_card_option.append({ "type": "list", "title": data[i]["title"], "subtitle": data[i]["subtitle"], ...
[ "def build_response_card(title, subtitle, options):\n buttons = None\n genericAttachments = []\n if options is not None:\n buttons = []\n genericAttachmentElement = {}\n cnt = 0\n for i in range(len(options)):\n buttons.append(options[i])\n cnt = cnt+1\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Dialogflow Messenger Accordion Small Card Response Builder title=str, sub_title=str, rawUrl=str, text=str. Sample example of display_text ex. accordion_small_card("Title","Subtitle"," Text")
def accordion_small_card(self, title, sub_title, raw_url, text): return { "payload": { "richContent": [ [ { "type": "accordion", "title": title, ...
[ "def simple_title_card(self, title, sub_title, raw_url, action_link):\r\n \r\n simple_title_cards = []\r\n simple_title_cards.append({\r\n \"type\": \"info\",\r\n \"title\": title,\r\n \"subtitle\": sub_title,\r\n \"image\": {\r\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function for applying a radial density profile to a uniform sphere of particles.
def add_profile(self, gamma=0, method=2): if gamma <= -3: # impossible print("Gamma must be greater than -3. Exiting") exit() elif gamma != 0: print("Setting radial density profile with "+\ "RHO~r**{}".format(gamma)) # we use c...
[ "def simulate_spheres_in_sphere(num_particles: int,\n particle_radius: float = SAME_CELL_RADIUS,\n sphere_radius: float = AGGREGATE_RADIUS,\n rnd=np.random,\n umin: float = 0.0,\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This method sets the minimum size of all the BitTwiddler controls on the panel based on the size of the largest BitTwiddler. This ensures that all of the BT's line up correctly as they do not play nicely with sizers quite yet.
def SetControlMinSizes(self): wH, wL = 0, 0 h = 0 for settingH, settingL, setting in self.GetSettings(): wsH, hsH = settingH.GetSize() wsL, hsL = settingL.GetSize() wH = max(wH, wsH) wL = max(wL, wsL) h = max(h, hsH, hsL) ...
[ "def _updatesize(self):\r\n self.widgetarea.height = 10\r\n for x in self.widgetarea.children:\r\n if any([isinstance(x, c) for c in \\\r\n # Only these widgets have `texture_size` and `texture_update`\r\n (Label, Image, WrapLabel, AsyncImage, BGLabel)]):\r\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return an iterable list of 3-element tuples of all the defaults. The
def GetDefaults(self): return [(self.btsEP26CONFIGH, self.btsEP26CONFIGL, Setting.Ep26Config), (self.btsWORDWIDEH, self.btsWORDWIDEL, Setting.WordWide), (self.btsDATAADDRESSH, self.btsDATAADDRESSL, Setting.DataAddress), (self.btsFIFOCONFIGH, self.btsFIFOCONFIGL, ...
[ "def get_args_default_values(self, node):\n nondefaults = [None] * (len(node.args.args) - len(node.args.defaults))\n defaults = nondefaults + node.args.defaults + node.args.kw_defaults\n if node.args.vararg:\n defaults.append(None)\n if node.args.kwarg:\n defaults.a...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a closure to extract db key, description, and geom from a base path.
def parse_shapefile(db_key, description_key, filter_tuple): def _parse_shapefile(base_path): """Extract db key, description, and geom from base path. Parameters: base_path (str): path to a database, may be gdal vector, excel, or more. Returns: ...
[ "def refmap(self, fn, x: Context):\n return Context(\n self(fn, x.parent), x.graph, tuple(fn(arg) for arg in x.argkey)\n )", "def get_path(self, **kwargs):\r\n new = self._attrs()\r\n del new['keyspace'], new['key']\r\n new.update(kwargs)\r\n return ColumnPath(**new)", "...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Download url to target and write a token file when it unzips.
def download_and_unzip(url, target_path, token_file): if not os.path.exists(target_path): reproduce.utils.url_fetch_and_validate(url, target_path) if target_path.endswith('zip'): with zipfile.ZipFile(target_path, 'r') as zip_ref: zip_ref.extractall(os.path.dirname(ta...
[ "def download_raw_file( self, url ):\n raw_file_url = self.get_raw_file_url( url )\n f = urlopen( raw_file_url )\n output_filename = self.get_output_file( url )\n output_dirname = os.path.dirname( output_filename )\n print( \"save to %s\" % output_filename )\n if not os.pat...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Plots all the individual points in a coordAmp file
def plot_points(coordAmp): xValues = coordAmp.loc[:, 'xPos 1'::8] yValues = coordAmp.loc[:, 'yPos 1'::8] plt.scatter(xValues, yValues) plt.show()
[ "def plot_all_tracks(coordAmp):\n for track in range(1, coordAmp.shape[0]):\n xPositions = coordAmp.loc[track].loc['xPos 1'::8]\n yPositions = coordAmp.loc[track].loc['yPos 1'::8]\n plt.plot(xPositions, yPositions)\n# plt.xlim(50,80)\n# plt.ylim(50,80)\n plt.show()", "def plot_tra...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Plots a single track from a coordAmp file
def plot_track(coordAmp, track): xPositions = coordAmp.loc[track].loc['xPos 1'::8] yPositions = coordAmp.loc[track].loc['yPos 1'::8] plt.scatter(xPositions, yPositions) plt.plot(xPositions, yPositions)
[ "def plot_all_tracks(coordAmp):\n for track in range(1, coordAmp.shape[0]):\n xPositions = coordAmp.loc[track].loc['xPos 1'::8]\n yPositions = coordAmp.loc[track].loc['yPos 1'::8]\n plt.plot(xPositions, yPositions)\n# plt.xlim(50,80)\n# plt.ylim(50,80)\n plt.show()", "def plot_poi...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Plots all the tracks from a coordAmp file
def plot_all_tracks(coordAmp): for track in range(1, coordAmp.shape[0]): xPositions = coordAmp.loc[track].loc['xPos 1'::8] yPositions = coordAmp.loc[track].loc['yPos 1'::8] plt.plot(xPositions, yPositions) # plt.xlim(50,80) # plt.ylim(50,80) plt.show()
[ "def plot_track(coordAmp, track):\n xPositions = coordAmp.loc[track].loc['xPos 1'::8]\n yPositions = coordAmp.loc[track].loc['yPos 1'::8]\n plt.scatter(xPositions, yPositions)\n plt.plot(xPositions, yPositions)", "def plot_points(coordAmp):\n xValues = coordAmp.loc[:, 'xPos 1'::8]\n yValues = co...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Finds the average positions of each track in a coordAmp file and returns them as a list of tuples
def find_average_positions(coordAmp): averagePositions = [] for track in range(1, coordAmp.shape[0]): xPositions = coordAmp.loc[track].loc['xPos 1'::8] yPositions = coordAmp.loc[track].loc['yPos 1'::8] averagePositions.append((xPositions.mean(),yPositions.mean())) return averagePosit...
[ "def find_track_starts(coordAmp):\n trackStarts = [] \n for track in range(1, coordAmp.shape[0]):\n xPositions = coordAmp.loc[track].loc['xPos 1'::8]\n yPositions = coordAmp.loc[track].loc['yPos 1'::8]\n frame = 0\n xPosition = np.nan\n while (np.isnan(xPosition)):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Finds the beginning positions of each track and returns them as a list of tuples
def find_track_starts(coordAmp): trackStarts = [] for track in range(1, coordAmp.shape[0]): xPositions = coordAmp.loc[track].loc['xPos 1'::8] yPositions = coordAmp.loc[track].loc['yPos 1'::8] frame = 0 xPosition = np.nan while (np.isnan(xPosition)): xPosit...
[ "def plateouStart(sequence) -> Tuple[List, List]:\n plateau = ([], [])\n for index, center in enumerate(sequence[1:-1], 1):\n before = sequence[index - 1]\n after = sequence[index + 1]\n if before != center == after:\n plateau[0].append(index)\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Redirect unauthorized users to Login page.
def unauthorized(): flash('You must be logged in to view that page.') return redirect(url_for('login'))
[ "def handle_login_error(e): \n flash(\"You do not have access rights.\")\n return redirect(url_for('auth.login'))", "def unauthorized_callback():\n return redirect(url_for('auth.login'))", "def login():\n if not google.authorized:\n return redirect(url_for(\"google.login\"))", "def test_ano...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
For example, these holdout_score_objects might be already tested in another class (e.g. Sensitivity is tested via TwoClassEvaluator), but we want to verify we can instantiate and use.
def test_Misc_scores(self): mock_data = pd.read_csv(os.path.join(os.getcwd(), TestHelper.ensure_test_directory('data/test_Evaluators/test_ConfusionMatrix_mock_actual_predictions.csv'))) # noqa predictions_mock = mock_data.drop(columns=['actual', 'predictions']) predictions_mock.columns = [1, 0]...
[ "def test_legacy_box_scores_live(self):\n pass", "def _test_classes(self):", "def test_legacy_box_scores_final(self):\n pass", "def test_legacy_box_scores(self):\n pass", "def test_score_eligible_instances():\n performance_options = [\n PerformanceOption({\n 'option...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates the main menu so that the user can choose to either translate a word, add a word to the dictionary, print the current dictionary, or exit the program. It then validates that the user entered a number corresponding to one of these options, reads the user's selection, and returns it so they can navigate through the menu.
def MainMenu(): print("Main Menu") print("1 -- translate a word") print("2 -- add a word to dict") print("3 -- print current translation dictionary") print("4 -- exit") selection = int(input("Please make a selection: ")) if selection != 1 and se...
[ "def manage_menu():\n while True:\n print\n print 'What do you want to do?'\n print '-----------------------'\n print '1. See the current installed keys'\n print '2. Generate a new pair of secret/public keys'\n print '3. Export a public key'\n print '4. Export a s...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Purpose > Convert characters to integers, a unique value for every character. Input > Training data (in list-of-lists format) along with global variables. Output > Converted training data along with global variables.
def convert_char2num(mapping_n2c, mapping_c2n, trainwords, maxlen): allchars = [] maxchar = [] errors = 0 # Creates a list of all characters present in the dataset for line in trainwords: maxchar.append(len(line)) try: allchars = set(allchars + line) ...
[ "def str_to_int(map_data_pre_int):\n map_data_as_int = []\n for _ in map_data_pre_int:\n for lines_in_map_data in _:\n map_data_pre_int = int(lines_in_map_data)\n map_data_as_int.append(map_data_pre_int)\n return map_data_as_int", "def convert_strings_to_integers(data):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Initialize the thread pool manager
def __init__(self): self.thread_pool = ThreadPool()
[ "def _initMultiprocessingPool():\n if _pool[0] is None:\n with _poolLock:\n if _pool[0] is None:\n def initProcess():\n if 'numpy.random' in sys.modules:\n sys.modules['numpy.random'].seed()\n _pool[0] = multiprocessing.Poo...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Make a QuoteModel out of a line in an external file.
def ingest_line(line) -> QuoteModel: quote = line.split("-") body = quote[0].strip() author = quote[1].strip() return QuoteModel(body, author)
[ "def from_line(cls, line):\n return cls(line)", "def from_line(cls, line, user=None, cron=None):\n obj = cls(user=user, cron=cron)\n obj.parse(line.strip())\n return obj", "def get_quote_from_file(self):\n with open('data/mot_quotes.txt', 'r') as read_file:\n data =...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get if a given file can be ingested by current instance.
def can_ingest(cls, path: str) -> bool: file_type = path.split(".")[-1] return os.path.isfile(path) and cls.ingestor_type == file_type
[ "def can_read(cls, file_ref: FileRef) -> bool:", "def isValidFile(self) -> \"SbBool\":\n return _coin.SoInput_isValidFile(self)", "def should_check_file(self, filename):\n raise NotImplementedError", "def is_file_suitable(file_path: str) -> bool:\n pass", "def isavailable(file): # prag...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Parse a pdf file to get a list of quote models.
def parse(self, path: str) -> List[QuoteModel]: quote_models = [] if self.can_ingest(path): try: # the '-' flag outputs the text of a pdf file # to stdout, this way I don't have to handle removing # temp files since none were created ...
[ "def parse(cls, path: str) -> List[QuoteModel]:\n if not cls.can_ingest(path):\n file_type = path.split(\".\")[-1]\n raise Exception(f\"Documents of file type {file_type} cannot be ingested\")\n\n with open(path, \"rb\") as f:\n pdf = pdftotext.PDF(f)\n\n quotes...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This function sets the SSL context by accepting the passphrase and validating it against the user's private key and certificate. INPUT
def get_ssl_context(usercert, userkey): pass_str = 'Please enter the pass phrase for' for _ in range(0, 2): # allow two password attempts def prompt_for_password(verify): return getpass.getpass(pass_str + f" '{userkey}':").encode('utf-8') ssl_context = SSL.Context() ssl_cont...
[ "def configure_ssl_context(credentials):\n\n ssl_ctx = ssl.SSLContext(credentials.ssl_version)\n ssl_ctx.verify_mode = ssl.CERT_REQUIRED\n if hasattr(ssl_ctx, 'check_hostname'):\n ssl_ctx.check_hostname = True\n if credentials.cacert_file is None:\n raise SecurityEr...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Write the instance's private key to keypath, backing up keypath to keypath.old if necessary
def write_pkey(self, keypath=None): if not keypath: keypath = self.keypath # Handle already existing key file... utils.safe_rename(keypath) # this is like atomic_write except writing with save_key temp_fd, temp_name = tempfile.mkstemp(dir=self.output_dir) os...
[ "def save_private_key(self, path):\n\n private_bytes = self.key.private_bytes(\n serialization.Encoding.PEM,\n serialization.PrivateFormat.PKCS8,\n serialization.NoEncryption()\n )\n\n with open(path, 'w') as fo:\n fo.write(private_bytes.decode())", ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Extract the base64 encoded string from the contents of a certificate signing request
def base64_csr(self): return self.format_csr(self.x509request.as_pem().decode('utf-8'))
[ "def _x509_certificate_bare_base64(certificate):\n return base64.b64encode(certificate.public_bytes(\n serialization.Encoding.DER)).decode()", "def base64(self) -> str:\n return base64.b64encode(self.pem).decode(\"utf-8\")", "def tbs_certrequest_bytes(self):", "def _sign_string(self, string_t...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This method just returns the imaginary part of the complex number.
def get_imaginary(self): return self.imaginary
[ "def imag(self, *args) -> \"float const\":\n return _stdcomplexPython.stdcomplexF_imag(self, *args)", "def imag(self, *args) -> \"double const\":\n return _stdcomplexPython.stdcomplexD_imag(self, *args)", "def get_imag(self) -> float:\n if self.is_complex():\n for component in (s...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This is the setter for the imaginary part.
def set_maginary(self, imaginary): self.imaginary = imaginary
[ "def get_imaginary(self):\n return self.imaginary", "def get_imag(self) -> float:\n if self.is_complex():\n for component in (self.i, self.j, self.k):\n if component != 0.0:\n return component\n elif self.is_scalar():\n return 0.0\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This method conjugates our complex number, meaning it replaces the imaginary part with its opposite.
def conjugate(self): self.imaginary *= -1
[ "def conjugate(x):\n\n pass", "def conjugate(cls, G: SO3element, H: SO3element) -> SO3element:\n return H*G*H.inv()", "def conjugate(self):\n return Eisen(self._reCoord + self._z6Coord, -self._z6Coord)", "def conjugate(self):\n return Quaternion(self.real, -self.i, -self.j, -self.k)", ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This method should add a list of complex numbers. Returns the resulting sum.
def adds(self, c_list): first = c_list[0] for i in range(1, len(c_list)): first.add(c_list[i]) return first
[ "def complexSum(a, b):\n # Unpack the complex numbers\n ax, ay = a\n bx, by = b\n return [ax+bx, ay+by]", "def __iadd__(self, *args):\n return _vnl_vectorPython.vnl_vector_vcl_complexD___iadd__(self, *args)", "def __iadd__(self, *args):\n return _vnl_vectorPython.vnl_vector_vcl_complex...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This method subtracts another complex number from our complex number by subtracting their real parts and their imaginary parts separately.
def sub(self, other): new = ComplexNumber(self.get_real(), self.get_imaginary()) new.real -= other.get_real() new.imaginary -= other.get_imaginary() return new
[ "def __sub__(self, other):\n obj = self._to_complex(other)\n return self.__add__(-obj)", "def test_rsub():\n\tcomplexnr = 2 - Complex(4, 5) - (9 + 2j)\n\tassert complexnr == Complex(-11, -7)", "def __sub__(self, *args):\n return _vnl_vectorPython.vnl_vector_vcl_complexD___sub__(self, *args)...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This method multiplies our complex number with another complex number.
def multiply(self, other): new = ComplexNumber(self.get_real(), self.get_imaginary()) real = (new.real * other.get_real()) - (new.imaginary * other.get_imaginary()) imaginary = (new.imaginary * other.get_real()) + (new.real * other.get_imaginary()) new.set_real(real) new.set_magi...
[ "def test_mul():\n\tcomplexnr1 = Complex(1, 1)\n\tcomplexnr2 = Complex(2, 2)\n\tassert complexnr1*complexnr2 == Complex(0, 4)", "def __mul__ (self,other):\r\n valor=other.show_number()\r\n def mult():\r\n self.Verificaciones(other)\r\n resultado = NumC(0,self.base)#Este Num gua...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This method should multiply a list of complex numbers.
def multiplys(self, c_list): first = c_list[0] for i in range(1, len(c_list)): first.multiply(c_list[i]) return first
[ "def test_mul():\n\tcomplexnr1 = Complex(1, 1)\n\tcomplexnr2 = Complex(2, 2)\n\tassert complexnr1*complexnr2 == Complex(0, 4)", "def mult_numbers(number_list):\n # Failed this one for the same reason, same line of thinking. Commenting out my \n # attempt so you can see it without breaking terminal.\n # p...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This method divides our complex number by another complex number.
def divide(self, other): new = ComplexNumber(self.get_real(), self.get_imaginary()) if other.get_real() == 0.0 and other.get_imaginary() == 0.0: raise DisisionByNullException("Devision by null Error!") other_bar_re = other.get_real() / ((other.get_real() **2) + ...
[ "def __div__(self, other):\n tccd = []\n if isinstance(other, MCCD):\n for ccd,occd in zip(self._data,other._data):\n tccd.append(ccd / occd)\n else:\n for ccd in self._data:\n tccd.append(ccd / other)\n return MCCD(tccd, self.head)", ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This method returns a float which is the absolute value of our complex number.
def abs(self): return math.sqrt(self.get_real() **2 + self.get_imaginary() **2)
[ "def real(self, *args) -> \"float const\":\n return _stdcomplexPython.stdcomplexF_real(self, *args)", "def __abs__(self) -> \"RealMultivarTaylor\":\n\n const = self.const\n if const == 0.:\n raise ValueError(\"The absolute value is not differentiable at zero.\")\n return sel...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Differential of the rotation matrix with respect to the quaternion: dR(q)/dq = [dR(q)/dq0, dR(q)/dq1, dR(q)/dq2, dR(q)/dq3]
def diff_rot_from_quaternion(q: np.ndarray) -> np.ndarray: rot = t3d.quaternions.quat2mat(q) q2 = np.square(q) z = np.sum(q2) z2 = z * z d_rot = np.zeros((4, 3, 3)) d_rot[0, 0, 0] = 4 * q[0] * (q2[2] + q2[3]) / z2 d_rot[1, 0, 0] = 4 * q[1] * (q2[2] + q2[3]) / z2 d_rot[2, 0, 0] = -4 * q[2...
[ "def quaternionToRotationMatrix(q):\n xx = q[0] * q[0]\n xy = q[0] * q[1]\n xz = q[0] * q[2]\n xw = q[0] * q[3]\n yy = q[1] * q[1]\n yz = q[1] * q[2]\n yw = q[1] * q[3]\n zz = q[2] * q[2]\n zw = q[2] * q[3]\n mat = np.zeros((4,4))\n mat[0, 0] = 1 - 2 * ( yy + zz )\n mat[1, 0] =...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Send a request to install Agent.
def install_agent(self, user_name, password, agent_install_arg): # TODO: implement me pass
[ "def install_component(\n self, *, agent_name: str, package_name: str, package_version: str, params: Dict[str, str]\n ) -> None:", "def install_req(self):\n if '(' in self.name:\n return\n print(\"Installing %s(v%s)\" % (self.name, self.version))\n name = self.pkg_locatio...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Send a request to uninstall Agent.
def uninstall_agent(self, user_name, password, parent_agent_id, agent_ids): # TODO: implement me pass
[ "def uninstall_component(self, *, agent_name: str, component_name: str, component_version: str) -> None:", "def UninstallHost(self, bin_dir):\n host_msi = os.path.join(bin_dir, 'chromoting.msi')\n subprocess.Popen(['msiexec', '/x', host_msi, '/passive']).wait()", "def UninstallHost(self, bin_dir):\n re...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Send a request to uninstall modules.
def uninstall_modules(self, user_name, password, parent_agent_id, agent_ids, module_codes): # TODO: implement me pass
[ "def uninstall(cls, *module_names):\n cls.install(*module_names, uninstall=True)", "def uninstall_task(ctx, database, modules,\n config_file=os.environ.get('TRYTOND_CONFIG')):\n if not database or not modules:\n return\n\n if isinstance(modules, str):\n modules = modules.replace(...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Send a request to upgrade modules.
def upgrade_modules(self, user_name, password, parent_agent_id, agent_ids, module_codes): # TODO: implement me pass
[ "def upgrade(self):\n\n while self.toInstall:\n self.upgradeModule(self.toInstall[0])", "def upgrade(config, module, version, module_args, bdb, file):\n if module_args is None:\n module_args = click.prompt('New Custom Module Args', default='' )\n\n upgraded_module = deploy_module(co...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Send a request to start modules.
def start_modules(self, user_name, password, parent_agent_id, agent_ids, module_codes): # TODO: implement me pass
[ "def start(self):\n self.action_server.start()", "def minecraft_start(self):\n return self._post(Endpoint.START_MINECRAFT)", "def handle(self, request, module_name=None):\n\t\tprint self.modules\n\t\tself.stats[\"total\"] += 1\n\t\t# Create the base response\n\t\tresponse = {\n\t\t\t\"_metadata\" ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Send a request to stop modules.
def stop_modules(self, user_name, password, parent_agent_id, agent_ids, module_codes): # TODO: implement me pass
[ "def stop_server(self):\n response = requests.post(self._build_url(\"stop\"))\n return response", "def request_stop(self):\n self._stop_requested = True", "def stop(self):\n\n self.button_text.set(\"Start Modis\")\n self.state = \"off\"\n\n logger.warning(\"Stop...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
create a tensor field on a grid from given expressions
def from_expression( cls, grid: GridBase, expressions: Sequence[Sequence[str]], *, user_funcs: Optional[Dict[str, Callable]] = None, consts: Optional[Dict[str, NumberOrArray]] = None, label: Optional[str] = None, dtype: Optional[DTypeLike] = None, ) ->...
[ "def test_from_expression():\n grid = UnitGrid([1, 2])\n sf = ScalarField.from_expression(grid, \"x * y\", label=\"abc\")\n assert sf.label == \"abc\"\n np.testing.assert_allclose(sf.data, [[0.25, 0.75]])\n\n def f(x, y):\n return x * y\n\n sf = ScalarField.from_expression(grid, \"f(x, y)\"...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
calculate the dot product involving a tensor field. This supports the dot product between two tensor fields as well as the product between a tensor and a vector. The resulting fields will be a tensor or vector, respectively.
def dot( self, other: Union[VectorField, Tensor2Field], out: Union[VectorField, Tensor2Field, None] = None, *, conjugate: bool = True, label: str = "dot product", ) -> Union[VectorField, Tensor2Field]: # check input self.grid.assert_grid_compatible(oth...
[ "def dot(self, other):\n if not isinstance(other, Field):\n raise TypeError(\"Argument must be a Field. Got: {} ({})\".format(other, type(other)))\n if not (self.value_dim() == other.value_dim()):\n raise ValueError(\"The cross product is only defined for vector fields of the sam...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }