Dataset columns:
    query      string (9 to 9.05k chars)
    document   string (10 to 222k chars)
    negatives  list   (19 to 20 items)
    metadata   dict

Each row below gives a natural-language query, the positive document (code), a truncated list of negative documents, and the training-objective metadata.
Check if the given variables (params) change or not during training. If params aren't provided, check all parameters.
def _var_change_helper(self, vars_change: bool, inputs: tuple, params: list = None): if params is None: # get a list of params that are allowed to change params = [np for np in self._model.named_parameters() if np[1].requires_grad] # take a copy initial_params = [(name, ...
[ "def checkUpdate(self, *args):\n g = get_root(self).globals\n if not self.check():\n g.clog.warn(\"Current observing parameters are not valid.\")\n return False\n\n if not g.ipars.check():\n g.clog.warn(\"Current instrument parameters are not valid.\")\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Make sure there are no NaN values in the given tensor.
def assert_not_nan(tensor):
    try:
        assert not torch.isnan(tensor).byte().any()
    except AssertionError:
        raise ValueError("There was a NaN value in tensor.")
[ "def check_nan(tensor):\n\tassert(not(torch.isnan(tensor).any()))", "def assert_never_inf(tensor):\n try:\n assert torch.isfinite(tensor).byte().any()\n except AssertionError:\n raise ValueError(\"There was an Inf value in tensor\")", "def tensor_missing():\n tensor = torc...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Make sure there are no Inf values in the given tensor.
def assert_never_inf(tensor):
    try:
        assert torch.isfinite(tensor).byte().any()
    except AssertionError:
        raise ValueError("There was an Inf value in tensor")
[ "def check_nan(tensor):\n\tassert(not(torch.isnan(tensor).any()))", "def assert_not_nan(tensor):\n try:\n assert not torch.isnan(tensor).byte().any()\n except AssertionError:\n raise ValueError(\"There was a NaN value in tensor.\")", "def no_inf_mean(x:torch.Tensor):\n no_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the PDB file name for the structure to work on. If the structure is specified by a PyMOL selection, save it in the temp dir; if the structure is specified by a separate PDB file, use it.
def getStrucPDBFname(self): pdb_fn = None sel = self.pymol_sel.get() if len(sel) > 0: # if any pymol selection is specified # save the pymol selection in the tmp dir all_sel_names = cmd.get_names('selections') # get names of all selections tmp_dir = self.t...
[ "def getSolutePDB(self):\n print(\"Converting xyz to pdb\")\n# cmd = \"babel \" + self.xyz + \" solute.pdb\"\n# subprocess.call(cmd, shell=True)\n obConversion = ob.OBConversion()\n obConversion.SetInAndOutFormats(\"xyz\", \"pdb\")\n obmol = self.solute.OBMol\n obCon...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set the executable of the MSMS program.
def setMsmsBin(self, msms_bin):
    self.msms_bin = os.path.abspath(os.path.expanduser(msms_bin))
    self.msms_wd = os.path.dirname(self.msms_bin)
    return
[ "def _set_sys_executable(self) -> None:\n python_name: str = os.path.basename(sys.executable)\n if sys.platform == \"win32\":\n compiler_executable = os.path.join(self._env_path, \"Scripts\", python_name)\n else:\n compiler_executable = os.path.join(self._env_path, \"bin\"...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Run MSMS on given pdb file. Output file names are stored.
def run(self, pdb_fn, ofn_root=None): # convert .pdb file to .xyzr and .xyzrn files #fname_root = '.'.join(self.pdb_fn.split('/')[-1].split('.')[:-1]) fname_root = os.path.splitext(os.path.split(pdb_fn)[-1])[0] ## xyzr_fname = '%s/%s.xyzr' % (self.output_dir, fname_root) #xyzrn_...
[ "def savePDB(pdb, filename):\n prody.writePDB(filename, pdb)", "def getSolutePDB(self):\n print(\"Converting xyz to pdb\")\n# cmd = \"babel \" + self.xyz + \" solute.pdb\"\n# subprocess.call(cmd, shell=True)\n obConversion = ob.OBConversion()\n obConversion.SetInAndOutFormats...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Parse the main component, i.e., the external surface. The original PDB file is required for mapping the surface vertices to atoms in the PDB.
def parseMsmsSurface(self, pdb_fn, vert_fn, face_fn, rm_dup=False): surf = MsmsSurface() ############## # read ATOM and HETATM entries in the PDB file ############## pdb_fh = open(pdb_fn) # let the exception raise buf = pdb_fh.readlines() pdb_fh.close() ...
[ "def parseMsmsSurfaceAllComponents(self, pdb_fn,\n vert_fn, face_fn,\n cpn_vert_fns, cpn_face_fns):\n # parse the external surface\n extl_surf = self.parseMsmsSurface(pdb_fn, vert_fn, face_fn)\n # parse internal surfaces\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Parse all components, including both external and internal surfaces.
def parseMsmsSurfaceAllComponents(self, pdb_fn, vert_fn, face_fn, cpn_vert_fns, cpn_face_fns): # parse the external surface extl_surf = self.parseMsmsSurface(pdb_fn, vert_fn, face_fn) # parse internal surfaces it...
[ "def _parse_submeshes(self) -> None:\n\n # element to boundary element type mapping\n bnd_type = {\n 'triangle' : 'line',\n 'quad' : 'line',\n 'tetra' : 'triangle',\n 'hexahedron' : 'quad',\n }[self.meshio_type]\n\n def find_tagname(tag):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Parse an MSMS .vert file. Read
def parseVertFile(self, vert_fn=None): if vert_fn is None: vert_fn = self.vert_fn try: fh = open(vert_fn, 'r') fd = fh.readlines() fh.close() except IOError: print('Error: MSMS .vert file not found:', self.vert_fn) return ...
[ "def load_nvtFile(fileName, mazeType='', showHeader=False):\n timeStamps = []\n posSamples = []\n eulSamples = []\n\n with open(fileName, 'rb') as f:\n\n header = f.read(16 * 2 ** 10) # the first 16 kB are an ASCII text header\n\n if showHeader:\n print header\n\n count ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Render MSMS surface as mesh (triangles).
def displayMsmsSurfMesh(self, mesh_cgo_name='msms_surf_mesh', mesh_cgo_color=(1.0, 1.0, 1.0)): line_dict = {} mesh_cgo = [BEGIN, LINES, COLOR, mesh_cgo_color[0], mesh_cgo_color[1], mesh_cgo_color[2]] ...
[ "def render3D(self):\n mesh = trimesh.Trimesh(vertices=self.verts, faces=self.faces)\n mesh.show(resolution=(512, 512))", "def extract_triangle_mesh(self):\n tsdf_vol = self._tsdf_vol.cpu().numpy()\n color_vol = self._color_vol.cpu().numpy()\n vol_origin = self._vol_origin.cpu().numpy()\n\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Iterates over each row in the given CSV file. It skips the first row if specified and removes leading and trailing whitespace.
def each_csv_row(csv_file: str, func, skip_header=False, encoding='utf-8'): log.info('parse CSV file %s', csv_file) with open(csv_file, 'r', encoding=encoding, newline='\n') as f: reader = csv.reader(f) i = 0 if skip_header: next(reader) i += 1 for row in ...
[ "def process_csv(self, file_name: str):", "def yield_csv_rows(csv_filename, csv_flavor = COMMA_DELIM):\n with open(csv_filename, 'r') as csvfile:\n spamreader = csv.reader(csvfile, **csv_flavor)\n for row in spamreader:\n yield row", "def csv_iter(filename):\n with open(filename, ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Defines the config attribute.
def define_config(self, config: str) -> None: self.config = config
[ "def config_mode(self):\n\n pass", "def set_config(cfg, name, value):\n\n name = name.split('.')\n suffix = ''\n for item in name[:-1]:\n assert item in cfg, f'attribute {item} not cfg{suffix}'\n cfg = cfg[item]\n suffix += f'.{item}'\n\n assert name[-1] in cfg, f'attribute...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Placeholder for reading raw data from file
def read_raw_file(self) -> bytes: pass
[ "def __readfile(self):\n raise NotImplementedError", "def read_all_raw_files():\n pass", "def read(self, file_, data):\n try:\n return self._read(file_, data)\n except Exception as E:\n # pass through io exceptions, but log the corresponding DataSpec instance\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Placeholder for writing data to a file.
def write_file(self, data) -> None: pass
[ "def write_to_file(self, data):", "def WriteFile(filename, data):\n with open(filename, 'w') as f:\n f.write(data)", "def write_raw_file(self, data: bytes) -> None:\n pass", "def write_to_file(self, data):\n file = open(self.name, \"a\", encoding='utf-8')\n file.write(\"\\n\" + data)"...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Placeholder for writing raw data to a file.
def write_raw_file(self, data: bytes) -> None: pass
[ "def write_file(self, data) -> None:\n pass", "def write_to_file(self, data):", "def file_filler(file, data):\n\n file.write(data)\n return 0", "def test_write():\n\n with open(FILE_DIR+FILE_NAME, mode='w', encoding='utf8')as f:\n f.write(DATA)", "def WriteFile(filename, data):\n wit...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Shift the sequence of cities so that the home city is first. This makes it easier to compare solutions with other algorithms. (i.e., if the home city ends up in 5th place, move the first 4 cities to the end of the sequence to create a "shifted" route.)
def shift_route(route, home): # if route is already a full cycle, remove last city if route[-1] == route[0]: route = route[:-1] origin = 0 for pos, val in enumerate(route): if val == home: origin = pos break shifted = route[origin:] + route[:origin] retu...
[ "def shift_cities(road_map):\n new_road_map = [road_map[len(road_map) - 1]]\n for i in range(0, len(road_map)-1):\n new_road_map.append(road_map[i])\n return new_road_map", "def rearrange_cars(initial_car_park, final_car_park):\n if len(initial_car_park) != len(final_car_park):\n raise V...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Find longest leg in a cycle.
def longest_leg(A, cycle): if cycle[-1] != cycle[0]: # not a completed cycle cycle.append(cycle[0]) n = len(cycle) longest = 0 for j in range(n-1): city1 = cycle[j] city2 = cycle[j + 1] if A[city1, city2] > longest: longest = A[city1, city2] return long...
[ "def longest_run(strand: List[int]) -> int:\n max_run = 0\n symbol = strand[0]\n curr_run = 0\n for s in strand:\n if s == symbol:\n curr_run += 1\n if curr_run > max_run:\n max_run = curr_run\n else:\n symbol = s\n curr_run = 1\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Plot a route (Hamiltonian cycle).
def route_plot(route, title, distance, cycles): plt.figure() waypoints = np.empty((0, 2)) for vertex in route: waypoints = np.append(waypoints, np.array([[vertex.x, vertex.y]]), axis=0) # add starting vertex to end of route to close the loop waypoints = np.appe...
[ "def visualize_routes(self):\n self.plotTSP([self.states[0]], self.coords, title='Initial starting arrangement (connecting nearest nodes)')\n self.plotTSP([self.states[-1]], self.coords, title='Final arrangement (after simulated annealing optimization)')", "def visualize_vehicle_route(state, vehicle...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Closest Insertion Algorithm from "THE TRAVELING SALESMAN PROBLEM", thesis by Corinne Brucato, University of Pittsburgh, 2013. Similar to the nearest-neighbor algorithm, except it searches for a nearest neighbor from both ends of the current path. Results depend on the starting vertex.
def closest_insertion_tsp(V, start, dists): # initialize ui = V[start] # first vertex u_i from V # Form a path P = {u_i} and set T = {edges connected to u_i} P = [ui] # P must be a list or array to maintain order T = np.empty((0, 3)) for index, dist in enumerate(dists[ui]): if ui != i...
[ "def closest_dirt(self):\r\n position = self.bot_pos\r\n dirts = self.get_dirts(position[0],position[1])\r\n if dirts:\r\n i, j = min(dirts,\r\n key=lambda dirt_pos:((position[0]-dirt_pos[0])**2+(position[1]-dirt_pos[1])**2)**0.5\r\n )\r\n ret...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Constructs the dictionary of words in captions based on frequency of each word.
def dictionary(raw_captions,threshold): caps = [] for im in raw_captions: for s in raw_captions[im]: caps.append(s.split()) word_freq = nltk.FreqDist(itertools.chain(*caps)) id_to_word = ['<pad>'] + [word for word, cnt in word_freq.items() if cnt >= threshold] + ['<unk>'] word_t...
[ "def init_dic(self):\n self.word_dic = {}\n self.bigram = 0 # count counts the number of bigrams for Laplace smoothing\n for i in range(len(self.corpus)):\n ch = self.corpus[i]\n if ch not in self.word_dic:\n self.word_dic[ch] = {}\n # The number...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sorts in 'lengths' of captions in descending order. Then sorts the images and captions according to the sorted 'lengths'.
def sort(self, images, padded_captions, lengths): lengths = torch.tensor(lengths) lengths, indices = torch.sort(lengths, descending=True) sorted_captions = [] sorted_images = [] for index in indices: sorted_captions.append(padded_captions[index]) sort...
[ "def _sort_by_length(self, reverse_sort):\n sorted_len_indices = [\n a[0] for a in sorted(\n enumerate(self.angs), key=lambda x: x[1].shape[0], reverse=reverse_sort)\n ]\n self.seqs = [self.seqs[i] for i in sorted_len_indices]\n self.str_seqs = [self.str_seqs[i]...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This function takes two strings and returns a set of the words that are common to both.
def words_in_both(s1, s2):
    low_s1 = s1.lower()
    list_s1 = low_s1.split()
    low_s2 = s2.lower()
    list_s2 = low_s2.split()
    set_1 = set(list_s1)
    set_2 = set(list_s2)
    set_3 = set_1 & set_2
    return set_3
[ "def common_charecters(string1, string2):\n\n first_String= string1.lower()\n second_String= string2.lower()\n\n common = []\n\n for charecter in first_String:\n if charecter in second_String:\n common.append(charecter)\n else:\n None\n\n print(\"Common letters: {}...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Starts a game of Russian Roulette
async def russian_roulette(self, ctx): session = self.manager.get_session(ctx.channel) if session is None: with self.manager.temp_session(ctx.channel, RussianRouletteSession(ctx)) as inst: inst.add_member(ctx.author) await ctx.send( 'Russian Roulette game is s...
[ "def launch():\n\n Game.separator(1)\n print 'Welcome to Rock-Paper-Scissor'\n Game.separator(1)\n print 'Please type s to start'\n\n player, srv_addr = Game.init()\n\n Game.connect(player, srv_addr)\n\n Game.share_graphs(player)\n\n playing(player)\n\n while Game.play_again(player):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Convert spectral type codes to string values
def convert_spt_code_to_string_to_code(spectral_codes, decimals=1): if isinstance(spectral_codes, float): spectral_codes = [spectral_codes] spectral_types = [] for spt in spectral_codes: spt_type = '' # Identify major type if 60 <= spt < 70: spt_type = 'M' ...
[ "def spectrum_type(self):\n if self._spectrum_type:\n return 'Power Spectrum'\n else:\n return 'Power Spectral Density'", "def specType(SpT):\n if isinstance(SpT,str) and SpT[0] in ['M','L','T','Y']:\n try: return [l+float(SpT[1:]) for m,l in zip(['M','L','T','Y'],[0,10,20,...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Ingests spectrum using data found in the header
def ingest_spectrum_from_fits(db, source, spectrum_fits_file): header = fits.getheader(spectrum_fits_file) regime = header['SPECBAND'] if regime == 'opt': regime = 'optical' telescope = header['TELESCOP'] instrument = header['INSTRUME'] try: mode = header['MODE'] except KeyEr...
[ "def spec_to_hdf(wd, spec_file, spectrum_file, output_file, img_shape, ic_name=None, x_name=None, y_name=None):\n # read scaler data from spec file\n spec_path = os.path.join(wd, spec_file)\n h = get_header(spec_path)\n spec_data = pd.read_csv(spec_path, names=h, sep=\"\\t\", comment=\"#\", index_col=Fa...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This function ingests a single row into the CompanionRelationship table
def ingest_companion_relationships(db, source, companion_name, relationship, projected_separation_arcsec = None, projected_separation_error = None, comment = None, ref = None, other_companion_names = None): # checking relationship entered pos...
[ "def run(cls, row, reader):\n\n cls._parse_keys(row, reader.line_num)\n cls._parse_relationships(row, reader.line_num)", "def process_row(self, row):\n\n return row", "def _process_row(self, row):\n # Must be overridden.", "def _import_horario(self, row):\n profesor = self.g...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Formats channel attributes for use in data transforms. Stored in self._config dict mapping id to namedtuple. Initializes par_ids, names, id_maps.
def _load_config(self): channel_spec = self.spec.channels _spec_fields = ( 'name', 'long', 'word_len', 'bit_mask', 'max_range', 'log_max', 'log_min', 'gain') ParamSpec = namedtuple('SPn', _spec_fields) self._config = { num: ParamSpec(*format_attr(s...
[ "def reformat_meta(self):\n meta = self.annotation # For shorthand (passed by reference)\n channel_properties = []\n\n for key, value in meta.items():\n if key[:3] == \"$P1\":\n if key[3] not in string.digits:\n channel_properties.append(key[3:])\n\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Applies bit mask and/or normalization to event count parameter.
def __scale_count(self, count_id, norm): event_count = self.raw[count_id] event_spec = self._config.get(count_id) if event_spec.bit_mask: event_count = self.__bit_mask_data(count_id) if norm and event_count.item(0) != 1: event_count = self.__normalize_count(even...
[ "def score_by_event_count(event, attributes):\n return 1", "def update_emit_count(self, stream_id):\n self.update_count(self.EMIT_COUNT, key=stream_id)", "def preprocess_event(self, event):\n \n self.apply_hard_cuts(event)\n self.construct_weighting(event)\n self.handle_edge_li...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Locates or creates event count parameter. Checks for values exceeding maximum possible based on word length. Count is assigned to id 1 and stored in _reference_channels.
def __load_ref_count(self, norm): count_id = self.__locate_count_param() if count_id: event_count = self.__scale_count(count_id, norm) else: event_count = np.arange(1, len(self.raw[1]) + 1) if np.any(event_count[:-1] > event_count[1:]): event_count =...
[ "def _setMaxCount(self, value):\n self.__maxcount = value", "def set_trigger_count(self, count):\n self.count = count", "def _get_max_item_count(self):\n return 7 - len(self.constants)", "def maximum_channel_length(self) -> int:\n return self['channellen']", "def on_change_count(self...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Locates any parameter name ($PnN or $PnS) containing time, msw, lsw.
def __locate_time_params(self): time_id, time_lsw, time_msw = 0, 0, 0 long_names = self.__get_ch_attr('long') for name, long_name in zip(self.names, long_names): name_id = self.id_map.get(name) if long_name: keywords = (name.casefold(), long_name.casefold...
[ "def is_parameter(name):\n return name.startswith('par-')", "def input_parameter_name(name, var_pos):\n return \"para-%s-%s\" % (name, var_pos)", "def find_pipeline_params(p, pattern, verbose=False):\n match_params = list()\n for k in p.get_params().keys():\n if re.search(pattern, string=k):\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Converts two single-word-length time parameters into an actual, double-word-length time measurement.
def __encode_time(self, time_lsw, time_msw):
    msw_word_len = self._config.get(time_msw).word_len
    msw_data = self.raw.get(time_msw)
    lsw_data = self.raw.get(time_lsw)
    double_word = ((msw_data << msw_word_len) | lsw_data)
    return double_word
[ "def convert_to_percents_type_1(start_time, end_time, movie_length):\r\n time1_lst = start_time.split(TIME_SPLITTER)\r\n time1_seconds = int(time1_lst[0]) * HOUR_MULTIPLIER + int(time1_lst[1]) \\\r\n * MINUTE_MULTIPLIER + int(time1_lst[2])\r\n time2_lst = end_time.split(TIME_SPLITTER)\r\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Loads the time parameter and determines whether it exists on its own or is split between lsw and msw. Applies the $TIMESTEP (and gain) factor. Stored in _reference_channels as id 0.
def __load_ref_time(self, norm): time_lsw, time_msw, time_id = self.__locate_time_params() if time_id and (time_lsw or time_msw) and not(time_lsw and time_msw): if time_lsw: time_msw = time_id else: time_lsw = time_id time_id = 0 ...
[ "def initialize_time(self):\n self._cur_time = 0\n self._model_timestep = self.sim.model.opt.timestep\n self._control_timestep = 1.0 / self._control_freq", "def _get_time_step(self):\n if self.data_level == 1:\n self.dt = self._dt_min\n elif self.data_level == 2:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Initializes time and event count parameters to be stored in _reference_channels under ids 0 and 1. Filters any time and event count ids out of par_ids.
def load_reference_channels(self, norm_count, norm_time): time_ids = self.__load_ref_time(norm_time) if time_ids: self.ref_ids.extend(time_ids) count_id = self.__load_ref_count(norm_count) self.ref_ids.append(count_id) self.par_ids = tuple(id_ for id_ in self.par_id...
[ "def _initialize(self):\n n_events = self.n_events\n delta_t = int(self.delta_t)\n if self.mode == \"delta_t\":\n n_events = 0\n elif self.mode == 'n_events':\n n_events = self.n_events\n delta_t = 0\n self.buffer_producer = EventsBufferProducer(\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Configures group ids for the channel, log scale, gain scale, and xcxs data sets. Compensation id groups are configured separately alongside the comp matrix.
def _set_group_ids(self): self.channel_ids = self.par_ids[:] self.bit_mask_ids = self.__get_ch_attr('bit_mask', dropzero=True) # set scale ids for log and gain self.log_ids = self.__get_ch_attr('log_max', dropzero=True) gain_mask = [(n != 0 and n != 1) for n in self.__get_ch_at...
[ "def test_team_builder_config_product_groups_id_patch(self):\n pass", "def configure_groups():\n from collections import OrderedDict \n\n order = 0.0\n group_config = OrderedDict()\n\n group_config['H5F'] = {}\n group_config['H5D'] = {}\n group_config['MPIIO'] = {}\n group_config['DXT_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Applies gain scaling based on $PnG value.
def __gain_scale(self, param_n, src_group):
    spec_ = self._config.get(param_n)
    param_data = src_group.get(param_n)
    return param_data / spec_.gain
[ "def gain(self, g):\n return self.normalize(0, 1, scale=g)", "def set_pga_gain(self, pga_num, gain):\n\t\treturn self.config_ads(pga_num, 2, gain)", "def test_16_bandpass_calibrator_gain_amplitudes_scaling():\n\tcasalog.origin(\"test_16_bandpass_calibrator_gain_amplitudes_scaling\")\n\tcasalog.post(\"sta...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Applies log10 or gain scaling to all parameters that use either; a parameter cannot have both scaling methods.
def set_scale_values(self): for param_n in self.log_ids: log_data = self.__log_scale(param_n, self.channel) self.scale[param_n] = log_data if self.gain_ids: for param_n in self.gain_ids: gain_data = self.__gain_scale(param_n, self.channel) ...
[ "def scale_log(self) -> None:\n # Problem are probabilities below 1\n self.values = [log(1.01 + x, 2) for x in self.values]", "def set_log_scale(self,val): # tested and documented\n if self.__is_int_or_float(val) and self.__is_between(val, 0.1, 10.0):\n self.send_message(\"LOG %.1f...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets values for compensation matrix, id groups. _comp_matrix is a dict mapping param id to compensation factor.
def set_compensation_matrix(self, comp_matrix_map, fl_comp_ids):
    self._comp_matrix = comp_matrix_map
    self.flcomp_ids = fl_comp_ids
    self.log_flcomp_ids = tuple(set(self.log_ids) & set(self.flcomp_ids))
[ "def setMatrix(self, mat: 'SbMatrix') -> \"void\":\n return _coin.SoTransform_setMatrix(self, mat)", "def apply_compensation(self, compensation, comp_id='custom_spill'):\n if isinstance(compensation, Matrix):\n self.compensation = compensation\n self._comp_events = self.compens...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Applies log10 scaling for parameters located in compensation matrix that have a log10 scaling value.
def set_logscale_compensated(self): if not self.log_flcomp_ids: print('>>> No compensated parameters have log scaling.') return if not self.compensated: self.set_compensated_values() for param_n in self.log_flcomp_ids: log_ = self.__log_scale(pa...
[ "def log_scale(df):\n print('Log scaling numeric data ....')\n\n df_copy = df.copy()\n for key in tqdm(get_numeric_keys(False)):\n s = np.sign(df[key])\n df_copy[key] = s * np.log(df[key] * s + 1)\n return df_copy", "def scale_log(self) -> None:\n # Problem are probabilities below 1\n self...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Access the Archives containing archived Posts
def archives():
    posts = Posts.query.filter(Posts.complete == True).order_by(Posts.date_posted.desc()).all()
    context = {'posts': posts}
    if not posts:
        flash('There are no archived Requests!', 'information')
    return render_template('archives.html', **context)
[ "def published_posts():\n return Post.objects.filter(publish_date__lte=timezone.now()).order_by('publish_date')[::-1]", "def list_published(request, archive=None):\n fa = FindingAid.objects.order_by('eadid').only('document_name', 'eadid', 'last_modified')\n arch = None\n if archive is not None:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Add a new request to the Posts table
def add_request(): if request.method == 'POST': new_request = request.form.to_dict() new_request['user_id'] = current_user.id product_type = request.form['product_type'] new_request['product_type'] = \ 'media/img/{}.jpg'.format(product_type) # check new product ...
[ "async def add(request: web.Request) -> web.json_response:\n data = dict(request.query)\n cleaned_data = QueryString().load(data)\n data['count_items'], top_5_link = await parse(**cleaned_data)\n id_create = await Views(request).insert_query(**cleaned_data)\n await Views(request)....
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Access the Output Form page for a chosen Post ID
def output_form(post_id): post = Posts.query.get_or_404(post_id) health_and_safety = Actions.query.filter(Actions.stage == 1, Actions.posts_id == post_id).order_by(Actions.created_on.desc()).first() quality = Actions.query.filter(Actions.stage == 2, Actions.posts_id ...
[ "def show_edit_post_form(postid):\n post = Post.query.get(postid)\n return render_template('edit_post.html', post=post)", "def show_edit_post_form(id):\n post = Post.query.get_or_404(id)\n tags = Tag.query.all()\n\n return render_template(\"post_edit.html\" , post=post , tags=tags)", "def post_ed...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Archive a Post to move it into the Archive page of the App
def archive_request(post_id): post = Posts.query.get_or_404(post_id) if post.status == None: flash('Please ensure the Request is complete', 'error') return redirect(url_for('main.index')) post.complete = True db.session.commit() flash('Request {} has been moved to the Archives'.for...
[ "def _create_archive_page(self):\n posts = sorted(self.posts, key=lambda post: post.date, reverse=True)\n content = self.templates['archive'].render(posts=posts, site=self.site)\n path = os.path.join(BASE_DIR, self.paths['output'],\n self.paths['output_archive'], 'ind...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Move an archived Post back into the live tracker (Home page)
def unarchive_request(post_id):
    post = Posts.query.get_or_404(post_id)
    post.complete = False
    db.session.commit()
    flash('Request {} has been moved back to the Tracker'.format(post.title), 'success')
    return redirect(url_for('main.archives'))
[ "def archive_request(post_id):\n post = Posts.query.get_or_404(post_id)\n if post.status == None:\n flash('Please ensure the Request is complete', 'error')\n return redirect(url_for('main.index'))\n\n post.complete = True\n db.session.commit()\n\n flash('Request {} has been moved to the...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
From the camera calibration matrix and the estimated homography, compute the 3D projection matrix.
def projection_matrix(camera_parameters, homography): # Compute rotation along the x and y axis as well as the translation homography = homography * (-1) rot_and_transl = np.dot(np.linalg.inv(camera_parameters), homography) col_1 = rot_and_transl[:, 0] col_2 = rot_and_transl[:, 1] col_3 = rot_an...
[ "def projection_matrix(camera_parameters, homography):\r\n # Compute rotation along the x and y axis as well as the translation\r\n homography = homography * (-1)\r\n rot_and_transl = np.dot(np.linalg.inv(camera_parameters), homography)\r\n col_1 = rot_and_transl[:, 0]\r\n col_2 = rot_and_t...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return valid neighboring 4-letter words of a given word.
def neighbors(word, words, isWord):
    neighbors = []
    for let in alphabet:
        for pos in range(0, 4):
            newWord = word[0:pos] + let + word[pos+1:]
            if isWord(words, newWord) and newWord != word:
                neighbors.append(newWord)
    return neighbors
[ "def _get_possible_locations(self, word):\n ret = []\n\n d = 'acrs'\n for y in range(self.size):\n for x in range(self.size - len(word)):\n if self._is_valid_location(x, y, d, word):\n ret.append((x, y, d))\n\n d = 'down'\n for y in ran...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
A generator that yields a simple AND of two posting lists, given iterators over these. The posting lists are assumed sorted in increasing order according to the document identifiers.
def intersection(p1: Iterator[Posting], p2: Iterator[Posting]) -> Iterator[Posting]: a, b = next(p1, None), next(p2, None) while a is not None and b is not None: if a.document_id == b.document_id: yield(a) a, b = next(p1, None), next(p2, None) eli...
[ "def union(p1: Iterator[Posting], p2: Iterator[Posting]) -> Iterator[Posting]:\n\n a, b = next(p1, None), next(p2, None)\n while a is not None and b is not None:\n if a.document_id == b.document_id:\n yield (a)\n a, b = next(p1, None), next(p2, None)\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
A generator that yields a simple OR of two posting lists, given iterators over these. The posting lists are assumed sorted in increasing order according to the document identifiers.
def union(p1: Iterator[Posting], p2: Iterator[Posting]) -> Iterator[Posting]: a, b = next(p1, None), next(p2, None) while a is not None and b is not None: if a.document_id == b.document_id: yield (a) a, b = next(p1, None), next(p2, None) elif a.do...
[ "def intersection(p1: Iterator[Posting], p2: Iterator[Posting]) -> Iterator[Posting]:\n\n a, b = next(p1, None), next(p2, None)\n while a is not None and b is not None:\n if a.document_id == b.document_id:\n yield(a)\n a, b = next(p1, None), next(p2, None)\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test whether an empty FITS file really contains no results.
def test_empty(self):
    # open file
    f = fits.open("test.fits")
    # create ResultsFITS object
    rf = ResultsFITS(f[0], "HIERARCH ANALYSIS TEST")
    # check keys
    self.assertEqual(len(rf.keys()), 0)
    # close
    f.close()
[ "def test_empty_file(self):\n file = process_file('./test_files/empty.csv', 'test_empty')\n\n self.assertFalse(file)", "def _empty(self) -> bool:\n return len(self.files) + len(self.directories) == 0", "def is_empty(self):\n return len(self.tiles) == 0", "def is_empty(self):\r\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a test file with a value/error entry
def _create_test_file_err():
    # open file
    f = fits.open("test.fits", mode='update')
    # create ResultsFITS object
    rf = ResultsFITS(f[0], "TEST")
    # set a value
    rf["vrad"] = [132.12, 1.54]
    # save
    f.flush()
    f.close()
[ "def test_cannot_write_file(self):\n self.api.write_data('/some-fake/path/to-create-file/', 'some-string')", "def create_test_file(base, path, content=None):\n try:\n os.makedirs(os.path.join(base, *path[:-1]))\n except OSError:\n # directory already exists. the exist_ok parameter exist...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Load a FITS file as text and count occurrences of 'token' in the header.
def _count_in_header(filename, token):
    # read file
    with open(filename, 'r') as content_file:
        content = content_file.read()
    # extract header
    content = content[:content.find(' END ')]
    # count appearances of token
    return content.count(token)
[ "def read_header(fits_file):\n\n head = {}\n F = pf.open(fits_file)\n H = F[0].header\n head['Ntot'] = H['N_TOT']\n head['Nmu'] = H['N_MU']\n head['Nsig'] = H['N_SIGMA']\n head['Nv'] = H['N_VOIGT']\n head['Ncoef'] = H['N_COEF']\n head['Nspa'] = H['N_SPARSE']\n head['mu'] = [H['MU1'], H...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the ith basis dimension. In the polynomial case, this is X^index. You can write your own basis function here, inheriting from this class, and the gradients will still check.
def basis(self, X, i): raise NotImplementedError('Implement the basis you want to optimize over.')
[ "def get_num_basis_functions(self):\n if hasattr(self, 'num_functions'):\n return self.num_functions\n\n self.num_functions = (self.n + 1.0) ** self.d\n return self.num_functions", "def get_basis_function(self, n):\n return ElementBasisFunction(self, n)", "def compute_basi...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Select torrents between sdate and fdate.
def select_torrent(club, sdate, fdate): conn = pymysql.connect(host=host, port=port, user=user, passwd=passwd, db=db) cur = conn.cursor() cur.execute("SELECT comp,date,time,torrent,status FROM utorrent where club={0} and date >= '{1}' " "and date <= '{2}' order by torrent,comp".format(club, ...
[ "def search_date(self):", "def get_bse_zip_url_for_fdate(fdate):\n return \"http://www.bseindia.com/download/BhavCopy/Equity/EQ\" + fdate + \"_CSV.ZIP\"", "def earliestDateStamp():", "def test_dfs_slates_by_date(self):\n pass", "def get_comp(table, club, sdate, fdate):\n conn = pymysql.connect(...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Select all comp entries for a club in the given table.
def get_comp(table, club, sdate, fdate): conn = pymysql.connect(host=host, port=port, user=user, passwd=passwd, db=db) cur = conn.cursor() cur.execute("SELECT comp FROM {0} WHERE club='{1}' and date >= '{2}' and date <= '{3}'".format(table, club, sdate, fdate)) d = cur.fetchall() cur.close() con...
[ "def test_CompositionalSelector(self):\n df = self.df\n tmr = CompositionalSelector()\n for input in [df]:\n with self.subTest(input=input):\n out = tmr.transform(input)", "def collect_clubs():\n clubs = {}\n for club_num in range(1, 11):\n club_variable...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a frequency sweep.
def create_frequency_sweep(self, setupname, unit="GHz", freqstart=1e-3, freqstop=10, sweepname=None, num_of_freq_points=451, sweeptype="Interpolating", interpolation_tol=0.5, interpolation_max_solutions=250): if sweepname is None: sweepn...
[ "def init_freq_sweep(self, start_freq, stop_freq, num_pts):\n curr_phase = 'Running'\n printMsg(curr_phase,\"Setting frequency range from \" + start_freq + \" to \" + stop_freq + \" with \" + str(num_pts) + \" points\")\n self.instrument.write(\"STAR \" + start_freq)\n self.instrument.wr...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a discrete sweep with the specified number of points.
def create_linear_count_sweep(self, setupname, unit, freqstart, freqstop, num_of_freq_points, sweepname=None, save_fields=True, save_rad_fields=False): if sweepname is None: sweepname = generate_unique_name("Sweep") if setupname not in self.setup_names: ...
[ "def create_discrete_sweep(self, setupname, sweepname=\"SinglePoint\", freq=\"1GHz\", save_field=True,\n save_radiating_field=False):\n\n if sweepname is None:\n sweepname = generate_unique_name(\"Sweep\")\n\n if setupname not in self.setup_names:\n r...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a linked antenna.
def create_sbr_linked_antenna(self, source_object, target_cs="Global", solution=None, fieldtype="nearfield", use_composite_ports=False, use_global_current=True, current_conformance="Disable", thin_sources=True, power_fraction="0.95"): if self.s...
[ "def make_link(self, node0, node1):\r\n Link(node0, node1)", "def _create_link(as1: UserAS, as2: UserAS, ixp: IXP) -> IXPLink:\n if1 = _create_peering_interface(as1, ixp)\n if2 = _create_peering_interface(as2, ixp)\n return IXPLink.objects.create(Link.PEER, if1, if2, ixp)", "def initializeTello(...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets SBR+ Tx/Rx antenna settings.
def set_sbr_txrx_settings(self, txrx_settings): if self.solution_type != "SBR+": self.add_error_message("This Boundary only applies to SBR+ Solution") return False id = 0 props=OrderedDict({}) for el, val in txrx_settings.items(): props["Tx/Rx List " +...
[ "def setRxSNR(self, rx_SNR):\n \n self.rx_SNR = rx_SNR", "def telnet_setting(self, params={}):\n\n idle_times = {\n '0': '0',\n # '15m':'900',\n '30m': '1800',\n # '1h':'3600',\n # ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a discrete sweep with a single frequency value.
def create_discrete_sweep(self, setupname, sweepname="SinglePoint", freq="1GHz", save_field=True, save_radiating_field=False): if sweepname is None: sweepname = generate_unique_name("Sweep") if setupname not in self.setup_names: return False ...
[ "def create_frequency_sweep(self, setupname, unit=\"GHz\", freqstart=1e-3, freqstop=10, sweepname=None,\n num_of_freq_points=451, sweeptype=\"Interpolating\",\n interpolation_tol=0.5, interpolation_max_solutions=250):\n\n if sweepname is None:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a voltage source taking the closest edges of two objects.
def create_voltage_source_from_objects(self, startobj, endobject, axisdir=0, sourcename=None, source_on_plane=True): if not self.modeler.primitives.does_object_exists(startobj) or not self.modeler.primitives.does_object_exists( endobject): self._messenger.add_error_message("One or b...
[ "def getClosestUVs(*args, **kwargs):\n \n pass", "def alternate_source(roi, source, name, skydir, model):\r\n roi.get_source(source.name) # make sure selected\r\n altsrc = sources.PointSource(name=name, skydir=skydir, model=model)\r\n saved_model = source.spectral_model\r\n saved_sed = sourc...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a source excitation.
def create_source_excitation(self, sheet_name, point1, point2, sourcename, sourcetype="Voltage"):
    props = OrderedDict({"Objects": [sheet_name],
                         "Direction": OrderedDict({"Start": point1, "End": point2})})
    return self._create_boundary(sourcename, props, sourcetype)
[ "def create_source(\n source_id,\n source_table,\n source_lines,\n origin_params,\n cube_cor_filename,\n cube_std_filename,\n mask_filename,\n skymask_filename,\n spectra_fits_filename,\n segmaps,\n version,\n source_ts,\n profile_fwhm,\n *,\n author=\"\",\n nb_fwhm=2...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates a lumped RLC taking the closest edges of two objects.
def create_lumped_rlc_between_objects(self, startobj, endobject, axisdir=0, sourcename=None, rlctype="Parallel", Rvalue=None, Lvalue=None, Cvalue=None, bound_on_plane=True): if not self.modeler.primitives.does_object_exists(startobj) or not self.modeler.primitives.does...
[ "def makeWedge(line1, line2):\n # this expects a point and two lines.\n # The lines are presented as two points each, and one of those must be in both lines.\n shared1 = shared2 = notShared1 = notShared2 = -1\n for i in range(0, 2):\n if line1[i] in line2:\n shared1 = i\n else:\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create an impedance taking the closest edges of two objects.
def create_impedance_between_objects(self, startobj, endobject, axisdir=0, sourcename=None, resistance=50, reactance=0, is_infground=False, bound_on_plane=True): if not self.modeler.primitives.does_object_exists(startobj) or not self.modeler.primitives.does_object_exist...
[ "def extrapolate_nearest(src, dst):\n\n return extrapolate(src, dst, BorderType.NearestNeighbour)", "def extrapolate_circular(src, dst):\n\n return extrapolate(src, dst, BorderType.Circular)", "def _make_compatible(self, other):\n a1, b1 = self.domain()\n a2, b2 = other.domain()\n a = min...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a wave port on sheet objects created starting from the given sheets.
def create_wave_port_from_sheet(self, sheet, deemb=0, axisdir=0, impedance=50, nummodes=1, portname=None, renorm=True): sheet = self.modeler.convert_to_selections(sheet, True) portnames = [] for obj in sheet: refid, int_start, int_stop = self._get...
[ "def _initialize_sheet(self, sheet_name):\n \n # Creates the sheet\n write_name = sheet_name[:31] if (len(sheet_name) > 31) else sheet_name\n self.sheets[sheet_name] = self.wb.add_worksheet(write_name)\n \n # Widens the first column\n self.sheets[sheet_name].set_column('A:A', 19)\n \n # S...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a lumped port taking one sheet.
def create_lumped_port_to_sheet(self, sheet_name, axisdir=0, impedance=50, portname=None, renorm=True, deemb=False, reference_object_list=[]): if self.solution_type in ["DrivenModal", "DrivenTerminal", "Transient Network"]: point0, point1 = self.modeler.primitive...
[ "def _make_port(self, port):\n return Port(port)", "def add_port(\n self,\n name: Optional[Union[str, int]] = None,\n midpoint: Any = (0, 0),\n width: Union[float64, int, float] = 1,\n orientation: Union[int, int64, float] = 45,\n port: Optional[Port] = None,\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a voltage source taking one sheet.
def assig_voltage_source_to_sheet(self, sheet_name, axisdir=0, sourcename=None): warnings.warn('`assig_voltage_source_to_sheet is deprecated`. Use `assign_voltage_source_to_sheet` instead.', DeprecationWarning) self.assign_voltage_source_to_sheet(sheet_name, axisdir=0, sourcename=...
[ "def assign_voltage_source_to_sheet(self, sheet_name, axisdir=0, sourcename=None):\n\n if self.solution_type in [\"DrivenModal\", \"DrivenTerminal\", \"Transient Network\"]:\n point0, point1 = self.modeler.primitives.get_mid_points_on_dir(sheet_name, axisdir)\n if not sourcename:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a voltage source taking one sheet.
def assign_voltage_source_to_sheet(self, sheet_name, axisdir=0, sourcename=None): if self.solution_type in ["DrivenModal", "DrivenTerminal", "Transient Network"]: point0, point1 = self.modeler.primitives.get_mid_points_on_dir(sheet_name, axisdir) if not sourcename: sourc...
[ "def assig_voltage_source_to_sheet(self, sheet_name, axisdir=0, sourcename=None):\n\n warnings.warn('`assig_voltage_source_to_sheet is deprecated`. Use `assign_voltage_source_to_sheet` instead.',\n DeprecationWarning)\n self.assign_voltage_source_to_sheet(sheet_name, axisdir=0, so...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a current source taking one sheet.
def assign_current_source_to_sheet(self, sheet_name, axisdir=0, sourcename=None): if self.solution_type in ["DrivenModal", "DrivenTerminal", "Transient Network"]: point0, point1 = self.modeler.primitives.get_mid_points_on_dir(sheet_name, axisdir) if not sourcename: sourc...
[ "def get_or_create_new(worksheet_title):\n for worksheet in worksheets:\n if worksheet.title == worksheet_title:\n return worksheet\n return sh.add_worksheet(title=worksheet_title, rows=str(num_students), cols=\"26\")", "def sheets_create(auth, name, parent=None):\n\n return file_create(a...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a Perfect E boundary taking one or more sheets.
def assign_perfecte_to_sheets(self, sheet_list, sourcename=None, is_infinite_gnd=False): if self.solution_type in ["DrivenModal", "DrivenTerminal", "Transient Network", "SBR+"]: if not sourcename: sourcename = generate_unique_name("PerfE") elif sourcename in self.modeler...
[ "def create_math_sheet():\n # create spread objet con id\n spread = create_spread()\n print(\"id: {}\".format(spreadsheet_id))\n\n # create dataframe form loaded spreadsheet\n origin_df = spread.sheet_to_df(index=0)\n print(\"Data frame from sheet:\")\n print(origin_df.head())\n\n # structur...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Assign a Perfect H to sheets.
def assign_perfecth_to_sheets(self, sheet_list, sourcename=None): if self.solution_type in ["DrivenModal", "DrivenTerminal", "Transient Network", "SBR+"]: if not sourcename: sourcename = generate_unique_name("PerfH") elif sourcename in self.modeler.get_boundaries_name()...
[ "def stats_sheets_hwhh(self, sheets, contrast):\n raise NotImplementedError()", "def sheets_hwhh(self, sheets, contrast):\n raise NotImplementedError()", "def apply_info_to_sheet(self, sheet: Sheet, value: S):\n pass", "def _initialize_sheet(self, sheet_name):\n \n # Creates the sheet\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a lumped RLC taking one sheet.
def assign_lumped_rlc_to_sheet(self, sheet_name, axisdir=0, sourcename=None, rlctype="Parallel", Rvalue=None, Lvalue=None, Cvalue=None): if self.solution_type in ["DrivenModal", "DrivenTerminal", "Transient Network", "SBR+"] and ( Rvalue or Lvalue or Cvalue): ...
[ "def create_lumped_rlc_between_objects(self, startobj, endobject, axisdir=0, sourcename=None, rlctype=\"Parallel\",\n Rvalue=None, Lvalue=None, Cvalue=None, bound_on_plane=True):\n\n if not self.modeler.primitives.does_object_exists(startobj) or not self.modeler.primi...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create an impedance taking one sheet.
def assign_impedance_to_sheet(self, sheet_name, sourcename=None, resistance=50, reactance=0, is_infground=False): if self.solution_type in ["DrivenModal", "DrivenTerminal", "Transient Network"]: if not sourcename: sourcename = generate_unique_name("Imped") elif sourcena...
[ "def create_lumped_port_to_sheet(self, sheet_name, axisdir=0, impedance=50, portname=None,\n renorm=True, deemb=False, reference_object_list=[]):\n\n if self.solution_type in [\"DrivenModal\", \"DrivenTerminal\", \"Transient Network\"]:\n point0, point1 = self.mo...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create thickened sheets over a list of input port sheets.
def thicken_port_sheets(self, inputlist, value, internalExtr=True, internalvalue=1): tol = 1e-6 ports_ID = {} aedt_bounding_box = self.modeler.primitives.get_model_bounding_box() directions = {} for el in inputlist: objID = self.modeler.oeditor.GetFaceIDs(el) ...
[ "def generate_spreadsheets(picklist,pul_picklist):\n\tversions = ['nyu','pul']\n\tname, ext = os.path.splitext(picklist.filename)\n\toutfile = str(name+ext)\n\tdeliverables = []\n\n\tfor version in versions:\n\t\tif version == 'pul':\n\t\t\toutfile = 'pul_'+outfile\n\t\t\n\t\twith open(outdir+pul_picklist,'rb') as ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Validate a design based on an expected value and save information to the log file.
def validate_full_design(self, dname=None, outputdir=None, ports=None): self._messenger.add_debug_message("Design Validation Checks") validation_ok = True val_list = [] if not dname: dname = self.design_name if not outputdir: outputdir = self.project_path...
[ "def test_is_valid(self):\n self.assertTrue(self.ld.isValidFile(logfile=self.log.logfile, separator=self.ld.separator,\n expected_length=self.ld.expected_length,\n level_position=self.ld.level_position,\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a scattering report.
def create_scattering(self, plot_name="S Parameter Plot Nominal", sweep_name=None, port_names=None, port_excited=None, variations=None): Families = ["Freq:=", ["All"]] if variations: Families += variations else: Families += self.get_nominal_vari...
[ "def create_scatter_plot(self):\n xy = self.get_x_and_y_as_dict()\n x = xy[\"x\"]\n y = xy[\"y\"]\n plt.scatter(x, y)\n plt.xlabel(\"x\")\n plt.ylabel(\"y\")\n plt.title(\"Scatter plot of x and y values\")\n plt.savefig(f\"{self.save_directory}/task_2_scatter_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set automatic export of the Touchstone file after simulation.
def set_export_touchstone(self, activate, export_dir=""): settings = [] if activate: settings.append("NAME:Design Settings Data") settings.append("Export After Simulation:=") settings.append(True) settings.append("Export Dir:=") settings.append...
[ "def start_exporting(self):\n pass", "def set_auto_download(value):\n lnp.userconfig['downloadBaselines'] = value\n lnp.userconfig.save_data()", "def set_exporting(self, enabled=True):\n safe_update(self.options, {\"exporting\": {\"enabled\": enabled}})", "def export(self, filename='test',...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Assign a radiation boundary to one or more objects (usually airbox objects).
def assign_radiation_boundary_to_objects(self, obj_names, boundary_name=""): object_list = self.modeler.convert_to_selections(obj_names, return_list=True) if boundary_name: rad_name = boundary_name else: rad_name = generate_unique_name("Rad_") return self.create_...
[ "def setBoundaryCondition(self):\n \n \n if self.grid.bc == 'constant' and self.t == 0.0:\n # conditions are fixed to their starting values at edges\n self.__qR__ = np.array([[self.q[0][-1]],[self.q[1][-1]],[self.q[2][-1]]])\n self.__qL__ = np.array([[self.q[0][0]] ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Assign a radiation boundary to one or more faces.
def assign_radiation_boundary_to_faces(self, faces_id, boundary_name=""): if type(faces_id) is not list: faces_list = [int(faces_id)] else: faces_list = [int(i) for i in faces_id] if boundary_name: rad_name = boundary_name else: rad_name =...
[ "def setFaces(self, faces, deep=0):\n self._faces = np.c_[np.tile(faces.shape[1], faces.shape[0]),\n faces].flatten().astype(np.int64)\n self._f = numpy_support.numpy_to_vtkIdTypeArray(self._faces, deep=deep)\n self.polys.SetCells(len(faces), self._f)\n self.me...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the symbol_type of this SymbolUploadBeginRequest.
def symbol_type(self, symbol_type): if symbol_type is None: raise ValueError("Invalid value for `symbol_type`, must not be `None`") # noqa: E501 allowed_values = [undefined, undefined, undefined, undefined, ] # noqa: E501 self._symbol_type = symbol_type
[ "def set_type(self, ttype):\n self.type = ttype\n self.token.type = ttype", "def set_type(self, type):\n return _raw_util.raw_message_sptr_set_type(self, type)", "def FLISetFrameType(self, handle, frame_type):\n frame_type = self._check_valid(frame_type, 'frame type')\n self._...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the client_callback of this SymbolUploadBeginRequest.
def client_callback(self, client_callback): self._client_callback = client_callback
[ "def setClientCallback(self, callback):\n self.client_callback = callback", "def set_fsclient(self, fs_client):\n self._afs = fs_client", "def client_connection(self, client_connection):\n\n self._client_connection = client_connection", "def set_callback_url(self, callback_url):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the build of this SymbolUploadBeginRequest.
def build(self, build): self._build = build
[ "def set_upload_object(self, upload):\n self.upload = upload", "def build_property(self, value) -> None:\n self._build_property = value", "def buildRequest(self, payload):\n # type: (bytearray) -> bytearray\n pass", "def build(self, api_spec, request_data):\n pass", "def s...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return all the type records.
def find_all(cls): return db.session.query(MhrDocumentType).all()
[ "def get_all(self):\n with self._db_connection() as connection:\n return self._make_many(connection.get_all_metadata_types())", "def get(self):\n return get_all_type_docs()", "def list_record_types(self):\r\n return list(self.RECORD_TYPE_MAP.keys())", "def get_all_entries(type)...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
in1: input, input 0. in2: input, input 1. control: input, control line. out: output, output.
def mux(in1, in2, control, out):

    @always_comb
    def muxLogic():
        if (control == 0):
            out.next = in1
        else:
            out.next = in2

    return muxLogic
[ "def mux3(in1, in2, in3, control, out):\n\n @always_comb\n def muxLogic():\n if (control == 0):\n out.next = in1\n elif (control == 1):\n out.next = in2\n elif (control == 2):\n out.next = in3\n\n return muxLogic", "def logic(self):\n #for loop for...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
in1: input, input 0. in2: input, input 1. in3: input, input 2. control: input, control line. out: output, output.
def mux3(in1, in2, in3, control, out):

    @always_comb
    def muxLogic():
        if (control == 0):
            out.next = in1
        elif (control == 1):
            out.next = in2
        elif (control == 2):
            out.next = in3

    return muxLogic
[ "def mux(in1, in2, control, out):\n\n @always_comb\n def muxLogic():\n if (control == 0):\n out.next = in1\n else:\n out.next = in2\n\n return muxLogic", "def logic(self):\n #for loop for creating all of the 2 port muxes required\n output_offset = 0 #Variable for...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Triplet margin loss for TransE.
def triplet_margin_loss(pos_src_emb, pos_edge_emb, pos_dst_emb, neg_src_emb, neg_edge_emb, neg_dst_emb, margin, neg_num, L=1): if L==2: pos_d = tf.reduce_sum(tf.square(pos_src_emb + pos_edge_emb - pos_dst_emb), axis=-1) neg_d = tf.reduce_sum(tf.square(neg_src_em...
[ "def mr_loss(model, triple):\n # S_good = mg.dot(triple[1], model(triple[0])))\n # S_bad = mg.dot(triple[1], model(triple[2])))\n # margin_ranking_loss(S_good, S_bad, y, margin)\n\n good_images = []\n good_captions = []\n bad_images = []\n\n for good_img, good_cap, bad_img in triple:\n g...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
triplet softplus loss for DistMult.
def triplet_softplus_loss(pos_src_emb, pos_edge_emb, pos_dst_emb, neg_src_emb, neg_edge_emb, neg_dst_emb): pos_s = tf.reduce_sum(pos_src_emb * pos_edge_emb * pos_dst_emb, axis=-1) neg_s = tf.reduce_sum(neg_src_emb * neg_edge_emb * neg_dst_emb, axis=-1) loss = (tf.reduce_mean(tf.nn.softpl...
[ "def softplus_loss(self,X,Z):\n \n return T.sum((X - T.nnet.softplus(Z)) **2, axis = 1)", "def triplet_loss(y_true, y_pred, alpha = 0.2):\r\n \r\n anchor, positive, negative = y_pred[0], y_pred[1], y_pred[2]\r\n \r\n ### START CODE HERE ### (≈ 4 lines)\r\n # Step 1: Compute the (encod...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Checks if the user is authenticated; if not, returns false. If the request is GET, it checks if the user has a valid profile of any type; if the request is POST, it checks if the user has a valid regular User profile (because only users can add orders); if the request is PATCH, it checks if the user has a valid driver profile, (b...
def has_permission(self, request, view): if not request.user.is_authenticated: return False if request.method == 'GET': if hasattr(request.user, 'profile') or hasattr(request.user, 'driver_profile') or hasattr(request.user, ...
[ "def test_func(self):\n profile_id = self.get_object().id\n if self.request.user.id == profile_id:\n # then we can allow updating\n return True\n return False", "def has_object_permission(self, request, view, obj):\n\n if request.method == 'GET':\n if h...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Checks if the user has permissions to get or update a certain order. If the request is GET, it checks if the user is either the user who ordered it, the driver, or one of the shops in that order's products list. If the request is PATCH, it checks if the user is the order's driver.
def has_object_permission(self, request, view, obj): if request.method == 'GET': if hasattr(request.user, 'profile'): if obj.user == request.user.profile: return True if hasattr(request.user, 'driver_profile'): if obj.driver == reques...
[ "def test_user_cannot_update_other_users_orders(self):\n self.client.force_authenticate(user=self.user2)\n data = {\n \"item_name\": \"updated item1\",\n }\n res = self.client.patch(self.order_url, data)\n self.assertEqual(res.status_code, status.HTTP_403_FORBIDDEN)\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function to insert subtour elimination constraints
def eliminate_subtour(prob, G): # Initialize s to an empty list to provide it as an output # parameter s = [] try: prob.getlpsol(s, None, None, None) except: print("Can't get LP solution at this node, bailing out") return 0 # bail out # Starting from node 1, gather al...
[ "def constraints(lbs, ubs):\n\n lp = np.zeros((lbs.shape[0],2)) # point locateing on the lower linear constraint\n up = np.zeros((lbs.shape[0],2)) # point locating on the upper linear constraint\n\n A = (np.tanh(ubs)-np.tanh(lbs))/(ubs-lbs)\n\n # lbs>=0 and ubs>=0\n dims_pos = np.logical_and(lbs > 0,...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Like `tensor[np.arange(len(tensor)), indices]` in numpy.
def get_at_indices(tensor, indices): counter = tf.range(tf.shape(indices, out_type=indices.dtype)[0]) return tf.gather_nd(tensor, tf.stack((counter, indices), -1))
[ "def select_indices(tensor, indices):\n return tensor.gather(1, indices.unsqueeze(1)).squeeze()", "def indices():\n return [1.0, 3.0, 1.0, 3.0, 1.0]", "def indices(self):\n nx, ny, nz = self.shape()\n return [(ix,iy,iz) for ix in range(nx) for iy in range(ny) for iz in range(nz)]", "def id...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get catalog row given a source name.
def get_source_by_name(self, source_name): index = np.where(self.table['Source_Name'] == source_name) return self.table[index[0]]
[ "def get_source_by_name(self, name):\r\n sources = self.call(GetSourcesList())\r\n for source in sources.getSources():\r\n if source[\"name\"] == name:\r\n return source\r\n return None", "def get_row(self, index):\n if self.name_list:\n return self.name_list[index]\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Empty where queries should return empty arrays. Here, we replace AqHTTP.post with a mock post that returns an empty array.
def test_where_queries_should_return_empty_array(monkeypatch, mock_login_post): # Create a mock session monkeypatch.setattr(requests, "post", mock_login_post) aquarium_url = "http://52.52.525.52" session = AqSession("username", "password", aquarium_url) def mock_post(*args, **kwargs): retu...
[ "def test_post_empty_query(mock_app):\n\n # When a POST request is missing data\n response = mock_app.test_client().post(\"\".join([API_V1, \"query?\"]), headers=HEADERS)\n\n # Then it should return error\n assert response.status_code == 400", "def test_split_queries_none(self, write_query_seq):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Empty find queries should return None. Here, we replace AqHTTP.post with a mock post that returns error code 422 (which Aquarium throws when it cannot find the model).
def test_find_query_returns_none(monkeypatch, mock_login_post): class MockResponse: def __init__(self, json_data, status_code): self.json_data = json_data self.status_code = status_code def json(self): return self.json_data # Create a mock session monke...
[ "def test_post_empty_query(mock_app):\n\n # When a POST request is missing data\n response = mock_app.test_client().post(\"\".join([API_V1, \"query?\"]), headers=HEADERS)\n\n # Then it should return error\n assert response.status_code == 400", "async def test_update_record_not_found(client: Client) ->...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Decorator that skips tests if we don't have the memory image.
def RequireTestImage(f): @functools.wraps(f) def Decorator(testinstance): image_path = os.path.join(testinstance.base_path, "win7_trial_64bit.raw") if os.access(image_path, os.R_OK): return f(testinstance) else: return testinstance.skipTest("No win7_trial_64bit.raw memory image," ...
[ "def test_write_nopages():\n with TempFileName('nopages') as fname:\n with TiffWriter(fname) as tif:\n pass\n with TiffFile(fname) as tif:\n assert len(tif.pages) == 0\n tif.asarray()\n if not SKIP_VALIDATE:\n with pytest.raises(ValueError):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tests that we can run a simple DLLList Action.
def testDLLList(self): request = rdf_rekall_types.RekallRequest() request.plugins = [ # Only use these methods for listing processes. rdf_rekall_types.PluginRequest( plugin="dlllist", args=dict(proc_regex="dumpit", method="PsActiveProcessHead")), ] session_id = s...
[ "def test_list_operations(self):\n pass", "def test_list_action_post_is_listed(self):\n action = self.createActionAndActionTypeAndSetItOnApi(\"jIO Web Section\", \"post\")\n self.tic()\n action_list = self.web_site.api.ERP5Site_getAllActionListForAPIPost()\n self.assertEqual(len(action_list), 1...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return a feature vector of this cluster
def get_feature_vector(self): return self.feature_vector[:]
[ "def features_as_vector(self):\r\n return self._features_as_vector_fcn()", "def features_as_vector_centered(self):\r\n return self.features_as_vector - self.VECTOR_OFFSET", "def cluster_vectors(self, cluster):\n return [self.data[idx] for idx in self.labels[cluster]]", "def feature_list(s...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Copy this cluster object
def __copy__(self): new = Cluster() new.nodes = self.nodes[:] new.feature_vector = self.feature_vector[:] return new
[ "def copy(self):\n #return self.__class__(dict(self._cmtynodes))\n import copy\n new = copy.copy(self)\n new._cmtynodes = dict(self._cmtynodes)\n return new", "def clone(self):\n\n clone = copy(self)\n clone.graph = copy(self.graph)\n clone.labels = dict(sel...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
No merging on this downloader because it is too big.
def merge(self): pass
[ "def downloadChunks(url):\n global download_list\n baseFile = os.path.basename(url)\n\n \n #move the file to a more uniq path\n\n os.umask(0002)\n\n temp_path = os.getcwd()\n\n try:\n\n file = baseFile\n if os.path.exists(file):\n print baseFile, \"already exists\"\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Protocol of communication between ToyCord DHT Servers.
def __init__(self, host, master, defined_port = None): self.operations = {'quit': self._quit, 'join': self._join, 'next': self._update_my_front, 'prev': self._update_my_back, 'depart': self._depart, ...
[ "def protocol_send(self, data, sock):", "def connectionMade(self):\n super().connectionMade()\n # negociate telnet options\n self.transport.negotiationMap[LINEMODE] = self.telnet_LINEMODE\n self.transport.negotiationMap[PLUGIN] = self.telnet_PLUGIN\n self.transport.negotiationMa...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Servers join the DHT
def DHT_join(self): back_ip, back_port, back_hash,\ front_ip, front_port, front_hash,\ self.replication, self.strategy, self.id = \ find_neighbors(self.hash, self.m_host, self.m_port , self.host, self.port) self.replication = int(self.replication) # Send a request to ...
[ "async def on_server_join(self, server):", "def join_meshnet():\n log.info(\"Joining the meshnet\")\n mapping = get_config_mapping()\n join_consul_neighbours(mapping)", "def __init__(self, host, master, defined_port = None):\n self.operations = {'quit': self._quit,\n 'j...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
After receiving an overlay request
def _overlay(self,data, sock): _,start_node, host_list = data.split(':') start_node_host ,start_node_port = start_node.split("/") # If this node is the one that started the overlay => cycle # So we return the overlay of the DHT if start_node_port == str(self.port) and host_list ...
[ "def onParcelOverlay(self, packet):\n\n # unpack the data\n sequence_id = packet['ParcelData'][0]['SequenceID']\n data = packet['ParcelData'][0]['Data']\n\n # store the data\n # ToDo: make sense of the binary blob in data\n self.parcel_overlay[sequence_id] = data", "def d...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }